Split bundler up into multiple files (#20192)

Author: Zack Radisic
Date: 2025-06-06 18:34:18 -07:00
Committed by: GitHub
Parent: 5b0523a32a
Commit: fa1d37b4e3
39 changed files with 16151 additions and 14144 deletions


@@ -63,6 +63,7 @@ const BunBuildOptions = struct {
/// `./build/codegen` or equivalent
codegen_path: []const u8,
no_llvm: bool,
override_no_export_cpp_apis: bool,
cached_options_module: ?*Module = null,
windows_shim: ?WindowsShim = null,
@@ -95,6 +96,7 @@ const BunBuildOptions = struct {
opts.addOption(bool, "enable_asan", this.enable_asan);
opts.addOption([]const u8, "reported_nodejs_version", b.fmt("{}", .{this.reported_nodejs_version}));
opts.addOption(bool, "zig_self_hosted_backend", this.no_llvm);
opts.addOption(bool, "override_no_export_cpp_apis", this.override_no_export_cpp_apis);
const mod = opts.createModule();
this.cached_options_module = mod;
@@ -206,6 +208,7 @@ pub fn build(b: *Build) !void {
const obj_format = b.option(ObjectFormat, "obj_format", "Output file for object files") orelse .obj;
const no_llvm = b.option(bool, "no_llvm", "Experiment with Zig self hosted backends. No stability guaranteed") orelse false;
const override_no_export_cpp_apis = b.option(bool, "override-no-export-cpp-apis", "Override the default export_cpp_apis logic to disable exports") orelse false;
var build_options = BunBuildOptions{
.target = target,
@@ -217,6 +220,7 @@ pub fn build(b: *Build) !void {
.codegen_path = codegen_path,
.codegen_embed = codegen_embed,
.no_llvm = no_llvm,
.override_no_export_cpp_apis = override_no_export_cpp_apis,
.version = try Version.parse(bun_version),
.canary_revision = canary: {
@@ -476,6 +480,7 @@ fn addMultiCheck(
.codegen_path = root_build_options.codegen_path,
.no_llvm = root_build_options.no_llvm,
.enable_asan = root_build_options.enable_asan,
.override_no_export_cpp_apis = root_build_options.override_no_export_cpp_apis,
};
var obj = addBunObject(b, &options);
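// Illustrative consumer sketch (not part of this diff): any Zig module can
// read the generated flag at comptime, assuming the module produced by
// `createModule()` above is imported under the name "build_options". The
// gating below is simplified; per the option's description, the flag only
// overrides the default export_cpp_apis logic.
const build_options = @import("build_options");
pub const export_cpp_apis = !build_options.override_no_export_cpp_apis;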


@@ -250,8 +250,39 @@ src/bun.js/webcore/TextEncoder.zig
src/bun.js/webcore/TextEncoderStreamEncoder.zig
src/bun.js/WTFTimer.zig
src/bun.zig
src/bundler/AstBuilder.zig
src/bundler/bundle_v2.zig
src/bundler/BundleThread.zig
src/bundler/Chunk.zig
src/bundler/DeferredBatchTask.zig
src/bundler/entry_points.zig
src/bundler/Graph.zig
src/bundler/linker_context/computeChunks.zig
src/bundler/linker_context/computeCrossChunkDependencies.zig
src/bundler/linker_context/convertStmtsForChunk.zig
src/bundler/linker_context/convertStmtsForChunkForDevServer.zig
src/bundler/linker_context/doStep5.zig
src/bundler/linker_context/findAllImportedPartsInJSOrder.zig
src/bundler/linker_context/findImportedCSSFilesInJSOrder.zig
src/bundler/linker_context/findImportedFilesInCSSOrder.zig
src/bundler/linker_context/generateChunksInParallel.zig
src/bundler/linker_context/generateCodeForFileInChunkJS.zig
src/bundler/linker_context/generateCodeForLazyExport.zig
src/bundler/linker_context/generateCompileResultForCssChunk.zig
src/bundler/linker_context/generateCompileResultForHtmlChunk.zig
src/bundler/linker_context/generateCompileResultForJSChunk.zig
src/bundler/linker_context/postProcessCSSChunk.zig
src/bundler/linker_context/postProcessHTMLChunk.zig
src/bundler/linker_context/postProcessJSChunk.zig
src/bundler/linker_context/prepareCssAstsForChunk.zig
src/bundler/linker_context/renameSymbolsInChunk.zig
src/bundler/linker_context/scanImportsAndExports.zig
src/bundler/linker_context/writeOutputFilesToDisk.zig
src/bundler/LinkerContext.zig
src/bundler/LinkerGraph.zig
src/bundler/ParseTask.zig
src/bundler/ServerComponentParseTask.zig
src/bundler/ThreadPool.zig
src/bunfig.zig
src/cache.zig
src/ci_info.zig


@@ -24,7 +24,7 @@
},
"scripts": {
"build": "bun run build:debug",
"watch": "zig build check --watch -fincremental --prominent-compile-errors --global-cache-dir build/debug/zig-check-cache --zig-lib-dir vendor/zig/lib",
"watch": "zig build check --watch -fincremental --prominent-compile-errors --global-cache-dir build/debug/zig-check-cache --zig-lib-dir vendor/zig/lib -Doverride-no-export-cpp-apis=true",
"watch-windows": "zig build check-windows --watch -fincremental --prominent-compile-errors --global-cache-dir build/debug/zig-check-cache --zig-lib-dir vendor/zig/lib",
"bd:v": "(bun run --silent build:debug &> /tmp/bun.debug.build.log || (cat /tmp/bun.debug.build.log && rm -rf /tmp/bun.debug.build.log && exit 1)) && rm -f /tmp/bun.debug.build.log && ./build/debug/bun-debug",
"bd": "BUN_DEBUG_QUIET_LOGS=1 bun bd:v",


@@ -1820,6 +1820,8 @@ pub const StringMap = struct {
pub const DotEnv = @import("./env_loader.zig");
pub const bundle_v2 = @import("./bundler/bundle_v2.zig");
pub const js_ast = bun.bundle_v2.js_ast;
pub const Loader = bundle_v2.Loader;
pub const BundleV2 = bundle_v2.BundleV2;
pub const ParseTask = bundle_v2.ParseTask;

src/bundler/AstBuilder.zig (new file, 371 lines)

@@ -0,0 +1,371 @@
/// Utility to construct `Ast`s intended for generated code, such as the
/// boundary modules when dealing with server components. This is a saner
/// alternative to building a string, then sending it through `js_parser`
///
/// For in-depth details on the fields, most of these are documented
/// inside of `js_parser`
pub const AstBuilder = struct {
allocator: std.mem.Allocator,
source: *const Logger.Source,
source_index: u31,
stmts: std.ArrayListUnmanaged(Stmt),
scopes: std.ArrayListUnmanaged(*Scope),
symbols: std.ArrayListUnmanaged(Symbol),
import_records: std.ArrayListUnmanaged(ImportRecord),
named_imports: js_ast.Ast.NamedImports,
named_exports: js_ast.Ast.NamedExports,
import_records_for_current_part: std.ArrayListUnmanaged(u32),
export_star_import_records: std.ArrayListUnmanaged(u32),
current_scope: *Scope,
log: Logger.Log,
module_ref: Ref,
declared_symbols: js_ast.DeclaredSymbol.List,
/// When set, codegen is altered
hot_reloading: bool,
hmr_api_ref: Ref,
// stub fields for ImportScanner duck typing
comptime options: js_parser.Parser.Options = .{
.jsx = .{},
.bundle = true,
},
comptime import_items_for_namespace: struct {
pub fn get(_: @This(), _: Ref) ?js_parser.ImportItemForNamespaceMap {
return null;
}
} = .{},
pub const parser_features = struct {
pub const typescript = false;
};
pub fn init(allocator: std.mem.Allocator, source: *const Logger.Source, hot_reloading: bool) !AstBuilder {
const scope = try allocator.create(Scope);
scope.* = .{
.kind = .entry,
.label_ref = null,
.parent = null,
.generated = .{},
};
var ab: AstBuilder = .{
.allocator = allocator,
.current_scope = scope,
.source = source,
.source_index = @intCast(source.index.get()),
.stmts = .{},
.scopes = .{},
.symbols = .{},
.import_records = .{},
.import_records_for_current_part = .{},
.named_imports = .{},
.named_exports = .{},
.log = Logger.Log.init(allocator),
.export_star_import_records = .{},
.declared_symbols = .{},
.hot_reloading = hot_reloading,
.module_ref = undefined,
.hmr_api_ref = undefined,
};
ab.module_ref = try ab.newSymbol(.other, "module");
ab.hmr_api_ref = try ab.newSymbol(.other, "hmr");
return ab;
}
pub fn pushScope(p: *AstBuilder, kind: Scope.Kind) !*js_ast.Scope {
try p.scopes.ensureUnusedCapacity(p.allocator, 1);
try p.current_scope.children.ensureUnusedCapacity(p.allocator, 1);
const scope = try p.allocator.create(Scope);
scope.* = .{
.kind = kind,
.label_ref = null,
.parent = p.current_scope,
.generated = .{},
};
p.current_scope.children.appendAssumeCapacity(scope);
p.scopes.appendAssumeCapacity(p.current_scope);
p.current_scope = scope;
return scope;
}
pub fn popScope(p: *AstBuilder) void {
p.current_scope = p.scopes.pop();
}
pub fn newSymbol(p: *AstBuilder, kind: Symbol.Kind, identifier: []const u8) !Ref {
const inner_index: Ref.Int = @intCast(p.symbols.items.len);
try p.symbols.append(p.allocator, .{
.kind = kind,
.original_name = identifier,
});
const ref: Ref = .{
.inner_index = inner_index,
.source_index = p.source_index,
.tag = .symbol,
};
try p.current_scope.generated.push(p.allocator, ref);
try p.declared_symbols.append(p.allocator, .{
.ref = ref,
.is_top_level = p.scopes.items.len == 0 or p.current_scope == p.scopes.items[0],
});
return ref;
}
pub fn getSymbol(p: *AstBuilder, ref: Ref) *Symbol {
bun.assert(ref.source_index == p.source.index.get());
return &p.symbols.items[ref.inner_index];
}
pub fn addImportRecord(p: *AstBuilder, path: []const u8, kind: ImportKind) !u32 {
const index = p.import_records.items.len;
try p.import_records.append(p.allocator, .{
.path = bun.fs.Path.init(path),
.kind = kind,
.range = .{},
});
return @intCast(index);
}
pub fn addImportStmt(
p: *AstBuilder,
path: []const u8,
identifiers_to_import: anytype,
) ![identifiers_to_import.len]Expr {
var out: [identifiers_to_import.len]Expr = undefined;
const record = try p.addImportRecord(path, .stmt);
var path_name = bun.fs.PathName.init(path);
const name = try strings.append(p.allocator, "import_", try path_name.nonUniqueNameString(p.allocator));
const namespace_ref = try p.newSymbol(.other, name);
const clauses = try p.allocator.alloc(js_ast.ClauseItem, identifiers_to_import.len);
inline for (identifiers_to_import, &out, clauses) |import_id_untyped, *out_ref, *clause| {
const import_id: []const u8 = import_id_untyped; // must be given '[N][]const u8'
const ref = try p.newSymbol(.import, import_id);
if (p.hot_reloading) {
p.getSymbol(ref).namespace_alias = .{
.namespace_ref = namespace_ref,
.alias = import_id,
.import_record_index = record,
};
}
out_ref.* = p.newExpr(E.ImportIdentifier{ .ref = ref });
clause.* = .{
.name = .{ .loc = Logger.Loc.Empty, .ref = ref },
.original_name = import_id,
.alias = import_id,
};
}
try p.appendStmt(S.Import{
.namespace_ref = namespace_ref,
.import_record_index = record,
.items = clauses,
.is_single_line = identifiers_to_import.len < 1,
});
return out;
}
pub fn appendStmt(p: *AstBuilder, data: anytype) !void {
try p.stmts.ensureUnusedCapacity(p.allocator, 1);
p.stmts.appendAssumeCapacity(p.newStmt(data));
}
pub fn newStmt(p: *AstBuilder, data: anytype) Stmt {
_ = p;
return Stmt.alloc(@TypeOf(data), data, Logger.Loc.Empty);
}
pub fn newExpr(p: *AstBuilder, data: anytype) Expr {
_ = p;
return Expr.init(@TypeOf(data), data, Logger.Loc.Empty);
}
pub fn newExternalSymbol(p: *AstBuilder, name: []const u8) !Ref {
const ref = try p.newSymbol(.other, name);
const sym = p.getSymbol(ref);
sym.must_not_be_renamed = true;
return ref;
}
pub fn toBundledAst(p: *AstBuilder, target: options.Target) !js_ast.BundledAst {
// TODO: missing import scanner
bun.assert(p.scopes.items.len == 0);
const module_scope = p.current_scope;
var parts = try Part.List.initCapacity(p.allocator, 2);
parts.len = 2;
parts.mut(0).* = .{};
parts.mut(1).* = .{
.stmts = p.stmts.items,
.can_be_removed_if_unused = false,
// pretend that every symbol was used
.symbol_uses = uses: {
var map: Part.SymbolUseMap = .{};
try map.ensureTotalCapacity(p.allocator, p.symbols.items.len);
for (0..p.symbols.items.len) |i| {
map.putAssumeCapacity(Ref{
.tag = .symbol,
.source_index = p.source_index,
.inner_index = @intCast(i),
}, .{ .count_estimate = 1 });
}
break :uses map;
},
};
const single_u32 = try BabyList(u32).fromSlice(p.allocator, &.{1});
var top_level_symbols_to_parts = js_ast.Ast.TopLevelSymbolToParts{};
try top_level_symbols_to_parts.entries.setCapacity(p.allocator, module_scope.generated.len);
top_level_symbols_to_parts.entries.len = module_scope.generated.len;
const slice = top_level_symbols_to_parts.entries.slice();
for (
slice.items(.key),
slice.items(.value),
module_scope.generated.slice(),
) |*k, *v, ref| {
k.* = ref;
v.* = single_u32;
}
try top_level_symbols_to_parts.reIndex(p.allocator);
// For more details on this section, look at js_parser.toAST
// This is mimicking how it calls ImportScanner
if (p.hot_reloading) {
var hmr_transform_ctx = js_parser.ConvertESMExportsForHmr{
.last_part = parts.last() orelse
unreachable, // was definitely allocated
.is_in_node_modules = p.source.path.isNodeModule(),
};
try hmr_transform_ctx.stmts.ensureTotalCapacity(p.allocator, prealloc_count: {
// get an estimate of how many statements there are going to be
const count = p.stmts.items.len;
break :prealloc_count count + 2;
});
_ = try js_parser.ImportScanner.scan(AstBuilder, p, p.stmts.items, false, true, &hmr_transform_ctx);
try hmr_transform_ctx.finalize(p, parts.slice());
const new_parts = parts.slice();
// preserve original capacity
parts.len = @intCast(new_parts.len);
bun.assert(new_parts.ptr == parts.ptr);
} else {
const result = try js_parser.ImportScanner.scan(AstBuilder, p, p.stmts.items, false, false, {});
parts.mut(1).stmts = result.stmts;
}
parts.mut(1).declared_symbols = p.declared_symbols;
parts.mut(1).scopes = p.scopes.items;
parts.mut(1).import_record_indices = BabyList(u32).fromList(p.import_records_for_current_part);
return .{
.parts = parts,
.module_scope = module_scope.*,
.symbols = js_ast.Symbol.List.fromList(p.symbols),
.exports_ref = Ref.None,
.wrapper_ref = Ref.None,
.module_ref = p.module_ref,
.import_records = ImportRecord.List.fromList(p.import_records),
.export_star_import_records = &.{},
.approximate_newline_count = 1,
.exports_kind = .esm,
.named_imports = p.named_imports,
.named_exports = p.named_exports,
.top_level_symbols_to_parts = top_level_symbols_to_parts,
.char_freq = .{},
.flags = .{},
.target = target,
.top_level_await_keyword = Logger.Range.None,
// .nested_scope_slot_counts = if (p.options.features.minify_identifiers)
// renamer.assignNestedScopeSlots(p.allocator, p.scopes.items[0], p.symbols.items)
// else
// js_ast.SlotCounts{},
};
}
// stub methods for ImportScanner duck typing
pub fn generateTempRef(ab: *AstBuilder, name: ?[]const u8) Ref {
return ab.newSymbol(.other, name orelse "temp") catch bun.outOfMemory();
}
pub fn recordExport(p: *AstBuilder, _: Logger.Loc, alias: []const u8, ref: Ref) !void {
if (p.named_exports.get(alias)) |_| {
// Duplicate exports are an error
Output.panic(
"In generated file, duplicate export \"{s}\"",
.{alias},
);
} else {
try p.named_exports.put(p.allocator, alias, .{ .alias_loc = Logger.Loc.Empty, .ref = ref });
}
}
pub fn recordExportedBinding(p: *AstBuilder, binding: Binding) void {
switch (binding.data) {
.b_missing => {},
.b_identifier => |ident| {
p.recordExport(binding.loc, p.symbols.items[ident.ref.innerIndex()].original_name, ident.ref) catch unreachable;
},
.b_array => |array| {
for (array.items) |prop| {
p.recordExportedBinding(prop.binding);
}
},
.b_object => |obj| {
for (obj.properties) |prop| {
p.recordExportedBinding(prop.value);
}
},
}
}
pub fn ignoreUsage(p: *AstBuilder, ref: Ref) void {
_ = p;
_ = ref;
}
pub fn panic(p: *AstBuilder, comptime fmt: []const u8, args: anytype) noreturn {
_ = p;
Output.panic(fmt, args);
}
pub fn @"module.exports"(p: *AstBuilder, loc: Logger.Loc) Expr {
return p.newExpr(E.Dot{ .name = "exports", .name_loc = loc, .target = p.newExpr(E.Identifier{ .ref = p.module_ref }) });
}
};
const bun = @import("bun");
const string = bun.string;
const Output = bun.Output;
const strings = bun.strings;
const std = @import("std");
const Logger = @import("../logger.zig");
const options = @import("../options.zig");
const js_parser = bun.js_parser;
const Part = js_ast.Part;
const js_ast = @import("../js_ast.zig");
pub const Ref = @import("../ast/base.zig").Ref;
const BabyList = @import("../baby_list.zig").BabyList;
const ImportRecord = bun.ImportRecord;
const ImportKind = bun.ImportKind;
pub const Index = @import("../ast/base.zig").Index;
const Symbol = js_ast.Symbol;
const Stmt = js_ast.Stmt;
const Expr = js_ast.Expr;
const E = js_ast.E;
const S = js_ast.S;
const Binding = js_ast.Binding;
const renamer = bun.renamer;
const Scope = js_ast.Scope;
const Loc = Logger.Loc;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
pub const ParseTask = bun.bundle_v2.ParseTask;
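// A minimal usage sketch (hypothetical call site; `allocator` and `source`
// are assumed to exist, and the import path and target are illustrative):
fn buildExampleModule(allocator: std.mem.Allocator, source: *const Logger.Source) !js_ast.BundledAst {
    var ab = try AstBuilder.init(allocator, source, false);
    const exprs = try ab.addImportStmt("react", [_][]const u8{"createElement"});
    // Emit `createElement;` as a statement so the imported symbol is used.
    try ab.appendStmt(S.SExpr{ .value = exprs[0] });
    return ab.toBundledAst(.browser);
}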


@@ -0,0 +1,190 @@
/// Used to keep the bundle thread from spinning on Windows
pub fn timerCallback(_: *bun.windows.libuv.Timer) callconv(.C) void {}
/// Originally, bake.DevServer required a separate bundling thread, but that was
/// later removed. The bundling thread's scheduling logic is generalized over
/// the completion structure.
///
/// CompletionStruct's interface:
///
/// - `configureBundler` is used to configure the `bun.Transpiler`.
/// - `completeOnBundleThread` is used to tell the task that it is done.
pub fn BundleThread(CompletionStruct: type) type {
return struct {
const Self = @This();
waker: bun.Async.Waker,
ready_event: std.Thread.ResetEvent,
queue: bun.UnboundedQueue(CompletionStruct, .next),
generation: bun.Generation = 0,
/// To initialize, put this somewhere in memory, and then call `spawn()`
pub const uninitialized: Self = .{
.waker = undefined,
.queue = .{},
.generation = 0,
.ready_event = .{},
};
pub fn spawn(instance: *Self) !std.Thread {
const thread = try std.Thread.spawn(.{}, threadMain, .{instance});
instance.ready_event.wait();
return thread;
}
/// Lazily-initialized singleton. This is used for `Bun.build` since the
/// bundle thread may not be needed.
pub const singleton = struct {
var once = std.once(loadOnceImpl);
var instance: ?*Self = null;
// Blocks the calling thread until the bun build thread is created.
// std.once also blocks other callers of this function until the first caller is done.
fn loadOnceImpl() void {
const bundle_thread = bun.default_allocator.create(Self) catch bun.outOfMemory();
bundle_thread.* = uninitialized;
instance = bundle_thread;
// Spawn the bun build thread.
const os_thread = bundle_thread.spawn() catch
Output.panic("Failed to spawn bun build thread", .{});
os_thread.detach();
}
pub fn get() *Self {
once.call();
return instance.?;
}
pub fn enqueue(completion: *CompletionStruct) void {
get().enqueue(completion);
}
};
pub fn enqueue(instance: *Self, completion: *CompletionStruct) void {
instance.queue.push(completion);
instance.waker.wake();
}
fn threadMain(instance: *Self) void {
Output.Source.configureNamedThread("Bundler");
instance.waker = bun.Async.Waker.init() catch @panic("Failed to create waker");
// Unblock the calling thread so it can continue.
instance.ready_event.set();
var timer: bun.windows.libuv.Timer = undefined;
if (bun.Environment.isWindows) {
timer.init(instance.waker.loop.uv_loop);
timer.start(std.math.maxInt(u64), std.math.maxInt(u64), &timerCallback);
}
var has_bundled = false;
while (true) {
while (instance.queue.pop()) |completion| {
generateInNewThread(completion, instance.generation) catch |err| {
completion.result = .{ .err = err };
completion.completeOnBundleThread();
};
has_bundled = true;
}
instance.generation +|= 1;
if (has_bundled) {
bun.Mimalloc.mi_collect(false);
has_bundled = false;
}
_ = instance.waker.wait();
}
}
/// This is called from `Bun.build` in JavaScript.
fn generateInNewThread(completion: *CompletionStruct, generation: bun.Generation) !void {
var heap = try ThreadlocalArena.init();
defer heap.deinit();
const allocator = heap.allocator();
var ast_memory_allocator = try allocator.create(js_ast.ASTMemoryAllocator);
ast_memory_allocator.* = .{ .allocator = allocator };
ast_memory_allocator.reset();
ast_memory_allocator.push();
const transpiler = try allocator.create(bun.Transpiler);
try completion.configureBundler(transpiler, allocator);
transpiler.resolver.generation = generation;
const this = try BundleV2.init(
transpiler,
null, // TODO: Kit
allocator,
JSC.AnyEventLoop.init(allocator),
false,
JSC.WorkPool.get(),
heap,
);
this.plugins = completion.plugins;
this.completion = switch (CompletionStruct) {
BundleV2.JSBundleCompletionTask => completion,
else => @compileError("Unknown completion struct: " ++ @typeName(CompletionStruct)),
};
completion.transpiler = this;
defer {
this.graph.pool.reset();
ast_memory_allocator.pop();
this.deinitWithoutFreeingArena();
}
errdefer {
// Wait for wait groups to finish. There still may be ongoing work.
this.linker.source_maps.line_offset_wait_group.wait();
this.linker.source_maps.quoted_contents_wait_group.wait();
var out_log = Logger.Log.init(bun.default_allocator);
this.transpiler.log.appendToWithRecycled(&out_log, true) catch bun.outOfMemory();
completion.log = out_log;
}
completion.result = .{ .value = .{
.output_files = try this.runFromJSInNewThread(transpiler.options.entry_points),
} };
var out_log = Logger.Log.init(bun.default_allocator);
this.transpiler.log.appendToWithRecycled(&out_log, true) catch bun.outOfMemory();
completion.log = out_log;
completion.completeOnBundleThread();
}
};
}
const Transpiler = bun.Transpiler;
const bun = @import("bun");
const Output = bun.Output;
const Environment = bun.Environment;
const default_allocator = bun.default_allocator;
const std = @import("std");
const Logger = @import("../logger.zig");
const options = @import("../options.zig");
const js_ast = @import("../js_ast.zig");
const linker = @import("../linker.zig");
pub const Ref = @import("../ast/base.zig").Ref;
const ThreadlocalArena = @import("../allocators/mimalloc_arena.zig").Arena;
const allocators = @import("../allocators.zig");
const Timer = @import("../system_timer.zig");
pub const Index = @import("../ast/base.zig").Index;
const JSC = bun.JSC;
const Async = bun.Async;
const bake = bun.bake;
const bundler = bun.bundle_v2;
const BundleV2 = bundler.BundleV2;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
pub const ParseTask = bun.bundle_v2.ParseTask;
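// Call-path sketch for `Bun.build` (the wrapper function is illustrative;
// the singleton API is from this file):
const JSBundleThread = BundleThread(BundleV2.JSBundleCompletionTask);

fn enqueueFromJS(completion: *BundleV2.JSBundleCompletionTask) void {
    // The first call spawns the detached "Bundler" thread via std.once;
    // later calls just push onto the unbounded queue and wake the waker.
    JSBundleThread.singleton.enqueue(completion);
}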

src/bundler/Chunk.zig (new file, 620 lines)

@@ -0,0 +1,620 @@
pub const ChunkImport = struct {
chunk_index: u32,
import_kind: ImportKind,
};
pub const Chunk = struct {
/// This is a random string and is used to represent the output path of this
/// chunk before the final output path has been computed. See OutputPiece
/// for more info on this technique.
unique_key: string = "",
files_with_parts_in_chunk: std.AutoArrayHashMapUnmanaged(Index.Int, void) = .{},
/// We must not keep pointers to this type until all chunks have been allocated.
entry_bits: AutoBitSet = undefined,
final_rel_path: string = "",
/// The path template used to generate `final_rel_path`
template: PathTemplate = .{},
/// For code splitting
cross_chunk_imports: BabyList(ChunkImport) = .{},
content: Content,
entry_point: Chunk.EntryPoint = .{},
is_executable: bool = false,
has_html_chunk: bool = false,
output_source_map: sourcemap.SourceMapPieces,
intermediate_output: IntermediateOutput = .{ .empty = {} },
isolated_hash: u64 = std.math.maxInt(u64),
renamer: renamer.Renamer = undefined,
compile_results_for_chunk: []CompileResult = &.{},
pub inline fn isEntryPoint(this: *const Chunk) bool {
return this.entry_point.is_entry_point;
}
pub fn getJSChunkForHTML(this: *const Chunk, chunks: []Chunk) ?*Chunk {
const entry_point_id = this.entry_point.entry_point_id;
for (chunks) |*other| {
if (other.content == .javascript) {
if (other.entry_point.entry_point_id == entry_point_id) {
return other;
}
}
}
return null;
}
pub fn getCSSChunkForHTML(this: *const Chunk, chunks: []Chunk) ?*Chunk {
const entry_point_id = this.entry_point.entry_point_id;
for (chunks) |*other| {
if (other.content == .css) {
if (other.entry_point.entry_point_id == entry_point_id) {
return other;
}
}
}
return null;
}
pub inline fn entryBits(this: *const Chunk) *const AutoBitSet {
return &this.entry_bits;
}
pub const Order = struct {
source_index: Index.Int = 0,
distance: u32 = 0,
tie_breaker: u32 = 0,
pub fn lessThan(_: @This(), a: Order, b: Order) bool {
return (a.distance < b.distance) or
(a.distance == b.distance and a.tie_breaker < b.tie_breaker);
}
/// Sort so files closest to an entry point come first. If two files are
/// equidistant to an entry point, then break the tie by sorting on the
/// stable source index derived from the DFS over all entry points.
pub fn sort(a: []Order) void {
std.sort.pdq(Order, a, Order{}, lessThan);
}
};
/// TODO: rewrite this
/// This implementation is just slow.
/// Can we make the JSPrinter itself track this without increasing
/// complexity a lot?
pub const IntermediateOutput = union(enum) {
/// If the chunk has references to other chunks, then "pieces" contains
/// the contents of the chunk. Another joiner will have to be
/// constructed later when merging the pieces together.
///
/// See OutputPiece's documentation comment for more details.
pieces: bun.BabyList(OutputPiece),
/// If the chunk doesn't have any references to other chunks, then
/// `joiner` contains the contents of the chunk. This is more efficient
/// because it avoids doing a join operation twice.
joiner: StringJoiner,
empty: void,
pub fn allocatorForSize(size: usize) std.mem.Allocator {
if (size >= 512 * 1024)
return std.heap.page_allocator
else
return bun.default_allocator;
}
pub const CodeResult = struct {
buffer: []u8,
shifts: []sourcemap.SourceMapShifts,
};
pub fn code(
this: *IntermediateOutput,
allocator_to_use: ?std.mem.Allocator,
parse_graph: *const Graph,
linker_graph: *const LinkerGraph,
import_prefix: []const u8,
chunk: *Chunk,
chunks: []Chunk,
display_size: ?*usize,
enable_source_map_shifts: bool,
) !CodeResult {
return switch (enable_source_map_shifts) {
inline else => |source_map_shifts| this.codeWithSourceMapShifts(
allocator_to_use,
parse_graph,
linker_graph,
import_prefix,
chunk,
chunks,
display_size,
source_map_shifts,
),
};
}
pub fn codeWithSourceMapShifts(
this: *IntermediateOutput,
allocator_to_use: ?std.mem.Allocator,
graph: *const Graph,
linker_graph: *const LinkerGraph,
import_prefix: []const u8,
chunk: *Chunk,
chunks: []Chunk,
display_size: ?*usize,
comptime enable_source_map_shifts: bool,
) !CodeResult {
const additional_files = graph.input_files.items(.additional_files);
const unique_key_for_additional_files = graph.input_files.items(.unique_key_for_additional_file);
switch (this.*) {
.pieces => |*pieces| {
const entry_point_chunks_for_scb = linker_graph.files.items(.entry_point_chunk_index);
var shift = if (enable_source_map_shifts)
sourcemap.SourceMapShifts{
.after = .{},
.before = .{},
};
var shifts = if (enable_source_map_shifts)
try std.ArrayList(sourcemap.SourceMapShifts).initCapacity(bun.default_allocator, pieces.len + 1);
if (enable_source_map_shifts)
shifts.appendAssumeCapacity(shift);
var count: usize = 0;
var from_chunk_dir = std.fs.path.dirnamePosix(chunk.final_rel_path) orelse "";
if (strings.eqlComptime(from_chunk_dir, "."))
from_chunk_dir = "";
for (pieces.slice()) |piece| {
count += piece.data_len;
switch (piece.query.kind) {
.chunk, .asset, .scb => {
const index = piece.query.index;
const file_path = switch (piece.query.kind) {
.asset => brk: {
const files = additional_files[index];
if (files.len == 0) {
Output.panic("Internal error: missing asset file", .{});
}
const output_file = files.last().?.output_file;
break :brk graph.additional_output_files.items[output_file].dest_path;
},
.chunk => chunks[index].final_rel_path,
.scb => chunks[entry_point_chunks_for_scb[index]].final_rel_path,
.none => unreachable,
};
const cheap_normalizer = cheapPrefixNormalizer(
import_prefix,
if (from_chunk_dir.len == 0)
file_path
else
bun.path.relativePlatform(from_chunk_dir, file_path, .posix, false),
);
count += cheap_normalizer[0].len + cheap_normalizer[1].len;
},
.none => {},
}
}
if (display_size) |amt| {
amt.* = count;
}
const debug_id_len = if (enable_source_map_shifts and FeatureFlags.source_map_debug_id)
std.fmt.count("\n//# debugId={}\n", .{bun.sourcemap.DebugIDFormatter{ .id = chunk.isolated_hash }})
else
0;
const total_buf = try (allocator_to_use orelse allocatorForSize(count)).alloc(u8, count + debug_id_len);
var remain = total_buf;
for (pieces.slice()) |piece| {
const data = piece.data();
if (enable_source_map_shifts) {
var data_offset = sourcemap.LineColumnOffset{};
data_offset.advance(data);
shift.before.add(data_offset);
shift.after.add(data_offset);
}
if (data.len > 0)
@memcpy(remain[0..data.len], data);
remain = remain[data.len..];
switch (piece.query.kind) {
.asset, .chunk, .scb => {
const index = piece.query.index;
const file_path = switch (piece.query.kind) {
.asset => brk: {
const files = additional_files[index];
bun.assert(files.len > 0);
const output_file = files.last().?.output_file;
if (enable_source_map_shifts) {
shift.before.advance(unique_key_for_additional_files[index]);
}
break :brk graph.additional_output_files.items[output_file].dest_path;
},
.chunk => brk: {
const piece_chunk = chunks[index];
if (enable_source_map_shifts) {
shift.before.advance(piece_chunk.unique_key);
}
break :brk piece_chunk.final_rel_path;
},
.scb => brk: {
const piece_chunk = chunks[entry_point_chunks_for_scb[index]];
if (enable_source_map_shifts) {
shift.before.advance(piece_chunk.unique_key);
}
break :brk piece_chunk.final_rel_path;
},
else => unreachable,
};
// normalize windows paths to '/'
bun.path.platformToPosixInPlace(u8, @constCast(file_path));
const cheap_normalizer = cheapPrefixNormalizer(
import_prefix,
if (from_chunk_dir.len == 0)
file_path
else
bun.path.relativePlatform(from_chunk_dir, file_path, .posix, false),
);
if (cheap_normalizer[0].len > 0) {
@memcpy(remain[0..cheap_normalizer[0].len], cheap_normalizer[0]);
remain = remain[cheap_normalizer[0].len..];
if (enable_source_map_shifts)
shift.after.advance(cheap_normalizer[0]);
}
if (cheap_normalizer[1].len > 0) {
@memcpy(remain[0..cheap_normalizer[1].len], cheap_normalizer[1]);
remain = remain[cheap_normalizer[1].len..];
if (enable_source_map_shifts)
shift.after.advance(cheap_normalizer[1]);
}
if (enable_source_map_shifts)
shifts.appendAssumeCapacity(shift);
},
.none => {},
}
}
if (enable_source_map_shifts and FeatureFlags.source_map_debug_id) {
// This comment must go before the //# sourceMappingURL comment
remain = remain[(std.fmt.bufPrint(
remain,
"\n//# debugId={}\n",
.{bun.sourcemap.DebugIDFormatter{ .id = chunk.isolated_hash }},
) catch bun.outOfMemory()).len..];
}
bun.assert(remain.len == 0);
bun.assert(total_buf.len == count + debug_id_len);
return .{
.buffer = total_buf,
.shifts = if (enable_source_map_shifts)
shifts.items
else
&[_]sourcemap.SourceMapShifts{},
};
},
.joiner => |*joiner| {
const allocator = allocator_to_use orelse allocatorForSize(joiner.len);
if (display_size) |amt| {
amt.* = joiner.len;
}
const buffer = brk: {
if (enable_source_map_shifts and FeatureFlags.source_map_debug_id) {
// This comment must go before the //# sourceMappingURL comment
const debug_id_fmt = std.fmt.allocPrint(
graph.allocator,
"\n//# debugId={}\n",
.{bun.sourcemap.DebugIDFormatter{ .id = chunk.isolated_hash }},
) catch bun.outOfMemory();
break :brk try joiner.doneWithEnd(allocator, debug_id_fmt);
}
break :brk try joiner.done(allocator);
};
return .{
.buffer = buffer,
.shifts = &[_]sourcemap.SourceMapShifts{},
};
},
.empty => return .{
.buffer = "",
.shifts = &[_]sourcemap.SourceMapShifts{},
},
}
}
};
/// An issue with asset files and server component boundaries is they
/// contain references to output paths, but those paths are not known until
/// very late in the bundle. The solution is to have a magic word in the
/// bundle text (BundleV2.unique_key, a random u64; impossible to guess).
/// When a file wants a path to an emitted chunk, it emits the unique key
/// in hex followed by the kind of path it wants:
///
/// `74f92237f4a85a6aA00000009` --> `./some-asset.png`
///  ^--------------^|^------^
///    unique_key    |   .query.index
///              .query.kind
///
/// An output piece is the concatenation of source code text and an output
/// path, in that order. An array of pieces makes up an entire file.
pub const OutputPiece = struct {
/// Pointer and length split to reduce struct size
data_ptr: [*]const u8,
data_len: u32,
query: Query,
pub fn data(this: OutputPiece) []const u8 {
return this.data_ptr[0..this.data_len];
}
pub const Query = packed struct(u32) {
index: u30,
kind: Kind,
pub const Kind = enum(u2) {
/// The last piece in an array uses this to indicate it is just data
none,
/// Given a source index, print the asset's output
asset,
/// Given a chunk index, print the chunk's output path
chunk,
/// Given a server component boundary index, print the chunk's output path
scb,
};
pub const none: Query = .{ .index = 0, .kind = .none };
};
pub fn init(data_slice: []const u8, query: Query) OutputPiece {
return .{
.data_ptr = data_slice.ptr,
.data_len = @intCast(data_slice.len),
.query = query,
};
}
};
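// Illustrative data only: a chunk body that references asset #9 could be
// encoded as two pieces. When joining, each piece's text is written out and
// then its query is resolved into the final output path.
const example_pieces = [_]OutputPiece{
    OutputPiece.init("const url = \"", .{ .index = 9, .kind = .asset }),
    OutputPiece.init("\";\n", OutputPiece.Query.none),
};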
pub const OutputPieceIndex = OutputPiece.Query;
pub const EntryPoint = packed struct(u64) {
/// Index into `Graph.input_files`
source_index: u32 = 0,
entry_point_id: ID = 0,
is_entry_point: bool = false,
is_html: bool = false,
/// so `EntryPoint` can be a u64
pub const ID = u30;
};
pub const JavaScriptChunk = struct {
files_in_chunk_order: []const Index.Int = &.{},
parts_in_chunk_in_order: []const PartRange = &.{},
// for code splitting
exports_to_other_chunks: std.ArrayHashMapUnmanaged(Ref, string, Ref.ArrayHashCtx, false) = .{},
imports_from_other_chunks: ImportsFromOtherChunks = .{},
cross_chunk_prefix_stmts: BabyList(Stmt) = .{},
cross_chunk_suffix_stmts: BabyList(Stmt) = .{},
/// Indexes to CSS chunks. Currently this will only ever be zero or one
/// item long, but smarter css chunking will allow multiple js entry points
/// to share a css file, or let an entry point contain multiple css files.
///
/// Mutated while sorting chunks in `computeChunks`
css_chunks: []u32 = &.{},
};
pub const CssChunk = struct {
imports_in_chunk_in_order: BabyList(CssImportOrder),
/// When creating a chunk, this is to be an uninitialized slice with
/// length of `imports_in_chunk_in_order`
///
/// Multiple imports may refer to the same file/stylesheet, but may need to
/// wrap them in conditions (e.g. a layer).
///
/// When we go through the `prepareCssAstsForChunk()` step, each import will
/// create a shallow copy of the file's AST (just dereferencing the pointer).
asts: []bun.css.BundlerStyleSheet,
};
const CssImportKind = enum {
source_index,
external_path,
import_layers,
};
pub const CssImportOrder = struct {
conditions: BabyList(bun.css.ImportConditions) = .{},
condition_import_records: BabyList(ImportRecord) = .{},
kind: union(enum) {
/// Represents earlier imports that have been made redundant by later ones (see `isConditionalImportRedundant`)
/// We don't want to redundantly print the rules of these redundant imports
/// BUT, the imports may include layers.
/// We'll just print layer name declarations so that the original ordering is preserved.
layers: Layers,
external_path: bun.fs.Path,
source_index: Index,
},
pub const Layers = bun.ptr.Cow(bun.BabyList(bun.css.LayerName), struct {
const Self = bun.BabyList(bun.css.LayerName);
pub fn copy(self: *const Self, allocator: std.mem.Allocator) Self {
return self.deepClone2(allocator);
}
pub fn deinit(self: *Self, a: std.mem.Allocator) void {
// do shallow deinit since `LayerName` has
// allocations in arena
self.deinitWithAllocator(a);
}
});
pub fn hash(this: *const CssImportOrder, hasher: anytype) void {
// TODO: conditions, condition_import_records
bun.writeAnyToHasher(hasher, std.meta.activeTag(this.kind));
switch (this.kind) {
.layers => |layers| {
for (layers.inner().sliceConst()) |layer| {
for (layer.v.slice(), 0..) |layer_name, i| {
const is_last = i == layers.inner().len - 1;
if (is_last) {
hasher.update(layer_name);
} else {
hasher.update(layer_name);
hasher.update(".");
}
}
}
hasher.update("\x00");
},
.external_path => |path| hasher.update(path.text),
.source_index => |idx| bun.writeAnyToHasher(hasher, idx),
}
}
pub fn fmt(this: *const CssImportOrder, ctx: *LinkerContext) CssImportOrderDebug {
return .{
.inner = this,
.ctx = ctx,
};
}
pub const CssImportOrderDebug = struct {
inner: *const CssImportOrder,
ctx: *LinkerContext,
pub fn format(this: *const CssImportOrderDebug, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
try writer.print("{s} = ", .{@tagName(this.inner.kind)});
switch (this.inner.kind) {
.layers => |layers| {
try writer.print("[", .{});
const l = layers.inner();
for (l.sliceConst(), 0..) |*layer, i| {
if (i > 0) try writer.print(", ", .{});
try writer.print("\"{}\"", .{layer});
}
try writer.print("]", .{});
},
.external_path => |path| {
try writer.print("\"{s}\"", .{path.pretty});
},
.source_index => |source_index| {
const source = this.ctx.parse_graph.input_files.items(.source)[source_index.get()];
try writer.print("{d} ({s})", .{ source_index.get(), source.path.text });
},
}
}
};
};
pub const ImportsFromOtherChunks = std.AutoArrayHashMapUnmanaged(Index.Int, CrossChunkImport.Item.List);
pub const Content = union(enum) {
javascript: JavaScriptChunk,
css: CssChunk,
html,
pub fn sourcemap(this: *const Content, default: options.SourceMapOption) options.SourceMapOption {
return switch (this.*) {
.javascript => default,
.css => .none, // TODO: css source maps
.html => .none,
};
}
pub fn loader(this: *const Content) Loader {
return switch (this.*) {
.javascript => .js,
.css => .css,
.html => .html,
};
}
pub fn ext(this: *const Content) string {
return switch (this.*) {
.javascript => "js",
.css => "css",
.html => "html",
};
}
};
};
const bun = @import("bun");
const string = bun.string;
const Output = bun.Output;
const strings = bun.strings;
const default_allocator = bun.default_allocator;
const FeatureFlags = bun.FeatureFlags;
const std = @import("std");
const options = @import("../options.zig");
const js_ast = @import("../js_ast.zig");
const sourcemap = bun.sourcemap;
const StringJoiner = bun.StringJoiner;
pub const Ref = @import("../ast/base.zig").Ref;
const BabyList = @import("../baby_list.zig").BabyList;
const ImportRecord = bun.ImportRecord;
const ImportKind = bun.ImportKind;
const Loader = options.Loader;
pub const Index = @import("../ast/base.zig").Index;
const Stmt = js_ast.Stmt;
const AutoBitSet = bun.bit_set.AutoBitSet;
const renamer = bun.renamer;
const bundler = bun.bundle_v2;
const BundleV2 = bundler.BundleV2;
const Graph = bundler.Graph;
const LinkerGraph = bundler.LinkerGraph;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
pub const ParseTask = bun.bundle_v2.ParseTask;
const PathTemplate = bundler.PathTemplate;
const PartRange = bundler.PartRange;
const EntryPoint = bundler.EntryPoint;
const CrossChunkImport = bundler.CrossChunkImport;
const CompileResult = bundler.CompileResult;
const cheapPrefixNormalizer = bundler.cheapPrefixNormalizer;
const LinkerContext = bundler.LinkerContext;


@@ -0,0 +1,52 @@
/// This task is run once all parse and resolve tasks have been complete
/// and we have deferred onLoad plugins that we need to resume
///
/// It enqueues a task to be run on the JS thread which resolves the promise
/// for every onLoad callback which called `.defer()`.
pub const DeferredBatchTask = @This();
running: if (Environment.isDebug) bool else u0 = if (Environment.isDebug) false else 0,
pub fn init(this: *DeferredBatchTask) void {
if (comptime Environment.isDebug) bun.debugAssert(!this.running);
this.* = .{
.running = if (comptime Environment.isDebug) false else 0,
};
}
pub fn getBundleV2(this: *DeferredBatchTask) *bun.BundleV2 {
return @alignCast(@fieldParentPtr("drain_defer_task", this));
}
pub fn schedule(this: *DeferredBatchTask) void {
if (comptime Environment.isDebug) {
bun.assert(!this.running);
this.running = true; // mark as scheduled; cleared again in deinit()
}
this.getBundleV2().jsLoopForPlugins().enqueueTaskConcurrent(JSC.ConcurrentTask.create(JSC.Task.init(this)));
}
pub fn deinit(this: *DeferredBatchTask) void {
if (comptime Environment.isDebug) {
this.running = false;
}
}
pub fn runOnJSThread(this: *DeferredBatchTask) void {
defer this.deinit();
var bv2 = this.getBundleV2();
bv2.plugins.?.drainDeferred(
if (bv2.completion) |completion|
completion.result == .err
else
false,
);
}
const bun = @import("bun");
const Environment = bun.Environment;
pub const Ref = @import("../ast/base.zig").Ref;
pub const Index = @import("../ast/base.zig").Index;
const JSC = bun.JSC;
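// Sketch of how the task is re-armed once the scan counter drains; this
// mirrors `Graph.drainDeferredTasks` (the wrapper name is illustrative):
fn resumeDeferredPlugins(bv2: *bun.BundleV2) void {
    bv2.drain_defer_task.init(); // debug-asserts it is not already running
    bv2.drain_defer_task.schedule(); // enqueue onto the JS thread's event loop
}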

src/bundler/Graph.zig (new file, 128 lines)

@@ -0,0 +1,128 @@
pub const Graph = @This();
pool: *ThreadPool,
heap: ThreadlocalArena = .{},
/// This allocator is thread-local to the Bundler thread
/// .allocator == .heap.allocator()
allocator: std.mem.Allocator = undefined,
/// Mapping user-specified entry points to their Source Index
entry_points: std.ArrayListUnmanaged(Index) = .{},
/// Every source index has an associated InputFile
input_files: MultiArrayList(InputFile) = .{},
/// Every source index has an associated Ast
/// When a parse is in progress / queued, it is `Ast.empty`
ast: MultiArrayList(JSAst) = .{},
/// During the scan + parse phase, this value keeps a count of the remaining
/// tasks. Once it hits zero, the scan phase ends and linking begins. Note
/// that if `deferred_pending > 0`, it means there are plugin callbacks
/// to invoke before linking, which can initiate another scan phase.
///
/// Increment and decrement this via `incrementScanCounter` and
/// `decrementScanCounter`, as asynchronous bundles check for `0` in the
/// decrement function, instead of at the top of the event loop.
///
/// The following all count toward `pending_items`:
///
/// - Parsing a file (ParseTask and ServerComponentParseTask)
/// - onResolve and onLoad functions
/// - Resolving an onDefer promise
pending_items: u32 = 0,
/// When an `onLoad` plugin calls `.defer()`, the count from `pending_items`
/// is "moved" into this counter (pending_items -= 1; deferred_pending += 1)
///
/// When `pending_items` hits zero and there are deferred pending tasks, those
/// tasks will be run, and the count is "moved" back to `pending_items`
deferred_pending: u32 = 0,
/// Maps a hashed path string to a source index, if it exists in the compilation.
/// Instead of accessing this directly, consider using BundleV2.pathToSourceIndexMap
path_to_source_index_map: PathToSourceIndexMap = .{},
/// When using server components, a completely separate file listing is
/// required to avoid incorrect inlining of defines and dependencies on
/// other files. This is relevant for files that are shared between server and
/// client with no "use <side>" directive; such files must be duplicated.
///
/// To make linking easier, this second graph contains indices into the
/// same `.ast` and `.input_files` arrays.
client_path_to_source_index_map: PathToSourceIndexMap = .{},
/// When using server components with React, there is an additional module
/// graph which is used to contain SSR-versions of all client components;
/// the SSR graph. The difference between the SSR graph and the server
/// graph is that this one does not apply '--conditions react-server'
///
/// In Bun's React Framework, it includes SSR versions of 'react' and
/// 'react-dom' (an export condition is used to provide a different
/// implementation for RSC, which is potentially how they implement
/// server-only features such as async components).
ssr_path_to_source_index_map: PathToSourceIndexMap = .{},
/// When Server Components is enabled, this holds a list of all boundary
/// files. This happens for all files with a "use <side>" directive.
server_component_boundaries: ServerComponentBoundary.List = .{},
estimated_file_loader_count: usize = 0,
/// For Bake, a count of the CSS asts is used to make precise
/// pre-allocations without re-iterating the file listing.
css_file_count: usize = 0,
additional_output_files: std.ArrayListUnmanaged(options.OutputFile) = .{},
kit_referenced_server_data: bool,
kit_referenced_client_data: bool,
pub const InputFile = struct {
source: Logger.Source,
loader: options.Loader = options.Loader.file,
side_effects: _resolver.SideEffects,
allocator: std.mem.Allocator = bun.default_allocator,
additional_files: BabyList(AdditionalFile) = .{},
unique_key_for_additional_file: string = "",
content_hash_for_additional_file: u64 = 0,
is_plugin_file: bool = false,
};
/// Schedule a task to be run on the JS thread which resolves the promise of
/// each `.defer()` called in an onLoad plugin.
///
/// Returns true if there were more tasks queued.
pub fn drainDeferredTasks(this: *@This(), transpiler: *BundleV2) bool {
transpiler.thread_lock.assertLocked();
if (this.deferred_pending > 0) {
this.pending_items += this.deferred_pending;
this.deferred_pending = 0;
transpiler.drain_defer_task.init();
transpiler.drain_defer_task.schedule();
return true;
}
return false;
}
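// Sketch of the counter discipline described above (the real increment and
// decrement helpers live on BundleV2; these bodies are illustrative):
fn exampleScheduleParse(g: *Graph) void {
    g.pending_items += 1; // the scan phase cannot end until this drains
}
fn exampleOnLoadDefer(g: *Graph) void {
    // `.defer()` moves one unit of work out of the live scan count
    g.pending_items -= 1;
    g.deferred_pending += 1;
}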
const bun = @import("bun");
const string = bun.string;
const default_allocator = bun.default_allocator;
const std = @import("std");
const Logger = @import("../logger.zig");
const options = @import("../options.zig");
const js_ast = @import("../js_ast.zig");
pub const Ref = @import("../ast/base.zig").Ref;
const ThreadlocalArena = @import("../allocators/mimalloc_arena.zig").Arena;
const BabyList = @import("../baby_list.zig").BabyList;
const _resolver = @import("../resolver/resolver.zig");
const allocators = @import("../allocators.zig");
const JSAst = js_ast.BundledAst;
const Loader = options.Loader;
pub const Index = @import("../ast/base.zig").Index;
const MultiArrayList = bun.MultiArrayList;
const ThreadPool = bun.bundle_v2.ThreadPool;
const ParseTask = bun.bundle_v2.ParseTask;
const PathToSourceIndexMap = bun.bundle_v2.PathToSourceIndexMap;
const ServerComponentBoundary = js_ast.ServerComponentBoundary;
const BundleV2 = bun.bundle_v2.BundleV2;
const AdditionalFile = bun.bundle_v2.AdditionalFile;

File diff suppressed because it is too large

src/bundler/LinkerGraph.zig (new file, 467 lines)

@@ -0,0 +1,467 @@
pub const LinkerGraph = @This();
const debug = Output.scoped(.LinkerGraph, false);
files: File.List = .{},
files_live: BitSet = undefined,
entry_points: EntryPoint.List = .{},
symbols: js_ast.Symbol.Map = .{},
allocator: std.mem.Allocator,
code_splitting: bool = false,
// This is an alias from Graph
// it is not a clone!
ast: MultiArrayList(JSAst) = .{},
meta: MultiArrayList(JSMeta) = .{},
/// We should avoid traversing all files in the bundle, because the linker
/// should be able to run a linking operation on a large bundle where only
/// a few files are needed (e.g. an incremental compilation scenario). This
/// holds all files that could possibly be reached through the entry points.
/// If you need to iterate over all files in the linking operation, iterate
/// over this array. This array is also sorted in a deterministic ordering
/// to help ensure deterministic builds (source indices are random).
reachable_files: []Index = &[_]Index{},
/// Index from `.parse_graph.input_files` to index in `.files`
stable_source_indices: []const u32 = &[_]u32{},
is_scb_bitset: BitSet = .{},
has_client_components: bool = false,
has_server_components: bool = false,
/// This is for cross-module inlining of detected inlinable constants
// const_values: js_ast.Ast.ConstValuesMap = .{},
/// This is for cross-module inlining of TypeScript enum constants
ts_enums: js_ast.Ast.TsEnumsMap = .{},
pub fn init(allocator: std.mem.Allocator, file_count: usize) !LinkerGraph {
return LinkerGraph{
.allocator = allocator,
.files_live = try BitSet.initEmpty(allocator, file_count),
};
}
pub fn runtimeFunction(this: *const LinkerGraph, name: string) Ref {
return this.ast.items(.named_exports)[Index.runtime.value].get(name).?.ref;
}
pub fn generateNewSymbol(this: *LinkerGraph, source_index: u32, kind: Symbol.Kind, original_name: string) Ref {
const source_symbols = &this.symbols.symbols_for_source.slice()[source_index];
var ref = Ref.init(
@truncate(source_symbols.len),
@truncate(source_index),
false,
);
ref.tag = .symbol;
// TODO: will this crash on resize due to using threadlocal mimalloc heap?
source_symbols.push(
this.allocator,
.{
.kind = kind,
.original_name = original_name,
},
) catch unreachable;
this.ast.items(.module_scope)[source_index].generated.push(this.allocator, ref) catch unreachable;
return ref;
}
pub fn generateRuntimeSymbolImportAndUse(
graph: *LinkerGraph,
source_index: Index.Int,
entry_point_part_index: Index,
name: []const u8,
count: u32,
) !void {
if (count == 0) return;
debug("generateRuntimeSymbolImportAndUse({s}) for {d}", .{ name, source_index });
const ref = graph.runtimeFunction(name);
try graph.generateSymbolImportAndUse(
source_index,
entry_point_part_index.get(),
ref,
count,
Index.runtime,
);
}
pub fn addPartToFile(
graph: *LinkerGraph,
id: u32,
part: Part,
) !u32 {
var parts: *Part.List = &graph.ast.items(.parts)[id];
const part_id = @as(u32, @truncate(parts.len));
try parts.push(graph.allocator, part);
var top_level_symbol_to_parts_overlay: ?*TopLevelSymbolToParts = null;
const Iterator = struct {
graph: *LinkerGraph,
id: u32,
top_level_symbol_to_parts_overlay: *?*TopLevelSymbolToParts,
part_id: u32,
pub fn next(self: *@This(), ref: Ref) void {
var overlay = brk: {
if (self.top_level_symbol_to_parts_overlay.*) |out| {
break :brk out;
}
const out = &self.graph.meta.items(.top_level_symbol_to_parts_overlay)[self.id];
self.top_level_symbol_to_parts_overlay.* = out;
break :brk out;
};
var entry = overlay.getOrPut(self.graph.allocator, ref) catch unreachable;
if (!entry.found_existing) {
if (self.graph.ast.items(.top_level_symbols_to_parts)[self.id].get(ref)) |original_parts| {
var list = std.ArrayList(u32).init(self.graph.allocator);
list.ensureTotalCapacityPrecise(original_parts.len + 1) catch unreachable;
list.appendSliceAssumeCapacity(original_parts.slice());
list.appendAssumeCapacity(self.part_id);
entry.value_ptr.* = .init(list.items);
} else {
entry.value_ptr.* = BabyList(u32).fromSlice(self.graph.allocator, &.{self.part_id}) catch bun.outOfMemory();
}
} else {
entry.value_ptr.push(self.graph.allocator, self.part_id) catch unreachable;
}
}
};
var ctx = Iterator{
.graph = graph,
.id = id,
.part_id = part_id,
.top_level_symbol_to_parts_overlay = &top_level_symbol_to_parts_overlay,
};
js_ast.DeclaredSymbol.forEachTopLevelSymbol(&parts.ptr[part_id].declared_symbols, &ctx, Iterator.next);
return part_id;
}
pub fn generateSymbolImportAndUse(
g: *LinkerGraph,
source_index: u32,
part_index: u32,
ref: Ref,
use_count: u32,
source_index_to_import_from: Index,
) !void {
if (use_count == 0) return;
var parts_list = g.ast.items(.parts)[source_index].slice();
var part: *Part = &parts_list[part_index];
// Mark this symbol as used by this part
var uses = &part.symbol_uses;
var uses_entry = uses.getOrPut(g.allocator, ref) catch unreachable;
if (!uses_entry.found_existing) {
uses_entry.value_ptr.* = .{ .count_estimate = use_count };
} else {
uses_entry.value_ptr.count_estimate += use_count;
}
const exports_ref = g.ast.items(.exports_ref)[source_index];
const module_ref = g.ast.items(.module_ref)[source_index];
if (!exports_ref.isNull() and ref.eql(exports_ref)) {
g.ast.items(.flags)[source_index].uses_exports_ref = true;
}
if (!module_ref.isNull() and ref.eql(module_ref)) {
g.ast.items(.flags)[source_index].uses_module_ref = true;
}
// null ref shouldn't be there.
bun.assert(!ref.isEmpty());
// Track that this specific symbol was imported
if (source_index_to_import_from.get() != source_index) {
const imports_to_bind = &g.meta.items(.imports_to_bind)[source_index];
try imports_to_bind.put(g.allocator, ref, .{
.data = .{
.source_index = source_index_to_import_from,
.import_ref = ref,
},
});
}
// Pull in all parts that declare this symbol
var dependencies = &part.dependencies;
const part_ids = g.topLevelSymbolToParts(source_index_to_import_from.get(), ref);
const new_dependencies = try dependencies.writableSlice(g.allocator, part_ids.len);
for (part_ids, new_dependencies) |part_id, *dependency| {
dependency.* = .{
.source_index = source_index_to_import_from,
.part_index = @as(u32, @truncate(part_id)),
};
}
}
pub fn topLevelSymbolToParts(g: *LinkerGraph, id: u32, ref: Ref) []u32 {
if (g.meta.items(.top_level_symbol_to_parts_overlay)[id].get(ref)) |overlay| {
return overlay.slice();
}
if (g.ast.items(.top_level_symbols_to_parts)[id].get(ref)) |list| {
return list.slice();
}
return &.{};
}
pub fn load(
this: *LinkerGraph,
entry_points: []const Index,
sources: []const Logger.Source,
server_component_boundaries: ServerComponentBoundary.List,
dynamic_import_entry_points: []const Index.Int,
) !void {
const scb = server_component_boundaries.slice();
try this.files.setCapacity(this.allocator, sources.len);
this.files.zero();
this.files_live = try BitSet.initEmpty(
this.allocator,
sources.len,
);
this.files.len = sources.len;
var files = this.files.slice();
var entry_point_kinds = files.items(.entry_point_kind);
{
const kinds = std.mem.sliceAsBytes(entry_point_kinds);
@memset(kinds, 0);
}
// Setup entry points
{
try this.entry_points.setCapacity(this.allocator, entry_points.len + server_component_boundaries.list.len + dynamic_import_entry_points.len);
this.entry_points.len = entry_points.len;
const source_indices = this.entry_points.items(.source_index);
const path_strings: []bun.PathString = this.entry_points.items(.output_path);
{
const output_was_auto_generated = std.mem.sliceAsBytes(this.entry_points.items(.output_path_was_auto_generated));
@memset(output_was_auto_generated, 0);
}
for (entry_points, path_strings, source_indices) |i, *path_string, *source_index| {
const source = sources[i.get()];
if (comptime Environment.allow_assert) {
bun.assert(source.index.get() == i.get());
}
entry_point_kinds[source.index.get()] = EntryPoint.Kind.user_specified;
path_string.* = bun.PathString.init(source.path.text);
source_index.* = source.index.get();
}
for (dynamic_import_entry_points) |id| {
bun.assert(this.code_splitting); // this should never be a thing without code splitting
if (entry_point_kinds[id] != .none) {
// You could dynamic import a file that is already an entry point
continue;
}
const source = &sources[id];
entry_point_kinds[id] = EntryPoint.Kind.dynamic_import;
this.entry_points.appendAssumeCapacity(.{
.source_index = id,
.output_path = bun.PathString.init(source.path.text),
.output_path_was_auto_generated = true,
});
}
var import_records_list: []ImportRecord.List = this.ast.items(.import_records);
try this.meta.setCapacity(this.allocator, import_records_list.len);
this.meta.len = this.ast.len;
this.meta.zero();
if (scb.list.len > 0) {
this.is_scb_bitset = BitSet.initEmpty(this.allocator, this.files.len) catch unreachable;
// Index all SCBs into the bitset. This is needed so chunking
// can track the chunks that SCBs belong to.
for (scb.list.items(.use_directive), scb.list.items(.source_index), scb.list.items(.reference_source_index)) |use, original_id, ref_id| {
switch (use) {
.none => {},
.client => {
this.is_scb_bitset.set(original_id);
this.is_scb_bitset.set(ref_id);
},
.server => {
bun.todoPanic(@src(), "um", .{});
},
}
}
// For client components, the import record index currently points to the original source index, instead of the reference source index.
for (this.reachable_files) |source_id| {
for (import_records_list[source_id.get()].slice()) |*import_record| {
if (import_record.source_index.isValid() and this.is_scb_bitset.isSet(import_record.source_index.get())) {
import_record.source_index = Index.init(
scb.getReferenceSourceIndex(import_record.source_index.get()) orelse
// If this gets hit, might be fine to switch this to `orelse continue`
// not confident in this assertion
Output.panic("Missing SCB boundary for file #{d}", .{import_record.source_index.get()}),
);
bun.assert(import_record.source_index.isValid()); // did not generate
}
}
}
} else {
this.is_scb_bitset = .{};
}
}
// Setup files
{
var stable_source_indices = try this.allocator.alloc(Index, sources.len + 1);
// set it to max value so that if we access an invalid one, it crashes
@memset(std.mem.sliceAsBytes(stable_source_indices), 255);
for (this.reachable_files, 0..) |source_index, i| {
stable_source_indices[source_index.get()] = Index.source(i);
}
@memset(
files.items(.distance_from_entry_point),
(LinkerGraph.File{}).distance_from_entry_point,
);
this.stable_source_indices = @as([]const u32, @ptrCast(stable_source_indices));
}
{
var input_symbols = js_ast.Symbol.Map.initList(js_ast.Symbol.NestedList.init(this.ast.items(.symbols)));
var symbols = input_symbols.symbols_for_source.clone(this.allocator) catch bun.outOfMemory();
for (symbols.slice(), input_symbols.symbols_for_source.slice()) |*dest, src| {
dest.* = src.clone(this.allocator) catch bun.outOfMemory();
}
this.symbols = js_ast.Symbol.Map.initList(symbols);
}
// TODO: const_values
// {
// var const_values = this.const_values;
// var count: usize = 0;
// for (this.ast.items(.const_values)) |const_value| {
// count += const_value.count();
// }
// if (count > 0) {
// try const_values.ensureTotalCapacity(this.allocator, count);
// for (this.ast.items(.const_values)) |const_value| {
// for (const_value.keys(), const_value.values()) |key, value| {
// const_values.putAssumeCapacityNoClobber(key, value);
// }
// }
// }
// this.const_values = const_values;
// }
{
var count: usize = 0;
for (this.ast.items(.ts_enums)) |ts_enums| {
count += ts_enums.count();
}
if (count > 0) {
try this.ts_enums.ensureTotalCapacity(this.allocator, count);
for (this.ast.items(.ts_enums)) |ts_enums| {
for (ts_enums.keys(), ts_enums.values()) |key, value| {
this.ts_enums.putAssumeCapacityNoClobber(key, value);
}
}
}
}
const src_named_exports: []js_ast.Ast.NamedExports = this.ast.items(.named_exports);
const dest_resolved_exports: []ResolvedExports = this.meta.items(.resolved_exports);
for (src_named_exports, dest_resolved_exports, 0..) |src, *dest, source_index| {
var resolved = ResolvedExports{};
resolved.ensureTotalCapacity(this.allocator, src.count()) catch unreachable;
for (src.keys(), src.values()) |key, value| {
resolved.putAssumeCapacityNoClobber(key, .{ .data = .{
.import_ref = value.ref,
.name_loc = value.alias_loc,
.source_index = Index.source(source_index),
} });
}
dest.* = resolved;
}
}
pub const File = struct {
entry_bits: AutoBitSet = undefined,
input_file: Index = Index.source(0),
/// The minimum number of links in the module graph to get from an entry point
/// to this file
distance_from_entry_point: u32 = std.math.maxInt(u32),
/// This file is an entry point if and only if this is not ".none".
/// Note that dynamically-imported files are allowed to also be specified by
/// the user as top-level entry points, so some dynamically-imported files
/// may be ".user_specified" instead of ".dynamic_import".
entry_point_kind: EntryPoint.Kind = .none,
/// If "entry_point_kind" is not ".none", this is the index of the
/// corresponding entry point chunk.
///
/// This is also initialized for files that are a SCB's generated
/// reference, pointing to its destination. This forms a lookup map from
/// a Source.Index to its output path in breakOutputIntoPieces.
entry_point_chunk_index: u32 = std.math.maxInt(u32),
line_offset_table: bun.sourcemap.LineOffsetTable.List = .empty,
quoted_source_contents: string = "",
pub fn isEntryPoint(this: *const File) bool {
return this.entry_point_kind.isEntryPoint();
}
pub fn isUserSpecifiedEntryPoint(this: *const File) bool {
return this.entry_point_kind.isUserSpecifiedEntryPoint();
}
pub const List = MultiArrayList(File);
};
const bun = @import("bun");
const Environment = bun.Environment;
const std = @import("std");
const string = bun.string;
const Output = bun.Output;
const BitSet = bun.bit_set.DynamicBitSetUnmanaged;
const BabyList = bun.BabyList;
const Logger = bun.bundle_v2.Logger;
const TopLevelSymbolToParts = bun.bundle_v2.TopLevelSymbolToParts;
const Index = bun.bundle_v2.Index;
const Part = bun.bundle_v2.Part;
const Ref = bun.bundle_v2.Ref;
const EntryPoint = bun.bundle_v2.EntryPoint;
const ServerComponentBoundary = bun.bundle_v2.ServerComponentBoundary;
const MultiArrayList = bun.MultiArrayList;
const JSAst = bun.bundle_v2.JSAst;
const JSMeta = bun.bundle_v2.JSMeta;
const js_ast = @import("../js_ast.zig");
const Symbol = @import("../js_ast.zig").Symbol;
const ImportRecord = bun.ImportRecord;
const ResolvedExports = bun.bundle_v2.ResolvedExports;
const AutoBitSet = bun.bit_set.AutoBitSet;

src/bundler/ParseTask.zig Normal file (1446 lines): file diff suppressed because it is too large

src/bundler/ServerComponentParseTask.zig Normal file
@@ -0,0 +1,236 @@
/// Files for Server Components are generated using `AstBuilder`, instead of
/// running through the js_parser. It emits a ParseTask.Result and joins
/// with the same completion logic that regular parse results run through.
pub const ServerComponentParseTask = @This();
task: ThreadPoolLib.Task = .{ .callback = &taskCallbackWrap },
data: Data,
ctx: *BundleV2,
source: Logger.Source,
pub const Data = union(enum) {
/// Generate server-side code for a "use client" module. Given the
/// client ast, a "reference proxy" is created with identical exports.
client_reference_proxy: ReferenceProxy,
client_entry_wrapper: ClientEntryWrapper,
pub const ReferenceProxy = struct {
other_source: Logger.Source,
named_exports: JSAst.NamedExports,
};
pub const ClientEntryWrapper = struct {
path: []const u8,
};
};
fn taskCallbackWrap(thread_pool_task: *ThreadPoolLib.Task) void {
const task: *ServerComponentParseTask = @fieldParentPtr("task", thread_pool_task);
var worker = ThreadPool.Worker.get(task.ctx);
defer worker.unget();
var log = Logger.Log.init(worker.allocator);
const result = bun.default_allocator.create(ParseTask.Result) catch bun.outOfMemory();
result.* = .{
.ctx = task.ctx,
.task = undefined,
.value = if (taskCallback(
task,
&log,
worker.allocator,
)) |success|
.{ .success = success }
else |err| switch (err) {
error.OutOfMemory => bun.outOfMemory(),
},
.watcher_data = .none,
};
switch (worker.ctx.loop().*) {
.js => |jsc_event_loop| {
jsc_event_loop.enqueueTaskConcurrent(JSC.ConcurrentTask.fromCallback(result, ParseTask.onComplete));
},
.mini => |*mini| {
mini.enqueueTaskConcurrentWithExtraCtx(
ParseTask.Result,
BundleV2,
result,
BundleV2.onParseTaskComplete,
.task,
);
},
}
}
fn taskCallback(
task: *ServerComponentParseTask,
log: *Logger.Log,
allocator: std.mem.Allocator,
) bun.OOM!ParseTask.Result.Success {
var ab = try AstBuilder.init(allocator, &task.source, task.ctx.transpiler.options.hot_module_reloading);
switch (task.data) {
.client_reference_proxy => |data| try task.generateClientReferenceProxy(data, &ab),
.client_entry_wrapper => |data| try task.generateClientEntryWrapper(data, &ab),
}
return .{
.ast = try ab.toBundledAst(switch (task.data) {
// Server-side
.client_reference_proxy => task.ctx.transpiler.options.target,
// Client-side
.client_entry_wrapper => .browser,
}),
.source = task.source,
.loader = .js,
.log = log.*,
.use_directive = .none,
.side_effects = .no_side_effects__pure_data,
};
}
fn generateClientEntryWrapper(_: *ServerComponentParseTask, data: Data.ClientEntryWrapper, b: *AstBuilder) !void {
const record = try b.addImportRecord(data.path, .stmt);
const namespace_ref = try b.newSymbol(.other, "main");
try b.appendStmt(S.Import{
.namespace_ref = namespace_ref,
.import_record_index = record,
.items = &.{},
.is_single_line = true,
});
b.import_records.items[record].was_originally_bare_import = true;
}
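// The generated wrapper is effectively just a bare side-effect import of the
// client entry, e.g. (path illustrative):
//
//   import "./src/client-entry.tsx";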
fn generateClientReferenceProxy(task: *ServerComponentParseTask, data: Data.ReferenceProxy, b: *AstBuilder) !void {
const server_components = task.ctx.framework.?.server_components orelse
unreachable; // config must be non-null to enter this function
const client_named_exports = data.named_exports;
const register_client_reference = (try b.addImportStmt(
server_components.server_runtime_import,
&.{server_components.server_register_client_reference},
))[0];
const module_path = b.newExpr(E.String{
// In development, the path loaded is the source file: Easy!
//
// In production, the path here must be the final chunk path, but
// that information is not yet available since chunks are not
// computed. The unique_key replacement system is used here.
.data = if (task.ctx.transpiler.options.dev_server != null)
data.other_source.path.pretty
else
try std.fmt.allocPrint(b.allocator, "{}S{d:0>8}", .{
bun.fmt.hexIntLower(task.ctx.unique_key),
data.other_source.index.get(),
}),
});
for (client_named_exports.keys()) |key| {
const is_default = bun.strings.eqlComptime(key, "default");
// This error message is taken from
// https://github.com/facebook/react/blob/c5b9375767e2c4102d7e5559d383523736f1c902/packages/react-server-dom-webpack/src/ReactFlightWebpackNodeLoader.js#L323-L354
const err_msg_string = try if (is_default)
std.fmt.allocPrint(
b.allocator,
"Attempted to call the default export of {[module_path]s} from " ++
"the server, but it's on the client. It's not possible to invoke a " ++
"client function from the server, it can only be rendered as a " ++
"Component or passed to props of a Client Component.",
.{ .module_path = data.other_source.path.pretty },
)
else
std.fmt.allocPrint(
b.allocator,
"Attempted to call {[key]s}() from the server but {[key]s} " ++
"is on the client. It's not possible to invoke a client function from " ++
"the server, it can only be rendered as a Component or passed to " ++
"props of a Client Component.",
.{ .key = key },
);
// throw new Error(...)
const err_msg = b.newExpr(E.New{
.target = b.newExpr(E.Identifier{
.ref = try b.newExternalSymbol("Error"),
}),
.args = try BabyList(Expr).fromSlice(b.allocator, &.{
b.newExpr(E.String{ .data = err_msg_string }),
}),
.close_parens_loc = Logger.Loc.Empty,
});
// registerClientReference(
// () => { throw new Error(...) },
// "src/filepath.tsx",
// "Comp"
// );
const value = b.newExpr(E.Call{
.target = register_client_reference,
.args = try js_ast.ExprNodeList.fromSlice(b.allocator, &.{
b.newExpr(E.Arrow{ .body = .{
.stmts = try b.allocator.dupe(Stmt, &.{
b.newStmt(S.Throw{ .value = err_msg }),
}),
.loc = Logger.Loc.Empty,
} }),
module_path,
b.newExpr(E.String{ .data = key }),
}),
});
if (is_default) {
// export default registerClientReference(...);
try b.appendStmt(S.ExportDefault{ .value = .{ .expr = value }, .default_name = .{} });
} else {
// export const Component = registerClientReference(...);
const export_ref = try b.newSymbol(.other, key);
try b.appendStmt(S.Local{
.decls = try G.Decl.List.fromSlice(b.allocator, &.{.{
.binding = Binding.alloc(b.allocator, B.Identifier{ .ref = export_ref }, Logger.Loc.Empty),
.value = value,
}}),
.is_export = true,
.kind = .k_const,
});
}
}
}
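// Putting it together, the reference proxy for a "use client" file exporting
// `Comp` looks roughly like this (runtime import specifier and error text are
// illustrative; they come from the framework config and the strings above):
//
//   import { registerClientReference } from "server-components-runtime";
//   export const Comp = registerClientReference(
//     () => { throw new Error("Attempted to call Comp() from the server ...") },
//     "src/filepath.tsx",
//     "Comp",
//   );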
const bun = @import("bun");
const strings = bun.strings;
const default_allocator = bun.default_allocator;
const std = @import("std");
const Logger = @import("../logger.zig");
const options = @import("../options.zig");
const js_parser = bun.js_parser;
const js_ast = @import("../js_ast.zig");
pub const Ref = @import("../ast/base.zig").Ref;
const ThreadPoolLib = @import("../thread_pool.zig");
const BabyList = @import("../baby_list.zig").BabyList;
const OOM = bun.OOM;
const JSAst = js_ast.BundledAst;
pub const Index = @import("../ast/base.zig").Index;
const Stmt = js_ast.Stmt;
const Expr = js_ast.Expr;
const E = js_ast.E;
const S = js_ast.S;
const G = js_ast.G;
const B = js_ast.B;
const Binding = js_ast.Binding;
const JSC = bun.JSC;
const Loc = Logger.Loc;
const bundler = bun.bundle_v2;
const BundleV2 = bundler.BundleV2;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
pub const ParseTask = bun.bundle_v2.ParseTask;
const AstBuilder = bundler.AstBuilder;

src/bundler/ThreadPool.zig Normal file
@@ -0,0 +1,293 @@
pub const ThreadPool = struct {
/// macOS holds an IORWLock on every file open.
/// This causes massive contention after about 4 threads as of macOS 15.2
/// On Windows, this seemed to be a small performance improvement.
/// On Linux, this was a performance regression.
/// In some benchmarks on macOS, this yielded up to a 60% performance improvement in microbenchmarks that load ~10,000 files.
io_pool: *ThreadPoolLib = undefined,
worker_pool: *ThreadPoolLib = undefined,
workers_assignments: std.AutoArrayHashMap(std.Thread.Id, *Worker) = std.AutoArrayHashMap(std.Thread.Id, *Worker).init(bun.default_allocator),
workers_assignments_lock: bun.Mutex = .{},
v2: *BundleV2 = undefined,
const debug = Output.scoped(.ThreadPool, false);
pub fn reset(this: *ThreadPool) void {
if (this.usesIOPool()) {
if (this.io_pool.threadpool_context == @as(?*anyopaque, @ptrCast(this))) {
this.io_pool.threadpool_context = null;
}
}
if (this.worker_pool.threadpool_context == @as(?*anyopaque, @ptrCast(this))) {
this.worker_pool.threadpool_context = null;
}
}
pub fn go(this: *ThreadPool, allocator: std.mem.Allocator, comptime Function: anytype) !ThreadPoolLib.ConcurrentFunction(Function) {
return this.worker_pool.go(allocator, Function);
}
pub fn start(this: *ThreadPool, v2: *BundleV2, existing_thread_pool: ?*ThreadPoolLib) !void {
this.v2 = v2;
if (existing_thread_pool) |pool| {
this.worker_pool = pool;
} else {
const cpu_count = bun.getThreadCount();
this.worker_pool = try v2.graph.allocator.create(ThreadPoolLib);
this.worker_pool.* = ThreadPoolLib.init(.{
.max_threads = cpu_count,
});
debug("{d} workers", .{cpu_count});
}
this.worker_pool.setThreadContext(this);
this.worker_pool.warm(8);
const IOThreadPool = struct {
var thread_pool: ThreadPoolLib = undefined;
var once = bun.once(startIOThreadPool);
fn startIOThreadPool() void {
thread_pool = ThreadPoolLib.init(.{
.max_threads = @max(@min(bun.getThreadCount(), 4), 2),
// Use a much smaller stack size for the IO thread pool
.stack_size = 512 * 1024,
});
}
pub fn get() *ThreadPoolLib {
once.call(.{});
return &thread_pool;
}
};
if (this.usesIOPool()) {
this.io_pool = IOThreadPool.get();
this.io_pool.setThreadContext(this);
this.io_pool.warm(1);
}
}
pub fn usesIOPool(_: *const ThreadPool) bool {
if (bun.getRuntimeFeatureFlag(.BUN_FEATURE_FLAG_FORCE_IO_POOL)) {
// For testing.
return true;
}
if (bun.getRuntimeFeatureFlag(.BUN_FEATURE_FLAG_DISABLE_IO_POOL)) {
// For testing.
return false;
}
if (Environment.isMac or Environment.isWindows) {
// 4 was the sweet spot on macOS. Didn't check the sweet spot on Windows.
return bun.getThreadCount() > 3;
}
return false;
}
pub fn scheduleWithOptions(this: *ThreadPool, parse_task: *ParseTask, is_inside_thread_pool: bool) void {
if (parse_task.contents_or_fd == .contents and parse_task.stage == .needs_source_code) {
parse_task.stage = .{
.needs_parse = .{
.contents = parse_task.contents_or_fd.contents,
.fd = bun.invalid_fd,
},
};
}
const scheduleFn = if (is_inside_thread_pool) &ThreadPoolLib.scheduleInsideThreadPool else &ThreadPoolLib.schedule;
if (this.usesIOPool()) {
switch (parse_task.stage) {
.needs_parse => {
scheduleFn(this.worker_pool, .from(&parse_task.task));
},
.needs_source_code => {
scheduleFn(this.io_pool, .from(&parse_task.io_task));
},
}
} else {
scheduleFn(this.worker_pool, .from(&parse_task.task));
}
}
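// A parse task can therefore hop between pools: reading source code runs on
// the small-stack io_pool (when enabled), and once the contents are in memory
// the task is rescheduled onto the worker_pool for CPU-bound parsing.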
pub fn schedule(this: *ThreadPool, parse_task: *ParseTask) void {
this.scheduleWithOptions(parse_task, false);
}
pub fn scheduleInsideThreadPool(this: *ThreadPool, parse_task: *ParseTask) void {
this.scheduleWithOptions(parse_task, true);
}
pub fn getWorker(this: *ThreadPool, id: std.Thread.Id) *Worker {
var worker: *Worker = undefined;
{
this.workers_assignments_lock.lock();
defer this.workers_assignments_lock.unlock();
const entry = this.workers_assignments.getOrPut(id) catch unreachable;
if (entry.found_existing) {
return entry.value_ptr.*;
}
worker = bun.default_allocator.create(Worker) catch unreachable;
entry.value_ptr.* = worker;
}
worker.* = .{
.ctx = this.v2,
.allocator = undefined,
.thread = ThreadPoolLib.Thread.current,
};
worker.init(this.v2);
return worker;
}
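// Workers are keyed by OS thread id, so a pool thread reuses its arena,
// transpiler, and log across tasks instead of re-initializing them per task.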
pub const Worker = struct {
heap: ThreadlocalArena = ThreadlocalArena{},
/// Thread-local memory allocator
/// All allocations are freed in `deinit` at the very end of bundling.
allocator: std.mem.Allocator,
ctx: *BundleV2,
data: WorkerData = undefined,
quit: bool = false,
ast_memory_allocator: js_ast.ASTMemoryAllocator = undefined,
has_created: bool = false,
thread: ?*ThreadPoolLib.Thread = null,
deinit_task: ThreadPoolLib.Task = .{ .callback = deinitCallback },
temporary_arena: bun.ArenaAllocator = undefined,
stmt_list: LinkerContext.StmtList = undefined,
pub fn deinitCallback(task: *ThreadPoolLib.Task) void {
debug("Worker.deinit()", .{});
var this: *Worker = @alignCast(@fieldParentPtr("deinit_task", task));
this.deinit();
}
pub fn deinitSoon(this: *Worker) void {
if (this.thread) |thread| {
thread.pushIdleTask(&this.deinit_task);
}
}
pub fn deinit(this: *Worker) void {
if (this.has_created) {
this.heap.deinit();
}
bun.default_allocator.destroy(this);
}
pub fn get(ctx: *BundleV2) *Worker {
var worker = ctx.graph.pool.getWorker(std.Thread.getCurrentId());
if (!worker.has_created) {
worker.create(ctx);
}
worker.ast_memory_allocator.push();
if (comptime FeatureFlags.help_catch_memory_issues) {
worker.heap.helpCatchMemoryIssues();
}
return worker;
}
pub fn unget(this: *Worker) void {
if (comptime FeatureFlags.help_catch_memory_issues) {
this.heap.helpCatchMemoryIssues();
}
this.ast_memory_allocator.pop();
}
pub const WorkerData = struct {
log: *Logger.Log,
estimated_input_lines_of_code: usize = 0,
macro_context: js_ast.Macro.MacroContext,
transpiler: Transpiler = undefined,
};
pub fn init(worker: *Worker, v2: *BundleV2) void {
worker.ctx = v2;
}
fn create(this: *Worker, ctx: *BundleV2) void {
const trace = bun.perf.trace("Bundler.Worker.create");
defer trace.end();
this.has_created = true;
Output.Source.configureThread();
this.heap = ThreadlocalArena.init() catch unreachable;
this.allocator = this.heap.allocator();
var allocator = this.allocator;
this.ast_memory_allocator = .{ .allocator = this.allocator };
this.ast_memory_allocator.reset();
this.data = WorkerData{
.log = allocator.create(Logger.Log) catch unreachable,
.estimated_input_lines_of_code = 0,
.macro_context = undefined,
};
this.data.log.* = Logger.Log.init(allocator);
this.ctx = ctx;
this.data.transpiler = ctx.transpiler.*;
this.data.transpiler.setLog(this.data.log);
this.data.transpiler.setAllocator(allocator);
this.data.transpiler.linker.resolver = &this.data.transpiler.resolver;
this.data.transpiler.macro_context = js_ast.Macro.MacroContext.init(&this.data.transpiler);
this.data.macro_context = this.data.transpiler.macro_context.?;
this.temporary_arena = bun.ArenaAllocator.init(this.allocator);
this.stmt_list = LinkerContext.StmtList.init(this.allocator);
const CacheSet = @import("../cache.zig");
this.data.transpiler.resolver.caches = CacheSet.Set.init(this.allocator);
debug("Worker.create()", .{});
}
pub fn run(this: *Worker, ctx: *BundleV2) void {
if (!this.has_created) {
this.create(ctx);
}
// no funny business mr. cache
}
};
};
const Transpiler = bun.Transpiler;
const bun = @import("bun");
const Output = bun.Output;
const Environment = bun.Environment;
const default_allocator = bun.default_allocator;
const FeatureFlags = bun.FeatureFlags;
const std = @import("std");
const Logger = @import("../logger.zig");
const js_ast = @import("../js_ast.zig");
const linker = @import("../linker.zig");
pub const Ref = @import("../ast/base.zig").Ref;
const ThreadPoolLib = @import("../thread_pool.zig");
const ThreadlocalArena = @import("../allocators/mimalloc_arena.zig").Arena;
const allocators = @import("../allocators.zig");
pub const Index = @import("../ast/base.zig").Index;
const BundleV2 = bun.bundle_v2.BundleV2;
const ParseTask = bun.bundle_v2.ParseTask;
const LinkerContext = bun.bundle_v2.LinkerContext;

File diff suppressed because it is too large

src/bundler/entry_points.zig
@@ -1,12 +1,3 @@
const logger = bun.logger;
const std = @import("std");
const bun = @import("bun");
const string = bun.string;
const Fs = @import("../fs.zig");
const js_ast = bun.JSAst;
const Transpiler = bun.Transpiler;
const strings = bun.strings;
pub const FallbackEntryPoint = struct {
code_buffer: [8192]u8 = undefined,
path_buffer: bun.PathBuffer = undefined,
@@ -345,3 +336,12 @@ pub const MacroEntryPoint = struct {
entry.source.path.namespace = js_ast.Macro.namespace;
}
};
const logger = bun.logger;
const std = @import("std");
const bun = @import("bun");
const string = bun.string;
const Fs = @import("../fs.zig");
const js_ast = bun.JSAst;
const Transpiler = bun.Transpiler;
const strings = bun.strings;

File diff suppressed because it is too large

src/bundler/linker_context/computeChunks.zig Normal file
@@ -0,0 +1,382 @@
pub noinline fn computeChunks(
this: *LinkerContext,
unique_key: u64,
) ![]Chunk {
const trace = bun.perf.trace("Bundler.computeChunks");
defer trace.end();
bun.assert(this.dev_server == null); // the dev server uses its own chunking path
var stack_fallback = std.heap.stackFallback(4096, this.allocator);
const stack_all = stack_fallback.get();
var arena = bun.ArenaAllocator.init(stack_all);
defer arena.deinit();
var temp_allocator = arena.allocator();
var js_chunks = bun.StringArrayHashMap(Chunk).init(temp_allocator);
try js_chunks.ensureUnusedCapacity(this.graph.entry_points.len);
// Key is the hash of the CSS order. This deduplicates identical CSS files.
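// For example, two entry points whose graphs resolve to the same ordered list
// of CSS files hash to the same key and share one CSS chunk (when
// css_chunking is enabled).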
var css_chunks = std.AutoArrayHashMap(u64, Chunk).init(temp_allocator);
var js_chunks_with_css: usize = 0;
const entry_source_indices = this.graph.entry_points.items(.source_index);
const css_asts = this.graph.ast.items(.css);
const css_chunking = this.options.css_chunking;
var html_chunks = bun.StringArrayHashMap(Chunk).init(temp_allocator);
const loaders = this.parse_graph.input_files.items(.loader);
const code_splitting = this.graph.code_splitting;
// Create chunks for entry points
for (entry_source_indices, 0..) |source_index, entry_id_| {
const entry_bit = @as(Chunk.EntryPoint.ID, @truncate(entry_id_));
var entry_bits = &this.graph.files.items(.entry_bits)[source_index];
entry_bits.set(entry_bit);
const has_html_chunk = loaders[source_index] == .html;
const js_chunk_key = brk: {
if (code_splitting) {
break :brk try temp_allocator.dupe(u8, entry_bits.bytes(this.graph.entry_points.len));
} else {
// Force HTML chunks to always be generated, even if there's an identical JS file.
break :brk try std.fmt.allocPrint(temp_allocator, "{}", .{JSChunkKeyFormatter{
.has_html = has_html_chunk,
.entry_bits = entry_bits.bytes(this.graph.entry_points.len),
}});
}
};
// Put this early on in this loop so that CSS-only entry points work.
if (has_html_chunk) {
const html_chunk_entry = try html_chunks.getOrPut(js_chunk_key);
if (!html_chunk_entry.found_existing) {
html_chunk_entry.value_ptr.* = .{
.entry_point = .{
.entry_point_id = entry_bit,
.source_index = source_index,
.is_entry_point = true,
},
.entry_bits = entry_bits.*,
.content = .html,
.output_source_map = sourcemap.SourceMapPieces.init(this.allocator),
};
}
}
if (css_asts[source_index] != null) {
const order = this.findImportedFilesInCSSOrder(temp_allocator, &.{Index.init(source_index)});
// Create a chunk for the entry point here to ensure that the chunk is
// always generated even if the resulting file is empty
const hash_to_use = if (!this.options.css_chunking)
bun.hash(try temp_allocator.dupe(u8, entry_bits.bytes(this.graph.entry_points.len)))
else brk: {
var hasher = std.hash.Wyhash.init(5);
bun.writeAnyToHasher(&hasher, order.len);
for (order.slice()) |x| x.hash(&hasher);
break :brk hasher.final();
};
const css_chunk_entry = try css_chunks.getOrPut(hash_to_use);
if (!css_chunk_entry.found_existing) {
// const css_chunk_entry = try js_chunks.getOrPut();
css_chunk_entry.value_ptr.* = .{
.entry_point = .{
.entry_point_id = entry_bit,
.source_index = source_index,
.is_entry_point = true,
},
.entry_bits = entry_bits.*,
.content = .{
.css = .{
.imports_in_chunk_in_order = order,
.asts = this.allocator.alloc(bun.css.BundlerStyleSheet, order.len) catch bun.outOfMemory(),
},
},
.output_source_map = sourcemap.SourceMapPieces.init(this.allocator),
.has_html_chunk = has_html_chunk,
};
}
continue;
}
// Create a chunk for the entry point here to ensure that the chunk is
// always generated even if the resulting file is empty
const js_chunk_entry = try js_chunks.getOrPut(js_chunk_key);
js_chunk_entry.value_ptr.* = .{
.entry_point = .{
.entry_point_id = entry_bit,
.source_index = source_index,
.is_entry_point = true,
},
.entry_bits = entry_bits.*,
.content = .{
.javascript = .{},
},
.has_html_chunk = has_html_chunk,
.output_source_map = sourcemap.SourceMapPieces.init(this.allocator),
};
{
// If this JS entry point has an associated CSS entry point, generate it
// now. This is essentially done by generating a virtual CSS file that
// only contains "@import" statements in the order that the files were
// discovered in JS source order, where JS source order is arbitrary but
// consistent for dynamic imports. Then we run the CSS import order
// algorithm to determine the final CSS file order for the chunk.
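// For example (illustrative): if index.ts imports "a.css" and then dynamically
// imports page.ts, which imports "b.css", the virtual entry is
// "@import 'a.css'; @import 'b.css';" and the CSS order algorithm runs on that.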
const css_source_indices = this.findImportedCSSFilesInJSOrder(temp_allocator, Index.init(source_index));
if (css_source_indices.len > 0) {
const order = this.findImportedFilesInCSSOrder(temp_allocator, css_source_indices.slice());
const hash_to_use = if (!css_chunking)
bun.hash(try temp_allocator.dupe(u8, entry_bits.bytes(this.graph.entry_points.len)))
else brk: {
var hasher = std.hash.Wyhash.init(5);
bun.writeAnyToHasher(&hasher, order.len);
for (order.slice()) |x| x.hash(&hasher);
break :brk hasher.final();
};
const css_chunk_entry = try css_chunks.getOrPut(hash_to_use);
js_chunk_entry.value_ptr.content.javascript.css_chunks = try this.allocator.dupe(u32, &.{
@intCast(css_chunk_entry.index),
});
js_chunks_with_css += 1;
if (!css_chunk_entry.found_existing) {
var css_files_with_parts_in_chunk = std.AutoArrayHashMapUnmanaged(Index.Int, void){};
for (order.slice()) |entry| {
if (entry.kind == .source_index) {
css_files_with_parts_in_chunk.put(this.allocator, entry.kind.source_index.get(), {}) catch bun.outOfMemory();
}
}
css_chunk_entry.value_ptr.* = .{
.entry_point = .{
.entry_point_id = entry_bit,
.source_index = source_index,
.is_entry_point = true,
},
.entry_bits = entry_bits.*,
.content = .{
.css = .{
.imports_in_chunk_in_order = order,
.asts = this.allocator.alloc(bun.css.BundlerStyleSheet, order.len) catch bun.outOfMemory(),
},
},
.files_with_parts_in_chunk = css_files_with_parts_in_chunk,
.output_source_map = sourcemap.SourceMapPieces.init(this.allocator),
.has_html_chunk = has_html_chunk,
};
}
}
}
}
var file_entry_bits: []AutoBitSet = this.graph.files.items(.entry_bits);
const Handler = struct {
chunks: []Chunk,
allocator: std.mem.Allocator,
source_id: u32,
pub fn next(c: *@This(), chunk_id: usize) void {
_ = c.chunks[chunk_id].files_with_parts_in_chunk.getOrPut(c.allocator, @as(u32, @truncate(c.source_id))) catch unreachable;
}
};
const css_reprs = this.graph.ast.items(.css);
// Figure out which JS files are in which chunk
if (js_chunks.count() > 0) {
for (this.graph.reachable_files) |source_index| {
if (this.graph.files_live.isSet(source_index.get())) {
if (this.graph.ast.items(.css)[source_index.get()] == null) {
const entry_bits: *const AutoBitSet = &file_entry_bits[source_index.get()];
if (css_reprs[source_index.get()] != null) continue;
if (this.graph.code_splitting) {
const js_chunk_key = try temp_allocator.dupe(u8, entry_bits.bytes(this.graph.entry_points.len));
var js_chunk_entry = try js_chunks.getOrPut(js_chunk_key);
if (!js_chunk_entry.found_existing) {
js_chunk_entry.value_ptr.* = .{
.entry_bits = entry_bits.*,
.entry_point = .{
.source_index = source_index.get(),
},
.content = .{
.javascript = .{},
},
.output_source_map = sourcemap.SourceMapPieces.init(this.allocator),
};
}
_ = js_chunk_entry.value_ptr.files_with_parts_in_chunk.getOrPut(this.allocator, @as(u32, @truncate(source_index.get()))) catch unreachable;
} else {
var handler = Handler{
.chunks = js_chunks.values(),
.allocator = this.allocator,
.source_id = source_index.get(),
};
entry_bits.forEach(Handler, &handler, Handler.next);
}
}
}
}
}
// Sort the chunks for determinism. This matters because we use chunk indices
// as sorting keys in a few places.
const chunks: []Chunk = sort_chunks: {
var sorted_chunks = try BabyList(Chunk).initCapacity(this.allocator, js_chunks.count() + css_chunks.count() + html_chunks.count());
var sorted_keys = try BabyList(string).initCapacity(temp_allocator, js_chunks.count());
// JS Chunks
sorted_keys.appendSliceAssumeCapacity(js_chunks.keys());
sorted_keys.sortAsc();
var js_chunk_indices_with_css = try BabyList(u32).initCapacity(temp_allocator, js_chunks_with_css);
for (sorted_keys.slice()) |key| {
const chunk = js_chunks.get(key) orelse unreachable;
if (chunk.content.javascript.css_chunks.len > 0)
js_chunk_indices_with_css.appendAssumeCapacity(sorted_chunks.len);
sorted_chunks.appendAssumeCapacity(chunk);
// Attempt to order the JS HTML chunk immediately after the non-html one.
if (chunk.has_html_chunk) {
if (html_chunks.fetchSwapRemove(key)) |html_chunk| {
sorted_chunks.appendAssumeCapacity(html_chunk.value);
}
}
}
if (css_chunks.count() > 0) {
const sorted_css_keys = try temp_allocator.dupe(u64, css_chunks.keys());
std.sort.pdq(u64, sorted_css_keys, {}, std.sort.asc(u64));
// A map from the index in `css_chunks` to its final index in `sorted_chunks`
const remapped_css_indexes = try temp_allocator.alloc(u32, css_chunks.count());
const css_chunk_values = css_chunks.values();
for (sorted_css_keys, js_chunks.count()..) |key, sorted_index| {
const index = css_chunks.getIndex(key) orelse unreachable;
sorted_chunks.appendAssumeCapacity(css_chunk_values[index]);
remapped_css_indexes[index] = @intCast(sorted_index);
}
// Update all affected JS chunks to point at the correct CSS chunk index.
for (js_chunk_indices_with_css.slice()) |js_index| {
for (sorted_chunks.slice()[js_index].content.javascript.css_chunks) |*idx| {
idx.* = remapped_css_indexes[idx.*];
}
}
}
// We don't care about the order of the HTML chunks that have no JS chunks.
try sorted_chunks.append(this.allocator, html_chunks.values());
break :sort_chunks sorted_chunks.slice();
};
const entry_point_chunk_indices: []u32 = this.graph.files.items(.entry_point_chunk_index);
// Map from the entry point file to this chunk. We will need this later if
// a file contains a dynamic import to this entry point, since we'll need
// to look up the path for this chunk to use with the import.
for (chunks, 0..) |*chunk, chunk_id| {
if (chunk.entry_point.is_entry_point) {
entry_point_chunk_indices[chunk.entry_point.source_index] = @intCast(chunk_id);
}
}
// Determine the order of JS files (and parts) within the chunk ahead of time
try this.findAllImportedPartsInJSOrder(temp_allocator, chunks);
const unique_key_item_len = std.fmt.count("{any}C{d:0>8}", .{ bun.fmt.hexIntLower(unique_key), chunks.len });
var unique_key_builder = try bun.StringBuilder.initCapacity(this.allocator, unique_key_item_len * chunks.len);
this.unique_key_buf = unique_key_builder.allocatedSlice();
errdefer {
unique_key_builder.deinit(this.allocator);
this.unique_key_buf = "";
}
const kinds = this.graph.files.items(.entry_point_kind);
const output_paths = this.graph.entry_points.items(.output_path);
for (chunks, 0..) |*chunk, chunk_id| {
// Assign a unique key to each chunk. This key encodes the index directly so
// we can easily recover it later without needing to look it up in a map. The
// last 8 numbers of the key are the chunk index.
chunk.unique_key = unique_key_builder.fmt("{}C{d:0>8}", .{ bun.fmt.hexIntLower(unique_key), chunk_id });
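// e.g. with unique_key 0xabc123 and chunk_id 7 this yields "abc123C00000007",
// and parsing the trailing 8 digits recovers chunk index 7.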
if (this.unique_key_prefix.len == 0)
this.unique_key_prefix = chunk.unique_key[0..std.fmt.count("{}", .{bun.fmt.hexIntLower(unique_key)})];
if (chunk.entry_point.is_entry_point and
(chunk.content == .html or (kinds[chunk.entry_point.source_index] == .user_specified and !chunk.has_html_chunk)))
{
chunk.template = PathTemplate.file;
if (this.resolver.opts.entry_naming.len > 0)
chunk.template.data = this.resolver.opts.entry_naming;
} else {
chunk.template = PathTemplate.chunk;
if (this.resolver.opts.chunk_naming.len > 0)
chunk.template.data = this.resolver.opts.chunk_naming;
}
const pathname = Fs.PathName.init(output_paths[chunk.entry_point.entry_point_id].slice());
chunk.template.placeholder.name = pathname.base;
chunk.template.placeholder.ext = chunk.content.ext();
// This check is a specific fix for `bun build hi.ts --external '*'`, where the entry path has no leading `./`.
const dir_path = if (pathname.dir.len > 0) pathname.dir else ".";
var real_path_buf: bun.PathBuffer = undefined;
const dir = dir: {
var dir = std.fs.cwd().openDir(dir_path, .{}) catch {
break :dir bun.path.normalizeBuf(dir_path, &real_path_buf, .auto);
};
defer dir.close();
break :dir try bun.FD.fromStdDir(dir).getFdPath(&real_path_buf);
};
chunk.template.placeholder.dir = try resolve_path.relativeAlloc(this.allocator, this.resolver.opts.root_dir, dir);
}
return chunks;
}
const JSChunkKeyFormatter = struct {
has_html: bool,
entry_bits: []const u8,
pub fn format(this: @This(), comptime _: []const u8, _: anytype, writer: anytype) !void {
try writer.writeAll(&[_]u8{@intFromBool(!this.has_html)});
try writer.writeAll(this.entry_bits);
}
};
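// The formatted key is one byte (0 when the entry has an HTML chunk, 1
// otherwise) followed by the raw entry-point bit set, so an HTML entry and a
// plain JS entry with identical entry bits still map to distinct JS chunks.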
const bun = @import("bun");
const resolve_path = bun.bundle_v2.resolve_path;
const Fs = bun.bundle_v2.Fs;
const options = bun.options;
const BabyList = bun.BabyList;
const Index = bun.bundle_v2.Index;
const LinkerContext = bun.bundle_v2.LinkerContext;
const string = bun.string;
const std = @import("std");
const sourcemap = bun.sourcemap;
const AutoBitSet = bun.bit_set.AutoBitSet;
const bundler = bun.bundle_v2;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
pub const ParseTask = bun.bundle_v2.ParseTask;
const Chunk = bundler.Chunk;
const PathTemplate = bundler.PathTemplate;
const EntryPoint = bundler.EntryPoint;

src/bundler/linker_context/computeCrossChunkDependencies.zig Normal file
@@ -0,0 +1,455 @@
pub fn computeCrossChunkDependencies(c: *LinkerContext, chunks: []Chunk) !void {
if (!c.graph.code_splitting) {
// No need to compute cross-chunk dependencies if there can't be any
return;
}
const chunk_metas = try c.allocator.alloc(ChunkMeta, chunks.len);
for (chunk_metas) |*meta| {
// these must use the global allocator
meta.* = .{
.imports = ChunkMeta.Map.init(bun.default_allocator),
.exports = ChunkMeta.Map.init(bun.default_allocator),
.dynamic_imports = std.AutoArrayHashMap(Index.Int, void).init(bun.default_allocator),
};
}
defer {
for (chunk_metas) |*meta| {
meta.imports.deinit();
meta.exports.deinit();
meta.dynamic_imports.deinit();
}
c.allocator.free(chunk_metas);
}
{
const cross_chunk_dependencies = c.allocator.create(CrossChunkDependencies) catch unreachable;
defer c.allocator.destroy(cross_chunk_dependencies);
cross_chunk_dependencies.* = .{
.chunks = chunks,
.chunk_meta = chunk_metas,
.parts = c.graph.ast.items(.parts),
.import_records = c.graph.ast.items(.import_records),
.flags = c.graph.meta.items(.flags),
.entry_point_chunk_indices = c.graph.files.items(.entry_point_chunk_index),
.imports_to_bind = c.graph.meta.items(.imports_to_bind),
.wrapper_refs = c.graph.ast.items(.wrapper_ref),
.sorted_and_filtered_export_aliases = c.graph.meta.items(.sorted_and_filtered_export_aliases),
.resolved_exports = c.graph.meta.items(.resolved_exports),
.ctx = c,
.symbols = &c.graph.symbols,
};
c.parse_graph.pool.worker_pool.doPtr(
c.allocator,
&c.wait_group,
cross_chunk_dependencies,
CrossChunkDependencies.walk,
chunks,
) catch unreachable;
}
try computeCrossChunkDependenciesWithChunkMetas(c, chunks, chunk_metas);
}
const CrossChunkDependencies = struct {
chunk_meta: []ChunkMeta,
chunks: []Chunk,
parts: []BabyList(Part),
import_records: []BabyList(bun.ImportRecord),
flags: []const JSMeta.Flags,
entry_point_chunk_indices: []Index.Int,
imports_to_bind: []RefImportData,
wrapper_refs: []const Ref,
sorted_and_filtered_export_aliases: []const []const string,
resolved_exports: []const ResolvedExports,
ctx: *LinkerContext,
symbols: *Symbol.Map,
pub fn walk(deps: *@This(), chunk: *Chunk, chunk_index: usize) void {
var chunk_meta = &deps.chunk_meta[chunk_index];
var imports = &deps.chunk_meta[chunk_index].imports;
const entry_point_chunk_indices = deps.entry_point_chunk_indices;
// Go over each file in this chunk
for (chunk.files_with_parts_in_chunk.keys()) |source_index| {
// TODO: make this switch
if (chunk.content == .css) {
continue;
}
if (chunk.content != .javascript) continue;
// Go over each part in this file that's marked for inclusion in this chunk
const parts = deps.parts[source_index].slice();
var import_records = deps.import_records[source_index].slice();
const imports_to_bind = deps.imports_to_bind[source_index];
const wrap = deps.flags[source_index].wrap;
const wrapper_ref = deps.wrapper_refs[source_index];
const _chunks = deps.chunks;
for (parts) |part| {
if (!part.is_live)
continue;
// Rewrite external dynamic imports to point to the chunk for that entry point
for (part.import_record_indices.slice()) |import_record_id| {
var import_record = &import_records[import_record_id];
if (import_record.source_index.isValid() and deps.ctx.isExternalDynamicImport(import_record, source_index)) {
const other_chunk_index = entry_point_chunk_indices[import_record.source_index.get()];
import_record.path.text = _chunks[other_chunk_index].unique_key;
import_record.source_index = Index.invalid;
// Track this cross-chunk dynamic import so we make sure to
// include its hash when we're calculating the hashes of all
// dependencies of this chunk.
if (other_chunk_index != chunk_index)
chunk_meta.dynamic_imports.put(other_chunk_index, {}) catch unreachable;
}
}
// Remember what chunk each top-level symbol is declared in. Symbols
// with multiple declarations such as repeated "var" statements with
// the same name should already be marked as all being in a single
// chunk. In that case this will overwrite the same value below which
// is fine.
deps.symbols.assignChunkIndex(part.declared_symbols, @as(u32, @truncate(chunk_index)));
const used_refs = part.symbol_uses.keys();
// Record each symbol used in this part. This will later be matched up
// with our map of which chunk a given symbol is declared in to
// determine if the symbol needs to be imported from another chunk.
for (used_refs) |ref| {
const ref_to_use = brk: {
var ref_to_use = ref;
var symbol = deps.symbols.getConst(ref_to_use).?;
// Ignore unbound symbols
if (symbol.kind == .unbound)
continue;
// Ignore symbols that are going to be replaced by undefined
if (symbol.import_item_status == .missing)
continue;
// If this is imported from another file, follow the import
// reference and reference the symbol in that file instead
if (imports_to_bind.get(ref_to_use)) |import_data| {
ref_to_use = import_data.data.import_ref;
symbol = deps.symbols.getConst(ref_to_use).?;
} else if (wrap == .cjs and ref_to_use.eql(wrapper_ref)) {
// The only internal symbol that wrapped CommonJS files export
// is the wrapper itself.
continue;
}
// If this is an ES6 import from a CommonJS file, it will become a
// property access off the namespace symbol instead of a bare
// identifier. In that case we want to pull in the namespace symbol
// instead. The namespace symbol stores the result of "require()".
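// e.g. `import { foo } from "./lib.cjs"` prints as `ns.foo`, so the chunk
// must import `ns` (the stored require() result) rather than a symbol
// named `foo`.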
if (symbol.namespace_alias) |*namespace_alias| {
ref_to_use = namespace_alias.namespace_ref;
}
break :brk ref_to_use;
};
if (comptime Environment.allow_assert)
debug("Cross-chunk import: {s} {}", .{ deps.symbols.get(ref_to_use).?.original_name, ref_to_use });
// We must record this relationship even for symbols that are not
// imports. Due to code splitting, the definition of a symbol may
// be moved to a separate chunk than the use of a symbol even if
// the definition and use of that symbol are originally from the
// same source file.
imports.put(ref_to_use, {}) catch unreachable;
}
}
}
// Include the exports if this is an entry point chunk
if (chunk.content == .javascript) {
if (chunk.entry_point.is_entry_point) {
const flags = deps.flags[chunk.entry_point.source_index];
if (flags.wrap != .cjs) {
const resolved_exports = deps.resolved_exports[chunk.entry_point.source_index];
const sorted_and_filtered_export_aliases = deps.sorted_and_filtered_export_aliases[chunk.entry_point.source_index];
for (sorted_and_filtered_export_aliases) |alias| {
const export_ = resolved_exports.get(alias).?;
var target_ref = export_.data.import_ref;
// If this is an import, then target what the import points to
if (deps.imports_to_bind[export_.data.source_index.get()].get(target_ref)) |import_data| {
target_ref = import_data.data.import_ref;
}
// If this is an ES6 import from a CommonJS file, it will become a
// property access off the namespace symbol instead of a bare
// identifier. In that case we want to pull in the namespace symbol
// instead. The namespace symbol stores the result of "require()".
if (deps.symbols.getConst(target_ref).?.namespace_alias) |namespace_alias| {
target_ref = namespace_alias.namespace_ref;
}
if (comptime Environment.allow_assert)
debug("Cross-chunk export: {s}", .{deps.symbols.get(target_ref).?.original_name});
imports.put(target_ref, {}) catch unreachable;
}
}
// Ensure "exports" is included if the current output format needs it
if (flags.force_include_exports_for_entry_point) {
imports.put(deps.wrapper_refs[chunk.entry_point.source_index], {}) catch unreachable;
}
// Include the wrapper if present
if (flags.wrap != .none) {
imports.put(deps.wrapper_refs[chunk.entry_point.source_index], {}) catch unreachable;
}
}
}
}
};
fn computeCrossChunkDependenciesWithChunkMetas(c: *LinkerContext, chunks: []Chunk, chunk_metas: []ChunkMeta) !void {
// Mark imported symbols as exported in the chunk from which they are declared
for (chunks, chunk_metas, 0..) |*chunk, *chunk_meta, chunk_index| {
if (chunk.content != .javascript) {
continue;
}
var js = &chunk.content.javascript;
// Find all uses in this chunk of symbols from other chunks
for (chunk_meta.imports.keys()) |import_ref| {
const symbol = c.graph.symbols.getConst(import_ref).?;
// Ignore uses that aren't top-level symbols
if (symbol.chunkIndex()) |other_chunk_index| {
if (@as(usize, other_chunk_index) != chunk_index) {
if (comptime Environment.allow_assert)
debug("Import name: {s} (in {s})", .{
symbol.original_name,
c.parse_graph.input_files.get(import_ref.sourceIndex()).source.path.text,
});
{
var entry = try js
.imports_from_other_chunks
.getOrPutValue(c.allocator, other_chunk_index, .{});
try entry.value_ptr.push(c.allocator, .{
.ref = import_ref,
});
}
_ = chunk_metas[other_chunk_index].exports.getOrPut(import_ref) catch unreachable;
} else {
debug("{s} imports from itself (chunk {d})", .{ symbol.original_name, chunk_index });
}
}
}
// If this is an entry point, make sure we import all chunks belonging to
// this entry point, even if there are no imports. We need to make sure
// these chunks are evaluated for their side effects too.
if (chunk.entry_point.is_entry_point) {
for (chunks, 0..) |*other_chunk, other_chunk_index| {
if (other_chunk_index == chunk_index or other_chunk.content != .javascript) continue;
if (other_chunk.entry_bits.isSet(chunk.entry_point.entry_point_id)) {
_ = js.imports_from_other_chunks.getOrPutValue(
c.allocator,
@as(u32, @truncate(other_chunk_index)),
CrossChunkImport.Item.List{},
) catch unreachable;
}
}
}
// Make sure we also track dynamic cross-chunk imports. These need to be
// tracked so we count them as dependencies of this chunk for the purpose
// of hash calculation.
if (chunk_meta.dynamic_imports.count() > 0) {
const dynamic_chunk_indices = chunk_meta.dynamic_imports.keys();
std.sort.pdq(Index.Int, dynamic_chunk_indices, {}, std.sort.asc(Index.Int));
var imports = chunk.cross_chunk_imports.listManaged(c.allocator);
defer chunk.cross_chunk_imports.update(imports);
imports.ensureUnusedCapacity(dynamic_chunk_indices.len) catch unreachable;
const prev_len = imports.items.len;
imports.items.len += dynamic_chunk_indices.len;
for (dynamic_chunk_indices, imports.items[prev_len..]) |dynamic_chunk_index, *item| {
item.* = .{
.import_kind = .dynamic,
.chunk_index = dynamic_chunk_index,
};
}
}
}
// Generate cross-chunk exports. These must be computed before cross-chunk
// imports because of export alias renaming, which must consider all export
// aliases simultaneously to avoid collisions.
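// For example (aliases illustrative): if chunk A declares `foo` and chunk B
// uses it, A gets a suffix `export { foo as a }` and B gets a prefix
// `import { a as foo } from "./chunk-A.js"`, with aliases picked so they
// never collide across all of A's exports.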
{
bun.assert(chunk_metas.len == chunks.len);
var r = renamer.ExportRenamer.init(c.allocator);
defer r.deinit();
debug("Generating cross-chunk exports", .{});
var stable_ref_list = std.ArrayList(StableRef).init(c.allocator);
defer stable_ref_list.deinit();
for (chunks, chunk_metas) |*chunk, *chunk_meta| {
if (chunk.content != .javascript) continue;
var repr = &chunk.content.javascript;
switch (c.options.output_format) {
.esm => {
c.sortedCrossChunkExportItems(
chunk_meta.exports,
&stable_ref_list,
);
var clause_items = BabyList(js_ast.ClauseItem).initCapacity(c.allocator, stable_ref_list.items.len) catch unreachable;
clause_items.len = @as(u32, @truncate(stable_ref_list.items.len));
repr.exports_to_other_chunks.ensureUnusedCapacity(c.allocator, stable_ref_list.items.len) catch unreachable;
r.clearRetainingCapacity();
for (stable_ref_list.items, clause_items.slice()) |stable_ref, *clause_item| {
const ref = stable_ref.ref;
const alias = if (c.options.minify_identifiers) try r.nextMinifiedName(c.allocator) else r.nextRenamedName(c.graph.symbols.get(ref).?.original_name);
clause_item.* = .{
.name = .{
.ref = ref,
.loc = Logger.Loc.Empty,
},
.alias = alias,
.alias_loc = Logger.Loc.Empty,
.original_name = "",
};
repr.exports_to_other_chunks.putAssumeCapacity(
ref,
alias,
);
}
if (clause_items.len > 0) {
var stmts = BabyList(js_ast.Stmt).initCapacity(c.allocator, 1) catch unreachable;
const export_clause = c.allocator.create(js_ast.S.ExportClause) catch unreachable;
export_clause.* = .{
.items = clause_items.slice(),
.is_single_line = true,
};
stmts.appendAssumeCapacity(.{
.data = .{
.s_export_clause = export_clause,
},
.loc = Logger.Loc.Empty,
});
repr.cross_chunk_suffix_stmts = stmts;
}
},
else => {},
}
}
}
// Generate cross-chunk imports. These must be computed after cross-chunk
// exports because the export aliases must already be finalized so they can
// be embedded in the generated import statements.
{
debug("Generating cross-chunk imports", .{});
var list = CrossChunkImport.List.init(c.allocator);
defer list.deinit();
for (chunks) |*chunk| {
if (chunk.content != .javascript) continue;
var repr = &chunk.content.javascript;
var cross_chunk_prefix_stmts = BabyList(js_ast.Stmt){};
CrossChunkImport.sortedCrossChunkImports(&list, chunks, &repr.imports_from_other_chunks) catch unreachable;
const cross_chunk_imports_input: []CrossChunkImport = list.items;
var cross_chunk_imports = chunk.cross_chunk_imports;
for (cross_chunk_imports_input) |cross_chunk_import| {
switch (c.options.output_format) {
.esm => {
const import_record_index = @as(u32, @intCast(cross_chunk_imports.len));
var clauses = std.ArrayList(js_ast.ClauseItem).initCapacity(c.allocator, cross_chunk_import.sorted_import_items.len) catch unreachable;
for (cross_chunk_import.sorted_import_items.slice()) |item| {
clauses.appendAssumeCapacity(.{
.name = .{
.ref = item.ref,
.loc = Logger.Loc.Empty,
},
.alias = item.export_alias,
.alias_loc = Logger.Loc.Empty,
});
}
cross_chunk_imports.push(c.allocator, .{
.import_kind = .stmt,
.chunk_index = cross_chunk_import.chunk_index,
}) catch unreachable;
const import = c.allocator.create(js_ast.S.Import) catch unreachable;
import.* = .{
.items = clauses.items,
.import_record_index = import_record_index,
.namespace_ref = Ref.None,
};
cross_chunk_prefix_stmts.push(
c.allocator,
.{
.data = .{
.s_import = import,
},
.loc = Logger.Loc.Empty,
},
) catch unreachable;
},
else => {},
}
}
repr.cross_chunk_prefix_stmts = cross_chunk_prefix_stmts;
chunk.cross_chunk_imports = cross_chunk_imports;
}
}
}
const bun = @import("bun");
const Ref = bun.bundle_v2.Ref;
const BabyList = bun.BabyList;
const Logger = bun.logger;
const Index = bun.bundle_v2.Index;
const Loc = Logger.Loc;
const LinkerContext = bun.bundle_v2.LinkerContext;
const debug = LinkerContext.debug;
const string = bun.string;
const Environment = bun.Environment;
const default_allocator = bun.default_allocator;
const std = @import("std");
const Part = js_ast.Part;
const js_ast = bun.js_ast;
const ImportRecord = bun.ImportRecord;
const Symbol = js_ast.Symbol;
const Stmt = js_ast.Stmt;
const S = js_ast.S;
const renamer = bun.renamer;
const bundler = bun.bundle_v2;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
pub const ParseTask = bun.bundle_v2.ParseTask;
const Chunk = bundler.Chunk;
const JSMeta = bundler.JSMeta;
const ResolvedExports = bundler.ResolvedExports;
const RefImportData = bundler.RefImportData;
const CrossChunkImport = bundler.CrossChunkImport;
const StableRef = bundler.StableRef;
const ChunkMeta = LinkerContext.ChunkMeta;

src/bundler/linker_context/convertStmtsForChunk.zig Normal file
@@ -0,0 +1,552 @@
/// Code we ultimately include in the bundle is potentially wrapped.
///
/// In that case, we do a final pass over the statements list to figure out
/// where each statement needs to go in the wrapper. The output format
/// requires ESM import and export statements to always be top-level, so
/// they can never be inside the wrapper:
///
/// prefix - outer
/// ...
/// var init_foo = __esm(() => {
/// prefix - inner
/// ...
/// suffix - inner
/// });
/// ...
/// suffix - outer
///
/// Keep in mind that we may need to wrap ES modules in some cases too
/// Consider:
/// import * as foo from 'bar';
/// foo[computedProperty]
///
/// In that case, when bundling, we still need to preserve that module
/// namespace object (foo) because we cannot know what they are going to
/// attempt to access statically
pub fn convertStmtsForChunk(
c: *LinkerContext,
source_index: u32,
stmts: *StmtList,
part_stmts: []const js_ast.Stmt,
chunk: *Chunk,
allocator: std.mem.Allocator,
wrap: WrapKind,
ast: *const JSAst,
) !void {
const shouldExtractESMStmtsForWrap = wrap != .none;
const shouldStripExports = c.options.mode != .passthrough or c.graph.files.items(.entry_point_kind)[source_index] != .none;
const flags = c.graph.meta.items(.flags);
const output_format = c.options.output_format;
// If this file is a CommonJS entry point, double-write re-exports to the
// external CommonJS "module.exports" object in addition to our internal ESM
// export namespace object. The difference between these two objects is that
// our internal one must not have the "__esModule" marker while the external
// one must have the "__esModule" marker. This is done because an ES module
// importing itself should not see the "__esModule" marker but a CommonJS module
// importing us should see the "__esModule" marker.
var module_exports_for_export: ?Expr = null;
if (output_format == .cjs and chunk.isEntryPoint()) {
module_exports_for_export = Expr.allocate(
allocator,
E.Dot,
E.Dot{
.target = Expr.allocate(
allocator,
E.Identifier,
E.Identifier{
.ref = c.unbound_module_ref,
},
Logger.Loc.Empty,
),
.name = "exports",
.name_loc = Logger.Loc.Empty,
},
Logger.Loc.Empty,
);
}
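// When set, this expression is appended as an extra argument to the runtime's
// __reExport helper below, which copies re-exports onto both the internal
// namespace object and the external `module.exports`.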
for (part_stmts) |stmt_| {
var stmt = stmt_;
process_stmt: {
switch (stmt.data) {
.s_import => |s| {
// "import * as ns from 'path'"
// "import {foo} from 'path'"
if (try c.shouldRemoveImportExportStmt(
stmts,
stmt.loc,
s.namespace_ref,
s.import_record_index,
allocator,
ast,
)) {
continue;
}
// Make sure these don't end up in the wrapper closure
if (shouldExtractESMStmtsForWrap) {
try stmts.outside_wrapper_prefix.append(stmt);
continue;
}
},
.s_export_star => |s| {
// "export * as ns from 'path'"
if (s.alias) |alias| {
if (try c.shouldRemoveImportExportStmt(
stmts,
stmt.loc,
s.namespace_ref,
s.import_record_index,
allocator,
ast,
)) {
continue;
}
if (shouldStripExports) {
// Turn this statement into "import * as ns from 'path'"
stmt = Stmt.alloc(
S.Import,
S.Import{
.namespace_ref = s.namespace_ref,
.import_record_index = s.import_record_index,
.star_name_loc = alias.loc,
},
stmt.loc,
);
}
// Make sure these don't end up in the wrapper closure
if (shouldExtractESMStmtsForWrap) {
try stmts.outside_wrapper_prefix.append(stmt);
continue;
}
break :process_stmt;
}
// "export * from 'path'"
if (!shouldStripExports) {
break :process_stmt;
}
const record = ast.import_records.at(s.import_record_index);
// Is this export star evaluated at run time?
if (!record.source_index.isValid() and c.options.output_format.keepES6ImportExportSyntax()) {
if (record.calls_runtime_re_export_fn) {
// Turn this statement into "import * as ns from 'path'"
stmt = Stmt.alloc(
S.Import,
S.Import{
.namespace_ref = s.namespace_ref,
.import_record_index = s.import_record_index,
.star_name_loc = stmt.loc,
},
stmt.loc,
);
// Prefix this module with "__reExport(exports, ns, module.exports)"
const export_star_ref = c.runtimeFunction("__reExport");
var args = try allocator.alloc(Expr, 2 + @as(usize, @intFromBool(module_exports_for_export != null)));
args[0..2].* = .{
Expr.init(
E.Identifier,
E.Identifier{
.ref = ast.exports_ref,
},
stmt.loc,
),
Expr.init(
E.Identifier,
E.Identifier{
.ref = s.namespace_ref,
},
stmt.loc,
),
};
if (module_exports_for_export) |mod| {
args[2] = mod;
}
try stmts.inside_wrapper_prefix.append(
Stmt.alloc(
S.SExpr,
S.SExpr{
.value = Expr.allocate(
allocator,
E.Call,
E.Call{
.target = Expr.allocate(
allocator,
E.Identifier,
E.Identifier{
.ref = export_star_ref,
},
stmt.loc,
),
.args = bun.BabyList(Expr).init(args),
},
stmt.loc,
),
},
stmt.loc,
),
);
// Make sure these don't end up in the wrapper closure
if (shouldExtractESMStmtsForWrap) {
try stmts.outside_wrapper_prefix.append(stmt);
continue;
}
}
} else {
if (record.source_index.isValid()) {
const flag = flags[record.source_index.get()];
const wrapper_ref = c.graph.ast.items(.wrapper_ref)[record.source_index.get()];
if (flag.wrap == .esm and wrapper_ref.isValid()) {
try stmts.inside_wrapper_prefix.append(
Stmt.alloc(S.SExpr, .{
.value = Expr.init(E.Call, .{
.target = Expr.init(
E.Identifier,
E.Identifier{
.ref = wrapper_ref,
},
stmt.loc,
),
}, stmt.loc),
}, stmt.loc),
);
}
}
if (record.calls_runtime_re_export_fn) {
const target: Expr = brk: {
if (record.source_index.isValid() and c.graph.ast.items(.exports_kind)[record.source_index.get()].isESMWithDynamicFallback()) {
// Prefix this module with "__reExport(exports, otherExports, module.exports)"
break :brk Expr.initIdentifier(c.graph.ast.items(.exports_ref)[record.source_index.get()], stmt.loc);
}
break :brk Expr.init(
E.RequireString,
E.RequireString{
.import_record_index = s.import_record_index,
},
stmt.loc,
);
};
// Prefix this module with "__reExport(exports, require(path), module.exports)"
const export_star_ref = c.runtimeFunction("__reExport");
var args = try allocator.alloc(Expr, 2 + @as(usize, @intFromBool(module_exports_for_export != null)));
args[0..2].* = .{
Expr.init(
E.Identifier,
E.Identifier{
.ref = ast.exports_ref,
},
stmt.loc,
),
target,
};
if (module_exports_for_export) |mod| {
args[2] = mod;
}
try stmts.inside_wrapper_prefix.append(
Stmt.alloc(
S.SExpr,
S.SExpr{
.value = Expr.init(
E.Call,
E.Call{
.target = Expr.init(
E.Identifier,
E.Identifier{
.ref = export_star_ref,
},
stmt.loc,
),
.args = js_ast.ExprNodeList.init(args),
},
stmt.loc,
),
},
stmt.loc,
),
);
}
// Remove the export star statement
continue;
}
},
.s_export_from => |s| {
// "export {foo} from 'path'"
if (try c.shouldRemoveImportExportStmt(
stmts,
stmt.loc,
s.namespace_ref,
s.import_record_index,
allocator,
ast,
)) {
continue;
}
if (shouldStripExports) {
// Turn this statement into "import {foo} from 'path'"
// TODO: is this allocation necessary?
const items = allocator.alloc(js_ast.ClauseItem, s.items.len) catch unreachable;
for (s.items, items) |src, *dest| {
dest.* = .{
.alias = src.original_name,
.alias_loc = src.alias_loc,
.name = src.name,
};
}
stmt = Stmt.alloc(
S.Import,
S.Import{
.items = items,
.import_record_index = s.import_record_index,
.namespace_ref = s.namespace_ref,
.is_single_line = s.is_single_line,
},
stmt.loc,
);
}
// Make sure these don't end up in the wrapper closure
if (shouldExtractESMStmtsForWrap) {
try stmts.outside_wrapper_prefix.append(stmt);
continue;
}
},
.s_export_clause => {
// "export {foo}"
if (shouldStripExports) {
// Remove export statements entirely
continue;
}
// Make sure these don't end up in the wrapper closure
if (shouldExtractESMStmtsForWrap) {
try stmts.outside_wrapper_prefix.append(stmt);
continue;
}
},
.s_function => |s| {
// Strip the "export" keyword while bundling
if (shouldStripExports and s.func.flags.contains(.is_export)) {
// Be careful to not modify the original statement
stmt = Stmt.alloc(
S.Function,
S.Function{
.func = s.func,
},
stmt.loc,
);
stmt.data.s_function.func.flags.remove(.is_export);
}
},
.s_class => |s| {
// Strip the "export" keyword while bundling
if (shouldStripExports and s.is_export) {
// Be careful to not modify the original statement
stmt = Stmt.alloc(
S.Class,
S.Class{
.class = s.class,
.is_export = false,
},
stmt.loc,
);
}
},
.s_local => |s| {
// Strip the "export" keyword while bundling
if (shouldStripExports and s.is_export) {
// Be careful to not modify the original statement
stmt = Stmt.alloc(
S.Local,
s.*,
stmt.loc,
);
stmt.data.s_local.is_export = false;
} else if (FeatureFlags.unwrap_commonjs_to_esm and s.was_commonjs_export and wrap == .cjs) {
bun.assert(stmt.data.s_local.decls.len == 1);
const decl = stmt.data.s_local.decls.ptr[0];
if (decl.value) |decl_value| {
stmt = Stmt.alloc(
S.SExpr,
S.SExpr{
.value = Expr.init(
E.Binary,
E.Binary{
.op = .bin_assign,
.left = Expr.init(
E.CommonJSExportIdentifier,
E.CommonJSExportIdentifier{
.ref = decl.binding.data.b_identifier.ref,
},
decl.binding.loc,
),
.right = decl_value,
},
stmt.loc,
),
},
stmt.loc,
);
} else {
continue;
}
}
},
.s_export_default => |s| {
// "export default foo"
if (shouldStripExports) {
switch (s.value) {
.stmt => |stmt2| {
switch (stmt2.data) {
.s_expr => |s2| {
// "export default foo;" => "var default = foo;"
stmt = Stmt.alloc(
S.Local,
S.Local{
.decls = try G.Decl.List.fromSlice(
allocator,
&.{
.{
.binding = Binding.alloc(
allocator,
B.Identifier{
.ref = s.default_name.ref.?,
},
s2.value.loc,
),
.value = s2.value,
},
},
),
},
stmt.loc,
);
},
.s_function => |s2| {
// "export default function() {}" => "function default() {}"
// "export default function foo() {}" => "function foo() {}"
// Be careful to not modify the original statement
stmt = Stmt.alloc(
S.Function,
S.Function{
.func = s2.func,
},
stmt.loc,
);
stmt.data.s_function.func.name = s.default_name;
},
.s_class => |s2| {
// "export default class {}" => "class default {}"
// "export default class foo {}" => "class foo {}"
// Be careful to not modify the original statement
stmt = Stmt.alloc(
S.Class,
S.Class{
.class = s2.class,
.is_export = false,
},
stmt.loc,
);
stmt.data.s_class.class.class_name = s.default_name;
},
else => bun.unreachablePanic(
"Unexpected type {any} in source file {s}",
.{
stmt2.data,
c.parse_graph.input_files.get(c.graph.files.get(source_index).input_file.get()).source.path.text,
},
),
}
},
.expr => |e| {
stmt = Stmt.alloc(
S.Local,
S.Local{
.decls = try G.Decl.List.fromSlice(
allocator,
&.{
.{
.binding = Binding.alloc(
allocator,
B.Identifier{
.ref = s.default_name.ref.?,
},
e.loc,
),
.value = e,
},
},
),
},
stmt.loc,
);
},
}
}
},
else => {},
}
}
try stmts.inside_wrapper_suffix.append(stmt);
}
}
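// A quick sketch of the rewrites above when `shouldStripExports` is set
// (the identifiers are illustrative):
//
//   export {foo} from "path";   =>  import {foo} from "path";
//   export {foo};               =>  (removed entirely)
//   export function f() {}      =>  function f() {}
//   export class C {}           =>  class C {}
//   export const x = 1;         =>  const x = 1;
//   export default foo();       =>  var <default_name> = foo();
//
// where <default_name> stands for the compiler-generated symbol stored in
// `s.default_name.ref`.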
const bun = @import("bun");
const BabyList = bun.BabyList;
const Logger = bun.logger;
const Loc = Logger.Loc;
const LinkerContext = bun.bundle_v2.LinkerContext;
const FeatureFlags = bun.FeatureFlags;
const std = @import("std");
const js_ast = bun.js_ast;
const JSAst = js_ast.BundledAst;
const Stmt = js_ast.Stmt;
const Expr = js_ast.Expr;
const E = js_ast.E;
const S = js_ast.S;
const G = js_ast.G;
const B = js_ast.B;
const Binding = js_ast.Binding;
const bundler = bun.bundle_v2;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
pub const ParseTask = bun.bundle_v2.ParseTask;
const Chunk = bundler.Chunk;
const WrapKind = bundler.WrapKind;
const StmtList = LinkerContext.StmtList;

View File

@@ -0,0 +1,175 @@
/// For CommonJS, all statements are copied into `inside_wrapper_suffix` and this function returns early.
/// The conversion logic for format .internal_bake_dev is completely different from the other formats.
///
/// For ESM, this function populates all three lists:
/// 1. outside_wrapper_prefix: all import statements, unmodified.
/// 2. inside_wrapper_prefix: a var decl destructuring `hmr.imports` and the `hmr.updateImport` callback list
/// 3. inside_wrapper_suffix: all non-import statements
///
/// The imports are rewritten at print time to fit the packed array format
/// that the HMR runtime can decode. This encoding is low on JS objects and
/// indentation.
///
/// 1 ┃ "module/esm": [ [
/// ┃ 'module_1', 1, "add",
/// ┃ 'module_2', 2, "mul", "div",
/// ┃ 'module_3', 0, // bare or import star
/// ], [ "default" ], [], (hmr) => {
/// 2 ┃ var [module_1, module_2, module_3] = hmr.imports;
/// ┃ hmr.updateImport = [
/// ┃ (module) => (module_1 = module),
/// ┃ (module) => (module_2 = module),
/// ┃ (module) => (module_3 = module),
/// ┃ ];
///
/// 3 ┃ console.log("my module", module_1.add(1, module_2.mul(2, 3)));
/// ┃ module.exports = {
/// ┃ default: module_3.something(module_2.div),
/// ┃ };
/// }, false ],
/// ----- "is the module async?"
pub fn convertStmtsForChunkForDevServer(
c: *LinkerContext,
stmts: *StmtList,
part_stmts: []const js_ast.Stmt,
allocator: std.mem.Allocator,
ast: *JSAst,
) !void {
const hmr_api_ref = ast.wrapper_ref;
const hmr_api_id = Expr.initIdentifier(hmr_api_ref, Logger.Loc.Empty);
var esm_decls: std.ArrayListUnmanaged(B.Array.Item) = .empty;
var esm_callbacks: std.ArrayListUnmanaged(Expr) = .empty;
for (ast.import_records.slice()) |*record| {
if (record.path.is_disabled) continue;
if (record.source_index.isValid() and c.parse_graph.input_files.items(.loader)[record.source_index.get()] == .css) {
record.path.is_disabled = true;
continue;
}
// Make sure the printer gets the resolved path
if (record.source_index.isValid()) {
record.path = c.parse_graph.input_files.items(.source)[record.source_index.get()].path;
}
}
// Modules which do not have side effects
for (part_stmts) |stmt| switch (stmt.data) {
else => try stmts.inside_wrapper_suffix.append(stmt),
.s_import => |st| {
const record = ast.import_records.mut(st.import_record_index);
if (record.path.is_disabled) continue;
const is_builtin = record.tag == .builtin or record.tag == .bun_test or record.tag == .bun or record.tag == .runtime;
const is_bare_import = st.star_name_loc == null and st.items.len == 0 and st.default_name == null;
if (is_builtin) {
if (!is_bare_import) {
// hmr.builtin('...') or hmr.require('bun:wrap')
const call = Expr.init(E.Call, .{
.target = Expr.init(E.Dot, .{
.target = hmr_api_id,
.name = if (record.tag == .runtime) "require" else "builtin",
.name_loc = stmt.loc,
}, stmt.loc),
.args = .init(try allocator.dupe(Expr, &.{Expr.init(E.String, .{
.data = if (record.tag == .runtime) "bun:wrap" else record.path.pretty,
}, record.range.loc)})),
}, stmt.loc);
// var namespace = ...;
try stmts.inside_wrapper_prefix.append(Stmt.alloc(S.Local, .{
.kind = .k_var, // avoid a TDZ
.decls = try G.Decl.List.fromSlice(allocator, &.{.{
.binding = Binding.alloc(
allocator,
B.Identifier{ .ref = st.namespace_ref },
st.star_name_loc orelse stmt.loc,
),
.value = call,
}}),
}, stmt.loc));
}
} else {
const loc = st.star_name_loc orelse stmt.loc;
if (is_bare_import) {
try esm_decls.append(allocator, .{ .binding = .{ .data = .b_missing, .loc = .Empty } });
try esm_callbacks.append(allocator, Expr.init(E.Arrow, .noop_return_undefined, .Empty));
} else {
const binding = Binding.alloc(allocator, B.Identifier{ .ref = st.namespace_ref }, loc);
try esm_decls.append(allocator, .{ .binding = binding });
try esm_callbacks.append(allocator, Expr.init(E.Arrow, .{
.args = try allocator.dupe(G.Arg, &.{.{
.binding = Binding.alloc(allocator, B.Identifier{
.ref = ast.module_ref,
}, .Empty),
}}),
.prefer_expr = true,
.body = try .initReturnExpr(allocator, Expr.init(E.Binary, .{
.op = .bin_assign,
.left = Expr.initIdentifier(st.namespace_ref, .Empty),
.right = Expr.initIdentifier(ast.module_ref, .Empty),
}, .Empty)),
}, .Empty));
}
try stmts.outside_wrapper_prefix.append(stmt);
}
},
};
if (esm_decls.items.len > 0) {
// var ...;
try stmts.inside_wrapper_prefix.append(Stmt.alloc(S.Local, .{
.kind = .k_var, // avoid a TDZ
.decls = try .fromSlice(allocator, &.{.{
.binding = Binding.alloc(allocator, B.Array{
.items = esm_decls.items,
.is_single_line = true,
}, .Empty),
.value = Expr.init(E.Dot, .{
.target = hmr_api_id,
.name = "imports",
.name_loc = .Empty,
}, .Empty),
}}),
}, .Empty));
// hmr.updateImport = [ ... ];
try stmts.inside_wrapper_prefix.append(Stmt.alloc(S.SExpr, .{
.value = Expr.init(E.Binary, .{
.op = .bin_assign,
.left = Expr.init(E.Dot, .{
.target = hmr_api_id,
.name = "updateImport",
.name_loc = .Empty,
}, .Empty),
.right = Expr.init(E.Array, .{
.items = .fromList(esm_callbacks),
.is_single_line = esm_callbacks.items.len <= 2,
}, .Empty),
}, .Empty),
}, .Empty));
}
}
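// A quick sketch of the import rewrites above (module names are
// illustrative):
//
//   import fs from "node:fs";      =>  var fs = hmr.builtin("node:fs");
//   import { add } from "./math";  =>  statement hoisted into
//                                      outside_wrapper_prefix; its namespace
//                                      is destructured from `hmr.imports` and
//                                      re-bound through an `hmr.updateImport`
//                                      callback
//   import "./side_effect";        =>  a `b_missing` slot in the destructure
//                                      plus a no-op update callback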
const bun = @import("bun");
const Logger = bun.logger;
const Loc = Logger.Loc;
const LinkerContext = bun.bundle_v2.LinkerContext;
const std = @import("std");
const js_ast = bun.js_ast;
const JSAst = js_ast.BundledAst;
const Stmt = js_ast.Stmt;
const Expr = js_ast.Expr;
const E = js_ast.E;
const S = js_ast.S;
const G = js_ast.G;
const B = js_ast.B;
const Binding = js_ast.Binding;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
pub const ParseTask = bun.bundle_v2.ParseTask;
const StmtList = LinkerContext.StmtList;

View File

@@ -0,0 +1,497 @@
/// Step 5: Create namespace exports for every file. This is always necessary
/// for CommonJS files, and is also necessary for other files if they are
/// imported using an import star statement.
pub fn doStep5(c: *LinkerContext, source_index_: Index, _: usize) void {
const source_index = source_index_.get();
const trace = bun.perf.trace("Bundler.CreateNamespaceExports");
defer trace.end();
const id = source_index;
if (id > c.graph.meta.len) return;
const worker: *ThreadPool.Worker = ThreadPool.Worker.get(@fieldParentPtr("linker", c));
defer worker.unget();
// we must use this allocator here
const allocator = worker.allocator;
const resolved_exports: *ResolvedExports = &c.graph.meta.items(.resolved_exports)[id];
// Now that all exports have been resolved, sort and filter them to create
// something we can iterate over later.
var aliases = std.ArrayList(string).initCapacity(allocator, resolved_exports.count()) catch unreachable;
var alias_iter = resolved_exports.iterator();
const imports_to_bind = c.graph.meta.items(.imports_to_bind);
const probably_typescript_type = c.graph.meta.items(.probably_typescript_type);
// counting in here saves us an extra pass through the array
var re_exports_count: usize = 0;
next_alias: while (alias_iter.next()) |entry| {
var export_ = entry.value_ptr.*;
const alias = entry.key_ptr.*;
const this_id = export_.data.source_index.get();
var inner_count: usize = 0;
// Re-exporting multiple symbols with the same name causes an ambiguous
// export. These names cannot be used and should not end up in generated code.
if (export_.potentially_ambiguous_export_star_refs.len > 0) {
const main = imports_to_bind[this_id].get(export_.data.import_ref) orelse ImportData{ .data = export_.data };
for (export_.potentially_ambiguous_export_star_refs.slice()) |ambig| {
const _id = ambig.data.source_index.get();
const ambig_ref = if (imports_to_bind[_id].get(ambig.data.import_ref)) |bound|
bound.data.import_ref
else
ambig.data.import_ref;
if (!main.data.import_ref.eql(ambig_ref)) {
continue :next_alias;
}
inner_count += @as(usize, ambig.re_exports.len);
}
}
// Ignore re-exported imports in TypeScript files that failed to be
// resolved. These are probably just type-only imports so the best thing to
// do is to silently omit them from the export list.
if (probably_typescript_type[this_id].contains(export_.data.import_ref)) {
continue;
}
re_exports_count += inner_count;
aliases.appendAssumeCapacity(alias);
}
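// For example, given hypothetical inputs:
//
//   // a.js                  // b.js                  // entry.js
//   export const foo = 1;    export const foo = 2;    export * from "./a";
//                                                     export * from "./b";
//
// "foo" resolves to two different symbols in entry.js, so the loop above
// skips it (`continue :next_alias`) and it never reaches the sorted export
// alias list.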
// TODO: can this be u32 instead of a string?
// if yes, we could just move all the hidden exports to the end of the array
// and only store a count instead of an array
strings.sortDesc(aliases.items);
const export_aliases = aliases.toOwnedSlice() catch unreachable;
c.graph.meta.items(.sorted_and_filtered_export_aliases)[id] = export_aliases;
// Export creation uses "sortedAndFilteredExportAliases" so this must
// come second after we fill in that array
c.createExportsForFile(
allocator,
id,
resolved_exports,
imports_to_bind,
export_aliases,
re_exports_count,
);
// Each part tracks the other parts it depends on within this file
var local_dependencies = std.AutoHashMap(u32, u32).init(allocator);
defer local_dependencies.deinit();
const parts_slice: []Part = c.graph.ast.items(.parts)[id].slice();
const named_imports: *js_ast.Ast.NamedImports = &c.graph.ast.items(.named_imports)[id];
const our_imports_to_bind = imports_to_bind[id];
outer: for (parts_slice, 0..) |*part, part_index| {
// Now that all files have been parsed, determine which property
// accesses off of imported symbols are inlined enum values and
// which ones aren't
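// For example, with a hypothetical const enum import:
//
//   import { Direction } from "./enums";
//   Direction.Up;     // member exists in ts_enums: inlined, no use recorded
//   Direction.Custom; // not in the enum map: keeps `Direction` in symbol_uses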
for (
part.import_symbol_property_uses.keys(),
part.import_symbol_property_uses.values(),
) |ref, properties| {
const use = part.symbol_uses.getPtr(ref).?;
// Rare path: this import is a TypeScript enum
if (our_imports_to_bind.get(ref)) |import_data| {
const import_ref = import_data.data.import_ref;
if (c.graph.symbols.get(import_ref)) |symbol| {
if (symbol.kind == .ts_enum) {
if (c.graph.ts_enums.get(import_ref)) |enum_data| {
var found_non_inlined_enum = false;
var it = properties.iterator();
while (it.next()) |next| {
const name = next.key_ptr.*;
const prop_use = next.value_ptr;
if (enum_data.get(name) == null) {
found_non_inlined_enum = true;
use.count_estimate += prop_use.count_estimate;
}
}
if (!found_non_inlined_enum) {
if (use.count_estimate == 0) {
_ = part.symbol_uses.swapRemove(ref);
}
continue;
}
}
}
}
}
// Common path: this import isn't a TypeScript enum
var it = properties.valueIterator();
while (it.next()) |prop_use| {
use.count_estimate += prop_use.count_estimate;
}
}
// TODO: inline function calls here
// TODO: Inline cross-module constants
// if (c.graph.const_values.count() > 0) {
// // First, find any symbol usage that points to a constant value.
// // This will be pretty rare.
// const first_constant_i: ?usize = brk: {
// for (part.symbol_uses.keys(), 0..) |ref, j| {
// if (c.graph.const_values.contains(ref)) {
// break :brk j;
// }
// }
// break :brk null;
// };
// if (first_constant_i) |j| {
// var end_i: usize = 0;
// // symbol_uses is an array
// var keys = part.symbol_uses.keys()[j..];
// var values = part.symbol_uses.values()[j..];
// for (keys, values) |ref, val| {
// if (c.graph.const_values.contains(ref)) {
// continue;
// }
// keys[end_i] = ref;
// values[end_i] = val;
// end_i += 1;
// }
// part.symbol_uses.entries.len = end_i + j;
// if (part.symbol_uses.entries.len == 0 and part.can_be_removed_if_unused) {
// part.tag = .dead_due_to_inlining;
// part.dependencies.len = 0;
// continue :outer;
// }
// part.symbol_uses.reIndex(allocator) catch unreachable;
// }
// }
if (false) break :outer; // this `if` is here to preserve the unused
// block label from the above commented code.
// Now that we know this, we can determine cross-part dependencies
for (part.symbol_uses.keys(), 0..) |ref, j| {
if (comptime Environment.allow_assert) {
bun.assert(part.symbol_uses.values()[j].count_estimate > 0);
}
const other_parts = c.topLevelSymbolsToParts(id, ref);
for (other_parts) |other_part_index| {
const local = local_dependencies.getOrPut(other_part_index) catch unreachable;
if (!local.found_existing or local.value_ptr.* != part_index) {
local.value_ptr.* = @as(u32, @intCast(part_index));
// note: if we crash on append, it is due to threadlocal heaps in mimalloc
part.dependencies.push(
allocator,
.{
.source_index = Index.source(source_index),
.part_index = other_part_index,
},
) catch unreachable;
}
}
// Also map from imports to parts that use them
if (named_imports.getPtr(ref)) |existing| {
existing.local_parts_with_uses.push(allocator, @intCast(part_index)) catch unreachable;
}
}
}
}
pub fn createExportsForFile(
c: *LinkerContext,
allocator: std.mem.Allocator,
id: u32,
resolved_exports: *ResolvedExports,
imports_to_bind: []RefImportData,
export_aliases: []const string,
re_exports_count: usize,
) void {
////////////////////////////////////////////////////////////////////////////////
// WARNING: This method is run in parallel over all files. Do not mutate data
// for other files within this method or you will create a data race.
////////////////////////////////////////////////////////////////////////////////
Stmt.Disabler.disable();
defer Stmt.Disabler.enable();
Expr.Disabler.disable();
defer Expr.Disabler.enable();
// 1 property per export
var properties = std.ArrayList(js_ast.G.Property)
.initCapacity(allocator, export_aliases.len) catch bun.outOfMemory();
var ns_export_symbol_uses = Part.SymbolUseMap{};
ns_export_symbol_uses.ensureTotalCapacity(allocator, export_aliases.len) catch bun.outOfMemory();
const initial_flags = c.graph.meta.items(.flags)[id];
const needs_exports_variable = initial_flags.needs_exports_variable;
const force_include_exports_for_entry_point = c.options.output_format == .cjs and initial_flags.force_include_exports_for_entry_point;
const stmts_count =
// 1 statement for every export
export_aliases.len +
// + 1 if there are non-zero exports
@as(usize, @intFromBool(export_aliases.len > 0)) +
// + 1 if we need to inject the exports variable
@as(usize, @intFromBool(needs_exports_variable)) +
// + 1 if we need to do module.exports = __toCommonJS(exports)
@as(usize, @intFromBool(force_include_exports_for_entry_point));
var stmts = js_ast.Stmt.Batcher.init(allocator, stmts_count) catch bun.outOfMemory();
defer stmts.done();
const loc = Logger.Loc.Empty;
// todo: investigate if preallocating this array is faster
var ns_export_dependencies = std.ArrayList(js_ast.Dependency).initCapacity(allocator, re_exports_count) catch bun.outOfMemory();
for (export_aliases) |alias| {
var exp = resolved_exports.getPtr(alias).?.*;
// If this is an export of an import, reference the symbol that the import
// was eventually resolved to. We need to do this because imports have
// already been resolved by this point, so we can't generate a new import
// and have that be resolved later.
if (imports_to_bind[exp.data.source_index.get()].get(exp.data.import_ref)) |import_data| {
exp.data.import_ref = import_data.data.import_ref;
exp.data.source_index = import_data.data.source_index;
ns_export_dependencies.appendSlice(import_data.re_exports.slice()) catch bun.outOfMemory();
}
// Exports of imports need EImportIdentifier in case they need to be re-
// written to a property access later on
// note: this is stack allocated
const value: js_ast.Expr = brk: {
if (c.graph.symbols.getConst(exp.data.import_ref)) |symbol| {
if (symbol.namespace_alias != null) {
break :brk js_ast.Expr.init(
js_ast.E.ImportIdentifier,
js_ast.E.ImportIdentifier{
.ref = exp.data.import_ref,
},
loc,
);
}
}
break :brk js_ast.Expr.init(
js_ast.E.Identifier,
js_ast.E.Identifier{
.ref = exp.data.import_ref,
},
loc,
);
};
const fn_body = js_ast.G.FnBody{
.stmts = stmts.eat1(
js_ast.Stmt.allocate(
allocator,
js_ast.S.Return,
.{ .value = value },
loc,
),
),
.loc = loc,
};
properties.appendAssumeCapacity(.{
.key = js_ast.Expr.allocate(
allocator,
js_ast.E.String,
.{
// TODO: test emoji work as expected
// relevant for WASM exports
.data = alias,
},
loc,
),
.value = js_ast.Expr.allocate(
allocator,
js_ast.E.Arrow,
.{ .prefer_expr = true, .body = fn_body },
loc,
),
});
ns_export_symbol_uses.putAssumeCapacity(exp.data.import_ref, .{ .count_estimate = 1 });
// Make sure the part that declares the export is included
const parts = c.topLevelSymbolsToParts(exp.data.source_index.get(), exp.data.import_ref);
ns_export_dependencies.ensureUnusedCapacity(parts.len) catch unreachable;
for (parts, ns_export_dependencies.unusedCapacitySlice()[0..parts.len]) |part_id, *dest| {
// Use a non-local dependency since this is likely from a different
// file if it came in through an export star
dest.* = .{
.source_index = exp.data.source_index,
.part_index = part_id,
};
}
ns_export_dependencies.items.len += parts.len;
}
var declared_symbols = js_ast.DeclaredSymbol.List{};
const exports_ref = c.graph.ast.items(.exports_ref)[id];
const all_export_stmts: []js_ast.Stmt = stmts.head[0 .. @as(usize, @intFromBool(needs_exports_variable)) +
@as(usize, @intFromBool(properties.items.len > 0)) +
@as(usize, @intFromBool(force_include_exports_for_entry_point))];
stmts.head = stmts.head[all_export_stmts.len..];
var remaining_stmts = all_export_stmts;
defer bun.assert(remaining_stmts.len == 0); // all must be used
// Prefix this part with "var exports = {}" if this isn't a CommonJS entry point
if (needs_exports_variable) {
var decls = allocator.alloc(js_ast.G.Decl, 1) catch unreachable;
decls[0] = .{
.binding = js_ast.Binding.alloc(
allocator,
js_ast.B.Identifier{
.ref = exports_ref,
},
loc,
),
.value = js_ast.Expr.allocate(allocator, js_ast.E.Object, .{}, loc),
};
remaining_stmts[0] = js_ast.Stmt.allocate(
allocator,
js_ast.S.Local,
.{
.decls = G.Decl.List.init(decls),
},
loc,
);
remaining_stmts = remaining_stmts[1..];
declared_symbols.append(allocator, .{ .ref = exports_ref, .is_top_level = true }) catch unreachable;
}
// "__export(exports, { foo: () => foo })"
var export_ref = Ref.None;
if (properties.items.len > 0) {
export_ref = c.runtimeFunction("__export");
var args = allocator.alloc(js_ast.Expr, 2) catch unreachable;
args[0..2].* = [_]js_ast.Expr{
js_ast.Expr.initIdentifier(exports_ref, loc),
js_ast.Expr.allocate(allocator, js_ast.E.Object, .{ .properties = js_ast.G.Property.List.fromList(properties) }, loc),
};
remaining_stmts[0] = js_ast.Stmt.allocate(
allocator,
js_ast.S.SExpr,
.{
.value = js_ast.Expr.allocate(
allocator,
js_ast.E.Call,
.{
.target = js_ast.Expr.initIdentifier(export_ref, loc),
.args = js_ast.ExprNodeList.init(args),
},
loc,
),
},
loc,
);
remaining_stmts = remaining_stmts[1..];
// Make sure this file depends on the "__export" symbol
const parts = c.topLevelSymbolsToPartsForRuntime(export_ref);
ns_export_dependencies.ensureUnusedCapacity(parts.len) catch unreachable;
for (parts) |part_index| {
ns_export_dependencies.appendAssumeCapacity(
.{ .source_index = Index.runtime, .part_index = part_index },
);
}
// Make sure the CommonJS closure, if there is one, includes "exports"
c.graph.ast.items(.flags)[id].uses_exports_ref = true;
}
// Decorate "module.exports" with the "__esModule" flag to indicate that
// we used to be an ES module. This is done by wrapping the exports object
// instead of by mutating the exports object because other modules in the
// bundle (including the entry point module) may do "import * as" to get
// access to the exports object and should NOT see the "__esModule" flag.
if (force_include_exports_for_entry_point) {
const toCommonJSRef = c.runtimeFunction("__toCommonJS");
var call_args = allocator.alloc(js_ast.Expr, 1) catch unreachable;
call_args[0] = Expr.initIdentifier(exports_ref, Loc.Empty);
remaining_stmts[0] = js_ast.Stmt.assign(
Expr.allocate(
allocator,
E.Dot,
E.Dot{
.name = "exports",
.name_loc = Loc.Empty,
.target = Expr.initIdentifier(c.unbound_module_ref, Loc.Empty),
},
Loc.Empty,
),
Expr.allocate(
allocator,
E.Call,
E.Call{
.target = Expr.initIdentifier(toCommonJSRef, Loc.Empty),
.args = js_ast.ExprNodeList.init(call_args),
},
Loc.Empty,
),
);
remaining_stmts = remaining_stmts[1..];
}
// No need to generate a part if it'll be empty
if (all_export_stmts.len > 0) {
// - we must already have preallocated the parts array
// - if the parts list is completely empty, we shouldn't have gotten here in the first place
// Initialize the part that was allocated for us earlier. The information
// here will be used after this during tree shaking.
c.graph.ast.items(.parts)[id].slice()[js_ast.namespace_export_part_index] = .{
.stmts = if (c.options.output_format != .internal_bake_dev) all_export_stmts else &.{},
.symbol_uses = ns_export_symbol_uses,
.dependencies = js_ast.Dependency.List.fromList(ns_export_dependencies),
.declared_symbols = declared_symbols,
// This can be removed if nothing uses it
.can_be_removed_if_unused = true,
// Make sure this is trimmed if unused even if tree shaking is disabled
.force_tree_shaking = true,
};
// Pull in the "__export" symbol if it was used
if (export_ref.isValid()) {
c.graph.meta.items(.flags)[id].needs_export_symbol_from_runtime = true;
}
}
}
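// Taken together, the statements assembled above give the namespace-export
// part a shape roughly like this (for a file exporting `foo` and `bar`
// from a CommonJS entry point):
//
//   var exports = {};
//   __export(exports, { foo: () => foo, bar: () => bar });
//   module.exports = __toCommonJS(exports);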
const bun = @import("bun");
const string = bun.string;
const strings = bun.strings;
const LinkerContext = bun.bundle_v2.LinkerContext;
const Index = bun.bundle_v2.Index;
const Part = bun.bundle_v2.Part;
const std = @import("std");
const js_ast = bun.bundle_v2.js_ast;
const Ref = bun.bundle_v2.js_ast.Ref;
const Environment = bun.Environment;
const ResolvedExports = bun.bundle_v2.ResolvedExports;
const Logger = bun.logger;
const RefImportData = bun.bundle_v2.RefImportData;
const ImportData = bun.bundle_v2.ImportData;
const Dependency = js_ast.Dependency;
const options = bun.options;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
const Stmt = js_ast.Stmt;
const Expr = js_ast.Expr;
const E = js_ast.E;
const S = js_ast.S;
const G = js_ast.G;
const B = js_ast.B;
const Loc = Logger.Loc;

View File

@@ -0,0 +1,218 @@
pub fn findAllImportedPartsInJSOrder(this: *LinkerContext, temp_allocator: std.mem.Allocator, chunks: []Chunk) !void {
const trace = bun.perf.trace("Bundler.findAllImportedPartsInJSOrder");
defer trace.end();
var part_ranges_shared = std.ArrayList(PartRange).init(temp_allocator);
var parts_prefix_shared = std.ArrayList(PartRange).init(temp_allocator);
defer part_ranges_shared.deinit();
defer parts_prefix_shared.deinit();
for (chunks, 0..) |*chunk, index| {
switch (chunk.content) {
.javascript => {
try this.findImportedPartsInJSOrder(
chunk,
&part_ranges_shared,
&parts_prefix_shared,
@intCast(index),
);
},
.css => {}, // handled in `findImportedCSSFilesInJSOrder`
.html => {},
}
}
}
pub fn findImportedPartsInJSOrder(
this: *LinkerContext,
chunk: *Chunk,
part_ranges_shared: *std.ArrayList(PartRange),
parts_prefix_shared: *std.ArrayList(PartRange),
chunk_index: u32,
) !void {
var chunk_order_array = try std.ArrayList(Chunk.Order).initCapacity(this.allocator, chunk.files_with_parts_in_chunk.count());
defer chunk_order_array.deinit();
const distances = this.graph.files.items(.distance_from_entry_point);
for (chunk.files_with_parts_in_chunk.keys()) |source_index| {
chunk_order_array.appendAssumeCapacity(
.{
.source_index = source_index,
.distance = distances[source_index],
.tie_breaker = this.graph.stable_source_indices[source_index],
},
);
}
Chunk.Order.sort(chunk_order_array.items);
const FindImportedPartsVisitor = struct {
entry_bits: *const AutoBitSet,
flags: []const JSMeta.Flags,
parts: []BabyList(Part),
import_records: []BabyList(ImportRecord),
files: std.ArrayList(Index.Int),
part_ranges: std.ArrayList(PartRange),
visited: std.AutoHashMap(Index.Int, void),
parts_prefix: std.ArrayList(PartRange),
c: *LinkerContext,
entry_point: Chunk.EntryPoint,
chunk_index: u32,
fn appendOrExtendRange(
ranges: *std.ArrayList(PartRange),
source_index: Index.Int,
part_index: Index.Int,
) void {
if (ranges.items.len > 0) {
var last_range = &ranges.items[ranges.items.len - 1];
if (last_range.source_index.get() == source_index and last_range.part_index_end == part_index) {
last_range.part_index_end += 1;
return;
}
}
ranges.append(.{
.source_index = Index.init(source_index),
.part_index_begin = part_index,
.part_index_end = part_index + 1,
}) catch unreachable;
}
// Traverse the graph using this stable order and linearize the files with
// dependencies before dependents
pub fn visit(
v: *@This(),
source_index: Index.Int,
comptime with_code_splitting: bool,
comptime with_scb: bool,
) void {
if (source_index == Index.invalid.value) return;
const visited_entry = v.visited.getOrPut(source_index) catch unreachable;
if (visited_entry.found_existing) return;
var is_file_in_chunk = if (with_code_splitting and v.c.graph.ast.items(.css)[source_index] == null)
// when code splitting, include the file in the chunk if ALL of the entry points overlap
v.entry_bits.eql(&v.c.graph.files.items(.entry_bits)[source_index])
else
// when NOT code splitting, include the file in the chunk if ANY of the entry points overlap
v.entry_bits.hasIntersection(&v.c.graph.files.items(.entry_bits)[source_index]);
// Wrapped files can't be split because they are all inside the wrapper
const can_be_split = v.flags[source_index].wrap == .none;
const parts = v.parts[source_index].slice();
if (can_be_split and is_file_in_chunk and parts[js_ast.namespace_export_part_index].is_live) {
appendOrExtendRange(&v.part_ranges, source_index, js_ast.namespace_export_part_index);
}
const records = v.import_records[source_index].slice();
for (parts, 0..) |part, part_index_| {
const part_index = @as(u32, @truncate(part_index_));
const is_part_in_this_chunk = is_file_in_chunk and part.is_live;
for (part.import_record_indices.slice()) |record_id| {
const record: *const ImportRecord = &records[record_id];
if (record.source_index.isValid() and (record.kind == .stmt or is_part_in_this_chunk)) {
if (v.c.isExternalDynamicImport(record, source_index)) {
// Don't follow import() dependencies
continue;
}
v.visit(record.source_index.get(), with_code_splitting, with_scb);
}
}
// Then include this part after the files it imports
if (is_part_in_this_chunk) {
is_file_in_chunk = true;
if (can_be_split and
part_index != js_ast.namespace_export_part_index and
v.c.shouldIncludePart(source_index, part))
{
const js_parts = if (source_index == Index.runtime.value)
&v.parts_prefix
else
&v.part_ranges;
appendOrExtendRange(js_parts, source_index, part_index);
}
}
}
if (is_file_in_chunk) {
if (with_scb and v.c.graph.is_scb_bitset.isSet(source_index)) {
v.c.graph.files.items(.entry_point_chunk_index)[source_index] = v.chunk_index;
}
v.files.append(source_index) catch bun.outOfMemory();
// CommonJS files are all-or-nothing so all parts must be contiguous
if (!can_be_split) {
v.parts_prefix.append(
.{
.source_index = Index.init(source_index),
.part_index_begin = 0,
.part_index_end = @as(u32, @truncate(parts.len)),
},
) catch bun.outOfMemory();
}
}
}
};
part_ranges_shared.clearRetainingCapacity();
parts_prefix_shared.clearRetainingCapacity();
var visitor = FindImportedPartsVisitor{
.files = std.ArrayList(Index.Int).init(this.allocator),
.part_ranges = part_ranges_shared.*,
.parts_prefix = parts_prefix_shared.*,
.visited = std.AutoHashMap(Index.Int, void).init(this.allocator),
.flags = this.graph.meta.items(.flags),
.parts = this.graph.ast.items(.parts),
.import_records = this.graph.ast.items(.import_records),
.entry_bits = chunk.entryBits(),
.c = this,
.entry_point = chunk.entry_point,
.chunk_index = chunk_index,
};
defer {
part_ranges_shared.* = visitor.part_ranges;
parts_prefix_shared.* = visitor.parts_prefix;
visitor.visited.deinit();
}
switch (this.graph.code_splitting) {
inline else => |with_code_splitting| switch (this.graph.is_scb_bitset.bit_length > 0) {
inline else => |with_scb| {
visitor.visit(Index.runtime.value, with_code_splitting, with_scb);
for (chunk_order_array.items) |order| {
visitor.visit(order.source_index, with_code_splitting, with_scb);
}
},
},
}
const parts_in_chunk_order = try this.allocator.alloc(PartRange, visitor.part_ranges.items.len + visitor.parts_prefix.items.len);
bun.concat(PartRange, parts_in_chunk_order, &.{
visitor.parts_prefix.items,
visitor.part_ranges.items,
});
chunk.content.javascript.files_in_chunk_order = visitor.files.items;
chunk.content.javascript.parts_in_chunk_in_order = parts_in_chunk_order;
}
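// A minimal standalone sketch of the range coalescing performed by
// `appendOrExtendRange` above; `ExampleRange` and the helper are
// illustrative, not the bundler's real types.
const ExampleRange = struct { source_index: u32, begin: u32, end: u32 };
fn exampleAppendOrExtend(ranges: *std.ArrayList(ExampleRange), source_index: u32, part_index: u32) !void {
    if (ranges.items.len > 0) {
        const last = &ranges.items[ranges.items.len - 1];
        // Adjacent part in the same file: grow the previous range in place.
        if (last.source_index == source_index and last.end == part_index) {
            last.end += 1;
            return;
        }
    }
    // Different file or a gap in part indices: start a new range.
    try ranges.append(.{ .source_index = source_index, .begin = part_index, .end = part_index + 1 });
}
test "adjacent live parts coalesce into a single range" {
    var ranges = std.ArrayList(ExampleRange).init(std.testing.allocator);
    defer ranges.deinit();
    try exampleAppendOrExtend(&ranges, 3, 5);
    try exampleAppendOrExtend(&ranges, 3, 6); // extends 5..7
    try exampleAppendOrExtend(&ranges, 3, 8); // gap, so a second range
    try std.testing.expectEqual(@as(usize, 2), ranges.items.len);
    try std.testing.expectEqual(@as(u32, 7), ranges.items[0].end);
}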
const bun = @import("bun");
const LinkerContext = bun.bundle_v2.LinkerContext;
const Index = bun.bundle_v2.Index;
const BabyList = bun.BabyList;
pub const BitSet = bun.bit_set.DynamicBitSetUnmanaged;
const ImportRecord = bun.ImportRecord;
const Part = bun.bundle_v2.Part;
const std = @import("std");
const Chunk = bun.bundle_v2.Chunk;
const PartRange = bun.bundle_v2.PartRange;
const AutoBitSet = bun.bit_set.AutoBitSet;
const JSMeta = bun.bundle_v2.JSMeta;
const js_ast = bun.bundle_v2.js_ast;

View File

@@ -0,0 +1,100 @@
/// JavaScript modules are traversed in depth-first postorder. This is the
/// order that JavaScript modules were evaluated in before the top-level await
/// feature was introduced.
///
/// A
/// / \
/// B C
/// \ /
/// D
///
/// If A imports B and then C, B imports D, and C imports D, then the JavaScript
/// traversal order is D B C A.
///
/// This function may deviate from ESM import order for dynamic imports (both
/// "require()" and "import()"). This is because the import order is impossible
/// to determine since the imports happen at run-time instead of compile-time.
/// In this case we just pick an arbitrary but consistent order.
pub fn findImportedCSSFilesInJSOrder(this: *LinkerContext, temp_allocator: std.mem.Allocator, entry_point: Index) BabyList(Index) {
var visited = BitSet.initEmpty(temp_allocator, this.graph.files.len) catch bun.outOfMemory();
var order: BabyList(Index) = .{};
const all_import_records = this.graph.ast.items(.import_records);
const all_loaders = this.parse_graph.input_files.items(.loader);
const all_parts = this.graph.ast.items(.parts);
const visit = struct {
fn visit(
c: *LinkerContext,
import_records: []const BabyList(ImportRecord),
parts: []const Part.List,
loaders: []const Loader,
temp: std.mem.Allocator,
visits: *BitSet,
o: *BabyList(Index),
source_index: Index,
is_css: bool,
) void {
if (visits.isSet(source_index.get())) return;
visits.set(source_index.get());
const records: []ImportRecord = import_records[source_index.get()].slice();
const p = &parts[source_index.get()];
// Iterate over each part in the file in order
for (p.sliceConst()) |part| {
// Traverse any files imported by this part. Note that CommonJS calls
// to "require()" count as imports too, sort of as if the part has an
// ESM "import" statement in it. This may seem weird because ESM imports
// are a compile-time concept while CommonJS imports are a run-time
// concept. But we don't want to manipulate <style> tags at run-time so
// this is the only way to do it.
for (part.import_record_indices.sliceConst()) |import_record_index| {
const record = &records[import_record_index];
if (record.source_index.isValid()) {
visit(
c,
import_records,
parts,
loaders,
temp,
visits,
o,
record.source_index,
loaders[record.source_index.get()].isCSS(),
);
}
}
}
if (is_css and source_index.isValid()) {
o.push(temp, source_index) catch bun.outOfMemory();
}
}
}.visit;
// Include all files reachable from the entry point
visit(
this,
all_import_records,
all_parts,
all_loaders,
temp_allocator,
&visited,
&order,
entry_point,
false,
);
return order;
}
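// A minimal standalone sketch of the depth-first postorder described in the
// doc comment above, using the diamond graph A=0, B=1, C=2, D=3; the graph
// and names are illustrative only.
fn examplePostorderVisit(
    edges: []const []const u8,
    visited: []bool,
    order: *std.ArrayList(u8),
    node: u8,
) std.mem.Allocator.Error!void {
    if (visited[node]) return;
    visited[node] = true;
    for (edges[node]) |dep| try examplePostorderVisit(edges, visited, order, dep);
    // Dependencies are appended before their dependents.
    try order.append(node);
}
test "diamond import graph linearizes to D B C A" {
    const edges = [_][]const u8{
        &.{ 1, 2 }, // A imports B then C
        &.{3}, // B imports D
        &.{3}, // C imports D
        &.{}, // D imports nothing
    };
    var visited = [_]bool{false} ** 4;
    var order = std.ArrayList(u8).init(std.testing.allocator);
    defer order.deinit();
    try examplePostorderVisit(&edges, &visited, &order, 0);
    try std.testing.expectEqualSlices(u8, &.{ 3, 1, 2, 0 }, order.items);
}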
const bun = @import("bun");
const LinkerContext = bun.bundle_v2.LinkerContext;
const Index = bun.bundle_v2.Index;
const BabyList = bun.BabyList;
pub const BitSet = bun.bit_set.DynamicBitSetUnmanaged;
const ImportRecord = bun.ImportRecord;
const Part = bun.bundle_v2.Part;
const Loader = bun.Loader;
const std = @import("std");

View File

@@ -0,0 +1,680 @@
/// CSS files are traversed in depth-first postorder just like JavaScript. But
/// unlike JavaScript import statements, CSS "@import" rules are evaluated every
/// time instead of just the first time.
///
/// A
/// / \
/// B C
/// \ /
/// D
///
/// If A imports B and then C, B imports D, and C imports D, then the CSS
/// traversal order is D B D C A.
///
/// However, evaluating a CSS file multiple times is sort of equivalent to
/// evaluating it once at the last location. So we basically drop all but the
/// last evaluation in the order.
///
/// The only exception to this is "@layer". Evaluating a CSS file multiple
/// times is sort of equivalent to evaluating it once at the first location
/// as far as "@layer" is concerned. So we may in some cases keep both the
/// first and last locations and only write out the "@layer" information
/// for the first location.
pub fn findImportedFilesInCSSOrder(this: *LinkerContext, temp_allocator: std.mem.Allocator, entry_points: []const Index) BabyList(Chunk.CssImportOrder) {
const Visitor = struct {
allocator: std.mem.Allocator,
temp_allocator: std.mem.Allocator,
css_asts: []?*bun.css.BundlerStyleSheet,
all_import_records: []const BabyList(ImportRecord),
graph: *LinkerGraph,
parse_graph: *Graph,
has_external_import: bool = false,
visited: BabyList(Index),
order: BabyList(Chunk.CssImportOrder) = .{},
pub fn visit(
visitor: *@This(),
source_index: Index,
wrapping_conditions: *BabyList(bun.css.ImportConditions),
wrapping_import_records: *BabyList(ImportRecord),
) void {
debug(
"Visit file: {d}={s}",
.{ source_index.get(), visitor.parse_graph.input_files.items(.source)[source_index.get()].path.pretty },
);
// The CSS specification strangely does not describe what to do when there
// is a cycle. So we are left with reverse-engineering the behavior from a
// real browser. Here's what the WebKit code base has to say about this:
//
// "Check for a cycle in our import chain. If we encounter a stylesheet
// in our parent chain with the same URL, then just bail."
//
// So that's what we do here. See "StyleRuleImport::requestStyleSheet()" in
// WebKit for more information.
for (visitor.visited.slice()) |visitedSourceIndex| {
if (visitedSourceIndex.get() == source_index.get()) {
debug(
"Skip file: {d}={s}",
.{ source_index.get(), visitor.parse_graph.input_files.items(.source)[source_index.get()].path.pretty },
);
return;
}
}
visitor.visited.push(
visitor.temp_allocator,
source_index,
) catch bun.outOfMemory();
const repr: *const bun.css.BundlerStyleSheet = visitor.css_asts[source_index.get()] orelse return; // Sanity check
const top_level_rules = &repr.rules;
// TODO: should we even do this? @import rules have to be the first rules in the stylesheet, why even allow pre-import layers?
// Any pre-import layers come first
// if len(repr.AST.LayersPreImport) > 0 {
// order = append(order, cssImportOrder{
// kind: cssImportLayers,
// layers: repr.AST.LayersPreImport,
// conditions: wrappingConditions,
// conditionImportRecords: wrappingImportRecords,
// })
// }
defer {
_ = visitor.visited.pop();
}
// Iterate over the top-level "@import" rules
var import_record_idx: usize = 0;
for (top_level_rules.v.items) |*rule| {
if (rule.* == .import) {
defer import_record_idx += 1;
const record = visitor.all_import_records[source_index.get()].at(import_record_idx);
// Follow internal dependencies
if (record.source_index.isValid()) {
// If this import has conditions, fork our state so that the entire
// imported stylesheet subtree is wrapped in all of the conditions
if (rule.import.hasConditions()) {
// Fork our state
var nested_conditions = wrapping_conditions.deepClone2(visitor.allocator);
var nested_import_records = wrapping_import_records.clone(visitor.allocator) catch bun.outOfMemory();
// Clone these import conditions and append them to the state
nested_conditions.push(visitor.allocator, rule.import.conditionsWithImportRecords(visitor.allocator, &nested_import_records)) catch bun.outOfMemory();
visitor.visit(record.source_index, &nested_conditions, wrapping_import_records);
continue;
}
visitor.visit(record.source_index, wrapping_conditions, wrapping_import_records);
continue;
}
// Record external dependencies
if (!record.is_internal) {
var all_conditions = wrapping_conditions.deepClone2(visitor.allocator);
var all_import_records = wrapping_import_records.clone(visitor.allocator) catch bun.outOfMemory();
// If this import has conditions, append it to the list of overall
// conditions for this external import. Note that an external import
// may actually have multiple sets of conditions that can't be
// merged. When this happens we need to generate a nested imported
// CSS file using a data URL.
if (rule.import.hasConditions()) {
all_conditions.push(visitor.allocator, rule.import.conditionsWithImportRecords(visitor.allocator, &all_import_records)) catch bun.outOfMemory();
visitor.order.push(
visitor.allocator,
Chunk.CssImportOrder{
.kind = .{
.external_path = record.path,
},
.conditions = all_conditions,
.condition_import_records = all_import_records,
},
) catch bun.outOfMemory();
} else {
visitor.order.push(
visitor.allocator,
Chunk.CssImportOrder{
.kind = .{
.external_path = record.path,
},
.conditions = wrapping_conditions.*,
.condition_import_records = wrapping_import_records.*,
},
) catch bun.outOfMemory();
}
debug(
"Push external: {d}={s}",
.{ source_index.get(), visitor.parse_graph.input_files.items(.source)[source_index.get()].path.pretty },
);
visitor.has_external_import = true;
}
}
}
// Iterate over the "composes" directives. Note that the order doesn't
// matter for these because the output order is explicitly undefined
// in the specification.
for (visitor.all_import_records[source_index.get()].sliceConst()) |*record| {
if (record.kind == .composes and record.source_index.isValid()) {
visitor.visit(record.source_index, wrapping_conditions, wrapping_import_records);
}
}
if (comptime bun.Environment.isDebug) {
debug(
"Push file: {d}={s}",
.{ source_index.get(), visitor.parse_graph.input_files.items(.source)[source_index.get()].path.pretty },
);
}
// Accumulate imports in depth-first postorder
visitor.order.push(visitor.allocator, Chunk.CssImportOrder{
.kind = .{ .source_index = source_index },
.conditions = wrapping_conditions.*,
}) catch bun.outOfMemory();
}
};
var visitor = Visitor{
.allocator = this.allocator,
.temp_allocator = temp_allocator,
.graph = &this.graph,
.parse_graph = this.parse_graph,
.visited = BabyList(Index).initCapacity(temp_allocator, 16) catch bun.outOfMemory(),
.css_asts = this.graph.ast.items(.css),
.all_import_records = this.graph.ast.items(.import_records),
};
var wrapping_conditions: BabyList(bun.css.ImportConditions) = .{};
var wrapping_import_records: BabyList(ImportRecord) = .{};
// Include all files reachable from any entry point
for (entry_points) |entry_point| {
visitor.visit(entry_point, &wrapping_conditions, &wrapping_import_records);
}
var order = visitor.order;
var wip_order = BabyList(Chunk.CssImportOrder).initCapacity(temp_allocator, order.len) catch bun.outOfMemory();
const css_asts: []const ?*bun.css.BundlerStyleSheet = this.graph.ast.items(.css);
debugCssOrder(this, &order, .BEFORE_HOISTING);
// CSS syntax unfortunately only allows "@import" rules at the top of the
// file. This means we must hoist all external "@import" rules to the top of
// the file when bundling, even though doing so will change the order of CSS
// evaluation.
if (visitor.has_external_import) {
// Pass 1: Pull out leading "@layer" and external "@import" rules
var is_at_layer_prefix = true;
for (order.slice()) |*entry| {
if ((entry.kind == .layers and is_at_layer_prefix) or entry.kind == .external_path) {
wip_order.push(temp_allocator, entry.*) catch bun.outOfMemory();
}
if (entry.kind != .layers) {
is_at_layer_prefix = false;
}
}
// Pass 2: Append everything that we didn't pull out in pass 1
is_at_layer_prefix = true;
for (order.slice()) |*entry| {
if ((entry.kind != .layers or !is_at_layer_prefix) and entry.kind != .external_path) {
wip_order.push(temp_allocator, entry.*) catch bun.outOfMemory();
}
if (entry.kind != .layers) {
is_at_layer_prefix = false;
}
}
order.len = wip_order.len;
@memcpy(order.slice(), wip_order.slice());
wip_order.clearRetainingCapacity();
}
debugCssOrder(this, &order, .AFTER_HOISTING);
// Next, optimize import order. If there are duplicate copies of an imported
// file, replace all but the last copy with just the layers that are in that
// file. This works because in CSS, the last instance of a declaration
// overrides all previous instances of that declaration.
{
var source_index_duplicates = std.AutoArrayHashMap(u32, BabyList(u32)).init(temp_allocator);
var external_path_duplicates = std.StringArrayHashMap(BabyList(u32)).init(temp_allocator);
var i: u32 = visitor.order.len;
next_backward: while (i != 0) {
i -= 1;
const entry = visitor.order.at(i);
switch (entry.kind) {
.source_index => |idx| {
const gop = source_index_duplicates.getOrPut(idx.get()) catch bun.outOfMemory();
if (!gop.found_existing) {
gop.value_ptr.* = BabyList(u32){};
}
for (gop.value_ptr.slice()) |j| {
if (isConditionalImportRedundant(&entry.conditions, &order.at(j).conditions)) {
// This import is redundant, but it might have @layer rules.
// So we should keep the @layer rules so that the cascade ordering of layers
// is preserved
order.mut(i).kind = .{
.layers = Chunk.CssImportOrder.Layers.borrow(&css_asts[idx.get()].?.layer_names),
};
continue :next_backward;
}
}
gop.value_ptr.push(temp_allocator, i) catch bun.outOfMemory();
},
.external_path => |p| {
const gop = external_path_duplicates.getOrPut(p.text) catch bun.outOfMemory();
if (!gop.found_existing) {
gop.value_ptr.* = BabyList(u32){};
}
for (gop.value_ptr.slice()) |j| {
if (isConditionalImportRedundant(&entry.conditions, &order.at(j).conditions)) {
// Don't remove duplicates entirely. The import conditions may
// still introduce layers to the layer order. Represent this as a
// file with an empty layer list.
order.mut(i).kind = .{
.layers = .{ .owned = .{} },
};
continue :next_backward;
}
}
gop.value_ptr.push(temp_allocator, i) catch bun.outOfMemory();
},
.layers => {},
}
}
}
debugCssOrder(this, &order, .AFTER_REMOVING_DUPLICATES);
// Then optimize "@layer" rules by removing redundant ones. This loop goes
// forward instead of backward because "@layer" takes effect at the first
// copy instead of the last copy like other things in CSS.
{
const DuplicateEntry = struct {
layers: []const bun.css.LayerName,
indices: bun.BabyList(u32) = .{},
};
var layer_duplicates = bun.BabyList(DuplicateEntry){};
next_forward: for (order.slice()) |*entry| {
debugCssOrder(this, &wip_order, .WHILE_OPTIMIZING_REDUNDANT_LAYER_RULES);
switch (entry.kind) {
// Simplify the conditions since we know they only wrap "@layer"
.layers => |*layers| {
// Truncate the conditions at the first anonymous layer
for (entry.conditions.slice(), 0..) |*condition_, i| {
const conditions: *bun.css.ImportConditions = condition_;
// The layer is anonymous if it's a "layer" token without any
// children instead of a "layer(...)" token with children:
//
// /* entry.css */
// @import "foo.css" layer;
//
// /* foo.css */
// @layer foo;
//
// We don't need to generate this (as far as I can tell):
//
// @layer {
// @layer foo;
// }
//
if (conditions.hasAnonymousLayer()) {
entry.conditions.len = @intCast(i);
layers.replace(temp_allocator, .{});
break;
}
}
// If there are no layer names for this file, trim all conditions
// without layers because we know they have no effect.
//
// (They have no effect because this is a `.layer` import with no rules
// and only layer declarations.)
//
// /* entry.css */
// @import "foo.css" layer(foo) supports(display: flex);
//
// /* foo.css */
// @import "empty.css" supports(display: grid);
//
// That would result in this:
//
// @supports (display: flex) {
// @layer foo {
// @supports (display: grid) {}
// }
// }
//
// Here we can trim "supports(display: grid)" to generate this:
//
// @supports (display: flex) {
// @layer foo;
// }
//
if (layers.inner().len == 0) {
var i: u32 = entry.conditions.len;
while (i != 0) {
i -= 1;
const condition = entry.conditions.at(i);
if (condition.layer != null) {
break;
}
entry.conditions.len = i;
}
}
// Remove unnecessary entries entirely
if (entry.conditions.len == 0 and layers.inner().len == 0) {
continue;
}
},
else => {},
}
// Omit redundant "@layer" rules with the same set of layer names. Note
// that this tests all import order entries (not just layer ones) because
// sometimes non-layer ones can make following layer ones redundant.
// layers_post_import
const layers_key: []const bun.css.LayerName = switch (entry.kind) {
.source_index => css_asts[entry.kind.source_index.get()].?.layer_names.sliceConst(),
.layers => entry.kind.layers.inner().sliceConst(),
.external_path => &.{},
};
var index: usize = 0;
while (index < layer_duplicates.len) : (index += 1) {
const both_equal = both_equal: {
if (layers_key.len != layer_duplicates.at(index).layers.len) {
break :both_equal false;
}
for (layers_key, layer_duplicates.at(index).layers) |*a, *b| {
if (!a.eql(b)) {
break :both_equal false;
}
}
break :both_equal true;
};
if (both_equal) {
break;
}
}
if (index == layer_duplicates.len) {
// This is the first time we've seen this combination of layer names.
// Allocate a new set of duplicate indices to track this combination.
layer_duplicates.push(temp_allocator, DuplicateEntry{
.layers = layers_key,
}) catch bun.outOfMemory();
}
var duplicates = layer_duplicates.at(index).indices.slice();
var j = duplicates.len;
while (j != 0) {
j -= 1;
const duplicate_index = duplicates[j];
if (isConditionalImportRedundant(&entry.conditions, &wip_order.at(duplicate_index).conditions)) {
if (entry.kind != .layers) {
// If an empty layer is followed immediately by a full layer and
// everything else is identical, then we don't need to emit the
// empty layer. For example:
//
// @media screen {
// @supports (display: grid) {
// @layer foo;
// }
// }
// @media screen {
// @supports (display: grid) {
// @layer foo {
// div {
// color: red;
// }
// }
// }
// }
//
// This can be improved by dropping the empty layer. But we can
// only do this if there's nothing in between these two rules.
if (j == duplicates.len - 1 and duplicate_index == wip_order.len - 1) {
const other = wip_order.at(duplicate_index);
if (other.kind == .layers and importConditionsAreEqual(entry.conditions.sliceConst(), other.conditions.sliceConst())) {
// Remove the previous entry and then overwrite it below
duplicates = duplicates[0..j];
wip_order.len = duplicate_index;
break;
}
}
// Non-layer entries still need to be present because they have
// other side effects besides inserting things in the layer order
wip_order.push(temp_allocator, entry.*) catch bun.outOfMemory();
}
// Don't add this to the duplicate list below because it's redundant
continue :next_forward;
}
}
layer_duplicates.mut(index).indices.push(
temp_allocator,
wip_order.len,
) catch bun.outOfMemory();
wip_order.push(temp_allocator, entry.*) catch bun.outOfMemory();
}
debugCssOrder(this, &wip_order, .WHILE_OPTIMIZING_REDUNDANT_LAYER_RULES);
order.len = wip_order.len;
@memcpy(order.slice(), wip_order.slice());
wip_order.clearRetainingCapacity();
}
debugCssOrder(this, &order, .AFTER_OPTIMIZING_REDUNDANT_LAYER_RULES);
// Finally, merge adjacent "@layer" rules with identical conditions together.
{
var did_clone: i32 = -1;
for (order.slice()) |*entry| {
if (entry.kind == .layers and wip_order.len > 0) {
const prev_index = wip_order.len - 1;
const prev = wip_order.at(prev_index);
if (prev.kind == .layers and importConditionsAreEqual(prev.conditions.sliceConst(), entry.conditions.sliceConst())) {
if (did_clone != prev_index) {
did_clone = @intCast(prev_index);
}
// need to clone the layers here as they could be references into the CSS AST
wip_order.mut(prev_index).kind.layers.toOwned(temp_allocator).append(
temp_allocator,
entry.kind.layers.inner().sliceConst(),
) catch bun.outOfMemory();
}
}
}
}
debugCssOrder(this, &order, .AFTER_MERGING_ADJACENT_LAYER_RULES);
return order;
}
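// A quick sketch of the "all but the last copy" rule implemented above,
// using hypothetical files:
//
//   /* entry.css */
//   @import "a.css";
//   @import "b.css";
//   @import "a.css";
//
// The first copy of "a.css" is masked by the second, so it is reduced to
// just its "@layer" names (kind = .layers), and the effective order is:
// (layers of a), b.css, a.css, entry.css.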
fn importConditionsAreEqual(a: []const bun.css.ImportConditions, b: []const bun.css.ImportConditions) bool {
if (a.len != b.len) {
return false;
}
for (a, b) |*ai, *bi| {
if (!ai.layersEql(bi) or !ai.supportsEql(bi) or !ai.media.eql(&bi.media)) return false;
}
return true;
}
/// Given two "@import" rules for the same source index (an earlier one and a
/// later one), the earlier one is masked by the later one if the later one's
/// condition list is a prefix of the earlier one's condition list.
///
/// For example:
///
/// // entry.css
/// @import "foo.css" supports(display: flex);
/// @import "bar.css" supports(display: flex);
///
/// // foo.css
/// @import "lib.css" screen;
///
/// // bar.css
/// @import "lib.css";
///
/// When we bundle this code we'll get an import order as follows:
///
/// 1. lib.css [supports(display: flex), screen]
/// 2. foo.css [supports(display: flex)]
/// 3. lib.css [supports(display: flex)]
/// 4. bar.css [supports(display: flex)]
/// 5. entry.css []
///
/// For "lib.css", the entry with the conditions [supports(display: flex)] should
/// make the entry with the conditions [supports(display: flex), screen] redundant.
///
/// Note that all of this deliberately ignores the existence of "@layer" because
/// that is handled separately. All of this is only for handling unlayered styles.
pub fn isConditionalImportRedundant(earlier: *const BabyList(bun.css.ImportConditions), later: *const BabyList(bun.css.ImportConditions)) bool {
if (later.len > earlier.len) return false;
for (0..later.len) |i| {
const a = earlier.at(i);
const b = later.at(i);
// Only compare "@supports" and "@media" if "@layers" is equal
if (a.layersEql(b)) {
const same_supports = a.supportsEql(b);
const same_media = a.media.eql(&b.media);
// If the import conditions are exactly equal, then only keep
// the later one. The earlier one is redundant. Example:
//
// @import "foo.css" layer(abc) supports(display: flex) screen;
// @import "foo.css" layer(abc) supports(display: flex) screen;
//
// The later one makes the earlier one redundant.
if (same_supports and same_media) {
continue;
}
// If the media conditions are exactly equal and the later one
// doesn't have any supports conditions, then the later one will
// apply in all cases where the earlier one applies. Example:
//
// @import "foo.css" layer(abc) supports(display: flex) screen;
// @import "foo.css" layer(abc) screen;
//
// The later one makes the earlier one redundant.
if (same_media and b.supports == null) {
continue;
}
// If the supports conditions are exactly equal and the later one
// doesn't have any media conditions, then the later one will
// apply in all cases where the earlier one applies. Example:
//
// @import "foo.css" layer(abc) supports(display: flex) screen;
// @import "foo.css" layer(abc) supports(display: flex);
//
// The later one makes the earlier one redundant.
if (same_supports and b.media.media_queries.items.len == 0) {
continue;
}
}
return false;
}
return true;
}
const CssOrderDebugStep = enum {
BEFORE_HOISTING,
AFTER_HOISTING,
AFTER_REMOVING_DUPLICATES,
WHILE_OPTIMIZING_REDUNDANT_LAYER_RULES,
AFTER_OPTIMIZING_REDUNDANT_LAYER_RULES,
AFTER_MERGING_ADJACENT_LAYER_RULES,
};
fn debugCssOrder(this: *LinkerContext, order: *const BabyList(Chunk.CssImportOrder), comptime step: CssOrderDebugStep) void {
if (comptime bun.Environment.isDebug) {
const env_var = "BUN_DEBUG_CSS_ORDER_" ++ @tagName(step);
const enable_all = bun.getenvTruthy("BUN_DEBUG_CSS_ORDER");
if (enable_all or bun.getenvTruthy(env_var)) {
debugCssOrderImpl(this, order, step);
}
}
}
fn debugCssOrderImpl(this: *LinkerContext, order: *const BabyList(Chunk.CssImportOrder), comptime step: CssOrderDebugStep) void {
if (comptime bun.Environment.isDebug) {
debug("CSS order {s}:\n", .{@tagName(step)});
var arena = bun.ArenaAllocator.init(bun.default_allocator);
defer arena.deinit();
for (order.slice(), 0..) |entry, i| {
const conditions_str = if (entry.conditions.len > 0) conditions_str: {
var arrlist = std.ArrayListUnmanaged(u8){};
const writer = arrlist.writer(arena.allocator());
const W = @TypeOf(writer);
arrlist.appendSlice(arena.allocator(), "[") catch unreachable;
var symbols = Symbol.Map{};
for (entry.conditions.sliceConst(), 0..) |*condition_, j| {
const condition: *const bun.css.ImportConditions = condition_;
const scratchbuf = std.ArrayList(u8).init(arena.allocator());
var printer = bun.css.Printer(W).new(
arena.allocator(),
scratchbuf,
writer,
bun.css.PrinterOptions.default(),
.{
.import_records = &entry.condition_import_records,
.ast_urls_for_css = this.parse_graph.ast.items(.url_for_css),
.ast_unique_key_for_additional_file = this.parse_graph.input_files.items(.unique_key_for_additional_file),
},
&this.mangled_props,
&symbols,
);
condition.toCss(W, &printer) catch unreachable;
if (j != entry.conditions.len - 1) {
arrlist.appendSlice(arena.allocator(), ", ") catch unreachable;
}
}
arrlist.appendSlice(arena.allocator(), " ]") catch unreachable;
break :conditions_str arrlist.items;
} else "[]";
debug(" {d}: {} {s}\n", .{ i, entry.fmt(this), conditions_str });
}
}
}
const bun = @import("bun");
const BabyList = bun.BabyList;
const Index = bun.bundle_v2.Index;
const LinkerContext = bun.bundle_v2.LinkerContext;
const Environment = bun.Environment;
const default_allocator = bun.default_allocator;
const std = @import("std");
const js_ast = bun.js_ast;
const ImportRecord = bun.ImportRecord;
const Symbol = js_ast.Symbol;
const B = js_ast.B;
const bundler = bun.bundle_v2;
const Graph = bundler.Graph;
const LinkerGraph = bundler.LinkerGraph;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
pub const ParseTask = bun.bundle_v2.ParseTask;
const Chunk = bundler.Chunk;
const debug = LinkerContext.debug;

View File

@@ -0,0 +1,595 @@
pub fn generateChunksInParallel(c: *LinkerContext, chunks: []Chunk, comptime is_dev_server: bool) !if (is_dev_server) void else std.ArrayList(options.OutputFile) {
const trace = bun.perf.trace("Bundler.generateChunksInParallel");
defer trace.end();
c.mangleLocalCss();
var has_js_chunk = false;
var has_css_chunk = false;
var has_html_chunk = false;
bun.assert(chunks.len > 0);
{
// TODO(@paperclover/bake): instead of running a renamer per chunk, run it per file
debug(" START {d} renamers", .{chunks.len});
defer debug(" DONE {d} renamers", .{chunks.len});
var wait_group = try c.allocator.create(sync.WaitGroup);
wait_group.init();
defer {
wait_group.deinit();
c.allocator.destroy(wait_group);
}
wait_group.counter = @as(u32, @truncate(chunks.len));
const ctx = GenerateChunkCtx{ .chunk = &chunks[0], .wg = wait_group, .c = c, .chunks = chunks };
try c.parse_graph.pool.worker_pool.doPtr(c.allocator, wait_group, ctx, LinkerContext.generateJSRenamer, chunks);
}
if (c.source_maps.line_offset_tasks.len > 0) {
debug(" START {d} source maps (line offset)", .{chunks.len});
defer debug(" DONE {d} source maps (line offset)", .{chunks.len});
c.source_maps.line_offset_wait_group.wait();
c.allocator.free(c.source_maps.line_offset_tasks);
c.source_maps.line_offset_tasks.len = 0;
}
{
// Per CSS chunk:
// Remove duplicate rules across files. This must be done in serial, not
// in parallel, and must be done from the last rule to the first rule.
if (c.parse_graph.css_file_count > 0) {
var wait_group = try c.allocator.create(sync.WaitGroup);
wait_group.init();
defer {
wait_group.deinit();
c.allocator.destroy(wait_group);
}
const total_count = total_count: {
var total_count: usize = 0;
for (chunks) |*chunk| {
if (chunk.content == .css) total_count += 1;
}
break :total_count total_count;
};
debug(" START {d} prepare CSS ast (total count)", .{total_count});
defer debug(" DONE {d} prepare CSS ast (total count)", .{total_count});
var batch = ThreadPoolLib.Batch{};
const tasks = c.allocator.alloc(LinkerContext.PrepareCssAstTask, total_count) catch bun.outOfMemory();
var i: usize = 0;
for (chunks) |*chunk| {
if (chunk.content == .css) {
tasks[i] = LinkerContext.PrepareCssAstTask{
.task = ThreadPoolLib.Task{
.callback = &LinkerContext.prepareCssAstsForChunk,
},
.chunk = chunk,
.linker = c,
.wg = wait_group,
};
batch.push(.from(&tasks[i].task));
i += 1;
}
}
wait_group.counter = @as(u32, @truncate(total_count));
c.parse_graph.pool.worker_pool.schedule(batch);
wait_group.wait();
} else if (Environment.isDebug) {
for (chunks) |*chunk| {
bun.assert(chunk.content != .css);
}
}
}
{
const chunk_contexts = c.allocator.alloc(GenerateChunkCtx, chunks.len) catch unreachable;
defer c.allocator.free(chunk_contexts);
var wait_group = try c.allocator.create(sync.WaitGroup);
wait_group.init();
defer {
wait_group.deinit();
c.allocator.destroy(wait_group);
}
errdefer wait_group.wait();
{
var total_count: usize = 0;
for (chunks, chunk_contexts) |*chunk, *chunk_ctx| {
switch (chunk.content) {
.javascript => {
chunk_ctx.* = .{ .wg = wait_group, .c = c, .chunks = chunks, .chunk = chunk };
total_count += chunk.content.javascript.parts_in_chunk_in_order.len;
chunk.compile_results_for_chunk = c.allocator.alloc(CompileResult, chunk.content.javascript.parts_in_chunk_in_order.len) catch bun.outOfMemory();
has_js_chunk = true;
},
.css => {
has_css_chunk = true;
chunk_ctx.* = .{ .wg = wait_group, .c = c, .chunks = chunks, .chunk = chunk };
total_count += chunk.content.css.imports_in_chunk_in_order.len;
chunk.compile_results_for_chunk = c.allocator.alloc(CompileResult, chunk.content.css.imports_in_chunk_in_order.len) catch bun.outOfMemory();
},
.html => {
has_html_chunk = true;
// HTML gets only one chunk.
chunk_ctx.* = .{ .wg = wait_group, .c = c, .chunks = chunks, .chunk = chunk };
total_count += 1;
chunk.compile_results_for_chunk = c.allocator.alloc(CompileResult, 1) catch bun.outOfMemory();
},
}
}
debug(" START {d} compiling part ranges", .{total_count});
defer debug(" DONE {d} compiling part ranges", .{total_count});
const combined_part_ranges = c.allocator.alloc(PendingPartRange, total_count) catch unreachable;
defer c.allocator.free(combined_part_ranges);
var remaining_part_ranges = combined_part_ranges;
var batch = ThreadPoolLib.Batch{};
for (chunks, chunk_contexts) |*chunk, *chunk_ctx| {
switch (chunk.content) {
.javascript => {
for (chunk.content.javascript.parts_in_chunk_in_order, 0..) |part_range, i| {
if (Environment.enable_logs) {
debugPartRanges(
"Part Range: {s} {s} ({d}..{d})",
.{
c.parse_graph.input_files.items(.source)[part_range.source_index.get()].path.pretty,
@tagName(c.parse_graph.ast.items(.target)[part_range.source_index.get()].bakeGraph()),
part_range.part_index_begin,
part_range.part_index_end,
},
);
}
remaining_part_ranges[0] = .{
.part_range = part_range,
.i = @intCast(i),
.task = .{
.callback = &generateCompileResultForJSChunk,
},
.ctx = chunk_ctx,
};
batch.push(.from(&remaining_part_ranges[0].task));
remaining_part_ranges = remaining_part_ranges[1..];
}
},
.css => {
for (0..chunk.content.css.imports_in_chunk_in_order.len) |i| {
remaining_part_ranges[0] = .{
.part_range = .{},
.i = @intCast(i),
.task = .{
.callback = &generateCompileResultForCssChunk,
},
.ctx = chunk_ctx,
};
batch.push(.from(&remaining_part_ranges[0].task));
remaining_part_ranges = remaining_part_ranges[1..];
}
},
.html => {
remaining_part_ranges[0] = .{
.part_range = .{},
.i = 0,
.task = .{
.callback = &generateCompileResultForHtmlChunk,
},
.ctx = chunk_ctx,
};
batch.push(.from(&remaining_part_ranges[0].task));
remaining_part_ranges = remaining_part_ranges[1..];
},
}
}
wait_group.counter = @as(u32, @truncate(total_count));
c.parse_graph.pool.worker_pool.schedule(batch);
wait_group.wait();
}
if (c.source_maps.quoted_contents_tasks.len > 0) {
debug(" START {d} source maps (quoted contents)", .{chunks.len});
defer debug(" DONE {d} source maps (quoted contents)", .{chunks.len});
c.source_maps.quoted_contents_wait_group.wait();
c.allocator.free(c.source_maps.quoted_contents_tasks);
c.source_maps.quoted_contents_tasks.len = 0;
}
// For dev server, only post-process CSS + HTML chunks.
const chunks_to_do = if (is_dev_server) chunks[1..] else chunks;
if (!is_dev_server or chunks_to_do.len > 0) {
bun.assert(chunks_to_do.len > 0);
debug(" START {d} postprocess chunks", .{chunks_to_do.len});
defer debug(" DONE {d} postprocess chunks", .{chunks_to_do.len});
wait_group.init();
wait_group.counter = @as(u32, @truncate(chunks_to_do.len));
try c.parse_graph.pool.worker_pool.doPtr(
c.allocator,
wait_group,
chunk_contexts[0],
generateChunk,
chunks_to_do,
);
}
}
// When bake.DevServer is in use, we're going to take a different code path at the end.
// We want to extract the source code of each part instead of combining it into a single file.
// This is so that when hot-module updates happen, we can:
//
// - Reuse unchanged parts to assemble the full bundle if Cmd+R is used in the browser
// - Send only the newly changed code through a socket.
// - Use IncrementalGraph to have full knowledge of referenced CSS files.
//
// When this isn't the initial bundle, concatenation as usual would produce a
// broken module. It is DevServer's job to create and send HMR patches.
if (is_dev_server) return;
// TODO: enforceNoCyclicChunkImports()
{
var path_names_map = bun.StringHashMap(void).init(c.allocator);
defer path_names_map.deinit();
const DuplicateEntry = struct {
sources: std.ArrayListUnmanaged(*Chunk) = .{},
};
var duplicates_map: bun.StringArrayHashMapUnmanaged(DuplicateEntry) = .{};
var chunk_visit_map = try AutoBitSet.initEmpty(c.allocator, chunks.len);
defer chunk_visit_map.deinit(c.allocator);
// Compute the final hashes of each chunk, then use those to create the final
// paths of each chunk. This can technically be done in parallel but it
// probably doesn't matter so much because we're not hashing that much data.
for (chunks, 0..) |*chunk, index| {
var hash: ContentHasher = .{};
c.appendIsolatedHashesForImportedChunks(&hash, chunks, @intCast(index), &chunk_visit_map);
chunk_visit_map.setAll(false);
chunk.template.placeholder.hash = hash.digest();
const rel_path = std.fmt.allocPrint(c.allocator, "{any}", .{chunk.template}) catch bun.outOfMemory();
bun.path.platformToPosixInPlace(u8, rel_path);
if ((try path_names_map.getOrPut(rel_path)).found_existing) {
// collect all duplicates in a list
const dup = try duplicates_map.getOrPut(bun.default_allocator, rel_path);
if (!dup.found_existing) dup.value_ptr.* = .{};
try dup.value_ptr.sources.append(bun.default_allocator, chunk);
continue;
}
// resolve any /./ and /../ occurrences
// use resolvePosix since we asserted above all seps are '/'
if (Environment.isWindows and std.mem.indexOf(u8, rel_path, "/./") != null) {
var buf: bun.PathBuffer = undefined;
const rel_path_fixed = c.allocator.dupe(u8, bun.path.normalizeBuf(rel_path, &buf, .posix)) catch bun.outOfMemory();
chunk.final_rel_path = rel_path_fixed;
continue;
}
chunk.final_rel_path = rel_path;
}
if (duplicates_map.count() > 0) {
var msg = std.ArrayList(u8).init(bun.default_allocator);
errdefer msg.deinit();
var entry_naming: ?[]const u8 = null;
var chunk_naming: ?[]const u8 = null;
var asset_naming: ?[]const u8 = null;
const writer = msg.writer();
try writer.print("Multiple files share the same output path\n", .{});
const kinds = c.graph.files.items(.entry_point_kind);
for (duplicates_map.keys(), duplicates_map.values()) |key, dup| {
try writer.print(" {s}:\n", .{key});
for (dup.sources.items) |chunk| {
if (chunk.entry_point.is_entry_point) {
if (kinds[chunk.entry_point.source_index] == .user_specified) {
entry_naming = chunk.template.data;
} else {
chunk_naming = chunk.template.data;
}
} else {
asset_naming = chunk.template.data;
}
const source_index = chunk.entry_point.source_index;
const file: Logger.Source = c.parse_graph.input_files.items(.source)[source_index];
try writer.print(" from input {s}\n", .{file.path.pretty});
}
}
try c.log.addError(null, Logger.Loc.Empty, try msg.toOwnedSlice());
inline for (.{
.{ .name = "entry", .template = entry_naming },
.{ .name = "chunk", .template = chunk_naming },
.{ .name = "asset", .template = asset_naming },
}) |x| brk: {
const template = x.template orelse break :brk;
const name = x.name;
try c.log.addMsg(.{
.kind = .note,
.data = .{
.text = try std.fmt.allocPrint(bun.default_allocator, name ++ " naming is '{s}', consider adding '[hash]' to make filenames unique", .{template}),
},
});
}
return error.DuplicateOutputPath;
}
}
var output_files = std.ArrayList(options.OutputFile).initCapacity(
bun.default_allocator,
(if (c.options.source_maps.hasExternalFiles()) chunks.len * 2 else chunks.len) +
@as(usize, c.parse_graph.additional_output_files.items.len),
) catch unreachable;
const root_path = c.resolver.opts.output_dir;
const more_than_one_output = c.parse_graph.additional_output_files.items.len > 0 or c.options.generate_bytecode_cache or (has_css_chunk and has_js_chunk) or (has_html_chunk and (has_js_chunk or has_css_chunk));
if (!c.resolver.opts.compile and more_than_one_output and !c.resolver.opts.supports_multiple_outputs) {
try c.log.addError(null, Logger.Loc.Empty, "cannot write multiple output files without an output directory");
return error.MultipleOutputFilesWithoutOutputDir;
}
if (root_path.len > 0) {
try c.writeOutputFilesToDisk(root_path, chunks, &output_files);
} else {
// In-memory build
for (chunks) |*chunk| {
var display_size: usize = 0;
const _code_result = chunk.intermediate_output.code(
null,
c.parse_graph,
&c.graph,
c.resolver.opts.public_path,
chunk,
chunks,
&display_size,
chunk.content.sourcemap(c.options.source_maps) != .none,
);
var code_result = _code_result catch @panic("Failed to allocate memory for output file");
var sourcemap_output_file: ?options.OutputFile = null;
const input_path = try bun.default_allocator.dupe(
u8,
if (chunk.entry_point.is_entry_point)
c.parse_graph.input_files.items(.source)[chunk.entry_point.source_index].path.text
else
chunk.final_rel_path,
);
switch (chunk.content.sourcemap(c.options.source_maps)) {
.external, .linked => |tag| {
const output_source_map = chunk.output_source_map.finalize(bun.default_allocator, code_result.shifts) catch @panic("Failed to allocate memory for external source map");
var source_map_final_rel_path = bun.default_allocator.alloc(u8, chunk.final_rel_path.len + ".map".len) catch unreachable;
bun.copy(u8, source_map_final_rel_path, chunk.final_rel_path);
bun.copy(u8, source_map_final_rel_path[chunk.final_rel_path.len..], ".map");
if (tag == .linked) {
const a, const b = if (c.options.public_path.len > 0)
cheapPrefixNormalizer(c.options.public_path, source_map_final_rel_path)
else
.{ "", std.fs.path.basename(source_map_final_rel_path) };
const source_map_start = "//# sourceMappingURL=";
const total_len = code_result.buffer.len + source_map_start.len + a.len + b.len + "\n".len;
var buf = std.ArrayList(u8).initCapacity(Chunk.IntermediateOutput.allocatorForSize(total_len), total_len) catch @panic("Failed to allocate memory for output file with inline source map");
buf.appendSliceAssumeCapacity(code_result.buffer);
buf.appendSliceAssumeCapacity(source_map_start);
buf.appendSliceAssumeCapacity(a);
buf.appendSliceAssumeCapacity(b);
buf.appendAssumeCapacity('\n');
Chunk.IntermediateOutput.allocatorForSize(code_result.buffer.len).free(code_result.buffer);
code_result.buffer = buf.items;
}
sourcemap_output_file = options.OutputFile.init(.{
.data = .{
.buffer = .{
.data = output_source_map,
.allocator = bun.default_allocator,
},
},
.hash = null,
.loader = .json,
.input_loader = .file,
.output_path = source_map_final_rel_path,
.output_kind = .sourcemap,
.input_path = try strings.concat(bun.default_allocator, &.{ input_path, ".map" }),
.side = null,
.entry_point_index = null,
.is_executable = false,
});
},
.@"inline" => {
const output_source_map = chunk.output_source_map.finalize(bun.default_allocator, code_result.shifts) catch @panic("Failed to allocate memory for external source map");
const encode_len = base64.encodeLen(output_source_map);
const source_map_start = "//# sourceMappingURL=data:application/json;base64,";
const total_len = code_result.buffer.len + source_map_start.len + encode_len + 1;
var buf = std.ArrayList(u8).initCapacity(Chunk.IntermediateOutput.allocatorForSize(total_len), total_len) catch @panic("Failed to allocate memory for output file with inline source map");
buf.appendSliceAssumeCapacity(code_result.buffer);
buf.appendSliceAssumeCapacity(source_map_start);
buf.items.len += encode_len;
_ = base64.encode(buf.items[buf.items.len - encode_len ..], output_source_map);
buf.appendAssumeCapacity('\n');
Chunk.IntermediateOutput.allocatorForSize(code_result.buffer.len).free(code_result.buffer);
code_result.buffer = buf.items;
},
.none => {},
}
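// Optionally emit a bytecode cache next to JavaScript entry points so the
// runtime can skip parsing and compiling the source at startup.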
const bytecode_output_file: ?options.OutputFile = brk: {
if (c.options.generate_bytecode_cache) {
const loader: Loader = if (chunk.entry_point.is_entry_point)
c.parse_graph.input_files.items(.loader)[
chunk.entry_point.source_index
]
else
.js;
if (loader.isJavaScriptLike()) {
JSC.VirtualMachine.is_bundler_thread_for_bytecode_cache = true;
JSC.initialize(false);
var fdpath: bun.PathBuffer = undefined;
var source_provider_url = try bun.String.createFormat("{s}" ++ bun.bytecode_extension, .{chunk.final_rel_path});
source_provider_url.ref();
defer source_provider_url.deref();
if (JSC.CachedBytecode.generate(c.options.output_format, code_result.buffer, &source_provider_url)) |result| {
const bytecode, const cached_bytecode = result;
const source_provider_url_str = source_provider_url.toSlice(bun.default_allocator);
defer source_provider_url_str.deinit();
debug("Bytecode cache generated {s}: {}", .{ source_provider_url_str.slice(), bun.fmt.size(bytecode.len, .{ .space_between_number_and_unit = true }) });
@memcpy(fdpath[0..chunk.final_rel_path.len], chunk.final_rel_path);
fdpath[chunk.final_rel_path.len..][0..bun.bytecode_extension.len].* = bun.bytecode_extension.*;
break :brk options.OutputFile.init(.{
.output_path = bun.default_allocator.dupe(u8, source_provider_url_str.slice()) catch unreachable,
.input_path = std.fmt.allocPrint(bun.default_allocator, "{s}" ++ bun.bytecode_extension, .{chunk.final_rel_path}) catch unreachable,
.input_loader = .js,
.hash = if (chunk.template.placeholder.hash != null) bun.hash(bytecode) else null,
.output_kind = .bytecode,
.loader = .file,
.size = @as(u32, @truncate(bytecode.len)),
.display_size = @as(u32, @truncate(bytecode.len)),
.data = .{
.buffer = .{ .data = bytecode, .allocator = cached_bytecode.allocator() },
},
.side = null,
.entry_point_index = null,
.is_executable = false,
});
} else {
// an error
c.log.addErrorFmt(null, Logger.Loc.Empty, bun.default_allocator, "Failed to generate bytecode for {s}", .{
chunk.final_rel_path,
}) catch unreachable;
}
}
}
break :brk null;
};
const source_map_index: ?u32 = if (sourcemap_output_file != null)
@as(u32, @truncate(output_files.items.len + 1))
else
null;
const bytecode_index: ?u32 = if (bytecode_output_file != null and source_map_index != null)
@as(u32, @truncate(output_files.items.len + 2))
else if (bytecode_output_file != null)
@as(u32, @truncate(output_files.items.len + 1))
else
null;
const output_kind = if (chunk.content == .css)
.asset
else if (chunk.entry_point.is_entry_point)
c.graph.files.items(.entry_point_kind)[chunk.entry_point.source_index].outputKind()
else
.chunk;
try output_files.append(options.OutputFile.init(.{
.data = .{
.buffer = .{
.data = code_result.buffer,
.allocator = Chunk.IntermediateOutput.allocatorForSize(code_result.buffer.len),
},
},
.hash = chunk.template.placeholder.hash,
.loader = chunk.content.loader(),
.input_path = input_path,
.display_size = @as(u32, @truncate(display_size)),
.output_kind = output_kind,
.input_loader = if (chunk.entry_point.is_entry_point) c.parse_graph.input_files.items(.loader)[chunk.entry_point.source_index] else .js,
.output_path = try bun.default_allocator.dupe(u8, chunk.final_rel_path),
.is_executable = chunk.is_executable,
.source_map_index = source_map_index,
.bytecode_index = bytecode_index,
.side = if (chunk.content == .css)
.client
else switch (c.graph.ast.items(.target)[chunk.entry_point.source_index]) {
.browser => .client,
else => .server,
},
.entry_point_index = if (output_kind == .@"entry-point")
chunk.entry_point.source_index - @as(u32, (if (c.framework) |fw| if (fw.server_components != null) 3 else 1 else 1))
else
null,
.referenced_css_files = switch (chunk.content) {
.javascript => |js| @ptrCast(try bun.default_allocator.dupe(u32, js.css_chunks)),
.css => &.{},
.html => &.{},
},
}));
if (sourcemap_output_file) |sourcemap_file| {
try output_files.append(sourcemap_file);
}
if (bytecode_output_file) |bytecode_file| {
try output_files.append(bytecode_file);
}
}
try output_files.appendSlice(c.parse_graph.additional_output_files.items);
}
return output_files;
}
const bun = @import("bun");
const strings = bun.strings;
const LinkerContext = bun.bundle_v2.LinkerContext;
const Part = bun.bundle_v2.Part;
const Loader = bun.Loader;
const std = @import("std");
const debug = LinkerContext.debug;
const Environment = bun.Environment;
const Logger = bun.logger;
const options = bun.options;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
const Loc = Logger.Loc;
const Chunk = bun.bundle_v2.Chunk;
const sync = bun.ThreadPool;
const GenerateChunkCtx = LinkerContext.GenerateChunkCtx;
const CompileResult = LinkerContext.CompileResult;
const PendingPartRange = LinkerContext.PendingPartRange;
const Output = bun.Output;
const debugPartRanges = Output.scoped(.PartRanges, true);
const generateCompileResultForJSChunk = LinkerContext.generateCompileResultForJSChunk;
const generateCompileResultForCssChunk = LinkerContext.generateCompileResultForCssChunk;
const generateCompileResultForHtmlChunk = LinkerContext.generateCompileResultForHtmlChunk;
const generateChunk = LinkerContext.generateChunk;
const AutoBitSet = bun.bit_set.AutoBitSet;
const ContentHasher = bun.bundle_v2.ContentHasher;
const cheapPrefixNormalizer = bun.bundle_v2.cheapPrefixNormalizer;
const base64 = bun.base64;
const JSC = bun.JSC;
pub const ThreadPoolLib = bun.ThreadPool;

View File

@@ -0,0 +1,709 @@
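/// Prints the JavaScript for a single part range of one file in a chunk,
/// handling the dev-server (HMR) fast path, lazy default exports, and the
/// CommonJS/ESM wrapper closures.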
pub fn generateCodeForFileInChunkJS(
c: *LinkerContext,
writer: *js_printer.BufferWriter,
r: renamer.Renamer,
chunk: *Chunk,
part_range: PartRange,
toCommonJSRef: Ref,
toESMRef: Ref,
runtimeRequireRef: ?Ref,
stmts: *StmtList,
allocator: std.mem.Allocator,
temp_allocator: std.mem.Allocator,
) js_printer.PrintResult {
const parts: []Part = c.graph.ast.items(.parts)[part_range.source_index.get()].slice()[part_range.part_index_begin..part_range.part_index_end];
const all_flags: []const JSMeta.Flags = c.graph.meta.items(.flags);
const flags = all_flags[part_range.source_index.get()];
const wrapper_part_index = if (flags.wrap != .none)
c.graph.meta.items(.wrapper_part_index)[part_range.source_index.get()]
else
Index.invalid;
// referencing everything by array makes the code a lot more annoying :(
var ast: JSAst = c.graph.ast.get(part_range.source_index.get());
// For HMR, part generation is entirely special cased.
// - export wrapping is already done.
// - imports are split from the main code.
// - one part range per file
if (c.options.output_format == .internal_bake_dev) brk: {
if (part_range.source_index.isRuntime()) {
@branchHint(.cold);
bun.debugAssert(c.dev_server == null);
break :brk; // this is from `bun build --format=internal_bake_dev`
}
const hmr_api_ref = ast.wrapper_ref;
for (parts) |part| {
c.convertStmtsForChunkForDevServer(stmts, part.stmts, allocator, &ast) catch |err|
return .{ .err = err };
}
const main_stmts_len = stmts.inside_wrapper_prefix.items.len + stmts.inside_wrapper_suffix.items.len;
const all_stmts_len = main_stmts_len + stmts.outside_wrapper_prefix.items.len + 1;
stmts.all_stmts.ensureUnusedCapacity(all_stmts_len) catch bun.outOfMemory();
stmts.all_stmts.appendSliceAssumeCapacity(stmts.inside_wrapper_prefix.items);
stmts.all_stmts.appendSliceAssumeCapacity(stmts.inside_wrapper_suffix.items);
const inner = stmts.all_stmts.items[0..main_stmts_len];
var closure_args = std.BoundedArray(G.Arg, 3).fromSlice(&.{
.{ .binding = Binding.alloc(temp_allocator, B.Identifier{
.ref = hmr_api_ref,
}, Logger.Loc.Empty) },
}) catch unreachable; // is within bounds
if (ast.flags.uses_module_ref or ast.flags.uses_exports_ref) {
closure_args.appendSliceAssumeCapacity(&.{
.{
.binding = Binding.alloc(temp_allocator, B.Identifier{
.ref = ast.module_ref,
}, Logger.Loc.Empty),
},
.{
.binding = Binding.alloc(temp_allocator, B.Identifier{
.ref = ast.exports_ref,
}, Logger.Loc.Empty),
},
});
}
stmts.all_stmts.appendAssumeCapacity(Stmt.allocateExpr(temp_allocator, Expr.init(E.Function, .{ .func = .{
.args = temp_allocator.dupe(G.Arg, closure_args.slice()) catch bun.outOfMemory(),
.body = .{
.stmts = inner,
.loc = Logger.Loc.Empty,
},
} }, Logger.Loc.Empty)));
stmts.all_stmts.appendSliceAssumeCapacity(stmts.outside_wrapper_prefix.items);
ast.flags.uses_module_ref = true;
// TODO: there is a weird edge case where the pretty path is not computed
// it does not reproduce when debugging.
var source = c.getSource(part_range.source_index.get()).*;
if (source.path.text.ptr == source.path.pretty.ptr) {
source.path = genericPathWithPrettyInitialized(
source.path,
c.options.target,
c.resolver.fs.top_level_dir,
allocator,
) catch bun.outOfMemory();
}
return c.printCodeForFileInChunkJS(
r,
allocator,
writer,
stmts.all_stmts.items[main_stmts_len..],
&ast,
flags,
.None,
.None,
null,
part_range.source_index,
&source,
);
}
var needs_wrapper = false;
const namespace_export_part_index = js_ast.namespace_export_part_index;
stmts.reset();
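// std.math.maxInt(u32) serves as a sentinel meaning "no lazy default export".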
const part_index_for_lazy_default_export: u32 = brk: {
if (ast.flags.has_lazy_export) {
if (c.graph.meta.items(.resolved_exports)[part_range.source_index.get()].get("default")) |default| {
break :brk c.graph.topLevelSymbolToParts(part_range.source_index.get(), default.data.import_ref)[0];
}
}
break :brk std.math.maxInt(u32);
};
const output_format = c.options.output_format;
// The top-level directive must come first (the non-wrapped case is handled
// by the chunk generation code, although only for the entry point)
if (flags.wrap != .none and ast.flags.has_explicit_use_strict_directive and !chunk.isEntryPoint() and !output_format.isAlwaysStrictMode()) {
stmts.inside_wrapper_prefix.append(Stmt.alloc(S.Directive, .{
.value = "use strict",
}, Logger.Loc.Empty)) catch unreachable;
}
// TODO: handle directive
if (namespace_export_part_index >= part_range.part_index_begin and
namespace_export_part_index < part_range.part_index_end and
parts[namespace_export_part_index].is_live)
{
c.convertStmtsForChunk(
part_range.source_index.get(),
stmts,
parts[namespace_export_part_index].stmts,
chunk,
temp_allocator,
flags.wrap,
&ast,
) catch |err| {
bun.handleErrorReturnTrace(err, @errorReturnTrace());
return .{ .err = err };
};
switch (flags.wrap) {
.esm => {
stmts.outside_wrapper_prefix.appendSlice(stmts.inside_wrapper_suffix.items) catch unreachable;
},
else => {
stmts.inside_wrapper_prefix.appendSlice(stmts.inside_wrapper_suffix.items) catch unreachable;
},
}
stmts.inside_wrapper_suffix.clearRetainingCapacity();
}
// Add all other parts in this chunk
for (parts, 0..) |part, index_| {
const index = part_range.part_index_begin + @as(u32, @truncate(index_));
if (!part.is_live) {
// Skip the part if it's not in this chunk
continue;
}
if (index == namespace_export_part_index) {
// Skip the namespace export part because we already handled it above
continue;
}
if (index == wrapper_part_index.get()) {
// Skip the wrapper part because we already handled it above
needs_wrapper = true;
continue;
}
var single_stmts_list = [1]Stmt{undefined};
var part_stmts = part.stmts;
// If this could be a JSON or TOML file that exports a top-level object literal, go
// over the non-default top-level properties that ended up being imported
// and substitute references to them into the main top-level object literal.
// So this JSON file:
//
// {
// "foo": [1, 2, 3],
// "bar": [4, 5, 6],
// }
//
// is initially compiled into this:
//
// export var foo = [1, 2, 3];
// export var bar = [4, 5, 6];
// export default {
// foo: [1, 2, 3],
// bar: [4, 5, 6],
// };
//
// But we turn it into this if both "foo" and "default" are imported:
//
// export var foo = [1, 2, 3];
// export default {
// foo,
// bar: [4, 5, 6],
// };
//
if (index == part_index_for_lazy_default_export) {
bun.assert(index != std.math.maxInt(u32));
const stmt = part_stmts[0];
if (stmt.data != .s_export_default)
@panic("expected Lazy default export to be an export default statement");
const default_export = stmt.data.s_export_default;
var default_expr = default_export.value.expr;
// Be careful: the top-level value in a JSON file is not necessarily an object
if (default_expr.data == .e_object) {
var new_properties = default_expr.data.e_object.properties.clone(temp_allocator) catch unreachable;
var resolved_exports = c.graph.meta.items(.resolved_exports)[part_range.source_index.get()];
// If any top-level properties ended up being imported directly, change
// the property to just reference the corresponding variable instead
for (new_properties.slice()) |*prop| {
if (prop.key == null or prop.key.?.data != .e_string or prop.value == null) continue;
const name = prop.key.?.data.e_string.slice(temp_allocator);
if (strings.eqlComptime(name, "default") or
strings.eqlComptime(name, "__esModule") or
!bun.js_lexer.isIdentifier(name)) continue;
if (resolved_exports.get(name)) |export_data| {
const export_ref = export_data.data.import_ref;
const export_part = ast.parts.slice()[c.graph.topLevelSymbolToParts(part_range.source_index.get(), export_ref)[0]];
if (export_part.is_live) {
prop.* = .{
.key = prop.key,
.value = Expr.initIdentifier(export_ref, prop.value.?.loc),
};
}
}
}
default_expr = Expr.allocate(
temp_allocator,
E.Object,
E.Object{
.properties = new_properties,
},
default_expr.loc,
);
}
single_stmts_list[0] = Stmt.allocate(
temp_allocator,
S.ExportDefault,
.{
.default_name = default_export.default_name,
.value = .{ .expr = default_expr },
},
stmt.loc,
);
part_stmts = single_stmts_list[0..];
}
c.convertStmtsForChunk(
part_range.source_index.get(),
stmts,
part_stmts,
chunk,
temp_allocator,
flags.wrap,
&ast,
) catch |err| return .{
.err = err,
};
}
// Hoist all import statements before any normal statements. ES6 imports
// are different than CommonJS imports. All modules imported via ES6 import
// statements are evaluated before the module doing the importing is
// evaluated (well, except for cyclic import scenarios). We need to preserve
// these semantics even when modules imported via ES6 import statements end
// up being CommonJS modules.
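// For example, in `import "./dep"; sideEffect();` the "./dep" module must
// still be evaluated first, even when it lowers to a CommonJS require() call.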
stmts.all_stmts.ensureUnusedCapacity(stmts.inside_wrapper_prefix.items.len + stmts.inside_wrapper_suffix.items.len) catch unreachable;
stmts.all_stmts.appendSliceAssumeCapacity(stmts.inside_wrapper_prefix.items);
stmts.all_stmts.appendSliceAssumeCapacity(stmts.inside_wrapper_suffix.items);
stmts.inside_wrapper_prefix.items.len = 0;
stmts.inside_wrapper_suffix.items.len = 0;
if (c.options.minify_syntax) {
mergeAdjacentLocalStmts(&stmts.all_stmts, temp_allocator);
}
var out_stmts: []js_ast.Stmt = stmts.all_stmts.items;
// Optionally wrap all statements in a closure
if (needs_wrapper) {
switch (flags.wrap) {
.cjs => {
// Only include the arguments that are actually used
var args = std.ArrayList(G.Arg).initCapacity(
temp_allocator,
if (ast.flags.uses_module_ref or ast.flags.uses_exports_ref) 2 else 0,
) catch unreachable;
if (ast.flags.uses_module_ref or ast.flags.uses_exports_ref) {
args.appendAssumeCapacity(
G.Arg{
.binding = Binding.alloc(
temp_allocator,
B.Identifier{
.ref = ast.exports_ref,
},
Logger.Loc.Empty,
),
},
);
if (ast.flags.uses_module_ref) {
args.appendAssumeCapacity(
G.Arg{
.binding = Binding.alloc(
temp_allocator,
B.Identifier{
.ref = ast.module_ref,
},
Logger.Loc.Empty,
),
},
);
}
}
// TODO: variants of the runtime functions
var cjs_args = temp_allocator.alloc(Expr, 1) catch unreachable;
cjs_args[0] = Expr.init(
E.Arrow,
E.Arrow{
.args = args.items,
.body = .{
.stmts = stmts.all_stmts.items,
.loc = Logger.Loc.Empty,
},
},
Logger.Loc.Empty,
);
const commonjs_wrapper_definition = Expr.init(
E.Call,
E.Call{
.target = Expr.init(
E.Identifier,
E.Identifier{
.ref = c.cjs_runtime_ref,
},
Logger.Loc.Empty,
),
.args = bun.BabyList(Expr).init(cjs_args),
},
Logger.Loc.Empty,
);
// "var require_foo = __commonJS(...);"
{
var decls = temp_allocator.alloc(G.Decl, 1) catch unreachable;
decls[0] = G.Decl{
.binding = Binding.alloc(
temp_allocator,
B.Identifier{
.ref = ast.wrapper_ref,
},
Logger.Loc.Empty,
),
.value = commonjs_wrapper_definition,
};
stmts.outside_wrapper_prefix.append(
Stmt.alloc(
S.Local,
S.Local{
.decls = G.Decl.List.init(decls),
},
Logger.Loc.Empty,
),
) catch unreachable;
}
},
.esm => {
// The wrapper only needs to be "async" if there is a transitive async
// dependency. For correctness, we must not use "async" if the module
// isn't async because then calling "require()" on that module would
// swallow any exceptions thrown during module initialization.
const is_async = flags.is_async_or_has_async_dependency;
const ExportHoist = struct {
decls: std.ArrayListUnmanaged(G.Decl),
allocator: std.mem.Allocator,
pub fn wrapIdentifier(w: *@This(), loc: Logger.Loc, ref: Ref) Expr {
w.decls.append(
w.allocator,
.{
.binding = Binding.alloc(
w.allocator,
B.Identifier{
.ref = ref,
},
loc,
),
.value = null,
},
) catch bun.outOfMemory();
return Expr.initIdentifier(ref, loc);
}
};
var hoist = ExportHoist{
.decls = .{},
.allocator = temp_allocator,
};
var inner_stmts = stmts.all_stmts.items;
// Hoist all top-level "var" and "function" declarations out of the closure
{
var end: usize = 0;
for (stmts.all_stmts.items) |stmt| {
const transformed = switch (stmt.data) {
.s_local => |local| stmt: {
// Convert the declarations to assignments
var value = Expr.empty;
for (local.decls.slice()) |*decl| {
if (decl.value) |initializer| {
const can_be_moved = initializer.canBeMoved();
if (can_be_moved) {
// if the value can be moved, move the decl directly to preserve destructuring
// i.e. `const { main } = class { static main() {} }` => `var {main} = class { static main() {} }`
hoist.decls.append(hoist.allocator, decl.*) catch bun.outOfMemory();
} else {
// if the value cannot be moved, add every destructuring key separately
// i.e. `var { append } = { append() {} }` => `var append; __esm(() => ({ append } = { append() {} }))`
const binding = decl.binding.toExpr(&hoist);
value = value.joinWithComma(
binding.assign(initializer),
temp_allocator,
);
}
} else {
_ = decl.binding.toExpr(&hoist);
}
}
if (value.isEmpty()) {
continue;
}
break :stmt Stmt.allocateExpr(temp_allocator, value);
},
.s_function => {
stmts.outside_wrapper_prefix.append(stmt) catch bun.outOfMemory();
continue;
},
.s_class => |class| stmt: {
if (class.class.canBeMoved()) {
stmts.outside_wrapper_prefix.append(stmt) catch bun.outOfMemory();
continue;
}
break :stmt Stmt.allocateExpr(
temp_allocator,
Expr.assign(hoist.wrapIdentifier(
class.class.class_name.?.loc,
class.class.class_name.?.ref.?,
), .{
.data = .{ .e_class = &class.class },
.loc = stmt.loc,
}),
);
},
else => stmt,
};
inner_stmts[end] = transformed;
end += 1;
}
inner_stmts.len = end;
}
if (hoist.decls.items.len > 0) {
stmts.outside_wrapper_prefix.append(
Stmt.alloc(
S.Local,
S.Local{
.decls = G.Decl.List.fromList(hoist.decls),
},
Logger.Loc.Empty,
),
) catch unreachable;
hoist.decls.items.len = 0;
}
if (inner_stmts.len > 0) {
// See the comment in needsWrapperRef for why the symbol
// is sometimes not generated.
bun.assert(!ast.wrapper_ref.isEmpty()); // js_parser's needsWrapperRef thought wrapper was not needed
// "__esm(() => { ... })"
var esm_args = temp_allocator.alloc(Expr, 1) catch bun.outOfMemory();
esm_args[0] = Expr.init(E.Arrow, .{
.args = &.{},
.is_async = is_async,
.body = .{
.stmts = inner_stmts,
.loc = Logger.Loc.Empty,
},
}, Logger.Loc.Empty);
// "var init_foo = __esm(...);"
const value = Expr.init(E.Call, .{
.target = Expr.initIdentifier(c.esm_runtime_ref, Logger.Loc.Empty),
.args = bun.BabyList(Expr).init(esm_args),
}, Logger.Loc.Empty);
var decls = temp_allocator.alloc(G.Decl, 1) catch bun.outOfMemory();
decls[0] = G.Decl{
.binding = Binding.alloc(
temp_allocator,
B.Identifier{
.ref = ast.wrapper_ref,
},
Logger.Loc.Empty,
),
.value = value,
};
stmts.outside_wrapper_prefix.append(
Stmt.alloc(S.Local, .{
.decls = G.Decl.List.init(decls),
}, Logger.Loc.Empty),
) catch bun.outOfMemory();
} else {
// // If this fails, then there will be places we reference
// // `init_foo` without it actually existing.
// bun.assert(ast.wrapper_ref.isEmpty());
// TODO: the edge case where we are wrong is when there
// are references to other ESM modules, but those get
// fully hoisted. They look like side effects, but they
// are removed.
//
// It is too late to retroactively delete the
// wrapper_ref, since printing has already begun. The
// most we can do to salvage the situation is to print
// an empty arrow function.
//
// This is marked as a TODO, because this can be solved
// via a count of external modules, decremented during
// linking.
if (!ast.wrapper_ref.isEmpty()) {
const value = Expr.init(E.Arrow, .{
.args = &.{},
.is_async = is_async,
.body = .{
.stmts = inner_stmts,
.loc = Logger.Loc.Empty,
},
}, Logger.Loc.Empty);
stmts.outside_wrapper_prefix.append(
Stmt.alloc(S.Local, .{
.decls = G.Decl.List.fromSlice(temp_allocator, &.{.{
.binding = Binding.alloc(
temp_allocator,
B.Identifier{
.ref = ast.wrapper_ref,
},
Logger.Loc.Empty,
),
.value = value,
}}) catch bun.outOfMemory(),
}, Logger.Loc.Empty),
) catch bun.outOfMemory();
}
}
},
else => {},
}
out_stmts = stmts.outside_wrapper_prefix.items;
}
if (out_stmts.len == 0) {
return .{
.result = .{
.code = &[_]u8{},
.source_map = null,
},
};
}
return c.printCodeForFileInChunkJS(
r,
allocator,
writer,
out_stmts,
&ast,
flags,
toESMRef,
toCommonJSRef,
runtimeRequireRef,
part_range.source_index,
c.getSource(part_range.source_index.get()),
);
}
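/// Merges adjacent compatible declaration statements when minifying syntax,
/// e.g. `var a = 1; var b = 2;` becomes `var a = 1, b = 2;`. The merged
/// statement is cloned rather than mutated in place because the same S.Local
/// may be shared across threads (see the issue link below).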
fn mergeAdjacentLocalStmts(stmts: *std.ArrayList(Stmt), allocator: std.mem.Allocator) void {
if (stmts.items.len == 0)
return;
var did_merge_with_previous_local = false;
var end: usize = 1;
for (stmts.items[1..]) |stmt| {
// Try to merge with the previous variable statement
if (stmt.data == .s_local) {
var after = stmt.data.s_local;
if (stmts.items[end - 1].data == .s_local) {
var before = stmts.items[end - 1].data.s_local;
// It must be the same kind of variable statement (i.e. let/var/const)
if (before.canMergeWith(after)) {
if (did_merge_with_previous_local) {
// Avoid O(n^2) behavior for repeated variable declarations
// Appending to this decls list is safe because did_merge_with_previous_local is true
before.decls.append(allocator, after.decls.slice()) catch unreachable;
} else {
// Append the declarations to the previous variable statement
did_merge_with_previous_local = true;
var clone = std.ArrayList(G.Decl).initCapacity(allocator, before.decls.len + after.decls.len) catch unreachable;
clone.appendSliceAssumeCapacity(before.decls.slice());
clone.appendSliceAssumeCapacity(after.decls.slice());
// we must clone instead of overwriting in place in case the same S.Local is used across threads
// https://github.com/oven-sh/bun/issues/2942
stmts.items[end - 1] = Stmt.allocate(
allocator,
S.Local,
S.Local{
.decls = BabyList(G.Decl).fromList(clone),
.is_export = before.is_export,
.was_commonjs_export = before.was_commonjs_export,
.was_ts_import_equals = before.was_ts_import_equals,
.kind = before.kind,
},
stmts.items[end - 1].loc,
);
}
continue;
}
}
}
did_merge_with_previous_local = false;
stmts.items[end] = stmt;
end += 1;
}
stmts.items.len = end;
}
const bun = @import("bun");
const BabyList = bun.BabyList;
const strings = bun.strings;
const LinkerContext = bun.bundle_v2.LinkerContext;
const Index = bun.bundle_v2.Index;
const Part = bun.bundle_v2.Part;
const std = @import("std");
const JSMeta = bun.bundle_v2.JSMeta;
const JSAst = bun.bundle_v2.JSAst;
const js_ast = bun.bundle_v2.js_ast;
const Ref = bun.bundle_v2.js_ast.Ref;
const Logger = bun.logger;
const options = bun.options;
const js_printer = bun.bundle_v2.js_printer;
const renamer = bun.bundle_v2.renamer;
const Chunk = bun.bundle_v2.Chunk;
const PartRange = bun.bundle_v2.PartRange;
const StmtList = LinkerContext.StmtList;
const Stmt = js_ast.Stmt;
const Expr = js_ast.Expr;
const E = js_ast.E;
const S = js_ast.S;
const G = js_ast.G;
const B = js_ast.B;
const Binding = js_ast.Binding;
const genericPathWithPrettyInitialized = bun.bundle_v2.genericPathWithPrettyInitialized;

View File

@@ -0,0 +1,419 @@
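/// Expands a file's single "lazy export" placeholder statement (used for
/// JSON/TOML files and CSS-module stubs) into concrete exports: a
/// `module.exports` assignment for CommonJS output, or individually
/// tree-shakeable ES export statements otherwise.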
pub fn generateCodeForLazyExport(this: *LinkerContext, source_index: Index.Int) !void {
const exports_kind = this.graph.ast.items(.exports_kind)[source_index];
const all_sources = this.parse_graph.input_files.items(.source);
const all_css_asts = this.graph.ast.items(.css);
const maybe_css_ast: ?*bun.css.BundlerStyleSheet = all_css_asts[source_index];
var parts = &this.graph.ast.items(.parts)[source_index];
if (parts.len < 1) {
@panic("Internal error: expected at least one part for lazy export");
}
var part: *Part = &parts.ptr[1];
if (part.stmts.len == 0) {
@panic("Internal error: expected at least one statement in the lazy export");
}
const module_ref = this.graph.ast.items(.module_ref)[source_index];
// Handle css modules
//
// --- original comment from esbuild ---
// If this JavaScript file is a stub from a CSS file, populate the exports of
// this JavaScript stub with the local names from that CSS file. This is done
// now instead of earlier because we need the whole bundle to be present.
if (maybe_css_ast) |css_ast| {
const stmt: Stmt = part.stmts[0];
if (stmt.data != .s_lazy_export) {
@panic("Internal error: expected top-level lazy export statement");
}
if (css_ast.local_scope.count() > 0) out: {
var exports = E.Object{};
const symbols: *const Symbol.List = &this.graph.ast.items(.symbols)[source_index];
const all_import_records: []const BabyList(bun.css.ImportRecord) = this.graph.ast.items(.import_records);
const values = css_ast.local_scope.values();
if (values.len == 0) break :out;
const size = size: {
var size: u32 = 0;
for (values) |entry| {
size = @max(size, entry.ref.inner_index);
}
break :size size + 1;
};
var inner_visited = try BitSet.initEmpty(this.allocator, size);
defer inner_visited.deinit(this.allocator);
var composes_visited = std.AutoArrayHashMap(bun.bundle_v2.Ref, void).init(this.allocator);
defer composes_visited.deinit();
const Visitor = struct {
inner_visited: *BitSet,
composes_visited: *std.AutoArrayHashMap(bun.bundle_v2.Ref, void),
parts: *std.ArrayList(E.TemplatePart),
all_import_records: []const BabyList(bun.css.ImportRecord),
all_css_asts: []?*bun.css.BundlerStyleSheet,
all_sources: []const Logger.Source,
all_symbols: []const Symbol.List,
source_index: Index.Int,
log: *Logger.Log,
loc: Loc,
allocator: std.mem.Allocator,
fn clearAll(visitor: *@This()) void {
visitor.inner_visited.setAll(false);
visitor.composes_visited.clearRetainingCapacity();
}
fn visitName(visitor: *@This(), ast: *bun.css.BundlerStyleSheet, ref: bun.css.CssRef, idx: Index.Int) void {
bun.assert(ref.canBeComposed());
const from_this_file = ref.sourceIndex(idx) == visitor.source_index;
if ((from_this_file and visitor.inner_visited.isSet(ref.innerIndex())) or
(!from_this_file and visitor.composes_visited.contains(ref.toRealRef(idx))))
{
return;
}
visitor.visitComposes(ast, ref, idx);
visitor.parts.append(E.TemplatePart{
.value = Expr.init(
E.NameOfSymbol,
E.NameOfSymbol{
.ref = ref.toRealRef(idx),
},
visitor.loc,
),
.tail = .{
.cooked = E.String.init(" "),
},
.tail_loc = visitor.loc,
}) catch bun.outOfMemory();
if (from_this_file) {
visitor.inner_visited.set(ref.innerIndex());
} else {
visitor.composes_visited.put(ref.toRealRef(idx), {}) catch unreachable;
}
}
fn warnNonSingleClassComposes(visitor: *@This(), ast: *bun.css.BundlerStyleSheet, css_ref: bun.css.CssRef, idx: Index.Int, compose_loc: Loc) void {
const ref = css_ref.toRealRef(idx);
_ = ref;
const syms: *const Symbol.List = &visitor.all_symbols[css_ref.sourceIndex(idx)];
const name = syms.at(css_ref.innerIndex()).original_name;
const loc = ast.local_scope.get(name).?.loc;
visitor.log.addRangeErrorFmtWithNote(
&visitor.all_sources[idx],
.{ .loc = compose_loc },
visitor.allocator,
"The composes property cannot be used with {}, because it is not a single class name.",
.{
bun.fmt.quote(name),
},
"The definition of {} is here.",
.{
bun.fmt.quote(name),
},
.{
.loc = loc,
},
) catch bun.outOfMemory();
}
fn visitComposes(visitor: *@This(), ast: *bun.css.BundlerStyleSheet, css_ref: bun.css.CssRef, idx: Index.Int) void {
const ref = css_ref.toRealRef(idx);
if (ast.composes.count() > 0) {
const composes = ast.composes.getPtr(ref) orelse return;
// while parsing we check that we only allow `composes` on single class selectors
bun.assert(css_ref.tag.class);
for (composes.composes.slice()) |*compose| {
// it is imported
if (compose.from != null) {
if (compose.from.? == .import_record_index) {
const import_record_idx = compose.from.?.import_record_index;
const import_records: *const BabyList(bun.css.ImportRecord) = &visitor.all_import_records[idx];
const import_record = import_records.at(import_record_idx);
if (import_record.source_index.isValid()) {
const other_file = visitor.all_css_asts[import_record.source_index.get()] orelse {
visitor.log.addErrorFmt(
&visitor.all_sources[idx],
compose.loc,
visitor.allocator,
"Cannot use the \"composes\" property with the {} file (it is not a CSS file)",
.{bun.fmt.quote(visitor.all_sources[import_record.source_index.get()].path.pretty)},
) catch bun.outOfMemory();
continue;
};
for (compose.names.slice()) |name| {
const other_name_entry = other_file.local_scope.get(name.v) orelse continue;
const other_name_ref = other_name_entry.ref;
if (!other_name_ref.canBeComposed()) {
visitor.warnNonSingleClassComposes(other_file, other_name_ref, import_record.source_index.get(), compose.loc);
} else {
visitor.visitName(other_file, other_name_ref, import_record.source_index.get());
}
}
}
} else if (compose.from.? == .global) {
// E.g.: `composes: foo from global`
//
// In this example `foo` is global and won't be rewritten to a locally scoped
// name, so we can just add it as a string.
for (compose.names.slice()) |name| {
visitor.parts.append(
E.TemplatePart{
.value = Expr.init(
E.String,
E.String.init(name.v),
visitor.loc,
),
.tail = .{
.cooked = E.String.init(" "),
},
.tail_loc = visitor.loc,
},
) catch bun.outOfMemory();
}
}
} else {
// it is from the current file
for (compose.names.slice()) |name| {
const name_entry = ast.local_scope.get(name.v) orelse {
visitor.log.addErrorFmt(
&visitor.all_sources[idx],
compose.loc,
visitor.allocator,
"The name {} never appears in {} as a CSS modules locally scoped class name. Note that \"composes\" only works with single class selectors.",
.{
bun.fmt.quote(name.v),
bun.fmt.quote(visitor.all_sources[idx].path.pretty),
},
) catch bun.outOfMemory();
continue;
};
const name_ref = name_entry.ref;
if (!name_ref.canBeComposed()) {
visitor.warnNonSingleClassComposes(ast, name_ref, idx, compose.loc);
} else {
visitor.visitName(ast, name_ref, idx);
}
}
}
}
}
}
};
var visitor = Visitor{
.inner_visited = &inner_visited,
.composes_visited = &composes_visited,
.source_index = source_index,
.parts = undefined,
.all_import_records = all_import_records,
.all_css_asts = all_css_asts,
.loc = stmt.loc,
.log = this.log,
.all_sources = all_sources,
.allocator = this.allocator,
.all_symbols = this.graph.ast.items(.symbols),
};
for (values) |entry| {
const ref = entry.ref;
bun.assert(ref.inner_index < symbols.len);
var template_parts = std.ArrayList(E.TemplatePart).init(this.allocator);
var value = Expr.init(E.NameOfSymbol, E.NameOfSymbol{ .ref = ref.toRealRef(source_index) }, stmt.loc);
visitor.parts = &template_parts;
visitor.clearAll();
visitor.inner_visited.set(ref.innerIndex());
if (ref.tag.class) visitor.visitComposes(css_ast, ref, source_index);
if (template_parts.items.len > 0) {
template_parts.append(E.TemplatePart{
.value = value,
.tail_loc = stmt.loc,
.tail = .{ .cooked = E.String.init("") },
}) catch bun.outOfMemory();
value = Expr.init(
E.Template,
E.Template{
.parts = template_parts.items,
.head = .{
.cooked = E.String.init(""),
},
},
stmt.loc,
);
}
const key = symbols.at(ref.innerIndex()).original_name;
try exports.put(this.allocator, key, value);
}
part.stmts[0].data.s_lazy_export.* = Expr.init(E.Object, exports, stmt.loc).data;
}
}
const stmt: Stmt = part.stmts[0];
if (stmt.data != .s_lazy_export) {
@panic("Internal error: expected top-level lazy export statement");
}
const expr = Expr{
.data = stmt.data.s_lazy_export.*,
.loc = stmt.loc,
};
switch (exports_kind) {
.cjs => {
part.stmts[0] = Stmt.assign(
Expr.init(
E.Dot,
E.Dot{
.target = Expr.initIdentifier(module_ref, stmt.loc),
.name = "exports",
.name_loc = stmt.loc,
},
stmt.loc,
),
expr,
);
try this.graph.generateSymbolImportAndUse(source_index, 0, module_ref, 1, Index.init(source_index));
// If this is a .napi addon and it's not node, we need to generate a require() call to the runtime
if (expr.data == .e_call and
expr.data.e_call.target.data == .e_require_call_target and
// if it's commonjs, use require()
this.options.output_format != .cjs)
{
try this.graph.generateRuntimeSymbolImportAndUse(
source_index,
Index.part(1),
"__require",
1,
);
}
},
else => {
// Otherwise, generate ES6 export statements. These are added as additional
// parts so they can be tree shaken individually.
part.stmts.len = 0;
if (expr.data == .e_object) {
for (expr.data.e_object.properties.slice()) |property_| {
const property: G.Property = property_;
if (property.key == null or property.key.?.data != .e_string or property.value == null or
property.key.?.data.e_string.eqlComptime("default") or property.key.?.data.e_string.eqlComptime("__esModule"))
{
continue;
}
const name = property.key.?.data.e_string.slice(this.allocator);
// TODO: support non-identifier names
if (!bun.js_lexer.isIdentifier(name))
continue;
// This initializes the generated variable with a copy of the property
// value, which is INCORRECT for values that are objects/arrays because
// they will have separate object identity. This is fixed up later in
// "generateCodeForFileInChunkJS" by changing the object literal to
// reference this generated variable instead.
//
// Changing the object literal is deferred until that point instead of
// doing it now because we only want to do this for top-level variables
// that actually end up being used, and we don't know which ones will
// end up actually being used at this point (since import binding hasn't
// happened yet). So we need to wait until after tree shaking happens.
const generated = try this.generateNamedExportInFile(source_index, module_ref, name, name);
parts.ptr[generated[1]].stmts = this.allocator.alloc(Stmt, 1) catch unreachable;
parts.ptr[generated[1]].stmts[0] = Stmt.alloc(
S.Local,
S.Local{
.is_export = true,
.decls = js_ast.G.Decl.List.fromSlice(
this.allocator,
&.{
.{
.binding = Binding.alloc(
this.allocator,
B.Identifier{
.ref = generated[0],
},
expr.loc,
),
.value = property.value.?,
},
},
) catch unreachable,
},
property.key.?.loc,
);
}
}
{
const generated = try this.generateNamedExportInFile(
source_index,
module_ref,
std.fmt.allocPrint(
this.allocator,
"{}_default",
.{this.parse_graph.input_files.items(.source)[source_index].fmtIdentifier()},
) catch unreachable,
"default",
);
parts.ptr[generated[1]].stmts = this.allocator.alloc(Stmt, 1) catch unreachable;
parts.ptr[generated[1]].stmts[0] = Stmt.alloc(
S.ExportDefault,
S.ExportDefault{
.default_name = .{
.ref = generated[0],
.loc = stmt.loc,
},
.value = .{
.expr = expr,
},
},
stmt.loc,
);
}
},
}
}
const bun = @import("bun");
const Ref = bun.bundle_v2.Ref;
const BabyList = bun.BabyList;
const Logger = bun.logger;
const Index = bun.bundle_v2.Index;
const Loc = Logger.Loc;
const LinkerContext = bun.bundle_v2.LinkerContext;
const string = bun.string;
const std = @import("std");
const Part = js_ast.Part;
const js_ast = bun.js_ast;
const ImportRecord = bun.ImportRecord;
const Symbol = js_ast.Symbol;
const Stmt = js_ast.Stmt;
const Expr = js_ast.Expr;
const E = js_ast.E;
const S = js_ast.S;
const G = js_ast.G;
const B = js_ast.B;
const Binding = js_ast.Binding;
const BitSet = bun.bit_set.DynamicBitSetUnmanaged;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
pub const ParseTask = bun.bundle_v2.ParseTask;

View File

@@ -0,0 +1,168 @@
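/// Thread pool entry point that prints a single CSS import of a chunk
/// (a layer, an external path, or a bundled source file) to text.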
pub fn generateCompileResultForCssChunk(task: *ThreadPoolLib.Task) void {
const part_range: *const PendingPartRange = @fieldParentPtr("task", task);
const ctx = part_range.ctx;
defer ctx.wg.finish();
var worker = ThreadPool.Worker.get(@fieldParentPtr("linker", ctx.c));
defer worker.unget();
const prev_action = if (Environment.show_crash_trace) bun.crash_handler.current_action;
defer if (Environment.show_crash_trace) {
bun.crash_handler.current_action = prev_action;
};
if (Environment.show_crash_trace) bun.crash_handler.current_action = .{ .bundle_generate_chunk = .{
.chunk = ctx.chunk,
.context = ctx.c,
.part_range = &part_range.part_range,
} };
ctx.chunk.compile_results_for_chunk[part_range.i] = generateCompileResultForCssChunkImpl(worker, ctx.c, ctx.chunk, part_range.i);
}
fn generateCompileResultForCssChunkImpl(worker: *ThreadPool.Worker, c: *LinkerContext, chunk: *Chunk, imports_in_chunk_index: u32) CompileResult {
const trace = bun.perf.trace("Bundler.generateCodeForFileInChunkCss");
defer trace.end();
var arena = &worker.temporary_arena;
var buffer_writer = js_printer.BufferWriter.init(worker.allocator);
defer _ = arena.reset(.retain_capacity);
const css_import = chunk.content.css.imports_in_chunk_in_order.at(imports_in_chunk_index);
const css: *const bun.css.BundlerStyleSheet = &chunk.content.css.asts[imports_in_chunk_index];
// const symbols: []const Symbol.List = c.graph.ast.items(.symbols);
const symbols = &c.graph.symbols;
switch (css_import.kind) {
.layers => {
const printer_options = bun.css.PrinterOptions{
// TODO: make this more configurable
.minify = c.options.minify_whitespace,
.targets = bun.css.Targets.forBundlerTarget(c.options.target),
};
_ = switch (css.toCssWithWriter(
worker.allocator,
&buffer_writer,
printer_options,
.{
.import_records = &css_import.condition_import_records,
.ast_urls_for_css = c.parse_graph.ast.items(.url_for_css),
.ast_unique_key_for_additional_file = c.parse_graph.input_files.items(.unique_key_for_additional_file),
},
&c.mangled_props,
// layers do not need symbols, I think
symbols,
)) {
.result => {},
.err => {
return CompileResult{
.css = .{
.result = .{ .err = error.PrintError },
.source_index = Index.invalid.get(),
},
};
},
};
return CompileResult{
.css = .{
.result = .{ .result = buffer_writer.getWritten() },
.source_index = Index.invalid.get(),
},
};
},
.external_path => {
var import_records = BabyList(ImportRecord).init(css_import.condition_import_records.sliceConst());
const printer_options = bun.css.PrinterOptions{
// TODO: make this more configurable
.minify = c.options.minify_whitespace,
.targets = bun.css.Targets.forBundlerTarget(c.options.target),
};
_ = switch (css.toCssWithWriter(
worker.allocator,
&buffer_writer,
printer_options,
.{
.import_records = &import_records,
.ast_urls_for_css = c.parse_graph.ast.items(.url_for_css),
.ast_unique_key_for_additional_file = c.parse_graph.input_files.items(.unique_key_for_additional_file),
},
&c.mangled_props,
// external_path does not need symbols, I think
symbols,
)) {
.result => {},
.err => {
return CompileResult{
.css = .{
.result = .{ .err = error.PrintError },
.source_index = Index.invalid.get(),
},
};
},
};
return CompileResult{
.css = .{
.result = .{ .result = buffer_writer.getWritten() },
.source_index = Index.invalid.get(),
},
};
},
.source_index => |idx| {
const printer_options = bun.css.PrinterOptions{
.targets = bun.css.Targets.forBundlerTarget(c.options.target),
// TODO: make this more configurable
.minify = c.options.minify_whitespace or c.options.minify_syntax or c.options.minify_identifiers,
};
_ = switch (css.toCssWithWriter(
worker.allocator,
&buffer_writer,
printer_options,
.{
.import_records = &c.graph.ast.items(.import_records)[idx.get()],
.ast_urls_for_css = c.parse_graph.ast.items(.url_for_css),
.ast_unique_key_for_additional_file = c.parse_graph.input_files.items(.unique_key_for_additional_file),
},
&c.mangled_props,
symbols,
)) {
.result => {},
.err => {
return CompileResult{
.css = .{
.result = .{ .err = error.PrintError },
.source_index = idx.get(),
},
};
},
};
return CompileResult{
.css = .{
.result = .{ .result = buffer_writer.getWritten() },
.source_index = idx.get(),
},
};
},
}
}
const bun = @import("bun");
const options = bun.options;
const BabyList = bun.BabyList;
const Index = bun.bundle_v2.Index;
const js_printer = bun.js_printer;
const LinkerContext = bun.bundle_v2.LinkerContext;
const ThreadPoolLib = bun.ThreadPool;
const Environment = bun.Environment;
const js_ast = bun.js_ast;
const ImportRecord = bun.ImportRecord;
const Symbol = js_ast.Symbol;
const bundler = bun.bundle_v2;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
pub const ParseTask = bun.bundle_v2.ParseTask;
const Chunk = bundler.Chunk;
const CompileResult = bundler.CompileResult;
const PendingPartRange = LinkerContext.PendingPartRange;

View File

@@ -0,0 +1,278 @@
/// Rewrite the HTML with the following transforms:
/// 1. Remove all <script> and <link> tags which were not marked as
/// external. A tag is non-external when the source_index on its
/// ImportRecord is not Index.invalid; such tags are updated to reference
/// the generated chunk or asset.
/// 2. For all other non-external URLs, update the "src" or "href"
/// attribute to point to the asset's unique key. Later, when joining
/// chunks, we will rewrite these to their final URL or pathname,
/// including the public_path.
/// 3. If a JavaScript chunk exists, add a <script type="module" crossorigin>
/// tag for the entry point's JavaScript, with its "src" attribute pointing
/// to the JavaScript chunk's unique key.
/// 4. If a CSS chunk exists, add a <link rel="stylesheet" crossorigin> tag
/// for the entry point's CSS, with its "href" attribute pointing to the
/// CSS chunk's unique key.
/// 5. For each imported module or chunk within the JavaScript code, add
/// a <link rel="modulepreload" href="..." crossorigin> tag that
/// points to the module or chunk's unique key so that we tell the
/// browser to preload the user's code.
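///
/// For example (illustrative keys), given one JS and one CSS chunk, the tags
/// injected before </head> look like:
///
///   <link rel="stylesheet" crossorigin href="{css_chunk.unique_key}">
///   <script type="module" crossorigin src="{js_chunk.unique_key}"></script>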
pub fn generateCompileResultForHtmlChunk(task: *ThreadPoolLib.Task) void {
const part_range: *const PendingPartRange = @fieldParentPtr("task", task);
const ctx = part_range.ctx;
defer ctx.wg.finish();
var worker = ThreadPool.Worker.get(@fieldParentPtr("linker", ctx.c));
defer worker.unget();
ctx.chunk.compile_results_for_chunk[part_range.i] = generateCompileResultForHTMLChunkImpl(worker, ctx.c, ctx.chunk, ctx.chunks);
}
fn generateCompileResultForHTMLChunkImpl(worker: *ThreadPool.Worker, c: *LinkerContext, chunk: *Chunk, chunks: []Chunk) CompileResult {
const parse_graph = c.parse_graph;
const input_files = parse_graph.input_files.slice();
const sources = input_files.items(.source);
const import_records = c.graph.ast.items(.import_records);
const HTMLLoader = struct {
linker: *LinkerContext,
source_index: Index.Int,
import_records: []const ImportRecord,
log: *Logger.Log,
allocator: std.mem.Allocator,
current_import_record_index: u32 = 0,
chunk: *Chunk,
chunks: []Chunk,
minify_whitespace: bool,
output: std.ArrayList(u8),
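/// Byte offsets into `output` recording where each end tag was seen; in
/// dev-server mode the tags are not injected here, so these offsets let the
/// dev server splice in its own tags later.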
end_tag_indices: struct {
head: ?u32 = 0,
body: ?u32 = 0,
html: ?u32 = 0,
},
added_head_tags: bool,
pub fn onWriteHTML(this: *@This(), bytes: []const u8) void {
this.output.appendSlice(bytes) catch bun.outOfMemory();
}
pub fn onHTMLParseError(_: *@This(), err: []const u8) void {
Output.panic("Parsing HTML during replacement phase errored, which should never happen since the first pass succeeded: {s}", .{err});
}
pub fn onTag(this: *@This(), element: *lol.Element, _: []const u8, url_attribute: []const u8, _: ImportKind) void {
if (this.current_import_record_index >= this.import_records.len) {
Output.panic("Assertion failure in HTMLLoader.onTag: current_import_record_index ({d}) >= import_records.len ({d})", .{ this.current_import_record_index, this.import_records.len });
}
const import_record: *const ImportRecord = &this.import_records[this.current_import_record_index];
this.current_import_record_index += 1;
const unique_key_for_additional_files = if (import_record.source_index.isValid())
this.linker.parse_graph.input_files.items(.unique_key_for_additional_file)[import_record.source_index.get()]
else
"";
const loader: Loader = if (import_record.source_index.isValid())
this.linker.parse_graph.input_files.items(.loader)[import_record.source_index.get()]
else
.file;
if (import_record.is_external_without_side_effects) {
debug("Leaving external import: {s}", .{import_record.path.text});
return;
}
if (this.linker.dev_server != null) {
if (unique_key_for_additional_files.len > 0) {
element.setAttribute(url_attribute, unique_key_for_additional_files) catch bun.outOfMemory();
} else if (import_record.path.is_disabled or loader.isJavaScriptLike() or loader.isCSS()) {
element.remove();
} else {
element.setAttribute(url_attribute, import_record.path.pretty) catch bun.outOfMemory();
}
return;
}
if (import_record.source_index.isInvalid()) {
debug("Leaving import with invalid source index: {s}", .{import_record.path.text});
return;
}
if (loader.isJavaScriptLike() or loader.isCSS()) {
// Remove the original non-external tags
element.remove();
return;
}
if (unique_key_for_additional_files.len > 0) {
// Replace the external href/src with the unique key so that we can later rewrite it to the final URL or pathname
element.setAttribute(url_attribute, unique_key_for_additional_files) catch bun.outOfMemory();
return;
}
}
pub fn onHeadTag(this: *@This(), element: *lol.Element) bool {
element.onEndTag(endHeadTagHandler, this) catch return true;
return false;
}
pub fn onHtmlTag(this: *@This(), element: *lol.Element) bool {
element.onEndTag(endHtmlTagHandler, this) catch return true;
return false;
}
pub fn onBodyTag(this: *@This(), element: *lol.Element) bool {
element.onEndTag(endBodyTagHandler, this) catch return true;
return false;
}
/// This is called for head, body, and html; whichever ends up coming first.
fn addHeadTags(this: *@This(), endTag: *lol.EndTag) !void {
if (this.added_head_tags) return;
this.added_head_tags = true;
var html_appender = std.heap.stackFallback(256, bun.default_allocator);
const allocator = html_appender.get();
const slices = this.getHeadTags(allocator);
defer for (slices.slice()) |slice|
allocator.free(slice);
for (slices.slice()) |slice|
try endTag.before(slice, true);
}
fn getHeadTags(this: *@This(), allocator: std.mem.Allocator) std.BoundedArray([]const u8, 2) {
var array: std.BoundedArray([]const u8, 2) = .{};
// Put CSS before JS to reduce the chance of a flash of unstyled content
if (this.chunk.getCSSChunkForHTML(this.chunks)) |css_chunk| {
const link_tag = std.fmt.allocPrintZ(allocator, "<link rel=\"stylesheet\" crossorigin href=\"{s}\">", .{css_chunk.unique_key}) catch bun.outOfMemory();
array.appendAssumeCapacity(link_tag);
}
if (this.chunk.getJSChunkForHTML(this.chunks)) |js_chunk| {
// type="module" scripts do not block rendering, so it is okay to put them in head
const script = std.fmt.allocPrintZ(allocator, "<script type=\"module\" crossorigin src=\"{s}\"></script>", .{js_chunk.unique_key}) catch bun.outOfMemory();
array.appendAssumeCapacity(script);
}
return array;
}
fn endHeadTagHandler(end: *lol.EndTag, opaque_this: ?*anyopaque) callconv(.C) lol.Directive {
const this: *@This() = @alignCast(@ptrCast(opaque_this.?));
if (this.linker.dev_server == null) {
this.addHeadTags(end) catch return .stop;
} else {
this.end_tag_indices.head = @intCast(this.output.items.len);
}
return .@"continue";
}
fn endBodyTagHandler(end: *lol.EndTag, opaque_this: ?*anyopaque) callconv(.C) lol.Directive {
const this: *@This() = @alignCast(@ptrCast(opaque_this.?));
if (this.linker.dev_server == null) {
this.addHeadTags(end) catch return .stop;
} else {
this.end_tag_indices.body = @intCast(this.output.items.len);
}
return .@"continue";
}
fn endHtmlTagHandler(end: *lol.EndTag, opaque_this: ?*anyopaque) callconv(.C) lol.Directive {
const this: *@This() = @alignCast(@ptrCast(opaque_this.?));
if (this.linker.dev_server == null) {
this.addHeadTags(end) catch return .stop;
} else {
this.end_tag_indices.html = @intCast(this.output.items.len);
}
return .@"continue";
}
};
// HTML bundles for dev server must be allocated to it, as it must outlive
// the bundle task. See `DevServer.RouteBundle.HTML.bundled_html_text`
const output_allocator = if (c.dev_server) |dev| dev.allocator else worker.allocator;
var html_loader: HTMLLoader = .{
.linker = c,
.source_index = chunk.entry_point.source_index,
.import_records = import_records[chunk.entry_point.source_index].slice(),
.log = c.log,
.allocator = worker.allocator,
.minify_whitespace = c.options.minify_whitespace,
.chunk = chunk,
.chunks = chunks,
.output = std.ArrayList(u8).init(output_allocator),
.current_import_record_index = 0,
.end_tag_indices = .{
.html = null,
.body = null,
.head = null,
},
.added_head_tags = false,
};
HTMLScanner.HTMLProcessor(HTMLLoader, true).run(
&html_loader,
sources[chunk.entry_point.source_index].contents,
) catch bun.outOfMemory();
// There are some cases where invalid HTML prevents the </head> end tag
// from ever being reported, even if the literal text DOES appear. These
// cases are along the lines of a self-closing tag written for a
// non-self-closing element (e.g. "<head/>"). In that case,
// end_tag_indices.head stays null, and a simple substring search
// through the page is done to find the "</head>".
// See https://github.com/oven-sh/bun/issues/17554
const script_injection_offset: u32 = if (c.dev_server != null) brk: {
if (html_loader.end_tag_indices.head) |head|
break :brk head;
if (bun.strings.indexOf(html_loader.output.items, "</head>")) |head|
break :brk @intCast(head);
if (html_loader.end_tag_indices.body) |body|
break :brk body;
if (html_loader.end_tag_indices.html) |html|
break :brk html;
break :brk @intCast(html_loader.output.items.len); // inject at end of file.
} else brk: {
if (!html_loader.added_head_tags) {
@branchHint(.cold); // this is if the document is missing all head, body, and html elements.
var html_appender = std.heap.stackFallback(256, bun.default_allocator);
const allocator = html_appender.get();
const slices = html_loader.getHeadTags(allocator);
for (slices.slice()) |slice| {
html_loader.output.appendSlice(slice) catch bun.outOfMemory();
allocator.free(slice);
}
}
break :brk if (Environment.isDebug) undefined else 0; // value is ignored. fail loud if hit in debug
};
return .{ .html = .{
.code = html_loader.output.items,
.source_index = chunk.entry_point.source_index,
.script_injection_offset = script_injection_offset,
} };
}
const bun = @import("bun");
const Loader = bun.Loader;
const HTMLScanner = bun.bundle_v2.HTMLScanner;
const Logger = bun.logger;
const Index = bun.bundle_v2.Index;
const LinkerContext = bun.bundle_v2.LinkerContext;
const ThreadPoolLib = bun.ThreadPool;
const debug = LinkerContext.debug;
const Output = bun.Output;
const Environment = bun.Environment;
const strings = bun.strings;
const default_allocator = bun.default_allocator;
const std = @import("std");
const ImportRecord = bun.ImportRecord;
const ImportKind = bun.ImportKind;
const lol = bun.LOLHTML;
const bundler = bun.bundle_v2;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
pub const ParseTask = bun.bundle_v2.ParseTask;
const Chunk = bundler.Chunk;
const CompileResult = bundler.CompileResult;
const PendingPartRange = LinkerContext.PendingPartRange;

View File

@@ -0,0 +1,95 @@
pub fn generateCompileResultForJSChunk(task: *ThreadPoolLib.Task) void {
const part_range: *const PendingPartRange = @fieldParentPtr("task", task);
const ctx = part_range.ctx;
defer ctx.wg.finish();
var worker = ThreadPool.Worker.get(@fieldParentPtr("linker", ctx.c));
defer worker.unget();
const prev_action = if (Environment.show_crash_trace) bun.crash_handler.current_action;
defer if (Environment.show_crash_trace) {
bun.crash_handler.current_action = prev_action;
};
if (Environment.show_crash_trace) bun.crash_handler.current_action = .{ .bundle_generate_chunk = .{
.chunk = ctx.chunk,
.context = ctx.c,
.part_range = &part_range.part_range,
} };
if (Environment.show_crash_trace) {
const path = ctx.c.parse_graph.input_files.items(.source)[part_range.part_range.source_index.get()].path;
if (bun.CLI.debug_flags.hasPrintBreakpoint(path)) {
@breakpoint();
}
}
ctx.chunk.compile_results_for_chunk[part_range.i] = generateCompileResultForJSChunkImpl(worker, ctx.c, ctx.chunk, part_range.part_range);
}
fn generateCompileResultForJSChunkImpl(worker: *ThreadPool.Worker, c: *LinkerContext, chunk: *Chunk, part_range: PartRange) CompileResult {
const trace = bun.perf.trace("Bundler.generateCodeForFileInChunkJS");
defer trace.end();
// Client bundles for Bake must be globally allocated,
// as it must outlive the bundle task.
const allocator = if (c.dev_server) |dev|
if (c.parse_graph.ast.items(.target)[part_range.source_index.get()].bakeGraph() == .client)
dev.allocator
else
default_allocator
else
default_allocator;
var arena = &worker.temporary_arena;
var buffer_writer = js_printer.BufferWriter.init(allocator);
defer _ = arena.reset(.retain_capacity);
worker.stmt_list.reset();
var runtime_scope: *Scope = &c.graph.ast.items(.module_scope)[c.graph.files.items(.input_file)[Index.runtime.value].get()];
var runtime_members = &runtime_scope.members;
const toCommonJSRef = c.graph.symbols.follow(runtime_members.get("__toCommonJS").?.ref);
const toESMRef = c.graph.symbols.follow(runtime_members.get("__toESM").?.ref);
const runtimeRequireRef = if (c.options.output_format == .cjs) null else c.graph.symbols.follow(runtime_members.get("__require").?.ref);
const result = c.generateCodeForFileInChunkJS(
&buffer_writer,
chunk.renamer,
chunk,
part_range,
toCommonJSRef,
toESMRef,
runtimeRequireRef,
&worker.stmt_list,
worker.allocator,
arena.allocator(),
);
return .{
.javascript = .{
.result = result,
.source_index = part_range.source_index.get(),
},
};
}
const bun = @import("bun");
const Index = bun.bundle_v2.Index;
const js_printer = bun.js_printer;
const LinkerContext = bun.bundle_v2.LinkerContext;
const ThreadPool = bun.bundle_v2.ThreadPool;
const ThreadPoolLib = bun.ThreadPool;
const Environment = bun.Environment;
const default_allocator = bun.default_allocator;
const js_ast = bun.js_ast;
const renamer = bun.renamer;
const Scope = js_ast.Scope;
const bundler = bun.bundle_v2;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ParseTask = bun.bundle_v2.ParseTask;
const Chunk = bundler.Chunk;
const PartRange = bundler.PartRange;
const CompileResult = bundler.CompileResult;
const PendingPartRange = LinkerContext.PendingPartRange;

View File

@@ -0,0 +1,128 @@
/// This runs after we've already populated the compile results
pub fn postProcessCSSChunk(ctx: GenerateChunkCtx, worker: *ThreadPool.Worker, chunk: *Chunk) !void {
const c = ctx.c;
var j = StringJoiner{
.allocator = worker.allocator,
.watcher = .{
.input = chunk.unique_key,
},
};
var line_offset: bun.sourcemap.LineColumnOffset.Optional = if (c.options.source_maps != .none) .{ .value = .{} } else .{ .null = {} };
var newline_before_comment = false;
// TODO: css banner
// if len(c.options.CSSBanner) > 0 {
// prevOffset.AdvanceString(c.options.CSSBanner)
// j.AddString(c.options.CSSBanner)
// prevOffset.AdvanceString("\n")
// j.AddString("\n")
// }
// TODO: (this is where we would put the imports)
// Generate any prefix rules now
// (THIS SHOULD BE SET WHEN GENERATING PREFIX RULES!)
// newline_before_comment = true;
// TODO: meta
// Concatenate the generated CSS chunks together
const compile_results = chunk.compile_results_for_chunk;
var compile_results_for_source_map: std.MultiArrayList(CompileResultForSourceMap) = .{};
compile_results_for_source_map.setCapacity(worker.allocator, compile_results.len) catch bun.outOfMemory();
const sources: []const Logger.Source = c.parse_graph.input_files.items(.source);
for (compile_results) |compile_result| {
const source_index = compile_result.sourceIndex();
if (c.options.mode == .bundle and !c.options.minify_whitespace and Index.init(source_index).isValid()) {
if (newline_before_comment) {
j.pushStatic("\n");
line_offset.advance("\n");
}
const pretty = sources[source_index].path.pretty;
j.pushStatic("/* ");
line_offset.advance("/* ");
j.pushStatic(pretty);
line_offset.advance(pretty);
j.pushStatic(" */\n");
line_offset.advance(" */\n");
}
if (compile_result.code().len > 0) {
newline_before_comment = true;
}
// Save the offset to the start of the stored CSS
j.push(compile_result.code(), bun.default_allocator);
if (compile_result.sourceMapChunk()) |source_map_chunk| {
if (c.options.source_maps != .none) {
try compile_results_for_source_map.append(worker.allocator, CompileResultForSourceMap{
.source_map_chunk = source_map_chunk,
.generated_offset = line_offset.value,
.source_index = compile_result.sourceIndex(),
});
}
line_offset.reset();
} else {
line_offset.advance(compile_result.code());
}
}
// Make sure the file ends with a newline
j.ensureNewlineAtEnd();
// if c.options.UnsupportedCSSFeatures.Has(compat.InlineStyle) {
// slashTag = ""
// }
// c.maybeAppendLegalComments(c.options.LegalComments, legalCommentList, chunk, &j, slashTag)
// if len(c.options.CSSFooter) > 0 {
// j.AddString(c.options.CSSFooter)
// j.AddString("\n")
// }
chunk.intermediate_output = c.breakOutputIntoPieces(
worker.allocator,
&j,
@as(u32, @truncate(ctx.chunks.len)),
) catch bun.outOfMemory();
// TODO: meta contents
chunk.isolated_hash = c.generateIsolatedHash(chunk);
// chunk.is_executable = is_executable;
if (c.options.source_maps != .none) {
const can_have_shifts = chunk.intermediate_output == .pieces;
chunk.output_source_map = try c.generateSourceMapForChunk(
chunk.isolated_hash,
worker,
compile_results_for_source_map,
c.resolver.opts.output_dir,
can_have_shifts,
);
}
}
const bun = @import("bun");
const LinkerContext = bun.bundle_v2.LinkerContext;
const Index = bun.bundle_v2.Index;
const std = @import("std");
const Logger = bun.logger;
const options = bun.options;
const Chunk = bun.bundle_v2.Chunk;
const GenerateChunkCtx = bun.bundle_v2.LinkerContext.GenerateChunkCtx;
const ThreadPool = bun.bundle_v2.ThreadPool;
const StringJoiner = bun.StringJoiner;
const CompileResultForSourceMap = bun.bundle_v2.CompileResultForSourceMap;

View File

@@ -0,0 +1,36 @@
pub fn postProcessHTMLChunk(ctx: GenerateChunkCtx, worker: *ThreadPool.Worker, chunk: *Chunk) !void {
// This is where we split output into pieces
const c = ctx.c;
var j = StringJoiner{
.allocator = worker.allocator,
.watcher = .{
.input = chunk.unique_key,
},
};
const compile_results = chunk.compile_results_for_chunk;
for (compile_results) |compile_result| {
j.push(compile_result.code(), bun.default_allocator);
}
j.ensureNewlineAtEnd();
chunk.intermediate_output = c.breakOutputIntoPieces(
worker.allocator,
&j,
@as(u32, @truncate(ctx.chunks.len)),
) catch bun.outOfMemory();
chunk.isolated_hash = c.generateIsolatedHash(chunk);
}
const bun = @import("bun");
const LinkerContext = bun.bundle_v2.LinkerContext;
const Chunk = bun.bundle_v2.Chunk;
const GenerateChunkCtx = bun.bundle_v2.LinkerContext.GenerateChunkCtx;
const ThreadPool = bun.bundle_v2.ThreadPool;
const StringJoiner = bun.StringJoiner;

View File

@@ -0,0 +1,901 @@
/// This runs after we've already populated the compile results
pub fn postProcessJSChunk(ctx: GenerateChunkCtx, worker: *ThreadPool.Worker, chunk: *Chunk, chunk_index: usize) !void {
const trace = bun.perf.trace("Bundler.postProcessJSChunk");
defer trace.end();
_ = chunk_index;
const c = ctx.c;
bun.assert(chunk.content == .javascript);
js_ast.Expr.Data.Store.create();
js_ast.Stmt.Data.Store.create();
defer chunk.renamer.deinit(bun.default_allocator);
var arena = bun.ArenaAllocator.init(worker.allocator);
defer arena.deinit();
// Also generate the cross-chunk binding code
var cross_chunk_prefix: []u8 = &.{};
var cross_chunk_suffix: []u8 = &.{};
var runtime_scope: *Scope = &c.graph.ast.items(.module_scope)[c.graph.files.items(.input_file)[Index.runtime.value].get()];
var runtime_members = &runtime_scope.members;
const toCommonJSRef = c.graph.symbols.follow(runtime_members.get("__toCommonJS").?.ref);
const toESMRef = c.graph.symbols.follow(runtime_members.get("__toESM").?.ref);
const runtimeRequireRef = if (c.options.output_format == .cjs) null else c.graph.symbols.follow(runtime_members.get("__require").?.ref);
{
const print_options = js_printer.Options{
.bundling = true,
.indent = .{},
.has_run_symbol_renamer = true,
.allocator = worker.allocator,
.require_ref = runtimeRequireRef,
.minify_whitespace = c.options.minify_whitespace,
.minify_identifiers = c.options.minify_identifiers,
.minify_syntax = c.options.minify_syntax,
.target = c.options.target,
.print_dce_annotations = c.options.emit_dce_annotations,
.mangled_props = &c.mangled_props,
// .const_values = c.graph.const_values,
};
var cross_chunk_import_records = ImportRecord.List.initCapacity(worker.allocator, chunk.cross_chunk_imports.len) catch unreachable;
defer cross_chunk_import_records.deinitWithAllocator(worker.allocator);
for (chunk.cross_chunk_imports.slice()) |import_record| {
cross_chunk_import_records.appendAssumeCapacity(
.{
.kind = import_record.import_kind,
.path = Fs.Path.init(ctx.chunks[import_record.chunk_index].unique_key),
.range = Logger.Range.None,
},
);
}
const ast = c.graph.ast.get(chunk.entry_point.source_index);
cross_chunk_prefix = js_printer.print(
worker.allocator,
c.resolver.opts.target,
ast.toAST(),
c.getSource(chunk.entry_point.source_index),
print_options,
cross_chunk_import_records.slice(),
&[_]Part{
.{ .stmts = chunk.content.javascript.cross_chunk_prefix_stmts.slice() },
},
chunk.renamer,
false,
).result.code;
cross_chunk_suffix = js_printer.print(
worker.allocator,
c.resolver.opts.target,
ast.toAST(),
c.getSource(chunk.entry_point.source_index),
print_options,
&.{},
&[_]Part{
.{ .stmts = chunk.content.javascript.cross_chunk_suffix_stmts.slice() },
},
chunk.renamer,
false,
).result.code;
}
// Generate the exports for the entry point, if there are any
const entry_point_tail = brk: {
if (chunk.isEntryPoint()) {
break :brk generateEntryPointTailJS(
c,
toCommonJSRef,
toESMRef,
chunk.entry_point.source_index,
worker.allocator,
arena.allocator(),
chunk.renamer,
);
}
break :brk CompileResult.empty;
};
var j = StringJoiner{
.allocator = worker.allocator,
.watcher = .{
.input = chunk.unique_key,
},
};
const output_format = c.options.output_format;
var line_offset: bun.sourcemap.LineColumnOffset.Optional = if (c.options.source_maps != .none) .{ .value = .{} } else .{ .null = {} };
// Concatenate the generated JavaScript chunks together
var newline_before_comment = false;
var is_executable = false;
// Start with the hashbang if there is one. This must be done before the
// banner because it only works if it's literally the first character.
if (chunk.isEntryPoint()) {
const is_bun = ctx.c.graph.ast.items(.target)[chunk.entry_point.source_index].isBun();
const hashbang = c.graph.ast.items(.hashbang)[chunk.entry_point.source_index];
if (hashbang.len > 0) {
j.pushStatic(hashbang);
j.pushStatic("\n");
line_offset.advance(hashbang);
line_offset.advance("\n");
newline_before_comment = true;
is_executable = true;
}
if (is_bun) {
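// The "// @bun" pragma marks the output as already transpiled for
// Bun's runtime. "@bytecode" signals that a bytecode cache file sits
// next to the chunk, and "@bun-cjs" wraps the chunk in a CommonJS
// closure that is closed again in the .cjs case of the output-format
// switch below.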
const cjs_entry_chunk = "(function(exports, require, module, __filename, __dirname) {";
if (ctx.c.options.generate_bytecode_cache and output_format == .cjs) {
const input = "// @bun @bytecode @bun-cjs\n" ++ cjs_entry_chunk;
j.pushStatic(input);
line_offset.advance(input);
} else if (ctx.c.options.generate_bytecode_cache) {
j.pushStatic("// @bun @bytecode\n");
line_offset.advance("// @bun @bytecode\n");
} else if (output_format == .cjs) {
j.pushStatic("// @bun @bun-cjs\n" ++ cjs_entry_chunk);
line_offset.advance("// @bun @bun-cjs\n" ++ cjs_entry_chunk);
} else {
j.pushStatic("// @bun\n");
line_offset.advance("// @bun\n");
}
}
}
if (c.options.banner.len > 0) {
if (newline_before_comment) {
j.pushStatic("\n");
line_offset.advance("\n");
}
j.pushStatic(ctx.c.options.banner);
line_offset.advance(ctx.c.options.banner);
j.pushStatic("\n");
line_offset.advance("\n");
}
// Add the top-level directive if present (but omit "use strict" in ES
// modules because all ES modules are automatically in strict mode)
if (chunk.isEntryPoint() and !output_format.isAlwaysStrictMode()) {
const flags: JSAst.Flags = c.graph.ast.items(.flags)[chunk.entry_point.source_index];
if (flags.has_explicit_use_strict_directive) {
j.pushStatic("\"use strict\";\n");
line_offset.advance("\"use strict\";\n");
newline_before_comment = true;
}
}
// For Bake's dev server (formerly Kit), hoist runtime.js outside of the IIFE
const compile_results = chunk.compile_results_for_chunk;
if (c.options.output_format == .internal_bake_dev) {
for (compile_results) |compile_result| {
const source_index = compile_result.sourceIndex();
if (source_index != Index.runtime.value) break;
line_offset.advance(compile_result.code());
j.push(compile_result.code(), bun.default_allocator);
}
}
switch (c.options.output_format) {
.internal_bake_dev => {
const start = bun.bake.getHmrRuntime(if (c.options.target.isServerSide()) .server else .client);
j.pushStatic(start.code);
line_offset.advance(start.code);
},
.iife => {
// Bun does not lower arrow functions, so the wrapper can be an arrow.
const start = if (c.options.minify_whitespace) "(()=>{" else "(() => {\n";
j.pushStatic(start);
line_offset.advance(start);
},
else => {}, // no wrapper
}
if (cross_chunk_prefix.len > 0) {
newline_before_comment = true;
line_offset.advance(cross_chunk_prefix);
j.push(cross_chunk_prefix, bun.default_allocator);
}
// Concatenate the generated JavaScript chunks together
var prev_filename_comment: Index.Int = 0;
var compile_results_for_source_map: std.MultiArrayList(CompileResultForSourceMap) = .{};
compile_results_for_source_map.setCapacity(worker.allocator, compile_results.len) catch bun.outOfMemory();
const show_comments = c.options.mode == .bundle and
!c.options.minify_whitespace;
const emit_targets_in_comments = show_comments and (if (ctx.c.framework) |fw| fw.server_components != null else false);
const sources: []const Logger.Source = c.parse_graph.input_files.items(.source);
const targets: []const options.Target = c.parse_graph.ast.items(.target);
for (compile_results) |compile_result| {
const source_index = compile_result.sourceIndex();
const is_runtime = source_index == Index.runtime.value;
// TODO: extract legal comments
// Add a comment with the file path before the file contents
if (show_comments and source_index != prev_filename_comment and compile_result.code().len > 0) {
prev_filename_comment = source_index;
if (newline_before_comment) {
j.pushStatic("\n");
line_offset.advance("\n");
}
// Make sure newlines in the path can't cause a syntax error.
const CommentType = enum {
multiline,
single,
};
const pretty = sources[source_index].path.pretty;
// TODO: quote this. This is really janky.
const comment_type = if (strings.indexOfNewlineOrNonASCII(pretty, 0) != null)
CommentType.multiline
else
CommentType.single;
if (!c.options.minify_whitespace and
(output_format == .iife or output_format == .internal_bake_dev))
{
j.pushStatic(" ");
line_offset.advance(" ");
}
switch (comment_type) {
.multiline => {
j.pushStatic("/* ");
line_offset.advance("/* ");
},
.single => {
j.pushStatic("// ");
line_offset.advance("// ");
},
}
j.pushStatic(pretty);
line_offset.advance(pretty);
if (emit_targets_in_comments) {
j.pushStatic(" (");
line_offset.advance(" (");
const target = @tagName(targets[source_index].bakeGraph());
j.pushStatic(target);
line_offset.advance(target);
j.pushStatic(")");
line_offset.advance(")");
}
switch (comment_type) {
.multiline => {
j.pushStatic(" */\n");
line_offset.advance(" */\n");
},
.single => {
j.pushStatic("\n");
line_offset.advance("\n");
},
}
}
if (is_runtime) {
if (c.options.output_format != .internal_bake_dev) {
line_offset.advance(compile_result.code());
j.push(compile_result.code(), bun.default_allocator);
}
} else {
j.push(compile_result.code(), bun.default_allocator);
if (compile_result.sourceMapChunk()) |source_map_chunk| {
if (c.options.source_maps != .none) {
try compile_results_for_source_map.append(worker.allocator, CompileResultForSourceMap{
.source_map_chunk = source_map_chunk,
.generated_offset = line_offset.value,
.source_index = compile_result.sourceIndex(),
});
}
line_offset.reset();
} else {
line_offset.advance(compile_result.code());
}
}
// TODO: metafile
newline_before_comment = compile_result.code().len > 0;
}
const tail_code = entry_point_tail.code();
if (tail_code.len > 0) {
// Stick the entry point tail at the end of the file. Deliberately don't
// include any source mapping information for this because it's automatically
// generated and doesn't correspond to a location in the input file.
j.push(tail_code, bun.default_allocator);
}
// Put the cross-chunk suffix inside the IIFE
if (cross_chunk_suffix.len > 0) {
if (newline_before_comment) {
j.pushStatic("\n");
}
j.push(cross_chunk_suffix, bun.default_allocator);
}
switch (output_format) {
.iife => {
const without_newline = "})();";
const with_newline = if (newline_before_comment)
without_newline ++ "\n"
else
without_newline;
j.pushStatic(with_newline);
},
.internal_bake_dev => {
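// Close the module map opened by the HMR runtime prelude and
// attach metadata; the emitted tail looks roughly like:
//   }, {
//     main: "src/entry.ts"
//   });
// (entry path shown as an illustrative placeholder)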
{
const str = "}, {\n main: ";
j.pushStatic(str);
line_offset.advance(str);
}
{
const input = c.parse_graph.input_files.items(.source)[chunk.entry_point.source_index].path;
var buf = MutableString.initEmpty(worker.allocator);
js_printer.quoteForJSONBuffer(input.pretty, &buf, true) catch bun.outOfMemory();
const str = buf.slice(); // worker.allocator is an arena
j.pushStatic(str);
line_offset.advance(str);
}
// {
// const str = "\n react_refresh: ";
// j.pushStatic(str);
// line_offset.advance(str);
// }
{
const str = "\n});";
j.pushStatic(str);
line_offset.advance(str);
}
},
.cjs => {
if (chunk.isEntryPoint()) {
const is_bun = ctx.c.graph.ast.items(.target)[chunk.entry_point.source_index].isBun();
if (is_bun) {
j.pushStatic("})\n");
line_offset.advance("})\n");
}
}
},
else => {},
}
j.ensureNewlineAtEnd();
// TODO: maybeAppendLegalComments
if (c.options.footer.len > 0) {
if (newline_before_comment) {
j.pushStatic("\n");
line_offset.advance("\n");
}
j.pushStatic(ctx.c.options.footer);
line_offset.advance(ctx.c.options.footer);
j.pushStatic("\n");
line_offset.advance("\n");
}
chunk.intermediate_output = c.breakOutputIntoPieces(
worker.allocator,
&j,
@as(u32, @truncate(ctx.chunks.len)),
) catch bun.outOfMemory();
// TODO: meta contents
chunk.isolated_hash = c.generateIsolatedHash(chunk);
chunk.is_executable = is_executable;
if (c.options.source_maps != .none) {
const can_have_shifts = chunk.intermediate_output == .pieces;
chunk.output_source_map = try c.generateSourceMapForChunk(
chunk.isolated_hash,
worker,
compile_results_for_source_map,
c.resolver.opts.output_dir,
can_have_shifts,
);
}
}
pub fn generateEntryPointTailJS(
c: *LinkerContext,
toCommonJSRef: Ref,
toESMRef: Ref,
source_index: Index.Int,
allocator: std.mem.Allocator,
temp_allocator: std.mem.Allocator,
r: renamer.Renamer,
) CompileResult {
const flags: JSMeta.Flags = c.graph.meta.items(.flags)[source_index];
var stmts = std.ArrayList(Stmt).init(temp_allocator);
defer stmts.deinit();
const ast: JSAst = c.graph.ast.get(source_index);
switch (c.options.output_format) {
.esm => {
switch (flags.wrap) {
.cjs => {
stmts.append(
Stmt.alloc(
// "export default require_foo();"
S.ExportDefault,
.{
.default_name = .{
.loc = Logger.Loc.Empty,
.ref = ast.wrapper_ref,
},
.value = .{
.expr = Expr.init(
E.Call,
E.Call{
.target = Expr.initIdentifier(
ast.wrapper_ref,
Logger.Loc.Empty,
),
},
Logger.Loc.Empty,
),
},
},
Logger.Loc.Empty,
),
) catch unreachable;
},
else => {
if (flags.wrap == .esm and ast.wrapper_ref.isValid()) {
if (flags.is_async_or_has_async_dependency) {
// "await init_foo();"
stmts.append(
Stmt.alloc(
S.SExpr,
.{
.value = Expr.init(
E.Await,
E.Await{
.value = Expr.init(
E.Call,
E.Call{
.target = Expr.initIdentifier(
ast.wrapper_ref,
Logger.Loc.Empty,
),
},
Logger.Loc.Empty,
),
},
Logger.Loc.Empty,
),
},
Logger.Loc.Empty,
),
) catch unreachable;
} else {
// "init_foo();"
stmts.append(
Stmt.alloc(
S.SExpr,
.{
.value = Expr.init(
E.Call,
E.Call{
.target = Expr.initIdentifier(
ast.wrapper_ref,
Logger.Loc.Empty,
),
},
Logger.Loc.Empty,
),
},
Logger.Loc.Empty,
),
) catch unreachable;
}
}
const sorted_and_filtered_export_aliases = c.graph.meta.items(.sorted_and_filtered_export_aliases)[source_index];
if (sorted_and_filtered_export_aliases.len > 0) {
const resolved_exports: ResolvedExports = c.graph.meta.items(.resolved_exports)[source_index];
const imports_to_bind: RefImportData = c.graph.meta.items(.imports_to_bind)[source_index];
// If the output format is ES6 modules and we're an entry point, generate an
// ES6 export statement containing all exports. Except don't do that if this
// entry point is a CommonJS-style module, since that would generate an ES6
// export statement that's not top-level. Instead, we will export the CommonJS
// exports as a default export later on.
var items = std.ArrayList(js_ast.ClauseItem).init(temp_allocator);
const cjs_export_copies = c.graph.meta.items(.cjs_export_copies)[source_index];
var had_default_export = false;
for (sorted_and_filtered_export_aliases, 0..) |alias, i| {
var resolved_export = resolved_exports.get(alias).?;
had_default_export = had_default_export or strings.eqlComptime(alias, "default");
// If this is an export of an import, reference the symbol that the import
// was eventually resolved to. We need to do this because imports have
// already been resolved by this point, so we can't generate a new import
// and have that be resolved later.
if (imports_to_bind.get(resolved_export.data.import_ref)) |import_data| {
resolved_export.data.import_ref = import_data.data.import_ref;
resolved_export.data.source_index = import_data.data.source_index;
}
// Exports of imports need EImportIdentifier in case they need to be re-
// written to a property access later on
if (c.graph.symbols.get(resolved_export.data.import_ref).?.namespace_alias != null) {
const temp_ref = cjs_export_copies[i];
// Create both a local variable and an export clause for that variable.
// The local variable is initialized with the initial value of the
// export. This isn't fully correct because it's a "dead" binding and
// doesn't update with the "live" value as it changes. But ES6 modules
// don't have any syntax for bare named getter functions so this is the
// best we can do.
//
// These input files:
//
// // entry_point.js
// export {foo} from './cjs-format.js'
//
// // cjs-format.js
// Object.defineProperty(exports, 'foo', {
// enumerable: true,
// get: () => Math.random(),
// })
//
// Become this output file:
//
// // cjs-format.js
// var require_cjs_format = __commonJS((exports) => {
// Object.defineProperty(exports, "foo", {
// enumerable: true,
// get: () => Math.random()
// });
// });
//
// // entry_point.js
// var cjs_format = __toESM(require_cjs_format());
// var export_foo = cjs_format.foo;
// export {
// export_foo as foo
// };
//
stmts.append(
Stmt.alloc(
S.Local,
.{
.decls = js_ast.G.Decl.List.fromSlice(
temp_allocator,
&.{
.{
.binding = Binding.alloc(
temp_allocator,
B.Identifier{
.ref = temp_ref,
},
Logger.Loc.Empty,
),
.value = Expr.init(
E.ImportIdentifier,
E.ImportIdentifier{
.ref = resolved_export.data.import_ref,
},
Logger.Loc.Empty,
),
},
},
) catch unreachable,
},
Logger.Loc.Empty,
),
) catch unreachable;
items.append(
.{
.name = js_ast.LocRef{
.ref = temp_ref,
.loc = Logger.Loc.Empty,
},
.alias = alias,
.alias_loc = Logger.Loc.Empty,
},
) catch unreachable;
} else {
// Local identifiers can be exported using an export clause. This is done
// this way instead of leaving the "export" keyword on the local declaration
// itself both because it lets the local identifier be minified and because
// it works transparently for re-exports across files.
//
// These input files:
//
// // entry_point.js
// export * from './esm-format.js'
//
// // esm-format.js
// export let foo = 123
//
// Become this output file:
//
// // esm-format.js
// let foo = 123;
//
// // entry_point.js
// export {
// foo
// };
//
items.append(.{
.name = js_ast.LocRef{
.ref = resolved_export.data.import_ref,
.loc = resolved_export.data.name_loc,
},
.alias = alias,
.alias_loc = resolved_export.data.name_loc,
}) catch unreachable;
}
}
stmts.append(
Stmt.alloc(
S.ExportClause,
.{
.items = items.items,
.is_single_line = false,
},
Logger.Loc.Empty,
),
) catch unreachable;
if (flags.needs_synthetic_default_export and !had_default_export) {
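// Synthesize a default export whose getters forward to the live
// named exports, e.g.:
//   export default {
//     get foo() { return foo; }
//   };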
var properties = G.Property.List.initCapacity(allocator, items.items.len) catch unreachable;
const getter_fn_body = allocator.alloc(Stmt, items.items.len) catch unreachable;
var remain_getter_fn_body = getter_fn_body;
for (items.items) |export_item| {
var fn_body = remain_getter_fn_body[0..1];
remain_getter_fn_body = remain_getter_fn_body[1..];
fn_body[0] = Stmt.alloc(
S.Return,
S.Return{
.value = Expr.init(
E.Identifier,
E.Identifier{
.ref = export_item.name.ref.?,
},
export_item.name.loc,
),
},
Logger.Loc.Empty,
);
properties.appendAssumeCapacity(
G.Property{
.key = Expr.init(
E.String,
E.String{
.data = export_item.alias,
.is_utf16 = false,
},
export_item.alias_loc,
),
.value = Expr.init(
E.Function,
E.Function{
.func = G.Fn{
.body = G.FnBody{
.loc = Logger.Loc.Empty,
.stmts = fn_body,
},
},
},
export_item.alias_loc,
),
.kind = G.Property.Kind.get,
.flags = js_ast.Flags.Property.init(.{
.is_method = true,
}),
},
);
}
stmts.append(
Stmt.alloc(
S.ExportDefault,
S.ExportDefault{
.default_name = .{
.ref = Ref.None,
.loc = Logger.Loc.Empty,
},
.value = .{
.expr = Expr.init(
E.Object,
E.Object{
.properties = properties,
},
Logger.Loc.Empty,
),
},
},
Logger.Loc.Empty,
),
) catch unreachable;
}
}
},
}
},
// TODO: iife
.iife => {},
.internal_bake_dev => {
// nothing needs to be done here, as the exports are already
// forwarded in the module closure.
},
.cjs => {
switch (flags.wrap) {
.cjs => {
// "module.exports = require_foo();"
stmts.append(
Stmt.assign(
Expr.init(
E.Dot,
.{
.target = Expr.initIdentifier(c.unbound_module_ref, Logger.Loc.Empty),
.name = "exports",
.name_loc = Logger.Loc.Empty,
},
Logger.Loc.Empty,
),
Expr.init(
E.Call,
.{
.target = Expr.initIdentifier(ast.wrapper_ref, Logger.Loc.Empty),
},
Logger.Loc.Empty,
),
),
) catch unreachable;
},
.esm => {
// "init_foo();"
stmts.append(
Stmt.alloc(
S.SExpr,
.{
.value = Expr.init(
E.Call,
.{
.target = Expr.initIdentifier(ast.wrapper_ref, Logger.Loc.Empty),
},
Logger.Loc.Empty,
),
},
Logger.Loc.Empty,
),
) catch unreachable;
},
else => {},
}
// TODO:
// If we are generating CommonJS for node, encode the known export names in
// a form that node can understand. This relies on the specific behavior
// of this parser, which the node project uses to detect named exports in
// CommonJS files: https://github.com/guybedford/cjs-module-lexer. Think of
// this code as an annotation for that parser.
},
}
if (stmts.items.len == 0) {
return .{
.javascript = .{
.source_index = source_index,
.result = .{ .result = .{
.code = "",
} },
},
};
}
const print_options = js_printer.Options{
// TODO: IIFE indent
.indent = .{},
.has_run_symbol_renamer = true,
.allocator = allocator,
.to_esm_ref = toESMRef,
.to_commonjs_ref = toCommonJSRef,
.require_or_import_meta_for_source_callback = js_printer.RequireOrImportMeta.Callback.init(LinkerContext, LinkerContext.requireOrImportMetaForSource, c),
.minify_whitespace = c.options.minify_whitespace,
.print_dce_annotations = c.options.emit_dce_annotations,
.minify_syntax = c.options.minify_syntax,
.mangled_props = &c.mangled_props,
// .const_values = c.graph.const_values,
};
return .{
.javascript = .{
.result = js_printer.print(
allocator,
c.resolver.opts.target,
ast.toAST(),
c.getSource(source_index),
print_options,
ast.import_records.slice(),
&[_]Part{
.{
.stmts = stmts.items,
},
},
r,
false,
),
.source_index = source_index,
},
};
}
const bun = @import("bun");
const strings = bun.strings;
const LinkerContext = bun.bundle_v2.LinkerContext;
const Index = bun.bundle_v2.Index;
const ImportRecord = bun.ImportRecord;
const Part = bun.bundle_v2.Part;
const std = @import("std");
const JSMeta = bun.bundle_v2.JSMeta;
const JSAst = bun.bundle_v2.JSAst;
const js_ast = bun.bundle_v2.js_ast;
const Ref = bun.bundle_v2.js_ast.Ref;
const ResolvedExports = bun.bundle_v2.ResolvedExports;
const Logger = bun.logger;
const RefImportData = bun.bundle_v2.RefImportData;
const options = bun.options;
const js_printer = bun.bundle_v2.js_printer;
const renamer = bun.bundle_v2.renamer;
const Chunk = bun.bundle_v2.Chunk;
const Stmt = js_ast.Stmt;
const Expr = js_ast.Expr;
const E = js_ast.E;
const S = js_ast.S;
const G = js_ast.G;
const B = js_ast.B;
const Binding = js_ast.Binding;
const GenerateChunkCtx = bun.bundle_v2.LinkerContext.GenerateChunkCtx;
const ThreadPool = bun.bundle_v2.ThreadPool;
const Scope = js_ast.Scope;
const Fs = bun.bundle_v2.Fs;
const CompileResult = bun.bundle_v2.CompileResult;
const StringJoiner = bun.StringJoiner;
const CompileResultForSourceMap = bun.bundle_v2.CompileResultForSourceMap;
const MutableString = bun.MutableString;

View File

@@ -0,0 +1,288 @@
pub const PrepareCssAstTask = struct {
task: ThreadPoolLib.Task,
chunk: *Chunk,
linker: *LinkerContext,
wg: *sync.WaitGroup,
};
pub fn prepareCssAstsForChunk(task: *ThreadPoolLib.Task) void {
const prepare_css_asts: *const PrepareCssAstTask = @fieldParentPtr("task", task);
defer prepare_css_asts.wg.finish();
var worker = ThreadPool.Worker.get(@fieldParentPtr("linker", prepare_css_asts.linker));
defer worker.unget();
prepareCssAstsForChunkImpl(prepare_css_asts.linker, prepare_css_asts.chunk, worker.allocator);
}
fn prepareCssAstsForChunkImpl(c: *LinkerContext, chunk: *Chunk, allocator: std.mem.Allocator) void {
const asts: []const ?*bun.css.BundlerStyleSheet = c.graph.ast.items(.css);
// Prepare CSS asts
// Remove duplicate rules across files. This must be done in serial, not
// in parallel, and must be done from the last rule to the first rule.
{
var i: usize = chunk.content.css.imports_in_chunk_in_order.len;
while (i != 0) {
i -= 1;
const entry = chunk.content.css.imports_in_chunk_in_order.mut(i);
switch (entry.kind) {
.layers => |layers| {
const len = layers.inner().len;
var rules = bun.css.BundlerCssRuleList{};
if (len > 0) {
rules.v.append(allocator, bun.css.BundlerCssRule{
.layer_statement = bun.css.LayerStatementRule{
.names = bun.css.SmallList(bun.css.LayerName, 1).fromBabyListNoDeinit(layers.inner().*),
.loc = bun.css.Location.dummy(),
},
}) catch bun.outOfMemory();
}
var ast = bun.css.BundlerStyleSheet{
.rules = rules,
.sources = .{},
.source_map_urls = .{},
.license_comments = .{},
.options = bun.css.ParserOptions.default(allocator, null),
.composes = .{},
};
wrapRulesWithConditions(&ast, allocator, &entry.conditions);
chunk.content.css.asts[i] = ast;
},
.external_path => |*p| {
var conditions: ?*bun.css.ImportConditions = null;
if (entry.conditions.len > 0) {
conditions = entry.conditions.mut(0);
entry.condition_import_records.push(
allocator,
bun.ImportRecord{ .kind = .at, .path = p.*, .range = Logger.Range{} },
) catch bun.outOfMemory();
// Handling a chain of nested conditions is complicated. We can't
// necessarily join them together because a) there may be multiple
// layer names and b) layer names are only supposed to be inserted
// into the layer order if the parent conditions are applied.
//
// Instead we handle them by preserving the "@import" nesting using
// imports of data URL stylesheets. This may seem strange but I think
// this is the only way to do this in CSS.
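// Sketch of the encoding: given conditions [outer, inner], the inner
// condition is printed as a one-rule stylesheet of the form
//   @import "<path>" <inner conditions>;
// that stylesheet is inlined as a data: URL, and the outer AST then
// imports the data URL under the outermost condition.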
var j: usize = entry.conditions.len;
while (j != 1) {
j -= 1;
const ast_import = bun.css.BundlerStyleSheet{
.options = bun.css.ParserOptions.default(allocator, null),
.license_comments = .{},
.sources = .{},
.source_map_urls = .{},
.rules = rules: {
var rules = bun.css.BundlerCssRuleList{};
var import_rule = bun.css.ImportRule{
.url = p.pretty,
.import_record_idx = entry.condition_import_records.len,
.loc = bun.css.Location.dummy(),
};
import_rule.conditionsMut().* = entry.conditions.at(j).*;
rules.v.append(allocator, bun.css.BundlerCssRule{
.import = import_rule,
}) catch bun.outOfMemory();
break :rules rules;
},
.composes = .{},
};
const printer_options = bun.css.PrinterOptions{
.targets = bun.css.Targets.forBundlerTarget(c.options.target),
// TODO: make this more configurable
.minify = c.options.minify_whitespace or c.options.minify_syntax or c.options.minify_identifiers,
};
const print_result = switch (ast_import.toCss(
allocator,
printer_options,
.{
.import_records = &entry.condition_import_records,
.ast_urls_for_css = c.parse_graph.ast.items(.url_for_css),
.ast_unique_key_for_additional_file = c.parse_graph.input_files.items(.unique_key_for_additional_file),
},
&c.mangled_props,
&c.graph.symbols,
)) {
.result => |v| v,
.err => |e| {
c.log.addErrorFmt(null, Loc.Empty, c.allocator, "Error generating CSS for import: {}", .{e}) catch bun.outOfMemory();
continue;
},
};
p.* = bun.fs.Path.init(DataURL.encodeStringAsShortestDataURL(allocator, "text/css", std.mem.trim(u8, print_result.code, " \n\r\t")));
}
}
var empty_conditions = bun.css.ImportConditions{};
const actual_conditions = if (conditions) |cc| cc else &empty_conditions;
entry.condition_import_records.push(allocator, bun.ImportRecord{
.kind = .at,
.path = p.*,
.range = Logger.Range.none,
}) catch bun.outOfMemory();
chunk.content.css.asts[i] = bun.css.BundlerStyleSheet{
.rules = rules: {
var rules = bun.css.BundlerCssRuleList{};
var import_rule = bun.css.ImportRule.fromUrlAndImportRecordIdx(p.pretty, entry.condition_import_records.len);
import_rule.conditionsMut().* = actual_conditions.*;
rules.v.append(allocator, bun.css.BundlerCssRule{
.import = import_rule,
}) catch bun.outOfMemory();
break :rules rules;
},
.sources = .{},
.source_map_urls = .{},
.license_comments = .{},
.options = bun.css.ParserOptions.default(allocator, null),
.composes = .{},
};
},
.source_index => |source_index| {
// Multiple imports may refer to the same file/AST, but they
// may wrap or modify the AST in different ways. So we need
// to make a shallow copy and be careful not to modify shared
// references.
var ast = ast: {
const original_stylesheet = asts[source_index.get()].?;
chunk.content.css.asts[i] = original_stylesheet.*;
break :ast &chunk.content.css.asts[i];
};
filter: {
// Filter out "@charset", "@import", and leading "@layer" rules
// TODO: this is a simplified version for now; only @import is handled
for (ast.rules.v.items, 0..) |*rule, ruleidx| {
// if ((rule.* == .import and import_records[source_index.get()].at(rule.import.import_record_idx).is_internal) or rule.* == .ignored) {} else {
if (rule.* == .import or rule.* == .ignored) {} else {
// It's okay to do this because the AST is allocated in an arena
const reslice = ast.rules.v.items[ruleidx..];
ast.rules.v = .{
.items = reslice,
.capacity = ast.rules.v.capacity - (ast.rules.v.items.len - reslice.len),
};
break :filter;
}
}
ast.rules.v.items.len = 0;
}
wrapRulesWithConditions(ast, allocator, &entry.conditions);
// TODO: Remove top-level duplicate rules across files
},
}
}
}
}
fn wrapRulesWithConditions(
ast: *bun.css.BundlerStyleSheet,
temp_allocator: std.mem.Allocator,
conditions: *const BabyList(bun.css.ImportConditions),
) void {
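// Example: the conditions from `@import url(x.css) layer(a) supports(display: flex) screen;`
// wrap the rules innermost-to-outermost as
//   @media screen { @supports (display: flex) { @layer a { ... } } }
// Iterating in reverse keeps the outermost import's conditions as the
// outermost wrappers when imports are nested.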
var dummy_import_records = bun.BabyList(bun.ImportRecord){};
defer bun.debugAssert(dummy_import_records.len == 0);
var i: usize = conditions.len;
while (i > 0) {
i -= 1;
const item = conditions.at(i);
// Generate "@layer" wrappers. Note that empty "@layer" rules still have
// a side effect (they set the layer order) so they cannot be removed.
if (item.layer) |l| {
const layer = l.v;
var do_block_rule = true;
if (ast.rules.v.items.len == 0) {
if (l.v == null) {
// Omit an empty "@layer {}" entirely
continue;
} else {
// Generate "@layer foo;" instead of "@layer foo {}"
ast.rules.v = .{};
do_block_rule = false;
}
}
ast.rules = brk: {
var new_rules = bun.css.BundlerCssRuleList{};
new_rules.v.append(
temp_allocator,
if (do_block_rule) .{ .layer_block = bun.css.BundlerLayerBlockRule{
.name = layer,
.rules = ast.rules,
.loc = bun.css.Location.dummy(),
} } else .{
.layer_statement = .{
.names = if (layer) |ly| bun.css.SmallList(bun.css.LayerName, 1).withOne(ly) else .{},
.loc = bun.css.Location.dummy(),
},
},
) catch bun.outOfMemory();
break :brk new_rules;
};
}
// Generate "@supports" wrappers. This is not done if the rule block is
// empty because empty "@supports" rules have no effect.
if (ast.rules.v.items.len > 0) {
if (item.supports) |*supports| {
ast.rules = brk: {
var new_rules = bun.css.BundlerCssRuleList{};
new_rules.v.append(temp_allocator, .{
.supports = bun.css.BundlerSupportsRule{
.condition = supports.cloneWithImportRecords(
temp_allocator,
&dummy_import_records,
),
.rules = ast.rules,
.loc = bun.css.Location.dummy(),
},
}) catch bun.outOfMemory();
break :brk new_rules;
};
}
}
// Generate "@media" wrappers. This is not done if the rule block is
// empty because empty "@media" rules have no effect.
if (ast.rules.v.items.len > 0 and item.media.media_queries.items.len > 0) {
ast.rules = brk: {
var new_rules = bun.css.BundlerCssRuleList{};
new_rules.v.append(temp_allocator, .{
.media = bun.css.BundlerMediaRule{
.query = item.media.cloneWithImportRecords(temp_allocator, &dummy_import_records),
.rules = ast.rules,
.loc = bun.css.Location.dummy(),
},
}) catch bun.outOfMemory();
break :brk new_rules;
};
}
}
}
const bun = @import("bun");
const BabyList = bun.BabyList;
const DataURL = bun.bundle_v2.DataURL;
const Logger = bun.logger;
const Loc = Logger.Loc;
const LinkerContext = bun.bundle_v2.LinkerContext;
const ThreadPoolLib = bun.ThreadPool;
const std = @import("std");
const sync = bun.ThreadPool;
const ImportRecord = bun.ImportRecord;
const bundler = bun.bundle_v2;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
pub const ParseTask = bun.bundle_v2.ParseTask;
const Chunk = bundler.Chunk;

View File

@@ -0,0 +1,274 @@
/// TODO: investigate whether we need to parallelize this function;
/// esbuild does parallelize it.
pub fn renameSymbolsInChunk(
c: *LinkerContext,
allocator: std.mem.Allocator,
chunk: *Chunk,
files_in_order: []const u32,
) !renamer.Renamer {
const trace = bun.perf.trace("Bundler.renameSymbolsInChunk");
defer trace.end();
const all_module_scopes = c.graph.ast.items(.module_scope);
const all_flags: []const JSMeta.Flags = c.graph.meta.items(.flags);
const all_parts: []const Part.List = c.graph.ast.items(.parts);
const all_wrapper_refs: []const Ref = c.graph.ast.items(.wrapper_ref);
const all_import_records: []const ImportRecord.List = c.graph.ast.items(.import_records);
var reserved_names = try renamer.computeInitialReservedNames(allocator, c.options.output_format);
for (files_in_order) |source_index| {
renamer.computeReservedNamesForScope(&all_module_scopes[source_index], &c.graph.symbols, &reserved_names, allocator);
}
var sorted_imports_from_other_chunks: std.ArrayList(StableRef) = brk: {
var list = std.ArrayList(StableRef).init(allocator);
var count: u32 = 0;
const imports_from_other_chunks = chunk.content.javascript.imports_from_other_chunks.values();
for (imports_from_other_chunks) |item| {
count += item.len;
}
list.ensureTotalCapacityPrecise(count) catch unreachable;
list.items.len = count;
var remain = list.items;
const stable_source_indices = c.graph.stable_source_indices;
for (imports_from_other_chunks) |item| {
for (item.slice()) |ref| {
remain[0] = StableRef{
.stable_source_index = stable_source_indices[ref.ref.sourceIndex()],
.ref = ref.ref,
};
remain = remain[1..];
}
}
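// Sort by stable source index so the renamer visits cross-chunk
// imports in a deterministic order, independent of hash-map iteration
// order.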
std.sort.pdq(StableRef, list.items, {}, StableRef.isLessThan);
break :brk list;
};
defer sorted_imports_from_other_chunks.deinit();
if (c.options.minify_identifiers) {
const first_top_level_slots: js_ast.SlotCounts = brk: {
var slots = js_ast.SlotCounts{};
const nested_scope_slot_counts = c.graph.ast.items(.nested_scope_slot_counts);
for (files_in_order) |i| {
slots.unionMax(nested_scope_slot_counts[i]);
}
break :brk slots;
};
var minify_renamer = try MinifyRenamer.init(allocator, c.graph.symbols, first_top_level_slots, reserved_names);
var top_level_symbols = renamer.StableSymbolCount.Array.init(allocator);
defer top_level_symbols.deinit();
var top_level_symbols_all = renamer.StableSymbolCount.Array.init(allocator);
const stable_source_indices = c.graph.stable_source_indices;
var freq = js_ast.CharFreq{
.freqs = [_]i32{0} ** 64,
};
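// The character frequency histogram biases minified name assignment
// toward characters that already appear frequently in the chunk, which
// tends to help compression.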
const ast_flags_list = c.graph.ast.items(.flags);
var capacity = sorted_imports_from_other_chunks.items.len;
{
const char_freqs = c.graph.ast.items(.char_freq);
for (files_in_order) |source_index| {
if (ast_flags_list[source_index].has_char_freq) {
freq.include(char_freqs[source_index]);
}
}
}
const exports_ref_list = c.graph.ast.items(.exports_ref);
const module_ref_list = c.graph.ast.items(.module_ref);
const parts_list = c.graph.ast.items(.parts);
for (files_in_order) |source_index| {
const ast_flags = ast_flags_list[source_index];
const uses_exports_ref = ast_flags.uses_exports_ref;
const uses_module_ref = ast_flags.uses_module_ref;
const exports_ref = exports_ref_list[source_index];
const module_ref = module_ref_list[source_index];
const parts = parts_list[source_index];
top_level_symbols.clearRetainingCapacity();
if (uses_exports_ref) {
try minify_renamer.accumulateSymbolUseCount(&top_level_symbols, exports_ref, 1, stable_source_indices);
}
if (uses_module_ref) {
try minify_renamer.accumulateSymbolUseCount(&top_level_symbols, module_ref, 1, stable_source_indices);
}
for (parts.slice()) |part| {
if (!part.is_live) {
continue;
}
try minify_renamer.accumulateSymbolUseCounts(&top_level_symbols, part.symbol_uses, stable_source_indices);
for (part.declared_symbols.refs()) |declared_ref| {
try minify_renamer.accumulateSymbolUseCount(&top_level_symbols, declared_ref, 1, stable_source_indices);
}
}
std.sort.pdq(renamer.StableSymbolCount, top_level_symbols.items, {}, StableSymbolCount.lessThan);
capacity += top_level_symbols.items.len;
top_level_symbols_all.appendSlice(top_level_symbols.items) catch unreachable;
}
top_level_symbols.clearRetainingCapacity();
for (sorted_imports_from_other_chunks.items) |stable_ref| {
try minify_renamer.accumulateSymbolUseCount(&top_level_symbols, stable_ref.ref, 1, stable_source_indices);
}
top_level_symbols_all.appendSlice(top_level_symbols.items) catch unreachable;
try minify_renamer.allocateTopLevelSymbolSlots(top_level_symbols_all);
var minifier = freq.compile(allocator);
try minify_renamer.assignNamesByFrequency(&minifier);
return minify_renamer.toRenamer();
}
var r = try renamer.NumberRenamer.init(
allocator,
allocator,
c.graph.symbols,
reserved_names,
);
for (sorted_imports_from_other_chunks.items) |stable_ref| {
r.addTopLevelSymbol(stable_ref.ref);
}
var sorted_ = std.ArrayList(u32).init(r.temp_allocator);
var sorted = &sorted_;
defer sorted.deinit();
for (files_in_order) |source_index| {
const wrap = all_flags[source_index].wrap;
const parts: []const Part = all_parts[source_index].slice();
switch (wrap) {
// Modules wrapped in a CommonJS closure look like this:
//
// // foo.js
// var require_foo = __commonJS((exports, module) => {
// exports.foo = 123;
// });
//
// The symbol "require_foo" is stored in "file.ast.WrapperRef". We want
// to be able to minify everything inside the closure without worrying
// about collisions with other CommonJS modules. Set up the scopes such
// that it appears as if the file was structured this way all along. It's
// not completely accurate (e.g. we don't set the parent of the module
// scope to this new top-level scope) but it's good enough for the
// renaming code.
.cjs => {
r.addTopLevelSymbol(all_wrapper_refs[source_index]);
// External import statements will be hoisted outside of the CommonJS
// wrapper if the output format supports import statements. We need to
// add those symbols to the top-level scope to avoid causing name
// collisions. This code special-cases only those symbols.
if (c.options.output_format.keepES6ImportExportSyntax()) {
const import_records = all_import_records[source_index].slice();
for (parts) |*part| {
for (part.stmts) |stmt| {
switch (stmt.data) {
.s_import => |import| {
if (!import_records[import.import_record_index].source_index.isValid()) {
r.addTopLevelSymbol(import.namespace_ref);
if (import.default_name) |default_name| {
if (default_name.ref) |ref| {
r.addTopLevelSymbol(ref);
}
}
for (import.items) |*item| {
if (item.name.ref) |ref| {
r.addTopLevelSymbol(ref);
}
}
}
},
.s_export_star => |export_| {
if (!import_records[export_.import_record_index].source_index.isValid()) {
r.addTopLevelSymbol(export_.namespace_ref);
}
},
.s_export_from => |export_| {
if (!import_records[export_.import_record_index].source_index.isValid()) {
r.addTopLevelSymbol(export_.namespace_ref);
for (export_.items) |*item| {
if (item.name.ref) |ref| {
r.addTopLevelSymbol(ref);
}
}
}
},
else => {},
}
}
}
}
r.assignNamesRecursiveWithNumberScope(&r.root, &all_module_scopes[source_index], source_index, sorted);
continue;
},
// Modules wrapped in an ESM closure look like this:
//
// // foo.js
// var foo, foo_exports = {};
// __export(foo_exports, {
// foo: () => foo
// });
// let init_foo = __esm(() => {
// foo = 123;
// });
//
// The symbol "init_foo" is stored in "file.ast.WrapperRef". We need to
// minify everything inside the closure without introducing a new scope
// since all top-level variables will be hoisted outside of the closure.
.esm => {
r.addTopLevelSymbol(all_wrapper_refs[source_index]);
},
else => {},
}
for (parts) |*part| {
if (!part.is_live) continue;
r.addTopLevelDeclaredSymbols(part.declared_symbols);
for (part.scopes) |scope| {
r.assignNamesRecursiveWithNumberScope(&r.root, scope, source_index, sorted);
}
r.number_scope_pool.hive.used = @TypeOf(r.number_scope_pool.hive.used).initEmpty();
}
}
return r.toRenamer();
}
const bun = @import("bun");
const LinkerContext = bun.bundle_v2.LinkerContext;
const std = @import("std");
const Part = js_ast.Part;
const js_ast = bun.js_ast;
const ImportRecord = bun.ImportRecord;
const renamer = bun.renamer;
const StableSymbolCount = renamer.StableSymbolCount;
const MinifyRenamer = renamer.MinifyRenamer;
const bundler = bun.bundle_v2;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
pub const ParseTask = bun.bundle_v2.ParseTask;
const Chunk = bundler.Chunk;
const JSMeta = bundler.JSMeta;
const StableRef = bundler.StableRef;
const Ref = bun.bundle_v2.Ref;

File diff suppressed because it is too large

View File

@@ -0,0 +1,438 @@
pub fn writeOutputFilesToDisk(
c: *LinkerContext,
root_path: string,
chunks: []Chunk,
output_files: *std.ArrayList(options.OutputFile),
) !void {
const trace = bun.perf.trace("Bundler.writeOutputFilesToDisk");
defer trace.end();
var root_dir = std.fs.cwd().makeOpenPath(root_path, .{}) catch |err| {
if (err == error.NotDir) {
c.log.addErrorFmt(null, Logger.Loc.Empty, bun.default_allocator, "Failed to create output directory {} is a file. Please choose a different outdir or delete {}", .{
bun.fmt.quote(root_path),
bun.fmt.quote(root_path),
}) catch unreachable;
} else {
c.log.addErrorFmt(null, Logger.Loc.Empty, bun.default_allocator, "Failed to create output directory {s} {}", .{
@errorName(err),
bun.fmt.quote(root_path),
}) catch unreachable;
}
return err;
};
defer root_dir.close();
// Optimization: when writing to disk, we can reuse the same buffers across chunks, since chunks are written one at a time
var max_heap_allocator: bun.MaxHeapAllocator = undefined;
defer max_heap_allocator.deinit();
const code_allocator = max_heap_allocator.init(bun.default_allocator);
var max_heap_allocator_source_map: bun.MaxHeapAllocator = undefined;
defer max_heap_allocator_source_map.deinit();
const source_map_allocator = max_heap_allocator_source_map.init(bun.default_allocator);
var max_heap_allocator_inline_source_map: bun.MaxHeapAllocator = undefined;
defer max_heap_allocator_inline_source_map.deinit();
const code_with_inline_source_map_allocator = max_heap_allocator_inline_source_map.init(bun.default_allocator);
var pathbuf: bun.PathBuffer = undefined;
for (chunks) |*chunk| {
const trace2 = bun.perf.trace("Bundler.writeChunkToDisk");
defer trace2.end();
defer max_heap_allocator.reset();
const rel_path = chunk.final_rel_path;
if (std.fs.path.dirnamePosix(rel_path)) |rel_parent| {
if (rel_parent.len > 0) {
root_dir.makePath(rel_parent) catch |err| {
c.log.addErrorFmt(null, Logger.Loc.Empty, bun.default_allocator, "{s} creating outdir {} while saving chunk {}", .{
@errorName(err),
bun.fmt.quote(rel_parent),
bun.fmt.quote(chunk.final_rel_path),
}) catch unreachable;
return err;
};
}
}
var display_size: usize = 0;
var code_result = chunk.intermediate_output.code(
code_allocator,
c.parse_graph,
&c.graph,
c.resolver.opts.public_path,
chunk,
chunks,
&display_size,
chunk.content.sourcemap(c.options.source_maps) != .none,
) catch |err| bun.Output.panic("Failed to create output chunk: {s}", .{@errorName(err)});
var source_map_output_file: ?options.OutputFile = null;
const input_path = try bun.default_allocator.dupe(
u8,
if (chunk.entry_point.is_entry_point)
c.parse_graph.input_files.items(.source)[chunk.entry_point.source_index].path.text
else
chunk.final_rel_path,
);
switch (chunk.content.sourcemap(c.options.source_maps)) {
.external, .linked => |tag| {
const output_source_map = chunk.output_source_map.finalize(source_map_allocator, code_result.shifts) catch @panic("Failed to allocate memory for external source map");
const source_map_final_rel_path = strings.concat(default_allocator, &.{
chunk.final_rel_path,
".map",
}) catch @panic("Failed to allocate memory for external source map path");
if (tag == .linked) {
const a, const b = if (c.options.public_path.len > 0)
cheapPrefixNormalizer(c.options.public_path, source_map_final_rel_path)
else
.{ "", std.fs.path.basename(source_map_final_rel_path) };
const source_map_start = "//# sourceMappingURL=";
const total_len = code_result.buffer.len + source_map_start.len + a.len + b.len + "\n".len;
var buf = std.ArrayList(u8).initCapacity(Chunk.IntermediateOutput.allocatorForSize(total_len), total_len) catch @panic("Failed to allocate memory for output file with linked source map");
buf.appendSliceAssumeCapacity(code_result.buffer);
buf.appendSliceAssumeCapacity(source_map_start);
buf.appendSliceAssumeCapacity(a);
buf.appendSliceAssumeCapacity(b);
buf.appendAssumeCapacity('\n');
code_result.buffer = buf.items;
}
switch (JSC.Node.fs.NodeFS.writeFileWithPathBuffer(
&pathbuf,
.{
.data = JSC.Node.StringOrBuffer{
.buffer = bun.api.node.Buffer{
.buffer = .{
.ptr = @constCast(output_source_map.ptr),
// TODO: handle > 4 GB files
.len = @as(u32, @truncate(output_source_map.len)),
.byte_len = @as(u32, @truncate(output_source_map.len)),
},
},
},
.encoding = .buffer,
.dirfd = .fromStdDir(root_dir),
.file = .{
.path = .{
.string = bun.PathString.init(source_map_final_rel_path),
},
},
},
)) {
.err => |err| {
try c.log.addSysError(bun.default_allocator, err, "writing sourcemap for chunk {}", .{
bun.fmt.quote(chunk.final_rel_path),
});
return error.WriteFailed;
},
.result => {},
}
source_map_output_file = options.OutputFile.init(.{
.output_path = source_map_final_rel_path,
.input_path = try strings.concat(bun.default_allocator, &.{ input_path, ".map" }),
.loader = .json,
.input_loader = .file,
.output_kind = .sourcemap,
.size = @as(u32, @truncate(output_source_map.len)),
.data = .{
.saved = 0,
},
.side = .client,
.entry_point_index = null,
.is_executable = false,
});
},
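// Inline mode: append the map as a base64 data URL in a sourceMappingURL comment.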
.@"inline" => {
const output_source_map = chunk.output_source_map.finalize(source_map_allocator, code_result.shifts) catch @panic("Failed to allocate memory for inline source map");
const encode_len = base64.encodeLen(output_source_map);
const source_map_start = "//# sourceMappingURL=data:application/json;base64,";
const total_len = code_result.buffer.len + source_map_start.len + encode_len + 1;
var buf = std.ArrayList(u8).initCapacity(code_with_inline_source_map_allocator, total_len) catch @panic("Failed to allocate memory for output file with inline source map");
buf.appendSliceAssumeCapacity(code_result.buffer);
buf.appendSliceAssumeCapacity(source_map_start);
buf.items.len += encode_len;
_ = base64.encode(buf.items[buf.items.len - encode_len ..], output_source_map);
buf.appendAssumeCapacity('\n');
code_result.buffer = buf.items;
},
.none => {},
}
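// Optionally emit a bytecode cache file next to JavaScript entry points.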
const bytecode_output_file: ?options.OutputFile = brk: {
if (c.options.generate_bytecode_cache) {
const loader: Loader = if (chunk.entry_point.is_entry_point)
c.parse_graph.input_files.items(.loader)[
chunk.entry_point.source_index
]
else
.js;
if (loader.isJavaScriptLike()) {
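// Generating bytecode requires JavaScriptCore to be initialized on this (bundler) thread.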
JSC.VirtualMachine.is_bundler_thread_for_bytecode_cache = true;
JSC.initialize(false);
var fdpath: bun.PathBuffer = undefined;
var source_provider_url = try bun.String.createFormat("{s}" ++ bun.bytecode_extension, .{chunk.final_rel_path});
source_provider_url.ref();
defer source_provider_url.deref();
if (JSC.CachedBytecode.generate(c.options.output_format, code_result.buffer, &source_provider_url)) |result| {
const source_provider_url_str = source_provider_url.toSlice(bun.default_allocator);
defer source_provider_url_str.deinit();
const bytecode, const cached_bytecode = result;
debug("Bytecode cache generated {s}: {}", .{ source_provider_url_str.slice(), bun.fmt.size(bytecode.len, .{ .space_between_number_and_unit = true }) });
@memcpy(fdpath[0..chunk.final_rel_path.len], chunk.final_rel_path);
fdpath[chunk.final_rel_path.len..][0..bun.bytecode_extension.len].* = bun.bytecode_extension.*;
defer cached_bytecode.deref();
switch (JSC.Node.fs.NodeFS.writeFileWithPathBuffer(
&pathbuf,
.{
.data = .{
.buffer = .{
.buffer = .{
.ptr = @constCast(bytecode.ptr),
.len = @as(u32, @truncate(bytecode.len)),
.byte_len = @as(u32, @truncate(bytecode.len)),
},
},
},
.encoding = .buffer,
.mode = if (chunk.is_executable) 0o755 else 0o644,
.dirfd = .fromStdDir(root_dir),
.file = .{
.path = .{
.string = bun.PathString.init(fdpath[0 .. chunk.final_rel_path.len + bun.bytecode_extension.len]),
},
},
},
)) {
.result => {},
.err => |err| {
c.log.addErrorFmt(null, Logger.Loc.Empty, bun.default_allocator, "{} writing bytecode for chunk {}", .{
err,
bun.fmt.quote(chunk.final_rel_path),
}) catch unreachable;
return error.WriteFailed;
},
}
break :brk options.OutputFile.init(.{
.output_path = bun.default_allocator.dupe(u8, source_provider_url_str.slice()) catch unreachable,
.input_path = std.fmt.allocPrint(bun.default_allocator, "{s}" ++ bun.bytecode_extension, .{chunk.final_rel_path}) catch unreachable,
.input_loader = .file,
.hash = if (chunk.template.placeholder.hash != null) bun.hash(bytecode) else null,
.output_kind = .bytecode,
.loader = .file,
.size = @as(u32, @truncate(bytecode.len)),
.display_size = @as(u32, @truncate(bytecode.len)),
.data = .{
.saved = 0,
},
.side = null,
.entry_point_index = null,
.is_executable = false,
});
}
}
}
break :brk null;
};
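// Write the chunk's code itself; executable chunks get 0o755 permissions.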
switch (JSC.Node.fs.NodeFS.writeFileWithPathBuffer(
&pathbuf,
.{
.data = .{
.buffer = .{
.buffer = .{
.ptr = @constCast(code_result.buffer.ptr),
// TODO: handle > 4 GB files
.len = @as(u32, @truncate(code_result.buffer.len)),
.byte_len = @as(u32, @truncate(code_result.buffer.len)),
},
},
},
.encoding = .buffer,
.mode = if (chunk.is_executable) 0o755 else 0o644,
.dirfd = .fromStdDir(root_dir),
.file = .{
.path = JSC.Node.PathLike{
.string = bun.PathString.init(rel_path),
},
},
},
)) {
.err => |err| {
try c.log.addSysError(bun.default_allocator, err, "writing chunk {}", .{
bun.fmt.quote(chunk.final_rel_path),
});
return error.WriteFailed;
},
.result => {},
}
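// The source map and bytecode files are appended immediately after this chunk,
// so their indices are offsets from the current output list length.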
const source_map_index: ?u32 = if (source_map_output_file != null)
@as(u32, @truncate(output_files.items.len + 1))
else
null;
const bytecode_index: ?u32 = if (bytecode_output_file != null and source_map_index != null)
@as(u32, @truncate(output_files.items.len + 2))
else if (bytecode_output_file != null)
@as(u32, @truncate(output_files.items.len + 1))
else
null;
const output_kind = if (chunk.content == .css)
.asset
else if (chunk.entry_point.is_entry_point)
c.graph.files.items(.entry_point_kind)[chunk.entry_point.source_index].outputKind()
else
.chunk;
try output_files.append(options.OutputFile.init(.{
.output_path = bun.default_allocator.dupe(u8, chunk.final_rel_path) catch unreachable,
.input_path = input_path,
.input_loader = if (chunk.entry_point.is_entry_point)
c.parse_graph.input_files.items(.loader)[chunk.entry_point.source_index]
else
.js,
.hash = chunk.template.placeholder.hash,
.output_kind = output_kind,
.loader = .js,
.source_map_index = source_map_index,
.bytecode_index = bytecode_index,
.size = @as(u32, @truncate(code_result.buffer.len)),
.display_size = @as(u32, @truncate(display_size)),
.is_executable = chunk.is_executable,
.data = .{
.saved = 0,
},
.side = if (chunk.content == .css)
.client
else switch (c.graph.ast.items(.target)[chunk.entry_point.source_index]) {
.browser => .client,
else => .server,
},
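// Map the graph source index back to the user's entry point list by skipping
// the synthetic modules at the front of the graph (3 when server components
// are enabled, otherwise just the runtime).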
.entry_point_index = if (output_kind == .@"entry-point")
chunk.entry_point.source_index - @as(u32, (if (c.framework) |fw| if (fw.server_components != null) 3 else 1 else 1))
else
null,
.referenced_css_files = switch (chunk.content) {
.javascript => |js| @ptrCast(try bun.default_allocator.dupe(u32, js.css_chunks)),
.css => &.{},
.html => &.{},
},
}));
if (source_map_output_file) |sourcemap_file| {
try output_files.append(sourcemap_file);
}
if (bytecode_output_file) |bytecode_file| {
try output_files.append(bytecode_file);
}
}
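// Flush any additional output files produced during parsing (e.g. copied
// assets), freeing each in-memory buffer once it has been written.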
{
const offset = output_files.items.len;
output_files.items.len += c.parse_graph.additional_output_files.items.len;
for (c.parse_graph.additional_output_files.items, output_files.items[offset..][0..c.parse_graph.additional_output_files.items.len]) |*src, *dest| {
const bytes = src.value.buffer.bytes;
src.value.buffer.bytes.len = 0;
defer {
src.value.buffer.allocator.free(bytes);
}
if (std.fs.path.dirname(src.dest_path)) |rel_parent| {
if (rel_parent.len > 0) {
root_dir.makePath(rel_parent) catch |err| {
c.log.addErrorFmt(null, Logger.Loc.Empty, bun.default_allocator, "{s} creating outdir {} while saving file {}", .{
@errorName(err),
bun.fmt.quote(rel_parent),
bun.fmt.quote(src.dest_path),
}) catch unreachable;
return err;
};
}
}
switch (JSC.Node.fs.NodeFS.writeFileWithPathBuffer(
&pathbuf,
.{
.data = .{
.buffer = .{
.buffer = .{
.ptr = @constCast(bytes.ptr),
.len = @as(u32, @truncate(bytes.len)),
.byte_len = @as(u32, @truncate(bytes.len)),
},
},
},
.encoding = .buffer,
.dirfd = .fromStdDir(root_dir),
.file = .{
.path = JSC.Node.PathLike{
.string = bun.PathString.init(src.dest_path),
},
},
},
)) {
.err => |err| {
c.log.addSysError(bun.default_allocator, err, "writing file {}", .{
bun.fmt.quote(src.src_path.text),
}) catch unreachable;
return error.WriteFailed;
},
.result => {},
}
dest.* = src.*;
dest.value = .{
.saved = .{},
};
dest.size = @as(u32, @truncate(bytes.len));
}
}
}
const bun = @import("bun");
const options = bun.options;
const Loader = bun.Loader;
const Logger = bun.logger;
const Loc = Logger.Loc;
const LinkerContext = bun.bundle_v2.LinkerContext;
const string = bun.string;
const Output = bun.Output;
const strings = bun.strings;
const default_allocator = bun.default_allocator;
const std = @import("std");
const sourcemap = bun.sourcemap;
const base64 = bun.base64;
const JSC = bun.JSC;
const bundler = bun.bundle_v2;
pub const DeferredBatchTask = bun.bundle_v2.DeferredBatchTask;
pub const ThreadPool = bun.bundle_v2.ThreadPool;
pub const ParseTask = bun.bundle_v2.ParseTask;
const Chunk = bundler.Chunk;
const cheapPrefixNormalizer = bundler.cheapPrefixNormalizer;
const debug = LinkerContext.debug;

View File

@@ -30,7 +30,7 @@ pub const show_crash_trace = isDebug or isTest or enable_asan;
/// All calls to `@export` should be gated behind this check, so that code
/// generators that compile Zig code know not to reference and compile a ton of
/// unused code.
pub const export_cpp_apis = @import("builtin").output_mode == .Obj or isTest;
pub const export_cpp_apis = if (build_options.override_no_export_cpp_apis) false else (@import("builtin").output_mode == .Obj or isTest);
pub const build_options = @import("build_options");
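
For context, a minimal sketch of the gating pattern the doc comment above describes; the flag computation and the `addOne`/`bun_add_one` names are illustrative stand-ins, not part of this diff:

const builtin = @import("builtin");

// Stand-in for export_cpp_apis: in Bun it also honors isTest and the new
// override_no_export_cpp_apis build option, as shown in the hunk above.
const export_cpp_apis = builtin.output_mode == .Obj;

fn addOne(x: i32) callconv(.c) i32 {
    return x + 1;
}

comptime {
    // Gate the @export so builds that disable exports never reference addOne,
    // keeping self-hosted-backend builds from compiling unused code.
    if (export_cpp_apis) {
        @export(&addOne, .{ .name = "bun_add_one" });
    }
}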