Mirror of https://github.com/oven-sh/bun, synced 2026-02-02 23:18:47 +00:00

Compare commits: dylan/byte ... jarred/bun (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | 61e75e93a5 | |
@@ -229,7 +229,11 @@ src/bun.js/webcore/TextEncoderStreamEncoder.zig
src/bun.js/WTFTimer.zig
src/bun.zig
src/bundler/bundle_v2.zig
src/bundler/Chunk.zig
src/bundler/entry_points.zig
src/bundler/linker.zig
src/bundler/LinkerGraph.zig
src/bundler/ParseTask.zig
src/bunfig.zig
src/cache.zig
src/ci_info.zig
@@ -481,7 +485,6 @@ src/json_parser.zig
src/libarchive/libarchive-bindings.zig
src/libarchive/libarchive.zig
src/linear_fifo.zig
src/linker.zig
src/linux.zig
src/logger.zig
src/macho.zig
@@ -493,6 +496,7 @@ src/multi_array_list.zig
src/Mutex.zig
src/napi/napi.zig
src/node_fallbacks.zig
src/old_linker.zig
src/open.zig
src/options.zig
src/output.zig
@@ -1190,6 +1190,7 @@ pub const js_parser = @import("./js_parser.zig");
pub const js_printer = @import("./js_printer.zig");
pub const js_lexer = @import("./js_lexer.zig");
pub const JSON = @import("./json_parser.zig");
pub const TOML = @import("./toml/toml_parser.zig").TOML;
pub const JSAst = @import("./js_ast.zig");
pub const bit_set = @import("./bit_set.zig");
src/bundler/Chunk.zig (normal file, 608 lines)
@@ -0,0 +1,608 @@
/// This is a random string and is used to represent the output path of this
/// chunk before the final output path has been computed. See OutputPiece
/// for more info on this technique.
unique_key: string = "",

files_with_parts_in_chunk: std.AutoArrayHashMapUnmanaged(Index.Int, void) = .{},

/// We must not keep pointers to this type until all chunks have been allocated.
entry_bits: AutoBitSet = undefined,

final_rel_path: string = "",
/// The path template used to generate `final_rel_path`
template: PathTemplate = .{},

/// For code splitting
cross_chunk_imports: BabyList(ChunkImport) = .{},

content: Content,

entry_point: Chunk.EntryPoint = .{},

is_executable: bool = false,
has_html_chunk: bool = false,

output_source_map: sourcemap.SourceMapPieces,

intermediate_output: IntermediateOutput = .{ .empty = {} },
isolated_hash: u64 = std.math.maxInt(u64),

renamer: bun.renamer.Renamer = undefined,

compile_results_for_chunk: []bundler.CompileResult = &.{},

pub const ChunkImport = struct {
    chunk_index: u32,
    import_kind: bun.ImportKind,
};

pub inline fn isEntryPoint(this: *const Chunk) bool {
    return this.entry_point.is_entry_point;
}

pub fn getJSChunkForHTML(this: *const Chunk, chunks: []Chunk) ?*Chunk {
    const entry_point_id = this.entry_point.entry_point_id;
    for (chunks) |*other| {
        if (other.content == .javascript) {
            if (other.entry_point.entry_point_id == entry_point_id) {
                return other;
            }
        }
    }
    return null;
}

pub fn getCSSChunkForHTML(this: *const Chunk, chunks: []Chunk) ?*Chunk {
    const entry_point_id = this.entry_point.entry_point_id;
    for (chunks) |*other| {
        if (other.content == .css) {
            if (other.entry_point.entry_point_id == entry_point_id) {
                return other;
            }
        }
    }
    return null;
}

pub inline fn entryBits(this: *const Chunk) *const AutoBitSet {
    return &this.entry_bits;
}

pub const Order = struct {
    source_index: Index.Int = 0,
    distance: u32 = 0,
    tie_breaker: u32 = 0,

    pub fn lessThan(_: @This(), a: Order, b: Order) bool {
        return (a.distance < b.distance) or
            (a.distance == b.distance and a.tie_breaker < b.tie_breaker);
    }

    /// Sort so files closest to an entry point come first. If two files are
    /// equidistant to an entry point, then break the tie by sorting on the
    /// stable source index derived from the DFS over all entry points.
    pub fn sort(a: []Order) void {
        std.sort.pdq(Order, a, Order{}, lessThan);
    }
};
/// TODO: rewrite this
/// This implementation is just slow.
/// Can we make the JSPrinter itself track this without increasing
/// complexity a lot?
pub const IntermediateOutput = union(enum) {
    /// If the chunk has references to other chunks, then "pieces" contains
    /// the contents of the chunk. Another joiner will have to be
    /// constructed later when merging the pieces together.
    ///
    /// See OutputPiece's documentation comment for more details.
    pieces: bun.BabyList(OutputPiece),

    /// If the chunk doesn't have any references to other chunks, then
    /// `joiner` contains the contents of the chunk. This is more efficient
    /// because it avoids doing a join operation twice.
    joiner: StringJoiner,

    empty: void,

    pub fn allocatorForSize(size: usize) std.mem.Allocator {
        if (size >= 512 * 1024)
            return std.heap.page_allocator
        else
            return bun.default_allocator;
    }

    pub const CodeResult = struct {
        buffer: []u8,
        shifts: []sourcemap.SourceMapShifts,
    };

    pub fn code(
        this: *IntermediateOutput,
        allocator_to_use: ?std.mem.Allocator,
        parse_graph: *const Graph,
        linker_graph: *const LinkerGraph,
        import_prefix: []const u8,
        chunk: *Chunk,
        chunks: []Chunk,
        display_size: ?*usize,
        enable_source_map_shifts: bool,
    ) !CodeResult {
        return switch (enable_source_map_shifts) {
            inline else => |source_map_shifts| this.codeWithSourceMapShifts(
                allocator_to_use,
                parse_graph,
                linker_graph,
                import_prefix,
                chunk,
                chunks,
                display_size,
                source_map_shifts,
            ),
        };
    }

    pub fn codeWithSourceMapShifts(
        this: *IntermediateOutput,
        allocator_to_use: ?std.mem.Allocator,
        graph: *const Graph,
        linker_graph: *const LinkerGraph,
        import_prefix: []const u8,
        chunk: *Chunk,
        chunks: []Chunk,
        display_size: ?*usize,
        comptime enable_source_map_shifts: bool,
    ) !CodeResult {
        const additional_files = graph.input_files.items(.additional_files);
        const unique_key_for_additional_files = graph.input_files.items(.unique_key_for_additional_file);
        switch (this.*) {
            .pieces => |*pieces| {
                const entry_point_chunks_for_scb = linker_graph.files.items(.entry_point_chunk_index);

                var shift = if (enable_source_map_shifts)
                    sourcemap.SourceMapShifts{
                        .after = .{},
                        .before = .{},
                    };
                var shifts = if (enable_source_map_shifts)
                    try std.ArrayList(sourcemap.SourceMapShifts).initCapacity(bun.default_allocator, pieces.len + 1);

                if (enable_source_map_shifts)
                    shifts.appendAssumeCapacity(shift);

                var count: usize = 0;
                var from_chunk_dir = std.fs.path.dirnamePosix(chunk.final_rel_path) orelse "";
                if (strings.eqlComptime(from_chunk_dir, "."))
                    from_chunk_dir = "";

                for (pieces.slice()) |piece| {
                    count += piece.data_len;

                    switch (piece.query.kind) {
                        .chunk, .asset, .scb => {
                            const index = piece.query.index;
                            const file_path = switch (piece.query.kind) {
                                .asset => brk: {
                                    const files = additional_files[index];
                                    if (!(files.len > 0)) {
                                        Output.panic("Internal error: missing asset file", .{});
                                    }

                                    const output_file = files.last().?.output_file;

                                    break :brk graph.additional_output_files.items[output_file].dest_path;
                                },
                                .chunk => chunks[index].final_rel_path,
                                .scb => chunks[entry_point_chunks_for_scb[index]].final_rel_path,
                                .none => unreachable,
                            };

                            const cheap_normalizer = cheapPrefixNormalizer(
                                import_prefix,
                                if (from_chunk_dir.len == 0)
                                    file_path
                                else
                                    bun.path.relativePlatform(from_chunk_dir, file_path, .posix, false),
                            );
                            count += cheap_normalizer[0].len + cheap_normalizer[1].len;
                        },
                        .none => {},
                    }
                }

                if (display_size) |amt| {
                    amt.* = count;
                }

                const debug_id_len = if (enable_source_map_shifts and FeatureFlags.source_map_debug_id)
                    std.fmt.count("\n//# debugId={}\n", .{bun.sourcemap.DebugIDFormatter{ .id = chunk.isolated_hash }})
                else
                    0;

                const total_buf = try (allocator_to_use orelse allocatorForSize(count)).alloc(u8, count + debug_id_len);
                var remain = total_buf;

                for (pieces.slice()) |piece| {
                    const data = piece.data();

                    if (enable_source_map_shifts) {
                        var data_offset = sourcemap.LineColumnOffset{};
                        data_offset.advance(data);
                        shift.before.add(data_offset);
                        shift.after.add(data_offset);
                    }

                    if (data.len > 0)
                        @memcpy(remain[0..data.len], data);

                    remain = remain[data.len..];

                    switch (piece.query.kind) {
                        .asset, .chunk, .scb => {
                            const index = piece.query.index;
                            const file_path = switch (piece.query.kind) {
                                .asset => brk: {
                                    const files = additional_files[index];
                                    bun.assert(files.len > 0);

                                    const output_file = files.last().?.output_file;

                                    if (enable_source_map_shifts) {
                                        shift.before.advance(unique_key_for_additional_files[index]);
                                    }

                                    break :brk graph.additional_output_files.items[output_file].dest_path;
                                },
                                .chunk => brk: {
                                    const piece_chunk = chunks[index];

                                    if (enable_source_map_shifts) {
                                        shift.before.advance(piece_chunk.unique_key);
                                    }

                                    break :brk piece_chunk.final_rel_path;
                                },
                                .scb => brk: {
                                    const piece_chunk = chunks[entry_point_chunks_for_scb[index]];

                                    if (enable_source_map_shifts) {
                                        shift.before.advance(piece_chunk.unique_key);
                                    }

                                    break :brk piece_chunk.final_rel_path;
                                },
                                else => unreachable,
                            };

                            // normalize windows paths to '/'
                            bun.path.platformToPosixInPlace(u8, @constCast(file_path));
                            const cheap_normalizer = cheapPrefixNormalizer(
                                import_prefix,
                                if (from_chunk_dir.len == 0)
                                    file_path
                                else
                                    bun.path.relativePlatform(from_chunk_dir, file_path, .posix, false),
                            );

                            if (cheap_normalizer[0].len > 0) {
                                @memcpy(remain[0..cheap_normalizer[0].len], cheap_normalizer[0]);
                                remain = remain[cheap_normalizer[0].len..];
                                if (enable_source_map_shifts)
                                    shift.after.advance(cheap_normalizer[0]);
                            }

                            if (cheap_normalizer[1].len > 0) {
                                @memcpy(remain[0..cheap_normalizer[1].len], cheap_normalizer[1]);
                                remain = remain[cheap_normalizer[1].len..];
                                if (enable_source_map_shifts)
                                    shift.after.advance(cheap_normalizer[1]);
                            }

                            if (enable_source_map_shifts)
                                shifts.appendAssumeCapacity(shift);
                        },
                        .none => {},
                    }
                }

                if (enable_source_map_shifts and FeatureFlags.source_map_debug_id) {
                    // This comment must go before the //# sourceMappingURL comment
                    remain = remain[(std.fmt.bufPrint(
                        remain,
                        "\n//# debugId={}\n",
                        .{bun.sourcemap.DebugIDFormatter{ .id = chunk.isolated_hash }},
                    ) catch bun.outOfMemory()).len..];
                }

                bun.assert(remain.len == 0);
                bun.assert(total_buf.len == count + debug_id_len);

                return .{
                    .buffer = total_buf,
                    .shifts = if (enable_source_map_shifts)
                        shifts.items
                    else
                        &[_]sourcemap.SourceMapShifts{},
                };
            },
            .joiner => |*joiner| {
                const allocator = allocator_to_use orelse allocatorForSize(joiner.len);

                if (display_size) |amt| {
                    amt.* = joiner.len;
                }

                const buffer = brk: {
                    if (enable_source_map_shifts and FeatureFlags.source_map_debug_id) {
                        // This comment must go before the //# sourceMappingURL comment
                        const debug_id_fmt = std.fmt.allocPrint(
                            graph.allocator,
                            "\n//# debugId={}\n",
                            .{bun.sourcemap.DebugIDFormatter{ .id = chunk.isolated_hash }},
                        ) catch bun.outOfMemory();

                        break :brk try joiner.doneWithEnd(allocator, debug_id_fmt);
                    }

                    break :brk try joiner.done(allocator);
                };

                return .{
                    .buffer = buffer,
                    .shifts = &[_]sourcemap.SourceMapShifts{},
                };
            },
            .empty => return .{
                .buffer = "",
                .shifts = &[_]sourcemap.SourceMapShifts{},
            },
        }
    }
};
/// An issue with asset files and server component boundaries is they
/// contain references to output paths, but those paths are not known until
/// very late in the bundle. The solution is to have a magic word in the
/// bundle text (BundleV2.unique_key, a random u64; impossible to guess).
/// When a file wants a path to an emitted chunk, it emits the unique key
/// in hex followed by the kind of path it wants:
///
///  `74f92237f4a85a6aA00000009` --> `./some-asset.png`
///   ^--------------^|^------- .query.index
///      unique_key   .query.kind
///
/// An output piece is the concatenation of source code text and an output
/// path, in that order. An array of pieces makes up an entire file.
pub const OutputPiece = struct {
    /// Pointer and length split to reduce struct size
    data_ptr: [*]const u8,
    data_len: u32,
    query: Query,

    pub fn data(this: OutputPiece) []const u8 {
        return this.data_ptr[0..this.data_len];
    }

    pub const Query = packed struct(u32) {
        index: u30,
        kind: Kind,

        pub const Kind = enum(u2) {
            /// The last piece in an array uses this to indicate it is just data
            none,
            /// Given a source index, print the asset's output
            asset,
            /// Given a chunk index, print the chunk's output path
            chunk,
            /// Given a server component boundary index, print the chunk's output path
            scb,
        };

        pub const none: Query = .{ .index = 0, .kind = .none };
    };

    pub fn init(data_slice: []const u8, query: Query) OutputPiece {
        return .{
            .data_ptr = data_slice.ptr,
            .data_len = @intCast(data_slice.len),
            .query = query,
        };
    }
};

pub const OutputPieceIndex = OutputPiece.Query;

pub const EntryPoint = packed struct(u64) {
    /// Index into `Graph.input_files`
    source_index: u32 = 0,
    entry_point_id: ID = 0,
    is_entry_point: bool = false,
    is_html: bool = false,

    /// so `EntryPoint` can be a u64
    pub const ID = u30;
};
pub const JavaScriptChunk = struct {
    files_in_chunk_order: []const Index.Int = &.{},
    parts_in_chunk_in_order: []const PartRange = &.{},

    // for code splitting
    exports_to_other_chunks: std.ArrayHashMapUnmanaged(Ref, string, Ref.ArrayHashCtx, false) = .{},
    imports_from_other_chunks: ImportsFromOtherChunks = .{},
    cross_chunk_prefix_stmts: BabyList(Stmt) = .{},
    cross_chunk_suffix_stmts: BabyList(Stmt) = .{},

    /// Indexes to CSS chunks. Currently this will only ever be zero or one
    /// items long, but smarter css chunking will allow multiple js entry points
    /// share a css file, or have an entry point contain multiple css files.
    ///
    /// Mutated while sorting chunks in `computeChunks`
    css_chunks: []u32 = &.{},
};

pub const CssChunk = struct {
    imports_in_chunk_in_order: BabyList(CssImportOrder),
    /// When creating a chunk, this is to be an uninitialized slice with
    /// length of `imports_in_chunk_in_order`
    ///
    /// Multiple imports may refer to the same file/stylesheet, but may need to
    /// wrap them in conditions (e.g. a layer).
    ///
    /// When we go through the `prepareCssAstsForChunk()` step, each import will
    /// create a shallow copy of the file's AST (just dereferencing the pointer).
    asts: []bun.css.BundlerStyleSheet,
};

const CssImportKind = enum {
    source_index,
    external_path,
    import_layers,
};

pub const CssImportOrder = struct {
    conditions: BabyList(bun.css.ImportConditions) = .{},
    condition_import_records: BabyList(ImportRecord) = .{},

    kind: union(enum) {
        /// Represents earlier imports that have been made redundant by later ones (see `isConditionalImportRedundant`)
        /// We don't want to redundantly print the rules of these redundant imports
        /// BUT, the imports may include layers.
        /// We'll just print layer name declarations so that the original ordering is preserved.
        layers: Layers,
        external_path: bun.fs.Path,
        source_index: Index,
    },

    pub const Layers = bun.ptr.Cow(bun.BabyList(bun.css.LayerName), struct {
        const Self = bun.BabyList(bun.css.LayerName);
        pub fn copy(self: *const Self, allocator: std.mem.Allocator) Self {
            return self.deepClone2(allocator);
        }

        pub fn deinit(self: *Self, a: std.mem.Allocator) void {
            // do shallow deinit since `LayerName` has
            // allocations in arena
            self.deinitWithAllocator(a);
        }
    });

    pub fn hash(this: *const CssImportOrder, hasher: anytype) void {
        // TODO: conditions, condition_import_records

        bun.writeAnyToHasher(hasher, std.meta.activeTag(this.kind));
        switch (this.kind) {
            .layers => |layers| {
                for (layers.inner().sliceConst()) |layer| {
                    for (layer.v.slice(), 0..) |layer_name, i| {
                        const is_last = i == layers.inner().len - 1;
                        if (is_last) {
                            hasher.update(layer_name);
                        } else {
                            hasher.update(layer_name);
                            hasher.update(".");
                        }
                    }
                }
                hasher.update("\x00");
            },
            .external_path => |path| hasher.update(path.text),
            .source_index => |idx| bun.writeAnyToHasher(hasher, idx),
        }
    }

    pub fn fmt(this: *const CssImportOrder, ctx: *LinkerContext) CssImportOrderDebug {
        return .{
            .inner = this,
            .ctx = ctx,
        };
    }

    const CssImportOrderDebug = struct {
        inner: *const CssImportOrder,
        ctx: *LinkerContext,

        pub fn format(this: *const CssImportOrderDebug, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
            try writer.print("{s} = ", .{@tagName(this.inner.kind)});
            switch (this.inner.kind) {
                .layers => |layers| {
                    try writer.print("[", .{});
                    const l = layers.inner();
                    for (l.sliceConst(), 0..) |*layer, i| {
                        if (i > 0) try writer.print(", ", .{});
                        try writer.print("\"{}\"", .{layer});
                    }

                    try writer.print("]", .{});
                },
                .external_path => |path| {
                    try writer.print("\"{s}\"", .{path.pretty});
                },
                .source_index => |source_index| {
                    const source = this.ctx.parse_graph.input_files.items(.source)[source_index.get()];
                    try writer.print("{d} ({s})", .{ source_index.get(), source.path.text });
                },
            }
        }
    };
};
pub const ImportsFromOtherChunks = std.AutoArrayHashMapUnmanaged(Index.Int, CrossChunkImport.Item.List);

pub const Content = union(enum) {
    javascript: JavaScriptChunk,
    css: CssChunk,
    html,

    pub fn sourcemap(this: *const Content, default: options.SourceMapOption) options.SourceMapOption {
        return switch (this.*) {
            .javascript => default,
            .css => .none, // TODO: css source maps
            .html => .none,
        };
    }

    pub fn loader(this: *const Content) Loader {
        return switch (this.*) {
            .javascript => .js,
            .css => .css,
            .html => .html,
        };
    }

    pub fn ext(this: *const Content) string {
        return switch (this.*) {
            .javascript => "js",
            .css => "css",
            .html => "html",
        };
    }
};

const Chunk = @This();
const std = @import("std");
const bun = @import("bun");
const strings = bun.strings;
const Output = bun.Output;
const bundler = bun.bundle_v2;
const PathTemplate = options.PathTemplate;
const cheapPrefixNormalizer = bundler.cheapPrefixNormalizer;

const sourcemap = bun.sourcemap;
const options = bun.options;
const Index = bundler.Index;
const string = []const u8;
const AutoBitSet = bun.bit_set.AutoBitSet;
const BabyList = bun.BabyList;
const ImportRecord = bun.ImportRecord;
const StringJoiner = bun.StringJoiner;
const LinkerContext = bundler.LinkerContext;
const Loader = bun.options.Loader;
const Graph = bundler.Graph;
const LinkerGraph = bundler.LinkerGraph;
const CompileResult = bundler.CompileResult;
const CrossChunkImport = bundler.CrossChunkImport;
const FeatureFlags = bun.FeatureFlags;
const PartRange = bundler.PartRange;
const Ref = bundler.Ref;
const Stmt = bun.JSAst.Stmt;
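The OutputPiece comment above describes the trick this file relies on: chunk text is emitted with a random unique key standing in for output paths that are only known after every chunk has been placed, and `IntermediateOutput.code` later splices the real paths in between the pieces. A minimal standalone sketch of that substitution step follows; the placeholder value, function name, and test are illustrative assumptions, not code from this commit.

// Illustrative sketch only: replace one unique-key placeholder with the
// final output path once it is known, mirroring the pieces/joiner idea.
const std = @import("std");

fn replacePlaceholder(
    allocator: std.mem.Allocator,
    source_text: []const u8,
    placeholder: []const u8,
    final_path: []const u8,
) ![]u8 {
    // Split the text into two "pieces" around the placeholder, then re-join
    // them with the final path in the middle.
    const idx = std.mem.indexOf(u8, source_text, placeholder) orelse
        return allocator.dupe(u8, source_text);
    const out = try allocator.alloc(u8, source_text.len - placeholder.len + final_path.len);
    @memcpy(out[0..idx], source_text[0..idx]);
    @memcpy(out[idx..][0..final_path.len], final_path);
    @memcpy(out[idx + final_path.len ..], source_text[idx + placeholder.len ..]);
    return out;
}

test "placeholder substitution" {
    const gpa = std.testing.allocator;
    const text = "import(\"74f92237f4a85a6aA00000009\")";
    const rewritten = try replacePlaceholder(gpa, text, "74f92237f4a85a6aA00000009", "./some-asset.png");
    defer gpa.free(rewritten);
    try std.testing.expectEqualStrings("import(\"./some-asset.png\")", rewritten);
}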
src/bundler/LinkerGraph.zig (normal file, 465 lines)
@@ -0,0 +1,465 @@
files: File.List = .{},
files_live: BitSet = undefined,
entry_points: EntryPoint.List = .{},
symbols: js_ast.Symbol.Map = .{},

allocator: std.mem.Allocator,

code_splitting: bool = false,

// This is an alias from Graph
// it is not a clone!
ast: MultiArrayList(JSAst) = .{},
meta: MultiArrayList(JSMeta) = .{},

/// We should avoid traversing all files in the bundle, because the linker
/// should be able to run a linking operation on a large bundle where only
/// a few files are needed (e.g. an incremental compilation scenario). This
/// holds all files that could possibly be reached through the entry points.
/// If you need to iterate over all files in the linking operation, iterate
/// over this array. This array is also sorted in a deterministic ordering
/// to help ensure deterministic builds (source indices are random).
reachable_files: []Index = &[_]Index{},

/// Index from `.parse_graph.input_files` to index in `.files`
stable_source_indices: []const u32 = &[_]u32{},

is_scb_bitset: BitSet = .{},
has_client_components: bool = false,
has_server_components: bool = false,

/// This is for cross-module inlining of detected inlinable constants
// const_values: js_ast.Ast.ConstValuesMap = .{},
/// This is for cross-module inlining of TypeScript enum constants
ts_enums: js_ast.Ast.TsEnumsMap = .{},
pub fn init(allocator: std.mem.Allocator, file_count: usize) !LinkerGraph {
    return LinkerGraph{
        .allocator = allocator,
        .files_live = try BitSet.initEmpty(allocator, file_count),
    };
}

pub fn runtimeFunction(this: *const LinkerGraph, name: string) Ref {
    return this.ast.items(.named_exports)[Index.runtime.value].get(name).?.ref;
}

pub fn generateNewSymbol(this: *LinkerGraph, source_index: u32, kind: Symbol.Kind, original_name: string) Ref {
    const source_symbols = &this.symbols.symbols_for_source.slice()[source_index];

    var ref = Ref.init(
        @truncate(source_symbols.len),
        @truncate(source_index),
        false,
    );
    ref.tag = .symbol;

    // TODO: will this crash on resize due to using threadlocal mimalloc heap?
    source_symbols.push(
        this.allocator,
        .{
            .kind = kind,
            .original_name = original_name,
        },
    ) catch unreachable;

    this.ast.items(.module_scope)[source_index].generated.push(this.allocator, ref) catch unreachable;
    return ref;
}

pub fn generateRuntimeSymbolImportAndUse(
    graph: *LinkerGraph,
    source_index: Index.Int,
    entry_point_part_index: Index,
    name: []const u8,
    count: u32,
) !void {
    if (count == 0) return;
    debug("generateRuntimeSymbolImportAndUse({s}) for {d}", .{ name, source_index });

    const ref = graph.runtimeFunction(name);
    try graph.generateSymbolImportAndUse(
        source_index,
        entry_point_part_index.get(),
        ref,
        count,
        Index.runtime,
    );
}

pub fn addPartToFile(
    graph: *LinkerGraph,
    id: u32,
    part: Part,
) !u32 {
    var parts: *Part.List = &graph.ast.items(.parts)[id];
    const part_id = @as(u32, @truncate(parts.len));
    try parts.push(graph.allocator, part);
    var top_level_symbol_to_parts_overlay: ?*TopLevelSymbolToParts = null;

    const Iterator = struct {
        graph: *LinkerGraph,
        id: u32,
        top_level_symbol_to_parts_overlay: *?*TopLevelSymbolToParts,
        part_id: u32,

        pub fn next(self: *@This(), ref: Ref) void {
            var overlay = brk: {
                if (self.top_level_symbol_to_parts_overlay.*) |out| {
                    break :brk out;
                }

                const out = &self.graph.meta.items(.top_level_symbol_to_parts_overlay)[self.id];

                self.top_level_symbol_to_parts_overlay.* = out;
                break :brk out;
            };

            var entry = overlay.getOrPut(self.graph.allocator, ref) catch unreachable;
            if (!entry.found_existing) {
                if (self.graph.ast.items(.top_level_symbols_to_parts)[self.id].get(ref)) |original_parts| {
                    var list = std.ArrayList(u32).init(self.graph.allocator);
                    list.ensureTotalCapacityPrecise(original_parts.len + 1) catch unreachable;
                    list.appendSliceAssumeCapacity(original_parts.slice());
                    list.appendAssumeCapacity(self.part_id);

                    entry.value_ptr.* = .init(list.items);
                } else {
                    entry.value_ptr.* = BabyList(u32).fromSlice(self.graph.allocator, &.{self.part_id}) catch bun.outOfMemory();
                }
            } else {
                entry.value_ptr.push(self.graph.allocator, self.part_id) catch unreachable;
            }
        }
    };

    var ctx = Iterator{
        .graph = graph,
        .id = id,
        .part_id = part_id,
        .top_level_symbol_to_parts_overlay = &top_level_symbol_to_parts_overlay,
    };

    js_ast.DeclaredSymbol.forEachTopLevelSymbol(&parts.ptr[part_id].declared_symbols, &ctx, Iterator.next);

    return part_id;
}

pub fn generateSymbolImportAndUse(
    g: *LinkerGraph,
    source_index: u32,
    part_index: u32,
    ref: Ref,
    use_count: u32,
    source_index_to_import_from: Index,
) !void {
    if (use_count == 0) return;

    var parts_list = g.ast.items(.parts)[source_index].slice();
    var part: *Part = &parts_list[part_index];

    // Mark this symbol as used by this part

    var uses = &part.symbol_uses;
    var uses_entry = uses.getOrPut(g.allocator, ref) catch unreachable;

    if (!uses_entry.found_existing) {
        uses_entry.value_ptr.* = .{ .count_estimate = use_count };
    } else {
        uses_entry.value_ptr.count_estimate += use_count;
    }

    const exports_ref = g.ast.items(.exports_ref)[source_index];
    const module_ref = g.ast.items(.module_ref)[source_index];
    if (!exports_ref.isNull() and ref.eql(exports_ref)) {
        g.ast.items(.flags)[source_index].uses_exports_ref = true;
    }

    if (!module_ref.isNull() and ref.eql(module_ref)) {
        g.ast.items(.flags)[source_index].uses_module_ref = true;
    }

    // null ref shouldn't be there.
    bun.assert(!ref.isEmpty());

    // Track that this specific symbol was imported
    if (source_index_to_import_from.get() != source_index) {
        const imports_to_bind = &g.meta.items(.imports_to_bind)[source_index];
        try imports_to_bind.put(g.allocator, ref, .{
            .data = .{
                .source_index = source_index_to_import_from,
                .import_ref = ref,
            },
        });
    }

    // Pull in all parts that declare this symbol
    var dependencies = &part.dependencies;
    const part_ids = g.topLevelSymbolToParts(source_index_to_import_from.get(), ref);
    const new_dependencies = try dependencies.writableSlice(g.allocator, part_ids.len);
    for (part_ids, new_dependencies) |part_id, *dependency| {
        dependency.* = .{
            .source_index = source_index_to_import_from,
            .part_index = @as(u32, @truncate(part_id)),
        };
    }
}

pub fn topLevelSymbolToParts(g: *LinkerGraph, id: u32, ref: Ref) []u32 {
    if (g.meta.items(.top_level_symbol_to_parts_overlay)[id].get(ref)) |overlay| {
        return overlay.slice();
    }

    if (g.ast.items(.top_level_symbols_to_parts)[id].get(ref)) |list| {
        return list.slice();
    }

    return &.{};
}
pub fn load(
    this: *LinkerGraph,
    entry_points: []const Index,
    sources: []const Logger.Source,
    server_component_boundaries: ServerComponentBoundary.List,
    dynamic_import_entry_points: []const Index.Int,
) !void {
    const scb = server_component_boundaries.slice();
    try this.files.setCapacity(this.allocator, sources.len);
    this.files.zero();
    this.files_live = try BitSet.initEmpty(
        this.allocator,
        sources.len,
    );
    this.files.len = sources.len;
    var files = this.files.slice();

    var entry_point_kinds = files.items(.entry_point_kind);
    {
        const kinds = std.mem.sliceAsBytes(entry_point_kinds);
        @memset(kinds, 0);
    }

    // Setup entry points
    {
        try this.entry_points.setCapacity(this.allocator, entry_points.len + server_component_boundaries.list.len + dynamic_import_entry_points.len);
        this.entry_points.len = entry_points.len;
        const source_indices = this.entry_points.items(.source_index);

        const path_strings: []bun.PathString = this.entry_points.items(.output_path);
        {
            const output_was_auto_generated = std.mem.sliceAsBytes(this.entry_points.items(.output_path_was_auto_generated));
            @memset(output_was_auto_generated, 0);
        }

        for (entry_points, path_strings, source_indices) |i, *path_string, *source_index| {
            const source = sources[i.get()];
            if (comptime Environment.allow_assert) {
                bun.assert(source.index.get() == i.get());
            }
            entry_point_kinds[source.index.get()] = EntryPoint.Kind.user_specified;
            path_string.* = bun.PathString.init(source.path.text);
            source_index.* = source.index.get();
        }

        for (dynamic_import_entry_points) |id| {
            bun.assert(this.code_splitting); // this should never be a thing without code splitting

            if (entry_point_kinds[id] != .none) {
                // You could dynamic import a file that is already an entry point
                continue;
            }

            const source = &sources[id];
            entry_point_kinds[id] = EntryPoint.Kind.dynamic_import;

            this.entry_points.appendAssumeCapacity(.{
                .source_index = id,
                .output_path = bun.PathString.init(source.path.text),
                .output_path_was_auto_generated = true,
            });
        }

        var import_records_list: []ImportRecord.List = this.ast.items(.import_records);
        try this.meta.setCapacity(this.allocator, import_records_list.len);
        this.meta.len = this.ast.len;
        this.meta.zero();

        if (scb.list.len > 0) {
            this.is_scb_bitset = BitSet.initEmpty(this.allocator, this.files.len) catch unreachable;

            // Index all SCBs into the bitset. This is needed so chunking
            // can track the chunks that SCBs belong to.
            for (scb.list.items(.use_directive), scb.list.items(.source_index), scb.list.items(.reference_source_index)) |use, original_id, ref_id| {
                switch (use) {
                    .none => {},
                    .client => {
                        this.is_scb_bitset.set(original_id);
                        this.is_scb_bitset.set(ref_id);
                    },
                    .server => {
                        bun.todoPanic(@src(), "um", .{});
                    },
                }
            }

            // For client components, the import record index currently points to the original source index, instead of the reference source index.
            for (this.reachable_files) |source_id| {
                for (import_records_list[source_id.get()].slice()) |*import_record| {
                    if (import_record.source_index.isValid() and this.is_scb_bitset.isSet(import_record.source_index.get())) {
                        import_record.source_index = Index.init(
                            scb.getReferenceSourceIndex(import_record.source_index.get()) orelse
                                // If this gets hit, might be fine to switch this to `orelse continue`
                                // not confident in this assertion
                                Output.panic("Missing SCB boundary for file #{d}", .{import_record.source_index.get()}),
                        );
                        bun.assert(import_record.source_index.isValid()); // did not generate
                    }
                }
            }
        } else {
            this.is_scb_bitset = .{};
        }
    }

    // Setup files
    {
        var stable_source_indices = try this.allocator.alloc(Index, sources.len + 1);

        // set it to max value so that if we access an invalid one, it crashes
        @memset(std.mem.sliceAsBytes(stable_source_indices), 255);

        for (this.reachable_files, 0..) |source_index, i| {
            stable_source_indices[source_index.get()] = Index.source(i);
        }

        @memset(
            files.items(.distance_from_entry_point),
            (LinkerGraph.File{}).distance_from_entry_point,
        );
        this.stable_source_indices = @as([]const u32, @ptrCast(stable_source_indices));
    }

    {
        var input_symbols = js_ast.Symbol.Map.initList(js_ast.Symbol.NestedList.init(this.ast.items(.symbols)));
        var symbols = input_symbols.symbols_for_source.clone(this.allocator) catch bun.outOfMemory();
        for (symbols.slice(), input_symbols.symbols_for_source.slice()) |*dest, src| {
            dest.* = src.clone(this.allocator) catch bun.outOfMemory();
        }
        this.symbols = js_ast.Symbol.Map.initList(symbols);
    }

    // TODO: const_values
    // {
    //     var const_values = this.const_values;
    //     var count: usize = 0;

    //     for (this.ast.items(.const_values)) |const_value| {
    //         count += const_value.count();
    //     }

    //     if (count > 0) {
    //         try const_values.ensureTotalCapacity(this.allocator, count);
    //         for (this.ast.items(.const_values)) |const_value| {
    //             for (const_value.keys(), const_value.values()) |key, value| {
    //                 const_values.putAssumeCapacityNoClobber(key, value);
    //             }
    //         }
    //     }

    //     this.const_values = const_values;
    // }

    {
        var count: usize = 0;
        for (this.ast.items(.ts_enums)) |ts_enums| {
            count += ts_enums.count();
        }
        if (count > 0) {
            try this.ts_enums.ensureTotalCapacity(this.allocator, count);
            for (this.ast.items(.ts_enums)) |ts_enums| {
                for (ts_enums.keys(), ts_enums.values()) |key, value| {
                    this.ts_enums.putAssumeCapacityNoClobber(key, value);
                }
            }
        }
    }

    const src_named_exports: []js_ast.Ast.NamedExports = this.ast.items(.named_exports);
    const dest_resolved_exports: []ResolvedExports = this.meta.items(.resolved_exports);
    for (src_named_exports, dest_resolved_exports, 0..) |src, *dest, source_index| {
        var resolved = ResolvedExports{};
        resolved.ensureTotalCapacity(this.allocator, src.count()) catch unreachable;
        for (src.keys(), src.values()) |key, value| {
            resolved.putAssumeCapacityNoClobber(key, .{ .data = .{
                .import_ref = value.ref,
                .name_loc = value.alias_loc,
                .source_index = Index.source(source_index),
            } });
        }
        dest.* = resolved;
    }
}
pub const File = struct {
    entry_bits: AutoBitSet = undefined,

    input_file: Index = Index.source(0),

    /// The minimum number of links in the module graph to get from an entry point
    /// to this file
    distance_from_entry_point: u32 = std.math.maxInt(u32),

    /// This file is an entry point if and only if this is not ".none".
    /// Note that dynamically-imported files are allowed to also be specified by
    /// the user as top-level entry points, so some dynamically-imported files
    /// may be ".user_specified" instead of ".dynamic_import".
    entry_point_kind: EntryPoint.Kind = .none,

    /// If "entry_point_kind" is not ".none", this is the index of the
    /// corresponding entry point chunk.
    ///
    /// This is also initialized for files that are a SCB's generated
    /// reference, pointing to its destination. This forms a lookup map from
    /// a Source.Index to its output path in breakOutputIntoPieces
    entry_point_chunk_index: u32 = std.math.maxInt(u32),

    line_offset_table: bun.sourcemap.LineOffsetTable.List = .empty,
    quoted_source_contents: string = "",

    pub fn isEntryPoint(this: *const File) bool {
        return this.entry_point_kind.isEntryPoint();
    }

    pub fn isUserSpecifiedEntryPoint(this: *const File) bool {
        return this.entry_point_kind.isUserSpecifiedEntryPoint();
    }

    pub const List = MultiArrayList(File);
};

const LinkerGraph = @This();
const debug = Output.scoped(.LinkerGraph, false);
const bun = @import("bun");
const std = @import("std");
const AutoBitSet = bun.bit_set.AutoBitSet;
const BabyList = bun.BabyList;
const BitSet = bun.bit_set.DynamicBitSetUnmanaged;
const EntryPoint = bundler.EntryPoint;
const Environment = bun.Environment;
const ImportRecord = bun.ImportRecord;
const Index = bundler.Index;
const JSAst = js_ast.BundledAst;
const JSMeta = bundler.JSMeta;
const Logger = bun.logger;
const MultiArrayList = bun.MultiArrayList;
const Output = bun.Output;
const Part = js_ast.Part;
const Ref = js_ast.Ref;
const ResolvedExports = bundler.ResolvedExports;
const ServerComponentBoundary = js_ast.ServerComponentBoundary;
const Symbol = js_ast.Symbol;
const TopLevelSymbolToParts = js_ast.Ast.TopLevelSymbolToParts;
const bundler = bun.bundle_v2;
const js_ast = bun.JSAst;
const string = []const u8;
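The comments on `reachable_files` and `stable_source_indices` above explain why the linker keys its ordering off a deterministic index rather than raw source indices, which are effectively random. A small standalone sketch of that mapping follows; the function and test names are hypothetical, and it assumes reachability has already been computed by a DFS over the entry points.

// Illustrative sketch only: assign deterministic "stable" indices in visit
// order, leaving unreachable files at a sentinel value (mirroring the
// @memset(..., 255) trick in LinkerGraph.load).
const std = @import("std");

fn assignStableIndices(allocator: std.mem.Allocator, reachable: []const u32, total_files: usize) ![]u32 {
    const stable = try allocator.alloc(u32, total_files);
    @memset(stable, std.math.maxInt(u32));
    for (reachable, 0..) |source_index, i| {
        stable[source_index] = @intCast(i);
    }
    return stable;
}

test "stable indices follow traversal order, not source index order" {
    const gpa = std.testing.allocator;
    // Suppose a DFS over entry points visited sources 7, 2, then 5.
    const stable = try assignStableIndices(gpa, &.{ 7, 2, 5 }, 10);
    defer gpa.free(stable);
    try std.testing.expectEqual(@as(u32, 0), stable[7]);
    try std.testing.expectEqual(@as(u32, 1), stable[2]);
    try std.testing.expectEqual(@as(u32, 2), stable[5]);
}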
src/bundler/ParseTask.zig (normal file, 1443 lines)
File diff suppressed because it is too large.

src/bundler/linker.zig (normal file, 10579 lines)
File diff suppressed because it is too large.
@@ -13,7 +13,7 @@ const Progress = bun.Progress;
const logger = bun.logger;

const js_ast = bun.JSAst;
const linker = @import("../linker.zig");
const linker = @import("../old_linker.zig");

const Command = @import("../cli.zig").Command;
@@ -630,7 +630,7 @@ const std = @import("std");
const logger = bun.logger;

const js_ast = bun.JSAst;
const linker = @import("../linker.zig");
const linker = @import("../old_linker.zig");

const BundleV2 = bun.bundle_v2.BundleV2;
const Command = bun.CLI.Command;
@@ -16,7 +16,7 @@ const js_parser = bun.js_parser;
const JSON = bun.JSON;
const js_printer = bun.js_printer;
const js_ast = bun.JSAst;
const linker = @import("linker.zig");
const linker = @import("old_linker.zig");
const Ref = @import("ast/base.zig").Ref;

const Fs = @import("fs.zig");