From 7717693c707f3db5ef205d12b0722d7a5e730763 Mon Sep 17 00:00:00 2001 From: "taylor.fish" Date: Fri, 22 Aug 2025 23:04:58 -0700 Subject: [PATCH] Dev server refactoring, part 1 (mainly `IncrementalGraph`) (#22010) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * `IncrementalGraph(.client).File` packs its fields in a specific way to save space, but it makes the struct hard to use and error-prone (e.g., untagged unions with tags stored in a separate `flags` struct). This PR changes `File` to have a human-readable layout, but adds methods to convert it to and from `File.Packed`, a packed version with the same space efficiency as before. * Reduce the need to pass the dev allocator to functions (e.g., `deinit`) by storing it as a struct field via the new `DevAllocator` type. This type has no overhead in release builds, or when `AllocationScope` is disabled. * Use owned pointers in `PackedMap`. * Use `bun.ptr.Shared` for `PackedMap` instead of the old `bun.ptr.RefPtr`. * Add `bun.ptr.ScopedOwned`, which is like `bun.ptr.Owned`, but can store an `AllocationScope`. No overhead in release builds or when `AllocationScope` is disabled. * Reduce redundant allocators in `BundleV2`. * Add owned pointer conversions to `MutableString`. * Make `AllocationScope` behave like a pointer, so it can be moved without invalidating allocations. This eliminates the need for self-references. * Change memory cost algorithm so it doesn't rely on “dedupe bits”. These bits used to take advantage of padding but there is now no padding in `PackedMap`. * Replace `VoidFieldTypes` with `useAllFields`; this eliminates the need for `voidFieldTypesDiscardHelper`. 
(For internal tracking: fixes STAB-1035, STAB-1036, STAB-1037, STAB-1038, STAB-1039, STAB-1040, STAB-1041, STAB-1042, STAB-1043, STAB-1044, STAB-1045) --------- Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: Jarred Sumner Co-authored-by: Claude Bot Co-authored-by: Claude --- cmake/sources/ZigSources.txt | 2 + src/allocators/AllocationScope.zig | 208 ++-- src/bake/DevServer.zig | 341 +++---- src/bake/DevServer/Assets.zig | 6 +- src/bake/DevServer/DevAllocator.zig | 19 + src/bake/DevServer/DirectoryWatchStore.zig | 14 +- src/bake/DevServer/ErrorReportRequest.zig | 12 +- src/bake/DevServer/HmrSocket.zig | 10 +- src/bake/DevServer/HotReloadEvent.zig | 17 +- src/bake/DevServer/IncrementalGraph.zig | 915 +++++++++--------- src/bake/DevServer/PackedMap.zig | 174 ++-- src/bake/DevServer/SerializedFailure.zig | 12 +- src/bake/DevServer/SourceMapStore.zig | 135 +-- src/bake/DevServer/memory_cost.zig | 25 +- src/bun.js/api/server.zig | 2 +- src/bundler/Chunk.zig | 2 +- src/bundler/Graph.zig | 6 - src/bundler/LinkerContext.zig | 152 +-- src/bundler/LinkerGraph.zig | 3 +- src/bundler/ThreadPool.zig | 4 +- src/bundler/bundle_v2.zig | 279 +++--- src/bundler/linker_context/computeChunks.zig | 34 +- .../computeCrossChunkDependencies.zig | 42 +- .../findAllImportedPartsInJSOrder.zig | 8 +- .../findImportedFilesInCSSOrder.zig | 2 +- .../generateChunksInParallel.zig | 34 +- .../generateCodeForFileInChunkJS.zig | 1 - .../generateCodeForLazyExport.zig | 24 +- .../generateCompileResultForHtmlChunk.zig | 2 +- .../generateCompileResultForJSChunk.zig | 12 +- .../linker_context/postProcessJSChunk.zig | 13 +- .../linker_context/prepareCssAstsForChunk.zig | 2 +- .../linker_context/scanImportsAndExports.zig | 18 +- src/env.zig | 4 +- src/js_printer.zig | 2 - src/meta.zig | 4 +- src/ptr.zig | 1 + src/ptr/owned.zig | 51 +- src/ptr/owned/maybe.zig | 12 +- src/ptr/owned/scoped.zig | 148 +++ src/ptr/shared.zig | 6 +- src/safety/alloc.zig | 2 +- 
src/string/MutableString.zig | 25 +- test/internal/ban-limits.json | 4 +- 44 files changed, 1484 insertions(+), 1305 deletions(-) create mode 100644 src/bake/DevServer/DevAllocator.zig create mode 100644 src/ptr/owned/scoped.zig diff --git a/cmake/sources/ZigSources.txt b/cmake/sources/ZigSources.txt index e106f04854..92eef83ab5 100644 --- a/cmake/sources/ZigSources.txt +++ b/cmake/sources/ZigSources.txt @@ -64,6 +64,7 @@ src/async/windows_event_loop.zig src/bake.zig src/bake/DevServer.zig src/bake/DevServer/Assets.zig +src/bake/DevServer/DevAllocator.zig src/bake/DevServer/DirectoryWatchStore.zig src/bake/DevServer/ErrorReportRequest.zig src/bake/DevServer/HmrSocket.zig @@ -795,6 +796,7 @@ src/ptr/CowSlice.zig src/ptr/meta.zig src/ptr/owned.zig src/ptr/owned/maybe.zig +src/ptr/owned/scoped.zig src/ptr/ref_count.zig src/ptr/shared.zig src/ptr/tagged_pointer.zig diff --git a/src/allocators/AllocationScope.zig b/src/allocators/AllocationScope.zig index a37e3fa555..e8244c55c2 100644 --- a/src/allocators/AllocationScope.zig +++ b/src/allocators/AllocationScope.zig @@ -1,19 +1,24 @@ //! AllocationScope wraps another allocator, providing leak and invalid free assertions. //! It also allows measuring how much memory a scope has allocated. +//! +//! AllocationScope is conceptually a pointer, so it can be moved without invalidating allocations. +//! Therefore, it isn't necessary to pass an AllocationScope by pointer. -const AllocationScope = @This(); +const Self = @This(); pub const enabled = bun.Environment.enableAllocScopes; -parent: Allocator, -state: if (enabled) struct { +internal_state: if (enabled) *State else Allocator, + +const State = struct { + parent: Allocator, mutex: bun.Mutex, total_memory_allocated: usize, allocations: std.AutoHashMapUnmanaged([*]const u8, Allocation), frees: std.AutoArrayHashMapUnmanaged([*]const u8, Free), /// Once `frees` fills up, entries are overwritten from start to end. 
free_overwrite_index: std.math.IntFittingRange(0, max_free_tracking + 1), -} else void, +}; pub const max_free_tracking = 2048 - 1; @@ -36,55 +41,72 @@ pub const Extra = union(enum) { const RefCountDebugData = @import("../ptr/ref_count.zig").DebugData; }; -pub fn init(parent: Allocator) AllocationScope { - return if (comptime enabled) - .{ - .parent = parent, - .state = .{ - .total_memory_allocated = 0, - .allocations = .empty, - .frees = .empty, - .free_overwrite_index = 0, - .mutex = .{}, - }, - } +pub fn init(parent_alloc: Allocator) Self { + const state = if (comptime enabled) + bun.new(State, .{ + .parent = parent_alloc, + .total_memory_allocated = 0, + .allocations = .empty, + .frees = .empty, + .free_overwrite_index = 0, + .mutex = .{}, + }) else - .{ .parent = parent, .state = {} }; + parent_alloc; + return .{ .internal_state = state }; } -pub fn deinit(scope: *AllocationScope) void { - if (comptime enabled) { - scope.state.mutex.lock(); - defer scope.state.allocations.deinit(scope.parent); - const count = scope.state.allocations.count(); - if (count == 0) return; - Output.errGeneric("Allocation scope leaked {d} allocations ({})", .{ - count, - bun.fmt.size(scope.state.total_memory_allocated, .{}), - }); - var it = scope.state.allocations.iterator(); - var n: usize = 0; - while (it.next()) |entry| { - Output.prettyErrorln("- {any}, len {d}, at:", .{ entry.key_ptr.*, entry.value_ptr.len }); - bun.crash_handler.dumpStackTrace(entry.value_ptr.allocated_at.trace(), trace_limits); +pub fn deinit(scope: Self) void { + if (comptime !enabled) return; - switch (entry.value_ptr.extra) { - .none => {}, - inline else => |t| t.onAllocationLeak(@constCast(entry.key_ptr.*[0..entry.value_ptr.len])), - } + const state = scope.internal_state; + state.mutex.lock(); + defer bun.destroy(state); + defer state.allocations.deinit(state.parent); + const count = state.allocations.count(); + if (count == 0) return; + Output.errGeneric("Allocation scope leaked {d} allocations ({})", 
.{ + count, + bun.fmt.size(state.total_memory_allocated, .{}), + }); + var it = state.allocations.iterator(); + var n: usize = 0; + while (it.next()) |entry| { + Output.prettyErrorln("- {any}, len {d}, at:", .{ entry.key_ptr.*, entry.value_ptr.len }); + bun.crash_handler.dumpStackTrace(entry.value_ptr.allocated_at.trace(), trace_limits); - n += 1; - if (n >= 8) { - Output.prettyErrorln("(only showing first 10 leaks)", .{}); - break; - } + switch (entry.value_ptr.extra) { + .none => {}, + inline else => |t| t.onAllocationLeak(@constCast(entry.key_ptr.*[0..entry.value_ptr.len])), + } + + n += 1; + if (n >= 8) { + Output.prettyErrorln("(only showing first 10 leaks)", .{}); + break; } - Output.panic("Allocation scope leaked {}", .{bun.fmt.size(scope.state.total_memory_allocated, .{})}); } + Output.panic("Allocation scope leaked {}", .{bun.fmt.size(state.total_memory_allocated, .{})}); } -pub fn allocator(scope: *AllocationScope) Allocator { - return if (comptime enabled) .{ .ptr = scope, .vtable = &vtable } else scope.parent; +pub fn allocator(scope: Self) Allocator { + const state = scope.internal_state; + return if (comptime enabled) .{ .ptr = state, .vtable = &vtable } else state; +} + +pub fn parent(scope: Self) Allocator { + const state = scope.internal_state; + return if (comptime enabled) state.parent else state; +} + +pub fn total(self: Self) usize { + if (comptime !enabled) @compileError("AllocationScope must be enabled"); + return self.internal_state.total_memory_allocated; +} + +pub fn numAllocations(self: Self) usize { + if (comptime !enabled) @compileError("AllocationScope must be enabled"); + return self.internal_state.allocations.count(); } const vtable: Allocator.VTable = .{ @@ -107,60 +129,61 @@ pub const free_trace_limits: bun.crash_handler.WriteStackTraceLimits = .{ }; fn alloc(ctx: *anyopaque, len: usize, alignment: std.mem.Alignment, ret_addr: usize) ?[*]u8 { - const scope: *AllocationScope = @ptrCast(@alignCast(ctx)); - scope.state.mutex.lock(); - 
defer scope.state.mutex.unlock(); - scope.state.allocations.ensureUnusedCapacity(scope.parent, 1) catch + const state: *State = @ptrCast(@alignCast(ctx)); + + state.mutex.lock(); + defer state.mutex.unlock(); + state.allocations.ensureUnusedCapacity(state.parent, 1) catch return null; - const result = scope.parent.vtable.alloc(scope.parent.ptr, len, alignment, ret_addr) orelse + const result = state.parent.vtable.alloc(state.parent.ptr, len, alignment, ret_addr) orelse return null; - scope.trackAllocationAssumeCapacity(result[0..len], ret_addr, .none); + trackAllocationAssumeCapacity(state, result[0..len], ret_addr, .none); return result; } -fn trackAllocationAssumeCapacity(scope: *AllocationScope, buf: []const u8, ret_addr: usize, extra: Extra) void { +fn trackAllocationAssumeCapacity(state: *State, buf: []const u8, ret_addr: usize, extra: Extra) void { const trace = StoredTrace.capture(ret_addr); - scope.state.allocations.putAssumeCapacityNoClobber(buf.ptr, .{ + state.allocations.putAssumeCapacityNoClobber(buf.ptr, .{ .allocated_at = trace, .len = buf.len, .extra = extra, }); - scope.state.total_memory_allocated += buf.len; + state.total_memory_allocated += buf.len; } fn free(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void { - const scope: *AllocationScope = @ptrCast(@alignCast(ctx)); - scope.state.mutex.lock(); - defer scope.state.mutex.unlock(); - const invalid = scope.trackFreeAssumeLocked(buf, ret_addr); + const state: *State = @ptrCast(@alignCast(ctx)); + state.mutex.lock(); + defer state.mutex.unlock(); + const invalid = trackFreeAssumeLocked(state, buf, ret_addr); - scope.parent.vtable.free(scope.parent.ptr, buf, alignment, ret_addr); + state.parent.vtable.free(state.parent.ptr, buf, alignment, ret_addr); // If asan did not catch the free, panic now. 
if (invalid) @panic("Invalid free"); } -fn trackFreeAssumeLocked(scope: *AllocationScope, buf: []const u8, ret_addr: usize) bool { - if (scope.state.allocations.fetchRemove(buf.ptr)) |entry| { - scope.state.total_memory_allocated -= entry.value.len; +fn trackFreeAssumeLocked(state: *State, buf: []const u8, ret_addr: usize) bool { + if (state.allocations.fetchRemove(buf.ptr)) |entry| { + state.total_memory_allocated -= entry.value.len; free_entry: { - scope.state.frees.put(scope.parent, buf.ptr, .{ + state.frees.put(state.parent, buf.ptr, .{ .allocated_at = entry.value.allocated_at, .freed_at = StoredTrace.capture(ret_addr), }) catch break :free_entry; // Store a limited amount of free entries - if (scope.state.frees.count() >= max_free_tracking) { - const i = scope.state.free_overwrite_index; - scope.state.free_overwrite_index = @mod(scope.state.free_overwrite_index + 1, max_free_tracking); - scope.state.frees.swapRemoveAt(i); + if (state.frees.count() >= max_free_tracking) { + const i = state.free_overwrite_index; + state.free_overwrite_index = @mod(state.free_overwrite_index + 1, max_free_tracking); + state.frees.swapRemoveAt(i); } } return false; } else { bun.Output.errGeneric("Invalid free, pointer {any}, len {d}", .{ buf.ptr, buf.len }); - if (scope.state.frees.get(buf.ptr)) |free_entry_const| { + if (state.frees.get(buf.ptr)) |free_entry_const| { var free_entry = free_entry_const; bun.Output.printErrorln("Pointer allocated here:", .{}); bun.crash_handler.dumpStackTrace(free_entry.allocated_at.trace(), trace_limits); @@ -176,27 +199,29 @@ fn trackFreeAssumeLocked(scope: *AllocationScope, buf: []const u8, ret_addr: usi } } -pub fn assertOwned(scope: *AllocationScope, ptr: anytype) void { +pub fn assertOwned(scope: Self, ptr: anytype) void { if (comptime !enabled) return; const cast_ptr: [*]const u8 = @ptrCast(switch (@typeInfo(@TypeOf(ptr)).pointer.size) { .c, .one, .many => ptr, .slice => if (ptr.len > 0) ptr.ptr else return, }); - scope.state.mutex.lock(); - 
defer scope.state.mutex.unlock(); - _ = scope.state.allocations.getPtr(cast_ptr) orelse + const state = scope.internal_state; + state.mutex.lock(); + defer state.mutex.unlock(); + _ = state.allocations.getPtr(cast_ptr) orelse @panic("this pointer was not owned by the allocation scope"); } -pub fn assertUnowned(scope: *AllocationScope, ptr: anytype) void { +pub fn assertUnowned(scope: Self, ptr: anytype) void { if (comptime !enabled) return; const cast_ptr: [*]const u8 = @ptrCast(switch (@typeInfo(@TypeOf(ptr)).pointer.size) { .c, .one, .many => ptr, .slice => if (ptr.len > 0) ptr.ptr else return, }); - scope.state.mutex.lock(); - defer scope.state.mutex.unlock(); - if (scope.state.allocations.getPtr(cast_ptr)) |owned| { + const state = scope.internal_state; + state.mutex.lock(); + defer state.mutex.unlock(); + if (state.allocations.getPtr(cast_ptr)) |owned| { Output.warn("Owned pointer allocated here:"); bun.crash_handler.dumpStackTrace(owned.allocated_at.trace(), trace_limits, trace_limits); } @@ -205,17 +230,18 @@ pub fn assertUnowned(scope: *AllocationScope, ptr: anytype) void { /// Track an arbitrary pointer. Extra data can be stored in the allocation, /// which will be printed when a leak is detected. 
-pub fn trackExternalAllocation(scope: *AllocationScope, ptr: []const u8, ret_addr: ?usize, extra: Extra) void { +pub fn trackExternalAllocation(scope: Self, ptr: []const u8, ret_addr: ?usize, extra: Extra) void { if (comptime !enabled) return; - scope.state.mutex.lock(); - defer scope.state.mutex.unlock(); - scope.state.allocations.ensureUnusedCapacity(scope.parent, 1) catch bun.outOfMemory(); - trackAllocationAssumeCapacity(scope, ptr, ptr.len, ret_addr orelse @returnAddress(), extra); + const state = scope.internal_state; + state.mutex.lock(); + defer state.mutex.unlock(); + state.allocations.ensureUnusedCapacity(state.parent, 1) catch bun.outOfMemory(); + trackAllocationAssumeCapacity(state, ptr, ptr.len, ret_addr orelse @returnAddress(), extra); } /// Call when the pointer from `trackExternalAllocation` is freed. /// Returns true if the free was invalid. -pub fn trackExternalFree(scope: *AllocationScope, slice: anytype, ret_addr: ?usize) bool { +pub fn trackExternalFree(scope: Self, slice: anytype, ret_addr: ?usize) bool { if (comptime !enabled) return false; const ptr: []const u8 = switch (@typeInfo(@TypeOf(slice))) { .pointer => |p| switch (p.size) { @@ -231,23 +257,25 @@ pub fn trackExternalFree(scope: *AllocationScope, slice: anytype, ret_addr: ?usi }; // Empty slice usually means invalid pointer if (ptr.len == 0) return false; - scope.state.mutex.lock(); - defer scope.state.mutex.unlock(); - return trackFreeAssumeLocked(scope, ptr, ret_addr orelse @returnAddress()); + const state = scope.internal_state; + state.mutex.lock(); + defer state.mutex.unlock(); + return trackFreeAssumeLocked(state, ptr, ret_addr orelse @returnAddress()); } -pub fn setPointerExtra(scope: *AllocationScope, ptr: *anyopaque, extra: Extra) void { +pub fn setPointerExtra(scope: Self, ptr: *anyopaque, extra: Extra) void { if (comptime !enabled) return; - scope.state.mutex.lock(); - defer scope.state.mutex.unlock(); - const allocation = scope.state.allocations.getPtr(ptr) orelse + const 
state = scope.internal_state; + state.mutex.lock(); + defer state.mutex.unlock(); + const allocation = state.allocations.getPtr(ptr) orelse @panic("Pointer not owned by allocation scope"); allocation.extra = extra; } -pub inline fn downcast(a: Allocator) ?*AllocationScope { +pub inline fn downcast(a: Allocator) ?Self { return if (enabled and a.vtable == &vtable) - @ptrCast(@alignCast(a.ptr)) + .{ .internal_state = @ptrCast(@alignCast(a.ptr)) } else null; } diff --git a/src/bake/DevServer.zig b/src/bake/DevServer.zig index 3f5f56a60b..54b7de4381 100644 --- a/src/bake/DevServer.zig +++ b/src/bake/DevServer.zig @@ -39,10 +39,7 @@ magic: if (Environment.isDebug) enum(u128) { valid = 0x1ffd363f121f5c12 } else enum { valid } = .valid, -/// Used for all server-wide allocations. In debug, is is backed by a scope. Thread-safe. -allocator: Allocator, -/// All methods are no-op in release builds. -allocation_scope: AllocationScope, +allocation_scope: if (AllocationScope.enabled) AllocationScope else void, /// Absolute path to project root directory. For the HMR /// runtime, its module IDs are strings relative to this. root: []const u8, @@ -254,7 +251,6 @@ pub const RouteBundle = @import("./DevServer/RouteBundle.zig"); /// DevServer is stored on the heap, storing its allocator. 
pub fn init(options: Options) bun.JSOOM!*DevServer { - const unchecked_allocator = bun.default_allocator; bun.analytics.Features.dev_server +|= 1; var dump_dir = if (bun.FeatureFlags.bake_debugging_features) @@ -271,10 +267,8 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { const separate_ssr_graph = if (options.framework.server_components) |sc| sc.separate_ssr_graph else false; const dev = bun.new(DevServer, .{ - .allocator = undefined, - // 'init' is a no-op in release - .allocation_scope = AllocationScope.init(unchecked_allocator), - + .allocation_scope = if (comptime AllocationScope.enabled) + AllocationScope.init(bun.default_allocator), .root = options.root, .vm = options.vm, .server = null, @@ -335,10 +329,9 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { .deferred_request_pool = undefined, }); errdefer bun.destroy(dev); - const allocator = dev.allocation_scope.allocator(); - dev.allocator = allocator; - dev.log = .init(allocator); - dev.deferred_request_pool = .init(allocator); + const alloc = dev.allocator(); + dev.log = .init(alloc); + dev.deferred_request_pool = .init(alloc); const global = dev.vm.global; @@ -398,9 +391,9 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { return global.throwValue(try dev.log.toJSAggregateError(global, bun.String.static("Framework is missing required files!"))); }; - errdefer dev.route_lookup.clearAndFree(allocator); - errdefer dev.client_graph.deinit(allocator); - errdefer dev.server_graph.deinit(allocator); + errdefer dev.route_lookup.clearAndFree(alloc); + errdefer dev.client_graph.deinit(); + errdefer dev.server_graph.deinit(); dev.configuration_hash_key = hash_key: { var hash = std.hash.Wyhash.init(128); @@ -487,8 +480,8 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { // Initialize FrameworkRouter dev.router = router: { - var types = try std.ArrayListUnmanaged(FrameworkRouter.Type).initCapacity(allocator, options.framework.file_system_router_types.len); - errdefer types.deinit(allocator); + 
var types = try std.ArrayListUnmanaged(FrameworkRouter.Type).initCapacity(alloc, options.framework.file_system_router_types.len); + errdefer types.deinit(alloc); for (options.framework.file_system_router_types, 0..) |fsr, i| { const buf = bun.path_buffer_pool.get(); @@ -499,7 +492,7 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { const server_file = try dev.server_graph.insertStaleExtra(fsr.entry_server, false, true); - try types.append(allocator, .{ + try types.append(alloc, .{ .abs_root = bun.strings.withoutTrailingSlash(entry.abs_path), .prefix = fsr.prefix, .ignore_underscores = fsr.ignore_underscores, @@ -515,13 +508,13 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { .server_file_string = .empty, }); - try dev.route_lookup.put(allocator, server_file, .{ + try dev.route_lookup.put(alloc, server_file, .{ .route_index = FrameworkRouter.Route.Index.init(@intCast(i)), .should_recurse_when_visiting = true, }); } - break :router try FrameworkRouter.initEmpty(dev.root, types.items, allocator); + break :router try FrameworkRouter.initEmpty(dev.root, types.items, alloc); }; // TODO: move scanning to be one tick after server startup. 
this way the @@ -541,11 +534,9 @@ pub fn deinit(dev: *DevServer) void { debug.log("deinit", .{}); dev_server_deinit_count_for_testing +|= 1; - const allocator = dev.allocator; - const discard = voidFieldTypeDiscardHelper; - _ = VoidFieldTypes(DevServer){ + const alloc = dev.allocator(); + useAllFields(DevServer, .{ .allocation_scope = {}, // deinit at end - .allocator = {}, .assume_perfect_incremental_bundling = {}, .bundler_options = {}, .bundles_since_last_error = {}, @@ -573,7 +564,7 @@ pub fn deinit(dev: *DevServer) void { if (s.underlying) |websocket| websocket.close(); } - dev.active_websocket_connections.deinit(allocator); + dev.active_websocket_connections.deinit(alloc); }, .memory_visualizer_timer = if (dev.memory_visualizer_timer.state == .ACTIVE) @@ -587,52 +578,52 @@ pub fn deinit(dev: *DevServer) void { .has_pre_crash_handler = if (dev.has_pre_crash_handler) bun.crash_handler.removePreCrashHandler(dev), .router = { - dev.router.deinit(allocator); + dev.router.deinit(alloc); }, .route_bundles = { for (dev.route_bundles.items) |*rb| { - rb.deinit(allocator); + rb.deinit(alloc); } - dev.route_bundles.deinit(allocator); + dev.route_bundles.deinit(alloc); }, - .server_graph = dev.server_graph.deinit(allocator), - .client_graph = dev.client_graph.deinit(allocator), - .assets = dev.assets.deinit(allocator), - .incremental_result = discard(VoidFieldTypes(IncrementalResult){ + .server_graph = dev.server_graph.deinit(), + .client_graph = dev.client_graph.deinit(), + .assets = dev.assets.deinit(alloc), + .incremental_result = useAllFields(IncrementalResult, .{ .had_adjusted_edges = {}, - .client_components_added = dev.incremental_result.client_components_added.deinit(allocator), - .framework_routes_affected = dev.incremental_result.framework_routes_affected.deinit(allocator), - .client_components_removed = dev.incremental_result.client_components_removed.deinit(allocator), - .failures_removed = dev.incremental_result.failures_removed.deinit(allocator), - 
.client_components_affected = dev.incremental_result.client_components_affected.deinit(allocator), - .failures_added = dev.incremental_result.failures_added.deinit(allocator), - .html_routes_soft_affected = dev.incremental_result.html_routes_soft_affected.deinit(allocator), - .html_routes_hard_affected = dev.incremental_result.html_routes_hard_affected.deinit(allocator), + .client_components_added = dev.incremental_result.client_components_added.deinit(alloc), + .framework_routes_affected = dev.incremental_result.framework_routes_affected.deinit(alloc), + .client_components_removed = dev.incremental_result.client_components_removed.deinit(alloc), + .failures_removed = dev.incremental_result.failures_removed.deinit(alloc), + .client_components_affected = dev.incremental_result.client_components_affected.deinit(alloc), + .failures_added = dev.incremental_result.failures_added.deinit(alloc), + .html_routes_soft_affected = dev.incremental_result.html_routes_soft_affected.deinit(alloc), + .html_routes_hard_affected = dev.incremental_result.html_routes_hard_affected.deinit(alloc), }), .has_tailwind_plugin_hack = if (dev.has_tailwind_plugin_hack) |*hack| { for (hack.keys()) |key| { - allocator.free(key); + alloc.free(key); } - hack.deinit(allocator); + hack.deinit(alloc); }, .directory_watchers = { // dev.directory_watchers.dependencies for (dev.directory_watchers.watches.keys()) |dir_name| { - allocator.free(dir_name); + alloc.free(dir_name); } for (dev.directory_watchers.dependencies.items) |watcher| { - allocator.free(watcher.specifier); + alloc.free(watcher.specifier); } - dev.directory_watchers.watches.deinit(allocator); - dev.directory_watchers.dependencies.deinit(allocator); - dev.directory_watchers.dependencies_free_list.deinit(allocator); + dev.directory_watchers.watches.deinit(alloc); + dev.directory_watchers.dependencies.deinit(alloc); + dev.directory_watchers.dependencies_free_list.deinit(alloc); }, - .html_router = dev.html_router.map.deinit(dev.allocator), + 
.html_router = dev.html_router.map.deinit(alloc), .bundling_failures = { for (dev.bundling_failures.keys()) |failure| { failure.deinit(dev); } - dev.bundling_failures.deinit(allocator); + dev.bundling_failures.deinit(alloc); }, .current_bundle = { if (dev.current_bundle) |_| { @@ -648,30 +639,30 @@ pub fn deinit(dev: *DevServer) void { defer request.data.deref(); r = request.next; } - dev.next_bundle.route_queue.deinit(allocator); + dev.next_bundle.route_queue.deinit(alloc); }, - .route_lookup = dev.route_lookup.deinit(allocator), + .route_lookup = dev.route_lookup.deinit(alloc), .source_maps = { for (dev.source_maps.entries.values()) |*value| { bun.assert(value.ref_count > 0); value.ref_count = 0; - value.deinit(dev); + value.deinit(); } - dev.source_maps.entries.deinit(allocator); + dev.source_maps.entries.deinit(alloc); if (dev.source_maps.weak_ref_sweep_timer.state == .ACTIVE) dev.vm.timer.remove(&dev.source_maps.weak_ref_sweep_timer); }, .watcher_atomics = for (&dev.watcher_atomics.events) |*event| { - event.dirs.deinit(dev.allocator); - event.files.deinit(dev.allocator); - event.extra_files.deinit(dev.allocator); + event.dirs.deinit(dev.allocator()); + event.files.deinit(dev.allocator()); + event.extra_files.deinit(dev.allocator()); }, .testing_batch_events = switch (dev.testing_batch_events) { .disabled => {}, .enabled => |*batch| { - batch.entry_points.deinit(allocator); + batch.entry_points.deinit(alloc); }, .enable_after_bundle => {}, }, @@ -681,11 +672,22 @@ pub fn deinit(dev: *DevServer) void { bun.debugAssert(dev.magic == .valid); dev.magic = undefined; }, - }; - dev.allocation_scope.deinit(); + }); + if (comptime AllocationScope.enabled) { + dev.allocation_scope.deinit(); + } bun.destroy(dev); } +pub fn allocator(dev: *const DevServer) Allocator { + return dev.dev_allocator().get(); +} + +pub fn dev_allocator(dev: *const DevServer) DevAllocator { + return .{ .maybe_scope = dev.allocation_scope }; +} + +pub const DevAllocator = 
@import("./DevServer/DevAllocator.zig"); pub const MemoryCost = @import("./DevServer/memory_cost.zig"); pub const memoryCost = MemoryCost.memoryCost; pub const memoryCostDetailed = MemoryCost.memoryCostDetailed; @@ -721,7 +723,7 @@ fn initServerRuntime(dev: *DevServer) void { /// Deferred one tick so that the server can be up faster fn scanInitialRoutes(dev: *DevServer) !void { try dev.router.scanAll( - dev.allocator, + dev.allocator(), &dev.server_transpiler.resolver, FrameworkRouter.InsertionContext.wrap(DevServer, dev), ); @@ -832,15 +834,15 @@ fn onJsRequest(dev: *DevServer, req: *Request, resp: AnyResponse) void { const source_id: SourceMapStore.SourceId = @bitCast(id); const entry = dev.source_maps.entries.getPtr(.init(id)) orelse return notFound(resp); - var arena = std.heap.ArenaAllocator.init(dev.allocator); + var arena = std.heap.ArenaAllocator.init(dev.allocator()); defer arena.deinit(); const json_bytes = entry.renderJSON( dev, arena.allocator(), source_id.kind, - dev.allocator, + dev.allocator(), ) catch bun.outOfMemory(); - const response = StaticRoute.initFromAnyBlob(&.fromOwnedSlice(dev.allocator, json_bytes), .{ + const response = StaticRoute.initFromAnyBlob(&.fromOwnedSlice(dev.allocator(), json_bytes), .{ .server = dev.server, .mime_type = &.json, }); @@ -958,7 +960,7 @@ fn ensureRouteIsBundled( sw: switch (dev.routeBundlePtr(route_bundle_index).server_state) { .unqueued => { if (dev.current_bundle != null) { - try dev.next_bundle.route_queue.put(dev.allocator, route_bundle_index, {}); + try dev.next_bundle.route_queue.put(dev.allocator(), route_bundle_index, {}); dev.routeBundlePtr(route_bundle_index).server_state = .bundling; try dev.deferRequest(&dev.next_bundle.requests, route_bundle_index, kind, req, resp); } else { @@ -993,7 +995,7 @@ fn ensureRouteIsBundled( } }, .pending => { - try dev.next_bundle.route_queue.put(dev.allocator, route_bundle_index, {}); + try dev.next_bundle.route_queue.put(dev.allocator(), route_bundle_index, {}); 
dev.routeBundlePtr(route_bundle_index).server_state = .bundling; try dev.deferRequest(&dev.next_bundle.requests, route_bundle_index, kind, req, resp); return; @@ -1007,7 +1009,7 @@ fn ensureRouteIsBundled( } // Prepare a bundle with just this route. - var sfa = std.heap.stackFallback(4096, dev.allocator); + var sfa = std.heap.stackFallback(4096, dev.allocator()); const temp_alloc = sfa.get(); var entry_points: EntryPointList = .empty; @@ -1102,7 +1104,7 @@ fn checkRouteFailures( route_bundle_index: RouteBundle.Index, resp: anytype, ) !enum { stop, ok, rebuild } { - var sfa_state = std.heap.stackFallback(65536, dev.allocator); + var sfa_state = std.heap.stackFallback(65536, dev.allocator()); const sfa = sfa_state.get(); var gts = try dev.initGraphTraceState(sfa, 0); defer gts.deinit(sfa); @@ -1164,9 +1166,8 @@ fn appendRouteEntryPointsIfNotStale(dev: *DevServer, entry_points: *EntryPointLi if (dev.has_tailwind_plugin_hack) |*map| { for (map.keys()) |abs_path| { - const file = dev.client_graph.bundled_files.get(abs_path) orelse - continue; - if (file.flags.kind == .css) + const file = (dev.client_graph.bundled_files.get(abs_path) orelse continue).unpack(); + if (file.kind() == .css) entry_points.appendCss(alloc, abs_path) catch bun.outOfMemory(); } } @@ -1322,10 +1323,10 @@ fn onHtmlRequestWithBundle(dev: *DevServer, route_bundle_index: RouteBundle.Inde const blob = html.cached_response orelse generate: { const payload = generateHTMLPayload(dev, route_bundle_index, route_bundle, html) catch bun.outOfMemory(); - errdefer dev.allocator.free(payload); + errdefer dev.allocator().free(payload); html.cached_response = StaticRoute.initFromAnyBlob( - &.fromOwnedSlice(dev.allocator, payload), + &.fromOwnedSlice(dev.allocator(), payload), .{ .mime_type = &.html, .server = dev.server orelse unreachable, @@ -1381,7 +1382,7 @@ fn generateHTMLPayload(dev: *DevServer, route_bundle_index: RouteBundle.Index, r defer dev.graph_safety_lock.unlock(); // Prepare bitsets for tracing - var 
sfa_state = std.heap.stackFallback(65536, dev.allocator); + var sfa_state = std.heap.stackFallback(65536, dev.allocator()); const sfa = sfa_state.get(); var gts = try dev.initGraphTraceState(sfa, 0); defer gts.deinit(sfa); @@ -1399,8 +1400,8 @@ fn generateHTMLPayload(dev: *DevServer, route_bundle_index: RouteBundle.Index, r "-0000000000000000.js".len + script_unref_payload.len; - var array: std.ArrayListUnmanaged(u8) = try std.ArrayListUnmanaged(u8).initCapacity(dev.allocator, payload_size); - errdefer array.deinit(dev.allocator); + var array: std.ArrayListUnmanaged(u8) = try std.ArrayListUnmanaged(u8).initCapacity(dev.allocator(), payload_size); + errdefer array.deinit(dev.allocator()); array.appendSliceAssumeCapacity(before_head_end); // Insert all link tags before "" @@ -1433,7 +1434,7 @@ fn generateJavaScriptCodeForHTMLFile( input_file_sources: []bun.logger.Source, loaders: []bun.options.Loader, ) bun.OOM![]const u8 { - var sfa_state = std.heap.stackFallback(65536, dev.allocator); + var sfa_state = std.heap.stackFallback(65536, dev.allocator()); const sfa = sfa_state.get(); var array = std.ArrayListUnmanaged(u8).initCapacity(sfa, 65536) catch bun.outOfMemory(); defer array.deinit(sfa); @@ -1449,9 +1450,8 @@ fn generateJavaScriptCodeForHTMLFile( continue; // ignore non-JavaScript imports } else { // Find the in-graph import. 
- const file = dev.client_graph.bundled_files.get(import.path.text) orelse - continue; - if (file.flags.kind != .js) + const file = (dev.client_graph.bundled_files.get(import.path.text) orelse continue).unpack(); + if (file.content != .js) continue; } if (!any) { @@ -1469,7 +1469,7 @@ fn generateJavaScriptCodeForHTMLFile( // Avoid-recloning if it is was moved to the heap return if (array.items.ptr == &sfa_state.buffer) - try dev.allocator.dupe(u8, array.items) + try dev.allocator().dupe(u8, array.items) else array.items; } @@ -1478,9 +1478,9 @@ pub fn onJsRequestWithBundle(dev: *DevServer, bundle_index: RouteBundle.Index, r const route_bundle = dev.routeBundlePtr(bundle_index); const blob = route_bundle.client_bundle orelse generate: { const payload = dev.generateClientBundle(route_bundle) catch bun.outOfMemory(); - errdefer dev.allocator.free(payload); + errdefer dev.allocator().free(payload); route_bundle.client_bundle = StaticRoute.initFromAnyBlob( - &.fromOwnedSlice(dev.allocator, payload), + &.fromOwnedSlice(dev.allocator(), payload), .{ .mime_type = &.javascript, .server = dev.server orelse unreachable, @@ -1515,7 +1515,7 @@ pub fn onSrcRequest(dev: *DevServer, req: *uws.Request, resp: anytype) void { // if (bun.strings.indexOfChar(url, ':')) |colon| { // url = url[0..colon]; // } - // editor.open(ctx.path, url, line, column, dev.allocator) catch { + // editor.open(ctx.path, url, line, column, dev.allocator()) catch { // resp.writeStatus("202 No Content"); // resp.end("", false); // return; @@ -1627,7 +1627,7 @@ pub fn startAsyncBundle( // Notify inspector about bundle start if (dev.inspector()) |agent| { - var sfa_state = std.heap.stackFallback(256, dev.allocator); + var sfa_state = std.heap.stackFallback(256, dev.allocator()); const sfa = sfa_state.get(); var trigger_files = try std.ArrayList(bun.String).initCapacity(sfa, entry_points.set.count()); defer trigger_files.deinit(); @@ -1648,9 +1648,9 @@ pub fn startAsyncBundle( var heap = 
ThreadLocalArena.init(); errdefer heap.deinit(); - const allocator = heap.allocator(); - const ast_memory_allocator = try allocator.create(bun.ast.ASTMemoryAllocator); - var ast_scope = ast_memory_allocator.enter(allocator); + const alloc = heap.allocator(); + const ast_memory_allocator = try alloc.create(bun.ast.ASTMemoryAllocator); + var ast_scope = ast_memory_allocator.enter(alloc); defer ast_scope.exit(); const bv2 = try BundleV2.init( @@ -1661,7 +1661,7 @@ pub fn startAsyncBundle( .ssr_transpiler = &dev.ssr_transpiler, .plugins = dev.bundler_options.plugin, }, - allocator, + alloc, .{ .js = dev.vm.eventLoop() }, false, // watching is handled separately jsc.WorkPool.get(), @@ -1718,7 +1718,7 @@ pub fn prepareAndLogResolutionFailures(dev: *DevServer) !void { fn indexFailures(dev: *DevServer) !void { // After inserting failures into the IncrementalGraphs, they are traced to their routes. - var sfa_state = std.heap.stackFallback(65536, dev.allocator); + var sfa_state = std.heap.stackFallback(65536, dev.allocator()); const sfa = sfa_state.get(); if (dev.incremental_result.failures_added.items.len > 0) { @@ -1801,7 +1801,7 @@ fn generateClientBundle(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM![]u defer dev.graph_safety_lock.unlock(); // Prepare bitsets - var sfa_state = std.heap.stackFallback(65536, dev.allocator); + var sfa_state = std.heap.stackFallback(65536, dev.allocator()); const sfa = sfa_state.get(); var gts = try dev.initGraphTraceState(sfa, 0); defer gts.deinit(sfa); @@ -1837,7 +1837,7 @@ fn generateClientBundle(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM![]u gts.clearAndFree(sfa); var arena = std.heap.ArenaAllocator.init(sfa); defer arena.deinit(); - try dev.client_graph.takeSourceMap(arena.allocator(), dev.allocator, entry); + try dev.client_graph.takeSourceMap(arena.allocator(), dev.allocator(), entry); }, .shared => {}, } @@ -1865,7 +1865,7 @@ fn generateCssJSArray(dev: *DevServer, route_bundle: *RouteBundle) bun.JSError!j defer 
dev.graph_safety_lock.unlock(); // Prepare bitsets - var sfa_state = std.heap.stackFallback(65536, dev.allocator); + var sfa_state = std.heap.stackFallback(65536, dev.allocator()); const sfa = sfa_state.get(); var gts = try dev.initGraphTraceState(sfa, 0); @@ -1992,7 +1992,7 @@ pub fn finalizeBundle( dev.log.clearAndFree(); heap.deinit(); - dev.assets.reindexIfNeeded(dev.allocator) catch { + dev.assets.reindexIfNeeded(dev.allocator()) catch { // not fatal: the assets may be reindexed some time later. }; @@ -2038,7 +2038,7 @@ pub fn finalizeBundle( const targets = bv2.graph.ast.items(.target); const scbs = bv2.graph.server_component_boundaries.slice(); - var sfa = std.heap.stackFallback(65536, bv2.graph.allocator); + var sfa = std.heap.stackFallback(65536, bv2.allocator()); const stack_alloc = sfa.get(); var scb_bitset = try bun.bit_set.DynamicBitSetUnmanaged.initEmpty(stack_alloc, input_file_sources.len); for ( @@ -2052,7 +2052,7 @@ pub fn finalizeBundle( scb_bitset.set(ssr_index); } - const resolved_index_cache = try bv2.graph.allocator.alloc(u32, input_file_sources.len * 2); + const resolved_index_cache = try bv2.allocator().alloc(u32, input_file_sources.len * 2); @memset(resolved_index_cache, @intFromEnum(IncrementalGraph(.server).FileIndex.Optional.none)); var ctx: bun.bake.DevServer.HotUpdateContext = .{ @@ -2066,7 +2066,7 @@ pub fn finalizeBundle( .gts = undefined, }; - const quoted_source_contents: []?[]u8 = bv2.linker.graph.files.items(.quoted_source_contents); + const quoted_source_contents = bv2.linker.graph.files.items(.quoted_source_contents); // Pass 1, update the graph's nodes, resolving every bundler source // index into its `IncrementalGraph(...).FileIndex` for ( @@ -2095,7 +2095,6 @@ pub fn finalizeBundle( .{ .js = .{ .code = compile_result.javascript.code(), - .code_allocator = compile_result.javascript.allocator(), .source_map = .{ .chunk = source_map, .escaped_source = quoted_contents, @@ -2113,7 +2112,7 @@ pub fn finalizeBundle( const index = 
bun.ast.Index.init(chunk.entry_point.source_index); const code = try chunk.intermediate_output.code( - dev.allocator, + dev.allocator(), &bv2.graph, &bv2.linker.graph, "THIS_SHOULD_NEVER_BE_EMITTED_IN_DEV_MODE", @@ -2136,7 +2135,7 @@ pub fn finalizeBundle( const hash = bun.hash(key); const asset_index = try dev.assets.replacePath( key, - &.fromOwnedSlice(dev.allocator, code.buffer), + &.fromOwnedSlice(dev.allocator(), code.buffer), &.css, hash, ); @@ -2148,13 +2147,13 @@ pub fn finalizeBundle( if (dev.has_tailwind_plugin_hack) |*map| { const first_1024 = code.buffer[0..@min(code.buffer.len, 1024)]; if (std.mem.indexOf(u8, first_1024, "tailwind") != null) { - const entry = try map.getOrPut(dev.allocator, key); + const entry = try map.getOrPut(dev.allocator(), key); if (!entry.found_existing) { - entry.key_ptr.* = try dev.allocator.dupe(u8, key); + entry.key_ptr.* = try dev.allocator().dupe(u8, key); } } else { if (map.fetchSwapRemove(key)) |entry| { - dev.allocator.free(entry.key); + dev.allocator().free(entry.key); } } } @@ -2187,7 +2186,6 @@ pub fn finalizeBundle( index, .{ .js = .{ .code = generated_js, - .code_allocator = dev.allocator, .source_map = null, } }, false, @@ -2204,9 +2202,11 @@ pub fn finalizeBundle( route_bundle.invalidateClientBundle(dev); } if (html.bundled_html_text) |slice| { - dev.allocator.free(slice); + dev.allocator().free(slice); + } + if (comptime AllocationScope.enabled) { + dev.allocation_scope.assertOwned(compile_result.code); } - dev.allocation_scope.assertOwned(compile_result.code); html.bundled_html_text = compile_result.code; html.script_injection_offset = .init(compile_result.script_injection_offset); @@ -2214,12 +2214,12 @@ pub fn finalizeBundle( } var gts = try dev.initGraphTraceState( - bv2.graph.allocator, + bv2.allocator(), if (result.cssChunks().len > 0) bv2.graph.input_files.len else 0, ); - defer gts.deinit(bv2.graph.allocator); + defer gts.deinit(bv2.allocator()); ctx.gts = >s; - ctx.server_seen_bit_set = try 
bun.bit_set.DynamicBitSetUnmanaged.initEmpty(bv2.graph.allocator, dev.server_graph.bundled_files.count()); + ctx.server_seen_bit_set = try bun.bit_set.DynamicBitSetUnmanaged.initEmpty(bv2.allocator(), dev.server_graph.bundled_files.count()); dev.incremental_result.had_adjusted_edges = false; @@ -2230,17 +2230,17 @@ pub fn finalizeBundle( // have been modified. for (js_chunk.content.javascript.parts_in_chunk_in_order) |part_range| { switch (targets[part_range.source_index.get()].bakeGraph()) { - .server, .ssr => try dev.server_graph.processChunkDependencies(&ctx, .normal, part_range.source_index, bv2.graph.allocator), - .client => try dev.client_graph.processChunkDependencies(&ctx, .normal, part_range.source_index, bv2.graph.allocator), + .server, .ssr => try dev.server_graph.processChunkDependencies(&ctx, .normal, part_range.source_index, bv2.allocator()), + .client => try dev.client_graph.processChunkDependencies(&ctx, .normal, part_range.source_index, bv2.allocator()), } } for (result.htmlChunks()) |*chunk| { const index = bun.ast.Index.init(chunk.entry_point.source_index); - try dev.client_graph.processChunkDependencies(&ctx, .normal, index, bv2.graph.allocator); + try dev.client_graph.processChunkDependencies(&ctx, .normal, index, bv2.allocator()); } for (result.cssChunks()) |*chunk| { const entry_index = bun.ast.Index.init(chunk.entry_point.source_index); - try dev.client_graph.processChunkDependencies(&ctx, .css, entry_index, bv2.graph.allocator); + try dev.client_graph.processChunkDependencies(&ctx, .css, entry_index, bv2.allocator()); } // Index all failed files now that the incremental graph has been updated. @@ -2267,7 +2267,7 @@ pub fn finalizeBundle( // Load all new chunks into the server runtime. 
if (!dev.frontend_only and dev.server_graph.current_chunk_len > 0) { const server_bundle = try dev.server_graph.takeJSBundle(&.{ .kind = .hmr_chunk }); - defer dev.allocator.free(server_bundle); + defer dev.allocator().free(server_bundle); const server_modules = c.BakeLoadServerHmrPatch(@ptrCast(dev.vm.global), bun.String.cloneLatin1(server_bundle)) catch |err| { // No user code has been evaluated yet, since everything is to @@ -2303,7 +2303,7 @@ pub fn finalizeBundle( var has_route_bits_set = false; - var hot_update_payload_sfa = std.heap.stackFallback(65536, dev.allocator); + var hot_update_payload_sfa = std.heap.stackFallback(65536, dev.allocator()); var hot_update_payload = std.ArrayList(u8).initCapacity(hot_update_payload_sfa.get(), 65536) catch unreachable; // enough space defer hot_update_payload.deinit(); @@ -2479,9 +2479,9 @@ pub fn finalizeBundle( const values = dev.client_graph.bundled_files.values(); for (dev.client_graph.current_chunk_parts.items) |part| { source_map_hash.update(keys[part.get()]); - const val = &values[part.get()]; - if (val.flags.source_map_state == .ref) { - source_map_hash.update(val.source_map.ref.data.vlq()); + const val = values[part.get()].unpack(); + if (val.source_map.get()) |source_map| { + source_map_hash.update(source_map.vlq()); } } // Set the bottom bit. This ensures that the resource can never be confused for a route bundle. 
@@ -2492,7 +2492,7 @@ pub fn finalizeBundle( while (it.next()) |socket_ptr_ptr| { const socket: *HmrSocket = socket_ptr_ptr.*; if (socket.subscriptions.hot_update) { - const entry = socket.referenced_source_maps.getOrPut(dev.allocator, script_id) catch bun.outOfMemory(); + const entry = socket.referenced_source_maps.getOrPut(dev.allocator(), script_id) catch bun.outOfMemory(); if (!entry.found_existing) { sockets += 1; } else { @@ -2506,7 +2506,7 @@ pub fn finalizeBundle( mapLog("inc {x}, for {d} sockets", .{ script_id.get(), sockets }); const entry = switch (try dev.source_maps.putOrIncrementRefCount(script_id, sockets)) { .uninitialized => |entry| brk: { - try dev.client_graph.takeSourceMap(bv2.graph.allocator, dev.allocator, entry); + try dev.client_graph.takeSourceMap(bv2.allocator(), dev.allocator(), entry); break :brk entry; }, .shared => |entry| entry, @@ -2667,7 +2667,7 @@ fn startNextBundleIfPresent(dev: *DevServer) void { // If there were pending requests, begin another bundle. 
if (dev.next_bundle.reload_event != null or dev.next_bundle.requests.first != null) { - var sfb = std.heap.stackFallback(4096, dev.allocator); + var sfb = std.heap.stackFallback(4096, dev.allocator()); const temp_alloc = sfb.get(); var entry_points: EntryPointList = .empty; defer entry_points.deinit(temp_alloc); @@ -2754,9 +2754,9 @@ pub fn getLogForResolutionFailures(dev: *DevServer, abs_path: []const u8, graph: .insertStale(abs_path, !is_client and graph == .ssr), ).encode(), }; - const gop = try current_bundle.resolution_failure_entries.getOrPut(current_bundle.bv2.graph.allocator, owner); + const gop = try current_bundle.resolution_failure_entries.getOrPut(current_bundle.bv2.allocator(), owner); if (!gop.found_existing) { - gop.value_ptr.* = bun.logger.Log.init(current_bundle.bv2.graph.allocator); + gop.value_ptr.* = bun.logger.Log.init(current_bundle.bv2.allocator()); } return gop.value_ptr; } @@ -2779,7 +2779,7 @@ pub fn isFileCached(dev: *DevServer, path: []const u8, side: bake.Graph) ?CacheE const index = g.bundled_files.getIndex(path) orelse return null; // non-existent files are considered stale if (!g.stale_files.isSet(index)) { - return .{ .kind = g.bundled_files.values()[index].fileKind() }; + return .{ .kind = g.getFileByIndex(.init(@intCast(index))).fileKind() }; } return null; }, @@ -2851,7 +2851,7 @@ fn getOrPutRouteBundle(dev: *DevServer, route: RouteBundle.UnresolvedIndex) !Rou const bundle_index = RouteBundle.Index.init(@intCast(dev.route_bundles.items.len)); - try dev.route_bundles.ensureUnusedCapacity(dev.allocator, 1); + try dev.route_bundles.ensureUnusedCapacity(dev.allocator(), 1); dev.route_bundles.appendAssumeCapacity(.{ .data = switch (route) { .framework => |route_index| .{ .framework = .{ @@ -2863,8 +2863,10 @@ fn getOrPutRouteBundle(dev: *DevServer, route: RouteBundle.UnresolvedIndex) !Rou } }, .html => |html| brk: { const incremental_graph_index = try dev.client_graph.insertStaleExtra(html.bundle.data.path, false, true); - const file 
= &dev.client_graph.bundled_files.values()[incremental_graph_index.get()]; - file.source_map.empty.html_bundle_route_index = .init(bundle_index.get()); + const packed_file = &dev.client_graph.bundled_files.values()[incremental_graph_index.get()]; + var file = packed_file.unpack(); + file.html_route_bundle_index = bundle_index; + packed_file.* = file.pack(); break :brk .{ .html = .{ .html_bundle = .initRef(html), .bundled_file = incremental_graph_index, @@ -2905,8 +2907,8 @@ fn encodeSerializedFailures( ) bun.OOM!void { var all_failures_len: usize = 0; for (failures) |fail| all_failures_len += fail.data.len; - var all_failures = try std.ArrayListUnmanaged(u8).initCapacity(dev.allocator, all_failures_len); - defer all_failures.deinit(dev.allocator); + var all_failures = try std.ArrayListUnmanaged(u8).initCapacity(dev.allocator(), all_failures_len); + defer all_failures.deinit(dev.allocator()); for (failures) |fail| all_failures.appendSliceAssumeCapacity(fail.data); const failures_start_buf_pos = buf.items.len; @@ -2933,7 +2935,7 @@ fn sendSerializedFailures( kind: ErrorPageKind, inspector_agent: ?*BunFrontendDevServerAgent, ) !void { - var buf: std.ArrayList(u8) = try .initCapacity(dev.allocator, 2048); + var buf: std.ArrayList(u8) = try .initCapacity(dev.allocator(), 2048); errdefer buf.deinit(); try buf.appendSlice(switch (kind) { @@ -2984,14 +2986,14 @@ fn sendBuiltInNotFound(resp: anytype) void { } fn printMemoryLine(dev: *DevServer) void { - if (comptime !bun.Environment.enableAllocScopes) { + if (comptime !AllocationScope.enabled) { return; } if (!debug.isVisible()) return; Output.prettyErrorln("DevServer tracked {}, measured: {} ({}), process: {}", .{ bun.fmt.size(dev.memoryCost(), .{}), - dev.allocation_scope.state.allocations.count(), - bun.fmt.size(dev.allocation_scope.state.total_memory_allocated, .{}), + dev.allocation_scope.numAllocations(), + bun.fmt.size(dev.allocation_scope.total(), .{}), bun.fmt.size(bun.sys.selfProcessMemoryUsage() orelse 0, .{}), 
}); } @@ -3011,7 +3013,7 @@ pub const FileKind = enum(u2) { /// '/_bun/css/0000000000000000.css' css, - pub fn hasInlinejscodeChunk(self: @This()) bool { + pub fn hasInlineJsCodeChunk(self: @This()) bool { return switch (self) { .js, .asset => true, else => false, @@ -3115,13 +3117,13 @@ pub const GraphTraceState = struct { gts.client_bits.setAll(false); } - pub fn resize(gts: *GraphTraceState, side: bake.Side, allocator: Allocator, new_size: usize) !void { + pub fn resize(gts: *GraphTraceState, side: bake.Side, alloc: Allocator, new_size: usize) !void { const b = switch (side) { .client => >s.client_bits, .server => >s.server_bits, }; if (b.bit_length < new_size) { - try b.resize(allocator, new_size, false); + try b.resize(alloc, new_size, false); } } @@ -3220,7 +3222,7 @@ pub fn emitVisualizerMessageIfNeeded(dev: *DevServer) void { defer dev.emitMemoryVisualizerMessageIfNeeded(); if (dev.emit_incremental_visualizer_events == 0) return; - var sfb = std.heap.stackFallback(65536, dev.allocator); + var sfb = std.heap.stackFallback(65536, dev.allocator()); var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch unreachable; // enough capacity on the stack defer payload.deinit(); @@ -3250,7 +3252,7 @@ pub fn emitMemoryVisualizerMessage(dev: *DevServer) void { comptime assert(bun.FeatureFlags.bake_debugging_features); bun.debugAssert(dev.emit_memory_visualizer_events > 0); - var sfb = std.heap.stackFallback(65536, dev.allocator); + var sfb = std.heap.stackFallback(65536, dev.allocator()); var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch unreachable; // enough capacity on the stack defer payload.deinit(); @@ -3282,8 +3284,8 @@ pub fn writeMemoryVisualizerMessage(dev: *DevServer, payload: *std.ArrayList(u8) .source_maps = @truncate(cost.source_maps), .assets = @truncate(cost.assets), .other = @truncate(cost.other), - .devserver_tracked = if (AllocationScope.enabled) - @truncate(dev.allocation_scope.state.total_memory_allocated) + 
.devserver_tracked = if (comptime AllocationScope.enabled) + @truncate(dev.allocation_scope.total()) else 0, .process_used = @truncate(bun.sys.selfProcessMemoryUsage() orelse 0), @@ -3327,23 +3329,24 @@ pub fn writeVisualizerMessage(dev: *DevServer, payload: *std.ArrayList(u8)) !voi g.bundled_files.values(), 0.., ) |k, v, i| { + const file = v.unpack(); const relative_path_buf = bun.path_buffer_pool.get(); defer bun.path_buffer_pool.put(relative_path_buf); const normalized_key = dev.relativePath(relative_path_buf, k); try w.writeInt(u32, @intCast(normalized_key.len), .little); if (k.len == 0) continue; try w.writeAll(normalized_key); - try w.writeByte(@intFromBool(g.stale_files.isSetAllowOutOfBound(i, true) or switch (side) { - .server => v.failed, - .client => v.flags.failed, - })); - try w.writeByte(@intFromBool(side == .server and v.is_rsc)); - try w.writeByte(@intFromBool(side == .server and v.is_ssr)); - try w.writeByte(@intFromBool(if (side == .server) v.is_route else v.flags.is_html_route)); - try w.writeByte(@intFromBool(side == .client and v.flags.is_special_framework_file)); + try w.writeByte(@intFromBool(g.stale_files.isSetAllowOutOfBound(i, true) or file.failed)); + try w.writeByte(@intFromBool(side == .server and file.is_rsc)); + try w.writeByte(@intFromBool(side == .server and file.is_ssr)); try w.writeByte(@intFromBool(switch (side) { - .server => v.is_client_component_boundary, - .client => v.flags.is_hmr_root, + .server => file.is_route, + .client => file.html_route_bundle_index != null, + })); + try w.writeByte(@intFromBool(side == .client and file.is_special_framework_file)); + try w.writeByte(@intFromBool(switch (side) { + .server => file.is_client_component_boundary, + .client => file.is_hmr_root, })); } } @@ -3371,7 +3374,7 @@ pub fn onWebSocketUpgrade( assert(id == 0); const dw = HmrSocket.new(dev, res); - dev.active_websocket_connections.put(dev.allocator, dw, {}) catch bun.outOfMemory(); + 
dev.active_websocket_connections.put(dev.allocator(), dw, {}) catch bun.outOfMemory(); _ = res.upgrade( *HmrSocket, dw, @@ -3601,7 +3604,7 @@ const c = struct { pub fn startReloadBundle(dev: *DevServer, event: *HotReloadEvent) bun.OOM!void { defer event.files.clearRetainingCapacity(); - var sfb = std.heap.stackFallback(4096, dev.allocator); + var sfb = std.heap.stackFallback(4096, dev.allocator()); const temp_alloc = sfb.get(); var entry_points: EntryPointList = EntryPointList.empty; defer entry_points.deinit(temp_alloc); @@ -3696,15 +3699,15 @@ pub fn onFileUpdate(dev: *DevServer, events: []Watcher.Event, changed_files: []? dev.bun_watcher.removeAtIndex(event.index, 0, &.{}, .file); } - ev.appendFile(dev.allocator, file_path); + ev.appendFile(dev.allocator(), file_path); }, .directory => { // INotifyWatcher stores sub paths into `changed_files` // the other platforms do not appear to write anything into `changed_files` ever. if (Environment.isLinux) { - ev.appendDir(dev.allocator, file_path, if (event.name_len > 0) changed_files[event.name_off] else null); + ev.appendDir(dev.allocator(), file_path, if (event.name_len > 0) changed_files[event.name_off] else null); } else { - ev.appendDir(dev.allocator, file_path, null); + ev.appendDir(dev.allocator(), file_path, null); } }, } @@ -3738,7 +3741,7 @@ const SafeFileId = packed struct(u32) { /// Interface function for FrameworkRouter pub fn getFileIdForRouter(dev: *DevServer, abs_path: []const u8, associated_route: Route.Index, file_kind: Route.FileKind) !OpaqueFileId { const index = try dev.server_graph.insertStaleExtra(abs_path, false, true); - try dev.route_lookup.put(dev.allocator, index, .{ + try dev.route_lookup.put(dev.allocator(), index, .{ .route_index = associated_route, .should_recurse_when_visiting = file_kind == .layout, }); @@ -3843,7 +3846,7 @@ fn dumpStateDueToCrash(dev: *DevServer) !void { try file.writeAll(start); try file.writeAll("\nlet inlinedData = Uint8Array.from(atob(\""); - var sfb = 
std.heap.stackFallback(4096, dev.allocator); + var sfb = std.heap.stackFallback(4096, dev.allocator()); var payload = try std.ArrayList(u8).initCapacity(sfb.get(), 4096); defer payload.deinit(); try dev.writeVisualizerMessage(&payload); @@ -3884,33 +3887,33 @@ pub const EntryPointList = struct { unused: enum(u4) { unused = 0 } = .unused, }; - pub fn deinit(entry_points: *EntryPointList, allocator: std.mem.Allocator) void { - entry_points.set.deinit(allocator); + pub fn deinit(entry_points: *EntryPointList, alloc: Allocator) void { + entry_points.set.deinit(alloc); } pub fn appendJs( entry_points: *EntryPointList, - allocator: std.mem.Allocator, + alloc: Allocator, abs_path: []const u8, side: bake.Graph, ) !void { - return entry_points.append(allocator, abs_path, switch (side) { + return entry_points.append(alloc, abs_path, switch (side) { .server => .{ .server = true }, .client => .{ .client = true }, .ssr => .{ .ssr = true }, }); } - pub fn appendCss(entry_points: *EntryPointList, allocator: std.mem.Allocator, abs_path: []const u8) !void { - return entry_points.append(allocator, abs_path, .{ + pub fn appendCss(entry_points: *EntryPointList, alloc: Allocator, abs_path: []const u8) !void { + return entry_points.append(alloc, abs_path, .{ .client = true, .css = true, }); } /// Deduplictes requests to bundle the same file twice. 
- pub fn append(entry_points: *EntryPointList, allocator: std.mem.Allocator, abs_path: []const u8, flags: Flags) !void { - const gop = try entry_points.set.getOrPut(allocator, abs_path); + pub fn append(entry_points: *EntryPointList, alloc: Allocator, abs_path: []const u8, flags: Flags) !void { + const gop = try entry_points.set.getOrPut(alloc, abs_path); if (gop.found_existing) { const T = @typeInfo(Flags).@"struct".backing_integer.?; gop.value_ptr.* = @bitCast(@as(T, @bitCast(gop.value_ptr.*)) | @as(T, @bitCast(flags))); @@ -3999,7 +4002,7 @@ const UnrefSourceMapRequest = struct { fn run(dev: *DevServer, _: *Request, resp: anytype) void { const ctx = bun.new(UnrefSourceMapRequest, .{ .dev = dev, - .body = .init(dev.allocator), + .body = .init(dev.allocator()), }); ctx.dev.server.?.onPendingRequest(); ctx.body.readBody(resp); @@ -4038,7 +4041,7 @@ const TestingBatch = struct { pub fn append(self: *@This(), dev: *DevServer, entry_points: EntryPointList) !void { assert(entry_points.set.count() > 0); for (entry_points.set.keys(), entry_points.set.values()) |k, v| { - try self.entry_points.append(dev.allocator, k, v); + try self.entry_points.append(dev.allocator(), k, v); } } }; @@ -4066,6 +4069,7 @@ const Log = bun.logger.Log; const MimeType = bun.http.MimeType; const ThreadLocalArena = bun.allocators.MimallocArena; const Transpiler = bun.transpiler.Transpiler; +const useAllFields = bun.meta.useAllFields; const EventLoopTimer = bun.api.Timer.EventLoopTimer; const StaticRoute = bun.api.server.StaticRoute; @@ -4087,9 +4091,6 @@ const Plugin = jsc.API.JSBundler.Plugin; const BunFrontendDevServerAgent = jsc.Debugger.BunFrontendDevServerAgent; const DebuggerId = jsc.Debugger.DebuggerId; -const VoidFieldTypes = bun.meta.VoidFieldTypes; -const voidFieldTypeDiscardHelper = bun.meta.voidFieldTypeDiscardHelper; - const uws = bun.uws; const AnyResponse = bun.uws.AnyResponse; const Request = uws.Request; diff --git a/src/bake/DevServer/Assets.zig b/src/bake/DevServer/Assets.zig 
index 1d3e8c4c13..3367a5c370 100644 --- a/src/bake/DevServer/Assets.zig +++ b/src/bake/DevServer/Assets.zig @@ -41,7 +41,7 @@ pub fn replacePath( ) !EntryIndex { assert(assets.owner().magic == .valid); defer assert(assets.files.count() == assets.refs.items.len); - const alloc = assets.owner().allocator; + const alloc = assets.owner().allocator(); debug.log("replacePath {} {} - {s}/{s} ({s})", .{ bun.fmt.quote(abs_path), content_hash, @@ -100,9 +100,9 @@ pub fn replacePath( /// means there is already data here. pub fn putOrIncrementRefCount(assets: *Assets, content_hash: u64, ref_count: u32) !?**StaticRoute { defer assert(assets.files.count() == assets.refs.items.len); - const file_index_gop = try assets.files.getOrPut(assets.owner().allocator, content_hash); + const file_index_gop = try assets.files.getOrPut(assets.owner().allocator(), content_hash); if (!file_index_gop.found_existing) { - try assets.refs.append(assets.owner().allocator, ref_count); + try assets.refs.append(assets.owner().allocator(), ref_count); return file_index_gop.value_ptr; } else { assets.refs.items[file_index_gop.index] += ref_count; diff --git a/src/bake/DevServer/DevAllocator.zig b/src/bake/DevServer/DevAllocator.zig new file mode 100644 index 0000000000..626e392dc3 --- /dev/null +++ b/src/bake/DevServer/DevAllocator.zig @@ -0,0 +1,19 @@ +const Self = @This(); + +maybe_scope: if (AllocationScope.enabled) AllocationScope else void, + +pub fn get(self: Self) Allocator { + return if (comptime AllocationScope.enabled) + self.maybe_scope.allocator() + else + bun.default_allocator; +} + +pub fn scope(self: Self) ?AllocationScope { + return if (comptime AllocationScope.enabled) self.maybe_scope else null; +} + +const bun = @import("bun"); +const std = @import("std"); +const AllocationScope = bun.allocators.AllocationScope; +const Allocator = std.mem.Allocator; diff --git a/src/bake/DevServer/DirectoryWatchStore.zig b/src/bake/DevServer/DirectoryWatchStore.zig index ef0b88ad8e..bcfd21210d 100644 
--- a/src/bake/DevServer/DirectoryWatchStore.zig +++ b/src/bake/DevServer/DirectoryWatchStore.zig @@ -100,14 +100,14 @@ fn insert( }); if (store.dependencies_free_list.items.len == 0) - try store.dependencies.ensureUnusedCapacity(dev.allocator, 1); + try store.dependencies.ensureUnusedCapacity(dev.allocator(), 1); - const gop = try store.watches.getOrPut(dev.allocator, bun.strings.withoutTrailingSlashWindowsPath(dir_name_to_watch)); + const gop = try store.watches.getOrPut(dev.allocator(), bun.strings.withoutTrailingSlashWindowsPath(dir_name_to_watch)); const specifier_cloned = if (specifier[0] == '.' or std.fs.path.isAbsolute(specifier)) - try dev.allocator.dupe(u8, specifier) + try dev.allocator().dupe(u8, specifier) else - try std.fmt.allocPrint(dev.allocator, "./{s}", .{specifier}); - errdefer dev.allocator.free(specifier_cloned); + try std.fmt.allocPrint(dev.allocator(), "./{s}", .{specifier}); + errdefer dev.allocator().free(specifier_cloned); if (gop.found_existing) { const dep = store.appendDepAssumeCapacity(.{ @@ -163,8 +163,8 @@ fn insert( if (owned_fd) "from dir cache" else "owned fd", }); - const dir_name = try dev.allocator.dupe(u8, dir_name_to_watch); - errdefer dev.allocator.free(dir_name); + const dir_name = try dev.allocator().dupe(u8, dir_name_to_watch); + errdefer dev.allocator().free(dir_name); gop.key_ptr.* = bun.strings.withoutTrailingSlashWindowsPath(dir_name); diff --git a/src/bake/DevServer/ErrorReportRequest.zig b/src/bake/DevServer/ErrorReportRequest.zig index 2a901436f7..c622557e70 100644 --- a/src/bake/DevServer/ErrorReportRequest.zig +++ b/src/bake/DevServer/ErrorReportRequest.zig @@ -22,7 +22,7 @@ body: uws.BodyReaderMixin(@This(), "body", runWithBody, finalize), pub fn run(dev: *DevServer, _: *Request, resp: anytype) void { const ctx = bun.new(ErrorReportRequest, .{ .dev = dev, - .body = .init(dev.allocator), + .body = .init(dev.allocator()), }); ctx.dev.server.?.onPendingRequest(); ctx.body.readBody(resp); @@ -41,8 +41,8 @@ pub fn 
runWithBody(ctx: *ErrorReportRequest, body: []const u8, r: AnyResponse) ! var s = std.io.fixedBufferStream(body); const reader = s.reader(); - var sfa_general = std.heap.stackFallback(65536, ctx.dev.allocator); - var sfa_sourcemap = std.heap.stackFallback(65536, ctx.dev.allocator); + var sfa_general = std.heap.stackFallback(65536, ctx.dev.allocator()); + var sfa_sourcemap = std.heap.stackFallback(65536, ctx.dev.allocator()); const temp_alloc = sfa_general.get(); var arena = std.heap.ArenaAllocator.init(temp_alloc); defer arena.deinit(); @@ -169,8 +169,8 @@ pub fn runWithBody(ctx: *ErrorReportRequest, body: []const u8, r: AnyResponse) ! if (runtime_lines == null) { const file = result.entry_files.get(@intCast(index - 1)); - if (file != .empty) { - const json_encoded_source_code = file.ref.data.quotedContents(); + if (file.get()) |source_map| { + const json_encoded_source_code = source_map.quotedContents(); // First line of interest is two above the target line. const target_line = @as(usize, @intCast(frame.position.line.zeroBased())); first_line_of_interest = target_line -| 2; @@ -238,7 +238,7 @@ pub fn runWithBody(ctx: *ErrorReportRequest, body: []const u8, r: AnyResponse) ! 
) catch {}, } - var out: std.ArrayList(u8) = .init(ctx.dev.allocator); + var out: std.ArrayList(u8) = .init(ctx.dev.allocator()); errdefer out.deinit(); const w = out.writer(); diff --git a/src/bake/DevServer/HmrSocket.zig b/src/bake/DevServer/HmrSocket.zig index b3cafff0bd..7330a080aa 100644 --- a/src/bake/DevServer/HmrSocket.zig +++ b/src/bake/DevServer/HmrSocket.zig @@ -12,7 +12,7 @@ referenced_source_maps: std.AutoHashMapUnmanaged(SourceMapStore.Key, void), inspector_connection_id: i32 = -1, pub fn new(dev: *DevServer, res: anytype) *HmrSocket { - return bun.create(dev.allocator, HmrSocket, .{ + return bun.create(dev.allocator(), HmrSocket, .{ .dev = dev, .is_from_localhost = if (res.getRemoteSocketInfo()) |addr| if (addr.is_ipv6) @@ -54,7 +54,7 @@ pub fn onMessage(s: *HmrSocket, ws: AnyWebSocket, msg: []const u8, opcode: uws.O return ws.close(); const source_map_id = SourceMapStore.Key.init(@as(u64, generation) << 32); if (s.dev.source_maps.removeOrUpgradeWeakRef(source_map_id, .upgrade)) { - s.referenced_source_maps.put(s.dev.allocator, source_map_id, {}) catch + s.referenced_source_maps.put(s.dev.allocator(), source_map_id, {}) catch bun.outOfMemory(); } }, @@ -166,7 +166,7 @@ pub fn onMessage(s: *HmrSocket, ws: AnyWebSocket, msg: []const u8, opcode: uws.O std.time.Timer.start() catch @panic("timers unsupported"), ) catch bun.outOfMemory(); - event.entry_points.deinit(s.dev.allocator); + event.entry_points.deinit(s.dev.allocator()); }, }, .console_log => { @@ -256,9 +256,9 @@ pub fn onClose(s: *HmrSocket, ws: AnyWebSocket, exit_code: i32, message: []const while (it.next()) |key| { s.dev.source_maps.unref(key.*); } - s.referenced_source_maps.deinit(s.dev.allocator); + s.referenced_source_maps.deinit(s.dev.allocator()); bun.debugAssert(s.dev.active_websocket_connections.remove(s)); - s.dev.allocator.destroy(s); + s.dev.allocator().destroy(s); } fn notifyInspectorClientNavigation(s: *const HmrSocket, pattern: []const u8, rbi: RouteBundle.Index.Optional) void { 
diff --git a/src/bake/DevServer/HotReloadEvent.zig b/src/bake/DevServer/HotReloadEvent.zig index d756386f37..e956c2540f 100644 --- a/src/bake/DevServer/HotReloadEvent.zig +++ b/src/bake/DevServer/HotReloadEvent.zig @@ -110,8 +110,8 @@ pub fn processFileList( // this resolution result is not preserved as passing it // into BundleV2 is too complicated. the resolution is // cached, anyways. - event.appendFile(dev.allocator, dep.source_file_path); - dev.directory_watchers.freeDependencyIndex(dev.allocator, index) catch bun.outOfMemory(); + event.appendFile(dev.allocator(), dep.source_file_path); + dev.directory_watchers.freeDependencyIndex(dev.allocator(), index) catch bun.outOfMemory(); } else { // rebuild a new linked list for unaffected files dep.next = new_chain; @@ -123,18 +123,18 @@ pub fn processFileList( entry.first_dep = new_first_dep; } else { // without any files to depend on this watcher is freed - dev.directory_watchers.freeEntry(dev.allocator, watcher_index); + dev.directory_watchers.freeEntry(dev.allocator(), watcher_index); } } }; var rest_extra = event.extra_files.items; while (bun.strings.indexOfChar(rest_extra, 0)) |str| { - event.files.put(dev.allocator, rest_extra[0..str], {}) catch bun.outOfMemory(); + event.files.put(dev.allocator(), rest_extra[0..str], {}) catch bun.outOfMemory(); rest_extra = rest_extra[str + 1 ..]; } if (rest_extra.len > 0) { - event.files.put(dev.allocator, rest_extra, {}) catch bun.outOfMemory(); + event.files.put(dev.allocator(), rest_extra, {}) catch bun.outOfMemory(); } const changed_file_paths = event.files.keys(); @@ -163,9 +163,8 @@ pub fn processFileList( if (dev.has_tailwind_plugin_hack) |*map| { for (map.keys()) |abs_path| { - const file = dev.client_graph.bundled_files.get(abs_path) orelse - continue; - if (file.flags.kind == .css) + const file = (dev.client_graph.bundled_files.get(abs_path) orelse continue).unpack(); + if (file.kind() == .css) entry_points.appendCss(temp_alloc, abs_path) catch bun.outOfMemory(); } 
} @@ -188,7 +187,7 @@ pub fn run(first: *HotReloadEvent) void { return; } - var sfb = std.heap.stackFallback(4096, dev.allocator); + var sfb = std.heap.stackFallback(4096, dev.allocator()); const temp_alloc = sfb.get(); var entry_points: EntryPointList = .empty; defer entry_points.deinit(temp_alloc); diff --git a/src/bake/DevServer/IncrementalGraph.zig b/src/bake/DevServer/IncrementalGraph.zig index abe2f7614d..efba4ac533 100644 --- a/src/bake/DevServer/IncrementalGraph.zig +++ b/src/bake/DevServer/IncrementalGraph.zig @@ -1,3 +1,205 @@ +const JsCode = []const u8; +const CssAssetId = u64; + +// The server's incremental graph does not store previously bundled code because there is +// only one instance of the server. Instead, it stores which module graphs it is a part of. +// This makes sure that recompilation knows what bundler options to use. +const ServerFile = struct { + /// Is this file built for the Server graph. + is_rsc: bool, + /// Is this file built for the SSR graph. + is_ssr: bool, + /// If set, the client graph contains a matching file. + /// The server + is_client_component_boundary: bool, + /// If this file is a route root, the route can be looked up in + /// the route list. This also stops dependency propagation. + is_route: bool, + /// If the file has an error, the failure can be looked up + /// in the `.failures` map. + failed: bool, + /// CSS and Asset files get special handling + kind: FileKind, + + // `ClientFile` has a separate packed version, but `ServerFile` is already packed. + // We still need to define a `Packed` type, though, so we can write `File.Packed` + // regardless of `side`. 
+ pub const Packed = ServerFile; + + pub fn pack(self: *const ServerFile) Packed { + return self; + } + + pub fn unpack(self: Packed) ServerFile { + return self; + } + + fn stopsDependencyTrace(self: ServerFile) bool { + return self.is_client_component_boundary; + } + + pub fn fileKind(self: *const ServerFile) FileKind { + return self.kind; + } +}; + +const Content = union(enum) { + unknown: void, + /// When stale, the code is "", otherwise it contains at least one non-whitespace + /// character, as empty chunks contain at least a function wrapper. + js: JsCode, + asset: JsCode, + /// A CSS root is the first file in a CSS bundle, aka the one that the JS or HTML file + /// points into. + /// + /// There are many complicated rules when CSS files reference each other, none of which + /// are modelled in IncrementalGraph. Instead, any change to downstream files will find + /// the CSS root, and queue it for a re-bundle. Additionally, CSS roots only have one + /// level of imports, as the code in `finalizeBundle` will add all referenced files as + /// edges directly to the root, creating a flat list instead of a tree. Those downstream + /// files remaining empty; only present so that invalidation can trace them to this + /// root. + css_root: CssAssetId, + css_child: void, + + const Untagged = blk: { + var info = @typeInfo(Content); + info.@"union".tag_type = null; + break :blk @Type(info); + }; +}; + +const ClientFile = struct { + content: Content, + source_map: PackedMap.Shared = .none, + /// This should always be null if `source_map` is `.some`, since HTML files do not have + /// source maps. + html_route_bundle_index: ?RouteBundle.Index = null, + /// If the file has an error, the failure can be looked up in the `.failures` map. + failed: bool = false, + /// For JS files, this is a component root; the server contains a matching file. + is_hmr_root: bool = false, + /// This is a file is an entry point to the framework. 
Changing this will always cause + /// a full page reload. + is_special_framework_file: bool = false, + + /// Packed version of `ClientFile`. Don't access fields directly; call `unpack`. + pub const Packed = struct { + // Due to padding, using `packed struct` here wouldn't save any space. + unsafe_packed_data: struct { + content: Content.Untagged, + source_map: union { + some: Shared(*PackedMap), + none: struct { + line_count: union { + some: LineCount, + none: void, + }, + html_route_bundle_index: union { + some: RouteBundle.Index, + none: void, + }, + }, + }, + content_tag: std.meta.Tag(Content), + source_map_tag: std.meta.Tag(PackedMap.Shared), + is_html_route: bool, + failed: bool, + is_hmr_root: bool, + is_special_framework_file: bool, + }, + + pub fn unpack(self: Packed) ClientFile { + const data = self.unsafe_packed_data; + return .{ + .content = switch (data.content_tag) { + inline else => |tag| @unionInit( + Content, + @tagName(tag), + @field(data.content, @tagName(tag)), + ), + }, + .source_map = switch (data.source_map_tag) { + .some => .{ .some = data.source_map.some }, + .none => .none, + .line_count => .{ .line_count = data.source_map.none.line_count.some }, + }, + .html_route_bundle_index = if (data.is_html_route) + data.source_map.none.html_route_bundle_index.some + else + null, + .failed = data.failed, + .is_hmr_root = data.is_hmr_root, + .is_special_framework_file = data.is_special_framework_file, + }; + } + + comptime { + if (!Environment.allow_assert) { + bun.assert_eql(@sizeOf(@This()), @sizeOf(u64) * 4); + bun.assert_eql(@alignOf(@This()), @alignOf([*]u8)); + } + } + }; + + pub fn pack(self: *const ClientFile) Packed { + // HTML files should not have source maps + assert(self.html_route_bundle_index == null or self.source_map != .some); + return .{ .unsafe_packed_data = .{ + .content = switch (std.meta.activeTag(self.content)) { + inline else => |tag| @unionInit( + Content.Untagged, + @tagName(tag), + @field(self.content, @tagName(tag)), + ), + 
}, + .source_map = switch (self.source_map) { + .some => |map| .{ .some = map }, + else => .{ .none = .{ + .line_count = switch (self.source_map) { + .line_count => |count| .{ .some = count }, + else => .{ .none = {} }, + }, + .html_route_bundle_index = if (self.html_route_bundle_index) |index| + .{ .some = index } + else + .{ .none = {} }, + } }, + }, + .content_tag = self.content, + .source_map_tag = self.source_map, + .is_html_route = self.html_route_bundle_index != null, + .failed = self.failed, + .is_hmr_root = self.is_hmr_root, + .is_special_framework_file = self.is_special_framework_file, + } }; + } + + pub fn kind(self: *const ClientFile) FileKind { + return switch (self.content) { + .unknown => .unknown, + .js => .js, + .asset => .asset, + .css_root, .css_child => .css, + }; + } + + fn jsCode(self: *const ClientFile) ?[]const u8 { + return switch (self.content) { + .js, .asset => |code| code, + else => null, + }; + } + + inline fn stopsDependencyTrace(_: ClientFile) bool { + return false; + } + + pub fn fileKind(self: *const ClientFile) FileKind { + return self.kind(); + } +}; + /// The paradigm of Bake's incremental state is to store a separate list of files /// than the Graph in bundle_v2. When watch events happen, the bundler is run on /// the changed files, excluding non-stale files via `isFileStale`. @@ -23,16 +225,18 @@ /// JSON source map files (`takeSourceMap`), even after hot updates. The /// lifetime for these sourcemaps is a bit tricky and depend on the lifetime of /// of WebSocket connections; see comments in `Assets` for more details. -pub fn IncrementalGraph(side: bake.Side) type { +pub fn IncrementalGraph(comptime side: bake.Side) type { return struct { + const Self = @This(); + // Unless otherwise mentioned, all data structures use DevServer's allocator. // All arrays are indexed by FileIndex, except for the two edge-related arrays. 
/// Keys are absolute paths for the "file" namespace, or the /// pretty-formatted path value that appear in imports. Absolute paths /// are stored so the watcher can quickly query and invalidate them. - /// Key slices are owned by `dev.allocator` - bundled_files: bun.StringArrayHashMapUnmanaged(File), + /// Key slices are owned by `dev.allocator()` + bundled_files: bun.StringArrayHashMapUnmanaged(File.Packed), /// Track bools for files which are "stale", meaning they should be /// re-bundled before being used. Resizing this is usually deferred /// until after a bundle, since resizing the bit-set requires an @@ -72,11 +276,11 @@ pub fn IncrementalGraph(side: bake.Side) type { /// Asset IDs, which can be printed as hex in '/_bun/asset/{hash}.css' current_css_files: switch (side) { - .client => ArrayListUnmanaged(u64), + .client => ArrayListUnmanaged(CssAssetId), .server => void, }, - pub const empty: @This() = .{ + pub const empty: Self = .{ .bundled_files = .empty, .stale_files = .empty, .first_dep = .empty, @@ -96,181 +300,28 @@ pub fn IncrementalGraph(side: bake.Side) type { // code because there is only one instance of the server. Instead, // it stores which module graphs it is a part of. This makes sure // that recompilation knows what bundler options to use. - .server => packed struct(u8) { - /// Is this file built for the Server graph. - is_rsc: bool, - /// Is this file built for the SSR graph. - is_ssr: bool, - /// If set, the client graph contains a matching file. - /// The server - is_client_component_boundary: bool, - /// If this file is a route root, the route can be looked up in - /// the route list. This also stops dependency propagation. - is_route: bool, - /// If the file has an error, the failure can be looked up - /// in the `.failures` map. 
- failed: bool, - /// CSS and Asset files get special handling - kind: FileKind, - - unused: enum(u1) { unused } = .unused, - - fn stopsDependencyTrace(file: @This()) bool { - return file.is_client_component_boundary; - } - - pub fn fileKind(file: @This()) FileKind { - return file.kind; - } - }, - .client => struct { - /// Content depends on `flags.kind` - /// See function wrappers to safely read into this data - content: union { - /// Access contents with `.jsCode()`. - /// When stale, the code is "", otherwise it contains at - /// least one non-whitespace character, as empty chunks - /// contain at least a function wrapper. - js_code: struct { - ptr: [*]const u8, - allocator: std.mem.Allocator, - }, - /// Access with `.cssAssetId()` - css_asset_id: u64, - - unknown: enum(u32) { unknown = 0 }, - }, - /// Separated from the pointer to reduce struct size. - /// Parser does not support files >4gb anyways. - code_len: u32, - flags: Flags, - source_map: PackedMap.RefOrEmpty.Untagged, - - const Flags = packed struct(u32) { - /// Kind determines the data representation in `content`, as - /// well as how this file behaves when tracing. - kind: FileKind, - /// If the file has an error, the failure can be looked up - /// in the `.failures` map. - failed: bool, - /// For JS files, this is a component root; the server contains a matching file. - is_hmr_root: bool, - /// This is a file is an entry point to the framework. - /// Changing this will always cause a full page reload. - is_special_framework_file: bool, - /// If this file has a HTML RouteBundle. The bundle index is tucked away in: - /// `graph.source_maps.items[i].extra.empty.html_bundle_route_index` - is_html_route: bool, - /// A CSS root is the first file in a CSS bundle, aka the - /// one that the JS or HTML file points into. - /// - /// There are many complicated rules when CSS files - /// reference each other, none of which are modelled in - /// IncrementalGraph. 
Instead, any change to downstream - /// files will find the CSS root, and queue it for a - /// re-bundle. Additionally, CSS roots only have one level - /// of imports, as the code in `finalizeBundle` will add all - /// referenced files as edges directly to the root, creating - /// a flat list instead of a tree. Those downstream files - /// remaining empty; only present so that invalidation can - /// trace them to this root. - is_css_root: bool, - /// Affects `file.source_map` - source_map_state: PackedMap.RefOrEmpty.Tag, - - unused: enum(u24) { unused } = .unused, - }; - - comptime { - // Debug and ReleaseSafe builds add a tag to untagged unions - if (!Environment.allow_assert) { - bun.assert_eql(@sizeOf(@This()), @sizeOf(u64) * 5); - bun.assert_eql(@alignOf(@This()), @alignOf([*]u8)); - } - } - - fn initJavaScript(code_slice: []const u8, code_allocator: std.mem.Allocator, flags: Flags, source_map: PackedMap.RefOrEmpty) @This() { - assert(flags.kind == .js or flags.kind == .asset); - assert(flags.source_map_state == std.meta.activeTag(source_map)); - return .{ - .content = .{ .js_code = .{ - .ptr = code_slice.ptr, - .allocator = code_allocator, - } }, - .code_len = @intCast(code_slice.len), - .flags = flags, - .source_map = source_map.untag(), - }; - } - - fn initCSS(asset_id: u64, flags: Flags) @This() { - assert(flags.kind == .css); - assert(flags.source_map_state == .empty); - return .{ - .content = .{ .css_asset_id = asset_id }, - .code_len = 0, // unused - .flags = flags, - .source_map = .blank_empty, - }; - } - - fn initUnknown(flags: Flags, empty_map: PackedMap.RefOrEmpty.Empty) @This() { - assert(flags.source_map_state == .empty); - return .{ - .content = .{ .unknown = .unknown }, // unused - .code_len = 0, // unused - .flags = flags, - .source_map = .{ .empty = empty_map }, - }; - } - - fn jsCode(file: @This()) []const u8 { - assert(file.flags.kind.hasInlinejscodeChunk()); - return file.content.js_code.ptr[0..file.code_len]; - } - - fn freeJsCode(file: 
*@This()) void { - assert(file.flags.kind.hasInlinejscodeChunk()); - file.content.js_code.allocator.free(file.jsCode()); - } - - fn cssAssetId(file: @This()) u64 { - assert(file.flags.kind == .css); - return file.content.css_asset_id; - } - - inline fn stopsDependencyTrace(_: @This()) bool { - return false; - } - - pub fn fileKind(file: @This()) FileKind { - return file.flags.kind; - } - - fn sourceMap(file: @This()) PackedMap.RefOrEmpty { - return file.source_map.decode(file.flags.source_map_state); - } - - fn setSourceMap(file: *@This(), new_source_map: PackedMap.RefOrEmpty) void { - file.flags.source_map_state = new_source_map; - file.source_map = new_source_map.untag(); - } - }, + .server => ServerFile, + .client => ClientFile, }; - fn freeFileContent(g: *IncrementalGraph(.client), key: []const u8, file: *File, css: enum { unref_css, ignore_css }) void { - switch (file.flags.kind) { - .js, .asset => { - file.freeJsCode(); - switch (file.sourceMap()) { - .ref => |ptr| { - ptr.derefWithContext(g.owner()); - file.setSourceMap(.blank_empty); - }, - .empty => {}, - } + fn freeFileContent( + g: *Self, + key: []const u8, + file: *File, + css: enum { unref_css, ignore_css }, + ) void { + comptime { + bun.assertf(side == .client, "freeFileContent requires client graph", .{}); + } + if (file.source_map.take()) |ptr| { + ptr.deinit(); + } + defer file.content = .unknown; + switch (file.content) { + .js, .asset => |code| { + g.allocator().free(code); }, - .css => if (css == .unref_css) { + .css_root, .css_child => if (css == .unref_css) { g.owner().assets.unrefByPath(key); }, .unknown => {}, @@ -307,25 +358,29 @@ pub fn IncrementalGraph(side: bake.Side) type { /// An index into `edges` pub const EdgeIndex = bun.GenericIndex(u32, Edge); - pub fn deinit(g: *@This(), allocator: Allocator) void { - _ = VoidFieldTypes(@This()){ + pub fn deinit(g: *Self) void { + const alloc = g.allocator(); + useAllFields(Self, .{ .bundled_files = { - for (g.bundled_files.keys(), 
g.bundled_files.values()) |k, *v| { - allocator.free(k); - if (side == .client) - g.freeFileContent(k, v, .ignore_css); + for (g.bundled_files.keys(), g.bundled_files.values()) |k, v| { + alloc.free(k); + if (comptime side == .client) { + var file = v.unpack(); + g.freeFileContent(k, &file, .ignore_css); + } } - g.bundled_files.deinit(allocator); + g.bundled_files.deinit(alloc); }, - .stale_files = g.stale_files.deinit(allocator), - .first_dep = g.first_dep.deinit(allocator), - .first_import = g.first_import.deinit(allocator), - .edges = g.edges.deinit(allocator), - .edges_free_list = g.edges_free_list.deinit(allocator), + .stale_files = g.stale_files.deinit(alloc), + .first_dep = g.first_dep.deinit(alloc), + .first_import = g.first_import.deinit(alloc), + .edges = g.edges.deinit(alloc), + .edges_free_list = g.edges_free_list.deinit(alloc), .current_chunk_len = {}, - .current_chunk_parts = g.current_chunk_parts.deinit(allocator), - .current_css_files = if (side == .client) g.current_css_files.deinit(allocator), - }; + .current_chunk_parts = g.current_chunk_parts.deinit(alloc), + .current_css_files = if (comptime side == .client) + g.current_css_files.deinit(alloc), + }); } const MemoryCost = struct { @@ -334,8 +389,8 @@ pub fn IncrementalGraph(side: bake.Side) type { source_maps: usize, }; - /// Does NOT count @sizeOf(@This()) - pub fn memoryCostDetailed(g: *@This(), new_dedupe_bits: u32) @This().MemoryCost { + /// Does NOT count @sizeOf(Self) + pub fn memoryCostDetailed(g: *Self) MemoryCost { var graph: usize = 0; var code: usize = 0; var source_maps: usize = 0; @@ -346,16 +401,15 @@ pub fn IncrementalGraph(side: bake.Side) type { graph += DevServer.memoryCostArrayList(g.edges); graph += DevServer.memoryCostArrayList(g.edges_free_list); graph += DevServer.memoryCostArrayList(g.current_chunk_parts); - if (side == .client) { + if (comptime side == .client) { graph += DevServer.memoryCostArrayList(g.current_css_files); - for (g.bundled_files.values()) |*file| { - if 
(file.flags.kind.hasInlinejscodeChunk()) code += file.code_len; - switch (file.sourceMap()) { - .ref => |ptr| { - source_maps += ptr.data.memoryCostWithDedupe(new_dedupe_bits); - }, - .empty => {}, + for (g.bundled_files.values()) |packed_file| { + const file = packed_file.unpack(); + switch (file.content) { + .js, .asset => |code_slice| code += code_slice.len, + else => {}, } + source_maps += file.source_map.memoryCost(); } } return .{ @@ -365,21 +419,17 @@ pub fn IncrementalGraph(side: bake.Side) type { }; } - pub fn getFileIndex(g: *@This(), path: []const u8) ?FileIndex { + pub fn getFileIndex(g: *const Self, path: []const u8) ?FileIndex { return if (g.bundled_files.getIndex(path)) |i| FileIndex.init(@intCast(i)) else null; } /// Prefer calling .values() and indexing manually if accessing more than one - pub fn getFileByIndex(g: *@This(), index: FileIndex) File { - return g.bundled_files.values()[index.get()]; + pub fn getFileByIndex(g: *const Self, index: FileIndex) File { + return g.bundled_files.values()[index.get()].unpack(); } - pub fn htmlRouteBundleIndex(g: *@This(), index: FileIndex) RouteBundle.Index { - if (Environment.allow_assert) { - assert(g.bundled_files.values()[index.get()].flags.is_html_route); - } - return .init(@intCast((g.bundled_files.values()[index.get()].source_map.empty.html_bundle_route_index.unwrap() orelse - @panic("Internal assertion failure: HTML bundle not registered correctly")).get())); + pub fn htmlRouteBundleIndex(g: *const Self, index: FileIndex) RouteBundle.Index { + return g.getFileByIndex(index).html_route_bundle_index.?; } /// Tracks a bundled code chunk for cross-bundle chunks, @@ -391,19 +441,18 @@ pub fn IncrementalGraph(side: bake.Side) type { /// `current_chunk_parts` array, where it must live until /// takeJSBundle is called. Then it can be freed. 
pub fn receiveChunk( - g: *@This(), + g: *Self, ctx: *HotUpdateContext, index: bun.ast.Index, content: union(enum) { js: struct { - code: []const u8, - code_allocator: std.mem.Allocator, + code: JsCode, source_map: ?struct { chunk: SourceMap.Chunk, - escaped_source: ?[]u8, + escaped_source: Owned(?[]u8), }, }, - css: u64, + css: CssAssetId, }, is_ssr_graph: bool, ) !void { @@ -434,13 +483,13 @@ pub fn IncrementalGraph(side: bake.Side) type { DevServer.dumpBundleForChunk(dev, dump_dir, side, key, content.js.code, true, is_ssr_graph); }; - const gop = try g.bundled_files.getOrPut(dev.allocator, key); + const gop = try g.bundled_files.getOrPut(dev.allocator(), key); const file_index = FileIndex.init(@intCast(gop.index)); if (!gop.found_existing) { - gop.key_ptr.* = try dev.allocator.dupe(u8, key); - try g.first_dep.append(dev.allocator, .none); - try g.first_import.append(dev.allocator, .none); + gop.key_ptr.* = try dev.allocator().dupe(u8, key); + try g.first_dep.append(dev.allocator(), .none); + try g.first_import.append(dev.allocator(), .none); } if (g.stale_files.bit_length > gop.index) { @@ -451,79 +500,78 @@ pub fn IncrementalGraph(side: bake.Side) type { switch (side) { .client => { - var flags: File.Flags = .{ - .failed = false, - .is_hmr_root = ctx.server_to_client_bitset.isSet(index.get()), - .is_special_framework_file = false, - .is_html_route = false, - .is_css_root = content == .css, // non-root CSS files never get registered in this function - .kind = switch (content) { - .js => if (ctx.loaders[index.get()].isJavaScriptLike()) .js else .asset, - .css => .css, - }, - .source_map_state = .empty, - }; + var html_route_bundle_index: ?RouteBundle.Index = null; + var is_special_framework_file = false; + if (gop.found_existing) { + var existing = gop.value_ptr.unpack(); + // Free the original content + old source map - g.freeFileContent(key, gop.value_ptr, .ignore_css); + g.freeFileContent(key, &existing, .ignore_css); // Free a failure if it exists - if 
(gop.value_ptr.flags.failed) { + if (existing.failed) { const kv = dev.bundling_failures.fetchSwapRemoveAdapted( SerializedFailure.Owner{ .client = file_index }, SerializedFailure.ArrayHashAdapter{}, ) orelse Output.panic("Missing SerializedFailure in IncrementalGraph", .{}); try dev.incremental_result.failures_removed.append( - dev.allocator, + dev.allocator(), kv.key, ); } - // Persist some flags - flags.is_special_framework_file = gop.value_ptr.flags.is_special_framework_file; - flags.is_html_route = gop.value_ptr.flags.is_html_route; + // Persist some data + html_route_bundle_index = existing.html_route_bundle_index; + is_special_framework_file = existing.is_special_framework_file; } - switch (content) { - .css => |css| gop.value_ptr.* = .initCSS(css, flags), - .js => |js| { - // Insert new source map or patch existing empty source map. - const source_map: PackedMap.RefOrEmpty = brk: { + + gop.value_ptr.* = File.pack(&.{ + .content = switch (content) { + // non-root CSS files never get registered in this function + .css => |css| .{ .css_root = css }, + .js => |js| if (ctx.loaders[index.get()].isJavaScriptLike()) + .{ .js = js.code } + else + .{ .asset = js.code }, + }, + .source_map = switch (content) { + .css => .none, + .js => |js| blk: { + // Insert new source map or patch existing empty source map. 
if (js.source_map) |source_map| { - bun.debugAssert(!flags.is_html_route); // suspect behind #17956 - if (source_map.chunk.buffer.len() > 0) { - flags.source_map_state = .ref; - break :brk .{ .ref = PackedMap.newNonEmpty( - source_map.chunk, - source_map.escaped_source.?, + bun.assert(html_route_bundle_index == null); // suspect behind #17956 + var chunk = source_map.chunk; + var escaped_source = source_map.escaped_source; + if (chunk.buffer.len() > 0) { + break :blk .{ .some = PackedMap.newNonEmpty( + chunk, + escaped_source.take().?, ) }; } - var take = source_map.chunk.buffer; - take.deinit(); - if (source_map.escaped_source) |escaped_source| { - bun.default_allocator.free(escaped_source); - } + chunk.buffer.deinit(); + escaped_source.deinit(); } // Must precompute this. Otherwise, source maps won't have // the info needed to concatenate VLQ mappings. const count: u32 = @intCast(bun.strings.countChar(js.code, '\n')); - break :brk .{ .empty = .{ - .line_count = .init(count), - .html_bundle_route_index = if (flags.is_html_route) ri: { - assert(gop.found_existing); - assert(gop.value_ptr.flags.source_map_state == .empty); - break :ri gop.value_ptr.source_map.empty.html_bundle_route_index; - } else .none, - } }; - }; - - gop.value_ptr.* = .initJavaScript(js.code, js.code_allocator, flags, source_map); + break :blk .{ .line_count = .init(count) }; + }, + }, + .html_route_bundle_index = html_route_bundle_index, + .is_hmr_root = ctx.server_to_client_bitset.isSet(index.get()), + .is_special_framework_file = is_special_framework_file, + }); + switch (content) { + .js => |js| { // Track JavaScript chunks for concatenation - try g.current_chunk_parts.append(dev.allocator, file_index); + try g.current_chunk_parts.append(dev.allocator(), file_index); g.current_chunk_len += js.code.len; }, + else => {}, } }, .server => { @@ -543,7 +591,7 @@ pub fn IncrementalGraph(side: bake.Side) type { }; if (client_component_boundary) { - try 
dev.incremental_result.client_components_added.append(dev.allocator, file_index); + try dev.incremental_result.client_components_added.append(dev.allocator(), file_index); } } else { gop.value_ptr.kind = switch (content) { @@ -559,7 +607,7 @@ pub fn IncrementalGraph(side: bake.Side) type { if (ctx.server_to_client_bitset.isSet(index.get())) { gop.value_ptr.is_client_component_boundary = true; - try dev.incremental_result.client_components_added.append(dev.allocator, file_index); + try dev.incremental_result.client_components_added.append(dev.allocator(), file_index); } else if (gop.value_ptr.is_client_component_boundary) { const client_graph = &g.owner().client_graph; const client_index = client_graph.getFileIndex(gop.key_ptr.*) orelse @@ -567,7 +615,7 @@ pub fn IncrementalGraph(side: bake.Side) type { client_graph.disconnectAndDeleteFile(client_index); gop.value_ptr.is_client_component_boundary = false; - try dev.incremental_result.client_components_removed.append(dev.allocator, file_index); + try dev.incremental_result.client_components_removed.append(dev.allocator(), file_index); } if (gop.value_ptr.failed) { @@ -578,20 +626,18 @@ pub fn IncrementalGraph(side: bake.Side) type { ) orelse Output.panic("Missing failure in IncrementalGraph", .{}); try dev.incremental_result.failures_removed.append( - dev.allocator, + dev.allocator(), kv.key, ); } } if (content == .js) { - try g.current_chunk_parts.append(dev.allocator, content.js.code); + try g.current_chunk_parts.append(dev.allocator(), content.js.code); g.current_chunk_len += content.js.code.len; if (content.js.source_map) |source_map| { - var take = source_map.chunk.buffer; - take.deinit(); - if (source_map.escaped_source) |escaped_source| { - bun.default_allocator.free(escaped_source); - } + var buffer = source_map.chunk.buffer; + buffer.deinit(); + source_map.escaped_source.deinit(); } } }, @@ -609,7 +655,7 @@ pub fn IncrementalGraph(side: bake.Side) type { /// - Updates dependency information for each file /// 
- Resolves what the HMR roots are pub fn processChunkDependencies( - g: *@This(), + g: *Self, ctx: *HotUpdateContext, comptime mode: enum { normal, css }, bundle_graph_index: bun.ast.Index, @@ -656,7 +702,7 @@ pub fn IncrementalGraph(side: bake.Side) type { if (mode == .normal and side == .server) { if (ctx.server_seen_bit_set.isSet(file_index.get())) return; - const file = &g.bundled_files.values()[file_index.get()]; + const file = g.getFileByIndex(file_index); // Process both files in the server-components graph at the same // time. If they were done separately, the second would detach @@ -698,7 +744,7 @@ pub fn IncrementalGraph(side: bake.Side) type { } } - if (side == .server) { + if (comptime side == .server) { // Follow this file to the route to mark it as stale. try g.traceDependencies(file_index, ctx.gts, .stop_at_boundary, file_index); } else { @@ -713,7 +759,7 @@ pub fn IncrementalGraph(side: bake.Side) type { /// /// DO NOT ONLY CALL THIS FUNCTION TO TRY TO DELETE AN EDGE, YOU MUST DELETE /// THE IMPORTS TOO! 
- fn disconnectEdgeFromDependencyList(g: *@This(), edge_index: EdgeIndex) void { + fn disconnectEdgeFromDependencyList(g: *Self, edge_index: EdgeIndex) void { const edge = &g.edges.items[edge_index.get()]; const imported = edge.imported.get(); const log = bun.Output.scoped(.disconnectEdgeFromDependencyList, .hidden); @@ -752,7 +798,7 @@ pub fn IncrementalGraph(side: bake.Side) type { } fn processCSSChunkImportRecords( - g: *@This(), + g: *Self, ctx: *HotUpdateContext, temp_alloc: Allocator, quick_lookup: *TempLookup.HashTable, @@ -789,7 +835,7 @@ pub fn IncrementalGraph(side: bake.Side) type { } fn processEdgeAttachment( - g: *@This(), + g: *Self, ctx: *HotUpdateContext, temp_alloc: Allocator, quick_lookup: *TempLookup.HashTable, @@ -912,7 +958,7 @@ pub fn IncrementalGraph(side: bake.Side) type { } fn processChunkImportRecords( - g: *@This(), + g: *Self, ctx: *HotUpdateContext, temp_alloc: Allocator, quick_lookup: *TempLookup.HashTable, @@ -1003,7 +1049,7 @@ pub fn IncrementalGraph(side: bake.Side) type { }; pub fn traceDependencies( - g: *@This(), + g: *Self, file_index: FileIndex, gts: *GraphTraceState, goal: TraceDependencyGoal, @@ -1023,7 +1069,7 @@ pub fn IncrementalGraph(side: bake.Side) type { return; gts.bits(side).set(file_index.get()); - const file = g.bundled_files.values()[file_index.get()]; + const file = g.getFileByIndex(file_index); switch (side) { .server => { @@ -1033,32 +1079,30 @@ pub fn IncrementalGraph(side: bake.Side) type { Output.panic("Route not in lookup index: {d} {}", .{ file_index.get(), bun.fmt.quote(g.bundled_files.keys()[file_index.get()]) }); igLog("\\<- Route", .{}); - try dev.incremental_result.framework_routes_affected.append(dev.allocator, route_index); + try dev.incremental_result.framework_routes_affected.append(dev.allocator(), route_index); } if (file.is_client_component_boundary) { - try dev.incremental_result.client_components_affected.append(dev.allocator, file_index); + try 
dev.incremental_result.client_components_affected.append(dev.allocator(), file_index); } }, .client => { const dev = g.owner(); - if (file.flags.is_hmr_root) { + if (file.is_hmr_root) { const key = g.bundled_files.keys()[file_index.get()]; const index = dev.server_graph.getFileIndex(key) orelse Output.panic("Server Incremental Graph is missing component for {}", .{bun.fmt.quote(key)}); try dev.server_graph.traceDependencies(index, gts, goal, index); - } else if (file.flags.is_html_route) { - const route_bundle_index = dev.client_graph.htmlRouteBundleIndex(file_index); - + } else if (file.html_route_bundle_index) |route_bundle_index| { // If the HTML file itself was modified, or an asset was // modified, this must be a hard reload. Otherwise just // invalidate the script tag. const list = if (from_file_index == file_index or - g.bundled_files.values()[from_file_index.get()].flags.kind == .asset) + g.getFileByIndex(from_file_index).content == .asset) &dev.incremental_result.html_routes_hard_affected else &dev.incremental_result.html_routes_soft_affected; - try list.append(dev.allocator, route_bundle_index); + try list.append(dev.allocator(), route_bundle_index); if (goal == .stop_at_boundary) return; @@ -1090,7 +1134,7 @@ pub fn IncrementalGraph(side: bake.Side) type { } } - pub fn traceImports(g: *@This(), file_index: FileIndex, gts: *GraphTraceState, comptime goal: DevServer.TraceImportGoal) !void { + pub fn traceImports(g: *Self, file_index: FileIndex, gts: *GraphTraceState, comptime goal: DevServer.TraceImportGoal) !void { g.owner().graph_safety_lock.assertLocked(); if (Environment.enable_logs) { @@ -1106,9 +1150,9 @@ pub fn IncrementalGraph(side: bake.Side) type { return; gts.bits(side).set(file_index.get()); - const file = g.bundled_files.values()[file_index.get()]; + const file = g.getFileByIndex(file_index); - switch (side) { + switch (comptime side) { .server => { if (file.is_client_component_boundary or file.kind == .css) { const dev = g.owner(); @@ 
-1129,36 +1173,40 @@ pub fn IncrementalGraph(side: bake.Side) type { SerializedFailure.ArrayHashAdapter{}, ) orelse @panic("Failed to get bundling failure"); - try g.owner().incremental_result.failures_added.append(g.owner().allocator, fail); + try g.owner().incremental_result.failures_added.append(g.allocator(), fail); } }, .client => { - if (file.flags.kind == .css) { - // It is only possible to find CSS roots by tracing. - bun.debugAssert(file.flags.is_css_root); + switch (file.content) { + .css_child => { + bun.assertf(false, "only CSS roots should be found by tracing", .{}); + }, + .css_root => |id| { + if (goal == .find_css) { + try g.current_css_files.append(g.allocator(), id); + } - if (goal == .find_css) { - try g.current_css_files.append(g.owner().allocator, file.cssAssetId()); - } - - // See the comment on `is_css_root` on how CSS roots - // have a slightly different meaning for their assets. - // Regardless, CSS can't import JS, so this trace is done. - return; + // See the comment on `Content.css_root` on how CSS roots + // have a slightly different meaning for their assets. + // Regardless, CSS can't import JS, so this trace is done. + return; + }, + else => {}, } if (goal == .find_client_modules) { - try g.current_chunk_parts.append(g.owner().allocator, file_index); - g.current_chunk_len += file.code_len; + try g.current_chunk_parts.append(g.allocator(), file_index); + // TODO: will `file.jsCode` ever return null here? 
+ g.current_chunk_len += if (file.jsCode()) |code| code.len else 0; } - if (goal == .find_errors and file.flags.failed) { + if (goal == .find_errors and file.failed) { const fail = g.owner().bundling_failures.getKeyAdapted( SerializedFailure.Owner{ .client = file_index }, SerializedFailure.ArrayHashAdapter{}, ) orelse @panic("Failed to get bundling failure"); - try g.owner().incremental_result.failures_added.append(g.owner().allocator, fail); + try g.owner().incremental_result.failures_added.append(g.allocator(), fail); return; } }, @@ -1175,26 +1223,27 @@ pub fn IncrementalGraph(side: bake.Side) type { /// Never takes ownership of `abs_path` /// Marks a chunk but without any content. Used to track dependencies to files that don't exist. - pub fn insertStale(g: *@This(), abs_path: []const u8, is_ssr_graph: bool) bun.OOM!FileIndex { + pub fn insertStale(g: *Self, abs_path: []const u8, is_ssr_graph: bool) bun.OOM!FileIndex { return g.insertStaleExtra(abs_path, is_ssr_graph, false); } - pub fn insertStaleExtra(g: *@This(), abs_path: []const u8, is_ssr_graph: bool, is_route: bool) bun.OOM!FileIndex { + // TODO: `is_route` is unused in client graph + pub fn insertStaleExtra(g: *Self, abs_path: []const u8, is_ssr_graph: bool, is_route: bool) bun.OOM!FileIndex { g.owner().graph_safety_lock.assertLocked(); - const dev_allocator = g.owner().allocator; + const dev_alloc = g.allocator(); debug.log("Insert stale: {s}", .{abs_path}); - const gop = try g.bundled_files.getOrPut(dev_allocator, abs_path); + const gop = try g.bundled_files.getOrPut(dev_alloc, abs_path); const file_index = FileIndex.init(@intCast(gop.index)); - if (!gop.found_existing) { - gop.key_ptr.* = try dev_allocator.dupe(u8, abs_path); - try g.first_dep.append(dev_allocator, .none); - try g.first_import.append(dev_allocator, .none); - } else { - if (side == .server) { - if (is_route) gop.value_ptr.*.is_route = true; + if (gop.found_existing) { + if (side == .server and is_route) { + gop.value_ptr.is_route = 
true; } + } else { + gop.key_ptr.* = try dev_alloc.dupe(u8, abs_path); + try g.first_dep.append(dev_alloc, .none); + try g.first_import.append(dev_alloc, .none); } if (g.stale_files.bit_length > gop.index) { @@ -1203,27 +1252,13 @@ pub fn IncrementalGraph(side: bake.Side) type { switch (side) { .client => { - var flags: File.Flags = .{ - .failed = false, - .is_hmr_root = false, - .is_special_framework_file = false, - .is_html_route = is_route, - .is_css_root = false, - .source_map_state = .empty, - .kind = .unknown, - }; - var source_map = PackedMap.RefOrEmpty.blank_empty.empty; - if (gop.found_existing) { - g.freeFileContent(gop.key_ptr.*, gop.value_ptr, .unref_css); - - flags.is_html_route = flags.is_html_route or gop.value_ptr.flags.is_html_route; - flags.failed = gop.value_ptr.flags.failed; - flags.is_special_framework_file = gop.value_ptr.flags.is_special_framework_file; - flags.is_hmr_root = gop.value_ptr.flags.is_hmr_root; - flags.is_css_root = gop.value_ptr.flags.is_css_root; - source_map = gop.value_ptr.source_map.empty; - } - gop.value_ptr.* = File.initUnknown(flags, source_map); + const new_file: File = if (gop.found_existing) blk: { + var existing = gop.value_ptr.unpack(); + // sets .content to .unknown + g.freeFileContent(gop.key_ptr.*, &existing, .unref_css); + break :blk existing; + } else .{ .content = .unknown }; + gop.value_ptr.* = new_file.pack(); }, .server => { if (!gop.found_existing) { @@ -1247,25 +1282,24 @@ pub fn IncrementalGraph(side: bake.Side) type { } /// Returns the key that was inserted. 
- pub fn insertEmpty(g: *@This(), abs_path: []const u8, kind: FileKind) bun.OOM!struct { + pub fn insertEmpty(g: *Self, abs_path: []const u8, kind: FileKind) bun.OOM!struct { index: FileIndex, key: []const u8, } { g.owner().graph_safety_lock.assertLocked(); - const dev_allocator = g.owner().allocator; - const gop = try g.bundled_files.getOrPut(dev_allocator, abs_path); + const dev_alloc = g.allocator(); + const gop = try g.bundled_files.getOrPut(dev_alloc, abs_path); if (!gop.found_existing) { - gop.key_ptr.* = try dev_allocator.dupe(u8, abs_path); + gop.key_ptr.* = try dev_alloc.dupe(u8, abs_path); gop.value_ptr.* = switch (side) { - .client => File.initUnknown(.{ - .failed = false, - .is_hmr_root = false, - .is_special_framework_file = false, - .is_html_route = false, - .is_css_root = false, - .source_map_state = .empty, - .kind = kind, - }, PackedMap.RefOrEmpty.blank_empty.empty), + .client => File.pack(&.{ + .content = switch (kind) { + .unknown => .unknown, + .js => .{ .js = "" }, + .asset => .{ .asset = "" }, + .css => .css_child, + }, + }), .server => .{ .is_rsc = false, .is_ssr = false, @@ -1275,8 +1309,8 @@ pub fn IncrementalGraph(side: bake.Side) type { .kind = kind, }, }; - try g.first_dep.append(dev_allocator, .none); - try g.first_import.append(dev_allocator, .none); + try g.first_dep.append(dev_alloc, .none); + try g.first_import.append(dev_alloc, .none); try g.ensureStaleBitCapacity(true); } return .{ .index = .init(@intCast(gop.index)), .key = gop.key_ptr.* }; @@ -1284,18 +1318,18 @@ pub fn IncrementalGraph(side: bake.Side) type { /// Server CSS files are just used to be targets for graph traversal. /// Its content lives only on the client. 
- pub fn insertCssFileOnServer(g: *@This(), ctx: *HotUpdateContext, index: bun.ast.Index, abs_path: []const u8) bun.OOM!void { + pub fn insertCssFileOnServer(g: *Self, ctx: *HotUpdateContext, index: bun.ast.Index, abs_path: []const u8) bun.OOM!void { g.owner().graph_safety_lock.assertLocked(); - const dev_allocator = g.owner().allocator; + const dev_alloc = g.allocator(); debug.log("Insert stale: {s}", .{abs_path}); - const gop = try g.bundled_files.getOrPut(dev_allocator, abs_path); + const gop = try g.bundled_files.getOrPut(dev_alloc, abs_path); const file_index: FileIndex = .init(@intCast(gop.index)); if (!gop.found_existing) { - gop.key_ptr.* = try dev_allocator.dupe(u8, abs_path); - try g.first_dep.append(dev_allocator, .none); - try g.first_import.append(dev_allocator, .none); + gop.key_ptr.* = try dev_alloc.dupe(u8, abs_path); + try g.first_dep.append(dev_alloc, .none); + try g.first_import.append(dev_alloc, .none); } switch (side) { @@ -1314,7 +1348,7 @@ pub fn IncrementalGraph(side: bake.Side) type { } pub fn insertFailure( - g: *@This(), + g: *Self, comptime mode: enum { abs_path, index }, key: switch (mode) { .abs_path => []const u8, @@ -1325,14 +1359,14 @@ pub fn IncrementalGraph(side: bake.Side) type { ) bun.OOM!void { g.owner().graph_safety_lock.assertLocked(); - const dev_allocator = g.owner().allocator; + const dev_alloc = g.allocator(); - const Gop = std.StringArrayHashMapUnmanaged(File).GetOrPutResult; + const Gop = bun.StringArrayHashMapUnmanaged(File.Packed).GetOrPutResult; // found_existing is destructured separately so that it is // comptime-known true when mode == .index const gop: Gop, const found_existing, const file_index = switch (mode) { .abs_path => brk: { - const gop = try g.bundled_files.getOrPut(dev_allocator, key); + const gop = try g.bundled_files.getOrPut(dev_alloc, key); break :brk .{ gop, gop.found_existing, FileIndex.init(@intCast(gop.index)) }; }, // When given an index, no fetch is needed. 
@@ -1353,9 +1387,9 @@ pub fn IncrementalGraph(side: bake.Side) type { if (!found_existing) { comptime assert(mode == .abs_path); - gop.key_ptr.* = try dev_allocator.dupe(u8, key); - try g.first_dep.append(dev_allocator, .none); - try g.first_import.append(dev_allocator, .none); + gop.key_ptr.* = try dev_alloc.dupe(u8, key); + try g.first_dep.append(dev_alloc, .none); + try g.first_import.append(dev_alloc, .none); } try g.ensureStaleBitCapacity(true); @@ -1363,25 +1397,14 @@ pub fn IncrementalGraph(side: bake.Side) type { switch (side) { .client => { - var flags: File.Flags = .{ - .failed = true, - .is_hmr_root = false, - .is_special_framework_file = false, - .is_html_route = false, - .is_css_root = false, - .kind = .unknown, - .source_map_state = .empty, - }; - var source_map = PackedMap.RefOrEmpty.blank_empty.empty; - if (found_existing) { - g.freeFileContent(gop.key_ptr.*, gop.value_ptr, .unref_css); - flags.is_html_route = gop.value_ptr.flags.is_html_route; - flags.is_special_framework_file = gop.value_ptr.flags.is_special_framework_file; - flags.is_hmr_root = gop.value_ptr.flags.is_hmr_root; - flags.is_css_root = gop.value_ptr.flags.is_css_root; - source_map = gop.value_ptr.source_map.empty; - } - gop.value_ptr.* = File.initUnknown(flags, source_map); + var new_file: File = if (found_existing) blk: { + var existing = gop.value_ptr.unpack(); + // sets .content to .unknown + g.freeFileContent(gop.key_ptr.*, &existing, .unref_css); + break :blk existing; + } else .{ .content = .unknown }; + new_file.failed = true; + gop.value_ptr.* = new_file.pack(); }, .server => { if (!gop.found_existing) { @@ -1425,15 +1448,15 @@ pub fn IncrementalGraph(side: bake.Side) type { log.msgs.items, ); }; - const fail_gop = try dev.bundling_failures.getOrPut(dev.allocator, failure); - try dev.incremental_result.failures_added.append(dev.allocator, failure); + const fail_gop = try dev.bundling_failures.getOrPut(dev.allocator(), failure); + try 
dev.incremental_result.failures_added.append(dev.allocator(), failure); if (fail_gop.found_existing) { - try dev.incremental_result.failures_removed.append(dev.allocator, fail_gop.key_ptr.*); + try dev.incremental_result.failures_removed.append(dev.allocator(), fail_gop.key_ptr.*); fail_gop.key_ptr.* = failure; } } - pub fn onFileDeleted(g: *@This(), abs_path: []const u8, bv2: *bun.BundleV2) void { + pub fn onFileDeleted(g: *Self, abs_path: []const u8, bv2: *bun.BundleV2) void { const index = g.getFileIndex(abs_path) orelse return; const keys = g.bundled_files.keys(); @@ -1479,9 +1502,9 @@ pub fn IncrementalGraph(side: bake.Side) type { } } - pub fn ensureStaleBitCapacity(g: *@This(), are_new_files_stale: bool) !void { + pub fn ensureStaleBitCapacity(g: *Self, are_new_files_stale: bool) !void { try g.stale_files.resize( - g.owner().allocator, + g.allocator(), std.mem.alignForward( usize, @max(g.bundled_files.count(), g.stale_files.bit_length), @@ -1495,7 +1518,7 @@ pub fn IncrementalGraph(side: bake.Side) type { /// Given a set of paths, mark the relevant files as stale and append /// them into `entry_points`. This is called whenever a file is changed, /// and a new bundle has to be run. 
- pub fn invalidate(g: *@This(), paths: []const []const u8, entry_points: *EntryPointList, alloc: Allocator) !void { + pub fn invalidate(g: *Self, paths: []const []const u8, entry_points: *EntryPointList, alloc: Allocator) !void { g.owner().graph_safety_lock.assertLocked(); const keys = g.bundled_files.keys(); const values = g.bundled_files.values(); @@ -1507,11 +1530,11 @@ pub fn IncrementalGraph(side: bake.Side) type { continue; }; g.stale_files.set(index); - const data = &values[index]; + const data = values[index].unpack(); switch (side) { - .client => switch (data.flags.kind) { - .css => { - if (data.flags.is_css_root) { + .client => switch (data.content) { + .css_root, .css_child => { + if (data.content == .css_root) { try entry_points.appendCss(alloc, path); } @@ -1521,8 +1544,8 @@ pub fn IncrementalGraph(side: bake.Side) type { const dep = entry.dependency; g.stale_files.set(dep.get()); - const dep_file = values[dep.get()]; - if (dep_file.flags.is_css_root) { + const dep_file = values[dep.get()].unpack(); + if (dep_file.content == .css_root) { try entry_points.appendCss(alloc, keys[dep.get()]); } @@ -1536,7 +1559,7 @@ pub fn IncrementalGraph(side: bake.Side) type { const dep = entry.dependency; g.stale_files.set(dep.get()); - const dep_file = values[dep.get()]; + const dep_file = values[dep.get()].unpack(); // Assets violate the "do not reprocess // unchanged files" rule by reprocessing ALL // dependencies, instead of just the CSS roots. @@ -1546,7 +1569,7 @@ pub fn IncrementalGraph(side: bake.Side) type { // asset URL. Additionally, it is currently seen // as a bit nicer in HMR to do this for all JS // files, though that could be reconsidered. - if (dep_file.flags.is_css_root) { + if (dep_file.content == .css_root) { try entry_points.appendCss(alloc, keys[dep.get()]); } else { try entry_points.appendJs(alloc, keys[dep.get()], .client); @@ -1560,7 +1583,7 @@ pub fn IncrementalGraph(side: bake.Side) type { // When re-bundling SCBs, only bundle the server. 
Otherwise // the bundler gets confused and bundles both sides without // knowledge of the boundary between them. - .js, .unknown => if (!data.flags.is_hmr_root) { + .js, .unknown => if (!data.is_hmr_root) { try entry_points.appendJs(alloc, path, .client); }, }, @@ -1574,11 +1597,13 @@ pub fn IncrementalGraph(side: bake.Side) type { } } - pub fn reset(g: *@This()) void { + pub fn reset(g: *Self) void { g.owner().graph_safety_lock.assertLocked(); g.current_chunk_len = 0; g.current_chunk_parts.clearRetainingCapacity(); - if (side == .client) g.current_css_files.clearRetainingCapacity(); + if (comptime side == .client) { + g.current_css_files.clearRetainingCapacity(); + } } const TakeJSBundleOptions = switch (side) { @@ -1595,17 +1620,17 @@ pub fn IncrementalGraph(side: bake.Side) type { }; pub fn takeJSBundle( - g: *@This(), + g: *Self, options: *const TakeJSBundleOptions, ) ![]u8 { - var chunk = std.ArrayList(u8).init(g.owner().allocator); + var chunk = std.ArrayList(u8).init(g.allocator()); try g.takeJSBundleToList(&chunk, options); bun.assert(chunk.items.len == chunk.capacity); return chunk.items; } pub fn takeJSBundleToList( - g: *@This(), + g: *Self, list: *std.ArrayList(u8), options: *const TakeJSBundleOptions, ) !void { @@ -1627,14 +1652,14 @@ pub fn IncrementalGraph(side: bake.Side) type { // to inform the HMR runtime some crucial entry-point info. The // exact upper bound of this can be calculated, but is not to // avoid worrying about windows paths. 
- var end_sfa = std.heap.stackFallback(65536, g.owner().allocator); + var end_sfa = std.heap.stackFallback(65536, g.allocator()); var end_list = std.ArrayList(u8).initCapacity(end_sfa.get(), 65536) catch unreachable; defer end_list.deinit(); const end = end: { const w = end_list.writer(); switch (kind) { .initial_response => { - if (side == .server) @panic("unreachable"); + if (comptime side == .server) @panic("unreachable"); try w.writeAll("}, {\n main: "); const initial_response_entry_point = options.initial_response_entry_point; if (initial_response_entry_point.len > 0) { @@ -1684,7 +1709,7 @@ pub fn IncrementalGraph(side: bake.Side) type { .server => try w.writeAll("})"), }, } - if (side == .client) { + if (comptime side == .client) { try w.writeAll("\n//# sourceMappingURL=" ++ DevServer.client_prefix ++ "/"); try w.writeAll(&std.fmt.bytesToHex(std.mem.asBytes(&options.script_id), .lower)); try w.writeAll(".js.map\n"); @@ -1704,7 +1729,7 @@ pub fn IncrementalGraph(side: bake.Side) type { for (g.current_chunk_parts.items) |entry| { list.appendSliceAssumeCapacity(switch (side) { // entry is an index into files - .client => files[entry.get()].jsCode(), + .client => files[entry.get()].unpack().jsCode().?, // entry is the '[]const u8' itself .server => entry, }); @@ -1733,8 +1758,8 @@ pub fn IncrementalGraph(side: bake.Side) type { }; /// Uses `arena` as a temporary allocator, fills in all fields of `out` except ref_count - pub fn takeSourceMap(g: *@This(), arena: std.mem.Allocator, gpa: Allocator, out: *SourceMapStore.Entry) bun.OOM!void { - if (side == .server) @compileError("not implemented"); + pub fn takeSourceMap(g: *Self, arena: std.mem.Allocator, gpa: Allocator, out: *SourceMapStore.Entry) bun.OOM!void { + if (comptime side == .server) @compileError("not implemented"); const paths = g.bundled_files.keys(); const files = g.bundled_files.values(); @@ -1748,32 +1773,34 @@ pub fn IncrementalGraph(side: bake.Side) type { var file_paths = try 
ArrayListUnmanaged([]const u8).initCapacity(gpa, g.current_chunk_parts.items.len); errdefer file_paths.deinit(gpa); - var contained_maps: bun.MultiArrayList(PackedMap.RefOrEmpty) = .empty; + var contained_maps: bun.MultiArrayList(PackedMap.Shared) = .empty; try contained_maps.ensureTotalCapacity(gpa, g.current_chunk_parts.items.len); errdefer contained_maps.deinit(gpa); - var overlapping_memory_cost: u32 = 0; + var overlapping_memory_cost: usize = 0; for (g.current_chunk_parts.items) |file_index| { file_paths.appendAssumeCapacity(paths[file_index.get()]); - const source_map = files[file_index.get()].sourceMap(); - contained_maps.appendAssumeCapacity(source_map.dupeRef()); - if (source_map == .ref) { - overlapping_memory_cost += @intCast(source_map.ref.data.memoryCost()); + const source_map = files[file_index.get()].unpack().source_map.clone(); + if (source_map.get()) |map| { + overlapping_memory_cost += map.memoryCost(); } + contained_maps.appendAssumeCapacity(source_map); } - overlapping_memory_cost += @intCast(contained_maps.memoryCost() + DevServer.memoryCostSlice(file_paths.items)); + overlapping_memory_cost += contained_maps.memoryCost() + DevServer.memoryCostSlice(file_paths.items); + const ref_count = out.ref_count; out.* = .{ - .ref_count = out.ref_count, + .dev_allocator = g.dev_allocator(), + .ref_count = ref_count, .paths = file_paths.items, .files = contained_maps, - .overlapping_memory_cost = overlapping_memory_cost, + .overlapping_memory_cost = @intCast(overlapping_memory_cost), }; } - fn disconnectAndDeleteFile(g: *@This(), file_index: FileIndex) void { + fn disconnectAndDeleteFile(g: *Self, file_index: FileIndex) void { bun.assert(g.first_dep.items[file_index.get()] == .none); // must have no dependencies // Disconnect all imports @@ -1796,7 +1823,7 @@ pub fn IncrementalGraph(side: bake.Side) type { const keys = g.bundled_files.keys(); - g.owner().allocator.free(keys[file_index.get()]); + g.allocator().free(keys[file_index.get()]); 
keys[file_index.get()] = ""; // cannot be `undefined` as it may be read by hashmap logic assert_eql(g.first_dep.items[file_index.get()], .none); @@ -1808,20 +1835,20 @@ pub fn IncrementalGraph(side: bake.Side) type { // go in a free-list for use by new files. } - fn newEdge(g: *@This(), edge: Edge) !EdgeIndex { + fn newEdge(g: *Self, edge: Edge) !EdgeIndex { if (g.edges_free_list.pop()) |index| { g.edges.items[index.get()] = edge; return index; } const index = EdgeIndex.init(@intCast(g.edges.items.len)); - try g.edges.append(g.owner().allocator, edge); + try g.edges.append(g.allocator(), edge); return index; } /// Does nothing besides release the `Edge` for reallocation by `newEdge` /// Caller must detach the dependency from the linked list it is in. - fn freeEdge(g: *@This(), edge_index: EdgeIndex) void { + fn freeEdge(g: *Self, edge_index: EdgeIndex) void { igLog("IncrementalGraph(0x{x}, {s}).freeEdge({d})", .{ @intFromPtr(g), @tagName(side), edge_index.get() }); defer g.checkEdgeRemoval(edge_index); if (Environment.isDebug) { @@ -1831,7 +1858,7 @@ pub fn IncrementalGraph(side: bake.Side) type { if (edge_index.get() == (g.edges.items.len - 1)) { g.edges.items.len -= 1; } else { - g.edges_free_list.append(g.owner().allocator, edge_index) catch { + g.edges_free_list.append(g.allocator(), edge_index) catch { // Leak an edge object; Ok since it may get cleaned up by // the next incremental graph garbage-collection cycle. }; @@ -1845,7 +1872,7 @@ pub fn IncrementalGraph(side: bake.Side) type { /// /// So we'll check it manually by making sure there are no references to /// `edge_index` in the graph. 
- fn checkEdgeRemoval(g: *@This(), edge_index: EdgeIndex) void { + fn checkEdgeRemoval(g: *Self, edge_index: EdgeIndex) void { // Enable this on any builds with asan enabled so we can catch stuff // in CI too const enabled = bun.asan.enabled or bun.Environment.ci_assert; @@ -1881,9 +1908,17 @@ pub fn IncrementalGraph(side: bake.Side) type { } } - pub fn owner(g: *@This()) *DevServer { + pub fn owner(g: *Self) *DevServer { return @alignCast(@fieldParentPtr(@tagName(side) ++ "_graph", g)); } + + fn dev_allocator(g: *Self) DevAllocator { + return g.owner().dev_allocator(); + } + + fn allocator(g: *Self) Allocator { + return g.dev_allocator().get(); + } }; } @@ -1895,27 +1930,33 @@ const assert_eql = bun.assert_eql; const bake = bun.bake; const DynamicBitSetUnmanaged = bun.bit_set.DynamicBitSetUnmanaged; const Log = bun.logger.Log; -const VoidFieldTypes = bun.meta.VoidFieldTypes; +const useAllFields = bun.meta.useAllFields; const DevServer = bake.DevServer; const ChunkKind = DevServer.ChunkKind; +const DevAllocator = DevServer.DevAllocator; const EntryPointList = DevServer.EntryPointList; const FileKind = DevServer.FileKind; const GraphTraceState = DevServer.GraphTraceState; const HotUpdateContext = DevServer.HotUpdateContext; -const PackedMap = DevServer.PackedMap; const RouteBundle = DevServer.RouteBundle; const SerializedFailure = DevServer.SerializedFailure; const SourceMapStore = DevServer.SourceMapStore; const debug = DevServer.debug; const igLog = DevServer.igLog; +const PackedMap = DevServer.PackedMap; +const LineCount = PackedMap.LineCount; + const FrameworkRouter = bake.FrameworkRouter; const Route = FrameworkRouter.Route; const BundleV2 = bun.bundle_v2.BundleV2; const Chunk = bun.bundle_v2.Chunk; +const Owned = bun.ptr.Owned; +const Shared = bun.ptr.Shared; + const SourceMap = bun.sourcemap; const VLQ = SourceMap.VLQ; diff --git a/src/bake/DevServer/PackedMap.zig b/src/bake/DevServer/PackedMap.zig index c53431db5d..1626f3cf25 100644 --- 
a/src/bake/DevServer/PackedMap.zig +++ b/src/bake/DevServer/PackedMap.zig @@ -1,23 +1,14 @@ -/// Packed source mapping data for a single file. -/// Owned by one IncrementalGraph file and/or multiple SourceMapStore entries. -pub const PackedMap = @This(); +//! Packed source mapping data for a single file. +//! Owned by one IncrementalGraph file and/or multiple SourceMapStore entries. +const Self = @This(); -const RefCount = bun.ptr.RefCount(@This(), "ref_count", destroy, .{ - .destructor_ctx = *DevServer, -}); - -ref_count: RefCount, -/// Allocated by `dev.allocator`. Access with `.vlq()` +/// Allocated by `dev.allocator()`. Access with `.vlq()` /// This is stored to allow lazy construction of source map files. -vlq_ptr: [*]u8, -vlq_len: u32, -vlq_allocator: std.mem.Allocator, +vlq_: ScopedOwned([]u8), /// The bundler runs quoting on multiple threads, so it only makes /// sense to preserve that effort for concatenation and /// re-concatenation. -// TODO: rename to `escaped_source_*` -quoted_contents_ptr: [*]u8, -quoted_contents_len: u32, +escaped_source: Owned([]u8), /// Used to track the last state of the source map chunk. This /// is used when concatenating chunks. The generated column is /// not tracked because it is always zero (all chunks end in a @@ -27,22 +18,13 @@ end_state: struct { original_line: i32, original_column: i32, }, -/// There is 32 bits of extra padding in this struct. These are used while -/// implementing `DevServer.memoryCost` to check which PackedMap entries are -/// already counted for. 
-bits_used_for_memory_cost_dedupe: u32 = 0, -pub fn newNonEmpty(chunk: SourceMap.Chunk, quoted_contents: []u8) bun.ptr.RefPtr(PackedMap) { - assert(chunk.buffer.list.items.len > 0); +pub fn newNonEmpty(chunk: SourceMap.Chunk, escaped_source: Owned([]u8)) bun.ptr.Shared(*Self) { var buffer = chunk.buffer; - const slice = buffer.toOwnedSlice(); + assert(!buffer.isEmpty()); return .new(.{ - .ref_count = .init(), - .vlq_ptr = slice.ptr, - .vlq_len = @intCast(slice.len), - .vlq_allocator = buffer.allocator, - .quoted_contents_ptr = quoted_contents.ptr, - .quoted_contents_len = @intCast(quoted_contents.len), + .vlq_ = .fromDynamic(buffer.toDynamicOwned()), + .escaped_source = escaped_source, .end_state = .{ .original_line = chunk.end_state.original_line, .original_column = chunk.end_state.original_column, @@ -50,126 +32,90 @@ pub fn newNonEmpty(chunk: SourceMap.Chunk, quoted_contents: []u8) bun.ptr.RefPtr }); } -fn destroy(self: *@This(), _: *DevServer) void { - self.vlq_allocator.free(self.vlq()); - bun.destroy(self); +pub fn deinit(self: *Self) void { + self.vlq_.deinit(); + self.escaped_source.deinit(); } -pub fn memoryCost(self: *const @This()) usize { - return self.vlq_len + self.quoted_contents_len + @sizeOf(@This()); +pub fn memoryCost(self: *const Self) usize { + return self.vlq().len + self.quotedContents().len + @sizeOf(Self); } -/// When DevServer iterates everything to calculate memory usage, it passes -/// a generation number along which is different on each sweep, but -/// consistent within one. It is used to avoid counting memory twice. -pub fn memoryCostWithDedupe(self: *@This(), new_dedupe_bits: u32) usize { - if (self.bits_used_for_memory_cost_dedupe == new_dedupe_bits) { - return 0; // already counted. 
- } - self.bits_used_for_memory_cost_dedupe = new_dedupe_bits; - return self.memoryCost(); -} - -pub fn vlq(self: *const @This()) []u8 { - return self.vlq_ptr[0..self.vlq_len]; +pub fn vlq(self: *const Self) []const u8 { + return self.vlq_.getConst(); } // TODO: rename to `escapedSource` -pub fn quotedContents(self: *const @This()) []u8 { - return self.quoted_contents_ptr[0..self.quoted_contents_len]; +pub fn quotedContents(self: *const Self) []const u8 { + return self.escaped_source.getConst(); } comptime { // `ci_assert` builds add a `safety.ThreadLock` if (!Environment.ci_assert) { - assert_eql(@sizeOf(@This()), @sizeOf(usize) * 7); - assert_eql(@alignOf(@This()), @alignOf(usize)); + assert_eql(@sizeOf(Self), @sizeOf(usize) * 5); + assert_eql(@alignOf(Self), @alignOf(usize)); } } +const PackedMap = Self; + +pub const LineCount = bun.GenericIndex(u32, u8); + /// HTML, CSS, Assets, and failed files do not have source maps. These cases /// should never allocate an object. There is still relevant state for these -/// files to encode, so those fields fit within the same 64 bits the pointer -/// would have used. -/// -/// The tag is stored out of line with `Untagged` -/// - `IncrementalGraph(.client).File` offloads this bit into `File.Flags` -/// - `SourceMapStore.Entry` uses `MultiArrayList` -pub const RefOrEmpty = union(enum(u1)) { - ref: bun.ptr.RefPtr(PackedMap), - empty: Empty, +/// files to encode, so a tagged union is used. +pub const Shared = union(enum) { + some: bun.ptr.Shared(*PackedMap), + none: void, + line_count: LineCount, - pub const Empty = struct { - /// Number of lines to skip when there is an associated JS chunk. - line_count: bun.GenericIndex(u32, u8).Optional, - /// This technically is not source-map related, but - /// all HTML files have no source map, so this can - /// fit in this space. 
- html_bundle_route_index: RouteBundle.Index.Optional, - }; + pub fn get(self: Shared) ?*PackedMap { + return switch (self) { + .some => |ptr| ptr.get(), + else => null, + }; + } - pub const blank_empty: @This() = .{ .empty = .{ - .line_count = .none, - .html_bundle_route_index = .none, - } }; - - pub fn deref(map: *const @This(), dev: *DevServer) void { - switch (map.*) { - .ref => |ptr| ptr.derefWithContext(dev), - .empty => {}, + pub fn take(self: *Shared) ?bun.ptr.Shared(*PackedMap) { + switch (self.*) { + .some => |ptr| { + self.* = .none; + return ptr; + }, + else => return null, } } - pub fn dupeRef(map: *const @This()) @This() { - return switch (map.*) { - .ref => |ptr| .{ .ref = ptr.dupeRef() }, - .empty => map.*, + pub fn clone(self: Shared) Shared { + return switch (self) { + .some => |ptr| .{ .some = ptr.clone() }, + else => self, }; } - pub fn untag(map: @This()) Untagged { - return switch (map) { - .ref => |ptr| .{ .ref = ptr }, - .empty => |empty| .{ .empty = empty }, - }; + pub fn deinit(self: Shared) void { + switch (self) { + .some => |ptr| ptr.deinit(), + else => {}, + } } - pub const Tag = @typeInfo(@This()).@"union".tag_type.?; - pub const Untagged = brk: { - @setRuntimeSafety(Environment.isDebug); // do not store a union tag in windows release - break :brk union { - ref: bun.ptr.RefPtr(PackedMap), - empty: Empty, - - pub const blank_empty = RefOrEmpty.blank_empty.untag(); - - pub fn decode(untagged: @This(), tag: Tag) RefOrEmpty { - return switch (tag) { - .ref => .{ .ref = untagged.ref }, - .empty => .{ .empty = untagged.empty }, - }; - } - - comptime { - if (!Environment.isDebug) { - assert_eql(@sizeOf(@This()), @sizeOf(usize)); - assert_eql(@alignOf(@This()), @alignOf(usize)); - } - } + /// Amortized memory cost across all references to the same `PackedMap` + pub fn memoryCost(self: Shared) usize { + return switch (self) { + .some => |ptr| ptr.get().memoryCost() / ptr.strongCount(), + else => 0, }; - }; + } }; -const std = @import("std"); - 
const bun = @import("bun"); const Environment = bun.Environment; const SourceMap = bun.sourcemap; const assert = bun.assert; const assert_eql = bun.assert_eql; -const bake = bun.bake; const Chunk = bun.bundle_v2.Chunk; -const RefPtr = bun.ptr.RefPtr; -const DevServer = bake.DevServer; -const RouteBundle = DevServer.RouteBundle; +const Owned = bun.ptr.Owned; +const ScopedOwned = bun.ptr.ScopedOwned; diff --git a/src/bake/DevServer/SerializedFailure.zig b/src/bake/DevServer/SerializedFailure.zig index a05ba999c7..0b4c9609a5 100644 --- a/src/bake/DevServer/SerializedFailure.zig +++ b/src/bake/DevServer/SerializedFailure.zig @@ -9,12 +9,12 @@ /// for deterministic output; there is code in DevServer that uses `swapRemove`. pub const SerializedFailure = @This(); -/// Serialized data is always owned by dev.allocator +/// Serialized data is always owned by dev.allocator() /// The first 32 bits of this slice contain the owner data: []u8, pub fn deinit(f: SerializedFailure, dev: *DevServer) void { - dev.allocator.free(f.data); + dev.allocator().free(f.data); } /// The metaphorical owner of an incremental file error. 
The packed variant @@ -110,7 +110,7 @@ pub fn initFromJs(dev: *DevServer, owner: Owner, value: JSValue) !SerializedFail @panic("TODO"); } // Avoid small re-allocations without requesting so much from the heap - var sfb = std.heap.stackFallback(65536, dev.allocator); + var sfb = std.heap.stackFallback(65536, dev.allocator()); var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch unreachable; // enough space const w = payload.writer(); @@ -120,7 +120,7 @@ pub fn initFromJs(dev: *DevServer, owner: Owner, value: JSValue) !SerializedFail // Avoid-recloning if it is was moved to the hap const data = if (payload.items.ptr == &sfb.buffer) - try dev.allocator.dupe(u8, payload.items) + try dev.allocator().dupe(u8, payload.items) else payload.items; @@ -137,7 +137,7 @@ pub fn initFromLog( assert(messages.len > 0); // Avoid small re-allocations without requesting so much from the heap - var sfb = std.heap.stackFallback(65536, dev.allocator); + var sfb = std.heap.stackFallback(65536, dev.allocator()); var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch unreachable; // enough space const w = payload.writer(); @@ -154,7 +154,7 @@ pub fn initFromLog( // Avoid-recloning if it is was moved to the hap const data = if (payload.items.ptr == &sfb.buffer) - try dev.allocator.dupe(u8, payload.items) + try dev.allocator().dupe(u8, payload.items) else payload.items; diff --git a/src/bake/DevServer/SourceMapStore.zig b/src/bake/DevServer/SourceMapStore.zig index 00f9ccaa17..b22837ddcc 100644 --- a/src/bake/DevServer/SourceMapStore.zig +++ b/src/bake/DevServer/SourceMapStore.zig @@ -1,14 +1,14 @@ -/// Storage for source maps on `/_bun/client/{id}.js.map` -/// -/// All source maps are referenced counted, so that when a websocket disconnects -/// or a bundle is replaced, the unreachable source map URLs are revoked. 
Source -/// maps that aren't reachable from IncrementalGraph can still be reached by -/// a browser tab if it has a callback to a previously loaded chunk; so DevServer -/// should be aware of it. -pub const SourceMapStore = @This(); +//! Storage for source maps on `/_bun/client/{id}.js.map` +//! +//! All source maps are referenced counted, so that when a websocket disconnects +//! or a bundle is replaced, the unreachable source map URLs are revoked. Source +//! maps that aren't reachable from IncrementalGraph can still be reached by +//! a browser tab if it has a callback to a previously loaded chunk; so DevServer +//! should be aware of it. +const Self = @This(); /// See `SourceId` for what the content of u64 is. -pub const Key = bun.GenericIndex(u64, .{ "Key of", SourceMapStore }); +pub const Key = bun.GenericIndex(u64, .{ "Key of", Self }); entries: AutoArrayHashMapUnmanaged(Key, Entry), /// When a HTML bundle is loaded, it places a "weak reference" to the @@ -20,7 +20,7 @@ weak_refs: bun.LinearFifo(WeakRef, .{ .Static = weak_ref_entry_max }), /// Shared weak_ref_sweep_timer: EventLoopTimer, -pub const empty: SourceMapStore = .{ +pub const empty: Self = .{ .entries = .empty, .weak_ref_sweep_timer = .initPaused(.DevServerSweepSourceMaps), .weak_refs = .init(), @@ -54,6 +54,7 @@ pub const SourceId = packed struct(u64) { /// `SourceMapStore.Entry` is the information + refcount holder to /// construct the actual JSON file associated with a bundle/hot update. pub const Entry = struct { + dev_allocator: DevAllocator, /// Sum of: /// - How many active sockets have code that could reference this source map? /// - For route bundle client scripts, +1 until invalidation. @@ -62,13 +63,13 @@ pub const Entry = struct { /// Outer slice is owned, inner slice is shared with IncrementalGraph. paths: []const []const u8, /// Indexes are off by one because this excludes the HMR Runtime. 
- files: bun.MultiArrayList(PackedMap.RefOrEmpty), + files: bun.MultiArrayList(PackedMap.Shared), /// The memory cost can be shared between many entries and IncrementalGraph /// So this is only used for eviction logic, to pretend this was the only /// entry. To compute the memory cost of DevServer, this cannot be used. overlapping_memory_cost: u32, - pub fn sourceContents(entry: @This()) []const bun.StringPointer { + pub fn sourceContents(entry: Entry) []const bun.StringPointer { return entry.source_contents[0..entry.file_paths.len]; } @@ -145,16 +146,16 @@ pub const Entry = struct { j.pushStatic( \\],"sourcesContent":["// (Bun's internal HMR runtime is minified)" ); - for (map_files.items(.tags), map_files.items(.data)) |tag, chunk| { - // For empty chunks, put a blank entry. This allows HTML - // files to get their stack remapped, despite having no - // actual mappings. - if (tag == .empty) { + for (0..map_files.len) |i| { + const chunk = map_files.get(i); + const source_map = chunk.get() orelse { + // For empty chunks, put a blank entry. This allows HTML files to get their stack + // remapped, despite having no actual mappings. j.pushStatic(",\"\""); continue; - } + }; j.pushStatic(","); - const quoted_slice = chunk.ref.data.quotedContents(); + const quoted_slice = source_map.quotedContents(); if (quoted_slice.len == 0) { bun.debugAssert(false); // vlq without source contents! j.pushStatic(",\"// Did not have source contents for this file.\n// This is a bug in Bun's bundler and should be reported with a reproduction.\""); @@ -210,20 +211,10 @@ pub const Entry = struct { var lines_between: u32 = runtime.line_count + 2; // Join all of the mappings together. - for (map_files.items(.tags), map_files.items(.data), 1..) |tag, chunk, source_index| switch (tag) { - .empty => { - lines_between += (chunk.empty.line_count.unwrap() orelse - // NOTE: It is too late to compute this info since the - // bundled text may have been freed already. 
For example, a - // HMR chunk is never persisted. - @panic("Missing internal precomputed line count.")).get(); - - // - Empty file has no breakpoints that could remap. - // - Codegen of HTML files cannot throw. - continue; - }, - .ref => { - const content = chunk.ref.data; + for (0..map_files.len) |i| switch (map_files.get(i)) { + .some => |source_map| { + const source_index = i + 1; + const content = source_map.get(); const start_state: SourceMap.SourceMapState = .{ .source_index = @intCast(source_index), .generated_line = @intCast(lines_between), @@ -249,24 +240,37 @@ pub const Entry = struct { .original_column = content.end_state.original_column, }; }, + .line_count => |count| { + lines_between += count.get(); + // - Empty file has no breakpoints that could remap. + // - Codegen of HTML files cannot throw. + }, + .none => { + // NOTE: It is too late to compute the line count since the bundled text may + // have been freed already. For example, a HMR chunk is never persisted. + @panic("Missing internal precomputed line count."); + }, }; } - pub fn deinit(entry: *Entry, dev: *DevServer) void { - _ = VoidFieldTypes(Entry){ + pub fn deinit(entry: *Entry) void { + useAllFields(Entry, .{ + .dev_allocator = {}, .ref_count = assert(entry.ref_count == 0), .overlapping_memory_cost = {}, .files = { - for (entry.files.items(.tags), entry.files.items(.data)) |tag, data| { - switch (tag) { - .ref => data.ref.derefWithContext(dev), - .empty => {}, - } + const files = entry.files.slice(); + for (0..files.len) |i| { + files.get(i).deinit(); } - entry.files.deinit(dev.allocator); + entry.files.deinit(entry.allocator()); }, - .paths = dev.allocator.free(entry.paths), - }; + .paths = entry.allocator().free(entry.paths), + }); + } + + fn allocator(entry: *const Entry) Allocator { + return entry.dev_allocator.get(); } }; @@ -297,10 +301,18 @@ pub const WeakRef = struct { } }; -pub fn owner(store: *SourceMapStore) *DevServer { +pub fn owner(store: *Self) *DevServer { return 
@alignCast(@fieldParentPtr("source_maps", store)); } +fn dev_allocator(store: *Self) DevAllocator { + return store.owner().dev_allocator(); +} + +fn allocator(store: *Self) Allocator { + return store.dev_allocator().get(); +} + const PutOrIncrementRefCount = union(enum) { /// If an *Entry is returned, caller must initialize some /// fields with the source map data. @@ -308,11 +320,13 @@ const PutOrIncrementRefCount = union(enum) { /// Already exists, ref count was incremented. shared: *Entry, }; -pub fn putOrIncrementRefCount(store: *SourceMapStore, script_id: Key, ref_count: u32) !PutOrIncrementRefCount { - const gop = try store.entries.getOrPut(store.owner().allocator, script_id); + +pub fn putOrIncrementRefCount(store: *Self, script_id: Key, ref_count: u32) !PutOrIncrementRefCount { + const gop = try store.entries.getOrPut(store.allocator(), script_id); if (!gop.found_existing) { bun.debugAssert(ref_count > 0); // invalid state gop.value_ptr.* = .{ + .dev_allocator = store.dev_allocator(), .ref_count = ref_count, .overlapping_memory_cost = undefined, .paths = undefined, @@ -326,29 +340,29 @@ pub fn putOrIncrementRefCount(store: *SourceMapStore, script_id: Key, ref_count: } } -pub fn unref(store: *SourceMapStore, key: Key) void { +pub fn unref(store: *Self, key: Key) void { unrefCount(store, key, 1); } -pub fn unrefCount(store: *SourceMapStore, key: Key, count: u32) void { +pub fn unrefCount(store: *Self, key: Key, count: u32) void { const index = store.entries.getIndex(key) orelse return bun.debugAssert(false); unrefAtIndex(store, index, count); } -fn unrefAtIndex(store: *SourceMapStore, index: usize, count: u32) void { +fn unrefAtIndex(store: *Self, index: usize, count: u32) void { const e = &store.entries.values()[index]; e.ref_count -= count; if (bun.Environment.enable_logs) { mapLog("dec {x}, {d} | {d} -> {d}", .{ store.entries.keys()[index].get(), count, e.ref_count + count, e.ref_count }); } if (e.ref_count == 0) { - e.deinit(store.owner()); + e.deinit(); 
store.entries.swapRemoveAt(index); } } -pub fn addWeakRef(store: *SourceMapStore, key: Key) void { +pub fn addWeakRef(store: *Self, key: Key) void { // This function expects that `weak_ref_entry_max` is low. const entry = store.entries.getPtr(key) orelse return bun.debugAssert(false); @@ -390,7 +404,7 @@ pub fn addWeakRef(store: *SourceMapStore, key: Key) void { } /// Returns true if the ref count was incremented (meaning there was a source map to transfer) -pub fn removeOrUpgradeWeakRef(store: *SourceMapStore, key: Key, mode: enum(u1) { +pub fn removeOrUpgradeWeakRef(store: *Self, key: Key, mode: enum(u1) { /// Remove the weak ref entirely remove = 0, /// Convert the weak ref into a strong ref @@ -420,7 +434,7 @@ pub fn removeOrUpgradeWeakRef(store: *SourceMapStore, key: Key, mode: enum(u1) { return true; } -pub fn locateWeakRef(store: *SourceMapStore, key: Key) ?struct { index: usize, ref: WeakRef } { +pub fn locateWeakRef(store: *Self, key: Key) ?struct { index: usize, ref: WeakRef } { for (0..store.weak_refs.count) |i| { const ref = store.weak_refs.peekItem(i); if (ref.key() == key) return .{ .index = i, .ref = ref }; @@ -430,7 +444,7 @@ pub fn locateWeakRef(store: *SourceMapStore, key: Key) ?struct { index: usize, r pub fn sweepWeakRefs(timer: *EventLoopTimer, now_ts: *const bun.timespec) EventLoopTimer.Arm { mapLog("sweepWeakRefs", .{}); - const store: *SourceMapStore = @fieldParentPtr("weak_ref_sweep_timer", timer); + const store: *Self = @fieldParentPtr("weak_ref_sweep_timer", timer); assert(store.owner().magic == .valid); const now: u64 = @max(now_ts.sec, 0); @@ -461,22 +475,22 @@ pub const GetResult = struct { index: bun.GenericIndex(u32, Entry), mappings: SourceMap.Mapping.List, file_paths: []const []const u8, - entry_files: *const bun.MultiArrayList(PackedMap.RefOrEmpty), + entry_files: *const bun.MultiArrayList(PackedMap.Shared), - pub fn deinit(self: *@This(), allocator: Allocator) void { - self.mappings.deinit(allocator); + pub fn deinit(self: 
*@This(), alloc: Allocator) void { + self.mappings.deinit(alloc); // file paths and source contents are borrowed } }; /// This is used in exactly one place: remapping errors. /// In that function, an arena allows reusing memory between different source maps -pub fn getParsedSourceMap(store: *SourceMapStore, script_id: Key, arena: Allocator, gpa: Allocator) ?GetResult { +pub fn getParsedSourceMap(store: *Self, script_id: Key, arena: Allocator, gpa: Allocator) ?GetResult { const index = store.entries.getIndex(script_id) orelse return null; // source map was collected. const entry = &store.entries.values()[index]; - const script_id_decoded: SourceMapStore.SourceId = @bitCast(script_id.get()); + const script_id_decoded: SourceId = @bitCast(script_id.get()); const vlq_bytes = entry.renderMappings(script_id_decoded.kind, arena, arena) catch bun.outOfMemory(); switch (SourceMap.Mapping.parse( @@ -509,11 +523,12 @@ const SourceMap = bun.sourcemap; const StringJoiner = bun.StringJoiner; const assert = bun.assert; const bake = bun.bake; -const VoidFieldTypes = bun.meta.VoidFieldTypes; +const useAllFields = bun.meta.useAllFields; const EventLoopTimer = bun.api.Timer.EventLoopTimer; const DevServer = bun.bake.DevServer; const ChunkKind = DevServer.ChunkKind; +const DevAllocator = DevServer.DevAllocator; const PackedMap = DevServer.PackedMap; const dumpBundle = DevServer.dumpBundle; const mapLog = DevServer.mapLog; diff --git a/src/bake/DevServer/memory_cost.zig b/src/bake/DevServer/memory_cost.zig index 9647eb4823..be236faa05 100644 --- a/src/bake/DevServer/memory_cost.zig +++ b/src/bake/DevServer/memory_cost.zig @@ -23,12 +23,9 @@ pub fn memoryCostDetailed(dev: *DevServer) MemoryCost { var js_code: usize = 0; var source_maps: usize = 0; var assets: usize = 0; - const dedupe_bits: u32 = @truncate(@abs(std.time.nanoTimestamp())); - const discard = voidFieldTypeDiscardHelper; // See https://github.com/ziglang/zig/issues/21879 - _ = VoidFieldTypes(DevServer){ + 
useAllFields(DevServer, .{ // does not contain pointers - .allocator = {}, .assume_perfect_incremental_bundling = {}, .bun_watcher = {}, .bundles_since_last_error = {}, @@ -71,13 +68,13 @@ pub fn memoryCostDetailed(dev: *DevServer) MemoryCost { other_bytes += bundle.memoryCost(); }, .server_graph = { - const cost = dev.server_graph.memoryCostDetailed(dedupe_bits); + const cost = dev.server_graph.memoryCostDetailed(); incremental_graph_server += cost.graph; js_code += cost.code; source_maps += cost.source_maps; }, .client_graph = { - const cost = dev.client_graph.memoryCostDetailed(dedupe_bits); + const cost = dev.client_graph.memoryCostDetailed(); incremental_graph_client += cost.graph; js_code += cost.code; source_maps += cost.source_maps; @@ -92,15 +89,13 @@ pub fn memoryCostDetailed(dev: *DevServer) MemoryCost { other_bytes += memoryCostArrayHashMap(dev.source_maps.entries); for (dev.source_maps.entries.values()) |entry| { source_maps += entry.files.memoryCost(); - for (entry.files.items(.tags), entry.files.items(.data)) |tag, data| { - switch (tag) { - .ref => source_maps += data.ref.data.memoryCostWithDedupe(dedupe_bits), - .empty => {}, - } + const files = entry.files.slice(); + for (0..files.len) |i| { + source_maps += files.get(i).memoryCost(); } } }, - .incremental_result = discard(VoidFieldTypes(IncrementalResult){ + .incremental_result = useAllFields(IncrementalResult, .{ .had_adjusted_edges = {}, .client_components_added = { other_bytes += memoryCostArrayList(dev.incremental_result.client_components_added); @@ -176,7 +171,7 @@ pub fn memoryCostDetailed(dev: *DevServer) MemoryCost { }, .enable_after_bundle => {}, }, - }; + }); return .{ .assets = assets, .incremental_graph_client = incremental_graph_client, @@ -210,12 +205,10 @@ const std = @import("std"); const bun = @import("bun"); const jsc = bun.jsc; +const useAllFields = bun.meta.useAllFields; const HTMLBundle = jsc.API.HTMLBundle; const DevServer = bun.bake.DevServer; const DeferredRequest = 
DevServer.DeferredRequest; const HmrSocket = DevServer.HmrSocket; const IncrementalResult = DevServer.IncrementalResult; - -const VoidFieldTypes = bun.meta.VoidFieldTypes; -const voidFieldTypeDiscardHelper = bun.meta.voidFieldTypeDiscardHelper; diff --git a/src/bun.js/api/server.zig b/src/bun.js/api/server.zig index 573f69908c..9f7b5e757b 100644 --- a/src/bun.js/api/server.zig +++ b/src/bun.js/api/server.zig @@ -2590,7 +2590,7 @@ pub fn NewServer(protocol_enum: enum { http, https }, development_kind: enum { d .html => |html_bundle_route| { ServerConfig.applyStaticRoute(any_server, ssl_enabled, app, *HTMLBundle.Route, html_bundle_route.data, entry.path, entry.method); if (dev_server) |dev| { - dev.html_router.put(dev.allocator, entry.path, html_bundle_route.data) catch bun.outOfMemory(); + dev.html_router.put(dev.allocator(), entry.path, html_bundle_route.data) catch bun.outOfMemory(); } needs_plugins = true; }, diff --git a/src/bundler/Chunk.zig b/src/bundler/Chunk.zig index 775021aa00..261e3d8032 100644 --- a/src/bundler/Chunk.zig +++ b/src/bundler/Chunk.zig @@ -374,7 +374,7 @@ pub const Chunk = struct { if (enable_source_map_shifts and FeatureFlags.source_map_debug_id) { // This comment must go before the //# sourceMappingURL comment const debug_id_fmt = std.fmt.allocPrint( - graph.allocator, + graph.heap.allocator(), "\n//# debugId={}\n", .{bun.sourcemap.DebugIDFormatter{ .id = chunk.isolated_hash }}, ) catch bun.outOfMemory(); diff --git a/src/bundler/Graph.zig b/src/bundler/Graph.zig index 0c6ac3321d..4ff0fe7090 100644 --- a/src/bundler/Graph.zig +++ b/src/bundler/Graph.zig @@ -2,9 +2,6 @@ const Graph = @This(); pool: *ThreadPool, heap: ThreadLocalArena, -/// This allocator is thread-local to the Bundler thread -/// .allocator == .heap.allocator() -allocator: std.mem.Allocator, /// Mapping user-specified entry points to their Source Index entry_points: std.ArrayListUnmanaged(Index) = .{}, @@ -113,10 +110,7 @@ const Loader = options.Loader; const bun = 
@import("bun"); const MultiArrayList = bun.MultiArrayList; -const default_allocator = bun.default_allocator; const BabyList = bun.collections.BabyList; - -const allocators = bun.allocators; const ThreadLocalArena = bun.allocators.MimallocArena; const js_ast = bun.ast; diff --git a/src/bundler/LinkerContext.zig b/src/bundler/LinkerContext.zig index d73497b9bb..09a1543b92 100644 --- a/src/bundler/LinkerContext.zig +++ b/src/bundler/LinkerContext.zig @@ -7,7 +7,6 @@ pub const LinkerContext = struct { parse_graph: *Graph = undefined, graph: LinkerGraph = undefined, - allocator: std.mem.Allocator = undefined, log: *Logger.Log = undefined, resolver: *Resolver = undefined, @@ -45,8 +44,12 @@ pub const LinkerContext = struct { mangled_props: MangledProps = .{}, + pub fn allocator(this: *const LinkerContext) std.mem.Allocator { + return this.graph.allocator; + } + pub fn pathWithPrettyInitialized(this: *LinkerContext, path: Fs.Path) !Fs.Path { - return bundler.genericPathWithPrettyInitialized(path, this.options.target, this.resolver.fs.top_level_dir, this.graph.allocator); + return bundler.genericPathWithPrettyInitialized(path, this.options.target, this.resolver.fs.top_level_dir, this.allocator()); } pub const LinkerOptions = struct { @@ -112,16 +115,16 @@ pub const LinkerContext = struct { // was generated. This will be preserved so that remapping // stack traces can show the source code, even after incremental // rebuilds occur. 
- const allocator = if (worker.ctx.transpiler.options.dev_server) |dev| - dev.allocator + const alloc = if (worker.ctx.transpiler.options.dev_server) |dev| + dev.allocator() else worker.allocator; - SourceMapData.computeQuotedSourceContents(task.ctx, allocator, task.source_index); + SourceMapData.computeQuotedSourceContents(task.ctx, alloc, task.source_index); } }; - pub fn computeLineOffsets(this: *LinkerContext, allocator: std.mem.Allocator, source_index: Index.Int) void { + pub fn computeLineOffsets(this: *LinkerContext, alloc: std.mem.Allocator, source_index: Index.Int) void { debug("Computing LineOffsetTable: {d}", .{source_index}); const line_offset_table: *bun.sourcemap.LineOffsetTable.List = &this.graph.files.items(.line_offset_table)[source_index]; @@ -137,7 +140,7 @@ pub const LinkerContext = struct { const approximate_line_count = this.graph.ast.items(.approximate_newline_count)[source_index]; line_offset_table.* = bun.sourcemap.LineOffsetTable.generate( - allocator, + alloc, source.contents, // We don't support sourcemaps for source files with more than 2^31 lines @@ -147,23 +150,20 @@ pub const LinkerContext = struct { pub fn computeQuotedSourceContents(this: *LinkerContext, _: std.mem.Allocator, source_index: Index.Int) void { debug("Computing Quoted Source Contents: {d}", .{source_index}); + const quoted_source_contents = &this.graph.files.items(.quoted_source_contents)[source_index]; + if (quoted_source_contents.take()) |old| { + old.deinit(); + } + const loader: options.Loader = this.parse_graph.input_files.items(.loader)[source_index]; - const quoted_source_contents: *?[]u8 = &this.graph.files.items(.quoted_source_contents)[source_index]; if (!loader.canHaveSourceMap()) { - if (quoted_source_contents.*) |slice| { - bun.default_allocator.free(slice); - quoted_source_contents.* = null; - } return; } const source: *const Logger.Source = &this.parse_graph.input_files.items(.source)[source_index]; var mutable = 
MutableString.initEmpty(bun.default_allocator); js_printer.quoteForJSON(source.contents, &mutable, false) catch bun.outOfMemory(); - if (quoted_source_contents.*) |slice| { - bun.default_allocator.free(slice); - } - quoted_source_contents.* = mutable.slice(); + quoted_source_contents.* = mutable.toDefaultOwned().toOptional(); } }; @@ -205,7 +205,7 @@ pub const LinkerContext = struct { this.log = bundle.transpiler.log; this.resolver = &bundle.transpiler.resolver; - this.cycle_detector = std.ArrayList(ImportTracker).init(this.allocator); + this.cycle_detector = std.ArrayList(ImportTracker).init(this.allocator()); this.graph.reachable_files = reachable; @@ -258,8 +258,8 @@ pub const LinkerContext = struct { bun.assert(this.options.source_maps != .none); this.source_maps.line_offset_wait_group = .initWithCount(reachable.len); this.source_maps.quoted_contents_wait_group = .initWithCount(reachable.len); - this.source_maps.line_offset_tasks = this.allocator.alloc(SourceMapData.Task, reachable.len) catch unreachable; - this.source_maps.quoted_contents_tasks = this.allocator.alloc(SourceMapData.Task, reachable.len) catch unreachable; + this.source_maps.line_offset_tasks = this.allocator().alloc(SourceMapData.Task, reachable.len) catch unreachable; + this.source_maps.quoted_contents_tasks = this.allocator().alloc(SourceMapData.Task, reachable.len) catch unreachable; var batch = ThreadPoolLib.Batch{}; var second_batch = ThreadPoolLib.Batch{}; @@ -308,7 +308,7 @@ pub const LinkerContext = struct { @panic("Assertion failed: HTML import file not found in pathToSourceIndexMap"); }; - html_source_indices.push(this.graph.allocator, source_index) catch bun.outOfMemory(); + html_source_indices.push(this.allocator(), source_index) catch bun.outOfMemory(); // S.LazyExport is a call to __jsonParse. 
const original_ref = parts[html_import] @@ -442,7 +442,7 @@ pub const LinkerContext = struct { const ref = this.graph.generateNewSymbol(source_index, .other, name); const part_index = this.graph.addPartToFile(source_index, .{ .declared_symbols = js_ast.DeclaredSymbol.List.fromSlice( - this.allocator, + this.allocator(), &[_]js_ast.DeclaredSymbol{ .{ .ref = ref, .is_top_level = true }, }, @@ -452,13 +452,13 @@ pub const LinkerContext = struct { try this.graph.generateSymbolImportAndUse(source_index, part_index, module_ref, 1, Index.init(source_index)); var top_level = &this.graph.meta.items(.top_level_symbol_to_parts_overlay)[source_index]; - var parts_list = this.allocator.alloc(u32, 1) catch unreachable; + var parts_list = this.allocator().alloc(u32, 1) catch unreachable; parts_list[0] = part_index; - top_level.put(this.allocator, ref, BabyList(u32).init(parts_list)) catch unreachable; + top_level.put(this.allocator(), ref, BabyList(u32).init(parts_list)) catch unreachable; var resolved_exports = &this.graph.meta.items(.resolved_exports)[source_index]; - resolved_exports.put(this.allocator, alias, ExportData{ + resolved_exports.put(this.allocator(), alias, ExportData{ .data = ImportTracker{ .source_index = Index.init(source_index), .import_ref = ref, @@ -494,7 +494,7 @@ pub const LinkerContext = struct { log.addErrorFmt( source, record.range.loc, - this.allocator, + this.allocator(), "Cannot import a \".{s}\" file into a CSS file", .{@tagName(loader)}, ) catch bun.outOfMemory(); @@ -582,7 +582,7 @@ pub const LinkerContext = struct { // AutoBitSet needs to be initialized if it is dynamic if (AutoBitSet.needsDynamic(entry_points.len)) { for (file_entry_bits) |*bits| { - bits.* = try AutoBitSet.initEmpty(c.allocator, entry_points.len); + bits.* = try AutoBitSet.initEmpty(c.allocator(), entry_points.len); } } else if (file_entry_bits.len > 0) { // assert that the tag is correct @@ -747,11 +747,13 @@ pub const LinkerContext = struct { const source_indices_for_contents 
= source_id_map.keys(); if (source_indices_for_contents.len > 0) { j.pushStatic("\n "); - j.pushStatic(quoted_source_map_contents[source_indices_for_contents[0]] orelse ""); + j.pushStatic( + quoted_source_map_contents[source_indices_for_contents[0]].getConst() orelse "", + ); for (source_indices_for_contents[1..]) |index| { j.pushStatic(",\n "); - j.pushStatic(quoted_source_map_contents[index] orelse ""); + j.pushStatic(quoted_source_map_contents[index].getConst() orelse ""); } } j.pushStatic( @@ -964,7 +966,7 @@ pub const LinkerContext = struct { // Require of a top-level await chain is forbidden if (record.kind == .require) { - var notes = std.ArrayList(Logger.Data).init(c.allocator); + var notes = std.ArrayList(Logger.Data).init(c.allocator()); var tla_pretty_path: string = ""; var other_source_index = record.source_index.get(); @@ -979,7 +981,7 @@ pub const LinkerContext = struct { const source = &input_files[other_source_index]; tla_pretty_path = source.path.pretty; notes.append(Logger.Data{ - .text = std.fmt.allocPrint(c.allocator, "The top-level await in {s} is here:", .{tla_pretty_path}) catch bun.outOfMemory(), + .text = std.fmt.allocPrint(c.allocator(), "The top-level await in {s} is here:", .{tla_pretty_path}) catch bun.outOfMemory(), .location = .initOrNull(source, parent_result_tla_keyword), }) catch bun.outOfMemory(); break; @@ -995,7 +997,7 @@ pub const LinkerContext = struct { other_source_index = parent_tla_check.parent; try notes.append(Logger.Data{ - .text = try std.fmt.allocPrint(c.allocator, "The file {s} imports the file {s} here:", .{ + .text = try std.fmt.allocPrint(c.allocator(), "The file {s} imports the file {s} here:", .{ input_files[parent_source_index].path.pretty, input_files[other_source_index].path.pretty, }), @@ -1006,9 +1008,9 @@ pub const LinkerContext = struct { const source: *const Logger.Source = &input_files[source_index]; const imported_pretty_path = source.path.pretty; const text: string = if 
(strings.eql(imported_pretty_path, tla_pretty_path)) - try std.fmt.allocPrint(c.allocator, "This require call is not allowed because the imported file \"{s}\" contains a top-level await", .{imported_pretty_path}) + try std.fmt.allocPrint(c.allocator(), "This require call is not allowed because the imported file \"{s}\" contains a top-level await", .{imported_pretty_path}) else - try std.fmt.allocPrint(c.allocator, "This require call is not allowed because the transitive dependency \"{s}\" contains a top-level await", .{tla_pretty_path}); + try std.fmt.allocPrint(c.allocator(), "This require call is not allowed because the transitive dependency \"{s}\" contains a top-level await", .{tla_pretty_path}); try c.log.addRangeErrorWithNotes(source, record.range, text, notes.items); } @@ -1047,12 +1049,12 @@ pub const LinkerContext = struct { this.all_stmts.deinit(); } - pub fn init(allocator: std.mem.Allocator) StmtList { + pub fn init(alloc: std.mem.Allocator) StmtList { return .{ - .inside_wrapper_prefix = std.ArrayList(Stmt).init(allocator), - .outside_wrapper_prefix = std.ArrayList(Stmt).init(allocator), - .inside_wrapper_suffix = std.ArrayList(Stmt).init(allocator), - .all_stmts = std.ArrayList(Stmt).init(allocator), + .inside_wrapper_prefix = std.ArrayList(Stmt).init(alloc), + .outside_wrapper_prefix = std.ArrayList(Stmt).init(alloc), + .inside_wrapper_suffix = std.ArrayList(Stmt).init(alloc), + .all_stmts = std.ArrayList(Stmt).init(alloc), }; } }; @@ -1063,7 +1065,7 @@ pub const LinkerContext = struct { loc: Logger.Loc, namespace_ref: Ref, import_record_index: u32, - allocator: std.mem.Allocator, + alloc: std.mem.Allocator, ast: *const JSAst, ) !bool { const record = ast.import_records.at(import_record_index); @@ -1080,11 +1082,11 @@ pub const LinkerContext = struct { S.Local, S.Local{ .decls = G.Decl.List.fromSlice( - allocator, + alloc, &.{ .{ .binding = Binding.alloc( - allocator, + alloc, B.Identifier{ .ref = namespace_ref, }, @@ -1121,10 +1123,10 @@ pub const 
LinkerContext = struct { try stmts.inside_wrapper_prefix.append( Stmt.alloc(S.Local, .{ .decls = try G.Decl.List.fromSlice( - allocator, + alloc, &.{ .{ - .binding = Binding.alloc(allocator, B.Identifier{ + .binding = Binding.alloc(alloc, B.Identifier{ .ref = namespace_ref, }, loc), .value = Expr.init(E.RequireString, .{ @@ -1193,7 +1195,7 @@ pub const LinkerContext = struct { pub fn printCodeForFileInChunkJS( c: *LinkerContext, r: renamer.Renamer, - allocator: std.mem.Allocator, + alloc: std.mem.Allocator, writer: *js_printer.BufferWriter, out_stmts: []Stmt, ast: *const js_ast.BundledAst, @@ -1229,13 +1231,13 @@ pub const LinkerContext = struct { .print_dce_annotations = c.options.emit_dce_annotations, .has_run_symbol_renamer = true, - .allocator = allocator, + .allocator = alloc, .source_map_allocator = if (c.dev_server != null and c.parse_graph.input_files.items(.loader)[source_index.get()].isJavaScriptLike()) // The loader check avoids globally allocating asset source maps writer.buffer.allocator else - allocator, + alloc, .to_esm_ref = to_esm_ref, .to_commonjs_ref = to_commonjs_ref, .require_ref = switch (c.options.output_format) { @@ -1322,9 +1324,9 @@ pub const LinkerContext = struct { const all_sources: []Logger.Source = c.parse_graph.input_files.items(.source); // Collect all local css names - var sfb = std.heap.stackFallback(512, c.allocator); - const allocator = sfb.get(); - var local_css_names = std.AutoHashMap(bun.bundle_v2.Ref, void).init(allocator); + var sfb = std.heap.stackFallback(512, c.allocator()); + const alloc = sfb.get(); + var local_css_names = std.AutoHashMap(bun.bundle_v2.Ref, void).init(alloc); defer local_css_names.deinit(); for (all_css_asts, 0..) 
|maybe_css_ast, source_index| { @@ -1351,15 +1353,15 @@ pub const LinkerContext = struct { const original_name = symbol.original_name; const path_hash = bun.css.css_modules.hash( - allocator, + alloc, "{s}", // use path relative to cwd for determinism .{source.path.pretty}, false, ); - const final_generated_name = std.fmt.allocPrint(c.graph.allocator, "{s}_{s}", .{ original_name, path_hash }) catch bun.outOfMemory(); - c.mangled_props.put(c.allocator, ref, final_generated_name) catch bun.outOfMemory(); + const final_generated_name = std.fmt.allocPrint(c.allocator(), "{s}_{s}", .{ original_name, path_hash }) catch bun.outOfMemory(); + c.mangled_props.put(c.allocator(), ref, final_generated_name) catch bun.outOfMemory(); } } } @@ -1730,7 +1732,7 @@ pub const LinkerContext = struct { defer c.cycle_detector.shrinkRetainingCapacity(cycle_detector_top); var tracker = init_tracker; - var ambiguous_results = std.ArrayList(MatchImport).init(c.allocator); + var ambiguous_results = std.ArrayList(MatchImport).init(c.allocator()); defer ambiguous_results.clearAndFree(); var result: MatchImport = MatchImport{}; @@ -1801,7 +1803,7 @@ pub const LinkerContext = struct { c.log.addRangeWarningFmt( source, source.rangeOfIdentifier(named_import.alias_loc.?), - c.allocator, + c.allocator(), "Import \"{s}\" will always be undefined because the file \"{s}\" has no exports", .{ named_import.alias.?, @@ -1868,7 +1870,7 @@ pub const LinkerContext = struct { c.log.addRangeWarningFmtWithNote( source, r, - c.allocator, + c.allocator(), "Browser polyfill for module \"{s}\" doesn't have a matching export named \"{s}\"", .{ next_source.path.pretty, @@ -1882,7 +1884,7 @@ pub const LinkerContext = struct { c.log.addRangeWarningFmt( source, r, - c.allocator, + c.allocator(), "Import \"{s}\" will always be undefined because there is no matching export in \"{s}\"", .{ named_import.alias.?, @@ -1894,7 +1896,7 @@ pub const LinkerContext = struct { c.log.addRangeErrorFmtWithNote( source, r, - c.allocator, 
+ c.allocator(), "Browser polyfill for module \"{s}\" doesn't have a matching export named \"{s}\"", .{ next_source.path.pretty, @@ -1908,7 +1910,7 @@ pub const LinkerContext = struct { c.log.addRangeErrorFmt( source, r, - c.allocator, + c.allocator(), "No matching export in \"{s}\" for import \"{s}\"", .{ next_source.path.pretty, @@ -2049,7 +2051,7 @@ pub const LinkerContext = struct { // Generate a dummy part that depends on the "__commonJS" symbol. const dependencies: []js_ast.Dependency = if (c.options.output_format != .internal_bake_dev) brk: { - const dependencies = c.allocator.alloc(js_ast.Dependency, common_js_parts.len) catch bun.outOfMemory(); + const dependencies = c.allocator().alloc(js_ast.Dependency, common_js_parts.len) catch bun.outOfMemory(); for (common_js_parts, dependencies) |part, *cjs| { cjs.* = .{ .part_index = part, @@ -2059,14 +2061,14 @@ pub const LinkerContext = struct { break :brk dependencies; } else &.{}; var symbol_uses: Part.SymbolUseMap = .empty; - symbol_uses.put(c.allocator, wrapper_ref, .{ .count_estimate = 1 }) catch bun.outOfMemory(); + symbol_uses.put(c.allocator(), wrapper_ref, .{ .count_estimate = 1 }) catch bun.outOfMemory(); const part_index = c.graph.addPartToFile( source_index, .{ .stmts = &.{}, .symbol_uses = symbol_uses, .declared_symbols = js_ast.DeclaredSymbol.List.fromSlice( - c.allocator, + c.allocator(), &[_]js_ast.DeclaredSymbol{ .{ .ref = c.graph.ast.items(.exports_ref)[source_index], .is_top_level = true }, .{ .ref = c.graph.ast.items(.module_ref)[source_index], .is_top_level = true }, @@ -2108,7 +2110,7 @@ pub const LinkerContext = struct { &.{}; // generate a dummy part that depends on the "__esm" symbol - const dependencies = c.allocator.alloc(js_ast.Dependency, esm_parts.len) catch unreachable; + const dependencies = c.allocator().alloc(js_ast.Dependency, esm_parts.len) catch unreachable; for (esm_parts, dependencies) |part, *esm| { esm.* = .{ .part_index = part, @@ -2117,12 +2119,12 @@ pub const 
LinkerContext = struct { } var symbol_uses: Part.SymbolUseMap = .empty; - symbol_uses.put(c.allocator, wrapper_ref, .{ .count_estimate = 1 }) catch bun.outOfMemory(); + symbol_uses.put(c.allocator(), wrapper_ref, .{ .count_estimate = 1 }) catch bun.outOfMemory(); const part_index = c.graph.addPartToFile( source_index, .{ .symbol_uses = symbol_uses, - .declared_symbols = js_ast.DeclaredSymbol.List.fromSlice(c.allocator, &[_]js_ast.DeclaredSymbol{ + .declared_symbols = js_ast.DeclaredSymbol.List.fromSlice(c.allocator(), &[_]js_ast.DeclaredSymbol{ .{ .ref = wrapper_ref, .is_top_level = true }, }) catch unreachable, .dependencies = Dependency.List.init(dependencies), @@ -2278,7 +2280,7 @@ pub const LinkerContext = struct { imports_to_bind: *RefImportData, source_index: Index.Int, ) void { - var named_imports = named_imports_ptr.clone(c.allocator) catch bun.outOfMemory(); + var named_imports = named_imports_ptr.clone(c.allocator()) catch bun.outOfMemory(); defer named_imports_ptr.* = named_imports; const Sorter = struct { @@ -2302,7 +2304,7 @@ pub const LinkerContext = struct { const import_ref = ref; - var re_exports = std.ArrayList(js_ast.Dependency).init(c.allocator); + var re_exports = std.ArrayList(js_ast.Dependency).init(c.allocator()); const result = c.matchImportWithExport(.{ .source_index = Index.source(source_index), .import_ref = import_ref, @@ -2311,7 +2313,7 @@ pub const LinkerContext = struct { switch (result.kind) { .normal => { imports_to_bind.put( - c.allocator, + c.allocator(), import_ref, .{ .re_exports = bun.BabyList(js_ast.Dependency).init(re_exports.items), @@ -2330,7 +2332,7 @@ pub const LinkerContext = struct { }, .normal_and_namespace => { imports_to_bind.put( - c.allocator, + c.allocator(), import_ref, .{ .re_exports = bun.BabyList(js_ast.Dependency).init(re_exports.items), @@ -2352,7 +2354,7 @@ pub const LinkerContext = struct { c.log.addRangeErrorFmt( source, r, - c.allocator, + c.allocator(), "Detected cycle while resolving import \"{s}\"", 
.{ named_import.alias.?, @@ -2361,7 +2363,7 @@ pub const LinkerContext = struct { }, .probably_typescript_type => { c.graph.meta.items(.probably_typescript_type)[source_index].put( - c.allocator, + c.allocator(), import_ref, {}, ) catch unreachable; @@ -2379,7 +2381,7 @@ pub const LinkerContext = struct { c.log.addRangeWarningFmt( source, r, - c.allocator, + c.allocator(), "Import \"{s}\" will always be undefined because there are multiple matching exports", .{ named_import.alias.?, @@ -2389,7 +2391,7 @@ pub const LinkerContext = struct { c.log.addRangeErrorFmt( source, r, - c.allocator, + c.allocator(), "Ambiguous import \"{s}\" has multiple matching exports", .{ named_import.alias.?, @@ -2404,7 +2406,7 @@ pub const LinkerContext = struct { pub fn breakOutputIntoPieces( c: *LinkerContext, - allocator: std.mem.Allocator, + alloc: std.mem.Allocator, j: *StringJoiner, count: u32, ) !Chunk.IntermediateOutput { @@ -2423,10 +2425,10 @@ pub const LinkerContext = struct { var pieces = brk: { errdefer j.deinit(); - break :brk try std.ArrayList(OutputPiece).initCapacity(allocator, count); + break :brk try std.ArrayList(OutputPiece).initCapacity(alloc, count); }; errdefer pieces.deinit(); - const complete_output = try j.done(allocator); + const complete_output = try j.done(alloc); var output = complete_output; const prefix = c.unique_key_prefix; diff --git a/src/bundler/LinkerGraph.zig b/src/bundler/LinkerGraph.zig index bec4e3d392..c1fdba66df 100644 --- a/src/bundler/LinkerGraph.zig +++ b/src/bundler/LinkerGraph.zig @@ -429,7 +429,7 @@ pub const File = struct { entry_point_chunk_index: u32 = std.math.maxInt(u32), line_offset_table: bun.sourcemap.LineOffsetTable.List = .empty, - quoted_source_contents: ?[]u8 = null, + quoted_source_contents: Owned(?[]u8) = .initNull(), pub fn isEntryPoint(this: *const File) bool { return this.entry_point_kind.isEntryPoint(); @@ -452,6 +452,7 @@ const Environment = bun.Environment; const ImportRecord = bun.ImportRecord; const MultiArrayList = 
bun.MultiArrayList; const Output = bun.Output; +const Owned = bun.ptr.Owned; const js_ast = bun.ast; const Symbol = js_ast.Symbol; diff --git a/src/bundler/ThreadPool.zig b/src/bundler/ThreadPool.zig index 31f878b283..693e1d05ee 100644 --- a/src/bundler/ThreadPool.zig +++ b/src/bundler/ThreadPool.zig @@ -82,7 +82,7 @@ pub const ThreadPool = struct { pub fn init(v2: *BundleV2, worker_pool: ?*ThreadPoolLib) !ThreadPool { const pool = worker_pool orelse blk: { const cpu_count = bun.getThreadCount(); - const pool = try v2.graph.allocator.create(ThreadPoolLib); + const pool = try v2.allocator().create(ThreadPoolLib); pool.* = .init(.{ .max_threads = cpu_count }); debug("{d} workers", .{cpu_count}); break :blk pool; @@ -103,7 +103,7 @@ pub const ThreadPool = struct { pub fn deinit(this: *ThreadPool) void { if (this.worker_pool_is_owned) { this.worker_pool.deinit(); - this.v2.graph.allocator.destroy(this.worker_pool); + this.v2.allocator().destroy(this.worker_pool); } if (usesIOPool()) { IOThreadPool.release(); diff --git a/src/bundler/bundle_v2.zig b/src/bundler/bundle_v2.zig index b2f48758f7..2b95109b22 100644 --- a/src/bundler/bundle_v2.zig +++ b/src/bundler/bundle_v2.zig @@ -178,17 +178,17 @@ pub const BundleV2 = struct { fn initializeClientTranspiler(this: *BundleV2) !*Transpiler { @branchHint(.cold); - const allocator = this.graph.allocator; + const alloc = this.allocator(); const this_transpiler = this.transpiler; - const client_transpiler = try allocator.create(Transpiler); + const client_transpiler = try alloc.create(Transpiler); client_transpiler.* = this_transpiler.*; client_transpiler.options = this_transpiler.options; client_transpiler.options.target = .browser; client_transpiler.options.main_fields = options.Target.DefaultMainFields.get(options.Target.browser); client_transpiler.options.conditions = try options.ESMConditions.init( - allocator, + alloc, options.Target.browser.defaultConditions(), false, &.{}, @@ -206,11 +206,11 @@ pub const BundleV2 = struct 
{ } client_transpiler.setLog(this_transpiler.log); - client_transpiler.setAllocator(allocator); + client_transpiler.setAllocator(alloc); client_transpiler.linker.resolver = &client_transpiler.resolver; client_transpiler.macro_context = js_ast.Macro.MacroContext.init(client_transpiler); const CacheSet = @import("../cache.zig"); - client_transpiler.resolver.caches = CacheSet.Set.init(allocator); + client_transpiler.resolver.caches = CacheSet.Set.init(alloc); try client_transpiler.configureDefines(); client_transpiler.resolver.opts = client_transpiler.options; @@ -365,7 +365,7 @@ pub const BundleV2 = struct { // Create a quick index for server-component boundaries. // We need to mark the generated files as reachable, or else many files will appear missing. - var sfa = std.heap.stackFallback(4096, this.graph.allocator); + var sfa = std.heap.stackFallback(4096, this.allocator()); const stack_alloc = sfa.get(); var scb_bitset = if (this.graph.server_component_boundaries.list.len > 0) try this.graph.server_component_boundaries.slice().bitSet(stack_alloc, this.graph.input_files.len) @@ -380,13 +380,13 @@ pub const BundleV2 = struct { additional_files_imported_by_css_and_inlined.deinit(stack_alloc); } - this.dynamic_import_entry_points = std.AutoArrayHashMap(Index.Int, void).init(this.graph.allocator); + this.dynamic_import_entry_points = std.AutoArrayHashMap(Index.Int, void).init(this.allocator()); const all_urls_for_css = this.graph.ast.items(.url_for_css); var visitor = ReachableFileVisitor{ - .reachable = try std.ArrayList(Index).initCapacity(this.graph.allocator, this.graph.entry_points.items.len + 1), - .visited = try bun.bit_set.DynamicBitSet.initEmpty(this.graph.allocator, this.graph.input_files.len), + .reachable = try std.ArrayList(Index).initCapacity(this.allocator(), this.graph.entry_points.items.len + 1), + .visited = try bun.bit_set.DynamicBitSet.initEmpty(this.allocator(), this.graph.input_files.len), .redirects = 
this.graph.ast.items(.redirect_import_record_index), .all_import_records = this.graph.ast.items(.import_records), .all_loaders = this.graph.input_files.items(.loader), @@ -533,7 +533,7 @@ pub const BundleV2 = struct { log, source, import_record.range, - this.graph.allocator, + this.allocator(), "Browser build cannot {s} Node.js module: \"{s}\". To use Node.js builtins, set target to 'node' or 'bun'", .{ import_record.kind.errorLabel(), path_to_use }, import_record.kind, @@ -543,7 +543,7 @@ pub const BundleV2 = struct { log, source, import_record.range, - this.graph.allocator, + this.allocator(), "Could not resolve: \"{s}\". Maybe you need to \"bun install\"?", .{path_to_use}, import_record.kind, @@ -554,7 +554,7 @@ pub const BundleV2 = struct { log, source, import_record.range, - this.graph.allocator, + this.allocator(), "Could not resolve: \"{s}\"", .{ path_to_use, @@ -590,7 +590,7 @@ pub const BundleV2 = struct { if (path.pretty.ptr == path.text.ptr) { // TODO: outbase const rel = bun.path.relativePlatform(transpiler.fs.top_level_dir, path.text, .loose, false); - path.pretty = this.graph.allocator.dupe(u8, rel) catch bun.outOfMemory(); + path.pretty = this.allocator().dupe(u8, rel) catch bun.outOfMemory(); } path.assertPrettyIsValid(); @@ -600,11 +600,11 @@ pub const BundleV2 = struct { secondary != path and !strings.eqlLong(secondary.text, path.text, true)) { - secondary_path_to_copy = secondary.dupeAlloc(this.graph.allocator) catch bun.outOfMemory(); + secondary_path_to_copy = secondary.dupeAlloc(this.allocator()) catch bun.outOfMemory(); } } - const entry = this.pathToSourceIndexMap(target).getOrPut(this.graph.allocator, path.hashKey()) catch bun.outOfMemory(); + const entry = this.pathToSourceIndexMap(target).getOrPut(this.allocator(), path.hashKey()) catch bun.outOfMemory(); if (!entry.found_existing) { path.* = this.pathWithPrettyInitialized(path.*, target) catch bun.outOfMemory(); const loader: Loader = brk: { @@ -636,9 +636,9 @@ pub const BundleV2 = 
struct { .browser => .{ this.pathToSourceIndexMap(this.transpiler.options.target), this.pathToSourceIndexMap(.bake_server_components_ssr) }, .bake_server_components_ssr => .{ this.pathToSourceIndexMap(this.transpiler.options.target), this.pathToSourceIndexMap(.browser) }, }; - a.put(this.graph.allocator, entry.key_ptr.*, entry.value_ptr.*) catch bun.outOfMemory(); + a.put(this.allocator(), entry.key_ptr.*, entry.value_ptr.*) catch bun.outOfMemory(); if (this.framework.?.server_components.?.separate_ssr_graph) - b.put(this.graph.allocator, entry.key_ptr.*, entry.value_ptr.*) catch bun.outOfMemory(); + b.put(this.allocator(), entry.key_ptr.*, entry.value_ptr.*) catch bun.outOfMemory(); } } else { out_source_index = Index.init(entry.value_ptr.*); @@ -656,7 +656,7 @@ pub const BundleV2 = struct { target: options.Target, ) !void { // TODO: plugins with non-file namespaces - const entry = try this.pathToSourceIndexMap(target).getOrPut(this.graph.allocator, bun.hash(path_slice)); + const entry = try this.pathToSourceIndexMap(target).getOrPut(this.allocator(), bun.hash(path_slice)); if (entry.found_existing) { return; } @@ -674,9 +674,9 @@ pub const BundleV2 = struct { path = this.pathWithPrettyInitialized(path, target) catch bun.outOfMemory(); path.assertPrettyIsValid(); entry.value_ptr.* = source_index.get(); - this.graph.ast.append(this.graph.allocator, JSAst.empty) catch bun.outOfMemory(); + this.graph.ast.append(this.allocator(), JSAst.empty) catch bun.outOfMemory(); - try this.graph.input_files.append(this.graph.allocator, .{ + try this.graph.input_files.append(this.allocator(), .{ .source = .{ .path = path, .contents = "", @@ -685,7 +685,7 @@ pub const BundleV2 = struct { .loader = loader, .side_effects = result.primary_side_effects_data, }); - var task = try this.graph.allocator.create(ParseTask); + var task = try this.allocator().create(ParseTask); task.* = ParseTask.init(&result, source_index, this); task.loader = loader; task.task.node.next = null; @@ -701,7 
+701,7 @@ pub const BundleV2 = struct { if (!this.enqueueOnLoadPluginIfNeeded(task)) { if (loader.shouldCopyForBundling()) { var additional_files: *BabyList(AdditionalFile) = &this.graph.input_files.items(.additional_files)[source_index.get()]; - additional_files.push(this.graph.allocator, .{ .source_index = task.source_index.get() }) catch unreachable; + additional_files.push(this.allocator(), .{ .source_index = task.source_index.get() }) catch unreachable; this.graph.input_files.items(.side_effects)[source_index.get()] = .no_side_effects__pure_data; this.graph.estimated_file_loader_count += 1; } @@ -720,7 +720,7 @@ pub const BundleV2 = struct { var result = resolve; var path = result.path() orelse return null; - const entry = try this.pathToSourceIndexMap(target).getOrPut(this.graph.allocator, hash orelse path.hashKey()); + const entry = try this.pathToSourceIndexMap(target).getOrPut(this.allocator(), hash orelse path.hashKey()); if (entry.found_existing) { return null; } @@ -735,9 +735,9 @@ pub const BundleV2 = struct { path.* = this.pathWithPrettyInitialized(path.*, target) catch bun.outOfMemory(); path.assertPrettyIsValid(); entry.value_ptr.* = source_index.get(); - this.graph.ast.append(this.graph.allocator, JSAst.empty) catch bun.outOfMemory(); + this.graph.ast.append(this.allocator(), JSAst.empty) catch bun.outOfMemory(); - try this.graph.input_files.append(this.graph.allocator, .{ + try this.graph.input_files.append(this.allocator(), .{ .source = .{ .path = path.*, .contents = "", @@ -746,7 +746,7 @@ pub const BundleV2 = struct { .loader = loader, .side_effects = resolve.primary_side_effects_data, }); - var task = try this.graph.allocator.create(ParseTask); + var task = try this.allocator().create(ParseTask); task.* = ParseTask.init(&result, source_index, this); task.loader = loader; task.task.node.next = null; @@ -766,7 +766,7 @@ pub const BundleV2 = struct { if (!this.enqueueOnLoadPluginIfNeeded(task)) { if (loader.shouldCopyForBundling()) { var 
additional_files: *BabyList(AdditionalFile) = &this.graph.input_files.items(.additional_files)[source_index.get()]; - additional_files.push(this.graph.allocator, .{ .source_index = task.source_index.get() }) catch unreachable; + additional_files.push(this.allocator(), .{ .source_index = task.source_index.get() }) catch unreachable; this.graph.input_files.items(.side_effects)[source_index.get()] = _resolver.SideEffects.no_side_effects__pure_data; this.graph.estimated_file_loader_count += 1; } @@ -774,7 +774,7 @@ pub const BundleV2 = struct { this.graph.pool.schedule(task); } - try this.graph.entry_points.append(this.graph.allocator, source_index); + try this.graph.entry_points.append(this.allocator(), source_index); return source_index.get(); } @@ -783,7 +783,7 @@ pub const BundleV2 = struct { pub fn init( transpiler: *Transpiler, bake_options: ?BakeOptions, - allocator: std.mem.Allocator, + alloc: std.mem.Allocator, event_loop: EventLoop, cli_watch_flag: bool, thread_pool: ?*ThreadPoolLib, @@ -791,7 +791,7 @@ pub const BundleV2 = struct { ) !*BundleV2 { transpiler.env.loadTracy(); - const this = try allocator.create(BundleV2); + const this = try alloc.create(BundleV2); transpiler.options.mark_builtins_as_external = transpiler.options.target.isBun() or transpiler.options.target == .node; transpiler.resolver.opts.mark_builtins_as_external = transpiler.options.target.isBun() or transpiler.options.target == .node; @@ -803,14 +803,13 @@ pub const BundleV2 = struct { .graph = .{ .pool = undefined, .heap = heap, - .allocator = undefined, .kit_referenced_server_data = false, .kit_referenced_client_data = false, }, .linker = .{ .loop = event_loop, .graph = .{ - .allocator = undefined, + .allocator = heap.allocator(), }, }, .bun_watcher = null, @@ -831,12 +830,10 @@ pub const BundleV2 = struct { bun.assert(this.ssr_transpiler.options.server_components); } } - this.linker.graph.allocator = this.graph.heap.allocator(); - this.graph.allocator = this.linker.graph.allocator; - 
this.transpiler.allocator = this.graph.allocator; - this.transpiler.resolver.allocator = this.graph.allocator; - this.transpiler.linker.allocator = this.graph.allocator; - this.transpiler.log.msgs.allocator = this.graph.allocator; + this.transpiler.allocator = heap.allocator(); + this.transpiler.resolver.allocator = heap.allocator(); + this.transpiler.linker.allocator = heap.allocator(); + this.transpiler.log.msgs.allocator = heap.allocator(); this.transpiler.log.clone_line_text = true; // We don't expose an option to disable this. Bake forbids tree-shaking @@ -870,7 +867,7 @@ pub const BundleV2 = struct { this.linker.dev_server = transpiler.options.dev_server; - const pool = try this.graph.allocator.create(ThreadPool); + const pool = try this.allocator().create(ThreadPool); if (cli_watch_flag) { Watcher.enableHotModuleReloading(this); } @@ -883,6 +880,10 @@ pub const BundleV2 = struct { return this; } + pub fn allocator(this: *const BundleV2) std.mem.Allocator { + return this.graph.heap.allocator(); + } + const logScanCounter = bun.Output.scoped(.scan_counter, .visible); pub fn incrementScanCounter(this: *BundleV2) void { @@ -921,16 +922,16 @@ pub const BundleV2 = struct { { // Add the runtime const rt = ParseTask.getRuntimeSource(this.transpiler.options.target); - try this.graph.input_files.append(this.graph.allocator, Graph.InputFile{ + try this.graph.input_files.append(this.allocator(), Graph.InputFile{ .source = rt.source, .loader = .js, .side_effects = _resolver.SideEffects.no_side_effects__pure_data, }); // try this.graph.entry_points.append(allocator, Index.runtime); - try this.graph.ast.append(this.graph.allocator, JSAst.empty); - try this.pathToSourceIndexMap(this.transpiler.options.target).put(this.graph.allocator, bun.hash("bun:wrap"), Index.runtime.get()); - var runtime_parse_task = try this.graph.allocator.create(ParseTask); + try this.graph.ast.append(this.allocator(), JSAst.empty); + try 
this.pathToSourceIndexMap(this.transpiler.options.target).put(this.allocator(), bun.hash("bun:wrap"), Index.runtime.get()); + var runtime_parse_task = try this.allocator().create(ParseTask); runtime_parse_task.* = rt.parse_task; runtime_parse_task.ctx = this; runtime_parse_task.tree_shaking = true; @@ -957,8 +958,8 @@ pub const BundleV2 = struct { .dev_server => data.files.set.count(), }; - try this.graph.entry_points.ensureUnusedCapacity(this.graph.allocator, num_entry_points); - try this.graph.input_files.ensureUnusedCapacity(this.graph.allocator, num_entry_points); + try this.graph.entry_points.ensureUnusedCapacity(this.allocator(), num_entry_points); + try this.graph.input_files.ensureUnusedCapacity(this.allocator(), num_entry_points); switch (variant) { .normal => { @@ -1014,7 +1015,7 @@ pub const BundleV2 = struct { if (flags.client) brk: { const source_index = try this.enqueueEntryItem(null, resolved, true, .browser) orelse break :brk; if (flags.css) { - try data.css_data.putNoClobber(this.graph.allocator, Index.init(source_index), .{ .imported_on_server = false }); + try data.css_data.putNoClobber(this.allocator(), Index.init(source_index), .{ .imported_on_server = false }); } } if (flags.server) _ = try this.enqueueEntryItem(null, resolved, true, this.transpiler.options.target); @@ -1040,9 +1041,9 @@ pub const BundleV2 = struct { fn cloneAST(this: *BundleV2) !void { const trace = bun.perf.trace("Bundler.cloneAST"); defer trace.end(); - this.linker.allocator = this.transpiler.allocator; - this.linker.graph.allocator = this.transpiler.allocator; - this.linker.graph.ast = try this.graph.ast.clone(this.linker.allocator); + bun.safety.alloc.assertEq(this.allocator(), this.transpiler.allocator); + bun.safety.alloc.assertEq(this.allocator(), this.linker.graph.allocator); + this.linker.graph.ast = try this.graph.ast.clone(this.allocator()); var ast = this.linker.graph.ast.slice(); for (ast.items(.module_scope)) |*module_scope| { for (module_scope.children.slice()) 
|child| { @@ -1053,7 +1054,7 @@ pub const BundleV2 = struct { this.graph.heap.helpCatchMemoryIssues(); } - module_scope.generated = try module_scope.generated.clone(this.linker.allocator); + module_scope.generated = try module_scope.generated.clone(this.allocator()); } } @@ -1067,10 +1068,10 @@ pub const BundleV2 = struct { if (!this.graph.kit_referenced_server_data and !this.graph.kit_referenced_client_data) return; - const alloc = this.graph.allocator; + const alloc = this.allocator(); - var server = try AstBuilder.init(this.graph.allocator, &bake.server_virtual_source, this.transpiler.options.hot_module_reloading); - var client = try AstBuilder.init(this.graph.allocator, &bake.client_virtual_source, this.transpiler.options.hot_module_reloading); + var server = try AstBuilder.init(this.allocator(), &bake.server_virtual_source, this.transpiler.options.hot_module_reloading); + var client = try AstBuilder.init(this.allocator(), &bake.client_virtual_source, this.transpiler.options.hot_module_reloading); var server_manifest_props: std.ArrayListUnmanaged(G.Property) = .{}; var client_manifest_props: std.ArrayListUnmanaged(G.Property) = .{}; @@ -1199,14 +1200,14 @@ pub const BundleV2 = struct { known_target: options.Target, ) OOM!Index.Int { const source_index = Index.init(@as(u32, @intCast(this.graph.ast.len))); - this.graph.ast.append(this.graph.allocator, JSAst.empty) catch unreachable; + this.graph.ast.append(this.allocator(), JSAst.empty) catch unreachable; - this.graph.input_files.append(this.graph.allocator, .{ + this.graph.input_files.append(this.allocator(), .{ .source = source.*, .loader = loader, .side_effects = loader.sideEffects(), }) catch bun.outOfMemory(); - var task = this.graph.allocator.create(ParseTask) catch bun.outOfMemory(); + var task = this.allocator().create(ParseTask) catch bun.outOfMemory(); task.* = ParseTask.init(resolve_result, source_index, this); task.loader = loader; task.jsx = this.transpilerForTarget(known_target).options.jsx; @@ 
-1221,7 +1222,7 @@ pub const BundleV2 = struct { if (!this.enqueueOnLoadPluginIfNeeded(task)) { if (loader.shouldCopyForBundling()) { var additional_files: *BabyList(AdditionalFile) = &this.graph.input_files.items(.additional_files)[source_index.get()]; - additional_files.push(this.graph.allocator, .{ .source_index = task.source_index.get() }) catch unreachable; + additional_files.push(this.allocator(), .{ .source_index = task.source_index.get() }) catch unreachable; this.graph.input_files.items(.side_effects)[source_index.get()] = _resolver.SideEffects.no_side_effects__pure_data; this.graph.estimated_file_loader_count += 1; } @@ -1239,14 +1240,14 @@ pub const BundleV2 = struct { known_target: options.Target, ) OOM!Index.Int { const source_index = Index.init(@as(u32, @intCast(this.graph.ast.len))); - this.graph.ast.append(this.graph.allocator, JSAst.empty) catch unreachable; + this.graph.ast.append(this.allocator(), JSAst.empty) catch unreachable; - this.graph.input_files.append(this.graph.allocator, .{ + this.graph.input_files.append(this.allocator(), .{ .source = source.*, .loader = loader, .side_effects = loader.sideEffects(), }) catch bun.outOfMemory(); - var task = this.graph.allocator.create(ParseTask) catch bun.outOfMemory(); + var task = this.allocator().create(ParseTask) catch bun.outOfMemory(); task.* = .{ .ctx = this, .path = source.path, @@ -1275,7 +1276,7 @@ pub const BundleV2 = struct { if (!this.enqueueOnLoadPluginIfNeeded(task)) { if (loader.shouldCopyForBundling()) { var additional_files: *BabyList(AdditionalFile) = &this.graph.input_files.items(.additional_files)[source_index.get()]; - additional_files.push(this.graph.allocator, .{ .source_index = task.source_index.get() }) catch unreachable; + additional_files.push(this.allocator(), .{ .source_index = task.source_index.get() }) catch unreachable; this.graph.input_files.items(.side_effects)[source_index.get()] = _resolver.SideEffects.no_side_effects__pure_data; 
this.graph.estimated_file_loader_count += 1; } @@ -1295,12 +1296,12 @@ pub const BundleV2 = struct { var new_source: Logger.Source = source_without_index; const source_index = this.graph.input_files.len; new_source.index = Index.init(source_index); - try this.graph.input_files.append(this.graph.allocator, .{ + try this.graph.input_files.append(this.allocator(), .{ .source = new_source, .loader = .js, .side_effects = .has_side_effects, }); - try this.graph.ast.append(this.graph.allocator, JSAst.empty); + try this.graph.ast.append(this.allocator(), JSAst.empty); const task = bun.new(ServerComponentParseTask, .{ .data = data, @@ -1369,7 +1370,7 @@ pub const BundleV2 = struct { pub fn generateFromCLI( transpiler: *Transpiler, - allocator: std.mem.Allocator, + alloc: std.mem.Allocator, event_loop: EventLoop, enable_reloading: bool, reachable_files_count: *usize, @@ -1380,7 +1381,7 @@ pub const BundleV2 = struct { var this = try BundleV2.init( transpiler, null, - allocator, + alloc, event_loop, enable_reloading, null, @@ -1428,7 +1429,7 @@ pub const BundleV2 = struct { // Do this at the very end, after processing all the imports/exports so that we can follow exports as needed. if (fetcher) |fetch| { try this.getAllDependencies(reachable_files, fetch); - return std.ArrayList(options.OutputFile).init(allocator); + return std.ArrayList(options.OutputFile).init(alloc); } return try this.linker.generateChunksInParallel(chunks, false); @@ -1438,13 +1439,13 @@ pub const BundleV2 = struct { entry_points: bake.production.EntryPointMap, server_transpiler: *Transpiler, bake_options: BakeOptions, - allocator: std.mem.Allocator, + alloc: std.mem.Allocator, event_loop: EventLoop, ) !std.ArrayList(options.OutputFile) { var this = try BundleV2.init( server_transpiler, bake_options, - allocator, + alloc, event_loop, false, null, @@ -1501,7 +1502,7 @@ pub const BundleV2 = struct { // create two separate chunks. 
(note: bake passes each route as an entrypoint) { const scbs = this.graph.server_component_boundaries.slice(); - try this.graph.entry_points.ensureUnusedCapacity(this.graph.allocator, scbs.list.len * 2); + try this.graph.entry_points.ensureUnusedCapacity(this.allocator(), scbs.list.len * 2); for (scbs.list.items(.source_index), scbs.list.items(.ssr_source_index)) |original_index, ssr_index| { inline for (.{ original_index, ssr_index }) |idx| { this.graph.entry_points.appendAssumeCapacity(Index.init(idx)); @@ -1580,7 +1581,7 @@ pub const BundleV2 = struct { .entry_point_index = null, .is_executable = false, })) catch unreachable; - additional_files[index].push(this.graph.allocator, AdditionalFile{ + additional_files[index].push(this.allocator(), AdditionalFile{ .output_file = @as(u32, @truncate(additional_output_files.items.len - 1)), }) catch unreachable; } @@ -1632,9 +1633,9 @@ pub const BundleV2 = struct { plugins: ?*bun.jsc.API.JSBundler.Plugin, globalThis: *jsc.JSGlobalObject, event_loop: *bun.jsc.EventLoop, - allocator: std.mem.Allocator, + alloc: std.mem.Allocator, ) OOM!bun.jsc.JSValue { - const completion = try createAndScheduleCompletionTask(config, plugins, globalThis, event_loop, allocator); + const completion = try createAndScheduleCompletionTask(config, plugins, globalThis, event_loop, alloc); completion.promise = jsc.JSPromise.Strong.init(globalThis); return completion.promise.value(); } @@ -1694,12 +1695,12 @@ pub const BundleV2 = struct { pub fn configureBundler( completion: *JSBundleCompletionTask, transpiler: *Transpiler, - allocator: std.mem.Allocator, + alloc: std.mem.Allocator, ) !void { const config = &completion.config; transpiler.* = try bun.Transpiler.init( - allocator, + alloc, &completion.log, api.TransformOptions{ .define = if (config.define.count() > 0) config.define.toAPI() else null, @@ -1730,7 +1731,7 @@ pub const BundleV2 = struct { transpiler.options.entry_points = config.entry_points.keys(); transpiler.options.jsx = config.jsx; 
transpiler.options.no_macros = config.no_macros; - transpiler.options.loaders = try options.loadersFromTransformOptions(allocator, config.loaders, config.target); + transpiler.options.loaders = try options.loadersFromTransformOptions(alloc, config.loaders, config.target); transpiler.options.entry_naming = config.names.entry_point.data; transpiler.options.chunk_naming = config.names.chunk.data; transpiler.options.asset_naming = config.names.asset.data; @@ -2071,7 +2072,7 @@ pub const BundleV2 = struct { if (should_copy_for_bundling) { const source_index = load.source_index; var additional_files: *BabyList(AdditionalFile) = &this.graph.input_files.items(.additional_files)[source_index.get()]; - additional_files.push(this.graph.allocator, .{ .source_index = source_index.get() }) catch unreachable; + additional_files.push(this.allocator(), .{ .source_index = source_index.get() }) catch unreachable; this.graph.input_files.items(.side_effects)[source_index.get()] = .no_side_effects__pure_data; this.graph.estimated_file_loader_count += 1; } @@ -2124,7 +2125,7 @@ pub const BundleV2 = struct { .clone_line_text = false, .errors = @intFromBool(msg.kind == .err), .warnings = @intFromBool(msg.kind == .warn), - .msgs = std.ArrayList(Logger.Msg).fromOwnedSlice(this.graph.allocator, (&msg_mut)[0..1]), + .msgs = std.ArrayList(Logger.Msg).fromOwnedSlice(this.allocator(), (&msg_mut)[0..1]), }; dev.handleParseTaskFailure( error.Plugin, @@ -2205,7 +2206,7 @@ pub const BundleV2 = struct { path.namespace = result.namespace; } - const existing = this.pathToSourceIndexMap(resolve.import_record.original_target).getOrPut(this.graph.allocator, path.hashKey()) catch unreachable; + const existing = this.pathToSourceIndexMap(resolve.import_record.original_target).getOrPut(this.allocator(), path.hashKey()) catch unreachable; if (!existing.found_existing) { this.free_list.appendSlice(&.{ result.namespace, result.path }) catch {}; @@ -2215,10 +2216,10 @@ pub const BundleV2 = struct { const 
source_index = Index.init(@as(u32, @intCast(this.graph.ast.len))); existing.value_ptr.* = source_index.get(); out_source_index = source_index; - this.graph.ast.append(this.graph.allocator, JSAst.empty) catch unreachable; + this.graph.ast.append(this.allocator(), JSAst.empty) catch unreachable; const loader = path.loader(&this.transpiler.options.loaders) orelse options.Loader.file; - this.graph.input_files.append(this.graph.allocator, .{ + this.graph.input_files.append(this.allocator(), .{ .source = .{ .path = path, .contents = "", @@ -2253,7 +2254,7 @@ pub const BundleV2 = struct { if (!this.enqueueOnLoadPluginIfNeeded(task)) { if (loader.shouldCopyForBundling()) { var additional_files: *BabyList(AdditionalFile) = &this.graph.input_files.items(.additional_files)[source_index.get()]; - additional_files.push(this.graph.allocator, .{ .source_index = task.source_index.get() }) catch unreachable; + additional_files.push(this.allocator(), .{ .source_index = task.source_index.get() }) catch unreachable; this.graph.input_files.items(.side_effects)[source_index.get()] = _resolver.SideEffects.no_side_effects__pure_data; this.graph.estimated_file_loader_count += 1; } @@ -2274,14 +2275,14 @@ pub const BundleV2 = struct { const source_import_records = &this.graph.ast.items(.import_records)[resolve.import_record.importer_source_index]; if (source_import_records.len <= resolve.import_record.import_record_index) { const entry = this.resolve_tasks_waiting_for_import_source_index.getOrPut( - this.graph.allocator, + this.allocator(), resolve.import_record.importer_source_index, ) catch bun.outOfMemory(); if (!entry.found_existing) { entry.value_ptr.* = .{}; } entry.value_ptr.push( - this.graph.allocator, + this.allocator(), .{ .to_source_index = source_index, .import_record_index = resolve.import_record.import_record_index, @@ -2314,8 +2315,8 @@ pub const BundleV2 = struct { on_parse_finalizers.deinit(bun.default_allocator); } - defer this.graph.ast.deinit(this.graph.allocator); - 
defer this.graph.input_files.deinit(this.graph.allocator); + defer this.graph.ast.deinit(this.allocator()); + defer this.graph.input_files.deinit(this.allocator()); if (this.graph.pool.workers_assignments.count() > 0) { { this.graph.pool.workers_assignments_lock.lock(); @@ -2430,14 +2431,14 @@ pub const BundleV2 = struct { this.graph.heap.helpCatchMemoryIssues(); - this.dynamic_import_entry_points = .init(this.graph.allocator); + this.dynamic_import_entry_points = .init(this.allocator()); var html_files: std.AutoArrayHashMapUnmanaged(Index, void) = .{}; // Separate non-failing files into two lists: JS and CSS const js_reachable_files = reachable_files: { - var css_total_files = try std.ArrayListUnmanaged(Index).initCapacity(this.graph.allocator, this.graph.css_file_count); - try start.css_entry_points.ensureUnusedCapacity(this.graph.allocator, this.graph.css_file_count); - var js_files = try std.ArrayListUnmanaged(Index).initCapacity(this.graph.allocator, this.graph.ast.len - this.graph.css_file_count - 1); + var css_total_files = try std.ArrayListUnmanaged(Index).initCapacity(this.allocator(), this.graph.css_file_count); + try start.css_entry_points.ensureUnusedCapacity(this.allocator(), this.graph.css_file_count); + var js_files = try std.ArrayListUnmanaged(Index).initCapacity(this.allocator(), this.graph.ast.len - this.graph.css_file_count - 1); const asts = this.graph.ast.slice(); const css_asts = asts.items(.css); @@ -2462,7 +2463,7 @@ pub const BundleV2 = struct { // This means the file can become an error after // resolution, which is not usually the case. css_total_files.appendAssumeCapacity(Index.init(index)); - var log = Logger.Log.init(this.graph.allocator); + var log = Logger.Log.init(this.allocator()); defer log.deinit(); if (this.linker.scanCSSImports( @intCast(index), @@ -2491,7 +2492,7 @@ pub const BundleV2 = struct { // to routes in DevServer. They have a JS chunk too, // derived off of the import record list. 
if (loaders[index] == .html) { - try html_files.put(this.graph.allocator, Index.init(index), {}); + try html_files.put(this.allocator(), Index.init(index), {}); } else { js_files.appendAssumeCapacity(Index.init(index)); @@ -2530,7 +2531,7 @@ pub const BundleV2 = struct { for (this.graph.entry_points.items) |entry_point| { if (css[entry_point.get()] != null) { try start.css_entry_points.put( - this.graph.allocator, + this.allocator(), entry_point, .{ .imported_on_server = false }, ); @@ -2578,7 +2579,7 @@ pub const BundleV2 = struct { this.graph.heap.helpCatchMemoryIssues(); // Generate chunks - const js_part_ranges = try this.graph.allocator.alloc(PartRange, js_reachable_files.len); + const js_part_ranges = try this.allocator().alloc(PartRange, js_reachable_files.len); const parts = this.graph.ast.items(.parts); for (js_reachable_files, js_part_ranges) |source_index, *part_range| { part_range.* = .{ @@ -2588,7 +2589,7 @@ pub const BundleV2 = struct { }; } - const chunks = try this.graph.allocator.alloc( + const chunks = try this.allocator().alloc( Chunk, 1 + start.css_entry_points.count() + html_files.count(), ); @@ -2607,12 +2608,12 @@ pub const BundleV2 = struct { .parts_in_chunk_in_order = js_part_ranges, }, }, - .output_source_map = sourcemap.SourceMapPieces.init(this.graph.allocator), + .output_source_map = sourcemap.SourceMapPieces.init(this.allocator()), }; // Then all the distinct CSS bundles (these are JS->CSS, not CSS->CSS) for (chunks[1..][0..start.css_entry_points.count()], start.css_entry_points.keys()) |*chunk, entry_point| { - const order = this.linker.findImportedFilesInCSSOrder(this.graph.allocator, &.{entry_point}); + const order = this.linker.findImportedFilesInCSSOrder(this.allocator(), &.{entry_point}); chunk.* = .{ .entry_point = .{ .entry_point_id = @intCast(entry_point.get()), @@ -2622,10 +2623,10 @@ pub const BundleV2 = struct { .content = .{ .css = .{ .imports_in_chunk_in_order = order, - .asts = try 
this.graph.allocator.alloc(bun.css.BundlerStyleSheet, order.len), + .asts = try this.allocator().alloc(bun.css.BundlerStyleSheet, order.len), }, }, - .output_source_map = sourcemap.SourceMapPieces.init(this.graph.allocator), + .output_source_map = sourcemap.SourceMapPieces.init(this.allocator()), }; } @@ -2638,7 +2639,7 @@ pub const BundleV2 = struct { .is_entry_point = false, }, .content = .html, - .output_source_map = sourcemap.SourceMapPieces.init(this.graph.allocator), + .output_source_map = sourcemap.SourceMapPieces.init(this.allocator()), }; } @@ -2739,7 +2740,7 @@ pub const BundleV2 = struct { } fn pathWithPrettyInitialized(this: *BundleV2, path: Fs.Path, target: options.Target) !Fs.Path { - return genericPathWithPrettyInitialized(path, target, this.transpiler.fs.top_level_dir, this.graph.allocator); + return genericPathWithPrettyInitialized(path, target, this.transpiler.fs.top_level_dir, this.allocator()); } fn reserveSourceIndexesForBake(this: *BundleV2) !void { @@ -2750,8 +2751,8 @@ pub const BundleV2 = struct { bun.assert(this.graph.input_files.len == 1); bun.assert(this.graph.ast.len == 1); - try this.graph.ast.ensureUnusedCapacity(this.graph.allocator, 2); - try this.graph.input_files.ensureUnusedCapacity(this.graph.allocator, 2); + try this.graph.ast.ensureUnusedCapacity(this.allocator(), 2); + try this.graph.input_files.ensureUnusedCapacity(this.allocator(), 2); const server_source = bake.server_virtual_source; const client_source = bake.client_virtual_source; @@ -2798,7 +2799,7 @@ pub const BundleV2 = struct { estimated_resolve_queue_count += @as(usize, @intFromBool(!(import_record.is_internal or import_record.is_unused or import_record.source_index.isValid()))); } - var resolve_queue = ResolveQueue.init(this.graph.allocator); + var resolve_queue = ResolveQueue.init(this.allocator()); resolve_queue.ensureTotalCapacity(estimated_resolve_queue_count) catch bun.outOfMemory(); var last_error: ?anyerror = null; @@ -2915,7 +2916,7 @@ pub const BundleV2 = 
struct { this.logForResolutionFailures(source.path.text, .ssr).addErrorFmt( source, import_record.range.loc, - this.graph.allocator, + this.allocator(), "The 'bunBakeGraph' import attribute cannot be used outside of a Bun Bake bundle", .{}, ) catch @panic("unexpected log error"); @@ -2928,7 +2929,7 @@ pub const BundleV2 = struct { this.logForResolutionFailures(source.path.text, .ssr).addErrorFmt( source, import_record.range.loc, - this.graph.allocator, + this.allocator(), "Framework does not have a separate SSR graph to put this import into", .{}, ) catch @panic("unexpected log error"); @@ -2998,7 +2999,7 @@ pub const BundleV2 = struct { log, source, import_record.range, - this.graph.allocator, + this.allocator(), "Browser build cannot {s} Node.js builtin: \"{s}\"{s}", .{ import_record.kind.errorLabel(), @@ -3015,7 +3016,7 @@ pub const BundleV2 = struct { log, source, import_record.range, - this.graph.allocator, + this.allocator(), "Browser build cannot {s} Bun builtin: \"{s}\"{s}", .{ import_record.kind.errorLabel(), @@ -3032,7 +3033,7 @@ pub const BundleV2 = struct { log, source, import_record.range, - this.graph.allocator, + this.allocator(), "Browser build cannot {s} Bun builtin: \"{s}\"{s}", .{ import_record.kind.errorLabel(), @@ -3049,7 +3050,7 @@ pub const BundleV2 = struct { log, source, import_record.range, - this.graph.allocator, + this.allocator(), "Could not resolve: \"{s}\". 
Maybe you need to \"bun install\"?", .{import_record.path.text}, import_record.kind, @@ -3069,7 +3070,7 @@ pub const BundleV2 = struct { log, source, import_record.range, - this.graph.allocator, + this.allocator(), "Could not resolve: \"{s}\"", .{specifier_to_use}, import_record.kind, @@ -3112,7 +3113,7 @@ pub const BundleV2 = struct { log.addRangeErrorFmt( source, import_record.range, - this.graph.allocator, + this.allocator(), "Browser builds cannot import HTML files.", .{}, ) catch bun.outOfMemory(); @@ -3134,7 +3135,7 @@ pub const BundleV2 = struct { const hash = dev_server.assets.getHash(path.text) orelse @panic("cached asset not found"); import_record.path.text = path.text; import_record.path.namespace = "file"; - import_record.path.pretty = std.fmt.allocPrint(this.graph.allocator, bun.bake.DevServer.asset_prefix ++ "/{s}{s}", .{ + import_record.path.pretty = std.fmt.allocPrint(this.allocator(), bun.bake.DevServer.asset_prefix ++ "/{s}{s}", .{ &std.fmt.bytesToHex(std.mem.asBytes(&hash), .lower), std.fs.path.extension(path.text), }) catch bun.outOfMemory(); @@ -3185,7 +3186,7 @@ pub const BundleV2 = struct { secondary != path and !strings.eqlLong(secondary.text, path.text, true)) { - secondary_path_to_copy = secondary.dupeAlloc(this.graph.allocator) catch bun.outOfMemory(); + secondary_path_to_copy = secondary.dupeAlloc(this.allocator()) catch bun.outOfMemory(); } } @@ -3246,7 +3247,7 @@ pub const BundleV2 = struct { var js_parser_options = bun.js_parser.Parser.Options.init(this.transpilerForTarget(target).options.jsx, .html); js_parser_options.bundle = true; - const unique_key = try std.fmt.allocPrint(graph.allocator, "{any}H{d:0>8}", .{ + const unique_key = try std.fmt.allocPrint(this.allocator(), "{any}H{d:0>8}", .{ bun.fmt.hexIntLower(this.unique_key), graph.html_imports.server_source_indices.len, }); @@ -3254,7 +3255,7 @@ pub const BundleV2 = struct { const transpiler = this.transpilerForTarget(target); const ast_for_html_entrypoint = JSAst.init((try 
bun.js_parser.newLazyExportAST( - graph.allocator, + this.allocator(), transpiler.options.define, js_parser_options, transpiler.log, @@ -3276,12 +3277,12 @@ pub const BundleV2 = struct { .side_effects = .no_side_effects__pure_data, }; - try graph.input_files.append(graph.allocator, fake_input_file); - try graph.ast.append(graph.allocator, ast_for_html_entrypoint); + try graph.input_files.append(this.allocator(), fake_input_file); + try graph.ast.append(this.allocator(), ast_for_html_entrypoint); import_record.source_index = fake_input_file.source.index; - try this.pathToSourceIndexMap(target).put(graph.allocator, hash_key, fake_input_file.source.index.get()); - try graph.html_imports.server_source_indices.push(graph.allocator, fake_input_file.source.index.get()); + try this.pathToSourceIndexMap(target).put(this.allocator(), hash_key, fake_input_file.source.index.get()); + try graph.html_imports.server_source_indices.push(this.allocator(), fake_input_file.source.index.get()); this.ensureClientTranspiler(); } @@ -3324,7 +3325,7 @@ pub const BundleV2 = struct { this.onAfterDecrementScanCounter(); } - var resolve_queue = ResolveQueue.init(graph.allocator); + var resolve_queue = ResolveQueue.init(this.allocator()); defer resolve_queue.deinit(); var process_log = true; @@ -3416,7 +3417,7 @@ pub const BundleV2 = struct { const is_html_entrypoint = loader == .html and original_target.isServerSide() and this.transpiler.options.dev_server == null; const map = if (is_html_entrypoint) this.pathToSourceIndexMap(.browser) else path_to_source_index_map; - var existing = map.getOrPut(graph.allocator, hash) catch unreachable; + var existing = map.getOrPut(this.allocator(), hash) catch unreachable; // If the same file is imported and required, and those point to different files // Automatically rewrite it to the secondary one @@ -3446,12 +3447,12 @@ pub const BundleV2 = struct { diff += 1; - graph.input_files.append(this.graph.allocator, new_input_file) catch unreachable; - 
graph.ast.append(this.graph.allocator, JSAst.empty) catch unreachable; + graph.input_files.append(this.allocator(), new_input_file) catch unreachable; + graph.ast.append(this.allocator(), JSAst.empty) catch unreachable; if (is_html_entrypoint) { this.ensureClientTranspiler(); - this.graph.entry_points.append(this.graph.allocator, new_input_file.source.index) catch unreachable; + this.graph.entry_points.append(this.allocator(), new_input_file.source.index) catch unreachable; } if (this.enqueueOnLoadPluginIfNeeded(new_task)) { @@ -3460,7 +3461,7 @@ pub const BundleV2 = struct { if (loader.shouldCopyForBundling()) { var additional_files: *BabyList(AdditionalFile) = &graph.input_files.items(.additional_files)[result.source.index.get()]; - additional_files.push(graph.allocator, .{ .source_index = new_task.source_index.get() }) catch unreachable; + additional_files.push(this.allocator(), .{ .source_index = new_task.source_index.get() }) catch unreachable; new_input_file.side_effects = _resolver.SideEffects.no_side_effects__pure_data; graph.estimated_file_loader_count += 1; } @@ -3469,7 +3470,7 @@ pub const BundleV2 = struct { } else { if (loader.shouldCopyForBundling()) { var additional_files: *BabyList(AdditionalFile) = &graph.input_files.items(.additional_files)[result.source.index.get()]; - additional_files.push(graph.allocator, .{ .source_index = existing.value_ptr.* }) catch unreachable; + additional_files.push(this.allocator(), .{ .source_index = existing.value_ptr.* }) catch unreachable; graph.estimated_file_loader_count += 1; } @@ -3477,7 +3478,7 @@ pub const BundleV2 = struct { } } - var import_records = result.ast.import_records.clone(graph.allocator) catch unreachable; + var import_records = result.ast.import_records.clone(this.allocator()) catch unreachable; const input_file_loaders = graph.input_files.items(.loader); const save_import_record_source_index = this.transpiler.options.dev_server == null or @@ -3494,7 +3495,7 @@ pub const BundleV2 = struct { } var 
list = pending_entry.value.list(); - list.deinit(graph.allocator); + list.deinit(this.allocator()); } if (result.ast.css != null) { @@ -3509,7 +3510,7 @@ pub const BundleV2 = struct { if (getRedirectId(result.ast.redirect_import_record_index)) |compare| { if (compare == @as(u32, @truncate(i))) { path_to_source_index_map.put( - graph.allocator, + this.allocator(), result.source.path.hashKey(), source_index, ) catch unreachable; @@ -3565,13 +3566,13 @@ pub const BundleV2 = struct { }; graph.pathToSourceIndexMap(result.ast.target).put( - graph.allocator, + this.allocator(), result.source.path.hashKey(), reference_source_index, ) catch bun.outOfMemory(); graph.server_component_boundaries.put( - graph.allocator, + this.allocator(), result.source.index.get(), result.use_directive, reference_source_index, @@ -3988,14 +3989,6 @@ pub const CompileResult = union(enum) { else => "", }; } - - pub fn allocator(this: @This()) std.mem.Allocator { - return switch (this.result) { - .result => |result| result.code_allocator, - // empty slice can be freed by any allocator - else => bun.default_allocator, - }; - } }, css: struct { result: bun.Maybe([]const u8, anyerror), @@ -4015,7 +4008,6 @@ pub const CompileResult = union(enum) { .result = js_printer.PrintResult{ .result = .{ .code = "", - .code_allocator = bun.default_allocator, }, }, }, @@ -4032,13 +4024,6 @@ pub const CompileResult = union(enum) { }; } - pub fn allocator(this: *const CompileResult) ?std.mem.Allocator { - return switch (this.*) { - .javascript => |js| js.allocator(), - else => null, - }; - } - pub fn sourceMapChunk(this: *const CompileResult) ?sourcemap.Chunk { return switch (this.*) { .javascript => |r| switch (r.result) { diff --git a/src/bundler/linker_context/computeChunks.zig b/src/bundler/linker_context/computeChunks.zig index 30517ecb6c..a7ba643eee 100644 --- a/src/bundler/linker_context/computeChunks.zig +++ b/src/bundler/linker_context/computeChunks.zig @@ -7,7 +7,7 @@ pub noinline fn computeChunks( 
bun.assert(this.dev_server == null); // use - var stack_fallback = std.heap.stackFallback(4096, this.allocator); + var stack_fallback = std.heap.stackFallback(4096, this.allocator()); const stack_all = stack_fallback.get(); var arena = bun.ArenaAllocator.init(stack_all); defer arena.deinit(); @@ -63,7 +63,7 @@ pub noinline fn computeChunks( }, .entry_bits = entry_bits.*, .content = .html, - .output_source_map = sourcemap.SourceMapPieces.init(this.allocator), + .output_source_map = sourcemap.SourceMapPieces.init(this.allocator()), .is_browser_chunk_from_server_build = could_be_browser_target_from_server_build and ast_targets[source_index] == .browser, }; } @@ -94,10 +94,10 @@ pub noinline fn computeChunks( .content = .{ .css = .{ .imports_in_chunk_in_order = order, - .asts = this.allocator.alloc(bun.css.BundlerStyleSheet, order.len) catch bun.outOfMemory(), + .asts = this.allocator().alloc(bun.css.BundlerStyleSheet, order.len) catch bun.outOfMemory(), }, }, - .output_source_map = sourcemap.SourceMapPieces.init(this.allocator), + .output_source_map = sourcemap.SourceMapPieces.init(this.allocator()), .has_html_chunk = has_html_chunk, .is_browser_chunk_from_server_build = could_be_browser_target_from_server_build and ast_targets[source_index] == .browser, }; @@ -120,7 +120,7 @@ pub noinline fn computeChunks( .javascript = .{}, }, .has_html_chunk = has_html_chunk, - .output_source_map = sourcemap.SourceMapPieces.init(this.allocator), + .output_source_map = sourcemap.SourceMapPieces.init(this.allocator()), .is_browser_chunk_from_server_build = could_be_browser_target_from_server_build and ast_targets[source_index] == .browser, }; @@ -147,7 +147,7 @@ pub noinline fn computeChunks( const css_chunk_entry = try css_chunks.getOrPut(hash_to_use); - js_chunk_entry.value_ptr.content.javascript.css_chunks = try this.allocator.dupe(u32, &.{ + js_chunk_entry.value_ptr.content.javascript.css_chunks = try this.allocator().dupe(u32, &.{ @intCast(css_chunk_entry.index), }); 
js_chunks_with_css += 1; @@ -156,7 +156,7 @@ pub noinline fn computeChunks( var css_files_with_parts_in_chunk = std.AutoArrayHashMapUnmanaged(Index.Int, void){}; for (order.slice()) |entry| { if (entry.kind == .source_index) { - css_files_with_parts_in_chunk.put(this.allocator, entry.kind.source_index.get(), {}) catch bun.outOfMemory(); + css_files_with_parts_in_chunk.put(this.allocator(), entry.kind.source_index.get(), {}) catch bun.outOfMemory(); } } css_chunk_entry.value_ptr.* = .{ @@ -169,11 +169,11 @@ pub noinline fn computeChunks( .content = .{ .css = .{ .imports_in_chunk_in_order = order, - .asts = this.allocator.alloc(bun.css.BundlerStyleSheet, order.len) catch bun.outOfMemory(), + .asts = this.allocator().alloc(bun.css.BundlerStyleSheet, order.len) catch bun.outOfMemory(), }, }, .files_with_parts_in_chunk = css_files_with_parts_in_chunk, - .output_source_map = sourcemap.SourceMapPieces.init(this.allocator), + .output_source_map = sourcemap.SourceMapPieces.init(this.allocator()), .has_html_chunk = has_html_chunk, .is_browser_chunk_from_server_build = could_be_browser_target_from_server_build and ast_targets[source_index] == .browser, }; @@ -217,16 +217,16 @@ pub noinline fn computeChunks( .content = .{ .javascript = .{}, }, - .output_source_map = sourcemap.SourceMapPieces.init(this.allocator), + .output_source_map = sourcemap.SourceMapPieces.init(this.allocator()), .is_browser_chunk_from_server_build = is_browser_chunk_from_server_build, }; } - _ = js_chunk_entry.value_ptr.files_with_parts_in_chunk.getOrPut(this.allocator, @as(u32, @truncate(source_index.get()))) catch unreachable; + _ = js_chunk_entry.value_ptr.files_with_parts_in_chunk.getOrPut(this.allocator(), @as(u32, @truncate(source_index.get()))) catch unreachable; } else { var handler = Handler{ .chunks = js_chunks.values(), - .allocator = this.allocator, + .allocator = this.allocator(), .source_id = source_index.get(), }; entry_bits.forEach(Handler, &handler, Handler.next); @@ -239,7 +239,7 @@ pub 
noinline fn computeChunks( // Sort the chunks for determinism. This matters because we use chunk indices // as sorting keys in a few places. const chunks: []Chunk = sort_chunks: { - var sorted_chunks = try BabyList(Chunk).initCapacity(this.allocator, js_chunks.count() + css_chunks.count() + html_chunks.count()); + var sorted_chunks = try BabyList(Chunk).initCapacity(this.allocator(), js_chunks.count() + css_chunks.count() + html_chunks.count()); var sorted_keys = try BabyList(string).initCapacity(temp_allocator, js_chunks.count()); @@ -286,7 +286,7 @@ pub noinline fn computeChunks( } // We don't care about the order of the HTML chunks that have no JS chunks. - try sorted_chunks.append(this.allocator, html_chunks.values()); + try sorted_chunks.append(this.allocator(), html_chunks.values()); break :sort_chunks sorted_chunks.slice(); }; @@ -317,11 +317,11 @@ pub noinline fn computeChunks( } const unique_key_item_len = std.fmt.count("{any}C{d:0>8}", .{ bun.fmt.hexIntLower(unique_key), chunks.len }); - var unique_key_builder = try bun.StringBuilder.initCapacity(this.allocator, unique_key_item_len * chunks.len); + var unique_key_builder = try bun.StringBuilder.initCapacity(this.allocator(), unique_key_item_len * chunks.len); this.unique_key_buf = unique_key_builder.allocatedSlice(); errdefer { - unique_key_builder.deinit(this.allocator); + unique_key_builder.deinit(this.allocator()); this.unique_key_buf = ""; } @@ -392,7 +392,7 @@ pub noinline fn computeChunks( break :dir try dir.getFdPath(&real_path_buf); }; - chunk.template.placeholder.dir = try resolve_path.relativeAlloc(this.allocator, this.resolver.opts.root_dir, dir); + chunk.template.placeholder.dir = try resolve_path.relativeAlloc(this.allocator(), this.resolver.opts.root_dir, dir); } } diff --git a/src/bundler/linker_context/computeCrossChunkDependencies.zig b/src/bundler/linker_context/computeCrossChunkDependencies.zig index 2638bca36c..111281f41e 100644 --- 
a/src/bundler/linker_context/computeCrossChunkDependencies.zig +++ b/src/bundler/linker_context/computeCrossChunkDependencies.zig @@ -4,7 +4,7 @@ pub fn computeCrossChunkDependencies(c: *LinkerContext, chunks: []Chunk) !void { return; } - const chunk_metas = try c.allocator.alloc(ChunkMeta, chunks.len); + const chunk_metas = try c.allocator().alloc(ChunkMeta, chunks.len); for (chunk_metas) |*meta| { // these must be global allocator meta.* = .{ @@ -19,12 +19,12 @@ pub fn computeCrossChunkDependencies(c: *LinkerContext, chunks: []Chunk) !void { meta.exports.deinit(); meta.dynamic_imports.deinit(); } - c.allocator.free(chunk_metas); + c.allocator().free(chunk_metas); } { - const cross_chunk_dependencies = c.allocator.create(CrossChunkDependencies) catch unreachable; - defer c.allocator.destroy(cross_chunk_dependencies); + const cross_chunk_dependencies = c.allocator().create(CrossChunkDependencies) catch unreachable; + defer c.allocator().destroy(cross_chunk_dependencies); cross_chunk_dependencies.* = .{ .chunks = chunks, @@ -42,7 +42,7 @@ pub fn computeCrossChunkDependencies(c: *LinkerContext, chunks: []Chunk) !void { }; c.parse_graph.pool.worker_pool.eachPtr( - c.allocator, + c.allocator(), cross_chunk_dependencies, CrossChunkDependencies.walk, chunks, @@ -236,8 +236,8 @@ fn computeCrossChunkDependenciesWithChunkMetas(c: *LinkerContext, chunks: []Chun { var entry = try js .imports_from_other_chunks - .getOrPutValue(c.allocator, other_chunk_index, .{}); - try entry.value_ptr.push(c.allocator, .{ + .getOrPutValue(c.allocator(), other_chunk_index, .{}); + try entry.value_ptr.push(c.allocator(), .{ .ref = import_ref, }); } @@ -257,7 +257,7 @@ fn computeCrossChunkDependenciesWithChunkMetas(c: *LinkerContext, chunks: []Chun if (other_chunk.entry_bits.isSet(chunk.entry_point.entry_point_id)) { _ = js.imports_from_other_chunks.getOrPutValue( - c.allocator, + c.allocator(), @as(u32, @truncate(other_chunk_index)), CrossChunkImport.Item.List{}, ) catch unreachable; @@ -272,7 
+272,7 @@ fn computeCrossChunkDependenciesWithChunkMetas(c: *LinkerContext, chunks: []Chun const dynamic_chunk_indices = chunk_meta.dynamic_imports.keys(); std.sort.pdq(Index.Int, dynamic_chunk_indices, {}, std.sort.asc(Index.Int)); - var imports = chunk.cross_chunk_imports.listManaged(c.allocator); + var imports = chunk.cross_chunk_imports.listManaged(c.allocator()); defer chunk.cross_chunk_imports.update(imports); imports.ensureUnusedCapacity(dynamic_chunk_indices.len) catch unreachable; const prev_len = imports.items.len; @@ -291,11 +291,11 @@ fn computeCrossChunkDependenciesWithChunkMetas(c: *LinkerContext, chunks: []Chun // aliases simultaneously to avoid collisions. { bun.assert(chunk_metas.len == chunks.len); - var r = renamer.ExportRenamer.init(c.allocator); + var r = renamer.ExportRenamer.init(c.allocator()); defer r.deinit(); debug("Generating cross-chunk exports", .{}); - var stable_ref_list = std.ArrayList(StableRef).init(c.allocator); + var stable_ref_list = std.ArrayList(StableRef).init(c.allocator()); defer stable_ref_list.deinit(); for (chunks, chunk_metas) |*chunk, *chunk_meta| { @@ -309,14 +309,14 @@ fn computeCrossChunkDependenciesWithChunkMetas(c: *LinkerContext, chunks: []Chun chunk_meta.exports, &stable_ref_list, ); - var clause_items = BabyList(js_ast.ClauseItem).initCapacity(c.allocator, stable_ref_list.items.len) catch unreachable; + var clause_items = BabyList(js_ast.ClauseItem).initCapacity(c.allocator(), stable_ref_list.items.len) catch unreachable; clause_items.len = @as(u32, @truncate(stable_ref_list.items.len)); - repr.exports_to_other_chunks.ensureUnusedCapacity(c.allocator, stable_ref_list.items.len) catch unreachable; + repr.exports_to_other_chunks.ensureUnusedCapacity(c.allocator(), stable_ref_list.items.len) catch unreachable; r.clearRetainingCapacity(); for (stable_ref_list.items, clause_items.slice()) |stable_ref, *clause_item| { const ref = stable_ref.ref; - const alias = if (c.options.minify_identifiers) try 
r.nextMinifiedName(c.allocator) else r.nextRenamedName(c.graph.symbols.get(ref).?.original_name); + const alias = if (c.options.minify_identifiers) try r.nextMinifiedName(c.allocator()) else r.nextRenamedName(c.graph.symbols.get(ref).?.original_name); clause_item.* = .{ .name = .{ @@ -335,8 +335,8 @@ fn computeCrossChunkDependenciesWithChunkMetas(c: *LinkerContext, chunks: []Chun } if (clause_items.len > 0) { - var stmts = BabyList(js_ast.Stmt).initCapacity(c.allocator, 1) catch unreachable; - const export_clause = c.allocator.create(js_ast.S.ExportClause) catch unreachable; + var stmts = BabyList(js_ast.Stmt).initCapacity(c.allocator(), 1) catch unreachable; + const export_clause = c.allocator().create(js_ast.S.ExportClause) catch unreachable; export_clause.* = .{ .items = clause_items.slice(), .is_single_line = true, @@ -360,7 +360,7 @@ fn computeCrossChunkDependenciesWithChunkMetas(c: *LinkerContext, chunks: []Chun // be embedded in the generated import statements. { debug("Generating cross-chunk imports", .{}); - var list = CrossChunkImport.List.init(c.allocator); + var list = CrossChunkImport.List.init(c.allocator()); defer list.deinit(); for (chunks) |*chunk| { if (chunk.content != .javascript) continue; @@ -375,7 +375,7 @@ fn computeCrossChunkDependenciesWithChunkMetas(c: *LinkerContext, chunks: []Chun .esm => { const import_record_index = @as(u32, @intCast(cross_chunk_imports.len)); - var clauses = std.ArrayList(js_ast.ClauseItem).initCapacity(c.allocator, cross_chunk_import.sorted_import_items.len) catch unreachable; + var clauses = std.ArrayList(js_ast.ClauseItem).initCapacity(c.allocator(), cross_chunk_import.sorted_import_items.len) catch unreachable; for (cross_chunk_import.sorted_import_items.slice()) |item| { clauses.appendAssumeCapacity(.{ .name = .{ @@ -387,18 +387,18 @@ fn computeCrossChunkDependenciesWithChunkMetas(c: *LinkerContext, chunks: []Chun }); } - cross_chunk_imports.push(c.allocator, .{ + cross_chunk_imports.push(c.allocator(), .{ 
.import_kind = .stmt, .chunk_index = cross_chunk_import.chunk_index, }) catch unreachable; - const import = c.allocator.create(js_ast.S.Import) catch unreachable; + const import = c.allocator().create(js_ast.S.Import) catch unreachable; import.* = .{ .items = clauses.items, .import_record_index = import_record_index, .namespace_ref = Ref.None, }; cross_chunk_prefix_stmts.push( - c.allocator, + c.allocator(), .{ .data = .{ .s_import = import, diff --git a/src/bundler/linker_context/findAllImportedPartsInJSOrder.zig b/src/bundler/linker_context/findAllImportedPartsInJSOrder.zig index c7d72ffa84..c797dc1279 100644 --- a/src/bundler/linker_context/findAllImportedPartsInJSOrder.zig +++ b/src/bundler/linker_context/findAllImportedPartsInJSOrder.zig @@ -29,7 +29,7 @@ pub fn findImportedPartsInJSOrder( parts_prefix_shared: *std.ArrayList(PartRange), chunk_index: u32, ) !void { - var chunk_order_array = try std.ArrayList(Chunk.Order).initCapacity(this.allocator, chunk.files_with_parts_in_chunk.count()); + var chunk_order_array = try std.ArrayList(Chunk.Order).initCapacity(this.allocator(), chunk.files_with_parts_in_chunk.count()); defer chunk_order_array.deinit(); const distances = this.graph.files.items(.distance_from_entry_point); for (chunk.files_with_parts_in_chunk.keys()) |source_index| { @@ -164,10 +164,10 @@ pub fn findImportedPartsInJSOrder( parts_prefix_shared.clearRetainingCapacity(); var visitor = FindImportedPartsVisitor{ - .files = std.ArrayList(Index.Int).init(this.allocator), + .files = std.ArrayList(Index.Int).init(this.allocator()), .part_ranges = part_ranges_shared.*, .parts_prefix = parts_prefix_shared.*, - .visited = std.AutoHashMap(Index.Int, void).init(this.allocator), + .visited = std.AutoHashMap(Index.Int, void).init(this.allocator()), .flags = this.graph.meta.items(.flags), .parts = this.graph.ast.items(.parts), .import_records = this.graph.ast.items(.import_records), @@ -194,7 +194,7 @@ pub fn findImportedPartsInJSOrder( }, } - const 
parts_in_chunk_order = try this.allocator.alloc(PartRange, visitor.part_ranges.items.len + visitor.parts_prefix.items.len); + const parts_in_chunk_order = try this.allocator().alloc(PartRange, visitor.part_ranges.items.len + visitor.parts_prefix.items.len); bun.concat(PartRange, parts_in_chunk_order, &.{ visitor.parts_prefix.items, visitor.part_ranges.items, diff --git a/src/bundler/linker_context/findImportedFilesInCSSOrder.zig b/src/bundler/linker_context/findImportedFilesInCSSOrder.zig index af9fea02b6..3ea0d50749 100644 --- a/src/bundler/linker_context/findImportedFilesInCSSOrder.zig +++ b/src/bundler/linker_context/findImportedFilesInCSSOrder.zig @@ -177,7 +177,7 @@ pub fn findImportedFilesInCSSOrder(this: *LinkerContext, temp_allocator: std.mem }; var visitor = Visitor{ - .allocator = this.allocator, + .allocator = this.allocator(), .temp_allocator = temp_allocator, .graph = &this.graph, .parse_graph = this.parse_graph, diff --git a/src/bundler/linker_context/generateChunksInParallel.zig b/src/bundler/linker_context/generateChunksInParallel.zig index 6e0d91e933..de82516eec 100644 --- a/src/bundler/linker_context/generateChunksInParallel.zig +++ b/src/bundler/linker_context/generateChunksInParallel.zig @@ -18,14 +18,14 @@ pub fn generateChunksInParallel( debug(" START {d} renamers", .{chunks.len}); defer debug(" DONE {d} renamers", .{chunks.len}); const ctx = GenerateChunkCtx{ .chunk = &chunks[0], .c = c, .chunks = chunks }; - try c.parse_graph.pool.worker_pool.eachPtr(c.allocator, ctx, LinkerContext.generateJSRenamer, chunks); + try c.parse_graph.pool.worker_pool.eachPtr(c.allocator(), ctx, LinkerContext.generateJSRenamer, chunks); } if (c.source_maps.line_offset_tasks.len > 0) { debug(" START {d} source maps (line offset)", .{chunks.len}); defer debug(" DONE {d} source maps (line offset)", .{chunks.len}); c.source_maps.line_offset_wait_group.wait(); - c.allocator.free(c.source_maps.line_offset_tasks); + c.allocator().free(c.source_maps.line_offset_tasks); 
c.source_maps.line_offset_tasks.len = 0; } @@ -46,7 +46,7 @@ pub fn generateChunksInParallel( defer debug(" DONE {d} prepare CSS ast (total count)", .{total_count}); var batch = ThreadPoolLib.Batch{}; - const tasks = c.allocator.alloc(LinkerContext.PrepareCssAstTask, total_count) catch bun.outOfMemory(); + const tasks = c.allocator().alloc(LinkerContext.PrepareCssAstTask, total_count) catch bun.outOfMemory(); var i: usize = 0; for (chunks) |*chunk| { if (chunk.content == .css) { @@ -71,8 +71,8 @@ pub fn generateChunksInParallel( } { - const chunk_contexts = c.allocator.alloc(GenerateChunkCtx, chunks.len) catch bun.outOfMemory(); - defer c.allocator.free(chunk_contexts); + const chunk_contexts = c.allocator().alloc(GenerateChunkCtx, chunks.len) catch bun.outOfMemory(); + defer c.allocator().free(chunk_contexts); { var total_count: usize = 0; @@ -81,29 +81,29 @@ pub fn generateChunksInParallel( .javascript => { chunk_ctx.* = .{ .c = c, .chunks = chunks, .chunk = chunk }; total_count += chunk.content.javascript.parts_in_chunk_in_order.len; - chunk.compile_results_for_chunk = c.allocator.alloc(CompileResult, chunk.content.javascript.parts_in_chunk_in_order.len) catch bun.outOfMemory(); + chunk.compile_results_for_chunk = c.allocator().alloc(CompileResult, chunk.content.javascript.parts_in_chunk_in_order.len) catch bun.outOfMemory(); has_js_chunk = true; }, .css => { has_css_chunk = true; chunk_ctx.* = .{ .c = c, .chunks = chunks, .chunk = chunk }; total_count += chunk.content.css.imports_in_chunk_in_order.len; - chunk.compile_results_for_chunk = c.allocator.alloc(CompileResult, chunk.content.css.imports_in_chunk_in_order.len) catch bun.outOfMemory(); + chunk.compile_results_for_chunk = c.allocator().alloc(CompileResult, chunk.content.css.imports_in_chunk_in_order.len) catch bun.outOfMemory(); }, .html => { has_html_chunk = true; // HTML gets only one chunk. 
chunk_ctx.* = .{ .c = c, .chunks = chunks, .chunk = chunk }; total_count += 1; - chunk.compile_results_for_chunk = c.allocator.alloc(CompileResult, 1) catch bun.outOfMemory(); + chunk.compile_results_for_chunk = c.allocator().alloc(CompileResult, 1) catch bun.outOfMemory(); }, } } debug(" START {d} compiling part ranges", .{total_count}); defer debug(" DONE {d} compiling part ranges", .{total_count}); - const combined_part_ranges = c.allocator.alloc(PendingPartRange, total_count) catch bun.outOfMemory(); - defer c.allocator.free(combined_part_ranges); + const combined_part_ranges = c.allocator().alloc(PendingPartRange, total_count) catch bun.outOfMemory(); + defer c.allocator().free(combined_part_ranges); var remaining_part_ranges = combined_part_ranges; var batch = ThreadPoolLib.Batch{}; for (chunks, chunk_contexts) |*chunk, *chunk_ctx| { @@ -173,7 +173,7 @@ pub fn generateChunksInParallel( debug(" START {d} source maps (quoted contents)", .{chunks.len}); defer debug(" DONE {d} source maps (quoted contents)", .{chunks.len}); c.source_maps.quoted_contents_wait_group.wait(); - c.allocator.free(c.source_maps.quoted_contents_tasks); + c.allocator().free(c.source_maps.quoted_contents_tasks); c.source_maps.quoted_contents_tasks.len = 0; } @@ -185,7 +185,7 @@ pub fn generateChunksInParallel( defer debug(" DONE {d} postprocess chunks", .{chunks_to_do.len}); try c.parse_graph.pool.worker_pool.eachPtr( - c.allocator, + c.allocator(), chunk_contexts[0], generateChunk, chunks_to_do, @@ -207,7 +207,7 @@ pub fn generateChunksInParallel( // TODO: enforceNoCyclicChunkImports() { - var path_names_map = bun.StringHashMap(void).init(c.allocator); + var path_names_map = bun.StringHashMap(void).init(c.allocator()); defer path_names_map.deinit(); const DuplicateEntry = struct { @@ -215,8 +215,8 @@ pub fn generateChunksInParallel( }; var duplicates_map: bun.StringArrayHashMapUnmanaged(DuplicateEntry) = .{}; - var chunk_visit_map = try AutoBitSet.initEmpty(c.allocator, chunks.len); - 
defer chunk_visit_map.deinit(c.allocator); + var chunk_visit_map = try AutoBitSet.initEmpty(c.allocator(), chunks.len); + defer chunk_visit_map.deinit(c.allocator()); // Compute the final hashes of each chunk, then use those to create the final // paths of each chunk. This can technically be done in parallel but it @@ -227,7 +227,7 @@ pub fn generateChunksInParallel( chunk_visit_map.setAll(false); chunk.template.placeholder.hash = hash.digest(); - const rel_path = std.fmt.allocPrint(c.allocator, "{any}", .{chunk.template}) catch bun.outOfMemory(); + const rel_path = std.fmt.allocPrint(c.allocator(), "{any}", .{chunk.template}) catch bun.outOfMemory(); bun.path.platformToPosixInPlace(u8, rel_path); if ((try path_names_map.getOrPut(rel_path)).found_existing) { @@ -242,7 +242,7 @@ pub fn generateChunksInParallel( // use resolvePosix since we asserted above all seps are '/' if (Environment.isWindows and std.mem.indexOf(u8, rel_path, "/./") != null) { var buf: bun.PathBuffer = undefined; - const rel_path_fixed = c.allocator.dupe(u8, bun.path.normalizeBuf(rel_path, &buf, .posix)) catch bun.outOfMemory(); + const rel_path_fixed = c.allocator().dupe(u8, bun.path.normalizeBuf(rel_path, &buf, .posix)) catch bun.outOfMemory(); chunk.final_rel_path = rel_path_fixed; continue; } diff --git a/src/bundler/linker_context/generateCodeForFileInChunkJS.zig b/src/bundler/linker_context/generateCodeForFileInChunkJS.zig index fd03de0e42..9fb99b10df 100644 --- a/src/bundler/linker_context/generateCodeForFileInChunkJS.zig +++ b/src/bundler/linker_context/generateCodeForFileInChunkJS.zig @@ -604,7 +604,6 @@ pub fn generateCodeForFileInChunkJS( return .{ .result = .{ .code = "", - .code_allocator = bun.default_allocator, .source_map = null, }, }; diff --git a/src/bundler/linker_context/generateCodeForLazyExport.zig b/src/bundler/linker_context/generateCodeForLazyExport.zig index 8fde055441..bd098d78a5 100644 --- a/src/bundler/linker_context/generateCodeForLazyExport.zig +++ 
b/src/bundler/linker_context/generateCodeForLazyExport.zig @@ -44,9 +44,9 @@ pub fn generateCodeForLazyExport(this: *LinkerContext, source_index: Index.Int) break :size size + 1; }; - var inner_visited = try BitSet.initEmpty(this.allocator, size); - defer inner_visited.deinit(this.allocator); - var composes_visited = std.AutoArrayHashMap(bun.bundle_v2.Ref, void).init(this.allocator); + var inner_visited = try BitSet.initEmpty(this.allocator(), size); + defer inner_visited.deinit(this.allocator()); + var composes_visited = std.AutoArrayHashMap(bun.bundle_v2.Ref, void).init(this.allocator()); defer composes_visited.deinit(); const Visitor = struct { @@ -219,7 +219,7 @@ pub fn generateCodeForLazyExport(this: *LinkerContext, source_index: Index.Int) .loc = stmt.loc, .log = this.log, .all_sources = all_sources, - .allocator = this.allocator, + .allocator = this.allocator(), .all_symbols = this.graph.ast.items(.symbols), }; @@ -227,7 +227,7 @@ pub fn generateCodeForLazyExport(this: *LinkerContext, source_index: Index.Int) const ref = entry.ref; bun.assert(ref.inner_index < symbols.len); - var template_parts = std.ArrayList(E.TemplatePart).init(this.allocator); + var template_parts = std.ArrayList(E.TemplatePart).init(this.allocator()); var value = Expr.init(E.NameOfSymbol, E.NameOfSymbol{ .ref = ref.toRealRef(source_index) }, stmt.loc); visitor.parts = &template_parts; @@ -254,7 +254,7 @@ pub fn generateCodeForLazyExport(this: *LinkerContext, source_index: Index.Int) } const key = symbols.at(ref.innerIndex()).original_name; - try exports.put(this.allocator, key, value); + try exports.put(this.allocator(), key, value); } part.stmts[0].data.s_lazy_export.* = Expr.init(E.Object, exports, stmt.loc).data; @@ -315,7 +315,7 @@ pub fn generateCodeForLazyExport(this: *LinkerContext, source_index: Index.Int) continue; } - const name = property.key.?.data.e_string.slice(this.allocator); + const name = property.key.?.data.e_string.slice(this.allocator()); // TODO: support 
non-identifier names if (!bun.js_lexer.isIdentifier(name)) @@ -333,17 +333,17 @@ pub fn generateCodeForLazyExport(this: *LinkerContext, source_index: Index.Int) // end up actually being used at this point (since import binding hasn't // happened yet). So we need to wait until after tree shaking happens. const generated = try this.generateNamedExportInFile(source_index, module_ref, name, name); - parts.ptr[generated[1]].stmts = this.allocator.alloc(Stmt, 1) catch unreachable; + parts.ptr[generated[1]].stmts = this.allocator().alloc(Stmt, 1) catch unreachable; parts.ptr[generated[1]].stmts[0] = Stmt.alloc( S.Local, S.Local{ .is_export = true, .decls = js_ast.G.Decl.List.fromSlice( - this.allocator, + this.allocator(), &.{ .{ .binding = Binding.alloc( - this.allocator, + this.allocator(), B.Identifier{ .ref = generated[0], }, @@ -364,13 +364,13 @@ pub fn generateCodeForLazyExport(this: *LinkerContext, source_index: Index.Int) source_index, module_ref, std.fmt.allocPrint( - this.allocator, + this.allocator(), "{}_default", .{this.parse_graph.input_files.items(.source)[source_index].fmtIdentifier()}, ) catch unreachable, "default", ); - parts.ptr[generated[1]].stmts = this.allocator.alloc(Stmt, 1) catch unreachable; + parts.ptr[generated[1]].stmts = this.allocator().alloc(Stmt, 1) catch unreachable; parts.ptr[generated[1]].stmts[0] = Stmt.alloc( S.ExportDefault, S.ExportDefault{ diff --git a/src/bundler/linker_context/generateCompileResultForHtmlChunk.zig b/src/bundler/linker_context/generateCompileResultForHtmlChunk.zig index 9c6f106374..2f4626fe3f 100644 --- a/src/bundler/linker_context/generateCompileResultForHtmlChunk.zig +++ b/src/bundler/linker_context/generateCompileResultForHtmlChunk.zig @@ -184,7 +184,7 @@ fn generateCompileResultForHTMLChunkImpl(worker: *ThreadPool.Worker, c: *LinkerC // HTML bundles for dev server must be allocated to it, as it must outlive // the bundle task. 
See `DevServer.RouteBundle.HTML.bundled_html_text` - const output_allocator = if (c.dev_server) |dev| dev.allocator else worker.allocator; + const output_allocator = if (c.dev_server) |dev| dev.allocator() else worker.allocator; var html_loader: HTMLLoader = .{ .linker = c, diff --git a/src/bundler/linker_context/generateCompileResultForJSChunk.zig b/src/bundler/linker_context/generateCompileResultForJSChunk.zig index cd0b13c8fc..06767fa06f 100644 --- a/src/bundler/linker_context/generateCompileResultForJSChunk.zig +++ b/src/bundler/linker_context/generateCompileResultForJSChunk.zig @@ -30,13 +30,11 @@ fn generateCompileResultForJSChunkImpl(worker: *ThreadPool.Worker, c: *LinkerCon // Client bundles for Bake must be globally allocated, // as it must outlive the bundle task. - const allocator = if (c.dev_server) |dev| - if (c.parse_graph.ast.items(.target)[part_range.source_index.get()].bakeGraph() == .client) - dev.allocator - else - default_allocator - else - default_allocator; + const allocator = blk: { + const dev = c.dev_server orelse break :blk default_allocator; + const graph = c.parse_graph.ast.items(.target)[part_range.source_index.get()].bakeGraph(); + break :blk if (graph == .client) dev.allocator() else default_allocator; + }; var arena = &worker.temporary_arena; var buffer_writer = js_printer.BufferWriter.init(allocator); diff --git a/src/bundler/linker_context/postProcessJSChunk.zig b/src/bundler/linker_context/postProcessJSChunk.zig index 1d4b99a431..c95f1d4ac7 100644 --- a/src/bundler/linker_context/postProcessJSChunk.zig +++ b/src/bundler/linker_context/postProcessJSChunk.zig @@ -203,7 +203,7 @@ pub fn postProcessJSChunk(ctx: GenerateChunkCtx, worker: *ThreadPool.Worker, chu if (cross_chunk_prefix.result.code.len > 0) { newline_before_comment = true; line_offset.advance(cross_chunk_prefix.result.code); - j.push(cross_chunk_prefix.result.code, cross_chunk_prefix.result.code_allocator); + j.push(cross_chunk_prefix.result.code, worker.allocator); } // 
Concatenate the generated JavaScript chunks together @@ -323,7 +323,7 @@ pub fn postProcessJSChunk(ctx: GenerateChunkCtx, worker: *ThreadPool.Worker, chu // Stick the entry point tail at the end of the file. Deliberately don't // include any source mapping information for this because it's automatically // generated and doesn't correspond to a location in the input file. - j.push(tail_code, entry_point_tail.allocator()); + j.push(tail_code, worker.allocator); } // Put the cross-chunk suffix inside the IIFE @@ -332,7 +332,7 @@ pub fn postProcessJSChunk(ctx: GenerateChunkCtx, worker: *ThreadPool.Worker, chu j.pushStatic("\n"); } - j.push(cross_chunk_suffix.result.code, cross_chunk_suffix.result.code_allocator); + j.push(cross_chunk_suffix.result.code, worker.allocator); } switch (output_format) { @@ -814,10 +814,9 @@ pub fn generateEntryPointTailJS( return .{ .javascript = .{ .source_index = source_index, - .result = .{ .result = .{ - .code = "", - .code_allocator = bun.default_allocator, - } }, + .result = .{ + .result = .{ .code = "" }, + }, }, }; } diff --git a/src/bundler/linker_context/prepareCssAstsForChunk.zig b/src/bundler/linker_context/prepareCssAstsForChunk.zig index e896ebfe47..98bff1ef14 100644 --- a/src/bundler/linker_context/prepareCssAstsForChunk.zig +++ b/src/bundler/linker_context/prepareCssAstsForChunk.zig @@ -107,7 +107,7 @@ fn prepareCssAstsForChunkImpl(c: *LinkerContext, chunk: *Chunk, allocator: std.m )) { .result => |v| v, .err => |e| { - c.log.addErrorFmt(null, Loc.Empty, c.allocator, "Error generating CSS for import: {}", .{e}) catch bun.outOfMemory(); + c.log.addErrorFmt(null, Loc.Empty, c.allocator(), "Error generating CSS for import: {}", .{e}) catch bun.outOfMemory(); continue; }, }; diff --git a/src/bundler/linker_context/scanImportsAndExports.zig b/src/bundler/linker_context/scanImportsAndExports.zig index 3286b478c3..da0c0cd2c2 100644 --- a/src/bundler/linker_context/scanImportsAndExports.zig +++ 
b/src/bundler/linker_context/scanImportsAndExports.zig @@ -62,7 +62,7 @@ pub fn scanImportsAndExports(this: *LinkerContext) !void { try this.log.addErrorFmt( &input_files[record.source_index.get()], compose.loc, - this.allocator, + this.allocator(), "The name \"{s}\" never appears in \"{s}\" as a CSS modules locally scoped class name. Note that \"composes\" only works with single class selectors.", .{ name.v, @@ -202,7 +202,7 @@ pub fn scanImportsAndExports(this: *LinkerContext) !void { .import_records = import_records_list, .exports_kind = exports_kind, .entry_point_kinds = entry_point_kinds, - .export_star_map = std.AutoHashMap(u32, void).init(this.allocator), + .export_star_map = std.AutoHashMap(u32, void).init(this.allocator()), .export_star_records = export_star_import_records, .output_format = output_format, }; @@ -271,14 +271,14 @@ pub fn scanImportsAndExports(this: *LinkerContext) !void { if (export_star_ids.len > 0) { if (export_star_ctx == null) { export_star_ctx = ExportStarContext{ - .allocator = this.allocator, + .allocator = this.allocator(), .resolved_exports = resolved_exports, .import_records_list = import_records_list, .export_star_records = export_star_import_records, .imports_to_bind = this.graph.meta.items(.imports_to_bind), - .source_index_stack = std.ArrayList(u32).initCapacity(this.allocator, 32) catch unreachable, + .source_index_stack = std.ArrayList(u32).initCapacity(this.allocator(), 32) catch unreachable, .exports_kind = exports_kind, .named_exports = this.graph.ast.items(.named_exports), }; @@ -367,7 +367,7 @@ pub fn scanImportsAndExports(this: *LinkerContext) !void { // imported using an import star statement. 
// Note: `do` will wait for all to finish before moving forward try this.parse_graph.pool.worker_pool.each( - this.allocator, + this.allocator(), this, LinkerContext.doStep5, this.graph.reachable_files, @@ -439,7 +439,7 @@ pub fn scanImportsAndExports(this: *LinkerContext) !void { break :brk count; }; - const string_buffer = this.allocator.alloc(u8, string_buffer_len) catch unreachable; + const string_buffer = this.allocator().alloc(u8, string_buffer_len) catch unreachable; var builder = bun.StringBuilder{ .len = 0, .cap = string_buffer.len, @@ -452,7 +452,7 @@ pub fn scanImportsAndExports(this: *LinkerContext) !void { // are necessary later. This is done now because the symbols map cannot be // mutated later due to parallelism. if (is_entry_point and output_format == .esm) { - const copies = this.allocator.alloc(Ref, aliases.len) catch unreachable; + const copies = this.allocator().alloc(Ref, aliases.len) catch unreachable; for (aliases, copies) |alias, *copy| { const original_name = builder.fmt("export_{}", .{bun.fmt.fmtIdentifier(alias)}); @@ -537,7 +537,7 @@ pub fn scanImportsAndExports(this: *LinkerContext) !void { const total_len = parts_declaring_symbol.len + @as(usize, import.re_exports.len) + @as(usize, part.dependencies.len); if (part.dependencies.cap < total_len) { - var list = std.ArrayList(Dependency).init(this.allocator); + var list = std.ArrayList(Dependency).init(this.allocator()); list.ensureUnusedCapacity(total_len) catch unreachable; list.appendSliceAssumeCapacity(part.dependencies.slice()); part.dependencies.update(list); @@ -568,7 +568,7 @@ pub fn scanImportsAndExports(this: *LinkerContext) !void { const extra_count = @as(usize, @intFromBool(force_include_exports)) + @as(usize, @intFromBool(add_wrapper)); - var dependencies = std.ArrayList(js_ast.Dependency).initCapacity(this.allocator, extra_count) catch bun.outOfMemory(); + var dependencies = std.ArrayList(js_ast.Dependency).initCapacity(this.allocator(), extra_count) catch bun.outOfMemory(); 
var resolved_exports_list: *ResolvedExports = &this.graph.meta.items(.resolved_exports)[id]; for (aliases) |alias| { diff --git a/src/env.zig b/src/env.zig index 509284b03d..3e2881a53f 100644 --- a/src/env.zig +++ b/src/env.zig @@ -31,9 +31,7 @@ pub const export_cpp_apis = if (build_options.override_no_export_cpp_apis) false /// Whether or not to enable allocation tracking when the `AllocationScope` /// allocator is used. -pub const enableAllocScopes = brk: { - break :brk isDebug or enable_asan; -}; +pub const enableAllocScopes = isDebug or enable_asan; pub const build_options = @import("build_options"); diff --git a/src/js_printer.zig b/src/js_printer.zig index 60c453a573..c676cac054 100644 --- a/src/js_printer.zig +++ b/src/js_printer.zig @@ -488,7 +488,6 @@ pub const PrintResult = union(enum) { pub const Success = struct { code: []u8, - code_allocator: std.mem.Allocator, source_map: ?SourceMap.Chunk = null, }; }; @@ -6009,7 +6008,6 @@ pub fn printWithWriterAndPlatform( return .{ .result = .{ .code = buffer.toOwnedSlice(), - .code_allocator = buffer.allocator, .source_map = source_map, }, }; diff --git a/src/meta.zig b/src/meta.zig index 5e9686b496..3723d26c61 100644 --- a/src/meta.zig +++ b/src/meta.zig @@ -338,7 +338,9 @@ pub fn SliceChild(comptime T: type) type { } /// userland implementation of https://github.com/ziglang/zig/issues/21879 -pub fn VoidFieldTypes(comptime T: type) type { +pub fn useAllFields(comptime T: type, _: VoidFields(T)) void {} + +fn VoidFields(comptime T: type) type { const fields = @typeInfo(T).@"struct".fields; var new_fields = fields[0..fields.len].*; for (&new_fields) |*field| { diff --git a/src/ptr.zig b/src/ptr.zig index 0ea9fd869d..ed1c7a5a46 100644 --- a/src/ptr.zig +++ b/src/ptr.zig @@ -9,6 +9,7 @@ pub const owned = @import("./ptr/owned.zig"); pub const Owned = owned.Owned; // owned pointer allocated with default allocator pub const DynamicOwned = owned.Dynamic; // owned pointer allocated with any allocator pub const MaybeOwned 
= owned.maybe.MaybeOwned; // owned or borrowed pointer +pub const ScopedOwned = owned.scoped.ScopedOwned; // uses `AllocationScope` pub const shared = @import("./ptr/shared.zig"); pub const Shared = shared.Shared; diff --git a/src/ptr/owned.zig b/src/ptr/owned.zig index 7b350811a9..7033834609 100644 --- a/src/ptr/owned.zig +++ b/src/ptr/owned.zig @@ -67,19 +67,19 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { /// the owned pointer is already the size of a raw pointer. pub const Unmanaged = if (options.allocator == null) owned.Unmanaged(Pointer, options); - /// Allocate a new owned pointer. The signature of this function depends on whether the + /// Allocates a new owned pointer. The signature of this function depends on whether the /// pointer is a single-item pointer or a slice, and whether a fixed allocator was provided /// in `options`. pub const alloc = (if (options.allocator) |allocator| switch (info.kind()) { .single => struct { - /// Allocate memory for a single value using `options.allocator`, and initialize it - /// with `value`. + /// Allocates memory for a single value using `options.allocator`, and initializes + /// it with `value`. pub fn alloc(value: Child) Allocator.Error!Self { return .allocSingle(allocator, value); } }, .slice => struct { - /// Allocate memory for `count` elements using `options.allocator`, and initialize + /// Allocates memory for `count` elements using `options.allocator`, and initializes /// every element with `elem`. pub fn alloc(count: usize, elem: Child) Allocator.Error!Self { return .allocSlice(allocator, count, elem); @@ -87,13 +87,13 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { }, } else switch (info.kind()) { .single => struct { - /// Allocate memory for a single value and initialize it with `value`. + /// Allocates memory for a single value and initialize it with `value`. 
pub fn alloc(allocator: Allocator, value: Child) Allocator.Error!Self { return .allocSingle(allocator, value); } }, .slice => struct { - /// Allocate memory for `count` elements, and initialize every element with `elem`. + /// Allocates memory for `count` elements, and initializes every element with `elem`. pub fn alloc(allocator: Allocator, count: usize, elem: Child) Allocator.Error!Self { return .allocSlice(allocator, count, elem); } }, @@ -105,7 +105,7 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { else true; - /// Allocate an owned pointer using the default allocator. This function calls + /// Allocates an owned pointer using the default allocator. This function calls /// `bun.outOfMemory` if memory allocation fails. pub const new = if (info.kind() == .single and supports_default_allocator) struct { pub fn new(value: Child) Self { @@ -113,7 +113,7 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { } }.new; - /// Create an owned pointer by allocating memory and performing a shallow copy of + /// Creates an owned pointer by allocating memory and performing a shallow copy of /// `data`. pub const allocDupe = (if (options.allocator) |allocator| struct { pub fn allocDupe(data: NonOptionalPointer) Allocator.Error!Self { @@ -126,7 +126,7 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { }).allocDupe; pub const fromRawOwned = (if (options.allocator == null) struct { - /// Create an owned pointer from a raw pointer and allocator. + /// Creates an owned pointer from a raw pointer and allocator. /// /// Requirements: /// @@ -139,7 +139,7 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { }; } } else struct { - /// Create an owned pointer from a raw pointer. + /// Creates an owned pointer from a raw pointer.
/// /// Requirements: /// @@ -153,7 +153,7 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { } }).fromRawOwned; - /// Deinitialize the pointer or slice, freeing its memory. + /// Deinitializes the pointer or slice, freeing its memory. /// /// By default, this will first call `deinit` on the data itself, if such a method exists. /// (For slices, this will call `deinit` on every element in this slice.) This behavior can @@ -200,16 +200,16 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { return self.unsafe_raw_pointer; } } else if (info.isOptional()) struct { - pub fn intoRawOwned(self: Self) struct { Pointer, Allocator } { - return .{ self.unsafe_raw_pointer, self.unsafe_allocator }; - } - } else struct { pub fn intoRawOwned(self: Self) ?struct { NonOptionalPointer, Allocator } { return .{ self.unsafe_raw_pointer orelse return null, self.unsafe_allocator }; } + } else struct { + pub fn intoRawOwned(self: Self) struct { Pointer, Allocator } { + return .{ self.unsafe_raw_pointer, self.unsafe_allocator }; + } }).intoRawOwned; - /// Return a null owned pointer. This function is provided only if `Pointer` is an + /// Returns a null owned pointer. This function is provided only if `Pointer` is an /// optional type. /// /// It is permitted, but not required, to call `deinit` on the returned value. @@ -224,7 +224,7 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { const OwnedNonOptional = WithOptions(NonOptionalPointer, options); - /// Convert an `Owned(?T)` into an `?Owned(T)`. + /// Converts an `Owned(?T)` into an `?Owned(T)`. /// /// This method sets `self` to null. It is therefore permitted, but not required, to call /// `deinit` on `self`. @@ -242,19 +242,19 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { const OwnedOptional = WithOptions(?Pointer, options); - /// Convert an `Owned(T)` into a non-null `Owned(?T)`. 
+ /// Converts an `Owned(T)` into a non-null `Owned(?T)`. /// /// This method invalidates `self`. - pub const intoOptional = if (!info.isOptional()) struct { - pub fn intoOptional(self: Self) OwnedOptional { + pub const toOptional = if (!info.isOptional()) struct { + pub fn toOptional(self: Self) OwnedOptional { return .{ .unsafe_raw_pointer = self.unsafe_raw_pointer, .unsafe_allocator = self.unsafe_allocator, }; } - }.intoOptional; + }.toOptional; - /// Convert this owned pointer into an unmanaged variant that doesn't store the allocator. + /// Converts this owned pointer into an unmanaged variant that doesn't store the allocator. /// /// This method invalidates `self`. /// @@ -270,7 +270,7 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { const DynamicOwned = WithOptions(Pointer, options.asDynamic()); - /// Convert an owned pointer that uses a fixed allocator into a dynamic one. + /// Converts an owned pointer that uses a fixed allocator into a dynamic one. /// /// This method invalidates `self`. /// @@ -332,7 +332,7 @@ fn Unmanaged(comptime Pointer: type, comptime options: Options) type { const Managed = WithOptions(Pointer, options); - /// Convert this unmanaged owned pointer back into a managed version. + /// Converts this unmanaged owned pointer back into a managed version. /// /// `allocator` must be the allocator that was used to allocate the pointer. pub fn toManaged(self: Self, allocator: Allocator) Managed { @@ -343,7 +343,7 @@ fn Unmanaged(comptime Pointer: type, comptime options: Options) type { return .fromRawOwned(data, allocator); } - /// Deinitialize the pointer or slice. See `Owned.deinit` for more information. + /// Deinitializes the pointer or slice. See `Owned.deinit` for more information. /// /// `allocator` must be the allocator that was used to allocate the pointer. 
pub fn deinit(self: Self, allocator: Allocator) void { @@ -369,6 +369,7 @@ fn Unmanaged(comptime Pointer: type, comptime options: Options) type { } pub const maybe = @import("./owned/maybe.zig"); +pub const scoped = @import("./owned/scoped.zig"); const bun = @import("bun"); const std = @import("std"); diff --git a/src/ptr/owned/maybe.zig b/src/ptr/owned/maybe.zig index 614249d1c5..f940ead971 100644 --- a/src/ptr/owned/maybe.zig +++ b/src/ptr/owned/maybe.zig @@ -43,9 +43,9 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { const Owned = owned.WithOptions(Pointer, options.toOwned()); - /// Create a `MaybeOwned(Pointer)` from an `Owned(Pointer)`. + /// Creates a `MaybeOwned(Pointer)` from an `Owned(Pointer)`. /// - /// This method invalidates `owned`. + /// This method invalidates `owned_ptr`. pub fn fromOwned(owned_ptr: Owned) Self { const data, const allocator = if (comptime info.isOptional()) owned_ptr.intoRawOwned() orelse return .initNull() @@ -57,7 +57,7 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { }; } - /// Create a `MaybeOwned(Pointer)` from a raw owned pointer or slice. + /// Creates a `MaybeOwned(Pointer)` from a raw owned pointer or slice. /// /// Requirements: /// @@ -67,7 +67,7 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { return .fromOwned(.fromRawOwned(data, allocator)); } - /// Create a `MaybeOwned(Pointer)` from borrowed slice or pointer. + /// Creates a `MaybeOwned(Pointer)` from borrowed slice or pointer. /// /// `data` must not be freed for the life of the `MaybeOwned`. pub fn fromBorrowed(data: NonOptionalPointer) Self { @@ -77,7 +77,7 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { }; } - /// Deinitialize the pointer or slice, freeing its memory if owned. + /// Deinitializes the pointer or slice, freeing its memory if owned. /// /// By default, if the data is owned, `deinit` will first be called on the data itself. 
/// See `Owned.deinit` for more information. @@ -134,7 +134,7 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { return !self.unsafe_allocator.isNull(); } - /// Return a null `MaybeOwned(Pointer)`. This method is provided only if `Pointer` is an + /// Returns a null `MaybeOwned(Pointer)`. This method is provided only if `Pointer` is an /// optional type. /// /// It is permitted, but not required, to call `deinit` on the returned value. diff --git a/src/ptr/owned/scoped.zig b/src/ptr/owned/scoped.zig new file mode 100644 index 0000000000..2775323bab --- /dev/null +++ b/src/ptr/owned/scoped.zig @@ -0,0 +1,148 @@ +/// Options for `WithOptions`. +pub const Options = struct { + // Whether to call `deinit` on the data before freeing it, if such a method exists. + deinit: bool = true, + + // The owned pointer will always use this allocator. + allocator: Allocator = bun.default_allocator, + + fn toDynamic(self: Options) owned.Options { + return .{ + .deinit = self.deinit, + .allocator = null, + }; + } +}; + +/// An owned pointer that uses `AllocationScope` when enabled. +pub fn ScopedOwned(comptime Pointer: type) type { + return WithOptions(Pointer, .{}); +} + +/// Like `ScopedOwned`, but takes explicit options. +/// +/// `ScopedOwned(Pointer)` is simply an alias of `WithOptions(Pointer, .{})`. +pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { + const info = PointerInfo.parse(Pointer, .{}); + const NonOptionalPointer = info.NonOptionalPointer; + + return struct { + const Self = @This(); + + unsafe_raw_pointer: Pointer, + unsafe_scope: if (AllocationScope.enabled) AllocationScope else void, + + const DynamicOwned = owned.WithOptions(Pointer, options.toDynamic()); + + /// Creates a `ScopedOwned` from a `DynamicOwned`. + /// + /// If `AllocationScope` is enabled, `owned_ptr` must have been allocated by an + /// `AllocationScope`. Otherwise, `owned_ptr` must have been allocated by + /// `options.allocator`. 
+ /// + /// This method invalidates `owned_ptr`. + pub fn fromDynamic(owned_ptr: DynamicOwned) Self { + const data, const allocator = if (comptime info.isOptional()) + owned_ptr.intoRawOwned() orelse return .initNull() + else + owned_ptr.intoRawOwned(); + + const scope = if (comptime AllocationScope.enabled) + AllocationScope.downcast(allocator) orelse std.debug.panic( + "expected `AllocationScope` allocator", + .{}, + ); + + const parent = if (comptime AllocationScope.enabled) scope.parent() else allocator; + bun.safety.alloc.assertEq(parent, options.allocator); + return .{ + .unsafe_raw_pointer = data, + .unsafe_scope = if (comptime AllocationScope.enabled) scope, + }; + } + + /// Creates a `ScopedOwned` from a raw pointer and `AllocationScope`. + /// + /// If `AllocationScope` is enabled, `scope` must be non-null, and `data` must have + /// been allocated by `scope`. Otherwise, `data` must have been allocated by + /// `options.allocator`, and `scope` is ignored. + pub fn fromRawOwned(data: NonOptionalPointer, scope: ?AllocationScope) Self { + const allocator = if (comptime AllocationScope.enabled) + (scope orelse std.debug.panic( + "AllocationScope should be non-null when enabled", + .{}, + )).allocator() + else + options.allocator; + return .fromDynamic(.fromRawOwned(data, allocator)); + } + + /// Deinitializes the pointer or slice, freeing its memory if owned. + /// + /// By default, if the data is owned, `deinit` will first be called on the data itself. + pub fn deinit(self: Self) void { + self.toDynamic().deinit(); + } + + const SelfOrPtr = if (info.isConst()) Self else *Self; + + /// Returns the inner pointer or slice. + pub fn get(self: SelfOrPtr) Pointer { + return self.unsafe_raw_pointer; + } + + /// Returns a const version of the inner pointer or slice. + /// + /// This method is not provided if the pointer is already const; use `get` in that case.
+ pub const getConst = if (!info.isConst()) struct { + pub fn getConst(self: Self) AddConst(Pointer) { + return self.unsafe_raw_pointer; + } + }.getConst; + + /// Converts an owned pointer into a raw pointer. + /// + /// This method invalidates `self`. + pub fn intoRawOwned(self: Self) Pointer { + return self.unsafe_raw_pointer; + } + + /// Returns a null `ScopedOwned`. This method is provided only if `Pointer` is an optional + /// type. + /// + /// It is permitted, but not required, to call `deinit` on the returned value. + pub const initNull = if (info.isOptional()) struct { + pub fn initNull() Self { + return .{ + .unsafe_raw_pointer = null, + .unsafe_scope = undefined, + }; + } + }.initNull; + + /// Converts a `ScopedOwned` into a `DynamicOwned`. + /// + /// This method invalidates `self`. + pub fn toDynamic(self: Self) DynamicOwned { + const data = if (comptime info.isOptional()) + self.unsafe_raw_pointer orelse return .initNull() + else + self.unsafe_raw_pointer; + const allocator = if (comptime AllocationScope.enabled) + self.unsafe_scope.allocator() + else + options.allocator; + return .fromRawOwned(data, allocator); + } + }; +} + +const bun = @import("bun"); +const std = @import("std"); +const AllocationScope = bun.allocators.AllocationScope; +const Allocator = std.mem.Allocator; +const owned = bun.ptr.owned; + +const meta = @import("../meta.zig"); +const AddConst = meta.AddConst; +const PointerInfo = meta.PointerInfo; diff --git a/src/ptr/shared.zig b/src/ptr/shared.zig index 18cbd24ff3..4d4baafed8 100644 --- a/src/ptr/shared.zig +++ b/src/ptr/shared.zig @@ -186,11 +186,11 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { /// Converts a `Shared(*T)` into a non-null `Shared(?*T)`. /// /// This method invalidates `self`.
- pub const intoOptional = if (!info.isOptional()) struct { - pub fn intoOptional(self: Self) SharedOptional { + pub const toOptional = if (!info.isOptional()) struct { + pub fn toOptional(self: Self) SharedOptional { return .{ .unsafe_pointer = self.unsafe_pointer }; } - }.intoOptional; + }.toOptional; const Count = if (info.isOptional()) ?usize else usize; diff --git a/src/safety/alloc.zig b/src/safety/alloc.zig index 8fca18adb8..16acc0998c 100644 --- a/src/safety/alloc.zig +++ b/src/safety/alloc.zig @@ -42,7 +42,7 @@ fn hasPtr(alloc: Allocator) bool { /// This function may have false negatives; that is, it may fail to detect that two allocators /// are different. However, in practice, it's a useful safety check. pub fn assertEq(alloc1: Allocator, alloc2: Allocator) void { - if (comptime !bun.ci_assert) return; + if (comptime !enabled) return; bun.assertf( alloc1.vtable == alloc2.vtable, "allocators do not match (vtables differ: {*} and {*})", diff --git a/src/string/MutableString.zig b/src/string/MutableString.zig index 42e22b2b3d..cae69222cf 100644 --- a/src/string/MutableString.zig +++ b/src/string/MutableString.zig @@ -241,14 +241,24 @@ pub inline fn lenI(self: *MutableString) i32 { } pub fn toOwnedSlice(self: *MutableString) []u8 { - return self.list.toOwnedSlice(self.allocator) catch bun.outOfMemory(); // TODO + return bun.handleOom(self.list.toOwnedSlice(self.allocator)); +} + +pub fn toDynamicOwned(self: *MutableString) DynamicOwned([]u8) { + return .fromRawOwned(self.toOwnedSlice(), self.allocator); +} + +/// `self.allocator` must be `bun.default_allocator`. +pub fn toDefaultOwned(self: *MutableString) Owned([]u8) { + bun.safety.alloc.assertEq(self.allocator, bun.default_allocator); + return .fromRawOwned(self.toOwnedSlice()); } pub fn slice(self: *MutableString) []u8 { return self.list.items; } -/// Clear the existing value without freeing the memory or shrinking the capacity. +/// Take ownership of the existing value without discarding excess capacity. 
pub fn move(self: *MutableString) []u8 { const out = self.list.items; self.list = .{}; @@ -258,18 +268,14 @@ pub fn move(self: *MutableString) []u8 { /// Appends `0` if needed pub fn sliceWithSentinel(self: *MutableString) [:0]u8 { if (self.list.items.len > 0 and self.list.items[self.list.items.len - 1] != 0) { - self.list.append( - self.allocator, - 0, - ) catch unreachable; + bun.handleOom(self.list.append(self.allocator, 0)); } - return self.list.items[0 .. self.list.items.len - 1 :0]; } pub fn toOwnedSliceLength(self: *MutableString, length: usize) string { self.list.items.len = length; - return self.list.toOwnedSlice(self.allocator) catch bun.outOfMemory(); // TODO + return self.toOwnedSlice(); } pub fn containsChar(self: *const MutableString, char: u8) bool { @@ -463,3 +469,6 @@ const Allocator = std.mem.Allocator; const bun = @import("bun"); const js_lexer = bun.js_lexer; const strings = bun.strings; + +const DynamicOwned = bun.ptr.DynamicOwned; +const Owned = bun.ptr.Owned; diff --git a/test/internal/ban-limits.json b/test/internal/ban-limits.json index 5ba0f7e51b..cad1943259 100644 --- a/test/internal/ban-limits.json +++ b/test/internal/ban-limits.json @@ -9,7 +9,7 @@ ".stdDir()": 41, ".stdFile()": 18, "// autofix": 168, - ": [^=]+= undefined,$": 261, + ": [^=]+= undefined,$": 260, "== alloc.ptr": 0, "== allocator.ptr": 0, "@import(\"bun\").": 0, @@ -24,7 +24,7 @@ "globalObject.hasException": 47, "globalThis.hasException": 133, "std.StringArrayHashMap(": 1, - "std.StringArrayHashMapUnmanaged(": 12, + "std.StringArrayHashMapUnmanaged(": 11, "std.StringHashMap(": 0, "std.StringHashMapUnmanaged(": 0, "std.Thread.Mutex": 1,