From cbeffe1b48007641428f6998ee99602fe9f41d60 Mon Sep 17 00:00:00 2001 From: chloe caruso Date: Mon, 24 Feb 2025 20:02:38 -0800 Subject: [PATCH] hmr7 (#17641) --- src/Global.zig | 4 + src/{ => allocators}/AllocationScope.zig | 56 +- src/api/schema.zig | 1 + src/bake/DevServer.zig | 525 +++++++++++------- src/bake/bake.zig | 41 +- src/bun.js/api/server.zig | 19 +- src/bun.js/api/server/HTMLBundle.zig | 11 + src/bun.js/webcore/blob.zig | 8 +- src/bun.zig | 8 +- src/bundler/bundle_v2.zig | 23 +- src/bunfig.zig | 24 + src/crash_handler.zig | 2 +- src/defines.zig | 40 +- src/resolver/resolver.zig | 7 +- src/sourcemap/sourcemap.zig | 12 +- src/transpiler.zig | 5 +- ...{dev-server-harness.ts => bake-harness.ts} | 169 ++++-- test/bake/dev-and-prod.test.ts | 24 + test/bake/dev/bundle.test.ts | 35 +- test/bake/dev/css.test.ts | 2 +- test/bake/dev/dev-plugins.test.ts | 2 +- test/bake/dev/ecosystem.test.ts | 2 +- test/bake/dev/esm.test.ts | 2 +- test/bake/dev/html.test.ts | 2 +- test/bake/dev/react-spa.test.ts | 2 +- test/bake/dev/sourcemap.test.ts | 2 +- 26 files changed, 695 insertions(+), 333 deletions(-) rename src/{ => allocators}/AllocationScope.zig (62%) rename test/bake/{dev-server-harness.ts => bake-harness.ts} (91%) create mode 100644 test/bake/dev-and-prod.test.ts diff --git a/src/Global.zig b/src/Global.zig index 0a07e0ec8f..ba7521919d 100644 --- a/src/Global.zig +++ b/src/Global.zig @@ -91,6 +91,10 @@ export fn Bun__atexit(function: ExitFn) void { } } +pub fn addExitCallback(function: ExitFn) void { + Bun__atexit(function); +} + pub fn runExitCallbacks() void { for (on_exit_callbacks.items) |callback| { callback(); diff --git a/src/AllocationScope.zig b/src/allocators/AllocationScope.zig similarity index 62% rename from src/AllocationScope.zig rename to src/allocators/AllocationScope.zig index e114df70eb..d9cd9ea334 100644 --- a/src/AllocationScope.zig +++ b/src/allocators/AllocationScope.zig @@ -6,8 +6,9 @@ pub const enabled = bun.Environment.isDebug; parent: 
Allocator, state: if (enabled) struct { + mutex: bun.Mutex, total_memory_allocated: usize, - allocations: std.AutoHashMapUnmanaged([*]u8, Entry), + allocations: std.AutoHashMapUnmanaged([*]const u8, Entry), } else void, pub const Entry = struct { @@ -22,6 +23,7 @@ pub fn init(parent: Allocator) AllocationScope { .state = .{ .total_memory_allocated = 0, .allocations = .empty, + .mutex = .{}, }, } else @@ -30,15 +32,24 @@ pub fn init(parent: Allocator) AllocationScope { pub fn deinit(scope: *AllocationScope) void { if (enabled) { + scope.state.mutex.lock(); defer scope.state.allocations.deinit(scope.parent); const count = scope.state.allocations.count(); if (count == 0) return; - const Output = bun.Output; - Output.debugWarn("Allocation scope leaked {d} allocations ({d} bytes)", .{ count, scope.state.total_memory_allocated }); + Output.debugWarn("Allocation scope leaked {d} allocations ({})", .{ + count, + bun.fmt.size(scope.state.total_memory_allocated, .{}), + }); var it = scope.state.allocations.iterator(); + var n: usize = 0; while (it.next()) |entry| { Output.debugWarn("- {any}, len {d}, at:", .{ entry.key_ptr.*, entry.value_ptr.len }); bun.crash_handler.dumpStackTrace(entry.value_ptr.allocated_at.trace()); + n += 1; + if (n >= 8) { + Output.debugWarn("(only showing first 10 leaks)", .{}); + break; + } } } } @@ -55,6 +66,8 @@ const vtable: Allocator.VTable = .{ fn alloc(ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 { const scope: *AllocationScope = @ptrCast(@alignCast(ctx)); + scope.state.mutex.lock(); + defer scope.state.mutex.unlock(); scope.state.allocations.ensureUnusedCapacity(scope.parent, 1) catch return null; const result = scope.parent.vtable.alloc(scope.parent.ptr, len, ptr_align, ret_addr) orelse @@ -75,6 +88,8 @@ fn resize(ctx: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: u fn free(ctx: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void { const scope: *AllocationScope = @ptrCast(@alignCast(ctx)); + 
scope.state.mutex.lock(); + defer scope.state.mutex.unlock(); if (scope.state.allocations.fetchRemove(buf.ptr)) |entry| { scope.state.total_memory_allocated -= entry.value.len; } else { @@ -86,7 +101,42 @@ fn free(ctx: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void { return scope.parent.vtable.free(scope.parent.ptr, buf, buf_align, ret_addr); } +pub fn assertOwned(scope: *AllocationScope, ptr: anytype) void { + if (!enabled) return; + const cast_ptr: [*]const u8 = @ptrCast(switch (@typeInfo(@TypeOf(ptr)).pointer.size) { + .c, .one, .many => ptr, + .slice => if (ptr.len > 0) ptr.ptr else return, + }); + scope.state.mutex.lock(); + defer scope.state.mutex.unlock(); + _ = scope.state.allocations.getPtr(cast_ptr) orelse + @panic("this pointer was not owned by the allocation scope"); +} + +pub fn assertUnowned(scope: *AllocationScope, ptr: anytype) void { + if (!enabled) return; + const cast_ptr: [*]const u8 = @ptrCast(switch (@typeInfo(@TypeOf(ptr)).pointer.size) { + .c, .one, .many => ptr, + .slice => if (ptr.len > 0) ptr.ptr else return, + }); + scope.state.mutex.lock(); + defer scope.state.mutex.unlock(); + if (scope.state.allocations.getPtr(cast_ptr)) |owned| { + Output.debugWarn("Pointer allocated here:"); + bun.crash_handler.dumpStackTrace(owned.allocated_at.trace()); + } + @panic("this pointer was owned by the allocation scope when it was not supposed to be"); +} + +pub inline fn downcast(a: Allocator) ?*AllocationScope { + return if (enabled and a.vtable == &vtable) + @ptrCast(@alignCast(a.ptr)) + else + null; +} + const std = @import("std"); const Allocator = std.mem.Allocator; const bun = @import("root").bun; +const Output = bun.Output; const StoredTrace = bun.crash_handler.StoredTrace; diff --git a/src/api/schema.zig b/src/api/schema.zig index 91f066779c..7ab4d400e0 100644 --- a/src/api/schema.zig +++ b/src/api/schema.zig @@ -1710,6 +1710,7 @@ pub const Api = struct { serve_splitting: bool = false, serve_public_path: ?[]const u8 = null, 
serve_hmr: ?bool = null, + serve_define: ?StringMap = null, bunfig_path: []const u8, diff --git a/src/bake/DevServer.zig b/src/bake/DevServer.zig index 8680a93152..ece4835cd1 100644 --- a/src/bake/DevServer.zig +++ b/src/bake/DevServer.zig @@ -11,6 +11,13 @@ pub const DevServer = @This(); pub const debug = bun.Output.Scoped(.DevServer, false); pub const igLog = bun.Output.scoped(.IncrementalGraph, false); +/// Blockers to enable this in debug builds, which also means blockers to +/// enabling `.deinit()` in a release. +/// - Fix all memory leaks +/// - Fix cases where the server is deinitialized while bundling state is present. +/// Otherwise, this will reduce stability. +const experiment_with_memory_assertions = false; + pub const Options = struct { /// Arena must live until DevServer.deinit() arena: Allocator, @@ -29,10 +36,10 @@ pub const Options = struct { // made it easier to group related fields together, but one must remember those // structures still depend on the DevServer pointer. -/// Used for all server-wide allocations. In debug, this shows up in -/// a separate named heap. Thread-safe. +/// Used for all server-wide allocations. In debug, is is backed by a scope. Thread-safe. allocator: Allocator, -allocation_scope: if (AllocationScope.enabled) AllocationScope else void, +/// All methods are no-op in release builds. +allocation_scope: AllocationScope, /// Absolute path to project root directory. For the HMR /// runtime, its module IDs are strings relative to this. root: []const u8, @@ -235,9 +242,8 @@ pub const RouteBundle = struct { /// Invalidated when the list of CSS files changes. cached_css_file_array: JSC.Strong, - /// Contain the list of serialized failures. Hashmap allows for - /// efficient lookup and removal of failing files. - /// When state == .evaluation_failure, this is populated with that error. 
+ /// When state == .evaluation_failure, this is populated with the route + /// evaluation error mirrored in the dev server hash map evaluate_failure: ?SerializedFailure, }; @@ -294,6 +300,26 @@ pub const RouteBundle = struct { html: *HTMLBundle.HTMLBundleRoute, }; + pub fn deinit(rb: *RouteBundle, allocator: Allocator) void { + if (rb.client_bundle) |blob| blob.deref(); + switch (rb.data) { + .framework => |*fw| { + fw.cached_client_bundle_url.deinit(); + fw.cached_css_file_array.deinit(); + fw.cached_module_list.deinit(); + }, + .html => |*html| { + if (html.bundled_html_text) |text| { + allocator.free(text); + } + if (html.cached_response) |cached_response| { + cached_response.deref(); + } + html.html_bundle.deref(); + }, + } + } + pub fn invalidateClientBundle(self: *RouteBundle) void { if (self.client_bundle) |bundle| { bundle.deref(); @@ -328,7 +354,7 @@ pub const RouteBundle = struct { /// DevServer is stored on the heap, storing its allocator. pub fn init(options: Options) bun.JSOOM!*DevServer { - const allocator = bun.default_allocator; + const unchecked_allocator = bun.default_allocator; bun.analytics.Features.dev_server +|= 1; var dump_dir = if (bun.FeatureFlags.bake_debugging_features) @@ -344,9 +370,10 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { const separate_ssr_graph = if (options.framework.server_components) |sc| sc.separate_ssr_graph else false; - const dev = bun.create(allocator, DevServer, .{ + const dev = bun.new(DevServer, .{ .allocator = undefined, - .allocation_scope = if (AllocationScope.enabled) AllocationScope.init(allocator), + // 'init' is a no-op in release + .allocation_scope = AllocationScope.init(unchecked_allocator), .root = options.root, .vm = options.vm, @@ -382,10 +409,8 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { .refs = .empty, }, .source_maps = .empty, - .log = .init(allocator), .plugin_state = .unknown, .bundling_failures = .{}, - .deferred_request_pool = .init(allocator), 
.assume_perfect_incremental_bundling = if (bun.Environment.isDebug) if (bun.getenvZ("BUN_ASSUME_PERFECT_INCREMENTAL")) |env| !bun.strings.eqlComptime(env, "0") @@ -401,10 +426,16 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { .configuration_hash_key = undefined, .router = undefined, .watcher_atomics = undefined, + .log = undefined, + .deferred_request_pool = undefined, }); - dev.allocator = if (AllocationScope.enabled) dev.allocation_scope.allocator() else allocator; + errdefer bun.destroy(dev); + const allocator = dev.allocation_scope.allocator(); + dev.allocator = allocator; + dev.log = .init(allocator); + dev.deferred_request_pool = .init(allocator); + const global = dev.vm.global; - errdefer allocator.destroy(dev); assert(dev.server_graph.owner() == dev); assert(dev.client_graph.owner() == dev); @@ -426,11 +457,14 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { dev.watcher_atomics = WatcherAtomics.init(dev); - dev.framework.initTranspiler(allocator, &dev.log, .development, .server, &dev.server_transpiler, &dev.bundler_options.server) catch |err| + // This causes a memory leak, but the allocator is otherwise used on multiple threads. 
+ const transpiler_allocator = bun.default_allocator; + + dev.framework.initTranspiler(transpiler_allocator, &dev.log, .development, .server, &dev.server_transpiler, &dev.bundler_options.server) catch |err| return global.throwError(err, generic_action); dev.server_transpiler.options.dev_server = dev; dev.framework.initTranspiler( - allocator, + transpiler_allocator, &dev.log, .development, .client, @@ -444,7 +478,7 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { dev.client_transpiler.resolver.watcher = dev.bun_watcher.getResolveWatcher(); if (separate_ssr_graph) { - dev.framework.initTranspiler(allocator, &dev.log, .development, .ssr, &dev.ssr_transpiler, &dev.bundler_options.ssr) catch |err| + dev.framework.initTranspiler(transpiler_allocator, &dev.log, .development, .ssr, &dev.ssr_transpiler, &dev.bundler_options.ssr) catch |err| return global.throwError(err, generic_action); dev.ssr_transpiler.options.dev_server = dev; dev.ssr_transpiler.resolver.watcher = dev.bun_watcher.getResolveWatcher(); @@ -460,8 +494,8 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { }; errdefer dev.route_lookup.clearAndFree(allocator); - // errdefer dev.client_graph.deinit(allocator); - // errdefer dev.server_graph.deinit(allocator); + errdefer dev.client_graph.deinit(allocator); + errdefer dev.server_graph.deinit(allocator); dev.configuration_hash_key = hash_key: { var hash = std.hash.Wyhash.init(128); @@ -593,120 +627,124 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { if (bun.FeatureFlags.bake_debugging_features and dev.has_pre_crash_handler) try bun.crash_handler.appendPreCrashHandler(DevServer, dev, dumpStateDueToCrash); + if (experiment_with_memory_assertions) { + EnsureAllMemoryFreed.mutex.lock(); + defer EnsureAllMemoryFreed.mutex.unlock(); + try EnsureAllMemoryFreed.entries.put(bun.default_allocator, dev, {}); + bun.Global.addExitCallback(EnsureAllMemoryFreed.check); + } + return dev; } pub fn deinit(dev: *DevServer) void { - // TODO: Currently deinit is 
not implemented, as it was assumed to be alive for - // the remainder of this process' lifespan. This isn't always true. + if (!experiment_with_memory_assertions) + return; // TODO: unflag deinit + + if (experiment_with_memory_assertions) { + EnsureAllMemoryFreed.mutex.lock(); + defer EnsureAllMemoryFreed.mutex.unlock(); + _ = EnsureAllMemoryFreed.entries.swapRemove(dev); + } const allocator = dev.allocator; - _ = allocator; + const discard = voidFieldTypeDiscardHelper; + _ = VoidFieldTypes(DevServer){ + .root = {}, + .allocator = {}, + .allocation_scope = {}, // deinit at end + .configuration_hash_key = {}, + .watcher_atomics = {}, + .plugin_state = {}, + .generation = {}, + .bundles_since_last_error = {}, + .emit_visualizer_events = {}, + .deferred_request_pool = {}, + .frontend_only = {}, + .vm = {}, + .server = {}, + .server_transpiler = {}, + .client_transpiler = {}, + .ssr_transpiler = {}, + .framework = {}, + .bundler_options = {}, + .assume_perfect_incremental_bundling = {}, - // _ = VoidFieldTypes(DevServer){ - // // has no action taken - // .allocator = {}, - // .configuration_hash_key = {}, - // .graph_safety_lock = {}, - // .bun_watcher = {}, - // .watcher_atomics = {}, - // .plugin_state = {}, - // .generation = {}, - // .bundles_since_last_error = {}, - // .emit_visualizer_events = {}, - // .dump_dir = {}, - // .frontend_only = {}, - // .server_fetch_function_callback = {}, - // .server_register_update_callback = {}, - // .deferred_request_pool = {}, - - // .has_pre_crash_handler = if (dev.has_pre_crash_handler) - // bun.crash_handler.removePreCrashHandler(dev), - - // // pointers that are not considered a part of DevServer - // .vm = {}, - // .server = {}, - // .server_transpiler = {}, - // .client_transpiler = {}, - // .ssr_transpiler = {}, - // .log = {}, - // .framework = {}, // TODO: maybe - // .bundler_options = {}, // TODO: maybe - - // // to be counted. 
- // .root = { - // cost += dev.root.len; - // }, - // .router = { - // cost += dev.router.memoryCost(); - // }, - // .route_bundles = for (dev.route_bundles.items) |*bundle| { - // cost += bundle.memoryCost(); - // }, - // .server_graph = { - // cost += dev.server_graph.memoryCost(); - // }, - // .client_graph = { - // cost += dev.client_graph.memoryCost(); - // }, - // .assets = { - // cost += dev.assets.memoryCost(); - // }, - // .incremental_result = { - // cost += memoryCostArrayList(dev.incremental_result.client_components_added); - // cost += memoryCostArrayList(dev.incremental_result.html_routes_affected); - // cost += memoryCostArrayList(dev.incremental_result.framework_routes_affected); - // cost += memoryCostArrayList(dev.incremental_result.client_components_removed); - // cost += memoryCostArrayList(dev.incremental_result.failures_removed); - // cost += memoryCostArrayList(dev.incremental_result.client_components_affected); - // cost += memoryCostArrayList(dev.incremental_result.failures_added); - // }, - // .has_tailwind_plugin_hack = if (dev.has_tailwind_plugin_hack) |hack| { - // cost += memoryCostArrayHashMap(hack); - // }, - // .directory_watchers = { - // cost += memoryCostArrayList(dev.directory_watchers.dependencies); - // cost += memoryCostArrayList(dev.directory_watchers.dependencies_free_list); - // cost += memoryCostArrayHashMap(dev.directory_watchers.watches); - // for (dev.directory_watchers.dependencies.items) |dep| { - // cost += dep.specifier.len; - // } - // }, - // .html_router = { - // // std does not provide a way to measure exact allocation size of HashMapUnmanaged - // cost += dev.html_router.map.capacity() * (@sizeOf(*HTMLBundle.HTMLBundleRoute) + @sizeOf([]const u8)); - // // DevServer does not count the referenced HTMLBundle.HTMLBundleRoutes - // }, - // .bundling_failures = { - // cost += memoryCostSlice(dev.bundling_failures.keys()); - // for (dev.bundling_failures.keys()) |failure| { - // cost += failure.data.len; - // } - 
// }, - // .current_bundle = { - // // All entries are owned by the bundler arena, not DevServer, except for `requests` - // if (dev.current_bundle) |bundle| { - // var r = bundle.requests.first; - // while (r) |request| : (r = request.next) { - // cost += @sizeOf(DeferredRequest.Node); - // } - // } - // }, - // .next_bundle = { - // var r = dev.next_bundle.requests.first; - // while (r) |request| : (r = request.next) { - // cost += @sizeOf(DeferredRequest.Node); - // } - // cost += memoryCostArrayHashMap(dev.next_bundle.route_queue); - // }, - // .route_lookup = { - // cost += memoryCostArrayHashMap(dev.route_lookup); - // }, - // }; - - // dev.allocation_scope.deinit(); + .graph_safety_lock = dev.graph_safety_lock.lock(), + .bun_watcher = dev.bun_watcher.deinit(true), + .dump_dir = if (bun.FeatureFlags.bake_debugging_features) if (dev.dump_dir) |*dir| dir.close(), + .log = dev.log.deinit(), + .server_fetch_function_callback = dev.server_fetch_function_callback.deinit(), + .server_register_update_callback = dev.server_register_update_callback.deinit(), + .has_pre_crash_handler = if (dev.has_pre_crash_handler) + bun.crash_handler.removePreCrashHandler(dev), + .router = { + dev.router.deinit(allocator); + }, + .route_bundles = { + for (dev.route_bundles.items) |*rb| { + rb.deinit(allocator); + } + dev.route_bundles.deinit(allocator); + }, + .server_graph = dev.server_graph.deinit(allocator), + .client_graph = dev.client_graph.deinit(allocator), + .assets = dev.assets.deinit(allocator), + .incremental_result = discard(VoidFieldTypes(IncrementalResult){ + .had_adjusted_edges = {}, + .client_components_added = dev.incremental_result.client_components_added.deinit(allocator), + .framework_routes_affected = dev.incremental_result.framework_routes_affected.deinit(allocator), + .client_components_removed = dev.incremental_result.client_components_removed.deinit(allocator), + .failures_removed = dev.incremental_result.failures_removed.deinit(allocator), + 
.client_components_affected = dev.incremental_result.client_components_affected.deinit(allocator), + .failures_added = dev.incremental_result.failures_added.deinit(allocator), + .html_routes_soft_affected = dev.incremental_result.html_routes_soft_affected.deinit(allocator), + .html_routes_hard_affected = dev.incremental_result.html_routes_hard_affected.deinit(allocator), + }), + .has_tailwind_plugin_hack = if (dev.has_tailwind_plugin_hack) |*hack| { + hack.deinit(allocator); + }, + .directory_watchers = { + // dev.directory_watchers.dependencies + for (dev.directory_watchers.dependencies.items) |watcher| { + allocator.free(watcher.specifier); + } + dev.directory_watchers.watches.deinit(allocator); + dev.directory_watchers.dependencies.deinit(allocator); + dev.directory_watchers.dependencies_free_list.deinit(allocator); + }, + .html_router = dev.html_router.map.deinit(dev.allocator), + .bundling_failures = { + for (dev.bundling_failures.keys()) |failure| { + failure.deinit(dev); + } + dev.bundling_failures.deinit(allocator); + }, + .current_bundle = { + // TODO: deinitializing in this state is almost certainly an assertion failure. + if (dev.current_bundle) |_| { + bun.debugAssert(false); + } + }, + .next_bundle = { + var r = dev.next_bundle.requests.first; + while (r) |request| : (r = request.next) { + // TODO: deinitializing in this state is almost certainly an assertion failure. + request.data.deinit(); + } + dev.next_bundle.route_queue.deinit(allocator); + }, + .route_lookup = dev.route_lookup.deinit(allocator), + .source_maps = { + for (dev.source_maps.entries.values()) |value| { + allocator.free(value.sourceContents()); + allocator.free(value.file_paths); + value.response.deref(); + } + dev.source_maps.entries.deinit(allocator); + }, + }; + dev.allocation_scope.deinit(); bun.destroy(dev); - // if (bun.Environment.isDebug) - // bun.todoPanic(@src(), "bake.DevServer.deinit()", .{}); } /// Returns an estimation for how many bytes DevServer is explicitly aware of. 
@@ -720,6 +758,7 @@ pub fn deinit(dev: *DevServer) void { /// is exponentially easy to mess up memory management. pub fn memoryCost(dev: *DevServer) usize { var cost: usize = @sizeOf(DevServer); + const discard = voidFieldTypeDiscardHelper; // See https://github.com/ziglang/zig/issues/21879 _ = VoidFieldTypes(DevServer){ // does not contain pointers @@ -778,16 +817,33 @@ pub fn memoryCost(dev: *DevServer) usize { cost += memoryCostSlice(entry.file_paths); // do not re-count contents } }, - .incremental_result = { - cost += memoryCostArrayList(dev.incremental_result.client_components_added); - cost += memoryCostArrayList(dev.incremental_result.html_routes_soft_affected); - cost += memoryCostArrayList(dev.incremental_result.html_routes_hard_affected); - cost += memoryCostArrayList(dev.incremental_result.framework_routes_affected); - cost += memoryCostArrayList(dev.incremental_result.client_components_removed); - cost += memoryCostArrayList(dev.incremental_result.failures_removed); - cost += memoryCostArrayList(dev.incremental_result.client_components_affected); - cost += memoryCostArrayList(dev.incremental_result.failures_added); - }, + .incremental_result = discard(VoidFieldTypes(IncrementalResult){ + .had_adjusted_edges = {}, + .client_components_added = { + cost += memoryCostArrayList(dev.incremental_result.client_components_added); + }, + .framework_routes_affected = { + cost += memoryCostArrayList(dev.incremental_result.framework_routes_affected); + }, + .client_components_removed = { + cost += memoryCostArrayList(dev.incremental_result.client_components_removed); + }, + .failures_removed = { + cost += memoryCostArrayList(dev.incremental_result.failures_removed); + }, + .client_components_affected = { + cost += memoryCostArrayList(dev.incremental_result.client_components_affected); + }, + .failures_added = { + cost += memoryCostArrayList(dev.incremental_result.failures_added); + }, + .html_routes_soft_affected = { + cost += 
memoryCostArrayList(dev.incremental_result.html_routes_soft_affected); + }, + .html_routes_hard_affected = { + cost += memoryCostArrayList(dev.incremental_result.html_routes_hard_affected); + }, + }), .has_tailwind_plugin_hack = if (dev.has_tailwind_plugin_hack) |hack| { cost += memoryCostArrayHashMap(hack); }, @@ -810,13 +866,11 @@ pub fn memoryCost(dev: *DevServer) usize { cost += failure.data.len; } }, - .current_bundle = { - // All entries are owned by the bundler arena, not DevServer, except for `requests` - if (dev.current_bundle) |bundle| { - var r = bundle.requests.first; - while (r) |request| : (r = request.next) { - cost += @sizeOf(DeferredRequest.Node); - } + // All entries are owned by the bundler arena, not DevServer, except for `requests` + .current_bundle = if (dev.current_bundle) |bundle| { + var r = bundle.requests.first; + while (r) |request| : (r = request.next) { + cost += @sizeOf(DeferredRequest.Node); } }, .next_bundle = { @@ -1475,7 +1529,7 @@ fn getJavaScriptCodeForHTMLFile( // Avoid-recloning if it is was moved to the hap return if (array.items.ptr == &sfa_state.buffer) - try bun.default_allocator.dupe(u8, array.items) + try dev.allocator.dupe(u8, array.items) else array.items; } @@ -1714,7 +1768,7 @@ fn indexFailures(dev: *DevServer) !void { for (dev.incremental_result.failures_removed.items) |removed| { try w.writeInt(u32, @bitCast(removed.getOwner().encode()), .little); - removed.deinit(); + removed.deinit(dev); } for (dev.incremental_result.failures_added.items) |added| { @@ -1754,7 +1808,7 @@ fn indexFailures(dev: *DevServer) !void { for (dev.incremental_result.failures_removed.items) |removed| { try w.writeInt(u32, @bitCast(removed.getOwner().encode()), .little); - removed.deinit(); + removed.deinit(dev); } dev.publish(.errors, payload.items, .binary); @@ -2039,7 +2093,7 @@ pub fn finalizeBundle( .{ .js = .{ .code = compile_result.code(), .source_map = source_map, - .quoted_contents = .initOwned(quoted_contents, 
bun.default_allocator), + .quoted_contents = .initOwned(quoted_contents, dev.allocator), } }, graph == .ssr, ), @@ -2140,6 +2194,7 @@ pub fn finalizeBundle( if (html.bundled_html_text) |slice| { dev.allocator.free(slice); } + dev.allocation_scope.assertOwned(compile_result.code); html.bundled_html_text = compile_result.code; html.head_end_tag_index = .init(compile_result.offsets.head_end_tag); @@ -2233,7 +2288,7 @@ pub fn finalizeBundle( var has_route_bits_set = false; - var hot_update_payload_sfa = std.heap.stackFallback(65536, bun.default_allocator); + var hot_update_payload_sfa = std.heap.stackFallback(65536, dev.allocator); var hot_update_payload = std.ArrayList(u8).initCapacity(hot_update_payload_sfa.get(), 65536) catch unreachable; // enough space defer hot_update_payload.deinit(); @@ -2543,7 +2598,7 @@ fn startNextBundleIfPresent(dev: *DevServer) void { // If there were pending requests, begin another bundle. if (dev.next_bundle.reload_event != null or dev.next_bundle.requests.first != null) { - var sfb = std.heap.stackFallback(4096, bun.default_allocator); + var sfb = std.heap.stackFallback(4096, dev.allocator); const temp_alloc = sfb.get(); var entry_points: EntryPointList = EntryPointList.empty; defer entry_points.deinit(temp_alloc); @@ -2783,7 +2838,7 @@ fn sendSerializedFailures( failures: []const SerializedFailure, kind: ErrorPageKind, ) !void { - var buf: std.ArrayList(u8) = try .initCapacity(bun.default_allocator, 2048); + var buf: std.ArrayList(u8) = try .initCapacity(dev.allocator, 2048); errdefer buf.deinit(); try buf.appendSlice(switch (kind) { @@ -2904,7 +2959,7 @@ pub fn IncrementalGraph(side: bake.Side) type { /// Keys are absolute paths for the "file" namespace, or the /// pretty-formatted path value that appear in imports. Absolute paths /// are stored so the watcher can quickly query and invalidate them. 
- /// Key slices are owned by `default_allocator` + /// Key slices are owned by `dev.allocator` bundled_files: bun.StringArrayHashMapUnmanaged(File), /// Source maps are stored out-of-line to make `File` objects smaller, /// as file information is accessed much more frequently than source maps. @@ -3001,7 +3056,7 @@ pub fn IncrementalGraph(side: bake.Side) type { /// Content depends on `flags.kind` /// See function wrappers to safely read into this data content: extern union { - /// Allocated by default_allocator. Access with `.jsCode()` + /// Allocated by `dev.allocator`. Access with `.jsCode()` /// When stale, the code is "", otherwise it contains at /// least one non-whitespace character, as empty chunks /// contain at least a function wrapper. @@ -3103,11 +3158,11 @@ pub fn IncrementalGraph(side: bake.Side) type { fn freeFileContent(g: *IncrementalGraph(.client), index: FileIndex, key: []const u8, file: File, css: enum { unref_css, ignore_css }) void { switch (file.flags.kind) { .js, .asset => { - bun.default_allocator.free(file.jsCode()); + g.owner().allocator.free(file.jsCode()); const map = &g.source_maps.items[index.get()]; - bun.default_allocator.free(map.vlq()); + g.owner().allocator.free(map.vlq()); if (map.quoted_contents_flags.is_owned) { - map.quotedContentsCowString().deinit(bun.default_allocator); + map.quotedContentsCowString().deinit(g.owner().allocator); } }, .css => if (css == .unref_css) { @@ -3119,7 +3174,7 @@ pub fn IncrementalGraph(side: bake.Side) type { /// Packed source mapping data pub const PackedMap = struct { - /// Allocated by default_allocator. Access with `.vlq()` + /// Allocated by `dev.allocator`. Access with `.vlq()` /// This is stored to allow lazy construction of source map files. 
vlq_ptr: [*]u8, vlq_len: u32, @@ -3222,8 +3277,27 @@ pub fn IncrementalGraph(side: bake.Side) type { /// An index into `edges` const EdgeIndex = bun.GenericIndex(u32, Edge); - pub fn deinit() void { - @panic("TODO"); + pub fn deinit(g: *@This(), allocator: Allocator) void { + _ = VoidFieldTypes(@This()){ + .bundled_files = { + for (g.bundled_files.keys(), g.bundled_files.values(), 0..) |k, v, i| { + allocator.free(k); + if (side == .client) + g.freeFileContent(.init(@intCast(i)), k, v, .ignore_css); + } + g.bundled_files.deinit(allocator); + }, + .source_maps = if (side == .client) + g.source_maps.deinit(allocator), + .stale_files = g.stale_files.deinit(allocator), + .first_dep = g.first_dep.deinit(allocator), + .first_import = g.first_import.deinit(allocator), + .edges = g.edges.deinit(allocator), + .edges_free_list = g.edges_free_list.deinit(allocator), + .current_chunk_len = {}, + .current_chunk_parts = g.current_chunk_parts.deinit(allocator), + .current_css_files = if (side == .client) g.current_css_files.deinit(allocator), + }; } /// Does NOT count @sizeOf(@This()) @@ -3315,7 +3389,7 @@ pub fn IncrementalGraph(side: bake.Side) type { const file_index = FileIndex.init(@intCast(gop.index)); if (!gop.found_existing) { - gop.key_ptr.* = try bun.default_allocator.dupe(u8, key); + gop.key_ptr.* = try dev.allocator.dupe(u8, key); try g.first_dep.append(dev.allocator, .none); try g.first_import.append(dev.allocator, .none); if (side == .client) { @@ -3366,13 +3440,17 @@ pub fn IncrementalGraph(side: bake.Side) type { switch (content) { .css => |css| gop.value_ptr.* = .initCSS(css, flags), .js => |js| { + dev.allocation_scope.assertOwned(js.code); gop.value_ptr.* = .initJavaScript(js.code, flags); // Append source map if (js.source_map.buffer.len() > 0) { + dev.allocation_scope.assertOwned(js.source_map.buffer.list.items); + if (js.quoted_contents.flags.is_owned) + dev.allocation_scope.assertOwned(js.quoted_contents.slice()); g.source_maps.items[file_index.get()] = try 
.fromNonEmptySourceMap(js.source_map, js.quoted_contents); } else { - js.quoted_contents.deinit(bun.default_allocator); + js.quoted_contents.deinit(dev.allocator); } // Track JavaScript chunks for concatenation try g.current_chunk_parts.append(dev.allocator, file_index); @@ -3440,7 +3518,7 @@ pub fn IncrementalGraph(side: bake.Side) type { if (content == .js) { try g.current_chunk_parts.append(dev.allocator, content.js.code); g.current_chunk_len += content.js.code.len; - content.js.quoted_contents.deinit(bun.default_allocator); + content.js.quoted_contents.deinit(dev.allocator); if (content.js.source_map.buffer.len() > 0) { var vlq = content.js.source_map.buffer; vlq.deinit(); @@ -4003,17 +4081,18 @@ pub fn IncrementalGraph(side: bake.Side) type { pub fn insertStaleExtra(g: *@This(), abs_path: []const u8, is_ssr_graph: bool, is_route: bool) bun.OOM!FileIndex { g.owner().graph_safety_lock.assertLocked(); + const dev_allocator = g.owner().allocator; debug.log("Insert stale: {s}", .{abs_path}); - const gop = try g.bundled_files.getOrPut(g.owner().allocator, abs_path); + const gop = try g.bundled_files.getOrPut(dev_allocator, abs_path); const file_index = FileIndex.init(@intCast(gop.index)); if (!gop.found_existing) { - gop.key_ptr.* = try bun.default_allocator.dupe(u8, abs_path); - try g.first_dep.append(g.owner().allocator, .none); - try g.first_import.append(g.owner().allocator, .none); + gop.key_ptr.* = try dev_allocator.dupe(u8, abs_path); + try g.first_dep.append(dev_allocator, .none); + try g.first_import.append(dev_allocator, .none); if (side == .client) - try g.source_maps.append(g.owner().allocator, .empty); + try g.source_maps.append(dev_allocator, .empty); } else { if (side == .server) { if (is_route) gop.value_ptr.*.is_route = true; @@ -4072,9 +4151,10 @@ pub fn IncrementalGraph(side: bake.Side) type { key: []const u8, } { g.owner().graph_safety_lock.assertLocked(); - const gop = try g.bundled_files.getOrPut(g.owner().allocator, abs_path); + const 
dev_allocator = g.owner().allocator; + const gop = try g.bundled_files.getOrPut(dev_allocator, abs_path); if (!gop.found_existing) { - gop.key_ptr.* = try bun.default_allocator.dupe(u8, abs_path); + gop.key_ptr.* = try dev_allocator.dupe(u8, abs_path); gop.value_ptr.* = switch (side) { .client => File.initUnknown(.{ .failed = false, @@ -4093,10 +4173,10 @@ pub fn IncrementalGraph(side: bake.Side) type { .kind = kind, }, }; - try g.first_dep.append(g.owner().allocator, .none); - try g.first_import.append(g.owner().allocator, .none); + try g.first_dep.append(dev_allocator, .none); + try g.first_import.append(dev_allocator, .none); if (side == .client) - try g.source_maps.append(g.owner().allocator, .empty); + try g.source_maps.append(dev_allocator, .empty); try g.ensureStaleBitCapacity(true); } return .{ .index = .init(@intCast(gop.index)), .key = gop.key_ptr.* }; @@ -4106,15 +4186,16 @@ pub fn IncrementalGraph(side: bake.Side) type { /// Its content lives only on the client. pub fn insertCssFileOnServer(g: *@This(), ctx: *HotUpdateContext, index: bun.JSAst.Index, abs_path: []const u8) bun.OOM!void { g.owner().graph_safety_lock.assertLocked(); + const dev_allocator = g.owner().allocator; debug.log("Insert stale: {s}", .{abs_path}); - const gop = try g.bundled_files.getOrPut(g.owner().allocator, abs_path); + const gop = try g.bundled_files.getOrPut(dev_allocator, abs_path); const file_index: FileIndex = .init(@intCast(gop.index)); if (!gop.found_existing) { - gop.key_ptr.* = try bun.default_allocator.dupe(u8, abs_path); - try g.first_dep.append(g.owner().allocator, .none); - try g.first_import.append(g.owner().allocator, .none); + gop.key_ptr.* = try dev_allocator.dupe(u8, abs_path); + try g.first_dep.append(dev_allocator, .none); + try g.first_import.append(dev_allocator, .none); } switch (side) { @@ -4144,12 +4225,14 @@ pub fn IncrementalGraph(side: bake.Side) type { ) bun.OOM!void { g.owner().graph_safety_lock.assertLocked(); + const dev_allocator = 
g.owner().allocator; + const Gop = std.StringArrayHashMapUnmanaged(File).GetOrPutResult; // found_existing is destructured separately so that it is // comptime-known true when mode == .index const gop: Gop, const found_existing, const file_index = switch (mode) { .abs_path => brk: { - const gop = try g.bundled_files.getOrPut(g.owner().allocator, key); + const gop = try g.bundled_files.getOrPut(dev_allocator, key); break :brk .{ gop, gop.found_existing, FileIndex.init(@intCast(gop.index)) }; }, // When given an index, no fetch is needed. @@ -4170,11 +4253,11 @@ pub fn IncrementalGraph(side: bake.Side) type { if (!found_existing) { comptime assert(mode == .abs_path); - gop.key_ptr.* = try bun.default_allocator.dupe(u8, key); - try g.first_dep.append(g.owner().allocator, .none); - try g.first_import.append(g.owner().allocator, .none); + gop.key_ptr.* = try dev_allocator.dupe(u8, key); + try g.first_dep.append(dev_allocator, .none); + try g.first_import.append(dev_allocator, .none); if (side == .client) - try g.source_maps.append(g.owner().allocator, .empty); + try g.source_maps.append(dev_allocator, .empty); } try g.ensureStaleBitCapacity(true); @@ -4230,6 +4313,7 @@ pub fn IncrementalGraph(side: bake.Side) type { // the error list as it changes while also supporting a REPL log.print(Output.errorWriter()) catch {}; const failure = try SerializedFailure.initFromLog( + dev, fail_owner, dev.relativePath(gop.key_ptr.*), log.msgs.items, @@ -5149,8 +5233,8 @@ const DirectoryWatchStore = struct { /// The file used source_file_path: []const u8, /// The specifier that failed. Before running re-build, it is resolved for, as - /// creating an unrelated file should not re-emit another error. Default-allocator - specifier: []const u8, + /// creating an unrelated file should not re-emit another error. 
Allocated memory + specifier: []u8, const Index = bun.GenericIndex(u32, Dep); }; @@ -5171,12 +5255,12 @@ const ChunkKind = enum { /// The HMR client in the browser is expected to sort the final list of errors /// for deterministic output; there is code in DevServer that uses `swapRemove`. pub const SerializedFailure = struct { - /// Serialized data is always owned by default_allocator + /// Serialized data is always owned by dev.allocator /// The first 32 bits of this slice contain the owner data: []u8, - pub fn deinit(f: SerializedFailure) void { - bun.default_allocator.free(f.data); + pub fn deinit(f: SerializedFailure, dev: *DevServer) void { + dev.allocator.free(f.data); } /// The metaphorical owner of an incremental file error. The packed variant @@ -5266,13 +5350,13 @@ pub const SerializedFailure = struct { js_aggregate, }; - pub fn initFromJs(owner: Owner, value: JSValue) !SerializedFailure { + pub fn initFromJs(dev: *DevServer, owner: Owner, value: JSValue) !SerializedFailure { { _ = value; @panic("TODO"); } // Avoid small re-allocations without requesting so much from the heap - var sfb = std.heap.stackFallback(65536, bun.default_allocator); + var sfb = std.heap.stackFallback(65536, dev.allocator); var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch unreachable; // enough space const w = payload.writer(); @@ -5282,7 +5366,7 @@ pub const SerializedFailure = struct { // Avoid-recloning if it is was moved to the hap const data = if (payload.items.ptr == &sfb.buffer) - try bun.default_allocator.dupe(u8, payload.items) + try dev.allocator.dupe(u8, payload.items) else payload.items; @@ -5290,6 +5374,7 @@ pub const SerializedFailure = struct { } pub fn initFromLog( + dev: *DevServer, owner: Owner, // for .client and .server, these are meant to be relative file paths owner_display_name: []const u8, @@ -5298,7 +5383,7 @@ pub const SerializedFailure = struct { assert(messages.len > 0); // Avoid small re-allocations without requesting so much from 
the heap - var sfb = std.heap.stackFallback(65536, bun.default_allocator); + var sfb = std.heap.stackFallback(65536, dev.allocator); var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch unreachable; // enough space const w = payload.writer(); @@ -5315,7 +5400,7 @@ pub const SerializedFailure = struct { // Avoid-recloning if it is was moved to the hap const data = if (payload.items.ptr == &sfb.buffer) - try bun.default_allocator.dupe(u8, payload.items) + try dev.allocator.dupe(u8, payload.items) else payload.items; @@ -5446,7 +5531,7 @@ fn emitVisualizerMessageIfNeeded(dev: *DevServer) void { if (!bun.FeatureFlags.bake_debugging_features) return; if (dev.emit_visualizer_events == 0) return; - var sfb = std.heap.stackFallback(65536, bun.default_allocator); + var sfb = std.heap.stackFallback(65536, dev.allocator); var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch unreachable; // enough capacity on the stack defer payload.deinit(); @@ -5807,7 +5892,7 @@ const c = struct { pub fn startReloadBundle(dev: *DevServer, event: *HotReloadEvent) bun.OOM!void { defer event.files.clearRetainingCapacity(); - var sfb = std.heap.stackFallback(4096, bun.default_allocator); + var sfb = std.heap.stackFallback(4096, dev.allocator); const temp_alloc = sfb.get(); var entry_points: EntryPointList = EntryPointList.empty; defer entry_points.deinit(temp_alloc); @@ -5992,7 +6077,7 @@ pub const HotReloadEvent = struct { return; } - var sfb = std.heap.stackFallback(4096, bun.default_allocator); + var sfb = std.heap.stackFallback(4096, dev.allocator); const temp_alloc = sfb.get(); var entry_points: EntryPointList = EntryPointList.empty; defer entry_points.deinit(temp_alloc); @@ -6344,7 +6429,7 @@ fn dumpStateDueToCrash(dev: *DevServer) !void { try file.writeAll(start); try file.writeAll("\nlet inlinedData = Uint8Array.from(atob(\""); - var sfb = std.heap.stackFallback(4096, bun.default_allocator); + var sfb = std.heap.stackFallback(4096, dev.allocator); var 
payload = try std.ArrayList(u8).initCapacity(sfb.get(), 4096); defer payload.deinit(); try dev.writeVisualizerMessage(&payload); @@ -6675,9 +6760,13 @@ pub const SourceMapStore = struct { } }; + pub fn owner(store: *SourceMapStore) *DevServer { + return @alignCast(@fieldParentPtr("source_maps", store)); + } + /// If an *Entry is returned, caller must initialize it with the source map. pub fn putOrIncrementRefCount(store: *SourceMapStore, source_map_id: u64, ref_count: u32) !?*Entry { - const gop = try store.entries.getOrPut(bun.default_allocator, source_map_id); + const gop = try store.entries.getOrPut(store.owner().allocator, source_map_id); if (!gop.found_existing) { gop.value_ptr.* = .{ .ref_count = ref_count, @@ -6700,8 +6789,8 @@ pub const SourceMapStore = struct { source_contents: []const bun.StringPointer, bytes: []const u8, - pub fn deinit(self: *@This()) void { - self.mappings.deinit(bun.default_allocator); + pub fn deinit(self: *@This(), dev: *DevServer) void { + self.mappings.deinit(dev.allocator); // file paths and source contents are borrowed } }; @@ -6712,7 +6801,7 @@ pub const SourceMapStore = struct { const entry = &store.entries.values()[index]; switch (SourceMap.Mapping.parse( - bun.default_allocator, + store.owner().allocator, entry.mappings_data.slice(entry.response.blob.slice()), null, @intCast(entry.file_paths.len), @@ -6749,7 +6838,7 @@ pub const SourceMapStore = struct { // /// // /// Prefer `CowString` (maybe allocated or borrowed) or `[]const u8` (known lifetime) over this structure. 
// const RefString = struct { -// /// Allocated by `bun.default_allocator`, free with `.unref()` +// /// Allocated by `dev.allocator`, free with `.unref()` // data: []const u8, // pub fn deref(str: RefString, store: *Store) void { @@ -6758,7 +6847,7 @@ pub const SourceMapStore = struct { // const ref_count = &slice.items(.value)[index]; // if (ref_count.* == 1) { // store.strings.swapRemoveAt(index); -// bun.default_allocator.free(str.data); +// dev.allocator.free(str.data); // } else { // ref_count.* -= 1; // } @@ -6776,8 +6865,9 @@ pub const SourceMapStore = struct { // pub const empty: Store = .{ .strings = .empty }; +// /// `data` must be owned by `dev.allocator` // pub fn register(store: *Store, data: []u8) !RefString { -// const gop = try store.strings.getOrPut(bun.default_allocator, data.ptr); +// const gop = try store.strings.getOrPut(dev.allocator, data.ptr); // if (gop.found_existing) { // gop.value_ptr.* += 1; // } else { @@ -6839,6 +6929,10 @@ const ErrorReportRequest = struct { } fn runWithBody(ctx: *ErrorReportRequest, body: []const u8, r: AnyResponse) !void { + // .finalize has to be called last, but only in the non-error path. 
+ var should_finalize_self = false; + defer if (should_finalize_self) ctx.finalize(); + var s = std.io.fixedBufferStream(body); const reader = s.reader(); @@ -6890,7 +6984,7 @@ const ErrorReportRequest = struct { var parsed_source_maps: AutoArrayHashMapUnmanaged(u64, ?SourceMapStore.GetResult) = .empty; try parsed_source_maps.ensureTotalCapacity(temp_alloc, 4); defer for (parsed_source_maps.values()) |*value| { - if (value.*) |*v| v.deinit(); + if (value.*) |*v| v.deinit(ctx.dev); }; var runtime_lines: ?[5][]const u8 = null; var first_line_of_interest: usize = 0; @@ -7011,7 +7105,7 @@ const ErrorReportRequest = struct { ) catch {}, } - var out: std.ArrayList(u8) = .init(bun.default_allocator); + var out: std.ArrayList(u8) = .init(ctx.dev.allocator); errdefer out.deinit(); const w = out.writer(); @@ -7064,7 +7158,7 @@ const ErrorReportRequest = struct { .mime_type = &.other, .server = ctx.dev.server.?, }); - ctx.finalize(); + should_finalize_self = true; } fn parseId(source_url: []const u8, browser_url: []const u8) ?u64 { @@ -7161,6 +7255,27 @@ fn readString32(reader: anytype, alloc: Allocator) ![]const u8 { return memory; } +/// Make it so in a debug build, pressing Ctrl+C to close will forcefully call +/// deinit, running all finalization assertions, before exiting the application. +/// +/// This isn't perfect because there could be pending requests, but this +/// trade-off is done for simplicity. 
+const EnsureAllMemoryFreed = if (experiment_with_memory_assertions) struct { + var entries: AutoArrayHashMapUnmanaged(*DevServer, void) = .empty; + var mutex: bun.Mutex = .{}; + fn check() callconv(.C) void { + var copy = brk: { + mutex.lock(); + defer mutex.unlock(); + const copy = entries; + entries = .empty; + break :brk copy; + }; + defer copy.deinit(bun.default_allocator); + for (copy.keys()) |dev| dev.deinit(); + } +}; + /// userland implementation of https://github.com/ziglang/zig/issues/21879 fn VoidFieldTypes(comptime T: type) type { const fields = @typeInfo(T).@"struct".fields; @@ -7177,6 +7292,10 @@ fn VoidFieldTypes(comptime T: type) type { } }); } +fn voidFieldTypeDiscardHelper(data: anytype) void { + _ = data; +} + const std = @import("std"); const Allocator = std.mem.Allocator; const Mutex = bun.Mutex; diff --git a/src/bake/bake.zig b/src/bake/bake.zig index 0efe7338dc..e1c77960af 100644 --- a/src/bake/bake.zig +++ b/src/bake/bake.zig @@ -93,14 +93,12 @@ pub const StringRefList = struct { pub const SplitBundlerOptions = struct { plugin: ?*Plugin = null, - all: BuildConfigSubset = .{}, client: BuildConfigSubset = .{}, server: BuildConfigSubset = .{}, ssr: BuildConfigSubset = .{}, pub const empty: SplitBundlerOptions = .{ .plugin = null, - .all = .{}, .client = .{}, .server = .{}, .ssr = .{}, @@ -156,12 +154,7 @@ const BuildConfigSubset = struct { drop: bun.StringArrayHashMapUnmanaged(void) = .{}, env: bun.Schema.Api.DotEnvBehavior = ._none, env_prefix: ?[]const u8 = null, - - pub fn loadFromJs(config: *BuildConfigSubset, value: JSValue, arena: Allocator) !void { - _ = config; // autofix - _ = value; // autofix - _ = arena; // autofix - } + define: bun.Schema.Api.StringMap = .{ .keys = &.{}, .values = &.{} }, }; /// A "Framework" in our eyes is simply set of bundler options that a framework @@ -594,7 +587,7 @@ pub const Framework = struct { pub fn initTranspiler( framework: *Framework, - allocator: std.mem.Allocator, + arena: std.mem.Allocator, log: 
*bun.logger.Log, mode: Mode, renderer: Graph, @@ -602,7 +595,7 @@ pub const Framework = struct { bundler_options: *const BuildConfigSubset, ) !void { out.* = try bun.Transpiler.init( - allocator, // TODO: this is likely a memory leak + arena, log, std.mem.zeroes(bun.Schema.Api.TransformOptions), null, @@ -634,7 +627,7 @@ pub const Framework = struct { out.options.react_fast_refresh = mode == .development and renderer == .client and framework.react_fast_refresh != null; out.options.server_components = framework.server_components != null; - out.options.conditions = try bun.options.ESMConditions.init(allocator, out.options.target.defaultConditions()); + out.options.conditions = try bun.options.ESMConditions.init(arena, out.options.target.defaultConditions()); if (renderer == .server and framework.server_components != null) { try out.options.conditions.appendSlice(&.{"react-server"}); } @@ -642,6 +635,9 @@ pub const Framework = struct { // Support `esm-env` package using this condition. try out.options.conditions.appendSlice(&.{"development"}); } + if (bundler_options.conditions.count() > 0) { + try out.options.conditions.appendSlice(bundler_options.conditions.keys()); + } out.options.production = mode != .development; out.options.tree_shaking = mode != .development; @@ -650,10 +646,13 @@ pub const Framework = struct { out.options.minify_whitespace = mode != .development; out.options.css_chunking = true; out.options.framework = framework; + if (bundler_options.ignoreDCEAnnotations) |ignore| + out.options.ignore_dce_annotations = ignore; out.options.source_map = switch (mode) { - // Source maps must always be linked, as DevServer special cases the - // linking and part of the generation of these. + // Source maps must always be external, as DevServer special cases + // the linking and part of the generation of these. It also relies + // on source maps always being enabled. 
.development => .external, // TODO: follow user configuration else => .none, @@ -669,11 +668,25 @@ pub const Framework = struct { out.options.jsx.development = mode == .development; - try addImportMetaDefines(allocator, out.options.define, mode, switch (renderer) { + try addImportMetaDefines(arena, out.options.define, mode, switch (renderer) { .client => .client, .server, .ssr => .server, }); + if ((bundler_options.define.keys.len + bundler_options.drop.count()) > 0) { + for (bundler_options.define.keys, bundler_options.define.values) |k, v| { + const parsed = try bun.options.Define.Data.parse(k, v, false, false, log, arena); + try out.options.define.insert(arena, k, parsed); + } + + for (bundler_options.drop.keys()) |drop_item| { + if (drop_item.len > 0) { + const parsed = try bun.options.Define.Data.parse(drop_item, "", true, true, log, arena); + try out.options.define.insert(arena, drop_item, parsed); + } + } + } + if (mode != .development) { // Hide information about the source repository, at the cost of debugging quality. 
out.options.entry_naming = "_bun/[hash].[ext]"; diff --git a/src/bun.js/api/server.zig b/src/bun.js/api/server.zig index c4769f95c6..2ccd00366e 100644 --- a/src/bun.js/api/server.zig +++ b/src/bun.js/api/server.zig @@ -1472,20 +1472,29 @@ pub const ServerConfig = struct { .framework = framework, .bundler_options = bun.bake.SplitBundlerOptions.empty, }; + const bake = &args.bake.?; - switch (vm.transpiler.options.transform_options.serve_env_behavior) { + const o = vm.transpiler.options.transform_options; + + switch (o.serve_env_behavior) { .prefix => { - args.bake.?.bundler_options.client.env_prefix = vm.transpiler.options.transform_options.serve_env_prefix; - args.bake.?.bundler_options.client.env = .prefix; + bake.bundler_options.client.env_prefix = vm.transpiler.options.transform_options.serve_env_prefix; + bake.bundler_options.client.env = .prefix; }, .load_all => { - args.bake.?.bundler_options.client.env = .load_all; + bake.bundler_options.client.env = .load_all; }, .disable => { - args.bake.?.bundler_options.client.env = .disable; + bake.bundler_options.client.env = .disable; }, else => {}, } + + if (o.serve_define) |define| { + bake.bundler_options.client.define = define; + bake.bundler_options.server.define = define; + bake.bundler_options.ssr.define = define; + } } else { if (init_ctx.framework_router_list.items.len > 0) { return global.throwInvalidArguments("FrameworkRouter is currently only supported when `development: true`", .{}); diff --git a/src/bun.js/api/server/HTMLBundle.zig b/src/bun.js/api/server/HTMLBundle.zig index d34af3dc4d..ed84c4cc10 100644 --- a/src/bun.js/api/server/HTMLBundle.zig +++ b/src/bun.js/api/server/HTMLBundle.zig @@ -261,6 +261,17 @@ pub const Route = struct { config.minify.syntax = true; } + if (bun.CLI.Command.get().args.serve_define) |define| { + bun.assert(define.keys.len == define.values.len); + try config.define.map.ensureUnusedCapacity(define.keys.len); + config.define.map.unmanaged.entries.len = define.keys.len; + 
@memcpy(config.define.map.keys(), define.keys); + for (config.define.map.values(), define.values) |*to, from| { + to.* = config.define.map.allocator.dupe(u8, from) catch bun.outOfMemory(); + } + try config.define.map.reIndex(); + } + if (!is_development) { config.define.put("process.env.NODE_ENV", "\"production\"") catch bun.outOfMemory(); config.jsx.development = false; diff --git a/src/bun.js/webcore/blob.zig b/src/bun.js/webcore/blob.zig index 30ea6376af..86c2e77945 100644 --- a/src/bun.js/webcore/blob.zig +++ b/src/bun.js/webcore/blob.zig @@ -6091,15 +6091,11 @@ pub const AnyBlob = union(enum) { // }, .InternalBlob => { self.InternalBlob.bytes.clearAndFree(); - self.* = .{ - .Blob = .{}, - }; + self.* = .{ .Blob = .{} }; }, .WTFStringImpl => { self.WTFStringImpl.deref(); - self.* = .{ - .Blob = .{}, - }; + self.* = .{ .Blob = .{} }; }, }; } diff --git a/src/bun.zig b/src/bun.zig index 17db249c71..62fae99f6b 100644 --- a/src/bun.zig +++ b/src/bun.zig @@ -753,7 +753,8 @@ pub fn isHeapMemory(memory: anytype) bool { return false; } -pub const Mimalloc = @import("./allocators/mimalloc.zig"); +pub const Mimalloc = @import("allocators/mimalloc.zig"); +pub const AllocationScope = @import("allocators/AllocationScope.zig"); pub const isSliceInBuffer = allocators.isSliceInBuffer; pub const isSliceInBufferT = allocators.isSliceInBufferT; @@ -4271,6 +4272,9 @@ pub fn CowSlice(T: type) type { /// `data` is transferred into the returned string, and must be freed with /// `.deinit()` when the string and its borrows are done being used. 
pub fn initOwned(data: []const T, allocator: Allocator) @This() { + if (AllocationScope.downcast(allocator)) |scope| + scope.assertOwned(data); + return .{ .ptr = data.ptr, .flags = .{ @@ -4360,5 +4364,3 @@ pub fn CowSlice(T: type) type { const Allocator = std.mem.Allocator; pub const server = @import("./bun.js/api/server.zig"); - -pub const AllocationScope = @import("AllocationScope.zig"); diff --git a/src/bundler/bundle_v2.zig b/src/bundler/bundle_v2.zig index 9656e3b43e..7340f14ead 100644 --- a/src/bundler/bundle_v2.zig +++ b/src/bundler/bundle_v2.zig @@ -2699,7 +2699,7 @@ pub const BundleV2 = struct { }; } - // Then all HTML files (TODO: what happens when JS imports HTML. probably ban that?) + // Then all HTML files for (html_files.keys(), chunks[1 + start.css_entry_points.count() ..]) |source_index, *chunk| { chunk.* = .{ .entry_point = .{ @@ -6232,8 +6232,8 @@ pub const LinkerContext = struct { // was generated. This will be preserved so that remapping // stack traces can show the source code, even after incremental // rebuilds occur. - const allocator = if (worker.ctx.transpiler.options.dev_server != null) - bun.default_allocator + const allocator = if (worker.ctx.transpiler.options.dev_server) |dev| + dev.allocator else worker.allocator; @@ -10270,13 +10270,16 @@ pub const LinkerContext = struct { // Client bundles for Bake must be globally allocated, // as it must outlive the bundle task. 
- const use_global_allocator = c.dev_server != null and - c.parse_graph.ast.items(.target)[part_range.source_index.get()].bakeGraph() == .client; + const allocator = if (c.dev_server) |dev| + if (c.parse_graph.ast.items(.target)[part_range.source_index.get()].bakeGraph() == .client) + dev.allocator + else + default_allocator + else + default_allocator; var arena = &worker.temporary_arena; - var buffer_writer = js_printer.BufferWriter.init( - if (use_global_allocator) default_allocator else worker.allocator, - ) catch bun.outOfMemory(); + var buffer_writer = js_printer.BufferWriter.init(allocator) catch bun.outOfMemory(); defer _ = arena.reset(.retain_capacity); worker.stmt_list.reset(); @@ -10713,9 +10716,9 @@ pub const LinkerContext = struct { } }; - // HTML bundles for Bake must be globally allocated, as it must outlive + // HTML bundles for dev server must be allocated to it, as it must outlive // the bundle task. See `DevServer.RouteBundle.HTML.bundled_html_text` - const output_allocator = if (c.dev_server != null) bun.default_allocator else worker.allocator; + const output_allocator = if (c.dev_server) |dev| dev.allocator else worker.allocator; var html_loader: HTMLLoader = .{ .linker = c, diff --git a/src/bunfig.zig b/src/bunfig.zig index 211ea969ff..b7d62bc340 100644 --- a/src/bunfig.zig +++ b/src/bunfig.zig @@ -687,6 +687,30 @@ pub const Bunfig = struct { try this.addError(minify.loc, "Expected minify to be boolean or object"); } } + + if (serve_obj.get("define")) |expr| { + try this.expect(expr, .e_object); + var valid_count: usize = 0; + const properties = expr.data.e_object.properties.slice(); + for (properties) |prop| { + if (prop.value.?.data != .e_string) continue; + valid_count += 1; + } + var buffer = allocator.alloc([]const u8, valid_count * 2) catch unreachable; + var keys = buffer[0..valid_count]; + var values = buffer[valid_count..]; + var i: usize = 0; + for (properties) |prop| { + if (prop.value.?.data != .e_string) continue; + keys[i] = 
prop.key.?.data.e_string.string(allocator) catch unreachable; + values[i] = prop.value.?.data.e_string.string(allocator) catch unreachable; + i += 1; + } + this.bunfig.serve_define = Api.StringMap{ + .keys = keys, + .values = values, + }; + } this.bunfig.bunfig_path = bun.default_allocator.dupe(u8, this.source.path.text) catch bun.outOfMemory(); if (serve_obj.get("publicPath")) |public_path| { diff --git a/src/crash_handler.zig b/src/crash_handler.zig index f8aef7da00..a364954f8e 100644 --- a/src/crash_handler.zig +++ b/src/crash_handler.zig @@ -1453,7 +1453,7 @@ fn report(url: []const u8) void { }, } }, - else => @compileError("NOT IMPLEMENTED"), + else => @compileError("Not implemented"), } } diff --git a/src/defines.zig b/src/defines.zig index f61508f305..a28ce3eedc 100644 --- a/src/defines.zig +++ b/src/defines.zig @@ -85,6 +85,24 @@ pub const DefineData = struct { } pub fn fromMergeableInputEntry(user_defines: *UserDefines, key: []const u8, value_str: []const u8, value_is_undefined: bool, method_call_must_be_replaced_with_undefined: bool, log: *logger.Log, allocator: std.mem.Allocator) !void { + user_defines.putAssumeCapacity(key, try .parse( + key, + value_str, + value_is_undefined, + method_call_must_be_replaced_with_undefined, + log, + allocator, + )); + } + + pub fn parse( + key: []const u8, + value_str: []const u8, + value_is_undefined: bool, + method_call_must_be_replaced_with_undefined: bool, + log: *logger.Log, + allocator: std.mem.Allocator, + ) !DefineData { var keySplitter = std.mem.splitScalar(u8, key, '.'); while (keySplitter.next()) |part| { if (!js_lexer.isIdentifier(part)) { @@ -119,17 +137,13 @@ pub const DefineData = struct { .can_be_removed_if_unused = true, } }; - user_defines.putAssumeCapacity( - key, - DefineData{ - .value = value, - .original_name = value_str, - .can_be_removed_if_unused = true, - .valueless = value_is_undefined, - .method_call_must_be_replaced_with_undefined = method_call_must_be_replaced_with_undefined, - }, - ); - 
return; + return .{ + .value = value, + .original_name = value_str, + .can_be_removed_if_unused = true, + .valueless = value_is_undefined, + .method_call_must_be_replaced_with_undefined = method_call_must_be_replaced_with_undefined, + }; } const _log = log; var source = logger.Source{ @@ -138,12 +152,12 @@ pub const DefineData = struct { }; const expr = try json_parser.parseEnvJSON(&source, _log, allocator); const cloned = try expr.data.deepClone(allocator); - user_defines.putAssumeCapacity(key, DefineData{ + return .{ .value = cloned, .can_be_removed_if_unused = expr.isPrimitiveLiteral(), .valueless = value_is_undefined, .method_call_must_be_replaced_with_undefined = method_call_must_be_replaced_with_undefined, - }); + }; } pub fn fromInput(defines: RawDefines, drop: []const []const u8, log: *logger.Log, allocator: std.mem.Allocator) !UserDefines { diff --git a/src/resolver/resolver.zig b/src/resolver/resolver.zig index 81d3921a87..09a277cf08 100644 --- a/src/resolver/resolver.zig +++ b/src/resolver/resolver.zig @@ -1628,7 +1628,7 @@ pub const Resolver = struct { /// bust both the named file and a parent directory, because `./hello` can resolve /// to `./hello.js` or `./hello/index.js` - pub fn bustDirCacheFromSpecifier(r: *ThisResolver, import_source: []const u8, specifier: []const u8) bool { + pub fn bustDirCacheFromSpecifier(r: *ThisResolver, import_source_file: []const u8, specifier: []const u8) bool { if (std.fs.path.isAbsolute(specifier)) { const dir = bun.path.dirname(specifier, .auto); const a = r.bustDirCache(dir); @@ -1638,9 +1638,10 @@ pub const Resolver = struct { if (!(bun.strings.startsWith(specifier, "./") or bun.strings.startsWith(specifier, "../"))) return false; - if (!std.fs.path.isAbsolute(import_source)) return false; + if (!std.fs.path.isAbsolute(import_source_file)) + return false; - const joined = bun.path.joinAbs(import_source, .auto, specifier); + const joined = bun.path.joinAbs(bun.path.dirname(import_source_file, .auto), .auto, 
specifier); const dir = bun.path.dirname(joined, .auto); const a = r.bustDirCache(dir); diff --git a/src/sourcemap/sourcemap.zig b/src/sourcemap/sourcemap.zig index 463454dacf..20d9ba5ad9 100644 --- a/src/sourcemap/sourcemap.zig +++ b/src/sourcemap/sourcemap.zig @@ -583,15 +583,13 @@ pub const Mapping = struct { .generated = generated, .original = original, .source_index = source_index, - }) catch unreachable; + }) catch bun.outOfMemory(); } - return ParseResult{ - .success = .{ - .mappings = mapping, - .input_line_count = input_line_count, - }, - }; + return .{ .success = .{ + .mappings = mapping, + .input_line_count = input_line_count, + } }; } }; diff --git a/src/transpiler.zig b/src/transpiler.zig index a5e20db282..1e8492a2c2 100644 --- a/src/transpiler.zig +++ b/src/transpiler.zig @@ -110,8 +110,6 @@ pub const ParseResult = struct { } }; -const cache_files = false; - pub const PluginRunner = struct { global_object: *JSC.JSGlobalObject, allocator: std.mem.Allocator, @@ -360,7 +358,7 @@ pub const Transpiler = struct { macro_context: ?js_ast.Macro.MacroContext = null, - pub const isCacheEnabled = cache_files; + pub const isCacheEnabled = false; pub inline fn getPackageManager(this: *Transpiler) *PackageManager { return this.resolver.getPackageManager(); @@ -372,6 +370,7 @@ pub const Transpiler = struct { this.resolver.log = log; } + // TODO: remove this method. it does not make sense pub fn setAllocator(this: *Transpiler, allocator: std.mem.Allocator) void { this.allocator = allocator; this.linker.allocator = allocator; diff --git a/test/bake/dev-server-harness.ts b/test/bake/bake-harness.ts similarity index 91% rename from test/bake/dev-server-harness.ts rename to test/bake/bake-harness.ts index 53c3ebb79c..517cfbc22f 100644 --- a/test/bake/dev-server-harness.ts +++ b/test/bake/bake-harness.ts @@ -149,36 +149,37 @@ export function emptyHtmlFile({ `; } -export type DevServerTest = ( - | { - /** Starting files */ - files: FileObject; - /** - * Framework to use. 
Consider `minimalFramework` if possible. - * Provide this object or `files['bun.app.ts']` for a dynamic one. - */ - framework?: Bake.Framework | "react"; - /** - * Source code for a TSX file that `export default`s an array of BunPlugin, - * combined with the `framework` option. - */ - pluginFile?: string; - } - | { - /** - * Copy all files from test/bake/fixtures/ - * This directory must contain `bun.app.ts` or `index.html` to allow hacking on fixtures manually via `bun run .` - */ - fixture: string; - } -) & { +export interface DevServerTest { + /** Execute the test */ test: (dev: Dev) => Promise; + /** Starting files */ + files?: FileObject; + /** + * Framework to use. Consider `minimalFramework` if possible. + * Provide this object or `files['bun.app.ts']` for a dynamic one. + */ + framework?: Bake.Framework | "react"; + /** + * Source code for a TSX file that `export default`s an array of BunPlugin, + * combined with the `framework` option. + */ + pluginFile?: string; + /** + * Copy all files from test/bake/fixtures/ + * This directory must contain `bun.app.ts` or `index.html` to allow hacking on fixtures manually via `bun run .` + */ + fixture?: string; /** * Multiply the timeout by this number. */ timeoutMultiplier?: number; -}; + /** + * Directory to write the bootstrap files into. + * Avoid if possible, this is to reproduce specific bugs. 
+ */ + mainDir?: string; +} let interactive = false; let activeClient: Client | null = null; @@ -227,6 +228,7 @@ export class Dev { panicked = false; connectedClients: Set = new Set(); options: { files: Record }; + nodeEnv: "development" | "production"; // These properties are not owned by this class devProcess: Subprocess<"pipe", "pipe", "pipe">; @@ -237,6 +239,7 @@ export class Dev { port: number, process: Subprocess<"pipe", "pipe", "pipe">, stream: OutputLineStream, + nodeEnv: "development" | "production", options: DevServerTest, ) { this.rootDir = realpathSync(root); @@ -248,6 +251,7 @@ export class Dev { this.output.on("panic", () => { this.panicked = true; }); + this.nodeEnv = nodeEnv; } fetch(url: string, init?: RequestInit) { @@ -271,7 +275,8 @@ export class Dev { const snapshot = snapshotCallerLocation(); return withAnnotatedStack(snapshot, async () => { await maybeWaitInteractive("write " + file); - const wait = this.waitForHotReload(); + const isDev = this.nodeEnv === "development"; + const wait = isDev && this.waitForHotReload(); await Bun.write( this.join(file), ((typeof contents === "string" && options.dedent) ?? true) ? 
dedent(contents) : contents, @@ -279,7 +284,7 @@ export class Dev { await wait; let errors = options.errors; - if (errors !== null) { + if (isDev && errors !== null) { errors ??= []; for (const client of this.connectedClients) { await client.expectErrorOverlay(errors, null); @@ -337,6 +342,7 @@ export class Dev { } async waitForHotReload() { + if (this.nodeEnv !== "development") return Promise.resolve(); const err = this.output.waitForLine(/error/i); const success = this.output.waitForLine(/bundled page|bundled route|reloaded/i); await Promise.race([ @@ -361,14 +367,17 @@ export class Dev { await maybeWaitInteractive("open client " + url); const client = new Client(new URL(url, this.baseUrl).href, { storeHotChunks: options.storeHotChunks, + hmr: this.nodeEnv === "development", }); - try { - await client.output.waitForLine(hmrClientInitRegex); - } catch (e) { - client[Symbol.asyncDispose](); - throw e; + if (this.nodeEnv === "development") { + try { + await client.output.waitForLine(hmrClientInitRegex); + } catch (e) { + client[Symbol.asyncDispose](); + throw e; + } + await client.expectErrorOverlay(options.errors ?? []); } - await client.expectErrorOverlay(options.errors ?? 
[]); this.connectedClients.add(client); client.on("exit", () => { this.connectedClients.delete(client); @@ -377,12 +386,6 @@ export class Dev { } } -export interface Step { - run: StepFn; - caller: string; - name?: string; -} - class DevFetchPromise extends Promise { dev: Dev; constructor( @@ -540,8 +543,9 @@ export class Client extends EventEmitter { #hmrChunk: string | null = null; suppressInteractivePrompt: boolean = false; expectingReload = false; + hmr = false; - constructor(url: string, options: { storeHotChunks?: boolean } = {}) { + constructor(url: string, options: { storeHotChunks?: boolean; hmr: boolean }) { super(); activeClient = this; const proc = Bun.spawn({ @@ -583,6 +587,7 @@ export class Client extends EventEmitter { this.#hmrChunk = chunk; }); this.#proc = proc; + this.hmr = options.hmr; // @ts-expect-error this.output = new OutputLineStream("web", proc.stdout, proc.stderr); } @@ -1006,7 +1011,7 @@ function snapshotCallerLocation(): string { let i = 1; for (; i < lines.length; i++) { const line = lines[i].replaceAll("\\", "/"); - if (line.includes(import.meta.dir.replaceAll("\\", "/")) && !line.includes("dev-server-harness.ts")) { + if (line.includes(import.meta.dir.replaceAll("\\", "/")) && !line.includes(import.meta.file)) { return line; } } @@ -1090,6 +1095,7 @@ function cleanTestDir(dir: string) { } const devTestRoot = path.join(import.meta.dir, "dev").replaceAll("\\", "/"); +const prodTestRoot = path.join(import.meta.dir, "prod").replaceAll("\\", "/"); const counts: Record = {}; console.log("Dev server testing directory:", tempDir); @@ -1253,14 +1259,16 @@ export function indexHtmlScript(htmlFiles: string[]) { ].join("\n"); } -export function devTest(description: string, options: T): T { +function testImpl( + description: string, + options: T, + NODE_ENV: "development" | "production", + caller: string, +): T { if (interactive) return options; - // Capture the caller name as part of the test tempdir - const callerLocation = 
snapshotCallerLocation(); - const caller = stackTraceFileName(callerLocation); const jest = (Bun as any).jest(caller); - assert(caller.startsWith(devTestRoot), "dev server tests must be in test/bake/dev, not " + caller); + const basename = path.basename(caller, ".test" + path.extname(caller)); const count = (counts[basename] = (counts[basename] ?? 0) + 1); @@ -1270,8 +1278,11 @@ export function devTest(description: string, options: T // Clean the test directory if it exists cleanTestDir(root); - if ("files" in options) { - const htmlFiles = Object.keys(options.files).filter(file => file.endsWith(".html")); + const mainDir = path.resolve(root, options.mainDir ?? "."); + if (options.files) { + const htmlFiles = Object.keys(options.files) + .filter(file => file.endsWith(".html")) + .map(x => path.join(root, x)); await writeAll(root, options.files); if (options.files["bun.app.ts"] == undefined && htmlFiles.length === 0) { if (!options.framework) { @@ -1296,7 +1307,10 @@ export function devTest(description: string, options: T if (options.files["bun.app.ts"]) { throw new Error("Cannot provide both bun.app.ts and index.html"); } - fs.writeFileSync(path.join(root, "bun.app.ts"), indexHtmlScript(htmlFiles)); + await Bun.write( + path.join(mainDir, "bun.app.ts"), + indexHtmlScript(htmlFiles.map(file => path.relative(mainDir, file))), + ); } } else { if (!options.fixture) { @@ -1305,11 +1319,11 @@ export function devTest(description: string, options: T const fixture = path.join(devTestRoot, "../fixtures", options.fixture); fs.cpSync(fixture, root, { recursive: true }); - if (!fs.existsSync(path.join(root, "bun.app.ts"))) { - if (!fs.existsSync(path.join(root, "index.html"))) { + if (!fs.existsSync(path.join(mainDir, "bun.app.ts"))) { + if (!fs.existsSync(path.join(mainDir, "index.html"))) { throw new Error(`Fixture ${fixture} must contain a bun.app.ts or index.html file.`); } else { - fs.writeFileSync(path.join(root, "bun.app.ts"), indexHtmlScript(["index.html"])); + await 
Bun.write(path.join(root, "bun.app.ts"), indexHtmlScript(["index.html"])); } } if (!fs.existsSync(path.join(root, "node_modules"))) { @@ -1330,7 +1344,7 @@ export function devTest(description: string, options: T fs.writeFileSync( path.join(root, "harness_start.ts"), dedent` - import appConfig from "./bun.app.ts"; + import appConfig from "${path.join(mainDir, "bun.app.ts")}"; export default { ...appConfig, port: ${interactive ? 3000 : 0}, @@ -1338,6 +1352,14 @@ export function devTest(description: string, options: T `, ); + using _ = { + [Symbol.dispose]: () => { + for (const proc of danglingProcesses) { + proc.kill("SIGKILL"); + } + }, + }; + await using devProcess = Bun.spawn({ cwd: root, cmd: [process.execPath, "./harness_start.ts"], @@ -1347,6 +1369,7 @@ export function devTest(description: string, options: T FORCE_COLOR: "1", BUN_DEV_SERVER_TEST_RUNNER: "1", BUN_DUMP_STATE_ON_CRASH: "1", + NODE_ENV, }, ]), stdio: ["pipe", "pipe", "pipe"], @@ -1362,7 +1385,7 @@ export function devTest(description: string, options: T using stream = new OutputLineStream("dev", devProcess.stdout, devProcess.stderr); const port = parseInt((await stream.waitForLine(/localhost:(\d+)/))[1], 10); // @ts-expect-error - const dev = new Dev(root, port, devProcess, stream, options); + const dev = new Dev(root, port, devProcess, stream, NODE_ENV, options); await maybeWaitInteractive("start"); @@ -1390,7 +1413,15 @@ export function devTest(description: string, options: T } } - const name = `DevServer > ${basename}-${count}: ${description}`; + const name = `${ + NODE_ENV === "development" // + ? Bun.enableANSIColors + ? "\x1b[35mDEV\x1b[0m" + : "DEV" + : Bun.enableANSIColors + ? "\x1b[36mPROD\x1b[0m" + : "PROD" + }:${basename}-${count}: ${description}`; try { // TODO: resolve ci flakiness. 
if (isCI && isWindows) { @@ -1441,3 +1472,33 @@ process.on("exit", () => { proc.kill("SIGKILL"); } }); + +export function devTest(description: string, options: T): T { + // Capture the caller name as part of the test tempdir + const callerLocation = snapshotCallerLocation(); + const caller = stackTraceFileName(callerLocation); + assert(caller.startsWith(devTestRoot), "dev server tests must be in test/bake/dev, not " + caller); + + return testImpl(description, options, "development", caller); +} + +export function prodTest(description: string, options: T): T { + const callerLocation = snapshotCallerLocation(); + const caller = stackTraceFileName(callerLocation); + assert(caller.startsWith(prodTestRoot), "dev server tests must be in test/bake/prod, not " + caller); + + return testImpl(description, options, "production", caller); +} + +export function devAndProductionTest(description: string, options: DevServerTest) { + const callerLocation = snapshotCallerLocation(); + const caller = stackTraceFileName(callerLocation); + assert( + caller.includes("dev-and-prod"), + 'dev+prod tests should be in "test/bake/dev-and-prod.test.ts", not ' + caller, + ); + + testImpl(description, options, "development", caller); + testImpl(description, options, "production", caller); + return options; +} diff --git a/test/bake/dev-and-prod.test.ts b/test/bake/dev-and-prod.test.ts new file mode 100644 index 0000000000..3d0ba11261 --- /dev/null +++ b/test/bake/dev-and-prod.test.ts @@ -0,0 +1,24 @@ +// Tests which apply to both dev and prod. They are run twice. 
+import { devAndProductionTest, emptyHtmlFile } from "./bake-harness"; + +devAndProductionTest("define config via bunfig.toml", { + files: { + "index.html": emptyHtmlFile({ + styles: [], + scripts: ["index.ts"], + }), + "index.ts": ` + console.log("a=" + DEFINE); + `, + "bunfig.toml": ` + [serve.static] + define = { + "DEFINE" = "\\"HELLO\\"" + } + `, + }, + async test(dev) { + const c = await dev.client("/"); + await c.expectMessage("a=HELLO"); + }, +}); diff --git a/test/bake/dev/bundle.test.ts b/test/bake/dev/bundle.test.ts index 9c88e536ce..19975c523e 100644 --- a/test/bake/dev/bundle.test.ts +++ b/test/bake/dev/bundle.test.ts @@ -1,6 +1,6 @@ // Bundle tests are tests concerning bundling bugs that only occur in DevServer. import { expect } from "bun:test"; -import { devTest, emptyHtmlFile, minimalFramework, reactAndRefreshStub, reactRefreshStub } from "../dev-server-harness"; +import { devTest, emptyHtmlFile, minimalFramework, reactAndRefreshStub, reactRefreshStub } from "../bake-harness"; devTest("import identifier doesnt get renamed", { framework: minimalFramework, @@ -389,3 +389,36 @@ devTest("default export same-scope handling", { expect(chunk).toMatch(/default:\s*function/); }, }); +devTest("directory cache bust case #17576", { + files: { + ...reactRefreshStub, + "web/index.html": emptyHtmlFile({ + styles: [], + scripts: ["index.ts", "react-refresh/runtime"], + }), + "web/index.ts": ` + console.log(123); + `, + }, + mainDir: "server", + async test(dev) { + await using c = await dev.client("/"); + await c.expectMessage(123); + await c.expectNoWebSocketActivity(async () => { + await dev.write( + "web/Test.ts", + ` + export const abc = 456; + `, + ); + }); + await dev.write( + "web/index.ts", + ` + import { abc } from "./Test.ts"; + console.log(abc); + `, + ); + await c.expectMessage(456); + }, +}); diff --git a/test/bake/dev/css.test.ts b/test/bake/dev/css.test.ts index fbd1c4f9e7..35cc0d22a2 100644 --- a/test/bake/dev/css.test.ts +++ 
b/test/bake/dev/css.test.ts @@ -1,6 +1,6 @@ // CSS tests concern bundling bugs with CSS files import { expect } from "bun:test"; -import { devTest, emptyHtmlFile, imageFixtures, reactRefreshStub } from "../dev-server-harness"; +import { devTest, emptyHtmlFile, imageFixtures, reactRefreshStub } from "../bake-harness"; import assert from "node:assert"; devTest("css file with syntax error does not kill old styles", { diff --git a/test/bake/dev/dev-plugins.test.ts b/test/bake/dev/dev-plugins.test.ts index 77261451b0..318771bd0a 100644 --- a/test/bake/dev/dev-plugins.test.ts +++ b/test/bake/dev/dev-plugins.test.ts @@ -1,5 +1,5 @@ // Plugin tests concern plugins in development mode. -import { devTest, emptyHtmlFile, minimalFramework } from "../dev-server-harness"; +import { devTest, emptyHtmlFile, minimalFramework } from "../bake-harness"; // Note: more in depth testing of plugins is done in test/bundler/bundler_plugin.test.ts devTest("onResolve", { diff --git a/test/bake/dev/ecosystem.test.ts b/test/bake/dev/ecosystem.test.ts index 72fcb0dc80..d32a9d7090 100644 --- a/test/bake/dev/ecosystem.test.ts +++ b/test/bake/dev/ecosystem.test.ts @@ -3,7 +3,7 @@ // discovered, but it easy and still a reasonable idea to just test the library // entirely. import { expect } from "bun:test"; -import { devTest } from "../dev-server-harness"; +import { devTest } from "../bake-harness"; // Bugs discovered thanks to Svelte: // - Circular import situations diff --git a/test/bake/dev/esm.test.ts b/test/bake/dev/esm.test.ts index 7e09dc7a5d..bfa9a93d1a 100644 --- a/test/bake/dev/esm.test.ts +++ b/test/bake/dev/esm.test.ts @@ -1,5 +1,5 @@ // ESM tests are about various esm features in development mode. 
-import { devTest, minimalFramework } from "../dev-server-harness"; +import { devTest, minimalFramework } from "../bake-harness"; const liveBindingTest = devTest("live bindings with `var`", { framework: minimalFramework, diff --git a/test/bake/dev/html.test.ts b/test/bake/dev/html.test.ts index c46b66a645..fc35ed48fc 100644 --- a/test/bake/dev/html.test.ts +++ b/test/bake/dev/html.test.ts @@ -1,5 +1,5 @@ // Bundle tests are tests concerning bundling bugs that only occur in DevServer. -import { devTest } from "../dev-server-harness"; +import { devTest } from "../bake-harness"; devTest("html file is watched", { files: { diff --git a/test/bake/dev/react-spa.test.ts b/test/bake/dev/react-spa.test.ts index 3d2a500161..c5b069f43a 100644 --- a/test/bake/dev/react-spa.test.ts +++ b/test/bake/dev/react-spa.test.ts @@ -1,7 +1,7 @@ // these tests involve ensuring react (html loader + single page app) works // react is big and we do lots of stuff like fast refresh. import { expect } from "bun:test"; -import { devTest } from "../dev-server-harness"; +import { devTest } from "../bake-harness"; devTest("react in html", { fixture: "react-spa-simple", diff --git a/test/bake/dev/sourcemap.test.ts b/test/bake/dev/sourcemap.test.ts index 2c99808b0e..a4593fc288 100644 --- a/test/bake/dev/sourcemap.test.ts +++ b/test/bake/dev/sourcemap.test.ts @@ -3,7 +3,7 @@ // work because hmr-runtime is minified in release builds, which would affect // the generated line/column numbers across different build configurations. import { expect } from "bun:test"; -import { Dev, devTest, emptyHtmlFile, reactRefreshStub } from "../dev-server-harness"; +import { Dev, devTest, emptyHtmlFile, reactRefreshStub } from "../bake-harness"; import { BasicSourceMapConsumer, IndexedSourceMapConsumer, SourceMapConsumer } from "source-map"; devTest("source map emitted for primary chunk", {