//! Instance of the development server. Attaches to an instance of `Bun.serve`,
//! controlling bundler, routing, and hot module reloading.
//!
//! Reprocessing files that did not change is banned; by having perfect
//! incremental tracking over the project, editing a file's contents (aside from
//! adjusting imports) must always rebundle only that one file.
//!
//! All work is held in-memory, using manually managed data-oriented design.
pub const DevServer = @This();

pub const debug = bun.Output.Scoped(.Bake, false);
pub const igLog = bun.Output.scoped(.IncrementalGraph, false);

const DebugHTTPServer = @import("../bun.js/api/server.zig").DebugHTTPServer;

pub const Options = struct {
    /// Arena must live until DevServer.deinit()
    arena: Allocator,
    root: []const u8,
    vm: *VirtualMachine,
    framework: bake.Framework,

    // Debugging features
    dump_sources: ?[]const u8 = if (Environment.isDebug) ".bake-debug" else null,
    dump_state_on_crash: bool = false,
    verbose_watcher: bool = false,
};

// The fields `client_graph`, `server_graph`, and `directory_watchers` all
// use `@fieldParentPointer` to access DevServer's state. This pattern has
// made it easier to group related fields together, but one must remember
// those structures still depend on the DevServer pointer.

/// Used for all server-wide allocations. In debug, this shows up in
/// a separate named heap. Thread-safe.
allocator: Allocator,
/// Absolute path to project root directory. For the HMR
/// runtime, its module IDs are strings relative to this.
root: []const u8,
/// Hex string generated by hashing the framework config and bun revision.
/// Embedded in client bundles and sent when the HMR Socket is opened;
/// when the value mismatches the page is forcibly reloaded.
configuration_hash_key: [16]u8,
/// The virtual machine (global object) to execute code in.
vm: *VirtualMachine,
/// May be `null` if not attached to an HTTP server yet.
server: ?bun.JSC.API.AnyServer,
/// Contains the tree of routes. This structure contains FileIndex
/// values (opaque ids referencing IncrementalGraph files).
router: FrameworkRouter,
/// Every navigatable route has bundling state here.
route_bundles: ArrayListUnmanaged(RouteBundle),
/// All access into IncrementalGraph is guarded by a DebugThreadLock. This is
/// only a debug assertion as contention to this is always a bug; If a bundle is
/// active and a file is changed, that change is placed into the next bundle.
graph_safety_lock: bun.DebugThreadLock,
client_graph: IncrementalGraph(.client),
server_graph: IncrementalGraph(.server),
/// State populated during bundling and hot updates. Often cleared.
incremental_result: IncrementalResult,
/// Quickly retrieve a route's index from its entry point file. These are
/// populated as the routes are discovered. The route may not be bundled OR
/// navigatable, such as the case where a layout's index is looked up.
route_lookup: AutoArrayHashMapUnmanaged(IncrementalGraph(.server).FileIndex, RouteIndexAndRecurseFlag),
/// CSS files are accessible via `/_bun/css/<hash>.css`
/// Value is bundled code owned by `dev.allocator`
css_files: AutoArrayHashMapUnmanaged(u64, []const u8),
/// JS files are accessible via `/_bun/client/route.<id>.js`
/// These are randomly generated to avoid possible browser caching of old assets.
route_js_payloads: AutoArrayHashMapUnmanaged(u64, Route.Index.Optional),
// /// Assets are accessible via `/_bun/asset/<hash>`
// assets: bun.StringArrayHashMapUnmanaged(u64, Asset),
/// All bundling failures are stored until a file is saved and rebuilt.
/// They are stored in the wire format the HMR runtime expects so that
/// serialization only happens once.
bundling_failures: std.ArrayHashMapUnmanaged(
    SerializedFailure,
    void,
    SerializedFailure.ArrayHashContextViaOwner,
    false,
) = .{},

// These values are handles to the functions in `hmr-runtime-server.ts`.
// For type definitions, see `./bake.private.d.ts`
server_fetch_function_callback: JSC.Strong,
server_register_update_callback: JSC.Strong,

// Watching
bun_watcher: *JSC.Watcher,
directory_watchers: DirectoryWatchStore,
watcher_atomics: WatcherAtomics,

/// Number of bundles that have been executed. This is currently not read, but
/// will be used later to determine when to invoke graph garbage collection.
generation: usize = 0,
/// Displayed in the HMR success indicator
bundles_since_last_error: usize = 0,

framework: bake.Framework,
// Each logical graph gets its own bundler configuration
server_bundler: Bundler,
client_bundler: Bundler,
ssr_bundler: Bundler,
/// The log used by all `server_bundler`, `client_bundler` and `ssr_bundler`.
/// Note that it is rarely correct to write messages into it. Instead, associate
/// messages with the IncrementalGraph file or Route using `SerializedFailure`
log: Log,

/// There is only ever one bundle executing at the same time, since all bundles
/// inevitably share state. This bundle is asynchronous, storing its state here
/// while in-flight. All allocations held by `.bv2.graph.heap`'s arena
current_bundle: ?struct {
    bv2: *BundleV2,
    /// Information BundleV2 needs to finalize the bundle
    start_data: bun.bundle_v2.BakeBundleStart,
    /// Started when the bundle was queued
    timer: std.time.Timer,
    /// If any files in this bundle were due to hot-reloading, some extra work
    /// must be done to inform clients to reload routes. When this is false,
    /// all entry points do not have bundles yet.
    had_reload_event: bool,
},
/// This is not stored in `current_bundle` so that its memory can be reused when
/// there is no active bundle. After the bundle finishes, these requests will
/// be continued, either calling their handler on success or sending the error
/// page on failure.
current_bundle_requests: ArrayListUnmanaged(DeferredRequest),
/// When `current_bundle` is non-null and new requests to bundle come in,
/// those are held here temporarily. When the current bundle is finished, it
/// will immediately enqueue this.
next_bundle: struct {
    /// A list of `RouteBundle`s which have active requests to bundle it.
    route_queue: AutoArrayHashMapUnmanaged(RouteBundle.Index, void),
    /// If a reload event exists and should be drained. The information
    /// for this watch event is in one of the `watch_events`
    reload_event: ?*HotReloadEvent,
    /// The list of requests that are blocked on this bundle.
    requests: ArrayListUnmanaged(DeferredRequest),
},

// Debugging
dump_dir: if (bun.FeatureFlags.bake_debugging_features) ?std.fs.Dir else void,
/// Reference count to number of active sockets with the visualizer enabled.
emit_visualizer_events: u32,
has_pre_crash_handler: bool,

pub const internal_prefix = "/_bun";
pub const client_prefix = internal_prefix ++ "/client";
pub const asset_prefix = internal_prefix ++ "/asset";
pub const css_prefix = internal_prefix ++ "/css";

pub const RouteBundle = struct {
    pub const Index = bun.GenericIndex(u30, RouteBundle);

    route: Route.Index,
    server_state: State,
    /// Used to communicate over WebSocket the pattern. The HMR client contains code
    /// to match this against the URL bar to determine if a reloaded route applies.
    full_pattern: []const u8,
    /// Generated lazily when the client JS is requested (HTTP GET /_bun/client/*.js),
    /// which is only needed when a hard-reload is performed.
    ///
    /// Freed when a client module updates.
    client_bundle: ?[]const u8,
    /// Contain the list of serialized failures. Hashmap allows for
    /// efficient lookup and removal of failing files.
    /// When state == .evaluation_failure, this is populated with that error.
    evaluate_failure: ?SerializedFailure,

    // TODO: micro-opt: use a singular strong

    /// Cached to avoid re-creating the array every request.
    /// Invalidated when a layout is added or removed from this route.
    cached_module_list: JSC.Strong,
    /// Cached to avoid re-creating the string every request.
    /// Invalidated when any client file associated with the route is updated.
    cached_client_bundle_url: JSC.Strong,
    /// Cached to avoid re-creating the array every request.
    /// Invalidated when the list of CSS files changes.
    cached_css_file_array: JSC.Strong,
    /// Reference count of how many HmrSockets say they are on this route. This
    /// allows hot-reloading events to reduce the amount of times it traces the
    /// graph.
    active_viewers: usize,

    /// A union is not used so that `bundler_failure_logs` can re-use memory, as
    /// this state frequently changes between `loaded` and the failure variants.
    const State = enum {
        /// In development mode, routes are lazily built. This state implies a
        /// build of this route has never been run. It is possible to bundle the
        /// route entry point and still have an unqueued route if another route
        /// imports this one. This state is implied if `FrameworkRouter.Route`
        /// has no bundle index assigned.
        unqueued,
        /// A bundle associated with this route is happening
        bundling,
        /// This route was flagged for bundling failures. There are edge cases
        /// where a route can be disconnected from its failures, so the route
        /// imports has to be traced to discover if possible failures still
        /// exist.
        possible_bundling_failures,
        /// Loading the module at runtime had a failure. The error can be
        /// cleared by editing any file in the same hot-reloading boundary.
        evaluation_failure,
        /// Calling the request function may error, but that error will not be
        /// at fault of bundling, nor would re-bundling change anything.
        loaded,
    };
};

/// DevServer is stored on the heap, storing its allocator.
pub fn init(options: Options) bun.JSOOM!*DevServer {
    const allocator = bun.default_allocator;
    bun.analytics.Features.dev_server +|= 1;

    var dump_dir = if (bun.FeatureFlags.bake_debugging_features)
        if (options.dump_sources) |dir| std.fs.cwd().makeOpenPath(dir, .{}) catch |err| dir: {
            bun.handleErrorReturnTrace(err, @errorReturnTrace());
            Output.warn("Could not open directory for dumping sources: {}", .{err});
            break :dir null;
        } else null;
    errdefer if (bun.FeatureFlags.bake_debugging_features) if (dump_dir) |*dir| dir.close();

    const separate_ssr_graph = if (options.framework.server_components) |sc| sc.separate_ssr_graph else false;

    const dev = bun.create(allocator, DevServer, .{
        .allocator = allocator,
        .root = options.root,
        .vm = options.vm,
        .server = null,
        .directory_watchers = DirectoryWatchStore.empty,
        .server_fetch_function_callback = .{},
        .server_register_update_callback = .{},
        .generation = 0,
        .graph_safety_lock = bun.DebugThreadLock.unlocked,
        .dump_dir = dump_dir,
        .framework = options.framework,
        .emit_visualizer_events = 0,
        .has_pre_crash_handler = options.dump_state_on_crash,
        .css_files = .{},
        .route_js_payloads = .{},
        // .assets = .{},
        .client_graph = IncrementalGraph(.client).empty,
        .server_graph = IncrementalGraph(.server).empty,
        .incremental_result = IncrementalResult.empty,
        .route_lookup = .{},
        .route_bundles = .{},
        .current_bundle = null,
        .current_bundle_requests = .{},
        .next_bundle = .{
            .route_queue = .{},
            .reload_event = null,
            .requests = .{},
        },
        .log = bun.logger.Log.init(allocator),
        // Initialized below; `undefined` is only visible until then.
        .server_bundler = undefined,
        .client_bundler = undefined,
        .ssr_bundler = undefined,
        .bun_watcher = undefined,
        .configuration_hash_key = undefined,
        .router = undefined,
        .watcher_atomics = undefined,
    });
    const global = dev.vm.global;
    errdefer allocator.destroy(dev);

    assert(dev.server_graph.owner() == dev);
    assert(dev.client_graph.owner() == dev);
    assert(dev.directory_watchers.owner() == dev);

    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();

    const generic_action = "while initializing development server";
    const fs = bun.fs.FileSystem.init(options.root) catch |err|
        return global.throwError(err, generic_action);
    dev.bun_watcher = Watcher.init(DevServer, dev, fs, bun.default_allocator) catch |err|
        return global.throwError(err, "while initializing file watcher for development server");
    errdefer dev.bun_watcher.deinit(false);
    dev.bun_watcher.start() catch |err|
        return global.throwError(err, "while initializing file watcher thread for development server");

    dev.server_bundler.resolver.watcher = dev.bun_watcher.getResolveWatcher();
    dev.client_bundler.resolver.watcher = dev.bun_watcher.getResolveWatcher();
    dev.ssr_bundler.resolver.watcher = dev.bun_watcher.getResolveWatcher();
    dev.watcher_atomics = WatcherAtomics.init(dev);

    // FIX(review): each bundler's `options.dev_server` must be assigned *after*
    // its own `initBundler` call, since `initBundler` (re)builds the options.
    // Previously the two assignments were swapped across the server/client
    // inits, so `client_bundler.options.dev_server` ended up clobbered by
    // `initBundler(.client, ...)` and was left unset. The ssr branch below
    // demonstrates the intended assign-after-init pattern.
    dev.framework.initBundler(allocator, &dev.log, .development, .server, &dev.server_bundler) catch |err|
        return global.throwError(err, generic_action);
    dev.server_bundler.options.dev_server = dev;
    dev.framework.initBundler(allocator, &dev.log, .development, .client, &dev.client_bundler) catch |err|
        return global.throwError(err, generic_action);
    dev.client_bundler.options.dev_server = dev;
    if (separate_ssr_graph) {
        dev.framework.initBundler(allocator, &dev.log, .development, .ssr, &dev.ssr_bundler) catch |err|
            return global.throwError(err, generic_action);
        dev.ssr_bundler.options.dev_server = dev;
    }

    dev.framework = dev.framework.resolve(&dev.server_bundler.resolver, &dev.client_bundler.resolver, options.arena) catch {
        try bake.Framework.addReactInstallCommandNote(&dev.log);
        return global.throwValue2(dev.log.toJSAggregateError(global, "Framework is missing required files!"));
    };

    errdefer dev.route_lookup.clearAndFree(allocator);
    // errdefer dev.client_graph.deinit(allocator);
    // errdefer dev.server_graph.deinit(allocator);

    // Hash all inputs that affect generated bundles. A mismatch between the
    // stored key and a client's key forces a full page reload.
    dev.configuration_hash_key = hash_key: {
        var hash = std.hash.Wyhash.init(128);

        if (bun.Environment.isDebug) {
            // In debug builds, the executable and runtime sources change often;
            // hash the binary mtime and the embedded HMR runtimes directly.
            const stat = bun.sys.stat(bun.selfExePath() catch |e| Output.panic("unhandled {}", .{e})).unwrap() catch |e| Output.panic("unhandled {}", .{e});
            bun.writeAnyToHasher(&hash, stat.mtime());
            hash.update(bake.getHmrRuntime(.client));
            hash.update(bake.getHmrRuntime(.server));
        } else {
            hash.update(bun.Environment.git_sha_short);
        }

        // Zero bytes act as field separators so adjacent strings cannot collide.
        for (dev.framework.file_system_router_types) |fsr| {
            bun.writeAnyToHasher(&hash, fsr.allow_layouts);
            bun.writeAnyToHasher(&hash, fsr.ignore_underscores);
            hash.update(fsr.entry_server);
            hash.update(&.{0});
            hash.update(fsr.entry_client orelse "");
            hash.update(&.{0});
            hash.update(fsr.prefix);
            hash.update(&.{0});
            hash.update(fsr.root);
            hash.update(&.{0});
            for (fsr.extensions) |ext| {
                hash.update(ext);
                hash.update(&.{0});
            }
            hash.update(&.{0});
            for (fsr.ignore_dirs) |dir| {
                hash.update(dir);
                hash.update(&.{0});
            }
            hash.update(&.{0});
        }

        if (dev.framework.server_components) |sc| {
            bun.writeAnyToHasher(&hash, true);
            bun.writeAnyToHasher(&hash, sc.separate_ssr_graph);
            hash.update(sc.client_register_server_reference);
            hash.update(&.{0});
            hash.update(sc.server_register_client_reference);
            hash.update(&.{0});
            hash.update(sc.server_register_server_reference);
            hash.update(&.{0});
            hash.update(sc.server_runtime_import);
            hash.update(&.{0});
        } else {
            bun.writeAnyToHasher(&hash, false);
        }
        if (dev.framework.react_fast_refresh) |rfr| {
            bun.writeAnyToHasher(&hash, true);
            hash.update(rfr.import_source);
        } else {
            bun.writeAnyToHasher(&hash, false);
        }
        for (dev.framework.built_in_modules.keys(), dev.framework.built_in_modules.values()) |k, v| {
            hash.update(k);
            hash.update(&.{0});
            bun.writeAnyToHasher(&hash, std.meta.activeTag(v));
            hash.update(switch (v) {
                inline else => |data| data,
            });
            hash.update(&.{0});
        }
        hash.update(&.{0});

        break :hash_key std.fmt.bytesToHex(std.mem.asBytes(&hash.final()), .lower);
    };

    // Add react fast refresh if needed. This is the first file on the client side,
    // as it will be referred to by index.
    if (dev.framework.react_fast_refresh) |rfr| {
        assert(try dev.client_graph.insertStale(rfr.import_source, false) == IncrementalGraph(.client).react_refresh_index);
    }

    dev.initServerRuntime();

    // Initialize FrameworkRouter
    dev.router = router: {
        var types = try std.ArrayListUnmanaged(FrameworkRouter.Type).initCapacity(allocator, options.framework.file_system_router_types.len);
        errdefer types.deinit(allocator);

        for (options.framework.file_system_router_types, 0..) |fsr, i| {
            const joined_root = bun.path.joinAbs(dev.root, .auto, fsr.root);
            const entry = dev.server_bundler.resolver.readDirInfoIgnoreError(joined_root) orelse
                continue;
            const server_file = try dev.server_graph.insertStaleExtra(fsr.entry_server, false, true);
            try types.append(allocator, .{
                .abs_root = bun.strings.withoutTrailingSlash(entry.abs_path),
                .prefix = fsr.prefix,
                .ignore_underscores = fsr.ignore_underscores,
                .ignore_dirs = fsr.ignore_dirs,
                .extensions = fsr.extensions,
                .style = fsr.style,
                .allow_layouts = fsr.allow_layouts,
                .server_file = toOpaqueFileId(.server, server_file),
                .client_file = if (fsr.entry_client) |client|
                    toOpaqueFileId(.client, try dev.client_graph.insertStale(client, false)).toOptional()
                else
                    .none,
                .server_file_string = .{},
            });
            try dev.route_lookup.put(allocator, server_file, .{
                .route_index = FrameworkRouter.Route.Index.init(@intCast(i)),
                .should_recurse_when_visiting = true,
            });
        }

        break :router try FrameworkRouter.initEmpty(dev.root, types.items, allocator);
    };

    // TODO: move scanning to be one tick after server startup. this way the
    // line saying the server is ready shows quicker, and route errors show up
    // after that line.
    try dev.scanInitialRoutes();

    if (bun.FeatureFlags.bake_debugging_features and options.dump_state_on_crash)
        try bun.crash_handler.appendPreCrashHandler(DevServer, dev, dumpStateDueToCrash);

    return dev;
}

/// Loads `hmr-runtime-server.ts` into the VM and captures the interface
/// functions (`handleRequest`, `registerUpdate`) as strong references.
/// Panics on any failure — a broken server runtime is always a bug in Bun.
fn initServerRuntime(dev: *DevServer) void {
    const runtime = bun.String.static(bun.bake.getHmrRuntime(.server));

    const interface = c.BakeLoadInitialServerCode(
        @ptrCast(dev.vm.global),
        runtime,
        if (dev.framework.server_components) |sc| sc.separate_ssr_graph else false,
    ) catch |err| {
        dev.vm.printErrorLikeObjectToConsole(dev.vm.global.takeException(err));
        @panic("Server runtime failed to start. The above error is always a bug in Bun");
    };

    if (!interface.isObject())
        @panic("Internal assertion failure: expected interface from HMR runtime to be an object");
    const fetch_function = interface.get(dev.vm.global, "handleRequest") catch null orelse
        @panic("Internal assertion failure: expected interface from HMR runtime to contain handleRequest");
    bun.assert(fetch_function.isCallable(dev.vm.jsc));
    dev.server_fetch_function_callback = JSC.Strong.create(fetch_function, dev.vm.global);
    const register_update = interface.get(dev.vm.global, "registerUpdate") catch null orelse
        @panic("Internal assertion failure: expected interface from HMR runtime to contain registerUpdate");
    dev.server_register_update_callback = JSC.Strong.create(register_update, dev.vm.global);

    fetch_function.ensureStillAlive();
    register_update.ensureStillAlive();
}

/// Deferred one tick so that the server can be up faster
fn scanInitialRoutes(dev: *DevServer) !void {
    try dev.router.scanAll(
        dev.allocator,
        &dev.server_bundler.resolver,
        FrameworkRouter.InsertionContext.wrap(DevServer, dev),
    );

    try dev.server_graph.ensureStaleBitCapacity(true);
    try dev.client_graph.ensureStaleBitCapacity(true);
}

/// Registers all DevServer HTTP/WebSocket routes onto a `Bun.serve` app.
pub fn attachRoutes(dev: *DevServer, server: anytype) !void {
    dev.server = bun.JSC.API.AnyServer.from(server);
    const app = server.app.?;

    // For this to work, the route handlers need to be augmented to use the comptime
    // SSL parameter. It's worth considering removing the SSL boolean.
    if (@TypeOf(app) == *uws.NewApp(true)) {
        bun.todoPanic(@src(), "DevServer does not support SSL yet", .{});
    }

    app.get(client_prefix ++ "/:route", *DevServer, dev, onJsRequest);
    app.get(asset_prefix ++ "/:asset", *DevServer, dev, onAssetRequest);
    app.get(css_prefix ++ "/:asset", *DevServer, dev, onCssRequest);
    app.get(internal_prefix ++ "/src/*", *DevServer, dev, onSrcRequest);
    app.ws(
        internal_prefix ++ "/hmr",
        dev,
        0,
        uws.WebSocketBehavior.Wrap(DevServer, HmrSocket, false).apply(.{}),
    );

    if (bun.FeatureFlags.bake_debugging_features)
        app.get(internal_prefix ++ "/incremental_visualizer", *DevServer, dev, onIncrementalVisualizer);

    // Catch-all so application routes are handled by the dev server.
    app.any("/*", *DevServer, dev, onRequest);
}

pub fn deinit(dev: *DevServer) void {
    // TODO: Currently deinit is not implemented, as it was assumed to be alive for
    // the remainder of this process' lifespan. This isn't always true.
    const allocator = dev.allocator;
    if (dev.has_pre_crash_handler)
        bun.crash_handler.removePreCrashHandler(dev);
    allocator.destroy(dev);
    // if (bun.Environment.isDebug)
    //     bun.todoPanic(@src(), "bake.DevServer.deinit()", .{});
}

/// GET /_bun/client/route.<id>.js — serves a generated client bundle.
/// Yields (falls through to other routes) on any malformed id.
fn onJsRequest(dev: *DevServer, req: *Request, resp: *Response) void {
    const maybe_route = route: {
        const route_id = req.parameter(0);
        if (!bun.strings.hasSuffixComptime(route_id, ".js"))
            return req.setYield(true);
        if (!bun.strings.hasPrefixComptime(route_id, "route."))
            return req.setYield(true);
        const i = parseHexToInt(u64, route_id["route.".len .. route_id.len - ".js".len]) orelse
            return req.setYield(true);
        break :route dev.route_js_payloads.get(i) orelse
            return req.setYield(true);
    };

    if (maybe_route.unwrap()) |route| {
        dev.ensureRouteIsBundled(route, .js_payload, req, resp) catch bun.outOfMemory();
    } else {
        @panic("TODO: generate client bundle with no source files");
    }
}

fn onAssetRequest(dev: *DevServer, req: *Request, resp: *Response) void {
    _ = dev;
    _ = req;
    _ = resp;
    bun.todoPanic(@src(), "serve asset file", .{});
    // const route_id = req.parameter(0);
    // const asset = dev.assets.get(route_id) orelse
    //     return req.setYield(true);
    // _ = asset; // autofix
}

/// GET /_bun/css/<hash>.css — serves bundled CSS looked up by its u64 hash.
fn onCssRequest(dev: *DevServer, req: *Request, resp: *Response) void {
    const param = req.parameter(0);
    if (!bun.strings.hasSuffixComptime(param, ".css"))
        return req.setYield(true);
    const hex = param[0 .. param.len - ".css".len];
    if (hex.len != @sizeOf(u64) * 2)
        return req.setYield(true);

    var out: [@sizeOf(u64)]u8 = undefined;
    assert((std.fmt.hexToBytes(&out, hex) catch
        return req.setYield(true)).len == @sizeOf(u64));
    const hash: u64 = @bitCast(out);

    const css = dev.css_files.get(hash) orelse
        return req.setYield(true);
    sendTextFile(css, MimeType.css.value, resp);
}

/// Decodes a fixed-width little-endian hex string into T; null on invalid hex.
/// NOTE(review): a too-short (valid hex) slice only trips the debug assert —
/// callers should length-check first, as `onCssRequest` does.
fn parseHexToInt(comptime T: type, slice: []const u8) ?T {
    var out: [@sizeOf(T)]u8 = undefined;
    assert((std.fmt.hexToBytes(&out, slice) catch return null).len == @sizeOf(T));
    return @bitCast(out);
}

fn onIncrementalVisualizer(_: *DevServer, _: *Request, resp: *Response) void {
    resp.corked(onIncrementalVisualizerCorked, .{resp});
}

fn onIncrementalVisualizerCorked(resp: *Response) void {
    const code = if (Environment.codegen_embed)
        @embedFile("incremental_visualizer.html")
    else
        bun.runtimeEmbedFile(.src_eager, "bake/incremental_visualizer.html");
    resp.writeHeaderInt("Content-Length", code.len);
    resp.end(code, false);
}

/// Drives a request through the route's bundling state machine: queues a
/// bundle if one is needed, defers the request while a bundle is in flight,
/// replies with serialized failures, or dispatches to the loaded handler.
fn ensureRouteIsBundled(
    dev: *DevServer,
    route_index: Route.Index,
    kind: DeferredRequest.Data.Tag,
    req: *Request,
    resp: *Response,
) bun.OOM!void {
    const route_bundle_index = try dev.getOrPutRouteBundle(route_index);

    // TODO: Zig 0.14 gets labelled continue:
    // - Remove the `while`
    // - Move the code after this switch into `.loaded =>`
    // - Replace `break` with `continue :sw .loaded`
    // - Replace `continue` with `continue :sw <state>`
    while (true) {
        switch (dev.routeBundlePtr(route_bundle_index).server_state) {
            .unqueued => {
                // Reserve capacity up front so that storing the request below
                // cannot fail after the response context is consumed.
                try dev.next_bundle.requests.ensureUnusedCapacity(dev.allocator, 1);
                if (dev.current_bundle != null) {
                    try dev.next_bundle.route_queue.ensureUnusedCapacity(dev.allocator, 1);
                }

                const deferred: DeferredRequest = .{
                    .route_bundle_index = route_bundle_index,
                    .data = switch (kind) {
                        .js_payload => .{ .js_payload = resp },
                        .server_handler => .{
                            .server_handler = (dev.server.?.ptr.as(DebugHTTPServer).prepareJsRequestContext(req, resp) orelse return)
                                .save(dev.vm.global, req, resp),
                        },
                    },
                };
                errdefer @compileError("cannot error since the request is already stored");

                dev.next_bundle.requests.appendAssumeCapacity(deferred);
                if (dev.current_bundle != null) {
                    dev.next_bundle.route_queue.putAssumeCapacity(route_bundle_index, {});
                } else {
                    var sfa = std.heap.stackFallback(4096, dev.allocator);
                    const temp_alloc = sfa.get();
                    var entry_points: EntryPointList = EntryPointList.empty;
                    defer entry_points.deinit(temp_alloc);
                    dev.appendRouteEntryPointsIfNotStale(&entry_points, temp_alloc, route_index) catch bun.outOfMemory();

                    if (entry_points.set.count() == 0) {
                        // Nothing stale to bundle; settle the state immediately.
                        if (dev.bundling_failures.count() > 0) {
                            dev.routeBundlePtr(route_bundle_index).server_state = .possible_bundling_failures;
                        } else {
                            dev.routeBundlePtr(route_bundle_index).server_state = .loaded;
                        }
                        continue;
                    }

                    dev.startAsyncBundle(
                        entry_points,
                        false,
                        std.time.Timer.start() catch @panic("timers unsupported"),
                    ) catch |err| {
                        if (dev.log.hasAny()) {
                            dev.log.print(Output.errorWriterBuffered()) catch {};
                            Output.flush();
                        }
                        Output.panic("Fatal error while initializing bundle job: {}", .{err});
                    };
                    dev.routeBundlePtr(route_bundle_index).server_state = .bundling;
                }
                return;
            },
            .bundling => {
                bun.assert(dev.current_bundle != null);
                try dev.current_bundle_requests.ensureUnusedCapacity(dev.allocator, 1);

                const deferred: DeferredRequest = .{
                    .route_bundle_index = route_bundle_index,
                    .data = switch (kind) {
                        .js_payload => .{ .js_payload = resp },
                        .server_handler => .{
                            .server_handler = (dev.server.?.ptr.as(DebugHTTPServer).prepareJsRequestContext(req, resp) orelse return)
                                .save(dev.vm.global, req, resp),
                        },
                    },
                };

                dev.current_bundle_requests.appendAssumeCapacity(deferred);
                return;
            },
            .possible_bundling_failures => {
                // TODO: perform a graph trace to find just the errors that are needed
                if (dev.bundling_failures.count() > 0) {
                    resp.corked(sendSerializedFailures, .{
                        dev,
                        resp,
                        dev.bundling_failures.keys(),
                        .bundler,
                    });
                    return;
                } else {
                    dev.routeBundlePtr(route_bundle_index).server_state = .loaded;
                    break;
                }
            },
            .evaluation_failure => {
                resp.corked(sendSerializedFailures, .{
                    dev,
                    resp,
                    (&(dev.routeBundlePtr(route_bundle_index).evaluate_failure orelse @panic("missing error")))[0..1],
                    .evaluation,
                });
                return;
            },
            .loaded => break,
        }

        // this error is here to make sure there are no accidental loop exits
        @compileError("all branches above should `return`, `break` or `continue`");
    }

    switch (kind) {
        .server_handler => dev.onRequestWithBundle(route_bundle_index, .{ .stack = req }, resp),
        .js_payload => dev.onJsRequestWithBundle(route_bundle_index, resp),
    }
}

/// Collects the route's not-yet-bundled entry points: the framework server and
/// client entries, the page file, and every layout up the parent chain.
fn appendRouteEntryPointsIfNotStale(dev: *DevServer, entry_points: *EntryPointList, alloc: Allocator, route_index: Route.Index) bun.OOM!void {
    const server_file_names = dev.server_graph.bundled_files.keys();
    const client_file_names = dev.client_graph.bundled_files.keys();

    // Build a list of all files that have not yet been bundled.
    var route = dev.router.routePtr(route_index);
    const router_type = dev.router.typePtr(route.type);
    try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, router_type.server_file);
    try dev.appendOpaqueEntryPoint(client_file_names, entry_points, alloc, .client, router_type.client_file);
    try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, route.file_page);
    try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, route.file_layout);
    while (route.parent.unwrap()) |parent_index| {
        route = dev.router.routePtr(parent_index);
        try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, route.file_layout);
    }
}

/// Invokes the server HMR runtime's `handleRequest` with four arguments
/// (router type main module, route module list, client script id, css list),
/// lazily building and caching each JS value on the RouteBundle/router type.
fn onRequestWithBundle(
    dev: *DevServer,
    route_bundle_index: RouteBundle.Index,
    req: bun.JSC.API.SavedRequest.Union,
    resp: *Response,
) void {
    const server_request_callback = dev.server_fetch_function_callback.get() orelse
        unreachable; // did not bundle

    const route_bundle = dev.routeBundlePtr(route_bundle_index);
    const router_type = dev.router.typePtr(dev.router.routePtr(route_bundle.route).type);

    dev.server.?.onRequestFromSaved(
        req,
        resp,
        server_request_callback,
        4,
        .{
            // routerTypeMain
            router_type.server_file_string.get() orelse str: {
                const name = dev.server_graph.bundled_files.keys()[fromOpaqueFileId(.server, router_type.server_file).get()];
                const str = bun.String.createUTF8(dev.relativePath(name));
                defer str.deref();
                const js = str.toJS(dev.vm.global);
                router_type.server_file_string = JSC.Strong.create(js, dev.vm.global);
                break :str js;
            },
            // routeModules
            route_bundle.cached_module_list.get() orelse arr: {
                const global = dev.vm.global;
                const keys = dev.server_graph.bundled_files.keys();
                // First pass: count the page plus every layout in the chain.
                var n: usize = 1;
                var route = dev.router.routePtr(route_bundle.route);
                while (true) {
                    if (route.file_layout != .none) n += 1;
                    route = dev.router.routePtr(route.parent.unwrap() orelse break);
                }
                const arr = JSValue.createEmptyArray(global, n);
                // Second pass: page at index 0, then layouts inner-to-outer.
                route = dev.router.routePtr(route_bundle.route);
                var route_name = bun.String.createUTF8(dev.relativePath(keys[fromOpaqueFileId(.server, route.file_page.unwrap().?).get()]));
                arr.putIndex(global, 0, route_name.transferToJS(global));
                n = 1;
                while (true) {
                    if (route.file_layout.unwrap()) |layout| {
                        var layout_name = bun.String.createUTF8(dev.relativePath(keys[fromOpaqueFileId(.server, layout).get()]));
                        arr.putIndex(global, @intCast(n), layout_name.transferToJS(global));
                        n += 1;
                    }
                    route = dev.router.routePtr(route.parent.unwrap() orelse break);
                }
                route_bundle.cached_module_list = JSC.Strong.create(arr, global);
                break :arr arr;
            },
            // clientId
            route_bundle.cached_client_bundle_url.get() orelse str: {
                const id, const route_index: Route.Index.Optional = if (router_type.client_file != .none)
                    .{ std.crypto.random.int(u64), route_bundle.route.toOptional() }
                else
                    // When there is no framework-provided client code, generate
                    // a JS file so that the hot-reloading code can reload the
                    // page on server-side changes and show errors in-browser.
                    .{ 0, .none };
                dev.route_js_payloads.put(dev.allocator, id, route_index) catch bun.outOfMemory();
                const str = bun.String.createFormat(client_prefix ++ "/route.{}.js", .{std.fmt.fmtSliceHexLower(std.mem.asBytes(&id))}) catch bun.outOfMemory();
                defer str.deref();
                const js = str.toJS(dev.vm.global);
                route_bundle.cached_client_bundle_url = JSC.Strong.create(js, dev.vm.global);
                break :str js;
            },
            // styles
            route_bundle.cached_css_file_array.get() orelse arr: {
                const js = dev.generateCssJSArray(route_bundle) catch bun.outOfMemory();
                route_bundle.cached_css_file_array = JSC.Strong.create(js, dev.vm.global);
                break :arr js;
            },
        },
    );
}

pub fn onJsRequestWithBundle(dev: *DevServer, bundle_index: RouteBundle.Index, resp: *Response) void {
    const route_bundle = dev.routeBundlePtr(bundle_index);
    // Lazily generate and cache the client bundle on first request.
    const code = route_bundle.client_bundle orelse code: {
        const code = dev.generateClientBundle(route_bundle) catch bun.outOfMemory();
        route_bundle.client_bundle = code;
        break :code code;
    };
    sendTextFile(code, MimeType.javascript.value, resp);
}

/// GET /_bun/src/* — opens the named source file in the user's editor.
pub fn onSrcRequest(dev: *DevServer, req: *uws.Request, resp: *App.Response) void {
    if (req.header("open-in-editor") == null) {
        resp.writeStatus("501 Not Implemented");
        resp.end("Viewing source without opening in editor is not implemented yet!", false);
        return;
    }

    const ctx = &dev.vm.rareData().editor_context;
    ctx.autoDetectEditor(JSC.VirtualMachine.get().bundler.env);
    const line: ?[]const u8 = req.header("editor-line");
    const column: ?[]const u8 = req.header("editor-column");

    if (ctx.editor) |editor| {
        var url = req.url()[internal_prefix.len + "/src/".len ..];
        // Strip a trailing ":line:col" style suffix if present.
        if (bun.strings.indexOfChar(url, ':')) |colon| {
            url = url[0..colon];
        }
        editor.open(ctx.path, url, line, column, dev.allocator) catch {
            resp.writeStatus("202 No Content");
            resp.end("", false);
            return;
        };
        resp.writeStatus("202 No Content");
        resp.end("", false);
    } else {
        resp.writeStatus("500 Internal Server Error");
        resp.end("Please set your editor in bunfig.toml", false);
    }
}

/// A request paused while its route's bundle is produced.
const DeferredRequest = struct {
    route_bundle_index: RouteBundle.Index,
    data: Data,

    const Data = union(enum) {
        /// Continue by invoking the server-side route handler.
        server_handler: bun.JSC.API.SavedRequest,
        /// Continue by sending the generated client JS payload.
        js_payload: *Response,

        const Tag = @typeInfo(Data).Union.tag_type.?;
    };
};

/// Kicks off the single in-flight asynchronous bundle. Moves queued
/// `next_bundle.requests` into `current_bundle_requests` (swapping buffers so
/// their memory is reused).
fn startAsyncBundle(
    dev: *DevServer,
    entry_points: EntryPointList,
    had_reload_event: bool,
    timer: std.time.Timer,
) bun.OOM!void {
    assert(dev.current_bundle == null);
    assert(entry_points.set.count() > 0);
    dev.log.clearAndFree();
    dev.incremental_result.reset();

    var heap = try ThreadlocalArena.init();
    errdefer heap.deinit();
    const allocator = heap.allocator();
    const ast_memory_allocator = try allocator.create(bun.JSAst.ASTMemoryAllocator);
    ast_memory_allocator.* = .{ .allocator = allocator };
    ast_memory_allocator.reset();
    ast_memory_allocator.push();

    if (dev.framework.server_components == null) {
        // The handling of the dependency graphs are SLIGHTLY different when
        // server components are disabled. It's subtle, but enough that it
        // would be incorrect to even try to run a build.
        bun.todoPanic(@src(), "support non-server components build", .{});
    }

    const bv2 = try BundleV2.init(
        &dev.server_bundler,
        if (dev.framework.server_components != null) .{
            .framework = dev.framework,
            .client_bundler = &dev.client_bundler,
            .ssr_bundler = &dev.ssr_bundler,
        } else @panic("TODO: support non-server components"),
        allocator,
        .{ .js = dev.vm.eventLoop() },
        false, // reloading is handled separately
        JSC.WorkPool.get(),
        heap,
    );
    bv2.bun_watcher = dev.bun_watcher;
    bv2.asynchronous = true;

    {
        dev.graph_safety_lock.lock();
        defer dev.graph_safety_lock.unlock();
        dev.client_graph.reset();
        dev.server_graph.reset();
    }

    const start_data = try bv2.startFromBakeDevServer(entry_points);

    dev.current_bundle = .{
        .bv2 = bv2,
        .timer = timer,
        .start_data = start_data,
        .had_reload_event = had_reload_event,
    };
    const old_current_requests = dev.current_bundle_requests;
    bun.assert(old_current_requests.items.len == 0);
    dev.current_bundle_requests = dev.next_bundle.requests;
    dev.next_bundle.requests = old_current_requests;
}

/// Serializes added/removed bundling failures into the HMR wire format,
/// flags affected routes as `.possible_bundling_failures`, and publishes the
/// payload to connected HMR sockets.
fn indexFailures(dev: *DevServer) !void {
    var sfa_state = std.heap.stackFallback(65536, dev.allocator);
    const sfa = sfa_state.get();

    if (dev.incremental_result.failures_added.items.len > 0) {
        // Compute the exact payload size so the buffer never reallocates.
        var total_len: usize = @sizeOf(MessageId) + @sizeOf(u32);

        for (dev.incremental_result.failures_added.items) |fail| {
            total_len += fail.data.len;
        }

        total_len += dev.incremental_result.failures_removed.items.len * @sizeOf(u32);

        var gts = try dev.initGraphTraceState(sfa);
        defer gts.deinit(sfa);

        var payload = try std.ArrayList(u8).initCapacity(sfa, total_len);
        defer payload.deinit();
        payload.appendAssumeCapacity(MessageId.errors.char());
        const w = payload.writer();

        try w.writeInt(u32, @intCast(dev.incremental_result.failures_removed.items.len), .little);

        for (dev.incremental_result.failures_removed.items) |removed| {
            try w.writeInt(u32, @bitCast(removed.getOwner().encode()), .little);
            removed.deinit();
        }

        for (dev.incremental_result.failures_added.items) |added| {
            try w.writeAll(added.data);

            switch (added.getOwner()) {
                .none, .route => unreachable,
                .server => |index| try dev.server_graph.traceDependencies(index, &gts, .no_stop),
                .client => |index| try dev.client_graph.traceDependencies(index, &gts, .no_stop),
            }
        }

        for (dev.incremental_result.routes_affected.items) |entry| {
            if (dev.router.routePtr(entry.route_index).bundle.unwrap()) |index| {
                dev.routeBundlePtr(index).server_state = .possible_bundling_failures;
            }
            if (entry.should_recurse_when_visiting)
                dev.markAllRouteChildrenFailed(entry.route_index);
        }

        dev.publish(.errors, payload.items, .binary);
    } else if (dev.incremental_result.failures_removed.items.len > 0) {
        var payload = try std.ArrayList(u8).initCapacity(sfa, @sizeOf(MessageId) + @sizeOf(u32) + dev.incremental_result.failures_removed.items.len * @sizeOf(u32));
        defer payload.deinit();
        payload.appendAssumeCapacity(MessageId.errors.char());
        const w = payload.writer();

        try w.writeInt(u32, @intCast(dev.incremental_result.failures_removed.items.len), .little);
        for (dev.incremental_result.failures_removed.items) |removed| {
            try w.writeInt(u32, @bitCast(removed.getOwner().encode()), .little);
            removed.deinit();
        }

        dev.publish(.errors, payload.items, .binary);
    }

    dev.incremental_result.failures_removed.clearRetainingCapacity();
}

/// Used to generate the entry point. Unlike incremental patches, this always
/// contains all needed files for a route.
/// Builds the full client-side bundle for a route by tracing every client
/// module reachable from the route. Caller owns the returned slice
/// (allocated via `takeBundle`). Requires the route to be `.loaded`.
fn generateClientBundle(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM![]const u8 {
    assert(route_bundle.client_bundle == null);
    assert(route_bundle.server_state == .loaded); // page is unfit to load
    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();
    // Prepare bitsets
    var sfa_state = std.heap.stackFallback(65536, dev.allocator);
    const sfa = sfa_state.get();
    var gts = try dev.initGraphTraceState(sfa);
    defer gts.deinit(sfa);
    // Run tracing
    dev.client_graph.reset();
    // was garbled ">s" (mis-decoded "&gts"); trace state is passed by address
    try dev.traceAllRouteImports(route_bundle, &gts, .{ .find_client_modules = true });
    const client_file = dev.router.typePtr(dev.router.routePtr(route_bundle.route).type).client_file.unwrap() orelse
        @panic("No client side entrypoint in client bundle");
    return dev.client_graph.takeBundle(
        .initial_response,
        dev.relativePath(dev.client_graph.bundled_files.keys()[fromOpaqueFileId(.client, client_file).get()]),
    );
}

/// Collects the URL pathnames of all CSS files reachable from a route into a
/// fresh JS array for the route's cached CSS list. Requires `.loaded` state.
fn generateCssJSArray(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM!JSC.JSValue {
    if (Environment.allow_assert) assert(!route_bundle.cached_css_file_array.has());
    assert(route_bundle.server_state == .loaded); // page is unfit to load
    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();
    // Prepare bitsets
    var sfa_state = std.heap.stackFallback(65536, dev.allocator);
    const sfa = sfa_state.get();
    var gts = try dev.initGraphTraceState(sfa);
    defer gts.deinit(sfa);
    // Run tracing
    dev.client_graph.reset();
    // was garbled ">s" (mis-decoded "&gts")
    try dev.traceAllRouteImports(route_bundle, &gts, .{ .find_css = true });
    const names = dev.client_graph.current_css_files.items;
    const arr = JSC.JSArray.createEmpty(dev.vm.global, names.len);
    for (names, 0..) |item, i| {
        const str = bun.String.createUTF8(item);
        defer str.deref();
        arr.putIndex(dev.vm.global, @intCast(i), str.toJS(dev.vm.global));
    }
    return arr;
}

/// Traces imports for everything a route depends on: both framework entry
/// points, the route's page file, and every parent layout up the route tree.
fn traceAllRouteImports(dev: *DevServer, route_bundle: *RouteBundle, gts: *GraphTraceState, goal: TraceImportGoal) !void {
    var route = dev.router.routePtr(route_bundle.route);
    const router_type = dev.router.typePtr(route.type);
    // Both framework entry points are considered
    try dev.server_graph.traceImports(fromOpaqueFileId(.server, router_type.server_file), gts, .{ .find_css = true });
    if (router_type.client_file.unwrap()) |id| {
        try dev.client_graph.traceImports(fromOpaqueFileId(.client, id), gts, goal);
    }
    // The route file is considered
    if (route.file_page.unwrap()) |id| {
        try dev.server_graph.traceImports(fromOpaqueFileId(.server, id), gts, goal);
    }
    // For all parents, the layout is considered
    while (true) {
        if (route.file_layout.unwrap()) |id| {
            try dev.server_graph.traceImports(fromOpaqueFileId(.server, id), gts, goal);
        }
        route = dev.router.routePtr(route.parent.unwrap() orelse break);
    }
}

/// Converts a list of server-graph file indexes into a JS array of
/// project-relative path strings, or `.null` when the list is empty.
fn makeArrayForServerComponentsPatch(dev: *DevServer, global: *JSC.JSGlobalObject, items: []const IncrementalGraph(.server).FileIndex) JSValue {
    if (items.len == 0) return .null;
    const arr = JSC.JSArray.createEmpty(global, items.len);
    const names = dev.server_graph.bundled_files.keys();
    for (items, 0..) |item, i| {
        const str = bun.String.createUTF8(dev.relativePath(names[item.get()]));
        defer str.deref();
        arr.putIndex(global, @intCast(i), str.toJS(global));
    }
    return arr;
}

pub const HotUpdateContext = struct {
    /// bundle_v2.Graph.input_files.items(.source)
    sources: []bun.logger.Source,
    /// bundle_v2.Graph.ast.items(.import_records)
    import_records: []bun.ImportRecord.List,
    /// bundle_v2.Graph.server_component_boundaries.slice()
    scbs: bun.JSAst.ServerComponentBoundary.List.Slice,
    /// Which files have a server-component boundary.
server_to_client_bitset: DynamicBitSetUnmanaged, /// Used to reduce calls to the IncrementalGraph hash table. /// /// Caller initializes a slice with `sources.len * 2` items /// all initialized to `std.math.maxInt(u32)` /// /// The first half of this slice is for the client graph, /// second half is for server. Interact with this via /// `getCachedIndex` resolved_index_cache: []u32, /// Used to tell if the server should replace or append import records. server_seen_bit_set: DynamicBitSetUnmanaged, gts: *GraphTraceState, pub fn getCachedIndex( rc: *const HotUpdateContext, comptime side: bake.Side, i: bun.JSAst.Index, ) *IncrementalGraph(side).FileIndex { const start = switch (side) { .client => 0, .server => rc.sources.len, }; const subslice = rc.resolved_index_cache[start..][0..rc.sources.len]; comptime assert(@alignOf(IncrementalGraph(side).FileIndex.Optional) == @alignOf(u32)); comptime assert(@sizeOf(IncrementalGraph(side).FileIndex.Optional) == @sizeOf(u32)); return @ptrCast(&subslice[i.get()]); } }; /// Called at the end of BundleV2 to index bundle contents into the `IncrementalGraph`s /// This function does not recover DevServer state if it fails (allocation failure) pub fn finalizeBundle( dev: *DevServer, bv2: *bun.bundle_v2.BundleV2, result: bun.bundle_v2.BakeBundleOutput, ) bun.OOM!void { defer dev.startNextBundleIfPresent(); const current_bundle = &dev.current_bundle.?; dev.graph_safety_lock.lock(); defer dev.graph_safety_lock.unlock(); const js_chunk = result.jsPseudoChunk(); const input_file_sources = bv2.graph.input_files.items(.source); const import_records = bv2.graph.ast.items(.import_records); const targets = bv2.graph.ast.items(.target); const scbs = bv2.graph.server_component_boundaries.slice(); var sfa = std.heap.stackFallback(65536, bv2.graph.allocator); const stack_alloc = sfa.get(); var scb_bitset = try bun.bit_set.DynamicBitSetUnmanaged.initEmpty(stack_alloc, input_file_sources.len); for ( scbs.list.items(.source_index), 
scbs.list.items(.ssr_source_index), scbs.list.items(.reference_source_index), ) |source_index, ssr_index, ref_index| { scb_bitset.set(source_index); scb_bitset.set(ref_index); if (ssr_index < scb_bitset.bit_length) scb_bitset.set(ssr_index); } const resolved_index_cache = try bv2.graph.allocator.alloc(u32, input_file_sources.len * 2); var ctx: bun.bake.DevServer.HotUpdateContext = .{ .import_records = import_records, .sources = input_file_sources, .scbs = scbs, .server_to_client_bitset = scb_bitset, .resolved_index_cache = resolved_index_cache, .server_seen_bit_set = undefined, .gts = undefined, }; // Pass 1, update the graph's nodes, resolving every bundler source // index into its `IncrementalGraph(...).FileIndex` for ( js_chunk.content.javascript.parts_in_chunk_in_order, js_chunk.compile_results_for_chunk, ) |part_range, compile_result| { const index = part_range.source_index; switch (targets[part_range.source_index.get()].bakeGraph()) { .server => try dev.server_graph.receiveChunk(&ctx, index, compile_result.code(), .js, false), .ssr => try dev.server_graph.receiveChunk(&ctx, index, compile_result.code(), .js, true), .client => try dev.client_graph.receiveChunk(&ctx, index, compile_result.code(), .js, false), } } for (result.cssChunks(), result.css_file_list.values()) |*chunk, metadata| { const index = bun.JSAst.Index.init(chunk.entry_point.source_index); const code = try chunk.intermediate_output.code( dev.allocator, &bv2.graph, &bv2.linker.graph, "/_bun/TODO-import-prefix-where-is-this-used?", chunk, result.chunks, null, false, // TODO: sourcemaps true ); // Create an entry for this file. const abs_path = ctx.sources[index.get()].path.text; // Later code needs to retrieve the CSS content // The hack is to use `entry_point_id`, which is otherwise unused, to store an index. 
chunk.entry_point.entry_point_id = try dev.insertOrUpdateCssAsset(abs_path, code.buffer); try dev.client_graph.receiveChunk(&ctx, index, "", .css, false); // If imported on server, there needs to be a server-side file entry // so that edges can be attached. When a file is only imported on // the server, this file is used to trace the CSS to the route. if (metadata.imported_on_server) { try dev.server_graph.insertCssFileOnServer( &ctx, index, abs_path, ); } } var gts = try dev.initGraphTraceState(bv2.graph.allocator); defer gts.deinit(bv2.graph.allocator); ctx.gts = >s; ctx.server_seen_bit_set = try bun.bit_set.DynamicBitSetUnmanaged.initEmpty(bv2.graph.allocator, dev.server_graph.bundled_files.count()); dev.incremental_result.had_adjusted_edges = false; // Pass 2, update the graph's edges by performing import diffing on each // changed file, removing dependencies. This pass also flags what routes // have been modified. for (js_chunk.content.javascript.parts_in_chunk_in_order) |part_range| { switch (targets[part_range.source_index.get()].bakeGraph()) { .server, .ssr => try dev.server_graph.processChunkDependencies(&ctx, part_range.source_index, bv2.graph.allocator), .client => try dev.client_graph.processChunkDependencies(&ctx, part_range.source_index, bv2.graph.allocator), } } for (result.cssChunks(), result.css_file_list.values()) |*chunk, metadata| { const index = bun.JSAst.Index.init(chunk.entry_point.source_index); // TODO: index css deps _ = index; _ = metadata; } // Index all failed files now that the incremental graph has been updated. 
try dev.indexFailures(); try dev.client_graph.ensureStaleBitCapacity(false); try dev.server_graph.ensureStaleBitCapacity(false); dev.generation +%= 1; if (Environment.enable_logs) { debug.log("Bundle Round {d}: {d} server, {d} client, {d} ms", .{ dev.generation, dev.server_graph.current_chunk_parts.items.len, dev.client_graph.current_chunk_parts.items.len, @divFloor(current_bundle.timer.read(), std.time.ns_per_ms), }); } // Load all new chunks into the server runtime. if (dev.server_graph.current_chunk_len > 0) { const server_bundle = try dev.server_graph.takeBundle(.hmr_chunk, ""); defer dev.allocator.free(server_bundle); const server_modules = c.BakeLoadServerHmrPatch(@ptrCast(dev.vm.global), bun.String.createLatin1(server_bundle)) catch |err| { // No user code has been evaluated yet, since everything is to // be wrapped in a function clousure. This means that the likely // error is going to be a syntax error, or other mistake in the // bundler. dev.vm.printErrorLikeObjectToConsole(dev.vm.global.takeException(err)); @panic("Error thrown while evaluating server code. This is always a bug in the bundler."); }; const errors = dev.server_register_update_callback.get().?.call( dev.vm.global, dev.vm.global.toJSValue(), &.{ server_modules, dev.makeArrayForServerComponentsPatch(dev.vm.global, dev.incremental_result.client_components_added.items), dev.makeArrayForServerComponentsPatch(dev.vm.global, dev.incremental_result.client_components_removed.items), }, ) catch |err| { // One module replacement error should NOT prevent follow-up // module replacements to fail. It is the HMR runtime's // responsibility to collect all module load errors, and // bubble them up. dev.vm.printErrorLikeObjectToConsole(dev.vm.global.takeException(err)); @panic("Error thrown in Hot-module-replacement code. 
This is always a bug in the HMR runtime."); }; _ = errors; // TODO: } var route_bits = try DynamicBitSetUnmanaged.initEmpty(stack_alloc, dev.route_bundles.items.len); defer route_bits.deinit(stack_alloc); var route_bits_client = try DynamicBitSetUnmanaged.initEmpty(stack_alloc, dev.route_bundles.items.len); defer route_bits_client.deinit(stack_alloc); var has_route_bits_set = false; var hot_update_payload_sfa = std.heap.stackFallback(65536, bun.default_allocator); var hot_update_payload = std.ArrayList(u8).initCapacity(hot_update_payload_sfa.get(), 65536) catch unreachable; // enough space defer hot_update_payload.deinit(); hot_update_payload.appendAssumeCapacity(MessageId.hot_update.char()); // The writer used for the hot_update payload const w = hot_update_payload.writer(); // It was discovered that if a tree falls with nobody around it, it does not // make any sound. Let's avoid writing into `w` if no sockets are open. const will_hear_hot_update = dev.numSubscribers(.hot_update) > 0; // This list of routes affected excludes client code. This means changing // a client component wont count as a route to trigger a reload on. // // A second trace is required to determine what routes had changed bundles, // since changing a layout affects all child routes. Additionally, routes // that do not have a bundle will not be cleared (as there is nothing to // clear for those) if (will_hear_hot_update and current_bundle.had_reload_event and dev.incremental_result.routes_affected.items.len > 0 and dev.bundling_failures.count() == 0) { has_route_bits_set = true; // A bit-set is used to avoid duplicate entries. 
This is not a problem // with `dev.incremental_result.routes_affected` for (dev.incremental_result.routes_affected.items) |request| { const route = dev.router.routePtr(request.route_index); if (route.bundle.unwrap()) |id| route_bits.set(id.get()); if (request.should_recurse_when_visiting) { markAllRouteChildren(&dev.router, 1, .{&route_bits}, request.route_index); } } // List 1 var it = route_bits.iterator(.{ .kind = .set }); while (it.next()) |bundled_route_index| { const bundle = &dev.route_bundles.items[bundled_route_index]; if (bundle.active_viewers == 0) continue; try w.writeInt(i32, @intCast(bundled_route_index), .little); } } try w.writeInt(i32, -1, .little); // When client component roots get updated, the `client_components_affected` // list contains the server side versions of these roots. These roots are // traced to the routes so that the client-side bundles can be properly // invalidated. if (dev.incremental_result.client_components_affected.items.len > 0) { has_route_bits_set = true; dev.incremental_result.routes_affected.clearRetainingCapacity(); gts.clear(); for (dev.incremental_result.client_components_affected.items) |index| { try dev.server_graph.traceDependencies(index, >s, .no_stop); } // A bit-set is used to avoid duplicate entries. 
This is not a problem // with `dev.incremental_result.routes_affected` for (dev.incremental_result.routes_affected.items) |request| { const route = dev.router.routePtr(request.route_index); if (route.bundle.unwrap()) |id| { route_bits.set(id.get()); route_bits_client.set(id.get()); } if (request.should_recurse_when_visiting) { markAllRouteChildren(&dev.router, 2, .{ &route_bits, &route_bits_client }, request.route_index); } } // Free old bundles var it = route_bits_client.iterator(.{ .kind = .set }); while (it.next()) |bundled_route_index| { const bundle = &dev.route_bundles.items[bundled_route_index]; if (bundle.client_bundle) |old| { dev.allocator.free(old); } bundle.client_bundle = null; } } // `route_bits` will have all of the routes that were modified. If any of // these have active viewers, DevServer should inform them of CSS attachments. These // route bundles also need to be invalidated of their css attachments. if (has_route_bits_set and (will_hear_hot_update or dev.incremental_result.had_adjusted_edges)) { var it = route_bits.iterator(.{ .kind = .set }); // List 2 while (it.next()) |i| { const bundle = dev.routeBundlePtr(RouteBundle.Index.init(@intCast(i))); if (dev.incremental_result.had_adjusted_edges) { bundle.cached_css_file_array.clear(); } if (bundle.active_viewers == 0 or !will_hear_hot_update) continue; try w.writeInt(i32, @intCast(i), .little); try w.writeInt(u32, @intCast(bundle.full_pattern.len), .little); try w.writeAll(bundle.full_pattern); // If no edges were changed, then it is impossible to // change the list of CSS files. if (dev.incremental_result.had_adjusted_edges) { gts.clear(); try dev.traceAllRouteImports(bundle, >s, .{ .find_css = true }); const names = dev.client_graph.current_css_files.items; try w.writeInt(i32, @intCast(names.len), .little); for (names) |name| { const css_prefix_slash = css_prefix ++ "/"; // These slices are url pathnames. 
The ID can be extracted bun.assert(name.len == (css_prefix_slash ++ ".css").len + 16); bun.assert(bun.strings.hasPrefix(name, css_prefix_slash)); try w.writeAll(name[css_prefix_slash.len..][0..16]); } } else { try w.writeInt(i32, -1, .little); } } } try w.writeInt(i32, -1, .little); // Send CSS mutations const css_chunks = result.cssChunks(); if (will_hear_hot_update) { if (dev.client_graph.current_chunk_len > 0 or css_chunks.len > 0) { const css_values = dev.css_files.values(); try w.writeInt(u32, @intCast(css_chunks.len), .little); const sources = bv2.graph.input_files.items(.source); for (css_chunks) |chunk| { const abs_path = sources[chunk.entry_point.source_index].path.text; try w.writeAll(&std.fmt.bytesToHex(std.mem.asBytes(&bun.hash(abs_path)), .lower)); const css_data = css_values[chunk.entry_point.entry_point_id]; try w.writeInt(u32, @intCast(css_data.len), .little); try w.writeAll(css_data); } if (dev.client_graph.current_chunk_len > 0) try dev.client_graph.takeBundleToList(.hmr_chunk, &hot_update_payload, ""); } else { try w.writeInt(i32, 0, .little); } dev.publish(.hot_update, hot_update_payload.items, .binary); } if (dev.incremental_result.failures_added.items.len > 0) { dev.bundles_since_last_error = 0; for (dev.current_bundle_requests.items) |*req| { const rb = dev.routeBundlePtr(req.route_bundle_index); rb.server_state = .possible_bundling_failures; const resp: *Response = switch (req.data) { .server_handler => |*saved| brk: { const resp = saved.response.TCP; saved.deinit(); break :brk resp; }, .js_payload => |resp| resp, }; resp.corked(sendSerializedFailures, .{ dev, resp, dev.bundling_failures.keys(), .bundler, }); } return; } // TODO: improve this visual feedback if (dev.bundling_failures.count() == 0) { if (current_bundle.had_reload_event) { const clear_terminal = !debug.isVisible(); if (clear_terminal) { Output.disableBuffering(); Output.resetTerminalAll(); Output.enableBuffering(); } dev.bundles_since_last_error += 1; if 
(dev.bundles_since_last_error > 1) {
                Output.prettyError("[x{d}] ", .{dev.bundles_since_last_error});
            }
        } else {
            dev.bundles_since_last_error = 0;
        }
        Output.prettyError("{s} in {d}ms", .{
            if (current_bundle.had_reload_event) "Reloaded" else "Bundled route",
            @divFloor(current_bundle.timer.read(), std.time.ns_per_ms),
        });
        // Compute a file name to display
        const file_name: ?[]const u8, const total_count: usize = if (current_bundle.had_reload_event)
            .{ null, 0 }
        else first_route_file_name: {
            const opaque_id = dev.router.routePtr(
                dev.routeBundlePtr(dev.current_bundle_requests.items[0].route_bundle_index)
                    .route,
            ).file_page.unwrap() orelse break :first_route_file_name .{ null, 0 };
            const server_index = fromOpaqueFileId(.server, opaque_id);
            break :first_route_file_name .{
                dev.relativePath(dev.server_graph.bundled_files.keys()[server_index.get()]),
                0,
            };
        };
        if (file_name) |name| {
            Output.prettyError(": {s}", .{name});
            if (total_count > 1) {
                Output.prettyError(" + {d} more", .{total_count - 1});
            }
        }
        Output.prettyError("\n", .{});
        Output.flush();
    }
    // Release the lock because the underlying handler may acquire one.
    dev.graph_safety_lock.unlock();
    defer dev.graph_safety_lock.lock();
    // Resume every request that was waiting on this bundle.
    for (dev.current_bundle_requests.items) |req| {
        const rb = dev.routeBundlePtr(req.route_bundle_index);
        rb.server_state = .loaded;
        switch (req.data) {
            .server_handler => |saved| dev.onRequestWithBundle(req.route_bundle_index, .{ .saved = saved }, saved.response.TCP),
            .js_payload => |resp| dev.onJsRequestWithBundle(req.route_bundle_index, resp),
        }
    }
}

/// Clears the finished bundle's state and, if a watch event or more requests
/// arrived while it was running, immediately kicks off the next bundle.
fn startNextBundleIfPresent(dev: *DevServer) void {
    // Clear the current bundle
    dev.current_bundle = null;
    dev.log.clearAndFree();
    dev.current_bundle_requests.clearRetainingCapacity();
    dev.emitVisualizerMessageIfNeeded() catch {};
    // If there were pending requests, begin another bundle.
    if (dev.next_bundle.reload_event != null or dev.next_bundle.requests.items.len > 0) {
        var sfb = std.heap.stackFallback(4096, bun.default_allocator);
        const temp_alloc = sfb.get();
        var entry_points: EntryPointList = EntryPointList.empty;
        defer entry_points.deinit(temp_alloc);
        if (dev.next_bundle.reload_event) |event| {
            event.processFileList(dev, &entry_points, temp_alloc);
            // Drain a possible second queued watch event before recycling.
            if (dev.watcher_atomics.recycleEventFromDevServer(event)) |second| {
                second.processFileList(dev, &entry_points, temp_alloc);
                dev.watcher_atomics.recycleSecondEventFromDevServer(second);
            }
        }
        for (dev.next_bundle.route_queue.keys()) |route_bundle_index| {
            const rb = dev.routeBundlePtr(route_bundle_index);
            rb.server_state = .bundling;
            dev.appendRouteEntryPointsIfNotStale(&entry_points, temp_alloc, rb.route) catch bun.outOfMemory();
        }
        dev.startAsyncBundle(
            entry_points,
            dev.next_bundle.reload_event != null,
            std.time.Timer.start() catch @panic("timers unsupported"),
        ) catch bun.outOfMemory();
        dev.next_bundle.route_queue.clearRetainingCapacity();
        dev.next_bundle.reload_event = null;
    }
}

/// Stores bundled CSS for `abs_path`, keyed by the path's hash, freeing any
/// previous code for the same file. Returns the map index of the entry.
/// Takes ownership of `code` (must be allocated by `dev.allocator`).
fn insertOrUpdateCssAsset(dev: *DevServer, abs_path: []const u8, code: []const u8) !u31 {
    const path_hash = bun.hash(abs_path);
    const gop = try dev.css_files.getOrPut(dev.allocator, path_hash);
    if (gop.found_existing) {
        dev.allocator.free(gop.value_ptr.*);
    }
    gop.value_ptr.* = code;
    return @intCast(gop.index);
}

pub fn handleParseTaskFailure(
    dev: *DevServer,
    err: anyerror,
    graph: bake.Graph,
    abs_path: []const u8,
    log: *Log,
) bun.OOM!void {
    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();
    if (err == error.FileNotFound) {
        // Special-case files being deleted. Note that if a
        // file never existed, resolution would fail first.
        //
        // TODO: this should walk up the graph one level, and queue all of these
        // files for re-bundling if they aren't already in the BundleV2 graph.
        switch (graph) {
            .server, .ssr => try dev.server_graph.onFileDeleted(abs_path, log),
            .client => try dev.client_graph.onFileDeleted(abs_path, log),
        }
    } else {
        Output.prettyErrorln("Error{s} while bundling \"{s}\":", .{
            if (log.errors +| log.warnings != 1) "s" else "",
            dev.relativePath(abs_path),
        });
        log.print(Output.errorWriterBuffered()) catch {};
        Output.flush();
        // Do not index css errors
        if (!bun.strings.hasSuffixComptime(abs_path, ".css")) {
            switch (graph) {
                .server => try dev.server_graph.insertFailure(abs_path, log, false),
                .ssr => try dev.server_graph.insertFailure(abs_path, log, true),
                .client => try dev.client_graph.insertFailure(abs_path, log, false),
            }
        }
    }
}

const CacheEntry = struct {
    kind: FileKind,
};

/// Returns the cached file kind when `path` is present and not stale in the
/// graph for `side`; `null` means the file must be (re-)bundled.
pub fn isFileCached(dev: *DevServer, path: []const u8, side: bake.Graph) ?CacheEntry {
    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();
    switch (side) {
        inline else => |side_comptime| {
            const g = switch (side_comptime) {
                .client => &dev.client_graph,
                .server => &dev.server_graph,
                .ssr => &dev.server_graph,
            };
            const index = g.bundled_files.getIndex(path) orelse return null; // non-existent files are considered stale
            if (!g.stale_files.isSet(index)) {
                return .{ .kind = g.bundled_files.values()[index].fileKind() };
            }
            return null;
        },
    }
}

/// Appends the file behind an opaque router id to `entry_points`, but only
/// when that file is currently marked stale in the graph for `side`.
/// Accepts either `OpaqueFileId` or `OpaqueFileId.Optional`.
fn appendOpaqueEntryPoint(
    dev: *DevServer,
    file_names: [][]const u8,
    entry_points: *EntryPointList,
    alloc: Allocator,
    comptime side: bake.Side,
    optional_id: anytype,
) !void {
    const file = switch (@TypeOf(optional_id)) {
        OpaqueFileId.Optional => optional_id.unwrap() orelse return,
        OpaqueFileId => optional_id,
        else => @compileError("invalid type here"),
    };
    const file_index = fromOpaqueFileId(side, file);
    if (switch (side) {
        .server => dev.server_graph.stale_files.isSet(file_index.get()),
        .client => dev.client_graph.stale_files.isSet(file_index.get()),
    }) {
        try entry_points.appendJs(alloc, file_names[file_index.get()], side.graph());
    }
}

pub fn routeBundlePtr(dev: *DevServer, idx: RouteBundle.Index)
*RouteBundle { return &dev.route_bundles.items[idx.get()]; } fn onRequest(dev: *DevServer, req: *uws.Request, resp: *Response) void { var params: FrameworkRouter.MatchedParams = undefined; if (dev.router.matchSlow(req.url(), ¶ms)) |route_index| { dev.ensureRouteIsBundled(route_index, .server_handler, req, resp) catch bun.outOfMemory(); return; } dev.server.?.onRequest(req, resp); sendBuiltInNotFound(resp); } fn getOrPutRouteBundle(dev: *DevServer, route: Route.Index) !RouteBundle.Index { if (dev.router.routePtr(route).bundle.unwrap()) |bundle_index| return bundle_index; const full_pattern = full_pattern: { var buf = bake.PatternBuffer.empty; var current: *Route = dev.router.routePtr(route); // This loop is done to avoid prepending `/` at the root // if there is more than one component. buf.prependPart(current.part); if (current.parent.unwrap()) |first| { current = dev.router.routePtr(first); while (current.parent.unwrap()) |next| { buf.prependPart(current.part); current = dev.router.routePtr(next); } } break :full_pattern try dev.allocator.dupe(u8, buf.slice()); }; errdefer dev.allocator.free(full_pattern); try dev.route_bundles.append(dev.allocator, .{ .route = route, .server_state = .unqueued, .full_pattern = full_pattern, .client_bundle = null, .evaluate_failure = null, .cached_module_list = .{}, .cached_client_bundle_url = .{}, .cached_css_file_array = .{}, .active_viewers = 0, }); const bundle_index = RouteBundle.Index.init(@intCast(dev.route_bundles.items.len - 1)); dev.router.routePtr(route).bundle = bundle_index.toOptional(); return bundle_index; } fn sendTextFile(code: []const u8, content_type: []const u8, resp: *Response) void { if (code.len == 0) { resp.writeStatus("202 No Content"); resp.writeHeaderInt("Content-Length", 0); resp.end("", true); return; } resp.writeStatus("200 OK"); resp.writeHeader("Content-Type", content_type); resp.end(code, true); // TODO: You should never call res.end(huge buffer) } const ErrorPageKind = enum { /// Modules failed to 
bundle bundler, /// Modules failed to evaluate evaluation, /// Request handler threw runtime, }; fn sendSerializedFailures( dev: *DevServer, resp: *Response, failures: []const SerializedFailure, kind: ErrorPageKind, ) void { resp.writeStatus("500 Internal Server Error"); resp.writeHeader("Content-Type", MimeType.html.value); // TODO: what to do about return values here? _ = resp.write(switch (kind) { inline else => |k| std.fmt.comptimePrint( \\ \\ \\ \\ \\ \\Bun - {[page_title]s} \\ \\ \\ \\ \\"; if (Environment.codegen_embed) { _ = resp.end(pre ++ @embedFile("bake-codegen/bake.error.js") ++ post, false); } else { _ = resp.write(pre); _ = resp.write(bun.runtimeEmbedFile(.codegen_eager, "bake.error.js")); _ = resp.end(post, false); } } fn sendBuiltInNotFound(resp: *Response) void { const message = "404 Not Found"; resp.writeStatus("404 Not Found"); resp.end(message, true); } const FileKind = enum(u2) { /// Files that failed to bundle or do not exist on disk will appear in the /// graph as "unknown". unknown, js, css, asset, }; /// The paradigm of Bake's incremental state is to store a separate list of files /// than the Graph in bundle_v2. When watch events happen, the bundler is run on /// the changed files, excluding non-stale files via `isFileStale`. /// /// Upon bundle completion, both `client_graph` and `server_graph` have their /// `receiveChunk` methods called with all new chunks, counting the total length /// needed. A call to `takeBundle` joins all of the chunks, resulting in the /// code to send to client or evaluate on the server. /// /// Then, `processChunkDependencies` is called on each chunk to update the /// list of imports. When a change in imports is detected, the dependencies /// are updated accordingly. /// /// Since all routes share the two graphs, bundling a new route that shared /// a module from a previously bundled route will perform the same exclusion /// behavior that rebuilds use. 
This also ensures that two routes on the server /// do not emit duplicate dependencies. By tracing `imports` on each file in /// the module graph recursively, the full bundle for any given route can /// be re-materialized (required when pressing Cmd+R after any client update) pub fn IncrementalGraph(side: bake.Side) type { return struct { // Unless otherwise mentioned, all data structures use DevServer's allocator. /// Key contents are owned by `default_allocator` bundled_files: bun.StringArrayHashMapUnmanaged(File), /// Track bools for files which are "stale", meaning they should be /// re-bundled before being used. Resizing this is usually deferred /// until after a bundle, since resizing the bit-set requires an /// exact size, instead of the log approach that dynamic arrays use. stale_files: DynamicBitSetUnmanaged, /// Start of the 'dependencies' linked list. These are the other files /// that import used by this file. Walk this list to discover what /// files are to be reloaded when something changes. first_dep: ArrayListUnmanaged(EdgeIndex.Optional), /// Start of the 'imports' linked list. These are the files that this /// file imports. first_import: ArrayListUnmanaged(EdgeIndex.Optional), /// `File` objects act as nodes in a directional many-to-many graph, /// where edges represent the imports between modules. An 'dependency' /// is a file that must to be notified when it `imported` changes. This /// is implemented using an array of `Edge` objects that act as linked /// list nodes; each file stores the first imports and dependency. edges: ArrayListUnmanaged(Edge), /// HMR Dependencies are added and removed very frequently, but indexes /// must remain stable. This free list allows re-use of freed indexes, /// so garbage collection can run less often. 
edges_free_list: ArrayListUnmanaged(EdgeIndex), /// Byte length of every file queued for concatenation current_chunk_len: usize = 0, /// All part contents current_chunk_parts: ArrayListUnmanaged(switch (side) { .client => FileIndex, // These slices do not outlive the bundler, and must // be joined before its arena is deinitialized. .server => []const u8, }), current_css_files: switch (side) { .client => ArrayListUnmanaged([]const u8), .server => void, }, const empty: @This() = .{ .bundled_files = .{}, .stale_files = .{}, .first_dep = .{}, .first_import = .{}, .edges = .{}, .edges_free_list = .{}, .current_chunk_len = 0, .current_chunk_parts = .{}, .current_css_files = switch (side) { .client => .{}, .server => {}, }, }; pub const File = switch (side) { // The server's incremental graph does not store previously bundled // code because there is only one instance of the server. Instead, // it stores which module graphs it is a part of. This makes sure // that recompilation knows what bundler options to use. .server => struct { // TODO: make this packed(u8), i had compiler crashes before /// Is this file built for the Server graph. is_rsc: bool, /// Is this file built for the SSR graph. is_ssr: bool, /// If set, the client graph contains a matching file. /// The server is_client_component_boundary: bool, /// If this file is a route root, the route can be looked up in /// the route list. This also stops dependency propagation. is_route: bool, /// If the file has an error, the failure can be looked up /// in the `.failures` map. failed: bool, /// CSS and Asset files get special handling kind: FileKind, fn stopsDependencyTrace(file: @This()) bool { return file.is_client_component_boundary; } fn fileKind(file: @This()) FileKind { return file.kind; } }, .client => struct { /// Allocated by default_allocator. Access with `.code()` code_ptr: [*]const u8, /// Separated from the pointer to reduce struct size. /// Parser does not support files >4gb anyways. 
code_len: u32, flags: Flags, const Flags = struct { /// If the file has an error, the failure can be looked up /// in the `.failures` map. failed: bool, /// For JS files, this is a component root; the server contains a matching file. /// For CSS files, this is also marked on the stylesheet that is imported from JS. is_hmr_root: bool, /// This is a file is an entry point to the framework. /// Changing this will always cause a full page reload. is_special_framework_file: bool, /// CSS and Asset files get special handling kind: FileKind, }; comptime { assert(@sizeOf(@This()) == @sizeOf(usize) * 2); assert(@alignOf(@This()) == @alignOf([*]u8)); } fn init(code_slice: []const u8, flags: Flags) @This() { return .{ .code_ptr = code_slice.ptr, .code_len = @intCast(code_slice.len), .flags = flags, }; } fn code(file: @This()) []const u8 { return file.code_ptr[0..file.code_len]; } inline fn stopsDependencyTrace(_: @This()) bool { return false; } fn fileKind(file: @This()) FileKind { return file.flags.kind; } }, }; // If this data structure is not clear, see `DirectoryWatchStore.Dep` // for a simpler example. It is more complicated here because this // structure is two-way. 
pub const Edge = struct { /// The file with the `import` statement dependency: FileIndex, /// The file that `dependency` is importing imported: FileIndex, next_import: EdgeIndex.Optional, next_dependency: EdgeIndex.Optional, prev_dependency: EdgeIndex.Optional, }; /// An index into `bundled_files`, `stale_files`, `first_dep`, `first_import` /// Top bits cannot be relied on due to `SerializedFailure.Owner.Packed` pub const FileIndex = bun.GenericIndex(u30, File); pub const react_refresh_index = if (side == .client) FileIndex.init(0); /// An index into `edges` const EdgeIndex = bun.GenericIndex(u32, Edge); fn getFileIndex(g: *@This(), path: []const u8) ?FileIndex { return if (g.bundled_files.getIndex(path)) |i| FileIndex.init(@intCast(i)) else null; } /// Tracks a bundled code chunk for cross-bundle chunks, /// ensuring it has an entry in `bundled_files`. /// /// For client, takes ownership of the code slice (must be default allocated) /// /// For server, the code is temporarily kept in the /// `current_chunk_parts` array, where it must live until /// takeBundle is called. Then it can be freed. 
pub fn receiveChunk(
    g: *@This(),
    ctx: *HotUpdateContext,
    index: bun.JSAst.Index,
    code: []const u8,
    kind: FileKind,
    is_ssr_graph: bool,
) !void {
    const dev = g.owner();
    dev.graph_safety_lock.assertLocked();

    const abs_path = ctx.sources[index.get()].path.text;

    if (Environment.allow_assert) {
        switch (kind) {
            // CSS chunks carry no JS code; content is stored elsewhere.
            .css => bun.assert(code.len == 0),
            .js => if (bun.strings.isAllWhitespace(code)) {
                // Should at least contain the function wrapper
                bun.Output.panic("Empty chunk is impossible: {s} {s}", .{
                    abs_path,
                    switch (side) {
                        .client => "client",
                        .server => if (is_ssr_graph) "ssr" else "server",
                    },
                });
            },
            else => Output.panic("unexpected file kind: .{s}", .{@tagName(kind)}),
        }
    }

    g.current_chunk_len += code.len;

    // Dump to filesystem if enabled
    if (bun.FeatureFlags.bake_debugging_features) if (dev.dump_dir) |dump_dir| {
        const cwd = dev.root;
        var a: bun.PathBuffer = undefined;
        var b: [bun.MAX_PATH_BYTES * 2]u8 = undefined;
        const rel_path = bun.path.relativeBufZ(&a, cwd, abs_path);
        // Escape "../" so the dump path stays inside the dump dir.
        const size = std.mem.replacementSize(u8, rel_path, "../", "_.._/");
        _ = std.mem.replace(u8, rel_path, "../", "_.._/", &b);
        const rel_path_escaped = b[0..size];
        dumpBundle(dump_dir, switch (side) {
            .client => .client,
            .server => if (is_ssr_graph) .ssr else .server,
        }, rel_path_escaped, code, true) catch |err| {
            bun.handleErrorReturnTrace(err, @errorReturnTrace());
            Output.warn("Could not dump bundle: {}", .{err});
        };
    };

    const gop = try g.bundled_files.getOrPut(dev.allocator, abs_path);
    const file_index = FileIndex.init(@intCast(gop.index));

    if (!gop.found_existing) {
        // The map key must outlive `abs_path`, which is owned by the bundler.
        gop.key_ptr.* = try bun.default_allocator.dupe(u8, abs_path);
        try g.first_dep.append(dev.allocator, .none);
        try g.first_import.append(dev.allocator, .none);
    }

    // Receiving fresh output clears the stale bit, when one exists.
    if (g.stale_files.bit_length > gop.index) {
        g.stale_files.unset(gop.index);
    }

    ctx.getCachedIndex(side, index).* = FileIndex.init(@intCast(gop.index));

    switch (side) {
        .client => {
            if (gop.found_existing) {
                // Free the previous chunk; CSS entries store a URL key, not code.
                if (kind == .js) bun.default_allocator.free(gop.value_ptr.code());

                // If the file previously failed, the rebuild resolves it.
                if (gop.value_ptr.flags.failed) {
                    const kv = dev.bundling_failures.fetchSwapRemoveAdapted(
                        SerializedFailure.Owner{ .client = file_index },
                        SerializedFailure.ArrayHashAdapter{},
                    ) orelse
                        Output.panic("Missing SerializedFailure in IncrementalGraph", .{});
                    try dev.incremental_result.failures_removed.append(
                        dev.allocator,
                        kv.key,
                    );
                }
            }
            const flags: File.Flags = .{
                .failed = false,
                .is_hmr_root = ctx.server_to_client_bitset.isSet(index.get()),
                .is_special_framework_file = false,
                .kind = kind,
            };
            if (kind == .css) {
                if (!gop.found_existing or gop.value_ptr.code_len == 0) {
                    // Store the generated `/_bun/css/<hash>.css` URL as the "code".
                    gop.value_ptr.* = File.init(try std.fmt.allocPrint(
                        dev.allocator,
                        css_prefix ++ "/{}.css",
                        .{std.fmt.fmtSliceHexLower(std.mem.asBytes(&bun.hash(abs_path)))},
                    ), flags);
                } else {
                    // The key is just the file-path
                    gop.value_ptr.flags = flags;
                }
            } else {
                gop.value_ptr.* = File.init(code, flags);
            }
            try g.current_chunk_parts.append(dev.allocator, file_index);
        },
        .server => {
            if (!gop.found_existing) {
                const client_component_boundary = ctx.server_to_client_bitset.isSet(index.get());
                gop.value_ptr.* = .{
                    .is_rsc = !is_ssr_graph,
                    .is_ssr = is_ssr_graph,
                    .is_route = false,
                    .is_client_component_boundary = client_component_boundary,
                    .failed = false,
                    .kind = kind,
                };
                if (client_component_boundary) {
                    try dev.incremental_result.client_components_added.append(dev.allocator, file_index);
                }
            } else {
                gop.value_ptr.kind = kind;
                // A file may participate in both graphs; accumulate flags.
                if (is_ssr_graph) {
                    gop.value_ptr.is_ssr = true;
                } else {
                    gop.value_ptr.is_rsc = true;
                }
                if (ctx.server_to_client_bitset.isSet(index.get())) {
                    gop.value_ptr.is_client_component_boundary = true;
                    try dev.incremental_result.client_components_added.append(dev.allocator, file_index);
                } else if (gop.value_ptr.is_client_component_boundary) {
                    // No longer a boundary: delete the client-side twin.
                    const client_graph = &g.owner().client_graph;
                    const client_index = client_graph.getFileIndex(gop.key_ptr.*) orelse
                        Output.panic("Client graph's SCB was already deleted", .{});
                    client_graph.disconnectAndDeleteFile(client_index);
                    gop.value_ptr.is_client_component_boundary = false;
                    try dev.incremental_result.client_components_removed.append(dev.allocator, file_index);
                }
                if (gop.value_ptr.failed) {
                    gop.value_ptr.failed = false;
                    const kv = dev.bundling_failures.fetchSwapRemoveAdapted(
                        SerializedFailure.Owner{ .server = file_index },
                        SerializedFailure.ArrayHashAdapter{},
                    ) orelse
                        Output.panic("Missing failure in IncrementalGraph", .{});
                    try dev.incremental_result.failures_removed.append(
                        dev.allocator,
                        kv.key,
                    );
                }
            }
            // Server code lives in the bundler arena; see `receiveChunk` docs.
            try g.current_chunk_parts.append(dev.allocator, code);
        },
    }
}

/// Scratch entry used by `processChunkDependencies` to diff the
/// previous import list against the freshly bundled one.
const TempLookup = extern struct {
    edge_index: EdgeIndex,
    seen: bool,

    const HashTable = AutoArrayHashMapUnmanaged(FileIndex, TempLookup);
};

/// Second pass of IncrementalGraph indexing
/// - Updates dependency information for each file
/// - Resolves what the HMR roots are
pub fn processChunkDependencies(
    g: *@This(),
    ctx: *HotUpdateContext,
    bundle_graph_index: bun.JSAst.Index,
    temp_alloc: Allocator,
) bun.OOM!void {
    const log = bun.Output.scoped(.processChunkDependencies, false);
    const file_index: FileIndex = ctx.getCachedIndex(side, bundle_graph_index).*;
    log("index id={d} {}:", .{
        file_index.get(),
        bun.fmt.quote(g.bundled_files.keys()[file_index.get()]),
    });
    var quick_lookup: TempLookup.HashTable = .{};
    defer quick_lookup.deinit(temp_alloc);

    // Snapshot the existing import list so re-bundled imports can be
    // matched against it; unseen entries are removed afterwards.
    {
        var it: ?EdgeIndex = g.first_import.items[file_index.get()].unwrap();
        while (it) |edge_index| {
            const dep = g.edges.items[edge_index.get()];
            it = dep.next_import.unwrap();
            assert(dep.dependency == file_index);
            try quick_lookup.putNoClobber(temp_alloc, dep.imported, .{
                .seen = false,
                .edge_index = edge_index,
            });
        }
    }

    var new_imports: EdgeIndex.Optional = .none;
    defer g.first_import.items[file_index.get()] = new_imports;

    if (side == .server) {
        if (ctx.server_seen_bit_set.isSet(file_index.get())) return;

        const file = &g.bundled_files.values()[file_index.get()];
        // Process both files in the server-components graph at the same
        // time.
If they were done separately, the second would detach // the edges the first added. if (file.is_rsc and file.is_ssr) { // The non-ssr file is always first. // const ssr_index = ctx.scbs.getSSRIndex(bundle_graph_index.get()) orelse { // @panic("Unexpected missing server-component-boundary entry"); // }; // try g.processChunkImportRecords(ctx, &quick_lookup, &new_imports, file_index, bun.JSAst.Index.init(ssr_index)); } } try g.processChunkImportRecords(ctx, &quick_lookup, &new_imports, file_index, bundle_graph_index); // '.seen = false' means an import was removed and should be freed for (quick_lookup.values()) |val| { if (!val.seen) { g.owner().incremental_result.had_adjusted_edges = true; // Unlink from dependency list. At this point the edge is // already detached from the import list. g.disconnectEdgeFromDependencyList(val.edge_index); // With no references to this edge, it can be freed g.freeEdge(val.edge_index); } } if (side == .server) { // Follow this file to the route to mark it as stale. 
try g.traceDependencies(file_index, ctx.gts, .stop_at_boundary); } else { // TODO: Follow this file to the HMR root (info to determine is currently not stored) // without this, changing a client-only file will not mark the route's client bundle as stale } } fn disconnectEdgeFromDependencyList(g: *@This(), edge_index: EdgeIndex) void { const edge = &g.edges.items[edge_index.get()]; igLog("detach edge={d} | id={d} {} -> id={d} {}", .{ edge_index.get(), edge.dependency.get(), bun.fmt.quote(g.bundled_files.keys()[edge.dependency.get()]), edge.imported.get(), bun.fmt.quote(g.bundled_files.keys()[edge.imported.get()]), }); if (edge.prev_dependency.unwrap()) |prev| { const prev_dependency = &g.edges.items[prev.get()]; prev_dependency.next_dependency = edge.next_dependency; } else { assert(g.first_dep.items[edge.imported.get()].unwrap() == edge_index); g.first_dep.items[edge.imported.get()] = .none; } if (edge.next_dependency.unwrap()) |next| { const next_dependency = &g.edges.items[next.get()]; next_dependency.prev_dependency = edge.prev_dependency; } } fn processChunkImportRecords( g: *@This(), ctx: *HotUpdateContext, quick_lookup: *TempLookup.HashTable, new_imports: *EdgeIndex.Optional, file_index: FileIndex, index: bun.JSAst.Index, ) !void { const log = bun.Output.scoped(.processChunkDependencies, false); for (ctx.import_records[index.get()].slice()) |import_record| { if (!import_record.source_index.isRuntime()) try_index_record: { const imported_file_index = if (import_record.source_index.isInvalid()) if (std.fs.path.isAbsolute(import_record.path.text)) FileIndex.init(@intCast( g.bundled_files.getIndex(import_record.path.text) orelse break :try_index_record, )) else break :try_index_record else ctx.getCachedIndex(side, import_record.source_index).*; if (quick_lookup.getPtr(imported_file_index)) |lookup| { // If the edge has already been seen, it will be skipped // to ensure duplicate edges never exist. 
if (lookup.seen) continue; lookup.seen = true; const dep = &g.edges.items[lookup.edge_index.get()]; dep.next_import = new_imports.*; new_imports.* = lookup.edge_index.toOptional(); } else { // A new edge is needed to represent the dependency and import. const first_dep = &g.first_dep.items[imported_file_index.get()]; const edge = try g.newEdge(.{ .next_import = new_imports.*, .next_dependency = first_dep.*, .prev_dependency = .none, .imported = imported_file_index, .dependency = file_index, }); if (first_dep.*.unwrap()) |dep| { g.edges.items[dep.get()].prev_dependency = edge.toOptional(); } new_imports.* = edge.toOptional(); first_dep.* = edge.toOptional(); g.owner().incremental_result.had_adjusted_edges = true; log("attach edge={d} | id={d} {} -> id={d} {}", .{ edge.get(), file_index.get(), bun.fmt.quote(g.bundled_files.keys()[file_index.get()]), imported_file_index.get(), bun.fmt.quote(g.bundled_files.keys()[imported_file_index.get()]), }); } } } } const TraceDependencyKind = enum { stop_at_boundary, no_stop, css_to_route, }; fn traceDependencies(g: *@This(), file_index: FileIndex, gts: *GraphTraceState, trace_kind: TraceDependencyKind) !void { g.owner().graph_safety_lock.assertLocked(); if (Environment.enable_logs) { igLog("traceDependencies(.{s}, {}{s})", .{ @tagName(side), bun.fmt.quote(g.bundled_files.keys()[file_index.get()]), if (gts.bits(side).isSet(file_index.get())) " [already visited]" else "", }); } if (gts.bits(side).isSet(file_index.get())) return; gts.bits(side).set(file_index.get()); const file = g.bundled_files.values()[file_index.get()]; switch (side) { .server => { const dev = g.owner(); if (file.is_route) { const route_index = dev.route_lookup.get(file_index) orelse Output.panic("Route not in lookup index: {d} {}", .{ file_index.get(), bun.fmt.quote(g.bundled_files.keys()[file_index.get()]) }); igLog("\\<- Route", .{}); try dev.incremental_result.routes_affected.append(dev.allocator, route_index); } if (file.is_client_component_boundary) { try 
                dev.incremental_result.client_components_affected.append(dev.allocator, file_index);
            }
        },
        .client => {
            // HMR roots (and CSS during a css_to_route trace) jump over
            // to the server graph, where routes can be found.
            if (file.flags.is_hmr_root or (file.flags.kind == .css and trace_kind == .css_to_route)) {
                const dev = g.owner();
                const key = g.bundled_files.keys()[file_index.get()];
                const index = dev.server_graph.getFileIndex(key) orelse
                    Output.panic("Server Incremental Graph is missing component for {}", .{bun.fmt.quote(key)});
                try dev.server_graph.traceDependencies(index, gts, trace_kind);
            }
        },
    }

    // Certain files do not propagate updates to dependencies.
    // This is how updating a client component doesn't cause
    // a server-side reload.
    if (trace_kind == .stop_at_boundary) {
        if (file.stopsDependencyTrace()) {
            igLog("\\<- this file stops propagation", .{});
            return;
        }
    }

    // Recurse
    var it: ?EdgeIndex = g.first_dep.items[file_index.get()].unwrap();
    while (it) |dep_index| {
        const edge = g.edges.items[dep_index.get()];
        it = edge.next_dependency.unwrap();
        try g.traceDependencies(edge.dependency, gts, trace_kind);
    }
}

/// Walks "what do I import" edges starting at `file_index`, collecting
/// CSS payloads and/or client module chunks according to `goal`.
fn traceImports(g: *@This(), file_index: FileIndex, gts: *GraphTraceState, goal: TraceImportGoal) !void {
    g.owner().graph_safety_lock.assertLocked();

    if (Environment.enable_logs) {
        igLog("traceImports(.{s}, {}{s})", .{
            @tagName(side),
            bun.fmt.quote(g.bundled_files.keys()[file_index.get()]),
            if (gts.bits(side).isSet(file_index.get())) " [already visited]" else "",
        });
    }

    // Visited-set check makes the recursion terminate on cycles.
    if (gts.bits(side).isSet(file_index.get())) return;
    gts.bits(side).set(file_index.get());

    const file = g.bundled_files.values()[file_index.get()];

    switch (side) {
        .server => {
            // Boundaries and CSS continue the trace in the client graph.
            if (file.is_client_component_boundary or file.kind == .css) {
                const dev = g.owner();
                const key = g.bundled_files.keys()[file_index.get()];
                const index = dev.client_graph.getFileIndex(key) orelse
                    Output.panic("Client Incremental Graph is missing component for {}", .{bun.fmt.quote(key)});
                try dev.client_graph.traceImports(index, gts, goal);
            }
        },
        .client => {
            assert(!g.stale_files.isSet(file_index.get())); // should not be left stale
            if (file.flags.kind == .css) {
                if (goal.find_css) {
                    try g.current_css_files.append(g.owner().allocator, file.code());
                }

                // Do not count css files as a client module
                // and also do not trace its dependencies.
                //
                // The server version of this code does not need to
                // early return, since server css files never have
                // imports.
                return;
            }
            if (goal.find_client_modules) {
                try g.current_chunk_parts.append(g.owner().allocator, file_index);
                g.current_chunk_len += file.code_len;
            }
        },
    }

    // Recurse
    var it: ?EdgeIndex = g.first_import.items[file_index.get()].unwrap();
    while (it) |dep_index| {
        const edge = g.edges.items[dep_index.get()];
        it = edge.next_import.unwrap();
        try g.traceImports(edge.imported, gts, goal);
    }
}

/// Never takes ownership of `abs_path`
/// Marks a chunk but without any content. Used to track dependencies to files that don't exist.
pub fn insertStale(g: *@This(), abs_path: []const u8, is_ssr_graph: bool) bun.OOM!FileIndex {
    return g.insertStaleExtra(abs_path, is_ssr_graph, false);
}

/// Like `insertStale`, optionally flagging the file as a route root.
pub fn insertStaleExtra(g: *@This(), abs_path: []const u8, is_ssr_graph: bool, is_route: bool) bun.OOM!FileIndex {
    g.owner().graph_safety_lock.assertLocked();

    debug.log("Insert stale: {s}", .{abs_path});
    const gop = try g.bundled_files.getOrPut(g.owner().allocator, abs_path);
    const file_index = FileIndex.init(@intCast(gop.index));

    if (!gop.found_existing) {
        // The map key must outlive `abs_path`; clone into default_allocator.
        gop.key_ptr.* = try bun.default_allocator.dupe(u8, abs_path);
        try g.first_dep.append(g.owner().allocator, .none);
        try g.first_import.append(g.owner().allocator, .none);
    } else {
        if (side == .server) {
            if (is_route) gop.value_ptr.*.is_route = is_route;
        }
    }

    if (g.stale_files.bit_length > gop.index) {
        g.stale_files.set(gop.index);
    }

    switch (side) {
        .client => {
            // NOTE(review): this overwrites the File even when it already
            // existed, discarding the old code_ptr — presumably fine since
            // the file is marked stale and will be rebundled; confirm the
            // old client chunk is not leaked here.
            gop.value_ptr.* = File.init("", .{
                .failed = false,
                .is_hmr_root = false,
                .is_special_framework_file = false,
                .kind = .unknown,
            });
        },
        .server => {
            if (!gop.found_existing) {
                gop.value_ptr.* = .{
                    .is_rsc = !is_ssr_graph,
                    .is_ssr = is_ssr_graph,
                    .is_route = is_route,
                    .is_client_component_boundary = false,
                    .failed = false,
                    .kind = .unknown,
                };
            } else if (is_ssr_graph) {
                // Existing entry: accumulate graph membership flags.
                gop.value_ptr.is_ssr = true;
            } else {
                gop.value_ptr.is_rsc = true;
            }
        },
    }

    return file_index;
}

/// Server CSS files are just used to be targets for graph traversal.
/// Its content lives only on the client.
pub fn insertCssFileOnServer(g: *@This(), ctx: *HotUpdateContext, index: bun.JSAst.Index, abs_path: []const u8) bun.OOM!void {
    g.owner().graph_safety_lock.assertLocked();

    debug.log("Insert stale: {s}", .{abs_path});
    const gop = try g.bundled_files.getOrPut(g.owner().allocator, abs_path);
    const file_index = FileIndex.init(@intCast(gop.index));

    if (!gop.found_existing) {
        // The map key must outlive `abs_path`; clone into default_allocator.
        gop.key_ptr.* = try bun.default_allocator.dupe(u8, abs_path);
        try g.first_dep.append(g.owner().allocator, .none);
        try g.first_import.append(g.owner().allocator, .none);
    }

    switch (side) {
        .client => @compileError("not implemented: use receiveChunk"),
        .server => {
            gop.value_ptr.* = .{
                .is_rsc = false,
                .is_ssr = false,
                .is_route = false,
                .is_client_component_boundary = false,
                .failed = false,
                .kind = .css,
            };
        },
    }

    ctx.getCachedIndex(.server, index).* = file_index;
}

/// Records a bundling failure for `abs_path`, marking the file stale and
/// (re)registering its serialized failure in `dev.bundling_failures`.
pub fn insertFailure(
    g: *@This(),
    abs_path: []const u8,
    log: *const Log,
    is_ssr_graph: bool,
) bun.OOM!void {
    g.owner().graph_safety_lock.assertLocked();

    debug.log("Insert stale: {s}", .{abs_path});
    const gop = try g.bundled_files.getOrPut(g.owner().allocator, abs_path);
    const file_index = FileIndex.init(@intCast(gop.index));

    if (!gop.found_existing) {
        // The map key must outlive `abs_path`; clone into default_allocator.
        gop.key_ptr.* = try bun.default_allocator.dupe(u8, abs_path);
        try g.first_dep.append(g.owner().allocator, .none);
        try g.first_import.append(g.owner().allocator, .none);
    }

    try g.ensureStaleBitCapacity(true);
    g.stale_files.set(gop.index);

    switch (side) {
        .client => {
            gop.value_ptr.* = File.init("", .{
                .failed = true,
                .is_hmr_root = false,
                .is_special_framework_file = false,
                .kind = .unknown,
            });
        },
        .server => {
            if (!gop.found_existing) {
                gop.value_ptr.* = .{
                    .is_rsc = !is_ssr_graph,
                    .is_ssr = is_ssr_graph,
                    .is_route = false,
                    .is_client_component_boundary = false,
                    .failed = true,
                    .kind = .unknown,
                };
            } else {
                // Existing entry: accumulate graph membership, mark failed.
                if (is_ssr_graph) {
                    gop.value_ptr.is_ssr = true;
                } else {
                    gop.value_ptr.is_rsc = true;
                }
                gop.value_ptr.failed = true;
            }
        },
    }

    const dev = g.owner();

    const fail_owner: SerializedFailure.Owner = switch (side) {
        .server => .{ .server = file_index },
        .client => .{ .client = file_index },
    };
    const failure = try SerializedFailure.initFromLog(
        fail_owner,
        dev.relativePath(abs_path),
        log.msgs.items,
    );
    const fail_gop = try dev.bundling_failures.getOrPut(dev.allocator, failure);
    try dev.incremental_result.failures_added.append(dev.allocator, failure);
    if (fail_gop.found_existing) {
        // Replace the old serialized failure; the removed one is kept
        // alive in `failures_removed` until the incremental update ends.
        try dev.incremental_result.failures_removed.append(dev.allocator, fail_gop.key_ptr.*);
        fail_gop.key_ptr.* = failure;
    }
}

/// Called when the watcher reports a deletion. Files with dependents are
/// kept (as failures) so importers can still reference them.
pub fn onFileDeleted(g: *@This(), abs_path: []const u8, log: *const Log) !void {
    const index = g.getFileIndex(abs_path) orelse return;
    if (g.first_dep.items[index.get()] == .none) {
        g.disconnectAndDeleteFile(index);
    } else {
        // Keep the file so others may refer to it, but mark as failed.
        try g.insertFailure(abs_path, log, false);
    }
}

/// Grows the stale bitset to cover all indexed files, rounding the
/// capacity up so repeated single-file inserts do not keep reallocating.
pub fn ensureStaleBitCapacity(g: *@This(), are_new_files_stale: bool) !void {
    try g.stale_files.resize(
        g.owner().allocator,
        std.mem.alignForward(
            usize,
            @max(g.bundled_files.count(), g.stale_files.bit_length),
            // allocate 8 in 8 usize chunks
            std.mem.byte_size_in_bits * @sizeOf(usize) * 8,
        ),
        are_new_files_stale,
    );
}

/// Marks `paths` as stale and queues the appropriate entry points for
/// rebundling, respecting which graphs each file belongs to.
pub fn invalidate(g: *@This(), paths: []const []const u8, entry_points: *EntryPointList, alloc: Allocator) !void {
    g.owner().graph_safety_lock.assertLocked();
    const values = g.bundled_files.values();
    for (paths) |path| {
        const index = g.bundled_files.getIndex(path) orelse {
            // Cannot enqueue because it's impossible to know what
            // targets to bundle for. Instead, a failing bundle must
            // retrieve the list of files and add them as stale.
            continue;
        };
        g.stale_files.set(index);
        const data = &values[index];
        switch (side) {
            .client => {
                // When re-bundling SCBs, only bundle the server. Otherwise
                // the bundler gets confused and bundles both sides without
                // knowledge of the boundary between them.
                if (data.flags.kind == .css)
                    try entry_points.appendCss(alloc, path)
                else if (!data.flags.is_hmr_root)
                    try entry_points.appendJs(alloc, path, .client);
            },
            .server => {
                // A file may need rebundling for either or both server graphs.
                if (data.is_rsc) try entry_points.appendJs(alloc, path, .server);
                if (data.is_ssr and !data.is_client_component_boundary) try entry_points.appendJs(alloc, path, .ssr);
            },
        }
    }
}

/// Clears per-bundle chunk accumulation state; graph edges are untouched.
fn reset(g: *@This()) void {
    g.owner().graph_safety_lock.assertLocked();
    g.current_chunk_len = 0;
    g.current_chunk_parts.clearRetainingCapacity();
    if (side == .client) g.current_css_files.clearRetainingCapacity();
}

/// Convenience wrapper over `takeBundleToList` that returns an exactly
/// sized allocation owned by `dev.allocator`.
pub fn takeBundle(
    g: *@This(),
    kind: ChunkKind,
    initial_response_entry_point: []const u8,
) ![]const u8 {
    var chunk = std.ArrayList(u8).init(g.owner().allocator);
    try g.takeBundleToList(kind, &chunk, initial_response_entry_point);
    bun.assert(chunk.items.len == chunk.capacity);
    return chunk.items;
}

/// Concatenates the queued chunk parts (plus runtime prelude and
/// metadata footer) into `list`, forming one servable JS payload.
pub fn takeBundleToList(
    g: *@This(),
    kind: ChunkKind,
    list: *std.ArrayList(u8),
    initial_response_entry_point: []const u8,
) !void {
    g.owner().graph_safety_lock.assertLocked();
    // initial bundle needs at least the entry point
    // hot updates shouldn't be emitted if there are no chunks
    assert(g.current_chunk_len > 0);

    const runtime = switch (kind) {
        .initial_response => bun.bake.getHmrRuntime(side),
        .hmr_chunk => "({\n",
    };

    // A small amount of metadata is present at the end of the chunk
    // to inform the HMR runtime some crucial entry-point info. The
    // exact upper bound of this can be calculated, but is not to
    // avoid worrying about windows paths.
    var end_sfa = std.heap.stackFallback(65536, g.owner().allocator);
    var end_list = std.ArrayList(u8).initCapacity(end_sfa.get(), 65536) catch unreachable;
    defer end_list.deinit();
    const end = end: {
        const w = end_list.writer();
        switch (kind) {
            .initial_response => {
                const fw = g.owner().framework;
                try w.writeAll("}, {\n main: ");
                try bun.js_printer.writeJSONString(
                    g.owner().relativePath(initial_response_entry_point),
                    @TypeOf(w),
                    w,
                    .utf8,
                );
                switch (side) {
                    .client => {
                        // Client bundles embed the config hash so a stale
                        // page can detect a server restart.
                        try w.writeAll(",\n version: \"");
                        try w.writeAll(&g.owner().configuration_hash_key);
                        try w.writeAll("\"");
                        if (fw.react_fast_refresh) |rfr| {
                            try w.writeAll(",\n refresh: ");
                            try bun.js_printer.writeJSONString(
                                g.owner().relativePath(rfr.import_source),
                                @TypeOf(w),
                                w,
                                .utf8,
                            );
                        }
                    },
                    .server => {
                        if (fw.server_components) |sc| {
                            if (sc.separate_ssr_graph) {
                                try w.writeAll(",\n separateSSRGraph: true");
                            }
                        }
                    },
                }
                try w.writeAll("\n})");
            },
            .hmr_chunk => {
                try w.writeAll("\n})");
            },
        }
        break :end end_list.items;
    };

    const files = g.bundled_files.values();

    // Reserve exact capacity up front so appends below cannot fail.
    const start = list.items.len;
    if (start == 0)
        try list.ensureTotalCapacityPrecise(g.current_chunk_len + runtime.len + end.len)
    else
        try list.ensureUnusedCapacity(g.current_chunk_len + runtime.len + end.len);
    list.appendSliceAssumeCapacity(runtime);
    for (g.current_chunk_parts.items) |entry| {
        list.appendSliceAssumeCapacity(switch (side) {
            // entry is an index into files
            .client => files[entry.get()].code(),
            // entry is the '[]const u8' itself
            .server => entry,
        });
    }
    list.appendSliceAssumeCapacity(end);

    if (bun.FeatureFlags.bake_debugging_features) if (g.owner().dump_dir) |dump_dir| {
        const rel_path_escaped = "latest_chunk.js";
        dumpBundle(dump_dir, switch (side) {
            .client => .client,
            .server => .server,
        }, rel_path_escaped, list.items[start..], false) catch |err| {
            bun.handleErrorReturnTrace(err, @errorReturnTrace());
            Output.warn("Could not dump bundle: {}", .{err});
        };
    };
}

/// Detaches every outgoing import edge of `file_index` and retires the
/// file's slot (key freed, index left in place; see TODO below).
fn disconnectAndDeleteFile(g: *@This(), file_index: FileIndex) void {
    bun.assert(g.bundled_files.count() > 1); // never remove all files
    bun.assert(g.first_dep.items[file_index.get()] == .none); // must have no dependencies

    // Disconnect all imports
    {
        var it: ?EdgeIndex = g.first_import.items[file_index.get()].unwrap();
        while (it) |edge_index| {
            const dep = g.edges.items[edge_index.get()];
            it = dep.next_import.unwrap();
            assert(dep.dependency == file_index);

            g.disconnectEdgeFromDependencyList(edge_index);
            g.freeEdge(edge_index);

            // TODO: a flag to this function which is queues all
            // direct importers to rebuild themselves, which will
            // display the bundling errors.
        }
    }

    const keys = g.bundled_files.keys();
    g.owner().allocator.free(keys[file_index.get()]);
    keys[file_index.get()] = ""; // cannot be `undefined` as it may be read by hashmap logic

    // TODO: it is infeasible to swapRemove a file since FrameworkRouter
    // contains file indices to the server graph. Instead, `file_index`
    // should go in a free-list for use by new files.
}

/// Allocates an edge, preferring slots recycled by `freeEdge`.
fn newEdge(g: *@This(), edge: Edge) !EdgeIndex {
    if (g.edges_free_list.popOrNull()) |index| {
        g.edges.items[index.get()] = edge;
        return index;
    }

    const index = EdgeIndex.init(@intCast(g.edges.items.len));
    try g.edges.append(g.owner().allocator, edge);
    return index;
}

/// Does nothing besides release the `Edge` for reallocation by `newEdge`
/// Caller must detach the dependency from the linked list it is in.
fn freeEdge(g: *@This(), edge_index: EdgeIndex) void {
    if (Environment.isDebug) {
        // Poison the slot so use-after-free is caught in debug builds.
        g.edges.items[edge_index.get()] = undefined;
    }

    if (edge_index.get() == (g.edges.items.len - 1)) {
        // Last element: shrink instead of adding to the free list.
        g.edges.items.len -= 1;
    } else {
        g.edges_free_list.append(g.owner().allocator, edge_index) catch {
            // Leak an edge object; Ok since it may get cleaned up by
            // the next incremental graph garbage-collection cycle.
        };
    }
}

/// Recovers the DevServer that embeds this graph, relying on the field
/// name being `client_graph` / `server_graph` matching `side`.
pub fn owner(g: *@This()) *DevServer {
    return @alignCast(@fieldParentPtr(@tagName(side) ++ "_graph", g));
}
};
}

/// State populated during bundling and hot updates; see field docs.
const IncrementalResult = struct {
    /// When tracing a file's dependencies via `traceDependencies`, this is
    /// populated with the hit `Route.Index`s. To know what `RouteBundle`s
    /// are affected, the route graph must be traced downwards.
    /// Tracing is used for multiple purposes.
    routes_affected: ArrayListUnmanaged(RouteIndexAndRecurseFlag),

    /// Set to true if any IncrementalGraph edges were added or removed.
    had_adjusted_edges: bool,

    // Following three fields are populated during `receiveChunk`

    /// Components to add to the client manifest
    client_components_added: ArrayListUnmanaged(IncrementalGraph(.server).FileIndex),
    /// Components to add to the client manifest
    client_components_removed: ArrayListUnmanaged(IncrementalGraph(.server).FileIndex),
    /// This list acts as a free list. The contents of these slices must remain
    /// valid; they have to be so the affected routes can be cleared of the
    /// failures and potentially be marked valid. At the end of an
    /// incremental update, the slices are freed.
    failures_removed: ArrayListUnmanaged(SerializedFailure),

    /// Client boundaries that have been added or modified. At the end of a hot
    /// update, these are traced to their route to mark the bundles as stale (to
    /// be generated on Cmd+R)
    ///
    /// Populated during `traceDependencies`
    client_components_affected: ArrayListUnmanaged(IncrementalGraph(.server).FileIndex),

    /// The list of failures which will have to be traced to their route. Such
    /// tracing is deferred until the second pass of finalizeBundle as the
    /// dependency graph may not fully exist at the time the failure is indexed.
    ///
    /// Populated from within the bundler via `handleParseTaskFailure`
    failures_added: ArrayListUnmanaged(SerializedFailure),

    /// Removing files clobbers indices, so removing anything is deferred.
// TODO: remove delete_client_files_later: ArrayListUnmanaged(IncrementalGraph(.client).FileIndex), const empty: IncrementalResult = .{ .routes_affected = .{}, .had_adjusted_edges = false, .failures_removed = .{}, .failures_added = .{}, .client_components_added = .{}, .client_components_removed = .{}, .client_components_affected = .{}, .delete_client_files_later = .{}, }; fn reset(result: *IncrementalResult) void { result.routes_affected.clearRetainingCapacity(); assert(result.failures_removed.items.len == 0); result.failures_added.clearRetainingCapacity(); result.client_components_added.clearRetainingCapacity(); result.client_components_removed.clearRetainingCapacity(); result.client_components_affected.clearRetainingCapacity(); } }; /// Used during an incremental update to determine what "HMR roots" /// are affected. Set for all `bundled_files` that have been visited /// by the dependency tracing logic. const GraphTraceState = struct { client_bits: DynamicBitSetUnmanaged, server_bits: DynamicBitSetUnmanaged, fn bits(gts: *GraphTraceState, side: bake.Side) *DynamicBitSetUnmanaged { return switch (side) { .client => >s.client_bits, .server => >s.server_bits, }; } fn deinit(gts: *GraphTraceState, alloc: Allocator) void { gts.client_bits.deinit(alloc); gts.server_bits.deinit(alloc); } fn clear(gts: *GraphTraceState) void { gts.server_bits.setAll(false); gts.client_bits.setAll(false); } }; const TraceImportGoal = struct { // gts: *GraphTraceState, find_css: bool = false, find_client_modules: bool = false, }; fn initGraphTraceState(dev: *const DevServer, sfa: Allocator) !GraphTraceState { var server_bits = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.server_graph.bundled_files.count()); errdefer server_bits.deinit(sfa); const client_bits = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.client_graph.bundled_files.count()); return .{ .server_bits = server_bits, .client_bits = client_bits }; } /// When a file fails to import a relative path, directory watchers are added 
/// so that when a matching file is created, the dependencies can be rebuilt. This
/// handles HMR cases where a user writes an import before creating the file,
/// or moves files around.
///
/// This structure manages those watchers, including releasing them once
/// import resolution failures are solved.
const DirectoryWatchStore = struct {
    /// This guards all store state
    lock: Mutex,
    /// List of active watchers. Can be re-ordered on removal
    watches: bun.StringArrayHashMapUnmanaged(Entry),
    dependencies: ArrayListUnmanaged(Dep),
    /// Dependencies cannot be re-ordered. This list tracks what indexes are free.
    dependencies_free_list: ArrayListUnmanaged(Dep.Index),

    const empty: DirectoryWatchStore = .{
        .lock = .{},
        .watches = .{},
        .dependencies = .{},
        .dependencies_free_list = .{},
    };

    /// Recovers the DevServer embedding this store via its field name.
    pub fn owner(store: *DirectoryWatchStore) *DevServer {
        return @alignCast(@fieldParentPtr("directory_watchers", store));
    }

    /// Records that `import_source` failed to resolve `specifier`, adding
    /// a directory watcher so the import is re-attempted when a matching
    /// file appears. Thread-safe; takes `store.lock`.
    pub fn trackResolutionFailure(
        store: *DirectoryWatchStore,
        import_source: []const u8,
        specifier: []const u8,
        renderer: bake.Graph,
    ) bun.OOM!void {
        store.lock.lock();
        defer store.lock.unlock();

        // When it does not resolve to a file path, there is
        // nothing to track. Bake does not watch node_modules.
        if (!(bun.strings.startsWith(specifier, "./") or bun.strings.startsWith(specifier, "../"))) return;

        if (!std.fs.path.isAbsolute(import_source)) return;
        const joined = bun.path.joinAbs(bun.path.dirname(import_source, .auto), .auto, specifier);
        const dir = bun.path.dirname(joined, .auto);

        // `import_source` is not a stable string; borrow the owned key
        // from the IncrementalGraph entry instead. This requires holding
        // the graph safety lock while the graphs are touched.
        const dev = store.owner();
        dev.graph_safety_lock.lock();
        defer dev.graph_safety_lock.unlock();
        const owned_file_path = switch (renderer) {
            .client => path: {
                const index = try dev.client_graph.insertStale(import_source, false);
                break :path dev.client_graph.bundled_files.keys()[index.get()];
            },
            // Fix: server/ssr renderers must record the stale file in the
            // SERVER graph (previously this used `client_graph`, which both
            // polluted the client graph and discarded the ssr flag —
            // `insertStale`'s `is_ssr_graph` only has meaning server-side).
            .server, .ssr => path: {
                const index = try dev.server_graph.insertStale(import_source, renderer == .ssr);
                break :path dev.server_graph.bundled_files.keys()[index.get()];
            },
        };

        store.insert(dir, owned_file_path, specifier) catch |err| switch (err) {
            error.Ignore => {}, // ignoring watch errors.
            error.OutOfMemory => |e| return e,
        };
    }

    /// `dir_name_to_watch` is cloned
    /// `file_path` must have lifetime that outlives the watch
    /// `specifier` is cloned
    fn insert(
        store: *DirectoryWatchStore,
        dir_name_to_watch: []const u8,
        file_path: []const u8,
        specifier: []const u8,
    ) !void {
        // TODO: watch the parent dir too.
        const dev = store.owner();

        debug.log("DirectoryWatchStore.insert({}, {}, {})", .{
            bun.fmt.quote(dir_name_to_watch),
            bun.fmt.quote(file_path),
            bun.fmt.quote(specifier),
        });

        // Reserve a dependency slot up front so the append below cannot fail.
        if (store.dependencies_free_list.items.len == 0)
            try store.dependencies.ensureUnusedCapacity(dev.allocator, 1);

        const gop = try store.watches.getOrPut(dev.allocator, dir_name_to_watch);
        if (gop.found_existing) {
            const specifier_cloned = try dev.allocator.dupe(u8, specifier);
            errdefer dev.allocator.free(specifier_cloned);

            // TODO: check for dependency
            const dep = store.appendDepAssumeCapacity(.{
                .next = gop.value_ptr.first_dep.toOptional(),
                .source_file_path = file_path,
                .specifier = specifier_cloned,
            });
            gop.value_ptr.first_dep = dep;

            return;
        }
        errdefer store.watches.swapRemoveAt(gop.index);

        // Try to use an existing open directory handle
        const cache_fd = if (dev.server_bundler.resolver.readDirInfo(dir_name_to_watch) catch null) |cache| fd: {
            const fd = cache.getFileDescriptor();
            break :fd if (fd == .zero) null else fd;
        } else null;

        const fd, const owned_fd = if (cache_fd) |fd| .{
fd, false } else .{ switch (bun.sys.open( &(std.posix.toPosixPath(dir_name_to_watch) catch |err| switch (err) { error.NameTooLong => return, // wouldn't be able to open, ignore }), bun.O.DIRECTORY, 0, )) { .result => |fd| fd, .err => |err| switch (err.getErrno()) { // If this directory doesn't exist, a watcher should be // placed on the parent directory. Then, if this // directory is later created, the watcher can be // properly initialized. This would happen if you write // an import path like `./dir/whatever/hello.tsx` and // `dir` does not exist, Bun must place a watcher on // `.`, see the creation of `dir`, and repeat until it // can open a watcher on `whatever` to see the creation // of `hello.tsx` .NOENT => { // TODO: implement that. for now it ignores return; }, .NOTDIR => return error.Ignore, // ignore else => { bun.todoPanic(@src(), "log watcher error", .{}); }, }, }, true, }; errdefer _ = if (owned_fd) bun.sys.close(fd); debug.log("-> fd: {} ({s})", .{ fd, if (owned_fd) "from dir cache" else "owned fd", }); const dir_name = try dev.allocator.dupe(u8, dir_name_to_watch); errdefer dev.allocator.free(dir_name); gop.key_ptr.* = dir_name; const specifier_cloned = try dev.allocator.dupe(u8, specifier); errdefer dev.allocator.free(specifier_cloned); const watch_index = switch (dev.bun_watcher.addDirectory(fd, dir_name, bun.JSC.GenericWatcher.getHash(dir_name), false)) { .err => return error.Ignore, .result => |id| id, }; const dep = store.appendDepAssumeCapacity(.{ .next = .none, .source_file_path = file_path, .specifier = specifier_cloned, }); store.watches.putAssumeCapacity(dir_name, .{ .dir = fd, .dir_fd_owned = owned_fd, .first_dep = dep, .watch_index = watch_index, }); } /// Caller must detach the dependency from the linked list it is in. 
/// Free the owned `specifier` of a Dep and return its slot for reuse.
/// If the slot is at the tail of the list, the list shrinks instead of
/// growing the free list.
fn freeDependencyIndex(store: *DirectoryWatchStore, alloc: Allocator, index: Dep.Index) !void {
    alloc.free(store.dependencies.items[index.get()].specifier);

    // Poison the slot in debug so stale reads are caught.
    if (Environment.isDebug) {
        store.dependencies.items[index.get()] = undefined;
    }

    if (index.get() == (store.dependencies.items.len - 1)) {
        store.dependencies.items.len -= 1;
    } else {
        try store.dependencies_free_list.append(alloc, index);
    }
}

/// Expects dependency list to be already freed
fn freeEntry(store: *DirectoryWatchStore, entry_index: usize) void {
    const entry = store.watches.values()[entry_index];

    debug.log("DirectoryWatchStore.freeEntry({d}, {})", .{
        entry_index,
        entry.dir,
    });

    store.owner().bun_watcher.removeAtIndex(entry.watch_index, 0, &.{}, .file);
    // Close the fd only when this store opened it (not when it came from the
    // resolver's dir cache).
    defer _ = if (entry.dir_fd_owned) bun.sys.close(entry.dir);

    store.watches.swapRemoveAt(entry_index);

    // With no watches left, there can be no live deps; reclaim bookkeeping.
    if (store.watches.entries.len == 0) {
        assert(store.dependencies.items.len == 0);
        store.dependencies_free_list.clearRetainingCapacity();
    }
}

/// Store a Dep in a recycled slot if one is free, else append.
/// Capacity must have been ensured by the caller (see `insert`).
fn appendDepAssumeCapacity(store: *DirectoryWatchStore, dep: Dep) Dep.Index {
    if (store.dependencies_free_list.popOrNull()) |index| {
        store.dependencies.items[index.get()] = dep;
        return index;
    }
    const index = Dep.Index.init(@intCast(store.dependencies.items.len));
    store.dependencies.appendAssumeCapacity(dep);
    return index;
}

const Entry = struct {
    /// The directory handle the watch is placed on
    dir: bun.FileDescriptor,
    dir_fd_owned: bool,
    /// Files which request this import index
    first_dep: Dep.Index,
    /// To pass to Watcher.remove
    watch_index: u16,
};

const Dep = struct {
    // Intrusive singly-linked list of deps sharing one directory watch.
    next: Index.Optional,
    /// The file used
    source_file_path: []const u8,
    /// The specifier that failed. Before running re-build, it is resolved for, as
    /// creating an unrelated file should not re-emit another error. Default-allocator
    specifier: []const u8,

    const Index = bun.GenericIndex(u32, Dep);
};
};

const ChunkKind = enum {
    initial_response,
    hmr_chunk,
};

/// Errors sent to the HMR client in the browser are serialized. The same format
/// is used for thrown JavaScript exceptions as well as bundler errors.
/// Serialized failures contain a handle on what file or route they came from,
/// which allows the bundler to dismiss or update stale failures via index as
/// opposed to re-sending a new payload. This also means only changed files are
/// rebuilt, instead of all of the failed files.
///
/// The HMR client in the browser is expected to sort the final list of errors
/// for deterministic output; there is code in DevServer that uses `swapRemove`.
pub const SerializedFailure = struct {
    /// Serialized data is always owned by default_allocator
    /// The first 32 bits of this slice contain the owner
    data: []u8,

    pub fn deinit(f: SerializedFailure) void {
        bun.default_allocator.free(f.data);
    }

    /// The metaphorical owner of an incremental file error. The packed variant
    /// is given to the HMR runtime as an opaque handle.
    pub const Owner = union(enum) {
        none,
        route: RouteBundle.Index,
        client: IncrementalGraph(.client).FileIndex,
        server: IncrementalGraph(.server).FileIndex,

        /// Pack into the 32-bit wire representation (2-bit kind + 30-bit index).
        pub fn encode(owner: Owner) Packed {
            return switch (owner) {
                .none => .{ .kind = .none, .data = 0 },
                .client => |data| .{ .kind = .client, .data = data.get() },
                .server => |data| .{ .kind = .server, .data = data.get() },
                .route => |data| .{ .kind = .route, .data = data.get() },
            };
        }

        pub const Packed = packed struct(u32) {
            kind: enum(u2) { none, route, client, server },
            data: u30,

            /// Inverse of `Owner.encode`.
            pub fn decode(owner: Packed) Owner {
                return switch (owner.kind) {
                    .none => .none,
                    .client => .{ .client = IncrementalGraph(.client).FileIndex.init(owner.data) },
                    .server => .{ .server = IncrementalGraph(.server).FileIndex.init(owner.data) },
                    .route => .{ .route = RouteBundle.Index.init(owner.data) },
                };
            }
        };
    };

    /// Read the packed owner out of the first 4 bytes of the payload.
    fn getOwner(failure: SerializedFailure) Owner {
        return std.mem.bytesAsValue(Owner.Packed, failure.data[0..4]).decode();
    }

    /// This assumes the hash map contains only one SerializedFailure per owner.
/// This is okay since SerializedFailure can contain more than one error.
const ArrayHashContextViaOwner = struct {
    // Hash/eq are defined purely over the packed owner handle, so two
    // failures from the same owner collide by design (see note above).
    pub fn hash(_: ArrayHashContextViaOwner, k: SerializedFailure) u32 {
        return std.hash.uint32(@bitCast(k.getOwner().encode()));
    }

    pub fn eql(_: ArrayHashContextViaOwner, a: SerializedFailure, b: SerializedFailure, _: usize) bool {
        return @as(u32, @bitCast(a.getOwner().encode())) == @as(u32, @bitCast(b.getOwner().encode()));
    }
};

/// Adapter to look up a SerializedFailure in the map by a bare Owner,
/// without constructing a payload. Must hash identically to
/// `ArrayHashContextViaOwner`.
const ArrayHashAdapter = struct {
    pub fn hash(_: ArrayHashAdapter, own: Owner) u32 {
        return std.hash.uint32(@bitCast(own.encode()));
    }

    pub fn eql(_: ArrayHashAdapter, a: Owner, b: SerializedFailure, _: usize) bool {
        return @as(u32, @bitCast(a.encode())) == @as(u32, @bitCast(b.getOwner().encode()));
    }
};

/// Discriminator byte written at the head of each serialized error.
const ErrorKind = enum(u8) {
    // A log message. The `logger.Kind` is encoded here.
    bundler_log_err = 0,
    bundler_log_warn = 1,
    bundler_log_note = 2,
    bundler_log_debug = 3,
    bundler_log_verbose = 4,

    /// new Error(message)
    js_error,
    /// new TypeError(message)
    js_error_type,
    /// new RangeError(message)
    js_error_range,
    /// Other forms of `Error` objects, including when an error has a
    /// `code`, and other fields.
js_error_extra,
/// Non-error with a stack trace
js_primitive_exception,
/// Non-error JS values
js_primitive,
/// new AggregateError(errors, message)
js_aggregate,
};

/// Serialize a thrown JS value into the failure wire format.
/// Currently unimplemented; panics before doing any work.
pub fn initFromJs(owner: Owner, value: JSValue) !SerializedFailure {
    {
        _ = value;
        @panic("TODO");
    }
    // NOTE: the code below is unreachable until the panic above is removed.
    // Avoid small re-allocations without requesting so much from the heap
    var sfb = std.heap.stackFallback(65536, bun.default_allocator);
    var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch
        unreachable; // enough space

    const w = payload.writer();

    try w.writeInt(u32, @bitCast(owner.encode()), .little);

    // try writeJsValue(value);

    // Avoid re-cloning if it was moved to the heap
    const data = if (payload.items.ptr == &sfb.buffer)
        try bun.default_allocator.dupe(u8, payload.items)
    else
        payload.items;

    return .{ .data = data };
}

/// Serialize one or more bundler log messages for a single owner.
/// Layout: owner(u32) + display name + message count + messages.
pub fn initFromLog(
    owner: Owner,
    owner_display_name: []const u8,
    messages: []const bun.logger.Msg,
) !SerializedFailure {
    assert(messages.len > 0);

    // Avoid small re-allocations without requesting so much from the heap
    var sfb = std.heap.stackFallback(65536, bun.default_allocator);
    var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch
        unreachable; // enough space

    const w = payload.writer();

    try w.writeInt(u32, @bitCast(owner.encode()), .little);

    try writeString32(owner_display_name, w);

    try w.writeInt(u32, @intCast(messages.len), .little);

    for (messages) |*msg| {
        try writeLogMsg(msg, w);
    }

    // Avoid re-cloning if it was moved to the heap
    const data = if (payload.items.ptr == &sfb.buffer)
        try bun.default_allocator.dupe(u8, payload.items)
    else
        payload.items;

    return .{ .data = data };
}

// All "write" functions get a corresponding "read" function in ./client/error.ts

const Writer = std.ArrayList(u8).Writer;

/// kind byte + message data + note count + notes.
fn writeLogMsg(msg: *const bun.logger.Msg, w: Writer) !void {
    try w.writeByte(switch (msg.kind) {
        inline else => |k| @intFromEnum(@field(ErrorKind, "bundler_log_" ++ @tagName(k))),
    });
    try writeLogData(msg.data, w);
    const notes = msg.notes;
    try w.writeInt(u32, @intCast(notes.len), .little);
    for (notes) |note| {
        try writeLogData(note, w);
    }
}

/// text + optional location. A location is signalled by a nonzero line value;
/// 0 means "no location follows".
fn writeLogData(data: bun.logger.Data, w: Writer) !void {
    try writeString32(data.text, w);

    if (data.location) |loc| {
        if (loc.line < 0) {
            // Negative line means no usable location; emit the 0 sentinel.
            try w.writeInt(u32, 0, .little);
            return;
        }
        assert(loc.column >= 0); // zero based and not negative

        // NOTE(review): a location with line == 0 would be indistinguishable
        // from the "no location" sentinel on the reader side — presumably
        // lines are 1-based here; confirm against ./client/error.ts.
        try w.writeInt(i32, @intCast(loc.line), .little);
        try w.writeInt(u32, @intCast(loc.column), .little);
        try w.writeInt(u32, @intCast(loc.length), .little);

        // TODO: syntax highlighted line text + give more context lines
        try writeString32(loc.line_text orelse "", w);

        // The file is not specified here. Since the bundler runs every file
        // in isolation, it would be impossible to reference any other file
        // in this Log. Thus, it is not serialized.
    } else {
        try w.writeInt(u32, 0, .little);
    }
}

/// u32 length prefix + raw bytes.
fn writeString32(data: []const u8, w: Writer) !void {
    try w.writeInt(u32, @intCast(data.len), .little);
    try w.writeAll(data);
}

// fn writeJsValue(value: JSValue, global: *JSC.JSGlobalObject, w: *Writer) !void {
//     if (value.isAggregateError(global)) {
//         //
//     }
//     if (value.jsType() == .DOMWrapper) {
//         if (value.as(JSC.BuildMessage)) |build_error| {
//             _ = build_error; // autofix
//             //
//         } else if (value.as(JSC.ResolveMessage)) |resolve_error| {
//             _ = resolve_error; // autofix
//             @panic("TODO");
//         }
//     }
//     _ = w; // autofix
//     @panic("TODO");
// }
};

// For debugging, it is helpful to be able to see bundles.
/// Write one bundled chunk to the debug dump directory as
/// `<side>/<rel_path>`, with a header comment noting timestamp and version.
/// `wrap` parenthesizes the chunk so the dump parses as valid JS syntax.
fn dumpBundle(dump_dir: std.fs.Dir, side: bake.Graph, rel_path: []const u8, chunk: []const u8, wrap: bool) !void {
    const name = bun.path.joinAbsString("/", &.{
        @tagName(side),
        rel_path,
    }, .auto)[1..];
    var inner_dir = try dump_dir.makeOpenPath(bun.Dirname.dirname(u8, name).?, .{});
    defer inner_dir.close();

    const file = try inner_dir.createFile(bun.path.basename(name), .{});
    defer file.close();

    var bufw = std.io.bufferedWriter(file.writer());

    try bufw.writer().print("// {s} bundled for {s}\n", .{
        bun.fmt.quote(rel_path),
        @tagName(side),
    });
    try bufw.writer().print("// Bundled at {d}, Bun " ++ bun.Global.package_json_version_with_canary ++ "\n", .{
        std.time.nanoTimestamp(),
    });

    // Wrap in an object to make it valid syntax. Regardless, these files
    // are never executable on their own as they contain only a single module.
    if (wrap)
        try bufw.writer().writeAll("({\n");

    try bufw.writer().writeAll(chunk);

    if (wrap)
        try bufw.writer().writeAll("});\n");

    try bufw.flush();
}

/// Publish the incremental-graph visualizer payload, but only when debugging
/// features are compiled in and at least one visualizer client is subscribed.
fn emitVisualizerMessageIfNeeded(dev: *DevServer) !void {
    if (!bun.FeatureFlags.bake_debugging_features) return;
    if (dev.emit_visualizer_events == 0) return;

    var sfb = std.heap.stackFallback(65536, bun.default_allocator);
    var payload = try std.ArrayList(u8).initCapacity(sfb.get(), 65536);
    defer payload.deinit();
    try dev.writeVisualizerMessage(&payload);

    dev.publish(.visualizer, payload.items, .binary);
}

/// Serialize both incremental graphs in the `MessageId.visualizer` wire
/// format: per-side file lists followed by per-side live edge lists.
fn writeVisualizerMessage(dev: *DevServer, payload: *std.ArrayList(u8)) !void {
    payload.appendAssumeCapacity(MessageId.visualizer.char());
    const w = payload.writer();

    inline for (
        [2]bake.Side{ .client, .server },
        .{ &dev.client_graph, &dev.server_graph },
    ) |side, g| {
        try w.writeInt(u32, @intCast(g.bundled_files.count()), .little);
        for (
            g.bundled_files.keys(),
            g.bundled_files.values(),
            0..,
        ) |k, v, i| {
            const normalized_key = dev.relativePath(k);
            try w.writeInt(u32, @intCast(normalized_key.len), .little);
            // A zero-length name means no further fields for this file
            // (matches the `visualizer` format described on MessageId).
            if (k.len == 0) continue;
            try w.writeAll(normalized_key);
            try w.writeByte(@intFromBool(g.stale_files.isSet(i) or switch (side) {
                .server => v.failed,
                .client => v.flags.failed,
            }));
            try w.writeByte(@intFromBool(side == .server and v.is_rsc));
            try w.writeByte(@intFromBool(side == .server and v.is_ssr));
            try w.writeByte(@intFromBool(side == .server and v.is_route));
            try w.writeByte(@intFromBool(side == .client and v.flags.is_special_framework_file));
            try w.writeByte(@intFromBool(switch (side) {
                .server => v.is_client_component_boundary,
                .client => v.flags.is_hmr_root,
            }));
        }
    }

    inline for (.{ &dev.client_graph, &dev.server_graph }) |g| {
        const G = @TypeOf(g.*);
        // Count only live edges; freed slots are skipped below.
        try w.writeInt(u32, @intCast(g.edges.items.len - g.edges_free_list.items.len), .little);
        for (g.edges.items, 0..) |edge, i| {
            if (std.mem.indexOfScalar(G.EdgeIndex, g.edges_free_list.items, G.EdgeIndex.init(@intCast(i))) != null)
                continue;
            try w.writeInt(u32, @intCast(edge.dependency.get()), .little);
            try w.writeInt(u32, @intCast(edge.imported.get()), .little);
        }
    }
}

/// Upgrade an HTTP request to the HMR WebSocket. Allocates an HmrSocket whose
/// lifetime is tied to the connection (freed in HmrSocket.onClose).
pub fn onWebSocketUpgrade(
    dev: *DevServer,
    res: *Response,
    req: *Request,
    upgrade_ctx: *uws.uws_socket_context_t,
    id: usize,
) void {
    assert(id == 0);

    const dw = bun.create(dev.allocator, HmrSocket, .{
        .dev = dev,
        // Only loopback peers may use privileged messages.
        .is_from_localhost = if (res.getRemoteSocketInfo()) |addr|
            if (addr.is_ipv6)
                bun.strings.eqlComptime(addr.ip, "::1")
            else
                bun.strings.eqlComptime(addr.ip, "127.0.0.1")
        else
            false,
        .subscriptions = .{},
        .active_route = .none,
    });
    res.upgrade(
        *HmrSocket,
        dw,
        req.header("sec-websocket-key") orelse "",
        req.header("sec-websocket-protocol") orelse "",
        // NOTE(review): the standard header is "sec-websocket-extensions"
        // (plural) — confirm whether the singular spelling here is intended.
        req.header("sec-websocket-extension") orelse "",
        upgrade_ctx,
    );
}

/// Every message is to use `.binary`/`ArrayBuffer` transport mode. The first byte
/// indicates a Message ID; see comments on each type for how to interpret the rest.
///
/// This format is only intended for communication via the browser and DevServer.
/// Server-side HMR is implemented using a different interface. This API is not
/// versioned alongside Bun; breaking changes may occur at any point.
///
/// All integers are sent in little-endian
pub const MessageId = enum(u8) {
    /// Version payload. Sent on connection startup. The client should issue a
    /// hard-reload when it mismatches with its `config.version`.
    version = 'V',
    /// Sent on a successful bundle, containing client code, updates routes, and
    /// changed CSS files. Emitted on the `.hot_update` topic.
    ///
    /// - For each server-side updated route:
    ///   - `i32`: Route Bundle ID
    /// - `i32`: -1 to indicate end of list
    /// - For each route stylesheet lists affected:
    ///   - `i32`: Route Bundle ID
    ///   - `u32`: Length of route pattern
    ///   - `[n]u8` UTF-8: Route pattern
    ///   - `u32`: Number of CSS attachments: For Each
    ///     - `[16]u8` ASCII: CSS identifier
    /// - `i32`: -1 to indicate end of list
    /// - `u32`: Number of CSS mutations. For Each:
    ///   - `[16]u8` ASCII: CSS identifier
    ///   - `u32`: Length of CSS code
    ///   - `[n]u8` UTF-8: CSS payload
    /// - `[n]u8` UTF-8: JS Payload. No length, rest of buffer is text.
    ///   Can be empty if no client-side code changed.
    ///
    /// The first list contains route changes that require a page reload, but
    /// frameworks can perform via `onServerSideReload`. Fallback behavior
    /// is to call `location.reload();`
    ///
    /// The second list is sent to inform the current list of CSS files
    /// reachable by a route, recalculated whenever an import is added or
    /// removed as that can inadvertently affect the CSS list.
    ///
    /// The third list contains CSS mutations, which are when the underlying
    /// CSS file itself changes.
    ///
    /// The JS payload is the remaining data. If defined, it can be passed to
    /// `eval`, resulting in an object of new module callables.
    hot_update = 'u',
    /// Sent when the list of errors changes.
    ///
    /// - `u32`: Removed errors. For Each:
    ///   - `u32`: Error owner
    /// - Remainder are added errors. For Each:
    ///   - `SerializedFailure`: Error Data
    errors = 'e',
    /// A message from the browser. This is used to communicate.
    /// - `u32`: Unique ID for the browser tab. Each tab gets a different ID
    /// - `[n]u8`: Opaque bytes, untouched from `IncomingMessageId.browser_error`
    browser_message = 'b',
    /// Sent to clear the messages from `browser_error`
    /// - For each removed ID:
    ///   - `u32`: Unique ID for the browser tab.
    browser_message_clear = 'B',
    /// Sent when a request handler error is emitted. Each route will own at
    /// most 1 error, where sending a new request clears the original one.
    ///
    /// - `u32`: Removed errors. For Each:
    ///   - `u32`: Error owner
    /// - `u32`: Length of route pattern
    /// - `[n]u8`: UTF-8 Route pattern
    /// - `SerializedFailure`: The one error list for the request
    request_handler_error = 'h',
    /// Payload for `incremental_visualizer.html`. This can be accessed via
    /// `/_bun/incremental_visualizer`. This contains both graphs.
    ///
    /// - `u32`: Number of files in `client_graph`. For Each:
    ///   - `u32`: Length of name. If zero then no other fields are provided.
    ///   - `[n]u8`: File path in UTF-8 encoded text
    ///   - `u8`: If file is stale, set 1
    ///   - `u8`: If file is in server graph, set 1
    ///   - `u8`: If file is in ssr graph, set 1
    ///   - `u8`: If file is a server-side route root, set 1
    ///   - `u8`: If file is a server-side component boundary file, set 1
    /// - `u32`: Number of files in the server graph. For Each:
    ///   - Repeat the same parser for the client graph
    /// - `u32`: Number of client edges. For Each:
    ///   - `u32`: File index of the dependency file
    ///   - `u32`: File index of the imported file
    /// - `u32`: Number of server edges. For Each:
    ///   - `u32`: File index of the dependency file
    ///   - `u32`: File index of the imported file
    visualizer = 'v',

    pub inline fn char(id: MessageId) u8 {
        return @intFromEnum(id);
    }
};

/// First byte of messages the browser sends to DevServer.
pub const IncomingMessageId = enum(u8) {
    /// Subscribe to an event channel. Payload is a sequence of chars available
    /// in HmrTopic.
    subscribe = 's',
    // /// Subscribe to `.route_manifest` events. No payload.
// subscribe_route_manifest = 'r',
// /// Emit a hot update for a file without actually changing its on-disk
// /// content. This can be used by an editor extension to stream contents in
// /// IDE to reflect in the browser. This is gated to only work on localhost
// /// socket connections.
// virtual_file_change = 'w',
/// Emitted on client-side navigations.
/// Rest of payload is a UTF-8 string.
set_url = 'n',
/// Emit a message from the browser. Payload is opaque bytes that DevServer
/// does not care about. In practice, the payload is a JSON object.
browser_message = 'm',
/// Invalid data
_,
};

/// Pub/sub topics for the HMR socket; the topic name on the wire is the
/// single enum character.
const HmrTopic = enum(u8) {
    hot_update = 'h',
    errors = 'e',
    browser_error = 'E',
    visualizer = 'v',
    // route_manifest = 'r',
    /// Invalid data
    _,

    pub const max_count = @typeInfo(HmrTopic).Enum.fields.len;

    /// Packed struct with one `bool` per topic, generated from the enum
    /// fields so the two can never drift apart.
    pub const Bits = @Type(.{ .Struct = .{
        .backing_integer = @Type(.{ .Int = .{
            .bits = max_count,
            .signedness = .unsigned,
        } }),
        .fields = &brk: {
            const enum_fields = @typeInfo(HmrTopic).Enum.fields;
            var fields: [enum_fields.len]std.builtin.Type.StructField = undefined;
            for (enum_fields, &fields) |e, *s| {
                s.* = .{
                    .name = e.name,
                    .type = bool,
                    .default_value = &false,
                    .is_comptime = false,
                    .alignment = 0,
                };
            }
            break :brk fields;
        },
        .decls = &.{},
        .is_tuple = false,
        .layout = .@"packed",
    } });
};

/// Per-connection state for one HMR WebSocket client.
const HmrSocket = struct {
    dev: *DevServer,
    /// Topics this client is currently subscribed to.
    subscriptions: HmrTopic.Bits,
    /// Allows actions which inspect or mutate sensitive DevServer state.
    is_from_localhost: bool,
    /// By telling DevServer the active route, this enables receiving detailed
    /// `hot_update` events for when the route is updated.
    active_route: RouteBundle.Index.Optional,
    // NOTE(review): a stale doc comment here read "Files which the client
    // definitely has and should not be re-sent" but no such field exists;
    // removed as dead documentation.

    pub fn onOpen(s: *HmrSocket, ws: AnyWebSocket) void {
        // Handshake: version byte + configuration hash. A mismatch makes the
        // client hard-reload (see MessageId.version).
        _ = ws.send(&(.{MessageId.version.char()} ++ s.dev.configuration_hash_key), .binary, false, true);
    }

    pub fn onMessage(s: *HmrSocket, ws: AnyWebSocket, msg: []const u8, opcode: uws.Opcode) void {
        _ = opcode;
        if (msg.len == 0) {
            ws.close();
            return;
        }
        switch (@as(IncomingMessageId, @enumFromInt(msg[0]))) {
            .subscribe => {
                // Decode the requested topic set from the payload characters.
                var new_bits: HmrTopic.Bits = .{};
                const topics = msg[1..];
                if (topics.len > HmrTopic.max_count) return;
                outer: for (topics) |char| {
                    inline for (@typeInfo(HmrTopic).Enum.fields) |field| {
                        if (char == field.value) {
                            @field(new_bits, field.name) = true;
                            continue :outer;
                        }
                    }
                }
                // Diff against the current subscriptions, running the
                // per-topic hooks on each transition.
                inline for (comptime std.enums.values(HmrTopic)) |field| {
                    if (@field(new_bits, @tagName(field)) and !@field(s.subscriptions, @tagName(field))) {
                        _ = ws.subscribe(&.{@intFromEnum(field)});
                        // on-subscribe hooks
                        switch (field) {
                            .visualizer => {
                                s.dev.emit_visualizer_events += 1;
                                s.dev.emitVisualizerMessageIfNeeded() catch bun.outOfMemory();
                            },
                            else => {},
                        }
                    } else if (!@field(new_bits, @tagName(field)) and @field(s.subscriptions, @tagName(field))) {
                        // BUGFIX: this branch previously repeated the subscribe
                        // condition verbatim, making it unreachable, so
                        // unsubscribes and their hooks never ran.
                        _ = ws.unsubscribe(&.{@intFromEnum(field)});
                        // on-unsubscribe hooks
                        switch (field) {
                            .visualizer => {
                                s.dev.emit_visualizer_events -= 1;
                            },
                            else => {},
                        }
                    }
                }
                // BUGFIX: persist the new subscription set. Without this,
                // `s.subscriptions` stayed all-false, re-running subscribe
                // hooks on every message and breaking onClose cleanup.
                s.subscriptions = new_bits;
            },
            .set_url => {
                const pattern = msg[1..];
                var params: FrameworkRouter.MatchedParams = undefined;
                if (s.dev.router.matchSlow(pattern, &params)) |route| {
                    const rbi = s.dev.getOrPutRouteBundle(route) catch bun.outOfMemory();
                    if (s.active_route.unwrap()) |old| {
                        if (old == rbi) return;
                        s.dev.routeBundlePtr(old).active_viewers -= 1;
                    }
                    s.dev.routeBundlePtr(rbi).active_viewers += 1;
                    // BUGFIX: remember the newly active route so the viewer
                    // count is balanced on the next navigation and on close.
                    s.active_route = rbi.toOptional();
                }
            },
            else => ws.close(),
        }
    }

    pub fn onClose(s: *HmrSocket, ws: AnyWebSocket, exit_code: i32, message: []const u8) void {
        _ = ws;
        _ = exit_code;
        _ = message;

        // Undo counters this connection contributed to.
        if (s.subscriptions.visualizer) {
            s.dev.emit_visualizer_events -= 1;
        }
        if (s.active_route.unwrap()) |old| {
            s.dev.routeBundlePtr(old).active_viewers -= 1;
        }
        defer s.dev.allocator.destroy(s);
    }
};

/// FFI bindings into BakeSourceProvider.cpp.
const c = struct {
    // BakeSourceProvider.cpp
    extern fn BakeGetDefaultExportFromModule(global: *JSC.JSGlobalObject, module: JSValue) JSValue;

    fn BakeLoadServerHmrPatch(global: *JSC.JSGlobalObject, code: bun.String) !JSValue {
        const f = @extern(
            *const fn (*JSC.JSGlobalObject, bun.String) callconv(.C) JSValue.MaybeException,
            .{ .name = "BakeLoadServerHmrPatch" },
        );
        return f(global, code).unwrap();
    }

    fn BakeLoadInitialServerCode(global: *JSC.JSGlobalObject, code: bun.String, separate_ssr_graph: bool) bun.JSError!JSValue {
        const f = @extern(*const fn (*JSC.JSGlobalObject, bun.String, bool) callconv(.C) JSValue.MaybeException, .{
            .name = "BakeLoadInitialServerCode",
        });
        return f(global, code, separate_ssr_graph).unwrap();
    }
};

/// Called on DevServer thread via HotReloadTask
pub fn startReloadBundle(dev: *DevServer, event: *HotReloadEvent) bun.OOM!void {
    defer event.files.clearRetainingCapacity();

    var sfb = std.heap.stackFallback(4096, bun.default_allocator);
    const temp_alloc = sfb.get();
    var entry_points: EntryPointList = EntryPointList.empty;
    defer entry_points.deinit(temp_alloc);

    event.processFileList(dev, &entry_points, temp_alloc);

    if (entry_points.set.count() == 0) {
        Output.debugWarn("nothing to bundle. watcher may potentially be watching too many files.", .{});
        return;
    }

    dev.startAsyncBundle(
        entry_points,
        true,
        event.timer,
    ) catch |err| {
        bun.handleErrorReturnTrace(err, @errorReturnTrace());
        return;
    };
}

/// Recursively set the bundle index bit of every descendant route that has a
/// bundle, in each of the `n` provided bit sets.
fn markAllRouteChildren(router: *FrameworkRouter, comptime n: comptime_int, bits: [n]*DynamicBitSetUnmanaged, route_index: Route.Index) void {
    var next = router.routePtr(route_index).first_child.unwrap();
    while (next) |child_index| {
        const route = router.routePtr(child_index);
        if (route.bundle.unwrap()) |index| {
            inline for (bits) |b| b.set(index.get());
        }
        markAllRouteChildren(router, n, bits, child_index);
        next = route.next_sibling.unwrap();
    }
}

/// Recursively flag every bundled descendant route as possibly failing, so
/// requests re-check bundling failures.
fn markAllRouteChildrenFailed(dev: *DevServer, route_index: Route.Index) void {
    var next = dev.router.routePtr(route_index).first_child.unwrap();
    while (next) |child_index| {
        const route = dev.router.routePtr(child_index);
        if (route.bundle.unwrap()) |index| {
            dev.routeBundlePtr(index).server_state = .possible_bundling_failures;
        }
        markAllRouteChildrenFailed(dev, child_index);
        next = route.next_sibling.unwrap();
    }
}

/// This task informs the DevServer's thread about new files to be bundled.
pub const HotReloadEvent = struct {
    /// Align to cache lines to eliminate contention.
    const Aligned = struct { aligned: HotReloadEvent align(std.atomic.cache_line) };

    owner: *DevServer,
    /// Initialized in WatcherAtomics.watcherReleaseAndSubmitEvent
    concurrent_task: JSC.ConcurrentTask,
    /// The watcher is not able to peek into the incremental graph to know what
    /// files to invalidate, so the watch events are de-duplicated and passed
    /// along.
    files: bun.StringArrayHashMapUnmanaged(Watcher.Event.Op),
    /// Initialized by the WatcherAtomics.watcherAcquireEvent
    timer: std.time.Timer,
    /// This event may be referenced by either DevServer or Watcher thread.
/// 1 if referenced, 0 if unreferenced; see WatcherAtomics
contention_indicator: std.atomic.Value(u32),

pub fn initEmpty(owner: *DevServer) HotReloadEvent {
    return .{
        .owner = owner,
        .concurrent_task = undefined,
        .files = .{},
        .timer = undefined,
        .contention_indicator = std.atomic.Value(u32).init(0),
    };
}

/// De-duplicate a watch event into `files`, merging the op flags when the
/// path was already recorded.
pub fn append(
    event: *HotReloadEvent,
    allocator: Allocator,
    file_path: []const u8,
    op: Watcher.Event.Op,
) void {
    const gop = event.files.getOrPut(allocator, file_path) catch bun.outOfMemory();
    if (gop.found_existing) {
        gop.value_ptr.* = gop.value_ptr.merge(op);
    } else {
        gop.value_ptr.* = op;
    }
}

/// Invalidates items in IncrementalGraph, appending all new items to `entry_points`
pub fn processFileList(
    event: *HotReloadEvent,
    dev: *DevServer,
    entry_points: *EntryPointList,
    alloc: Allocator,
) void {
    const changed_file_paths = event.files.keys();
    // TODO: check for .delete and remove items from graph. this has to be done
    // with care because some editors save by deleting and recreating the file.
    // delete events are not to be trusted at face value. also, merging of
    // events can cause .write and .delete to be true at the same time.
    const changed_file_attributes = event.files.values();
    _ = changed_file_attributes;

    {
        dev.graph_safety_lock.lock();
        defer dev.graph_safety_lock.unlock();

        inline for (.{ &dev.server_graph, &dev.client_graph }) |g| {
            g.invalidate(changed_file_paths, entry_points, alloc) catch bun.outOfMemory();
        }
    }
}

/// DevServer-thread entry point for the hot-reload task. Drains this event
/// (and, if available, the second recycled event), then kicks off an async
/// bundle. If a bundle is already running, defers to the next bundle.
pub fn run(first: *HotReloadEvent) void {
    debug.log("HMR Task start", .{});
    defer debug.log("HMR Task end", .{});

    const dev = first.owner;

    if (Environment.allow_assert) {
        assert(first.contention_indicator.load(.seq_cst) == 0);
    }

    // A bundle is active: queue this event for when it finishes.
    if (dev.current_bundle != null) {
        dev.next_bundle.reload_event = first;
        return;
    }

    // defer event.files.clearRetainingCapacity();

    var sfb = std.heap.stackFallback(4096, bun.default_allocator);
    const temp_alloc = sfb.get();
    var entry_points: EntryPointList = EntryPointList.empty;
    defer entry_points.deinit(temp_alloc);

    first.processFileList(dev, &entry_points, temp_alloc);
    // Copy the timer before the event is recycled below.
    const timer = first.timer;

    if (dev.watcher_atomics.recycleEventFromDevServer(first)) |second| {
        second.processFileList(dev, &entry_points, temp_alloc);
        dev.watcher_atomics.recycleSecondEventFromDevServer(second);
    }

    if (entry_points.set.count() == 0) {
        Output.debugWarn("nothing to bundle. watcher may potentially be watching too many files.", .{});
        return;
    }

    dev.startAsyncBundle(
        entry_points,
        true,
        timer,
    ) catch |err| {
        bun.handleErrorReturnTrace(err, @errorReturnTrace());
        return;
    };
}
};

/// All code working with atomics to communicate watcher is in this struct. It
/// attempts to recycle as much memory as possible since files are very
/// frequently updated.
const WatcherAtomics = struct {
    const log = Output.scoped(.DevServerWatchAtomics, true);

    /// Only two hot-reload tasks exist ever, since only one bundle may be active at
    /// once. Memory is reused by swapping between these two. These items are
    /// aligned to cache lines to reduce contention, since these structures are
    /// carefully passed between two threads.
events: [2]HotReloadEvent.Aligned align(std.atomic.cache_line),
/// 0 - no watch
/// 1 - has fired additional watch
/// 2+ - new events available, watcher is waiting on bundler to finish
watcher_events_emitted: std.atomic.Value(u32),
/// Which event is the watcher holding on to.
/// This is not atomic because only the watcher thread uses this value.
current: u1 align(std.atomic.cache_line),
// Debug-only safety locks asserting single ownership per thread.
watcher_has_event: std.debug.SafetyLock,
dev_server_has_event: std.debug.SafetyLock,

pub fn init(dev: *DevServer) WatcherAtomics {
    return .{
        .events = .{
            .{ .aligned = HotReloadEvent.initEmpty(dev) },
            .{ .aligned = HotReloadEvent.initEmpty(dev) },
        },
        .current = 0,
        .watcher_events_emitted = std.atomic.Value(u32).init(0),
        .watcher_has_event = .{},
        .dev_server_has_event = .{},
    };
}

/// Atomically get a *HotReloadEvent that is not used by the DevServer thread
/// Call `watcherReleaseAndSubmitEvent` when it is filled with files.
fn watcherAcquireEvent(state: *WatcherAtomics) *HotReloadEvent {
    state.watcher_has_event.lock();

    var ev: *HotReloadEvent = &state.events[state.current].aligned;
    switch (ev.contention_indicator.swap(1, .seq_cst)) {
        0 => {
            // New event, initialize the timer if it is empty.
            if (ev.files.count() == 0)
                ev.timer = std.time.Timer.start() catch unreachable;
        },
        1 => {
            // @branchHint(.unlikely);
            // DevServer stole this event. Unlikely but possible when
            // the user is saving very heavily (10-30 times per second)
            state.current +%= 1;
            ev = &state.events[state.current].aligned;
            if (Environment.allow_assert) {
                bun.assert(ev.contention_indicator.swap(1, .seq_cst) == 0);
            }
        },
        else => unreachable,
    }

    ev.owner.bun_watcher.thread_lock.assertLocked();

    return ev;
}

/// Release the pointer from `watcherAcquireEvent`, submitting
/// the event if it contains new files.
fn watcherReleaseAndSubmitEvent(state: *WatcherAtomics, ev: *HotReloadEvent) void {
    state.watcher_has_event.unlock();
    ev.owner.bun_watcher.thread_lock.assertLocked();

    if (ev.files.count() > 0) {
        // @branchHint(.likely);
        // There are files to be processed, increment this count first.
        const prev_count = state.watcher_events_emitted.fetchAdd(1, .seq_cst);
        if (prev_count == 0) {
            // @branchHint(.likely);
            // Submit a task to the DevServer, notifying it that there is
            // work to do. The watcher will move to the other event.
            ev.concurrent_task = .{
                .auto_delete = false,
                .next = null,
                .task = JSC.Task.init(ev),
            };
            // Release the event *before* enqueueing so the DevServer thread
            // can acquire it as soon as it sees the task.
            ev.contention_indicator.store(0, .seq_cst);
            ev.owner.vm.event_loop.enqueueTaskConcurrent(&ev.concurrent_task);
            state.current +%= 1;
        } else {
            // DevServer thread has already notified once. Sending
            // a second task would give ownership of both events to
            // them. Instead, DevServer will steal this item since
            // it can observe `watcher_events_emitted >= 2`.
            ev.contention_indicator.store(0, .seq_cst);
        }
    } else {
        // No files were appended; just release the event.
        ev.contention_indicator.store(0, .seq_cst);
    }

    if (Environment.allow_assert) {
        bun.assert(ev.contention_indicator.load(.monotonic) == 0); // always must be reset
    }
}

/// Called by DevServer after it receives a task callback. If this returns
/// another event, that event must be recycled with `recycleSecondEventFromDevServer`.
/// DevServer thread only.
fn recycleEventFromDevServer(state: *WatcherAtomics, first_event: *HotReloadEvent) ?*HotReloadEvent {
    first_event.files.clearRetainingCapacity();
    first_event.timer = undefined;

    // Reset the watch count to zero, while detecting if
    // the other watch event was submitted.
    if (state.watcher_events_emitted.swap(0, .seq_cst) >= 2) {
        // Cannot use `state.current` because it will contend with the watcher.
        // Since there are only two events, one pointer comparison suffices.
        const other_event = if (first_event == &state.events[0].aligned)
            &state.events[1].aligned
        else
            &state.events[0].aligned;
        switch (other_event.contention_indicator.swap(1, .seq_cst)) {
            0 => {
                // DevServer holds the event now.
                state.dev_server_has_event.lock();
                return other_event;
            },
            1 => {
                // The watcher is currently using this event.
                // `watcher_events_emitted` is already zero, so it will
                // always submit.
                // Not 100% confident in this logic, but the only way
                // to hit this is by saving extremely frequently, and
                // a followup save will just trigger the reload.
                return null;
            },
            else => unreachable,
        }
    }

    // If a watch callback had already acquired the event, that is fine as
    // it will now read 0 when deciding if to submit the task.
    return null;
}

/// Recycle the extra event handed out by `recycleEventFromDevServer`.
/// DevServer thread only.
fn recycleSecondEventFromDevServer(state: *WatcherAtomics, second_event: *HotReloadEvent) void {
    second_event.files.clearRetainingCapacity();
    second_event.timer = undefined;
    state.dev_server_has_event.unlock();
    if (Environment.allow_assert) {
        // In safe builds, verify DevServer actually held the event.
        const result = second_event.contention_indicator.swap(0, .seq_cst);
        bun.assert(result == 1);
    } else {
        second_event.contention_indicator.store(0, .seq_cst);
    }
}
};

/// Called on watcher's thread; Access to dev-server state restricted.
/// Appends changed files into a HotReloadEvent (see WatcherAtomics) and,
/// for changed directories, busts the resolver's directory cache and
/// re-resolves previously failed imports watching that directory.
pub fn onFileUpdate(dev: *DevServer, events: []Watcher.Event, changed_files: []?[:0]u8, watchlist: Watcher.ItemList) void {
    _ = changed_files;
    debug.log("onFileUpdate start", .{});
    defer debug.log("onFileUpdate end", .{});
    const slice = watchlist.slice();
    const file_paths = slice.items(.file_path);
    const counts = slice.items(.count);
    const kinds = slice.items(.kind);

    const ev = dev.watcher_atomics.watcherAcquireEvent();
    defer dev.watcher_atomics.watcherReleaseAndSubmitEvent(ev);
    defer dev.bun_watcher.flushEvictions();

    // TODO: a lot of code is missing
    // TODO: story for busting resolution cache smartly?
    for (events) |event| {
        const file_path = file_paths[event.index];
        const update_count = counts[event.index] + 1;
        counts[event.index] = update_count;
        const kind = kinds[event.index];

        debug.log("{s} change: {s} {}", .{ @tagName(kind), file_path, event.op });

        switch (kind) {
            .file => {
                if (event.op.delete or event.op.rename) {
                    dev.bun_watcher.removeAtIndex(event.index, 0, &.{}, .file);
                }
                ev.append(dev.allocator, file_path, event.op);
            },
            .directory => {
                // bust the directory cache since this directory has changed
                _ = dev.server_bundler.resolver.bustDirCache(bun.strings.withoutTrailingSlashWindowsPath(file_path));

                // if a directory watch exists for resolution
                // failures, check those now.
                dev.directory_watchers.lock.lock();
                defer dev.directory_watchers.lock.unlock();
                if (dev.directory_watchers.watches.getIndex(file_path)) |watcher_index| {
                    const entry = &dev.directory_watchers.watches.values()[watcher_index];
                    var new_chain: DirectoryWatchStore.Dep.Index.Optional = .none;
                    var it: ?DirectoryWatchStore.Dep.Index = entry.first_dep;

                    // Walk the singly-linked dependency list for this directory.
                    while (it) |index| {
                        const dep = &dev.directory_watchers.dependencies.items[index.get()];
                        it = dep.next.unwrap();
                        if ((dev.server_bundler.resolver.resolve(
                            bun.path.dirname(dep.source_file_path, .auto),
                            dep.specifier,
                            .stmt,
                        ) catch null) != null) {
                            // the resolution result is not preserved as safely
                            // transferring it into BundleV2 is too complicated. the
                            // resolution is cached, anyways.
                            ev.append(dev.allocator, dep.source_file_path, .{ .write = true });
                            dev.directory_watchers.freeDependencyIndex(dev.allocator, index) catch bun.outOfMemory();
                        } else {
                            // rebuild a new linked list for unaffected files
                            dep.next = new_chain;
                            new_chain = index.toOptional();
                        }
                    }

                    if (new_chain.unwrap()) |new_first_dep| {
                        entry.first_dep = new_first_dep;
                    } else {
                        // without any files to depend on, this watcher is freed
                        dev.directory_watchers.freeEntry(watcher_index);
                    }
                }
            },
        }
    }
}

pub fn onWatchError(_: *DevServer, err: bun.sys.Error) void {
    // TODO: how to recover? the watcher can't just ... crash????????
    Output.err(@as(bun.C.E, @enumFromInt(err.errno)), "Watcher crashed", .{});
    if (bun.Environment.isDebug) {
        bun.todoPanic(@src(), "Watcher crash", .{});
    }
}

/// Broadcast `message` on an HMR websocket topic. No-op when no HTTP
/// server is attached. The topic enum value is the one-byte topic key.
pub fn publish(dev: *DevServer, topic: HmrTopic, message: []const u8, opcode: uws.Opcode) void {
    if (dev.server) |s| _ = s.publish(&.{@intFromEnum(topic)}, message, opcode, false);
}

/// Number of websocket subscribers for `topic`; 0 when no server is attached.
pub fn numSubscribers(dev: *DevServer, topic: HmrTopic) u32 {
    return if (dev.server) |s| s.numSubscribers(&.{@intFromEnum(topic)}) else 0;
}

/// Debug-build encoding for OpaqueFileId: packs the graph side alongside the
/// file index so `fromOpaqueFileId` can assert the id is decoded on the same
/// side it was created for.
const SafeFileId = packed struct(u32) {
    side: bake.Side,
    index: u30,
    unused: enum(u1) { unused = 0 } = .unused,
};

/// Interface function for FrameworkRouter. Inserts the file into the server
/// graph as stale and records the route association for later lookup.
pub fn getFileIdForRouter(dev: *DevServer, abs_path: []const u8, associated_route: Route.Index, file_kind: Route.FileKind) !OpaqueFileId {
    const index = try dev.server_graph.insertStaleExtra(abs_path, false, true);
    try dev.route_lookup.put(dev.allocator, index, .{
        .route_index = associated_route,
        // Layouts wrap nested routes, so visiting one must recurse.
        .should_recurse_when_visiting = file_kind == .layout,
    });
    return toOpaqueFileId(.server, index);
}

pub fn onRouterSyntaxError(dev: *DevServer, rel_path: []const u8, log: FrameworkRouter.TinyLog) bun.OOM!void {
    _ = dev; // TODO: maybe this should track the error, send over HmrSocket?
    log.print(rel_path);
}

pub fn onRouterCollisionError(dev: *DevServer, rel_path: []const u8, other_id: OpaqueFileId, ty: Route.FileKind) bun.OOM!void {
    // TODO: maybe this should track the error, send over HmrSocket?
    Output.errGeneric("Multiple {s} matching the same route pattern is ambiguous", .{
        switch (ty) {
            .page => "pages",
            .layout => "layout",
        },
    });
    // Print both colliding files relative to the project root.
    Output.prettyErrorln(" - {s}", .{rel_path});
    Output.prettyErrorln(" - {s}", .{
        dev.relativePath(dev.server_graph.bundled_files.keys()[fromOpaqueFileId(.server, other_id).get()]),
    });
    Output.flush();
}

/// Pack a graph file index into the router's opaque id. In safe builds the
/// side is embedded too (see SafeFileId) so decoding can verify it.
fn toOpaqueFileId(comptime side: bake.Side, index: IncrementalGraph(side).FileIndex) OpaqueFileId {
    if (Environment.allow_assert) {
        return OpaqueFileId.init(@bitCast(SafeFileId{
            .side = side,
            .index = index.get(),
        }));
    }
    return OpaqueFileId.init(index.get());
}

/// Inverse of `toOpaqueFileId`; asserts the side matches in safe builds.
fn fromOpaqueFileId(comptime side: bake.Side, id: OpaqueFileId) IncrementalGraph(side).FileIndex {
    if (Environment.allow_assert) {
        const safe: SafeFileId = @bitCast(id.get());
        assert(side == safe.side);
        return IncrementalGraph(side).FileIndex.init(safe.index);
    }
    return IncrementalGraph(side).FileIndex.init(@intCast(id.get()));
}

/// Return `path` relative to `dev.root` with posix separators.
/// Fast path: when `path` is directly under the root, slice off the prefix.
/// Slow path result is owned by a mutable threadlocal buffer in the path
/// code, so callers must not hold it across another path operation.
fn relativePath(dev: *const DevServer, path: []const u8) []const u8 {
    // TODO: windows slash normalization
    bun.assert(dev.root[dev.root.len - 1] != '/');
    if (path.len >= dev.root.len + 1 and
        path[dev.root.len] == '/' and
        bun.strings.startsWith(path, dev.root))
    {
        return path[dev.root.len + 1 ..];
    }
    const rel = bun.path.relative(dev.root, path);
    // `rel` is owned by a mutable threadlocal buffer in the path code.
    bun.path.platformToPosixInPlace(u8, @constCast(rel));
    return rel;
}

fn dumpStateDueToCrash(dev: *DevServer) !void {
    comptime assert(bun.FeatureFlags.bake_debugging_features);
    // being conservative about how much stuff is put on the stack.
var filepath_buf: [@min(4096, bun.MAX_PATH_BYTES)]u8 = undefined; const filepath = std.fmt.bufPrintZ(&filepath_buf, "incremental-graph-crash-dump.{d}.html", .{std.time.timestamp()}) catch "incremental-graph-crash-dump.html"; const file = std.fs.cwd().createFileZ(filepath, .{}) catch |err| { bun.handleErrorReturnTrace(err, @errorReturnTrace()); Output.warn("Could not open directory for dumping sources: {}", .{err}); return; }; defer file.close(); const start, const end = comptime brk: { const visualizer = @embedFile("incremental_visualizer.html"); const i = (std.mem.indexOf(u8, visualizer, "