From d2fe1ce1c8a45c16098e2b0df5c20eb2811bc583 Mon Sep 17 00:00:00 2001 From: dave caruso Date: Mon, 14 Oct 2024 16:49:38 -0700 Subject: [PATCH] feat(bake): handle bundle errors, re-assemble full client payloads, initial error modal (#14504) --- .vscode/launch.json | 2 + build.zig | 1 + src/bake/DevServer.zig | 1896 +++++++++++++++++------- src/bake/bake.private.d.ts | 6 +- src/bake/bake.zig | 40 +- src/bake/client/error-serialization.ts | 89 ++ src/bake/client/overlay.ts | 47 +- src/bake/client/reader.ts | 6 +- src/bake/error.template.html | 15 - src/bake/hmr-module.ts | 18 +- src/bake/hmr-protocol.md | 19 +- src/bake/hmr-runtime-client.ts | 13 +- src/bake/hmr-runtime-error.ts | 60 + src/bake/hmr-runtime-server.ts | 36 +- src/bake/incremental_visualizer.html | 613 ++++---- src/bun.zig | 46 +- src/bundler/bundle_v2.zig | 721 +++++---- src/codegen/bake-codegen.ts | 120 +- src/crash_handler.zig | 45 +- src/js/node/async_hooks.ts | 28 +- src/js_lexer.zig | 18 +- src/js_parser.zig | 32 +- src/js_printer.zig | 11 +- src/logger.zig | 16 +- src/mimalloc_arena.zig | 7 + src/options.zig | 2 +- src/toml/toml_lexer.zig | 6 +- 27 files changed, 2596 insertions(+), 1317 deletions(-) create mode 100644 src/bake/client/error-serialization.ts delete mode 100644 src/bake/error.template.html create mode 100644 src/bake/hmr-runtime-error.ts diff --git a/.vscode/launch.json b/.vscode/launch.json index 2728065c07..888eebd876 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -174,6 +174,8 @@ "BUN_GARBAGE_COLLECTOR_LEVEL": "0", "BUN_DEBUG_IncrementalGraph": "1", "BUN_DEBUG_Bake": "1", + "BUN_DEBUG_reload_file_list": "1", + "GOMAXPROCS": "1", }, "console": "internalConsole", }, diff --git a/build.zig b/build.zig index d81052af40..f65a9bd231 100644 --- a/build.zig +++ b/build.zig @@ -478,6 +478,7 @@ fn addInternalPackages(b: *Build, obj: *Compile, opts: *BunBuildOptions) void { .{ .file = "ErrorCode.zig", .import = "ErrorCode" }, .{ .file = "runtime.out.js" }, .{ .file = "bake.client.js", .import = "bake-codegen/bake.client.js", .enable = opts.shouldEmbedCode() }, + .{ .file = "bake.error.js", .import = "bake-codegen/bake.error.js", .enable = opts.shouldEmbedCode() }, .{ .file = "bake.server.js", .import = "bake-codegen/bake.server.js", .enable = opts.shouldEmbedCode() }, .{ .file = "bun-error/index.js", .enable = opts.shouldEmbedCode() }, .{ .file = "bun-error/bun-error.css", .enable = opts.shouldEmbedCode() }, diff --git a/src/bake/DevServer.zig b/src/bake/DevServer.zig index e73eb7bd06..bc1ad31737 100644 --- a/src/bake/DevServer.zig +++ b/src/bake/DevServer.zig @@ -46,8 +46,10 @@ listener: ?*App.ListenSocket, server_global: *DevGlobalObject, vm: *VirtualMachine, /// This is a handle to the server_fetch_function, which is shared -/// across all loaded modules. Its type is `(Request, Id, Meta) => Response` +/// across all loaded modules. +/// (Request, Id, Meta) => Response server_fetch_function_callback: JSC.Strong, +/// (modules: any, clientComponentsAdd: null|string[], clientComponentsRemove: null|string[]) => Promise server_register_update_callback: JSC.Strong, // Watching @@ -64,11 +66,25 @@ watch_current: u1 = 0, // Bundling generation: usize = 0, +bundles_since_last_error: usize = 0, +/// All access into IncrementalGraph is guarded by this. This is only +/// a debug assertion since there is no actual contention. 
+graph_safety_lock: bun.DebugThreadLock,
client_graph: IncrementalGraph(.client),
server_graph: IncrementalGraph(.server),
+/// All bundling failures are stored until a file is saved and rebuilt.
+/// They are stored in the wire format the HMR runtime expects so that
+/// serialization only happens once.
+bundling_failures: std.ArrayHashMapUnmanaged(
+    SerializedFailure,
+    void,
+    SerializedFailure.ArrayHashContextViaOwner,
+    false,
+) = .{},
+/// Quickly retrieve a route's index from the entry point file.
route_lookup: AutoArrayHashMapUnmanaged(IncrementalGraph(.server).FileIndex, Route.Index),
+/// State populated during bundling. Often cleared
incremental_result: IncrementalResult,
-graph_safety_lock: bun.DebugThreadLock,
framework: bake.Framework,
// Each logical graph gets its own bundler configuration
server_bundler: Bundler,
log: Log,

// Debugging
dump_dir: ?std.fs.Dir,
-emit_visualizer_events: u32 = 0,
+emit_visualizer_events: u32,

pub const internal_prefix = "/_bun";
pub const client_prefix = internal_prefix ++ "/client";

pub const Route = struct {
-    pub const Index = bun.GenericIndex(u32, Route);
+    pub const Index = bun.GenericIndex(u30, Route);

    // Config
    pattern: [:0]const u8,
    entry_point: []const u8,

-    bundle: BundleState = .stale,
-    module_name_string: ?bun.String = null,
+    server_state: State = .unqueued,
+    /// Cached to avoid looking up by filename in `server_graph`
+    server_file: IncrementalGraph(.server).FileIndex.Optional = .none,
+    /// Generated lazily when the client JS is requested (HTTP GET /_bun/client/*.js),
+    /// which is only needed when a hard-reload is performed.
+    ///
+    /// Freed when a client module updates.
+    client_bundle: ?[]const u8 = null,
+    /// Contains the list of serialized failures. A hashmap allows for
+    /// efficient lookup and removal of failing files.
+    /// When state == .evaluation_failure, this is populated with that error.
+    evaluate_failure: ?SerializedFailure = null,
+
+    /// Cached to avoid re-creating the string every request
+    module_name_string: JSC.Strong = .{},

    /// Assigned in DevServer.init
    dev: *DevServer = undefined,
    client_bundled_url: []u8 = undefined,

+    /// A union is not used so that `bundler_failure_logs` can re-use memory, as
+    /// this state frequently changes between `loaded` and the failure variants.
+    const State = enum {
+        /// In development mode, routes are lazily built. This state implies a
+        /// build of this route has never been run. It is possible to bundle the
+        /// route entry point and still have an unqueued route if another route
+        /// imports this one.
+        unqueued,
+        /// This route was flagged for bundling failures. There are edge cases
+        /// where a route can be disconnected from its failures, so the route's
+        /// imports have to be traced to discover if possible failures still
+        /// exist.
+        possible_bundling_failures,
+        /// Loading the module at runtime had a failure.
+        evaluation_failure,
+        /// Calling the request function may error, but that error will not be
+        /// the fault of bundling.
+        loaded,
+    };
+
    pub fn clientPublicPath(route: *const Route) []const u8 {
        return route.client_bundled_url[0 ..
route.client_bundled_url.len - "/client.js".len]; } }; -/// Three-way maybe state -const BundleState = union(enum) { - /// Bundled assets are not prepared - stale, - /// Build failure - fail: Failure, - - ready: Bundle, - - fn reset(s: *BundleState) void { - switch (s.*) { - .stale => return, - .fail => |f| f.deinit(), - .ready => |b| b.deinit(), - } - s.* = .stale; - } - - const NonStale = union(enum) { - /// Build failure - fail: Failure, - ready: Bundle, - }; -}; - -const Bundle = struct { - /// Backed by default_allocator. - client_bundle: []const u8, -}; - /// DevServer is stored on the heap, storing it's allocator. pub fn init(options: Options) !*DevServer { const allocator = options.allocator orelse bun.default_allocator; @@ -174,6 +193,7 @@ pub fn init(options: Options) !*DevServer { .framework = options.framework, .watch_state = .{ .raw = 0 }, .watch_current = 0, + .emit_visualizer_events = 0, .client_graph = IncrementalGraph(.client).empty, .server_graph = IncrementalGraph(.server).empty, @@ -237,7 +257,7 @@ pub fn init(options: Options) !*DevServer { var has_fallback = false; for (options.routes, 0..) |*route, i| { - app.any(route.pattern, *Route, route, onServerRequestInit); + app.any(route.pattern, *Route, route, onServerRequest); route.dev = dev; route.client_bundled_url = std.fmt.allocPrint( @@ -250,7 +270,7 @@ pub fn init(options: Options) !*DevServer { has_fallback = true; } - app.get(client_prefix ++ "/:route/:asset", *DevServer, dev, onAssetRequestInit); + app.get(client_prefix ++ "/:route/:asset", *DevServer, dev, onAssetRequest); app.ws( internal_prefix ++ "/hmr", @@ -266,6 +286,40 @@ pub fn init(options: Options) !*DevServer { app.listenWithConfig(*DevServer, dev, onListen, options.listen_config); + // Some indices at the start of the graph are reserved for framework files. + { + dev.graph_safety_lock.lock(); + defer dev.graph_safety_lock.unlock(); + + assert(try dev.client_graph.insertStale(dev.framework.entry_client, false) == IncrementalGraph(.client).framework_entry_point_index); + assert(try dev.server_graph.insertStale(dev.framework.entry_server, false) == IncrementalGraph(.server).framework_entry_point_index); + + if (dev.framework.react_fast_refresh) |rfr| { + assert(try dev.client_graph.insertStale(rfr.import_source, false) == IncrementalGraph(.client).react_refresh_index); + } + + try dev.client_graph.ensureStaleBitCapacity(true); + try dev.server_graph.ensureStaleBitCapacity(true); + + const client_files = dev.client_graph.bundled_files.values(); + client_files[IncrementalGraph(.client).framework_entry_point_index.get()].flags.is_special_framework_file = true; + } + + // Pre-bundle the framework code + { + // Since this will enter JavaScript to load code, ensure we have a lock. 
+ const lock = dev.vm.jsc.getAPILock(); + defer lock.release(); + + dev.bundle(&.{ + BakeEntryPoint.init(dev.framework.entry_server, .server), + BakeEntryPoint.init(dev.framework.entry_client, .client), + }) catch |err| { + _ = &err; // autofix + bun.todoPanic(@src(), "handle error", .{}); + }; + } + return dev; } @@ -275,7 +329,7 @@ fn deinit(dev: *DevServer) void { bun.todoPanic(@src(), "bake.DevServer.deinit()"); } -fn initBundler(dev: *DevServer, bundler: *Bundler, comptime renderer: bake.Renderer) !void { +fn initBundler(dev: *DevServer, bundler: *Bundler, comptime renderer: bake.Graph) !void { const framework = dev.framework; bundler.* = try bun.Bundler.init( @@ -317,6 +371,8 @@ fn initBundler(dev: *DevServer, bundler: *Bundler, comptime renderer: bake.Rende bundler.options.minify_identifiers = false; bundler.options.minify_whitespace = false; + bundler.options.experimental_css = true; + bundler.options.dev_server = dev; bundler.options.framework = &dev.framework; @@ -358,7 +414,7 @@ fn onListen(ctx: *DevServer, maybe_listen: ?*App.ListenSocket) void { Output.flush(); } -fn onAssetRequestInit(dev: *DevServer, req: *Request, resp: *Response) void { +fn onAssetRequest(dev: *DevServer, req: *Request, resp: *Response) void { const route = route: { const route_id = req.parameter(0); const i = std.fmt.parseInt(u16, route_id, 10) catch @@ -367,15 +423,47 @@ fn onAssetRequestInit(dev: *DevServer, req: *Request, resp: *Response) void { return req.setYield(true); break :route &dev.routes[i]; }; - // const asset_name = req.parameter(1); - switch (route.dev.getRouteBundle(route)) { - .ready => |bundle| { - sendJavaScriptSource(bundle.client_bundle, resp); - }, - .fail => |fail| { - fail.sendAsHttpResponse(resp, route); - }, - } + + const js_source = route.client_bundle orelse code: { + if (route.server_state == .unqueued) { + dev.bundleRouteFirstTime(route); + } + + switch (route.server_state) { + .unqueued => bun.assertWithLocation(false, @src()), + .possible_bundling_failures => { + if (dev.bundling_failures.count() > 0) { + resp.corked(sendSerializedFailures, .{ + dev, + resp, + dev.bundling_failures.keys(), + .bundler, + }); + return; + } else { + route.server_state = .loaded; + } + }, + .evaluation_failure => { + resp.corked(sendSerializedFailures, .{ + dev, + resp, + &.{route.evaluate_failure orelse @panic("missing error")}, + .evaluation, + }); + return; + }, + .loaded => {}, + } + + // TODO: there can be stale files in this if you request an asset after + // a watch but before the bundle task starts. 
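+            // The generated bundle is cached on `route.client_bundle` below
+            // and freed when a client module updates (see `Route.client_bundle`).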
+
+            const out = dev.generateClientBundle(route) catch bun.outOfMemory();
+            route.client_bundle = out;
+            break :code out;
+        };
+    sendJavaScriptSource(js_source, resp);
}

fn onIncrementalVisualizer(_: *DevServer, _: *Request, resp: *Response) void {
fn onIncrementalVisualizerCorked(resp: *Response) void {
    resp.end(code, false);
}

-fn onServerRequestInit(route: *Route, req: *Request, resp: *Response) void {
-    switch (route.dev.getRouteBundle(route)) {
-        .ready => |ready| {
-            onServerRequestWithBundle(route, ready, req, resp);
-        },
-        .fail => |fail| {
-            fail.sendAsHttpResponse(resp, route);
-        },
-    }
-}
-
-fn getRouteBundle(dev: *DevServer, route: *Route) BundleState.NonStale {
-    if (route.bundle == .stale) {
-        var fail: Failure = undefined;
-        route.bundle = bundle: {
-            const success = dev.performBundleAndWaitInner(route, &fail) catch |err| {
-                bun.handleErrorReturnTrace(err, @errorReturnTrace());
-                fail.printToConsole(route);
-                break :bundle .{ .fail = fail };
-            };
-            break :bundle .{ .ready = success };
-        };
-    }
-    return switch (route.bundle) {
-        .stale => unreachable,
-        .fail => |fail| .{ .fail = fail },
-        .ready => |ready| .{ .ready = ready },
+/// `route.server_state` must be `.unqueued`
+fn bundleRouteFirstTime(dev: *DevServer, route: *Route) void {
+    if (Environment.allow_assert) switch (route.server_state) {
+        .unqueued => {},
+        .possible_bundling_failures => unreachable, // should watch affected files and bundle on save
+        .evaluation_failure => unreachable, // bundling again won't fix this issue
+        .loaded => unreachable, // should not be bundling since it already passed
    };
+
+    if (dev.bundle(&.{
+        BakeEntryPoint.route(
+            route.entry_point,
+            Route.Index.init(@intCast(bun.indexOfPointerInSlice(Route, dev.routes, route))),
+        ),
+    })) |_| {
+        route.server_state = .loaded;
+    } else |err| switch (err) {
+        error.OutOfMemory => bun.outOfMemory(),
+        error.BuildFailed => assert(route.server_state == .possible_bundling_failures),
+        error.ServerLoadFailed => route.server_state = .evaluation_failure,
+    }
}

-fn performBundleAndWaitInner(dev: *DevServer, route: *Route, fail: *Failure) !Bundle {
-    return dev.theRealBundlingFunction(
-        &.{
-            // TODO: only enqueue these two if they don't exist
-            // tbh it would be easier just to pre-bundle the framework.
-            BakeEntryPoint.init(dev.framework.entry_server.?, .server),
-            BakeEntryPoint.init(dev.framework.entry_client.?, .client),
-            // The route!
-            BakeEntryPoint.route(
-                route.entry_point,
-                Route.Index.init(@intCast(bun.indexOfPointerInSlice(Route, dev.routes, route))),
-            ),
+fn onServerRequest(route: *Route, req: *Request, resp: *Response) void {
+    const dev = route.dev;
+
+    if (route.server_state == .unqueued) {
+        dev.bundleRouteFirstTime(route);
+    }
+
+    switch (route.server_state) {
+        .unqueued => bun.assertWithLocation(false, @src()),
+        .possible_bundling_failures => {
+            // TODO: perform a graph trace to find just the errors that are needed
+            if (dev.bundling_failures.count() > 0) {
+                resp.corked(sendSerializedFailures, .{
+                    dev,
+                    resp,
+                    dev.bundling_failures.keys(),
+                    .bundler,
+                });
+                return;
+            } else {
+                route.server_state = .loaded;
+            }
        },
-        route,
-        .initial_response,
-        fail,
+        .evaluation_failure => {
+            resp.corked(sendSerializedFailures, .{
+                dev,
+                resp,
+                (&(route.evaluate_failure orelse @panic("missing error")))[0..1],
+                .evaluation,
+            });
+            return;
+        },
+        .loaded => {},
+    }
+
+    // TODO: this does not move the body, reuse memory, or do many other
+    // things that server.zig does.
+ const url_bun_string = bun.String.init(req.url()); + defer url_bun_string.deref(); + + const headers = JSC.FetchHeaders.createFromUWS(req); + const request_object = JSC.WebCore.Request.init( + url_bun_string, + headers, + dev.vm.initRequestBodyValue(.Null) catch bun.outOfMemory(), + bun.http.Method.which(req.method()) orelse .GET, + ).new(); + + const js_request = request_object.toJS(dev.server_global.js()); + + const global = dev.server_global.js(); + + const server_request_callback = dev.server_fetch_function_callback.get() orelse + unreachable; // did not bundle + + // TODO: use a custom class for this metadata type + revise the object structure too + const meta = JSValue.createEmptyObject(global, 1); + meta.put( + dev.server_global.js(), + bun.String.static("clientEntryPoint"), + bun.String.init(route.client_bundled_url).toJS(global), ); -} -/// Error handling is done either by writing to `fail` with a specific failure, -/// or by appending to `dev.log`. The caller, `getRouteBundle`, will handle the -/// error, including replying to the request as well as console logging. -fn theRealBundlingFunction( - dev: *DevServer, - files: []const BakeEntryPoint, - dependant_route: ?*Route, - comptime client_chunk_kind: ChunkKind, - fail: *Failure, -) !Bundle { - // Ensure something is written to `fail` if something goes wrong - fail.* = .{ .zig_error = error.FileNotFound }; - errdefer |err| if (fail.* == .zig_error) { - if (dev.log.hasAny()) { - // todo: clone to recycled - fail.* = Failure.fromLog(&dev.log); - } else { - fail.* = .{ .zig_error = err }; - } + var result = server_request_callback.call( + global, + .undefined, + &.{ + js_request, + meta, + route.module_name_string.get() orelse str: { + const js = bun.String.createUTF8( + bun.path.relative(dev.cwd, route.entry_point), + ).toJS(dev.server_global.js()); + route.module_name_string = JSC.Strong.create(js, dev.server_global.js()); + break :str js; + }, + }, + ) catch |err| { + const exception = global.takeException(err); + dev.vm.printErrorLikeObjectToConsole(exception); + // const fail = try SerializedFailure.initFromJs(.none, exception); + // defer fail.deinit(); + // dev.sendSerializedFailures(resp, &.{fail}, .runtime); + dev.sendStubErrorMessage(route, resp, exception); + return; }; + if (result.asAnyPromise()) |promise| { + dev.vm.waitForPromise(promise); + switch (promise.unwrap(dev.vm.jsc, .mark_handled)) { + .pending => unreachable, // was waited for + .fulfilled => |r| result = r, + .rejected => |exception| { + dev.vm.printErrorLikeObjectToConsole(exception); + dev.sendStubErrorMessage(route, resp, exception); + // const fail = try SerializedFailure.initFromJs(.none, e); + // defer fail.deinit(); + // dev.sendSerializedFailures(resp, &.{fail}, .runtime); + return; + }, + } + } + + // TODO: This interface and implementation is very poor. It is fine as + // the runtime currently emulates returning a `new Response` + // + // It probably should use code from `server.zig`, but most importantly it should + // not have a tie to DevServer, but instead be generic with a context structure + // containing just a *uws.App, *JSC.EventLoop, and JSValue response object. 
+ // + // This would allow us to support all of the nice things `new Response` allows + + const bun_string = result.toBunString(dev.server_global.js()); + defer bun_string.deref(); + if (bun_string.tag == .Dead) { + bun.todoPanic(@src(), "Bake: support non-string return value", .{}); + } + + const utf8 = bun_string.toUTF8(dev.allocator); + defer utf8.deinit(); + + resp.writeStatus("200 OK"); + resp.writeHeader("Content-Type", MimeType.html.value); + resp.end(utf8.slice(), true); // TODO: You should never call res.end(huge buffer) +} + +const BundleError = error{ + OutOfMemory, + /// Graph entry points will be annotated with failures to display. + BuildFailed, + + ServerLoadFailed, +}; + +fn bundle(dev: *DevServer, files: []const BakeEntryPoint) BundleError!void { defer dev.emitVisualizerMessageIfNeeded() catch bun.outOfMemory(); assert(files.len > 0); @@ -510,10 +686,8 @@ fn theRealBundlingFunction( bv2.deinit(); } - defer { - dev.server_graph.reset(); - dev.client_graph.reset(); - } + dev.client_graph.reset(); + dev.server_graph.reset(); errdefer |e| brk: { // Wait for wait groups to finish. There still may be ongoing work. @@ -528,7 +702,7 @@ fn theRealBundlingFunction( const abs_path = file.path.text; if (!std.fs.path.isAbsolute(abs_path)) continue; - switch (target.bakeRenderer()) { + switch (target.bakeGraph()) { .server => { _ = dev.server_graph.insertStale(abs_path, false) catch bun.outOfMemory(); }, @@ -545,16 +719,23 @@ fn theRealBundlingFunction( dev.server_graph.ensureStaleBitCapacity(true) catch bun.outOfMemory(); } - const output_files = try bv2.runFromJSInNewThread(&.{}, files); + const chunk = bv2.runFromBakeDevServer(files) catch |err| { + bun.handleErrorReturnTrace(err, @errorReturnTrace()); + + bv2.bundler.log.printForLogLevel(Output.errorWriter()) catch {}; + + Output.warn("BundleV2.runFromBakeDevServer returned error.{s}", .{@errorName(err)}); + + return; + }; + + bv2.bundler.log.printForLogLevel(Output.errorWriter()) catch {}; + + try dev.finalizeBundle(bv2, &chunk); try dev.client_graph.ensureStaleBitCapacity(false); try dev.server_graph.ensureStaleBitCapacity(false); - assert(output_files.items.len == 0); - - bv2.bundler.log.printForLogLevel(Output.errorWriter()) catch {}; - bv2.client_bundler.log.printForLogLevel(Output.errorWriter()) catch {}; - dev.generation +%= 1; if (Environment.enable_logs) { debug.log("Bundle Round {d}: {d} server, {d} client, {d} ms", .{ @@ -567,42 +748,33 @@ fn theRealBundlingFunction( const is_first_server_chunk = !dev.server_fetch_function_callback.has(); - const server_bundle = try dev.server_graph.takeBundle(if (is_first_server_chunk) .initial_response else .hmr_chunk); - defer dev.allocator.free(server_bundle); + if (dev.server_graph.current_chunk_len > 0) { + const server_bundle = try dev.server_graph.takeBundle(if (is_first_server_chunk) .initial_response else .hmr_chunk); + defer dev.allocator.free(server_bundle); - const client_bundle = try dev.client_graph.takeBundle(client_chunk_kind); - - errdefer if (client_chunk_kind != .hmr_chunk) dev.allocator.free(client_bundle); - defer if (client_chunk_kind == .hmr_chunk) dev.allocator.free(client_bundle); - - if (client_bundle.len > 0 and client_chunk_kind == .hmr_chunk) { - assert(client_bundle[0] == '('); - _ = dev.app.publish("*", client_bundle, .binary, true); - } - - if (dev.log.hasAny()) { - dev.log.printForLogLevel(Output.errorWriter()) catch {}; - } - - if (dependant_route) |route| { - if (route.module_name_string == null) { - route.module_name_string = 
bun.String.createUTF8(bun.path.relative(dev.cwd, route.entry_point));
-        }
-    }
-
-    if (server_bundle.len > 0) {
        if (is_first_server_chunk) {
            const server_code = c.BakeLoadInitialServerCode(dev.server_global, bun.String.createLatin1(server_bundle)) catch |err| {
-                fail.* = Failure.fromJSServerLoad(dev.server_global.js().takeException(err), dev.server_global.js());
-                return error.ServerJSLoad;
+                dev.vm.printErrorLikeObjectToConsole(dev.server_global.js().takeException(err));
+                {
+                    // TODO: document the technical reasons this should not be allowed to fail
+                    bun.todoPanic(@src(), "First server load failed. This should become a bundler bug.", .{});
+                }
+                _ = &err; // autofix
+                // fail.* = Failure.fromJSServerLoad(dev.server_global.js().takeException(err), dev.server_global.js());
+                return error.ServerLoadFailed;
            };
            dev.vm.waitForPromise(.{ .internal = server_code.promise });
            switch (server_code.promise.unwrap(dev.vm.jsc, .mark_handled)) {
                .pending => unreachable, // promise is settled
                .rejected => |err| {
-                    fail.* = Failure.fromJSServerLoad(err, dev.server_global.js());
-                    return error.ServerJSLoad;
+                    dev.vm.printErrorLikeObjectToConsole(err);
+                    {
+                        bun.todoPanic(@src(), "First server load failed. This should become a bundler bug.", .{});
+                    }
+                    _ = &err; // autofix
+                    // fail.* = Failure.fromJSServerLoad(err, dev.server_global.js());
+                    return error.ServerLoadFailed;
                },
                .fulfilled => |v| bun.assert(v == .undefined),
            }

            fetch_function.ensureStillAlive();
            register_update.ensureStillAlive();
        } else {
-            const server_code = c.BakeLoadServerHmrPatch(dev.server_global, bun.String.createLatin1(server_bundle)) catch |err| {
+            const server_modules = c.BakeLoadServerHmrPatch(dev.server_global, bun.String.createLatin1(server_bundle)) catch |err| {
                // No user code has been evaluated yet, since everything is to
                // be wrapped in a function closure. This means that the likely
                // error is going to be a syntax error, or other mistake in the
                // bundler.
                dev.vm.printErrorLikeObjectToConsole(dev.server_global.js().takeException(err));
                @panic("Error thrown while evaluating server code. This is always a bug in the bundler.");
            };
-            _ = dev.server_register_update_callback.get().?.call(
+            const errors = dev.server_register_update_callback.get().?.call(
                dev.server_global.js(),
                dev.server_global.js().toJSValue(),
-                &.{server_code},
+                &.{
+                    server_modules,
+                    dev.makeArrayForServerComponentsPatch(dev.server_global.js(), dev.incremental_result.client_components_added.items),
+                    dev.makeArrayForServerComponentsPatch(dev.server_global.js(), dev.incremental_result.client_components_removed.items),
+                },
            ) catch |err| {
                // One module replacement error should NOT prevent follow-up
                // module replacements from being applied. It is the HMR runtime's
-                // responsibility to handle these errors.
+                // responsibility to collect all module load errors, and
+                // bubble them up.
                dev.vm.printErrorLikeObjectToConsole(dev.server_global.js().takeException(err));
                @panic("Error thrown in Hot-module-replacement code. 
This is always a bug in the HMR runtime."); }; + _ = errors; // TODO: } } - return .{ .client_bundle = client_bundle }; + if (dev.incremental_result.failures_added.items.len > 0) { + dev.bundles_since_last_error = 0; + return error.BuildFailed; + } +} + +fn indexFailures(dev: *DevServer) !void { + var sfa_state = std.heap.stackFallback(65536, dev.allocator); + const sfa = sfa_state.get(); + + if (dev.incremental_result.failures_added.items.len > 0) { + var total_len: usize = @sizeOf(MessageId) + @sizeOf(u32); + + for (dev.incremental_result.failures_added.items) |fail| { + total_len += fail.data.len; + } + + total_len += dev.incremental_result.failures_removed.items.len * @sizeOf(u32); + + dev.server_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.server_graph.bundled_files.count()); + defer dev.server_graph.affected_by_trace.deinit(sfa); + + dev.client_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.client_graph.bundled_files.count()); + defer dev.client_graph.affected_by_trace.deinit(sfa); + + var payload = try std.ArrayList(u8).initCapacity(sfa, total_len); + defer payload.deinit(); + payload.appendAssumeCapacity(MessageId.errors.char()); + const w = payload.writer(); + + try w.writeInt(u32, @intCast(dev.incremental_result.failures_removed.items.len), .little); + + for (dev.incremental_result.failures_removed.items) |removed| { + try w.writeInt(u32, @bitCast(removed.getOwner().encode()), .little); + removed.deinit(); + } + + for (dev.incremental_result.failures_added.items) |added| { + try w.writeAll(added.data); + + switch (added.getOwner()) { + .none, .route => unreachable, + .server => |index| try dev.server_graph.traceDependencies(index, .no_stop), + .client => |index| try dev.client_graph.traceDependencies(index, .no_stop), + } + } + + for (dev.incremental_result.routes_affected.items) |route_index| { + const route = &dev.routes[route_index.get()]; + route.server_state = .possible_bundling_failures; + } + + _ = dev.app.publish(DevWebSocket.global_channel, payload.items, .binary, false); + } else if (dev.incremental_result.failures_removed.items.len > 0) { + if (dev.bundling_failures.count() == 0) { + _ = dev.app.publish(DevWebSocket.global_channel, &.{MessageId.errors_cleared.char()}, .binary, false); + for (dev.incremental_result.failures_removed.items) |removed| { + removed.deinit(); + } + } else { + var payload = try std.ArrayList(u8).initCapacity(sfa, @sizeOf(MessageId) + @sizeOf(u32) + dev.incremental_result.failures_removed.items.len * @sizeOf(u32)); + defer payload.deinit(); + payload.appendAssumeCapacity(MessageId.errors.char()); + const w = payload.writer(); + + try w.writeInt(u32, @intCast(dev.incremental_result.failures_removed.items.len), .little); + + for (dev.incremental_result.failures_removed.items) |removed| { + try w.writeInt(u32, @bitCast(removed.getOwner().encode()), .little); + removed.deinit(); + } + + _ = dev.app.publish(DevWebSocket.global_channel, payload.items, .binary, false); + } + } + + dev.incremental_result.failures_removed.clearRetainingCapacity(); +} + +/// Used to generate the entry point. Unlike incremental patches, this always +/// contains all needed files for a route. 
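+/// The bundle is assembled by tracing imports from the framework's client
+/// entry point, the React Fast Refresh runtime (when enabled), and the
+/// route's server file, which reveals its client component boundaries.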
+fn generateClientBundle(dev: *DevServer, route: *Route) bun.OOM![]const u8 { + assert(route.client_bundle == null); + assert(route.server_state == .loaded); // page is unfit to load + + dev.graph_safety_lock.lock(); + defer dev.graph_safety_lock.unlock(); + + // Prepare bitsets + var sfa_state = std.heap.stackFallback(65536, dev.allocator); + + const sfa = sfa_state.get(); + dev.server_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.server_graph.bundled_files.count()); + defer dev.server_graph.affected_by_trace.deinit(sfa); + + dev.client_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.client_graph.bundled_files.count()); + defer dev.client_graph.affected_by_trace.deinit(sfa); + + // Run tracing + dev.client_graph.reset(); + + // Framework entry point is always needed. + try dev.client_graph.traceImports(IncrementalGraph(.client).framework_entry_point_index); + + // If react fast refresh is enabled, it will be imported by the runtime instantly. + if (dev.framework.react_fast_refresh != null) { + try dev.client_graph.traceImports(IncrementalGraph(.client).react_refresh_index); + } + + // Trace the route to the client components + try dev.server_graph.traceImports( + route.server_file.unwrap() orelse + Output.panic("File index for route not present", .{}), + ); + + return dev.client_graph.takeBundle(.initial_response); +} + +fn makeArrayForServerComponentsPatch(dev: *DevServer, global: *JSC.JSGlobalObject, items: []const IncrementalGraph(.server).FileIndex) JSValue { + if (items.len == 0) return .null; + const arr = JSC.JSArray.createEmpty(global, items.len); + const names = dev.server_graph.bundled_files.keys(); + for (items, 0..) |item, i| { + const str = bun.String.createUTF8(bun.path.relative(dev.cwd, names[item.get()])); + defer str.deref(); + arr.putIndex(global, @intCast(i), str.toJS(global)); + } + return arr; } pub const HotUpdateContext = struct { @@ -655,7 +960,6 @@ pub const HotUpdateContext = struct { scbs: bun.JSAst.ServerComponentBoundary.List.Slice, /// Which files have a server-component boundary. server_to_client_bitset: DynamicBitSetUnmanaged, - /// Used to reduce calls to the IncrementalGraph hash table. 
/// /// Caller initializes a slice with `sources.len * 2` items @@ -689,22 +993,28 @@ pub const HotUpdateContext = struct { /// Called at the end of BundleV2 to index bundle contents into the `IncrementalGraph`s pub fn finalizeBundle( dev: *DevServer, - linker: *bun.bundle_v2.LinkerContext, - chunk: *bun.bundle_v2.Chunk, + bv2: *bun.bundle_v2.BundleV2, + chunk: *const [2]bun.bundle_v2.Chunk, ) !void { - const input_file_sources = linker.parse_graph.input_files.items(.source); - const import_records = linker.parse_graph.ast.items(.import_records); - const targets = linker.parse_graph.ast.items(.target); - const scbs = linker.parse_graph.server_component_boundaries.slice(); + const input_file_sources = bv2.graph.input_files.items(.source); + const import_records = bv2.graph.ast.items(.import_records); + const targets = bv2.graph.ast.items(.target); + const scbs = bv2.graph.server_component_boundaries.slice(); - var sfa = std.heap.stackFallback(4096, linker.allocator); + var sfa = std.heap.stackFallback(4096, bv2.graph.allocator); const stack_alloc = sfa.get(); var scb_bitset = try bun.bit_set.DynamicBitSetUnmanaged.initEmpty(stack_alloc, input_file_sources.len); - for (scbs.list.items(.ssr_source_index)) |ssr_index| { + for ( + scbs.list.items(.source_index), + scbs.list.items(.ssr_source_index), + scbs.list.items(.reference_source_index), + ) |source_index, ssr_index, ref_index| { + scb_bitset.set(source_index); scb_bitset.set(ssr_index); + scb_bitset.set(ref_index); } - const resolved_index_cache = try linker.allocator.alloc(u32, input_file_sources.len * 2); + const resolved_index_cache = try bv2.graph.allocator.alloc(u32, input_file_sources.len * 2); var ctx: bun.bake.DevServer.HotUpdateContext = .{ .import_records = import_records, @@ -718,42 +1028,67 @@ pub fn finalizeBundle( // Pass 1, update the graph's nodes, resolving every bundler source // index into it's `IncrementalGraph(...).FileIndex` for ( - chunk.content.javascript.parts_in_chunk_in_order, - chunk.compile_results_for_chunk, + chunk[0].content.javascript.parts_in_chunk_in_order, + chunk[0].compile_results_for_chunk, ) |part_range, compile_result| { try dev.receiveChunk( &ctx, part_range.source_index, - targets[part_range.source_index.get()].bakeRenderer(), + targets[part_range.source_index.get()].bakeGraph(), compile_result, ); } - dev.client_graph.affected_by_update = try DynamicBitSetUnmanaged.initEmpty(linker.allocator, dev.client_graph.bundled_files.count()); - defer dev.client_graph.affected_by_update = .{}; - dev.server_graph.affected_by_update = try DynamicBitSetUnmanaged.initEmpty(linker.allocator, dev.server_graph.bundled_files.count()); - defer dev.client_graph.affected_by_update = .{}; + _ = chunk[1].content.css; // TODO: Index CSS files - ctx.server_seen_bit_set = try bun.bit_set.DynamicBitSetUnmanaged.initEmpty(linker.allocator, dev.server_graph.bundled_files.count()); + dev.client_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(bv2.graph.allocator, dev.client_graph.bundled_files.count()); + defer dev.client_graph.affected_by_trace = .{}; + dev.server_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(bv2.graph.allocator, dev.server_graph.bundled_files.count()); + defer dev.client_graph.affected_by_trace = .{}; + + ctx.server_seen_bit_set = try bun.bit_set.DynamicBitSetUnmanaged.initEmpty(bv2.graph.allocator, dev.server_graph.bundled_files.count()); // Pass 2, update the graph's edges by performing import diffing on each // changed file, removing dependencies. 
This pass also flags what routes // have been modified. - for (chunk.content.javascript.parts_in_chunk_in_order) |part_range| { + for (chunk[0].content.javascript.parts_in_chunk_in_order) |part_range| { try dev.processChunkDependencies( &ctx, part_range.source_index, - targets[part_range.source_index.get()].bakeRenderer(), - linker.allocator, + targets[part_range.source_index.get()].bakeGraph(), + bv2.graph.allocator, ); } + + // Index all failed files now that the incremental graph has been updated. + try dev.indexFailures(); +} + +pub fn handleParseTaskFailure( + dev: *DevServer, + graph: bake.Graph, + abs_path: []const u8, + log: *Log, +) bun.OOM!void { + // Print each error only once + Output.prettyErrorln("Errors while bundling '{s}':", .{ + bun.path.relative(dev.cwd, abs_path), + }); + Output.flush(); + log.printForLogLevel(Output.errorWriter()) catch {}; + + return switch (graph) { + .server => dev.server_graph.insertFailure(abs_path, log, false), + .ssr => dev.server_graph.insertFailure(abs_path, log, true), + .client => dev.client_graph.insertFailure(abs_path, log, false), + }; } pub fn receiveChunk( dev: *DevServer, ctx: *HotUpdateContext, index: bun.JSAst.Index, - side: bake.Renderer, + side: bake.Graph, chunk: bun.bundle_v2.CompileResult, ) !void { return switch (side) { @@ -767,7 +1102,7 @@ pub fn processChunkDependencies( dev: *DevServer, ctx: *HotUpdateContext, index: bun.JSAst.Index, - side: bake.Renderer, + side: bake.Graph, temp_alloc: Allocator, ) !void { return switch (side) { @@ -776,7 +1111,7 @@ pub fn processChunkDependencies( }; } -pub fn isFileStale(dev: *DevServer, path: []const u8, side: bake.Renderer) bool { +pub fn isFileStale(dev: *DevServer, path: []const u8, side: bake.Graph) bool { switch (side) { inline else => |side_comptime| { const g = switch (side_comptime) { @@ -791,118 +1126,10 @@ pub fn isFileStale(dev: *DevServer, path: []const u8, side: bake.Renderer) bool } } -// uws with bundle handlers - -fn onServerRequestWithBundle(route: *Route, bundle: Bundle, req: *Request, resp: *Response) void { - const dev = route.dev; - _ = bundle; - - // TODO: this does not move the body, reuse memory, and many other things - // that server.zig does. 
- const url_bun_string = bun.String.init(req.url()); - defer url_bun_string.deref(); - - const headers = JSC.FetchHeaders.createFromUWS(req); - const request_object = JSC.WebCore.Request.init( - url_bun_string, - headers, - dev.vm.initRequestBodyValue(.Null) catch bun.outOfMemory(), - bun.http.Method.which(req.method()) orelse .GET, - ).new(); - - const js_request = request_object.toJS(dev.server_global.js()); - - const global = dev.server_global.js(); - - const server_request_callback = dev.server_fetch_function_callback.get() orelse - unreachable; // did not bundle - - // TODO: use a custom class for this metadata type + revise the object structure too - const meta = JSValue.createEmptyObject(global, 1); - meta.put( - dev.server_global.js(), - bun.String.static("clientEntryPoint"), - bun.String.init(route.client_bundled_url).toJS(global), - ); - - var result = server_request_callback.call( - global, - .undefined, - &.{ - js_request, - meta, - route.module_name_string.?.toJS(dev.server_global.js()), - }, - ) catch |err| { - const exception = global.takeException(err); - const fail: Failure = .{ .request_handler = exception }; - fail.printToConsole(route); - fail.sendAsHttpResponse(resp, route); - return; - }; - - if (result.asAnyPromise()) |promise| { - dev.vm.waitForPromise(promise); - switch (promise.unwrap(dev.vm.jsc, .mark_handled)) { - .pending => unreachable, // was waited for - .fulfilled => |r| result = r, - .rejected => |e| { - const fail: Failure = .{ .request_handler = e }; - fail.printToConsole(route); - fail.sendAsHttpResponse(resp, route); - return; - }, - } - } - - // TODO: This interface and implementation is very poor. It is fine as - // the runtime currently emulates returning a `new Response` - // - // It probably should use code from `server.zig`, but most importantly it should - // not have a tie to DevServer, but instead be generic with a context structure - // containing just a *uws.App, *JSC.EventLoop, and JSValue response object. 
- // - // This would allow us to support all of the nice things `new Response` allows - - const bun_string = result.toBunString(dev.server_global.js()); - defer bun_string.deref(); - if (bun_string.tag == .Dead) { - bun.todoPanic(@src(), "Bake: support non-string return value", .{}); - } - - const utf8 = bun_string.toUTF8(dev.allocator); - defer utf8.deinit(); - - resp.writeStatus("200 OK"); - resp.writeHeader("Content-Type", MimeType.html.value); - resp.end(utf8.slice(), true); // TODO: You should never call res.end(huge buffer) -} - fn onFallbackRoute(_: void, _: *Request, resp: *Response) void { sendBuiltInNotFound(resp); } -// http helper functions - -fn sendOutputFile(file: *const OutputFile, resp: *Response) void { - switch (file.value) { - .buffer => |buffer| { - if (buffer.bytes.len == 0) { - resp.writeStatus("202 No Content"); - resp.writeHeaderInt("Content-Length", 0); - resp.end("", true); - return; - } - - resp.writeStatus("200 OK"); - // TODO: CSS, Sourcemap - resp.writeHeader("Content-Type", MimeType.javascript.value); - resp.end(buffer.bytes, true); // TODO: You should never call res.end(huge buffer) - }, - else => |unhandled_tag| Output.panic("TODO: unhandled tag .{s}", .{@tagName(unhandled_tag)}), - } -} - fn sendJavaScriptSource(code: []const u8, resp: *Response) void { if (code.len == 0) { resp.writeStatus("202 No Content"); @@ -917,12 +1144,90 @@ fn sendJavaScriptSource(code: []const u8, resp: *Response) void { resp.end(code, true); // TODO: You should never call res.end(huge buffer) } +const ErrorPageKind = enum { + /// Modules failed to bundle + bundler, + /// Modules failed to evaluate + evaluation, + /// Request handler threw + runtime, +}; + +fn sendSerializedFailures( + dev: *DevServer, + resp: *Response, + failures: []const SerializedFailure, + kind: ErrorPageKind, +) void { + resp.writeStatus("500 Internal Server Error"); + resp.writeHeader("Content-Type", MimeType.html.value); + + // TODO: what to do about return values here? + _ = resp.write(switch (kind) { + inline else => |k| std.fmt.comptimePrint( + \\ + \\ + \\ + \\ + \\ + \\Bun - {[page_title]s} + \\ + \\ + \\ + \\"; + + if (Environment.codegen_embed) { + _ = resp.end(pre ++ @embedFile("bake-codegen/bake.error.js") ++ post, false); + } else { + _ = resp.write(pre); + _ = resp.write(bun.runtimeEmbedFile(.codegen_eager, "bake.error.js")); + _ = resp.end(post, false); + } +} + fn sendBuiltInNotFound(resp: *Response) void { const message = "404 Not Found"; resp.writeStatus("404 Not Found"); resp.end(message, true); } +fn sendStubErrorMessage(dev: *DevServer, route: *Route, resp: *Response, err: JSValue) void { + var sfb = std.heap.stackFallback(65536, dev.allocator); + var a = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch bun.outOfMemory(); + + a.writer().print("Server route handler for '{s}' threw while loading\n\n", .{ + route.pattern, + }) catch bun.outOfMemory(); + route.dev.vm.printErrorLikeObjectSimple(err, a.writer(), false); + + resp.writeStatus("500 Internal Server Error"); + resp.end(a.items, true); // TODO: "You should never call res.end(huge buffer)" +} + /// The paradigm of Bake's incremental state is to store a separate list of files /// than the Graph in bundle_v2. When watch events happen, the bundler is run on /// the changed files, excluding non-stale files via `isFileStale`. @@ -978,7 +1283,7 @@ pub fn IncrementalGraph(side: bake.Side) type { /// /// Outside of an incremental bundle, this is empty. /// Backed by the bundler thread's arena allocator. 
-    affected_by_trace: DynamicBitSetUnmanaged,
+    affected_by_trace: DynamicBitSetUnmanaged,

    /// Byte length of every file queued for concatenation
    current_chunk_len: usize = 0,
            .edges = .{},
            .edges_free_list = .{},

-            .affected_by_update = .{},
+            .affected_by_trace = .{},

            .current_chunk_len = 0,
            .current_chunk_parts = .{},
        // code because there is only one instance of the server. Instead,
        // it stores which module graphs it is a part of. This makes sure
        // that recompilation knows what bundler options to use.
-        .server => struct {
-            // .server => packed struct(u8) {
+        .server => struct { // TODO: make this packed(u8), I had compiler crashes before
            /// Is this file built for the Server graph.
            is_rsc: bool,
            /// Is this file built for the SSR graph.
            is_ssr: bool,
-            /// This is a file is an entry point to the framework.
-            /// Changing this will always cause a full page reload.
-            is_special_framework_file: bool,
-            /// Changing code in a client component should rebuild code for
-            /// SSR, but it should not count as changing the server code
-            /// since a connected client can hot-update these files.
-            is_client_to_server_component_boundary: bool,
+            /// If set, the client graph contains a matching file.
+            /// The server
+            is_client_component_boundary: bool,
            /// If this file is a route root, the route can be looked up in
            /// the route list. This also stops dependency propagation.
            is_route: bool,
+            /// If the file has an error, the failure can be looked up
+            /// in the `.failures` map.
+            failed: bool,

-            unused: enum(u3) { unused = 0 } = .unused,
+            unused: enum(u2) { unused = 0 } = .unused,

-            fn stopsPropagation(flags: @This()) bool {
-                return flags.is_special_framework_file or
-                    flags.is_route or
-                    flags.is_client_to_server_component_boundary;
+            fn stopsDependencyTrace(flags: @This()) bool {
+                return flags.is_client_component_boundary;
            }
        },
        .client => struct {
-            /// Allocated by default_allocator
-            code: []const u8,
+            /// Allocated by default_allocator. Access with `.code()`
+            code_ptr: [*]const u8,
+            /// Separated from the pointer to reduce struct size.
+            /// Parser does not support files >4gb anyways.
+            code_len: u32,
+            flags: Flags,

-            inline fn stopsPropagation(_: @This()) bool {
+            const Flags = struct {
+                /// If the file has an error, the failure can be looked up
+                /// in the `.failures` map.
+                failed: bool,
+                /// If set, the client graph contains a matching file.
+                is_component_root: bool,
+                /// This file is an entry point to the framework.
+                /// Changing this will always cause a full page reload. 
+ is_special_framework_file: bool, + + kind: enum { js, css }, + }; + + comptime { + assert(@sizeOf(@This()) == @sizeOf(usize) * 2); + assert(@alignOf(@This()) == @alignOf([*]u8)); + } + + fn init(code_slice: []const u8, flags: Flags) @This() { + return .{ + .code_ptr = code_slice.ptr, + .code_len = @intCast(code_slice.len), + .flags = flags, + }; + } + + fn code(file: @This()) []const u8 { + return file.code_ptr[0..file.code_len]; + } + + inline fn stopsDependencyTrace(_: @This()) bool { return false; } }, @@ -1059,12 +1394,19 @@ pub fn IncrementalGraph(side: bake.Side) type { prev_dependency: EdgeIndex.Optional, }; - /// An index into `bundled_files`, `stale_files`, `first_dep`, `first_import`, or `affected_by_update` - pub const FileIndex = bun.GenericIndex(u32, File); + /// An index into `bundled_files`, `stale_files`, `first_dep`, `first_import`, or `affected_by_trace` + /// Top bits cannot be relied on due to `SerializedFailure.Owner.Packed` + pub const FileIndex = bun.GenericIndex(u30, File); + pub const framework_entry_point_index = FileIndex.init(0); + pub const react_refresh_index = if (side == .client) FileIndex.init(1); /// An index into `edges` const EdgeIndex = bun.GenericIndex(u32, Edge); + fn getFileIndex(g: *@This(), path: []const u8) ?FileIndex { + return if (g.bundled_files.getIndex(path)) |i| FileIndex.init(@intCast(i)) else null; + } + /// Tracks a bundled code chunk for cross-bundle chunks, /// ensuring it has an entry in `bundled_files`. /// @@ -1075,12 +1417,13 @@ pub fn IncrementalGraph(side: bake.Side) type { /// takeChunk is called. Then it can be freed. pub fn receiveChunk( g: *@This(), - ctx: *const HotUpdateContext, + ctx: *HotUpdateContext, index: bun.JSAst.Index, chunk: bun.bundle_v2.CompileResult, is_ssr_graph: bool, ) !void { - g.owner().graph_safety_lock.assertLocked(); + const dev = g.owner(); + dev.graph_safety_lock.assertLocked(); const abs_path = ctx.sources[index.get()].path.text; @@ -1100,8 +1443,8 @@ pub fn IncrementalGraph(side: bake.Side) type { g.current_chunk_len += code.len; - if (g.owner().dump_dir) |dump_dir| { - const cwd = g.owner().cwd; + if (dev.dump_dir) |dump_dir| { + const cwd = dev.cwd; var a: bun.PathBuffer = undefined; var b: [bun.MAX_PATH_BYTES * 2]u8 = undefined; const rel_path = bun.path.relativeBufZ(&a, cwd, abs_path); @@ -1117,16 +1460,17 @@ pub fn IncrementalGraph(side: bake.Side) type { }; } - const gop = try g.bundled_files.getOrPut(g.owner().allocator, abs_path); + const gop = try g.bundled_files.getOrPut(dev.allocator, abs_path); + const file_index = FileIndex.init(@intCast(gop.index)); if (!gop.found_existing) { gop.key_ptr.* = try bun.default_allocator.dupe(u8, abs_path); - try g.first_dep.append(g.owner().allocator, .none); - try g.first_import.append(g.owner().allocator, .none); - } else { - if (g.stale_files.bit_length > gop.index) { - g.stale_files.unset(gop.index); - } + try g.first_dep.append(dev.allocator, .none); + try g.first_import.append(dev.allocator, .none); + } + + if (g.stale_files.bit_length > gop.index) { + g.stale_files.unset(gop.index); } ctx.getCachedIndex(side, index).* = FileIndex.init(@intCast(gop.index)); @@ -1134,36 +1478,77 @@ pub fn IncrementalGraph(side: bake.Side) type { switch (side) { .client => { if (gop.found_existing) { - bun.default_allocator.free(gop.value_ptr.code); + bun.default_allocator.free(gop.value_ptr.code()); + + if (gop.value_ptr.flags.failed) { + const kv = dev.bundling_failures.fetchSwapRemoveAdapted( + SerializedFailure.Owner{ .client = file_index }, + 
SerializedFailure.ArrayHashAdapter{}, + ) orelse + Output.panic("Missing failure in IncrementalGraph", .{}); + try dev.incremental_result.failures_removed.append( + dev.allocator, + kv.key, + ); + } } - gop.value_ptr.* = .{ - .code = code, - }; - try g.current_chunk_parts.append(g.owner().allocator, FileIndex.init(@intCast(gop.index))); + gop.value_ptr.* = File.init(code, .{ + .failed = false, + .is_component_root = ctx.server_to_client_bitset.isSet(index.get()), + .is_special_framework_file = false, + .kind = .js, + }); + try g.current_chunk_parts.append(dev.allocator, file_index); }, .server => { if (!gop.found_existing) { + const client_component_boundary = ctx.server_to_client_bitset.isSet(index.get()); + gop.value_ptr.* = .{ .is_rsc = !is_ssr_graph, .is_ssr = is_ssr_graph, .is_route = false, - .is_client_to_server_component_boundary = ctx.server_to_client_bitset.isSet(index.get()), - .is_special_framework_file = false, // TODO: set later + .is_client_component_boundary = client_component_boundary, + .failed = false, }; + + if (client_component_boundary) { + try dev.incremental_result.client_components_added.append(dev.allocator, file_index); + } } else { if (is_ssr_graph) { gop.value_ptr.is_ssr = true; } else { gop.value_ptr.is_rsc = true; } + if (ctx.server_to_client_bitset.isSet(index.get())) { - gop.value_ptr.is_client_to_server_component_boundary = true; - } else if (gop.value_ptr.is_client_to_server_component_boundary) { - // TODO: free the other graph's file - gop.value_ptr.is_client_to_server_component_boundary = false; + gop.value_ptr.is_client_component_boundary = true; + try dev.incremental_result.client_components_added.append(dev.allocator, file_index); + } else if (gop.value_ptr.is_client_component_boundary) { + const client_graph = &g.owner().client_graph; + const client_index = client_graph.getFileIndex(gop.key_ptr.*) orelse + Output.panic("Client graph's SCB was already deleted", .{}); + try dev.incremental_result.delete_client_files_later.append(g.owner().allocator, client_index); + gop.value_ptr.is_client_component_boundary = false; + + try dev.incremental_result.client_components_removed.append(dev.allocator, file_index); + } + + if (gop.value_ptr.failed) { + gop.value_ptr.failed = false; + const kv = dev.bundling_failures.fetchSwapRemoveAdapted( + SerializedFailure.Owner{ .server = file_index }, + SerializedFailure.ArrayHashAdapter{}, + ) orelse + Output.panic("Missing failure in IncrementalGraph", .{}); + try dev.incremental_result.failures_removed.append( + dev.allocator, + kv.key, + ); } } - try g.current_chunk_parts.append(g.owner().allocator, chunk.code()); + try g.current_chunk_parts.append(dev.allocator, chunk.code()); }, } } @@ -1234,33 +1619,42 @@ pub fn IncrementalGraph(side: bake.Side) type { if (!val.seen) { // Unlink from dependency list. At this point the edge is // already detached from the import list. 
- const edge = &g.edges.items[val.edge_index.get()]; - log("detach edge={d} | id={d} {} -> id={d} {}", .{ - val.edge_index.get(), - edge.dependency.get(), - bun.fmt.quote(g.bundled_files.keys()[edge.dependency.get()]), - edge.imported.get(), - bun.fmt.quote(g.bundled_files.keys()[edge.imported.get()]), - }); - if (edge.prev_dependency.unwrap()) |prev| { - const prev_dependency = &g.edges.items[prev.get()]; - prev_dependency.next_dependency = edge.next_dependency; - } else { - assert(g.first_dep.items[edge.imported.get()].unwrap() == val.edge_index); - g.first_dep.items[edge.imported.get()] = .none; - } - if (edge.next_dependency.unwrap()) |next| { - const next_dependency = &g.edges.items[next.get()]; - next_dependency.prev_dependency = edge.prev_dependency; - } + g.disconnectEdgeFromDependencyList(val.edge_index); // With no references to this edge, it can be freed - try g.freeEdge(val.edge_index); + g.freeEdge(val.edge_index); } } - // Follow this node to it's HMR root - try g.propagateHotUpdate(file_index); + if (side == .server) { + // Follow this file to the route to mark it as stale. + try g.traceDependencies(file_index, .stop_at_boundary); + } else { + // TODO: Follow this file to the HMR root (info to determine is currently not stored) + // without this, changing a client-only file will not mark the route's client bundle as stale + } + } + + fn disconnectEdgeFromDependencyList(g: *@This(), edge_index: EdgeIndex) void { + const edge = &g.edges.items[edge_index.get()]; + igLog("detach edge={d} | id={d} {} -> id={d} {}", .{ + edge_index.get(), + edge.dependency.get(), + bun.fmt.quote(g.bundled_files.keys()[edge.dependency.get()]), + edge.imported.get(), + bun.fmt.quote(g.bundled_files.keys()[edge.imported.get()]), + }); + if (edge.prev_dependency.unwrap()) |prev| { + const prev_dependency = &g.edges.items[prev.get()]; + prev_dependency.next_dependency = edge.next_dependency; + } else { + assert(g.first_dep.items[edge.imported.get()].unwrap() == edge_index); + g.first_dep.items[edge.imported.get()] = .none; + } + if (edge.next_dependency.unwrap()) |next| { + const next_dependency = &g.edges.items[next.get()]; + next_dependency.prev_dependency = edge.prev_dependency; + } } fn processChunkImportRecords( @@ -1321,41 +1715,61 @@ pub fn IncrementalGraph(side: bake.Side) type { } } - fn propagateHotUpdate(g: *@This(), file_index: FileIndex) !void { + const TraceDependencyKind = enum { + stop_at_boundary, + no_stop, + }; + + fn traceDependencies(g: *@This(), file_index: FileIndex, trace_kind: TraceDependencyKind) !void { + g.owner().graph_safety_lock.assertLocked(); + if (Environment.enable_logs) { - igLog("propagateHotUpdate(.{s}, {}{s})", .{ + igLog("traceDependencies(.{s}, {}{s})", .{ @tagName(side), bun.fmt.quote(g.bundled_files.keys()[file_index.get()]), - if (g.affected_by_update.isSet(file_index.get())) " [already visited]" else "", + if (g.affected_by_trace.isSet(file_index.get())) " [already visited]" else "", }); } - if (g.affected_by_update.isSet(file_index.get())) + if (g.affected_by_trace.isSet(file_index.get())) return; - g.affected_by_update.set(file_index.get()); + g.affected_by_trace.set(file_index.get()); const file = g.bundled_files.values()[file_index.get()]; switch (side) { .server => { + const dev = g.owner(); if (file.is_route) { - const route_index = g.owner().route_lookup.get(file_index) orelse + const route_index = dev.route_lookup.get(file_index) orelse Output.panic("Route not in lookup index: {d} {}", .{ file_index.get(), 
bun.fmt.quote(g.bundled_files.keys()[file_index.get()]) }); igLog("\\<- Route", .{}); - try g.owner().incremental_result.routes_affected.append(g.owner().allocator, route_index); + + try dev.incremental_result.routes_affected.append(dev.allocator, route_index); + } + if (file.is_client_component_boundary) { + try dev.incremental_result.client_components_affected.append(dev.allocator, file_index); } }, .client => { - // igLog("\\<- client side track", .{}); + if (file.flags.is_component_root) { + const dev = g.owner(); + const key = g.bundled_files.keys()[file_index.get()]; + const index = dev.server_graph.getFileIndex(key) orelse + Output.panic("Server Incremental Graph is missing component for {}", .{bun.fmt.quote(key)}); + try dev.server_graph.traceDependencies(index, trace_kind); + } }, } // Certain files do not propagate updates to dependencies. // This is how updating a client component doesn't cause // a server-side reload. - if (file.stopsPropagation()) { - igLog("\\<- this file stops propagation", .{}); - return; + if (trace_kind == .stop_at_boundary) { + if (file.stopsDependencyTrace()) { + igLog("\\<- this file stops propagation", .{}); + return; + } } // Recurse @@ -1363,7 +1777,50 @@ pub fn IncrementalGraph(side: bake.Side) type { while (it) |dep_index| { const edge = g.edges.items[dep_index.get()]; it = edge.next_dependency.unwrap(); - try g.propagateHotUpdate(edge.dependency); + try g.traceDependencies(edge.dependency, trace_kind); + } + } + + fn traceImports(g: *@This(), file_index: FileIndex) !void { + g.owner().graph_safety_lock.assertLocked(); + + if (Environment.enable_logs) { + igLog("traceImports(.{s}, {}{s})", .{ + @tagName(side), + bun.fmt.quote(g.bundled_files.keys()[file_index.get()]), + if (g.affected_by_trace.isSet(file_index.get())) " [already visited]" else "", + }); + } + + if (g.affected_by_trace.isSet(file_index.get())) + return; + g.affected_by_trace.set(file_index.get()); + + const file = g.bundled_files.values()[file_index.get()]; + + switch (side) { + .server => { + if (file.is_client_component_boundary) { + const dev = g.owner(); + const key = g.bundled_files.keys()[file_index.get()]; + const index = dev.client_graph.getFileIndex(key) orelse + Output.panic("Client Incremental Graph is missing component for {}", .{bun.fmt.quote(key)}); + try dev.client_graph.traceImports(index); + } + }, + .client => { + assert(!g.stale_files.isSet(file_index.get())); // should not be left stale + try g.current_chunk_parts.append(g.owner().allocator, file_index); + g.current_chunk_len += file.code_len; + }, + } + + // Recurse + var it: ?EdgeIndex = g.first_import.items[file_index.get()].unwrap(); + while (it) |dep_index| { + const edge = g.edges.items[dep_index.get()]; + it = edge.next_import.unwrap(); + try g.traceImports(edge.imported); } } @@ -1391,21 +1848,31 @@ pub fn IncrementalGraph(side: bake.Side) type { try g.first_dep.append(g.owner().allocator, .none); try g.first_import.append(g.owner().allocator, .none); } else { - if (g.stale_files.bit_length > gop.index) { - g.stale_files.set(gop.index); - } if (side == .server) { if (is_route) gop.value_ptr.*.is_route = is_route; } } + if (is_route) { + g.owner().routes[route_index.get()].server_file = file_index.toOptional(); + } + + if (g.stale_files.bit_length > gop.index) { + g.stale_files.set(gop.index); + } + if (is_route) { try g.owner().route_lookup.put(g.owner().allocator, file_index, route_index); } switch (side) { .client => { - gop.value_ptr.* = .{ .code = "" }; + gop.value_ptr.* = File.init("", .{ + .failed 
= false, + .is_component_root = false, + .is_special_framework_file = false, + .kind = .js, + }); }, .server => { if (!gop.found_existing) { @@ -1413,8 +1880,8 @@ pub fn IncrementalGraph(side: bake.Side) type { .is_rsc = !is_ssr_graph, .is_ssr = is_ssr_graph, .is_route = is_route, - .is_client_to_server_component_boundary = false, - .is_special_framework_file = false, + .is_client_component_boundary = false, + .failed = false, }; } else if (is_ssr_graph) { gop.value_ptr.is_ssr = true; @@ -1427,8 +1894,83 @@ pub fn IncrementalGraph(side: bake.Side) type { return file_index; } + pub fn insertFailure( + g: *@This(), + abs_path: []const u8, + log: *const Log, + is_ssr_graph: bool, + ) bun.OOM!void { + g.owner().graph_safety_lock.assertLocked(); + + debug.log("Insert stale: {s}", .{abs_path}); + const gop = try g.bundled_files.getOrPut(g.owner().allocator, abs_path); + const file_index = FileIndex.init(@intCast(gop.index)); + + if (!gop.found_existing) { + gop.key_ptr.* = try bun.default_allocator.dupe(u8, abs_path); + try g.first_dep.append(g.owner().allocator, .none); + try g.first_import.append(g.owner().allocator, .none); + } + + if (g.stale_files.bit_length > gop.index) { + g.stale_files.set(gop.index); + } + + switch (side) { + .client => { + gop.value_ptr.* = File.init("", .{ + .failed = true, + .is_component_root = false, + .is_special_framework_file = false, + .kind = .js, + }); + }, + .server => { + if (!gop.found_existing) { + gop.value_ptr.* = .{ + .is_rsc = !is_ssr_graph, + .is_ssr = is_ssr_graph, + .is_route = false, + .is_client_component_boundary = false, + .failed = true, + }; + } else { + if (is_ssr_graph) { + gop.value_ptr.is_ssr = true; + } else { + gop.value_ptr.is_rsc = true; + } + gop.value_ptr.failed = true; + } + }, + } + + const dev = g.owner(); + + const fail_owner: SerializedFailure.Owner = switch (side) { + .server => .{ .server = file_index }, + .client => .{ .client = file_index }, + }; + const failure = try SerializedFailure.initFromLog(fail_owner, log.msgs.items); + const fail_gop = try dev.bundling_failures.getOrPut(dev.allocator, failure); + try dev.incremental_result.failures_added.append(dev.allocator, failure); + if (fail_gop.found_existing) { + try dev.incremental_result.failures_removed.append(dev.allocator, fail_gop.key_ptr.*); + fail_gop.key_ptr.* = failure; + } + } + pub fn ensureStaleBitCapacity(g: *@This(), val: bool) !void { - try g.stale_files.resize(g.owner().allocator, @max(g.bundled_files.count(), g.stale_files.bit_length), val); + try g.stale_files.resize( + g.owner().allocator, + std.mem.alignForward( + usize, + @max(g.bundled_files.count(), g.stale_files.bit_length), + // allocate 8 in 8 usize chunks + std.mem.byte_size_in_bits * @sizeOf(usize) * 8, + ), + val, + ); } pub fn invalidate(g: *@This(), paths: []const []const u8, out_paths: *std.ArrayList(BakeEntryPoint)) !void { @@ -1442,13 +1984,19 @@ pub fn IncrementalGraph(side: bake.Side) type { continue; }; g.stale_files.set(index); + const data = &values[index]; switch (side) { - .client => try out_paths.append(BakeEntryPoint.init(path, .client)), + .client => { + // When re-bundling SCBs, only bundle the server. Otherwise + // the bundler gets confused and bundles both sides without + // knowledge of the boundary between them. 
+ if (!data.flags.is_component_root) + try out_paths.append(BakeEntryPoint.init(path, .client)); + }, .server => { - const data = &values[index]; if (data.is_rsc) try out_paths.append(BakeEntryPoint.init(path, .server)); - if (data.is_ssr) + if (data.is_ssr and !data.is_client_component_boundary) try out_paths.append(BakeEntryPoint.init(path, .ssr)); }, } @@ -1462,7 +2010,9 @@ pub fn IncrementalGraph(side: bake.Side) type { pub fn takeBundle(g: *@This(), kind: ChunkKind) ![]const u8 { g.owner().graph_safety_lock.assertLocked(); - if (g.current_chunk_len == 0) return ""; + // initial bundle needs at least the entry point + // hot updates shouldnt be emitted if there are no chunks + assert(g.current_chunk_len > 0); const runtime = switch (kind) { .initial_response => bun.bake.getHmrRuntime(side), @@ -1485,7 +2035,7 @@ pub fn IncrementalGraph(side: bake.Side) type { const entry = switch (side) { .server => fw.entry_server, .client => fw.entry_client, - } orelse bun.todoPanic(@src(), "non-framework provided entry-point", .{}); + }; try bun.js_printer.writeJSONString( bun.path.relative(g.owner().cwd, entry), @TypeOf(w), @@ -1533,13 +2083,12 @@ pub fn IncrementalGraph(side: bake.Side) type { for (g.current_chunk_parts.items) |entry| { chunk.appendSliceAssumeCapacity(switch (side) { // entry is an index into files - .client => files[entry.get()].code, + .client => files[entry.get()].code(), // entry is the '[]const u8' itself .server => entry, }); } chunk.appendSliceAssumeCapacity(end); - // bun.assert_eql(chunk.capacity, chunk.items.len); if (g.owner().dump_dir) |dump_dir| { const rel_path_escaped = "latest_chunk.js"; @@ -1555,6 +2104,62 @@ pub fn IncrementalGraph(side: bake.Side) type { return chunk.items; } + fn disconnectAndDeleteFile(g: *@This(), file_index: FileIndex) void { + const last = FileIndex.init(@intCast(g.bundled_files.count() - 1)); + + bun.assert(g.bundled_files.count() > 1); // never remove all files + + bun.assert(g.first_dep.items[file_index.get()] == .none); // must have no dependencies + + // Disconnect all imports + { + var it: ?EdgeIndex = g.first_import.items[file_index.get()].unwrap(); + while (it) |edge_index| { + const dep = g.edges.items[edge_index.get()]; + it = dep.next_import.unwrap(); + assert(dep.dependency == file_index); + + g.disconnectEdgeFromDependencyList(edge_index); + g.freeEdge(edge_index); + } + } + + g.bundled_files.swapRemoveAt(file_index.get()); + + // Move out-of-line data from `last` to replace `file_index` + _ = g.first_dep.swapRemove(file_index.get()); + _ = g.first_import.swapRemove(file_index.get()); + + if (file_index != last) { + g.stale_files.setValue(file_index.get(), g.stale_files.isSet(last.get())); + + // This set is not always initialized, so ignore if it's empty + if (g.affected_by_trace.bit_length > 0) { + g.affected_by_trace.setValue(file_index.get(), g.affected_by_trace.isSet(last.get())); + } + + // Adjust all referenced edges to point to the new file + { + var it: ?EdgeIndex = g.first_import.items[file_index.get()].unwrap(); + while (it) |edge_index| { + const dep = &g.edges.items[edge_index.get()]; + it = dep.next_import.unwrap(); + assert(dep.dependency == last); + dep.dependency = file_index; + } + } + { + var it: ?EdgeIndex = g.first_dep.items[file_index.get()].unwrap(); + while (it) |edge_index| { + const dep = &g.edges.items[edge_index.get()]; + it = dep.next_dependency.unwrap(); + assert(dep.imported == last); + dep.imported = file_index; + } + } + } + } + fn newEdge(g: *@This(), edge: Edge) !EdgeIndex { if 
(g.edges_free_list.popOrNull()) |index| {
                g.edges.items[index.get()] = edge;
@@ -1568,15 +2173,18 @@ pub fn IncrementalGraph(side: bake.Side) type {

            /// Does nothing besides release the `Edge` for reallocation by `newEdge`
            /// Caller must detach the dependency from the linked list it is in.
-            fn freeEdge(g: *@This(), dep_index: EdgeIndex) !void {
+            fn freeEdge(g: *@This(), edge_index: EdgeIndex) void {
                if (Environment.isDebug) {
-                    g.edges.items[dep_index.get()] = undefined;
+                    g.edges.items[edge_index.get()] = undefined;
                }
-                if (dep_index.get() == (g.edges.items.len - 1)) {
+                if (edge_index.get() == (g.edges.items.len - 1)) {
                    g.edges.items.len -= 1;
                } else {
-                    try g.edges_free_list.append(g.owner().allocator, dep_index);
+                    g.edges_free_list.append(g.owner().allocator, edge_index) catch {
+                        // Leak an edge object; OK since it may get cleaned up by
+                        // the next incremental graph garbage-collection cycle.
+                    };
                }
            }
@@ -1587,14 +2195,56 @@ pub fn IncrementalGraph(side: bake.Side) type {
 }

 const IncrementalResult = struct {
+    /// When tracing a file's dependencies via `traceDependencies`, this is
+    /// populated with the routes that were hit. Tracing is used for many purposes.
     routes_affected: ArrayListUnmanaged(Route.Index),

+    // The following three fields are populated during `receiveChunk`
+
+    /// Components to add to the client manifest
+    client_components_added: ArrayListUnmanaged(IncrementalGraph(.server).FileIndex),
+    /// Components to remove from the client manifest
+    client_components_removed: ArrayListUnmanaged(IncrementalGraph(.server).FileIndex),
+    /// This list acts as a free list. The contents of these slices must remain
+    /// valid so that the affected routes can be cleared of their failures and
+    /// potentially be marked valid. At the end of an incremental update, the
+    /// slices are freed.
+    failures_removed: ArrayListUnmanaged(SerializedFailure),
+
+    /// Client boundaries that have been added or modified. At the end of a hot
+    /// update, these are traced to their route to mark the client bundles as
+    /// stale (to be regenerated on Cmd+R).
+    ///
+    /// Populated during `traceDependencies`
+    client_components_affected: ArrayListUnmanaged(IncrementalGraph(.server).FileIndex),
+
+    /// The list of failures which will have to be traced to their route. Such
+    /// tracing is deferred until the second pass of finalizeBundler as the
+    /// dependency graph may not fully exist at the time the failure is indexed.
+    ///
+    /// Populated from within the bundler via `handleParseTaskFailure`
+    failures_added: ArrayListUnmanaged(SerializedFailure),
+
+    /// Removing files clobbers indices, so removing anything is deferred.
+ delete_client_files_later: ArrayListUnmanaged(IncrementalGraph(.client).FileIndex), + const empty: IncrementalResult = .{ .routes_affected = .{}, + .failures_removed = .{}, + .failures_added = .{}, + .client_components_added = .{}, + .client_components_removed = .{}, + .client_components_affected = .{}, + .delete_client_files_later = .{}, }; fn reset(result: *IncrementalResult) void { result.routes_affected.clearRetainingCapacity(); + assert(result.failures_removed.items.len == 0); + result.failures_added.clearRetainingCapacity(); + result.client_components_added.clearRetainingCapacity(); + result.client_components_removed.clearRetainingCapacity(); + result.client_components_affected.clearRetainingCapacity(); } }; @@ -1630,7 +2280,7 @@ const DirectoryWatchStore = struct { store: *DirectoryWatchStore, import_source: []const u8, specifier: []const u8, - renderer: bake.Renderer, + renderer: bake.Graph, ) bun.OOM!void { store.lock.lock(); defer store.lock.unlock(); @@ -1848,132 +2498,215 @@ const ChunkKind = enum { hmr_chunk, }; -/// Represents an error from loading or server sided runtime. Information on -/// what this error is from, such as the associated Route, is inferred from -/// surrounding context. +/// Errors sent to the HMR client in the browser are serialized. The same format +/// is used for thrown JavaScript exceptions as well as bundler errors. +/// Serialized failures contain a handle on what file or route they came from, +/// which allows the bundler to dismiss or update stale failures via index as +/// opposed to re-sending a new payload. This also means only changed files are +/// rebuilt, instead of all of the failed files. /// -/// In the case a route was not able to fully compile, the `Failure` is stored -/// so that a browser refreshing the page can display this failure. -const Failure = union(enum) { - zig_error: anyerror, - /// Bundler and module resolution use `bun.logger` to report multiple errors at once. - bundler: std.ArrayList(bun.logger.Msg), - /// Thrown JavaScript exception while loading server code. - server_load: JSC.Strong, - /// Never stored; the current request handler threw an error. - request_handler: JSValue, +/// The HMR client in the browser is expected to sort the final list of errors +/// for deterministic output; there is code in DevServer that uses `swapRemove`. +pub const SerializedFailure = struct { + /// Serialized data is always owned by default_allocator + /// The first 32 bits of this slice contain the owner + data: []u8, - /// Consumes the Log data, resetting it. - pub fn fromLog(log: *Log) Failure { - const fail: Failure = .{ .bundler = log.msgs }; - log.* = .{ - .msgs = std.ArrayList(bun.logger.Msg).init(log.msgs.allocator), - .level = log.level, + pub fn deinit(f: SerializedFailure) void { + bun.default_allocator.free(f.data); + } + + /// The metaphorical owner of an incremental file error. The packed variant + /// is given to the HMR runtime as an opaque handle. 
+ pub const Owner = union(enum) { + none, + route: Route.Index, + client: IncrementalGraph(.client).FileIndex, + server: IncrementalGraph(.server).FileIndex, + + pub fn encode(owner: Owner) Packed { + return switch (owner) { + .none => .{ .kind = .none, .data = 0 }, + .client => |data| .{ .kind = .client, .data = data.get() }, + .server => |data| .{ .kind = .server, .data = data.get() }, + .route => |data| .{ .kind = .route, .data = data.get() }, + }; + } + + pub const Packed = packed struct(u32) { + kind: enum(u2) { none, route, client, server }, + data: u30, + + pub fn decode(owner: Packed) Owner { + return switch (owner.kind) { + .none => .none, + .client => .{ .client = IncrementalGraph(.client).FileIndex.init(owner.data) }, + .server => .{ .server = IncrementalGraph(.server).FileIndex.init(owner.data) }, + .route => .{ .route = Route.Index.init(owner.data) }, + }; + } }; - return fail; + }; + + fn getOwner(failure: SerializedFailure) Owner { + return std.mem.bytesAsValue(Owner.Packed, failure.data[0..4]).decode(); } - pub fn fromJSServerLoad(js: JSValue, global: *JSC.JSGlobalObject) Failure { - return .{ .server_load = JSC.Strong.create(js, global) }; + /// This assumes the hash map contains only one SerializedFailure per owner. + /// This is okay since SerializedFailure can contain more than one error. + const ArrayHashContextViaOwner = struct { + pub fn hash(_: ArrayHashContextViaOwner, k: SerializedFailure) u32 { + return std.hash.uint32(@bitCast(k.getOwner().encode())); + } + + pub fn eql(_: ArrayHashContextViaOwner, a: SerializedFailure, b: SerializedFailure, _: usize) bool { + return @as(u32, @bitCast(a.getOwner().encode())) == @as(u32, @bitCast(b.getOwner().encode())); + } + }; + + const ArrayHashAdapter = struct { + pub fn hash(_: ArrayHashAdapter, own: Owner) u32 { + return std.hash.uint32(@bitCast(own.encode())); + } + + pub fn eql(_: ArrayHashAdapter, a: Owner, b: SerializedFailure, _: usize) bool { + return @as(u32, @bitCast(a.encode())) == @as(u32, @bitCast(b.getOwner().encode())); + } + }; + + const ErrorKind = enum(u8) { + // A log message. The `logger.Kind` is encoded here. + bundler_log_err = 0, + bundler_log_warn = 1, + bundler_log_note = 2, + bundler_log_debug = 3, + bundler_log_verbose = 4, + + /// new Error(message) + js_error, + /// new TypeError(message) + js_error_type, + /// new RangeError(message) + js_error_range, + /// Other forms of `Error` objects, including when an error has a + /// `code`, and other fields. + js_error_extra, + /// Non-error with a stack trace + js_primitive_exception, + /// Non-error JS values + js_primitive, + /// new AggregateError(errors, message) + js_aggregate, + }; + + pub fn initFromJs(owner: Owner, value: JSValue) !SerializedFailure { + { + _ = value; + @panic("TODO"); + } + // Avoid small re-allocations without requesting so much from the heap + var sfb = std.heap.stackFallback(65536, bun.default_allocator); + var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch + unreachable; // enough space + const w = payload.writer(); + + try w.writeInt(u32, @bitCast(owner.encode()), .little); + // try writeJsValue(value); + + // Avoid-recloning if it is was moved to the hap + const data = if (payload.items.ptr == &sfb.buffer) + try bun.default_allocator.dupe(u8, payload.items) + else + payload.items; + + return .{ .data = data }; } - // TODO: deduplicate the two methods here. that isnt trivial because one has to - // style with ansi codes, and the other has to style with HTML. 
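For reference, here is a minimal TypeScript sketch of decoding this packed handle on the client. It assumes Zig's `packed struct(u32)` layout, which places the first field in the least-significant bits, and the little-endian `u32` that `initFromLog` writes at the start of each payload; `OwnerKind` mirrors the order of the `kind` enum above:

```ts
// Sketch only: decode the packed failure owner emitted by SerializedFailure.
// Bits 0-1 hold the kind; bits 2-31 hold the index (a Route or FileIndex).
const enum OwnerKind {
  none = 0,
  route = 1,
  client = 2,
  server = 3,
}

function decodeOwner(view: DataView, byteOffset: number) {
  const packed = view.getUint32(byteOffset, true); // little-endian u32
  return { kind: (packed & 0b11) as OwnerKind, index: packed >>> 2 };
}
```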
+        pub fn initFromLog(owner: Owner, messages: []const bun.logger.Msg) !SerializedFailure {
+            // Avoid small re-allocations without requesting so much from the heap
+            var sfb = std.heap.stackFallback(65536, bun.default_allocator);
+            var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch
+                unreachable; // enough space
+            const w = payload.writer();

-    fn printToConsole(fail: *const Failure, route: *const Route) void {
-        // TODO: remove dependency on `route`
-        defer Output.flush();
+            try w.writeInt(u32, @bitCast(owner.encode()), .little);

-        Output.prettyErrorln("", .{});
+            try w.writeInt(u32, @intCast(messages.len), .little);

-        switch (fail.*) {
-            .bundler => |msgs| {
-                Output.prettyErrorln("Errors while bundling '{s}'", .{
-                    route.pattern,
-                });
-                Output.flush();
+            for (messages) |*msg| {
+                try writeLogMsg(msg, w);
+            }

-                var log: Log = .{ .msgs = msgs, .errors = 1, .level = .err };
-                log.printForLogLevelColorsRuntime(
-                    Output.errorWriter(),
-                    Output.enable_ansi_colors_stderr,
-                ) catch {};
-            },
-            .zig_error => |err| {
-                Output.prettyErrorln("Error while bundling '{s}': {s}", .{
-                    route.pattern,
-                    @errorName(err),
-                });
-                Output.flush();
-            },
-            .server_load => |strong| {
-                Output.prettyErrorln("Server route handler for '{s}' threw while loading", .{
-                    route.pattern,
-                });
-                Output.flush();
+            // Avoid re-cloning if it was moved to the heap
+            const data = if (payload.items.ptr == &sfb.buffer)
+                try bun.default_allocator.dupe(u8, payload.items)
+            else
+                payload.items;

-                const err = strong.get() orelse unreachable;
-                route.dev.vm.printErrorLikeObjectToConsole(err);
-            },
-            .request_handler => |err| {
-                Output.prettyErrorln("Request to handler '{s}' failed SSR", .{
-                    route.pattern,
-                });
-                Output.flush();
+            return .{ .data = data };
+        }

-                route.dev.vm.printErrorLikeObjectToConsole(err);
-            },
+        // All "write" functions get a corresponding "read" function in ./client/error-serialization.ts
+
+        const Writer = std.ArrayList(u8).Writer;
+
+        fn writeLogMsg(msg: *const bun.logger.Msg, w: Writer) !void {
+            try w.writeByte(switch (msg.kind) {
+                inline else => |k| @intFromEnum(@field(ErrorKind, "bundler_log_" ++ @tagName(k))),
+            });
+            try writeLogData(msg.data, w);
+            const notes = msg.notes orelse &.{};
+            try w.writeInt(u32, @intCast(notes.len), .little);
+            for (notes) |note| {
+                try writeLogData(note, w);
+            }
         }

-    fn sendAsHttpResponse(fail: *const Failure, resp: *Response, route: *const Route) void {
-        resp.writeStatus("500 Internal Server Error");
-        var buffer: [32768]u8 = undefined;
+        fn writeLogData(data: bun.logger.Data, w: Writer) !void {
+            try writeString32(data.text, w);
+            if (data.location) |loc| {
+                assert(loc.line >= 0); // one-based and not negative
+                assert(loc.column >= 0); // zero-based and not negative

-        const message = message: {
-            var fbs = std.io.fixedBufferStream(&buffer);
-            const writer = fbs.writer();
+                try w.writeInt(u32, @intCast(loc.line), .little);
+                try w.writeInt(u32, @intCast(loc.column), .little);

-            switch (fail.*) {
-                .bundler => |msgs| {
-                    writer.print("Errors while bundling '{s}'\n\n", .{
-                        route.pattern,
-                    }) catch break :message null;
-
-                    var log: Log = .{ .msgs = msgs, .errors = 1, .level = .err };
-                    log.printForLogLevelWithEnableAnsiColors(writer, false) catch
-                        break :message null;
-                },
-                .zig_error => |err| {
-                    writer.print("Error while bundling '{s}': {s}\n", .{ route.pattern, @errorName(err) }) catch break :message null;
-                },
-                .server_load => |strong| {
-                    writer.print("Server route handler for '{s}' threw while loading\n\n", .{
-                        route.pattern,
-                    }) catch break :message
null; - const err = strong.get() orelse unreachable; - route.dev.vm.printErrorLikeObjectSimple(err, writer, false); - }, - .request_handler => |err| { - writer.print("Server route handler for '{s}' threw while loading\n\n", .{ - route.pattern, - }) catch break :message null; - route.dev.vm.printErrorLikeObjectSimple(err, writer, false); - }, - } - - break :message fbs.getWritten(); - } orelse message: { - const suffix = "...truncated"; - @memcpy(buffer[buffer.len - suffix.len ..], suffix); - break :message &buffer; - }; - resp.end(message, true); // TODO: "You should never call res.end(huge buffer)" + // TODO: improve the encoding of bundler errors so that the file it is + // referencing is not repeated per error. + try writeString32(loc.namespace, w); + try writeString32(loc.file, w); + try writeString32(loc.line_text orelse "", w); + } else { + try w.writeInt(u32, 0, .little); + } } + + fn writeString32(data: []const u8, w: Writer) !void { + try w.writeInt(u32, @intCast(data.len), .little); + try w.writeAll(data); + } + + // fn writeJsValue(value: JSValue, global: *JSC.JSGlobalObject, w: *Writer) !void { + // if (value.isAggregateError(global)) { + // // + // } + // if (value.jsType() == .DOMWrapper) { + // if (value.as(JSC.BuildMessage)) |build_error| { + // _ = build_error; // autofix + // // + // } else if (value.as(JSC.ResolveMessage)) |resolve_error| { + // _ = resolve_error; // autofix + // @panic("TODO"); + // } + // } + // _ = w; // autofix + + // @panic("TODO"); + // } }; // For debugging, it is helpful to be able to see bundles. -fn dumpBundle(dump_dir: std.fs.Dir, side: bake.Renderer, rel_path: []const u8, chunk: []const u8, wrap: bool) !void { +fn dumpBundle(dump_dir: std.fs.Dir, side: bake.Graph, rel_path: []const u8, chunk: []const u8, wrap: bool) !void { const name = bun.path.joinAbsString("/", &.{ @tagName(side), rel_path, @@ -2030,23 +2763,34 @@ fn emitVisualizerMessageIfNeeded(dev: *DevServer) !void { try w.writeInt(u32, @intCast(k.len), .little); if (k.len == 0) continue; try w.writeAll(k); - try w.writeByte(@intFromBool(g.stale_files.isSet(i))); + try w.writeByte(@intFromBool(g.stale_files.isSet(i) or switch (side) { + .server => v.failed, + .client => v.flags.failed, + })); try w.writeByte(@intFromBool(side == .server and v.is_rsc)); try w.writeByte(@intFromBool(side == .server and v.is_ssr)); try w.writeByte(@intFromBool(side == .server and v.is_route)); - try w.writeByte(@intFromBool(side == .server and v.is_special_framework_file)); - try w.writeByte(@intFromBool(side == .server and v.is_client_to_server_component_boundary)); + try w.writeByte(@intFromBool(side == .client and v.flags.is_special_framework_file)); + try w.writeByte(@intFromBool(switch (side) { + .server => v.is_client_component_boundary, + .client => v.flags.is_component_root, + })); } } inline for (.{ &dev.client_graph, &dev.server_graph }) |g| { - try w.writeInt(u32, @intCast(g.edges.items.len), .little); - for (g.edges.items) |edge| { + const G = @TypeOf(g.*); + + try w.writeInt(u32, @intCast(g.edges.items.len - g.edges_free_list.items.len), .little); + for (g.edges.items, 0..) 
|edge, i| { + if (std.mem.indexOfScalar(G.EdgeIndex, g.edges_free_list.items, G.EdgeIndex.init(@intCast(i))) != null) + continue; + try w.writeInt(u32, @intCast(edge.dependency.get()), .little); try w.writeInt(u32, @intCast(edge.imported.get()), .little); } } - _ = dev.app.publish("v", payload.items, .binary, false); + _ = dev.app.publish(DevWebSocket.visualizer_channel, payload.items, .binary, false); } pub fn onWebSocketUpgrade( @@ -2072,31 +2816,56 @@ pub fn onWebSocketUpgrade( ); } +pub const MessageId = enum(u8) { + /// Version packet + version = 'V', + /// When visualization mode is enabled, this packet contains + /// the entire serialized IncrementalGraph state. + visualizer = 'v', + /// Sent on a successful bundle, containing client code. + hot_update = '(', + /// Sent on a successful bundle, containing a list of + /// routes that are updated. + route_update = 'R', + /// Sent when the list of errors changes. + errors = 'E', + /// Sent when all errors are cleared. Semi-redundant + errors_cleared = 'c', + + pub fn char(id: MessageId) u8 { + return @intFromEnum(id); + } +}; + const DevWebSocket = struct { dev: *DevServer, emit_visualizer_events: bool, + pub const global_channel = "*"; + pub const visualizer_channel = "v"; + pub fn onOpen(dw: *DevWebSocket, ws: AnyWebSocket) void { - _ = dw; // autofix + _ = dw; // TODO: append hash of the framework config - _ = ws.send("V" ++ bun.Global.package_json_version_with_revision, .binary, false, true); - _ = ws.subscribe("*"); + _ = ws.send(.{MessageId.version.char()} ++ bun.Global.package_json_version_with_revision, .binary, false, true); + _ = ws.subscribe(global_channel); } pub fn onMessage(dw: *DevWebSocket, ws: AnyWebSocket, msg: []const u8, opcode: uws.Opcode) void { - if (msg.len == 1 and msg[0] == 'v' and !dw.emit_visualizer_events) { + _ = opcode; + + if (msg.len == 1 and msg[0] == MessageId.visualizer.char() and !dw.emit_visualizer_events) { dw.emit_visualizer_events = true; dw.dev.emit_visualizer_events += 1; - _ = ws.subscribe("v"); + _ = ws.subscribe(visualizer_channel); dw.dev.emitVisualizerMessageIfNeeded() catch bun.outOfMemory(); } - _ = opcode; // autofix } pub fn onClose(dw: *DevWebSocket, ws: AnyWebSocket, exit_code: i32, message: []const u8) void { - _ = ws; // autofix - _ = exit_code; // autofix - _ = message; // autofix + _ = ws; + _ = exit_code; + _ = message; if (dw.emit_visualizer_events) { dw.dev.emit_visualizer_events -= 1; @@ -2175,7 +2944,8 @@ pub fn reload(dev: *DevServer, reload_task: *HotReloadTask) bun.OOM!void { const changed_file_attributes = reload_task.files.values(); _ = changed_file_attributes; - // std.time.sleep(50 * std.time.ns_per_ms); + var timer = std.time.Timer.start() catch + @panic("timers unsupported"); var sfb = std.heap.stackFallback(4096, bun.default_allocator); const temp_alloc = sfb.get(); @@ -2199,23 +2969,51 @@ pub fn reload(dev: *DevServer, reload_task: *HotReloadTask) bun.OOM!void { return; } - dev.incremental_result.reset(); + const reload_file_list = bun.Output.Scoped(.reload_file_list, false); - var fail: Failure = undefined; - const bundle = dev.theRealBundlingFunction( - files.items, - null, - .hmr_chunk, - &fail, - ) catch |err| { + if (reload_file_list.isVisible()) { + reload_file_list.log("Hot update hits {d} files", .{files.items.len}); + for (files.items) |f| { + reload_file_list.log("- {s} (.{s})", .{ f.path, @tagName(f.graph) }); + } + } + + dev.incremental_result.reset(); + defer { + // Remove files last to start, to avoid issues where removing a file + // invalidates the 
last file index.
+        std.sort.pdq(
+            IncrementalGraph(.client).FileIndex,
+            dev.incremental_result.delete_client_files_later.items,
+            {},
+            IncrementalGraph(.client).FileIndex.sortFnDesc,
+        );
+        for (dev.incremental_result.delete_client_files_later.items) |client_index| {
+            dev.client_graph.disconnectAndDeleteFile(client_index);
+        }
+        dev.incremental_result.delete_client_files_later.clearRetainingCapacity();
+    }
+
+    dev.bundle(files.items) catch |err| {
         bun.handleErrorReturnTrace(err, @errorReturnTrace());
-        fail.printToConsole(&dev.routes[0]);
         return;
     };

+    dev.graph_safety_lock.lock();
+    defer dev.graph_safety_lock.unlock();
+
+    if (dev.client_graph.current_chunk_len > 0) {
+        const client = try dev.client_graph.takeBundle(.hmr_chunk);
+        defer dev.allocator.free(client);
+        assert(client[0] == '(');
+        _ = dev.app.publish(DevWebSocket.global_channel, client, .binary, true);
+    }
+
+    // This list of affected routes excludes client code. This means changing
+    // a client component won't count as a route that triggers a reload.
     if (dev.incremental_result.routes_affected.items.len > 0) {
-        var sfb2 = std.heap.stackFallback(4096, bun.default_allocator);
-        var payload = std.ArrayList(u8).initCapacity(sfb2.get(), 4096) catch
+        var sfb2 = std.heap.stackFallback(65536, bun.default_allocator);
+        var payload = std.ArrayList(u8).initCapacity(sfb2.get(), 65536) catch
             unreachable; // enough space
         defer payload.deinit();
         payload.appendAssumeCapacity('R');
@@ -2229,13 +3027,60 @@ pub fn reload(dev: *DevServer, reload_task: *HotReloadTask) bun.OOM!void {
             try w.writeAll(pattern);
         }

-        _ = dev.app.publish("*", payload.items, .binary, true);
+        _ = dev.app.publish(DevWebSocket.global_channel, payload.items, .binary, true);
     }

-    _ = bundle; // already sent to client
+    // When client component roots get updated, the `client_components_affected`
+    // list contains the server-side versions of these roots. These roots are
+    // traced to the routes so that the client-side bundles can be properly
+    // invalidated.
+ if (dev.incremental_result.client_components_affected.items.len > 0) { + dev.incremental_result.routes_affected.clearRetainingCapacity(); + dev.server_graph.affected_by_trace.setAll(false); + + var sfa_state = std.heap.stackFallback(65536, dev.allocator); + const sfa = sfa_state.get(); + dev.server_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.server_graph.bundled_files.count()); + defer dev.server_graph.affected_by_trace.deinit(sfa); + + for (dev.incremental_result.client_components_affected.items) |index| { + try dev.server_graph.traceDependencies(index, .no_stop); + } + + for (dev.incremental_result.routes_affected.items) |route| { + // Free old bundles + if (dev.routes[route.get()].client_bundle) |old| { + dev.allocator.free(old); + } + dev.routes[route.get()].client_bundle = null; + } + } + + // TODO: improve this visual feedback + if (dev.bundling_failures.count() == 0) { + const clear_terminal = true; + if (clear_terminal) { + Output.flush(); + Output.disableBuffering(); + Output.resetTerminalAll(); + } + + dev.bundles_since_last_error += 1; + if (dev.bundles_since_last_error > 1) { + Output.prettyError("[x{d}] ", .{dev.bundles_since_last_error}); + } + + Output.prettyError("Reloaded in {d}ms: {s}", .{ @divFloor(timer.read(), std.time.ns_per_ms), bun.path.relative(dev.cwd, changed_file_paths[0]) }); + if (changed_file_paths.len > 1) { + Output.prettyError(" + {d} more", .{files.items.len - 1}); + } + Output.prettyError("\n", .{}); + Output.flush(); + } else {} } pub const HotReloadTask = struct { + /// Align to cache lines to reduce contention. const Aligned = struct { aligned: HotReloadTask align(std.atomic.cache_line) }; dev: *DevServer, @@ -2415,23 +3260,6 @@ pub fn onWatchError(_: *DevServer, err: bun.sys.Error) void { } } -/// TODO: deprecated -pub fn bustDirCache(dev: *DevServer, path: []const u8) bool { - debug.log("bustDirCache {s}\n", .{path}); - const server = dev.server_bundler.resolver.bustDirCache(path); - const client = dev.client_bundler.resolver.bustDirCache(path); - const ssr = dev.ssr_bundler.resolver.bustDirCache(path); - return server or client or ssr; -} - -/// TODO: deprecated -pub fn getLoaders(dev: *DevServer) *bun.options.Loader.HashTable { - // The watcher needs to know what loader to use for a file, - // therefore, we must ensure that server and client options - // use the same loader set. 
- return &dev.server_bundler.options.loaders; -} - const std = @import("std"); const Allocator = std.mem.Allocator; const Mutex = std.Thread.Mutex; @@ -2471,6 +3299,4 @@ const JSModuleLoader = JSC.JSModuleLoader; const EventLoopHandle = JSC.EventLoopHandle; const JSInternalPromise = JSC.JSInternalPromise; -const StringPointer = bun.Schema.Api.StringPointer; - const ThreadlocalArena = @import("../mimalloc_arena.zig").Arena; diff --git a/src/bake/bake.private.d.ts b/src/bake/bake.private.d.ts index 4b41b0ac34..14e4038f43 100644 --- a/src/bake/bake.private.d.ts +++ b/src/bake/bake.private.d.ts @@ -40,7 +40,11 @@ declare const side: "client" | "server"; */ declare var server_exports: { handleRequest: (req: Request, meta: HandleRequestMeta, id: Id) => any; - registerUpdate: (modules: any) => void; + registerUpdate: ( + modules: any, + componentManifestAdd: null | string[], + componentManifestDelete: null | string[], + ) => void; }; interface HandleRequestMeta { diff --git a/src/bake/bake.zig b/src/bake/bake.zig index 0ab09589e4..7d3441ab4b 100644 --- a/src/bake/bake.zig +++ b/src/bake/bake.zig @@ -42,8 +42,8 @@ extern fn BakeInitProcessIdentifier() void; /// /// Full documentation on these fields is located in the TypeScript definitions. pub const Framework = struct { - entry_client: ?[]const u8 = null, - entry_server: ?[]const u8 = null, + entry_client: []const u8, + entry_server: []const u8, server_components: ?ServerComponents = null, react_fast_refresh: ?ReactFastRefresh = null, @@ -59,7 +59,7 @@ pub const Framework = struct { .server_components = .{ .separate_ssr_graph = true, .server_runtime_import = "react-server-dom-webpack/server", - .client_runtime_import = "react-server-dom-webpack/client", + // .client_runtime_import = "react-server-dom-webpack/client", }, .react_fast_refresh = .{}, .entry_client = "bun-framework-rsc/client.tsx", @@ -88,7 +88,7 @@ pub const Framework = struct { const ServerComponents = struct { separate_ssr_graph: bool = false, server_runtime_import: []const u8, - client_runtime_import: []const u8, + // client_runtime_import: []const u8, server_register_client_reference: []const u8 = "registerClientReference", server_register_server_reference: []const u8 = "registerServerReference", client_register_server_reference: []const u8 = "registerServerReference", @@ -106,16 +106,16 @@ pub const Framework = struct { var clone = f; var had_errors: bool = false; - if (clone.entry_client) |*path| f.resolveHelper(client, path, &had_errors); - if (clone.entry_server) |*path| f.resolveHelper(server, path, &had_errors); + f.resolveHelper(client, &clone.entry_client, &had_errors, "client entrypoint"); + f.resolveHelper(server, &clone.entry_server, &had_errors, "server entrypoint"); if (clone.react_fast_refresh) |*react_fast_refresh| { - f.resolveHelper(client, &react_fast_refresh.import_source, &had_errors); + f.resolveHelper(client, &react_fast_refresh.import_source, &had_errors, "react refresh runtime"); } if (clone.server_components) |*sc| { - f.resolveHelper(server, &sc.server_runtime_import, &had_errors); - f.resolveHelper(client, &sc.client_runtime_import, &had_errors); + f.resolveHelper(server, &sc.server_runtime_import, &had_errors, "server components runtime"); + // f.resolveHelper(client, &sc.client_runtime_import, &had_errors); } if (had_errors) return error.ModuleNotFound; @@ -123,7 +123,7 @@ pub const Framework = struct { return clone; } - inline fn resolveHelper(f: *const Framework, r: *bun.resolver.Resolver, path: *[]const u8, had_errors: *bool) void { + inline fn 
resolveHelper(f: *const Framework, r: *bun.resolver.Resolver, path: *[]const u8, had_errors: *bool, desc: []const u8) void { if (f.built_in_modules.get(path.*)) |mod| { switch (mod) { .import => |p| path.* = p, @@ -133,9 +133,8 @@ pub const Framework = struct { } var result = r.resolve(r.fs.top_level_dir, path.*, .stmt) catch |err| { - bun.Output.err(err, "Failed to resolve '{s}' for framework", .{path.*}); + bun.Output.err(err, "Failed to resolve '{s}' for framework ({s})", .{ path.*, desc }); had_errors.* = true; - return; }; path.* = result.path().?.text; // TODO: what is the lifetime of this string @@ -203,17 +202,17 @@ pub const Framework = struct { bun.todoPanic(@src(), "custom react-fast-refresh import source", .{}); }, .server_components = sc: { - const rfr: JSValue = opts.get(global, "serverComponents") orelse { + const sc: JSValue = opts.get(global, "serverComponents") orelse { if (global.hasException()) return error.JSError; break :sc null; }; - if (rfr == .null or rfr == .undefined) break :sc null; + if (sc == .null or sc == .undefined) break :sc null; break :sc .{ - .client_runtime_import = "", + // .client_runtime_import = "", .separate_ssr_graph = brk: { - const prop: JSValue = opts.get(global, "separateSSRGraph") orelse { + const prop: JSValue = sc.get(global, "separateSSRGraph") orelse { if (!global.hasException()) global.throwInvalidArguments("Missing 'framework.serverComponents.separateSSRGraph'", .{}); return error.JSError; @@ -224,7 +223,7 @@ pub const Framework = struct { return error.JSError; }, .server_runtime_import = brk: { - const prop: JSValue = opts.get(global, "serverRuntimeImportSource") orelse { + const prop: JSValue = sc.get(global, "serverRuntimeImportSource") orelse { if (!global.hasException()) global.throwInvalidArguments("Missing 'framework.serverComponents.serverRuntimeImportSource'", .{}); return error.JSError; @@ -239,7 +238,7 @@ pub const Framework = struct { break :brk str.toUTF8(bun.default_allocator).slice(); }, .server_register_client_reference = brk: { - const prop: JSValue = opts.get(global, "serverRegisterClientReferenceExport") orelse { + const prop: JSValue = sc.get(global, "serverRegisterClientReferenceExport") orelse { if (!global.hasException()) global.throwInvalidArguments("Missing 'framework.serverComponents.serverRegisterClientReferenceExport'", .{}); return error.JSError; @@ -326,14 +325,13 @@ pub fn getHmrRuntime(mode: Side) []const u8 { .server => @embedFile("bake-codegen/bake.server.js"), } else switch (mode) { - inline else => |m| bun.runtimeEmbedFile(.codegen, "bake." ++ @tagName(m) ++ ".js"), + inline else => |m| bun.runtimeEmbedFile(.codegen_eager, "bake." 
++ @tagName(m) ++ ".js"), }; } pub const Mode = enum { production, development }; pub const Side = enum { client, server }; -/// TODO: Rename this to Graph -pub const Renderer = enum(u2) { +pub const Graph = enum(u2) { client, server, /// Only used when Framework has .server_components.separate_ssr_graph set diff --git a/src/bake/client/error-serialization.ts b/src/bake/client/error-serialization.ts new file mode 100644 index 0000000000..551c0e1eb4 --- /dev/null +++ b/src/bake/client/error-serialization.ts @@ -0,0 +1,89 @@ +// This implements error deserialization from the WebSocket protocol +import { DataViewReader } from "./reader"; + +export const enum BundlerMessageKind { + err = 0, + warn = 1, + note = 2, + debug = 3, + verbose = 4, +} + +export interface BundlerMessage { + kind: BundlerMessageKind; + message: string; + location: BundlerMessageLocation | null; + notes: BundlerNote[]; +} + +export interface BundlerMessageLocation { + /** One-based */ + line: number; + /** Zero-based byte offset */ + column: number; + + namespace: string; + file: string; + lineText: string; +} + +export interface BundlerNote { + message: string; + location: BundlerMessageLocation | null; +} + +export function decodeSerializedErrorPayload(arrayBuffer: DataView, start: number) { + const r = new DataViewReader(arrayBuffer, start); + const owner = r.u32(); + const messageCount = r.u32(); + const messages = new Array(messageCount); + for (let i = 0; i < messageCount; i++) { + const kind = r.u8(); + // TODO: JS errors + messages[i] = readLogMsg(r, kind); + } + console.log({owner, messageCount, messages}); + return messages; +} + +/** First byte is already read in. */ +function readLogMsg(r: DataViewReader, kind: BundlerMessageKind) { + const message = r.string32(); + const location = readBundlerMessageLocationOrNull(r); + const noteCount = r.u32(); + const notes = new Array(noteCount); + for (let i = 0; i < noteCount; i++) { + notes[i] = readLogData(r); + } + return { + kind, + message, + location, + notes, + }; +} + +function readLogData(r: DataViewReader): BundlerNote | null { + return { + message: r.string32(), + location: readBundlerMessageLocationOrNull(r), + }; +} + +function readBundlerMessageLocationOrNull(r: DataViewReader): BundlerMessageLocation | null { + const line = r.u32(); + if (line == 0) return null; + + const column = r.u32(); + const namespace = r.string32(); + const file = r.string32(); + const lineText = r.string32(); + + return { + line, + column, + namespace, + file, + lineText, + }; +} diff --git a/src/bake/client/overlay.ts b/src/bake/client/overlay.ts index eba537c56a..480183d4e5 100644 --- a/src/bake/client/overlay.ts +++ b/src/bake/client/overlay.ts @@ -1,33 +1,38 @@ import { css } from "../macros" with { type: "macro" }; +if (side !== 'client') throw new Error('Not client side!'); + // Create a root element to contain all our our DOM nodes. 
var root!: HTMLElement; -var mount; +const wrap = document.createElement("bun-hmr"); +wrap.setAttribute( + "style", + "position:absolute;display:block;top:0;left:0;width:100%;height:100%;background:transparent", +); +const shadow = wrap.attachShadow({ mode: "open" }); -if (side === "client") { - mount = function mount() { - const wrap = document.createElement("bun-hmr"); - wrap.setAttribute( - "style", - "position:absolute;display:block;top:0;left:0;width:100%;height:100%;background:transparent", - ); - const shadow = wrap.attachShadow({ mode: "open" }); +const sheet = new CSSStyleSheet(); +sheet.replace(css("client/overlay.css", IS_BUN_DEVELOPMENT)); +shadow.adoptedStyleSheets = [sheet]; - const sheet = new CSSStyleSheet(); - sheet.replace(css("client/overlay.css", IS_BUN_DEVELOPMENT)); - shadow.adoptedStyleSheets = [sheet]; - - root = document.createElement("main"); - shadow.appendChild(root); - document.body.appendChild(wrap); - }; -} +root = document.createElement("main"); +root.style.display = "none"; +wrap.style.display = "none"; +shadow.appendChild(root); +document.body.appendChild(wrap); export function showErrorOverlay(e) { - mount(); console.error(e); - root.innerHTML = `

Client-side Runtime Error

${e?.message ? `${e?.name ?? e?.constructor?.name ?? "Error"}: ${e.message}\n` : JSON.stringify(e)}${e?.message ? e?.stack : ""}
`; + root.style.display = ""; + wrap.style.display = ""; + root.innerHTML = `

Error

${e?.message ? `${e?.name ?? e?.constructor?.name ?? "Error"}: ${e.message}\n` : JSON.stringify(e)}${e?.message ? e?.stack : ""}
`; root.querySelector(".dismiss")!.addEventListener("click", () => { - root.innerHTML = ""; + clearErrorOverlay(); }); } + +export function clearErrorOverlay() { + root.innerHTML = ""; + root.style.display = "none"; + wrap.style.display = "none"; +} \ No newline at end of file diff --git a/src/bake/client/reader.ts b/src/bake/client/reader.ts index fa3f07eca2..a6b8950797 100644 --- a/src/bake/client/reader.ts +++ b/src/bake/client/reader.ts @@ -27,9 +27,13 @@ export class DataViewReader { return value; } - string(byteLength: number) { + stringWithLength(byteLength: number) { const str = td.decode(this.view.buffer.slice(this.cursor, this.cursor + byteLength)); this.cursor += byteLength; return str; } + + string32() { + return this.stringWithLength(this.u32()); + } } diff --git a/src/bake/error.template.html b/src/bake/error.template.html deleted file mode 100644 index 08d63bfe2b..0000000000 --- a/src/bake/error.template.html +++ /dev/null @@ -1,15 +0,0 @@ - - - - - - {[page_title]s} - - - - - - - diff --git a/src/bake/hmr-module.ts b/src/bake/hmr-module.ts index 2ad6731cf5..4ddd70f9be 100644 --- a/src/bake/hmr-module.ts +++ b/src/bake/hmr-module.ts @@ -19,9 +19,9 @@ export const enum LoadModuleType { /** * This object is passed as the CommonJS "module", but has a bunch of - * non-standard properties that are used for implementing hot-module - * reloading. It is unacceptable to depend on these properties, and - * it will not be considered a breaking change. + * non-standard properties that are used for implementing hot-module reloading. + * It is unacceptable for users to depend on these properties, and it will not + * be considered a breaking change when these internals are altered. */ export class HotModule { id: Id; @@ -115,6 +115,8 @@ export function loadModule(key: Id, type: LoadModuleType): HotModule return module; } +export const getModule = registry.get.bind(registry); + export function replaceModule(key: Id, load: ModuleLoadFunction) { const module = registry.get(key); if (module) { @@ -151,6 +153,16 @@ export function replaceModules(modules: any) { registry.set("bun:wrap", runtime); } +export const serverManifest = {}; +export const clientManifest = {}; + +if (side === "server") { + const server_module = new HotModule("bun:bake/server"); + server_module.__esModule = true; + server_module.exports = { serverManifest, clientManifest }; + registry.set(server_module.id, server_module); +} + if (side === "client") { const { refresh } = config; if (refresh) { diff --git a/src/bake/hmr-protocol.md b/src/bake/hmr-protocol.md index fa45034651..c0f69d9138 100644 --- a/src/bake/hmr-protocol.md +++ b/src/bake/hmr-protocol.md @@ -33,15 +33,24 @@ V1.1.30-canary.37+117e1b388 Hot-module-reloading patch. The entire payload is UTF-8 Encoded JavaScript Payload. -### `R` +### `R` - Route reload request Server-side code has reloaded. Client should either refetch the route or perform a hard reload. -- `u32` Number of updated routes +- `u32`: Number of updated routes - For each route: - - `u32` Route ID - - `u16` Length of route name. - - `[n]u8` Route name in UTF-8 encoded text. + - `u32`: Route ID + - `u16`: Length of route name. + - `[n]u8`: Route name in UTF-8 encoded text. 
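Below is a minimal sketch of a conforming decoder for this payload, using a plain `DataView` rather than the runtime's `DataViewReader`; it assumes the one-byte message id has already been consumed and that all integers are little-endian, as elsewhere in this protocol:

```ts
// Sketch: decode an `R` (route reload request) payload per the layout above.
function decodeRouteUpdate(view: DataView, offset: number) {
  const td = new TextDecoder();
  const routes: { id: number; pattern: string }[] = [];
  let count = view.getUint32(offset, true); // u32: number of updated routes
  offset += 4;
  while (count-- > 0) {
    const id = view.getUint32(offset, true); // u32: route ID
    offset += 4;
    const length = view.getUint16(offset, true); // u16: length of route name
    offset += 2;
    // [n]u8: route name, UTF-8 encoded
    const pattern = td.decode(new Uint8Array(view.buffer, view.byteOffset + offset, length));
    offset += length;
    routes.push({ id, pattern });
  }
  return routes;
}
```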
+
+### `e` - Error status update
+
+- `u32`: Number of errors removed
+- For each removed error:
+  - `u32`: Error owner
+- The remainder of the payload repeats, for each error object:
+  - `u32`: Error owner
+  - Error payload

### `v`
diff --git a/src/bake/hmr-runtime-client.ts b/src/bake/hmr-runtime-client.ts
index f9c0d3f511..d5de9e47b1 100644
--- a/src/bake/hmr-runtime-client.ts
+++ b/src/bake/hmr-runtime-client.ts
@@ -1,7 +1,7 @@
 // This file is the entrypoint to the hot-module-reloading runtime
 // In the browser, this uses a WebSocket to communicate with the bundler.
 import { loadModule, LoadModuleType, replaceModules } from "./hmr-module";
-import { showErrorOverlay } from "./client/overlay";
+import { clearErrorOverlay, showErrorOverlay } from "./client/overlay";
 import { Bake } from "bun";
 import { int } from "./macros" with { type: "macro" };
 import { td } from "./text-decoder";
@@ -80,7 +80,7 @@
       while (routeCount > 0) {
         routeCount -= 1;
         const routeId = reader.u32();
-        const routePattern = reader.string(reader.u16());
+        const routePattern = reader.stringWithLength(reader.u16());
         if (routeMatch(routeId, routePattern)) {
           performRouteReload();
           break;
@@ -89,6 +89,15 @@
       break;
     }
+    case int("E"): {
+      showErrorOverlay('ooga boga there are errors!');
+      break;
+    }
+    case int("c"): {
+      clearErrorOverlay();
+      break;
+    }
     default: {
       if (IS_BUN_DEVELOPMENT) {
         return showErrorOverlay(
diff --git a/src/bake/hmr-runtime-error.ts b/src/bake/hmr-runtime-error.ts
new file mode 100644
index 0000000000..59f30a3ae8
--- /dev/null
+++ b/src/bake/hmr-runtime-error.ts
@@ -0,0 +1,60 @@
+// When a bundling error happens, we cannot load any of the user's code, since
+// that code expects the SSR step to succeed. This version of the client just
+// opens a websocket, listens only for error resolution events, and reloads the
+// page.
+//
+// This is embedded in `DevServer.sendSerializedFailures`. SSR is left unused
+// for simplicity; a flash of unstyled content is stopped by the fact this
+// script runs synchronously.
+import { decodeSerializedErrorPayload } from "./client/error-serialization";
+import { int } from "./macros" with { type: "macro" };
+
+/** Injected by DevServer */
+declare const error: Uint8Array;
+
+{
+  const decoded = decodeSerializedErrorPayload(new DataView(error.buffer), 0);
+  console.log(decoded);
+
+  document.write(`
${JSON.stringify(decoded, null, 2)}
`); +} + +// TODO: write a shared helper for websocket that performs reconnection +// and handling of the version packet + +function initHmrWebSocket() { + const ws = new WebSocket("/_bun/hmr"); + ws.binaryType = "arraybuffer"; + ws.onopen = ev => { + console.log("HMR socket open!"); + }; + ws.onmessage = (ev: MessageEvent) => { + const { data } = ev; + if (typeof data === "string") return data; + const view = new DataView(data); + switch (view.getUint8(0)) { + case int("R"): { + location.reload(); + break; + } + case int("e"): { + const decoded = decodeSerializedErrorPayload(view, 1); + document.querySelector('#err')!.innerHTML = JSON.stringify(decoded, null, 2); + break; + } + case int("c"): { + location.reload(); + break; + } + } + }; + ws.onclose = ev => { + // TODO: visual feedback in overlay.ts + // TODO: reconnection + }; + ws.onerror = ev => { + console.error(ev); + }; +} + +initHmrWebSocket(); diff --git a/src/bake/hmr-runtime-server.ts b/src/bake/hmr-runtime-server.ts index 226db5481d..512c74581b 100644 --- a/src/bake/hmr-runtime-server.ts +++ b/src/bake/hmr-runtime-server.ts @@ -2,7 +2,7 @@ // On the server, communication is facilitated using the default // export, which is assigned via `server_exports`. import type { Bake } from "bun"; -import { loadModule, LoadModuleType, replaceModules } from "./hmr-module"; +import { loadModule, LoadModuleType, replaceModules, clientManifest, serverManifest, getModule } from "./hmr-module"; if (typeof IS_BUN_DEVELOPMENT !== "boolean") { throw new Error("DCE is configured incorrectly"); @@ -32,5 +32,37 @@ server_exports = { // TODO: support streaming return await response.text(); }, - registerUpdate: replaceModules, + registerUpdate(modules, componentManifestAdd, componentManifestDelete) { + replaceModules(modules); + + if (componentManifestAdd) { + for (const uid of componentManifestAdd) { + try { + const mod = loadModule(uid, LoadModuleType.AssertPresent); + const { exports, __esModule } = mod; + const exp = __esModule ? exports : (mod._ext_exports ??= { ...exports, default: exports }); + + for (const exportName of Object.keys(exp)) { + serverManifest[uid] = { + id: uid, + name: exportName, + chunks: [], + }; + } + } catch (err) { + console.log(err); + } + } + } + + if (componentManifestDelete) { + for (const fileName of componentManifestDelete) { + const client = clientManifest[fileName]; + for (const exportName in client) { + delete serverManifest[`${fileName}#${exportName}`]; + } + delete clientManifest[fileName]; + } + } + }, }; diff --git a/src/bake/incremental_visualizer.html b/src/bake/incremental_visualizer.html index c3e05855f1..3e72944da4 100644 --- a/src/bake/incremental_visualizer.html +++ b/src/bake/incremental_visualizer.html @@ -1,326 +1,345 @@ - + + + + + IncrementalGraph Visualization + + + - #stat { - font-weight: normal; - } - - + +

IncrementalGraph Visualization
+ - + // Helper function to remove a node by ID + function removeNode(id) { + nodes.remove({ id }); + } - \ No newline at end of file + // Helper function to add or update edges in the graph + const edgeProps = { arrows: "to" }; + function updateEdge(id, from, to, variant) { + const prop = + variant === "normal" + ? { id, from, to, arrows: "to" } + : variant === "client" + ? { id, from, to, arrows: "to,from", color: "#ffffff99", width: 2, label: "[use client]" } + : { id, from, to }; + if (edges.get(id)) { + edges.update(prop); + } else { + edges.add(prop); + } + } + + // Helper to remove all edges of a node + function removeEdges(nodeId) { + const edgesToRemove = edges.get({ + filter: edge => edge.from === nodeId || edge.to === nodeId, + }); + edges.remove(edgesToRemove.map(e => e.id)); + } + + // Function to update the entire graph when new data is received + function updateGraph() { + const newEdgeIds = new Set(); // Track new edges + const newNodeIds = new Set(); // Track new nodes + + const boundaries = new Map(); + + // Update server files + serverFiles.forEach((file, index) => { + const id = `S_${file.name}`; + if (file.deleted) { + removeNode(id); + removeEdges(id); + } else { + updateNode(id, file, "server"); + } + + if (file.isBoundary) { + boundaries.set(file.name, { server: index, client: -1 }); + } + newNodeIds.add(id); // Track this node + }); + + // Update client files + clientFiles.forEach((file, index) => { + const id = `C_${file.name}`; + if (file.deleted) { + removeNode(id); + removeEdges(id); + return; + } + updateNode(id, file, "client"); + const b = boundaries.get(file.name); + if (b) { + b.client = index; + } + newNodeIds.add(id); // Track this node + }); + + // Update client edges + clientEdges.forEach((edge, index) => { + const id = `C_edge_${index}`; + updateEdge(id, `C_${clientFiles[edge.from].name}`, `C_${clientFiles[edge.to].name}`, "normal"); + newEdgeIds.add(id); // Track this edge + }); + + // Update server edges + serverEdges.forEach((edge, index) => { + const id = `S_edge_${index}`; + updateEdge(id, `S_${serverFiles[edge.from].name}`, `S_${serverFiles[edge.to].name}`, "normal"); + newEdgeIds.add(id); // Track this edge + }); + + boundaries.forEach(({ server, client }) => { + if (client === -1) return; + const id = `S_edge_bound_${server}_${client}`; + updateEdge(id, `S_${serverFiles[server].name}`, `C_${clientFiles[client].name}`, "client"); + newEdgeIds.add(id); // Track this edge + }); + + // Remove edges that are no longer present + currentEdgeIds.forEach(id => { + if (!newEdgeIds.has(id)) { + edges.remove(id); + } + }); + + // Remove nodes that are no longer present + currentNodeIds.forEach(id => { + if (!newNodeIds.has(id)) { + nodes.remove(id); + } + }); + + // Update the currentEdgeIds set to the new one + currentEdgeIds = newEdgeIds; + currentNodeIds = newNodeIds; + + if (isFirst) { + network.stabilize(); + isFirst = false; + } + + document.getElementById("stat").innerText = + `(server: ${serverFiles.length} files, ${serverEdges.length} edges; client: ${clientFiles.length} files, ${clientEdges.length} edges; ${boundaries.size} boundaries)`; + } + + + diff --git a/src/bun.zig b/src/bun.zig index 65f76ce333..864de710df 100644 --- a/src/bun.zig +++ b/src/bun.zig @@ -3315,15 +3315,22 @@ pub inline fn resolveSourcePath( }; } +const RuntimeEmbedRoot = enum { + codegen, + src, + src_eager, + codegen_eager, +}; + pub fn runtimeEmbedFile( - comptime root: enum { codegen, src, src_eager }, + comptime root: RuntimeEmbedRoot, comptime sub_path: []const u8, ) 
[]const u8 {
    comptime assert(Environment.isDebug);
    comptime assert(!Environment.codegen_embed);

    const abs_path = switch (root) {
-        .codegen => resolveSourcePath(.codegen, sub_path),
+        .codegen, .codegen_eager => resolveSourcePath(.codegen, sub_path),
         .src, .src_eager => resolveSourcePath(.src, sub_path),
     };

@@ -3344,7 +3351,7 @@ pub fn runtimeEmbedFile(
         }
     };

-    if (root == .src_eager and static.once.done) {
+    if ((root == .src_eager or root == .codegen_eager) and static.once.done) {
         static.once.done = false;
         default_allocator.free(static.storage);
     }
@@ -3851,19 +3858,26 @@ pub fn WeakPtr(comptime T: type, comptime weakable_field: std.meta.FieldEnum(T))
 pub const DebugThreadLock = if (Environment.allow_assert)
     struct {
         owning_thread: ?std.Thread.Id = null,
+        locked_at: crash_handler.StoredTrace = crash_handler.StoredTrace.empty,

         pub fn lock(impl: *@This()) void {
-            bun.assert(impl.owning_thread == null);
+            if (impl.owning_thread) |thread| {
+                Output.err("assertion failure", "Locked by thread {d} here:", .{thread});
+                crash_handler.dumpStackTrace(impl.locked_at.trace());
+                @panic("Safety lock violated");
+            }
             impl.owning_thread = std.Thread.getCurrentId();
+            impl.locked_at = crash_handler.StoredTrace.capture(@returnAddress());
         }

         pub fn unlock(impl: *@This()) void {
             impl.assertLocked();
-            impl.owning_thread = null;
+            impl.* = .{};
         }

         pub fn assertLocked(impl: *const @This()) void {
-            assert(std.Thread.getCurrentId() == impl.owning_thread);
+            assert(impl.owning_thread != null); // not locked
+            assert(impl.owning_thread == std.Thread.getCurrentId());
         }
     }
 else
@@ -3894,30 +3908,38 @@ pub fn GenericIndex(backing_int: type, uid: anytype) type {
         }

         /// Prefer this over @enumFromInt to assert the int is in range
-        pub fn init(int: backing_int) callconv(callconv_inline) Index {
+        pub inline fn init(int: backing_int) Index {
             bun.assert(int != null_value); // would be confused for null
             return @enumFromInt(int);
         }

         /// Prefer this over @intFromEnum because of type confusion with `.Optional`
-        pub fn get(i: @This()) callconv(callconv_inline) backing_int {
+        pub inline fn get(i: @This()) backing_int {
             bun.assert(@intFromEnum(i) != null_value); // memory corruption
             return @intFromEnum(i);
         }

-        pub fn toOptional(oi: @This()) callconv(callconv_inline) Optional {
+        pub inline fn toOptional(oi: @This()) Optional {
             return @enumFromInt(oi.get());
         }

+        pub fn sortFnAsc(_: void, a: @This(), b: @This()) bool {
+            return a.get() < b.get();
+        }
+
+        pub fn sortFnDesc(_: void, a: @This(), b: @This()) bool {
+            return a.get() > b.get();
+        }
+
         pub const Optional = enum(backing_int) {
             none = std.math.maxInt(backing_int),
             _,

-            pub fn init(maybe: ?Index) callconv(callconv_inline) ?Index {
+            pub inline fn init(maybe: ?Index) ?Index {
                 return if (maybe) |i| i.toOptional() else .none;
             }

-            pub fn unwrap(oi: Optional) callconv(callconv_inline) ?Index {
+            pub inline fn unwrap(oi: Optional) ?Index {
                 return if (oi == .none) null else @enumFromInt(@intFromEnum(oi));
             }
         };
@@ -3939,7 +3961,7 @@ pub fn splitAtMut(comptime T: type, slice: []T, mid: usize) struct { []T, []T }
 /// Given `&slice[index] == item`, returns the `index` needed.
 /// The item must be in the slice.
pub fn indexOfPointerInSlice(comptime T: type, slice: []const T, item: *const T) usize { - bun.assert(isSliceInBufferT(T, slice, item[0..1])); + bun.assert(isSliceInBufferT(T, item[0..1], slice)); const offset = @intFromPtr(slice.ptr) - @intFromPtr(item); const index = @divExact(offset, @sizeOf(T)); return index; diff --git a/src/bundler/bundle_v2.zig b/src/bundler/bundle_v2.zig index de25df4ec2..df6dcb11e1 100644 --- a/src/bundler/bundle_v2.zig +++ b/src/bundler/bundle_v2.zig @@ -321,10 +321,10 @@ const Watcher = bun.JSC.NewHotReloader(BundleV2, EventLoop, true); /// Bake needs to specify more information per entry point. pub const BakeEntryPoint = struct { path: []const u8, - graph: bake.Renderer, + graph: bake.Graph, route_index: bake.DevServer.Route.Index.Optional = .none, - pub fn init(path: []const u8, graph: bake.Renderer) BakeEntryPoint { + pub fn init(path: []const u8, graph: bake.Graph) BakeEntryPoint { return .{ .path = path, .graph = graph }; } @@ -589,7 +589,7 @@ pub const BundleV2 = struct { dev.directory_watchers.trackResolutionFailure( import_record.source_file, import_record.specifier, - target.bakeRenderer(), + target.bakeGraph(), ) catch bun.outOfMemory(); } } @@ -722,6 +722,20 @@ pub const BundleV2 = struct { ) catch bun.outOfMemory(); entry.value_ptr.* = idx; out_source_index = Index.init(idx); + + // For non-javascript files, make all of these files share indices. + // For example, it is silly to bundle index.css depended on by client+server twice. + // It makes sense to separate these for JS because the target affects DCE + if (this.bundler.options.server_components and !loader.isJavaScriptLike()) { + const a, const b = switch (target) { + else => .{ &this.graph.client_path_to_source_index_map, &this.graph.ssr_path_to_source_index_map }, + .browser => .{ &this.graph.path_to_source_index_map, &this.graph.ssr_path_to_source_index_map }, + .kit_server_components_ssr => .{ &this.graph.path_to_source_index_map, &this.graph.client_path_to_source_index_map }, + }; + a.put(this.graph.allocator, entry.key_ptr.*, entry.value_ptr.*) catch bun.outOfMemory(); + if (this.framework.?.server_components.?.separate_ssr_graph) + b.put(this.graph.allocator, entry.key_ptr.*, entry.value_ptr.*) catch bun.outOfMemory(); + } } else { out_source_index = Index.init(entry.value_ptr.*); } @@ -920,9 +934,7 @@ pub const BundleV2 = struct { var runtime_parse_task = try this.graph.allocator.create(ParseTask); runtime_parse_task.* = rt.parse_task; runtime_parse_task.ctx = this; - runtime_parse_task.task = .{ - .callback = &ParseTask.callback, - }; + runtime_parse_task.task = .{ .callback = &ParseTask.callback }; runtime_parse_task.tree_shaking = true; runtime_parse_task.loader = .js; _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, .monotonic); @@ -931,7 +943,7 @@ pub const BundleV2 = struct { // Bake reserves two source indexes at the start of the file list, but // gets its content set after the scan+parse phase, but before linking. - try this.reserveSourceIndexesForKit(); + try this.reserveSourceIndexesForBake(); { // Setup entry points @@ -988,6 +1000,8 @@ pub const BundleV2 = struct { /// This generates the two asts for 'bun:bake/client' and 'bun:bake/server'. Both are generated /// at the same time in one pass over the SBC list. 
@@ -988,6 +1000,8 @@ pub const BundleV2 = struct {
 
     /// This generates the two asts for 'bun:bake/client' and 'bun:bake/server'. Both are generated
     /// at the same time in one pass over the SBC list.
+    ///
+    ///
     pub fn processServerComponentManifestFiles(this: *BundleV2) OOM!void {
         // If server components are not configured, do nothing
         const fw = this.framework orelse return;
@@ -1283,7 +1297,7 @@ pub const BundleV2 = struct {
             unique_key,
         );
 
-        return try this.linker.generateChunksInParallel(chunks);
+        return try this.linker.generateChunksInParallel(chunks, false);
     }
 
     pub fn processFilesToCopy(
@@ -1759,8 +1773,8 @@ pub const BundleV2 = struct {
             // unknown at this point:
             .contents_or_fd = .{
                 .fd = .{
-                    .dir = .zero,
-                    .file = .zero,
+                    .dir = bun.invalid_fd,
+                    .file = bun.invalid_fd,
                 },
             },
             .side_effects = _resolver.SideEffects.has_side_effects,
@@ -1853,7 +1867,6 @@ pub const BundleV2 = struct {
     pub fn runFromJSInNewThread(
         this: *BundleV2,
         entry_points: []const []const u8,
-        bake_entry_points: []const BakeEntryPoint,
     ) !std.ArrayList(options.OutputFile) {
         this.unique_key = std.crypto.random.int(u64);
 
@@ -1861,20 +1874,14 @@ pub const BundleV2 = struct {
             return error.BuildFailed;
         }
 
-        if (comptime FeatureFlags.help_catch_memory_issues) {
-            this.graph.heap.gc(true);
-            bun.Mimalloc.mi_collect(true);
-        }
+        this.graph.heap.helpCatchMemoryIssues();
 
-        this.graph.pool.pool.schedule(try this.enqueueEntryPoints(entry_points, bake_entry_points));
+        this.graph.pool.pool.schedule(try this.enqueueEntryPoints(entry_points, &.{}));
 
         // We must wait for all the parse tasks to complete, even if there are errors.
         this.waitForParse();
 
-        if (comptime FeatureFlags.help_catch_memory_issues) {
-            this.graph.heap.gc(true);
-            bun.Mimalloc.mi_collect(true);
-        }
+        this.graph.heap.helpCatchMemoryIssues();
 
         if (this.bundler.log.errors > 0) {
             return error.BuildFailed;
@@ -1882,17 +1889,11 @@ pub const BundleV2 = struct {
 
         try this.processServerComponentManifestFiles();
 
-        if (comptime FeatureFlags.help_catch_memory_issues) {
-            this.graph.heap.gc(true);
-            bun.Mimalloc.mi_collect(true);
-        }
+        this.graph.heap.helpCatchMemoryIssues();
 
         try this.cloneAST();
 
-        if (comptime FeatureFlags.help_catch_memory_issues) {
-            this.graph.heap.gc(true);
-            bun.Mimalloc.mi_collect(true);
-        }
+        this.graph.heap.helpCatchMemoryIssues();
 
         const reachable_files = try this.findReachableFiles();
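
All four of the deleted `FeatureFlags.help_catch_memory_issues` blocks above collapse into a single inline helper. For reference, this is the definition the patch adds to `src/mimalloc_arena.zig` further down:

    pub inline fn helpCatchMemoryIssues(this: Arena) void {
        if (comptime FeatureFlags.help_catch_memory_issues) {
            this.gc(true);
            bun.Mimalloc.mi_collect(true);
        }
    }

Each phase boundary then shrinks to a single `this.graph.heap.helpCatchMemoryIssues();` call, so the debug-only collection points cannot drift out of sync.
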
@@ -1910,7 +1911,131 @@ pub const BundleV2 = struct {
             return error.BuildFailed;
         }
 
-        return try this.linker.generateChunksInParallel(chunks);
+        return try this.linker.generateChunksInParallel(chunks, false);
+    }
+
+    /// Dev Server uses this instead to run a subset of the bundler, where
+    /// it indexes the chunks into IncrementalGraph on its own.
+    pub fn runFromBakeDevServer(this: *BundleV2, bake_entry_points: []const BakeEntryPoint) ![2]Chunk {
+        this.unique_key = std.crypto.random.int(u64);
+
+        this.graph.heap.helpCatchMemoryIssues();
+
+        this.graph.pool.pool.schedule(try this.enqueueEntryPoints(&.{}, bake_entry_points));
+        this.waitForParse();
+
+        this.graph.heap.helpCatchMemoryIssues();
+
+        try this.cloneAST();
+
+        this.graph.heap.helpCatchMemoryIssues();
+
+        this.dynamic_import_entry_points = std.AutoArrayHashMap(Index.Int, void).init(this.graph.allocator);
+
+        // Separate non-failing files into two lists: JS and CSS
+        const js_reachable_files, const css_asts = reachable_files: {
+            var css_asts = try BabyList(bun.css.BundlerStyleSheet).initCapacity(this.graph.allocator, this.graph.css_file_count);
+            var js_files = try std.ArrayListUnmanaged(Index).initCapacity(this.graph.allocator, this.graph.ast.len - this.graph.css_file_count - 1);
+
+            for (this.graph.ast.items(.parts)[1..], this.graph.ast.items(.css)[1..], 1..) |part_list, maybe_css, index| {
+                // Dev Server proceeds even with failed files.
+                // These files are filtered out via the lack of any parts.
+                //
+                // Actual empty files will contain a part exporting an empty object.
+                if (part_list.len != 0) {
+                    if (maybe_css) |css| {
+                        css_asts.appendAssumeCapacity(css.*);
+                    } else {
+                        js_files.appendAssumeCapacity(Index.init(index));
+                        // Mark every part live.
+                        for (part_list.slice()) |*p| {
+                            p.is_live = true;
+                        }
+                    }
+                }
+            }
+
+            break :reachable_files .{ js_files.items, css_asts };
+        };
+
+        this.graph.heap.helpCatchMemoryIssues();
+
+        // HMR skips most of the linker! All linking errors are converted into
+        // runtime errors to avoid a more complicated dependency graph. For
+        // example, if you remove an exported symbol, we only rebuild the
+        // changed file, then detect the missing export at runtime.
+        //
+        // Additionally, notice that we run this code generation even if we have
+        // files that failed. This allows having a large build graph (importing
+        // a new npm dependency), where one failing file doesn't prevent the
+        // passing files from being cached in the incremental graph.
+
+        // The linker still has to be initialized as code generation expects it.
+        // TODO: ???
+        try this.linker.load(
+            this,
+            this.graph.entry_points.items,
+            this.graph.server_component_boundaries,
+            js_reachable_files,
+        );
+
+        this.graph.heap.helpCatchMemoryIssues();
+
+        // Generate chunks
+        const js_part_ranges = try this.graph.allocator.alloc(PartRange, js_reachable_files.len);
+        const parts = this.graph.ast.items(.parts);
+        for (js_reachable_files, js_part_ranges) |source_index, *part_range| {
+            part_range.* = .{
+                .source_index = source_index,
+                .part_index_begin = 0,
+                .part_index_end = parts[source_index.get()].len,
+            };
+        }
+
+        _ = css_asts; // TODO:
+
+        var chunks = [_]Chunk{
+            // One JS chunk
+            .{
+                .entry_point = .{
+                    .entry_point_id = 0,
+                    .source_index = 0,
+                    .is_entry_point = true,
+                },
+                .content = .{
+                    .javascript = .{
+                        // TODO(@paperdave): remove this ptrCast when Source Index is fixed
+                        .files_in_chunk_order = @ptrCast(js_reachable_files),
+                        .parts_in_chunk_in_order = js_part_ranges,
+                    },
+                },
+                .output_source_map = sourcemap.SourceMapPieces.init(this.graph.allocator),
+            },
+            // One CSS chunk
+            .{
+                .entry_point = .{
+                    .entry_point_id = 0,
+                    .source_index = 0,
+                    .is_entry_point = true,
+                },
+                .content = .{
+                    .css = .{
+                        // TODO:
+                        .imports_in_chunk_in_order = BabyList(Chunk.CssImportOrder).init(&.{}),
+                        .asts = &.{},
+                    },
+                },
+                .output_source_map = sourcemap.SourceMapPieces.init(this.graph.allocator),
+            },
+        };
+
+        this.graph.heap.helpCatchMemoryIssues();
+
+        try this.linker.generateChunksInParallel(&chunks, true);
+
+        this.graph.heap.helpCatchMemoryIssues();
+
+        return chunks;
     }
 
     pub fn enqueueOnResolvePluginIfNeeded(
@@ -1959,7 +2084,7 @@ pub const BundleV2 = struct {
             parse.path.namespace,
             parse.path.text,
         });
-        var load = bun.default_allocator.create(JSC.API.JSBundler.Load) catch unreachable;
+        const load = bun.default_allocator.create(JSC.API.JSBundler.Load) catch unreachable;
         load.* = JSC.API.JSBundler.Load.create(
             this.completion.?,
             parse.source_index,
@@ -1997,7 +2122,7 @@ pub const BundleV2 = struct {
         return path_clone.dupeAllocFixPretty(this.graph.allocator);
     }
 
-    fn reserveSourceIndexesForKit(this: *BundleV2) !void {
+    fn reserveSourceIndexesForBake(this: *BundleV2) !void {
         const fw = this.framework orelse return;
         _ = fw.server_components orelse return;
 
@@ -2076,13 +2201,18 @@ pub const BundleV2 = struct {
             inline else => |is_server| {
                 const src
= if (is_server) bake.server_virtual_source else bake.client_virtual_source; if (strings.eqlComptime(import_record.path.text, src.path.pretty)) { - if (is_server) { - this.graph.kit_referenced_server_data = true; + if (this.bundler.options.dev_server != null) { + import_record.is_external_without_side_effects = true; + import_record.source_index = Index.invalid; } else { - this.graph.kit_referenced_client_data = true; + if (is_server) { + this.graph.kit_referenced_server_data = true; + } else { + this.graph.kit_referenced_client_data = true; + } + import_record.path.namespace = "bun"; + import_record.source_index = src.index; } - import_record.path.namespace = "bun"; - import_record.source_index = src.index; continue; } }, @@ -2143,7 +2273,7 @@ pub const BundleV2 = struct { continue; } - const bundler, const renderer: bake.Renderer, const target = + const bundler, const renderer: bake.Graph, const target = if (import_record.tag == .bake_resolve_to_ssr_graph) brk: { // TODO: consider moving this error into js_parser so it is caught more reliably @@ -2179,7 +2309,7 @@ pub const BundleV2 = struct { }; } else .{ this.bundlerForTarget(ast.target), - ast.target.bakeRenderer(), + ast.target.bakeGraph(), ast.target, }; @@ -2207,7 +2337,7 @@ pub const BundleV2 = struct { dev.directory_watchers.trackResolutionFailure( source.path.text, import_record.path.text, - ast.target.bakeRenderer(), // use the source file target not the altered one + ast.target.bakeGraph(), // use the source file target not the altered one ) catch bun.outOfMemory(); } } @@ -2287,13 +2417,14 @@ pub const BundleV2 = struct { } if (this.bundler.options.dev_server) |dev_server| { + import_record.source_index = Index.invalid; + import_record.is_external_without_side_effects = true; + if (!dev_server.isFileStale(path.text, renderer)) { - import_record.source_index = Index.invalid; const rel = bun.path.relativePlatform(this.bundler.fs.top_level_dir, path.text, .loose, false); import_record.path.text = rel; import_record.path.pretty = rel; import_record.path = this.pathWithPrettyInitialized(path.*, target) catch bun.outOfMemory(); - import_record.is_external_without_side_effects = true; continue; } } @@ -2301,7 +2432,11 @@ pub const BundleV2 = struct { const hash_key = path.hashKey(); if (this.pathToSourceIndexMap(target).get(hash_key)) |id| { - import_record.source_index = Index.init(id); + if (this.bundler.options.dev_server != null) { + import_record.path = this.graph.input_files.items(.source)[id].path; + } else { + import_record.source_index = Index.init(id); + } continue; } @@ -2348,10 +2483,12 @@ pub const BundleV2 = struct { debug("failed with error: {s}", .{@errorName(err)}); resolve_queue.clearAndFree(); parse_result.value = .{ - .err = ParseTask.Result.Error{ + .err = .{ .err = err, .step = .resolve, .log = Logger.Log.init(bun.default_allocator), + .source_index = source.index, + .target = ast.target, }, }; } @@ -2366,7 +2503,7 @@ pub const BundleV2 = struct { defer trace.end(); defer bun.default_allocator.destroy(parse_result); - var graph = &this.graph; + const graph = &this.graph; var diff: isize = -1; @@ -2380,6 +2517,7 @@ pub const BundleV2 = struct { var resolve_queue = ResolveQueue.init(this.graph.allocator); defer resolve_queue.deinit(); var process_log = true; + if (parse_result.value == .success) { resolve_queue = runResolutionForParseTask(parse_result, this); if (parse_result.value == .err) { @@ -2387,10 +2525,29 @@ pub const BundleV2 = struct { } } + // To minimize contention, watchers are appended by the 
bundler thread. + if (this.bun_watcher) |watcher| { + if (parse_result.watcher_data.fd != bun.invalid_fd and parse_result.watcher_data.fd != .zero) { + const source = switch (parse_result.value) { + inline .empty, .err => |data| graph.input_files.items(.source)[data.source_index.get()], + .success => |val| val.source, + }; + _ = watcher.addFile( + parse_result.watcher_data.fd, + source.path.text, + bun.hash32(source.path.text), + graph.input_files.items(.loader)[source.index.get()], + parse_result.watcher_data.dir_fd, + null, + false, + ); + } + } + switch (parse_result.value) { .empty => |empty_result| { - var input_files = graph.input_files.slice(); - var side_effects = input_files.items(.side_effects); + const input_files = graph.input_files.slice(); + const side_effects = input_files.items(.side_effects); side_effects[empty_result.source_index.get()] = .no_side_effects__empty_ast; if (comptime Environment.allow_assert) { debug("onParse({d}, {s}) = empty", .{ @@ -2398,41 +2555,12 @@ pub const BundleV2 = struct { input_files.items(.source)[empty_result.source_index.get()].path.text, }); } - - if (this.bun_watcher) |watcher| { - if (empty_result.watcher_data.fd != .zero and empty_result.watcher_data.fd != bun.invalid_fd) { - _ = watcher.addFile( - empty_result.watcher_data.fd, - input_files.items(.source)[empty_result.source_index.get()].path.text, - bun.hash32(input_files.items(.source)[empty_result.source_index.get()].path.text), - graph.input_files.items(.loader)[empty_result.source_index.get()], - empty_result.watcher_data.dir_fd, - null, - false, - ); - } - } }, .success => |*result| { result.log.cloneToWithRecycled(this.bundler.log, true) catch unreachable; - // to minimize contention, we add watcher on the bundling thread instead of the parsing thread. - if (this.bun_watcher) |watcher| { - if (result.watcher_data.fd != .zero and result.watcher_data.fd != bun.invalid_fd) { - _ = watcher.addFile( - result.watcher_data.fd, - result.source.path.text, - bun.hash32(result.source.path.text), - result.source.path.loader(&this.bundler.options.loaders) orelse options.Loader.file, - result.watcher_data.dir_fd, - result.watcher_data.package_json, - false, - ); - } - } - - // Warning: this array may resize in this function call - // do not reuse it. + // Warning: `input_files` and `ast` arrays may resize in this function call + // It is not safe to cache slices from them. graph.input_files.items(.source)[result.source.index.get()] = result.source; this.source_code_length += if (!result.source.index.isRuntime()) result.source.contents.len @@ -2520,15 +2648,21 @@ pub const BundleV2 = struct { if (this.resolve_tasks_waiting_for_import_source_index.fetchSwapRemove(result.source.index.get())) |pending_entry| { for (pending_entry.value.slice()) |to_assign| { - import_records.slice()[to_assign.import_record_index].source_index = to_assign.to_source_index; + if (this.bundler.options.dev_server == null) + import_records.slice()[to_assign.import_record_index].source_index = to_assign.to_source_index; } var list = pending_entry.value.list(); list.deinit(this.graph.allocator); } + if (result.ast.css != null) { + this.graph.css_file_count += 1; + } + for (import_records.slice(), 0..) 
|*record, i| { if (path_to_source_index_map.get(record.path.hashKey())) |source_index| { - record.source_index.value = source_index; + if (this.bundler.options.dev_server == null) + record.source_index.value = source_index; if (getRedirectId(result.ast.redirect_import_record_index)) |compare| { if (compare == @as(u32, @truncate(i))) { @@ -2587,12 +2721,18 @@ pub const BundleV2 = struct { } }, .err => |*err| { - if (comptime Environment.allow_assert) { + if (comptime Environment.enable_logs) { debug("onParse() = err", .{}); } if (process_log) { - if (err.log.msgs.items.len > 0) { + if (this.bundler.options.dev_server) |dev_server| { + dev_server.handleParseTaskFailure( + err.target.bakeGraph(), + this.graph.input_files.items(.source)[err.source_index.get()].path.text, + &err.log, + ) catch bun.outOfMemory(); + } else if (err.log.msgs.items.len > 0) { err.log.cloneToWithRecycled(this.bundler.log, true) catch unreachable; } else { this.bundler.log.addErrorFmt( @@ -2604,6 +2744,10 @@ pub const BundleV2 = struct { ) catch unreachable; } } + + if (Environment.allow_assert and this.bundler.options.dev_server != null) { + bun.assert(this.graph.ast.items(.parts)[err.source_index.get()].len == 0); + } }, } } @@ -2776,11 +2920,9 @@ pub fn BundleThread(CompletionStruct: type) type { completion.log = out_log; } - completion.result = .{ - .value = .{ - .output_files = try this.runFromJSInNewThread(bundler.options.entry_points, &.{}), - }, - }; + completion.result = .{ .value = .{ + .output_files = try this.runFromJSInNewThread(bundler.options.entry_points), + } }; var out_log = Logger.Log.init(bun.default_allocator); this.bundler.log.appendToWithRecycled(&out_log, true) catch bun.outOfMemory(); @@ -2816,8 +2958,60 @@ pub const ParseTask = struct { package_version: string = "", is_entry_point: bool = false, - /// Used by generated client components - presolved_source_indices: []const Index.Int = &.{}, + /// The information returned to the Bundler thread when a parse finishes. + pub const Result = struct { + task: EventLoop.Task, + ctx: *BundleV2, + value: Value, + watcher_data: WatcherData, + + pub const Value = union(enum) { + success: Success, + err: Error, + empty: struct { + source_index: Index, + }, + }; + + const WatcherData = struct { + fd: bun.StoredFileDescriptorType, + dir_fd: bun.StoredFileDescriptorType, + + /// When no files to watch, this encoding is used. + const none: WatcherData = .{ + .fd = bun.invalid_fd, + .dir_fd = bun.invalid_fd, + }; + }; + + pub const Success = struct { + ast: JSAst, + source: Logger.Source, + log: Logger.Log, + use_directive: UseDirective, + side_effects: _resolver.SideEffects, + + /// Used by "file" loader files. + unique_key_for_additional_file: []const u8 = "", + /// Used by "file" loader files. 
+ content_hash_for_additional_file: u64 = 0, + }; + + pub const Error = struct { + err: anyerror, + step: Step, + log: Logger.Log, + target: options.Target, + source_index: Index, + + pub const Step = enum { + pending, + read_file, + parse, + resolve, + }; + }; + }; const debug = Output.scoped(.ParseTask, false); @@ -2989,63 +3183,6 @@ pub const ParseTask = struct { }; } - pub const Result = struct { - task: EventLoop.Task, - ctx: *BundleV2, - value: Value, - - pub const Value = union(Tag) { - success: Success, - err: Error, - empty: struct { - source_index: Index, - - watcher_data: WatcherData = .{}, - }, - }; - - const WatcherData = struct { - fd: bun.StoredFileDescriptorType = .zero, - dir_fd: bun.StoredFileDescriptorType = .zero, - package_json: ?*PackageJSON = null, - }; - - pub const Success = struct { - ast: JSAst, - source: Logger.Source, - log: Logger.Log, - - use_directive: UseDirective = .none, - watcher_data: WatcherData = .{}, - side_effects: ?_resolver.SideEffects = null, - - /// Used by "file" loader files. - unique_key_for_additional_file: []const u8 = "", - - /// Used by "file" loader files. - content_hash_for_additional_file: u64 = 0, - }; - - pub const Error = struct { - err: anyerror, - step: Step, - log: Logger.Log, - - pub const Step = enum { - pending, - read_file, - parse, - resolve, - }; - }; - - pub const Tag = enum { - success, - err, - empty, - }; - }; - threadlocal var override_file_path_buf: bun.PathBuffer = undefined; fn getEmptyCSSAST( @@ -3262,12 +3399,12 @@ pub const ParseTask = struct { return ast; } - fn run_( + fn run( task: *ParseTask, this: *ThreadPool.Worker, step: *ParseTask.Result.Error.Step, log: *Logger.Log, - ) anyerror!?Result.Success { + ) anyerror!Result.Success { const allocator = this.allocator; var data = this.data; @@ -3279,7 +3416,7 @@ pub const ParseTask = struct { const loader = task.loader orelse file_path.loader(&bundler.options.loaders) orelse options.Loader.file; var entry: CacheEntry = switch (task.contents_or_fd) { - .fd => brk: { + .fd => |contents| brk: { const trace = tracer(@src(), "readFile"); defer trace.end(); @@ -3296,7 +3433,7 @@ pub const ParseTask = struct { } } - break :brk CacheEntry{ + break :brk .{ .contents = NodeFallbackModules.contentsFromPath(file_path.text) orelse "", }; } @@ -3311,8 +3448,8 @@ pub const ParseTask = struct { file_path.text, task.contents_or_fd.fd.dir, false, - if (task.contents_or_fd.fd.file != .zero) - task.contents_or_fd.fd.file + if (contents.file != bun.invalid_fd and contents.file != .zero) + contents.file else null, ) catch |err| { @@ -3340,27 +3477,26 @@ pub const ParseTask = struct { return err; }; }, - .contents => |contents| CacheEntry{ + .contents => |contents| .{ .contents = contents, - .fd = .zero, + .fd = bun.invalid_fd, }, }; errdefer if (task.contents_or_fd == .fd) entry.deinit(allocator); const will_close_file_descriptor = task.contents_or_fd == .fd and - !entry.fd.isStdio() and - (this.ctx.bun_watcher == null); + entry.fd.isValid() and !entry.fd.isStdio() and + this.ctx.bun_watcher == null; if (will_close_file_descriptor) { _ = entry.closeFD(); - } - - if (!will_close_file_descriptor and !entry.fd.isStdio()) task.contents_or_fd = .{ - .fd = .{ + task.contents_or_fd = .{ .fd = .{ .file = bun.invalid_fd, .dir = bun.invalid_fd } }; + } else { + task.contents_or_fd = .{ .fd = .{ .file = entry.fd, .dir = bun.invalid_fd, - }, - }; + } }; + } step.* = .parse; const is_empty = strings.isAllWhitespace(entry.contents); @@ -3463,94 +3599,73 @@ pub const ParseTask = struct { 
task.side_effects = .no_side_effects__empty_ast; } - if (task.presolved_source_indices.len > 0) { - for (ast.import_records.slice(), task.presolved_source_indices) |*record, source_index| { - if (record.is_unused or record.is_internal) - continue; - - record.source_index = Index.source(source_index); - } - } - step.* = .resolve; - return Result.Success{ + return .{ .ast = ast, .source = source, .log = log.*, .use_directive = use_directive, .unique_key_for_additional_file = unique_key_for_additional_file, + .side_effects = task.side_effects, // Hash the files in here so that we do it in parallel. .content_hash_for_additional_file = if (loader.shouldCopyForBundling(this.ctx.bundler.options.experimental_css)) ContentHasher.run(source.contents) else 0, - - .watcher_data = .{ - .fd = if (task.contents_or_fd == .fd and !will_close_file_descriptor) task.contents_or_fd.fd.file else .zero, - .dir_fd = if (task.contents_or_fd == .fd) task.contents_or_fd.fd.dir else .zero, - }, }; } - pub fn callback(this: *ThreadPoolLib.Task) void { - run(@fieldParentPtr("task", this)); - } - - fn run(this: *ParseTask) void { + pub fn callback(task: *ThreadPoolLib.Task) void { + const this: *ParseTask = @fieldParentPtr("task", task); var worker = ThreadPool.Worker.get(this.ctx); defer worker.unget(); + var step: ParseTask.Result.Error.Step = .pending; var log = Logger.Log.init(worker.allocator); bun.assert(this.source_index.isValid()); // forgot to set source_index - const result = bun.default_allocator.create(Result) catch unreachable; + const result = bun.default_allocator.create(Result) catch bun.outOfMemory(); + const value: ParseTask.Result.Value = if (run(this, worker, &step, &log)) |ast| value: { + // When using HMR, always flag asts with errors as parse failures. + // Not done outside of the dev server out of fear of breaking existing code. 
+ if (this.ctx.bundler.options.dev_server != null and ast.log.hasErrors()) { + break :value .{ + .err = .{ + .err = error.SyntaxError, + .step = .parse, + .log = ast.log, + .source_index = this.source_index, + .target = this.known_target, + }, + }; + } + + break :value .{ .success = ast }; + } else |err| value: { + if (err == error.EmptyAST) { + log.deinit(); + break :value .{ .empty = .{ + .source_index = this.source_index, + } }; + } + + break :value .{ .err = .{ + .err = err, + .step = step, + .log = log, + .source_index = this.source_index, + .target = this.known_target, + } }; + }; result.* = .{ .ctx = this.ctx, .task = undefined, - .value = brk: { - if (run_( - this, - worker, - &step, - &log, - )) |ast_or_null| { - if (ast_or_null) |ast| { - break :brk .{ .success = ast }; - } else { - log.deinit(); - break :brk .{ - .empty = .{ - .source_index = this.source_index, - .watcher_data = .{ - .fd = if (this.contents_or_fd == .fd) this.contents_or_fd.fd.file else .zero, - .dir_fd = if (this.contents_or_fd == .fd) this.contents_or_fd.fd.dir else .zero, - }, - }, - }; - } - } else |err| { - if (err == error.EmptyAST) { - log.deinit(); - break :brk .{ - .empty = .{ - .source_index = this.source_index, - .watcher_data = .{ - .fd = if (this.contents_or_fd == .fd) this.contents_or_fd.fd.file else .zero, - .dir_fd = if (this.contents_or_fd == .fd) this.contents_or_fd.fd.dir else .zero, - }, - }, - }; - } - break :brk .{ - .err = .{ - .err = err, - .step = step, - .log = log, - }, - }; - } + .value = value, + .watcher_data = .{ + .fd = if (this.contents_or_fd == .fd) this.contents_or_fd.fd.file else bun.invalid_fd, + .dir_fd = if (this.contents_or_fd == .fd) this.contents_or_fd.fd.dir else bun.invalid_fd, }, }; @@ -3612,13 +3727,11 @@ pub const ServerComponentParseTask = struct { worker.allocator, )) |success| .{ .success = success } - else |err| brk: { - break :brk .{ .err = .{ - .err = err, - .step = .resolve, - .log = log, - } }; + else |err| switch (err) { + error.OutOfMemory => bun.outOfMemory(), }, + + .watcher_data = ParseTask.Result.WatcherData.none, }; switch (worker.ctx.loop().*) { @@ -3641,7 +3754,7 @@ pub const ServerComponentParseTask = struct { task: *ServerComponentParseTask, log: *Logger.Log, allocator: std.mem.Allocator, - ) !ParseTask.Result.Success { + ) bun.OOM!ParseTask.Result.Success { var ab = try AstBuilder.init(allocator, &task.source, task.ctx.bundler.options.hot_module_reloading); switch (task.data) { @@ -3655,6 +3768,8 @@ pub const ServerComponentParseTask = struct { }), .source = task.source, .log = log.*, + .use_directive = .none, + .side_effects = .no_side_effects__pure_data, }; } @@ -3968,6 +4083,10 @@ pub const Graph = struct { estimated_file_loader_count: usize = 0, + /// For Bake, a count of the CSS asts is used to make precise + /// pre-allocations without re-iterating the file listing. + css_file_count: usize = 0, + additional_output_files: std.ArrayListUnmanaged(options.OutputFile) = .{}, kit_referenced_server_data: bool, @@ -4915,47 +5034,7 @@ pub const LinkerContext = struct { const trace = tracer(@src(), "computeChunks"); defer trace.end(); - // The dev server never compiles chunks, and requires every reachable - // file to be printed, So the logic is special-cased. 
- if (this.dev_server != null) { - var js_chunks = try std.ArrayListUnmanaged(Chunk).initCapacity(this.allocator, 1); - const entry_bits = &this.graph.files.items(.entry_bits)[0]; - - // Exclude runtime because it is already embedded - const reachable_files = if (this.graph.reachable_files[0].isRuntime()) - this.graph.reachable_files[1..] - else - this.graph.reachable_files; - - const part_ranges = try this.allocator.alloc(PartRange, reachable_files.len); - - const parts = this.parse_graph.ast.items(.parts); - for (reachable_files, part_ranges) |source_index, *part_range| { - part_range.* = .{ - .source_index = source_index, - .part_index_begin = 0, - .part_index_end = parts[source_index.get()].len, - }; - } - - js_chunks.appendAssumeCapacity(.{ - .entry_point = .{ - .entry_point_id = 0, - .source_index = 0, - .is_entry_point = true, - }, - .entry_bits = entry_bits.*, - .content = .{ - .javascript = .{ - // TODO(@paperdave): this ptrCast should not be needed. - .files_in_chunk_order = @ptrCast(this.graph.reachable_files), - .parts_in_chunk_in_order = part_ranges, - }, - }, - .output_source_map = sourcemap.SourceMapPieces.init(this.allocator), - }); - return js_chunks.items; - } + bun.assert(this.dev_server == null); // use computeChunksForDevServer var stack_fallback = std.heap.stackFallback(4096, this.allocator); const stack_all = stack_fallback.get(); @@ -4978,13 +5057,12 @@ pub const LinkerContext = struct { entry_bits.set(entry_bit); if (this.options.experimental_css) { - if (this.graph.ast.items(.css)[source_index]) |*css| { - _ = css; // autofix + if (this.graph.ast.items(.css)[source_index] != null) { // Create a chunk for the entry point here to ensure that the chunk is // always generated even if the resulting file is empty const css_chunk_entry = try css_chunks.getOrPut(try temp_allocator.dupe(u8, entry_bits.bytes(this.graph.entry_points.len))); // const css_chunk_entry = try js_chunks.getOrPut(); - const order = this.findImportedFilesInCSSOrder(temp_allocator, &[_]Index{Index.init(source_index)}); + const order = this.findImportedFilesInCSSOrder(temp_allocator, &.{Index.init(source_index)}); css_chunk_entry.value_ptr.* = .{ .entry_point = .{ .entry_point_id = entry_bit, @@ -5029,10 +5107,10 @@ pub const LinkerContext = struct { const css_source_indices = this.findImportedCSSFilesInJSOrder(temp_allocator, Index.init(source_index)); if (css_source_indices.len > 0) { const order = this.findImportedFilesInCSSOrder(temp_allocator, css_source_indices.slice()); - var css_files_wth_parts_in_chunk = std.AutoArrayHashMapUnmanaged(Index.Int, void){}; + var css_files_with_parts_in_chunk = std.AutoArrayHashMapUnmanaged(Index.Int, void){}; for (order.slice()) |entry| { if (entry.kind == .source_index) { - css_files_wth_parts_in_chunk.put(this.allocator, entry.kind.source_index.get(), {}) catch bun.outOfMemory(); + css_files_with_parts_in_chunk.put(this.allocator, entry.kind.source_index.get(), {}) catch bun.outOfMemory(); } } const css_chunk_entry = try css_chunks.getOrPut(try temp_allocator.dupe(u8, entry_bits.bytes(this.graph.entry_points.len))); @@ -5050,7 +5128,7 @@ pub const LinkerContext = struct { .asts = this.allocator.alloc(bun.css.BundlerStyleSheet, order.len) catch bun.outOfMemory(), }, }, - .files_with_parts_in_chunk = css_files_wth_parts_in_chunk, + .files_with_parts_in_chunk = css_files_with_parts_in_chunk, .output_source_map = sourcemap.SourceMapPieces.init(this.allocator), }; } @@ -5451,11 +5529,11 @@ pub const LinkerContext = struct { // unlike JavaScript import statements, 
CSS "@import" rules are evaluated every // time instead of just the first time. // - // A - // / \ - // B C - // \ / - // D + // A + // / \ + // B C + // \ / + // D // // If A imports B and then C, B imports D, and C imports D, then the CSS // traversal order is D B D C A. @@ -5516,12 +5594,12 @@ pub const LinkerContext = struct { // TODO: should we even do this? @import rules have to be the first rules in the stylesheet, why even allow pre-import layers? // Any pre-import layers come first // if len(repr.AST.LayersPreImport) > 0 { - // order = append(order, cssImportOrder{ - // kind: cssImportLayers, - // layers: repr.AST.LayersPreImport, - // conditions: wrappingConditions, - // conditionImportRecords: wrappingImportRecords, - // }) + // order = append(order, cssImportOrder{ + // kind: cssImportLayers, + // layers: repr.AST.LayersPreImport, + // conditions: wrappingConditions, + // conditionImportRecords: wrappingImportRecords, + // }) // } defer { @@ -5744,15 +5822,15 @@ pub const LinkerContext = struct { // // For example: // - // // entry.css - // @import "foo.css" supports(display: flex); - // @import "bar.css" supports(display: flex); + // // entry.css + // @import "foo.css" supports(display: flex); + // @import "bar.css" supports(display: flex); // - // // foo.css - // @import "lib.css" screen; + // // foo.css + // @import "lib.css" screen; // - // // bar.css - // @import "lib.css"; + // // bar.css + // @import "lib.css"; // // When we bundle this code we'll get an import order as follows: // @@ -5827,11 +5905,11 @@ pub const LinkerContext = struct { // order that JavaScript modules were evaluated in before the top-level await // feature was introduced. // - // A - // / \ - // B C - // \ / - // D + // A + // / \ + // B C + // \ / + // D // // If A imports B and then C, B imports D, and C imports D, then the JavaScript // traversal order is D B C A. @@ -8329,7 +8407,7 @@ pub const LinkerContext = struct { // Client bundles for Bake must be globally allocated, // as it must outlive the bundle task. 
const use_global_allocator = c.dev_server != null and - c.parse_graph.ast.items(.target)[part_range.source_index.get()].bakeRenderer() == .client; + c.parse_graph.ast.items(.target)[part_range.source_index.get()].bakeGraph() == .client; var arena = &worker.temporary_arena; var buffer_writer = js_printer.BufferWriter.init( @@ -8535,10 +8613,10 @@ pub const LinkerContext = struct { // TODO: css banner // if len(c.options.CSSBanner) > 0 { - // prevOffset.AdvanceString(c.options.CSSBanner) - // j.AddString(c.options.CSSBanner) - // prevOffset.AdvanceString("\n") - // j.AddString("\n") + // prevOffset.AdvanceString(c.options.CSSBanner) + // j.AddString(c.options.CSSBanner) + // prevOffset.AdvanceString("\n") + // j.AddString("\n") // } // TODO: (this is where we would put the imports) @@ -8601,13 +8679,13 @@ pub const LinkerContext = struct { // Make sure the file ends with a newline j.ensureNewlineAtEnd(); // if c.options.UnsupportedCSSFeatures.Has(compat.InlineStyle) { - // slashTag = "" + // slashTag = "" // } // c.maybeAppendLegalComments(c.options.LegalComments, legalCommentList, chunk, &j, slashTag) // if len(c.options.CSSFooter) > 0 { - // j.AddString(c.options.CSSFooter) - // j.AddString("\n") + // j.AddString(c.options.CSSFooter) + // j.AddString("\n") // } chunk.intermediate_output = c.breakOutputIntoPieces( @@ -10547,8 +10625,8 @@ pub const LinkerContext = struct { } } - /// The conversion logic is completely different for format .kit_internal_hmr - fn convertStmtsForChunkForKit( + /// The conversion logic is completely different for format .internal_bake_dev + fn convertStmtsForChunkForBake( c: *LinkerContext, source_index: u32, stmts: *StmtList, @@ -10688,7 +10766,7 @@ pub const LinkerContext = struct { bun.assert(!part_range.source_index.isRuntime()); // embedded in HMR runtime for (parts) |part| { - c.convertStmtsForChunkForKit(part_range.source_index.get(), stmts, part.stmts, allocator, &ast) catch |err| + c.convertStmtsForChunkForBake(part_range.source_index.get(), stmts, part.stmts, allocator, &ast) catch |err| return .{ .err = err }; } @@ -10702,7 +10780,7 @@ pub const LinkerContext = struct { }, Logger.Loc.Empty) }, }) catch unreachable; // is within bounds - if (flags.wrap == .cjs and ast.flags.uses_exports_ref) { + if (ast.flags.uses_exports_ref) { clousure_args.appendAssumeCapacity( .{ .binding = Binding.alloc(temp_allocator, B.Identifier{ @@ -11363,7 +11441,7 @@ pub const LinkerContext = struct { shifts: []sourcemap.SourceMapShifts, }; - pub fn generateChunksInParallel(c: *LinkerContext, chunks: []Chunk) !std.ArrayList(options.OutputFile) { + pub fn generateChunksInParallel(c: *LinkerContext, chunks: []Chunk, comptime is_dev_server: bool) !if (is_dev_server) void else std.ArrayList(options.OutputFile) { const trace = tracer(@src(), "generateChunksInParallel"); defer trace.end(); @@ -11373,6 +11451,7 @@ pub const LinkerContext = struct { bun.assert(chunks.len > 0); { + // TODO(@paperdave/bake): instead of running a renamer per chunk, run it per file debug(" START {d} renamers", .{chunks.len}); defer debug(" DONE {d} renamers", .{chunks.len}); var wait_group = try c.allocator.create(sync.WaitGroup); @@ -11489,7 +11568,7 @@ pub const LinkerContext = struct { "Part Range: {s} {s} ({d}..{d})", .{ c.parse_graph.input_files.items(.source)[part_range.source_index.get()].path.pretty, - @tagName(c.parse_graph.ast.items(.target)[part_range.source_index.get()].bakeRenderer()), + @tagName(c.parse_graph.ast.items(.target)[part_range.source_index.get()].bakeGraph()), 
                        part_range.part_index_begin,
                        part_range.part_index_end,
                    },
@@ -11549,11 +11628,7 @@ pub const LinkerContext = struct {
         //
         // When this isn't the initial bundle, concatenation as usual would produce a
         // broken module. It is DevServer's job to create and send HMR patches.
-        if (c.dev_server) |dev_server| {
-            bun.assert(chunks.len == 1);
-            try dev_server.finalizeBundle(c, &chunks[0]);
-            return std.ArrayList(options.OutputFile).init(bun.default_allocator);
-        }
+        if (is_dev_server) return;
 
         {
             debug(" START {d} postprocess chunks", .{chunks.len});
@@ -12390,7 +12465,7 @@ pub const LinkerContext = struct {
                 .{
                     entry_points_count,
                     c.parse_graph.input_files.items(.source)[source_index].path.pretty,
-                    @tagName(c.parse_graph.ast.items(.target)[source_index].bakeRenderer()),
+                    @tagName(c.parse_graph.ast.items(.target)[source_index].bakeGraph()),
                     out_dist,
                 },
             );
@@ -12463,7 +12538,7 @@ pub const LinkerContext = struct {
             debugTreeShake("markFileLiveForTreeShaking({d}, {s} {s}) = {s}", .{
                 source_index,
                 c.parse_graph.input_files.get(source_index).source.path.pretty,
-                @tagName(c.parse_graph.ast.items(.target)[source_index].bakeRenderer()),
+                @tagName(c.parse_graph.ast.items(.target)[source_index].bakeGraph()),
                 if (c.graph.files_live.isSet(source_index)) "already seen" else "first seen",
             });
         }
@@ -14029,7 +14104,7 @@ pub const Chunk = struct {
     };
 
     pub const CssChunk = struct {
-        imports_in_chunk_in_order: BabyList(CssImportOrder) = .{},
+        imports_in_chunk_in_order: BabyList(CssImportOrder),
         /// Multiple imports may refer to the same file/stylesheet, but may need to
         /// wrap them in conditions (e.g. a layer).
         ///
diff --git a/src/codegen/bake-codegen.ts b/src/codegen/bake-codegen.ts
index 57d698c147..381d3813a0 100644
--- a/src/codegen/bake-codegen.ts
+++ b/src/codegen/bake-codegen.ts
@@ -1,5 +1,6 @@
 import assert from "node:assert";
-import { existsSync, writeFileSync, rmSync } from "node:fs";
+import { existsSync, writeFileSync, rmSync } from "node:fs";
+import { watch } from "node:fs/promises";
 import { basename, join } from "node:path";
 
 // arg parsing
@@ -14,7 +15,7 @@ for (const arg of process.argv.slice(2)) {
   options[split[0].slice(2)] = value;
 }
 
-let { codegen_root, debug } = options as any;
+let { codegen_root, debug, live } = options as any;
 if (!codegen_root) {
   console.error("Missing --codegen_root=...");
   process.exit(1);
@@ -24,10 +25,13 @@ if (debug === "false" || debug === "0" || debug == "OFF") debug = false;
 const base_dir = join(import.meta.dirname, "../bake");
 process.chdir(base_dir); // to make bun build predictable in development
 
+async function run(){
+
 const results = await Promise.allSettled(
-  ["client", "server"].map(async side => {
+  ["client", "server", "error"].map(async file => {
+    const side = file === 'error' ? 'client' : file;
     let result = await Bun.build({
-      entrypoints: [join(base_dir, `hmr-runtime-${side}.ts`)],
+      entrypoints: [join(base_dir, `hmr-runtime-${file}.ts`)],
       define: {
         side: JSON.stringify(side),
         IS_BUN_DEVELOPMENT: String(!!debug),
@@ -44,22 +48,19 @@ const results = await Promise.allSettled(
     // A second pass is used to convert global variables into parameters, while
     // allowing for renaming to properly function when minification is enabled.
     const in_names = [
-      'input_graph',
-      'config',
-      side === 'server' && 'server_exports'
+      file !== 'error' && 'input_graph',
+      file !== 'error' && 'config',
+      file === 'server' && 'server_exports'
     ].filter(Boolean);
-    const combined_source = `
+    const combined_source = file === 'error' ?
code : ` __marker__; - let ${in_names.join(",")}; + ${in_names.length > 0 ? 'let' : ''} ${in_names.join(",")}; __marker__(${in_names.join(",")}); ${code}; `; - const generated_entrypoint = join(base_dir, `.runtime-${side}.generated.ts`); + const generated_entrypoint = join(base_dir, `.runtime-${file}.generated.ts`); writeFileSync(generated_entrypoint, combined_source); - using _ = { [Symbol.dispose] : () => { - rmSync(generated_entrypoint); - }}; result = await Bun.build({ entrypoints: [generated_entrypoint], @@ -71,48 +72,51 @@ const results = await Promise.allSettled( }); if (!result.success) throw new AggregateError(result.logs); assert(result.outputs.length === 1, "must bundle to a single file"); - // @ts-ignore - code = await result.outputs[0].text(); + code = (await result.outputs[0].text()).replace(`// ${basename(generated_entrypoint)}`, "").trim(); - let names: string = ""; - code = code - .replace(/(\n?)\s*__marker__.*__marker__\((.+?)\);\s*/s, (_, n, captured) => { - names = captured; - return n; - }) - .replace(`// ${basename(generated_entrypoint)}`, "") - .trim(); - assert(names, "missing name"); + rmSync(generated_entrypoint); - if (debug) { - code = "\n " + code.replace(/\n/g, "\n ") + "\n"; + if(file !== 'error') { + let names: string = ""; + code = code + .replace(/(\n?)\s*__marker__.*__marker__\((.+?)\);\s*/s, (_, n, captured) => { + names = captured; + return n; + }) + .trim(); + assert(names, "missing name"); + + if (debug) { + code = "\n " + code.replace(/\n/g, "\n ") + "\n"; + } + + if (code[code.length - 1] === ";") code = code.slice(0, -1); + + if (side === "server") { + const server_fetch_function = names.split(",")[2].trim(); + code = debug ? `${code} return ${server_fetch_function};\n` : `${code};return ${server_fetch_function};`; + } + + code = debug ? `((${names}) => {${code}})({\n` : `((${names})=>{${code}})({`; + + if (side === "server") { + code = `export default await ${code}`; + } } - if (code[code.length - 1] === ";") code = code.slice(0, -1); - - if (side === "server") { - const server_fetch_function = names.split(",")[2].trim(); - code = debug ? `${code} return ${server_fetch_function};\n` : `${code};return ${server_fetch_function};`; - } - - code = debug ? `((${names}) => {${code}})({\n` : `((${names})=>{${code}})({`; - - if (side === "server") { - code = `export default await ${code}`; - } - - writeFileSync(join(codegen_root, `bake.${side}.js`), code); + writeFileSync(join(codegen_root, `bake.${file}.js`), code); }), ); // print failures in a de-duplicated fashion. 
interface Err { - kind: "client" | "server" | "both"; + kind: ("client" | "server" | "error")[]; err: any; } const failed = [ - { kind: "client", result: results[0] }, - { kind: "server", result: results[1] }, + { kind: ["client"], result: results[0] }, + { kind: ["server"], result: results[1] }, + { kind: ["error"], result: results[2] }, ] .filter(x => x.result.status === "rejected") .map(x => ({ kind: x.kind, err: x.result.reason })) as Err[]; @@ -129,25 +133,39 @@ if (failed.length > 0) { if (!x.err?.message) continue; for (const other of flattened_errors.slice(0, i)) { if (other.err?.message === x.err.message || other.err.stack === x.err.stack) { - other.kind = "both"; + other.kind = [...x.kind, ...other.kind]; flattened_errors.splice(i, 1); i -= 1; continue; } } } - let current = ""; for (const { kind, err } of flattened_errors) { - if (kind !== current) { - const map = { both: "runtime", client: "client runtime", server: "server runtime" }; - console.error(`Errors while bundling HMR ${map[kind]}:`); - } + const map = { error: "error runtime", client: "client runtime", server: "server runtime" }; + console.error(`Errors while bundling Bake ${kind.map(x=>map[x]).join(' and ')}:`); console.error(err); } - process.exit(1); + if(!live) + process.exit(1); } else { - console.log("-> bake.client.js, bake.server.js"); + console.log("-> bake.client.js, bake.server.js, bake.error.js"); const empty_file = join(codegen_root, "bake_empty_file"); if (!existsSync(empty_file)) writeFileSync(empty_file, "this is used to fulfill a cmake dependency"); } +} + +await run(); + +if (live) { + const watcher = watch(base_dir, { recursive: true }) as any; + for await (const event of watcher) { + if(event.filename.endsWith('.zig')) continue; + if(event.filename.startsWith('.')) continue; + try { + await run(); + }catch(e) { + console.log(e); + } + } +} \ No newline at end of file diff --git a/src/crash_handler.zig b/src/crash_handler.zig index 3a870c2bdf..4f806ab6dc 100644 --- a/src/crash_handler.zig +++ b/src/crash_handler.zig @@ -1520,7 +1520,7 @@ pub fn dumpStackTrace(trace: std.builtin.StackTrace) void { .action = .view_trace, .reason = .{ .zig_error = error.DumpStackTrace }, .trace = &trace, - }}); + }}) catch {}; return; } @@ -1601,6 +1601,49 @@ pub fn dumpStackTrace(trace: std.builtin.StackTrace) void { stderr.writeAll(proc.stderr) catch return; } +/// A variant of `std.builtin.StackTrace` that stores its data within itself +/// instead of being a pointer. This allows storing captured stack traces +/// for later printing. 
+pub const StoredTrace = struct {
+    data: [31]usize,
+    index: usize,
+
+    pub const empty: StoredTrace = .{
+        .data = .{0} ** 31,
+        .index = 0,
+    };
+
+    pub fn trace(stored: *StoredTrace) std.builtin.StackTrace {
+        return .{
+            .index = stored.index,
+            .instruction_addresses = &stored.data,
+        };
+    }
+
+    pub fn capture(begin: ?usize) StoredTrace {
+        var stored: StoredTrace = StoredTrace.empty;
+        var frame = stored.trace();
+        std.debug.captureStackTrace(begin orelse @returnAddress(), &frame);
+        stored.index = frame.index;
+        return stored;
+    }
+
+    pub fn from(stack_trace: ?*std.builtin.StackTrace) StoredTrace {
+        if (stack_trace) |stack| {
+            var data: [31]usize = undefined;
+            @memset(&data, 0);
+            const items = @min(stack.instruction_addresses.len, 31);
+            @memcpy(data[0..items], stack.instruction_addresses[0..items]);
+            return .{
+                .data = data,
+                .index = @min(items, stack.index),
+            };
+        } else {
+            return empty;
+        }
+    }
+};
+
 pub const js_bindings = struct {
     const JSC = bun.JSC;
     const JSValue = JSC.JSValue;
diff --git a/src/js/node/async_hooks.ts b/src/js/node/async_hooks.ts
index 9480c1b02a..db0f0b8272 100644
--- a/src/js/node/async_hooks.ts
+++ b/src/js/node/async_hooks.ts
@@ -303,19 +303,31 @@ class AsyncResource {
 
 // The rest of async_hooks is not implemented and is stubbed with no-ops and warnings.
 
-function createWarning(message) {
+function createWarning(message, isCreateHook?: boolean) {
   let warned = false;
-  var wrapped = function () {
+  var wrapped = function (arg1?) {
     if (warned) return;
 
     const known_supported_modules = [
       // the following do not actually need async_hooks to work properly
       "zx/build/core.js",
       "datadog-core/src/storage/async_resource.js",
-      "react-server-dom-webpack/",
     ];
     const e = new Error().stack!;
    if (known_supported_modules.some(m => e.includes(m))) return;
 
+    if (isCreateHook && arg1) {
+      // This block specifically filters out react-server, which is often
+      // bundled into a framework or application. Its createHook call passes
+      // three handlers that are all TODO stubs. For more info, see this comment:
+      // https://github.com/oven-sh/bun/issues/13866#issuecomment-2397896065
+      if (typeof arg1 === 'object') {
+        const { init, promiseResolve, destroy } = arg1;
+        if (init && promiseResolve && destroy) {
+          if (isEmptyFunction(init) && isEmptyFunction(destroy))
+            return;
+        }
+      }
+    }
 
     warned = true;
     console.warn("[bun] Warning:", message);
@@ -323,13 +335,21 @@ function createWarning(message) {
   return wrapped;
 }
 
+function isEmptyFunction(f: Function) {
+  let str = f.toString();
+  if (!str.startsWith('function()')) return false;
+  str = str.slice('function()'.length).trim();
+  return /^{\s*}$/.test(str);
+}
+
 const createHookNotImpl = createWarning(
   "async_hooks.createHook is not implemented in Bun. Hooks can still be created but will never be called.",
+  true,
 );
 
 function createHook(callbacks) {
   return {
-    enable: createHookNotImpl,
+    enable: () => createHookNotImpl(callbacks),
     disable: createHookNotImpl,
   };
 }
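
The lexer hunks that follow share one pattern: emit the generic "Syntax Error" only when no more descriptive diagnostic has been logged yet. A self-contained sketch of that guard (toy `Log` type, not Bun's `logger.Log`):

    const std = @import("std");

    const Log = struct {
        errors: usize = 0,

        fn hasErrors(log: *const Log) bool {
            return log.errors > 0;
        }

        fn addError(log: *Log, offset: usize, msg: []const u8) void {
            log.errors += 1;
            std.debug.print("error at byte {d}: {s}\n", .{ offset, msg });
        }
    };

    fn syntaxError(log: *Log, start: usize) error{SyntaxError} {
        // Only fall back to the generic message when the log is still clean.
        if (!log.hasErrors())
            log.addError(start, "Syntax Error");
        return error.SyntaxError;
    }
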
diff --git a/src/js_lexer.zig b/src/js_lexer.zig
index ff310c3156..9ada1a3890 100644
--- a/src/js_lexer.zig
+++ b/src/js_lexer.zig
@@ -257,7 +257,11 @@ fn NewLexer_(
     pub fn syntaxError(self: *LexerType) !void {
         @setCold(true);
 
-        self.addError(self.start, "Syntax Error!!", .{}, true);
+        // Only add this if there is not already an error.
+        // It is possible that a more descriptive error was already emitted.
+        if (!self.log.hasErrors())
+            self.addError(self.start, "Syntax Error", .{}, true);
+
         return Error.SyntaxError;
     }
@@ -2723,6 +2727,18 @@ fn NewLexer_(
         if (lexer.token != token) {
             try lexer.expected(token);
+            return Error.SyntaxError;
+        }
+
+        try lexer.nextInsideJSXElement();
+    }
+
+    pub fn expectInsideJSXElementWithName(lexer: *LexerType, token: T, name: string) !void {
+        lexer.assertNotJSON();
+
+        if (lexer.token != token) {
+            try lexer.expectedString(name);
             return Error.SyntaxError;
         }
 
         try lexer.nextInsideJSXElement();
diff --git a/src/js_parser.zig b/src/js_parser.zig
index a948bc39d1..2815a8a0a7 100644
--- a/src/js_parser.zig
+++ b/src/js_parser.zig
@@ -548,7 +548,7 @@ const JSXTag = struct {
         // The tag is an identifier
         var name = p.lexer.identifier;
         var tag_range = p.lexer.range();
-        try p.lexer.expectInsideJSXElement(.t_identifier);
+        try p.lexer.expectInsideJSXElementWithName(.t_identifier, "JSX element name");
 
         // Certain identifiers are strings
         //
to match opening tag \\<{s}\\>", .{
-                end_tag.name,
-                tag.name,
-            });
+            try p.log.addRangeErrorFmtWithNote(
+                p.source,
+                end_tag.range,
+                p.allocator,
+                "Expected closing tag \\</{s}\\> to match opening tag \\<{s}\\>",
+                .{
+                    end_tag.name,
+                    tag.name,
+                },
+                "Starting tag here",
+                .{},
+                tag.range,
+            );
             return error.SyntaxError;
         }
diff --git a/src/js_printer.zig b/src/js_printer.zig
index 699a1ed684..cd6ee0bd7e 100644
--- a/src/js_printer.zig
+++ b/src/js_printer.zig
@@ -4940,15 +4940,8 @@ fn NewPrinter(
                 p.printExpr(s.value, .lowest, ExprFlag.ExprResultIsUnused());
                 p.printSemicolonAfterStatement();
             },
-            else => {
-                var slice = p.writer.slice();
-                const to_print: []const u8 = if (slice.len > 1024) slice[slice.len - 1024 ..] else slice;
-
-                if (to_print.len > 0) {
-                    Output.panic("\nvoluntary crash while printing:\n{s}\n---This is a bug. Not your fault.\n", .{to_print});
-                } else {
-                    Output.panic("\nvoluntary crash while printing. This is a bug. Not your fault.\n", .{});
-                }
+            else => |tag| {
+                Output.panic("Unexpected tag in printStmt: .{s}", .{@tagName(tag)});
             },
         }
     }
diff --git a/src/logger.zig b/src/logger.zig
index 04266db7d2..e35ca4c4a2 100644
--- a/src/logger.zig
+++ b/src/logger.zig
@@ -21,12 +21,13 @@ const assert = bun.assert;
 const ArrayList = std.ArrayList;
 const StringBuilder = @import("./string_builder.zig");
 const Index = @import("./ast/base.zig").Index;
-pub const Kind = enum(i8) {
-    err,
-    warn,
-    note,
-    debug,
-    verbose,
+
+pub const Kind = enum(u8) {
+    err = 0,
+    warn = 1,
+    note = 2,
+    debug = 3,
+    verbose = 4,
 
     pub inline fn shouldPrint(this: Kind, other: Log.Level) bool {
         return switch (other) {
@@ -379,6 +380,7 @@ pub const Msg = struct {
     kind: Kind = Kind.err,
     data: Data,
     metadata: Metadata = .{ .build = 0 },
+    // TODO: make this non-optional, empty slice for no notes
     notes: ?[]Data = null,
 
     pub fn fromJS(allocator: std.mem.Allocator, globalObject: *bun.JSC.JSGlobalObject, file: string, err: bun.JSC.JSValue) !Msg {
@@ -598,7 +600,9 @@ pub const Range = struct {
 
 pub const Log = struct {
     debug: bool = false,
+    // TODO: make u32
     warnings: usize = 0,
+    // TODO: make u32
     errors: usize = 0,
     msgs: ArrayList(Msg),
     level: Level = if (Environment.isDebug) Level.info else Level.warn,
diff --git a/src/mimalloc_arena.zig b/src/mimalloc_arena.zig
index a44a35c61f..d44ba21b76 100644
--- a/src/mimalloc_arena.zig
+++ b/src/mimalloc_arena.zig
@@ -197,6 +197,13 @@ pub const Arena = struct {
         mimalloc.mi_heap_collect(this.heap orelse return, force);
     }
 
+    pub inline fn helpCatchMemoryIssues(this: Arena) void {
+        if (comptime FeatureFlags.help_catch_memory_issues) {
+            this.gc(true);
+            bun.Mimalloc.mi_collect(true);
+        }
+    }
+
     pub fn ownsPtr(this: Arena, ptr: *const anyopaque) bool {
         return mimalloc.mi_heap_check_owned(this.heap.?, ptr);
     }
diff --git a/src/options.zig b/src/options.zig
index b779186472..ef52e99489 100644
--- a/src/options.zig
+++ b/src/options.zig
@@ -441,7 +441,7 @@ pub const Target = enum {
         };
     }
 
-    pub fn bakeRenderer(target: Target) bun.bake.Renderer {
+    pub fn bakeGraph(target: Target) bun.bake.Graph {
         return switch (target) {
             .browser => .client,
             .kit_server_components_ssr => .ssr,
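
The `bakeRenderer` to `bakeGraph` rename above is mechanical, but the mapping itself is worth seeing whole: a small total function from bundling target to Bake graph. A reduced sketch with toy enums; the hunk only shows the `.browser` and `.kit_server_components_ssr` arms, so the remaining arm here is an assumption:

    const Graph = enum { client, server, ssr };
    const Target = enum { browser, bun, kit_server_components_ssr };

    fn bakeGraph(target: Target) Graph {
        return switch (target) {
            .browser => .client,
            .kit_server_components_ssr => .ssr,
            // Assumed: server-side targets map to the server graph.
            .bun => .server,
        };
    }
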
diff --git a/src/toml/toml_lexer.zig b/src/toml/toml_lexer.zig
index b9d93991a5..4e53e1a2b4 100644
--- a/src/toml/toml_lexer.zig
+++ b/src/toml/toml_lexer.zig
@@ -77,7 +77,11 @@ pub const Lexer = struct {
     pub fn syntaxError(self: *Lexer) !void {
         @setCold(true);
 
-        self.addError(self.start, "Syntax Error!!", .{}, true);
+        // Only add this if there is not already an error.
+        // It is possible that a more descriptive error was already emitted.
+        if (!self.log.hasErrors())
+            self.addError(self.start, "Syntax Error", .{}, true);
+
         return Error.SyntaxError;
     }
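
To close, a usage sketch for the new `StoredTrace` (the call sites below are hypothetical, but they mirror how `DebugThreadLock` in `src/bun.zig` uses the type): capture the caller's frame when a resource is acquired, and dump the stored trace if a violation is detected later.

    const crash_handler = @import("./crash_handler.zig");

    var locked_at: crash_handler.StoredTrace = crash_handler.StoredTrace.empty;

    fn lock() void {
        // Remember where the lock was taken.
        locked_at = crash_handler.StoredTrace.capture(@returnAddress());
    }

    fn reportViolation() void {
        // trace() re-materializes a std.builtin.StackTrace view over the
        // stored addresses, so the normal dump helpers accept it.
        crash_handler.dumpStackTrace(locked_at.trace());
    }

Because the addresses live inside the struct rather than behind a pointer, the trace stays valid long after the capturing frame has returned.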