//! Instance of the development server. Attaches to an instance of `Bun.serve`,
//! controlling bundler, routing, and hot module reloading.
//!
//! Reprocessing files that did not change is banned; by having perfect
//! incremental tracking over the project, editing a file's contents (aside
//! from adjusting imports) must always rebundle only that one file.
//!
//! All work is held in-memory, using manually managed data-oriented design.
//!
//! TODO: Currently does not have a `deinit()`, as it was assumed to be alive for
//! the remainder of this process' lifespan. Later, it will be required to fully
//! clean up server state.
pub const DevServer = @This();
|
|
pub const debug = bun.Output.Scoped(.Bake, false);
|
|
pub const igLog = bun.Output.scoped(.IncrementalGraph, false);
|
|
|
|
pub const Options = struct {
|
|
root: []const u8,
|
|
framework: bake.Framework,
|
|
dump_sources: ?[]const u8 = if (Environment.isDebug) ".bake-debug" else null,
|
|
dump_state_on_crash: bool = bun.FeatureFlags.bake_debugging_features,
|
|
verbose_watcher: bool = false,
|
|
vm: *VirtualMachine,
|
|
};
|
|
|
|
// The fields `client_graph`, `server_graph`, and `directory_watchers` all
|
|
// use `@fieldParentPointer` to access DevServer's state. This pattern has
|
|
// made it easier to group related fields together, but one must remember
|
|
// those structures still depend on the DevServer pointer.
|
|
|
|
/// Used for all server-wide allocations. In debug, this shows up in
|
|
/// a separate named heap. Thread-safe.
|
|
allocator: Allocator,
|
|
/// Absolute path to project root directory. For the HMR
|
|
/// runtime, its module IDs are strings relative to this.
|
|
root: []const u8,
|
|
/// Hex string generated by hashing the framework config and bun revision.
/// Embedded in client bundles and sent when the HMR socket is opened;
/// when the value mismatches, the page is forcibly reloaded.
configuration_hash_key: [16]u8,
|
|
/// The virtual machine (global object) to execute code in.
|
|
vm: *VirtualMachine,
|
|
/// May be `null` if not attached to an HTTP server yet.
|
|
server: ?bun.JSC.API.AnyServer,
|
|
/// Contains the tree of routes. This structure contains FileIndex
router: FrameworkRouter,
/// Every navigable route has bundling state here.
route_bundles: ArrayListUnmanaged(RouteBundle),
/// All access into IncrementalGraph is guarded by a DebugThreadLock. This is
/// only a debug assertion, as contention here is always a bug; if a bundle is
/// active and a file is changed, that change is placed into the next bundle.
graph_safety_lock: bun.DebugThreadLock,
|
|
client_graph: IncrementalGraph(.client),
|
|
server_graph: IncrementalGraph(.server),
|
|
/// State populated during bundling and hot updates; often cleared.
incremental_result: IncrementalResult,
|
|
/// CSS files are accessible via `/_bun/css/<hex key>.css`
|
|
/// Value is bundled code owned by `dev.allocator`
|
|
css_files: AutoArrayHashMapUnmanaged(u64, []const u8),
|
|
/// JS files are accessible via `/_bun/client/route.<hex key>.js`
|
|
/// These are randomly generated to avoid possible browser caching of old assets.
|
|
route_js_payloads: AutoArrayHashMapUnmanaged(u64, Route.Index),
|
|
// /// Assets are accessible via `/_bun/asset/<key>`
|
|
// assets: bun.StringArrayHashMapUnmanaged(u64, Asset),
|
|
/// All bundling failures are stored until a file is saved and rebuilt.
|
|
/// They are stored in the wire format the HMR runtime expects so that
|
|
/// serialization only happens once.
|
|
bundling_failures: std.ArrayHashMapUnmanaged(
|
|
SerializedFailure,
|
|
void,
|
|
SerializedFailure.ArrayHashContextViaOwner,
|
|
false,
|
|
) = .{},
|
|
|
|
// These values are handles to the functions in `hmr-runtime-server.ts`.
|
|
// For type definitions, see `./bake.private.d.ts`
|
|
server_fetch_function_callback: JSC.Strong,
|
|
server_register_update_callback: JSC.Strong,
|
|
|
|
// Watching
|
|
bun_watcher: *JSC.Watcher,
|
|
directory_watchers: DirectoryWatchStore,
|
|
/// Only two hot-reload tasks exist ever. Memory is reused by swapping between the two.
|
|
/// These items are aligned to cache lines to reduce contention.
|
|
watch_events: [2]HotReloadTask.Aligned,
|
|
/// 0 - no watch
|
|
/// 1 - has fired additional watch
|
|
/// 2+ - new events available, watcher is waiting on bundler to finish
|
|
watch_state: std.atomic.Value(u32),
|
|
watch_current: u1 = 0,
|
|
|
|
/// Number of bundles that have been executed. This is currently not read, but
|
|
/// will be used later to determine when to invoke graph garbage collection.
|
|
generation: usize = 0,
|
|
/// Displayed in the HMR success indicator
|
|
bundles_since_last_error: usize = 0,
|
|
|
|
/// Quickly retrieve a route's index from the entry point file. These are
/// populated as the routes are discovered. The route may not be bundled or
/// navigable, such as when a layout's index is looked up.
route_lookup: AutoArrayHashMapUnmanaged(IncrementalGraph(.server).FileIndex, RouteIndexAndRecurseFlag),
|
|
|
|
framework: bake.Framework,
|
|
// Each logical graph gets its own bundler configuration
|
|
server_bundler: Bundler,
|
|
client_bundler: Bundler,
|
|
ssr_bundler: Bundler,
|
|
|
|
// TODO: This being shared state is likely causing a crash
|
|
/// Stored and reused for bundling tasks
|
|
log: Log,
|
|
|
|
// Debugging
|
|
dump_dir: ?std.fs.Dir,
|
|
/// Reference count to number of active sockets with the visualizer enabled.
|
|
emit_visualizer_events: u32,
|
|
has_pre_crash_handler: bool,
|
|
|
|
pub const internal_prefix = "/_bun";
|
|
pub const client_prefix = internal_prefix ++ "/client";
|
|
pub const asset_prefix = internal_prefix ++ "/asset";
|
|
pub const css_prefix = internal_prefix ++ "/css";
|
|
|
|
pub const RouteBundle = struct {
|
|
pub const Index = bun.GenericIndex(u30, RouteBundle);
|
|
|
|
route: Route.Index,
|
|
|
|
server_state: State,
|
|
|
|
/// Used to communicate the route pattern over WebSocket. The HMR client
/// contains code to match this against the URL bar to determine if a
/// reloading route applies or not.
full_pattern: []const u8,
|
|
/// Generated lazily when the client JS is requested (HTTP GET /_bun/client/*.js),
|
|
/// which is only needed when a hard-reload is performed.
|
|
///
|
|
/// Freed when a client module updates.
|
|
client_bundle: ?[]const u8,
|
|
/// Contains the list of serialized failures. A hashmap allows for
/// efficient lookup and removal of failing files.
/// When state == .evaluation_failure, this is populated with that error.
evaluate_failure: ?SerializedFailure,
|
|
|
|
// TODO: micro-opt: use a singular strong
|
|
|
|
/// Cached to avoid re-creating the array every request.
|
|
/// Invalidated when a layout is added or removed from this route.
|
|
cached_module_list: JSC.Strong,
|
|
/// Cached to avoid re-creating the string every request.
|
|
/// Invalidated when any client file associated with the route is updated.
|
|
cached_client_bundle_url: JSC.Strong,
|
|
/// Cached to avoid re-creating the array every request.
|
|
/// Invalidated when the list of CSS files changes.
|
|
cached_css_file_array: JSC.Strong,
|
|
|
|
/// A union is not used so that `bundler_failure_logs` can re-use memory, as
|
|
/// this state frequently changes between `loaded` and the failure variants.
|
|
const State = enum {
|
|
/// In development mode, routes are lazily built. This state implies a
|
|
/// build of this route has never been run. It is possible to bundle the
|
|
/// route entry point and still have an unqueued route if another route
|
|
/// imports this one. This state is implied if `FrameworkRouter.Route`
|
|
/// has no bundle index assigned.
|
|
unqueued,
|
|
/// A bundle associated with this route is in progress.
bundling,
|
|
/// This route was flagged for bundling failures. There are edge cases
/// where a route can be disconnected from its failures, so the route's
/// imports have to be traced to discover if possible failures still
/// exist.
possible_bundling_failures,
|
|
/// Loading the module at runtime had a failure.
|
|
evaluation_failure,
|
|
/// Calling the request function may error, but that error will not be
/// the fault of bundling.
loaded,
|
|
};
|
|
};
|
|
|
|
pub const DeferredRequest = struct {
|
|
next: ?*DeferredRequest,
|
|
bundle: RouteBundle.Index,
|
|
data: Data,
|
|
|
|
const Data = union(enum) {
|
|
server_handler: bun.JSC.API.SavedRequest,
|
|
/// onJsRequestWithBundle
|
|
js_payload: *Response,
|
|
|
|
const Tag = @typeInfo(Data).Union.tag_type.?;
|
|
};
|
|
};
|
|
|
|
/// DevServer is stored on the heap, storing its allocator.
|
|
// TODO: change the error set to JSOrMemoryError!*DevServer
|
|
pub fn init(options: Options) !*DevServer {
|
|
const allocator = bun.default_allocator;
|
|
bun.analytics.Features.kit_dev +|= 1;
|
|
|
|
var dump_dir = if (bun.FeatureFlags.bake_debugging_features)
|
|
if (options.dump_sources) |dir|
|
|
std.fs.cwd().makeOpenPath(dir, .{}) catch |err| dir: {
|
|
bun.handleErrorReturnTrace(err, @errorReturnTrace());
|
|
Output.warn("Could not open directory for dumping sources: {}", .{err});
|
|
break :dir null;
|
|
}
|
|
else
|
|
null;
|
|
errdefer if (bun.FeatureFlags.bake_debugging_features) if (dump_dir) |*dir| dir.close();
|
|
|
|
const separate_ssr_graph = if (options.framework.server_components) |sc| sc.separate_ssr_graph else false;
|
|
|
|
const dev = bun.create(allocator, DevServer, .{
|
|
.allocator = allocator,
|
|
|
|
.root = options.root,
|
|
.vm = options.vm,
|
|
.server = null,
|
|
.directory_watchers = DirectoryWatchStore.empty,
|
|
.server_fetch_function_callback = .{},
|
|
.server_register_update_callback = .{},
|
|
.generation = 0,
|
|
.graph_safety_lock = .{},
|
|
.log = Log.init(allocator),
|
|
.dump_dir = dump_dir,
|
|
.framework = options.framework,
|
|
.watch_state = .{ .raw = 0 },
|
|
.watch_current = 0,
|
|
.emit_visualizer_events = 0,
|
|
.has_pre_crash_handler = options.dump_state_on_crash,
|
|
.css_files = .{},
|
|
.route_js_payloads = .{},
|
|
// .assets = .{},
|
|
|
|
.client_graph = IncrementalGraph(.client).empty,
|
|
.server_graph = IncrementalGraph(.server).empty,
|
|
.incremental_result = IncrementalResult.empty,
|
|
.route_lookup = .{},
|
|
|
|
.server_bundler = undefined,
|
|
.client_bundler = undefined,
|
|
.ssr_bundler = undefined,
|
|
|
|
.bun_watcher = undefined,
|
|
.watch_events = undefined,
|
|
|
|
.configuration_hash_key = undefined,
|
|
|
|
.router = undefined,
|
|
.route_bundles = .{},
|
|
});
|
|
errdefer allocator.destroy(dev);
|
|
|
|
assert(dev.server_graph.owner() == dev);
|
|
assert(dev.client_graph.owner() == dev);
|
|
assert(dev.directory_watchers.owner() == dev);
|
|
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
|
|
const fs = try bun.fs.FileSystem.init(options.root);
|
|
|
|
dev.bun_watcher = try Watcher.init(DevServer, dev, fs, bun.default_allocator);
|
|
errdefer dev.bun_watcher.deinit(false);
|
|
try dev.bun_watcher.start();
|
|
|
|
dev.server_bundler.resolver.watcher = dev.bun_watcher.getResolveWatcher();
|
|
dev.client_bundler.resolver.watcher = dev.bun_watcher.getResolveWatcher();
|
|
dev.ssr_bundler.resolver.watcher = dev.bun_watcher.getResolveWatcher();
|
|
dev.watch_events = .{
|
|
.{ .aligned = HotReloadTask.initEmpty(dev) },
|
|
.{ .aligned = HotReloadTask.initEmpty(dev) },
|
|
};
|
|
|
|
try dev.framework.initBundler(allocator, &dev.log, .development, .server, &dev.server_bundler);
dev.server_bundler.options.dev_server = dev;
try dev.framework.initBundler(allocator, &dev.log, .development, .client, &dev.client_bundler);
dev.client_bundler.options.dev_server = dev;
|
|
if (separate_ssr_graph) {
|
|
try dev.framework.initBundler(allocator, &dev.log, .development, .ssr, &dev.ssr_bundler);
|
|
dev.ssr_bundler.options.dev_server = dev;
|
|
}
|
|
|
|
dev.framework = dev.framework.resolve(&dev.server_bundler.resolver, &dev.client_bundler.resolver) catch {
|
|
Output.errGeneric("Failed to resolve all imports required by the framework", .{});
|
|
return error.FrameworkInitialization;
|
|
};
|
|
|
|
errdefer dev.route_lookup.clearAndFree(allocator);
|
|
// errdefer dev.client_graph.deinit(allocator);
|
|
// errdefer dev.server_graph.deinit(allocator);
|
|
|
|
dev.vm.global = @ptrCast(dev.vm.global);
|
|
|
|
dev.configuration_hash_key = hash_key: {
|
|
var hash = std.hash.Wyhash.init(128);
|
|
|
|
if (bun.Environment.isDebug) {
|
|
const stat = try bun.sys.stat(try bun.selfExePath()).unwrap();
|
|
bun.writeAnyToHasher(&hash, stat.mtime());
|
|
hash.update(bake.getHmrRuntime(.client));
|
|
hash.update(bake.getHmrRuntime(.server));
|
|
} else {
|
|
hash.update(bun.Environment.git_sha_short);
|
|
}
|
|
|
|
// TODO: hash router types
|
|
// hash.update(dev.framework.entry_client);
|
|
// hash.update(dev.framework.entry_server);
|
|
|
|
if (dev.framework.server_components) |sc| {
|
|
bun.writeAnyToHasher(&hash, true);
|
|
bun.writeAnyToHasher(&hash, sc.separate_ssr_graph);
|
|
hash.update(sc.client_register_server_reference);
|
|
hash.update(&.{0});
|
|
hash.update(sc.server_register_client_reference);
|
|
hash.update(&.{0});
|
|
hash.update(sc.server_register_server_reference);
|
|
hash.update(&.{0});
|
|
hash.update(sc.server_runtime_import);
|
|
hash.update(&.{0});
|
|
} else {
|
|
bun.writeAnyToHasher(&hash, false);
|
|
}
|
|
|
|
if (dev.framework.react_fast_refresh) |rfr| {
|
|
bun.writeAnyToHasher(&hash, true);
|
|
hash.update(rfr.import_source);
|
|
} else {
|
|
bun.writeAnyToHasher(&hash, false);
|
|
}
|
|
|
|
// TODO: dev.framework.built_in_modules
|
|
|
|
break :hash_key std.fmt.bytesToHex(std.mem.asBytes(&hash.final()), .lower);
|
|
};
|
|
|
|
// Add react fast refresh if needed. This is the first file on the client side,
|
|
// as it will be referred to by index.
|
|
if (dev.framework.react_fast_refresh) |rfr| {
|
|
assert(try dev.client_graph.insertStale(rfr.import_source, false) == IncrementalGraph(.client).react_refresh_index);
|
|
}
|
|
|
|
try dev.initServerRuntime();
|
|
|
|
// Initialize the router
|
|
dev.router = router: {
|
|
var types = try std.ArrayListUnmanaged(FrameworkRouter.Type).initCapacity(allocator, options.framework.file_system_router_types.len);
|
|
errdefer types.deinit(allocator);
|
|
|
|
for (options.framework.file_system_router_types, 0..) |fsr, i| {
|
|
const joined_root = bun.path.joinAbs(dev.root, .auto, fsr.root);
|
|
const entry = dev.server_bundler.resolver.readDirInfoIgnoreError(joined_root) orelse
|
|
continue;
|
|
|
|
const server_file = try dev.server_graph.insertStaleExtra(fsr.entry_server, false, true);
|
|
|
|
try types.append(allocator, .{
|
|
.abs_root = bun.strings.withoutTrailingSlash(entry.abs_path),
|
|
.prefix = fsr.prefix,
|
|
.ignore_underscores = fsr.ignore_underscores,
|
|
.ignore_dirs = fsr.ignore_dirs,
|
|
.extensions = fsr.extensions,
|
|
.style = fsr.style,
|
|
.server_file = toOpaqueFileId(.server, server_file),
|
|
.client_file = if (fsr.entry_client) |client|
|
|
toOpaqueFileId(.client, try dev.client_graph.insertStale(client, false)).toOptional()
|
|
else
|
|
.none,
|
|
.server_file_string = .{},
|
|
});
|
|
|
|
try dev.route_lookup.put(allocator, server_file, .{
|
|
.route_index = FrameworkRouter.Route.Index.init(@intCast(i)),
|
|
.should_recurse_when_visiting = true,
|
|
});
|
|
}
|
|
|
|
break :router try FrameworkRouter.initEmpty(types.items, allocator);
|
|
};
|
|
|
|
// TODO: move pre-bundling to be one tick after server startup.
|
|
// this way the line saying the server is ready shows quicker
|
|
try dev.scanInitialRoutes();
|
|
|
|
if (bun.FeatureFlags.bake_debugging_features and options.dump_state_on_crash)
|
|
try bun.crash_handler.appendPreCrashHandler(DevServer, dev, dumpStateDueToCrash);
|
|
|
|
return dev;
|
|
}
|
|
|
|
fn initServerRuntime(dev: *DevServer) !void {
|
|
const runtime = bun.String.static(bun.bake.getHmrRuntime(.server));
|
|
|
|
const interface = c.BakeLoadInitialServerCode(
|
|
@ptrCast(dev.vm.global),
|
|
runtime,
|
|
if (dev.framework.server_components) |sc| sc.separate_ssr_graph else false,
|
|
) catch |err| {
|
|
dev.vm.printErrorLikeObjectToConsole(dev.vm.global.takeException(err));
|
|
@panic("Server runtime failed to start. The above error is always a bug in Bun");
|
|
};
|
|
|
|
if (!interface.isObject()) @panic("Internal assertion failure: expected interface from HMR runtime to be an object");
|
|
const fetch_function: JSValue = try interface.get(dev.vm.global, "handleRequest") orelse @panic("Internal assertion failure: expected interface from HMR runtime to contain handleRequest");
|
|
bun.assert(fetch_function.isCallable(dev.vm.jsc));
|
|
dev.server_fetch_function_callback = JSC.Strong.create(fetch_function, dev.vm.global);
|
|
const register_update = try interface.get(dev.vm.global, "registerUpdate") orelse @panic("Internal assertion failure: expected interface from HMR runtime to contain registerUpdate");
|
|
dev.server_register_update_callback = JSC.Strong.create(register_update, dev.vm.global);
|
|
|
|
fetch_function.ensureStillAlive();
|
|
register_update.ensureStillAlive();
|
|
}
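// Illustrative shape of the interface loaded above (hypothetical literal;
// `./bake.private.d.ts` holds the authoritative type definitions):
//
//   { handleRequest: Function, registerUpdate: Function, ... }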
|
|
|
|
/// Deferred one tick so that the server can be up faster
|
|
fn scanInitialRoutes(dev: *DevServer) !void {
|
|
try dev.router.scanAll(
|
|
dev.allocator,
|
|
&dev.server_bundler.resolver,
|
|
FrameworkRouter.InsertionContext.wrap(DevServer, dev),
|
|
);
|
|
|
|
try dev.server_graph.ensureStaleBitCapacity(true);
|
|
try dev.client_graph.ensureStaleBitCapacity(true);
|
|
}
|
|
|
|
pub fn attachRoutes(dev: *DevServer, server: anytype) !void {
|
|
dev.server = bun.JSC.API.AnyServer.from(server);
|
|
const app = server.app.?;
|
|
|
|
// For this to work, the route handlers need to be augmented to use the comptime
|
|
// SSL parameter. It's worth considering removing the SSL boolean.
|
|
if (@TypeOf(app) == *uws.NewApp(true)) {
|
|
bun.todoPanic(@src(), "DevServer does not support SSL yet", .{});
|
|
}
|
|
|
|
app.get(client_prefix ++ "/:route", *DevServer, dev, onJsRequest);
|
|
app.get(asset_prefix ++ "/:asset", *DevServer, dev, onAssetRequest);
|
|
app.get(css_prefix ++ "/:asset", *DevServer, dev, onCssRequest);
|
|
app.get(internal_prefix ++ "/src/*", *DevServer, dev, onSrcRequest);
|
|
|
|
app.ws(
|
|
internal_prefix ++ "/hmr",
|
|
dev,
|
|
0,
|
|
uws.WebSocketBehavior.Wrap(DevServer, HmrSocket, false).apply(.{}),
|
|
);
|
|
|
|
app.get(internal_prefix ++ "/incremental_visualizer", *DevServer, dev, onIncrementalVisualizer);
|
|
|
|
app.any("/*", *DevServer, dev, onRequest);
|
|
}
|
|
|
|
pub fn deinit(dev: *DevServer) void {
|
|
const allocator = dev.allocator;
|
|
if (dev.has_pre_crash_handler)
|
|
bun.crash_handler.removePreCrashHandler(dev);
|
|
allocator.destroy(dev);
|
|
bun.todoPanic(@src(), "bake.DevServer.deinit()", .{});
|
|
}
|
|
|
|
fn onJsRequest(dev: *DevServer, req: *Request, resp: *Response) void {
|
|
const route_bundle = route: {
|
|
const route_id = req.parameter(0);
|
|
if (!bun.strings.hasSuffixComptime(route_id, ".js"))
|
|
return req.setYield(true);
|
|
if (!bun.strings.hasPrefixComptime(route_id, "route."))
|
|
return req.setYield(true);
|
|
const i = parseHexToInt(u64, route_id["route.".len .. route_id.len - ".js".len]) orelse
|
|
return req.setYield(true);
|
|
break :route dev.route_js_payloads.get(i) orelse
|
|
return req.setYield(true);
|
|
};
|
|
|
|
dev.ensureRouteIsBundled(route_bundle, .js_payload, req, resp) catch bun.outOfMemory();
|
|
}
|
|
|
|
fn onAssetRequest(dev: *DevServer, req: *Request, resp: *Response) void {
|
|
_ = dev;
|
|
_ = req;
|
|
_ = resp;
|
|
bun.todoPanic(@src(), "serve asset file", .{});
|
|
// const route_id = req.parameter(0);
|
|
// const asset = dev.assets.get(route_id) orelse
|
|
// return req.setYield(true);
|
|
// _ = asset; // autofix
|
|
|
|
}
|
|
|
|
fn onCssRequest(dev: *DevServer, req: *Request, resp: *Response) void {
|
|
const param = req.parameter(0);
|
|
if (!bun.strings.hasSuffixComptime(param, ".css"))
|
|
return req.setYield(true);
|
|
const hex = param[0 .. param.len - ".css".len];
|
|
if (hex.len != @sizeOf(u64) * 2)
|
|
return req.setYield(true);
|
|
|
|
var out: [@sizeOf(u64)]u8 = undefined;
|
|
assert((std.fmt.hexToBytes(&out, hex) catch
|
|
return req.setYield(true)).len == @sizeOf(u64));
|
|
const hash: u64 = @bitCast(out);
|
|
|
|
const css = dev.css_files.get(hash) orelse
|
|
return req.setYield(true);
|
|
|
|
sendTextFile(css, MimeType.css.value, resp);
|
|
}
|
|
|
|
fn parseHexToInt(comptime T: type, slice: []const u8) ?T {
|
|
var out: [@sizeOf(T)]u8 = undefined;
|
|
assert((std.fmt.hexToBytes(&out, slice) catch return null).len == @sizeOf(T));
|
|
return @bitCast(out);
|
|
}
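// Illustrative usage (the hex value is hypothetical): the 16-hex-character
// key in "/_bun/css/<key>.css" decodes into the u64 hash used to look up
// `css_files`:
//
//   const hash = parseHexToInt(u64, "00ff00ff00ff00ff") orelse
//       return req.setYield(true);
//   const css = dev.css_files.get(hash) orelse return req.setYield(true);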
|
|
|
|
fn onIncrementalVisualizer(_: *DevServer, _: *Request, resp: *Response) void {
|
|
resp.corked(onIncrementalVisualizerCorked, .{resp});
|
|
}
|
|
|
|
fn onIncrementalVisualizerCorked(resp: *Response) void {
|
|
const code = if (Environment.codegen_embed)
|
|
@embedFile("incremental_visualizer.html")
|
|
else
|
|
bun.runtimeEmbedFile(.src_eager, "bake/incremental_visualizer.html");
|
|
resp.writeHeaderInt("Content-Length", code.len);
|
|
resp.end(code, false);
|
|
}
|
|
|
|
fn ensureRouteIsBundled(
|
|
dev: *DevServer,
|
|
route_index: Route.Index,
|
|
kind: DeferredRequest.Data.Tag,
|
|
req: *Request,
|
|
resp: *Response,
|
|
) bun.OOM!void {
|
|
const bundle_index = if (dev.router.routePtr(route_index).bundle.unwrap()) |bundle_index|
|
|
bundle_index
|
|
else
|
|
try dev.insertRouteBundle(route_index);
|
|
|
|
switch (dev.routeBundlePtr(bundle_index).server_state) {
|
|
.unqueued => {
|
|
const server_file_names = dev.server_graph.bundled_files.keys();
|
|
const client_file_names = dev.client_graph.bundled_files.keys();
|
|
|
|
var sfa = std.heap.stackFallback(4096, dev.allocator);
|
|
const temp_alloc = sfa.get();
|
|
|
|
var entry_points = std.ArrayList(BakeEntryPoint).init(temp_alloc);
|
|
defer entry_points.deinit();
|
|
|
|
// Build a list of all files that have not yet been bundled.
|
|
var route = dev.router.routePtr(route_index);
|
|
const router_type = dev.router.typePtr(route.type);
|
|
try dev.appendOpaqueEntryPoint(server_file_names, &entry_points, .server, router_type.server_file);
|
|
try dev.appendOpaqueEntryPoint(client_file_names, &entry_points, .client, router_type.client_file);
|
|
try dev.appendOpaqueEntryPoint(server_file_names, &entry_points, .server, route.file_page);
|
|
try dev.appendOpaqueEntryPoint(server_file_names, &entry_points, .server, route.file_layout);
|
|
while (route.parent.unwrap()) |parent_index| {
|
|
route = dev.router.routePtr(parent_index);
|
|
try dev.appendOpaqueEntryPoint(server_file_names, &entry_points, .server, route.file_layout);
|
|
}
|
|
|
|
if (entry_points.items.len == 0) {
|
|
@panic("TODO: trace graph for possible errors, so DevServer knows what state this should go to");
|
|
}
|
|
|
|
const route_bundle = dev.routeBundlePtr(bundle_index);
|
|
if (dev.bundle(entry_points.items)) |_| {
|
|
route_bundle.server_state = .loaded;
|
|
} else |err| switch (err) {
|
|
error.OutOfMemory => bun.outOfMemory(),
|
|
error.BuildFailed => assert(route_bundle.server_state == .possible_bundling_failures),
|
|
error.ServerLoadFailed => route_bundle.server_state = .evaluation_failure,
|
|
}
|
|
},
|
|
.bundling => {
|
|
const prepared = dev.server.?.DebugHTTPServer.prepareJsRequestContext(req, resp) orelse
|
|
return;
|
|
_ = prepared;
|
|
@panic("TODO: Async Bundler");
|
|
},
|
|
else => {},
|
|
}
|
|
switch (dev.routeBundlePtr(bundle_index).server_state) {
|
|
.unqueued => unreachable,
|
|
.bundling => @panic("TODO: Async Bundler"),
|
|
.possible_bundling_failures => {
|
|
// TODO: perform a graph trace to find just the errors that are needed
|
|
if (dev.bundling_failures.count() > 0) {
|
|
resp.corked(sendSerializedFailures, .{
|
|
dev,
|
|
resp,
|
|
dev.bundling_failures.keys(),
|
|
.bundler,
|
|
});
|
|
return;
|
|
} else {
|
|
dev.routeBundlePtr(bundle_index).server_state = .loaded;
|
|
}
|
|
},
|
|
.evaluation_failure => {
|
|
resp.corked(sendSerializedFailures, .{
|
|
dev,
|
|
resp,
|
|
(&(dev.routeBundlePtr(bundle_index).evaluate_failure orelse @panic("missing error")))[0..1],
|
|
.evaluation,
|
|
});
|
|
return;
|
|
},
|
|
.loaded => {},
|
|
}
|
|
|
|
switch (kind) {
|
|
.server_handler => dev.onRequestWithBundle(bundle_index, .{ .stack = req }, resp),
|
|
.js_payload => dev.onJsRequestWithBundle(bundle_index, resp),
|
|
}
|
|
}
|
|
|
|
fn onRequestWithBundle(
|
|
dev: *DevServer,
|
|
route_bundle_index: RouteBundle.Index,
|
|
req: bun.JSC.API.SavedRequest.Union,
|
|
resp: *Response,
|
|
) void {
|
|
const server_request_callback = dev.server_fetch_function_callback.get() orelse
|
|
unreachable; // did not bundle
|
|
|
|
const route_bundle = dev.routeBundlePtr(route_bundle_index);
|
|
|
|
const router_type = dev.router.typePtr(dev.router.routePtr(route_bundle.route).type);
|
|
|
|
dev.server.?.onRequestFromSaved(
|
|
req,
|
|
resp,
|
|
server_request_callback,
|
|
4,
|
|
.{
|
|
// routerTypeMain
|
|
router_type.server_file_string.get() orelse str: {
|
|
const name = dev.server_graph.bundled_files.keys()[fromOpaqueFileId(.server, router_type.server_file).get()];
|
|
const str = bun.String.createUTF8(name);
|
|
defer str.deref();
|
|
const js = str.toJS(dev.vm.global);
|
|
router_type.server_file_string = JSC.Strong.create(js, dev.vm.global);
|
|
break :str js;
|
|
},
|
|
// routeModules
|
|
route_bundle.cached_module_list.get() orelse arr: {
|
|
const global = dev.vm.global;
|
|
const keys = dev.server_graph.bundled_files.keys();
|
|
var n: usize = 1;
|
|
var route = dev.router.routePtr(route_bundle.route);
|
|
while (true) {
|
|
if (route.file_layout != .none) n += 1;
|
|
route = dev.router.routePtr(route.parent.unwrap() orelse break);
|
|
}
|
|
const arr = JSValue.createEmptyArray(global, n);
|
|
route = dev.router.routePtr(route_bundle.route);
|
|
var route_name = bun.String.createUTF8(dev.relativePath(keys[fromOpaqueFileId(.server, route.file_page.unwrap().?).get()]));
|
|
arr.putIndex(global, 0, route_name.transferToJS(global));
|
|
n = 1;
|
|
while (true) {
|
|
if (route.file_layout.unwrap()) |layout| {
|
|
var layout_name = bun.String.createUTF8(dev.relativePath(keys[fromOpaqueFileId(.server, layout).get()]));
|
|
arr.putIndex(global, @intCast(n), layout_name.transferToJS(global));
|
|
n += 1;
|
|
}
|
|
route = dev.router.routePtr(route.parent.unwrap() orelse break);
|
|
}
|
|
route_bundle.cached_module_list = JSC.Strong.create(arr, global);
|
|
break :arr arr;
|
|
},
|
|
// clientId
|
|
route_bundle.cached_client_bundle_url.get() orelse str: {
|
|
const id = std.crypto.random.int(u64);
|
|
dev.route_js_payloads.put(dev.allocator, id, route_bundle.route) catch bun.outOfMemory();
|
|
const str = bun.String.createFormat(client_prefix ++ "/route.{}.js", .{std.fmt.fmtSliceHexLower(std.mem.asBytes(&id))}) catch bun.outOfMemory();
|
|
defer str.deref();
|
|
const js = str.toJS(dev.vm.global);
|
|
route_bundle.cached_client_bundle_url = JSC.Strong.create(js, dev.vm.global);
|
|
break :str js;
|
|
},
|
|
// styles
|
|
route_bundle.cached_css_file_array.get() orelse arr: {
|
|
const js = dev.generateCssList(route_bundle) catch bun.outOfMemory();
|
|
route_bundle.cached_css_file_array = JSC.Strong.create(js, dev.vm.global);
|
|
break :arr js;
|
|
},
|
|
},
|
|
);
|
|
}
|
|
|
|
pub fn onJsRequestWithBundle(dev: *DevServer, bundle_index: RouteBundle.Index, resp: *Response) void {
|
|
const route_bundle = dev.routeBundlePtr(bundle_index);
|
|
const code = route_bundle.client_bundle orelse code: {
|
|
const code = dev.generateClientBundle(route_bundle) catch bun.outOfMemory();
|
|
route_bundle.client_bundle = code;
|
|
break :code code;
|
|
};
|
|
sendTextFile(code, MimeType.javascript.value, resp);
|
|
}
|
|
|
|
pub fn onSrcRequest(dev: *DevServer, req: *uws.Request, resp: *App.Response) void {
|
|
if (req.header("open-in-editor") == null) {
|
|
resp.writeStatus("501 Not Implemented");
|
|
resp.end("Viewing source without opening in editor is not implemented yet!", false);
|
|
return;
|
|
}
|
|
|
|
const ctx = &dev.vm.rareData().editor_context;
|
|
ctx.autoDetectEditor(JSC.VirtualMachine.get().bundler.env);
|
|
const line: ?[]const u8 = req.header("editor-line");
|
|
const column: ?[]const u8 = req.header("editor-column");
|
|
|
|
if (ctx.editor) |editor| {
|
|
var url = req.url()[internal_prefix.len + "/src/".len ..];
|
|
if (bun.strings.indexOfChar(url, ':')) |colon| {
|
|
url = url[0..colon];
|
|
}
|
|
editor.open(ctx.path, url, line, column, dev.allocator) catch {
|
|
resp.writeStatus("202 No Content");
|
|
resp.end("", false);
|
|
return;
|
|
};
|
|
resp.writeStatus("202 No Content");
|
|
resp.end("", false);
|
|
} else {
|
|
resp.writeStatus("500 Internal Server Error");
|
|
resp.end("Please set your editor in bunfig.toml", false);
|
|
}
|
|
}
|
|
|
|
const BundleError = error{
|
|
OutOfMemory,
|
|
/// Graph entry points will be annotated with failures to display.
|
|
BuildFailed,
|
|
|
|
ServerLoadFailed,
|
|
};
|
|
|
|
fn bundle(dev: *DevServer, files: []const BakeEntryPoint) BundleError!void {
|
|
defer dev.emitVisualizerMessageIfNeeded() catch bun.outOfMemory();
|
|
|
|
assert(files.len > 0);
|
|
|
|
const bundle_file_list = bun.Output.Scoped(.bundle_file_list, false);
|
|
|
|
if (bundle_file_list.isVisible()) {
|
|
bundle_file_list.log("Start bundle {d} files", .{files.len});
|
|
for (files) |f| {
|
|
bundle_file_list.log("- {s} (.{s})", .{ f.path, @tagName(f.graph) });
|
|
}
|
|
}
|
|
|
|
var heap = try ThreadlocalArena.init();
|
|
defer heap.deinit();
|
|
|
|
const allocator = heap.allocator();
|
|
var ast_memory_allocator = try allocator.create(bun.JSAst.ASTMemoryAllocator);
|
|
ast_memory_allocator.* = .{ .allocator = allocator };
|
|
ast_memory_allocator.reset();
|
|
ast_memory_allocator.push();
|
|
|
|
if (dev.framework.server_components == null) {
|
|
// The handling of the dependency graphs is SLIGHTLY different when
// server components are disabled. It's subtle, but enough that it
// would be incorrect to even try to run a build.
|
|
bun.todoPanic(@src(), "support non-server components build", .{});
|
|
}
|
|
|
|
var timer = if (Environment.enable_logs) std.time.Timer.start() catch unreachable;
|
|
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
|
|
const bv2 = try BundleV2.init(
|
|
&dev.server_bundler,
|
|
if (dev.framework.server_components != null) .{
|
|
.framework = dev.framework,
|
|
.client_bundler = &dev.client_bundler,
|
|
.ssr_bundler = &dev.ssr_bundler,
|
|
} else @panic("TODO: support non-server components"),
|
|
allocator,
|
|
JSC.AnyEventLoop.init(allocator),
|
|
false, // reloading is handled separately
|
|
JSC.WorkPool.get(),
|
|
heap,
|
|
);
|
|
bv2.bun_watcher = dev.bun_watcher;
|
|
// this.plugins = completion.plugins;
|
|
|
|
defer {
|
|
if (bv2.graph.pool.pool.threadpool_context == @as(?*anyopaque, @ptrCast(bv2.graph.pool))) {
|
|
bv2.graph.pool.pool.threadpool_context = null;
|
|
}
|
|
ast_memory_allocator.pop();
|
|
bv2.deinit();
|
|
}
|
|
|
|
dev.client_graph.reset();
|
|
dev.server_graph.reset();
|
|
|
|
const bundle_result = bv2.runFromBakeDevServer(files) catch |err| {
|
|
bun.handleErrorReturnTrace(err, @errorReturnTrace());
|
|
|
|
bv2.bundler.log.print(Output.errorWriter()) catch {};
|
|
|
|
Output.warn("BundleV2.runFromBakeDevServer returned error.{s}", .{@errorName(err)});
|
|
|
|
return;
|
|
};
|
|
|
|
bv2.bundler.log.print(Output.errorWriter()) catch {};
|
|
|
|
try dev.finalizeBundle(bv2, bundle_result);
|
|
|
|
try dev.client_graph.ensureStaleBitCapacity(false);
|
|
try dev.server_graph.ensureStaleBitCapacity(false);
|
|
|
|
dev.generation +%= 1;
|
|
if (Environment.enable_logs) {
|
|
debug.log("Bundle Round {d}: {d} server, {d} client, {d} ms", .{
|
|
dev.generation,
|
|
dev.server_graph.current_chunk_parts.items.len,
|
|
dev.client_graph.current_chunk_parts.items.len,
|
|
@divFloor(timer.read(), std.time.ns_per_ms),
|
|
});
|
|
}
|
|
|
|
const is_first_server_chunk = !dev.server_fetch_function_callback.has();
|
|
|
|
if (dev.server_graph.current_chunk_len > 0) {
|
|
const server_bundle = try dev.server_graph.takeBundle(
|
|
if (is_first_server_chunk) .initial_response else .hmr_chunk,
|
|
"",
|
|
);
|
|
defer dev.allocator.free(server_bundle);
|
|
|
|
const server_modules = c.BakeLoadServerHmrPatch(@ptrCast(dev.vm.global), bun.String.createLatin1(server_bundle)) catch |err| {
|
|
// No user code has been evaluated yet, since everything is to
// be wrapped in a function closure. This means that the likely
// error is going to be a syntax error, or other mistake in the
// bundler.
|
|
dev.vm.printErrorLikeObjectToConsole(dev.vm.global.takeException(err));
|
|
@panic("Error thrown while evaluating server code. This is always a bug in the bundler.");
|
|
};
|
|
const errors = dev.server_register_update_callback.get().?.call(
|
|
dev.vm.global,
|
|
dev.vm.global.toJSValue(),
|
|
&.{
|
|
server_modules,
|
|
dev.makeArrayForServerComponentsPatch(dev.vm.global, dev.incremental_result.client_components_added.items),
|
|
dev.makeArrayForServerComponentsPatch(dev.vm.global, dev.incremental_result.client_components_removed.items),
|
|
},
|
|
) catch |err| {
|
|
// One module replacement error should NOT prevent follow-up
// module replacements from being applied. It is the HMR runtime's
// responsibility to collect all module load errors, and
// bubble them up.
|
|
dev.vm.printErrorLikeObjectToConsole(dev.vm.global.takeException(err));
|
|
@panic("Error thrown in Hot-module-replacement code. This is always a bug in the HMR runtime.");
|
|
};
|
|
_ = errors; // TODO:
|
|
}
|
|
|
|
const css_chunks = bundle_result.cssChunks();
|
|
if ((dev.client_graph.current_chunk_len > 0 or
|
|
css_chunks.len > 0) and
|
|
dev.numSubscribers(HmrSocket.global_topic) > 0)
|
|
{
|
|
var sfb2 = std.heap.stackFallback(65536, bun.default_allocator);
|
|
var payload = std.ArrayList(u8).initCapacity(sfb2.get(), 65536) catch
|
|
unreachable; // enough space
|
|
defer payload.deinit();
|
|
payload.appendAssumeCapacity(MessageId.hot_update.char());
|
|
const w = payload.writer();
|
|
|
|
const css_values = dev.css_files.values();
|
|
try w.writeInt(u32, @intCast(css_chunks.len), .little);
|
|
const sources = bv2.graph.input_files.items(.source);
|
|
for (css_chunks) |chunk| {
|
|
const abs_path = sources[chunk.entry_point.source_index].path.text;
|
|
|
|
try w.writeAll(&std.fmt.bytesToHex(std.mem.asBytes(&bun.hash(abs_path)), .lower));
|
|
|
|
const css_data = css_values[chunk.entry_point.entry_point_id];
|
|
try w.writeInt(u32, @intCast(css_data.len), .little);
|
|
try w.writeAll(css_data);
|
|
}
|
|
|
|
if (dev.client_graph.current_chunk_len > 0)
|
|
try dev.client_graph.takeBundleToList(.hmr_chunk, &payload, "");
|
|
|
|
dev.publish(HmrSocket.global_topic, payload.items, .binary);
|
|
}
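// Rough sketch of the `hot_update` payload assembled above (derived from the
// writes in that block, not a separate format spec):
//
//   [1 byte]   MessageId.hot_update
//   [4 bytes]  css chunk count (u32, little-endian)
//   for each css chunk:
//     [16 bytes] lowercase hex of the path hash
//     [4 bytes]  css byte length (u32, little-endian)
//     [N bytes]  css source
//   [rest]     concatenated client JS chunk, if any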
|
|
|
|
if (dev.incremental_result.failures_added.items.len > 0) {
|
|
dev.bundles_since_last_error = 0;
|
|
return error.BuildFailed;
|
|
}
|
|
}
|
|
|
|
fn indexFailures(dev: *DevServer) !void {
|
|
var sfa_state = std.heap.stackFallback(65536, dev.allocator);
|
|
const sfa = sfa_state.get();
|
|
|
|
if (dev.incremental_result.failures_added.items.len > 0) {
|
|
var total_len: usize = @sizeOf(MessageId) + @sizeOf(u32);
|
|
|
|
for (dev.incremental_result.failures_added.items) |fail| {
|
|
total_len += fail.data.len;
|
|
}
|
|
|
|
total_len += dev.incremental_result.failures_removed.items.len * @sizeOf(u32);
|
|
|
|
dev.server_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.server_graph.bundled_files.count());
|
|
defer dev.server_graph.affected_by_trace.deinit(sfa);
|
|
|
|
dev.client_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.client_graph.bundled_files.count());
|
|
defer dev.client_graph.affected_by_trace.deinit(sfa);
|
|
|
|
var payload = try std.ArrayList(u8).initCapacity(sfa, total_len);
|
|
defer payload.deinit();
|
|
payload.appendAssumeCapacity(MessageId.errors.char());
|
|
const w = payload.writer();
|
|
|
|
try w.writeInt(u32, @intCast(dev.incremental_result.failures_removed.items.len), .little);
|
|
|
|
for (dev.incremental_result.failures_removed.items) |removed| {
|
|
try w.writeInt(u32, @bitCast(removed.getOwner().encode()), .little);
|
|
removed.deinit();
|
|
}
|
|
|
|
for (dev.incremental_result.failures_added.items) |added| {
|
|
try w.writeAll(added.data);
|
|
|
|
switch (added.getOwner()) {
|
|
.none, .route => unreachable,
|
|
.server => |index| try dev.server_graph.traceDependencies(index, .no_stop),
|
|
.client => |index| try dev.client_graph.traceDependencies(index, .no_stop),
|
|
}
|
|
}
|
|
|
|
{
|
|
@panic("TODO: revive");
|
|
}
|
|
// for (dev.incremental_result.routes_affected.items) |route_index| {
|
|
// const route = &dev.routes[route_index.get()];
|
|
// route.server_state = .possible_bundling_failures;
|
|
// }
|
|
|
|
dev.publish(HmrSocket.global_topic, payload.items, .binary);
|
|
} else if (dev.incremental_result.failures_removed.items.len > 0) {
|
|
if (dev.bundling_failures.count() == 0) {
|
|
dev.publish(HmrSocket.global_topic, &.{MessageId.errors_cleared.char()}, .binary);
|
|
for (dev.incremental_result.failures_removed.items) |removed| {
|
|
removed.deinit();
|
|
}
|
|
} else {
|
|
var payload = try std.ArrayList(u8).initCapacity(sfa, @sizeOf(MessageId) + @sizeOf(u32) + dev.incremental_result.failures_removed.items.len * @sizeOf(u32));
|
|
defer payload.deinit();
|
|
payload.appendAssumeCapacity(MessageId.errors.char());
|
|
const w = payload.writer();
|
|
|
|
try w.writeInt(u32, @intCast(dev.incremental_result.failures_removed.items.len), .little);
|
|
|
|
for (dev.incremental_result.failures_removed.items) |removed| {
|
|
try w.writeInt(u32, @bitCast(removed.getOwner().encode()), .little);
|
|
removed.deinit();
|
|
}
|
|
|
|
dev.publish(HmrSocket.global_topic, payload.items, .binary);
|
|
}
|
|
}
|
|
|
|
dev.incremental_result.failures_removed.clearRetainingCapacity();
|
|
}
|
|
|
|
/// Used to generate the entry point. Unlike incremental patches, this always
|
|
/// contains all needed files for a route.
|
|
fn generateClientBundle(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM![]const u8 {
|
|
assert(route_bundle.client_bundle == null);
|
|
assert(route_bundle.server_state == .loaded); // page is unfit to load
|
|
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
|
|
// Prepare bitsets
|
|
var sfa_state = std.heap.stackFallback(65536, dev.allocator);
|
|
const sfa = sfa_state.get();
|
|
// const gts = try dev.initGraphTraceState(sfa);
|
|
// defer gts.deinit(sfa);
|
|
dev.server_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.server_graph.bundled_files.count());
|
|
defer dev.server_graph.affected_by_trace.deinit(sfa);
|
|
|
|
dev.client_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.client_graph.bundled_files.count());
|
|
defer dev.client_graph.affected_by_trace.deinit(sfa);
|
|
|
|
// Run tracing
|
|
dev.client_graph.reset();
|
|
try dev.traceAllRouteImports(route_bundle, .{ .find_client_modules = true });
|
|
|
|
const client_file = dev.router.typePtr(dev.router.routePtr(route_bundle.route).type).client_file.unwrap() orelse
|
|
@panic("No client side entrypoint in client bundle");
|
|
|
|
return dev.client_graph.takeBundle(
|
|
.initial_response,
|
|
dev.relativePath(dev.client_graph.bundled_files.keys()[fromOpaqueFileId(.client, client_file).get()]),
|
|
);
|
|
}
|
|
|
|
fn generateCssList(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM!JSC.JSValue {
|
|
if (Environment.allow_assert) assert(!route_bundle.cached_css_file_array.has());
|
|
assert(route_bundle.server_state == .loaded); // page is unfit to load
|
|
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
|
|
// Prepare bitsets
|
|
var sfa_state = std.heap.stackFallback(65536, dev.allocator);
|
|
|
|
const sfa = sfa_state.get();
|
|
dev.server_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.server_graph.bundled_files.count());
|
|
defer dev.server_graph.affected_by_trace.deinit(sfa);
|
|
|
|
dev.client_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.client_graph.bundled_files.count());
|
|
defer dev.client_graph.affected_by_trace.deinit(sfa);
|
|
|
|
// Run tracing
|
|
dev.client_graph.reset();
|
|
try dev.traceAllRouteImports(route_bundle, .{ .find_css = true });
|
|
|
|
const names = dev.client_graph.current_css_files.items;
|
|
const arr = JSC.JSArray.createEmpty(dev.vm.global, names.len);
|
|
for (names, 0..) |item, i| {
|
|
const str = bun.String.createUTF8(item);
|
|
defer str.deref();
|
|
arr.putIndex(dev.vm.global, @intCast(i), str.toJS(dev.vm.global));
|
|
}
|
|
return arr;
|
|
}
|
|
|
|
fn traceAllRouteImports(dev: *DevServer, route_bundle: *RouteBundle, goal: TraceImportGoal) !void {
|
|
var route = dev.router.routePtr(route_bundle.route);
|
|
const router_type = dev.router.typePtr(route.type);
|
|
|
|
// Both framework entry points are considered
|
|
try dev.server_graph.traceImports(fromOpaqueFileId(.server, router_type.server_file), .{ .find_css = true });
|
|
if (router_type.client_file.unwrap()) |id| {
|
|
try dev.client_graph.traceImports(fromOpaqueFileId(.client, id), goal);
|
|
}
|
|
|
|
// The route file is considered
|
|
if (route.file_page.unwrap()) |id| {
|
|
try dev.server_graph.traceImports(fromOpaqueFileId(.server, id), goal);
|
|
}
|
|
|
|
// For all parents, the layout is considered
|
|
while (true) {
|
|
if (route.file_layout.unwrap()) |id| {
|
|
try dev.server_graph.traceImports(fromOpaqueFileId(.server, id), goal);
|
|
}
|
|
route = dev.router.routePtr(route.parent.unwrap() orelse break);
|
|
}
|
|
}
|
|
|
|
fn makeArrayForServerComponentsPatch(dev: *DevServer, global: *JSC.JSGlobalObject, items: []const IncrementalGraph(.server).FileIndex) JSValue {
|
|
if (items.len == 0) return .null;
|
|
const arr = JSC.JSArray.createEmpty(global, items.len);
|
|
const names = dev.server_graph.bundled_files.keys();
|
|
for (items, 0..) |item, i| {
|
|
const str = bun.String.createUTF8(dev.relativePath(names[item.get()]));
|
|
defer str.deref();
|
|
arr.putIndex(global, @intCast(i), str.toJS(global));
|
|
}
|
|
return arr;
|
|
}
|
|
|
|
pub const HotUpdateContext = struct {
|
|
/// bundle_v2.Graph.input_files.items(.source)
|
|
sources: []bun.logger.Source,
|
|
/// bundle_v2.Graph.ast.items(.import_records)
|
|
import_records: []bun.ImportRecord.List,
|
|
/// bundle_v2.Graph.server_component_boundaries.slice()
|
|
scbs: bun.JSAst.ServerComponentBoundary.List.Slice,
|
|
/// Which files have a server-component boundary.
|
|
server_to_client_bitset: DynamicBitSetUnmanaged,
|
|
/// Used to reduce calls to the IncrementalGraph hash table.
|
|
///
|
|
/// Caller initializes a slice with `sources.len * 2` items
|
|
/// all initialized to `std.math.maxInt(u32)`
|
|
///
|
|
/// The first half of this slice is for the client graph,
|
|
/// second half is for server. Interact with this via
|
|
/// `getCachedIndex`
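///
/// Illustrative layout (source count is hypothetical): with 3 bundler
/// sources, `resolved_index_cache[0..3]` holds the client graph's cached
/// file indices and `resolved_index_cache[3..6]` holds the server graph's,
/// as implemented by `getCachedIndex` below.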
|
|
resolved_index_cache: []u32,
|
|
/// Used to tell if the server should replace or append import records.
|
|
server_seen_bit_set: DynamicBitSetUnmanaged,
|
|
|
|
pub fn getCachedIndex(
|
|
rc: *const HotUpdateContext,
|
|
comptime side: bake.Side,
|
|
i: bun.JSAst.Index,
|
|
) *IncrementalGraph(side).FileIndex {
|
|
const start = switch (side) {
|
|
.client => 0,
|
|
.server => rc.sources.len,
|
|
};
|
|
|
|
const subslice = rc.resolved_index_cache[start..][0..rc.sources.len];
|
|
|
|
comptime assert(@alignOf(IncrementalGraph(side).FileIndex.Optional) == @alignOf(u32));
|
|
comptime assert(@sizeOf(IncrementalGraph(side).FileIndex.Optional) == @sizeOf(u32));
|
|
return @ptrCast(&subslice[i.get()]);
|
|
}
|
|
};
|
|
|
|
/// Called at the end of BundleV2 to index bundle contents into the `IncrementalGraph`s
|
|
pub fn finalizeBundle(
|
|
dev: *DevServer,
|
|
bv2: *bun.bundle_v2.BundleV2,
|
|
result: bun.bundle_v2.BakeBundleOutput,
|
|
) !void {
|
|
const js_chunk = result.jsPseudoChunk();
|
|
const input_file_sources = bv2.graph.input_files.items(.source);
|
|
const import_records = bv2.graph.ast.items(.import_records);
|
|
const targets = bv2.graph.ast.items(.target);
|
|
const scbs = bv2.graph.server_component_boundaries.slice();
|
|
|
|
var sfa = std.heap.stackFallback(4096, bv2.graph.allocator);
|
|
const stack_alloc = sfa.get();
|
|
var scb_bitset = try bun.bit_set.DynamicBitSetUnmanaged.initEmpty(stack_alloc, input_file_sources.len);
|
|
for (
|
|
scbs.list.items(.source_index),
|
|
scbs.list.items(.ssr_source_index),
|
|
scbs.list.items(.reference_source_index),
|
|
) |source_index, ssr_index, ref_index| {
|
|
scb_bitset.set(source_index);
|
|
scb_bitset.set(ref_index);
|
|
if (ssr_index < scb_bitset.bit_length)
|
|
scb_bitset.set(ssr_index);
|
|
}
|
|
|
|
const resolved_index_cache = try bv2.graph.allocator.alloc(u32, input_file_sources.len * 2);
|
|
|
|
var ctx: bun.bake.DevServer.HotUpdateContext = .{
|
|
.import_records = import_records,
|
|
.sources = input_file_sources,
|
|
.scbs = scbs,
|
|
.server_to_client_bitset = scb_bitset,
|
|
.resolved_index_cache = resolved_index_cache,
|
|
.server_seen_bit_set = undefined,
|
|
};
|
|
|
|
// Pass 1, update the graph's nodes, resolving every bundler source
|
|
// index into its `IncrementalGraph(...).FileIndex`
|
|
for (
|
|
js_chunk.content.javascript.parts_in_chunk_in_order,
|
|
js_chunk.compile_results_for_chunk,
|
|
) |part_range, compile_result| {
|
|
const index = part_range.source_index;
|
|
switch (targets[part_range.source_index.get()].bakeGraph()) {
|
|
.server => try dev.server_graph.receiveChunk(&ctx, index, compile_result.code(), .js, false),
|
|
.ssr => try dev.server_graph.receiveChunk(&ctx, index, compile_result.code(), .js, true),
|
|
.client => try dev.client_graph.receiveChunk(&ctx, index, compile_result.code(), .js, false),
|
|
}
|
|
}
|
|
for (result.cssChunks(), result.css_file_list.metas) |*chunk, metadata| {
|
|
const index = bun.JSAst.Index.init(chunk.entry_point.source_index);
|
|
|
|
const code = try chunk.intermediate_output.code(
|
|
dev.allocator,
|
|
&bv2.graph,
|
|
&bv2.linker.graph,
|
|
"/_bun/TODO-import-prefix-where-is-this-used?",
|
|
chunk,
|
|
result.chunks,
|
|
null,
|
|
false, // TODO: sourcemaps true
|
|
);
|
|
|
|
// Create an asset entry for this file.
|
|
const abs_path = ctx.sources[index.get()].path.text;
|
|
// Later code needs to retrieve the CSS content
|
|
// The hack is to use `entry_point_id`, which is otherwise unused, to store an index.
|
|
chunk.entry_point.entry_point_id = try dev.insertOrUpdateCssAsset(abs_path, code.buffer);
|
|
|
|
try dev.client_graph.receiveChunk(&ctx, index, "", .css, false);
|
|
|
|
// If imported on server, there needs to be a server-side file entry
|
|
// so that edges can be attached. When a file is only imported on
|
|
// the server, this file is used to trace the CSS to the route.
|
|
if (metadata.imported_on_server) {
|
|
try dev.server_graph.insertCssFileOnServer(
|
|
&ctx,
|
|
index,
|
|
abs_path,
|
|
);
|
|
}
|
|
}
|
|
|
|
dev.client_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(bv2.graph.allocator, dev.client_graph.bundled_files.count());
|
|
defer dev.client_graph.affected_by_trace = .{};
|
|
dev.server_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(bv2.graph.allocator, dev.server_graph.bundled_files.count());
|
|
defer dev.server_graph.affected_by_trace = .{};
|
|
|
|
ctx.server_seen_bit_set = try bun.bit_set.DynamicBitSetUnmanaged.initEmpty(bv2.graph.allocator, dev.server_graph.bundled_files.count());
|
|
|
|
// Pass 2, update the graph's edges by performing import diffing on each
|
|
// changed file, removing dependencies. This pass also flags what routes
|
|
// have been modified.
|
|
for (js_chunk.content.javascript.parts_in_chunk_in_order) |part_range| {
|
|
switch (targets[part_range.source_index.get()].bakeGraph()) {
|
|
.server, .ssr => try dev.server_graph.processChunkDependencies(&ctx, part_range.source_index, bv2.graph.allocator),
|
|
.client => try dev.client_graph.processChunkDependencies(&ctx, part_range.source_index, bv2.graph.allocator),
|
|
}
|
|
}
|
|
for (result.cssChunks(), result.css_file_list.metas) |*chunk, metadata| {
|
|
const index = bun.JSAst.Index.init(chunk.entry_point.source_index);
|
|
// TODO: index css deps
|
|
_ = index; // autofix
|
|
_ = metadata; // autofix
|
|
}
|
|
|
|
// Index all failed files now that the incremental graph has been updated.
|
|
try dev.indexFailures();
|
|
}
|
|
|
|
fn insertOrUpdateCssAsset(dev: *DevServer, abs_path: []const u8, code: []const u8) !u31 {
|
|
const path_hash = bun.hash(abs_path);
|
|
const gop = try dev.css_files.getOrPut(dev.allocator, path_hash);
|
|
if (gop.found_existing) {
|
|
dev.allocator.free(gop.value_ptr.*);
|
|
}
|
|
gop.value_ptr.* = code;
|
|
return @intCast(gop.index);
|
|
}
|
|
|
|
pub fn handleParseTaskFailure(
|
|
dev: *DevServer,
|
|
graph: bake.Graph,
|
|
abs_path: []const u8,
|
|
log: *Log,
|
|
) bun.OOM!void {
|
|
// Print each error only once
|
|
Output.prettyErrorln("<red><b>Errors while bundling '{s}':<r>", .{
|
|
dev.relativePath(abs_path),
|
|
});
|
|
Output.flush();
|
|
log.print(Output.errorWriter()) catch {};
|
|
|
|
return switch (graph) {
|
|
.server => dev.server_graph.insertFailure(abs_path, log, false),
|
|
.ssr => dev.server_graph.insertFailure(abs_path, log, true),
|
|
.client => dev.client_graph.insertFailure(abs_path, log, false),
|
|
};
|
|
}
|
|
|
|
const CacheEntry = struct {
|
|
kind: FileKind,
|
|
};
|
|
|
|
pub fn isFileCached(dev: *DevServer, path: []const u8, side: bake.Graph) ?CacheEntry {
|
|
switch (side) {
|
|
inline else => |side_comptime| {
|
|
const g = switch (side_comptime) {
|
|
.client => &dev.client_graph,
|
|
.server => &dev.server_graph,
|
|
.ssr => &dev.server_graph,
|
|
};
|
|
const index = g.bundled_files.getIndex(path) orelse
|
|
return null; // non-existent files are considered stale
|
|
if (!g.stale_files.isSet(index)) {
|
|
return .{ .kind = g.bundled_files.values()[index].fileKind() };
|
|
}
|
|
return null;
|
|
},
|
|
}
|
|
}
|
|
|
|
fn appendOpaqueEntryPoint(
|
|
dev: *DevServer,
|
|
file_names: [][]const u8,
|
|
entry_points: *std.ArrayList(BakeEntryPoint),
|
|
comptime side: bake.Side,
|
|
optional_id: anytype,
|
|
) !void {
|
|
const file = switch (@TypeOf(optional_id)) {
|
|
OpaqueFileId.Optional => optional_id.unwrap() orelse return,
|
|
OpaqueFileId => optional_id,
|
|
else => @compileError("invalid type here"),
|
|
};
|
|
|
|
const file_index = fromOpaqueFileId(side, file);
|
|
if (switch (side) {
|
|
.server => dev.server_graph.stale_files.isSet(file_index.get()),
|
|
.client => dev.client_graph.stale_files.isSet(file_index.get()),
|
|
}) {
|
|
try entry_points.append(.{
|
|
.path = file_names[file_index.get()],
|
|
.graph = switch (side) {
|
|
.server => .server,
|
|
.client => .client,
|
|
},
|
|
});
|
|
}
|
|
}
|
|
|
|
pub fn routeBundlePtr(dev: *DevServer, idx: RouteBundle.Index) *RouteBundle {
|
|
return &dev.route_bundles.items[idx.get()];
|
|
}
|
|
|
|
fn onRequest(dev: *DevServer, req: *Request, resp: *Response) void {
|
|
var params: FrameworkRouter.MatchedParams = undefined;
|
|
if (dev.router.matchSlow(req.url(), ¶ms)) |route_index| {
|
|
dev.ensureRouteIsBundled(route_index, .server_handler, req, resp) catch bun.outOfMemory();
|
|
return;
|
|
}
|
|
|
|
sendBuiltInNotFound(resp);
|
|
}
|
|
|
|
fn insertRouteBundle(dev: *DevServer, route: Route.Index) !RouteBundle.Index {
|
|
const full_pattern = full_pattern: {
|
|
var buf = bake.PatternBuffer.empty;
|
|
var current: *Route = dev.router.routePtr(route);
|
|
while (true) {
|
|
buf.prependPart(current.part);
|
|
current = dev.router.routePtr(current.parent.unwrap() orelse break);
|
|
}
|
|
break :full_pattern try dev.allocator.dupe(u8, buf.slice());
|
|
};
|
|
errdefer dev.allocator.free(full_pattern);
|
|
|
|
try dev.route_bundles.append(dev.allocator, .{
|
|
.route = route,
|
|
.server_state = .unqueued,
|
|
.full_pattern = full_pattern,
|
|
.client_bundle = null,
|
|
.evaluate_failure = null,
|
|
.cached_module_list = .{},
|
|
.cached_client_bundle_url = .{},
|
|
.cached_css_file_array = .{},
|
|
});
|
|
const bundle_index = RouteBundle.Index.init(@intCast(dev.route_bundles.items.len - 1));
|
|
dev.router.routePtr(route).bundle = bundle_index.toOptional();
|
|
return bundle_index;
|
|
}
|
|
|
|
fn sendTextFile(code: []const u8, content_type: []const u8, resp: *Response) void {
|
|
if (code.len == 0) {
|
|
resp.writeStatus("202 No Content");
|
|
resp.writeHeaderInt("Content-Length", 0);
|
|
resp.end("", true);
|
|
return;
|
|
}
|
|
|
|
resp.writeStatus("200 OK");
|
|
resp.writeHeader("Content-Type", content_type);
|
|
resp.end(code, true); // TODO: You should never call res.end(huge buffer)
|
|
}
|
|
|
|
const ErrorPageKind = enum {
|
|
/// Modules failed to bundle
|
|
bundler,
|
|
/// Modules failed to evaluate
|
|
evaluation,
|
|
/// Request handler threw
|
|
runtime,
|
|
};
|
|
|
|
fn sendSerializedFailures(
|
|
dev: *DevServer,
|
|
resp: *Response,
|
|
failures: []const SerializedFailure,
|
|
kind: ErrorPageKind,
|
|
) void {
|
|
resp.writeStatus("500 Internal Server Error");
|
|
resp.writeHeader("Content-Type", MimeType.html.value);
|
|
|
|
// TODO: what to do about return values here?
|
|
_ = resp.write(switch (kind) {
|
|
inline else => |k| std.fmt.comptimePrint(
|
|
\\<!doctype html>
|
|
\\<html lang="en">
|
|
\\<head>
|
|
\\<meta charset="UTF-8" />
|
|
\\<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
|
\\<title>Bun - {[page_title]s}</title>
|
|
\\<style>:root{{color-scheme:light dark}}body{{background:light-dark(white,black)}}</style>
|
|
\\</head>
|
|
\\<body>
|
|
\\<noscript><p style="font:24px sans-serif;">Bun requires JavaScript enabled in the browser to receive hot reloading events.</p></noscript>
|
|
\\<script>let error=Uint8Array.from(atob("
|
|
,
|
|
.{ .page_title = switch (k) {
|
|
.bundler => "Bundling Error",
|
|
.evaluation, .runtime => "Runtime Error",
|
|
} },
|
|
),
|
|
});
|
|
|
|
var sfb = std.heap.stackFallback(65536, dev.allocator);
|
|
var arena_state = std.heap.ArenaAllocator.init(sfb.get());
|
|
defer arena_state.deinit();
|
|
|
|
for (failures) |fail| {
|
|
// TODO: make this entirely use stack memory.
|
|
const len = bun.base64.encodeLen(fail.data);
|
|
const buf = arena_state.allocator().alloc(u8, len) catch bun.outOfMemory();
|
|
const encoded = buf[0..bun.base64.encode(buf, fail.data)];
|
|
_ = resp.write(encoded);
|
|
|
|
_ = arena_state.reset(.retain_capacity);
|
|
}
|
|
|
|
const pre = "\"),c=>c.charCodeAt(0));";
|
|
const post = "</script></body></html>";
|
|
|
|
if (Environment.codegen_embed) {
|
|
_ = resp.end(pre ++ @embedFile("bake-codegen/bake.error.js") ++ post, false);
|
|
} else {
|
|
_ = resp.write(pre);
|
|
_ = resp.write(bun.runtimeEmbedFile(.codegen_eager, "bake.error.js"));
|
|
_ = resp.end(post, false);
|
|
}
|
|
}
|
|
|
|
fn sendBuiltInNotFound(resp: *Response) void {
|
|
const message = "404 Not Found";
|
|
resp.writeStatus("404 Not Found");
|
|
resp.end(message, true);
|
|
}
|
|
|
|
fn sendStubErrorMessage(dev: *DevServer, route: *RouteBundle, resp: *Response, err: JSValue) void {
|
|
var sfb = std.heap.stackFallback(65536, dev.allocator);
|
|
var a = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch bun.outOfMemory();
|
|
|
|
a.writer().print("Server route handler for '{s}' threw while loading\n\n", .{
route.full_pattern,
}) catch bun.outOfMemory();
dev.vm.printErrorLikeObjectSimple(err, a.writer(), false);
|
|
|
|
resp.writeStatus("500 Internal Server Error");
|
|
resp.end(a.items, true); // TODO: "You should never call res.end(huge buffer)"
|
|
}
|
|
|
|
const FileKind = enum(u2) {
|
|
/// Files that failed to bundle or do not exist on disk will appear in the
|
|
/// graph as "unknown".
|
|
unknown,
|
|
js,
|
|
css,
|
|
asset,
|
|
};
|
|
|
|
/// The paradigm of Bake's incremental state is to store a separate list of files
|
|
/// than the Graph in bundle_v2. When watch events happen, the bundler is run on
|
|
/// the changed files, excluding non-stale files via `isFileStale`.
|
|
///
|
|
/// Upon bundle completion, both `client_graph` and `server_graph` have their
|
|
/// `receiveChunk` methods called with all new chunks, counting the total length
|
|
/// needed. A call to `takeBundle` joins all of the chunks, resulting in the
|
|
/// code to send to client or evaluate on the server.
|
|
///
|
|
/// Then, `processChunkDependencies` is called on each chunk to update the
|
|
/// list of imports. When a change in imports is detected, the dependencies
|
|
/// are updated accordingly.
|
|
///
|
|
/// Since all routes share the two graphs, bundling a new route that shared
|
|
/// a module from a previously bundled route will perform the same exclusion
|
|
/// behavior that rebuilds use. This also ensures that two routes on the server
|
|
/// do not emit duplicate dependencies. By tracing `imports` on each file in
|
|
/// the module graph recursively, the full bundle for any given route can
|
|
/// be re-materialized (required when pressing Cmd+R after any client update)
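///
/// A rough sketch of one bundle pass from the graph's point of view
/// (`ctx`, `arena`, and `entry` are hypothetical locals; the real call
/// sites live elsewhere in DevServer):
///
///     // while the bundler emits chunks, once per changed file:
///     try graph.receiveChunk(&ctx, index, code, .js, false);
///     // second pass, per file, to update edges:
///     try graph.processChunkDependencies(&ctx, index, arena);
///     // join everything queued into a single chunk:
///     const js = try graph.takeBundle(.hmr_chunk, entry);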
|
|
pub fn IncrementalGraph(side: bake.Side) type {
|
|
return struct {
|
|
// Unless otherwise mentioned, all data structures use DevServer's allocator.
|
|
|
|
/// Key contents are owned by `default_allocator`
|
|
bundled_files: bun.StringArrayHashMapUnmanaged(File),
|
|
/// Track bools for files which are "stale", meaning they should be
|
|
/// re-bundled before being used. Resizing this is usually deferred
|
|
/// until after a bundle, since resizing the bit-set requires an
|
|
/// exact size, instead of the log approach that dynamic arrays use.
|
|
stale_files: DynamicBitSetUnmanaged,
|
|
|
|
/// Start of the 'dependencies' linked list. These are the other files
|
|
/// that import this file. Walk this list to discover what
|
|
/// files are to be reloaded when something changes.
|
|
first_dep: ArrayListUnmanaged(EdgeIndex.Optional),
|
|
/// Start of the 'imports' linked list. These are the files that this
|
|
/// file imports.
|
|
first_import: ArrayListUnmanaged(EdgeIndex.Optional),
|
|
/// `File` objects act as nodes in a directional many-to-many graph,
|
|
/// where edges represent the imports between modules. A 'dependency'
/// is a file that must be notified when its `imported` file changes. This
/// is implemented using an array of `Edge` objects that act as linked
/// list nodes; each file stores its first import and first dependency.
|
|
edges: ArrayListUnmanaged(Edge),
|
|
/// HMR Dependencies are added and removed very frequently, but indexes
|
|
/// must remain stable. This free list allows re-use of freed indexes,
|
|
/// so garbage collection can run less often.
|
|
edges_free_list: ArrayListUnmanaged(EdgeIndex),
|
|
|
|
// TODO: delete
|
|
/// Used during an incremental update to determine what "HMR roots"
|
|
/// are affected. Set for all `bundled_files` that have been visited
|
|
/// by the dependency tracing logic.
|
|
///
|
|
/// Outside of an incremental bundle, this is empty.
|
|
/// Backed by the bundler thread's arena allocator.
|
|
affected_by_trace: DynamicBitSetUnmanaged,
|
|
|
|
/// Total byte length of all files queued for concatenation
|
|
current_chunk_len: usize = 0,
|
|
/// All part contents
|
|
current_chunk_parts: ArrayListUnmanaged(switch (side) {
|
|
.client => FileIndex,
|
|
// These slices do not outlive the bundler, and must
|
|
// be joined before its arena is deinitialized.
|
|
.server => []const u8,
|
|
}),
|
|
|
|
current_css_files: switch (side) {
|
|
.client => ArrayListUnmanaged([]const u8),
|
|
.server => void,
|
|
},
|
|
|
|
const empty: @This() = .{
|
|
.bundled_files = .{},
|
|
.stale_files = .{},
|
|
|
|
.first_dep = .{},
|
|
.first_import = .{},
|
|
.edges = .{},
|
|
.edges_free_list = .{},
|
|
|
|
.affected_by_trace = .{},
|
|
|
|
.current_chunk_len = 0,
|
|
.current_chunk_parts = .{},
|
|
|
|
.current_css_files = switch (side) {
|
|
.client => .{},
|
|
.server => {},
|
|
},
|
|
};
|
|
|
|
pub const File = switch (side) {
|
|
// The server's incremental graph does not store previously bundled
|
|
// code because there is only one instance of the server. Instead,
|
|
// it stores which module graphs it is a part of. This makes sure
|
|
// that recompilation knows what bundler options to use.
|
|
.server => struct { // TODO: make this packed(u8), i had compiler crashes before
|
|
/// Is this file built for the Server graph.
|
|
is_rsc: bool,
|
|
/// Is this file built for the SSR graph.
|
|
is_ssr: bool,
|
|
/// If set, the client graph contains a matching file.
|
|
/// The server
|
|
is_client_component_boundary: bool,
|
|
/// If this file is a route root, the route can be looked up in
|
|
/// the route list. This also stops dependency propagation.
|
|
is_route: bool,
|
|
/// If the file has an error, the failure can be looked up
|
|
/// in the `.failures` map.
|
|
failed: bool,
|
|
/// CSS and Asset files get special handling
|
|
kind: FileKind,
|
|
|
|
fn stopsDependencyTrace(file: @This()) bool {
|
|
return file.is_client_component_boundary;
|
|
}
|
|
|
|
fn fileKind(file: @This()) FileKind {
|
|
return file.kind;
|
|
}
|
|
},
|
|
.client => struct {
|
|
/// Allocated by default_allocator. Access with `.code()`
|
|
code_ptr: [*]const u8,
|
|
/// Separated from the pointer to reduce struct size.
|
|
/// Parser does not support files >4gb anyways.
|
|
code_len: u32,
|
|
flags: Flags,
|
|
|
|
const Flags = struct {
|
|
/// If the file has an error, the failure can be looked up
|
|
/// in the `.failures` map.
|
|
failed: bool,
|
|
/// For JS files, this is a component root; the server contains a matching file.
|
|
/// For CSS files, this is also marked on the stylesheet that is imported from JS.
|
|
is_hmr_root: bool,
|
|
/// This file is an entry point to the framework.
|
|
/// Changing this will always cause a full page reload.
|
|
is_special_framework_file: bool,
|
|
/// CSS and Asset files get special handling
|
|
kind: FileKind,
|
|
};
|
|
|
|
comptime {
|
|
assert(@sizeOf(@This()) == @sizeOf(usize) * 2);
|
|
assert(@alignOf(@This()) == @alignOf([*]u8));
|
|
}
|
|
|
|
fn init(code_slice: []const u8, flags: Flags) @This() {
|
|
return .{
|
|
.code_ptr = code_slice.ptr,
|
|
.code_len = @intCast(code_slice.len),
|
|
.flags = flags,
|
|
};
|
|
}
|
|
|
|
fn code(file: @This()) []const u8 {
|
|
return file.code_ptr[0..file.code_len];
|
|
}
|
|
|
|
inline fn stopsDependencyTrace(_: @This()) bool {
|
|
return false;
|
|
}
|
|
|
|
fn fileKind(file: @This()) FileKind {
|
|
return file.flags.kind;
|
|
}
|
|
},
|
|
};
|
|
|
|
// If this data structure is not clear, see `DirectoryWatchStore.Dep`
|
|
// for a simpler example. It is more complicated here because this
|
|
// structure is two-way.
|
|
pub const Edge = struct {
|
|
/// The file with the `import` statement
|
|
dependency: FileIndex,
|
|
/// The file that `dependency` is importing
|
|
imported: FileIndex,
|
|
|
|
next_import: EdgeIndex.Optional,
|
|
next_dependency: EdgeIndex.Optional,
|
|
prev_dependency: EdgeIndex.Optional,
|
|
};
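// A sketch of walking a file's import list through these intrusive
// linked lists; the same pattern appears in `processChunkDependencies`
// and `disconnectAndDeleteFile` below:
//
//     var it: ?EdgeIndex = g.first_import.items[file_index.get()].unwrap();
//     while (it) |edge_index| {
//         const edge = g.edges.items[edge_index.get()];
//         it = edge.next_import.unwrap();
//         // `edge.imported` is a file that `file_index` imports; walking
//         // `first_dep`/`next_dependency` instead goes the other way.
//     }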
|
|
|
|
/// An index into `bundled_files`, `stale_files`, `first_dep`, `first_import`, or `affected_by_trace`
|
|
/// Top bits cannot be relied on due to `SerializedFailure.Owner.Packed`
|
|
pub const FileIndex = bun.GenericIndex(u30, File);
|
|
pub const react_refresh_index = if (side == .client) FileIndex.init(0);
|
|
|
|
/// An index into `edges`
|
|
const EdgeIndex = bun.GenericIndex(u32, Edge);
|
|
|
|
fn getFileIndex(g: *@This(), path: []const u8) ?FileIndex {
|
|
return if (g.bundled_files.getIndex(path)) |i| FileIndex.init(@intCast(i)) else null;
|
|
}
|
|
|
|
/// Tracks a bundled code chunk for cross-bundle chunks,
|
|
/// ensuring it has an entry in `bundled_files`.
|
|
///
|
|
/// For client, takes ownership of the code slice (must be default allocated)
|
|
///
|
|
/// For server, the code is temporarily kept in the
|
|
/// `current_chunk_parts` array, where it must live until
|
|
/// takeBundle is called. Then it can be freed.
|
|
pub fn receiveChunk(
|
|
g: *@This(),
|
|
ctx: *HotUpdateContext,
|
|
index: bun.JSAst.Index,
|
|
code: []const u8,
|
|
kind: FileKind,
|
|
is_ssr_graph: bool,
|
|
) !void {
|
|
const dev = g.owner();
|
|
dev.graph_safety_lock.assertLocked();
|
|
|
|
const abs_path = ctx.sources[index.get()].path.text;
|
|
|
|
if (Environment.allow_assert) {
|
|
switch (kind) {
|
|
.css => bun.assert(code.len == 0),
|
|
.js => if (bun.strings.isAllWhitespace(code)) {
|
|
// Should at least contain the function wrapper
|
|
bun.Output.panic("Empty chunk is impossible: {s} {s}", .{
|
|
abs_path,
|
|
switch (side) {
|
|
.client => "client",
|
|
.server => if (is_ssr_graph) "ssr" else "server",
|
|
},
|
|
});
|
|
},
|
|
else => Output.panic("unexpected file kind: .{s}", .{@tagName(kind)}),
|
|
}
|
|
}
|
|
|
|
g.current_chunk_len += code.len;
|
|
|
|
// Dump to filesystem if enabled
|
|
if (bun.FeatureFlags.bake_debugging_features) if (dev.dump_dir) |dump_dir| {
|
|
const cwd = dev.root;
|
|
var a: bun.PathBuffer = undefined;
|
|
var b: [bun.MAX_PATH_BYTES * 2]u8 = undefined;
|
|
const rel_path = bun.path.relativeBufZ(&a, cwd, abs_path);
|
|
const size = std.mem.replacementSize(u8, rel_path, "../", "_.._/");
|
|
_ = std.mem.replace(u8, rel_path, "../", "_.._/", &b);
|
|
const rel_path_escaped = b[0..size];
|
|
dumpBundle(dump_dir, switch (side) {
|
|
.client => .client,
|
|
.server => if (is_ssr_graph) .ssr else .server,
|
|
}, rel_path_escaped, code, true) catch |err| {
|
|
bun.handleErrorReturnTrace(err, @errorReturnTrace());
|
|
Output.warn("Could not dump bundle: {}", .{err});
|
|
};
|
|
};
|
|
|
|
const gop = try g.bundled_files.getOrPut(dev.allocator, abs_path);
|
|
const file_index = FileIndex.init(@intCast(gop.index));
|
|
|
|
if (!gop.found_existing) {
|
|
gop.key_ptr.* = try bun.default_allocator.dupe(u8, abs_path);
|
|
try g.first_dep.append(dev.allocator, .none);
|
|
try g.first_import.append(dev.allocator, .none);
|
|
}
|
|
|
|
if (g.stale_files.bit_length > gop.index) {
|
|
g.stale_files.unset(gop.index);
|
|
}
|
|
|
|
ctx.getCachedIndex(side, index).* = FileIndex.init(@intCast(gop.index));
|
|
|
|
switch (side) {
|
|
.client => {
|
|
if (gop.found_existing) {
|
|
if (kind == .js)
|
|
bun.default_allocator.free(gop.value_ptr.code());
|
|
|
|
if (gop.value_ptr.flags.failed) {
|
|
const kv = dev.bundling_failures.fetchSwapRemoveAdapted(
|
|
SerializedFailure.Owner{ .client = file_index },
|
|
SerializedFailure.ArrayHashAdapter{},
|
|
) orelse
|
|
Output.panic("Missing SerializedFailure in IncrementalGraph", .{});
|
|
try dev.incremental_result.failures_removed.append(
|
|
dev.allocator,
|
|
kv.key,
|
|
);
|
|
}
|
|
}
|
|
const flags: File.Flags = .{
|
|
.failed = false,
|
|
.is_hmr_root = ctx.server_to_client_bitset.isSet(index.get()),
|
|
.is_special_framework_file = false,
|
|
.kind = kind,
|
|
};
|
|
if (kind == .css) {
|
|
if (!gop.found_existing or gop.value_ptr.code_len == 0) {
|
|
gop.value_ptr.* = File.init(try std.fmt.allocPrint(
|
|
dev.allocator,
|
|
css_prefix ++ "/{}.css",
|
|
.{std.fmt.fmtSliceHexLower(std.mem.asBytes(&bun.hash(abs_path)))},
|
|
), flags);
|
|
} else {
|
|
// The key is just the file-path
|
|
gop.value_ptr.flags = flags;
|
|
}
|
|
} else {
|
|
gop.value_ptr.* = File.init(code, flags);
|
|
}
|
|
try g.current_chunk_parts.append(dev.allocator, file_index);
|
|
},
|
|
.server => {
|
|
if (!gop.found_existing) {
|
|
const client_component_boundary = ctx.server_to_client_bitset.isSet(index.get());
|
|
|
|
gop.value_ptr.* = .{
|
|
.is_rsc = !is_ssr_graph,
|
|
.is_ssr = is_ssr_graph,
|
|
.is_route = false,
|
|
.is_client_component_boundary = client_component_boundary,
|
|
.failed = false,
|
|
.kind = kind,
|
|
};
|
|
|
|
if (client_component_boundary) {
|
|
try dev.incremental_result.client_components_added.append(dev.allocator, file_index);
|
|
}
|
|
} else {
|
|
gop.value_ptr.kind = kind;
|
|
|
|
if (is_ssr_graph) {
|
|
gop.value_ptr.is_ssr = true;
|
|
} else {
|
|
gop.value_ptr.is_rsc = true;
|
|
}
|
|
|
|
if (ctx.server_to_client_bitset.isSet(index.get())) {
|
|
gop.value_ptr.is_client_component_boundary = true;
|
|
try dev.incremental_result.client_components_added.append(dev.allocator, file_index);
|
|
} else if (gop.value_ptr.is_client_component_boundary) {
|
|
const client_graph = &g.owner().client_graph;
|
|
const client_index = client_graph.getFileIndex(gop.key_ptr.*) orelse
|
|
Output.panic("Client graph's SCB was already deleted", .{});
|
|
try dev.incremental_result.delete_client_files_later.append(g.owner().allocator, client_index);
|
|
gop.value_ptr.is_client_component_boundary = false;
|
|
|
|
try dev.incremental_result.client_components_removed.append(dev.allocator, file_index);
|
|
}
|
|
|
|
if (gop.value_ptr.failed) {
|
|
gop.value_ptr.failed = false;
|
|
const kv = dev.bundling_failures.fetchSwapRemoveAdapted(
|
|
SerializedFailure.Owner{ .server = file_index },
|
|
SerializedFailure.ArrayHashAdapter{},
|
|
) orelse
|
|
Output.panic("Missing failure in IncrementalGraph", .{});
|
|
try dev.incremental_result.failures_removed.append(
|
|
dev.allocator,
|
|
kv.key,
|
|
);
|
|
}
|
|
}
|
|
try g.current_chunk_parts.append(dev.allocator, code);
|
|
},
|
|
}
|
|
}
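// For reference, the CSS branch above stores a URL rather than source code
// as the file's value: `css_prefix ++ "/<16 hex chars>.css"`, where the hex
// digits are the lowercase bytes of the 64-bit hash of the absolute path.
// JS chunks keep their bundled code directly. (Sketch only; `css_prefix`
// is defined elsewhere in this file.)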
|
|
|
|
const TempLookup = extern struct {
|
|
edge_index: EdgeIndex,
|
|
seen: bool,
|
|
|
|
const HashTable = AutoArrayHashMapUnmanaged(FileIndex, TempLookup);
|
|
};
|
|
|
|
/// Second pass of IncrementalGraph indexing
|
|
/// - Updates dependency information for each file
|
|
/// - Resolves what the HMR roots are
|
|
pub fn processChunkDependencies(
|
|
g: *@This(),
|
|
ctx: *HotUpdateContext,
|
|
bundle_graph_index: bun.JSAst.Index,
|
|
temp_alloc: Allocator,
|
|
) bun.OOM!void {
|
|
const log = bun.Output.scoped(.processChunkDependencies, false);
|
|
const file_index: FileIndex = ctx.getCachedIndex(side, bundle_graph_index).*;
|
|
log("index id={d} {}:", .{
|
|
file_index.get(),
|
|
bun.fmt.quote(g.bundled_files.keys()[file_index.get()]),
|
|
});
|
|
|
|
var quick_lookup: TempLookup.HashTable = .{};
|
|
defer quick_lookup.deinit(temp_alloc);
|
|
|
|
{
|
|
var it: ?EdgeIndex = g.first_import.items[file_index.get()].unwrap();
|
|
while (it) |edge_index| {
|
|
const dep = g.edges.items[edge_index.get()];
|
|
it = dep.next_import.unwrap();
|
|
assert(dep.dependency == file_index);
|
|
try quick_lookup.putNoClobber(temp_alloc, dep.imported, .{
|
|
.seen = false,
|
|
.edge_index = edge_index,
|
|
});
|
|
}
|
|
}
|
|
|
|
var new_imports: EdgeIndex.Optional = .none;
|
|
defer g.first_import.items[file_index.get()] = new_imports;
|
|
|
|
if (side == .server) {
|
|
if (ctx.server_seen_bit_set.isSet(file_index.get())) return;
|
|
|
|
const file = &g.bundled_files.values()[file_index.get()];
|
|
|
|
// Process both files in the server-components graph at the same
|
|
// time. If they were done separately, the second would detach
|
|
// the edges the first added.
|
|
if (file.is_rsc and file.is_ssr) {
|
|
// The non-ssr file is always first.
|
|
// const ssr_index = ctx.scbs.getSSRIndex(bundle_graph_index.get()) orelse {
|
|
// @panic("Unexpected missing server-component-boundary entry");
|
|
// };
|
|
// try g.processChunkImportRecords(ctx, &quick_lookup, &new_imports, file_index, bun.JSAst.Index.init(ssr_index));
|
|
}
|
|
}
|
|
|
|
try g.processChunkImportRecords(ctx, &quick_lookup, &new_imports, file_index, bundle_graph_index);
|
|
|
|
// '.seen = false' means an import was removed and should be freed
|
|
for (quick_lookup.values()) |val| {
|
|
if (!val.seen) {
|
|
// Unlink from dependency list. At this point the edge is
|
|
// already detached from the import list.
|
|
g.disconnectEdgeFromDependencyList(val.edge_index);
|
|
|
|
// With no references to this edge, it can be freed
|
|
g.freeEdge(val.edge_index);
|
|
}
|
|
}
|
|
|
|
if (side == .server) {
|
|
// Follow this file to the route to mark it as stale.
|
|
try g.traceDependencies(file_index, .stop_at_boundary);
|
|
} else {
|
|
// TODO: Follow this file to the HMR root (the info needed to determine it is not currently stored);
|
|
// without this, changing a client-only file will not mark the route's client bundle as stale
|
|
}
|
|
}
|
|
|
|
fn disconnectEdgeFromDependencyList(g: *@This(), edge_index: EdgeIndex) void {
|
|
const edge = &g.edges.items[edge_index.get()];
|
|
igLog("detach edge={d} | id={d} {} -> id={d} {}", .{
|
|
edge_index.get(),
|
|
edge.dependency.get(),
|
|
bun.fmt.quote(g.bundled_files.keys()[edge.dependency.get()]),
|
|
edge.imported.get(),
|
|
bun.fmt.quote(g.bundled_files.keys()[edge.imported.get()]),
|
|
});
|
|
if (edge.prev_dependency.unwrap()) |prev| {
|
|
const prev_dependency = &g.edges.items[prev.get()];
|
|
prev_dependency.next_dependency = edge.next_dependency;
|
|
} else {
|
|
assert(g.first_dep.items[edge.imported.get()].unwrap() == edge_index);
|
|
g.first_dep.items[edge.imported.get()] = .none;
|
|
}
|
|
if (edge.next_dependency.unwrap()) |next| {
|
|
const next_dependency = &g.edges.items[next.get()];
|
|
next_dependency.prev_dependency = edge.prev_dependency;
|
|
}
|
|
}
|
|
|
|
fn processChunkImportRecords(
|
|
g: *@This(),
|
|
ctx: *HotUpdateContext,
|
|
quick_lookup: *TempLookup.HashTable,
|
|
new_imports: *EdgeIndex.Optional,
|
|
file_index: FileIndex,
|
|
index: bun.JSAst.Index,
|
|
) !void {
|
|
const log = bun.Output.scoped(.processChunkDependencies, false);
|
|
for (ctx.import_records[index.get()].slice()) |import_record| {
|
|
if (!import_record.source_index.isRuntime()) try_index_record: {
|
|
const imported_file_index = if (import_record.source_index.isInvalid())
|
|
if (std.fs.path.isAbsolute(import_record.path.text))
|
|
FileIndex.init(@intCast(
|
|
g.bundled_files.getIndex(import_record.path.text) orelse break :try_index_record,
|
|
))
|
|
else
|
|
break :try_index_record
|
|
else
|
|
ctx.getCachedIndex(side, import_record.source_index).*;
|
|
|
|
if (quick_lookup.getPtr(imported_file_index)) |lookup| {
|
|
// If the edge has already been seen, it will be skipped
|
|
// to ensure duplicate edges never exist.
|
|
if (lookup.seen) continue;
|
|
lookup.seen = true;
|
|
|
|
const dep = &g.edges.items[lookup.edge_index.get()];
|
|
dep.next_import = new_imports.*;
|
|
new_imports.* = lookup.edge_index.toOptional();
|
|
} else {
|
|
// A new edge is needed to represent the dependency and import.
|
|
const first_dep = &g.first_dep.items[imported_file_index.get()];
|
|
const edge = try g.newEdge(.{
|
|
.next_import = new_imports.*,
|
|
.next_dependency = first_dep.*,
|
|
.prev_dependency = .none,
|
|
.imported = imported_file_index,
|
|
.dependency = file_index,
|
|
});
|
|
if (first_dep.*.unwrap()) |dep| {
|
|
g.edges.items[dep.get()].prev_dependency = edge.toOptional();
|
|
}
|
|
new_imports.* = edge.toOptional();
|
|
first_dep.* = edge.toOptional();
|
|
|
|
log("attach edge={d} | id={d} {} -> id={d} {}", .{
|
|
edge.get(),
|
|
file_index.get(),
|
|
bun.fmt.quote(g.bundled_files.keys()[file_index.get()]),
|
|
imported_file_index.get(),
|
|
bun.fmt.quote(g.bundled_files.keys()[imported_file_index.get()]),
|
|
});
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
const TraceDependencyKind = enum {
|
|
stop_at_boundary,
|
|
no_stop,
|
|
};
|
|
|
|
fn traceDependencies(g: *@This(), file_index: FileIndex, trace_kind: TraceDependencyKind) !void {
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
|
|
if (Environment.enable_logs) {
|
|
igLog("traceDependencies(.{s}, {}{s})", .{
|
|
@tagName(side),
|
|
bun.fmt.quote(g.bundled_files.keys()[file_index.get()]),
|
|
if (g.affected_by_trace.isSet(file_index.get())) " [already visited]" else "",
|
|
});
|
|
}
|
|
|
|
if (g.affected_by_trace.isSet(file_index.get()))
|
|
return;
|
|
g.affected_by_trace.set(file_index.get());
|
|
|
|
const file = g.bundled_files.values()[file_index.get()];
|
|
|
|
switch (side) {
|
|
.server => {
|
|
const dev = g.owner();
|
|
if (file.is_route) {
|
|
const route_index = dev.route_lookup.get(file_index) orelse
|
|
Output.panic("Route not in lookup index: {d} {}", .{ file_index.get(), bun.fmt.quote(g.bundled_files.keys()[file_index.get()]) });
|
|
igLog("\\<- Route", .{});
|
|
|
|
try dev.incremental_result.routes_affected.append(dev.allocator, route_index);
|
|
}
|
|
if (file.is_client_component_boundary) {
|
|
try dev.incremental_result.client_components_affected.append(dev.allocator, file_index);
|
|
}
|
|
},
|
|
.client => {
|
|
if (file.flags.is_hmr_root) {
|
|
const dev = g.owner();
|
|
const key = g.bundled_files.keys()[file_index.get()];
|
|
const index = dev.server_graph.getFileIndex(key) orelse
|
|
Output.panic("Server Incremental Graph is missing component for {}", .{bun.fmt.quote(key)});
|
|
try dev.server_graph.traceDependencies(index, trace_kind);
|
|
}
|
|
},
|
|
}
|
|
|
|
// Certain files do not propagate updates to dependencies.
|
|
// This is how updating a client component doesn't cause
|
|
// a server-side reload.
|
|
if (trace_kind == .stop_at_boundary) {
|
|
if (file.stopsDependencyTrace()) {
|
|
igLog("\\<- this file stops propagation", .{});
|
|
return;
|
|
}
|
|
}
|
|
|
|
// Recurse
|
|
var it: ?EdgeIndex = g.first_dep.items[file_index.get()].unwrap();
|
|
while (it) |dep_index| {
|
|
const edge = g.edges.items[dep_index.get()];
|
|
it = edge.next_dependency.unwrap();
|
|
try g.traceDependencies(edge.dependency, trace_kind);
|
|
}
|
|
}
|
|
|
|
fn traceImports(g: *@This(), file_index: FileIndex, goal: TraceImportGoal) !void {
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
|
|
if (Environment.enable_logs) {
|
|
igLog("traceImports(.{s}, {}{s})", .{
|
|
@tagName(side),
|
|
bun.fmt.quote(g.bundled_files.keys()[file_index.get()]),
|
|
if (g.affected_by_trace.isSet(file_index.get())) " [already visited]" else "",
|
|
});
|
|
}
|
|
|
|
if (g.affected_by_trace.isSet(file_index.get()))
|
|
return;
|
|
g.affected_by_trace.set(file_index.get());
|
|
|
|
const file = g.bundled_files.values()[file_index.get()];
|
|
|
|
switch (side) {
|
|
.server => {
|
|
if (file.is_client_component_boundary or file.kind == .css) {
|
|
const dev = g.owner();
|
|
const key = g.bundled_files.keys()[file_index.get()];
|
|
const index = dev.client_graph.getFileIndex(key) orelse
|
|
Output.panic("Client Incremental Graph is missing component for {}", .{bun.fmt.quote(key)});
|
|
try dev.client_graph.traceImports(index, goal);
|
|
}
|
|
},
|
|
.client => {
|
|
assert(!g.stale_files.isSet(file_index.get())); // should not be left stale
|
|
if (file.flags.kind == .css) {
|
|
if (goal.find_css) {
|
|
try g.current_css_files.append(g.owner().allocator, file.code());
|
|
}
|
|
|
|
// Do not count css files as a client module
|
|
// and also do not trace its dependencies.
|
|
//
|
|
// The server version of this code does not need to
|
|
// early return, since server css files never have
|
|
// imports.
|
|
return;
|
|
}
|
|
|
|
if (goal.find_client_modules) {
|
|
try g.current_chunk_parts.append(g.owner().allocator, file_index);
|
|
g.current_chunk_len += file.code_len;
|
|
}
|
|
},
|
|
}
|
|
|
|
// Recurse
|
|
var it: ?EdgeIndex = g.first_import.items[file_index.get()].unwrap();
|
|
while (it) |dep_index| {
|
|
const edge = g.edges.items[dep_index.get()];
|
|
it = edge.next_import.unwrap();
|
|
try g.traceImports(edge.imported, goal);
|
|
}
|
|
}
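// Direction cheat-sheet for the two traversals above (informal):
//
//   traceDependencies: changed file -> files that import it -> ... -> route;
//     used to mark affected routes and client component boundaries.
//   traceImports: route/entry file -> files it imports -> ... -> leaves;
//     used to re-collect code and CSS when materializing a full bundle.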
|
|
|
|
/// Never takes ownership of `abs_path`
|
|
/// Inserts a file into the graph without any content, marking it stale. Used to track dependencies on files that don't exist.
|
|
pub fn insertStale(g: *@This(), abs_path: []const u8, is_ssr_graph: bool) bun.OOM!FileIndex {
|
|
return g.insertStaleExtra(abs_path, is_ssr_graph, false);
|
|
}
|
|
|
|
pub fn insertStaleExtra(g: *@This(), abs_path: []const u8, is_ssr_graph: bool, is_route: bool) bun.OOM!FileIndex {
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
|
|
debug.log("Insert stale: {s}", .{abs_path});
|
|
const gop = try g.bundled_files.getOrPut(g.owner().allocator, abs_path);
|
|
const file_index = FileIndex.init(@intCast(gop.index));
|
|
|
|
if (!gop.found_existing) {
|
|
gop.key_ptr.* = try bun.default_allocator.dupe(u8, abs_path);
|
|
try g.first_dep.append(g.owner().allocator, .none);
|
|
try g.first_import.append(g.owner().allocator, .none);
|
|
} else {
|
|
if (side == .server) {
|
|
if (is_route) gop.value_ptr.*.is_route = is_route;
|
|
}
|
|
}
|
|
|
|
if (g.stale_files.bit_length > gop.index) {
|
|
g.stale_files.set(gop.index);
|
|
}
|
|
|
|
switch (side) {
|
|
.client => {
|
|
gop.value_ptr.* = File.init("", .{
|
|
.failed = false,
|
|
.is_hmr_root = false,
|
|
.is_special_framework_file = false,
|
|
.kind = .unknown,
|
|
});
|
|
},
|
|
.server => {
|
|
if (!gop.found_existing) {
|
|
gop.value_ptr.* = .{
|
|
.is_rsc = !is_ssr_graph,
|
|
.is_ssr = is_ssr_graph,
|
|
.is_route = is_route,
|
|
.is_client_component_boundary = false,
|
|
.failed = false,
|
|
.kind = .unknown,
|
|
};
|
|
} else if (is_ssr_graph) {
|
|
gop.value_ptr.is_ssr = true;
|
|
} else {
|
|
gop.value_ptr.is_rsc = true;
|
|
}
|
|
},
|
|
}
|
|
|
|
return file_index;
|
|
}
|
|
|
|
/// Server CSS files exist only as targets for graph traversal;
/// their content lives only on the client.
|
|
pub fn insertCssFileOnServer(g: *@This(), ctx: *HotUpdateContext, index: bun.JSAst.Index, abs_path: []const u8) bun.OOM!void {
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
|
|
debug.log("Insert stale: {s}", .{abs_path});
|
|
const gop = try g.bundled_files.getOrPut(g.owner().allocator, abs_path);
|
|
const file_index = FileIndex.init(@intCast(gop.index));
|
|
|
|
if (!gop.found_existing) {
|
|
gop.key_ptr.* = try bun.default_allocator.dupe(u8, abs_path);
|
|
try g.first_dep.append(g.owner().allocator, .none);
|
|
try g.first_import.append(g.owner().allocator, .none);
|
|
}
|
|
|
|
switch (side) {
|
|
.client => @compileError("not implemented: use receiveChunk"),
|
|
.server => {
|
|
gop.value_ptr.* = .{
|
|
.is_rsc = false,
|
|
.is_ssr = false,
|
|
.is_route = false,
|
|
.is_client_component_boundary = false,
|
|
.failed = false,
|
|
.kind = .css,
|
|
};
|
|
},
|
|
}
|
|
|
|
ctx.getCachedIndex(.server, index).* = file_index;
|
|
}
|
|
|
|
pub fn insertFailure(
|
|
g: *@This(),
|
|
abs_path: []const u8,
|
|
log: *const Log,
|
|
is_ssr_graph: bool,
|
|
) bun.OOM!void {
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
|
|
debug.log("Insert stale: {s}", .{abs_path});
|
|
const gop = try g.bundled_files.getOrPut(g.owner().allocator, abs_path);
|
|
const file_index = FileIndex.init(@intCast(gop.index));
|
|
|
|
if (!gop.found_existing) {
|
|
gop.key_ptr.* = try bun.default_allocator.dupe(u8, abs_path);
|
|
try g.first_dep.append(g.owner().allocator, .none);
|
|
try g.first_import.append(g.owner().allocator, .none);
|
|
}
|
|
|
|
if (g.stale_files.bit_length > gop.index) {
|
|
g.stale_files.set(gop.index);
|
|
}
|
|
|
|
switch (side) {
|
|
.client => {
|
|
gop.value_ptr.* = File.init("", .{
|
|
.failed = true,
|
|
.is_hmr_root = false,
|
|
.is_special_framework_file = false,
|
|
.kind = .unknown,
|
|
});
|
|
},
|
|
.server => {
|
|
if (!gop.found_existing) {
|
|
gop.value_ptr.* = .{
|
|
.is_rsc = !is_ssr_graph,
|
|
.is_ssr = is_ssr_graph,
|
|
.is_route = false,
|
|
.is_client_component_boundary = false,
|
|
.failed = true,
|
|
.kind = .unknown,
|
|
};
|
|
} else {
|
|
if (is_ssr_graph) {
|
|
gop.value_ptr.is_ssr = true;
|
|
} else {
|
|
gop.value_ptr.is_rsc = true;
|
|
}
|
|
gop.value_ptr.failed = true;
|
|
}
|
|
},
|
|
}
|
|
|
|
const dev = g.owner();
|
|
|
|
const fail_owner: SerializedFailure.Owner = switch (side) {
|
|
.server => .{ .server = file_index },
|
|
.client => .{ .client = file_index },
|
|
};
|
|
const failure = try SerializedFailure.initFromLog(
|
|
fail_owner,
|
|
dev.relativePath(abs_path),
|
|
log.msgs.items,
|
|
);
|
|
const fail_gop = try dev.bundling_failures.getOrPut(dev.allocator, failure);
|
|
try dev.incremental_result.failures_added.append(dev.allocator, failure);
|
|
if (fail_gop.found_existing) {
|
|
try dev.incremental_result.failures_removed.append(dev.allocator, fail_gop.key_ptr.*);
|
|
fail_gop.key_ptr.* = failure;
|
|
}
|
|
}
|
|
|
|
pub fn ensureStaleBitCapacity(g: *@This(), are_new_files_stale: bool) !void {
|
|
try g.stale_files.resize(
|
|
g.owner().allocator,
|
|
std.mem.alignForward(
|
|
usize,
|
|
@max(g.bundled_files.count(), g.stale_files.bit_length),
|
|
// allocate in chunks of 8 usizes (512 bits on 64-bit targets)
|
|
std.mem.byte_size_in_bits * @sizeOf(usize) * 8,
|
|
),
|
|
are_new_files_stale,
|
|
);
|
|
}
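// Worked example of the rounding above, assuming a 64-bit target:
// byte_size_in_bits (8) * @sizeOf(usize) (8) * 8 = 512, so with, say,
// 100 bundled files the bit-set is resized to 512 bits and only needs
// to grow again once more than 512 files exist.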
|
|
|
|
pub fn invalidate(g: *@This(), paths: []const []const u8, out_paths: *std.ArrayList(BakeEntryPoint)) !void {
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
const values = g.bundled_files.values();
|
|
for (paths) |path| {
|
|
const index = g.bundled_files.getIndex(path) orelse {
|
|
// cannot enqueue because we don't know what targets to
|
|
// bundle for. instead, a failing bundle must retrieve the
|
|
// list of files and add them as stale.
|
|
continue;
|
|
};
|
|
g.stale_files.set(index);
|
|
const data = &values[index];
|
|
switch (side) {
|
|
.client => {
|
|
// When re-bundling SCBs, only bundle the server. Otherwise
|
|
// the bundler gets confused and bundles both sides without
|
|
// knowledge of the boundary between them.
|
|
if (data.flags.kind == .css)
|
|
try out_paths.append(BakeEntryPoint.initCss(path))
|
|
else if (!data.flags.is_hmr_root)
|
|
try out_paths.append(BakeEntryPoint.init(path, .client));
|
|
},
|
|
.server => {
|
|
if (data.is_rsc)
|
|
try out_paths.append(BakeEntryPoint.init(path, .server));
|
|
if (data.is_ssr and !data.is_client_component_boundary)
|
|
try out_paths.append(BakeEntryPoint.init(path, .ssr));
|
|
},
|
|
}
|
|
}
|
|
}
|
|
|
|
fn reset(g: *@This()) void {
|
|
g.current_chunk_len = 0;
|
|
g.current_chunk_parts.clearRetainingCapacity();
|
|
if (side == .client) g.current_css_files.clearRetainingCapacity();
|
|
}
|
|
|
|
pub fn takeBundle(
|
|
g: *@This(),
|
|
kind: ChunkKind,
|
|
initial_response_entry_point: []const u8,
|
|
) ![]const u8 {
|
|
var chunk = std.ArrayList(u8).init(g.owner().allocator);
|
|
try g.takeBundleToList(kind, &chunk, initial_response_entry_point);
|
|
bun.assert(chunk.items.len == chunk.capacity);
|
|
return chunk.items;
|
|
}
|
|
|
|
pub fn takeBundleToList(
|
|
g: *@This(),
|
|
kind: ChunkKind,
|
|
list: *std.ArrayList(u8),
|
|
initial_response_entry_point: []const u8,
|
|
) !void {
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
// initial bundle needs at least the entry point
|
|
// hot updates shouldn't be emitted if there are no chunks
|
|
assert(g.current_chunk_len > 0);
|
|
|
|
const runtime = switch (kind) {
|
|
.initial_response => bun.bake.getHmrRuntime(side),
|
|
.hmr_chunk => "({\n",
|
|
};
|
|
|
|
// A small amount of metadata is present at the end of the chunk
|
|
// to inform the HMR runtime of some crucial entry-point info. The
// exact upper bound of this could be calculated, but is not, to
// avoid worrying about Windows paths.
|
|
var end_sfa = std.heap.stackFallback(65536, g.owner().allocator);
|
|
var end_list = std.ArrayList(u8).initCapacity(end_sfa.get(), 65536) catch unreachable;
|
|
defer end_list.deinit();
|
|
const end = end: {
|
|
const w = end_list.writer();
|
|
switch (kind) {
|
|
.initial_response => {
|
|
const fw = g.owner().framework;
|
|
try w.writeAll("}, {\n main: ");
|
|
try bun.js_printer.writeJSONString(
|
|
g.owner().relativePath(initial_response_entry_point),
|
|
@TypeOf(w),
|
|
w,
|
|
.utf8,
|
|
);
|
|
switch (side) {
|
|
.client => {
|
|
try w.writeAll(",\n version: \"");
|
|
try w.writeAll(&g.owner().configuration_hash_key);
|
|
try w.writeAll("\"");
|
|
if (fw.react_fast_refresh) |rfr| {
|
|
try w.writeAll(",\n refresh: ");
|
|
try bun.js_printer.writeJSONString(
|
|
g.owner().relativePath(rfr.import_source),
|
|
@TypeOf(w),
|
|
w,
|
|
.utf8,
|
|
);
|
|
}
|
|
},
|
|
.server => {
|
|
if (fw.server_components) |sc| {
|
|
if (sc.separate_ssr_graph) {
|
|
try w.writeAll(",\n separateSSRGraph: true");
|
|
}
|
|
}
|
|
},
|
|
}
|
|
try w.writeAll("\n})");
|
|
},
|
|
.hmr_chunk => {
|
|
try w.writeAll("\n})");
|
|
},
|
|
}
|
|
break :end end_list.items;
|
|
};
|
|
|
|
const files = g.bundled_files.values();
|
|
|
|
const start = list.items.len;
|
|
if (start == 0)
|
|
try list.ensureTotalCapacityPrecise(g.current_chunk_len + runtime.len + end.len)
|
|
else
|
|
try list.ensureUnusedCapacity(g.current_chunk_len + runtime.len + end.len);
|
|
|
|
list.appendSliceAssumeCapacity(runtime);
|
|
for (g.current_chunk_parts.items) |entry| {
|
|
list.appendSliceAssumeCapacity(switch (side) {
|
|
// entry is an index into files
|
|
.client => files[entry.get()].code(),
|
|
// entry is the '[]const u8' itself
|
|
.server => entry,
|
|
});
|
|
}
|
|
list.appendSliceAssumeCapacity(end);
|
|
|
|
if (bun.FeatureFlags.bake_debugging_features) if (g.owner().dump_dir) |dump_dir| {
|
|
const rel_path_escaped = "latest_chunk.js";
|
|
dumpBundle(dump_dir, switch (side) {
|
|
.client => .client,
|
|
.server => .server,
|
|
}, rel_path_escaped, list.items[start..], false) catch |err| {
|
|
bun.handleErrorReturnTrace(err, @errorReturnTrace());
|
|
Output.warn("Could not dump bundle: {}", .{err});
|
|
};
|
|
};
|
|
}
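// Illustrative tail of a client `initial_response` chunk as written above
// (paths and hash are placeholders, not real values):
//
//   }, {
//    main: "src/index.tsx",
//    version: "0123456789abcdef",
//    refresh: "node_modules/react-refresh/runtime.js"
//   })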
|
|
|
|
fn disconnectAndDeleteFile(g: *@This(), file_index: FileIndex) void {
|
|
const last = FileIndex.init(@intCast(g.bundled_files.count() - 1));
|
|
|
|
bun.assert(g.bundled_files.count() > 1); // never remove all files
|
|
bun.assert(g.first_dep.items[file_index.get()] == .none); // must have no dependencies
|
|
|
|
// Disconnect all imports
|
|
{
|
|
var it: ?EdgeIndex = g.first_import.items[file_index.get()].unwrap();
|
|
while (it) |edge_index| {
|
|
const dep = g.edges.items[edge_index.get()];
|
|
it = dep.next_import.unwrap();
|
|
assert(dep.dependency == file_index);
|
|
|
|
g.disconnectEdgeFromDependencyList(edge_index);
|
|
g.freeEdge(edge_index);
|
|
}
|
|
}
|
|
|
|
// TODO: it is infeasible to do this since FrameworkRouter contains file indices
|
|
// to the server graph
|
|
{
|
|
return;
|
|
}
|
|
|
|
g.bundled_files.swapRemoveAt(file_index.get());
|
|
|
|
// Move out-of-line data from `last` to replace `file_index`
|
|
_ = g.first_dep.swapRemove(file_index.get());
|
|
_ = g.first_import.swapRemove(file_index.get());
|
|
|
|
if (file_index != last) {
|
|
g.stale_files.setValue(file_index.get(), g.stale_files.isSet(last.get()));
|
|
|
|
// This set is not always initialized, so ignore if it's empty
|
|
if (g.affected_by_trace.bit_length > 0) {
|
|
g.affected_by_trace.setValue(file_index.get(), g.affected_by_trace.isSet(last.get()));
|
|
}
|
|
|
|
// Adjust all referenced edges to point to the new file
|
|
{
|
|
var it: ?EdgeIndex = g.first_import.items[file_index.get()].unwrap();
|
|
while (it) |edge_index| {
|
|
const dep = &g.edges.items[edge_index.get()];
|
|
it = dep.next_import.unwrap();
|
|
assert(dep.dependency == last);
|
|
dep.dependency = file_index;
|
|
}
|
|
}
|
|
{
|
|
var it: ?EdgeIndex = g.first_dep.items[file_index.get()].unwrap();
|
|
while (it) |edge_index| {
|
|
const dep = &g.edges.items[edge_index.get()];
|
|
it = dep.next_dependency.unwrap();
|
|
assert(dep.imported == last);
|
|
dep.imported = file_index;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
fn newEdge(g: *@This(), edge: Edge) !EdgeIndex {
|
|
if (g.edges_free_list.popOrNull()) |index| {
|
|
g.edges.items[index.get()] = edge;
|
|
return index;
|
|
}
|
|
|
|
const index = EdgeIndex.init(@intCast(g.edges.items.len));
|
|
try g.edges.append(g.owner().allocator, edge);
|
|
return index;
|
|
}
|
|
|
|
/// Does nothing besides release the `Edge` for reallocation by `newEdge`
|
|
/// Caller must detach the dependency from the linked list it is in.
|
|
fn freeEdge(g: *@This(), edge_index: EdgeIndex) void {
|
|
if (Environment.isDebug) {
|
|
g.edges.items[edge_index.get()] = undefined;
|
|
}
|
|
|
|
if (edge_index.get() == (g.edges.items.len - 1)) {
|
|
g.edges.items.len -= 1;
|
|
} else {
|
|
g.edges_free_list.append(g.owner().allocator, edge_index) catch {
|
|
// Leak an edge object; Ok since it may get cleaned up by
|
|
// the next incremental graph garbage-collection cycle.
|
|
};
|
|
}
|
|
}
|
|
|
|
pub fn owner(g: *@This()) *DevServer {
|
|
return @alignCast(@fieldParentPtr(@tagName(side) ++ "_graph", g));
|
|
}
|
|
};
|
|
}
|
|
|
|
const IncrementalResult = struct {
|
|
/// When tracing a file's dependencies via `traceDependencies`, this is
|
|
/// populated with the hit `Route.Index`s. To know what `RouteBundle`s
|
|
/// are affected, the route graph must be traced downwards.
|
|
/// Tracing is used for multiple purposes.
|
|
routes_affected: ArrayListUnmanaged(RouteIndexAndRecurseFlag),
|
|
|
|
// Following three fields are populated during `receiveChunk`
|
|
|
|
/// Components to add to the client manifest
|
|
client_components_added: ArrayListUnmanaged(IncrementalGraph(.server).FileIndex),
|
|
/// Components to add to the client manifest
|
|
client_components_removed: ArrayListUnmanaged(IncrementalGraph(.server).FileIndex),
|
|
/// This list acts as a free list. The contents of these slices must remain
|
|
/// valid; they have to be so the affected routes can be cleared of the
|
|
/// failures and potentially be marked valid. At the end of an
|
|
/// incremental update, the slices are freed.
|
|
failures_removed: ArrayListUnmanaged(SerializedFailure),
|
|
|
|
/// Client boundaries that have been added or modified. At the end of a hot
|
|
/// update, these are traced to their route to mark the bundles as stale (to
|
|
/// be generated on Cmd+R)
|
|
///
|
|
/// Populated during `traceDependencies`
|
|
client_components_affected: ArrayListUnmanaged(IncrementalGraph(.server).FileIndex),
|
|
|
|
/// The list of failures which will have to be traced to their route. Such
|
|
/// tracing is deferred until the second pass of finalizeBundler as the
|
|
/// dependency graph may not fully exist at the time the failure is indexed.
|
|
///
|
|
/// Populated from within the bundler via `handleParseTaskFailure`
|
|
failures_added: ArrayListUnmanaged(SerializedFailure),
|
|
|
|
/// Removing files clobbers indices, so removing anything is deferred.
|
|
// TODO: remove
|
|
delete_client_files_later: ArrayListUnmanaged(IncrementalGraph(.client).FileIndex),
|
|
|
|
const empty: IncrementalResult = .{
|
|
.routes_affected = .{},
|
|
.failures_removed = .{},
|
|
.failures_added = .{},
|
|
.client_components_added = .{},
|
|
.client_components_removed = .{},
|
|
.client_components_affected = .{},
|
|
.delete_client_files_later = .{},
|
|
};
|
|
|
|
fn reset(result: *IncrementalResult) void {
|
|
result.routes_affected.clearRetainingCapacity();
|
|
assert(result.failures_removed.items.len == 0);
|
|
result.failures_added.clearRetainingCapacity();
|
|
result.client_components_added.clearRetainingCapacity();
|
|
result.client_components_removed.clearRetainingCapacity();
|
|
result.client_components_affected.clearRetainingCapacity();
|
|
}
|
|
};
|
|
|
|
const GraphTraceState = struct {
|
|
client_bits: DynamicBitSetUnmanaged,
|
|
server_bits: DynamicBitSetUnmanaged,
|
|
|
|
fn deinit(gts: *GraphTraceState, alloc: Allocator) void {
|
|
gts.client_bits.deinit(alloc);
|
|
gts.server_bits.deinit(alloc);
|
|
}
|
|
|
|
fn clear(gts: *GraphTraceState) void {
|
|
gts.server_bits.setAll(false);
|
|
gts.client_bits.setAll(false);
|
|
}
|
|
};
|
|
|
|
const TraceImportGoal = struct {
|
|
// gts: *GraphTraceState,
|
|
find_css: bool = false,
|
|
find_client_modules: bool = false,
|
|
};
|
|
|
|
fn initGraphTraceState(dev: *const DevServer, sfa: Allocator) !GraphTraceState {
|
|
const server_bits = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.server_graph.bundled_files.count());
|
|
errdefer server_bits.deinit(sfa);
|
|
const client_bits = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.client_graph.bundled_files.count());
|
|
return .{ .server_bits = server_bits, .client_bits = client_bits };
|
|
}
|
|
|
|
/// When a file fails to import a relative path, directory watchers are added so
|
|
/// that when a matching file is created, the dependencies can be rebuilt. This
|
|
/// handles HMR cases where a user writes an import before creating the file,
|
|
/// or moves files around.
|
|
///
|
|
/// This structure manages those watchers, including releasing them once
|
|
/// import resolution failures are solved.
|
|
const DirectoryWatchStore = struct {
|
|
/// This guards all store state
|
|
lock: Mutex,
|
|
|
|
/// List of active watchers. Can be re-ordered on removal
|
|
watches: bun.StringArrayHashMapUnmanaged(Entry),
|
|
dependencies: ArrayListUnmanaged(Dep),
|
|
/// Dependencies cannot be re-ordered. This list tracks what indexes are free.
|
|
dependencies_free_list: ArrayListUnmanaged(Dep.Index),
|
|
|
|
const empty: DirectoryWatchStore = .{
|
|
.lock = .{},
|
|
.watches = .{},
|
|
.dependencies = .{},
|
|
.dependencies_free_list = .{},
|
|
};
|
|
|
|
pub fn owner(store: *DirectoryWatchStore) *DevServer {
|
|
return @alignCast(@fieldParentPtr("directory_watchers", store));
|
|
}
|
|
|
|
pub fn trackResolutionFailure(
|
|
store: *DirectoryWatchStore,
|
|
import_source: []const u8,
|
|
specifier: []const u8,
|
|
renderer: bake.Graph,
|
|
) bun.OOM!void {
|
|
store.lock.lock();
|
|
defer store.lock.unlock();
|
|
|
|
// When it does not resolve to a file path, there is
|
|
// nothing to track. Bake does not watch node_modules.
|
|
if (!(bun.strings.startsWith(specifier, "./") or
|
|
bun.strings.startsWith(specifier, "../"))) return;
|
|
if (!std.fs.path.isAbsolute(import_source)) return;
|
|
|
|
const joined = bun.path.joinAbs(bun.path.dirname(import_source, .auto), .auto, specifier);
|
|
const dir = bun.path.dirname(joined, .auto);
|
|
|
|
// `import_source` is not a stable string. let's share memory with the file graph.
|
|
// this requires that
|
|
const dev = store.owner();
|
|
const owned_file_path = switch (renderer) {
|
|
.client => path: {
|
|
const index = try dev.client_graph.insertStale(import_source, false);
|
|
break :path dev.client_graph.bundled_files.keys()[index.get()];
|
|
},
|
|
.server, .ssr => path: {
|
|
const index = try dev.server_graph.insertStale(import_source, renderer == .ssr);
break :path dev.server_graph.bundled_files.keys()[index.get()];
|
|
},
|
|
};
|
|
|
|
store.insert(dir, owned_file_path, specifier) catch |err| switch (err) {
|
|
error.Ignore => {}, // ignoring watch errors.
|
|
error.OutOfMemory => |e| return e,
|
|
};
|
|
}
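// Example of the bookkeeping above (paths are hypothetical): if the client
// file "/app/src/index.ts" contains `import "./missing/thing"` and it fails
// to resolve, `joined` becomes "/app/src/missing/thing", so a watch entry is
// inserted for the directory "/app/src/missing", recording the specifier
// "./missing/thing" against "/app/src/index.ts".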
|
|
|
|
/// `dir_name_to_watch` is cloned
|
|
/// `file_path` must have lifetime that outlives the watch
|
|
/// `specifier` is cloned
|
|
fn insert(
|
|
store: *DirectoryWatchStore,
|
|
dir_name_to_watch: []const u8,
|
|
file_path: []const u8,
|
|
specifier: []const u8,
|
|
) !void {
|
|
// TODO: watch the parent dir too.
|
|
const dev = store.owner();
|
|
|
|
debug.log("DirectoryWatchStore.insert({}, {}, {})", .{
|
|
bun.fmt.quote(dir_name_to_watch),
|
|
bun.fmt.quote(file_path),
|
|
bun.fmt.quote(specifier),
|
|
});
|
|
|
|
if (store.dependencies_free_list.items.len == 0)
|
|
try store.dependencies.ensureUnusedCapacity(dev.allocator, 1);
|
|
|
|
const gop = try store.watches.getOrPut(dev.allocator, dir_name_to_watch);
|
|
if (gop.found_existing) {
|
|
const specifier_cloned = try dev.allocator.dupe(u8, specifier);
|
|
errdefer dev.allocator.free(specifier_cloned);
|
|
|
|
// TODO: check for dependency
|
|
|
|
const dep = store.appendDepAssumeCapacity(.{
|
|
.next = gop.value_ptr.first_dep.toOptional(),
|
|
.source_file_path = file_path,
|
|
.specifier = specifier_cloned,
|
|
});
|
|
gop.value_ptr.first_dep = dep;
|
|
|
|
return;
|
|
}
|
|
errdefer store.watches.swapRemoveAt(gop.index);
|
|
|
|
// Try to use an existing open directory handle
|
|
const cache_fd = if (dev.server_bundler.resolver.readDirInfo(dir_name_to_watch) catch null) |cache| fd: {
|
|
const fd = cache.getFileDescriptor();
|
|
break :fd if (fd == .zero) null else fd;
|
|
} else null;
|
|
|
|
const fd, const owned_fd = if (cache_fd) |fd|
|
|
.{ fd, false }
|
|
else
|
|
.{
|
|
switch (bun.sys.open(
|
|
&(std.posix.toPosixPath(dir_name_to_watch) catch |err| switch (err) {
|
|
error.NameTooLong => return, // wouldn't be able to open, ignore
|
|
}),
|
|
bun.O.DIRECTORY,
|
|
0,
|
|
)) {
|
|
.result => |fd| fd,
|
|
.err => |err| switch (err.getErrno()) {
|
|
// If this directory doesn't exist, a watcher should be
|
|
// placed on the parent directory. Then, if this
|
|
// directory is later created, the watcher can be
|
|
// properly initialized. This would happen if you write
|
|
// an import path like `./dir/whatever/hello.tsx` and
|
|
// `dir` does not exist, Bun must place a watcher on
|
|
// `.`, see the creation of `dir`, and repeat until it
|
|
// can open a watcher on `whatever` to see the creation
|
|
// of `hello.tsx`
|
|
.NOENT => {
|
|
// TODO: implement that. for now it ignores
|
|
return;
|
|
},
|
|
.NOTDIR => return error.Ignore, // ignore
|
|
else => {
|
|
bun.todoPanic(@src(), "log watcher error", .{});
|
|
},
|
|
},
|
|
},
|
|
true,
|
|
};
|
|
errdefer _ = if (owned_fd) bun.sys.close(fd);
|
|
|
|
debug.log("-> fd: {} ({s})", .{
|
|
fd,
|
|
if (owned_fd) "from dir cache" else "owned fd",
|
|
});
|
|
|
|
const dir_name = try dev.allocator.dupe(u8, dir_name_to_watch);
|
|
errdefer dev.allocator.free(dir_name);
|
|
|
|
gop.key_ptr.* = dir_name;
|
|
|
|
const specifier_cloned = try dev.allocator.dupe(u8, specifier);
|
|
errdefer dev.allocator.free(specifier_cloned);
|
|
|
|
const watch_index = switch (dev.bun_watcher.addDirectory(fd, dir_name, bun.JSC.GenericWatcher.getHash(dir_name), false)) {
|
|
.err => return error.Ignore,
|
|
.result => |id| id,
|
|
};
|
|
const dep = store.appendDepAssumeCapacity(.{
|
|
.next = .none,
|
|
.source_file_path = file_path,
|
|
.specifier = specifier_cloned,
|
|
});
|
|
store.watches.putAssumeCapacity(dir_name, .{
|
|
.dir = fd,
|
|
.dir_fd_owned = owned_fd,
|
|
.first_dep = dep,
|
|
.watch_index = watch_index,
|
|
});
|
|
}
|
|
|
|
/// Caller must detach the dependency from the linked list it is in.
|
|
fn freeDependencyIndex(store: *DirectoryWatchStore, alloc: Allocator, index: Dep.Index) !void {
|
|
alloc.free(store.dependencies.items[index.get()].specifier);
|
|
|
|
if (Environment.isDebug) {
|
|
store.dependencies.items[index.get()] = undefined;
|
|
}
|
|
|
|
if (index.get() == (store.dependencies.items.len - 1)) {
|
|
store.dependencies.items.len -= 1;
|
|
} else {
|
|
try store.dependencies_free_list.append(alloc, index);
|
|
}
|
|
}
|
|
|
|
/// Expects dependency list to be already freed
|
|
fn freeEntry(store: *DirectoryWatchStore, entry_index: usize) void {
|
|
const entry = store.watches.values()[entry_index];
|
|
|
|
debug.log("DirectoryWatchStore.freeEntry({d}, {})", .{
|
|
entry_index,
|
|
entry.dir,
|
|
});
|
|
|
|
store.owner().bun_watcher.removeAtIndex(entry.watch_index, 0, &.{}, .file);
|
|
|
|
defer _ = if (entry.dir_fd_owned) bun.sys.close(entry.dir);
|
|
store.watches.swapRemoveAt(entry_index);
|
|
|
|
if (store.watches.entries.len == 0) {
|
|
assert(store.dependencies.items.len == 0);
|
|
store.dependencies_free_list.clearRetainingCapacity();
|
|
}
|
|
}
|
|
|
|
fn appendDepAssumeCapacity(store: *DirectoryWatchStore, dep: Dep) Dep.Index {
|
|
if (store.dependencies_free_list.popOrNull()) |index| {
|
|
store.dependencies.items[index.get()] = dep;
|
|
return index;
|
|
}
|
|
|
|
const index = Dep.Index.init(@intCast(store.dependencies.items.len));
|
|
store.dependencies.appendAssumeCapacity(dep);
|
|
return index;
|
|
}
|
|
|
|
const Entry = struct {
|
|
/// The directory handle the watch is placed on
|
|
dir: bun.FileDescriptor,
|
|
dir_fd_owned: bool,
|
|
/// Head of the linked list of files whose failed imports point into this directory
|
|
first_dep: Dep.Index,
|
|
/// To pass to Watcher.remove
|
|
watch_index: u16,
|
|
};
|
|
|
|
const Dep = struct {
|
|
next: Index.Optional,
|
|
/// The file that contains the failing import
|
|
source_file_path: []const u8,
|
|
/// The specifier that failed to resolve. Before re-building, it is resolved again,
/// since creating an unrelated file should not re-emit another error. Allocated by default_allocator
|
|
specifier: []const u8,
|
|
|
|
const Index = bun.GenericIndex(u32, Dep);
|
|
};
|
|
};
|
|
|
|
const ChunkKind = enum {
|
|
initial_response,
|
|
hmr_chunk,
|
|
};
|
|
|
|
/// Errors sent to the HMR client in the browser are serialized. The same format
|
|
/// is used for thrown JavaScript exceptions as well as bundler errors.
|
|
/// Serialized failures contain a handle on what file or route they came from,
|
|
/// which allows the bundler to dismiss or update stale failures via index as
|
|
/// opposed to re-sending a new payload. This also means only changed files are
|
|
/// rebuilt, instead of all of the failed files.
|
|
///
|
|
/// The HMR client in the browser is expected to sort the final list of errors
|
|
/// for deterministic output; there is code in DevServer that uses `swapRemove`.
|
|
pub const SerializedFailure = struct {
|
|
/// Serialized data is always owned by default_allocator
|
|
/// The first 32 bits of this slice contain the owner
|
|
data: []u8,
|
|
|
|
pub fn deinit(f: SerializedFailure) void {
|
|
bun.default_allocator.free(f.data);
|
|
}
|
|
|
|
/// The metaphorical owner of an incremental file error. The packed variant
|
|
/// is given to the HMR runtime as an opaque handle.
|
|
pub const Owner = union(enum) {
|
|
none,
|
|
route: RouteBundle.Index,
|
|
client: IncrementalGraph(.client).FileIndex,
|
|
server: IncrementalGraph(.server).FileIndex,
|
|
|
|
pub fn encode(owner: Owner) Packed {
|
|
return switch (owner) {
|
|
.none => .{ .kind = .none, .data = 0 },
|
|
.client => |data| .{ .kind = .client, .data = data.get() },
|
|
.server => |data| .{ .kind = .server, .data = data.get() },
|
|
.route => |data| .{ .kind = .route, .data = data.get() },
|
|
};
|
|
}
|
|
|
|
pub const Packed = packed struct(u32) {
|
|
kind: enum(u2) { none, route, client, server },
|
|
data: u30,
|
|
|
|
pub fn decode(owner: Packed) Owner {
|
|
return switch (owner.kind) {
|
|
.none => .none,
|
|
.client => .{ .client = IncrementalGraph(.client).FileIndex.init(owner.data) },
|
|
.server => .{ .server = IncrementalGraph(.server).FileIndex.init(owner.data) },
|
|
.route => .{ .route = RouteBundle.Index.init(owner.data) },
|
|
};
|
|
}
|
|
};
|
|
};
|
|
|
|
fn getOwner(failure: SerializedFailure) Owner {
|
|
return std.mem.bytesAsValue(Owner.Packed, failure.data[0..4]).decode();
|
|
}
|
|
|
|
/// This assumes the hash map contains only one SerializedFailure per owner.
|
|
/// This is okay since SerializedFailure can contain more than one error.
|
|
const ArrayHashContextViaOwner = struct {
|
|
pub fn hash(_: ArrayHashContextViaOwner, k: SerializedFailure) u32 {
|
|
return std.hash.uint32(@bitCast(k.getOwner().encode()));
|
|
}
|
|
|
|
pub fn eql(_: ArrayHashContextViaOwner, a: SerializedFailure, b: SerializedFailure, _: usize) bool {
|
|
return @as(u32, @bitCast(a.getOwner().encode())) == @as(u32, @bitCast(b.getOwner().encode()));
|
|
}
|
|
};
|
|
|
|
const ArrayHashAdapter = struct {
|
|
pub fn hash(_: ArrayHashAdapter, own: Owner) u32 {
|
|
return std.hash.uint32(@bitCast(own.encode()));
|
|
}
|
|
|
|
pub fn eql(_: ArrayHashAdapter, a: Owner, b: SerializedFailure, _: usize) bool {
|
|
return @as(u32, @bitCast(a.encode())) == @as(u32, @bitCast(b.getOwner().encode()));
|
|
}
|
|
};
|
|
|
|
const ErrorKind = enum(u8) {
|
|
// A log message. The `logger.Kind` is encoded here.
|
|
bundler_log_err = 0,
|
|
bundler_log_warn = 1,
|
|
bundler_log_note = 2,
|
|
bundler_log_debug = 3,
|
|
bundler_log_verbose = 4,
|
|
|
|
/// new Error(message)
|
|
js_error,
|
|
/// new TypeError(message)
|
|
js_error_type,
|
|
/// new RangeError(message)
|
|
js_error_range,
|
|
/// Other forms of `Error` objects, including when an error has a
|
|
/// `code`, and other fields.
|
|
js_error_extra,
|
|
/// Non-error with a stack trace
|
|
js_primitive_exception,
|
|
/// Non-error JS values
|
|
js_primitive,
|
|
/// new AggregateError(errors, message)
|
|
js_aggregate,
|
|
};
|
|
|
|
pub fn initFromJs(owner: Owner, value: JSValue) !SerializedFailure {
|
|
{
|
|
_ = value;
|
|
@panic("TODO");
|
|
}
|
|
// Avoid small re-allocations without requesting so much from the heap
|
|
var sfb = std.heap.stackFallback(65536, bun.default_allocator);
|
|
var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch
|
|
unreachable; // enough space
|
|
const w = payload.writer();
|
|
|
|
try w.writeInt(u32, @bitCast(owner.encode()), .little);
|
|
// try writeJsValue(value);
|
|
|
|
// Avoid re-cloning if it was moved to the heap
|
|
const data = if (payload.items.ptr == &sfb.buffer)
|
|
try bun.default_allocator.dupe(u8, payload.items)
|
|
else
|
|
payload.items;
|
|
|
|
return .{ .data = data };
|
|
}
|
|
|
|
pub fn initFromLog(
|
|
owner: Owner,
|
|
owner_display_name: []const u8,
|
|
messages: []const bun.logger.Msg,
|
|
) !SerializedFailure {
|
|
assert(messages.len > 0);
|
|
|
|
// Avoid small re-allocations without requesting so much from the heap
|
|
var sfb = std.heap.stackFallback(65536, bun.default_allocator);
|
|
var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch
|
|
unreachable; // enough space
|
|
const w = payload.writer();
|
|
|
|
try w.writeInt(u32, @bitCast(owner.encode()), .little);
|
|
|
|
try writeString32(owner_display_name, w);
|
|
|
|
try w.writeInt(u32, @intCast(messages.len), .little);
|
|
|
|
for (messages) |*msg| {
|
|
try writeLogMsg(msg, w);
|
|
}
|
|
|
|
// Avoid re-cloning if it was moved to the heap
|
|
const data = if (payload.items.ptr == &sfb.buffer)
|
|
try bun.default_allocator.dupe(u8, payload.items)
|
|
else
|
|
payload.items;
|
|
|
|
return .{ .data = data };
|
|
}
|
|
|
|
// All "write" functions get a corresponding "read" function in ./client/error.ts
|
|
|
|
const Writer = std.ArrayList(u8).Writer;
|
|
|
|
fn writeLogMsg(msg: *const bun.logger.Msg, w: Writer) !void {
|
|
try w.writeByte(switch (msg.kind) {
|
|
inline else => |k| @intFromEnum(@field(ErrorKind, "bundler_log_" ++ @tagName(k))),
|
|
});
|
|
try writeLogData(msg.data, w);
|
|
const notes = msg.notes;
|
|
try w.writeInt(u32, @intCast(notes.len), .little);
|
|
for (notes) |note| {
|
|
try writeLogData(note, w);
|
|
}
|
|
}
|
|
|
|
fn writeLogData(data: bun.logger.Data, w: Writer) !void {
|
|
try writeString32(data.text, w);
|
|
if (data.location) |loc| {
|
|
assert(loc.line >= 0); // one based and not negative
|
|
assert(loc.column >= 0); // zero based and not negative
|
|
|
|
try w.writeInt(u32, @intCast(loc.line), .little);
|
|
try w.writeInt(u32, @intCast(loc.column), .little);
|
|
try w.writeInt(u32, @intCast(loc.length), .little);
|
|
|
|
// TODO: syntax highlighted line text + give more context lines
|
|
try writeString32(loc.line_text orelse "", w);
|
|
|
|
// The file is not specified here. Since the bundler runs every file
|
|
// in isolation, it would be impossible to reference any other file
|
|
// in this Log. Thus, it is not serialized.
|
|
} else {
|
|
try w.writeInt(u32, 0, .little);
|
|
}
|
|
}
|
|
|
|
fn writeString32(data: []const u8, w: Writer) !void {
|
|
try w.writeInt(u32, @intCast(data.len), .little);
|
|
try w.writeAll(data);
|
|
}
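// Overall layout of a serialized bundler failure as produced by `initFromLog`
// above (all integers little-endian; the matching readers live in
// ./client/error.ts). "string32" means a u32 byte length followed by UTF-8:
//
//   u32       Owner.Packed
//   string32  owner display name
//   u32       message count
//   per message:
//     u8        ErrorKind (one of the bundler_log_* values)
//     string32  message text
//     u32       line, or 0 when there is no location (in which case the
//               remaining location fields are omitted)
//     u32       column, u32 length, string32 line text   (only if a location exists)
//     u32       note count
//     per note: one text + location record, same shape as above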
|
|
|
|
// fn writeJsValue(value: JSValue, global: *JSC.JSGlobalObject, w: *Writer) !void {
|
|
// if (value.isAggregateError(global)) {
|
|
// //
|
|
// }
|
|
// if (value.jsType() == .DOMWrapper) {
|
|
// if (value.as(JSC.BuildMessage)) |build_error| {
|
|
// _ = build_error; // autofix
|
|
// //
|
|
// } else if (value.as(JSC.ResolveMessage)) |resolve_error| {
|
|
// _ = resolve_error; // autofix
|
|
// @panic("TODO");
|
|
// }
|
|
// }
|
|
// _ = w; // autofix
|
|
|
|
// @panic("TODO");
|
|
// }
|
|
};
|
|
|
|
// For debugging, it is helpful to be able to see bundles.
|
|
fn dumpBundle(dump_dir: std.fs.Dir, side: bake.Graph, rel_path: []const u8, chunk: []const u8, wrap: bool) !void {
|
|
const name = bun.path.joinAbsString("/", &.{
|
|
@tagName(side),
|
|
rel_path,
|
|
}, .auto)[1..];
|
|
var inner_dir = try dump_dir.makeOpenPath(bun.Dirname.dirname(u8, name).?, .{});
|
|
defer inner_dir.close();
|
|
|
|
const file = try inner_dir.createFile(bun.path.basename(name), .{});
|
|
defer file.close();
|
|
|
|
var bufw = std.io.bufferedWriter(file.writer());
|
|
|
|
try bufw.writer().print("// {s} bundled for {s}\n", .{
|
|
bun.fmt.quote(rel_path),
|
|
@tagName(side),
|
|
});
|
|
try bufw.writer().print("// Bundled at {d}, Bun " ++ bun.Global.package_json_version_with_canary ++ "\n", .{
|
|
std.time.nanoTimestamp(),
|
|
});
|
|
|
|
// Wrap in an object to make it valid syntax. Regardless, these files
|
|
// are never executable on their own as they contain only a single module.
|
|
|
|
if (wrap)
|
|
try bufw.writer().writeAll("({\n");
|
|
|
|
try bufw.writer().writeAll(chunk);
|
|
|
|
if (wrap)
|
|
try bufw.writer().writeAll("});\n");
|
|
|
|
try bufw.flush();
|
|
}
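// For instance, with a dump dir of ".bake-debug" (illustrative) and a client
// chunk for "src/App.tsx", this writes ".bake-debug/client/src/App.tsx":
// a two-line header comment, then the chunk, wrapped in `({ ... });` when
// `wrap` is set so the file stays syntactically valid.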

fn emitVisualizerMessageIfNeeded(dev: *DevServer) !void {
    if (!bun.FeatureFlags.bake_debugging_features) return;
    if (dev.emit_visualizer_events == 0) return;

    var sfb = std.heap.stackFallback(65536, bun.default_allocator);
    var payload = try std.ArrayList(u8).initCapacity(sfb.get(), 65536);
    defer payload.deinit();

    try dev.writeVisualizerMessage(&payload);

    dev.publish(HmrSocket.visualizer_topic, payload.items, .binary);
}

fn writeVisualizerMessage(dev: *DevServer, payload: *std.ArrayList(u8)) !void {
    payload.appendAssumeCapacity(MessageId.visualizer.char());
    const w = payload.writer();

    inline for (
        [2]bake.Side{ .client, .server },
        .{ &dev.client_graph, &dev.server_graph },
    ) |side, g| {
        try w.writeInt(u32, @intCast(g.bundled_files.count()), .little);
        for (
            g.bundled_files.keys(),
            g.bundled_files.values(),
            0..,
        ) |k, v, i| {
            const normalized_key = dev.relativePath(k);
            try w.writeInt(u32, @intCast(normalized_key.len), .little);
            if (k.len == 0) continue;
            try w.writeAll(normalized_key);
            try w.writeByte(@intFromBool(g.stale_files.isSet(i) or switch (side) {
                .server => v.failed,
                .client => v.flags.failed,
            }));
            try w.writeByte(@intFromBool(side == .server and v.is_rsc));
            try w.writeByte(@intFromBool(side == .server and v.is_ssr));
            try w.writeByte(@intFromBool(side == .server and v.is_route));
            try w.writeByte(@intFromBool(side == .client and v.flags.is_special_framework_file));
            try w.writeByte(@intFromBool(switch (side) {
                .server => v.is_client_component_boundary,
                .client => v.flags.is_hmr_root,
            }));
        }
    }
    inline for (.{ &dev.client_graph, &dev.server_graph }) |g| {
        const G = @TypeOf(g.*);

        try w.writeInt(u32, @intCast(g.edges.items.len - g.edges_free_list.items.len), .little);
        for (g.edges.items, 0..) |edge, i| {
            if (std.mem.indexOfScalar(G.EdgeIndex, g.edges_free_list.items, G.EdgeIndex.init(@intCast(i))) != null)
                continue;

            try w.writeInt(u32, @intCast(edge.dependency.get()), .little);
            try w.writeInt(u32, @intCast(edge.imported.get()), .little);
        }
    }
}
pub fn onWebSocketUpgrade(
    dev: *DevServer,
    res: *Response,
    req: *Request,
    upgrade_ctx: *uws.uws_socket_context_t,
    id: usize,
) void {
    assert(id == 0);

    const dw = bun.create(dev.allocator, HmrSocket, .{
        .dev = dev,
        .emit_visualizer_events = false,
    });
    res.upgrade(
        *HmrSocket,
        dw,
        req.header("sec-websocket-key") orelse "",
        req.header("sec-websocket-protocol") orelse "",
        req.header("sec-websocket-extension") orelse "",
        upgrade_ctx,
    );
}
/// Every message uses the `.binary`/`ArrayBuffer` transport mode. The first byte
/// indicates a Message ID; see comments on each type for how to interpret the rest.
///
/// This format is only intended for communication between the browser build of
/// `hmr-runtime.ts` and `DevServer.zig`. Server-side HMR is implemented using a
/// different interface. This documentation is aimed at contributors to these two
/// components; any other use-case is unsupported.
///
/// All integers are sent in little-endian.
pub const MessageId = enum(u8) {
    /// Version payload. Sent on connection startup. The client should issue a
    /// hard-reload when it mismatches with its `config.version`.
    version = 'V',
    /// Sent on a successful bundle, containing client code and changed CSS files.
    ///
    /// - `u32`: Number of CSS updates. For Each:
    ///   - `[16]u8` ASCII: CSS identifier (hash of source path)
    ///   - `u32`: Length of CSS code
    ///   - `[n]u8` UTF-8: CSS payload
    /// - `[n]u8` UTF-8: JS payload. No length; the rest of the buffer is text.
    ///
    /// The JS payload will be code to hand to `eval`.
    // TODO: the above structure does not consider CSS attachments/detachments
    hot_update = 'u',
    /// Sent on a successful bundle, containing a list of routes that have
    /// server changes. This is not sent when only client code changes.
    ///
    /// - `u32`: Number of updated routes.
    /// - For each route:
    ///   - `u32`: Route ID
    ///   - `u32`: Length of route pattern
    ///   - `[n]u8` UTF-8: Route pattern
    ///
    /// The HMR runtime contains code that performs route matching at runtime
    /// against `location.pathname`. The server is unaware of the client's
    /// routing state.
    route_update = 'R',
    /// Sent when the list of errors changes.
    ///
    /// - `u32`: Removed errors. For Each:
    ///   - `u32`: Error owner
    /// - Remainder are added errors. For Each:
    ///   - `SerializedFailure`: Error Data
    errors = 'E',
    /// Sent when all errors are cleared.
    // TODO: Remove this message ID
    errors_cleared = 'c',
    /// Payload for `incremental_visualizer.html`. This can be accessed via
    /// `/_bun/incremental_visualizer`. This contains both graphs.
    ///
    /// - `u32`: Number of files in `client_graph`. For Each:
    ///   - `u32`: Length of name. If zero, then no other fields are provided.
    ///   - `[n]u8`: File path in UTF-8 encoded text
    ///   - `u8`: If file is stale, set 1
    ///   - `u8`: If file is in server graph, set 1
    ///   - `u8`: If file is in ssr graph, set 1
    ///   - `u8`: If file is a server-side route root, set 1
    ///   - `u8`: If file is a server-side component boundary file, set 1
    /// - `u32`: Number of files in the server graph. For Each:
    ///   - Same fields as the client graph entries above
    /// - `u32`: Number of client edges. For Each:
    ///   - `u32`: File index of the dependency file
    ///   - `u32`: File index of the imported file
    /// - `u32`: Number of server edges. For Each:
    ///   - `u32`: File index of the dependency file
    ///   - `u32`: File index of the imported file
    visualizer = 'v',

    pub inline fn char(id: MessageId) u8 {
        return @intFromEnum(id);
    }
};
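
// Illustrative sketch only (hypothetical helper, not referenced anywhere): the
// simplest message, `version`, is framed as one MessageId byte followed by the
// 16-byte configuration hash key. Compare with `HmrSocket.onOpen` below, which
// sends exactly this.
fn exampleVersionMessage(configuration_hash_key: [16]u8) [17]u8 {
    var msg: [17]u8 = undefined;
    msg[0] = MessageId.version.char();
    @memcpy(msg[1..], &configuration_hash_key);
    return msg;
}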

pub const IncomingMessageId = enum(u8) {
    /// Subscribe to `.visualizer` events. No payload.
    visualizer = 'v',
    /// Invalid data
    _,
};

const HmrSocket = struct {
    dev: *DevServer,
    emit_visualizer_events: bool,

    pub const global_topic = "*";
    pub const visualizer_topic = "v";

    pub fn onOpen(s: *HmrSocket, ws: AnyWebSocket) void {
        _ = ws.send(&(.{MessageId.version.char()} ++ s.dev.configuration_hash_key), .binary, false, true);
        _ = ws.subscribe(global_topic);
    }

    pub fn onMessage(s: *HmrSocket, ws: AnyWebSocket, msg: []const u8, opcode: uws.Opcode) void {
        _ = opcode;

        if (msg.len == 0) {
            ws.close();
            return;
        }

        switch (@as(IncomingMessageId, @enumFromInt(msg[0]))) {
            .visualizer => {
                if (!s.emit_visualizer_events) {
                    s.emit_visualizer_events = true;
                    s.dev.emit_visualizer_events += 1;
                    _ = ws.subscribe(visualizer_topic);
                    s.dev.emitVisualizerMessageIfNeeded() catch bun.outOfMemory();
                }
            },
            else => {
                ws.close();
            },
        }
    }

    pub fn onClose(s: *HmrSocket, ws: AnyWebSocket, exit_code: i32, message: []const u8) void {
        _ = ws;
        _ = exit_code;
        _ = message;

        if (s.emit_visualizer_events) {
            s.dev.emit_visualizer_events -= 1;
        }

        defer s.dev.allocator.destroy(s);
    }
};

const c = struct {
    // BakeSourceProvider.cpp
    extern fn BakeGetDefaultExportFromModule(global: *JSC.JSGlobalObject, module: JSValue) JSValue;

    fn BakeLoadServerHmrPatch(global: *JSC.JSGlobalObject, code: bun.String) !JSValue {
        const f = @extern(
            *const fn (*JSC.JSGlobalObject, bun.String) callconv(.C) JSValue.MaybeException,
            .{ .name = "BakeLoadServerHmrPatch" },
        );
        return f(global, code).unwrap();
    }

    fn BakeLoadInitialServerCode(global: *JSC.JSGlobalObject, code: bun.String, separate_ssr_graph: bool) bun.JSError!JSValue {
        const f = @extern(*const fn (*JSC.JSGlobalObject, bun.String, bool) callconv(.C) JSValue.MaybeException, .{
            .name = "BakeLoadInitialServerCode",
        });
        return f(global, code, separate_ssr_graph).unwrap();
    }
};

/// Called on DevServer thread via HotReloadTask
pub fn reload(dev: *DevServer, reload_task: *HotReloadTask) bun.OOM!void {
    defer reload_task.files.clearRetainingCapacity();

    const changed_file_paths = reload_task.files.keys();
    // TODO: check for .delete and remove items from graph. this has to be done
    // with care because some editors save by deleting and recreating the file.
    // delete events are not to be trusted at face value. also, merging of
    // events can cause .write and .delete to be true at the same time.
    const changed_file_attributes = reload_task.files.values();
    _ = changed_file_attributes;

    var timer = std.time.Timer.start() catch
        @panic("timers unsupported");

    var sfb = std.heap.stackFallback(4096, bun.default_allocator);
    var temp_alloc = sfb.get();

    // pre-allocate a few files' worth of strings. it is unlikely but supported
    // to change more than 8 files in the same bundling round.
    var files = std.ArrayList(BakeEntryPoint).initCapacity(temp_alloc, 8) catch unreachable;
    defer files.deinit();

    {
        dev.graph_safety_lock.lock();
        defer dev.graph_safety_lock.unlock();

        inline for (.{ &dev.server_graph, &dev.client_graph }) |g| {
            g.invalidate(changed_file_paths, &files) catch bun.outOfMemory();
        }
    }

    if (files.items.len == 0) {
        Output.debugWarn("nothing to bundle?? this is a bug?", .{});
        return;
    }

    dev.incremental_result.reset();
    defer {
        // Remove files in descending index order, so that removing one file
        // cannot invalidate the index of another file still pending removal.
        std.sort.pdq(
            IncrementalGraph(.client).FileIndex,
            dev.incremental_result.delete_client_files_later.items,
            {},
            IncrementalGraph(.client).FileIndex.sortFnDesc,
        );
        for (dev.incremental_result.delete_client_files_later.items) |client_index| {
            dev.client_graph.disconnectAndDeleteFile(client_index);
        }
        dev.incremental_result.delete_client_files_later.clearRetainingCapacity();
    }

    dev.bundle(files.items) catch |err| {
        bun.handleErrorReturnTrace(err, @errorReturnTrace());
        return;
    };

    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();

    // This list of routes affected excludes client code. This means changing
    // a client component won't count as a route to trigger a reload on.
    //
    // A second trace is required to determine what routes had changed bundles,
    // since changing a layout affects all child routes. Additionally, routes
    // that do not have a bundle will not be cleared (as there is nothing to
    // clear for those).
    if (dev.incremental_result.routes_affected.items.len > 0) {
        // re-use some earlier stack memory
        files.clearAndFree();
        sfb = std.heap.stackFallback(4096, bun.default_allocator);
        temp_alloc = sfb.get();

        // A bit-set is used to avoid duplicate entries. This is not a problem
        // with `dev.incremental_result.routes_affected`
        var second_trace_result = try DynamicBitSetUnmanaged.initEmpty(temp_alloc, dev.route_bundles.items.len);
        for (dev.incremental_result.routes_affected.items) |request| {
            const route = dev.router.routePtr(request.route_index);
            if (route.bundle.unwrap()) |id| second_trace_result.set(id.get());
            if (request.should_recurse_when_visiting) {
                markAllRouteChildren(&dev.router, &second_trace_result, request.route_index);
            }
        }

        var sfb2 = std.heap.stackFallback(65536, bun.default_allocator);
        var payload = std.ArrayList(u8).initCapacity(sfb2.get(), 65536) catch
            unreachable; // enough space
        defer payload.deinit();
        payload.appendAssumeCapacity(MessageId.route_update.char());
        const w = payload.writer();
        const count = second_trace_result.count();
        assert(count > 0);
        try w.writeInt(u32, @intCast(count), .little);

        var it = second_trace_result.iterator(.{ .kind = .set });
        while (it.next()) |bundled_route_index| {
            try w.writeInt(u32, @intCast(bundled_route_index), .little);
            const pattern = dev.route_bundles.items[bundled_route_index].full_pattern;
            try w.writeInt(u32, @intCast(pattern.len), .little);
            try w.writeAll(pattern);
        }

        // Notify
        dev.publish(HmrSocket.global_topic, payload.items, .binary);
    }

    // When client component roots get updated, the `client_components_affected`
    // list contains the server side versions of these roots. These roots are
    // traced to the routes so that the client-side bundles can be properly
    // invalidated.
    if (dev.incremental_result.client_components_affected.items.len > 0) {
        dev.incremental_result.routes_affected.clearRetainingCapacity();
        dev.server_graph.affected_by_trace.setAll(false);

        var sfa_state = std.heap.stackFallback(65536, dev.allocator);
        const sfa = sfa_state.get();
        dev.server_graph.affected_by_trace = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.server_graph.bundled_files.count());
        defer dev.server_graph.affected_by_trace.deinit(sfa);

        for (dev.incremental_result.client_components_affected.items) |index| {
            try dev.server_graph.traceDependencies(index, .no_stop);
        }

        // TODO:
        // for (dev.incremental_result.routes_affected.items) |route| {
        //     // Free old bundles
        //     if (dev.routes[route.get()].client_bundle) |old| {
        //         dev.allocator.free(old);
        //     }
        //     dev.routes[route.get()].client_bundle = null;
        // }
    }

    // TODO: improve this visual feedback
    if (dev.bundling_failures.count() == 0) {
        const clear_terminal = !debug.isVisible();
        if (clear_terminal) {
            Output.flush();
            Output.disableBuffering();
            Output.resetTerminalAll();
        }

        dev.bundles_since_last_error += 1;
        if (dev.bundles_since_last_error > 1) {
            Output.prettyError("<cyan>[x{d}]<r> ", .{dev.bundles_since_last_error});
        }

        Output.prettyError("<green>Reloaded in {d}ms<r><d>:<r> {s}", .{ @divFloor(timer.read(), std.time.ns_per_ms), dev.relativePath(changed_file_paths[0]) });
        if (changed_file_paths.len > 1) {
            Output.prettyError(" <d>+ {d} more<r>", .{files.items.len - 1});
        }
        Output.prettyError("\n", .{});
        Output.flush();
    } else {}
}

fn markAllRouteChildren(router: *FrameworkRouter, bits: *DynamicBitSetUnmanaged, route_index: Route.Index) void {
    var next = router.routePtr(route_index).first_child.unwrap();
    while (next) |child_index| {
        const route = router.routePtr(child_index);
        if (route.bundle.unwrap()) |index| bits.set(index.get());
        markAllRouteChildren(router, bits, child_index);
        next = route.next_sibling.unwrap();
    }
}

pub const HotReloadTask = struct {
    /// Align to cache lines to reduce contention.
    const Aligned = struct { aligned: HotReloadTask align(std.atomic.cache_line) };

    dev: *DevServer,
    concurrent_task: JSC.ConcurrentTask = undefined,

    files: bun.StringArrayHashMapUnmanaged(Watcher.Event.Op),

    /// I am sorry.
    state: std.atomic.Value(u32),

    pub fn initEmpty(dev: *DevServer) HotReloadTask {
        return .{
            .dev = dev,
            .files = .{},
            .state = .{ .raw = 0 },
        };
    }

    pub fn append(
        task: *HotReloadTask,
        allocator: Allocator,
        file_path: []const u8,
        op: Watcher.Event.Op,
    ) void {
        const gop = task.files.getOrPut(allocator, file_path) catch bun.outOfMemory();
        if (gop.found_existing) {
            gop.value_ptr.* = gop.value_ptr.merge(op);
        } else {
            gop.value_ptr.* = op;
        }
    }

    pub fn run(initial: *HotReloadTask) void {
        debug.log("HMR Task start", .{});
        defer debug.log("HMR Task end", .{});

        // TODO: audit the atomics with this reloading strategy
        // It was not written by an expert.

        const dev = initial.dev;
        if (Environment.allow_assert) {
            assert(initial.state.load(.seq_cst) == 0);
        }

        // const start_timestamp = std.time.nanoTimestamp();
        dev.reload(initial) catch bun.outOfMemory();

        // if there was a pending run, do it now
        if (dev.watch_state.swap(0, .seq_cst) > 1) {
            // debug.log("dual event fire", .{});
            const current = if (initial == &dev.watch_events[0].aligned)
                &dev.watch_events[1].aligned
            else
                &dev.watch_events[0].aligned;
            if (current.state.swap(1, .seq_cst) == 0) {
                // debug.log("case 1 (run now)", .{});
                dev.reload(current) catch bun.outOfMemory();
                current.state.store(0, .seq_cst);
            } else {
                // Watcher will emit an event since it reads watch_state 0
                // debug.log("case 2 (run later)", .{});
            }
        }
    }
};
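
// Contributor note (informal summary of the code above and below, not a spec):
// hot reloads are double-buffered. The watcher thread (`onFileUpdate`) appends
// events into the `HotReloadTask` slot at `dev.watch_current` and increments
// `dev.watch_state`; it only enqueues a concurrent task when that counter was
// previously zero, otherwise the DevServer thread is already due to run. The
// DevServer thread (`HotReloadTask.run`) resets `watch_state` to zero after a
// reload and, if additional events were recorded meanwhile, attempts to run
// the other slot immediately. Each slot's `state` flag guards against the two
// threads touching the same slot at once. See the TODO above about auditing
// these atomics.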

/// Called on the watcher's thread; access to DevServer state is restricted.
pub fn onFileUpdate(dev: *DevServer, events: []Watcher.Event, changed_files: []?[:0]u8, watchlist: Watcher.ItemList) void {
    debug.log("onFileUpdate start", .{});
    defer debug.log("onFileUpdate end", .{});

    _ = changed_files;
    const slice = watchlist.slice();
    const file_paths = slice.items(.file_path);
    const counts = slice.items(.count);
    const kinds = slice.items(.kind);

    // TODO: audit the atomics with this reloading strategy
    // It was not written by an expert.

    // Get a Hot reload task pointer
    var ev: *HotReloadTask = &dev.watch_events[dev.watch_current].aligned;
    if (ev.state.swap(1, .seq_cst) == 1) {
        debug.log("work got stolen, must guarantee the other is free", .{});
        dev.watch_current +%= 1;
        ev = &dev.watch_events[dev.watch_current].aligned;
        bun.assert(ev.state.swap(1, .seq_cst) == 0);
    }
    defer {
        // Submit the Hot reload task for bundling
        if (ev.files.entries.len > 0) {
            const prev_state = dev.watch_state.fetchAdd(1, .seq_cst);
            ev.state.store(0, .seq_cst);
            debug.log("prev_state={d}", .{prev_state});
            if (prev_state == 0) {
                ev.concurrent_task = .{ .auto_delete = false, .next = null, .task = JSC.Task.init(ev) };
                dev.vm.event_loop.enqueueTaskConcurrent(&ev.concurrent_task);
                dev.watch_current +%= 1;
            } else {
                // DevServer thread is notified.
            }
        } else {
            ev.state.store(0, .seq_cst);
        }
    }

    defer dev.bun_watcher.flushEvictions();

    // TODO: a lot of code is missing
    // TODO: story for busting resolution cache smartly?
    for (events) |event| {
        const file_path = file_paths[event.index];
        const update_count = counts[event.index] + 1;
        counts[event.index] = update_count;
        const kind = kinds[event.index];

        debug.log("{s} change: {s} {}", .{ @tagName(kind), file_path, event.op });

        switch (kind) {
            .file => {
                if (event.op.delete or event.op.rename) {
                    dev.bun_watcher.removeAtIndex(event.index, 0, &.{}, .file);
                }

                ev.append(dev.allocator, file_path, event.op);
            },
            .directory => {
                // bust the directory cache since this directory has changed
                // TODO: correctly solve https://github.com/oven-sh/bun/issues/14913
                _ = dev.server_bundler.resolver.bustDirCache(bun.strings.withoutTrailingSlash(file_path));

                // if a directory watch exists for resolution
                // failures, check those now.
                dev.directory_watchers.lock.lock();
                defer dev.directory_watchers.lock.unlock();
                if (dev.directory_watchers.watches.getIndex(file_path)) |watcher_index| {
                    const entry = &dev.directory_watchers.watches.values()[watcher_index];
                    var new_chain: DirectoryWatchStore.Dep.Index.Optional = .none;
                    var it: ?DirectoryWatchStore.Dep.Index = entry.first_dep;

                    while (it) |index| {
                        const dep = &dev.directory_watchers.dependencies.items[index.get()];
                        it = dep.next.unwrap();
                        if ((dev.server_bundler.resolver.resolve(
                            bun.path.dirname(dep.source_file_path, .auto),
                            dep.specifier,
                            .stmt,
                        ) catch null) != null) {
                            // The resolution result is not preserved, as safely
                            // transferring it into BundleV2 is too complicated.
                            // The resolution is cached, anyway.
                            ev.append(dev.allocator, dep.source_file_path, .{ .write = true });
                            dev.directory_watchers.freeDependencyIndex(dev.allocator, index) catch bun.outOfMemory();
                        } else {
                            // rebuild a new linked list for unaffected files
                            dep.next = new_chain;
                            new_chain = index.toOptional();
                        }
                    }

                    if (new_chain.unwrap()) |new_first_dep| {
                        entry.first_dep = new_first_dep;
                    } else {
                        // no files depend on this directory anymore, so the
                        // watcher entry is freed
                        dev.directory_watchers.freeEntry(watcher_index);
                    }
                }
            },
        }
    }
}

pub fn onWatchError(_: *DevServer, err: bun.sys.Error) void {
    // TODO: how to recover? the watcher can't just ... crash????????
    Output.err(@as(bun.C.E, @enumFromInt(err.errno)), "Watcher crashed", .{});
    if (bun.Environment.isDebug) {
        bun.todoPanic(@src(), "Watcher crash", .{});
    }
}

pub fn publish(dev: *DevServer, topic: []const u8, message: []const u8, opcode: uws.Opcode) void {
    if (dev.server) |s| _ = s.publish(topic, message, opcode, false);
}

pub fn numSubscribers(dev: *DevServer, topic: []const u8) u32 {
    return if (dev.server) |s| s.numSubscribers(topic) else 0;
}

const SafeFileId = packed struct(u32) {
    side: bake.Side,
    index: u30,
    unused: enum(u1) { unused = 0 } = .unused,
};

/// Interface function for FrameworkRouter
pub fn getFileIdForRouter(dev: *DevServer, abs_path: []const u8, associated_route: Route.Index, file_kind: Route.FileKind) !OpaqueFileId {
    const index = try dev.server_graph.insertStaleExtra(abs_path, false, true);
    try dev.route_lookup.put(dev.allocator, index, .{
        .route_index = associated_route,
        .should_recurse_when_visiting = file_kind == .layout,
    });
    return toOpaqueFileId(.server, index);
}

fn toOpaqueFileId(comptime side: bake.Side, index: IncrementalGraph(side).FileIndex) OpaqueFileId {
    if (Environment.allow_assert) {
        return OpaqueFileId.init(@bitCast(SafeFileId{
            .side = side,
            .index = index.get(),
        }));
    }

    return OpaqueFileId.init(index.get());
}

fn fromOpaqueFileId(comptime side: bake.Side, id: OpaqueFileId) IncrementalGraph(side).FileIndex {
    if (Environment.allow_assert) {
        const safe: SafeFileId = @bitCast(id.get());
        assert(side == safe.side);
        return IncrementalGraph(side).FileIndex.init(safe.index);
    }
    return IncrementalGraph(side).FileIndex.init(@intCast(id.get()));
}
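
// Illustrative sketch only (hypothetical test, not part of the upstream suite):
// the intent of the `SafeFileId` packing above is that a graph file index
// survives the round-trip through `OpaqueFileId`, with the side tag verified
// in debug builds.
test "OpaqueFileId round-trips a graph file index" {
    const idx = IncrementalGraph(.client).FileIndex.init(42);
    const id = toOpaqueFileId(.client, idx);
    try std.testing.expectEqual(idx.get(), fromOpaqueFileId(.client, id).get());
}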

fn relativePath(dev: *const DevServer, path: []const u8) []const u8 {
    // TODO: windows slash normalization
    bun.assert(dev.root[dev.root.len - 1] != '/');
    if (path.len >= dev.root.len + 1 and
        path[dev.root.len] == '/' and
        bun.strings.startsWith(path, dev.root))
    {
        return path[dev.root.len + 1 ..];
    }
    return bun.path.relative(dev.root, path);
}
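
// Example (illustrative values only): with `dev.root` == "/home/me/app", the
// path "/home/me/app/src/index.ts" maps to "src/index.ts"; paths outside the
// root fall back to `bun.path.relative`.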

fn dumpStateDueToCrash(dev: *DevServer) !void {
    comptime assert(bun.FeatureFlags.bake_debugging_features);

    // being conservative about how much stuff is put on the stack.
    var filepath_buf: [@min(4096, bun.MAX_PATH_BYTES)]u8 = undefined;
    const filepath = std.fmt.bufPrintZ(&filepath_buf, "incremental-graph-crash-dump.{d}.html", .{std.time.timestamp()}) catch "incremental-graph-crash-dump.html";
    const file = std.fs.cwd().createFileZ(filepath, .{}) catch |err| {
        bun.handleErrorReturnTrace(err, @errorReturnTrace());
        Output.warn("Could not open directory for dumping sources: {}", .{err});
        return;
    };
    defer file.close();

    const start, const end = comptime brk: {
        const visualizer = @embedFile("incremental_visualizer.html");
        const i = (std.mem.indexOf(u8, visualizer, "<script>") orelse unreachable) + "<script>".len;
        break :brk .{ visualizer[0..i], visualizer[i..] };
    };
    try file.writeAll(start);
    try file.writeAll("\nlet inlinedData = Uint8Array.from(atob(\"");

    var sfb = std.heap.stackFallback(4096, bun.default_allocator);
    var payload = try std.ArrayList(u8).initCapacity(sfb.get(), 4096);
    defer payload.deinit();
    try dev.writeVisualizerMessage(&payload);

    var buf: [bun.base64.encodeLenFromSize(4096)]u8 = undefined;
    var it = std.mem.window(u8, payload.items, 4096, 4096);
    while (it.next()) |chunk| {
        try file.writeAll(buf[0..bun.base64.encode(&buf, chunk)]);
    }

    try file.writeAll("\"), c => c.charCodeAt(0));\n");
    try file.writeAll(end);

    Output.note("Dumped incremental bundler graph to {}", .{bun.fmt.quote(filepath)});
}

// const RouteIndexAndRecurseFlag = packed struct(u32) {
const RouteIndexAndRecurseFlag = struct {
    route_index: Route.Index,
    /// Set true for layout
    should_recurse_when_visiting: bool,
};

const std = @import("std");
const Allocator = std.mem.Allocator;
const Mutex = std.Thread.Mutex;
const ArrayListUnmanaged = std.ArrayListUnmanaged;
const AutoArrayHashMapUnmanaged = std.AutoArrayHashMapUnmanaged;

const bun = @import("root").bun;
const Environment = bun.Environment;
const assert = bun.assert;
const DynamicBitSetUnmanaged = bun.bit_set.DynamicBitSetUnmanaged;

const bake = bun.bake;
const FrameworkRouter = bake.FrameworkRouter;
const Route = FrameworkRouter.Route;
const OpaqueFileId = FrameworkRouter.OpaqueFileId;

const Log = bun.logger.Log;
const Output = bun.Output;

const Bundler = bun.bundler.Bundler;
const BundleV2 = bun.bundle_v2.BundleV2;
const BakeEntryPoint = bun.bundle_v2.BakeEntryPoint;

const Define = bun.options.Define;
const OutputFile = bun.options.OutputFile;

const uws = bun.uws;
const App = uws.NewApp(false);
const AnyWebSocket = uws.AnyWebSocket;
const Request = uws.Request;
const Response = App.Response;

const MimeType = bun.http.MimeType;

const JSC = bun.JSC;
const Watcher = bun.JSC.Watcher;
const JSValue = JSC.JSValue;
const VirtualMachine = JSC.VirtualMachine;
const JSModuleLoader = JSC.JSModuleLoader;
const EventLoopHandle = JSC.EventLoopHandle;
const JSInternalPromise = JSC.JSInternalPromise;

const ThreadlocalArena = @import("../mimalloc_arena.zig").Arena;