//! Instance of the development server. Attaches to an instance of `Bun.serve`,
//! controlling bundler, routing, and hot module reloading.
//!
//! Reprocessing files that did not change is banned; by having perfect
//! incremental tracking over the project, editing a file's contents (aside
//! from adjusting imports) must always rebundle only that one file.
//!
//! All work is held in-memory, using manually managed data-oriented design.
//! For questions about DevServer, please consult the delusional @paperclover
pub const DevServer = @This();
|
|
pub const debug = bun.Output.Scoped(.DevServer, false);
|
|
pub const igLog = bun.Output.scoped(.IncrementalGraph, false);
|
|
const DebugHTTPServer = @import("../bun.js/api/server.zig").DebugHTTPServer;
|
|
|
|
pub const Options = struct {
|
|
/// Arena must live until DevServer.deinit()
|
|
arena: Allocator,
|
|
root: [:0]const u8,
|
|
vm: *VirtualMachine,
|
|
framework: bake.Framework,
|
|
bundler_options: bake.SplitBundlerOptions,
|
|
|
|
// Debugging features
|
|
dump_sources: ?[]const u8 = if (Environment.isDebug) ".bake-debug" else null,
|
|
dump_state_on_crash: ?bool = null,
|
|
};
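// A minimal sketch of how these options might be assembled (hypothetical
// values; `arena`, `vm`, `framework`, and `bundler_options` are assumed to be
// provided by the caller's configuration):
//
//     const dev = try DevServer.init(.{
//         .arena = arena.allocator(), // must outlive DevServer.deinit()
//         .root = "/path/to/project",
//         .vm = vm,
//         .framework = framework,
//         .bundler_options = bundler_options,
//     });
//     defer dev.deinit();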
|
|
|
|
// The fields `client_graph`, `server_graph`, `directory_watchers`, and `assets`
|
|
// all use `@fieldParentPointer` to access DevServer's state. This pattern has
|
|
// made it easier to group related fields together, but one must remember those
|
|
// structures still depend on the DevServer pointer.
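// Illustrative sketch of that pattern (not necessarily the exact helper used
// by these structures): a child field can recover its owning DevServer with
// `@fieldParentPtr`:
//
//     fn owner(graph: *IncrementalGraph(.client)) *DevServer {
//         const dev: *DevServer = @alignCast(@fieldParentPtr("client_graph", graph));
//         return dev;
//     }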
|
|
|
|
/// Used for all server-wide allocations. In debug, it is backed by a scope. Thread-safe.
|
|
allocator: Allocator,
|
|
/// All methods are no-op in release builds.
|
|
allocation_scope: AllocationScope,
|
|
/// Absolute path to project root directory. For the HMR
|
|
/// runtime, its module IDs are strings relative to this.
|
|
root: []const u8,
|
|
/// Hex string generated by hashing the framework config and bun revision.
/// Embedded in client bundles and sent when the HMR socket is opened;
/// when the value mismatches, the page is forcibly reloaded.
|
|
configuration_hash_key: [16]u8,
|
|
/// The virtual machine (global object) to execute code in.
|
|
vm: *VirtualMachine,
|
|
/// May be `null` if not attached to an HTTP server yet. When no server is
|
|
/// available, functions taking in requests and responses are unavailable.
|
|
/// However, a lot of testing in this mode is missing, so it may hit assertions.
|
|
server: ?bun.JSC.API.AnyServer,
|
|
/// Contains the tree of routes. This structure contains `FileIndex` references.
|
|
router: FrameworkRouter,
|
|
/// Every navigable route has bundling state here.
|
|
route_bundles: ArrayListUnmanaged(RouteBundle),
|
|
/// All access into IncrementalGraph is guarded by a DebugThreadLock. This is
/// only a debug assertion, as contention here is always a bug; if a bundle is
/// active and a file is changed, that change is placed into the next bundle.
|
|
graph_safety_lock: bun.DebugThreadLock,
|
|
client_graph: IncrementalGraph(.client),
|
|
server_graph: IncrementalGraph(.server),
|
|
/// State populated during bundling and hot updates. Often cleared and reused between bundles.
|
|
incremental_result: IncrementalResult,
|
|
/// Quickly retrieve a framework route's index from its entry point file. These
/// are populated as the routes are discovered. The route may not be bundled OR
/// navigable, such as in the case where a layout's index is looked up.
|
|
route_lookup: AutoArrayHashMapUnmanaged(IncrementalGraph(.server).FileIndex, RouteIndexAndRecurseFlag),
|
|
/// This acts as a duplicate of the lookup table in uws, but only for HTML routes.
/// Used to identify what route a connected WebSocket is on, so that only
/// the active pages are notified of hot updates.
|
|
html_router: HTMLRouter,
|
|
/// Assets are accessible via `/_bun/asset/<key>`
|
|
/// This store is not thread safe.
|
|
assets: Assets,
|
|
/// Similar to `assets`, specialized for the additional needs of source mappings.
|
|
source_maps: SourceMapStore,
|
|
// /// Allocations that require a reference count.
|
|
// ref_strings: RefString.Store,
|
|
/// All bundling failures are stored until a file is saved and rebuilt.
|
|
/// They are stored in the wire format the HMR runtime expects so that
|
|
/// serialization only happens once.
|
|
bundling_failures: std.ArrayHashMapUnmanaged(
|
|
SerializedFailure,
|
|
void,
|
|
SerializedFailure.ArrayHashContextViaOwner,
|
|
false,
|
|
) = .{},
|
|
/// When set, nothing is ever bundled for the server-side,
/// and DevServer acts purely as a frontend bundler.
|
|
frontend_only: bool,
|
|
/// The Plugin API is missing a way to attach filesystem watchers (addWatchFile)
|
|
/// This special case makes `bun-plugin-tailwind` work, which is a requirement
|
|
/// to ship initial incremental bundling support for HTML files.
|
|
has_tailwind_plugin_hack: ?bun.StringArrayHashMapUnmanaged(void) = null,
|
|
|
|
// These values are handles to the functions in `hmr-runtime-server.ts`.
|
|
// For type definitions, see `./bake.private.d.ts`
|
|
server_fetch_function_callback: JSC.Strong,
|
|
server_register_update_callback: JSC.Strong,
|
|
|
|
// Watching
|
|
bun_watcher: *bun.Watcher,
|
|
directory_watchers: DirectoryWatchStore,
|
|
watcher_atomics: WatcherAtomics,
|
|
testing_batch_events: union(enum) {
|
|
disabled,
|
|
enable_after_bundle,
|
|
enabled: TestingBatch,
|
|
},
|
|
|
|
/// Number of bundles that have been executed. This is currently not read, but
|
|
/// will be used later to determine when to invoke graph garbage collection.
|
|
generation: usize = 0,
|
|
/// Displayed in the HMR success indicator
|
|
bundles_since_last_error: usize = 0,
|
|
|
|
framework: bake.Framework,
|
|
bundler_options: bake.SplitBundlerOptions,
|
|
// Each logical graph gets its own bundler configuration
|
|
server_transpiler: Transpiler,
|
|
client_transpiler: Transpiler,
|
|
ssr_transpiler: Transpiler,
|
|
/// The log used by all `server_transpiler`, `client_transpiler` and `ssr_transpiler`.
|
|
/// Note that it is rarely correct to write messages into it. Instead, associate
|
|
/// messages with the IncrementalGraph file or Route using `SerializedFailure`
|
|
log: Log,
|
|
plugin_state: enum {
|
|
/// Should ask server for plugins. Once plugins are loaded, the plugin
|
|
/// pointer is written into `server_transpiler.options.plugin`
|
|
unknown,
|
|
// These two states mean that `server.getOrLoadPlugins()` was called.
|
|
pending,
|
|
loaded,
|
|
/// Currently, this represents a degraded state where no bundle can
|
|
/// be correctly executed because the plugins did not load successfully.
|
|
err,
|
|
},
|
|
/// There is only ever one bundle executing at the same time, since all bundles
/// inevitably share state. This bundle is asynchronous, storing its state here
/// while in-flight. All allocations are held by `.bv2.graph.heap`'s arena.
|
|
current_bundle: ?struct {
|
|
bv2: *BundleV2,
|
|
/// Information BundleV2 needs to finalize the bundle
|
|
start_data: bun.bundle_v2.DevServerInput,
|
|
/// Started when the bundle was queued
|
|
timer: std.time.Timer,
|
|
/// If any files in this bundle were included due to hot-reloading, some extra
/// work must be done to inform clients to reload routes. When this is false,
/// none of the entry points have bundles yet.
|
|
had_reload_event: bool,
|
|
/// After a bundle finishes, these requests will be continued, either
|
|
/// calling their handler on success or sending the error page on failure.
|
|
/// Owned by `deferred_request_pool` in DevServer.
|
|
requests: DeferredRequest.List,
|
|
/// Resolution failures are grouped by incremental graph file index.
|
|
/// Unlike parse failures (`handleParseTaskFailure`), the resolution
|
|
/// failures can be created asynchronously, and out of order.
|
|
resolution_failure_entries: AutoArrayHashMapUnmanaged(SerializedFailure.Owner.Packed, bun.logger.Log),
|
|
},
|
|
/// When `current_bundle` is non-null and new requests to bundle come in,
/// those are held here temporarily. When the current bundle is finished,
/// this is immediately enqueued as the next bundle.
|
|
next_bundle: struct {
|
|
/// A list of `RouteBundle`s which have active requests to bundle it.
|
|
route_queue: AutoArrayHashMapUnmanaged(RouteBundle.Index, void),
|
|
/// If a reload event exists and should be drained. The information
|
|
/// for this watch event is in one of the `watch_events`
|
|
reload_event: ?*HotReloadEvent,
|
|
/// The list of requests that are blocked on this bundle.
|
|
requests: DeferredRequest.List,
|
|
},
|
|
deferred_request_pool: bun.HiveArray(DeferredRequest.Node, DeferredRequest.max_preallocated).Fallback,
|
|
/// uWS can handle closing the WebSocket connections itself.
|
|
active_websocket_connections: std.AutoHashMapUnmanaged(*HmrSocket, void),
|
|
|
|
relative_path_buf_lock: bun.DebugThreadLock,
|
|
relative_path_buf: bun.PathBuffer,
|
|
|
|
// Debugging
|
|
|
|
dump_dir: if (bun.FeatureFlags.bake_debugging_features) ?std.fs.Dir else void,
|
|
/// Reference count to number of active sockets with the visualizer enabled.
|
|
emit_visualizer_events: u32,
|
|
has_pre_crash_handler: bool,
|
|
/// Perfect incremental bundling implies that there are zero bugs in the
|
|
/// code that bundles, watches, and rebuilds routes and client side code.
|
|
///
|
|
/// More specifically, when this is false, DevServer will run a full rebundle
|
|
/// when a user force-refreshes an error page. In a perfect system, a rebundle
|
|
/// could not possibly fix the build. But since builds are NOT perfect, this
|
|
/// could very well be the case.
|
|
///
|
|
/// DISABLED in releases, ENABLED in debug.
|
|
/// Can be enabled with env var `BUN_ASSUME_PERFECT_INCREMENTAL=1`
|
|
assume_perfect_incremental_bundling: bool = false,
|
|
|
|
pub const internal_prefix = "/_bun";
|
|
/// Assets which are routed to the `Assets` storage.
|
|
pub const asset_prefix = internal_prefix ++ "/asset";
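// For example (shape inferred from `onAssetRequest` and the CSS link tags in
// `generateHTMLPayload`), an asset URL looks like:
//
//     /_bun/asset/0000000000000000.css
//
// where the 16 lowercase hex characters encode the u64 content hash used as
// the key into `Assets`.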
|
|
/// Client scripts are available at `/_bun/client/{name}-{rbi}{generation}.js`
|
|
/// where:
|
|
/// - `name` is the display name of the route, such as "index" or
|
|
/// "about". It is ignored when routing.
|
|
/// - `rbi` is the route bundle index, in padded hex (e.g. `00000001`)
|
|
/// - `generation` which is initialized to a random value. This value is
|
|
/// re-randomized whenever `client_bundle` is invalidated.
|
|
///
|
|
/// Example: `/_bun/client/index-00000000f209a20e.js`
|
|
pub const client_prefix = internal_prefix ++ "/client";
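// Worked example of how the trailing id is decoded (excerpted from
// `onJsRequest` below): the final 16 hex characters form a u64 whose low 32
// bits select the `RouteBundle.Index` and whose high 32 bits carry
// `client_script_generation`.
//
//     const id = parseHexToInt(u64, hex) orelse return notFound(resp);
//     const route_bundle_index: RouteBundle.Index = .init(@intCast(id & 0xFFFFFFFF));
//     const generation: u32 = @intCast(id >> 32);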
|
|
|
|
pub const RouteBundle = struct {
|
|
pub const Index = bun.GenericIndex(u30, RouteBundle);
|
|
|
|
server_state: State,
|
|
/// There are two distinct types of route bundles.
|
|
data: union(enum) {
|
|
/// FrameworkRouter provided route
|
|
framework: Framework,
|
|
/// HTMLBundle provided route
|
|
html: HTML,
|
|
},
|
|
/// Generated lazily when the client JS is requested.
|
|
/// Invalidated when a downstream client module updates.
|
|
client_bundle: ?*StaticRoute,
|
|
|
|
/// If the client tries to load a script with the wrong generation, it will
|
|
/// receive a bundle that instantly reloads the page, implying a bundle
|
|
/// change has occurred while fetching the script.
|
|
client_script_generation: u32,
|
|
|
|
/// Reference count of how many HmrSockets say they are on this route. This
/// allows hot-reloading events to reduce the number of times the graph
/// is traced.
|
|
active_viewers: u32,
|
|
|
|
pub const Framework = struct {
|
|
route_index: Route.Index,
|
|
|
|
/// Cached to avoid re-creating the array every request.
|
|
/// TODO: Invalidated when a layout is added or removed from this route.
|
|
cached_module_list: JSC.Strong,
|
|
/// Cached to avoid re-creating the string every request.
|
|
/// TODO: Invalidated when any client file associated with the route is updated.
|
|
cached_client_bundle_url: JSC.Strong,
|
|
/// Cached to avoid re-creating the array every request.
|
|
/// Invalidated when the list of CSS files changes.
|
|
cached_css_file_array: JSC.Strong,
|
|
|
|
/// When state == .evaluation_failure, this is populated with the route
|
|
/// evaluation error mirrored in the dev server hash map
|
|
evaluate_failure: ?SerializedFailure,
|
|
};
|
|
|
|
pub const HTML = struct {
|
|
/// DevServer increments the ref count of this bundle
|
|
html_bundle: *HTMLBundle.HTMLBundleRoute,
|
|
bundled_file: IncrementalGraph(.client).FileIndex,
|
|
/// Invalidated when the HTML file is modified, but not its imports.
/// The style tag is injected here.
|
|
script_injection_offset: ByteOffset.Optional,
|
|
/// The HTML file bundled, from the bundler.
|
|
bundled_html_text: ?[]const u8,
|
|
/// Derived from `bundled_html_text` + `client_script_generation`
|
|
/// and css information. Invalidated when:
|
|
/// - The HTML file itself is modified.
|
|
/// - The list of CSS files changes.
|
|
/// - Any downstream file is rebundled.
|
|
cached_response: ?*StaticRoute,
|
|
|
|
const ByteOffset = bun.GenericIndex(u32, u8);
|
|
};
|
|
|
|
/// A union is not used so that `bundler_failure_logs` can re-use memory, as
|
|
/// this state frequently changes between `loaded` and the failure variants.
|
|
pub const State = enum {
|
|
/// In development mode, routes are lazily built. This state implies a
|
|
/// build of this route has never been run. It is possible to bundle the
|
|
/// route entry point and still have an unqueued route if another route
|
|
/// imports this one. This state is implied if `FrameworkRouter.Route`
|
|
/// has no bundle index assigned.
|
|
unqueued,
|
|
/// A bundle associated with this route is happening
|
|
bundling,
|
|
/// This route was flagged for bundling failures. There are edge cases
|
|
/// where a route can be disconnected from its failures, so the route
|
|
/// imports has to be traced to discover if possible failures still
|
|
/// exist.
|
|
possible_bundling_failures,
|
|
/// Loading the module at runtime had a failure. The error can be
|
|
/// cleared by editing any file in the same hot-reloading boundary.
|
|
evaluation_failure,
|
|
/// Calling the request function may error, but that error will not be
/// the fault of bundling, nor would re-bundling change anything.
|
|
loaded,
|
|
};
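// Typical lifecycle, informally (derived from the states above and
// `ensureRouteIsBundled`):
//
//     unqueued -> bundling -> loaded
//                          -> possible_bundling_failures (bundler errors)
//                          -> evaluation_failure         (runtime load error)
//
// Failure states move back toward `bundling`/`loaded` once the offending
// files are edited and the route is requested again.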
|
|
|
|
pub const UnresolvedIndex = union(enum) {
|
|
/// FrameworkRouter provides a fullstack server-side route
|
|
framework: FrameworkRouter.Route.Index,
|
|
/// HTMLBundle provides a frontend-only route, SPA-style
|
|
html: *HTMLBundle.HTMLBundleRoute,
|
|
};
|
|
|
|
pub fn deinit(rb: *RouteBundle, allocator: Allocator) void {
|
|
if (rb.client_bundle) |blob| blob.deref();
|
|
switch (rb.data) {
|
|
.framework => |*fw| {
|
|
fw.cached_client_bundle_url.deinit();
|
|
fw.cached_css_file_array.deinit();
|
|
fw.cached_module_list.deinit();
|
|
},
|
|
.html => |*html| {
|
|
if (html.bundled_html_text) |text| {
|
|
allocator.free(text);
|
|
}
|
|
if (html.cached_response) |cached_response| {
|
|
cached_response.deref();
|
|
}
|
|
html.html_bundle.deref();
|
|
},
|
|
}
|
|
}
|
|
|
|
pub fn invalidateClientBundle(self: *RouteBundle) void {
|
|
if (self.client_bundle) |bundle| {
|
|
bundle.deref();
|
|
self.client_bundle = null;
|
|
}
|
|
self.client_script_generation = std.crypto.random.int(u32);
|
|
switch (self.data) {
|
|
.framework => |*fw| fw.cached_client_bundle_url.clearWithoutDeallocation(),
|
|
.html => |*html| if (html.cached_response) |cached_response| {
|
|
cached_response.deref();
|
|
html.cached_response = null;
|
|
},
|
|
}
|
|
}
|
|
|
|
pub fn memoryCost(self: *const RouteBundle) usize {
|
|
var cost: usize = @sizeOf(RouteBundle);
|
|
if (self.client_bundle) |bundle| cost += bundle.memoryCost();
|
|
switch (self.data) {
|
|
.framework => {
|
|
// the JSC.Strong children do not support memoryCost. likely not needed
|
|
// .evaluate_failure is not owned
|
|
},
|
|
.html => |*html| {
|
|
if (html.bundled_html_text) |text| cost += text.len;
|
|
if (html.cached_response) |cached_response| cost += cached_response.memoryCost();
|
|
},
|
|
}
|
|
return cost;
|
|
}
|
|
};
|
|
|
|
/// DevServer is stored on the heap, storing its allocator.
|
|
pub fn init(options: Options) bun.JSOOM!*DevServer {
|
|
const unchecked_allocator = bun.default_allocator;
|
|
bun.analytics.Features.dev_server +|= 1;
|
|
|
|
var dump_dir = if (bun.FeatureFlags.bake_debugging_features)
|
|
if (options.dump_sources) |dir|
|
|
std.fs.cwd().makeOpenPath(dir, .{}) catch |err| dir: {
|
|
bun.handleErrorReturnTrace(err, @errorReturnTrace());
|
|
Output.warn("Could not open directory for dumping sources: {}", .{err});
|
|
break :dir null;
|
|
}
|
|
else
|
|
null;
|
|
errdefer if (bun.FeatureFlags.bake_debugging_features) if (dump_dir) |*dir| dir.close();
|
|
|
|
const separate_ssr_graph = if (options.framework.server_components) |sc| sc.separate_ssr_graph else false;
|
|
|
|
const dev = bun.new(DevServer, .{
|
|
.allocator = undefined,
|
|
// 'init' is a no-op in release
|
|
.allocation_scope = AllocationScope.init(unchecked_allocator),
|
|
|
|
.root = options.root,
|
|
.vm = options.vm,
|
|
.server = null,
|
|
.directory_watchers = .empty,
|
|
.server_fetch_function_callback = .empty,
|
|
.server_register_update_callback = .empty,
|
|
.generation = 0,
|
|
.graph_safety_lock = .unlocked,
|
|
.dump_dir = dump_dir,
|
|
.framework = options.framework,
|
|
.bundler_options = options.bundler_options,
|
|
.emit_visualizer_events = 0,
|
|
.has_pre_crash_handler = bun.FeatureFlags.bake_debugging_features and
|
|
options.dump_state_on_crash orelse
|
|
bun.getRuntimeFeatureFlag("BUN_DUMP_STATE_ON_CRASH"),
|
|
.frontend_only = options.framework.file_system_router_types.len == 0,
|
|
.client_graph = .empty,
|
|
.server_graph = .empty,
|
|
.incremental_result = .empty,
|
|
.route_lookup = .empty,
|
|
.route_bundles = .empty,
|
|
.html_router = .empty,
|
|
.active_websocket_connections = .empty,
|
|
.current_bundle = null,
|
|
.next_bundle = .{
|
|
.route_queue = .empty,
|
|
.reload_event = null,
|
|
.requests = .{},
|
|
},
|
|
.assets = .{
|
|
.path_map = .empty,
|
|
.files = .empty,
|
|
.refs = .empty,
|
|
},
|
|
.source_maps = .empty,
|
|
.plugin_state = .unknown,
|
|
.bundling_failures = .{},
|
|
.assume_perfect_incremental_bundling = if (bun.Environment.isDebug)
|
|
if (bun.getenvZ("BUN_ASSUME_PERFECT_INCREMENTAL")) |env|
|
|
!bun.strings.eqlComptime(env, "0")
|
|
else
|
|
true
|
|
else
|
|
bun.getRuntimeFeatureFlag("BUN_ASSUME_PERFECT_INCREMENTAL"),
|
|
.relative_path_buf_lock = .unlocked,
|
|
.testing_batch_events = .disabled,
|
|
|
|
.server_transpiler = undefined,
|
|
.client_transpiler = undefined,
|
|
.ssr_transpiler = undefined,
|
|
.bun_watcher = undefined,
|
|
.configuration_hash_key = undefined,
|
|
.router = undefined,
|
|
.watcher_atomics = undefined,
|
|
.log = undefined,
|
|
.deferred_request_pool = undefined,
|
|
.relative_path_buf = undefined,
|
|
});
|
|
errdefer bun.destroy(dev);
|
|
const allocator = dev.allocation_scope.allocator();
|
|
dev.allocator = allocator;
|
|
dev.log = .init(allocator);
|
|
dev.deferred_request_pool = .init(allocator);
|
|
|
|
const global = dev.vm.global;
|
|
|
|
assert(dev.server_graph.owner() == dev);
|
|
assert(dev.client_graph.owner() == dev);
|
|
assert(dev.directory_watchers.owner() == dev);
|
|
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
|
|
const generic_action = "while initializing development server";
|
|
const fs = bun.fs.FileSystem.init(options.root) catch |err|
|
|
return global.throwError(err, generic_action);
|
|
|
|
dev.bun_watcher = Watcher.init(DevServer, dev, fs, bun.default_allocator) catch |err|
|
|
return global.throwError(err, "while initializing file watcher for development server");
|
|
|
|
errdefer dev.bun_watcher.deinit(false);
|
|
dev.bun_watcher.start() catch |err|
|
|
return global.throwError(err, "while initializing file watcher thread for development server");
|
|
|
|
dev.watcher_atomics = WatcherAtomics.init(dev);
|
|
|
|
// This causes a memory leak, but the allocator is otherwise used on multiple threads.
|
|
const transpiler_allocator = bun.default_allocator;
|
|
|
|
dev.framework.initTranspiler(transpiler_allocator, &dev.log, .development, .server, &dev.server_transpiler, &dev.bundler_options.server) catch |err|
|
|
return global.throwError(err, generic_action);
|
|
dev.server_transpiler.options.dev_server = dev;
|
|
dev.framework.initTranspiler(
|
|
transpiler_allocator,
|
|
&dev.log,
|
|
.development,
|
|
.client,
|
|
&dev.client_transpiler,
|
|
&dev.bundler_options.client,
|
|
) catch |err|
|
|
return global.throwError(err, generic_action);
|
|
dev.client_transpiler.options.dev_server = dev;
|
|
|
|
dev.server_transpiler.resolver.watcher = dev.bun_watcher.getResolveWatcher();
|
|
dev.client_transpiler.resolver.watcher = dev.bun_watcher.getResolveWatcher();
|
|
|
|
if (separate_ssr_graph) {
|
|
dev.framework.initTranspiler(transpiler_allocator, &dev.log, .development, .ssr, &dev.ssr_transpiler, &dev.bundler_options.ssr) catch |err|
|
|
return global.throwError(err, generic_action);
|
|
dev.ssr_transpiler.options.dev_server = dev;
|
|
dev.ssr_transpiler.resolver.watcher = dev.bun_watcher.getResolveWatcher();
|
|
}
|
|
|
|
assert(dev.server_transpiler.resolver.opts.target != .browser);
|
|
assert(dev.client_transpiler.resolver.opts.target == .browser);
|
|
|
|
dev.framework = dev.framework.resolve(&dev.server_transpiler.resolver, &dev.client_transpiler.resolver, options.arena) catch {
|
|
if (dev.framework.is_built_in_react)
|
|
try bake.Framework.addReactInstallCommandNote(&dev.log);
|
|
return global.throwValue(dev.log.toJSAggregateError(global, bun.String.static("Framework is missing required files!")));
|
|
};
|
|
|
|
errdefer dev.route_lookup.clearAndFree(allocator);
|
|
errdefer dev.client_graph.deinit(allocator);
|
|
errdefer dev.server_graph.deinit(allocator);
|
|
|
|
dev.configuration_hash_key = hash_key: {
|
|
var hash = std.hash.Wyhash.init(128);
|
|
|
|
if (bun.Environment.isDebug) {
|
|
const stat = bun.sys.stat(bun.selfExePath() catch |e|
|
|
Output.panic("unhandled {}", .{e})).unwrap() catch |e|
|
|
Output.panic("unhandled {}", .{e});
|
|
bun.writeAnyToHasher(&hash, stat.mtime());
|
|
hash.update(bake.getHmrRuntime(.client).code);
|
|
hash.update(bake.getHmrRuntime(.server).code);
|
|
} else {
|
|
hash.update(bun.Environment.git_sha_short);
|
|
}
|
|
|
|
for (dev.framework.file_system_router_types) |fsr| {
|
|
bun.writeAnyToHasher(&hash, fsr.allow_layouts);
|
|
bun.writeAnyToHasher(&hash, fsr.ignore_underscores);
|
|
hash.update(fsr.entry_server);
|
|
hash.update(&.{0});
|
|
hash.update(fsr.entry_client orelse "");
|
|
hash.update(&.{0});
|
|
hash.update(fsr.prefix);
|
|
hash.update(&.{0});
|
|
hash.update(fsr.root);
|
|
hash.update(&.{0});
|
|
for (fsr.extensions) |ext| {
|
|
hash.update(ext);
|
|
hash.update(&.{0});
|
|
}
|
|
hash.update(&.{0});
|
|
for (fsr.ignore_dirs) |dir| {
|
|
hash.update(dir);
|
|
hash.update(&.{0});
|
|
}
|
|
hash.update(&.{0});
|
|
}
|
|
|
|
if (dev.framework.server_components) |sc| {
|
|
bun.writeAnyToHasher(&hash, true);
|
|
bun.writeAnyToHasher(&hash, sc.separate_ssr_graph);
|
|
hash.update(sc.client_register_server_reference);
|
|
hash.update(&.{0});
|
|
hash.update(sc.server_register_client_reference);
|
|
hash.update(&.{0});
|
|
hash.update(sc.server_register_server_reference);
|
|
hash.update(&.{0});
|
|
hash.update(sc.server_runtime_import);
|
|
hash.update(&.{0});
|
|
} else {
|
|
bun.writeAnyToHasher(&hash, false);
|
|
}
|
|
|
|
if (dev.framework.react_fast_refresh) |rfr| {
|
|
bun.writeAnyToHasher(&hash, true);
|
|
hash.update(rfr.import_source);
|
|
} else {
|
|
bun.writeAnyToHasher(&hash, false);
|
|
}
|
|
|
|
for (dev.framework.built_in_modules.keys(), dev.framework.built_in_modules.values()) |k, v| {
|
|
hash.update(k);
|
|
hash.update(&.{0});
|
|
bun.writeAnyToHasher(&hash, std.meta.activeTag(v));
|
|
hash.update(switch (v) {
|
|
inline else => |data| data,
|
|
});
|
|
hash.update(&.{0});
|
|
}
|
|
hash.update(&.{0});
|
|
|
|
break :hash_key std.fmt.bytesToHex(std.mem.asBytes(&hash.final()), .lower);
|
|
};
|
|
|
|
// Add react fast refresh if needed. This is the first file on the client side,
|
|
// as it will be referred to by index.
|
|
if (dev.framework.react_fast_refresh) |rfr| {
|
|
assert(try dev.client_graph.insertStale(rfr.import_source, false) == IncrementalGraph(.client).react_refresh_index);
|
|
}
|
|
|
|
if (!dev.frontend_only) {
|
|
dev.initServerRuntime();
|
|
}
|
|
|
|
// Initialize FrameworkRouter
|
|
dev.router = router: {
|
|
var types = try std.ArrayListUnmanaged(FrameworkRouter.Type).initCapacity(allocator, options.framework.file_system_router_types.len);
|
|
errdefer types.deinit(allocator);
|
|
|
|
for (options.framework.file_system_router_types, 0..) |fsr, i| {
|
|
const buf = bun.PathBufferPool.get();
|
|
defer bun.PathBufferPool.put(buf);
|
|
const joined_root = bun.path.joinAbsStringBuf(dev.root, buf, &.{fsr.root}, .auto);
|
|
const entry = dev.server_transpiler.resolver.readDirInfoIgnoreError(joined_root) orelse
|
|
continue;
|
|
|
|
const server_file = try dev.server_graph.insertStaleExtra(fsr.entry_server, false, true);
|
|
|
|
try types.append(allocator, .{
|
|
.abs_root = bun.strings.withoutTrailingSlash(entry.abs_path),
|
|
.prefix = fsr.prefix,
|
|
.ignore_underscores = fsr.ignore_underscores,
|
|
.ignore_dirs = fsr.ignore_dirs,
|
|
.extensions = fsr.extensions,
|
|
.style = fsr.style,
|
|
.allow_layouts = fsr.allow_layouts,
|
|
.server_file = toOpaqueFileId(.server, server_file),
|
|
.client_file = if (fsr.entry_client) |client|
|
|
toOpaqueFileId(.client, try dev.client_graph.insertStale(client, false)).toOptional()
|
|
else
|
|
.none,
|
|
.server_file_string = .empty,
|
|
});
|
|
|
|
try dev.route_lookup.put(allocator, server_file, .{
|
|
.route_index = FrameworkRouter.Route.Index.init(@intCast(i)),
|
|
.should_recurse_when_visiting = true,
|
|
});
|
|
}
|
|
|
|
break :router try FrameworkRouter.initEmpty(dev.root, types.items, allocator);
|
|
};
|
|
|
|
// TODO: move scanning to be one tick after server startup. this way the
|
|
// line saying the server is ready shows quicker, and route errors show up
|
|
// after that line.
|
|
try dev.scanInitialRoutes();
|
|
|
|
if (bun.FeatureFlags.bake_debugging_features and dev.has_pre_crash_handler)
|
|
try bun.crash_handler.appendPreCrashHandler(DevServer, dev, dumpStateDueToCrash);
|
|
|
|
return dev;
|
|
}
|
|
|
|
pub fn deinit(dev: *DevServer) void {
|
|
dev_server_deinit_count_for_testing +|= 1;
|
|
|
|
const allocator = dev.allocator;
|
|
const discard = voidFieldTypeDiscardHelper;
|
|
_ = VoidFieldTypes(DevServer){
|
|
.root = {},
|
|
.allocator = {},
|
|
.allocation_scope = {}, // deinit at end
|
|
.configuration_hash_key = {},
|
|
.plugin_state = {},
|
|
.generation = {},
|
|
.bundles_since_last_error = {},
|
|
.emit_visualizer_events = {},
|
|
.deferred_request_pool = {},
|
|
.frontend_only = {},
|
|
.vm = {},
|
|
.server = {},
|
|
.server_transpiler = {},
|
|
.client_transpiler = {},
|
|
.ssr_transpiler = {},
|
|
.framework = {},
|
|
.bundler_options = {},
|
|
.assume_perfect_incremental_bundling = {},
|
|
.relative_path_buf = {},
|
|
.relative_path_buf_lock = {},
|
|
|
|
.graph_safety_lock = dev.graph_safety_lock.lock(),
|
|
.bun_watcher = dev.bun_watcher.deinit(true),
|
|
.dump_dir = if (bun.FeatureFlags.bake_debugging_features) if (dev.dump_dir) |*dir| dir.close(),
|
|
.log = dev.log.deinit(),
|
|
.server_fetch_function_callback = dev.server_fetch_function_callback.deinit(),
|
|
.server_register_update_callback = dev.server_register_update_callback.deinit(),
|
|
.has_pre_crash_handler = if (dev.has_pre_crash_handler)
|
|
bun.crash_handler.removePreCrashHandler(dev),
|
|
.router = {
|
|
dev.router.deinit(allocator);
|
|
},
|
|
.route_bundles = {
|
|
for (dev.route_bundles.items) |*rb| {
|
|
rb.deinit(allocator);
|
|
}
|
|
dev.route_bundles.deinit(allocator);
|
|
},
|
|
.server_graph = dev.server_graph.deinit(allocator),
|
|
.client_graph = dev.client_graph.deinit(allocator),
|
|
.assets = dev.assets.deinit(allocator),
|
|
.incremental_result = discard(VoidFieldTypes(IncrementalResult){
|
|
.had_adjusted_edges = {},
|
|
.client_components_added = dev.incremental_result.client_components_added.deinit(allocator),
|
|
.framework_routes_affected = dev.incremental_result.framework_routes_affected.deinit(allocator),
|
|
.client_components_removed = dev.incremental_result.client_components_removed.deinit(allocator),
|
|
.failures_removed = dev.incremental_result.failures_removed.deinit(allocator),
|
|
.client_components_affected = dev.incremental_result.client_components_affected.deinit(allocator),
|
|
.failures_added = dev.incremental_result.failures_added.deinit(allocator),
|
|
.html_routes_soft_affected = dev.incremental_result.html_routes_soft_affected.deinit(allocator),
|
|
.html_routes_hard_affected = dev.incremental_result.html_routes_hard_affected.deinit(allocator),
|
|
}),
|
|
.has_tailwind_plugin_hack = if (dev.has_tailwind_plugin_hack) |*hack| {
|
|
hack.deinit(allocator);
|
|
},
|
|
.directory_watchers = {
|
|
// dev.directory_watchers.dependencies
|
|
for (dev.directory_watchers.watches.keys()) |dir_name| {
|
|
allocator.free(dir_name);
|
|
}
|
|
for (dev.directory_watchers.dependencies.items) |watcher| {
|
|
allocator.free(watcher.specifier);
|
|
}
|
|
dev.directory_watchers.watches.deinit(allocator);
|
|
dev.directory_watchers.dependencies.deinit(allocator);
|
|
dev.directory_watchers.dependencies_free_list.deinit(allocator);
|
|
},
|
|
.html_router = dev.html_router.map.deinit(dev.allocator),
|
|
.bundling_failures = {
|
|
for (dev.bundling_failures.keys()) |failure| {
|
|
failure.deinit(dev);
|
|
}
|
|
dev.bundling_failures.deinit(allocator);
|
|
},
|
|
.current_bundle = {
|
|
if (dev.current_bundle) |_| {
|
|
bun.debugAssert(false); // impossible to de-initialize this state correctly.
|
|
}
|
|
},
|
|
.next_bundle = {
|
|
var r = dev.next_bundle.requests.first;
|
|
while (r) |request| {
|
|
defer dev.deferred_request_pool.put(request);
|
|
// TODO: deinitializing in this state is almost certainly an assertion failure.
|
|
// This code is shipped in release because it is only reachable by experimental server components.
|
|
bun.debugAssert(request.data.handler != .server_handler);
|
|
request.data.deinit();
|
|
r = request.next;
|
|
}
|
|
dev.next_bundle.route_queue.deinit(allocator);
|
|
},
|
|
.route_lookup = dev.route_lookup.deinit(allocator),
|
|
.source_maps = {
|
|
for (dev.source_maps.entries.values()) |value| {
|
|
allocator.free(value.sourceContents());
|
|
allocator.free(value.file_paths);
|
|
value.response.deref();
|
|
}
|
|
dev.source_maps.entries.deinit(allocator);
|
|
},
|
|
.active_websocket_connections = {
|
|
var it = dev.active_websocket_connections.keyIterator();
|
|
while (it.next()) |item| {
|
|
const s: *HmrSocket = item.*;
|
|
if (s.underlying) |websocket|
|
|
websocket.close();
|
|
}
|
|
dev.active_websocket_connections.deinit(allocator);
|
|
},
|
|
.watcher_atomics = for (&dev.watcher_atomics.events) |*event| {
|
|
event.aligned.dirs.deinit(dev.allocator);
|
|
event.aligned.files.deinit(dev.allocator);
|
|
event.aligned.extra_files.deinit(dev.allocator);
|
|
},
|
|
.testing_batch_events = switch (dev.testing_batch_events) {
|
|
.disabled => {},
|
|
.enabled => |*batch| {
|
|
batch.entry_points.deinit(allocator);
|
|
},
|
|
.enable_after_bundle => {},
|
|
},
|
|
};
|
|
dev.allocation_scope.deinit();
|
|
bun.destroy(dev);
|
|
}
|
|
|
|
/// Returns an estimation for how many bytes DevServer is explicitly aware of.
|
|
/// If this number stays constant but RSS grows, then there is a memory leak. If
|
|
/// this number grows out of control, then incremental garbage collection is not
|
|
/// good enough.
|
|
///
|
|
/// Memory measurements are important as DevServer has a long lifetime, but
|
|
/// unlike the HTTP server, it controls a lot of objects that are frequently
|
|
/// being added, removed, and changed (as the developer edits source files). It
|
|
/// is exponentially easy to mess up memory management.
|
|
pub fn memoryCost(dev: *DevServer) usize {
|
|
var cost: usize = @sizeOf(DevServer);
|
|
const discard = voidFieldTypeDiscardHelper;
|
|
// See https://github.com/ziglang/zig/issues/21879
|
|
_ = VoidFieldTypes(DevServer){
|
|
// does not contain pointers
|
|
.allocator = {},
|
|
.configuration_hash_key = {},
|
|
.graph_safety_lock = {},
|
|
.bun_watcher = {},
|
|
.watcher_atomics = {},
|
|
.plugin_state = {},
|
|
.generation = {},
|
|
.bundles_since_last_error = {},
|
|
.emit_visualizer_events = {},
|
|
.dump_dir = {},
|
|
.has_pre_crash_handler = {},
|
|
.frontend_only = {},
|
|
.server_fetch_function_callback = {},
|
|
.server_register_update_callback = {},
|
|
.deferred_request_pool = {},
|
|
.assume_perfect_incremental_bundling = {},
|
|
.relative_path_buf = {},
|
|
.relative_path_buf_lock = {},
|
|
|
|
// pointers that are not considered a part of DevServer
|
|
.vm = {},
|
|
.server = {},
|
|
.server_transpiler = {},
|
|
.client_transpiler = {},
|
|
.ssr_transpiler = {},
|
|
.log = {},
|
|
.framework = {},
|
|
.bundler_options = {},
|
|
.allocation_scope = {},
|
|
|
|
// to be counted.
|
|
.root = {
|
|
cost += dev.root.len;
|
|
},
|
|
.router = {
|
|
cost += dev.router.memoryCost();
|
|
},
|
|
.route_bundles = for (dev.route_bundles.items) |*bundle| {
|
|
cost += bundle.memoryCost();
|
|
},
|
|
.server_graph = {
|
|
cost += dev.server_graph.memoryCost();
|
|
},
|
|
.client_graph = {
|
|
cost += dev.client_graph.memoryCost();
|
|
},
|
|
.assets = {
|
|
cost += dev.assets.memoryCost();
|
|
},
|
|
.active_websocket_connections = {
|
|
cost += dev.active_websocket_connections.capacity() * @sizeOf(*HmrSocket);
|
|
},
|
|
.source_maps = {
|
|
cost += memoryCostArrayHashMap(dev.source_maps.entries);
|
|
for (dev.source_maps.entries.values()) |entry| {
|
|
cost += entry.response.memoryCost();
|
|
cost += memoryCostSlice(entry.sourceContents());
|
|
cost += memoryCostSlice(entry.file_paths); // do not re-count contents
|
|
}
|
|
},
|
|
.incremental_result = discard(VoidFieldTypes(IncrementalResult){
|
|
.had_adjusted_edges = {},
|
|
.client_components_added = {
|
|
cost += memoryCostArrayList(dev.incremental_result.client_components_added);
|
|
},
|
|
.framework_routes_affected = {
|
|
cost += memoryCostArrayList(dev.incremental_result.framework_routes_affected);
|
|
},
|
|
.client_components_removed = {
|
|
cost += memoryCostArrayList(dev.incremental_result.client_components_removed);
|
|
},
|
|
.failures_removed = {
|
|
cost += memoryCostArrayList(dev.incremental_result.failures_removed);
|
|
},
|
|
.client_components_affected = {
|
|
cost += memoryCostArrayList(dev.incremental_result.client_components_affected);
|
|
},
|
|
.failures_added = {
|
|
cost += memoryCostArrayList(dev.incremental_result.failures_added);
|
|
},
|
|
.html_routes_soft_affected = {
|
|
cost += memoryCostArrayList(dev.incremental_result.html_routes_soft_affected);
|
|
},
|
|
.html_routes_hard_affected = {
|
|
cost += memoryCostArrayList(dev.incremental_result.html_routes_hard_affected);
|
|
},
|
|
}),
|
|
.has_tailwind_plugin_hack = if (dev.has_tailwind_plugin_hack) |hack| {
|
|
cost += memoryCostArrayHashMap(hack);
|
|
},
|
|
.directory_watchers = {
|
|
cost += memoryCostArrayList(dev.directory_watchers.dependencies);
|
|
cost += memoryCostArrayList(dev.directory_watchers.dependencies_free_list);
|
|
cost += memoryCostArrayHashMap(dev.directory_watchers.watches);
|
|
for (dev.directory_watchers.dependencies.items) |dep| {
|
|
cost += dep.specifier.len;
|
|
}
|
|
for (dev.directory_watchers.watches.keys()) |dir_name| {
|
|
cost += dir_name.len;
|
|
}
|
|
},
|
|
.html_router = {
|
|
// std does not provide a way to measure exact allocation size of HashMapUnmanaged
|
|
cost += dev.html_router.map.capacity() * (@sizeOf(*HTMLBundle.HTMLBundleRoute) + @sizeOf([]const u8));
|
|
// DevServer does not count the referenced HTMLBundle.HTMLBundleRoutes
|
|
},
|
|
.bundling_failures = {
|
|
cost += memoryCostSlice(dev.bundling_failures.keys());
|
|
for (dev.bundling_failures.keys()) |failure| {
|
|
cost += failure.data.len;
|
|
}
|
|
},
|
|
// All entries are owned by the bundler arena, not DevServer, except for `requests`
|
|
.current_bundle = if (dev.current_bundle) |bundle| {
|
|
var r = bundle.requests.first;
|
|
while (r) |request| : (r = request.next) {
|
|
cost += @sizeOf(DeferredRequest.Node);
|
|
}
|
|
},
|
|
.next_bundle = {
|
|
var r = dev.next_bundle.requests.first;
|
|
while (r) |request| : (r = request.next) {
|
|
cost += @sizeOf(DeferredRequest.Node);
|
|
}
|
|
cost += memoryCostArrayHashMap(dev.next_bundle.route_queue);
|
|
},
|
|
.route_lookup = {
|
|
cost += memoryCostArrayHashMap(dev.route_lookup);
|
|
},
|
|
.testing_batch_events = switch (dev.testing_batch_events) {
|
|
.disabled => {},
|
|
.enabled => |batch| {
|
|
cost += memoryCostArrayHashMap(batch.entry_points.set);
|
|
},
|
|
.enable_after_bundle => {},
|
|
},
|
|
};
|
|
return cost;
|
|
}
|
|
|
|
fn memoryCostArrayList(slice: anytype) usize {
|
|
return slice.capacity * @sizeOf(@typeInfo(@TypeOf(slice.items)).pointer.child);
|
|
}
|
|
fn memoryCostSlice(slice: anytype) usize {
|
|
return slice.len * @sizeOf(@typeInfo(@TypeOf(slice)).pointer.child);
|
|
}
|
|
fn memoryCostArrayHashMap(map: anytype) usize {
|
|
return @TypeOf(map.entries).capacityInBytes(map.entries.capacity);
|
|
}
|
|
|
|
fn initServerRuntime(dev: *DevServer) void {
|
|
const runtime = bun.String.static(bun.bake.getHmrRuntime(.server).code);
|
|
|
|
const interface = c.BakeLoadInitialServerCode(
|
|
@ptrCast(dev.vm.global),
|
|
runtime,
|
|
if (dev.framework.server_components) |sc| sc.separate_ssr_graph else false,
|
|
) catch |err| {
|
|
dev.vm.printErrorLikeObjectToConsole(dev.vm.global.takeException(err));
|
|
@panic("Server runtime failed to start. The above error is always a bug in Bun");
|
|
};
|
|
|
|
if (!interface.isObject()) @panic("Internal assertion failure: expected interface from HMR runtime to be an object");
|
|
const fetch_function = interface.get(dev.vm.global, "handleRequest") catch null orelse
|
|
@panic("Internal assertion failure: expected interface from HMR runtime to contain handleRequest");
|
|
bun.assert(fetch_function.isCallable());
|
|
dev.server_fetch_function_callback = JSC.Strong.create(fetch_function, dev.vm.global);
|
|
const register_update = interface.get(dev.vm.global, "registerUpdate") catch null orelse
|
|
@panic("Internal assertion failure: expected interface from HMR runtime to contain registerUpdate");
|
|
dev.server_register_update_callback = JSC.Strong.create(register_update, dev.vm.global);
|
|
|
|
fetch_function.ensureStillAlive();
|
|
register_update.ensureStillAlive();
|
|
}
|
|
|
|
/// Deferred one tick so that the server can be up faster
|
|
fn scanInitialRoutes(dev: *DevServer) !void {
|
|
try dev.router.scanAll(
|
|
dev.allocator,
|
|
&dev.server_transpiler.resolver,
|
|
FrameworkRouter.InsertionContext.wrap(DevServer, dev),
|
|
);
|
|
|
|
try dev.server_graph.ensureStaleBitCapacity(true);
|
|
try dev.client_graph.ensureStaleBitCapacity(true);
|
|
}
|
|
|
|
/// Returns true if a catch-all handler was attached.
|
|
pub fn setRoutes(dev: *DevServer, server: anytype) !bool {
|
|
// TODO: all paths here must be prefixed with publicPath if set.
|
|
dev.server = bun.JSC.API.AnyServer.from(server);
|
|
const app = server.app.?;
|
|
const is_ssl = @typeInfo(@TypeOf(app)).pointer.child.is_ssl;
|
|
|
|
app.get(client_prefix ++ "/:route", *DevServer, dev, wrapGenericRequestHandler(onJsRequest, is_ssl));
|
|
app.get(asset_prefix ++ "/:asset", *DevServer, dev, wrapGenericRequestHandler(onAssetRequest, is_ssl));
|
|
app.get(internal_prefix ++ "/src/*", *DevServer, dev, wrapGenericRequestHandler(onSrcRequest, is_ssl));
|
|
app.post(internal_prefix ++ "/report_error", *DevServer, dev, wrapGenericRequestHandler(ErrorReportRequest.run, is_ssl));
|
|
|
|
app.any(internal_prefix, *DevServer, dev, wrapGenericRequestHandler(onNotFound, is_ssl));
|
|
|
|
app.ws(
|
|
internal_prefix ++ "/hmr",
|
|
dev,
|
|
0,
|
|
uws.WebSocketBehavior.Wrap(DevServer, HmrSocket, is_ssl).apply(.{}),
|
|
);
|
|
|
|
if (bun.FeatureFlags.bake_debugging_features) {
|
|
app.get(
|
|
internal_prefix ++ "/incremental_visualizer",
|
|
*DevServer,
|
|
dev,
|
|
wrapGenericRequestHandler(onIncrementalVisualizer, is_ssl),
|
|
);
|
|
app.get(
|
|
internal_prefix ++ "/iv",
|
|
*DevServer,
|
|
dev,
|
|
redirectHandler(internal_prefix ++ "/incremental_visualizer", is_ssl),
|
|
);
|
|
}
|
|
|
|
// Only attach a catch-all handler if the framework has filesystem router
|
|
// types. Otherwise, this can just be Bun.serve's default handler.
|
|
if (dev.framework.file_system_router_types.len > 0) {
|
|
app.any("/*", *DevServer, dev, wrapGenericRequestHandler(onRequest, is_ssl));
|
|
return true;
|
|
} else {
|
|
return false;
|
|
}
|
|
}
|
|
|
|
fn onNotFound(_: *DevServer, _: *Request, resp: anytype) void {
|
|
notFound(resp);
|
|
}
|
|
|
|
fn notFound(resp: anytype) void {
|
|
resp.corked(onNotFoundCorked, .{resp});
|
|
}
|
|
|
|
fn onNotFoundCorked(resp: anytype) void {
|
|
resp.writeStatus("404 Not Found");
|
|
resp.end("Not Found", false);
|
|
}
|
|
|
|
fn onOutdatedJSCorked(resp: anytype) void {
|
|
// Send a payload to instantly reload the page. This only happens when the
|
|
// client bundle is invalidated while the page is loading, aka when you
|
|
// perform many file updates that cannot be hot-updated.
|
|
resp.writeStatus("200 OK");
|
|
resp.writeHeader("Content-Type", MimeType.javascript.value);
|
|
resp.end(
|
|
\\try{location.reload()}catch(_){}
|
|
\\addEventListener("DOMContentLoaded",function(event){location.reload()})
|
|
, false);
|
|
}
|
|
|
|
fn onJsRequest(dev: *DevServer, req: *Request, resp: AnyResponse) void {
|
|
const route_id = req.parameter(0);
|
|
const is_map = bun.strings.hasSuffixComptime(route_id, ".js.map");
|
|
if (!is_map and !bun.strings.hasSuffixComptime(route_id, ".js"))
|
|
return notFound(resp);
|
|
const min_len = "00000000FFFFFFFF.js".len + (if (is_map) ".map".len else 0);
|
|
if (route_id.len < min_len)
|
|
return notFound(resp);
|
|
const hex = route_id[route_id.len - min_len ..][0 .. @sizeOf(u64) * 2];
|
|
if (hex.len != @sizeOf(u64) * 2)
|
|
return notFound(resp);
|
|
const id = parseHexToInt(u64, hex) orelse
|
|
return notFound(resp);
|
|
|
|
if (is_map) {
|
|
const entry = dev.source_maps.entries.getPtr(id) orelse
|
|
return notFound(resp);
|
|
entry.response.onRequest(req, resp);
|
|
return;
|
|
}
|
|
|
|
const route_bundle_index: RouteBundle.Index = .init(@intCast(id & 0xFFFFFFFF));
|
|
const generation: u32 = @intCast(id >> 32);
|
|
|
|
if (route_bundle_index.get() >= dev.route_bundles.items.len)
|
|
return notFound(resp);
|
|
|
|
const route_bundle = dev.route_bundles.items[route_bundle_index.get()];
|
|
if (route_bundle.client_script_generation != generation or
|
|
route_bundle.server_state != .loaded)
|
|
{
|
|
return resp.corked(onOutdatedJSCorked, .{resp});
|
|
}
|
|
|
|
dev.onJsRequestWithBundle(route_bundle_index, resp, bun.http.Method.which(req.method()) orelse .POST);
|
|
}
|
|
|
|
fn onAssetRequest(dev: *DevServer, req: *Request, resp: AnyResponse) void {
|
|
const param = req.parameter(0);
|
|
if (param.len < @sizeOf(u64) * 2)
|
|
return notFound(resp);
|
|
const hex = param[0 .. @sizeOf(u64) * 2];
|
|
var out: [@sizeOf(u64)]u8 = undefined;
|
|
assert((std.fmt.hexToBytes(&out, hex) catch
|
|
return notFound(resp)).len == @sizeOf(u64));
|
|
const hash: u64 = @bitCast(out);
|
|
debug.log("onAssetRequest {} {s}", .{ hash, param });
|
|
const asset = dev.assets.get(hash) orelse
|
|
return notFound(resp);
|
|
req.setYield(false);
|
|
asset.on(resp);
|
|
}
|
|
|
|
fn parseHexToInt(comptime T: type, slice: []const u8) ?T {
|
|
var out: [@sizeOf(T)]u8 = undefined;
|
|
assert((std.fmt.hexToBytes(&out, slice) catch return null).len == @sizeOf(T));
|
|
return @bitCast(out);
|
|
}
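// Example, assuming a little-endian target (which matches how ids are written
// with `std.fmt.fmtSliceHexLower(std.mem.asBytes(&int))` elsewhere in this file):
//
//     parseHexToInt(u32, "01000000") == 1
//     parseHexToInt(u32, "zz000000") == null // rejects invalid hex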
|
|
|
|
inline fn wrapGenericRequestHandler(
|
|
comptime handler: anytype,
|
|
comptime is_ssl: bool,
|
|
) fn (
|
|
dev: *DevServer,
|
|
req: *Request,
|
|
resp: *uws.NewApp(is_ssl).Response,
|
|
) void {
|
|
const fn_info = @typeInfo(@TypeOf(handler)).@"fn";
|
|
assert(fn_info.params.len == 3);
|
|
const uses_any_response = if (fn_info.params[2].type) |t| t == AnyResponse else false;
|
|
return struct {
|
|
fn handle(dev: *DevServer, req: *Request, resp: *uws.NewApp(is_ssl).Response) void {
|
|
handler(dev, req, if (uses_any_response) AnyResponse.init(resp) else resp);
|
|
}
|
|
}.handle;
|
|
}
|
|
|
|
inline fn redirectHandler(comptime path: []const u8, comptime is_ssl: bool) fn (
|
|
dev: *DevServer,
|
|
req: *Request,
|
|
resp: *uws.NewApp(is_ssl).Response,
|
|
) void {
|
|
return struct {
|
|
fn handle(dev: *DevServer, req: *Request, resp: *uws.NewApp(is_ssl).Response) void {
|
|
_ = dev;
|
|
_ = req;
|
|
resp.writeStatus("302 Found");
|
|
resp.writeHeader("Location", path);
|
|
resp.end("Redirecting...", false);
|
|
}
|
|
}.handle;
|
|
}
|
|
|
|
fn onIncrementalVisualizer(_: *DevServer, _: *Request, resp: anytype) void {
|
|
resp.corked(onIncrementalVisualizerCorked, .{resp});
|
|
}
|
|
|
|
fn onIncrementalVisualizerCorked(resp: anytype) void {
|
|
const code = if (Environment.codegen_embed)
|
|
@embedFile("incremental_visualizer.html")
|
|
else
|
|
bun.runtimeEmbedFile(.src_eager, "bake/incremental_visualizer.html");
|
|
resp.end(code, false);
|
|
}
|
|
|
|
fn ensureRouteIsBundled(
|
|
dev: *DevServer,
|
|
route_bundle_index: RouteBundle.Index,
|
|
kind: DeferredRequest.Handler.Kind,
|
|
req: *Request,
|
|
resp: AnyResponse,
|
|
) bun.OOM!void {
|
|
assert(dev.server != null);
|
|
sw: switch (dev.routeBundlePtr(route_bundle_index).server_state) {
|
|
.unqueued => {
|
|
if (dev.current_bundle != null) {
|
|
try dev.next_bundle.route_queue.put(dev.allocator, route_bundle_index, {});
|
|
dev.routeBundlePtr(route_bundle_index).server_state = .bundling;
|
|
try dev.deferRequest(&dev.next_bundle.requests, route_bundle_index, kind, req, resp);
|
|
} else {
|
|
// If plugins are not yet loaded, prepare them.
|
|
// In the case plugins are set to &.{}, this will not hit `.pending`.
|
|
plugin: switch (dev.plugin_state) {
|
|
.unknown => if (dev.bundler_options.plugin != null) {
|
|
// Framework-provided plugin is likely going to be phased out later
|
|
dev.plugin_state = .loaded;
|
|
} else {
|
|
// TODO: implement a proper solution here
|
|
dev.has_tailwind_plugin_hack = if (dev.vm.transpiler.options.serve_plugins) |serve_plugins|
|
|
for (serve_plugins) |plugin| {
|
|
if (bun.strings.includes(plugin, "tailwind")) break .empty;
|
|
} else null
|
|
else
|
|
null;
|
|
|
|
switch (dev.server.?.getOrLoadPlugins(.{ .dev_server = dev })) {
|
|
.pending => {
|
|
dev.plugin_state = .pending;
|
|
continue :plugin .pending;
|
|
},
|
|
.err => {
|
|
dev.plugin_state = .err;
|
|
continue :plugin .err;
|
|
},
|
|
.ready => |ready| {
|
|
dev.plugin_state = .loaded;
|
|
dev.bundler_options.plugin = ready;
|
|
},
|
|
}
|
|
},
|
|
.pending => {
|
|
try dev.next_bundle.route_queue.put(dev.allocator, route_bundle_index, {});
|
|
dev.routeBundlePtr(route_bundle_index).server_state = .bundling;
|
|
try dev.deferRequest(&dev.next_bundle.requests, route_bundle_index, kind, req, resp);
|
|
return;
|
|
},
|
|
.err => {
|
|
// TODO: render plugin error page
|
|
resp.end("Plugin Error", false);
|
|
return;
|
|
},
|
|
.loaded => {},
|
|
}
|
|
|
|
// TODO(@heimskr): store the request?
|
|
|
|
// Prepare a bundle with just this route.
|
|
var sfa = std.heap.stackFallback(4096, dev.allocator);
|
|
const temp_alloc = sfa.get();
|
|
|
|
var entry_points: EntryPointList = .empty;
|
|
defer entry_points.deinit(temp_alloc);
|
|
try dev.appendRouteEntryPointsIfNotStale(&entry_points, temp_alloc, route_bundle_index);
|
|
|
|
// If all files were already bundled (possible with layouts),
|
|
// then no entry points will be queued up here. That does
|
|
// not mean the route is ready for presentation.
|
|
if (entry_points.set.count() == 0) {
|
|
if (dev.bundling_failures.count() > 0) {
|
|
dev.routeBundlePtr(route_bundle_index).server_state = .possible_bundling_failures;
|
|
continue :sw .possible_bundling_failures;
|
|
} else {
|
|
dev.routeBundlePtr(route_bundle_index).server_state = .loaded;
|
|
continue :sw .loaded;
|
|
}
|
|
}
|
|
|
|
try dev.deferRequest(&dev.next_bundle.requests, route_bundle_index, kind, req, resp);
|
|
|
|
dev.startAsyncBundle(
|
|
entry_points,
|
|
false,
|
|
std.time.Timer.start() catch @panic("timers unsupported"),
|
|
) catch bun.outOfMemory();
|
|
}
|
|
|
|
dev.routeBundlePtr(route_bundle_index).server_state = .bundling;
|
|
},
|
|
.bundling => {
|
|
bun.assert(dev.current_bundle != null);
|
|
try dev.deferRequest(&dev.current_bundle.?.requests, route_bundle_index, kind, req, resp);
|
|
},
|
|
.possible_bundling_failures => {
|
|
if (dev.bundling_failures.count() > 0) {
|
|
// Trace the graph to see if there are any failures that are
|
|
// reachable by this route.
|
|
switch (try checkRouteFailures(dev, route_bundle_index, resp)) {
|
|
.stop => return,
|
|
.ok => {}, // Errors were cleared or not in the way.
|
|
.rebuild => continue :sw .unqueued, // Do the build all over again
|
|
}
|
|
}
|
|
|
|
dev.routeBundlePtr(route_bundle_index).server_state = .loaded;
|
|
continue :sw .loaded;
|
|
},
|
|
.evaluation_failure => {
|
|
try dev.sendSerializedFailures(
|
|
resp,
|
|
(&(dev.routeBundlePtr(route_bundle_index).data.framework.evaluate_failure.?))[0..1],
|
|
.evaluation,
|
|
);
|
|
},
|
|
.loaded => switch (kind) {
|
|
.server_handler => dev.onFrameworkRequestWithBundle(route_bundle_index, .{ .stack = req }, resp),
|
|
.bundled_html_page => dev.onHtmlRequestWithBundle(route_bundle_index, resp, bun.http.Method.which(req.method()) orelse .POST),
|
|
},
|
|
}
|
|
}
|
|
|
|
fn deferRequest(
|
|
dev: *DevServer,
|
|
requests_array: *DeferredRequest.List,
|
|
route_bundle_index: RouteBundle.Index,
|
|
kind: DeferredRequest.Handler.Kind,
|
|
req: *Request,
|
|
resp: AnyResponse,
|
|
) !void {
|
|
const deferred = dev.deferred_request_pool.get();
|
|
deferred.data = .{
|
|
.route_bundle_index = route_bundle_index,
|
|
.handler = switch (kind) {
|
|
.bundled_html_page => .{ .bundled_html_page = .{ .response = resp, .method = bun.http.Method.which(req.method()) orelse .POST } },
|
|
.server_handler => .{
|
|
.server_handler = dev.server.?.prepareAndSaveJsRequestContext(req, resp, dev.vm.global) orelse return,
|
|
},
|
|
},
|
|
};
|
|
resp.onAborted(*DeferredRequest, DeferredRequest.onAbort, &deferred.data);
|
|
requests_array.prepend(deferred);
|
|
}
|
|
|
|
fn checkRouteFailures(
|
|
dev: *DevServer,
|
|
route_bundle_index: RouteBundle.Index,
|
|
resp: anytype,
|
|
) !enum { stop, ok, rebuild } {
|
|
var sfa_state = std.heap.stackFallback(65536, dev.allocator);
|
|
const sfa = sfa_state.get();
|
|
var gts = try dev.initGraphTraceState(sfa, 0);
|
|
defer gts.deinit(sfa);
|
|
defer dev.incremental_result.failures_added.clearRetainingCapacity();
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
try dev.traceAllRouteImports(dev.routeBundlePtr(route_bundle_index), >s, .find_errors);
|
|
if (dev.incremental_result.failures_added.items.len > 0) {
|
|
// See comment on this field for information
|
|
if (!dev.assume_perfect_incremental_bundling) {
|
|
// Cache bust EVERYTHING reachable
|
|
inline for (.{
|
|
.{ .graph = &dev.client_graph, .bits = >s.client_bits },
|
|
.{ .graph = &dev.server_graph, .bits = >s.server_bits },
|
|
}) |entry| {
|
|
var it = entry.bits.iterator(.{});
|
|
while (it.next()) |file_index| {
|
|
entry.graph.stale_files.set(file_index);
|
|
}
|
|
}
|
|
return .rebuild;
|
|
}
|
|
|
|
try dev.sendSerializedFailures(
|
|
resp,
|
|
dev.incremental_result.failures_added.items,
|
|
.bundler,
|
|
);
|
|
return .stop;
|
|
} else {
|
|
// Failures are unreachable by this route, so it is OK to load.
|
|
return .ok;
|
|
}
|
|
}
|
|
|
|
fn appendRouteEntryPointsIfNotStale(dev: *DevServer, entry_points: *EntryPointList, alloc: Allocator, rbi: RouteBundle.Index) bun.OOM!void {
|
|
const server_file_names = dev.server_graph.bundled_files.keys();
|
|
const client_file_names = dev.client_graph.bundled_files.keys();
|
|
|
|
// Build a list of all files that have not yet been bundled.
|
|
switch (dev.routeBundlePtr(rbi).data) {
|
|
.framework => |*bundle| {
|
|
var route = dev.router.routePtr(bundle.route_index);
|
|
const router_type = dev.router.typePtr(route.type);
|
|
try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, router_type.server_file);
|
|
try dev.appendOpaqueEntryPoint(client_file_names, entry_points, alloc, .client, router_type.client_file);
|
|
try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, route.file_page);
|
|
try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, route.file_layout);
|
|
while (route.parent.unwrap()) |parent_index| {
|
|
route = dev.router.routePtr(parent_index);
|
|
try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, route.file_layout);
|
|
}
|
|
},
|
|
.html => |*html| {
|
|
try entry_points.append(alloc, html.html_bundle.html_bundle.path, .{ .client = true });
|
|
},
|
|
}
|
|
|
|
if (dev.has_tailwind_plugin_hack) |*map| {
|
|
for (map.keys()) |abs_path| {
|
|
const file = dev.client_graph.bundled_files.get(abs_path) orelse
|
|
continue;
|
|
if (file.flags.kind == .css)
|
|
entry_points.appendCss(alloc, abs_path) catch bun.outOfMemory();
|
|
}
|
|
}
|
|
}
|
|
|
|
fn onFrameworkRequestWithBundle(
|
|
dev: *DevServer,
|
|
route_bundle_index: RouteBundle.Index,
|
|
req: bun.JSC.API.SavedRequest.Union,
|
|
resp: AnyResponse,
|
|
) void {
|
|
const route_bundle = dev.routeBundlePtr(route_bundle_index);
|
|
assert(route_bundle.data == .framework);
|
|
const bundle = &route_bundle.data.framework;
|
|
|
|
const server_request_callback = dev.server_fetch_function_callback.get() orelse
|
|
unreachable; // did not initialize server code
|
|
|
|
const router_type = dev.router.typePtr(dev.router.routePtr(bundle.route_index).type);
|
|
|
|
dev.server.?.onRequestFromSaved(
|
|
req,
|
|
resp,
|
|
server_request_callback,
|
|
4,
|
|
.{
|
|
// routerTypeMain
|
|
router_type.server_file_string.get() orelse str: {
|
|
const name = dev.server_graph.bundled_files.keys()[fromOpaqueFileId(.server, router_type.server_file).get()];
|
|
const str = bun.String.createUTF8ForJS(dev.vm.global, dev.relativePath(name));
|
|
dev.releaseRelativePathBuf();
|
|
router_type.server_file_string = JSC.Strong.create(str, dev.vm.global);
|
|
break :str str;
|
|
},
|
|
// routeModules
|
|
bundle.cached_module_list.get() orelse arr: {
|
|
const global = dev.vm.global;
|
|
const keys = dev.server_graph.bundled_files.keys();
|
|
var n: usize = 1;
|
|
var route = dev.router.routePtr(bundle.route_index);
|
|
while (true) {
|
|
if (route.file_layout != .none) n += 1;
|
|
route = dev.router.routePtr(route.parent.unwrap() orelse break);
|
|
}
|
|
const arr = JSValue.createEmptyArray(global, n);
|
|
route = dev.router.routePtr(bundle.route_index);
|
|
var route_name = bun.String.createUTF8(dev.relativePath(keys[fromOpaqueFileId(.server, route.file_page.unwrap().?).get()]));
|
|
arr.putIndex(global, 0, route_name.transferToJS(global));
|
|
dev.releaseRelativePathBuf();
|
|
n = 1;
|
|
while (true) {
|
|
if (route.file_layout.unwrap()) |layout| {
|
|
var layout_name = bun.String.createUTF8(dev.relativePath(keys[fromOpaqueFileId(.server, layout).get()]));
|
|
defer dev.releaseRelativePathBuf();
|
|
arr.putIndex(global, @intCast(n), layout_name.transferToJS(global));
|
|
n += 1;
|
|
}
|
|
route = dev.router.routePtr(route.parent.unwrap() orelse break);
|
|
}
|
|
bundle.cached_module_list = JSC.Strong.create(arr, global);
|
|
break :arr arr;
|
|
},
|
|
// clientId
|
|
bundle.cached_client_bundle_url.get() orelse str: {
|
|
const bundle_index: u32 = route_bundle_index.get();
|
|
const generation: u32 = route_bundle.client_script_generation;
|
|
const str = bun.String.createFormat(client_prefix ++ "/route-{}{}.js", .{
|
|
std.fmt.fmtSliceHexLower(std.mem.asBytes(&bundle_index)),
|
|
std.fmt.fmtSliceHexLower(std.mem.asBytes(&generation)),
|
|
}) catch bun.outOfMemory();
|
|
defer str.deref();
|
|
const js = str.toJS(dev.vm.global);
|
|
bundle.cached_client_bundle_url = JSC.Strong.create(js, dev.vm.global);
|
|
break :str js;
|
|
},
|
|
// styles
|
|
bundle.cached_css_file_array.get() orelse arr: {
|
|
const js = dev.generateCssJSArray(route_bundle) catch bun.outOfMemory();
|
|
bundle.cached_css_file_array = JSC.Strong.create(js, dev.vm.global);
|
|
break :arr js;
|
|
},
|
|
},
|
|
);
|
|
}
|
|
|
|
fn onHtmlRequestWithBundle(dev: *DevServer, route_bundle_index: RouteBundle.Index, resp: AnyResponse, method: bun.http.Method) void {
|
|
const route_bundle = dev.routeBundlePtr(route_bundle_index);
|
|
assert(route_bundle.data == .html);
|
|
const html = &route_bundle.data.html;
|
|
const blob = html.cached_response orelse generate: {
|
|
const payload = generateHTMLPayload(dev, route_bundle_index, route_bundle, html) catch bun.outOfMemory();
|
|
errdefer dev.allocator.free(payload);
|
|
html.cached_response = StaticRoute.initFromAnyBlob(
|
|
&.fromOwnedSlice(dev.allocator, payload),
|
|
.{
|
|
.mime_type = &.html,
|
|
.server = dev.server orelse unreachable,
|
|
},
|
|
);
|
|
break :generate html.cached_response.?;
|
|
};
|
|
blob.onWithMethod(method, resp);
|
|
}
|
|
|
|
fn generateHTMLPayload(dev: *DevServer, route_bundle_index: RouteBundle.Index, route_bundle: *RouteBundle, html: *RouteBundle.HTML) bun.OOM![]u8 {
    assert(route_bundle.server_state == .loaded); // if not loaded, the following values won't be initialized
    assert(html.html_bundle.dev_server_id.unwrap() == route_bundle_index);
    assert(html.cached_response == null);
    const script_injection_offset = (html.script_injection_offset.unwrap() orelse unreachable).get();
    const bundled_html = html.bundled_html_text orelse unreachable;

    // The bundler records an offset in development mode, splitting the HTML
    // file into two chunks. DevServer is able to insert style/script tags
    // using the information available in IncrementalGraph. This approach
    // allows downstream files to update without re-bundling the HTML file.
    //
    // <!DOCTYPE html>
    // <html>
    //   <head>
    //     <title>Single Page Web App</title>
    //     {script_injection_offset}</head>
    //   <body>
    //     <div id="root"></div>
    //   </body>
    // </html>
    const before_head_end = bundled_html[0..script_injection_offset];
    const after_head_end = bundled_html[script_injection_offset..];
|
|
|
|
var display_name = bun.strings.withoutSuffixComptime(bun.path.basename(html.html_bundle.html_bundle.path), ".html");
|
|
// TODO: function for URL safe chars
|
|
if (!bun.strings.isAllASCII(display_name)) display_name = "page";
|
|
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
|
|
// Prepare bitsets for tracing
|
|
var sfa_state = std.heap.stackFallback(65536, dev.allocator);
|
|
const sfa = sfa_state.get();
|
|
var gts = try dev.initGraphTraceState(sfa, 0);
|
|
defer gts.deinit(sfa);
|
|
// Run tracing
|
|
dev.client_graph.reset();
|
|
try dev.traceAllRouteImports(route_bundle, >s, .find_css);
|
|
|
|
const css_ids = dev.client_graph.current_css_files.items;
|
|
|
|
const payload_size = bundled_html.len +
|
|
("<link rel=\"stylesheet\" href=\"" ++ asset_prefix ++ "/0000000000000000.css\">").len * css_ids.len +
|
|
"<script type=\"module\" crossorigin src=\"\"></script>".len +
|
|
client_prefix.len + "/".len +
|
|
display_name.len +
|
|
"-0000000000000000.js".len;
|
|
|
|
var array: std.ArrayListUnmanaged(u8) = try std.ArrayListUnmanaged(u8).initCapacity(dev.allocator, payload_size);
|
|
errdefer array.deinit(dev.allocator);
|
|
array.appendSliceAssumeCapacity(before_head_end);
|
|
// Insert all link tags before "</head>"
|
|
for (css_ids) |name| {
|
|
array.appendSliceAssumeCapacity("<link rel=\"stylesheet\" href=\"" ++ asset_prefix ++ "/");
|
|
array.appendSliceAssumeCapacity(&std.fmt.bytesToHex(std.mem.asBytes(&name), .lower));
|
|
array.appendSliceAssumeCapacity(".css\">");
|
|
}
|
|
|
|
array.appendSliceAssumeCapacity("<script type=\"module\" crossorigin src=\"");
|
|
array.appendSliceAssumeCapacity(client_prefix);
|
|
array.appendSliceAssumeCapacity("/");
|
|
array.appendSliceAssumeCapacity(display_name);
|
|
array.appendSliceAssumeCapacity("-");
|
|
array.appendSliceAssumeCapacity(&std.fmt.bytesToHex(std.mem.asBytes(&@as(u32, route_bundle_index.get())), .lower));
|
|
array.appendSliceAssumeCapacity(&std.fmt.bytesToHex(std.mem.asBytes(&route_bundle.client_script_generation), .lower));
|
|
array.appendSliceAssumeCapacity(".js\"></script>");
|
|
|
|
    // DevServer used to place the script tag just before </body>, but to match the regular bundler it now injects it in <head>.
|
|
array.appendSliceAssumeCapacity(after_head_end);
|
|
assert(array.items.len == array.capacity); // incorrect memory allocation size
|
|
return array.items;
|
|
}
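
// For illustration, the markup injected above looks roughly like this (hash
// and name values vary; `asset_prefix` and `client_prefix` stand for the
// DevServer URL prefixes):
//
//   <link rel="stylesheet" href="{asset_prefix}/<16 hex chars>.css">
//   <script type="module" crossorigin src="{client_prefix}/<name>-<16 hex chars>.js"></script>
//
// Both are inserted immediately before </head>, matching the regular bundler.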
|
|
|
|
fn generateJavaScriptCodeForHTMLFile(
|
|
dev: *DevServer,
|
|
index: bun.JSAst.Index,
|
|
import_records: []bun.BabyList(bun.ImportRecord),
|
|
input_file_sources: []bun.logger.Source,
|
|
loaders: []bun.options.Loader,
|
|
) bun.OOM![]const u8 {
|
|
var sfa_state = std.heap.stackFallback(65536, dev.allocator);
|
|
const sfa = sfa_state.get();
|
|
var array = std.ArrayListUnmanaged(u8).initCapacity(sfa, 65536) catch bun.outOfMemory();
|
|
defer array.deinit(sfa);
|
|
const w = array.writer(sfa);
|
|
|
|
try w.writeAll(" ");
|
|
try bun.js_printer.writeJSONString(input_file_sources[index.get()].path.pretty, @TypeOf(w), w, .utf8);
|
|
try w.writeAll(": [ [");
|
|
var any = false;
|
|
for (import_records[index.get()].slice()) |import| {
|
|
if (import.source_index.isValid()) {
|
|
if (!loaders[import.source_index.get()].isJavaScriptLike())
|
|
continue; // ignore non-JavaScript imports
|
|
} else {
|
|
// Find the in-graph import.
|
|
const file = dev.client_graph.bundled_files.get(import.path.text) orelse
|
|
continue;
|
|
if (file.flags.kind != .js)
|
|
continue;
|
|
}
|
|
if (!any) {
|
|
any = true;
|
|
try w.writeAll("\n");
|
|
}
|
|
try w.writeAll(" ");
|
|
try bun.js_printer.writeJSONString(import.path.pretty, @TypeOf(w), w, .utf8);
|
|
try w.writeAll(", 0,\n");
|
|
}
|
|
if (any) {
|
|
try w.writeAll(" ");
|
|
}
|
|
try w.writeAll("], [], [], () => {}, false],\n");
|
|
|
|
    // Avoid re-cloning if the list was already moved to the heap
|
|
return if (array.items.ptr == &sfa_state.buffer)
|
|
try dev.allocator.dupe(u8, array.items)
|
|
else
|
|
array.items;
|
|
}
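
// The fragment emitted above is one entry of the synthetic client module map
// that stands in for an HTML file. Roughly (illustrative shape only, with a
// hypothetical path and import):
//
//   "src/index.html": [ [
//     "./app.tsx", 0,
//   ], [], [], () => {}, false],
//
// i.e. a module with a list of JavaScript-like imports, no exports, and an
// empty body, which lets the HMR runtime wire up the HTML entry's dependencies.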
|
|
|
|
pub fn onJsRequestWithBundle(dev: *DevServer, bundle_index: RouteBundle.Index, resp: AnyResponse, method: bun.http.Method) void {
|
|
const route_bundle = dev.routeBundlePtr(bundle_index);
|
|
const blob = route_bundle.client_bundle orelse generate: {
|
|
const payload = dev.generateClientBundle(route_bundle) catch bun.outOfMemory();
|
|
errdefer dev.allocator.free(payload);
|
|
route_bundle.client_bundle = StaticRoute.initFromAnyBlob(
|
|
&.fromOwnedSlice(dev.allocator, payload),
|
|
.{
|
|
.mime_type = &.javascript,
|
|
.server = dev.server orelse unreachable,
|
|
},
|
|
);
|
|
break :generate route_bundle.client_bundle.?;
|
|
};
|
|
blob.onWithMethod(method, resp);
|
|
}
|
|
|
|
pub fn onSrcRequest(dev: *DevServer, req: *uws.Request, resp: anytype) void {
|
|
if (req.header("open-in-editor") == null) {
|
|
resp.writeStatus("501 Not Implemented");
|
|
resp.end("Viewing source without opening in editor is not implemented yet!", false);
|
|
return;
|
|
}
|
|
|
|
// TODO: better editor detection. on chloe's dev env, this opens apple terminal + vim
|
|
resp.writeStatus("501 Not Implemented");
|
|
resp.end("TODO", false);
|
|
_ = dev;
|
|
|
|
// const ctx = &dev.vm.rareData().editor_context;
|
|
// ctx.autoDetectEditor(JSC.VirtualMachine.get().transpiler.env);
|
|
// const line: ?[]const u8 = req.header("editor-line");
|
|
// const column: ?[]const u8 = req.header("editor-column");
|
|
|
|
// if (ctx.editor) |editor| {
|
|
// var url = req.url()[internal_prefix.len + "/src/".len ..];
|
|
// if (bun.strings.indexOfChar(url, ':')) |colon| {
|
|
// url = url[0..colon];
|
|
// }
|
|
// editor.open(ctx.path, url, line, column, dev.allocator) catch {
|
|
// resp.writeStatus("202 No Content");
|
|
// resp.end("", false);
|
|
// return;
|
|
// };
|
|
// resp.writeStatus("202 No Content");
|
|
// resp.end("", false);
|
|
// } else {
|
|
// resp.writeStatus("500 Internal Server Error");
|
|
// resp.end("Please set your editor in bunfig.toml", false);
|
|
// }
|
|
}
|
|
|
|
/// When requests are waiting on a bundle, the relevant request information is
/// prepared and stored in a linked list.
const DeferredRequest = struct {
    /// A small maximum is used because a development server is unlikely to see
    /// much load, so preallocating a large pool would be wasteful. This
    /// contributes ~6kb to the initial DevServer allocation.
    const max_preallocated = 16;
|
|
|
|
pub const List = std.SinglyLinkedList(DeferredRequest);
|
|
pub const Node = List.Node;
|
|
|
|
route_bundle_index: RouteBundle.Index,
|
|
handler: Handler,
|
|
|
|
const Handler = union(enum) {
|
|
/// For a .framework route. This says to call and render the page.
|
|
server_handler: bun.JSC.API.SavedRequest,
|
|
/// For a .html route. Serve the bundled HTML page.
|
|
bundled_html_page: ResponseAndMethod,
|
|
        /// Do nothing and free this node. To simplify lifetimes, the
        /// `DeferredRequest` is not freed upon abortion, which is okay
        /// since most requests do not abort.
        aborted,

        /// Does not include `aborted` because branching on that value
        /// has no meaningful purpose.
|
|
const Kind = enum {
|
|
server_handler,
|
|
bundled_html_page,
|
|
};
|
|
};
|
|
|
|
fn onAbort(this: *DeferredRequest, resp: AnyResponse) void {
|
|
_ = resp;
|
|
this.abort();
|
|
assert(this.handler == .aborted);
|
|
}
|
|
|
|
/// Calling this is only required if the desired handler is going to be avoided,
|
|
/// such as for bundling failures or aborting the server.
|
|
/// Does not free the underlying `DeferredRequest.Node`
|
|
fn deinit(this: *DeferredRequest) void {
|
|
switch (this.handler) {
|
|
.server_handler => |*saved| saved.deinit(),
|
|
.bundled_html_page, .aborted => {},
|
|
}
|
|
}
|
|
|
|
/// Deinitializes state by aborting the connection.
|
|
fn abort(this: *DeferredRequest) void {
|
|
switch (this.handler) {
|
|
.server_handler => |*saved| {
|
|
saved.response.endWithoutBody(true);
|
|
saved.deinit();
|
|
},
|
|
.bundled_html_page => |r| {
|
|
r.response.endWithoutBody(true);
|
|
},
|
|
.aborted => return,
|
|
}
|
|
this.handler = .aborted;
|
|
}
|
|
};
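
// Lifecycle summary for the flow visible in this file: nodes are queued onto
// `next_bundle.requests` while a route waits for a bundle, moved into
// `current_bundle.requests` by `startAsyncBundle`, and drained in
// `finalizeBundle`, where each one is either responded to or aborted and then
// returned to `deferred_request_pool`.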
|
|
|
|
const ResponseAndMethod = struct {
    response: AnyResponse,
    method: bun.http.Method,
};
|
|
|
|
fn startAsyncBundle(
|
|
dev: *DevServer,
|
|
entry_points: EntryPointList,
|
|
had_reload_event: bool,
|
|
timer: std.time.Timer,
|
|
) bun.OOM!void {
|
|
assert(dev.current_bundle == null);
|
|
assert(entry_points.set.count() > 0);
|
|
dev.log.clearAndFree();
|
|
|
|
dev.incremental_result.reset();
|
|
|
|
// Ref server to keep it from closing.
|
|
if (dev.server) |server| server.onPendingRequest();
|
|
|
|
var heap = try ThreadlocalArena.init();
|
|
errdefer heap.deinit();
|
|
const allocator = heap.allocator();
|
|
const ast_memory_allocator = try allocator.create(bun.JSAst.ASTMemoryAllocator);
|
|
ast_memory_allocator.* = .{ .allocator = allocator };
|
|
ast_memory_allocator.reset();
|
|
ast_memory_allocator.push();
|
|
|
|
const bv2 = try BundleV2.init(
|
|
&dev.server_transpiler,
|
|
.{
|
|
.framework = dev.framework,
|
|
.client_transpiler = &dev.client_transpiler,
|
|
.ssr_transpiler = &dev.ssr_transpiler,
|
|
.plugins = dev.bundler_options.plugin,
|
|
},
|
|
allocator,
|
|
.{ .js = dev.vm.eventLoop() },
|
|
false, // watching is handled separately
|
|
JSC.WorkPool.get(),
|
|
heap,
|
|
);
|
|
bv2.bun_watcher = dev.bun_watcher;
|
|
bv2.asynchronous = true;
|
|
|
|
{
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
|
|
dev.client_graph.reset();
|
|
dev.server_graph.reset();
|
|
}
|
|
|
|
const start_data = try bv2.startFromBakeDevServer(entry_points);
|
|
|
|
dev.current_bundle = .{
|
|
.bv2 = bv2,
|
|
.timer = timer,
|
|
.start_data = start_data,
|
|
.had_reload_event = had_reload_event,
|
|
.requests = dev.next_bundle.requests,
|
|
.resolution_failure_entries = .{},
|
|
};
|
|
dev.next_bundle.requests = .{};
|
|
}
|
|
|
|
fn prepareAndLogResolutionFailures(dev: *DevServer) !void {
|
|
// Since resolution failures can be asynchronous, their logs are not inserted
|
|
// until the very end.
|
|
const resolution_failures = dev.current_bundle.?.resolution_failure_entries;
|
|
if (resolution_failures.count() > 0) {
|
|
for (resolution_failures.keys(), resolution_failures.values()) |owner, *log| {
|
|
if (log.hasErrors()) {
|
|
switch (owner.decode()) {
|
|
.client => |index| try dev.client_graph.insertFailure(.index, index, log, false),
|
|
.server => |index| try dev.server_graph.insertFailure(.index, index, log, true),
|
|
.none, .route => unreachable,
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// Theoretically, it shouldn't be possible for errors to leak into dev.log, but just in
|
|
// case that happens, they can be printed out.
|
|
if (dev.log.hasErrors() and dev.log.msgs.items.len > 0) {
|
|
if (Environment.isDebug) {
|
|
Output.debugWarn("dev.log should not be written into when using DevServer", .{});
|
|
}
|
|
dev.log.print(Output.errorWriter()) catch {};
|
|
}
|
|
}
|
|
|
|
fn indexFailures(dev: *DevServer) !void {
|
|
// After inserting failures into the IncrementalGraphs, they are traced to their routes.
|
|
var sfa_state = std.heap.stackFallback(65536, dev.allocator);
|
|
const sfa = sfa_state.get();
|
|
|
|
if (dev.incremental_result.failures_added.items.len > 0) {
|
|
var total_len: usize = @sizeOf(MessageId) + @sizeOf(u32);
|
|
|
|
for (dev.incremental_result.failures_added.items) |fail| {
|
|
total_len += fail.data.len;
|
|
}
|
|
|
|
total_len += dev.incremental_result.failures_removed.items.len * @sizeOf(u32);
|
|
|
|
var gts = try dev.initGraphTraceState(sfa, 0);
|
|
defer gts.deinit(sfa);
|
|
|
|
var payload = try std.ArrayList(u8).initCapacity(sfa, total_len);
|
|
defer payload.deinit();
|
|
payload.appendAssumeCapacity(MessageId.errors.char());
|
|
const w = payload.writer();
|
|
|
|
try w.writeInt(u32, @intCast(dev.incremental_result.failures_removed.items.len), .little);
|
|
|
|
for (dev.incremental_result.failures_removed.items) |removed| {
|
|
try w.writeInt(u32, @bitCast(removed.getOwner().encode()), .little);
|
|
removed.deinit(dev);
|
|
}
|
|
|
|
for (dev.incremental_result.failures_added.items) |added| {
|
|
try w.writeAll(added.data);
|
|
|
|
switch (added.getOwner()) {
|
|
.none, .route => unreachable,
|
|
.server => |index| try dev.server_graph.traceDependencies(index, >s, .no_stop, index),
|
|
.client => |index| try dev.client_graph.traceDependencies(index, >s, .no_stop, index),
|
|
}
|
|
}
|
|
|
|
for (dev.incremental_result.framework_routes_affected.items) |entry| {
|
|
if (dev.router.routePtr(entry.route_index).bundle.unwrap()) |index| {
|
|
dev.routeBundlePtr(index).server_state = .possible_bundling_failures;
|
|
}
|
|
if (entry.should_recurse_when_visiting)
|
|
dev.markAllRouteChildrenFailed(entry.route_index);
|
|
}
|
|
|
|
for (dev.incremental_result.html_routes_soft_affected.items) |index| {
|
|
dev.routeBundlePtr(index).server_state = .possible_bundling_failures;
|
|
}
|
|
|
|
for (dev.incremental_result.html_routes_hard_affected.items) |index| {
|
|
dev.routeBundlePtr(index).server_state = .possible_bundling_failures;
|
|
}
|
|
|
|
dev.publish(.errors, payload.items, .binary);
|
|
} else if (dev.incremental_result.failures_removed.items.len > 0) {
|
|
var payload = try std.ArrayList(u8).initCapacity(sfa, @sizeOf(MessageId) + @sizeOf(u32) + dev.incremental_result.failures_removed.items.len * @sizeOf(u32));
|
|
defer payload.deinit();
|
|
payload.appendAssumeCapacity(MessageId.errors.char());
|
|
const w = payload.writer();
|
|
|
|
try w.writeInt(u32, @intCast(dev.incremental_result.failures_removed.items.len), .little);
|
|
|
|
for (dev.incremental_result.failures_removed.items) |removed| {
|
|
try w.writeInt(u32, @bitCast(removed.getOwner().encode()), .little);
|
|
removed.deinit(dev);
|
|
}
|
|
|
|
dev.publish(.errors, payload.items, .binary);
|
|
}
|
|
|
|
dev.incremental_result.failures_removed.clearRetainingCapacity();
|
|
}
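
// Rough sketch of the `errors` WebSocket message assembled above (integers are
// little-endian; the exact framing is defined by the HMR protocol):
//
//   u8   MessageId.errors
//   u32  number of removed failures
//   u32  encoded owner id, once per removed failure
//   ...  serialized failure blobs (`SerializedFailure.data`), one per added failure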
|
|
|
|
/// Used to generate the entry point. Unlike incremental patches, this always
|
|
/// contains all needed files for a route.
|
|
fn generateClientBundle(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM![]u8 {
|
|
assert(route_bundle.client_bundle == null);
|
|
assert(route_bundle.server_state == .loaded); // page is unfit to load
|
|
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
|
|
// Prepare bitsets
|
|
var sfa_state = std.heap.stackFallback(65536, dev.allocator);
|
|
const sfa = sfa_state.get();
|
|
var gts = try dev.initGraphTraceState(sfa, 0);
|
|
defer gts.deinit(sfa);
|
|
|
|
// Run tracing
|
|
dev.client_graph.reset();
|
|
try dev.traceAllRouteImports(route_bundle, >s, .find_client_modules);
|
|
|
|
var react_fast_refresh_id: []const u8 = "";
|
|
if (dev.framework.react_fast_refresh) |rfr| brk: {
|
|
const rfr_index = dev.client_graph.getFileIndex(rfr.import_source) orelse
|
|
break :brk;
|
|
if (!dev.client_graph.stale_files.isSet(rfr_index.get())) {
|
|
try dev.client_graph.traceImports(rfr_index, >s, .find_client_modules);
|
|
react_fast_refresh_id = rfr.import_source;
|
|
}
|
|
}
|
|
|
|
const client_file: ?IncrementalGraph(.client).FileIndex = switch (route_bundle.data) {
|
|
.framework => |fw| if (dev.router.typePtr(dev.router.routePtr(fw.route_index).type).client_file.unwrap()) |ofi|
|
|
fromOpaqueFileId(.client, ofi)
|
|
else
|
|
null,
|
|
.html => |html| html.bundled_file,
|
|
};
|
|
|
|
// Insert the source map
|
|
const source_map_id = @as(u64, route_bundle.client_script_generation) << 32;
|
|
if (try dev.source_maps.putOrIncrementRefCount(source_map_id, 1)) |entry| {
|
|
var arena = std.heap.ArenaAllocator.init(sfa);
|
|
defer arena.deinit();
|
|
try dev.client_graph.takeSourceMap(.initial_response, arena.allocator(), dev.allocator, entry);
|
|
}
|
|
|
|
const client_bundle = dev.client_graph.takeJSBundle(.{
|
|
.kind = .initial_response,
|
|
.initial_response_entry_point = if (client_file) |index|
|
|
dev.client_graph.bundled_files.keys()[index.get()]
|
|
else
|
|
"",
|
|
.react_refresh_entry_point = react_fast_refresh_id,
|
|
.source_map_id = source_map_id,
|
|
});
|
|
|
|
return client_bundle;
|
|
}
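
// Note on source map ids: the initial response uses `client_script_generation`
// shifted into the upper 32 bits (low bit always 0), while HMR chunks in
// `finalizeBundle` use a content hash with the low bit forced to 1, so the two
// id spaces cannot collide.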
|
|
|
|
fn generateCssJSArray(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM!JSC.JSValue {
|
|
    assert(route_bundle.data == .framework); // for HTML routes, a JSC.JSValue css list has no purpose, so it isn't implemented.
|
|
if (Environment.allow_assert) assert(!route_bundle.data.framework.cached_css_file_array.has());
|
|
assert(route_bundle.server_state == .loaded); // page is unfit to load
|
|
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
|
|
// Prepare bitsets
|
|
var sfa_state = std.heap.stackFallback(65536, dev.allocator);
|
|
|
|
const sfa = sfa_state.get();
|
|
var gts = try dev.initGraphTraceState(sfa, 0);
|
|
defer gts.deinit(sfa);
|
|
|
|
// Run tracing
|
|
dev.client_graph.reset();
|
|
try dev.traceAllRouteImports(route_bundle, >s, .find_css);
|
|
|
|
const names = dev.client_graph.current_css_files.items;
|
|
const arr = JSC.JSArray.createEmpty(dev.vm.global, names.len);
|
|
for (names, 0..) |item, i| {
|
|
var buf: [asset_prefix.len + @sizeOf(u64) * 2 + "/.css".len]u8 = undefined;
|
|
const path = std.fmt.bufPrint(&buf, asset_prefix ++ "/{s}.css", .{
|
|
&std.fmt.bytesToHex(std.mem.asBytes(&item), .lower),
|
|
}) catch unreachable;
|
|
const str = bun.String.createUTF8(path);
|
|
defer str.deref();
|
|
arr.putIndex(dev.vm.global, @intCast(i), str.toJS(dev.vm.global));
|
|
}
|
|
return arr;
|
|
}
|
|
|
|
fn traceAllRouteImports(dev: *DevServer, route_bundle: *RouteBundle, gts: *GraphTraceState, comptime goal: TraceImportGoal) !void {
|
|
switch (route_bundle.data) {
|
|
.framework => |fw| {
|
|
var route = dev.router.routePtr(fw.route_index);
|
|
const router_type = dev.router.typePtr(route.type);
|
|
|
|
// Both framework entry points are considered
|
|
try dev.server_graph.traceImports(fromOpaqueFileId(.server, router_type.server_file), gts, .find_css);
|
|
if (router_type.client_file.unwrap()) |id| {
|
|
try dev.client_graph.traceImports(fromOpaqueFileId(.client, id), gts, goal);
|
|
}
|
|
|
|
// The route file is considered
|
|
if (route.file_page.unwrap()) |id| {
|
|
try dev.server_graph.traceImports(fromOpaqueFileId(.server, id), gts, goal);
|
|
}
|
|
|
|
// For all parents, the layout is considered
|
|
while (true) {
|
|
if (route.file_layout.unwrap()) |id| {
|
|
try dev.server_graph.traceImports(fromOpaqueFileId(.server, id), gts, goal);
|
|
}
|
|
route = dev.router.routePtr(route.parent.unwrap() orelse break);
|
|
}
|
|
},
|
|
.html => |html| {
|
|
try dev.client_graph.traceImports(html.bundled_file, gts, goal);
|
|
},
|
|
}
|
|
}
|
|
|
|
fn makeArrayForServerComponentsPatch(dev: *DevServer, global: *JSC.JSGlobalObject, items: []const IncrementalGraph(.server).FileIndex) JSValue {
|
|
if (items.len == 0) return .null;
|
|
const arr = JSC.JSArray.createEmpty(global, items.len);
|
|
const names = dev.server_graph.bundled_files.keys();
|
|
for (items, 0..) |item, i| {
|
|
const str = bun.String.createUTF8(dev.relativePath(names[item.get()]));
|
|
defer dev.releaseRelativePathBuf();
|
|
defer str.deref();
|
|
arr.putIndex(global, @intCast(i), str.toJS(global));
|
|
}
|
|
return arr;
|
|
}
|
|
|
|
pub const HotUpdateContext = struct {
|
|
/// bundle_v2.Graph.input_files.items(.source)
|
|
sources: []bun.logger.Source,
|
|
/// bundle_v2.Graph.ast.items(.import_records)
|
|
import_records: []bun.ImportRecord.List,
|
|
/// bundle_v2.Graph.server_component_boundaries.slice()
|
|
scbs: bun.JSAst.ServerComponentBoundary.List.Slice,
|
|
/// bundle_v2.Graph.input_files.items(.loader)
|
|
loaders: []bun.options.Loader,
|
|
/// Which files have a server-component boundary.
|
|
server_to_client_bitset: DynamicBitSetUnmanaged,
|
|
/// Used to reduce calls to the IncrementalGraph hash table.
|
|
///
|
|
/// Caller initializes a slice with `sources.len * 2` items
|
|
/// all initialized to `std.math.maxInt(u32)`
|
|
///
|
|
/// The first half of this slice is for the client graph,
|
|
/// second half is for server. Interact with this via
|
|
/// `getCachedIndex`
|
|
resolved_index_cache: []u32,
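    // Layout sketch: with `sources.len == n`, slots [0, n) cache client graph
    // indices and slots [n, 2n) cache server graph indices, so
    // `getCachedIndex(.server, i)` reads slot `n + i.get()`.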
|
|
/// Used to tell if the server should replace or append import records.
|
|
server_seen_bit_set: DynamicBitSetUnmanaged,
|
|
gts: *GraphTraceState,
|
|
|
|
pub fn getCachedIndex(
|
|
rc: *const HotUpdateContext,
|
|
comptime side: bake.Side,
|
|
i: bun.JSAst.Index,
|
|
) *IncrementalGraph(side).FileIndex.Optional {
|
|
const start = switch (side) {
|
|
.client => 0,
|
|
.server => rc.sources.len,
|
|
};
|
|
|
|
const subslice = rc.resolved_index_cache[start..][0..rc.sources.len];
|
|
|
|
comptime assert(@alignOf(IncrementalGraph(side).FileIndex.Optional) == @alignOf(u32));
|
|
comptime assert(@sizeOf(IncrementalGraph(side).FileIndex.Optional) == @sizeOf(u32));
|
|
return @ptrCast(&subslice[i.get()]);
|
|
}
|
|
};
|
|
|
|
/// Called at the end of BundleV2 to index bundle contents into the `IncrementalGraph`s
|
|
/// This function does not recover DevServer state if it fails (allocation failure)
|
|
pub fn finalizeBundle(
|
|
dev: *DevServer,
|
|
bv2: *bun.bundle_v2.BundleV2,
|
|
result: bun.bundle_v2.DevServerOutput,
|
|
) bun.OOM!void {
|
|
var had_sent_hmr_event = false;
|
|
defer {
|
|
bv2.deinit();
|
|
dev.current_bundle = null;
|
|
|
|
dev.assets.reindexIfNeeded(dev.allocator) catch {
|
|
// not fatal: the assets may be reindexed some time later.
|
|
};
|
|
|
|
// Signal for testing framework where it is in synchronization
|
|
if (dev.testing_batch_events == .enable_after_bundle) {
|
|
dev.testing_batch_events = .{ .enabled = .empty };
|
|
dev.publish(.testing_watch_synchronization, &.{
|
|
MessageId.testing_watch_synchronization.char(),
|
|
0,
|
|
}, .binary);
|
|
} else {
|
|
dev.publish(.testing_watch_synchronization, &.{
|
|
MessageId.testing_watch_synchronization.char(),
|
|
if (had_sent_hmr_event) 4 else 3,
|
|
}, .binary);
|
|
}
|
|
|
|
dev.startNextBundleIfPresent();
|
|
|
|
// Unref the ref added in `startAsyncBundle`
|
|
if (dev.server) |server| server.onStaticRequestComplete();
|
|
}
|
|
const current_bundle = &dev.current_bundle.?;
|
|
defer {
|
|
if (current_bundle.requests.first != null) {
|
|
// cannot be an assertion because in the case of error.OutOfMemory, the request list was not drained.
|
|
Output.debug("current_bundle.requests.first != null. this leaves pending requests without an error page!", .{});
|
|
}
|
|
while (current_bundle.requests.popFirst()) |node| {
|
|
defer dev.deferred_request_pool.put(node);
|
|
const req = &node.data;
|
|
req.abort();
|
|
}
|
|
}
|
|
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
|
|
const js_chunk = result.jsPseudoChunk();
|
|
const input_file_sources = bv2.graph.input_files.items(.source);
|
|
const input_file_loaders = bv2.graph.input_files.items(.loader);
|
|
const import_records = bv2.graph.ast.items(.import_records);
|
|
const targets = bv2.graph.ast.items(.target);
|
|
const scbs = bv2.graph.server_component_boundaries.slice();
|
|
|
|
var sfa = std.heap.stackFallback(65536, bv2.graph.allocator);
|
|
const stack_alloc = sfa.get();
|
|
var scb_bitset = try bun.bit_set.DynamicBitSetUnmanaged.initEmpty(stack_alloc, input_file_sources.len);
|
|
for (
|
|
scbs.list.items(.source_index),
|
|
scbs.list.items(.ssr_source_index),
|
|
scbs.list.items(.reference_source_index),
|
|
) |source_index, ssr_index, ref_index| {
|
|
scb_bitset.set(source_index);
|
|
scb_bitset.set(ref_index);
|
|
if (ssr_index < scb_bitset.bit_length)
|
|
scb_bitset.set(ssr_index);
|
|
}
|
|
|
|
const resolved_index_cache = try bv2.graph.allocator.alloc(u32, input_file_sources.len * 2);
|
|
@memset(resolved_index_cache, @intFromEnum(IncrementalGraph(.server).FileIndex.Optional.none));
|
|
|
|
var ctx: bun.bake.DevServer.HotUpdateContext = .{
|
|
.import_records = import_records,
|
|
.sources = input_file_sources,
|
|
.loaders = input_file_loaders,
|
|
.scbs = scbs,
|
|
.server_to_client_bitset = scb_bitset,
|
|
.resolved_index_cache = resolved_index_cache,
|
|
.server_seen_bit_set = undefined,
|
|
.gts = undefined,
|
|
};
|
|
|
|
const quoted_source_contents: []const []const u8 = bv2.linker.graph.files.items(.quoted_source_contents);
|
|
// Pass 1, update the graph's nodes, resolving every bundler source
|
|
// index into its `IncrementalGraph(...).FileIndex`
|
|
for (
|
|
js_chunk.content.javascript.parts_in_chunk_in_order,
|
|
js_chunk.compile_results_for_chunk,
|
|
) |part_range, compile_result| {
|
|
const index = part_range.source_index;
|
|
const source_map: SourceMap.Chunk = compile_result.sourceMapChunk() orelse brk: {
|
|
// The source map is `null` if empty
|
|
bun.assert(compile_result.javascript.result == .result);
|
|
bun.assert(dev.server_transpiler.options.source_map != .none);
|
|
bun.assert(!part_range.source_index.isRuntime());
|
|
break :brk .empty;
|
|
};
|
|
// TODO: investigate why linker.files is not indexed by linker's index
|
|
// const linker_index = bv2.linker.graph.stable_source_indices[index.get()];
|
|
// const quoted_contents = quoted_source_contents[linker_index];
|
|
const quoted_contents = quoted_source_contents[part_range.source_index.get()];
|
|
switch (targets[part_range.source_index.get()].bakeGraph()) {
|
|
inline else => |graph| try (switch (graph) {
|
|
.client => dev.client_graph,
|
|
else => dev.server_graph,
|
|
}).receiveChunk(
|
|
&ctx,
|
|
index,
|
|
.{ .js = .{
|
|
.code = compile_result.code(),
|
|
.source_map = source_map,
|
|
.quoted_contents = .initOwned(@constCast(quoted_contents), dev.allocator),
|
|
} },
|
|
graph == .ssr,
|
|
),
|
|
}
|
|
}
|
|
|
|
for (result.cssChunks(), result.css_file_list.values()) |*chunk, metadata| {
|
|
assert(chunk.content == .css);
|
|
|
|
const index = bun.JSAst.Index.init(chunk.entry_point.source_index);
|
|
|
|
const code = try chunk.intermediate_output.code(
|
|
dev.allocator,
|
|
&bv2.graph,
|
|
&bv2.linker.graph,
|
|
"THIS_SHOULD_NEVER_BE_EMITTED_IN_DEV_MODE",
|
|
chunk,
|
|
result.chunks,
|
|
null,
|
|
false,
|
|
);
|
|
|
|
// Create an entry for this file.
|
|
const key = ctx.sources[index.get()].path.keyForIncrementalGraph();
|
|
// const hash = brk: {
|
|
// var hash: ContentHasher.Hash = .init(0x9a4e); // arbitrary seed
|
|
// hash.update(key);
|
|
// hash.update(code.buffer);
|
|
// break :brk hash.final();
|
|
// };
|
|
// TODO: use a hash mix with the first half being a path hash (to identify files) and
|
|
// the second half to be the content hash (to know if the file has changed)
|
|
const hash = bun.hash(key);
|
|
const asset_index = try dev.assets.replacePath(
|
|
key,
|
|
&.fromOwnedSlice(dev.allocator, code.buffer),
|
|
&.css,
|
|
hash,
|
|
);
|
|
// Later code needs to retrieve the CSS content
|
|
// The hack is to use `entry_point_id`, which is otherwise unused, to store an index.
|
|
chunk.entry_point.entry_point_id = asset_index.get();
|
|
|
|
// Track css files that look like tailwind files.
|
|
if (dev.has_tailwind_plugin_hack) |*map| {
|
|
const first_1024 = code.buffer[0..@min(code.buffer.len, 1024)];
|
|
if (std.mem.indexOf(u8, first_1024, "tailwind") != null) {
|
|
try map.put(dev.allocator, key, {});
|
|
} else {
|
|
_ = map.swapRemove(key);
|
|
}
|
|
}
|
|
|
|
try dev.client_graph.receiveChunk(&ctx, index, .{ .css = hash }, false);
|
|
|
|
// If imported on server, there needs to be a server-side file entry
|
|
// so that edges can be attached. When a file is only imported on
|
|
// the server, this file is used to trace the CSS to the route.
|
|
if (metadata.imported_on_server) {
|
|
try dev.server_graph.insertCssFileOnServer(
|
|
&ctx,
|
|
index,
|
|
key,
|
|
);
|
|
}
|
|
}
|
|
|
|
for (result.htmlChunks()) |*chunk| {
|
|
const index = bun.JSAst.Index.init(chunk.entry_point.source_index);
|
|
const compile_result = chunk.compile_results_for_chunk[0].html;
|
|
const generated_js = try dev.generateJavaScriptCodeForHTMLFile(
|
|
index,
|
|
import_records,
|
|
input_file_sources,
|
|
bv2.graph.input_files.items(.loader),
|
|
);
|
|
try dev.client_graph.receiveChunk(
|
|
&ctx,
|
|
index,
|
|
.{ .js = .{
|
|
.code = generated_js,
|
|
.source_map = .empty,
|
|
.quoted_contents = .empty,
|
|
} },
|
|
false,
|
|
);
|
|
const client_index = ctx.getCachedIndex(.client, index).*.unwrap() orelse @panic("unresolved index");
|
|
const route_bundle_index = dev.client_graph.htmlRouteBundleIndex(client_index);
|
|
const route_bundle = dev.routeBundlePtr(route_bundle_index);
|
|
assert(route_bundle.data.html.bundled_file == client_index);
|
|
const html = &route_bundle.data.html;
|
|
|
|
if (html.cached_response) |blob| {
|
|
blob.deref();
|
|
html.cached_response = null;
|
|
route_bundle.invalidateClientBundle();
|
|
}
|
|
if (html.bundled_html_text) |slice| {
|
|
dev.allocator.free(slice);
|
|
}
|
|
dev.allocation_scope.assertOwned(compile_result.code);
|
|
html.bundled_html_text = compile_result.code;
|
|
|
|
html.script_injection_offset = .init(compile_result.script_injection_offset);
|
|
|
|
chunk.entry_point.entry_point_id = @intCast(route_bundle_index.get());
|
|
}
|
|
|
|
var gts = try dev.initGraphTraceState(
|
|
bv2.graph.allocator,
|
|
if (result.cssChunks().len > 0) bv2.graph.input_files.len else 0,
|
|
);
|
|
defer gts.deinit(bv2.graph.allocator);
|
|
ctx.gts = >s;
|
|
ctx.server_seen_bit_set = try bun.bit_set.DynamicBitSetUnmanaged.initEmpty(bv2.graph.allocator, dev.server_graph.bundled_files.count());
|
|
|
|
dev.incremental_result.had_adjusted_edges = false;
|
|
|
|
try prepareAndLogResolutionFailures(dev);
|
|
|
|
// Pass 2, update the graph's edges by performing import diffing on each
|
|
// changed file, removing dependencies. This pass also flags what routes
|
|
// have been modified.
|
|
for (js_chunk.content.javascript.parts_in_chunk_in_order) |part_range| {
|
|
switch (targets[part_range.source_index.get()].bakeGraph()) {
|
|
.server, .ssr => try dev.server_graph.processChunkDependencies(&ctx, .normal, part_range.source_index, bv2.graph.allocator),
|
|
.client => try dev.client_graph.processChunkDependencies(&ctx, .normal, part_range.source_index, bv2.graph.allocator),
|
|
}
|
|
}
|
|
for (result.htmlChunks()) |*chunk| {
|
|
const index = bun.JSAst.Index.init(chunk.entry_point.source_index);
|
|
try dev.client_graph.processChunkDependencies(&ctx, .normal, index, bv2.graph.allocator);
|
|
}
|
|
for (result.cssChunks()) |*chunk| {
|
|
const entry_index = bun.JSAst.Index.init(chunk.entry_point.source_index);
|
|
try dev.client_graph.processChunkDependencies(&ctx, .css, entry_index, bv2.graph.allocator);
|
|
}
|
|
|
|
// Index all failed files now that the incremental graph has been updated.
|
|
if (dev.incremental_result.failures_removed.items.len > 0 or
|
|
dev.incremental_result.failures_added.items.len > 0)
|
|
{
|
|
had_sent_hmr_event = true;
|
|
}
|
|
try dev.indexFailures();
|
|
|
|
try dev.client_graph.ensureStaleBitCapacity(false);
|
|
try dev.server_graph.ensureStaleBitCapacity(false);
|
|
|
|
dev.generation +%= 1;
|
|
if (Environment.enable_logs) {
|
|
debug.log("Bundle Round {d}: {d} server, {d} client, {d} ms", .{
|
|
dev.generation,
|
|
dev.server_graph.current_chunk_parts.items.len,
|
|
dev.client_graph.current_chunk_parts.items.len,
|
|
@divFloor(current_bundle.timer.read(), std.time.ns_per_ms),
|
|
});
|
|
}
|
|
|
|
// Load all new chunks into the server runtime.
|
|
if (!dev.frontend_only and dev.server_graph.current_chunk_len > 0) {
|
|
const server_bundle = try dev.server_graph.takeJSBundle(.{ .kind = .hmr_chunk });
|
|
defer dev.allocator.free(server_bundle);
|
|
|
|
const server_modules = c.BakeLoadServerHmrPatch(@ptrCast(dev.vm.global), bun.String.createLatin1(server_bundle)) catch |err| {
|
|
            // No user code has been evaluated yet, since everything is to be
            // wrapped in a function closure. This means the likely error is a
            // syntax error, or another mistake in the bundler.
|
|
dev.vm.printErrorLikeObjectToConsole(dev.vm.global.takeException(err));
|
|
@panic("Error thrown while evaluating server code. This is always a bug in the bundler.");
|
|
};
|
|
const errors = dev.server_register_update_callback.get().?.call(
|
|
dev.vm.global,
|
|
dev.vm.global.toJSValue(),
|
|
&.{
|
|
server_modules,
|
|
dev.makeArrayForServerComponentsPatch(dev.vm.global, dev.incremental_result.client_components_added.items),
|
|
dev.makeArrayForServerComponentsPatch(dev.vm.global, dev.incremental_result.client_components_removed.items),
|
|
},
|
|
) catch |err| {
|
|
            // One module replacement error should NOT cause follow-up module
            // replacements to fail. It is the HMR runtime's responsibility to
            // collect all module load errors and bubble them up.
|
|
dev.vm.printErrorLikeObjectToConsole(dev.vm.global.takeException(err));
|
|
@panic("Error thrown in Hot-module-replacement code. This is always a bug in the HMR runtime.");
|
|
};
|
|
_ = errors; // TODO:
|
|
}
|
|
|
|
var route_bits = try DynamicBitSetUnmanaged.initEmpty(stack_alloc, dev.route_bundles.items.len);
|
|
defer route_bits.deinit(stack_alloc);
|
|
var route_bits_client = try DynamicBitSetUnmanaged.initEmpty(stack_alloc, dev.route_bundles.items.len);
|
|
defer route_bits_client.deinit(stack_alloc);
|
|
|
|
var has_route_bits_set = false;
|
|
|
|
var hot_update_payload_sfa = std.heap.stackFallback(65536, dev.allocator);
|
|
var hot_update_payload = std.ArrayList(u8).initCapacity(hot_update_payload_sfa.get(), 65536) catch
|
|
unreachable; // enough space
|
|
defer hot_update_payload.deinit();
|
|
hot_update_payload.appendAssumeCapacity(MessageId.hot_update.char());
|
|
|
|
// The writer used for the hot_update payload
|
|
const w = hot_update_payload.writer();
|
|
|
|
// It was discovered that if a tree falls with nobody around it, it does not
|
|
// make any sound. Let's avoid writing into `w` if no sockets are open.
|
|
const hot_update_subscribers = dev.numSubscribers(.hot_update);
|
|
const will_hear_hot_update = hot_update_subscribers > 0;
|
|
|
|
    // This list of affected routes excludes client code. This means changing
    // a client component won't count as a route to trigger a reload on.
    //
    // A second trace is required to determine which routes had changed bundles,
    // since changing a layout affects all child routes. Additionally, routes
    // that do not have a bundle will not be cleared (as there is nothing to
    // clear for those).
|
|
if (will_hear_hot_update and
|
|
current_bundle.had_reload_event and
|
|
(dev.incremental_result.framework_routes_affected.items.len +
|
|
dev.incremental_result.html_routes_hard_affected.items.len) > 0 and
|
|
dev.bundling_failures.count() == 0)
|
|
{
|
|
has_route_bits_set = true;
|
|
|
|
// A bit-set is used to avoid duplicate entries. This is not a problem
|
|
// with `dev.incremental_result.framework_routes_affected`
|
|
for (dev.incremental_result.framework_routes_affected.items) |request| {
|
|
const route = dev.router.routePtr(request.route_index);
|
|
if (route.bundle.unwrap()) |id| route_bits.set(id.get());
|
|
if (request.should_recurse_when_visiting) {
|
|
markAllRouteChildren(&dev.router, 1, .{&route_bits}, request.route_index);
|
|
}
|
|
}
|
|
for (dev.incremental_result.html_routes_hard_affected.items) |route_bundle_index| {
|
|
route_bits.set(route_bundle_index.get());
|
|
route_bits_client.set(route_bundle_index.get());
|
|
}
|
|
|
|
// List 1
|
|
var it = route_bits.iterator(.{ .kind = .set });
|
|
while (it.next()) |bundled_route_index| {
|
|
const bundle = &dev.route_bundles.items[bundled_route_index];
|
|
if (bundle.active_viewers == 0) continue;
|
|
try w.writeInt(i32, @intCast(bundled_route_index), .little);
|
|
}
|
|
}
|
|
try w.writeInt(i32, -1, .little);
|
|
|
|
// When client component roots get updated, the `client_components_affected`
|
|
// list contains the server side versions of these roots. These roots are
|
|
// traced to the routes so that the client-side bundles can be properly
|
|
// invalidated.
|
|
if (dev.incremental_result.client_components_affected.items.len > 0) {
|
|
has_route_bits_set = true;
|
|
|
|
dev.incremental_result.framework_routes_affected.clearRetainingCapacity();
|
|
dev.incremental_result.html_routes_hard_affected.clearRetainingCapacity();
|
|
dev.incremental_result.html_routes_soft_affected.clearRetainingCapacity();
|
|
gts.clear();
|
|
|
|
for (dev.incremental_result.client_components_affected.items) |index| {
|
|
try dev.server_graph.traceDependencies(index, >s, .no_stop, index);
|
|
}
|
|
|
|
// A bit-set is used to avoid duplicate entries. This is not a problem
|
|
// with `dev.incremental_result.routes_affected`
|
|
for (dev.incremental_result.framework_routes_affected.items) |request| {
|
|
const route = dev.router.routePtr(request.route_index);
|
|
if (route.bundle.unwrap()) |id| {
|
|
route_bits.set(id.get());
|
|
route_bits_client.set(id.get());
|
|
}
|
|
if (request.should_recurse_when_visiting) {
|
|
markAllRouteChildren(&dev.router, 2, .{ &route_bits, &route_bits_client }, request.route_index);
|
|
}
|
|
}
|
|
|
|
// Free old bundles
|
|
var it = route_bits_client.iterator(.{ .kind = .set });
|
|
while (it.next()) |bundled_route_index| {
|
|
const bundle = &dev.route_bundles.items[bundled_route_index];
|
|
bundle.invalidateClientBundle();
|
|
}
|
|
} else if (dev.incremental_result.html_routes_hard_affected.items.len > 0) {
|
|
// When only HTML routes were affected, there may not be any client
|
|
// components that got affected, but the bundles for these HTML routes
|
|
// are invalid now. That is why HTML routes above writes into
|
|
// `route_bits_client`.
|
|
|
|
// Free old bundles
|
|
var it = route_bits_client.iterator(.{ .kind = .set });
|
|
while (it.next()) |bundled_route_index| {
|
|
const bundle = &dev.route_bundles.items[bundled_route_index];
|
|
bundle.invalidateClientBundle();
|
|
}
|
|
}
|
|
|
|
// Softly affected HTML routes only need the bundle invalidated. WebSocket
|
|
// connections don't have to be told anything.
|
|
if (dev.incremental_result.html_routes_soft_affected.items.len > 0) {
|
|
for (dev.incremental_result.html_routes_soft_affected.items) |index| {
|
|
dev.routeBundlePtr(index).invalidateClientBundle();
|
|
route_bits.set(index.get());
|
|
}
|
|
has_route_bits_set = true;
|
|
}
|
|
|
|
// `route_bits` will have all of the routes that were modified. If any of
|
|
// these have active viewers, DevServer should inform them of CSS attachments. These
|
|
// route bundles also need to be invalidated of their css attachments.
|
|
if (has_route_bits_set and
|
|
(will_hear_hot_update or dev.incremental_result.had_adjusted_edges))
|
|
{
|
|
var it = route_bits.iterator(.{ .kind = .set });
|
|
// List 2
|
|
while (it.next()) |i| {
|
|
const route_bundle = dev.routeBundlePtr(RouteBundle.Index.init(@intCast(i)));
|
|
if (dev.incremental_result.had_adjusted_edges) {
|
|
switch (route_bundle.data) {
|
|
.framework => |*fw_bundle| fw_bundle.cached_css_file_array.clearWithoutDeallocation(),
|
|
.html => |*html| if (html.cached_response) |blob| {
|
|
blob.deref();
|
|
html.cached_response = null;
|
|
},
|
|
}
|
|
}
|
|
if (route_bundle.active_viewers == 0 or !will_hear_hot_update) continue;
|
|
try w.writeInt(i32, @intCast(i), .little);
|
|
|
|
// If no edges were changed, then it is impossible to
|
|
// change the list of CSS files.
|
|
if (dev.incremental_result.had_adjusted_edges) {
|
|
gts.clear();
|
|
dev.client_graph.current_css_files.clearRetainingCapacity();
|
|
try dev.traceAllRouteImports(route_bundle, >s, .find_css);
|
|
const css_ids = dev.client_graph.current_css_files.items;
|
|
|
|
try w.writeInt(i32, @intCast(css_ids.len), .little);
|
|
for (css_ids) |css_id| {
|
|
try w.writeAll(&std.fmt.bytesToHex(std.mem.asBytes(&css_id), .lower));
|
|
}
|
|
} else {
|
|
try w.writeInt(i32, -1, .little);
|
|
}
|
|
}
|
|
}
|
|
try w.writeInt(i32, -1, .little);
|
|
|
|
const css_chunks = result.cssChunks();
|
|
if (will_hear_hot_update) {
|
|
if (dev.client_graph.current_chunk_len > 0 or css_chunks.len > 0) {
|
|
// Send CSS mutations
|
|
const asset_values = dev.assets.files.values();
|
|
try w.writeInt(u32, @intCast(css_chunks.len), .little);
|
|
const sources = bv2.graph.input_files.items(.source);
|
|
for (css_chunks) |chunk| {
|
|
const key = sources[chunk.entry_point.source_index].path.keyForIncrementalGraph();
|
|
try w.writeAll(&std.fmt.bytesToHex(std.mem.asBytes(&bun.hash(key)), .lower));
|
|
const css_data = asset_values[chunk.entry_point.entry_point_id].blob.InternalBlob.bytes.items;
|
|
try w.writeInt(u32, @intCast(css_data.len), .little);
|
|
try w.writeAll(css_data);
|
|
}
|
|
|
|
// Send the JS chunk
|
|
if (dev.client_graph.current_chunk_len > 0) {
|
|
const hash = hash: {
|
|
var source_map_hash: bun.bundle_v2.ContentHasher.Hash = .init(0x4b12); // arbitrarily different seed than what .initial_response uses
|
|
const keys = dev.client_graph.bundled_files.keys();
|
|
for (dev.client_graph.current_chunk_parts.items) |part| {
|
|
source_map_hash.update(keys[part.get()]);
|
|
source_map_hash.update(dev.client_graph.source_maps.items[part.get()].vlq());
|
|
}
|
|
// Set the bottom bit. This ensures that the resource can never be confused for a route bundle.
|
|
break :hash source_map_hash.final() | 1;
|
|
};
|
|
if (try dev.source_maps.putOrIncrementRefCount(hash, 1)) |entry| {
|
|
try dev.client_graph.takeSourceMap(.hmr_chunk, bv2.graph.allocator, dev.allocator, entry);
|
|
}
|
|
// Build and send the source chunk
|
|
try dev.client_graph.takeJSBundleToList(&hot_update_payload, .{
|
|
.kind = .hmr_chunk,
|
|
.source_map_id = hash,
|
|
});
|
|
}
|
|
} else {
|
|
try w.writeInt(i32, 0, .little);
|
|
}
|
|
|
|
dev.publish(.hot_update, hot_update_payload.items, .binary);
|
|
had_sent_hmr_event = true;
|
|
}
|
|
|
|
if (dev.incremental_result.failures_added.items.len > 0) {
|
|
dev.bundles_since_last_error = 0;
|
|
|
|
while (current_bundle.requests.popFirst()) |node| {
|
|
defer dev.deferred_request_pool.put(node);
|
|
const req = &node.data;
|
|
|
|
const rb = dev.routeBundlePtr(req.route_bundle_index);
|
|
rb.server_state = .possible_bundling_failures;
|
|
|
|
const resp: AnyResponse = switch (req.handler) {
|
|
.aborted => continue,
|
|
.server_handler => |*saved| brk: {
|
|
const resp = saved.response;
|
|
saved.deinit();
|
|
break :brk resp;
|
|
},
|
|
.bundled_html_page => |ram| ram.response,
|
|
};
|
|
|
|
try dev.sendSerializedFailures(
|
|
resp,
|
|
dev.bundling_failures.keys(),
|
|
.bundler,
|
|
);
|
|
}
|
|
return;
|
|
}
|
|
|
|
if (dev.bundling_failures.count() == 0) {
|
|
if (current_bundle.had_reload_event) {
|
|
const clear_terminal = !debug.isVisible();
|
|
if (clear_terminal) {
|
|
Output.disableBuffering();
|
|
Output.resetTerminalAll();
|
|
Output.enableBuffering();
|
|
}
|
|
|
|
dev.printMemoryLine();
|
|
|
|
dev.bundles_since_last_error += 1;
|
|
if (dev.bundles_since_last_error > 1) {
|
|
Output.prettyError("<cyan>[x{d}]<r> ", .{dev.bundles_since_last_error});
|
|
}
|
|
} else {
|
|
dev.bundles_since_last_error = 0;
|
|
dev.printMemoryLine();
|
|
}
|
|
|
|
Output.prettyError("<green>{s} in {d}ms<r>", .{
|
|
if (current_bundle.had_reload_event)
|
|
"Reloaded"
|
|
else
|
|
"Bundled page",
|
|
@divFloor(current_bundle.timer.read(), std.time.ns_per_ms),
|
|
});
|
|
|
|
// Compute a file name to display
|
|
const file_name: ?[]const u8 = if (current_bundle.had_reload_event)
|
|
if (bv2.graph.entry_points.items.len > 0)
|
|
dev.relativePath(
|
|
bv2.graph.input_files.items(.source)[bv2.graph.entry_points.items[0].get()].path.text,
|
|
)
|
|
else
|
|
null // TODO: How does this happen
|
|
else switch (dev.routeBundlePtr(current_bundle.requests.first.?.data.route_bundle_index).data) {
|
|
.html => |html| dev.relativePath(html.html_bundle.html_bundle.path),
|
|
.framework => |fw| file_name: {
|
|
const route = dev.router.routePtr(fw.route_index);
|
|
const opaque_id = route.file_page.unwrap() orelse
|
|
route.file_layout.unwrap() orelse
|
|
break :file_name null;
|
|
const server_index = fromOpaqueFileId(.server, opaque_id);
|
|
const abs_path = dev.server_graph.bundled_files.keys()[server_index.get()];
|
|
break :file_name dev.relativePath(abs_path);
|
|
},
|
|
};
|
|
defer dev.releaseRelativePathBuf();
|
|
const total_count = bv2.graph.entry_points.items.len;
|
|
if (file_name) |name| {
|
|
Output.prettyError("<d>:<r> {s}", .{name});
|
|
if (total_count > 1) {
|
|
Output.prettyError(" <d>+ {d} more<r>", .{total_count - 1});
|
|
}
|
|
}
|
|
Output.prettyError("\n", .{});
|
|
Output.flush();
|
|
}
|
|
|
|
// Release the lock because the underlying handler may acquire one.
|
|
dev.graph_safety_lock.unlock();
|
|
defer dev.graph_safety_lock.lock();
|
|
|
|
while (current_bundle.requests.popFirst()) |node| {
|
|
defer dev.deferred_request_pool.put(node);
|
|
const req = &node.data;
|
|
|
|
const rb = dev.routeBundlePtr(req.route_bundle_index);
|
|
rb.server_state = .loaded;
|
|
|
|
switch (req.handler) {
|
|
.aborted => continue,
|
|
.server_handler => |saved| dev.onFrameworkRequestWithBundle(req.route_bundle_index, .{ .saved = saved }, saved.response),
|
|
.bundled_html_page => |ram| dev.onHtmlRequestWithBundle(req.route_bundle_index, ram.response, ram.method),
|
|
}
|
|
}
|
|
}
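
// Rough sketch of the `hot_update` WebSocket message assembled in
// `finalizeBundle` (integers little-endian; -1 terminates a list; the exact
// framing is defined by the HMR protocol):
//
//   u8   MessageId.hot_update
//   i32  route bundle index, repeated for routes needing a reload, then -1
//   i32  route bundle index, then either i32 css count followed by 16-hex-char
//        css ids, or -1 if the css list is unchanged; repeated, then -1
//   u32  css chunk count, then per chunk: 16-hex-char id, u32 byte length, css text
//   ...  followed by the JS HMR chunk, if any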
|
|
|
|
fn startNextBundleIfPresent(dev: *DevServer) void {
|
|
// Clear the current bundle
|
|
assert(dev.current_bundle == null);
|
|
dev.log.clearAndFree();
|
|
dev.emitVisualizerMessageIfNeeded();
|
|
|
|
// If there were pending requests, begin another bundle.
|
|
if (dev.next_bundle.reload_event != null or dev.next_bundle.requests.first != null) {
|
|
var sfb = std.heap.stackFallback(4096, dev.allocator);
|
|
const temp_alloc = sfb.get();
|
|
var entry_points: EntryPointList = .empty;
|
|
defer entry_points.deinit(temp_alloc);
|
|
|
|
const is_reload, const timer = if (dev.next_bundle.reload_event) |event| brk: {
|
|
dev.next_bundle.reload_event = null;
|
|
|
|
const reload_event_timer = event.timer;
|
|
|
|
event.processFileList(dev, &entry_points, temp_alloc);
|
|
|
|
if (dev.watcher_atomics.recycleEventFromDevServer(event)) |second| {
|
|
if (Environment.isDebug) {
|
|
assert(second.debug_mutex.tryLock());
|
|
}
|
|
second.processFileList(dev, &entry_points, temp_alloc);
|
|
dev.watcher_atomics.recycleSecondEventFromDevServer(second);
|
|
}
|
|
|
|
break :brk .{ true, reload_event_timer };
|
|
} else .{ false, std.time.Timer.start() catch @panic("timers unsupported") };
|
|
|
|
for (dev.next_bundle.route_queue.keys()) |route_bundle_index| {
|
|
const rb = dev.routeBundlePtr(route_bundle_index);
|
|
rb.server_state = .bundling;
|
|
dev.appendRouteEntryPointsIfNotStale(&entry_points, temp_alloc, route_bundle_index) catch bun.outOfMemory();
|
|
}
|
|
|
|
if (entry_points.set.count() > 0) {
|
|
dev.startAsyncBundle(entry_points, is_reload, timer) catch bun.outOfMemory();
|
|
}
|
|
|
|
dev.next_bundle.route_queue.clearRetainingCapacity();
|
|
}
|
|
}
|
|
|
|
/// Note: The log is not consumed here
|
|
pub fn handleParseTaskFailure(
|
|
dev: *DevServer,
|
|
err: anyerror,
|
|
graph: bake.Graph,
|
|
abs_path: []const u8,
|
|
log: *const Log,
|
|
bv2: *BundleV2,
|
|
) bun.OOM!void {
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
|
|
debug.log("handleParseTaskFailure({}, .{s}, {}, {d} messages)", .{
|
|
err,
|
|
@tagName(graph),
|
|
bun.fmt.quote(abs_path),
|
|
log.msgs.items.len,
|
|
});
|
|
|
|
if (err == error.FileNotFound or err == error.ModuleNotFound) {
|
|
// Special-case files being deleted. Note that if a file had never
|
|
// existed, resolution would fail first.
|
|
switch (graph) {
|
|
.server, .ssr => dev.server_graph.onFileDeleted(abs_path, bv2),
|
|
.client => dev.client_graph.onFileDeleted(abs_path, bv2),
|
|
}
|
|
} else {
|
|
switch (graph) {
|
|
.server => try dev.server_graph.insertFailure(.abs_path, abs_path, log, false),
|
|
.ssr => try dev.server_graph.insertFailure(.abs_path, abs_path, log, true),
|
|
.client => try dev.client_graph.insertFailure(.abs_path, abs_path, log, false),
|
|
}
|
|
}
|
|
}
|
|
|
|
/// Return a log to write resolution failures into.
|
|
pub fn getLogForResolutionFailures(dev: *DevServer, abs_path: []const u8, graph: bake.Graph) !*bun.logger.Log {
|
|
assert(dev.current_bundle != null);
|
|
const current_bundle = &dev.current_bundle.?;
|
|
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
|
|
const owner = switch (graph == .client) {
|
|
inline else => |is_client| @unionInit(
|
|
SerializedFailure.Owner,
|
|
if (is_client) "client" else "server",
|
|
try (if (is_client) dev.client_graph else dev.server_graph)
|
|
.insertStale(abs_path, !is_client and graph == .ssr),
|
|
).encode(),
|
|
};
|
|
const gop = try current_bundle.resolution_failure_entries.getOrPut(current_bundle.bv2.graph.allocator, owner);
|
|
if (!gop.found_existing) {
|
|
gop.value_ptr.* = bun.logger.Log.init(current_bundle.bv2.graph.allocator);
|
|
}
|
|
return gop.value_ptr;
|
|
}
|
|
|
|
const CacheEntry = struct {
    kind: FileKind,
};
|
|
|
|
pub fn isFileCached(dev: *DevServer, path: []const u8, side: bake.Graph) ?CacheEntry {
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
|
|
switch (side) {
|
|
inline else => |side_comptime| {
|
|
const g = switch (side_comptime) {
|
|
.client => &dev.client_graph,
|
|
.server => &dev.server_graph,
|
|
.ssr => &dev.server_graph,
|
|
};
|
|
const index = g.bundled_files.getIndex(path) orelse
|
|
return null; // non-existent files are considered stale
|
|
if (!g.stale_files.isSet(index)) {
|
|
return .{ .kind = g.bundled_files.values()[index].fileKind() };
|
|
}
|
|
return null;
|
|
},
|
|
}
|
|
}
|
|
|
|
fn appendOpaqueEntryPoint(
|
|
dev: *DevServer,
|
|
file_names: [][]const u8,
|
|
entry_points: *EntryPointList,
|
|
alloc: Allocator,
|
|
comptime side: bake.Side,
|
|
optional_id: anytype,
|
|
) !void {
|
|
const file = switch (@TypeOf(optional_id)) {
|
|
OpaqueFileId.Optional => optional_id.unwrap() orelse return,
|
|
OpaqueFileId => optional_id,
|
|
else => @compileError("invalid type here"),
|
|
};
|
|
|
|
const file_index = fromOpaqueFileId(side, file);
|
|
if (switch (side) {
|
|
.server => dev.server_graph.stale_files.isSet(file_index.get()),
|
|
.client => dev.client_graph.stale_files.isSet(file_index.get()),
|
|
}) {
|
|
try entry_points.appendJs(alloc, file_names[file_index.get()], side.graph());
|
|
}
|
|
}
|
|
|
|
pub fn routeBundlePtr(dev: *DevServer, idx: RouteBundle.Index) *RouteBundle {
    return &dev.route_bundles.items[idx.get()];
}
|
|
|
|
fn onRequest(dev: *DevServer, req: *Request, resp: anytype) void {
|
|
var params: FrameworkRouter.MatchedParams = undefined;
|
|
if (dev.router.matchSlow(req.url(), ¶ms)) |route_index| {
|
|
dev.ensureRouteIsBundled(
|
|
dev.getOrPutRouteBundle(.{ .framework = route_index }) catch bun.outOfMemory(),
|
|
.server_handler,
|
|
req,
|
|
AnyResponse.init(resp),
|
|
) catch bun.outOfMemory();
|
|
return;
|
|
}
|
|
|
|
if (DevServer.AnyResponse != @typeInfo(@TypeOf(resp)).pointer.child) {
|
|
        unreachable; // mismatch between the server's `is_ssl` and the response type; these checks should optimize out.
|
|
}
|
|
|
|
if (dev.server.?.config.onRequest != .zero) {
|
|
dev.server.?.onRequest(req, resp);
|
|
return;
|
|
}
|
|
|
|
sendBuiltInNotFound(resp);
|
|
}
|
|
|
|
pub fn respondForHTMLBundle(dev: *DevServer, html: *HTMLBundle.HTMLBundleRoute, req: *uws.Request, resp: AnyResponse) !void {
    try dev.ensureRouteIsBundled(try dev.getOrPutRouteBundle(.{ .html = html }), .bundled_html_page, req, resp);
}
|
|
|
|
fn getOrPutRouteBundle(dev: *DevServer, route: RouteBundle.UnresolvedIndex) !RouteBundle.Index {
|
|
const index_location: *RouteBundle.Index.Optional = switch (route) {
|
|
.framework => |route_index| &dev.router.routePtr(route_index).bundle,
|
|
.html => |html| &html.dev_server_id,
|
|
};
|
|
if (index_location.unwrap()) |bundle_index| {
|
|
return bundle_index;
|
|
}
|
|
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
|
|
const bundle_index = RouteBundle.Index.init(@intCast(dev.route_bundles.items.len));
|
|
|
|
try dev.route_bundles.ensureUnusedCapacity(dev.allocator, 1);
|
|
dev.route_bundles.appendAssumeCapacity(.{
|
|
.data = switch (route) {
|
|
.framework => |route_index| .{ .framework = .{
|
|
.route_index = route_index,
|
|
.evaluate_failure = null,
|
|
.cached_module_list = .empty,
|
|
.cached_client_bundle_url = .empty,
|
|
.cached_css_file_array = .empty,
|
|
} },
|
|
.html => |html| brk: {
|
|
const incremental_graph_index = try dev.client_graph.insertStaleExtra(html.html_bundle.path, false, true);
|
|
dev.client_graph.source_maps.items[incremental_graph_index.get()].extra.empty.html_bundle_route_index = .init(bundle_index.get());
|
|
break :brk .{ .html = .{
|
|
.html_bundle = html,
|
|
.bundled_file = incremental_graph_index,
|
|
.script_injection_offset = .none,
|
|
.cached_response = null,
|
|
.bundled_html_text = null,
|
|
} };
|
|
},
|
|
},
|
|
.client_script_generation = std.crypto.random.int(u32),
|
|
.server_state = .unqueued,
|
|
.client_bundle = null,
|
|
.active_viewers = 0,
|
|
});
|
|
index_location.* = bundle_index.toOptional();
|
|
return bundle_index;
|
|
}
|
|
|
|
fn registerCatchAllHtmlRoute(dev: *DevServer, html: *HTMLBundle.HTMLBundleRoute) !void {
    const bundle_index = try getOrPutRouteBundle(dev, .{ .html = html });
    dev.html_router.fallback = bundle_index.toOptional();
}
|
|
|
|
const ErrorPageKind = enum {
    /// Modules failed to bundle
    bundler,
    /// Modules failed to evaluate
    evaluation,
    /// Request handler threw
    runtime,
};
|
|
|
|
fn sendSerializedFailures(
|
|
dev: *DevServer,
|
|
resp: AnyResponse,
|
|
failures: []const SerializedFailure,
|
|
kind: ErrorPageKind,
|
|
) !void {
|
|
var buf: std.ArrayList(u8) = try .initCapacity(dev.allocator, 2048);
|
|
errdefer buf.deinit();
|
|
|
|
try buf.appendSlice(switch (kind) {
|
|
inline else => |k| std.fmt.comptimePrint(
|
|
\\<!doctype html>
|
|
\\<html lang="en">
|
|
\\<head>
|
|
\\<meta charset="UTF-8" />
|
|
\\<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
|
\\<title>Bun - {[page_title]s}</title>
|
|
\\<style>:root{{color-scheme:light dark}}body{{background:light-dark(white,black)}}</style>
|
|
\\</head>
|
|
\\<body>
|
|
\\<noscript><h1 style="font:28px sans-serif;">{[page_title]s}</h1><p style="font:20px sans-serif;">Bun requires JavaScript enabled in the browser to render this error screen, as well as receive hot reloading events.</p></noscript>
|
|
\\<script>let error=Uint8Array.from(atob("
|
|
,
|
|
.{ .page_title = switch (k) {
|
|
.bundler => "Build Failed",
|
|
.evaluation, .runtime => "Runtime Error",
|
|
} },
|
|
),
|
|
});
|
|
|
|
for (failures) |fail| {
|
|
const len = bun.base64.encodeLen(fail.data);
|
|
|
|
try buf.ensureUnusedCapacity(len);
|
|
const start = buf.items.len;
|
|
buf.items.len += len;
|
|
const to_write_into = buf.items[start..];
|
|
|
|
var encoded = to_write_into[0..bun.base64.encode(to_write_into, fail.data)];
|
|
while (encoded.len > 0 and encoded[encoded.len - 1] == '=') {
|
|
encoded.len -= 1;
|
|
}
|
|
|
|
buf.items.len = start + encoded.len;
|
|
}
|
|
|
|
const pre = "\"),c=>c.charCodeAt(0));let config={bun:\"" ++ bun.Global.package_json_version_with_canary ++ "\"};";
|
|
const post = "</script></body></html>";
|
|
|
|
if (Environment.codegen_embed) {
|
|
try buf.appendSlice(pre ++ @embedFile("bake-codegen/bake.error.js") ++ post);
|
|
} else {
|
|
try buf.appendSlice(pre);
|
|
try buf.appendSlice(bun.runtimeEmbedFile(.codegen_eager, "bake.error.js"));
|
|
try buf.appendSlice(post);
|
|
}
|
|
|
|
StaticRoute.sendBlobThenDeinit(resp, &.fromArrayList(buf), .{
|
|
.mime_type = &.html,
|
|
.server = dev.server.?,
|
|
.status_code = 500,
|
|
});
|
|
}
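// For reference, the page built above boils down to this skeleton
// (illustrative only, not byte-exact):
//
//     <script>
//       let error = Uint8Array.from(atob("<base64-encoded failures>"), c => c.charCodeAt(0));
//       let config = { bun: "<version>" };
//       /* contents of bake-codegen/bake.error.js */
//     </script>
//
// The trailing '=' padding is stripped from each failure's base64 before it
// is embedded, presumably just to shave a few bytes off the payload.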
|
|
|
|
fn sendBuiltInNotFound(resp: anytype) void {
|
|
const message = "404 Not Found";
|
|
resp.writeStatus("404 Not Found");
|
|
resp.end(message, true);
|
|
}
|
|
|
|
fn printMemoryLine(dev: *DevServer) void {
|
|
if (!debug.isVisible()) return;
|
|
Output.prettyErrorln("<d>DevServer tracked {}, measured: {} ({}), process: {}<r>", .{
|
|
bun.fmt.size(dev.memoryCost(), .{}),
|
|
dev.allocation_scope.state.allocations.count(),
|
|
bun.fmt.size(dev.allocation_scope.state.total_memory_allocated, .{}),
|
|
bun.fmt.size(bun.sys.selfProcessMemoryUsage() orelse 0, .{}),
|
|
});
|
|
}
|
|
|
|
const FileKind = enum(u2) {
|
|
/// Files that failed to bundle or do not exist on disk will appear in the
|
|
/// graph as "unknown".
|
|
unknown,
|
|
/// `code` is JavaScript code. This field is also used for HTML files, where
|
|
/// the associated JS just calls `require` to emulate the script tags.
|
|
js,
|
|
/// `code` is JavaScript code of a module exporting a single file path.
|
|
/// This is used when the loader is not `css` nor `isJavaScriptLike()`
|
|
asset,
|
|
/// `code` is the URL where the CSS file is to be fetched from, ex.
|
|
/// '/_bun/css/0000000000000000.css'
|
|
css,
|
|
};
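// Rough summary of what each kind means for a client-side file (see the
// per-side `File` definitions inside `IncrementalGraph` below); this is a
// restatement of the doc comments above, not an additional contract:
//   .unknown -> no content; placeholder for failed or missing files
//   .js      -> bundled JavaScript text (HTML files also land here as a
//               small `require` wrapper)
//   .asset   -> a tiny JS module exporting the asset's path/URL
//   .css     -> an asset id, later rendered into a '/_bun/...' URL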
|
|
|
|
/// The paradigm of Bake's incremental state is to store a separate list of
/// files than the Graph in bundle_v2. When watch events happen, the bundler
/// is run on the changed files, excluding non-stale files via `isFileStale`.
///
/// Upon bundle completion, both `client_graph` and `server_graph` have their
/// `receiveChunk` methods called with all new chunks, counting the total length
/// needed. A call to `takeJSBundle` joins all of the chunks, resulting in the
/// code to send to the client or evaluate on the server.
///
/// Then, `processChunkDependencies` is called on each chunk to update the
/// list of imports. When a change in imports is detected, the dependencies
/// are updated accordingly.
///
/// Since all routes share the two graphs, bundling a new route that shares
/// a module with a previously bundled route will perform the same exclusion
/// behavior that rebuilds use. This also ensures that two routes on the server
/// do not emit duplicate dependencies. By tracing `imports` on each file in
/// the module graph recursively, the full bundle for any given route can
/// be re-materialized (required when pressing Cmd+R after any client update).
///
/// Since source mappings are all relative to their previous mapping, each
/// chunk's mappings can be stored in the graph, and very trivially built into
/// JSON source map files (`takeSourceMap`), even after hot updates. The
/// lifetime for these sourcemaps is a bit tricky and depends on the lifetime
/// of WebSocket connections; see comments in `Assets` for more details.
|
|
pub fn IncrementalGraph(side: bake.Side) type {
|
|
return struct {
|
|
// Unless otherwise mentioned, all data structures use DevServer's allocator.
|
|
// All arrays are indexed by FileIndex, except for the two edge-related arrays.
|
|
|
|
/// Keys are absolute paths for the "file" namespace, or the
/// pretty-formatted path values that appear in imports. Absolute paths
/// are stored so the watcher can quickly query and invalidate them.
/// Key slices are owned by `dev.allocator`.
|
|
bundled_files: bun.StringArrayHashMapUnmanaged(File),
|
|
/// Source maps are stored out-of-line to make `File` objects smaller,
|
|
/// as file information is accessed much more frequently than source maps.
|
|
source_maps: if (side == .client) ArrayListUnmanaged(PackedMap) else void,
|
|
/// Track bools for files which are "stale", meaning they should be
|
|
/// re-bundled before being used. Resizing this is usually deferred
|
|
/// until after a bundle, since resizing the bit-set requires an
|
|
/// exact size, instead of the log approach that dynamic arrays use.
|
|
stale_files: DynamicBitSetUnmanaged,
|
|
|
|
// TODO: rename `dependencies` to something that clearly indicates direction.
|
|
// such as "parent" or "consumer"
|
|
|
|
/// Start of a file's 'dependencies' linked list. These are the other
|
|
/// files that have imports to this file. Walk this list to discover
|
|
/// what files are to be reloaded when something changes.
|
|
first_dep: ArrayListUnmanaged(EdgeIndex.Optional),
|
|
/// Start of a file's 'imports' linked lists. These are the files that
|
|
/// this file imports.
|
|
first_import: ArrayListUnmanaged(EdgeIndex.Optional),
|
|
/// `File` objects act as nodes in a directional many-to-many graph,
/// where edges represent the imports between modules. A 'dependency'
/// is a file that must be notified when a file it imports changes. This
/// is implemented using an array of `Edge` objects that act as linked
/// list nodes; each file stores its first import and first dependency edge.
|
|
edges: ArrayListUnmanaged(Edge),
|
|
/// HMR Dependencies are added and removed very frequently, but indexes
|
|
/// must remain stable. This free list allows re-use of freed indexes,
|
|
/// so garbage collection can run less often.
|
|
edges_free_list: ArrayListUnmanaged(EdgeIndex),
|
|
|
|
/// Combined byte length of every file queued for concatenation
|
|
current_chunk_len: usize = 0,
|
|
/// All part contents
|
|
current_chunk_parts: ArrayListUnmanaged(switch (side) {
|
|
.client => FileIndex,
|
|
// These slices do not outlive the bundler, and must
|
|
// be joined before its arena is deinitialized.
|
|
.server => []const u8,
|
|
}),
|
|
|
|
/// Asset IDs, which can be printed as hex in '/_bun/asset/{hash}.css'
|
|
current_css_files: switch (side) {
|
|
.client => ArrayListUnmanaged(u64),
|
|
.server => void,
|
|
},
|
|
|
|
const empty: @This() = .{
|
|
.bundled_files = .empty,
|
|
.source_maps = if (side == .client) .empty,
|
|
.stale_files = .empty,
|
|
.first_dep = .empty,
|
|
.first_import = .empty,
|
|
|
|
.edges = .empty,
|
|
.edges_free_list = .empty,
|
|
|
|
.current_chunk_len = 0,
|
|
.current_chunk_parts = .empty,
|
|
|
|
.current_css_files = if (side == .client) .empty,
|
|
};
|
|
|
|
pub const File = switch (side) {
|
|
// The server's incremental graph does not store previously bundled
|
|
// code because there is only one instance of the server. Instead,
|
|
// it stores which module graphs it is a part of. This makes sure
|
|
// that recompilation knows what bundler options to use.
|
|
.server => packed struct(u8) {
|
|
/// Is this file built for the Server graph.
|
|
is_rsc: bool,
|
|
/// Is this file built for the SSR graph.
|
|
is_ssr: bool,
|
|
/// If set, the client graph contains a matching file.
|
|
/// The server
|
|
is_client_component_boundary: bool,
|
|
/// If this file is a route root, the route can be looked up in
|
|
/// the route list. This also stops dependency propagation.
|
|
is_route: bool,
|
|
/// If the file has an error, the failure can be looked up
|
|
/// in the `.failures` map.
|
|
failed: bool,
|
|
/// CSS and Asset files get special handling
|
|
kind: FileKind,
|
|
|
|
unused: enum(u1) { unused } = .unused,
|
|
|
|
fn stopsDependencyTrace(file: @This()) bool {
|
|
return file.is_client_component_boundary;
|
|
}
|
|
|
|
fn fileKind(file: @This()) FileKind {
|
|
return file.kind;
|
|
}
|
|
},
|
|
.client => struct {
|
|
/// Content depends on `flags.kind`
|
|
/// See function wrappers to safely read into this data
|
|
content: extern union {
|
|
/// Allocated by `dev.allocator`. Access with `.jsCode()`
|
|
/// When stale, the code is "", otherwise it contains at
|
|
/// least one non-whitespace character, as empty chunks
|
|
/// contain at least a function wrapper.
|
|
js_code_ptr: [*]const u8,
|
|
/// Access with `.cssAssetId()`
|
|
css_asset_id: u64,
|
|
|
|
unknown: enum(u32) { unknown = 0 },
|
|
},
|
|
/// Separated from the pointer to reduce struct size.
|
|
/// Parser does not support files >4gb anyways.
|
|
code_len: u32,
|
|
flags: Flags,
|
|
|
|
const Flags = packed struct(u32) {
|
|
/// Kind determines the data representation in `content`, as
|
|
/// well as how this file behaves when tracing.
|
|
kind: FileKind,
|
|
/// If the file has an error, the failure can be looked up
|
|
/// in the `.failures` map.
|
|
failed: bool,
|
|
/// For JS files, this is a component root; the server contains a matching file.
|
|
is_hmr_root: bool,
|
|
/// This file is an entry point to the framework.
/// Changing this will always cause a full page reload.
|
|
is_special_framework_file: bool,
|
|
/// If this file has a HTML RouteBundle. The bundle index is tucked away in:
|
|
/// `graph.source_maps.items[i].extra.empty.html_bundle_route_index`
|
|
is_html_route: bool,
|
|
/// A CSS root is the first file in a CSS bundle, aka the
|
|
/// one that the JS or HTML file points into.
|
|
///
|
|
/// There are many complicated rules when CSS files
|
|
/// reference each other, none of which are modelled in
|
|
/// IncrementalGraph. Instead, any change to downstream
|
|
/// files will find the CSS root, and queue it for a
|
|
/// re-bundle. Additionally, CSS roots only have one level
|
|
/// of imports, as the code in `finalizeBundle` will add all
|
|
/// referenced files as edges directly to the root, creating
|
|
/// a flat list instead of a tree. Those downstream files
/// remain empty; they are only present so that invalidation can
/// trace them back to this root.
|
|
is_css_root: bool,
|
|
|
|
unused: enum(u25) { unused } = .unused,
|
|
};
|
|
|
|
comptime {
|
|
assert(@sizeOf(@This()) == @sizeOf(u64) * 2);
|
|
assert(@alignOf(@This()) == @alignOf([*]u8));
|
|
}
|
|
|
|
fn initJavaScript(code_slice: []const u8, flags: Flags) @This() {
|
|
assert(flags.kind == .js or flags.kind == .asset);
|
|
return .{
|
|
.content = .{ .js_code_ptr = code_slice.ptr },
|
|
.code_len = @intCast(code_slice.len),
|
|
.flags = flags,
|
|
};
|
|
}
|
|
|
|
fn initCSS(asset_id: u64, flags: Flags) @This() {
|
|
assert(flags.kind == .css);
|
|
return .{
|
|
.content = .{ .css_asset_id = asset_id },
|
|
.code_len = 0, // unused
|
|
.flags = flags,
|
|
};
|
|
}
|
|
|
|
fn initUnknown(flags: Flags) @This() {
|
|
return .{
|
|
.content = .{ .unknown = .unknown }, // unused
|
|
.code_len = 0, // unused
|
|
.flags = flags,
|
|
};
|
|
}
|
|
|
|
fn jsCode(file: @This()) []const u8 {
|
|
assert(file.flags.kind == .js or file.flags.kind == .asset);
|
|
return file.content.js_code_ptr[0..file.code_len];
|
|
}
|
|
|
|
fn cssAssetId(file: @This()) u64 {
|
|
assert(file.flags.kind == .css);
|
|
return file.content.css_asset_id;
|
|
}
|
|
|
|
inline fn stopsDependencyTrace(_: @This()) bool {
|
|
return false;
|
|
}
|
|
|
|
fn fileKind(file: @This()) FileKind {
|
|
return file.flags.kind;
|
|
}
|
|
},
|
|
};
|
|
|
|
fn freeFileContent(g: *IncrementalGraph(.client), index: FileIndex, key: []const u8, file: File, css: enum { unref_css, ignore_css }) void {
|
|
switch (file.flags.kind) {
|
|
.js, .asset => {
|
|
g.owner().allocator.free(file.jsCode());
|
|
const map = &g.source_maps.items[index.get()];
|
|
g.owner().allocator.free(map.vlq());
|
|
if (map.quoted_contents_flags.is_owned) {
|
|
map.quotedContentsCowString().deinit(g.owner().allocator);
|
|
}
|
|
},
|
|
.css => if (css == .unref_css) {
|
|
g.owner().assets.unrefByPath(key);
|
|
},
|
|
.unknown => {},
|
|
}
|
|
}
|
|
|
|
/// Packed source mapping data
|
|
pub const PackedMap = struct {
|
|
/// Allocated by `dev.allocator`. Access with `.vlq()`
|
|
/// This is stored to allow lazy construction of source map files.
|
|
vlq_ptr: [*]u8,
|
|
vlq_len: u32,
|
|
/// The bundler runs quoting on multiple threads, so it only makes
|
|
/// sense to preserve that effort for concatenation and
|
|
/// re-concatenation.
|
|
quoted_contents_ptr: [*]const u8,
|
|
quoted_contents_flags: packed struct(u32) {
|
|
len: u31,
|
|
is_owned: bool,
|
|
},
|
|
/// This field is overloaded depending on whether the VLQ data
/// is present (`vlq_len != 0`).
|
|
extra: extern union {
|
|
/// Used to track the last state of the source map chunk. This
|
|
/// is used when concatenating chunks. The generated column is
|
|
/// not tracked because it is always zero (all chunks end in a
|
|
/// newline because minification is off), and the generated line
|
|
/// is recomputed on demand and is different per concatenation.
|
|
end_state: extern struct {
|
|
original_line: i32,
|
|
original_column: i32,
|
|
},
|
|
empty: extern struct {
|
|
/// Number of lines to skip. Computed on demand.
|
|
line_count: bun.GenericIndex(u32, u8).Optional,
|
|
// Cannot use RouteBundle.Index because `extern union` above :(
|
|
html_bundle_route_index: bun.GenericIndex(u32, u8).Optional,
|
|
},
|
|
},
|
|
|
|
pub const empty: PackedMap = .{
|
|
.vlq_ptr = &[0]u8{},
|
|
.vlq_len = 0,
|
|
.quoted_contents_ptr = &[0]u8{},
|
|
.quoted_contents_flags = .{
|
|
.len = 0,
|
|
.is_owned = false,
|
|
},
|
|
.extra = .{ .empty = .{
|
|
.line_count = .none,
|
|
.html_bundle_route_index = .none,
|
|
} },
|
|
};
|
|
|
|
pub fn vlq(self: @This()) []u8 {
|
|
return self.vlq_ptr[0..self.vlq_len];
|
|
}
|
|
|
|
pub fn quotedContentsCowString(self: @This()) bun.ptr.CowString {
|
|
return bun.ptr.CowString.initUnchecked(self.quoted_contents_ptr[0..self.quoted_contents_flags.len], self.quoted_contents_flags.is_owned);
|
|
}
|
|
|
|
pub fn quotedContents(self: @This()) []const u8 {
|
|
return self.quoted_contents_ptr[0..self.quoted_contents_flags.len];
|
|
}
|
|
|
|
pub fn fromNonEmptySourceMap(source_map: SourceMap.Chunk, quoted_contents: bun.ptr.CowString) !PackedMap {
|
|
assert(source_map.buffer.list.items.len > 0);
|
|
return .{
|
|
.vlq_ptr = source_map.buffer.list.items.ptr,
|
|
.vlq_len = @intCast(source_map.buffer.list.items.len),
|
|
.quoted_contents_ptr = quoted_contents.ptr,
|
|
.quoted_contents_flags = .{
|
|
.len = @intCast(quoted_contents.flags.len),
|
|
.is_owned = quoted_contents.flags.is_owned,
|
|
},
|
|
.extra = .{ .end_state = .{
|
|
.original_line = source_map.end_state.original_line,
|
|
.original_column = source_map.end_state.original_column,
|
|
} },
|
|
};
|
|
}
|
|
|
|
comptime {
|
|
assert(@sizeOf(@This()) == @sizeOf(usize) * 4);
|
|
assert(@alignOf(@This()) == @alignOf(usize));
|
|
}
|
|
};
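// A sketch of how the overloaded `extra` union is meant to be read
// (an assumption drawn from the comments above; check `vlq_len` first):
//
//     if (map.vlq_len == 0) {
//         // No mappings. `.empty` carries the lazily computed line count
//         // and, for HTML routes, the RouteBundle index.
//         _ = map.extra.empty.line_count;
//     } else {
//         // Mappings present. `.end_state` remembers where this chunk's
//         // VLQ deltas left off so concatenation can resume from there.
//         _ = map.extra.end_state.original_line;
//     }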
|
|
|
|
// If this data structure is not clear, see `DirectoryWatchStore.Dep`
|
|
// for a simpler example. It is more complicated here because this
|
|
// structure is two-way.
|
|
pub const Edge = struct {
|
|
/// The file with the import statement
|
|
dependency: FileIndex,
|
|
/// The file the import statement references.
|
|
imported: FileIndex,
|
|
|
|
next_import: EdgeIndex.Optional,
|
|
next_dependency: EdgeIndex.Optional,
|
|
prev_dependency: EdgeIndex.Optional,
|
|
};
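// Illustrative traversal of the two intrusive linked lists (this mirrors the
// loops used later in this file; `g` is an IncrementalGraph and `file` is a
// FileIndex):
//
//     // every file that `file` imports
//     var it = g.first_import.items[file.get()].unwrap();
//     while (it) |edge_index| {
//         const edge = g.edges.items[edge_index.get()];
//         it = edge.next_import.unwrap();
//         // edge.imported is one of `file`'s imports
//     }
//
//     // every file that imports `file`
//     it = g.first_dep.items[file.get()].unwrap();
//     while (it) |edge_index| {
//         const edge = g.edges.items[edge_index.get()];
//         it = edge.next_dependency.unwrap();
//         // edge.dependency is a file that imports `file`
//     }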
|
|
|
|
/// An index into `bundled_files`, `stale_files`, `first_dep`, `first_import`
|
|
/// Top bits cannot be relied on due to `SerializedFailure.Owner.Packed`
|
|
pub const FileIndex = bun.GenericIndex(u30, File);
|
|
pub const react_refresh_index = if (side == .client) FileIndex.init(0);
|
|
|
|
/// An index into `edges`
|
|
const EdgeIndex = bun.GenericIndex(u32, Edge);
|
|
|
|
pub fn deinit(g: *@This(), allocator: Allocator) void {
|
|
_ = VoidFieldTypes(@This()){
|
|
.bundled_files = {
|
|
for (g.bundled_files.keys(), g.bundled_files.values(), 0..) |k, v, i| {
|
|
allocator.free(k);
|
|
if (side == .client)
|
|
g.freeFileContent(.init(@intCast(i)), k, v, .ignore_css);
|
|
}
|
|
g.bundled_files.deinit(allocator);
|
|
},
|
|
.source_maps = if (side == .client)
|
|
g.source_maps.deinit(allocator),
|
|
.stale_files = g.stale_files.deinit(allocator),
|
|
.first_dep = g.first_dep.deinit(allocator),
|
|
.first_import = g.first_import.deinit(allocator),
|
|
.edges = g.edges.deinit(allocator),
|
|
.edges_free_list = g.edges_free_list.deinit(allocator),
|
|
.current_chunk_len = {},
|
|
.current_chunk_parts = g.current_chunk_parts.deinit(allocator),
|
|
.current_css_files = if (side == .client) g.current_css_files.deinit(allocator),
|
|
};
|
|
}
|
|
|
|
/// Does NOT count @sizeOf(@This())
|
|
pub fn memoryCost(g: *@This()) usize {
|
|
var cost: usize = 0;
|
|
cost += memoryCostArrayHashMap(g.bundled_files);
|
|
cost += g.stale_files.bytes().len;
|
|
cost += memoryCostArrayList(g.first_dep);
|
|
cost += memoryCostArrayList(g.first_import);
|
|
cost += memoryCostArrayList(g.edges);
|
|
cost += memoryCostArrayList(g.edges_free_list);
|
|
cost += memoryCostArrayList(g.current_chunk_parts);
|
|
|
|
if (side == .client) {
|
|
cost += memoryCostArrayList(g.source_maps);
|
|
cost += memoryCostArrayList(g.current_css_files);
|
|
}
|
|
|
|
return cost;
|
|
}
|
|
|
|
fn getFileIndex(g: *@This(), path: []const u8) ?FileIndex {
|
|
return if (g.bundled_files.getIndex(path)) |i| FileIndex.init(@intCast(i)) else null;
|
|
}
|
|
|
|
/// Prefer calling .values() and indexing manually if accessing more than one
|
|
pub fn getFileByIndex(g: *@This(), index: FileIndex) File {
|
|
return g.bundled_files.values()[index.get()];
|
|
}
|
|
|
|
pub fn htmlRouteBundleIndex(g: *@This(), index: FileIndex) RouteBundle.Index {
|
|
if (Environment.allow_assert) {
|
|
assert(g.bundled_files.values()[index.get()].flags.is_html_route);
|
|
}
|
|
return .init(@intCast((g.source_maps.items[index.get()].extra.empty.html_bundle_route_index.unwrap() orelse
|
|
@panic("Internal assertion failure: HTML bundle not registered correctly")).get()));
|
|
}
|
|
|
|
/// Tracks a bundled code chunk for cross-bundle chunks,
|
|
/// ensuring it has an entry in `bundled_files`.
|
|
///
|
|
/// For client, takes ownership of the code slice (must be default allocated)
|
|
///
|
|
/// For server, the code is temporarily kept in the
|
|
/// `current_chunk_parts` array, where it must live until
|
|
/// takeJSBundle is called. Then it can be freed.
|
|
pub fn receiveChunk(
|
|
g: *@This(),
|
|
ctx: *HotUpdateContext,
|
|
index: bun.JSAst.Index,
|
|
content: union(enum) {
|
|
js: struct {
|
|
code: []const u8,
|
|
source_map: SourceMap.Chunk,
|
|
quoted_contents: bun.ptr.CowString,
|
|
},
|
|
css: u64,
|
|
},
|
|
is_ssr_graph: bool,
|
|
) !void {
|
|
const dev = g.owner();
|
|
dev.graph_safety_lock.assertLocked();
|
|
|
|
const path = ctx.sources[index.get()].path;
|
|
const key = path.keyForIncrementalGraph();
|
|
|
|
if (Environment.allow_assert) {
|
|
switch (content) {
|
|
.css => {},
|
|
.js => |js| if (bun.strings.isAllWhitespace(js.code)) {
|
|
// Should at least contain the function wrapper
|
|
bun.Output.panic("Empty chunk is impossible: {s} {s}", .{
|
|
key,
|
|
switch (side) {
|
|
.client => "client",
|
|
.server => if (is_ssr_graph) "ssr" else "server",
|
|
},
|
|
});
|
|
},
|
|
}
|
|
}
|
|
|
|
// Dump to filesystem if enabled
|
|
if (bun.FeatureFlags.bake_debugging_features and content == .js) if (dev.dump_dir) |dump_dir| {
|
|
dumpBundleForChunk(dev, dump_dir, side, key, content.js.code, true, is_ssr_graph);
|
|
};
|
|
|
|
const gop = try g.bundled_files.getOrPut(dev.allocator, key);
|
|
const file_index = FileIndex.init(@intCast(gop.index));
|
|
|
|
if (!gop.found_existing) {
|
|
gop.key_ptr.* = try dev.allocator.dupe(u8, key);
|
|
try g.first_dep.append(dev.allocator, .none);
|
|
try g.first_import.append(dev.allocator, .none);
|
|
if (side == .client) {
|
|
try g.source_maps.append(dev.allocator, .empty);
|
|
}
|
|
}
|
|
|
|
if (g.stale_files.bit_length > gop.index) {
|
|
g.stale_files.unset(gop.index);
|
|
}
|
|
|
|
ctx.getCachedIndex(side, index).* = .init(FileIndex.init(@intCast(gop.index)));
|
|
|
|
switch (side) {
|
|
.client => {
|
|
var flags: File.Flags = .{
|
|
.failed = false,
|
|
.is_hmr_root = ctx.server_to_client_bitset.isSet(index.get()),
|
|
.is_special_framework_file = false,
|
|
.is_html_route = false,
|
|
.is_css_root = content == .css, // non-root CSS files never get registered in this function
|
|
.kind = switch (content) {
|
|
.js => if (ctx.loaders[index.get()].isJavaScriptLike()) .js else .asset,
|
|
.css => .css,
|
|
},
|
|
};
|
|
if (gop.found_existing) {
|
|
// Free the original content
|
|
g.freeFileContent(file_index, key, gop.value_ptr.*, .ignore_css);
|
|
|
|
// Free a failure if it exists
|
|
if (gop.value_ptr.flags.failed) {
|
|
const kv = dev.bundling_failures.fetchSwapRemoveAdapted(
|
|
SerializedFailure.Owner{ .client = file_index },
|
|
SerializedFailure.ArrayHashAdapter{},
|
|
) orelse
|
|
Output.panic("Missing SerializedFailure in IncrementalGraph", .{});
|
|
try dev.incremental_result.failures_removed.append(
|
|
dev.allocator,
|
|
kv.key,
|
|
);
|
|
}
|
|
|
|
// Persist some flags
|
|
flags.is_special_framework_file = gop.value_ptr.flags.is_special_framework_file;
|
|
flags.is_html_route = gop.value_ptr.flags.is_html_route;
|
|
}
|
|
switch (content) {
|
|
.css => |css| gop.value_ptr.* = .initCSS(css, flags),
|
|
.js => |js| {
|
|
dev.allocation_scope.assertOwned(js.code);
|
|
gop.value_ptr.* = .initJavaScript(js.code, flags);
|
|
|
|
// Append source map
|
|
if (js.source_map.buffer.len() > 0) {
|
|
dev.allocation_scope.assertOwned(js.source_map.buffer.list.items);
|
|
if (js.quoted_contents.flags.is_owned)
|
|
dev.allocation_scope.assertOwned(js.quoted_contents.slice());
|
|
g.source_maps.items[file_index.get()] = try .fromNonEmptySourceMap(js.source_map, js.quoted_contents);
|
|
} else {
|
|
js.quoted_contents.deinit(dev.allocator);
|
|
}
|
|
// Track JavaScript chunks for concatenation
|
|
try g.current_chunk_parts.append(dev.allocator, file_index);
|
|
g.current_chunk_len += js.code.len;
|
|
},
|
|
}
|
|
},
|
|
.server => {
|
|
if (!gop.found_existing) {
|
|
const client_component_boundary = ctx.server_to_client_bitset.isSet(index.get());
|
|
|
|
gop.value_ptr.* = .{
|
|
.is_rsc = !is_ssr_graph,
|
|
.is_ssr = is_ssr_graph,
|
|
.is_route = false,
|
|
.is_client_component_boundary = client_component_boundary,
|
|
.failed = false,
|
|
.kind = switch (content) {
|
|
.js => .js,
|
|
.css => .css,
|
|
},
|
|
};
|
|
|
|
if (client_component_boundary) {
|
|
try dev.incremental_result.client_components_added.append(dev.allocator, file_index);
|
|
}
|
|
} else {
|
|
gop.value_ptr.kind = switch (content) {
|
|
.js => .js,
|
|
.css => .css,
|
|
};
|
|
|
|
if (is_ssr_graph) {
|
|
gop.value_ptr.is_ssr = true;
|
|
} else {
|
|
gop.value_ptr.is_rsc = true;
|
|
}
|
|
|
|
if (ctx.server_to_client_bitset.isSet(index.get())) {
|
|
gop.value_ptr.is_client_component_boundary = true;
|
|
try dev.incremental_result.client_components_added.append(dev.allocator, file_index);
|
|
} else if (gop.value_ptr.is_client_component_boundary) {
|
|
const client_graph = &g.owner().client_graph;
|
|
const client_index = client_graph.getFileIndex(gop.key_ptr.*) orelse
|
|
Output.panic("Client graph's SCB was already deleted", .{});
|
|
client_graph.disconnectAndDeleteFile(client_index);
|
|
gop.value_ptr.is_client_component_boundary = false;
|
|
|
|
try dev.incremental_result.client_components_removed.append(dev.allocator, file_index);
|
|
}
|
|
|
|
if (gop.value_ptr.failed) {
|
|
gop.value_ptr.failed = false;
|
|
const kv = dev.bundling_failures.fetchSwapRemoveAdapted(
|
|
SerializedFailure.Owner{ .server = file_index },
|
|
SerializedFailure.ArrayHashAdapter{},
|
|
) orelse
|
|
Output.panic("Missing failure in IncrementalGraph", .{});
|
|
try dev.incremental_result.failures_removed.append(
|
|
dev.allocator,
|
|
kv.key,
|
|
);
|
|
}
|
|
}
|
|
if (content == .js) {
|
|
try g.current_chunk_parts.append(dev.allocator, content.js.code);
|
|
g.current_chunk_len += content.js.code.len;
|
|
content.js.quoted_contents.deinit(dev.allocator);
|
|
if (content.js.source_map.buffer.len() > 0) {
|
|
var vlq = content.js.source_map.buffer;
|
|
vlq.deinit();
|
|
}
|
|
}
|
|
},
|
|
}
|
|
}
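// Ownership recap for `receiveChunk` (restating the doc comment above the
// function): on the client, the JS code slice is adopted by the graph and
// later released through `freeFileContent`; on the server, the slice is only
// borrowed into `current_chunk_parts` and must stay alive (in the bundler's
// arena) until `takeJSBundle` concatenates it.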
|
|
|
|
const TempLookup = extern struct {
|
|
edge_index: EdgeIndex,
|
|
seen: bool,
|
|
|
|
const HashTable = AutoArrayHashMapUnmanaged(FileIndex, TempLookup);
|
|
};
|
|
|
|
/// Second pass of IncrementalGraph indexing
|
|
/// - Updates dependency information for each file
|
|
/// - Resolves what the HMR roots are
|
|
pub fn processChunkDependencies(
|
|
g: *@This(),
|
|
ctx: *HotUpdateContext,
|
|
comptime mode: enum { normal, css },
|
|
bundle_graph_index: bun.JSAst.Index,
|
|
temp_alloc: Allocator,
|
|
) bun.OOM!void {
|
|
const log = bun.Output.scoped(.processChunkDependencies, false);
|
|
const file_index: FileIndex = ctx.getCachedIndex(side, bundle_graph_index).*.unwrap() orelse
|
|
@panic("unresolved index"); // do not process for failed chunks
|
|
log("index id={d} {}:", .{
|
|
file_index.get(),
|
|
bun.fmt.quote(g.bundled_files.keys()[file_index.get()]),
|
|
});
|
|
|
|
// Build a map from the existing import list. Later, entries that
|
|
// were not marked as `.seen = true` will be freed.
|
|
var quick_lookup: TempLookup.HashTable = .{};
|
|
defer quick_lookup.deinit(temp_alloc);
|
|
{
|
|
var it: ?EdgeIndex = g.first_import.items[file_index.get()].unwrap();
|
|
while (it) |edge_index| {
|
|
const dep = g.edges.items[edge_index.get()];
|
|
it = dep.next_import.unwrap();
|
|
assert(dep.dependency == file_index);
|
|
try quick_lookup.putNoClobber(temp_alloc, dep.imported, .{
|
|
.seen = false,
|
|
.edge_index = edge_index,
|
|
});
|
|
}
|
|
}
|
|
|
|
// `processChunkImportRecords` appends items into `quick_lookup`,
|
|
// but those entries always have .seen = true. Snapshot the length
|
|
// of original entries so that the new ones can be ignored when
|
|
// removing edges.
|
|
const quick_lookup_values_to_care_len = quick_lookup.count();
|
|
|
|
// A new import linked list is constructed. A side effect of this
|
|
// approach is that the order of the imports is reversed on every
|
|
// save. However, the ordering here doesn't matter.
|
|
var new_imports: EdgeIndex.Optional = .none;
|
|
defer g.first_import.items[file_index.get()] = new_imports;
|
|
|
|
// (CSS chunks are not present on the server side)
|
|
if (mode == .normal and side == .server) {
|
|
if (ctx.server_seen_bit_set.isSet(file_index.get())) return;
|
|
|
|
const file = &g.bundled_files.values()[file_index.get()];
|
|
|
|
// Process both files in the server-components graph at the same
|
|
// time. If they were done separately, the second would detach
|
|
// the edges the first added.
|
|
if (file.is_rsc and file.is_ssr) {
|
|
// The non-ssr file is always first.
|
|
// TODO:
|
|
// const ssr_index = ctx.scbs.getSSRIndex(bundle_graph_index.get()) orelse {
|
|
// @panic("Unexpected missing server-component-boundary entry");
|
|
// };
|
|
// try g.processChunkImportRecords(ctx, &quick_lookup, &new_imports, file_index, bun.JSAst.Index.init(ssr_index));
|
|
}
|
|
}
|
|
|
|
switch (mode) {
|
|
.normal => try g.processChunkImportRecords(ctx, temp_alloc, &quick_lookup, &new_imports, file_index, bundle_graph_index),
|
|
.css => try g.processCSSChunkImportRecords(ctx, temp_alloc, &quick_lookup, &new_imports, file_index, bundle_graph_index),
|
|
}
|
|
|
|
// '.seen = false' means an import was removed and should be freed
|
|
for (quick_lookup.values()[0..quick_lookup_values_to_care_len]) |val| {
|
|
if (!val.seen) {
|
|
g.owner().incremental_result.had_adjusted_edges = true;
|
|
|
|
// Unlink from dependency list. At this point the edge is
|
|
// already detached from the import list.
|
|
g.disconnectEdgeFromDependencyList(val.edge_index);
|
|
|
|
// With no references to this edge, it can be freed
|
|
g.freeEdge(val.edge_index);
|
|
}
|
|
}
|
|
|
|
if (side == .server) {
|
|
// Follow this file to the route to mark it as stale.
|
|
try g.traceDependencies(file_index, ctx.gts, .stop_at_boundary, file_index);
|
|
} else {
|
|
// Follow this file to the HTML route or HMR root to mark the client bundle as stale.
|
|
try g.traceDependencies(file_index, ctx.gts, .stop_at_boundary, file_index);
|
|
}
|
|
}
|
|
|
|
fn disconnectEdgeFromDependencyList(g: *@This(), edge_index: EdgeIndex) void {
|
|
const edge = &g.edges.items[edge_index.get()];
|
|
igLog("detach edge={d} | id={d} {} -> id={d} {}", .{
|
|
edge_index.get(),
|
|
edge.dependency.get(),
|
|
bun.fmt.quote(g.bundled_files.keys()[edge.dependency.get()]),
|
|
edge.imported.get(),
|
|
bun.fmt.quote(g.bundled_files.keys()[edge.imported.get()]),
|
|
});
|
|
if (edge.prev_dependency.unwrap()) |prev| {
|
|
const prev_dependency = &g.edges.items[prev.get()];
|
|
prev_dependency.next_dependency = edge.next_dependency;
|
|
} else {
|
|
assert_eql(g.first_dep.items[edge.imported.get()].unwrap(), edge_index);
|
|
g.first_dep.items[edge.imported.get()] = .none;
|
|
}
|
|
if (edge.next_dependency.unwrap()) |next| {
|
|
const next_dependency = &g.edges.items[next.get()];
|
|
next_dependency.prev_dependency = edge.prev_dependency;
|
|
}
|
|
}
|
|
|
|
fn processCSSChunkImportRecords(
|
|
g: *@This(),
|
|
ctx: *HotUpdateContext,
|
|
temp_alloc: Allocator,
|
|
quick_lookup: *TempLookup.HashTable,
|
|
new_imports: *EdgeIndex.Optional,
|
|
file_index: FileIndex,
|
|
bundler_index: bun.JSAst.Index,
|
|
) !void {
|
|
bun.assert(bundler_index.isValid());
|
|
bun.assert(ctx.loaders[bundler_index.get()].isCSS());
|
|
|
|
var sfb = std.heap.stackFallback(@sizeOf(bun.JSAst.Index) * 64, temp_alloc);
|
|
const queue_alloc = sfb.get();
|
|
|
|
// This queue avoids stack overflow.
|
|
// Infinite loop is prevented by the tracing bits in `processEdgeAttachment`.
|
|
var queue: ArrayListUnmanaged(bun.JSAst.Index) = .empty;
|
|
defer queue.deinit(queue_alloc);
|
|
|
|
for (ctx.import_records[bundler_index.get()].slice()) |import_record| {
|
|
const result = try processEdgeAttachment(g, ctx, temp_alloc, quick_lookup, new_imports, file_index, import_record, .css);
|
|
if (result == .@"continue" and import_record.source_index.isValid()) {
|
|
try queue.append(queue_alloc, import_record.source_index);
|
|
}
|
|
}
|
|
|
|
while (queue.pop()) |index| {
|
|
for (ctx.import_records[index.get()].slice()) |import_record| {
|
|
const result = try processEdgeAttachment(g, ctx, temp_alloc, quick_lookup, new_imports, file_index, import_record, .css);
|
|
if (result == .@"continue" and import_record.source_index.isValid()) {
|
|
try queue.append(queue_alloc, import_record.source_index);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
fn processEdgeAttachment(
|
|
g: *@This(),
|
|
ctx: *HotUpdateContext,
|
|
temp_alloc: Allocator,
|
|
quick_lookup: *TempLookup.HashTable,
|
|
new_imports: *EdgeIndex.Optional,
|
|
file_index: FileIndex,
|
|
import_record: bun.ImportRecord,
|
|
comptime mode: enum {
|
|
js_or_html,
|
|
/// When set, the graph tracing state bits are used to prevent
|
|
/// infinite recursion. This is only done for CSS, since it:
|
|
/// - Recursively processes its imports
|
|
/// - Does not use its tracing bits for anything else
|
|
css,
|
|
},
|
|
) bun.OOM!enum { @"continue", stop } {
|
|
const log = bun.Output.scoped(.processEdgeAttachment, false);
|
|
|
|
// When an import record is duplicated, it gets marked unused.
|
|
// This happens in `ConvertESMExportsForHmr.deduplicatedImport`
|
|
// There is still a case where deduplication must happen.
|
|
if (import_record.is_unused) return .stop;
|
|
if (import_record.source_index.isRuntime()) return .stop;
|
|
|
|
const key = import_record.path.keyForIncrementalGraph();
|
|
log("processEdgeAttachment({s}, {})", .{ key, import_record.source_index });
|
|
|
|
// Attempt to locate the FileIndex from bundle_v2's Source.Index
|
|
const imported_file_index: FileIndex, const kind = brk: {
|
|
if (import_record.source_index.isValid()) {
|
|
const kind: FileKind = if (mode == .css)
|
|
switch (ctx.loaders[import_record.source_index.get()]) {
|
|
.css => .css,
|
|
else => .asset,
|
|
};
|
|
if (ctx.getCachedIndex(side, import_record.source_index).*.unwrap()) |i| {
|
|
break :brk .{ i, kind };
|
|
} else if (mode == .css) {
|
|
const index = (try g.insertEmpty(key, kind)).index;
|
|
// TODO: make this more clear that:
|
|
// temp_alloc == bv2.graph.allocator
|
|
try ctx.gts.resize(side, temp_alloc, index.get() + 1);
|
|
break :brk .{ index, kind };
|
|
}
|
|
}
|
|
|
|
break :brk switch (mode) {
|
|
// All invalid source indices are external URLs that cannot be watched.
|
|
.css => return .stop,
|
|
// Check IncrementalGraph to find a file from a prior build.
|
|
.js_or_html => .{
|
|
.init(@intCast(
|
|
g.bundled_files.getIndex(key) orelse
|
|
// Not tracked in IncrementalGraph. This can be hit for
|
|
// certain external files.
|
|
return .@"continue",
|
|
)),
|
|
{},
|
|
},
|
|
};
|
|
};
|
|
|
|
if (Environment.isDebug) {
|
|
bun.assert(imported_file_index.get() < g.bundled_files.count());
|
|
}
|
|
|
|
// For CSS files visiting other CSS files, prevent infinite
|
|
// recursion. CSS files visiting assets cannot cause recursion
|
|
// since assets cannot import other files.
|
|
if (mode == .css and kind == .css) {
|
|
if (ctx.gts.bits(side).isSet(imported_file_index.get()))
|
|
return .stop;
|
|
ctx.gts.bits(side).set(imported_file_index.get());
|
|
}
|
|
|
|
const gop = try quick_lookup.getOrPut(temp_alloc, imported_file_index);
|
|
if (gop.found_existing) {
|
|
// If the edge has already been seen, it will be skipped
|
|
// to ensure duplicate edges never exist.
|
|
if (gop.value_ptr.seen) return .@"continue";
|
|
const lookup = gop.value_ptr;
|
|
lookup.seen = true;
|
|
const dep = &g.edges.items[lookup.edge_index.get()];
|
|
dep.next_import = new_imports.*;
|
|
new_imports.* = lookup.edge_index.toOptional();
|
|
} else {
|
|
// A new edge is needed to represent the dependency and import.
|
|
const first_dep = &g.first_dep.items[imported_file_index.get()];
|
|
const edge = try g.newEdge(.{
|
|
.next_import = new_imports.*,
|
|
.next_dependency = first_dep.*,
|
|
.prev_dependency = .none,
|
|
.imported = imported_file_index,
|
|
.dependency = file_index,
|
|
});
|
|
if (first_dep.*.unwrap()) |dep| {
|
|
g.edges.items[dep.get()].prev_dependency = edge.toOptional();
|
|
}
|
|
new_imports.* = edge.toOptional();
|
|
first_dep.* = edge.toOptional();
|
|
|
|
g.owner().incremental_result.had_adjusted_edges = true;
|
|
|
|
// To prevent duplicates, add into the quick lookup map
|
|
// the file index so that it does exist.
|
|
gop.value_ptr.* = .{
|
|
.edge_index = edge,
|
|
.seen = true,
|
|
};
|
|
|
|
log("attach edge={d} | id={d} {} -> id={d} {}", .{
|
|
edge.get(),
|
|
file_index.get(),
|
|
bun.fmt.quote(g.bundled_files.keys()[file_index.get()]),
|
|
imported_file_index.get(),
|
|
bun.fmt.quote(g.bundled_files.keys()[imported_file_index.get()]),
|
|
});
|
|
}
|
|
|
|
return .@"continue";
|
|
}
|
|
|
|
fn processChunkImportRecords(
|
|
g: *@This(),
|
|
ctx: *HotUpdateContext,
|
|
temp_alloc: Allocator,
|
|
quick_lookup: *TempLookup.HashTable,
|
|
new_imports: *EdgeIndex.Optional,
|
|
file_index: FileIndex,
|
|
index: bun.JSAst.Index,
|
|
) !void {
|
|
bun.assert(index.isValid());
|
|
// don't call this function for CSS sources
|
|
bun.assert(ctx.loaders[index.get()] != .css);
|
|
|
|
const log = bun.Output.scoped(.processChunkDependencies, false);
|
|
for (ctx.import_records[index.get()].slice()) |import_record| {
|
|
// When an import record is duplicated, it gets marked unused.
|
|
// This happens in `ConvertESMExportsForHmr.deduplicatedImport`
|
|
// There is still a case where deduplication must happen.
|
|
if (import_record.is_unused) continue;
|
|
|
|
if (!import_record.source_index.isRuntime()) try_index_record: {
|
|
// TODO: move this block into a function
|
|
const key = import_record.path.keyForIncrementalGraph();
|
|
const imported_file_index: FileIndex = brk: {
|
|
if (import_record.source_index.isValid()) {
|
|
if (ctx.getCachedIndex(side, import_record.source_index).*.unwrap()) |i| {
|
|
break :brk i;
|
|
}
|
|
}
|
|
break :brk .init(@intCast(
|
|
g.bundled_files.getIndex(key) orelse
|
|
break :try_index_record,
|
|
));
|
|
};
|
|
|
|
if (Environment.isDebug) {
|
|
bun.assert(imported_file_index.get() < g.bundled_files.count());
|
|
}
|
|
|
|
const gop = try quick_lookup.getOrPut(temp_alloc, imported_file_index);
|
|
if (gop.found_existing) {
|
|
// If the edge has already been seen, it will be skipped
|
|
// to ensure duplicate edges never exist.
|
|
if (gop.value_ptr.seen) continue;
|
|
const lookup = gop.value_ptr;
|
|
lookup.seen = true;
|
|
const dep = &g.edges.items[lookup.edge_index.get()];
|
|
dep.next_import = new_imports.*;
|
|
new_imports.* = lookup.edge_index.toOptional();
|
|
} else {
|
|
// A new edge is needed to represent the dependency and import.
|
|
const first_dep = &g.first_dep.items[imported_file_index.get()];
|
|
const edge = try g.newEdge(.{
|
|
.next_import = new_imports.*,
|
|
.next_dependency = first_dep.*,
|
|
.prev_dependency = .none,
|
|
.imported = imported_file_index,
|
|
.dependency = file_index,
|
|
});
|
|
if (first_dep.*.unwrap()) |dep| {
|
|
g.edges.items[dep.get()].prev_dependency = edge.toOptional();
|
|
}
|
|
new_imports.* = edge.toOptional();
|
|
first_dep.* = edge.toOptional();
|
|
|
|
g.owner().incremental_result.had_adjusted_edges = true;
|
|
|
|
// To prevent duplicates, add into the quick lookup map
|
|
// the file index so that it does exist.
|
|
gop.value_ptr.* = .{
|
|
.edge_index = edge,
|
|
.seen = true,
|
|
};
|
|
|
|
log("attach edge={d} | id={d} {} -> id={d} {}", .{
|
|
edge.get(),
|
|
file_index.get(),
|
|
bun.fmt.quote(g.bundled_files.keys()[file_index.get()]),
|
|
imported_file_index.get(),
|
|
bun.fmt.quote(g.bundled_files.keys()[imported_file_index.get()]),
|
|
});
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
const TraceDependencyGoal = enum {
|
|
stop_at_boundary,
|
|
no_stop,
|
|
};
|
|
|
|
fn traceDependencies(
|
|
g: *@This(),
|
|
file_index: FileIndex,
|
|
gts: *GraphTraceState,
|
|
goal: TraceDependencyGoal,
|
|
from_file_index: FileIndex,
|
|
) !void {
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
|
|
if (Environment.enable_logs) {
|
|
igLog("traceDependencies(.{s}, {}{s})", .{
|
|
@tagName(side),
|
|
bun.fmt.quote(g.bundled_files.keys()[file_index.get()]),
|
|
if (gts.bits(side).isSet(file_index.get())) " [already visited]" else "",
|
|
});
|
|
}
|
|
|
|
if (gts.bits(side).isSet(file_index.get()))
|
|
return;
|
|
gts.bits(side).set(file_index.get());
|
|
|
|
const file = g.bundled_files.values()[file_index.get()];
|
|
|
|
switch (side) {
|
|
.server => {
|
|
const dev = g.owner();
|
|
if (file.is_route) {
|
|
const route_index = dev.route_lookup.get(file_index) orelse
|
|
Output.panic("Route not in lookup index: {d} {}", .{ file_index.get(), bun.fmt.quote(g.bundled_files.keys()[file_index.get()]) });
|
|
igLog("\\<- Route", .{});
|
|
|
|
try dev.incremental_result.framework_routes_affected.append(dev.allocator, route_index);
|
|
}
|
|
if (file.is_client_component_boundary) {
|
|
try dev.incremental_result.client_components_affected.append(dev.allocator, file_index);
|
|
}
|
|
},
|
|
.client => {
|
|
const dev = g.owner();
|
|
if (file.flags.is_hmr_root) {
|
|
const key = g.bundled_files.keys()[file_index.get()];
|
|
const index = dev.server_graph.getFileIndex(key) orelse
|
|
Output.panic("Server Incremental Graph is missing component for {}", .{bun.fmt.quote(key)});
|
|
try dev.server_graph.traceDependencies(index, gts, goal, index);
|
|
} else if (file.flags.is_html_route) {
|
|
const route_bundle_index = dev.client_graph.htmlRouteBundleIndex(file_index);
|
|
|
|
// If the HTML file itself was modified, or an asset was
|
|
// modified, this must be a hard reload. Otherwise just
|
|
// invalidate the script tag.
|
|
const list = if (from_file_index == file_index or
|
|
g.bundled_files.values()[from_file_index.get()].flags.kind == .asset)
|
|
&dev.incremental_result.html_routes_hard_affected
|
|
else
|
|
&dev.incremental_result.html_routes_soft_affected;
|
|
|
|
try list.append(dev.allocator, route_bundle_index);
|
|
|
|
if (goal == .stop_at_boundary)
|
|
return;
|
|
}
|
|
},
|
|
}
|
|
|
|
// Certain files do not propagate updates to dependencies.
|
|
// This is how updating a client component doesn't cause
|
|
// a server-side reload.
|
|
if (goal == .stop_at_boundary) {
|
|
if (file.stopsDependencyTrace()) {
|
|
igLog("\\<- this file stops propagation", .{});
|
|
return;
|
|
}
|
|
}
|
|
|
|
// Recurse
|
|
var it: ?EdgeIndex = g.first_dep.items[file_index.get()].unwrap();
|
|
while (it) |dep_index| {
|
|
const edge = g.edges.items[dep_index.get()];
|
|
it = edge.next_dependency.unwrap();
|
|
try g.traceDependencies(
|
|
edge.dependency,
|
|
gts,
|
|
goal,
|
|
file_index,
|
|
);
|
|
}
|
|
}
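// Direction cheat sheet for the two tracing functions (a restatement of the
// logic above, not new behavior): `traceDependencies` walks "up" the graph
// through `first_dep` (who imports me?) to find the routes and HMR roots a
// changed file invalidates, while `traceImports` below walks "down" through
// `first_import` (what do I import?) to re-materialize a route's bundle or
// collect its CSS and errors.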
|
|
|
|
fn traceImports(g: *@This(), file_index: FileIndex, gts: *GraphTraceState, comptime goal: TraceImportGoal) !void {
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
|
|
if (Environment.enable_logs) {
|
|
igLog("traceImports(.{s}, .{s}, {}{s})", .{
|
|
@tagName(side),
|
|
@tagName(goal),
|
|
bun.fmt.quote(g.bundled_files.keys()[file_index.get()]),
|
|
if (gts.bits(side).isSet(file_index.get())) " [already visited]" else "",
|
|
});
|
|
}
|
|
|
|
if (gts.bits(side).isSet(file_index.get()))
|
|
return;
|
|
gts.bits(side).set(file_index.get());
|
|
|
|
const file = g.bundled_files.values()[file_index.get()];
|
|
|
|
switch (side) {
|
|
.server => {
|
|
if (file.is_client_component_boundary or file.kind == .css) {
|
|
const dev = g.owner();
|
|
const key = g.bundled_files.keys()[file_index.get()];
|
|
const index = dev.client_graph.getFileIndex(key) orelse
|
|
Output.panic("Client Incremental Graph is missing component for {}", .{bun.fmt.quote(key)});
|
|
try dev.client_graph.traceImports(index, gts, goal);
|
|
|
|
if (Environment.isDebug and file.kind == .css) {
|
|
// Server CSS files never have imports. They are
|
|
// purely a reference to the client graph.
|
|
bun.assert(g.first_import.items[file_index.get()] == .none);
|
|
}
|
|
}
|
|
if (goal == .find_errors and file.failed) {
|
|
const fail = g.owner().bundling_failures.getKeyAdapted(
|
|
SerializedFailure.Owner{ .server = file_index },
|
|
SerializedFailure.ArrayHashAdapter{},
|
|
) orelse
|
|
@panic("Failed to get bundling failure");
|
|
try g.owner().incremental_result.failures_added.append(g.owner().allocator, fail);
|
|
}
|
|
},
|
|
.client => {
|
|
if (file.flags.kind == .css) {
|
|
// It is only possible to find CSS roots by tracing.
|
|
bun.debugAssert(file.flags.is_css_root);
|
|
|
|
if (goal == .find_css) {
|
|
try g.current_css_files.append(g.owner().allocator, file.cssAssetId());
|
|
}
|
|
|
|
// See the comment on `is_css_root` on how CSS roots
|
|
// have a slightly different meaning for their assets.
|
|
// Regardless, CSS can't import JS, so this trace is done.
|
|
return;
|
|
}
|
|
|
|
if (goal == .find_client_modules) {
|
|
try g.current_chunk_parts.append(g.owner().allocator, file_index);
|
|
g.current_chunk_len += file.code_len;
|
|
}
|
|
|
|
if (goal == .find_errors and file.flags.failed) {
|
|
const fail = g.owner().bundling_failures.getKeyAdapted(
|
|
SerializedFailure.Owner{ .client = file_index },
|
|
SerializedFailure.ArrayHashAdapter{},
|
|
) orelse
|
|
@panic("Failed to get bundling failure");
|
|
try g.owner().incremental_result.failures_added.append(g.owner().allocator, fail);
|
|
return;
|
|
}
|
|
},
|
|
}
|
|
|
|
// Recurse
|
|
var it: ?EdgeIndex = g.first_import.items[file_index.get()].unwrap();
|
|
while (it) |dep_index| {
|
|
const edge = g.edges.items[dep_index.get()];
|
|
it = edge.next_import.unwrap();
|
|
try g.traceImports(edge.imported, gts, goal);
|
|
}
|
|
}
|
|
|
|
/// Never takes ownership of `abs_path`
|
|
/// Marks a chunk without any content. Used to track dependencies on files that don't exist.
|
|
pub fn insertStale(g: *@This(), abs_path: []const u8, is_ssr_graph: bool) bun.OOM!FileIndex {
|
|
return g.insertStaleExtra(abs_path, is_ssr_graph, false);
|
|
}
|
|
|
|
pub fn insertStaleExtra(g: *@This(), abs_path: []const u8, is_ssr_graph: bool, is_route: bool) bun.OOM!FileIndex {
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
const dev_allocator = g.owner().allocator;
|
|
|
|
debug.log("Insert stale: {s}", .{abs_path});
|
|
const gop = try g.bundled_files.getOrPut(dev_allocator, abs_path);
|
|
const file_index = FileIndex.init(@intCast(gop.index));
|
|
|
|
if (!gop.found_existing) {
|
|
gop.key_ptr.* = try dev_allocator.dupe(u8, abs_path);
|
|
try g.first_dep.append(dev_allocator, .none);
|
|
try g.first_import.append(dev_allocator, .none);
|
|
if (side == .client)
|
|
try g.source_maps.append(dev_allocator, .empty);
|
|
} else {
|
|
if (side == .server) {
|
|
if (is_route) gop.value_ptr.*.is_route = true;
|
|
}
|
|
}
|
|
|
|
if (g.stale_files.bit_length > gop.index) {
|
|
g.stale_files.set(gop.index);
|
|
}
|
|
|
|
switch (side) {
|
|
.client => {
|
|
var flags: File.Flags = .{
|
|
.failed = false,
|
|
.is_hmr_root = false,
|
|
.is_special_framework_file = false,
|
|
.is_html_route = is_route,
|
|
.is_css_root = false,
|
|
.kind = .unknown,
|
|
};
|
|
if (gop.found_existing) {
|
|
g.freeFileContent(file_index, gop.key_ptr.*, gop.value_ptr.*, .unref_css);
|
|
|
|
flags.is_html_route = flags.is_html_route or gop.value_ptr.flags.is_html_route;
|
|
flags.failed = gop.value_ptr.flags.failed;
|
|
flags.is_special_framework_file = gop.value_ptr.flags.is_special_framework_file;
|
|
flags.is_hmr_root = gop.value_ptr.flags.is_hmr_root;
|
|
flags.is_css_root = gop.value_ptr.flags.is_css_root;
|
|
}
|
|
gop.value_ptr.* = File.initUnknown(flags);
|
|
},
|
|
.server => {
|
|
if (!gop.found_existing) {
|
|
gop.value_ptr.* = .{
|
|
.is_rsc = !is_ssr_graph,
|
|
.is_ssr = is_ssr_graph,
|
|
.is_route = is_route,
|
|
.is_client_component_boundary = false,
|
|
.failed = false,
|
|
.kind = .unknown,
|
|
};
|
|
} else if (is_ssr_graph) {
|
|
gop.value_ptr.is_ssr = true;
|
|
} else {
|
|
gop.value_ptr.is_rsc = true;
|
|
}
|
|
},
|
|
}
|
|
|
|
return file_index;
|
|
}
|
|
|
|
/// Returns the key that was inserted.
|
|
pub fn insertEmpty(g: *@This(), abs_path: []const u8, kind: FileKind) bun.OOM!struct {
|
|
index: FileIndex,
|
|
key: []const u8,
|
|
} {
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
const dev_allocator = g.owner().allocator;
|
|
const gop = try g.bundled_files.getOrPut(dev_allocator, abs_path);
|
|
if (!gop.found_existing) {
|
|
gop.key_ptr.* = try dev_allocator.dupe(u8, abs_path);
|
|
gop.value_ptr.* = switch (side) {
|
|
.client => File.initUnknown(.{
|
|
.failed = false,
|
|
.is_hmr_root = false,
|
|
.is_special_framework_file = false,
|
|
.is_html_route = false,
|
|
.is_css_root = false,
|
|
.kind = kind,
|
|
}),
|
|
.server => .{
|
|
.is_rsc = false,
|
|
.is_ssr = false,
|
|
.is_route = false,
|
|
.is_client_component_boundary = false,
|
|
.failed = false,
|
|
.kind = kind,
|
|
},
|
|
};
|
|
try g.first_dep.append(dev_allocator, .none);
|
|
try g.first_import.append(dev_allocator, .none);
|
|
if (side == .client)
|
|
try g.source_maps.append(dev_allocator, .empty);
|
|
try g.ensureStaleBitCapacity(true);
|
|
}
|
|
return .{ .index = .init(@intCast(gop.index)), .key = gop.key_ptr.* };
|
|
}
|
|
|
|
/// Server CSS files exist only as targets for graph traversal.
/// Their content lives only on the client.
|
|
pub fn insertCssFileOnServer(g: *@This(), ctx: *HotUpdateContext, index: bun.JSAst.Index, abs_path: []const u8) bun.OOM!void {
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
const dev_allocator = g.owner().allocator;
|
|
|
|
debug.log("Insert stale: {s}", .{abs_path});
|
|
const gop = try g.bundled_files.getOrPut(dev_allocator, abs_path);
|
|
const file_index: FileIndex = .init(@intCast(gop.index));
|
|
|
|
if (!gop.found_existing) {
|
|
gop.key_ptr.* = try dev_allocator.dupe(u8, abs_path);
|
|
try g.first_dep.append(dev_allocator, .none);
|
|
try g.first_import.append(dev_allocator, .none);
|
|
}
|
|
|
|
switch (side) {
|
|
.client => @compileError("not implemented: use receiveChunk"),
|
|
.server => gop.value_ptr.* = .{
|
|
.is_rsc = false,
|
|
.is_ssr = false,
|
|
.is_route = false,
|
|
.is_client_component_boundary = false,
|
|
.failed = false,
|
|
.kind = .css,
|
|
},
|
|
}
|
|
|
|
ctx.getCachedIndex(.server, index).* = .init(file_index);
|
|
}
|
|
|
|
pub fn insertFailure(
|
|
g: *@This(),
|
|
comptime mode: enum { abs_path, index },
|
|
key: switch (mode) {
|
|
.abs_path => []const u8,
|
|
.index => FileIndex,
|
|
},
|
|
log: *const Log,
|
|
is_ssr_graph: bool,
|
|
) bun.OOM!void {
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
|
|
const dev_allocator = g.owner().allocator;
|
|
|
|
const Gop = std.StringArrayHashMapUnmanaged(File).GetOrPutResult;
|
|
// found_existing is destructured separately so that it is
|
|
// comptime-known true when mode == .index
|
|
const gop: Gop, const found_existing, const file_index = switch (mode) {
|
|
.abs_path => brk: {
|
|
const gop = try g.bundled_files.getOrPut(dev_allocator, key);
|
|
break :brk .{ gop, gop.found_existing, FileIndex.init(@intCast(gop.index)) };
|
|
},
|
|
// When given an index, no fetch is needed.
|
|
.index => brk: {
|
|
const slice = g.bundled_files.entries.slice();
|
|
break :brk .{
|
|
.{
|
|
.key_ptr = &slice.items(.key)[key.get()],
|
|
.value_ptr = &slice.items(.value)[key.get()],
|
|
.found_existing = true,
|
|
.index = key.get(),
|
|
},
|
|
true,
|
|
key,
|
|
};
|
|
},
|
|
};
|
|
|
|
if (!found_existing) {
|
|
comptime assert(mode == .abs_path);
|
|
gop.key_ptr.* = try dev_allocator.dupe(u8, key);
|
|
try g.first_dep.append(dev_allocator, .none);
|
|
try g.first_import.append(dev_allocator, .none);
|
|
if (side == .client)
|
|
try g.source_maps.append(dev_allocator, .empty);
|
|
}
|
|
|
|
try g.ensureStaleBitCapacity(true);
|
|
g.stale_files.set(gop.index);
|
|
|
|
switch (side) {
|
|
.client => {
|
|
var flags: File.Flags = .{
|
|
.failed = true,
|
|
.is_hmr_root = false,
|
|
.is_special_framework_file = false,
|
|
.is_html_route = false,
|
|
.is_css_root = false,
|
|
.kind = .unknown,
|
|
};
|
|
if (found_existing) {
|
|
g.freeFileContent(file_index, gop.key_ptr.*, gop.value_ptr.*, .unref_css);
|
|
flags.is_html_route = gop.value_ptr.flags.is_html_route;
|
|
flags.is_special_framework_file = gop.value_ptr.flags.is_special_framework_file;
|
|
flags.is_hmr_root = gop.value_ptr.flags.is_hmr_root;
|
|
flags.is_css_root = gop.value_ptr.flags.is_css_root;
|
|
}
|
|
gop.value_ptr.* = File.initUnknown(flags);
|
|
},
|
|
.server => {
|
|
if (!gop.found_existing) {
|
|
gop.value_ptr.* = .{
|
|
.is_rsc = !is_ssr_graph,
|
|
.is_ssr = is_ssr_graph,
|
|
.is_route = false,
|
|
.is_client_component_boundary = false,
|
|
.failed = true,
|
|
.kind = .unknown,
|
|
};
|
|
} else {
|
|
if (is_ssr_graph) {
|
|
gop.value_ptr.is_ssr = true;
|
|
} else {
|
|
gop.value_ptr.is_rsc = true;
|
|
}
|
|
gop.value_ptr.failed = true;
|
|
}
|
|
},
|
|
}
|
|
|
|
const dev = g.owner();
|
|
|
|
const fail_owner: SerializedFailure.Owner = switch (side) {
|
|
.server => .{ .server = file_index },
|
|
.client => .{ .client = file_index },
|
|
};
|
|
// TODO: DevServer should get a stdio manager which can process
|
|
// the error list as it changes while also supporting a REPL
|
|
log.print(Output.errorWriter()) catch {};
|
|
const failure = try SerializedFailure.initFromLog(
|
|
dev,
|
|
fail_owner,
|
|
dev.relativePath(gop.key_ptr.*),
|
|
log.msgs.items,
|
|
);
|
|
defer dev.releaseRelativePathBuf();
|
|
const fail_gop = try dev.bundling_failures.getOrPut(dev.allocator, failure);
|
|
try dev.incremental_result.failures_added.append(dev.allocator, failure);
|
|
if (fail_gop.found_existing) {
|
|
try dev.incremental_result.failures_removed.append(dev.allocator, fail_gop.key_ptr.*);
|
|
fail_gop.key_ptr.* = failure;
|
|
}
|
|
}
|
|
|
|
pub fn onFileDeleted(g: *@This(), abs_path: []const u8, bv2: *bun.BundleV2) void {
|
|
const index = g.getFileIndex(abs_path) orelse return;
|
|
|
|
const keys = g.bundled_files.keys();
|
|
|
|
// Disconnect all imports
|
|
var it: ?EdgeIndex = g.first_import.items[index.get()].unwrap();
|
|
while (it) |edge_index| {
|
|
const dep = g.edges.items[edge_index.get()];
|
|
it = dep.next_import.unwrap();
|
|
assert(dep.dependency == index);
|
|
|
|
g.disconnectEdgeFromDependencyList(edge_index);
|
|
g.freeEdge(edge_index);
|
|
}
|
|
|
|
// Rebuild all dependencies
|
|
it = g.first_dep.items[index.get()].unwrap();
|
|
while (it) |edge_index| {
|
|
const dep = g.edges.items[edge_index.get()];
|
|
it = dep.next_dependency.unwrap();
|
|
assert(dep.imported == index);
|
|
|
|
bv2.enqueueFileFromDevServerIncrementalGraphInvalidation(
|
|
keys[dep.dependency.get()],
|
|
switch (side) {
|
|
.client => .browser,
|
|
.server => .bun,
|
|
},
|
|
) catch bun.outOfMemory();
|
|
}
|
|
|
|
// Bust the resolution caches of the dir containing this file,
|
|
// so that it cannot be resolved.
|
|
const dirname = std.fs.path.dirname(abs_path) orelse abs_path;
|
|
_ = bv2.transpiler.resolver.bustDirCache(dirname);
|
|
|
|
// Additionally, clear the cached entry of the file from the path to
|
|
// source index map.
|
|
const hash = bun.hash(abs_path);
|
|
for ([_]*bun.bundle_v2.PathToSourceIndexMap{
|
|
&bv2.graph.path_to_source_index_map,
|
|
&bv2.graph.client_path_to_source_index_map,
|
|
&bv2.graph.ssr_path_to_source_index_map,
|
|
}) |map| {
|
|
_ = map.remove(hash);
|
|
}
|
|
}
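// Note on the re-enqueue above: each dependent of the deleted file is fed
// back into the bundler so its imports are re-resolved on the next build;
// with the directory cache busted, that resolution will presumably fail (or
// pick up a replacement file) and surface through the normal failure path.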
|
|
|
|
pub fn ensureStaleBitCapacity(g: *@This(), are_new_files_stale: bool) !void {
|
|
try g.stale_files.resize(
|
|
g.owner().allocator,
|
|
std.mem.alignForward(
|
|
usize,
|
|
@max(g.bundled_files.count(), g.stale_files.bit_length),
|
|
// round the bit length up in chunks of 8 usizes
|
|
std.mem.byte_size_in_bits * @sizeOf(usize) * 8,
|
|
),
|
|
are_new_files_stale,
|
|
);
|
|
}
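// Worked example of the rounding above, assuming a 64-bit target where
// @sizeOf(usize) == 8: the chunk size is 8 (bits per byte) * 8 * 8 = 512
// bits, so the stale bit set grows in 64-byte steps; e.g. 70 files round up
// to a bit_length of 512.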
|
|
|
|
/// Given a set of paths, mark the relevant files as stale and append
|
|
/// them into `entry_points`. This is called whenever a file is changed,
|
|
/// and a new bundle has to be run.
|
|
pub fn invalidate(g: *@This(), paths: []const []const u8, entry_points: *EntryPointList, alloc: Allocator) !void {
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
const keys = g.bundled_files.keys();
|
|
const values = g.bundled_files.values();
|
|
for (paths) |path| {
|
|
const index = g.bundled_files.getIndex(path) orelse {
|
|
// Cannot enqueue because it's impossible to know what
|
|
// targets to bundle for. Instead, a failing bundle must
|
|
// retrieve the list of files and add them as stale.
|
|
continue;
|
|
};
|
|
g.stale_files.set(index);
|
|
const data = &values[index];
|
|
switch (side) {
|
|
.client => switch (data.flags.kind) {
|
|
.css => {
|
|
if (data.flags.is_css_root) {
|
|
try entry_points.appendCss(alloc, path);
|
|
}
|
|
|
|
var it = g.first_dep.items[index].unwrap();
|
|
while (it) |edge_index| {
|
|
const entry = g.edges.items[edge_index.get()];
|
|
const dep = entry.dependency;
|
|
g.stale_files.set(dep.get());
|
|
|
|
const dep_file = values[dep.get()];
|
|
if (dep_file.flags.is_css_root) {
|
|
try entry_points.appendCss(alloc, keys[dep.get()]);
|
|
}
|
|
|
|
it = entry.next_dependency.unwrap();
|
|
}
|
|
},
|
|
.asset => {
|
|
var it = g.first_dep.items[index].unwrap();
|
|
while (it) |edge_index| {
|
|
const entry = g.edges.items[edge_index.get()];
|
|
const dep = entry.dependency;
|
|
g.stale_files.set(dep.get());
|
|
|
|
const dep_file = values[dep.get()];
|
|
// Assets violate the "do not reprocess
|
|
// unchanged files" rule by reprocessing ALL
|
|
// dependencies, instead of just the CSS roots.
|
|
//
|
|
// This is currently required to force HTML
|
|
// bundles to become up to date with the new
|
|
// asset URL. Additionally, it is currently seen
|
|
// as a bit nicer in HMR to do this for all JS
|
|
// files, though that could be reconsidered.
|
|
if (dep_file.flags.is_css_root) {
|
|
try entry_points.appendCss(alloc, keys[dep.get()]);
|
|
} else {
|
|
try entry_points.appendJs(alloc, keys[dep.get()], .client);
|
|
}
|
|
|
|
it = entry.next_dependency.unwrap();
|
|
}
|
|
|
|
try entry_points.appendJs(alloc, path, .client);
|
|
},
|
|
// When re-bundling SCBs, only bundle the server. Otherwise
|
|
// the bundler gets confused and bundles both sides without
|
|
// knowledge of the boundary between them.
|
|
.js, .unknown => if (!data.flags.is_hmr_root) {
|
|
try entry_points.appendJs(alloc, path, .client);
|
|
},
|
|
},
|
|
.server => {
|
|
if (data.is_rsc)
|
|
try entry_points.appendJs(alloc, path, .server);
|
|
if (data.is_ssr and !data.is_client_component_boundary)
|
|
try entry_points.appendJs(alloc, path, .ssr);
|
|
},
|
|
}
|
|
}
|
|
}
|
|
|
|
fn reset(g: *@This()) void {
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
g.current_chunk_len = 0;
|
|
g.current_chunk_parts.clearRetainingCapacity();
|
|
if (side == .client) g.current_css_files.clearRetainingCapacity();
|
|
}
|
|
|
|
const TakeJSBundleOptions = switch (side) {
|
|
.client => struct {
|
|
kind: ChunkKind,
|
|
source_map_id: ?u64 = null,
|
|
initial_response_entry_point: []const u8 = "",
|
|
react_refresh_entry_point: []const u8 = "",
|
|
},
|
|
.server => struct {
|
|
kind: ChunkKind,
|
|
},
|
|
};
|
|
|
|
pub fn takeJSBundle(
|
|
g: *@This(),
|
|
options: TakeJSBundleOptions,
|
|
) ![]u8 {
|
|
var chunk = std.ArrayList(u8).init(g.owner().allocator);
|
|
try g.takeJSBundleToList(&chunk, options);
|
|
bun.assert(chunk.items.len == chunk.capacity);
|
|
return chunk.items;
|
|
}
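// Minimal usage sketch (illustrative only; this call site and the option
// values are assumptions, not the actual caller inside DevServer):
//   const js = try dev.client_graph.takeJSBundle(.{ .kind = .initial_response });
//   defer dev.allocator.free(js);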
|
|
|
|
pub fn takeJSBundleToList(
|
|
g: *@This(),
|
|
list: *std.ArrayList(u8),
|
|
options: TakeJSBundleOptions,
|
|
) !void {
|
|
const kind = options.kind;
|
|
g.owner().graph_safety_lock.assertLocked();
|
|
// initial bundle needs at least the entry point
|
|
// hot updates shouldn't be emitted if there are no chunks
|
|
assert(g.current_chunk_len > 0);
|
|
|
|
const runtime: bake.HmrRuntime = switch (kind) {
|
|
.initial_response => bun.bake.getHmrRuntime(side),
|
|
.hmr_chunk => comptime .init("({\n"),
|
|
};
|
|
|
|
// A small amount of metadata is present at the end of the chunk to
// inform the HMR runtime of some crucial entry-point info. The exact
// upper bound of this could be calculated, but is not, to avoid
// worrying about Windows path lengths.
|
|
var end_sfa = std.heap.stackFallback(65536, g.owner().allocator);
|
|
var end_list = std.ArrayList(u8).initCapacity(end_sfa.get(), 65536) catch unreachable;
|
|
defer end_list.deinit();
|
|
const end = end: {
|
|
const w = end_list.writer();
|
|
switch (kind) {
|
|
.initial_response => {
|
|
if (side == .server) @panic("unreachable");
|
|
try w.writeAll("}, {\n main: ");
|
|
const initial_response_entry_point = options.initial_response_entry_point;
|
|
if (initial_response_entry_point.len > 0) {
|
|
try bun.js_printer.writeJSONString(
|
|
g.owner().relativePath(initial_response_entry_point),
|
|
@TypeOf(w),
|
|
w,
|
|
.utf8,
|
|
);
|
|
g.owner().releaseRelativePathBuf();
|
|
} else {
|
|
try w.writeAll("null");
|
|
}
|
|
try w.writeAll(",\n bun: \"" ++ bun.Global.package_json_version_with_canary ++ "\"");
|
|
try w.writeAll(",\n version: \"");
|
|
try w.writeAll(&g.owner().configuration_hash_key);
|
|
try w.writeAll("\"");
|
|
if (options.react_refresh_entry_point.len > 0) {
|
|
try w.writeAll(",\n refresh: ");
|
|
try bun.js_printer.writeJSONString(
|
|
g.owner().relativePath(options.react_refresh_entry_point),
|
|
@TypeOf(w),
|
|
w,
|
|
.utf8,
|
|
);
|
|
g.owner().releaseRelativePathBuf();
|
|
}
|
|
try w.writeAll("\n");
|
|
},
|
|
.hmr_chunk => {},
|
|
}
|
|
try w.writeAll("})");
|
|
if (side == .client) if (options.source_map_id) |source_map_id| {
|
|
try w.writeAll("\n//# sourceMappingURL=" ++ client_prefix ++ "/");
|
|
try w.writeAll(&std.fmt.bytesToHex(std.mem.asBytes(&source_map_id), .lower));
|
|
try w.writeAll(".js.map\n");
|
|
};
|
|
break :end end_list.items;
|
|
};
|
|
|
|
const files = g.bundled_files.values();
|
|
|
|
const start = list.items.len;
|
|
if (start == 0)
|
|
try list.ensureTotalCapacityPrecise(g.current_chunk_len + runtime.code.len + end.len)
|
|
else
|
|
try list.ensureUnusedCapacity(g.current_chunk_len + runtime.code.len + end.len);
|
|
|
|
list.appendSliceAssumeCapacity(runtime.code);
|
|
for (g.current_chunk_parts.items) |entry| {
|
|
list.appendSliceAssumeCapacity(switch (side) {
|
|
// entry is an index into files
|
|
.client => files[entry.get()].jsCode(),
|
|
// entry is the '[]const u8' itself
|
|
.server => entry,
|
|
});
|
|
}
|
|
list.appendSliceAssumeCapacity(end);
|
|
|
|
if (bun.FeatureFlags.bake_debugging_features) if (g.owner().dump_dir) |dump_dir| {
|
|
const rel_path_escaped = switch (kind) {
|
|
.initial_response => "latest_chunk.js",
|
|
.hmr_chunk => "latest_hmr.js",
|
|
};
|
|
dumpBundle(dump_dir, switch (side) {
|
|
.client => .client,
|
|
.server => .server,
|
|
}, rel_path_escaped, list.items[start..], false) catch |err| {
|
|
bun.handleErrorReturnTrace(err, @errorReturnTrace());
|
|
Output.warn("Could not dump bundle: {}", .{err});
|
|
};
|
|
};
|
|
}
|
|
|
|
pub const SourceMapGeneration = struct {
|
|
json: []u8,
|
|
mappings: bun.StringPointer,
|
|
file_paths: [][]const u8,
|
|
};
|
|
|
|
/// Uses `arena` as a temporary allocator, fills in all fields of `out` except ref_count
|
|
pub fn takeSourceMap(g: *@This(), kind: ChunkKind, arena: std.mem.Allocator, gpa: Allocator, out: *SourceMapStore.Entry) bun.OOM!void {
|
|
if (side == .server) @compileError("not implemented");
|
|
|
|
const paths = g.bundled_files.keys();
|
|
const files = g.bundled_files.values();
|
|
const source_maps = g.source_maps.items;
|
|
|
|
var j: StringJoiner = .{ .allocator = arena };
|
|
|
|
const runtime: bake.HmrRuntime = switch (kind) {
|
|
.initial_response => bun.bake.getHmrRuntime(side),
|
|
.hmr_chunk => comptime .init("({\n"),
|
|
};
|
|
|
|
j.pushStatic(
|
|
\\{"version":3,"sources":["bun://Bun/Bun HMR Runtime"
|
|
);
|
|
|
|
// This buffer is temporary, holding the quoted source paths, joined with commas.
|
|
var source_map_strings = std.ArrayList(u8).init(arena);
|
|
defer source_map_strings.deinit();
|
|
|
|
const dev = g.owner();
|
|
dev.relative_path_buf_lock.lock();
|
|
defer dev.relative_path_buf_lock.unlock();
|
|
|
|
const buf = bun.PathBufferPool.get();
|
|
defer bun.PathBufferPool.put(buf);
|
|
|
|
var path_count: usize = 0;
|
|
for (g.current_chunk_parts.items) |entry| {
|
|
path_count += 1;
|
|
try source_map_strings.appendSlice(",");
|
|
const path = if (Environment.isWindows)
|
|
bun.path.pathToPosixBuf(u8, paths[entry.get()], buf)
|
|
else
|
|
paths[entry.get()];
|
|
|
|
if (std.fs.path.isAbsolute(path)) {
|
|
const is_windows_drive_path = Environment.isWindows and path[0] != '/';
|
|
try source_map_strings.appendSlice(if (is_windows_drive_path)
|
|
"\"file:///"
|
|
else
|
|
"\"file://");
|
|
if (Environment.isWindows and !is_windows_drive_path) {
|
|
// UNC namespace -> file://server/share/path.ext
|
|
bun.strings.percentEncodeWrite(
|
|
if (path.len > 2 and path[0] == '/' and path[1] == '/')
|
|
path[2..]
|
|
else
|
|
path, // invalid but must not crash
|
|
&source_map_strings,
|
|
) catch |err| switch (err) {
|
|
error.IncompleteUTF8 => @panic("Unexpected: asset with incomplete UTF-8 as file path"),
|
|
error.OutOfMemory => |e| return e,
|
|
};
|
|
} else {
|
|
// posix paths always start with '/'
|
|
// -> file:///path/to/file.js
|
|
// windows drive letter paths have the extra slash added
|
|
// -> file:///C:/path/to/file.js
|
|
bun.strings.percentEncodeWrite(path, &source_map_strings) catch |err| switch (err) {
|
|
error.IncompleteUTF8 => @panic("Unexpected: asset with incomplete UTF-8 as file path"),
|
|
error.OutOfMemory => |e| return e,
|
|
};
|
|
}
|
|
try source_map_strings.appendSlice("\"");
|
|
} else {
|
|
try source_map_strings.appendSlice("\"bun://");
|
|
bun.strings.percentEncodeWrite(path, &source_map_strings) catch |err| switch (err) {
|
|
error.IncompleteUTF8 => @panic("Unexpected: asset with incomplete UTF-8 as file path"),
|
|
error.OutOfMemory => |e| return e,
|
|
};
|
|
try source_map_strings.appendSlice("\"");
|
|
}
|
|
}
|
|
var file_paths = try ArrayListUnmanaged([]const u8).initCapacity(gpa, path_count);
|
|
errdefer file_paths.deinit(gpa);
|
|
var source_contents = try ArrayListUnmanaged(bun.StringPointer).initCapacity(gpa, path_count);
|
|
errdefer source_contents.deinit(gpa);
|
|
try source_contents.ensureTotalCapacity(gpa, path_count);
|
|
j.pushStatic(source_map_strings.items);
|
|
j.pushStatic(
|
|
\\],"sourcesContent":["// (Bun's internal HMR runtime is minified)"
|
|
);
|
|
for (g.current_chunk_parts.items) |file_index| {
|
|
const map = &source_maps[file_index.get()];
|
|
// For empty chunks, put a blank entry. This allows HTML
|
|
// files to get their stack remapped, despite having no
|
|
// actual mappings.
|
|
if (map.vlq_len == 0) {
|
|
const ptr: bun.StringPointer = .{
|
|
.offset = @intCast(j.len + ",\"".len),
|
|
.length = 0,
|
|
};
|
|
j.pushStatic(",\"\"");
|
|
file_paths.appendAssumeCapacity(paths[file_index.get()]);
|
|
source_contents.appendAssumeCapacity(ptr);
|
|
continue;
|
|
}
|
|
j.pushStatic(",");
|
|
const quoted_slice = map.quotedContents();
|
|
if (quoted_slice.len == 0) {
|
|
bun.debugAssert(false); // vlq without source contents!
|
|
const ptr: bun.StringPointer = .{
|
|
.offset = @intCast(j.len + ",\"".len),
|
|
.length = 0,
|
|
};
|
|
j.pushStatic(",\"// Did not have source contents for this file.\n// This is a bug in Bun's bundler and should be reported with a reproduction.\"");
|
|
file_paths.appendAssumeCapacity(paths[file_index.get()]);
|
|
source_contents.appendAssumeCapacity(ptr);
|
|
continue;
|
|
}
|
|
// Store the location of the source file, since it is going to be
// stored regardless for use by the served source map. These 8 bytes
// per file allow remapping sources without reading from disk, as
// well as ensuring that remaps against this exact sourcemap can
// print the previous state of the code when it was modified.
|
|
bun.assert(quoted_slice[0] == '"');
|
|
bun.assert(quoted_slice[quoted_slice.len - 1] == '"');
|
|
const ptr: bun.StringPointer = .{
|
|
.offset = @intCast(j.len + "\"".len),
|
|
.length = @intCast(quoted_slice.len - "\"\"".len),
|
|
};
|
|
j.pushStatic(quoted_slice);
|
|
file_paths.appendAssumeCapacity(paths[file_index.get()]);
|
|
source_contents.appendAssumeCapacity(ptr);
|
|
}
|
|
// This first mapping makes the bytes from line 0 column 0 to the next mapping
|
|
j.pushStatic(
|
|
\\],"names":[],"mappings":"AAAA
|
|
);
|
|
|
|
const mappings_start = j.len - 4;
|
|
var prev_end_state: SourceMap.SourceMapState = .{
|
|
.generated_line = 0,
|
|
.generated_column = 0,
|
|
.source_index = 0,
|
|
.original_line = 0,
|
|
.original_column = 0,
|
|
};
|
|
|
|
// +2 because the magic fairy in my dreams said it would align the source maps.
|
|
var lines_between: u32 = runtime.line_count + 2;
|
|
|
|
// Join all of the mappings together.
|
|
for (g.current_chunk_parts.items, 1..) |entry, source_index| {
|
|
const source_map = &source_maps[entry.get()];
|
|
if (source_map.vlq_len == 0) {
|
|
if (source_map.extra.empty.line_count.unwrap()) |line_count| {
|
|
lines_between += line_count.get();
|
|
} else {
|
|
const count: u32 = @intCast(bun.strings.countChar(files[entry.get()].jsCode(), '\n'));
|
|
source_map.extra.empty.line_count = .init(count);
|
|
lines_between += count;
|
|
}
|
|
// TODO: consider reviving this code. it generates a valid
|
|
// map according to Source Map Visualization, but it does
|
|
// not map correctly in Chrome or Bun. This code tries to
|
|
// add a mapping for each source line of unmapped code, so
|
|
// at the very minimum the file name can be retrieved.
|
|
// In practice, Bun may not need this as the only chunks
|
|
// with this behavior are HTML and empty JS files.
|
|
|
|
// if (lines_between > 0) {
|
|
// j.pushStatic(try bun.strings.repeatingAlloc(arena, lines_between, ';'));
|
|
// lines_between = 0;
|
|
// }
|
|
// if (source_map.extra.empty.line_count.unwrap()) |lc| {
|
|
// lines_between = lc.get();
|
|
// } else {
|
|
// const count: u32 = @intCast(bun.strings.countChar(files[entry.get()].jsCode(), '\n'));
|
|
// source_map.extra.empty.line_count = .init(count);
|
|
// lines_between = count;
|
|
// }
|
|
// // For empty chunks, put a blank entry for HTML. This will
|
|
// // consist of a single mapping per line to cover the file
|
|
// assert(lines_between > 1); // there is one line to open the function, and one line `},` to close it
|
|
// const repeating_mapping = ";AAAA"; // VLQ [0, 0, 0, 0] to repeat a column zero same source index mapping
|
|
// const original_line = SourceMap.encodeVLQWithLookupTable(-prev_end_state.original_line);
|
|
// const original_column = SourceMap.encodeVLQWithLookupTable(-prev_end_state.original_column);
|
|
// var list = std.ArrayList(u8).init(arena);
|
|
// const first = "AC"; // VLQ [0, 1], generated column and generated source index
|
|
// try list.ensureTotalCapacityPrecise(
|
|
// @as(usize, (lines_between + 1) * repeating_mapping.len) +
|
|
// first.len +
|
|
// @as(usize, original_line.len) +
|
|
// @as(usize, original_column.len),
|
|
// );
|
|
// list.appendSliceAssumeCapacity(first);
|
|
// list.appendSliceAssumeCapacity(original_line.slice());
|
|
// list.appendSliceAssumeCapacity(original_column.slice());
|
|
// for (0..lines_between + 1) |_| {
|
|
// list.appendSliceAssumeCapacity(repeating_mapping);
|
|
// }
|
|
// j.pushStatic(list.items);
|
|
// prev_end_state = .{
|
|
// .source_index = @intCast(source_index),
|
|
// .generated_line = 0,
|
|
// .generated_column = 0,
|
|
// .original_line = 0,
|
|
// .original_column = 0,
|
|
// };
|
|
// lines_between = 1;
|
|
continue;
|
|
}
|
|
|
|
const start_state: SourceMap.SourceMapState = .{
|
|
.source_index = @intCast(source_index),
|
|
.generated_line = @intCast(lines_between),
|
|
.generated_column = 0,
|
|
.original_line = 0,
|
|
.original_column = 0,
|
|
};
|
|
lines_between = 0;
|
|
|
|
try SourceMap.appendSourceMapChunk(
|
|
&j,
|
|
arena,
|
|
prev_end_state,
|
|
start_state,
|
|
source_map.vlq(),
|
|
);
|
|
|
|
prev_end_state = .{
|
|
.source_index = @intCast(source_index),
|
|
.generated_line = 0,
|
|
.generated_column = 0,
|
|
.original_line = source_map.extra.end_state.original_line,
|
|
.original_column = source_map.extra.end_state.original_column,
|
|
};
|
|
}
|
|
const mappings_len = j.len - mappings_start;
|
|
|
|
const json_bytes = try j.doneWithEnd(gpa, "\"}");
|
|
errdefer @compileError("last try should be the final alloc");
|
|
|
|
if (bun.FeatureFlags.bake_debugging_features) if (g.owner().dump_dir) |dump_dir| {
|
|
const rel_path_escaped = "latest_chunk.js.map";
|
|
dumpBundle(dump_dir, switch (side) {
|
|
.client => .client,
|
|
.server => .server,
|
|
}, rel_path_escaped, json_bytes, false) catch |err| {
|
|
bun.handleErrorReturnTrace(err, @errorReturnTrace());
|
|
Output.warn("Could not dump bundle: {}", .{err});
|
|
};
|
|
};
|
|
|
|
out.* = .{
|
|
.ref_count = out.ref_count,
|
|
.response = .initFromAnyBlob(&.fromOwnedSlice(gpa, json_bytes), .{
|
|
.server = g.owner().server,
|
|
.mime_type = &.json,
|
|
}),
|
|
.mappings_data = .{
|
|
.offset = @intCast(mappings_start),
|
|
.length = @intCast(mappings_len),
|
|
},
|
|
.file_paths = file_paths.items,
|
|
.source_contents = source_contents.items.ptr,
|
|
};
|
|
}
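// The emitted JSON roughly has this shape (informal sketch of the literals
// joined above):
//   {"version":3,
//    "sources":["bun://Bun/Bun HMR Runtime","file:///...","bun://..."],
//    "sourcesContent":["// (Bun's internal HMR runtime is minified)", ...],
//    "names":[],"mappings":"AAAA..."}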
|
|
|
|
fn disconnectAndDeleteFile(g: *@This(), file_index: FileIndex) void {
|
|
bun.assert(g.first_dep.items[file_index.get()] == .none); // must have no dependencies
|
|
|
|
// Disconnect all imports
|
|
{
|
|
var it: ?EdgeIndex = g.first_import.items[file_index.get()].unwrap();
|
|
while (it) |edge_index| {
|
|
const dep = g.edges.items[edge_index.get()];
|
|
it = dep.next_import.unwrap();
|
|
assert(dep.dependency == file_index);
|
|
|
|
g.disconnectEdgeFromDependencyList(edge_index);
|
|
g.freeEdge(edge_index);
|
|
|
|
// TODO: add a flag to this function which queues all
// direct importers to rebuild themselves, which will
// display the bundling errors.
|
|
}
|
|
}
|
|
|
|
const keys = g.bundled_files.keys();
|
|
|
|
g.owner().allocator.free(keys[file_index.get()]);
|
|
keys[file_index.get()] = ""; // cannot be `undefined` as it may be read by hashmap logic
|
|
|
|
assert_eql(g.first_dep.items[file_index.get()], .none);
|
|
assert_eql(g.first_import.items[file_index.get()], .none);
|
|
|
|
// TODO: it is infeasible to swapRemove a file since
|
|
// FrameworkRouter, SerializedFailure, and other structures contain
|
|
// file indices to the server graph. Instead, `file_index` should
|
|
// go in a free-list for use by new files.
|
|
}
|
|
|
|
fn newEdge(g: *@This(), edge: Edge) !EdgeIndex {
|
|
if (g.edges_free_list.pop()) |index| {
|
|
g.edges.items[index.get()] = edge;
|
|
return index;
|
|
}
|
|
|
|
const index = EdgeIndex.init(@intCast(g.edges.items.len));
|
|
try g.edges.append(g.owner().allocator, edge);
|
|
return index;
|
|
}
|
|
|
|
/// Does nothing besides release the `Edge` for reallocation by `newEdge`
|
|
/// Caller must detach the dependency from the linked list it is in.
|
|
fn freeEdge(g: *@This(), edge_index: EdgeIndex) void {
|
|
if (Environment.isDebug) {
|
|
g.edges.items[edge_index.get()] = undefined;
|
|
}
|
|
|
|
if (edge_index.get() == (g.edges.items.len - 1)) {
|
|
g.edges.items.len -= 1;
|
|
} else {
|
|
g.edges_free_list.append(g.owner().allocator, edge_index) catch {
|
|
// Leak an edge object; Ok since it may get cleaned up by
|
|
// the next incremental graph garbage-collection cycle.
|
|
};
|
|
}
|
|
}
|
|
|
|
pub fn owner(g: *@This()) *DevServer {
|
|
return @alignCast(@fieldParentPtr(@tagName(side) ++ "_graph", g));
|
|
}
|
|
};
|
|
}
|
|
|
|
const IncrementalResult = struct {
|
|
/// When tracing a file's dependencies via `traceDependencies`, this is
|
|
/// populated with the hit `Route.Index`s. To know what framework
|
|
/// `RouteBundle`s are affected, the route graph must be traced downwards.
|
|
/// Tracing is used for multiple purposes.
|
|
framework_routes_affected: ArrayListUnmanaged(RouteIndexAndRecurseFlag),
|
|
/// HTML routes have slightly different anatomy than their
|
|
/// framework-provided counterparts, and get a separate list.
|
|
/// This list is just for when a script/style URL needs to be rewritten.
|
|
html_routes_soft_affected: ArrayListUnmanaged(RouteBundle.Index),
|
|
/// Requires a hard reload of active viewers.
|
|
/// - Changed HTML File
|
|
/// - Non-JS/CSS link between HTML file and import, such as image.
|
|
html_routes_hard_affected: ArrayListUnmanaged(RouteBundle.Index),
|
|
/// Set to true if any IncrementalGraph edges were added or removed.
|
|
had_adjusted_edges: bool,
|
|
|
|
// The following three fields are populated during `receiveChunk`
|
|
|
|
/// Components to add to the client manifest
|
|
client_components_added: ArrayListUnmanaged(IncrementalGraph(.server).FileIndex),
|
|
/// Components to remove from the client manifest
|
|
client_components_removed: ArrayListUnmanaged(IncrementalGraph(.server).FileIndex),
|
|
/// This list acts as a free list. The contents of these slices must remain
|
|
/// valid; they have to be so the affected routes can be cleared of the
|
|
/// failures and potentially be marked valid. At the end of an
|
|
/// incremental update, the slices are freed.
|
|
failures_removed: ArrayListUnmanaged(SerializedFailure),
|
|
|
|
/// Client boundaries that have been added or modified. At the end of a hot
|
|
/// update, these are traced to their route to mark the bundles as stale (to
|
|
/// be generated on Cmd+R)
|
|
///
|
|
/// Populated during `traceDependencies`
|
|
client_components_affected: ArrayListUnmanaged(IncrementalGraph(.server).FileIndex),
|
|
|
|
/// The list of failures which will have to be traced to their route. Such
|
|
/// tracing is deferred until the second pass of finalizeBundle as the
|
|
/// dependency graph may not fully exist at the time the failure is indexed.
|
|
///
|
|
/// Populated from within the bundler via `handleParseTaskFailure`, as well
|
|
/// as at the start of `indexFailures`.
|
|
///
|
|
/// This is also populated when calling `traceImports` with `find_errors`
|
|
failures_added: ArrayListUnmanaged(SerializedFailure),
|
|
|
|
const empty: IncrementalResult = .{
|
|
.framework_routes_affected = .{},
|
|
.html_routes_soft_affected = .{},
|
|
.html_routes_hard_affected = .{},
|
|
.had_adjusted_edges = false,
|
|
.failures_removed = .{},
|
|
.failures_added = .{},
|
|
.client_components_added = .{},
|
|
.client_components_removed = .{},
|
|
.client_components_affected = .{},
|
|
};
|
|
|
|
fn reset(result: *IncrementalResult) void {
|
|
result.framework_routes_affected.clearRetainingCapacity();
|
|
result.html_routes_soft_affected.clearRetainingCapacity();
|
|
result.html_routes_hard_affected.clearRetainingCapacity();
|
|
assert(result.failures_removed.items.len == 0);
|
|
result.failures_added.clearRetainingCapacity();
|
|
result.client_components_added.clearRetainingCapacity();
|
|
result.client_components_removed.clearRetainingCapacity();
|
|
result.client_components_affected.clearRetainingCapacity();
|
|
}
|
|
};
|
|
|
|
/// Used during an incremental update to determine what "HMR roots"
|
|
/// are affected. Set for all `bundled_files` that have been visited
|
|
/// by the dependency tracing logic.
|
|
const GraphTraceState = struct {
|
|
client_bits: DynamicBitSetUnmanaged,
|
|
server_bits: DynamicBitSetUnmanaged,
|
|
|
|
fn bits(gts: *GraphTraceState, side: bake.Side) *DynamicBitSetUnmanaged {
|
|
return switch (side) {
|
|
.client => &gts.client_bits,
.server => &gts.server_bits,
|
|
};
|
|
}
|
|
|
|
fn deinit(gts: *GraphTraceState, alloc: Allocator) void {
|
|
gts.client_bits.deinit(alloc);
|
|
gts.server_bits.deinit(alloc);
|
|
}
|
|
|
|
fn clear(gts: *GraphTraceState) void {
|
|
gts.server_bits.setAll(false);
|
|
gts.client_bits.setAll(false);
|
|
}
|
|
|
|
fn resize(gts: *GraphTraceState, side: bake.Side, allocator: Allocator, new_size: usize) !void {
|
|
const b = switch (side) {
|
|
.client => &gts.client_bits,
.server => &gts.server_bits,
|
|
};
|
|
if (b.bit_length < new_size) {
|
|
try b.resize(allocator, new_size, false);
|
|
}
|
|
}
|
|
};
|
|
|
|
const TraceImportGoal = enum {
|
|
find_css,
|
|
find_client_modules,
|
|
find_errors,
|
|
};
|
|
|
|
/// `extra_client_bits` is specified if it is possible that the client graph may
|
|
/// increase in size while the bits are being used. This happens with CSS files,
|
|
/// though [TODO] might not actually be necessary.
|
|
fn initGraphTraceState(dev: *const DevServer, sfa: Allocator, extra_client_bits: usize) !GraphTraceState {
|
|
var server_bits = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.server_graph.bundled_files.count());
|
|
errdefer server_bits.deinit(sfa);
|
|
const client_bits = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.client_graph.bundled_files.count() + extra_client_bits);
|
|
return .{ .server_bits = server_bits, .client_bits = client_bits };
|
|
}
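// Minimal usage sketch (illustrative; the allocator choice and file index
// are assumptions):
//   var trace = try dev.initGraphTraceState(sfa, 0);
//   defer trace.deinit(sfa);
//   trace.bits(.client).set(file_index.get());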
|
|
|
|
/// When a file fails to import a relative path, directory watchers are added so
|
|
/// that when a matching file is created, the dependencies can be rebuilt. This
|
|
/// handles HMR cases where a user writes an import before creating the file,
|
|
/// or moves files around. This structure is not thread-safe.
|
|
///
|
|
/// This structure manages those watchers, including releasing them once
|
|
/// import resolution failures are solved.
|
|
// TODO: when a file fixes its resolution, there is no code specifically to remove the watchers.
|
|
const DirectoryWatchStore = struct {
|
|
/// List of active watchers. Can be re-ordered on removal
|
|
watches: bun.StringArrayHashMapUnmanaged(Entry),
|
|
dependencies: ArrayListUnmanaged(Dep),
|
|
/// Dependencies cannot be re-ordered. This list tracks what indexes are free.
|
|
dependencies_free_list: ArrayListUnmanaged(Dep.Index),
|
|
|
|
const empty: DirectoryWatchStore = .{
|
|
.watches = .{},
|
|
.dependencies = .{},
|
|
.dependencies_free_list = .{},
|
|
};
|
|
|
|
pub fn owner(store: *DirectoryWatchStore) *DevServer {
|
|
return @alignCast(@fieldParentPtr("directory_watchers", store));
|
|
}
|
|
|
|
pub fn trackResolutionFailure(store: *DirectoryWatchStore, import_source: []const u8, specifier: []const u8, renderer: bake.Graph, loader: bun.options.Loader) bun.OOM!void {
|
|
// When it does not resolve to a file path, there is nothing to track.
|
|
if (specifier.len == 0) return;
|
|
if (!std.fs.path.isAbsolute(import_source)) return;
|
|
|
|
switch (loader) {
|
|
.tsx, .ts, .jsx, .js => {
|
|
if (!(bun.strings.startsWith(specifier, "./") or
|
|
bun.strings.startsWith(specifier, "../"))) return;
|
|
},
|
|
|
|
// Imports in CSS can resolve to relative files without './'
|
|
// Imports in HTML can resolve to project-relative paths by
|
|
// prefixing with '/', but that is done in HTMLScanner.
|
|
.css, .html => {},
|
|
|
|
// Multiple parts of DevServer rely on the fact that these
|
|
// loaders do not depend on importing other files.
|
|
.file,
|
|
.json,
|
|
.jsonc,
|
|
.toml,
|
|
.wasm,
|
|
.napi,
|
|
.base64,
|
|
.dataurl,
|
|
.text,
|
|
.bunsh,
|
|
.sqlite,
|
|
.sqlite_embedded,
|
|
=> bun.debugAssert(false),
|
|
}
|
|
|
|
const buf = bun.PathBufferPool.get();
|
|
defer bun.PathBufferPool.put(buf);
|
|
const joined = bun.path.joinAbsStringBuf(bun.path.dirname(import_source, .auto), buf, &.{specifier}, .auto);
|
|
const dir = bun.path.dirname(joined, .auto);
|
|
|
|
// The `import_source` parameter is not a stable string. Since the
|
|
// import source will be added to IncrementalGraph anyways, this is a
|
|
// great place to share memory.
|
|
const dev = store.owner();
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
const owned_file_path = switch (renderer) {
|
|
.client => (try dev.client_graph.insertEmpty(import_source, .unknown)).key,
|
|
.server, .ssr => (try dev.server_graph.insertEmpty(import_source, .unknown)).key,
|
|
};
|
|
|
|
store.insert(dir, owned_file_path, specifier) catch |err| switch (err) {
|
|
error.Ignore => {}, // ignoring watch errors.
|
|
error.OutOfMemory => |e| return e,
|
|
};
|
|
}
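// Illustrative call site (sketch; the paths and loader are made-up values):
//   try dev.directory_watchers.trackResolutionFailure(
//       "/project/src/App.tsx", // absolute path of the importer
//       "./Missing.tsx",        // relative specifier that failed to resolve
//       .client,
//       .tsx,
//   );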
|
|
|
|
/// `dir_name_to_watch` is cloned
|
|
/// `file_path` must have lifetime that outlives the watch
|
|
/// `specifier` is cloned
|
|
fn insert(
|
|
store: *DirectoryWatchStore,
|
|
dir_name_to_watch: []const u8,
|
|
file_path: []const u8,
|
|
specifier: []const u8,
|
|
) !void {
|
|
assert(specifier.len > 0);
|
|
// TODO: watch the parent dir too.
|
|
const dev = store.owner();
|
|
|
|
debug.log("DirectoryWatchStore.insert({}, {}, {})", .{
|
|
bun.fmt.quote(dir_name_to_watch),
|
|
bun.fmt.quote(file_path),
|
|
bun.fmt.quote(specifier),
|
|
});
|
|
|
|
if (store.dependencies_free_list.items.len == 0)
|
|
try store.dependencies.ensureUnusedCapacity(dev.allocator, 1);
|
|
|
|
const gop = try store.watches.getOrPut(dev.allocator, bun.strings.withoutTrailingSlashWindowsPath(dir_name_to_watch));
|
|
const specifier_cloned = if (specifier[0] == '.' or std.fs.path.isAbsolute(specifier))
|
|
try dev.allocator.dupe(u8, specifier)
|
|
else
|
|
try std.fmt.allocPrint(dev.allocator, "./{s}", .{specifier});
|
|
errdefer dev.allocator.free(specifier_cloned);
|
|
|
|
if (gop.found_existing) {
|
|
const dep = store.appendDepAssumeCapacity(.{
|
|
.next = gop.value_ptr.first_dep.toOptional(),
|
|
.source_file_path = file_path,
|
|
.specifier = specifier_cloned,
|
|
});
|
|
gop.value_ptr.first_dep = dep;
|
|
|
|
return;
|
|
}
|
|
errdefer store.watches.swapRemoveAt(gop.index);
|
|
|
|
// Try to use an existing open directory handle
|
|
const cache_fd = if (dev.server_transpiler.resolver.readDirInfo(dir_name_to_watch) catch null) |cache| fd: {
|
|
const fd = cache.getFileDescriptor();
|
|
break :fd if (fd == .zero) null else fd;
|
|
} else null;
|
|
|
|
const fd, const owned_fd = if (Watcher.requires_file_descriptors) if (cache_fd) |fd|
|
|
.{ fd, false }
|
|
else switch (bun.sys.open(
|
|
&(std.posix.toPosixPath(dir_name_to_watch) catch |err| switch (err) {
|
|
error.NameTooLong => return error.Ignore, // wouldn't be able to open, ignore
|
|
}),
|
|
// O_EVTONLY is the flag to indicate that only watches will be used.
|
|
bun.O.DIRECTORY | bun.c.O_EVTONLY,
|
|
0,
|
|
)) {
|
|
.result => |fd| .{ fd, true },
|
|
.err => |err| switch (err.getErrno()) {
|
|
// If this directory doesn't exist, a watcher should be placed
// on the parent directory. Then, if this directory is later
// created, the watcher can be properly initialized. This would
// happen with a specifier like `./dir/whatever/hello.tsx` where
// `dir` does not exist: Bun must place a watcher on `.`, see
// the creation of `dir`, and repeat until it can open a watcher
// on `whatever` to see the creation of `hello.tsx`.
|
|
.NOENT => {
|
|
// TODO: implement that. for now it ignores (BUN-10968)
|
|
return error.Ignore;
|
|
},
|
|
.NOTDIR => return error.Ignore, // ignore
|
|
else => {
|
|
bun.todoPanic(@src(), "log watcher error", .{});
|
|
},
|
|
},
|
|
} else .{ bun.invalid_fd, false };
|
|
errdefer _ = if (Watcher.requires_file_descriptors) if (owned_fd) bun.sys.close(fd);
|
|
if (Watcher.requires_file_descriptors)
|
|
debug.log("-> fd: {} ({s})", .{
|
|
fd,
|
|
if (owned_fd) "from dir cache" else "owned fd",
|
|
});
|
|
|
|
const dir_name = try dev.allocator.dupe(u8, dir_name_to_watch);
|
|
errdefer dev.allocator.free(dir_name);
|
|
|
|
gop.key_ptr.* = bun.strings.withoutTrailingSlashWindowsPath(dir_name);
|
|
|
|
const watch_index = switch (dev.bun_watcher.addDirectory(fd, dir_name, bun.Watcher.getHash(dir_name), false)) {
|
|
.err => return error.Ignore,
|
|
.result => |id| id,
|
|
};
|
|
const dep = store.appendDepAssumeCapacity(.{
|
|
.next = .none,
|
|
.source_file_path = file_path,
|
|
.specifier = specifier_cloned,
|
|
});
|
|
store.watches.putAssumeCapacity(dir_name, .{
|
|
.dir = fd,
|
|
.dir_fd_owned = owned_fd,
|
|
.first_dep = dep,
|
|
.watch_index = watch_index,
|
|
});
|
|
}
|
|
|
|
/// Caller must detach the dependency from the linked list it is in.
|
|
fn freeDependencyIndex(store: *DirectoryWatchStore, alloc: Allocator, index: Dep.Index) !void {
|
|
alloc.free(store.dependencies.items[index.get()].specifier);
|
|
|
|
if (Environment.isDebug) {
|
|
store.dependencies.items[index.get()] = undefined;
|
|
}
|
|
|
|
if (index.get() == (store.dependencies.items.len - 1)) {
|
|
store.dependencies.items.len -= 1;
|
|
} else {
|
|
try store.dependencies_free_list.append(alloc, index);
|
|
}
|
|
}
|
|
|
|
/// Expects dependency list to be already freed
|
|
fn freeEntry(store: *DirectoryWatchStore, alloc: Allocator, entry_index: usize) void {
|
|
const entry = store.watches.values()[entry_index];
|
|
|
|
debug.log("DirectoryWatchStore.freeEntry({d}, {})", .{
|
|
entry_index,
|
|
entry.dir,
|
|
});
|
|
|
|
store.owner().bun_watcher.removeAtIndex(entry.watch_index, 0, &.{}, .file);
|
|
|
|
defer _ = if (entry.dir_fd_owned) bun.sys.close(entry.dir);
|
|
|
|
alloc.free(store.watches.keys()[entry_index]);
|
|
store.watches.swapRemoveAt(entry_index);
|
|
|
|
if (store.watches.entries.len == 0) {
|
|
assert(store.dependencies.items.len == 0);
|
|
store.dependencies_free_list.clearRetainingCapacity();
|
|
}
|
|
}
|
|
|
|
fn appendDepAssumeCapacity(store: *DirectoryWatchStore, dep: Dep) Dep.Index {
|
|
if (store.dependencies_free_list.pop()) |index| {
|
|
store.dependencies.items[index.get()] = dep;
|
|
return index;
|
|
}
|
|
|
|
const index = Dep.Index.init(@intCast(store.dependencies.items.len));
|
|
store.dependencies.appendAssumeCapacity(dep);
|
|
return index;
|
|
}
|
|
|
|
const Entry = struct {
|
|
/// The directory handle the watch is placed on
|
|
dir: bun.FileDescriptor,
|
|
dir_fd_owned: bool,
|
|
/// Files which request this import index
|
|
first_dep: Dep.Index,
|
|
/// To pass to Watcher.remove
|
|
watch_index: u16,
|
|
};
|
|
|
|
const Dep = struct {
|
|
next: Index.Optional,
|
|
/// The file that contained the failed import
source_file_path: []const u8,
/// The specifier that failed to resolve. Before running a re-build, it is
/// resolved again, as creating an unrelated file should not re-emit another
/// error. This is allocated memory.
specifier: []u8,
|
|
|
|
const Index = bun.GenericIndex(u32, Dep);
|
|
};
|
|
};
|
|
|
|
const ChunkKind = enum {
|
|
initial_response,
|
|
hmr_chunk,
|
|
};
|
|
|
|
/// Errors sent to the HMR client in the browser are serialized. The same format
|
|
/// is used for thrown JavaScript exceptions as well as bundler errors.
|
|
/// Serialized failures contain a handle on what file or route they came from,
|
|
/// which allows the bundler to dismiss or update stale failures via index as
|
|
/// opposed to re-sending a new payload. This also means only changed files are
|
|
/// rebuilt, instead of all of the failed files.
|
|
///
|
|
/// The HMR client in the browser is expected to sort the final list of errors
|
|
/// for deterministic output; there is code in DevServer that uses `swapRemove`.
|
|
pub const SerializedFailure = struct {
|
|
/// Serialized data is always owned by dev.allocator
|
|
/// The first 32 bits of this slice contain the owner
|
|
data: []u8,
|
|
|
|
pub fn deinit(f: SerializedFailure, dev: *DevServer) void {
|
|
dev.allocator.free(f.data);
|
|
}
|
|
|
|
/// The metaphorical owner of an incremental file error. The packed variant
|
|
/// is given to the HMR runtime as an opaque handle.
|
|
pub const Owner = union(enum) {
|
|
none,
|
|
route: RouteBundle.Index,
|
|
client: IncrementalGraph(.client).FileIndex,
|
|
server: IncrementalGraph(.server).FileIndex,
|
|
|
|
pub fn encode(owner: Owner) Packed {
|
|
return switch (owner) {
|
|
.none => .{ .kind = .none, .data = 0 },
|
|
.client => |data| .{ .kind = .client, .data = data.get() },
|
|
.server => |data| .{ .kind = .server, .data = data.get() },
|
|
.route => |data| .{ .kind = .route, .data = data.get() },
|
|
};
|
|
}
|
|
|
|
pub const Packed = packed struct(u32) {
|
|
data: u30,
|
|
kind: enum(u2) { none, route, client, server },
|
|
|
|
pub fn decode(owner: Packed) Owner {
|
|
return switch (owner.kind) {
|
|
.none => .none,
|
|
.client => .{ .client = IncrementalGraph(.client).FileIndex.init(owner.data) },
|
|
.server => .{ .server = IncrementalGraph(.server).FileIndex.init(owner.data) },
|
|
.route => .{ .route = RouteBundle.Index.init(owner.data) },
|
|
};
|
|
}
|
|
|
|
comptime {
|
|
assert(@as(u32, @bitCast(Packed{ .kind = .none, .data = 1 })) == 1);
|
|
}
|
|
};
|
|
};
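// Round-trip example (illustrative):
//   const owner: Owner = .{ .route = RouteBundle.Index.init(3) };
//   const p = owner.encode(); // Packed{ .kind = .route, .data = 3 }
//   bun.assert(p.decode().route.get() == 3);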
|
|
|
|
fn getOwner(failure: SerializedFailure) Owner {
|
|
return std.mem.bytesAsValue(Owner.Packed, failure.data[0..4]).decode();
|
|
}
|
|
|
|
/// This assumes the hash map contains only one SerializedFailure per owner.
|
|
/// This is okay since SerializedFailure can contain more than one error.
|
|
const ArrayHashContextViaOwner = struct {
|
|
pub fn hash(_: ArrayHashContextViaOwner, k: SerializedFailure) u32 {
|
|
return std.hash.uint32(@bitCast(k.getOwner().encode()));
|
|
}
|
|
|
|
pub fn eql(_: ArrayHashContextViaOwner, a: SerializedFailure, b: SerializedFailure, _: usize) bool {
|
|
return @as(u32, @bitCast(a.getOwner().encode())) == @as(u32, @bitCast(b.getOwner().encode()));
|
|
}
|
|
};
|
|
|
|
const ArrayHashAdapter = struct {
|
|
pub fn hash(_: ArrayHashAdapter, own: Owner) u32 {
|
|
return std.hash.uint32(@bitCast(own.encode()));
|
|
}
|
|
|
|
pub fn eql(_: ArrayHashAdapter, a: Owner, b: SerializedFailure, _: usize) bool {
|
|
return @as(u32, @bitCast(a.encode())) == @as(u32, @bitCast(b.getOwner().encode()));
|
|
}
|
|
};
|
|
|
|
pub const ErrorKind = enum(u8) {
|
|
// A log message. The `logger.Kind` is encoded here.
|
|
bundler_log_err = 0,
|
|
bundler_log_warn = 1,
|
|
bundler_log_note = 2,
|
|
bundler_log_debug = 3,
|
|
bundler_log_verbose = 4,
|
|
|
|
/// new Error(message)
|
|
js_error,
|
|
/// new TypeError(message)
|
|
js_error_type,
|
|
/// new RangeError(message)
|
|
js_error_range,
|
|
/// Other forms of `Error` objects, including when an error has a
|
|
/// `code`, and other fields.
|
|
js_error_extra,
|
|
/// Non-error with a stack trace
|
|
js_primitive_exception,
|
|
/// Non-error JS values
|
|
js_primitive,
|
|
/// new AggregateError(errors, message)
|
|
js_aggregate,
|
|
};
|
|
|
|
pub fn initFromJs(dev: *DevServer, owner: Owner, value: JSValue) !SerializedFailure {
|
|
{
|
|
_ = value;
|
|
@panic("TODO");
|
|
}
|
|
// Avoid small re-allocations without requesting so much from the heap
|
|
var sfb = std.heap.stackFallback(65536, dev.allocator);
|
|
var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch
|
|
unreachable; // enough space
|
|
const w = payload.writer();
|
|
|
|
try w.writeInt(u32, @bitCast(owner.encode()), .little);
|
|
// try writeJsValue(value);
|
|
|
|
// Avoid re-cloning if it was moved to the heap
|
|
const data = if (payload.items.ptr == &sfb.buffer)
|
|
try dev.allocator.dupe(u8, payload.items)
|
|
else
|
|
payload.items;
|
|
|
|
return .{ .data = data };
|
|
}
|
|
|
|
pub fn initFromLog(
|
|
dev: *DevServer,
|
|
owner: Owner,
|
|
// for .client and .server, these are meant to be relative file paths
|
|
owner_display_name: []const u8,
|
|
messages: []const bun.logger.Msg,
|
|
) !SerializedFailure {
|
|
assert(messages.len > 0);
|
|
|
|
// Avoid small re-allocations without requesting so much from the heap
|
|
var sfb = std.heap.stackFallback(65536, dev.allocator);
|
|
var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch
|
|
unreachable; // enough space
|
|
const w = payload.writer();
|
|
|
|
try w.writeInt(u32, @bitCast(owner.encode()), .little);
|
|
|
|
try writeString32(owner_display_name, w);
|
|
|
|
try w.writeInt(u32, @intCast(messages.len), .little);
|
|
|
|
for (messages) |*msg| {
|
|
try writeLogMsg(msg, w);
|
|
}
|
|
|
|
// Avoid re-cloning if it was moved to the heap
|
|
const data = if (payload.items.ptr == &sfb.buffer)
|
|
try dev.allocator.dupe(u8, payload.items)
|
|
else
|
|
payload.items;
|
|
|
|
return .{ .data = data };
|
|
}
|
|
|
|
// All "write" functions get a corresponding "read" function in ./client/error.ts
|
|
|
|
const Writer = std.ArrayList(u8).Writer;
|
|
|
|
fn writeLogMsg(msg: *const bun.logger.Msg, w: Writer) !void {
|
|
try w.writeByte(switch (msg.kind) {
|
|
inline else => |k| @intFromEnum(@field(ErrorKind, "bundler_log_" ++ @tagName(k))),
|
|
});
|
|
try writeLogData(msg.data, w);
|
|
const notes = msg.notes;
|
|
try w.writeInt(u32, @intCast(notes.len), .little);
|
|
for (notes) |note| {
|
|
try writeLogData(note, w);
|
|
}
|
|
}
|
|
|
|
fn writeLogData(data: bun.logger.Data, w: Writer) !void {
|
|
try writeString32(data.text, w);
|
|
if (data.location) |loc| {
|
|
if (loc.line < 0) {
|
|
try w.writeInt(u32, 0, .little);
|
|
return;
|
|
}
|
|
assert(loc.column >= 0); // zero based and not negative
|
|
|
|
try w.writeInt(i32, @intCast(loc.line), .little);
|
|
try w.writeInt(u32, @intCast(loc.column), .little);
|
|
try w.writeInt(u32, @intCast(loc.length), .little);
|
|
|
|
// TODO: syntax highlighted line text + give more context lines
|
|
try writeString32(loc.line_text orelse "", w);
|
|
|
|
// The file is not specified here. Since the transpiler runs every file
|
|
// in isolation, it would be impossible to reference any other file
|
|
// in this Log. Thus, it is not serialized.
|
|
} else {
|
|
try w.writeInt(u32, 0, .little);
|
|
}
|
|
}
|
|
|
|
fn writeString32(data: []const u8, w: Writer) !void {
|
|
try w.writeInt(u32, @intCast(data.len), .little);
|
|
try w.writeAll(data);
|
|
}
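// Resulting byte layout written by `initFromLog` (informal sketch, all
// integers little-endian):
//   u32            owner (Owner.Packed)
//   u32 + [n]u8    owner_display_name
//   u32            message count, then per message:
//     u8           ErrorKind (bundler_log_*)
//     log data     text (u32 + [n]u8), optional location
//     u32          note count, then note log data entries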
|
|
|
|
// fn writeJsValue(value: JSValue, global: *JSC.JSGlobalObject, w: *Writer) !void {
|
|
// if (value.isAggregateError(global)) {
|
|
// //
|
|
// }
|
|
// if (value.jsType() == .DOMWrapper) {
|
|
// if (value.as(JSC.BuildMessage)) |build_error| {
|
|
// _ = build_error; // autofix
|
|
// //
|
|
// } else if (value.as(JSC.ResolveMessage)) |resolve_error| {
|
|
// _ = resolve_error; // autofix
|
|
// @panic("TODO");
|
|
// }
|
|
// }
|
|
// _ = w; // autofix
|
|
|
|
// @panic("TODO");
|
|
// }
|
|
};
|
|
|
|
// For debugging, it is helpful to be able to see bundles.
|
|
fn dumpBundle(dump_dir: std.fs.Dir, graph: bake.Graph, rel_path: []const u8, chunk: []const u8, wrap: bool) !void {
|
|
const buf = bun.PathBufferPool.get();
|
|
defer bun.PathBufferPool.put(buf);
|
|
const name = bun.path.joinAbsStringBuf("/", buf, &.{
|
|
@tagName(graph),
|
|
rel_path,
|
|
}, .auto)[1..];
|
|
var inner_dir = try dump_dir.makeOpenPath(bun.Dirname.dirname(u8, name).?, .{});
|
|
defer inner_dir.close();
|
|
|
|
const file = try inner_dir.createFile(bun.path.basename(name), .{});
|
|
defer file.close();
|
|
|
|
var bufw = std.io.bufferedWriter(file.writer());
|
|
|
|
if (!bun.strings.hasSuffixComptime(rel_path, ".map")) {
|
|
try bufw.writer().print("// {s} bundled for {s}\n", .{
|
|
bun.fmt.quote(rel_path),
|
|
@tagName(graph),
|
|
});
|
|
try bufw.writer().print("// Bundled at {d}, Bun " ++ bun.Global.package_json_version_with_canary ++ "\n", .{
|
|
std.time.nanoTimestamp(),
|
|
});
|
|
}
|
|
|
|
// Wrap in an object to make it valid syntax. Regardless, these files
|
|
// are never executable on their own as they contain only a single module.
|
|
|
|
if (wrap)
|
|
try bufw.writer().writeAll("({\n");
|
|
|
|
try bufw.writer().writeAll(chunk);
|
|
|
|
if (wrap)
|
|
try bufw.writer().writeAll("});\n");
|
|
|
|
try bufw.flush();
|
|
}
|
|
|
|
noinline fn dumpBundleForChunk(dev: *DevServer, dump_dir: std.fs.Dir, side: bake.Side, key: []const u8, code: []const u8, wrap: bool, is_ssr_graph: bool) void {
|
|
const cwd = dev.root;
|
|
var a: bun.PathBuffer = undefined;
|
|
var b: [bun.MAX_PATH_BYTES * 2]u8 = undefined;
|
|
const rel_path = bun.path.relativeBufZ(&a, cwd, key);
|
|
const size = std.mem.replacementSize(u8, rel_path, ".." ++ std.fs.path.sep_str, "_.._" ++ std.fs.path.sep_str);
|
|
_ = std.mem.replace(u8, rel_path, ".." ++ std.fs.path.sep_str, "_.._" ++ std.fs.path.sep_str, &b);
|
|
const rel_path_escaped = b[0..size];
|
|
dumpBundle(dump_dir, switch (side) {
|
|
.client => .client,
|
|
.server => if (is_ssr_graph) .ssr else .server,
|
|
}, rel_path_escaped, code, wrap) catch |err| {
|
|
bun.handleErrorReturnTrace(err, @errorReturnTrace());
|
|
Output.warn("Could not dump bundle: {}", .{err});
|
|
};
|
|
}
|
|
fn emitVisualizerMessageIfNeeded(dev: *DevServer) void {
|
|
if (!bun.FeatureFlags.bake_debugging_features) return;
|
|
if (dev.emit_visualizer_events == 0) return;
|
|
|
|
var sfb = std.heap.stackFallback(65536, dev.allocator);
|
|
var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch
|
|
unreachable; // enough capacity on the stack
|
|
defer payload.deinit();
|
|
|
|
dev.writeVisualizerMessage(&payload) catch return; // visualizer does not get an update if it OOMs
|
|
|
|
dev.publish(.visualizer, payload.items, .binary);
|
|
}
|
|
|
|
fn writeVisualizerMessage(dev: *DevServer, payload: *std.ArrayList(u8)) !void {
|
|
payload.appendAssumeCapacity(MessageId.visualizer.char());
|
|
const w = payload.writer();
|
|
|
|
inline for (
|
|
[2]bake.Side{ .client, .server },
|
|
.{ &dev.client_graph, &dev.server_graph },
|
|
) |side, g| {
|
|
try w.writeInt(u32, @intCast(g.bundled_files.count()), .little);
|
|
for (
|
|
g.bundled_files.keys(),
|
|
g.bundled_files.values(),
|
|
0..,
|
|
) |k, v, i| {
|
|
const normalized_key = dev.relativePath(k);
|
|
defer dev.releaseRelativePathBuf();
|
|
try w.writeInt(u32, @intCast(normalized_key.len), .little);
|
|
if (k.len == 0) continue;
|
|
try w.writeAll(normalized_key);
|
|
try w.writeByte(@intFromBool(g.stale_files.isSetAllowOutOfBound(i, true) or switch (side) {
|
|
.server => v.failed,
|
|
.client => v.flags.failed,
|
|
}));
|
|
try w.writeByte(@intFromBool(side == .server and v.is_rsc));
|
|
try w.writeByte(@intFromBool(side == .server and v.is_ssr));
|
|
try w.writeByte(@intFromBool(if (side == .server) v.is_route else v.flags.is_html_route));
|
|
try w.writeByte(@intFromBool(side == .client and v.flags.is_special_framework_file));
|
|
try w.writeByte(@intFromBool(switch (side) {
|
|
.server => v.is_client_component_boundary,
|
|
.client => v.flags.is_hmr_root,
|
|
}));
|
|
}
|
|
}
|
|
inline for (.{ &dev.client_graph, &dev.server_graph }) |g| {
|
|
const G = @TypeOf(g.*);
|
|
|
|
try w.writeInt(u32, @intCast(g.edges.items.len - g.edges_free_list.items.len), .little);
|
|
for (g.edges.items, 0..) |edge, i| {
|
|
if (std.mem.indexOfScalar(G.EdgeIndex, g.edges_free_list.items, G.EdgeIndex.init(@intCast(i))) != null)
|
|
continue;
|
|
|
|
try w.writeInt(u32, @intCast(edge.dependency.get()), .little);
|
|
try w.writeInt(u32, @intCast(edge.imported.get()), .little);
|
|
}
|
|
}
|
|
}
|
|
|
|
pub fn onWebSocketUpgrade(
|
|
dev: *DevServer,
|
|
res: anytype,
|
|
req: *Request,
|
|
upgrade_ctx: *uws.uws_socket_context_t,
|
|
id: usize,
|
|
) void {
|
|
assert(id == 0);
|
|
|
|
const dw = bun.create(dev.allocator, HmrSocket, .{
|
|
.dev = dev,
|
|
.is_from_localhost = if (res.getRemoteSocketInfo()) |addr|
|
|
if (addr.is_ipv6)
|
|
bun.strings.eqlComptime(addr.ip, "::1")
|
|
else
|
|
bun.strings.eqlComptime(addr.ip, "127.0.0.1")
|
|
else
|
|
false,
|
|
.subscriptions = .{},
|
|
.active_route = .none,
|
|
});
|
|
dev.active_websocket_connections.put(dev.allocator, dw, {}) catch bun.outOfMemory();
|
|
_ = res.upgrade(
|
|
*HmrSocket,
|
|
dw,
|
|
req.header("sec-websocket-key") orelse "",
|
|
req.header("sec-websocket-protocol") orelse "",
|
|
req.header("sec-websocket-extension") orelse "",
|
|
upgrade_ctx,
|
|
);
|
|
}
|
|
|
|
/// Every message is to use `.binary`/`ArrayBuffer` transport mode. The first byte
|
|
/// indicates a Message ID; see comments on each type for how to interpret the rest.
|
|
/// Avoid changing message ID values, as some of these are hard-coded in tests.
|
|
///
|
|
/// This format is only intended for communication via the browser and DevServer.
|
|
/// Server-side HMR is implemented using a different interface. This API is not
|
|
/// versioned alongside Bun; breaking changes may occur at any point.
|
|
///
|
|
/// All integers are sent in little-endian.
|
|
pub const MessageId = enum(u8) {
|
|
/// Version payload. Sent on connection startup. The client should issue a
|
|
/// hard-reload when it mismatches with its `config.version`.
|
|
version = 'V',
|
|
/// Sent on a successful bundle, containing client code, updates routes, and
|
|
/// changed CSS files. Emitted on the `.hot_update` topic.
|
|
///
|
|
/// - For each server-side updated route:
|
|
/// - `i32`: Route Bundle ID
|
|
/// - `i32`: -1 to indicate end of list
|
|
/// - For each route stylesheet lists affected:
|
|
/// - `i32`: Route Bundle ID
|
|
/// - `u32`: Length of route pattern
|
|
/// - `[n]u8` UTF-8: Route pattern
|
|
/// - `u32`: Number of CSS attachments: For Each
|
|
/// - `[16]u8` ASCII: CSS identifier
|
|
/// - `i32`: -1 to indicate end of list
|
|
/// - `u32`: Number of CSS mutations. For Each:
|
|
/// - `[16]u8` ASCII: CSS identifier
|
|
/// - `u32`: Length of CSS code
|
|
/// - `[n]u8` UTF-8: CSS payload
|
|
/// - `[n]u8` UTF-8: JS Payload. No length, rest of buffer is text.
|
|
/// Can be empty if no client-side code changed.
|
|
///
|
|
/// The first list contains route changes that require a page reload, but
|
|
/// frameworks can perform via `onServerSideReload`. Fallback behavior
|
|
/// is to call `location.reload();`
|
|
///
|
|
/// The second list is sent to inform the current list of CSS files
|
|
/// reachable by a route, recalculated whenever an import is added or
|
|
/// removed as that can inadvertently affect the CSS list.
|
|
///
|
|
/// The third list contains CSS mutations, which are when the underlying
|
|
/// CSS file itself changes.
|
|
///
|
|
/// The JS payload is the remaining data. If defined, it can be passed to
|
|
/// `eval`, resulting in an object of new module callables.
|
|
hot_update = 'u',
|
|
/// Sent when the list of errors changes.
|
|
///
|
|
/// - `u32`: Removed errors. For Each:
|
|
/// - `u32`: Error owner
|
|
/// - Remainder are added errors. For Each:
|
|
/// - `SerializedFailure`: Error Data
|
|
errors = 'e',
|
|
/// An opaque message forwarded from a browser tab.
|
|
/// - `u32`: Unique ID for the browser tab. Each tab gets a different ID
|
|
/// - `[n]u8`: Opaque bytes, untouched from `IncomingMessageId.browser_error`
|
|
browser_message = 'b',
|
|
/// Sent to clear the messages from `browser_error`
|
|
/// - For each removed ID:
|
|
/// - `u32`: Unique ID for the browser tab.
|
|
browser_message_clear = 'B',
|
|
/// Sent when a request handler error is emitted. Each route will own at
|
|
/// most 1 error, where sending a new request clears the original one.
|
|
///
|
|
/// - `u32`: Removed errors. For Each:
|
|
/// - `u32`: Error owner
|
|
/// - `u32`: Length of route pattern
|
|
/// - `[n]u8`: UTF-8 Route pattern
|
|
/// - `SerializedFailure`: The one error list for the request
|
|
request_handler_error = 'h',
|
|
/// Payload for `incremental_visualizer.html`. This can be accessed via
|
|
/// `/_bun/incremental_visualizer`. This contains both graphs.
|
|
///
|
|
/// - `u32`: Number of files in `client_graph`. For Each:
|
|
/// - `u32`: Length of name. If zero then no other fields are provided.
|
|
/// - `[n]u8`: File path in UTF-8 encoded text
|
|
/// - `u8`: If file is stale, set 1
|
|
/// - `u8`: If file is in server graph, set 1
|
|
/// - `u8`: If file is in ssr graph, set 1
|
|
/// - `u8`: If file is a server-side route root, set 1
/// - `u8`: If file is a special framework file, set 1
/// - `u8`: If file is a server-side component boundary file, set 1
|
|
/// - `u32`: Number of files in the server graph. For Each:
|
|
/// - Repeat the same parser for the client graph
|
|
/// - `u32`: Number of client edges. For Each:
|
|
/// - `u32`: File index of the dependency file
|
|
/// - `u32`: File index of the imported file
|
|
/// - `u32`: Number of server edges. For Each:
|
|
/// - `u32`: File index of the dependency file
|
|
/// - `u32`: File index of the imported file
|
|
visualizer = 'v',
|
|
/// Sent in response to `set_url`.
|
|
/// - `u32`: Route index
|
|
set_url_response = 'n',
|
|
/// Used for synchronization in DevServer tests, to identify when a update was
|
|
/// acknowledged by the watcher but intentionally took no action.
|
|
/// - `u8`: See bake-harness.ts WatchSynchronization enum.
|
|
testing_watch_synchronization = 'r',
|
|
|
|
pub inline fn char(id: MessageId) u8 {
|
|
return @intFromEnum(id);
|
|
}
|
|
};
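// Illustrative client-side dispatch on the first byte of a payload (sketch
// only; the real reader lives in the HMR runtime, not in this file):
//   switch (@as(MessageId, @enumFromInt(payload[0]))) {
//       .version => {}, // compare the remaining bytes with config.version
//       .hot_update => {}, // parse route ids and CSS lists, then eval the JS tail
//       else => {},
//   }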
|
|
|
|
/// Avoid changing message ID values, as some of these are hard-coded in tests.
|
|
pub const IncomingMessageId = enum(u8) {
|
|
/// Subscribe to an event channel. Payload is a sequence of chars available
|
|
/// in HmrTopic.
|
|
subscribe = 's',
|
|
/// Emitted on client-side navigations.
|
|
/// Rest of payload is a UTF-8 string.
|
|
set_url = 'n',
|
|
/// Tells the DevServer to batch events together.
|
|
testing_batch_events = 'H',
|
|
|
|
/// Invalid data
|
|
_,
|
|
};
|
|
|
|
const HmrTopic = enum(u8) {
|
|
hot_update = 'h',
|
|
errors = 'e',
|
|
browser_error = 'E',
|
|
visualizer = 'v',
|
|
testing_watch_synchronization = 'r',
|
|
|
|
/// Invalid data
|
|
_,
|
|
|
|
pub const max_count = @typeInfo(HmrTopic).@"enum".fields.len;
|
|
pub const Bits = @Type(.{ .@"struct" = .{
|
|
.backing_integer = @Type(.{ .int = .{
|
|
.bits = max_count,
|
|
.signedness = .unsigned,
|
|
} }),
|
|
.fields = &brk: {
|
|
const enum_fields = @typeInfo(HmrTopic).@"enum".fields;
|
|
var fields: [enum_fields.len]std.builtin.Type.StructField = undefined;
|
|
for (enum_fields, &fields) |e, *s| {
|
|
s.* = .{
|
|
.name = e.name,
|
|
.type = bool,
|
|
.default_value_ptr = &false,
|
|
.is_comptime = false,
|
|
.alignment = 0,
|
|
};
|
|
}
|
|
break :brk fields;
|
|
},
|
|
.decls = &.{},
|
|
.is_tuple = false,
|
|
.layout = .@"packed",
|
|
} });
|
|
};
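// For reference, the `Bits` type generated above is equivalent to writing
// out (illustrative expansion):
//   packed struct(u5) {
//       hot_update: bool = false,
//       errors: bool = false,
//       browser_error: bool = false,
//       visualizer: bool = false,
//       testing_watch_synchronization: bool = false,
//   }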
|
|
|
|
const HmrSocket = struct {
|
|
dev: *DevServer,
|
|
underlying: ?AnyWebSocket = null,
|
|
subscriptions: HmrTopic.Bits,
|
|
/// Allows actions which inspect or mutate sensitive DevServer state.
|
|
is_from_localhost: bool,
|
|
/// By telling DevServer the active route, this enables receiving detailed
|
|
/// `hot_update` events for when the route is updated.
|
|
active_route: RouteBundle.Index.Optional,
|
|
|
|
pub fn onOpen(s: *HmrSocket, ws: AnyWebSocket) void {
|
|
_ = ws.send(&(.{MessageId.version.char()} ++ s.dev.configuration_hash_key), .binary, false, true);
|
|
s.underlying = ws;
|
|
}
|
|
|
|
pub fn onMessage(s: *HmrSocket, ws: AnyWebSocket, msg: []const u8, opcode: uws.Opcode) void {
|
|
_ = opcode;
|
|
|
|
if (msg.len == 0) {
|
|
ws.close();
|
|
return;
|
|
}
|
|
|
|
switch (@as(IncomingMessageId, @enumFromInt(msg[0]))) {
|
|
.subscribe => {
|
|
var new_bits: HmrTopic.Bits = .{};
|
|
const topics = msg[1..];
|
|
if (topics.len > HmrTopic.max_count) return;
|
|
outer: for (topics) |char| {
|
|
inline for (@typeInfo(HmrTopic).@"enum".fields) |field| {
|
|
if (char == field.value) {
|
|
@field(new_bits, field.name) = true;
|
|
continue :outer;
|
|
}
|
|
}
|
|
}
|
|
inline for (comptime std.enums.values(HmrTopic)) |field| {
|
|
if (@field(new_bits, @tagName(field)) and !@field(s.subscriptions, @tagName(field))) {
|
|
_ = ws.subscribe(&.{@intFromEnum(field)});
|
|
|
|
// on-subscribe hooks
|
|
switch (field) {
|
|
.visualizer => {
|
|
s.dev.emit_visualizer_events += 1;
|
|
s.dev.emitVisualizerMessageIfNeeded();
|
|
},
|
|
else => {},
|
|
}
|
|
} else if (!@field(new_bits, @tagName(field)) and @field(s.subscriptions, @tagName(field))) {
|
|
_ = ws.unsubscribe(&.{@intFromEnum(field)});
|
|
|
|
// on-unsubscribe hooks
|
|
switch (field) {
|
|
.visualizer => {
|
|
s.dev.emit_visualizer_events -= 1;
|
|
},
|
|
else => {},
|
|
}
|
|
}
|
|
}
s.subscriptions = new_bits;
|
|
},
|
|
.set_url => {
|
|
const pattern = msg[1..];
|
|
const rbi = s.dev.routeToBundleIndexSlow(pattern) orelse
|
|
return;
|
|
if (s.active_route.unwrap()) |old| {
|
|
if (old == rbi) return;
|
|
s.dev.routeBundlePtr(old).active_viewers -= 1;
|
|
}
|
|
s.dev.routeBundlePtr(rbi).active_viewers += 1;
|
|
s.active_route = rbi.toOptional();
|
|
var response: [5]u8 = .{MessageId.set_url_response.char()} ++ std.mem.toBytes(rbi.get());
|
|
_ = ws.send(&response, .binary, false, true);
|
|
},
|
|
.testing_batch_events => switch (s.dev.testing_batch_events) {
|
|
.disabled => {
|
|
if (s.dev.current_bundle != null) {
|
|
s.dev.testing_batch_events = .enable_after_bundle;
|
|
} else {
|
|
s.dev.testing_batch_events = .{ .enabled = .empty };
|
|
s.dev.publish(.testing_watch_synchronization, &.{
|
|
MessageId.testing_watch_synchronization.char(),
|
|
0,
|
|
}, .binary);
|
|
}
|
|
},
|
|
.enable_after_bundle => {
|
|
// do not expose a websocket event that panics a release build
|
|
bun.debugAssert(false);
|
|
ws.close();
|
|
},
|
|
.enabled => |event_const| {
|
|
var event = event_const;
|
|
s.dev.testing_batch_events = .disabled;
|
|
|
|
if (event.entry_points.set.count() == 0) {
|
|
s.dev.publish(.testing_watch_synchronization, &.{
|
|
MessageId.testing_watch_synchronization.char(),
|
|
2,
|
|
}, .binary);
|
|
return;
|
|
}
|
|
|
|
s.dev.startAsyncBundle(
|
|
event.entry_points,
|
|
true,
|
|
std.time.Timer.start() catch @panic("timers unsupported"),
|
|
) catch bun.outOfMemory();
|
|
|
|
event.entry_points.deinit(s.dev.allocator);
|
|
},
|
|
},
|
|
_ => ws.close(),
|
|
}
|
|
}
|
|
|
|
pub fn onClose(s: *HmrSocket, ws: AnyWebSocket, exit_code: i32, message: []const u8) void {
|
|
_ = ws;
|
|
_ = exit_code;
|
|
_ = message;
|
|
|
|
if (s.subscriptions.visualizer) {
|
|
s.dev.emit_visualizer_events -= 1;
|
|
}
|
|
|
|
if (s.active_route.unwrap()) |old| {
|
|
s.dev.routeBundlePtr(old).active_viewers -= 1;
|
|
}
|
|
|
|
bun.debugAssert(s.dev.active_websocket_connections.remove(s));
|
|
s.dev.allocator.destroy(s);
|
|
}
|
|
};
|
|
|
|
pub fn routeToBundleIndexSlow(dev: *DevServer, pattern: []const u8) ?RouteBundle.Index {
|
|
var params: FrameworkRouter.MatchedParams = undefined;
|
|
if (dev.router.matchSlow(pattern, &params)) |route_index| {
|
|
return dev.getOrPutRouteBundle(.{ .framework = route_index }) catch bun.outOfMemory();
|
|
}
|
|
if (dev.html_router.get(pattern)) |html| {
|
|
return dev.getOrPutRouteBundle(.{ .html = html }) catch bun.outOfMemory();
|
|
}
|
|
return null;
|
|
}
|
|
|
|
const c = struct {
|
|
// BakeSourceProvider.cpp
|
|
extern fn BakeGetDefaultExportFromModule(global: *JSC.JSGlobalObject, module: JSValue) JSValue;
|
|
|
|
fn BakeLoadServerHmrPatch(global: *JSC.JSGlobalObject, code: bun.String) !JSValue {
|
|
const f = @extern(
|
|
*const fn (*JSC.JSGlobalObject, bun.String) callconv(.C) JSValue.MaybeException,
|
|
.{ .name = "BakeLoadServerHmrPatch" },
|
|
);
|
|
return f(global, code).unwrap();
|
|
}
|
|
|
|
fn BakeLoadInitialServerCode(global: *JSC.JSGlobalObject, code: bun.String, separate_ssr_graph: bool) bun.JSError!JSValue {
|
|
const f = @extern(*const fn (*JSC.JSGlobalObject, bun.String, bool) callconv(.C) JSValue.MaybeException, .{
|
|
.name = "BakeLoadInitialServerCode",
|
|
});
|
|
return f(global, code, separate_ssr_graph).unwrap();
|
|
}
|
|
};
|
|
|
|
/// Called on DevServer thread via HotReloadTask
|
|
pub fn startReloadBundle(dev: *DevServer, event: *HotReloadEvent) bun.OOM!void {
|
|
defer event.files.clearRetainingCapacity();
|
|
|
|
var sfb = std.heap.stackFallback(4096, dev.allocator);
|
|
const temp_alloc = sfb.get();
|
|
var entry_points: EntryPointList = EntryPointList.empty;
|
|
defer entry_points.deinit(temp_alloc);
|
|
|
|
event.processFileList(dev, &entry_points, temp_alloc);
|
|
if (entry_points.set.count() == 0) {
|
|
return;
|
|
}
|
|
|
|
dev.startAsyncBundle(
|
|
entry_points,
|
|
true,
|
|
event.timer,
|
|
) catch |err| {
|
|
bun.handleErrorReturnTrace(err, @errorReturnTrace());
|
|
return;
|
|
};
|
|
}
|
|
|
|
fn markAllRouteChildren(router: *FrameworkRouter, comptime n: comptime_int, bits: [n]*DynamicBitSetUnmanaged, route_index: Route.Index) void {
    var next = router.routePtr(route_index).first_child.unwrap();
    while (next) |child_index| {
        const route = router.routePtr(child_index);
        if (route.bundle.unwrap()) |index| {
            inline for (bits) |b|
                b.set(index.get());
        }
        markAllRouteChildren(router, n, bits, child_index);
        next = route.next_sibling.unwrap();
    }
}

fn markAllRouteChildrenFailed(dev: *DevServer, route_index: Route.Index) void {
    var next = dev.router.routePtr(route_index).first_child.unwrap();
    while (next) |child_index| {
        const route = dev.router.routePtr(child_index);
        if (route.bundle.unwrap()) |index| {
            dev.routeBundlePtr(index).server_state = .possible_bundling_failures;
        }
        markAllRouteChildrenFailed(dev, child_index);
        next = route.next_sibling.unwrap();
    }
}

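// Note: the two helpers above walk the framework route tree depth-first via the
// `first_child`/`next_sibling` links, so every descendant of `route_index` is
// visited exactly once.
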
/// This task informs the DevServer's thread about new files to be bundled.
pub const HotReloadEvent = struct {
    /// Aligned to cache lines to avoid false sharing between the two events.
    const Aligned = struct { aligned: HotReloadEvent align(std.atomic.cache_line) };

    owner: *DevServer,
    /// Initialized in WatcherAtomics.watcherReleaseAndSubmitEvent
    concurrent_task: JSC.ConcurrentTask,
    /// The watcher is not able to peek into IncrementalGraph to know what files
    /// to invalidate, so the watch events are de-duplicated and passed along.
    /// The keys are owned by the file watcher.
    files: bun.StringArrayHashMapUnmanaged(void),
    /// Directories are watched so that resolution failures can be solved.
    /// The keys are owned by the file watcher.
    dirs: bun.StringArrayHashMapUnmanaged(void),
    /// Same purpose as `files`, but stores NUL-separated paths whose bytes have no owner.
    extra_files: std.ArrayListUnmanaged(u8),
    /// Initialized by WatcherAtomics.watcherAcquireEvent
    timer: std.time.Timer,
    /// This event may be referenced by either DevServer or Watcher thread.
    /// 1 if referenced, 0 if unreferenced; see WatcherAtomics
    contention_indicator: std.atomic.Value(u32),

    debug_mutex: if (Environment.isDebug) bun.Mutex else void,

    pub fn initEmpty(owner: *DevServer) HotReloadEvent {
        return .{
            .owner = owner,
            .concurrent_task = undefined,
            .files = .empty,
            .dirs = .empty,
            .timer = undefined,
            .contention_indicator = .init(0),
            .debug_mutex = if (Environment.isDebug) .{} else {},
            .extra_files = .empty,
        };
    }

    pub fn reset(ev: *HotReloadEvent) void {
        if (Environment.isDebug)
            ev.debug_mutex.unlock();

        ev.files.clearRetainingCapacity();
        ev.dirs.clearRetainingCapacity();
        ev.extra_files.clearRetainingCapacity();
        ev.timer = undefined;
    }

    pub fn isEmpty(ev: *const HotReloadEvent) bool {
        return (ev.files.count() + ev.dirs.count()) == 0;
    }

    pub fn appendFile(event: *HotReloadEvent, allocator: Allocator, file_path: []const u8) void {
        _ = event.files.getOrPut(allocator, file_path) catch bun.outOfMemory();
    }

    pub fn appendDir(event: *HotReloadEvent, allocator: Allocator, dir_path: []const u8, maybe_sub_path: ?[]const u8) void {
        if (dir_path.len == 0) return;
        _ = event.dirs.getOrPut(allocator, dir_path) catch bun.outOfMemory();

        const sub_path = maybe_sub_path orelse return;
        if (sub_path.len == 0) return;

        const platform = bun.path.Platform.auto;
        const ends_with_sep = platform.isSeparator(dir_path[dir_path.len - 1]);
        const starts_with_sep = platform.isSeparator(sub_path[0]);
        const sep_offset: i32 = if (ends_with_sep and starts_with_sep) -1 else 1;

        event.extra_files.ensureUnusedCapacity(allocator, @intCast(@as(i32, @intCast(dir_path.len + sub_path.len)) + sep_offset + 1)) catch bun.outOfMemory();
        event.extra_files.appendSliceAssumeCapacity(if (ends_with_sep) dir_path[0 .. dir_path.len - 1] else dir_path);
        event.extra_files.appendAssumeCapacity(platform.separator());
        event.extra_files.appendSliceAssumeCapacity(sub_path);
        event.extra_files.appendAssumeCapacity(0);
    }

    /// Invalidates items in IncrementalGraph, appending all new items to `entry_points`
    pub fn processFileList(
        event: *HotReloadEvent,
        dev: *DevServer,
        entry_points: *EntryPointList,
        temp_alloc: Allocator,
    ) void {
        dev.graph_safety_lock.lock();
        defer dev.graph_safety_lock.unlock();

        // First handle directories, because this may mutate `event.files`
        if (dev.directory_watchers.watches.count() > 0) for (event.dirs.keys()) |changed_dir_with_slash| {
            const changed_dir = bun.strings.withoutTrailingSlashWindowsPath(changed_dir_with_slash);

            // Bust resolution cache, but since Bun does not watch all
            // directories in a codebase, this only targets the following resolutions
            _ = dev.server_transpiler.resolver.bustDirCache(changed_dir);

            // if a directory watch exists for resolution failures, check those now.
            if (dev.directory_watchers.watches.getIndex(changed_dir)) |watcher_index| {
                const entry = &dev.directory_watchers.watches.values()[watcher_index];
                var new_chain: DirectoryWatchStore.Dep.Index.Optional = .none;
                var it: ?DirectoryWatchStore.Dep.Index = entry.first_dep;

                while (it) |index| {
                    const dep = &dev.directory_watchers.dependencies.items[index.get()];
                    it = dep.next.unwrap();

                    if ((dev.server_transpiler.resolver.resolve(
                        bun.path.dirname(dep.source_file_path, .auto),
                        dep.specifier,
                        .stmt,
                    ) catch null) != null) {
                        // this resolution result is not preserved as passing it
                        // into BundleV2 is too complicated. the resolution is
                        // cached, anyways.
                        event.appendFile(dev.allocator, dep.source_file_path);
                        dev.directory_watchers.freeDependencyIndex(dev.allocator, index) catch bun.outOfMemory();
                    } else {
                        // rebuild a new linked list for unaffected files
                        dep.next = new_chain;
                        new_chain = index.toOptional();
                    }
                }

                if (new_chain.unwrap()) |new_first_dep| {
                    entry.first_dep = new_first_dep;
                } else {
                    // without any files to depend on this watcher is freed
                    dev.directory_watchers.freeEntry(dev.allocator, watcher_index);
                }
            }
        };

        var rest_extra = event.extra_files.items;
        while (bun.strings.indexOfChar(rest_extra, 0)) |str| {
            event.files.put(dev.allocator, rest_extra[0..str], {}) catch bun.outOfMemory();
            rest_extra = rest_extra[str + 1 ..];
        }
        if (rest_extra.len > 0) {
            event.files.put(dev.allocator, rest_extra, {}) catch bun.outOfMemory();
        }

        const changed_file_paths = event.files.keys();
        inline for (.{ &dev.server_graph, &dev.client_graph }) |g| {
            g.invalidate(changed_file_paths, entry_points, temp_alloc) catch bun.outOfMemory();
        }

        if (entry_points.set.count() == 0) {
            Output.debugWarn("nothing to bundle", .{});
            if (changed_file_paths.len > 0)
                Output.debugWarn("modified files: {s}", .{
                    bun.fmt.fmtSlice(changed_file_paths, ", "),
                });

            if (event.dirs.count() > 0)
                Output.debugWarn("modified dirs: {s}", .{
                    bun.fmt.fmtSlice(event.dirs.keys(), ", "),
                });

            dev.publish(.testing_watch_synchronization, &.{
                MessageId.testing_watch_synchronization.char(),
                1,
            }, .binary);
            return;
        }

        if (dev.has_tailwind_plugin_hack) |*map| {
            for (map.keys()) |abs_path| {
                const file = dev.client_graph.bundled_files.get(abs_path) orelse
                    continue;
                if (file.flags.kind == .css)
                    entry_points.appendCss(temp_alloc, abs_path) catch bun.outOfMemory();
            }
        }
    }

    pub fn run(first: *HotReloadEvent) void {
        debug.log("HMR Task start", .{});
        defer debug.log("HMR Task end", .{});

        const dev = first.owner;

        if (Environment.isDebug) {
            assert(first.debug_mutex.tryLock());
            assert(first.contention_indicator.load(.seq_cst) == 0);
        }

        if (dev.current_bundle != null) {
            dev.next_bundle.reload_event = first;
            return;
        }

        var sfb = std.heap.stackFallback(4096, dev.allocator);
        const temp_alloc = sfb.get();
        var entry_points: EntryPointList = .empty;
        defer entry_points.deinit(temp_alloc);

        first.processFileList(dev, &entry_points, temp_alloc);

        const timer = first.timer;

        if (dev.watcher_atomics.recycleEventFromDevServer(first)) |second| {
            if (Environment.isDebug) {
                assert(second.debug_mutex.tryLock());
            }
            second.processFileList(dev, &entry_points, temp_alloc);
            dev.watcher_atomics.recycleSecondEventFromDevServer(second);
        }

        if (entry_points.set.count() == 0) {
            return;
        }

        switch (dev.testing_batch_events) {
            .disabled => {},
            .enabled => |*ev| {
                ev.append(dev, entry_points) catch bun.outOfMemory();
                dev.publish(.testing_watch_synchronization, &.{
                    MessageId.testing_watch_synchronization.char(),
                    1,
                }, .binary);
                return;
            },
            .enable_after_bundle => bun.debugAssert(false),
        }

        dev.startAsyncBundle(
            entry_points,
            true,
            timer,
        ) catch |err| {
            bun.handleErrorReturnTrace(err, @errorReturnTrace());
            return;
        };
    }
};

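// Rough sketch of the handoff implemented below (derived from the code, not a
// separate spec): the watcher thread fills one of two pre-allocated
// HotReloadEvents and submits it to the DevServer thread as a concurrent task;
// while that event is being processed, new watch events accumulate in the other
// one. The `contention_indicator` and `watcher_events_emitted` atomics decide
// which thread currently owns each event and whether a second task is needed.
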
/// All code that uses atomics to communicate with the watcher lives in this
/// struct. It attempts to recycle as much memory as possible, since files are
/// updated very frequently (the whole point of HMR).
const WatcherAtomics = struct {
    const log = Output.scoped(.DevServerWatchAtomics, true);

    /// Only two hot-reload tasks exist ever, since only one bundle may be active at
    /// once. Memory is reused by swapping between these two. These items are
    /// aligned to cache lines to reduce contention, since these structures are
    /// carefully passed between two threads.
    events: [2]HotReloadEvent.Aligned align(std.atomic.cache_line),
    /// 0 - no watch
    /// 1 - has fired additional watch
    /// 2+ - new events available, watcher is waiting on bundler to finish
    watcher_events_emitted: std.atomic.Value(u32),
    /// Which event is the watcher holding on to.
    /// This is not atomic because only the watcher thread uses this value.
    current: u1 align(std.atomic.cache_line),

    watcher_has_event: std.debug.SafetyLock,
    dev_server_has_event: std.debug.SafetyLock,

    pub fn init(dev: *DevServer) WatcherAtomics {
        return .{
            .events = .{
                .{ .aligned = .initEmpty(dev) },
                .{ .aligned = .initEmpty(dev) },
            },
            .current = 0,
            .watcher_events_emitted = .init(0),
            .watcher_has_event = .{},
            .dev_server_has_event = .{},
        };
    }

    /// Atomically get a *HotReloadEvent that is not used by the DevServer thread.
    /// Call `watcherReleaseAndSubmitEvent` when it is filled with files.
    fn watcherAcquireEvent(state: *WatcherAtomics) *HotReloadEvent {
        state.watcher_has_event.lock();

        var ev: *HotReloadEvent = &state.events[state.current].aligned;
        switch (ev.contention_indicator.swap(1, .seq_cst)) {
            0 => {
                // New event, initialize the timer if it is empty.
                if (ev.isEmpty())
                    ev.timer = std.time.Timer.start() catch unreachable;
            },
            1 => {
                @branchHint(.unlikely);
                // DevServer stole this event. Unlikely but possible when
                // the user is saving very heavily (10-30 times per second)
                state.current +%= 1;
                ev = &state.events[state.current].aligned;
                if (Environment.allow_assert) {
                    bun.assert(ev.contention_indicator.swap(1, .seq_cst) == 0);
                }
            },
            else => unreachable,
        }

        ev.owner.bun_watcher.thread_lock.assertLocked();

        if (Environment.isDebug)
            assert(ev.debug_mutex.tryLock());

        return ev;
    }

    /// Release the pointer from `watcherAcquireEvent`, submitting
    /// the event if it contains new files.
    fn watcherReleaseAndSubmitEvent(state: *WatcherAtomics, ev: *HotReloadEvent) void {
        state.watcher_has_event.unlock();
        ev.owner.bun_watcher.thread_lock.assertLocked();

        if (Environment.isDebug) {
            for (std.mem.asBytes(&ev.timer)) |b| {
                if (b != 0xAA) break;
            } else @panic("timer is undefined memory in watcherReleaseAndSubmitEvent");
        }

        if (Environment.isDebug)
            ev.debug_mutex.unlock();

        if (!ev.isEmpty()) {
            @branchHint(.likely);
            // There are files to be processed, increment this count first.
            const prev_count = state.watcher_events_emitted.fetchAdd(1, .seq_cst);

            if (prev_count == 0) {
                @branchHint(.likely);
                // Submit a task to the DevServer, notifying it that there is
                // work to do. The watcher will move to the other event.
                ev.concurrent_task = .{
                    .auto_delete = false,
                    .next = null,
                    .task = JSC.Task.init(ev),
                };
                ev.contention_indicator.store(0, .seq_cst);
                ev.owner.vm.event_loop.enqueueTaskConcurrent(&ev.concurrent_task);
                state.current +%= 1;
            } else {
                // DevServer thread has already notified once. Sending
                // a second task would give ownership of both events to
                // them. Instead, DevServer will steal this item since
                // it can observe `watcher_events_emitted >= 2`.
                ev.contention_indicator.store(0, .seq_cst);
            }
        } else {
            ev.contention_indicator.store(0, .seq_cst);
        }

        if (Environment.allow_assert) {
            bun.assert(ev.contention_indicator.load(.monotonic) == 0); // always must be reset
        }
    }

    /// Called by DevServer after it receives a task callback. If this returns
    /// another event, that event must be recycled with `recycleSecondEventFromDevServer`
    fn recycleEventFromDevServer(state: *WatcherAtomics, first_event: *HotReloadEvent) ?*HotReloadEvent {
        first_event.reset();

        // Reset the watch count to zero, while detecting if
        // the other watch event was submitted.
        if (state.watcher_events_emitted.swap(0, .seq_cst) >= 2) {
            // Cannot use `state.current` because it will contend with the watcher.
            // Since there are only two events, one pointer comparison suffices.
            const other_event = if (first_event == &state.events[0].aligned)
                &state.events[1].aligned
            else
                &state.events[0].aligned;

            switch (other_event.contention_indicator.swap(1, .seq_cst)) {
                0 => {
                    // DevServer holds the event now.
                    state.dev_server_has_event.lock();
                    return other_event;
                },
                1 => {
                    // The watcher is currently using this event.
                    // `watcher_events_emitted` is already zero, so it will
                    // always submit.

                    // Not 100% confident in this logic, but the only way
                    // to hit this is by saving extremely frequently, and
                    // a followup save will just trigger the reload.
                    return null;
                },
                else => unreachable,
            }
        }

        // If a watch callback had already acquired the event, that is fine as
        // it will now read 0 when deciding whether to submit the task.
        return null;
    }

    fn recycleSecondEventFromDevServer(state: *WatcherAtomics, second_event: *HotReloadEvent) void {
        second_event.reset();

        state.dev_server_has_event.unlock();
        if (Environment.allow_assert) {
            const result = second_event.contention_indicator.swap(0, .seq_cst);
            bun.assert(result == 1);
        } else {
            second_event.contention_indicator.store(0, .seq_cst);
        }
    }
};

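// The watcher callback below pairs `watcherAcquireEvent` with a deferred
// `watcherReleaseAndSubmitEvent`, so every batch of file-system events is
// either merged into a pending HotReloadEvent or submitted as a new task.
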
/// Called on the watcher's thread; access to DevServer state is restricted.
pub fn onFileUpdate(dev: *DevServer, events: []Watcher.Event, changed_files: []?[:0]u8, watchlist: Watcher.ItemList) void {
    debug.log("onFileUpdate start", .{});
    defer debug.log("onFileUpdate end", .{});

    const slice = watchlist.slice();
    const file_paths = slice.items(.file_path);
    const counts = slice.items(.count);
    const kinds = slice.items(.kind);

    const ev = dev.watcher_atomics.watcherAcquireEvent();
    defer dev.watcher_atomics.watcherReleaseAndSubmitEvent(ev);

    defer dev.bun_watcher.flushEvictions();

    for (events) |event| {
        // TODO: why does this go out of bounds when you delete every file in the directory?
        if (event.index >= file_paths.len) continue;

        const file_path = file_paths[event.index];
        const update_count = counts[event.index] + 1;
        counts[event.index] = update_count;
        const kind = kinds[event.index];

        debug.log("{s} change: {s} {}", .{ @tagName(kind), file_path, event.op });

        switch (kind) {
            .file => {
                if (event.op.delete or event.op.rename) {
                    // TODO: audit this line heavily
                    dev.bun_watcher.removeAtIndex(event.index, 0, &.{}, .file);
                }

                ev.appendFile(dev.allocator, file_path);
            },
            .directory => {
                // INotifyWatcher stores sub paths into `changed_files`;
                // the other platforms do not appear to write anything into `changed_files` ever.
                if (Environment.isLinux) {
                    ev.appendDir(dev.allocator, file_path, if (event.name_len > 0) changed_files[event.name_off] else null);
                } else {
                    ev.appendDir(dev.allocator, file_path, null);
                }
            },
        }
    }
}

pub fn onWatchError(_: *DevServer, err: bun.sys.Error) void {
    if (err.path.len > 0) {
        Output.err(err, "failed to watch {} for hot-reloading", .{bun.fmt.quote(err.path)});
    } else {
        Output.err(err, "failed to watch files for hot-reloading", .{});
    }
    Output.warn("The development server is still running, but hot-reloading is disabled until a restart.", .{});
    // TODO: attempt to automatically restart the watcher thread, perhaps wait for next request.
}

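// When no HTTP server is attached (`dev.server == null`), `publish` is a no-op
// and `numSubscribers` reports zero, so HMR messages are silently dropped.
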
pub fn publish(dev: *DevServer, topic: HmrTopic, message: []const u8, opcode: uws.Opcode) void {
    if (dev.server) |s| _ = s.publish(&.{@intFromEnum(topic)}, message, opcode, false);
}

pub fn numSubscribers(dev: *DevServer, topic: HmrTopic) u32 {
    return if (dev.server) |s| s.numSubscribers(&.{@intFromEnum(topic)}) else 0;
}

const SafeFileId = packed struct(u32) {
    side: bake.Side,
    index: u30,
    unused: enum(u1) { unused = 0 } = .unused,
};

/// Interface function for FrameworkRouter
pub fn getFileIdForRouter(dev: *DevServer, abs_path: []const u8, associated_route: Route.Index, file_kind: Route.FileKind) !OpaqueFileId {
    const index = try dev.server_graph.insertStaleExtra(abs_path, false, true);
    try dev.route_lookup.put(dev.allocator, index, .{
        .route_index = associated_route,
        .should_recurse_when_visiting = file_kind == .layout,
    });
    return toOpaqueFileId(.server, index);
}

pub fn onRouterSyntaxError(dev: *DevServer, rel_path: []const u8, log: FrameworkRouter.TinyLog) bun.OOM!void {
    _ = dev; // TODO: maybe this should track the error, send over HmrSocket?
    log.print(rel_path);
}

pub fn onRouterCollisionError(dev: *DevServer, rel_path: []const u8, other_id: OpaqueFileId, ty: Route.FileKind) bun.OOM!void {
    // TODO: maybe this should track the error, send over HmrSocket?

    Output.errGeneric("Multiple {s} matching the same route pattern is ambiguous", .{
        switch (ty) {
            .page => "pages",
            .layout => "layouts",
        },
    });
    Output.prettyErrorln(" - <blue>{s}<r>", .{rel_path});
    Output.prettyErrorln(" - <blue>{s}<r>", .{
        dev.relativePath(dev.server_graph.bundled_files.keys()[fromOpaqueFileId(.server, other_id).get()]),
    });
    Output.flush();

    dev.releaseRelativePathBuf();
}

fn toOpaqueFileId(comptime side: bake.Side, index: IncrementalGraph(side).FileIndex) OpaqueFileId {
    if (Environment.allow_assert) {
        return OpaqueFileId.init(@bitCast(SafeFileId{
            .side = side,
            .index = index.get(),
        }));
    }

    return OpaqueFileId.init(index.get());
}

fn fromOpaqueFileId(comptime side: bake.Side, id: OpaqueFileId) IncrementalGraph(side).FileIndex {
    if (Environment.allow_assert) {
        const safe: SafeFileId = @bitCast(id.get());
        assert(side == safe.side);
        return IncrementalGraph(side).FileIndex.init(safe.index);
    }
    return IncrementalGraph(side).FileIndex.init(@intCast(id.get()));
}

/// Returns a POSIX-style path, suitable for URLs and reproducible hashes.
/// To avoid clobbering the shared buffer, this takes `relative_path_buf_lock`;
/// callers must call `releaseRelativePathBuf` once they are done with the result.
fn relativePath(dev: *DevServer, path: []const u8) []const u8 {
    dev.relative_path_buf_lock.lock();
    bun.assert(dev.root[dev.root.len - 1] != '/');

    if (!std.fs.path.isAbsolute(path)) {
        return path;
    }

    if (path.len >= dev.root.len + 1 and
        path[dev.root.len] == '/' and
        bun.strings.startsWith(path, dev.root))
    {
        return path[dev.root.len + 1 ..];
    }

    const rel = bun.path.relativePlatformBuf(&dev.relative_path_buf, dev.root, path, .auto, true);
    // @constCast: `rel` is owned by a buffer on `dev`, which is mutable
    bun.path.platformToPosixInPlace(u8, @constCast(rel));
    return rel;
}

fn releaseRelativePathBuf(dev: *DevServer) void {
    dev.relative_path_buf_lock.unlock();
    if (bun.Environment.isDebug) {
        dev.relative_path_buf = undefined;
    }
}

fn dumpStateDueToCrash(dev: *DevServer) !void {
    comptime assert(bun.FeatureFlags.bake_debugging_features);

    // being conservative about how much stuff is put on the stack.
    var filepath_buf: [@min(4096, bun.MAX_PATH_BYTES)]u8 = undefined;
    const filepath = std.fmt.bufPrintZ(&filepath_buf, "incremental-graph-crash-dump.{d}.html", .{std.time.timestamp()}) catch "incremental-graph-crash-dump.html";
    const file = std.fs.cwd().createFileZ(filepath, .{}) catch |err| {
        bun.handleErrorReturnTrace(err, @errorReturnTrace());
        Output.warn("Could not open file for dumping incremental graph: {}", .{err});
        return;
    };
    defer file.close();

    const start, const end = comptime brk: {
        @setEvalBranchQuota(5000);
        const visualizer = @embedFile("incremental_visualizer.html");
        const i = (std.mem.lastIndexOf(u8, visualizer, "<script>") orelse unreachable) + "<script>".len;
        break :brk .{ visualizer[0..i], visualizer[i..] };
    };
    try file.writeAll(start);
    try file.writeAll("\nlet inlinedData = Uint8Array.from(atob(\"");

    var sfb = std.heap.stackFallback(4096, dev.allocator);
    var payload = try std.ArrayList(u8).initCapacity(sfb.get(), 4096);
    defer payload.deinit();
    try dev.writeVisualizerMessage(&payload);

    var buf: [bun.base64.encodeLenFromSize(4096)]u8 = undefined;
    var it = std.mem.window(u8, payload.items, 4096, 4096);
    while (it.next()) |chunk| {
        try file.writeAll(buf[0..bun.base64.encode(&buf, chunk)]);
    }

    try file.writeAll("\"), c => c.charCodeAt(0));\n");
    try file.writeAll(end);

    Output.note("Dumped incremental bundler graph to {}", .{bun.fmt.quote(filepath)});
}

const RouteIndexAndRecurseFlag = packed struct(u32) {
    route_index: Route.Index,
    /// Set true for layout
    should_recurse_when_visiting: bool,
};

/// Bake needs to specify which graph (client/server/ssr) each entry point is.
/// File paths are always absolute paths. Files may be bundled for multiple
/// targets.
pub const EntryPointList = struct {
    set: bun.StringArrayHashMapUnmanaged(Flags),

    pub const empty: EntryPointList = .{ .set = .{} };

    pub const Flags = packed struct(u8) {
        client: bool = false,
        server: bool = false,
        ssr: bool = false,
        /// When this is set, also set .client = true
        css: bool = false,

        unused: enum(u4) { unused = 0 } = .unused,
    };

    pub fn deinit(entry_points: *EntryPointList, allocator: std.mem.Allocator) void {
        entry_points.set.deinit(allocator);
    }

    pub fn appendJs(
        entry_points: *EntryPointList,
        allocator: std.mem.Allocator,
        abs_path: []const u8,
        side: bake.Graph,
    ) !void {
        return entry_points.append(allocator, abs_path, switch (side) {
            .server => .{ .server = true },
            .client => .{ .client = true },
            .ssr => .{ .ssr = true },
        });
    }

    pub fn appendCss(entry_points: *EntryPointList, allocator: std.mem.Allocator, abs_path: []const u8) !void {
        return entry_points.append(allocator, abs_path, .{
            .client = true,
            .css = true,
        });
    }

    /// Deduplicates requests to bundle the same file twice by merging flags.
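    /// For example, appending the same path first with `.{ .server = true }` and
    /// then with `.{ .client = true, .css = true }` leaves a single entry whose
    /// flags have `server`, `client`, and `css` all set.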
    pub fn append(entry_points: *EntryPointList, allocator: std.mem.Allocator, abs_path: []const u8, flags: Flags) !void {
        const gop = try entry_points.set.getOrPut(allocator, abs_path);
        if (gop.found_existing) {
            const T = @typeInfo(Flags).@"struct".backing_integer.?;
            gop.value_ptr.* = @bitCast(@as(T, @bitCast(gop.value_ptr.*)) | @as(T, @bitCast(flags)));
        } else {
            gop.value_ptr.* = flags;
        }
    }
};

/// This structure does not increment the reference count of its contents, as
/// their lifetimes are all tied to the underlying Bun.serve instance.
const HTMLRouter = struct {
    map: bun.StringHashMapUnmanaged(*HTMLBundle.HTMLBundleRoute),
    /// If a catch-all route exists, it is not stored in map, but here.
    fallback: ?*HTMLBundle.HTMLBundleRoute,

    pub const empty: HTMLRouter = .{
        .map = .empty,
        .fallback = null,
    };

    pub fn get(router: *HTMLRouter, path: []const u8) ?*HTMLBundle.HTMLBundleRoute {
        return router.map.get(path) orelse router.fallback;
    }

    pub fn put(router: *HTMLRouter, alloc: Allocator, path: []const u8, route: *HTMLBundle.HTMLBundleRoute) !void {
        if (bun.strings.eqlComptime(path, "/*")) {
            router.fallback = route;
        } else {
            try router.map.put(alloc, path, route);
        }
    }

    pub fn clear(router: *HTMLRouter) void {
        router.map.clearRetainingCapacity();
        router.fallback = null;
    }

    pub fn deinit(router: *HTMLRouter, alloc: Allocator) void {
        router.map.deinit(alloc);
    }
};

pub fn putOrOverwriteAsset(
    dev: *DevServer,
    path: *const bun.fs.Path,
    /// Ownership is transferred to this function.
    contents: *const AnyBlob,
    content_hash: u64,
) !void {
    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();
    _ = try dev.assets.replacePath(path.text, contents, &.byExtension(path.name.extWithoutLeadingDot()), content_hash);
}

/// Storage for hashed assets on `/_bun/asset/{hash}.ext`
pub const Assets = struct {
    /// Keys are absolute paths, sharing memory with the keys in IncrementalGraph(.client)
    /// Values are indexes into `files`.
    path_map: bun.StringArrayHashMapUnmanaged(EntryIndex),
    /// Content-addressable store. Multiple paths can point to the same content
    /// hash, which is tracked by the `refs` array. One reference is held to
    /// contained StaticRoute instances when they are stored.
    files: AutoArrayHashMapUnmanaged(u64, *StaticRoute),
    /// Indexed in parallel with `files`. The value is never `0`.
    refs: ArrayListUnmanaged(u32),
    /// After mutating `files`' keys, the map must be re-indexed before lookups work.
    needs_reindex: bool = false,

    pub const EntryIndex = bun.GenericIndex(u30, Assets);

    fn owner(assets: *Assets) *DevServer {
        return @alignCast(@fieldParentPtr("assets", assets));
    }

    pub fn getHash(assets: *Assets, path: []const u8) ?u64 {
        return if (assets.path_map.get(path)) |idx|
            assets.files.keys()[idx.get()]
        else
            null;
    }

    /// When an asset is overwritten, it receives a new URL to get around browser caching.
    /// The old URL is immediately revoked.
    pub fn replacePath(
        assets: *Assets,
        /// not allocated
        abs_path: []const u8,
        /// Ownership is transferred to this function
        contents: *const AnyBlob,
        mime_type: *const MimeType,
        /// content hash of the asset
        content_hash: u64,
    ) !EntryIndex {
        defer assert(assets.files.count() == assets.refs.items.len);
        const alloc = assets.owner().allocator;
        debug.log("replacePath {} {} - {s}/{s} ({s})", .{
            bun.fmt.quote(abs_path),
            content_hash,
            asset_prefix,
            &std.fmt.bytesToHex(std.mem.asBytes(&content_hash), .lower),
            mime_type.value,
        });

        const gop = try assets.path_map.getOrPut(alloc, abs_path);
        if (!gop.found_existing) {
            // Locate a stable pointer for the file path
            const stable_abs_path = (try assets.owner().client_graph.insertEmpty(abs_path, .unknown)).key;
            gop.key_ptr.* = stable_abs_path;
        } else {
            const entry_index = gop.value_ptr.*;
            // When there is one reference to the asset, the entry can be
            // replaced in-place with the new asset.
            if (assets.refs.items[entry_index.get()] == 1) {
                const slice = assets.files.entries.slice();

                const prev = slice.items(.value)[entry_index.get()];
                prev.deref();

                slice.items(.key)[entry_index.get()] = content_hash;
                slice.items(.value)[entry_index.get()] = StaticRoute.initFromAnyBlob(contents, .{
                    .mime_type = mime_type,
                    .server = assets.owner().server orelse unreachable,
                });
                comptime assert(@TypeOf(slice.items(.hash)[0]) == void);
                assets.needs_reindex = true;
                return entry_index;
            } else {
                assets.refs.items[entry_index.get()] -= 1;
                assert(assets.refs.items[entry_index.get()] > 0);
            }
        }

        try assets.reindexIfNeeded(alloc);
        const file_index_gop = try assets.files.getOrPut(alloc, content_hash);
        if (!file_index_gop.found_existing) {
            try assets.refs.append(alloc, 1);
            file_index_gop.value_ptr.* = StaticRoute.initFromAnyBlob(contents, .{
                .mime_type = mime_type,
                .server = assets.owner().server orelse unreachable,
            });
        } else {
            assets.refs.items[file_index_gop.index] += 1;
            var contents_mut = contents.*;
            contents_mut.detach();
        }
        gop.value_ptr.* = .init(@intCast(file_index_gop.index));
        return gop.value_ptr.*;
    }

    /// Returns a pointer at which to store the *StaticRoute. If `null` is returned,
    /// an entry already exists and only its reference count was incremented.
    pub fn putOrIncrementRefCount(assets: *Assets, content_hash: u64, ref_count: u32) !?**StaticRoute {
        defer assert(assets.files.count() == assets.refs.items.len);
        const file_index_gop = try assets.files.getOrPut(assets.owner().allocator, content_hash);
        if (!file_index_gop.found_existing) {
            try assets.refs.append(assets.owner().allocator, ref_count);
            return file_index_gop.value_ptr;
        } else {
            assets.refs.items[file_index_gop.index] += ref_count;
            return null;
        }
    }

    pub fn unrefByHash(assets: *Assets, content_hash: u64, dec_count: u32) void {
        const index = assets.files.getIndex(content_hash) orelse
            Output.panic("Asset double unref: {s}", .{std.fmt.fmtSliceHexLower(std.mem.asBytes(&content_hash))});
        assets.unrefByIndex(.init(@intCast(index)), dec_count);
    }

    pub fn unrefByIndex(assets: *Assets, index: EntryIndex, dec_count: u32) void {
        defer assert(assets.files.count() == assets.refs.items.len);
        assert(dec_count > 0);
        assets.refs.items[index.get()] -= dec_count;
        if (assets.refs.items[index.get()] == 0) {
            assets.files.values()[index.get()].deref();
            assets.files.swapRemoveAt(index.get());
            _ = assets.refs.swapRemove(index.get());
        }
    }

    pub fn unrefByPath(assets: *Assets, path: []const u8) void {
        const entry = assets.path_map.fetchSwapRemove(path) orelse return;
        assets.unrefByIndex(entry.value, 1);
    }

    pub fn reindexIfNeeded(assets: *Assets, alloc: Allocator) !void {
        if (assets.needs_reindex) {
            try assets.files.reIndex(alloc);
            assets.needs_reindex = false;
        }
    }

    pub fn get(assets: *Assets, content_hash: u64) ?*StaticRoute {
        assert(assets.files.count() == assets.refs.items.len);
        return assets.files.get(content_hash);
    }

    pub fn deinit(assets: *Assets, alloc: Allocator) void {
        assets.path_map.deinit(alloc);
        for (assets.files.values()) |blob| blob.deref();
        assets.files.deinit(alloc);
        assets.refs.deinit(alloc);
    }

    pub fn memoryCost(assets: *Assets) usize {
        var cost: usize = 0;
        cost += memoryCostArrayHashMap(assets.path_map);
        for (assets.files.values()) |blob| cost += blob.memoryCost();
        cost += memoryCostArrayHashMap(assets.files);
        cost += memoryCostArrayList(assets.refs);
        return cost;
    }
};

/// Storage for source maps on `/_bun/client/{id}.js.map`
///
/// All source maps are reference counted, so that when a bundle is unloaded,
/// the unreachable source map URLs are revoked, and their memory freed.
pub const SourceMapStore = struct {
    entries: AutoArrayHashMapUnmanaged(u64, Entry),

    pub const empty: SourceMapStore = .{ .entries = .empty };

    pub const Entry = struct {
        /// Sum of:
        /// - How many active pages could try to request this source map?
        /// - For route bundle client scripts, 1 until invalidation.
        ref_count: u32,
        /// Notes:
        /// - Bundler produces VLQ mappings, and there is not a well-written
        ///   implementation to go from Mapping.List to VLQ.
        /// - All mapping info needs to be somewhere, since watch updates would
        ///   taint IncrementalGraph state, leading to broken old sourcemaps.
        /// - The "mappings" field, being VLQ, cannot contain escape sequences,
        ///   therefore it can still be extracted without a JSON parser.
        /// - VLQ encoding is smaller than Mapping.List
        ///
        /// Since server-side error remapping is semi-rare, the parsed source
        /// maps are released as soon as the error is finished printing, with
        /// currently no re-use.
        response: *StaticRoute,
        /// Pointer into `response.blob` to where `mappings` is stored.
        mappings_data: bun.StringPointer,
        /// Outer slice is allocated. Inner slices are shared keys in
        /// IncrementalGraph. Indexes are off by one because of the HMR
        /// Runtime, which is not present in this list.
        file_paths: [][]const u8,
        /// Only entries from plugins are persisted after a bundle. Like
        /// `file_paths`, indexes are off by one. A pointer into `response.blob`
        /// to JSON encoded source contents, not including the quotes.
        // TODO: reduce the size of this slice
        source_contents: [*]bun.StringPointer,

        pub fn sourceContents(self: @This()) []const bun.StringPointer {
            return self.source_contents[0..self.file_paths.len];
        }

        fn deinit(entry: *Entry, dev: *DevServer) void {
            dev.allocator.free(entry.file_paths);
            dev.allocator.free(entry.source_contents);

            // should not have been removed or unreferenced.
            assert(entry.ref_count > 0);
            assert(entry.file_paths.ptr == entry.file_paths.ptr);
        }
    };

    pub fn owner(store: *SourceMapStore) *DevServer {
        return @alignCast(@fieldParentPtr("source_maps", store));
    }

    /// If an *Entry is returned, caller must initialize it with the source map.
    pub fn putOrIncrementRefCount(store: *SourceMapStore, source_map_id: u64, ref_count: u32) !?*Entry {
        const gop = try store.entries.getOrPut(store.owner().allocator, source_map_id);
        if (!gop.found_existing) {
            gop.value_ptr.* = .{
                .ref_count = ref_count,
                .source_contents = undefined,
                .response = undefined,
                .mappings_data = undefined,
                .file_paths = undefined,
            };
            return gop.value_ptr;
        } else {
            gop.value_ptr.*.ref_count += ref_count;
            return null;
        }
    }

    const GetResult = struct {
        index: bun.GenericIndex(u32, Entry),
        mappings: SourceMap.Mapping.List,
        file_paths: []const []const u8,
        source_contents: []const bun.StringPointer,
        bytes: []const u8,

        pub fn deinit(self: *@This(), dev: *DevServer) void {
            self.mappings.deinit(dev.allocator);
            // file paths and source contents are borrowed
        }
    };

    pub fn getParsedSourceMap(store: *SourceMapStore, source_map_id: u64) ?GetResult {
        const index = store.entries.getIndex(source_map_id) orelse
            return null; // source map was collected.
        const entry = &store.entries.values()[index];

        switch (SourceMap.Mapping.parse(
            store.owner().allocator,
            entry.mappings_data.slice(entry.response.blob.slice()),
            null,
            @intCast(entry.file_paths.len),
            0, // unused
        )) {
            .fail => |fail| {
                Output.debugWarn("Failed to re-parse source map: {s}", .{fail.msg});
                return null;
            },
            .success => |psm| {
                // TODO: consider storing mappings for, say, a minute. this
                // means having a loop in client code would re-use the
                // allocation. if this is deemed a bad idea, replace this
                // comment with an explanation why.
                // entry.parsed_mappings = psm.mappings;
                return .{
                    .index = .init(@intCast(index)),
                    .mappings = psm.mappings,
                    .file_paths = entry.file_paths,
                    .source_contents = entry.source_contents[0..entry.file_paths.len],
                    .bytes = entry.response.blob.slice(),
                };
            },
        }
    }
};

// NOTE: not used but keeping around in case a use case is found soon
// /// Instead of a pointer to `struct { data: []const u8, ref_count: u32 }`, all
// /// reference counts are in a shared map. This is used by strings shared between
// /// source maps and the incremental graph. Not thread-safe.
// ///
// /// Transfer from default allocator to RefString with `dev.ref_strings.register(slice)`
// ///
// /// Prefer `CowString` (maybe allocated or borrowed) or `[]const u8` (known lifetime) over this structure.
// const RefString = struct {
//     /// Allocated by `dev.allocator`, free with `.unref()`
//     data: []const u8,
//
//     pub fn deref(str: RefString, store: *Store) void {
//         const index = store.strings.getIndex(str.ptr) orelse unreachable;
//         const slice = store.strings.entries.slice();
//         const ref_count = &slice.items(.value)[index];
//         if (ref_count.* == 1) {
//             store.strings.swapRemoveAt(index);
//             dev.allocator.free(str.data);
//         } else {
//             ref_count.* -= 1;
//         }
//     }
//
//     pub fn dupeRef(str: RefString, store: *Store) RefString {
//         const ref_count = store.strings.getPtr(str.ptr) orelse unreachable;
//         ref_count.* += 1;
//         return str;
//     }
//
//     pub const Store = struct {
//         /// Key -> Data. Value -> Reference count
//         strings: AutoArrayHashMapUnmanaged([*]u8, u32),
//
//         pub const empty: Store = .{ .strings = .empty };
//
//         /// `data` must be owned by `dev.allocator`
//         pub fn register(store: *Store, data: []u8) !RefString {
//             const gop = try store.strings.getOrPut(dev.allocator, data.ptr);
//             if (gop.found_existing) {
//                 gop.value_ptr.* += 1;
//             } else {
//                 gop.value_ptr = 1;
//             }
//             return .{ .data = data };
//         }
//     };
// };

pub fn onPluginsResolved(dev: *DevServer, plugins: ?*Plugin) !void {
    dev.bundler_options.plugin = plugins;
    dev.plugin_state = .loaded;
    dev.startNextBundleIfPresent();
}

pub fn onPluginsRejected(dev: *DevServer) !void {
    dev.plugin_state = .err;
    while (dev.next_bundle.requests.popFirst()) |item| {
        defer dev.deferred_request_pool.put(item);
        item.data.abort();
    }
    dev.next_bundle.route_queue.clearRetainingCapacity();
    // TODO: allow recovery from this state
}

/// Fetched when a client-side error happens. This performs two actions
/// - Logs the remapped stack trace to the console.
/// - Replies with the remapped stack trace.
/// Payload:
/// - `u32`: Responding message ID (echoed back)
/// - `u32`: Length of message
/// - `[n]u8`: Message
/// - `u32`: Length of error name
/// - `[n]u8`: Error name
/// - `u32`: Number of stack frames. For each
///   - `u32`: Line number (0 for unavailable)
///   - `u32`: Column number (0 for unavailable)
///   - `u32`: Length of file name (0 for unavailable)
///   - `[n]u8`: File name
///   - `u32`: Length of function name (0 for unavailable)
///   - `[n]u8`: Function name
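/// Reply (as assembled in `runWithBody` below):
/// - `u32`: Number of remapped stack frames. For each:
///   line (`i32`), column (`i32`), function name (`u32` length + bytes),
///   file name (`u32` length + bytes)
/// - `u8`: Number of source-code lines included (0 if none). If non-zero:
///   `u32` line of interest, `u32` first line number, `u32` column,
///   then each line as `u32` length + bytes.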
const ErrorReportRequest = struct {
    dev: *DevServer,
    body: uws.BodyReaderMixin(@This(), "body", runWithBody, finalize),

    fn run(dev: *DevServer, _: *Request, resp: anytype) void {
        const ctx = bun.new(ErrorReportRequest, .{
            .dev = dev,
            .body = .init(dev.allocator),
        });
        ctx.dev.server.?.onPendingRequest();
        ctx.body.readBody(resp);
    }

    fn finalize(ctx: *ErrorReportRequest) void {
        ctx.dev.server.?.onStaticRequestComplete();
        bun.destroy(ctx);
    }

    fn runWithBody(ctx: *ErrorReportRequest, body: []const u8, r: AnyResponse) !void {
        // .finalize has to be called last, but only in the non-error path.
        var should_finalize_self = false;
        defer if (should_finalize_self) ctx.finalize();

        var s = std.io.fixedBufferStream(body);
        const reader = s.reader();

        var sfa = std.heap.stackFallback(131072, ctx.dev.allocator);
        const temp_alloc = sfa.get();
        var arena = std.heap.ArenaAllocator.init(temp_alloc);
        defer arena.deinit();

        // Read payload, assemble ZigException
        const name = try readString32(reader, temp_alloc);
        defer temp_alloc.free(name);
        const message = try readString32(reader, temp_alloc);
        defer temp_alloc.free(message);
        const browser_url = try readString32(reader, temp_alloc);
        defer temp_alloc.free(browser_url);
        var frames: ArrayListUnmanaged(JSC.ZigStackFrame) = .empty;
        defer frames.deinit(temp_alloc);
        const stack_count = @min(try reader.readInt(u32, .little), 255); // does not support more than 255
        try frames.ensureTotalCapacity(temp_alloc, stack_count);
        for (0..stack_count) |_| {
            const line = try reader.readInt(i32, .little);
            const column = try reader.readInt(i32, .little);
            const function_name = try readString32(reader, temp_alloc);
            const file_name = try readString32(reader, temp_alloc);
            frames.appendAssumeCapacity(.{
                .function_name = .init(function_name),
                .source_url = .init(file_name),
                .position = if (line > 0) .{
                    .line = .fromOneBased(line + 1),
                    .column = .fromOneBased(@max(1, column)),
                    .line_start_byte = 0,
                } else .{
                    .line = .invalid,
                    .column = .invalid,
                    .line_start_byte = 0,
                },
                .code_type = .None,
                .remapped = false,
            });
        }

        const runtime_name = "Bun HMR Runtime";

        // All files that DevServer could provide a source map fit the pattern:
        // `/_bun/client/<label>-{u64}.js`
        // Where the u64 is a unique identifier pointing into sourcemaps.
        //
        // HMR chunks use this too, but currently do not host their JS code.
        var parsed_source_maps: AutoArrayHashMapUnmanaged(u64, ?SourceMapStore.GetResult) = .empty;
        try parsed_source_maps.ensureTotalCapacity(temp_alloc, 4);
        defer for (parsed_source_maps.values()) |*value| {
            if (value.*) |*v| v.deinit(ctx.dev);
        };
        var runtime_lines: ?[5][]const u8 = null;
        var first_line_of_interest: usize = 0;
        var top_frame_position: JSC.ZigStackFramePosition = undefined;
        var region_of_interest_line: u32 = 0;
        for (frames.items) |*frame| {
            const source_url = frame.source_url.value.ZigString.slice();
            // The browser code strips the origin (e.g. "http://localhost:3000") when
            // the string contains /_bun/client. This is done because JS can refer to `location`.
            const id = parseId(source_url, browser_url) orelse continue;

            // Get and cache the parsed source map
            const gop = try parsed_source_maps.getOrPut(temp_alloc, id);
            if (!gop.found_existing) {
                const psm = ctx.dev.source_maps.getParsedSourceMap(id) orelse {
                    Output.debugWarn("Failed to find mapping for {s}, {d}", .{ source_url, id });
                    gop.value_ptr.* = null;
                    continue;
                };
                gop.value_ptr.* = psm;
            }
            const result: *const SourceMapStore.GetResult = &(gop.value_ptr.* orelse continue);

            // When before the first generated line, remap to the HMR runtime
            const generated_mappings = result.mappings.items(.generated);
            if (frame.position.line.oneBased() < generated_mappings[1].lines) {
                frame.source_url = .init(runtime_name); // matches value in source map
                frame.position = .invalid;
                continue;
            }

            // Remap the frame
            const remapped = SourceMap.Mapping.find(
                result.mappings,
                frame.position.line.oneBased(),
                frame.position.column.zeroBased(),
            );
            if (remapped) |remapped_position| {
                frame.position = .{
                    .line = .fromZeroBased(remapped_position.originalLine()),
                    .column = .fromZeroBased(remapped_position.originalColumn()),
                    .line_start_byte = 0,
                };
                const index = remapped_position.source_index;
                if (index >= 1 and (index - 1) < result.file_paths.len) {
                    const abs_path = result.file_paths[@intCast(index - 1)];
                    frame.source_url = .init(abs_path);
                    const rel_path = ctx.dev.relativePath(abs_path);
                    defer ctx.dev.releaseRelativePathBuf();
                    if (bun.strings.eql(frame.function_name.value.ZigString.slice(), rel_path)) {
                        frame.function_name = .empty;
                    }
                    frame.remapped = true;

                    if (runtime_lines == null) {
                        const json_encoded_source_code = result.source_contents[@intCast(index - 1)].slice(result.bytes);
                        // First line of interest is two above the target line.
                        const target_line = @as(usize, @intCast(frame.position.line.zeroBased()));
                        first_line_of_interest = target_line -| 2;
                        region_of_interest_line = @intCast(target_line - first_line_of_interest);
                        runtime_lines = try extractJsonEncodedSourceCode(
                            json_encoded_source_code,
                            @intCast(first_line_of_interest),
                            5,
                            arena.allocator(),
                        );
                        top_frame_position = frame.position;
                    }
                } else if (index == 0) {
                    // Should be picked up by above but just in case.
                    frame.source_url = .init(runtime_name);
                    frame.position = .invalid;
                }
            }
        }

        // Stack traces can often end with random runtime frames that are not relevant.
        trim_runtime_frames: {
            // Ensure that trimming will not remove ALL frames.
            for (frames.items) |frame| {
                if (!frame.position.isInvalid() or frame.source_url.value.ZigString.slice().ptr != runtime_name) {
                    break;
                }
            } else break :trim_runtime_frames;

            // Move all frames up
            var i: usize = 0;
            for (frames.items[i..]) |frame| {
                if (frame.position.isInvalid() and frame.source_url.value.ZigString.slice().ptr == runtime_name) {
                    continue; // skip runtime frames
                }

                frames.items[i] = frame;
                i += 1;
            }
            frames.items.len = i;
        }

        var exception: JSC.ZigException = .{
            .type = .Error,
            .runtime_type = .Nothing,
            .name = .init(name),
            .message = .init(message),
            .stack = .fromFrames(frames.items),
            .exception = null,
            .remapped = false,
            .browser_url = .init(browser_url),
        };

        const stderr = Output.errorWriterBuffered();
        defer Output.flush();
        switch (Output.enable_ansi_colors_stderr) {
            inline else => |ansi_colors| ctx.dev.vm.printExternallyRemappedZigException(
                &exception,
                null,
                @TypeOf(stderr),
                stderr,
                true,
                ansi_colors,
            ) catch {},
        }

        var out: std.ArrayList(u8) = .init(ctx.dev.allocator);
        errdefer out.deinit();
        const w = out.writer();

        try w.writeInt(u32, exception.stack.frames_len, .little);
        for (exception.stack.frames()) |frame| {
            try w.writeInt(i32, frame.position.line.oneBased(), .little);
            try w.writeInt(i32, frame.position.column.oneBased(), .little);

            const function_name = frame.function_name.value.ZigString.slice();
            try w.writeInt(u32, @intCast(function_name.len), .little);
            try w.writeAll(function_name);

            const src_to_write = frame.source_url.value.ZigString.slice();
            if (bun.strings.hasPrefixComptime(src_to_write, "/")) {
                const file = ctx.dev.relativePath(src_to_write);
                defer ctx.dev.releaseRelativePathBuf();
                try w.writeInt(u32, @intCast(file.len), .little);
                try w.writeAll(file);
            } else {
                try w.writeInt(u32, @intCast(src_to_write.len), .little);
                try w.writeAll(src_to_write);
            }
        }

        if (runtime_lines) |*lines| {
            // trim empty lines
            var adjusted_lines: [][]const u8 = lines;
            while (adjusted_lines.len > 0 and adjusted_lines[0].len == 0) {
                adjusted_lines = adjusted_lines[1..];
                region_of_interest_line -|= 1;
                first_line_of_interest += 1;
            }
            while (adjusted_lines.len > 0 and adjusted_lines[adjusted_lines.len - 1].len == 0) {
                adjusted_lines.len -= 1;
            }

            try w.writeInt(u8, @intCast(adjusted_lines.len), .little);
            try w.writeInt(u32, @intCast(region_of_interest_line), .little);
            try w.writeInt(u32, @intCast(first_line_of_interest + 1), .little);
            try w.writeInt(u32, @intCast(top_frame_position.column.oneBased()), .little);

            for (adjusted_lines) |line| {
                try w.writeInt(u32, @intCast(line.len), .little);
                try w.writeAll(line);
            }
        } else {
            try w.writeInt(u8, 0, .little);
        }

        StaticRoute.sendBlobThenDeinit(r, &.fromArrayList(out), .{
            .mime_type = &.other,
            .server = ctx.dev.server.?,
        });
        should_finalize_self = true;
    }

    fn parseId(source_url: []const u8, browser_url: []const u8) ?u64 {
        if (!bun.strings.startsWith(source_url, browser_url))
            return null;
        const after_host = source_url[bun.strings.withoutTrailingSlash(browser_url).len..];
        if (!bun.strings.hasPrefixComptime(after_host, client_prefix ++ "/"))
            return null;
        const after_prefix = after_host[client_prefix.len + 1 ..];
        // Extract the ID
        if (!bun.strings.hasSuffixComptime(after_prefix, ".js"))
            return null;
        const min_len = "00000000FFFFFFFF.js".len;
        if (after_prefix.len < min_len)
            return null;
        const hex = after_prefix[after_prefix.len - min_len ..][0 .. @sizeOf(u64) * 2];
        if (hex.len != @sizeOf(u64) * 2)
            return null;
        return parseHexToInt(u64, hex) orelse
            return null;
    }

    /// Instead of decoding the entire file, just decode the desired section.
    fn extractJsonEncodedSourceCode(contents: []const u8, target_line: u32, comptime n: usize, arena: Allocator) !?[n][]const u8 {
        var line: usize = 0;
        var prev: usize = 0;
        const index_of_first_line = if (target_line == 0)
            0 // no iteration needed
        else while (bun.strings.indexOfCharPos(contents, '\\', prev)) |i| : (prev = i + 2) {
            if (i >= contents.len - 2) return null;
            // Bun's JSON printer will not use a sillier encoding for newline.
            if (contents[i + 1] == 'n') {
                line += 1;
                if (line == target_line)
                    break i + 2;
            }
        } else return null;

        var rest = contents[index_of_first_line..];

        // For decoding JSON escapes, the JS Lexer decoding function has
        // `decodeEscapeSequences`, which only supports decoding to UTF-16.
        // Alternatively, it appears the TOML lexer has copied this exact
        // function but for UTF-8. So the decoder can just use that.
        //
        // This function expects but does not assume the escape sequences
        // given are valid, and does not bubble errors up.
        var log = Log.init(arena);
        var l: @import("../toml/toml_lexer.zig").Lexer = .{
            .log = &log,
            .source = .initEmptyFile(""),
            .allocator = arena,
            .should_redact_logs = false,
            .prev_error_loc = .Empty,
        };
        defer log.deinit();

        var result: [n][]const u8 = .{""} ** n;
        for (&result) |*decoded_line| {
            var has_extra_escapes = false;
            prev = 0;
            // Locate the line slice
            const end_of_line = while (bun.strings.indexOfCharPos(rest, '\\', prev)) |i| : (prev = i + 2) {
                if (i >= rest.len - 1) return null;
                if (rest[i + 1] == 'n') {
                    break i;
                }
                has_extra_escapes = true;
            } else rest.len;
            const encoded_line = rest[0..end_of_line];

            // Decode it
            if (has_extra_escapes) {
                var bytes: std.ArrayList(u8) = try .initCapacity(arena, encoded_line.len);
                try l.decodeEscapeSequences(0, encoded_line, false, std.ArrayList(u8), &bytes);
                decoded_line.* = bytes.items;
            } else {
                decoded_line.* = encoded_line;
            }

            if (end_of_line + 2 >= rest.len) break;
            rest = rest[end_of_line + 2 ..];
        }

        return result;
    }
};

fn readString32(reader: anytype, alloc: Allocator) ![]const u8 {
    const len = try reader.readInt(u32, .little);
    const memory = try alloc.alloc(u8, len);
    errdefer alloc.free(memory);
    try reader.readNoEof(memory);
    return memory;
}

const TestingBatch = struct {
    entry_points: EntryPointList,

    const empty: @This() = .{ .entry_points = .empty };

    pub fn append(self: *@This(), dev: *DevServer, entry_points: EntryPointList) !void {
        assert(entry_points.set.count() > 0);
        for (entry_points.set.keys(), entry_points.set.values()) |k, v| {
            try self.entry_points.append(dev.allocator, k, v);
        }
    }
};

/// userland implementation of https://github.com/ziglang/zig/issues/21879
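/// For example (illustrative), `VoidFieldTypes(struct { a: u32, b: []const u8 })`
/// produces a struct type with fields `a: void` and `b: void`.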
fn VoidFieldTypes(comptime T: type) type {
    const fields = @typeInfo(T).@"struct".fields;
    var new_fields = fields[0..fields.len].*;
    for (&new_fields) |*field| {
        field.type = void;
        field.default_value_ptr = null;
    }
    return @Type(.{ .@"struct" = .{
        .layout = .auto,
        .fields = &new_fields,
        .decls = &.{},
        .is_tuple = false,
    } });
}

fn voidFieldTypeDiscardHelper(data: anytype) void {
    _ = data;
}

/// `test/bake/deinitialization.test.ts` checks for this as well as all tests
/// using the dev server test harness (test/bake/bake-harness.ts)
///
/// In debug builds, this will also assert that AllocationScope.deinit was
/// called, validating that all Dev Server tests have no leaks.
var dev_server_deinit_count_for_testing: usize = 0;
pub fn getDeinitCountForTesting() usize {
    return dev_server_deinit_count_for_testing;
}

const std = @import("std");
const Allocator = std.mem.Allocator;
const Mutex = bun.Mutex;
const ArrayListUnmanaged = std.ArrayListUnmanaged;
const AutoArrayHashMapUnmanaged = std.AutoArrayHashMapUnmanaged;

const bun = @import("root").bun;
const Environment = bun.Environment;
const assert = bun.assert;
const assert_eql = bun.assert_eql;
const DynamicBitSetUnmanaged = bun.bit_set.DynamicBitSetUnmanaged;

const bake = bun.bake;
const FrameworkRouter = bake.FrameworkRouter;
const Route = FrameworkRouter.Route;
const OpaqueFileId = FrameworkRouter.OpaqueFileId;

const Log = bun.logger.Log;
const Output = bun.Output;

const Transpiler = bun.transpiler.Transpiler;
const BundleV2 = bun.bundle_v2.BundleV2;
const Chunk = bun.bundle_v2.Chunk;
const ContentHasher = bun.bundle_v2.ContentHasher;

const Define = bun.options.Define;

const uws = bun.uws;
const AnyWebSocket = uws.AnyWebSocket;
const Request = uws.Request;
const AnyResponse = bun.uws.AnyResponse;

const MimeType = bun.http.MimeType;

const JSC = bun.JSC;
const JSValue = JSC.JSValue;
const VirtualMachine = JSC.VirtualMachine;
const JSModuleLoader = JSC.JSModuleLoader;
const EventLoopHandle = JSC.EventLoopHandle;
const HTMLBundle = JSC.API.HTMLBundle;
const Plugin = JSC.API.JSBundler.Plugin;

const ThreadlocalArena = @import("../allocators/mimalloc_arena.zig").Arena;

const Watcher = bun.Watcher;
const StaticRoute = bun.server.StaticRoute;

const AnyBlob = JSC.WebCore.AnyBlob;

const SourceMap = bun.sourcemap;
const VLQ = SourceMap.VLQ;

const StringJoiner = bun.StringJoiner;
const AllocationScope = bun.AllocationScope;