mirror of
https://github.com/oven-sh/bun
synced 2026-02-09 10:28:47 +00:00
This would happen sometimes because it was appending base64 strings to each other. You can't do that. Tested locally and it fixes the bug. Not sure how to make a regression test for this.
4101 lines
166 KiB
Zig
4101 lines
166 KiB
Zig
//! Instance of the development server. Attaches to an instance of `Bun.serve`,
|
|
//! controlling bundler, routing, and hot module reloading.
|
|
//!
|
|
//! Reprocessing files that did not change is banned; by having perfect
|
|
//! incremental tracking over the project, editing a file's contents (aside from
|
|
//! adjusting imports) must always rebundle only that one file.
|
|
//!
|
|
//! All work is held in-memory, using manually managed data-oriented design.
|
|
//! For questions about DevServer, please consult the delusional @paperclover
|
|
|
|
// This file is itself the DevServer struct: top-level declarations below are
// its fields and methods.
const DevServer = @This();

/// Scoped debug logger for general DevServer events.
pub const debug = bun.Output.Scoped(.DevServer, .visible);
/// Scoped debug logger for IncrementalGraph bookkeeping.
pub const igLog = bun.Output.scoped(.IncrementalGraph, .visible);
/// Scoped debug logger for the source map store.
pub const mapLog = bun.Output.scoped(.SourceMapStore, .visible);
|
|
|
|
/// Configuration passed to `DevServer.init`.
pub const Options = struct {
    /// Arena must live until DevServer.deinit()
    arena: Allocator,
    /// Project root directory; stored on the DevServer as `root`.
    root: [:0]const u8,
    /// The virtual machine (global object) to execute code in.
    vm: *VirtualMachine,
    framework: bake.Framework,
    bundler_options: bake.SplitBundlerOptions,
    /// When true, browser console output is echoed to the server console.
    broadcast_console_log_from_browser_to_server: bool,

    // Debugging features
    /// Directory to dump bundled sources into, or null to disable dumping.
    dump_sources: ?[]const u8 = if (Environment.isDebug) ".bake-debug" else null,
    /// When set, overrides the `BUN_DUMP_STATE_ON_CRASH` runtime feature flag.
    dump_state_on_crash: ?bool = null,
};
|
|
|
|
// The fields `client_graph`, `server_graph`, `directory_watchers`, and `assets`
// all use `@fieldParentPointer` to access DevServer's state. This pattern has
// made it easier to group related fields together, but one must remember those
// structures still depend on the DevServer pointer.

/// To validate the DevServer has not been collected, this can be checked.
/// When freed, this is set to `undefined`. UAF here also trips ASAN.
magic: if (Environment.isDebug)
    enum(u128) { valid = 0x1ffd363f121f5c12 }
else
    enum { valid } = .valid,
/// Used for all server-wide allocations. In debug, it is backed by a scope. Thread-safe.
allocator: Allocator,
/// All methods are no-op in release builds.
allocation_scope: AllocationScope,
/// Absolute path to project root directory. For the HMR
/// runtime, its module IDs are strings relative to this.
root: []const u8,
/// Unique identifier for this DevServer instance. Used to identify it
/// when using the debugger protocol.
inspector_server_id: DebuggerId,
/// Hex string generated by hashing the framework config and bun revision.
/// Embedded in client bundles and sent when the HMR Socket is opened;
/// when the value mismatches the page is forcibly reloaded.
configuration_hash_key: [16]u8,
/// The virtual machine (global object) to execute code in.
vm: *VirtualMachine,
/// May be `null` if not attached to an HTTP server yet. When no server is
/// available, functions taking in requests and responses are unavailable.
/// However, a lot of testing in this mode is missing, so it may hit assertions.
server: ?bun.jsc.API.AnyServer,
/// Contains the tree of routes. This structure contains FileIndex
router: FrameworkRouter,
/// Every navigatable route has bundling state here.
route_bundles: ArrayListUnmanaged(RouteBundle),
/// All access into IncrementalGraph is guarded by a ThreadLock. This is
/// only a debug assertion as contention to this is always a bug; if a bundle is
/// active and a file is changed, that change is placed into the next bundle.
graph_safety_lock: bun.safety.ThreadLock,
client_graph: IncrementalGraph(.client),
server_graph: IncrementalGraph(.server),
/// State populated during bundling and hot updates. Often cleared
incremental_result: IncrementalResult,
/// Quickly retrieve a framework route's index from its entry point file. These
/// are populated as the routes are discovered. The route may not be bundled OR
/// navigatable, such as the case where a layout's index is looked up.
route_lookup: AutoArrayHashMapUnmanaged(IncrementalGraph(.server).FileIndex, RouteIndexAndRecurseFlag),
/// This acts as a duplicate of the lookup table in uws, but only for HTML routes.
/// Used to identify what route a connected WebSocket is on, so that only
/// the active pages are notified of hot updates.
html_router: HTMLRouter,
/// Assets are accessible via `/_bun/asset/<key>`
/// This store is not thread safe.
assets: Assets,
/// Similar to `assets`, specialized for the additional needs of source mappings.
source_maps: SourceMapStore,
/// All bundling failures are stored until a file is saved and rebuilt.
/// They are stored in the wire format the HMR runtime expects so that
/// serialization only happens once.
bundling_failures: std.ArrayHashMapUnmanaged(
    SerializedFailure,
    void,
    SerializedFailure.ArrayHashContextViaOwner,
    false,
) = .{},
/// When set, nothing is ever bundled for the server-side,
/// and DevServer acts purely as a frontend bundler.
frontend_only: bool,
/// The Plugin API is missing a way to attach filesystem watchers (addWatchFile).
/// This special case makes `bun-plugin-tailwind` work, which is a requirement
/// to ship initial incremental bundling support for HTML files.
has_tailwind_plugin_hack: ?bun.StringArrayHashMapUnmanaged(void) = null,
|
|
|
|
// These values are handles to the functions in `hmr-runtime-server.ts`.
// For type definitions, see `./bake.private.d.ts`
server_fetch_function_callback: jsc.Strong.Optional,
server_register_update_callback: jsc.Strong.Optional,

// Watching
bun_watcher: *bun.Watcher,
directory_watchers: DirectoryWatchStore,
watcher_atomics: WatcherAtomics,
/// In end-to-end DevServer tests, flakiness was noticed around file watching
/// and bundling times, where the test harness (bake-harness.ts) would not wait
/// long enough for processing to complete. Checking client logs, for example,
/// not only must wait on DevServer, but also wait on all connected WebSocket
/// clients to receive their update, but also wait for those modules
/// (potentially async) to finish loading.
///
/// To solve the first part of this, DevServer exposes a special WebSocket
/// payload, `testing_batch_events`, which informs the watcher to batch a set of
/// files together. The various states are used to inform the test harness when
/// it sees files, and when it finishes a build it will send a message
/// identifying if the build had notified WebSockets. This makes sure that when
/// an error happens, the test harness does not needlessly wait for its clients
/// to receive a "successful hot update" message when it will never come.
///
/// Sync events are sent over the `testing_watch_synchronization` topic.
testing_batch_events: union(enum) {
    disabled,
    /// A meta-state where the DevServer has been requested to start a batch,
    /// but is currently bundling something so it must wait. In this state, the
    /// harness is waiting for a "i am in batch mode" message, and it waits
    /// until the bundle finishes.
    enable_after_bundle,
    /// DevServer will not start new bundles, but instead write all files into
    /// this `TestingBatch` object. Additionally, writes into this will signal
    /// a message saying that new files have been seen. Once DevServer receives
    /// that signal, or times out, it will "release" this batch.
    enabled: TestingBatch,
},
|
|
|
|
/// Number of bundles that have been executed. This is currently not read, but
/// will be used later to determine when to invoke graph garbage collection.
generation: usize = 0,
/// Displayed in the HMR success indicator
bundles_since_last_error: usize = 0,

framework: bake.Framework,
bundler_options: bake.SplitBundlerOptions,
// Each logical graph gets its own bundler configuration
server_transpiler: Transpiler,
client_transpiler: Transpiler,
ssr_transpiler: Transpiler,
/// The log used by all `server_transpiler`, `client_transpiler` and `ssr_transpiler`.
/// Note that it is rarely correct to write messages into it. Instead, associate
/// messages with the IncrementalGraph file or Route using `SerializedFailure`.
log: Log,
plugin_state: enum {
    /// Should ask server for plugins. Once plugins are loaded, the plugin
    /// pointer is written into `server_transpiler.options.plugin`
    unknown,
    // These two states mean that `server.getOrLoadPlugins()` was called.
    pending,
    loaded,
    /// Currently, this represents a degraded state where no bundle can
    /// be correctly executed because the plugins did not load successfully.
    err,
},
|
|
/// There is only ever one bundle executing at the same time, since all bundles
/// inevitably share state. This bundle is asynchronous, storing its state here
/// while in-flight. All allocations held by `.bv2.graph.heap`'s arena.
current_bundle: ?struct {
    bv2: *BundleV2,
    /// Information BundleV2 needs to finalize the bundle
    start_data: bun.bundle_v2.DevServerInput,
    /// Started when the bundle was queued
    timer: std.time.Timer,
    /// If any files in this bundle were due to hot-reloading, some extra work
    /// must be done to inform clients to reload routes. When this is false,
    /// all entry points do not have bundles yet.
    had_reload_event: bool,
    /// After a bundle finishes, these requests will be continued, either
    /// calling their handler on success or sending the error page on failure.
    /// Owned by `deferred_request_pool` in DevServer.
    requests: DeferredRequest.List,
    /// Resolution failures are grouped by incremental graph file index.
    /// Unlike parse failures (`handleParseTaskFailure`), the resolution
    /// failures can be created asynchronously, and out of order.
    resolution_failure_entries: AutoArrayHashMapUnmanaged(SerializedFailure.Owner.Packed, bun.logger.Log),
},
/// When `current_bundle` is non-null and new requests to bundle come in,
/// those are temporarily stored here. When the current bundle is finished, it
/// will immediately enqueue this.
next_bundle: struct {
    /// A list of `RouteBundle`s which have active requests to bundle it.
    route_queue: AutoArrayHashMapUnmanaged(RouteBundle.Index, void),
    /// If a reload event exists and should be drained. The information
    /// for this watch event is in one of the `watch_events`
    reload_event: ?*HotReloadEvent,
    /// The list of requests that are blocked on this bundle.
    requests: DeferredRequest.List,
},
/// Backing storage for the `DeferredRequest` nodes referenced by the bundle queues.
deferred_request_pool: bun.HiveArray(DeferredRequest.Node, DeferredRequest.max_preallocated).Fallback,
/// UWS can handle closing the websocket connections themselves
active_websocket_connections: std.AutoHashMapUnmanaged(*HmrSocket, void),
|
|
|
|
// Debugging

/// Directory where bundled sources are dumped when `Options.dump_sources`
/// is enabled; `void` when debugging features are compiled out.
dump_dir: if (bun.FeatureFlags.bake_debugging_features) ?std.fs.Dir else void,
/// Reference count to number of active sockets with the incremental_visualizer enabled.
emit_incremental_visualizer_events: u32,
/// Reference count to number of active sockets with the memory_visualizer enabled.
emit_memory_visualizer_events: u32,
/// Timer driving memory visualizer updates; removed in deinit when active.
memory_visualizer_timer: EventLoopTimer,

/// True when a pre-crash handler (`dumpStateDueToCrash`) was registered.
has_pre_crash_handler: bool,
/// Perfect incremental bundling implies that there are zero bugs in the
/// code that bundles, watches, and rebuilds routes and client side code.
///
/// More specifically, when this is false, DevServer will run a full rebundle
/// when a user force-refreshes an error page. In a perfect system, a rebundle
/// could not possibly fix the build. But since builds are NOT perfect, this
/// could very well be the case.
///
/// DISABLED in releases, ENABLED in debug.
/// Can be enabled with env var `BUN_ASSUME_PERFECT_INCREMENTAL=1`
assume_perfect_incremental_bundling: bool = false,

/// If true, console logs from the browser will be echoed to the server console.
/// This works by overriding console.log & console.error in hmr-runtime-client.ts
/// with a function that sends the message from the client to the server.
///
/// There are two usecases:
/// - Echoing browser console logs to the server for debugging
/// - WebKit Inspector remote debugging integration
broadcast_console_log_from_browser_to_server: bool,
|
|
|
|
/// URL prefix under which all DevServer-internal endpoints are routed.
pub const internal_prefix = "/_bun";
/// Assets which are routed to the `Assets` storage.
pub const asset_prefix = internal_prefix ++ "/asset";
/// Client scripts are available at `/_bun/client/{name}-{rbi}{generation}.js`
/// where:
/// - `name` is the display name of the route, such as "index" or
///   "about". It is ignored when routing.
/// - `rbi` is the route bundle index, in padded hex (e.g. `00000001`)
/// - `generation` which is initialized to a random value. This value is
///   re-randomized whenever `client_bundle` is invalidated.
///
/// Example: `/_bun/client/index-00000000f209a20e.js`
pub const client_prefix = internal_prefix ++ "/client";

/// Per-route bundling state, implemented in `./DevServer/RouteBundle.zig`.
pub const RouteBundle = @import("./DevServer/RouteBundle.zig");
|
|
|
|
/// DevServer is stored on the heap, storing its allocator.
pub fn init(options: Options) bun.JSOOM!*DevServer {
    // Raw allocator used before `allocation_scope` exists; the scope-checked
    // allocator is derived from `dev.allocation_scope` below.
    const unchecked_allocator = bun.default_allocator;
    bun.analytics.Features.dev_server +|= 1;

    var dump_dir = if (bun.FeatureFlags.bake_debugging_features)
        if (options.dump_sources) |dir|
            std.fs.cwd().makeOpenPath(dir, .{}) catch |err| dir: {
                bun.handleErrorReturnTrace(err, @errorReturnTrace());
                Output.warn("Could not open directory for dumping sources: {}", .{err});
                break :dir null;
            }
        else
            null;
    errdefer if (bun.FeatureFlags.bake_debugging_features) if (dump_dir) |*dir| dir.close();

    const separate_ssr_graph = if (options.framework.server_components) |sc| sc.separate_ssr_graph else false;

    // Two-phase construction: fields that require `dev` itself, or whose setup
    // can fail, are set to `undefined` here and filled in below.
    const dev = bun.new(DevServer, .{
        .allocator = undefined,
        // 'init' is a no-op in release
        .allocation_scope = AllocationScope.init(unchecked_allocator),

        .root = options.root,
        .vm = options.vm,
        .server = null,
        .directory_watchers = .empty,
        .server_fetch_function_callback = .empty,
        .server_register_update_callback = .empty,
        .generation = 0,
        .graph_safety_lock = .initUnlocked(),
        .dump_dir = dump_dir,
        .framework = options.framework,
        .bundler_options = options.bundler_options,
        .emit_incremental_visualizer_events = 0,
        .emit_memory_visualizer_events = 0,
        .memory_visualizer_timer = .initPaused(.DevServerMemoryVisualizerTick),
        .has_pre_crash_handler = bun.FeatureFlags.bake_debugging_features and
            options.dump_state_on_crash orelse
            bun.getRuntimeFeatureFlag(.BUN_DUMP_STATE_ON_CRASH),
        .frontend_only = options.framework.file_system_router_types.len == 0,
        .client_graph = .empty,
        .server_graph = .empty,
        .incremental_result = .empty,
        .route_lookup = .empty,
        .route_bundles = .empty,
        .html_router = .empty,
        .active_websocket_connections = .empty,
        .current_bundle = null,
        .next_bundle = .{
            .route_queue = .empty,
            .reload_event = null,
            .requests = .{},
        },
        .inspector_server_id = .init(0), // TODO paper clover:
        .assets = .{
            .path_map = .empty,
            .files = .empty,
            .refs = .empty,
        },
        .source_maps = .empty,
        .plugin_state = .unknown,
        .bundling_failures = .{},
        // Debug builds default to ON unless the env var is explicitly "0";
        // release builds require the runtime feature flag.
        .assume_perfect_incremental_bundling = if (bun.Environment.isDebug)
            if (bun.getenvZ("BUN_ASSUME_PERFECT_INCREMENTAL")) |env|
                !bun.strings.eqlComptime(env, "0")
            else
                true
        else
            bun.getRuntimeFeatureFlag(.BUN_ASSUME_PERFECT_INCREMENTAL),
        .testing_batch_events = .disabled,
        .broadcast_console_log_from_browser_to_server = options.broadcast_console_log_from_browser_to_server,
        .server_transpiler = undefined,
        .client_transpiler = undefined,
        .ssr_transpiler = undefined,
        .bun_watcher = undefined,
        .configuration_hash_key = undefined,
        .router = undefined,
        .watcher_atomics = undefined,
        .log = undefined,
        .deferred_request_pool = undefined,
    });
    errdefer bun.destroy(dev);
    const allocator = dev.allocation_scope.allocator();
    dev.allocator = allocator;
    dev.log = .init(allocator);
    dev.deferred_request_pool = .init(allocator);

    const global = dev.vm.global;

    // Sanity-check the @fieldParentPointer-based back-references.
    assert(dev.server_graph.owner() == dev);
    assert(dev.client_graph.owner() == dev);
    assert(dev.directory_watchers.owner() == dev);

    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();

    const generic_action = "while initializing development server";
    const fs = bun.fs.FileSystem.init(options.root) catch |err|
        return global.throwError(err, generic_action);

    dev.bun_watcher = Watcher.init(DevServer, dev, fs, bun.default_allocator) catch |err|
        return global.throwError(err, "while initializing file watcher for development server");

    errdefer dev.bun_watcher.deinit(false);
    dev.bun_watcher.start() catch |err|
        return global.throwError(err, "while initializing file watcher thread for development server");

    dev.watcher_atomics = WatcherAtomics.init(dev);

    // This causes a memory leak, but the allocator is otherwise used on multiple threads.
    const transpiler_allocator = bun.default_allocator;

    // One transpiler per logical graph (server, client, and optionally SSR).
    dev.framework.initTranspiler(transpiler_allocator, &dev.log, .development, .server, &dev.server_transpiler, &dev.bundler_options.server) catch |err|
        return global.throwError(err, generic_action);
    dev.server_transpiler.options.dev_server = dev;
    dev.framework.initTranspiler(
        transpiler_allocator,
        &dev.log,
        .development,
        .client,
        &dev.client_transpiler,
        &dev.bundler_options.client,
    ) catch |err|
        return global.throwError(err, generic_action);
    dev.client_transpiler.options.dev_server = dev;

    dev.server_transpiler.resolver.watcher = dev.bun_watcher.getResolveWatcher();
    dev.client_transpiler.resolver.watcher = dev.bun_watcher.getResolveWatcher();

    if (separate_ssr_graph) {
        dev.framework.initTranspiler(transpiler_allocator, &dev.log, .development, .ssr, &dev.ssr_transpiler, &dev.bundler_options.ssr) catch |err|
            return global.throwError(err, generic_action);
        dev.ssr_transpiler.options.dev_server = dev;
        dev.ssr_transpiler.resolver.watcher = dev.bun_watcher.getResolveWatcher();
    }

    assert(dev.server_transpiler.resolver.opts.target != .browser);
    assert(dev.client_transpiler.resolver.opts.target == .browser);

    dev.framework = dev.framework.resolve(&dev.server_transpiler.resolver, &dev.client_transpiler.resolver, options.arena) catch {
        if (dev.framework.is_built_in_react)
            try bake.Framework.addReactInstallCommandNote(&dev.log);
        return global.throwValue(try dev.log.toJSAggregateError(global, bun.String.static("Framework is missing required files!")));
    };

    errdefer dev.route_lookup.clearAndFree(allocator);
    errdefer dev.client_graph.deinit(allocator);
    errdefer dev.server_graph.deinit(allocator);

    // Hash everything that should invalidate existing client state; a mismatch
    // of this key on the HMR socket forces a full page reload.
    dev.configuration_hash_key = hash_key: {
        var hash = std.hash.Wyhash.init(128);

        if (bun.Environment.isDebug) {
            // In debug, key on the executable's mtime and the HMR runtime
            // sources so rebuilt binaries never reuse stale client state.
            const stat = bun.sys.stat(bun.selfExePath() catch |e|
                Output.panic("unhandled {}", .{e})).unwrap() catch |e|
                Output.panic("unhandled {}", .{e});
            bun.writeAnyToHasher(&hash, stat.mtime());
            hash.update(bake.getHmrRuntime(.client).code);
            hash.update(bake.getHmrRuntime(.server).code);
        } else {
            hash.update(bun.Environment.git_sha_short);
        }

        // `&.{0}` acts as a separator so adjacent strings cannot collide.
        for (dev.framework.file_system_router_types) |fsr| {
            bun.writeAnyToHasher(&hash, fsr.allow_layouts);
            bun.writeAnyToHasher(&hash, fsr.ignore_underscores);
            hash.update(fsr.entry_server);
            hash.update(&.{0});
            hash.update(fsr.entry_client orelse "");
            hash.update(&.{0});
            hash.update(fsr.prefix);
            hash.update(&.{0});
            hash.update(fsr.root);
            hash.update(&.{0});
            for (fsr.extensions) |ext| {
                hash.update(ext);
                hash.update(&.{0});
            }
            hash.update(&.{0});
            for (fsr.ignore_dirs) |dir| {
                hash.update(dir);
                hash.update(&.{0});
            }
            hash.update(&.{0});
        }

        if (dev.framework.server_components) |sc| {
            bun.writeAnyToHasher(&hash, true);
            bun.writeAnyToHasher(&hash, sc.separate_ssr_graph);
            hash.update(sc.client_register_server_reference);
            hash.update(&.{0});
            hash.update(sc.server_register_client_reference);
            hash.update(&.{0});
            hash.update(sc.server_register_server_reference);
            hash.update(&.{0});
            hash.update(sc.server_runtime_import);
            hash.update(&.{0});
        } else {
            bun.writeAnyToHasher(&hash, false);
        }

        if (dev.framework.react_fast_refresh) |rfr| {
            bun.writeAnyToHasher(&hash, true);
            hash.update(rfr.import_source);
        } else {
            bun.writeAnyToHasher(&hash, false);
        }

        for (dev.framework.built_in_modules.keys(), dev.framework.built_in_modules.values()) |k, v| {
            hash.update(k);
            hash.update(&.{0});
            bun.writeAnyToHasher(&hash, std.meta.activeTag(v));
            hash.update(switch (v) {
                inline else => |data| data,
            });
            hash.update(&.{0});
        }
        hash.update(&.{0});

        break :hash_key std.fmt.bytesToHex(std.mem.asBytes(&hash.final()), .lower);
    };

    // Add react fast refresh if needed. This is the first file on the client side,
    // as it will be referred to by index.
    if (dev.framework.react_fast_refresh) |rfr| {
        assert(try dev.client_graph.insertStale(rfr.import_source, false) == IncrementalGraph(.client).react_refresh_index);
    }

    if (!dev.frontend_only) {
        dev.initServerRuntime();
    }

    // Initialize FrameworkRouter
    dev.router = router: {
        var types = try std.ArrayListUnmanaged(FrameworkRouter.Type).initCapacity(allocator, options.framework.file_system_router_types.len);
        errdefer types.deinit(allocator);

        for (options.framework.file_system_router_types, 0..) |fsr, i| {
            const buf = bun.path_buffer_pool.get();
            defer bun.path_buffer_pool.put(buf);
            const joined_root = bun.path.joinAbsStringBuf(dev.root, buf, &.{fsr.root}, .auto);
            // Router roots that do not exist on disk are silently skipped.
            const entry = dev.server_transpiler.resolver.readDirInfoIgnoreError(joined_root) orelse
                continue;

            const server_file = try dev.server_graph.insertStaleExtra(fsr.entry_server, false, true);

            try types.append(allocator, .{
                .abs_root = bun.strings.withoutTrailingSlash(entry.abs_path),
                .prefix = fsr.prefix,
                .ignore_underscores = fsr.ignore_underscores,
                .ignore_dirs = fsr.ignore_dirs,
                .extensions = fsr.extensions,
                .style = fsr.style,
                .allow_layouts = fsr.allow_layouts,
                .server_file = toOpaqueFileId(.server, server_file),
                .client_file = if (fsr.entry_client) |client|
                    toOpaqueFileId(.client, try dev.client_graph.insertStale(client, false)).toOptional()
                else
                    .none,
                .server_file_string = .empty,
            });

            try dev.route_lookup.put(allocator, server_file, .{
                .route_index = FrameworkRouter.Route.Index.init(@intCast(i)),
                .should_recurse_when_visiting = true,
            });
        }

        break :router try FrameworkRouter.initEmpty(dev.root, types.items, allocator);
    };

    // TODO: move scanning to be one tick after server startup. this way the
    // line saying the server is ready shows quicker, and route errors show up
    // after that line.
    try dev.scanInitialRoutes();

    if (bun.FeatureFlags.bake_debugging_features and dev.has_pre_crash_handler)
        try bun.crash_handler.appendPreCrashHandler(DevServer, dev, dumpStateDueToCrash);

    bun.assert(dev.magic == .valid);

    return dev;
}
|
|
|
|
/// Tears down all DevServer state and frees the heap allocation itself.
/// The `dev` pointer must not be used after this returns.
pub fn deinit(dev: *DevServer) void {
    debug.log("deinit", .{});
    dev_server_deinit_count_for_testing +|= 1;

    const allocator = dev.allocator;
    const discard = voidFieldTypeDiscardHelper;
    // `VoidFieldTypes(DevServer)` requires this initializer to name every
    // field, so adding a field without handling its teardown here becomes a
    // compile error.
    _ = VoidFieldTypes(DevServer){
        .allocation_scope = {}, // deinit at end
        .allocator = {},
        .assume_perfect_incremental_bundling = {},
        .bundler_options = {},
        .bundles_since_last_error = {},
        .client_transpiler = {},
        .configuration_hash_key = {},
        .inspector_server_id = {},
        .deferred_request_pool = {},
        .emit_incremental_visualizer_events = {},
        .emit_memory_visualizer_events = {},
        .framework = {},
        .frontend_only = {},
        .generation = {},
        .plugin_state = {},
        .root = {},
        .server = {},
        .server_transpiler = {},
        .ssr_transpiler = {},
        .vm = {},

        // WebSockets should be deinitialized before other parts
        .active_websocket_connections = {
            var it = dev.active_websocket_connections.keyIterator();
            while (it.next()) |item| {
                const s: *HmrSocket = item.*;
                if (s.underlying) |websocket|
                    websocket.close();
            }
            dev.active_websocket_connections.deinit(allocator);
        },

        .memory_visualizer_timer = if (dev.memory_visualizer_timer.state == .ACTIVE)
            dev.vm.timer.remove(&dev.memory_visualizer_timer),
        .graph_safety_lock = dev.graph_safety_lock.lock(),
        .bun_watcher = dev.bun_watcher.deinit(true),
        .dump_dir = if (bun.FeatureFlags.bake_debugging_features) if (dev.dump_dir) |*dir| dir.close(),
        .log = dev.log.deinit(),
        .server_fetch_function_callback = dev.server_fetch_function_callback.deinit(),
        .server_register_update_callback = dev.server_register_update_callback.deinit(),
        .has_pre_crash_handler = if (dev.has_pre_crash_handler)
            bun.crash_handler.removePreCrashHandler(dev),
        .router = {
            dev.router.deinit(allocator);
        },
        .route_bundles = {
            for (dev.route_bundles.items) |*rb| {
                rb.deinit(allocator);
            }
            dev.route_bundles.deinit(allocator);
        },
        .server_graph = dev.server_graph.deinit(allocator),
        .client_graph = dev.client_graph.deinit(allocator),
        .assets = dev.assets.deinit(allocator),
        // Same exhaustiveness trick for IncrementalResult's own fields.
        .incremental_result = discard(VoidFieldTypes(IncrementalResult){
            .had_adjusted_edges = {},
            .client_components_added = dev.incremental_result.client_components_added.deinit(allocator),
            .framework_routes_affected = dev.incremental_result.framework_routes_affected.deinit(allocator),
            .client_components_removed = dev.incremental_result.client_components_removed.deinit(allocator),
            .failures_removed = dev.incremental_result.failures_removed.deinit(allocator),
            .client_components_affected = dev.incremental_result.client_components_affected.deinit(allocator),
            .failures_added = dev.incremental_result.failures_added.deinit(allocator),
            .html_routes_soft_affected = dev.incremental_result.html_routes_soft_affected.deinit(allocator),
            .html_routes_hard_affected = dev.incremental_result.html_routes_hard_affected.deinit(allocator),
        }),
        .has_tailwind_plugin_hack = if (dev.has_tailwind_plugin_hack) |*hack| {
            for (hack.keys()) |key| {
                allocator.free(key);
            }
            hack.deinit(allocator);
        },
        .directory_watchers = {
            // dev.directory_watchers.dependencies
            for (dev.directory_watchers.watches.keys()) |dir_name| {
                allocator.free(dir_name);
            }
            for (dev.directory_watchers.dependencies.items) |watcher| {
                allocator.free(watcher.specifier);
            }
            dev.directory_watchers.watches.deinit(allocator);
            dev.directory_watchers.dependencies.deinit(allocator);
            dev.directory_watchers.dependencies_free_list.deinit(allocator);
        },
        .html_router = dev.html_router.map.deinit(dev.allocator),
        .bundling_failures = {
            for (dev.bundling_failures.keys()) |failure| {
                failure.deinit(dev);
            }
            dev.bundling_failures.deinit(allocator);
        },
        .current_bundle = {
            if (dev.current_bundle) |_| {
                bun.debugAssert(false); // impossible to de-initialize this state correctly.
            }
        },
        .next_bundle = {
            var r = dev.next_bundle.requests.first;
            while (r) |request| {
                // TODO: deinitializing in this state is almost certainly an assertion failure.
                // This code is shipped in release because it is only reachable by experimental server components.
                bun.debugAssert(request.data.handler != .server_handler);
                defer request.data.deref();
                r = request.next;
            }
            dev.next_bundle.route_queue.deinit(allocator);
        },
        .route_lookup = dev.route_lookup.deinit(allocator),
        .source_maps = {
            for (dev.source_maps.entries.values()) |*value| {
                bun.assert(value.ref_count > 0);
                // Force the ref count to zero so `deinit` releases everything.
                value.ref_count = 0;
                value.deinit(dev);
            }
            dev.source_maps.entries.deinit(allocator);

            if (dev.source_maps.weak_ref_sweep_timer.state == .ACTIVE)
                dev.vm.timer.remove(&dev.source_maps.weak_ref_sweep_timer);
        },

        .watcher_atomics = for (&dev.watcher_atomics.events) |*event| {
            event.dirs.deinit(dev.allocator);
            event.files.deinit(dev.allocator);
            event.extra_files.deinit(dev.allocator);
        },
        .testing_batch_events = switch (dev.testing_batch_events) {
            .disabled => {},
            .enabled => |*batch| {
                batch.entry_points.deinit(allocator);
            },
            .enable_after_bundle => {},
        },
        .broadcast_console_log_from_browser_to_server = {},

        .magic = {
            // Poison the magic value so a use-after-free is detectable.
            bun.debugAssert(dev.magic == .valid);
            dev.magic = undefined;
        },
    };
    dev.allocation_scope.deinit();
    bun.destroy(dev);
}
|
|
|
|
// Memory accounting helpers live in `./DevServer/memory_cost.zig`; they are
// re-exported here so call sites can write `DevServer.memoryCost(...)`.
pub const MemoryCost = @import("./DevServer/memory_cost.zig");
pub const memoryCost = MemoryCost.memoryCost;
pub const memoryCostDetailed = MemoryCost.memoryCostDetailed;
pub const memoryCostArrayHashMap = MemoryCost.memoryCostArrayHashMap;
pub const memoryCostArrayList = MemoryCost.memoryCostArrayList;
pub const memoryCostSlice = MemoryCost.memoryCostSlice;
|
|
|
|
/// Evaluates the server HMR runtime in the VM and captures its exported
/// `handleRequest` and `registerUpdate` callbacks into Strong handles.
/// Any failure panics, since a broken server runtime is a bug in Bun.
fn initServerRuntime(dev: *DevServer) void {
    const runtime = bun.String.static(bun.bake.getHmrRuntime(.server).code);

    const interface = c.BakeLoadInitialServerCode(
        @ptrCast(dev.vm.global),
        runtime,
        if (dev.framework.server_components) |sc| sc.separate_ssr_graph else false,
    ) catch |err| {
        dev.vm.printErrorLikeObjectToConsole(dev.vm.global.takeException(err));
        @panic("Server runtime failed to start. The above error is always a bug in Bun");
    };

    if (!interface.isObject()) @panic("Internal assertion failure: expected interface from HMR runtime to be an object");
    const fetch_function = interface.get(dev.vm.global, "handleRequest") catch null orelse
        @panic("Internal assertion failure: expected interface from HMR runtime to contain handleRequest");
    bun.assert(fetch_function.isCallable());
    dev.server_fetch_function_callback = .create(fetch_function, dev.vm.global);
    const register_update = interface.get(dev.vm.global, "registerUpdate") catch null orelse
        @panic("Internal assertion failure: expected interface from HMR runtime to contain registerUpdate");
    dev.server_register_update_callback = .create(register_update, dev.vm.global);

    // Keep the JS values GC-reachable until after the Strong handles above
    // have been created.
    fetch_function.ensureStillAlive();
    register_update.ensureStillAlive();
}
|
|
|
|
/// Deferred one tick so that the server can be up faster
fn scanInitialRoutes(dev: *DevServer) !void {
    // Walk every filesystem router type, inserting discovered routes through
    // the DevServer's insertion context.
    const insertion_ctx = FrameworkRouter.InsertionContext.wrap(DevServer, dev);
    try dev.router.scanAll(dev.allocator, &dev.server_transpiler.resolver, insertion_ctx);

    // The scan may have inserted files into both graphs; grow their stale
    // bitsets to match.
    try dev.server_graph.ensureStaleBitCapacity(true);
    try dev.client_graph.ensureStaleBitCapacity(true);
}
|
|
|
|
/// Returns true if a catch-all handler was attached.
///
/// Registers all of DevServer's HTTP + WebSocket endpoints on the uws app
/// backing `server`: generated client JS bundles, content-addressed assets,
/// source viewing, error reporting, source-map unref beacons, the HMR
/// WebSocket, and (behind a feature flag) the debugging visualizers.
pub fn setRoutes(dev: *DevServer, server: anytype) !bool {
    // TODO: all paths here must be prefixed with publicPath if set.
    dev.server = bun.jsc.API.AnyServer.from(server);
    const app = server.app.?;
    // SSL-ness is a comptime property of the uws app type.
    const is_ssl = @typeInfo(@TypeOf(app)).pointer.child.is_ssl;

    app.get(client_prefix ++ "/:route", *DevServer, dev, wrapGenericRequestHandler(onJsRequest, is_ssl));
    app.get(asset_prefix ++ "/:asset", *DevServer, dev, wrapGenericRequestHandler(onAssetRequest, is_ssl));
    app.get(internal_prefix ++ "/src/*", *DevServer, dev, wrapGenericRequestHandler(onSrcRequest, is_ssl));
    app.post(internal_prefix ++ "/report_error", *DevServer, dev, wrapGenericRequestHandler(ErrorReportRequest.run, is_ssl));
    app.post(internal_prefix ++ "/unref", *DevServer, dev, wrapGenericRequestHandler(UnrefSourceMapRequest.run, is_ssl));

    // Anything else under the internal prefix is a 404.
    app.any(internal_prefix, *DevServer, dev, wrapGenericRequestHandler(onNotFound, is_ssl));

    app.ws(
        internal_prefix ++ "/hmr",
        dev,
        0,
        uws.WebSocketBehavior.Wrap(DevServer, HmrSocket, is_ssl).apply(.{}),
    );

    if (bun.FeatureFlags.bake_debugging_features) {
        app.get(
            internal_prefix ++ "/incremental_visualizer",
            *DevServer,
            dev,
            wrapGenericRequestHandler(onIncrementalVisualizer, is_ssl),
        );
        app.get(
            internal_prefix ++ "/memory_visualizer",
            *DevServer,
            dev,
            wrapGenericRequestHandler(onMemoryVisualizer, is_ssl),
        );
        // Short aliases for the two visualizers above.
        app.get(
            internal_prefix ++ "/iv",
            *DevServer,
            dev,
            redirectHandler(internal_prefix ++ "/incremental_visualizer", is_ssl),
        );
        app.get(
            internal_prefix ++ "/mv",
            *DevServer,
            dev,
            redirectHandler(internal_prefix ++ "/memory_visualizer", is_ssl),
        );
    }

    // Only attach a catch-all handler if the framework has filesystem router
    // types. Otherwise, this can just be Bun.serve's default handler.
    if (dev.framework.file_system_router_types.len > 0) {
        app.any("/*", *DevServer, dev, wrapGenericRequestHandler(onRequest, is_ssl));
        return true;
    } else {
        return false;
    }
}
|
|
|
|
/// uws handler shape for paths that should always 404 (see `setRoutes`).
fn onNotFound(_: *DevServer, _: *Request, resp: AnyResponse) void {
    notFound(resp);
}
|
|
|
|
/// Writes a plain-text 404 response, corked for a single syscall.
fn notFound(resp: AnyResponse) void {
    resp.corked(onNotFoundCorked, .{resp});
}
|
|
|
|
/// Corked body of `notFound`.
fn onNotFoundCorked(resp: AnyResponse) void {
    resp.writeStatus("404 Not Found");
    resp.end("Not Found", false);
}
|
|
|
|
/// Served in place of a client script whose bundle id/generation no longer
/// matches (see `onJsRequest`).
fn onOutdatedjscorked(resp: AnyResponse) void {
    // Send a payload to instantly reload the page. This only happens when the
    // client bundle is invalidated while the page is loading, aka when you
    // perform many file updates that cannot be hot-updated.
    resp.writeStatus("200 OK");
    resp.writeHeader("Content-Type", MimeType.javascript.value);
    // Second line covers the case where reload() throws before the DOM is ready.
    resp.end(
        \\try{location.reload()}catch(_){}
        \\addEventListener("DOMContentLoaded",function(event){location.reload()})
    , false);
}
|
|
|
|
/// GET `client_prefix ++ "/:route"` — serves a route's generated client-side
/// JavaScript bundle, or its source map when the URL ends in `.js.map`.
///
/// The route id encodes a 64-bit hex value right before the extension:
/// for `.js` it is (generation << 32) | route_bundle_index, for `.js.map`
/// it is a `SourceMapStore.SourceId`.
fn onJsRequest(dev: *DevServer, req: *Request, resp: AnyResponse) void {
    const route_id = req.parameter(0);
    const is_map = bun.strings.hasSuffixComptime(route_id, ".js.map");
    if (!is_map and !bun.strings.hasSuffixComptime(route_id, ".js"))
        return notFound(resp);
    // Minimum length: 16 hex chars + extension. Display names may prefix this.
    const min_len = "00000000FFFFFFFF.js".len + (if (is_map) ".map".len else 0);
    if (route_id.len < min_len)
        return notFound(resp);
    // The 16 hex chars sit immediately before the extension.
    const hex = route_id[route_id.len - min_len ..][0 .. @sizeOf(u64) * 2];
    if (hex.len != @sizeOf(u64) * 2)
        return notFound(resp);
    const id = parseHexToInt(u64, hex) orelse
        return notFound(resp);

    if (is_map) {
        const source_id: SourceMapStore.SourceId = @bitCast(id);
        const entry = dev.source_maps.entries.getPtr(.init(id)) orelse
            return notFound(resp);
        var arena = std.heap.ArenaAllocator.init(dev.allocator);
        defer arena.deinit();
        // Render scratch goes in the arena; the JSON itself is allocated with
        // `dev.allocator` since its ownership moves into the StaticRoute blob.
        const json_bytes = entry.renderJSON(
            dev,
            arena.allocator(),
            source_id.kind,
            dev.allocator,
        ) catch bun.outOfMemory();
        const response = StaticRoute.initFromAnyBlob(&.fromOwnedSlice(dev.allocator, json_bytes), .{
            .server = dev.server,
            .mime_type = &.json,
        });
        defer response.deref();
        response.onRequest(req, resp);
        return;
    }

    // Low 32 bits: bundle index. High 32 bits: generation counter used to
    // detect stale script URLs after re-bundles.
    const route_bundle_index: RouteBundle.Index = .init(@intCast(id & 0xFFFFFFFF));
    const generation: u32 = @intCast(id >> 32);

    if (route_bundle_index.get() >= dev.route_bundles.items.len)
        return notFound(resp);

    const route_bundle = dev.route_bundles.items[route_bundle_index.get()];
    if (route_bundle.client_script_generation != generation or
        route_bundle.server_state != .loaded)
    {
        // Stale URL: tell the page to reload itself instead of running old code.
        return resp.corked(onOutdatedjscorked, .{resp});
    }

    dev.onJsRequestWithBundle(route_bundle_index, resp, bun.http.Method.which(req.method()) orelse .POST);
}
|
|
|
|
/// GET `asset_prefix ++ "/:asset"` — serves a content-addressed asset. The
/// parameter starts with the asset's 64-bit hash written as 16 lowercase hex
/// characters; anything after the hash (e.g. a file extension) is ignored.
fn onAssetRequest(dev: *DevServer, req: *Request, resp: AnyResponse) void {
    const param = req.parameter(0);
    if (param.len < @sizeOf(u64) * 2)
        return notFound(resp);
    // Use the shared helper rather than hand-rolling the identical
    // hexToBytes + assert + bitCast sequence (consistent with `onJsRequest`).
    const hash = parseHexToInt(u64, param[0 .. @sizeOf(u64) * 2]) orelse
        return notFound(resp);
    debug.log("onAssetRequest {} {s}", .{ hash, param });
    const asset = dev.assets.get(hash) orelse
        return notFound(resp);
    req.setYield(false);
    asset.on(resp);
}
|
|
|
|
/// Decodes `slice` — exactly `@sizeOf(T) * 2` hex characters — into a `T` via
/// a byte-wise bit-cast (i.e. native byte order, matching how the hex was
/// produced with `bytesToHex`/`fmtSliceHexLower` elsewhere in this file).
/// Returns null when the slice contains invalid hex or is too long; a
/// too-short (but valid) slice trips the assert instead, so callers must
/// pass the exact length.
pub fn parseHexToInt(comptime T: type, slice: []const u8) ?T {
    var decoded: [@sizeOf(T)]u8 = undefined;
    const written = std.fmt.hexToBytes(&decoded, slice) catch return null;
    assert(written.len == @sizeOf(T));
    return @bitCast(decoded);
}
|
|
|
|
/// Adapts a DevServer request handler to the concrete uws handler signature
/// for the given SSL-ness. Handlers may take either `AnyResponse` (the
/// SSL-erased wrapper) or the raw `*uws.NewApp(is_ssl).Response`; this wrapper
/// bridges both and asserts the DevServer is still alive on every request.
inline fn wrapGenericRequestHandler(
    comptime handler: anytype,
    comptime is_ssl: bool,
) fn (
    dev: *DevServer,
    req: *Request,
    resp: *uws.NewApp(is_ssl).Response,
) void {
    const fn_info = @typeInfo(@TypeOf(handler)).@"fn";
    assert(fn_info.params.len == 3);
    // Detect which response type the handler wants at comptime.
    const uses_any_response = if (fn_info.params[2].type) |t| t == AnyResponse else false;
    return struct {
        fn handle(dev: *DevServer, req: *Request, resp: *uws.NewApp(is_ssl).Response) void {
            // UAF guard: `magic` is invalidated when the DevServer is freed.
            assert(dev.magic == .valid);
            handler(dev, req, if (uses_any_response) AnyResponse.init(resp) else resp);
        }
    }.handle;
}
|
|
|
|
/// Returns a uws handler that unconditionally issues a 302 redirect to the
/// comptime-known `path`. Used for the short visualizer aliases in `setRoutes`.
inline fn redirectHandler(comptime path: []const u8, comptime is_ssl: bool) fn (
    dev: *DevServer,
    req: *Request,
    resp: *uws.NewApp(is_ssl).Response,
) void {
    const Redirect = struct {
        // The DevServer and request are irrelevant to a fixed redirect.
        fn handle(_: *DevServer, _: *Request, resp: *uws.NewApp(is_ssl).Response) void {
            resp.writeStatus("302 Found");
            resp.writeHeader("Location", path);
            resp.end("Redirecting...", false);
        }
    };
    return Redirect.handle;
}
|
|
|
|
/// Serves the IncrementalGraph debugging visualizer page.
fn onIncrementalVisualizer(_: *DevServer, _: *Request, resp: AnyResponse) void {
    resp.corked(onIncrementalVisualizerCorked, .{resp});
}

fn onIncrementalVisualizerCorked(resp: AnyResponse) void {
    // Embedded in release builds; read from disk in development so edits to
    // the HTML do not require rebuilding Bun.
    const code = if (Environment.codegen_embed)
        @embedFile("incremental_visualizer.html")
    else
        bun.runtimeEmbedFile(.src_eager, "bake/incremental_visualizer.html");
    resp.end(code, false);
}
|
|
|
|
/// Serves the memory-usage debugging visualizer page.
fn onMemoryVisualizer(_: *DevServer, _: *Request, resp: AnyResponse) void {
    resp.corked(onMemoryVisualizerCorked, .{resp});
}

fn onMemoryVisualizerCorked(resp: AnyResponse) void {
    // Embedded in release builds; read from disk in development so edits to
    // the HTML do not require rebuilding Bun.
    const code = if (Environment.codegen_embed)
        @embedFile("memory_visualizer.html")
    else
        bun.runtimeEmbedFile(.src_eager, "bake/memory_visualizer.html");
    resp.end(code, false);
}
|
|
|
|
/// Drives a route through its bundling state machine until the request can be
/// answered: queues it behind an in-flight bundle, lazily loads plugins,
/// starts a bundle for stale entry points, surfaces bundler/evaluation
/// failures, or — once `.loaded` — dispatches to the framework or HTML
/// handler. The labeled `sw:` switch lets states fall through to one another
/// via `continue :sw`.
fn ensureRouteIsBundled(
    dev: *DevServer,
    route_bundle_index: RouteBundle.Index,
    kind: DeferredRequest.Handler.Kind,
    req: *Request,
    resp: AnyResponse,
) bun.JSError!void {
    assert(dev.magic == .valid);
    assert(dev.server != null);
    sw: switch (dev.routeBundlePtr(route_bundle_index).server_state) {
        .unqueued => {
            if (dev.current_bundle != null) {
                // A bundle is already running; park this route + request for
                // the next bundle round.
                try dev.next_bundle.route_queue.put(dev.allocator, route_bundle_index, {});
                dev.routeBundlePtr(route_bundle_index).server_state = .bundling;
                try dev.deferRequest(&dev.next_bundle.requests, route_bundle_index, kind, req, resp);
            } else {
                // If plugins are not yet loaded, prepare them.
                // In the case plugins are set to &.{}, this will not hit `.pending`.
                plugin: switch (dev.plugin_state) {
                    .unknown => if (dev.bundler_options.plugin != null) {
                        // Framework-provided plugin is likely going to be phased out later
                        dev.plugin_state = .loaded;
                    } else {
                        // TODO: implement a proper solution here
                        dev.has_tailwind_plugin_hack = if (dev.vm.transpiler.options.serve_plugins) |serve_plugins|
                            for (serve_plugins) |plugin| {
                                if (bun.strings.includes(plugin, "tailwind")) break .empty;
                            } else null
                        else
                            null;

                        switch (dev.server.?.getOrLoadPlugins(.{ .dev_server = dev })) {
                            .pending => {
                                dev.plugin_state = .pending;
                                continue :plugin .pending;
                            },
                            .err => {
                                dev.plugin_state = .err;
                                continue :plugin .err;
                            },
                            .ready => |ready| {
                                dev.plugin_state = .loaded;
                                dev.bundler_options.plugin = ready;
                            },
                        }
                    },
                    .pending => {
                        // Plugins still loading asynchronously; queue the
                        // route and request for when they resolve.
                        try dev.next_bundle.route_queue.put(dev.allocator, route_bundle_index, {});
                        dev.routeBundlePtr(route_bundle_index).server_state = .bundling;
                        try dev.deferRequest(&dev.next_bundle.requests, route_bundle_index, kind, req, resp);
                        return;
                    },
                    .err => {
                        // TODO: render plugin error page
                        resp.end("Plugin Error", false);
                        return;
                    },
                    .loaded => {},
                }

                // Prepare a bundle with just this route.
                var sfa = std.heap.stackFallback(4096, dev.allocator);
                const temp_alloc = sfa.get();

                var entry_points: EntryPointList = .empty;
                defer entry_points.deinit(temp_alloc);
                try dev.appendRouteEntryPointsIfNotStale(&entry_points, temp_alloc, route_bundle_index);

                // If all files were already bundled (possible with layouts),
                // then no entry points will be queued up here. That does
                // not mean the route is ready for presentation.
                if (entry_points.set.count() == 0) {
                    if (dev.bundling_failures.count() > 0) {
                        dev.routeBundlePtr(route_bundle_index).server_state = .possible_bundling_failures;
                        continue :sw .possible_bundling_failures;
                    } else {
                        dev.routeBundlePtr(route_bundle_index).server_state = .loaded;
                        continue :sw .loaded;
                    }
                }

                try dev.deferRequest(&dev.next_bundle.requests, route_bundle_index, kind, req, resp);

                dev.startAsyncBundle(
                    entry_points,
                    false,
                    std.time.Timer.start() catch @panic("timers unsupported"),
                ) catch bun.outOfMemory();
            }

            dev.routeBundlePtr(route_bundle_index).server_state = .bundling;
        },
        .bundling => {
            bun.assert(dev.current_bundle != null);
            try dev.deferRequest(&dev.current_bundle.?.requests, route_bundle_index, kind, req, resp);
        },
        .possible_bundling_failures => {
            if (dev.bundling_failures.count() > 0) {
                // Trace the graph to see if there are any failures that are
                // reachable by this route.
                switch (try checkRouteFailures(dev, route_bundle_index, resp)) {
                    .stop => return,
                    .ok => {}, // Errors were cleared or not in the way.
                    .rebuild => continue :sw .unqueued, // Do the build all over again
                }
            }

            dev.routeBundlePtr(route_bundle_index).server_state = .loaded;
            continue :sw .loaded;
        },
        .evaluation_failure => {
            // The route's server code threw at module-evaluation time; show
            // the serialized failure instead of the page.
            try dev.sendSerializedFailures(
                resp,
                (&(dev.routeBundlePtr(route_bundle_index).data.framework.evaluate_failure.?))[0..1],
                .evaluation,
                null,
            );
        },
        .loaded => switch (kind) {
            .server_handler => try dev.onFrameworkRequestWithBundle(route_bundle_index, .{ .stack = req }, resp),
            .bundled_html_page => dev.onHtmlRequestWithBundle(route_bundle_index, resp, bun.http.Method.which(req.method()) orelse .POST),
        },
    }
}
|
|
|
|
/// Parks `req`/`resp` in `requests_array` until bundling finishes. The node
/// comes from `dev.deferred_request_pool` and ends up with two owners (see
/// `DeferredRequest.ref_count`): the list and the response's abort callback.
fn deferRequest(
    dev: *DevServer,
    requests_array: *DeferredRequest.List,
    route_bundle_index: RouteBundle.Index,
    kind: DeferredRequest.Handler.Kind,
    req: *Request,
    resp: AnyResponse,
) !void {
    const deferred = dev.deferred_request_pool.get();
    const method = bun.http.Method.which(req.method()) orelse .POST;
    // Build the handler before populating `deferred.data` so the bail-out
    // path can hand the node back to the pool. (The previous version early
    // `return`ed from inside the struct literal, leaking the pooled node
    // whenever `prepareAndSaveJsRequestContext` returned null.)
    const handler: DeferredRequest.Handler = switch (kind) {
        .bundled_html_page => .{ .bundled_html_page = .{ .response = resp, .method = method } },
        .server_handler => .{
            .server_handler = dev.server.?.prepareAndSaveJsRequestContext(req, resp, dev.vm.global, method) orelse {
                // Context preparation handled/aborted the request itself;
                // return the unused node to the pool.
                dev.deferred_request_pool.put(deferred);
                return;
            },
        },
    };
    deferred.data = .{
        .route_bundle_index = route_bundle_index,
        .dev = dev,
        .ref_count = .init(),
        .handler = handler,
    };
    // Second reference: the abort callback registered below.
    deferred.data.ref();
    resp.onAborted(*DeferredRequest, DeferredRequest.onAbort, &deferred.data);
    requests_array.prepend(deferred);
}
|
|
|
|
/// Traces the route's import graph looking for recorded bundling failures.
/// Returns `.stop` after sending the failures to the client, `.ok` when no
/// failure is reachable from this route, or `.rebuild` when everything
/// reachable was marked stale and the caller should bundle again.
fn checkRouteFailures(
    dev: *DevServer,
    route_bundle_index: RouteBundle.Index,
    resp: anytype,
) !enum { stop, ok, rebuild } {
    var sfa_state = std.heap.stackFallback(65536, dev.allocator);
    const sfa = sfa_state.get();
    var gts = try dev.initGraphTraceState(sfa, 0);
    defer gts.deinit(sfa);
    // `failures_added` is scratch output of the trace; clear it for the next use.
    defer dev.incremental_result.failures_added.clearRetainingCapacity();
    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();
    try dev.traceAllRouteImports(dev.routeBundlePtr(route_bundle_index), &gts, .find_errors);
    if (dev.incremental_result.failures_added.items.len > 0) {
        // See comment on this field for information
        if (!dev.assume_perfect_incremental_bundling) {
            // Cache bust EVERYTHING reachable
            inline for (.{
                .{ .graph = &dev.client_graph, .bits = &gts.client_bits },
                .{ .graph = &dev.server_graph, .bits = &gts.server_bits },
            }) |entry| {
                var it = entry.bits.iterator(.{});
                while (it.next()) |file_index| {
                    entry.graph.stale_files.set(file_index);
                }
            }
            return .rebuild;
        }

        try dev.sendSerializedFailures(
            resp,
            dev.incremental_result.failures_added.items,
            .bundler,
            null,
        );
        return .stop;
    } else {
        // Failures are unreachable by this route, so it is OK to load.
        return .ok;
    }
}
|
|
|
|
/// Collects the entry points of a route bundle that still need bundling
/// (stale or never-bundled files). For framework routes this walks the router
/// type's server/client files, the page, and every ancestor layout; for HTML
/// routes it is just the HTML file itself.
fn appendRouteEntryPointsIfNotStale(dev: *DevServer, entry_points: *EntryPointList, alloc: Allocator, rbi: RouteBundle.Index) bun.OOM!void {
    const server_file_names = dev.server_graph.bundled_files.keys();
    const client_file_names = dev.client_graph.bundled_files.keys();

    // Build a list of all files that have not yet been bundled.
    switch (dev.routeBundlePtr(rbi).data) {
        .framework => |*bundle| {
            var route = dev.router.routePtr(bundle.route_index);
            const router_type = dev.router.typePtr(route.type);
            try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, router_type.server_file);
            try dev.appendOpaqueEntryPoint(client_file_names, entry_points, alloc, .client, router_type.client_file);
            try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, route.file_page);
            try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, route.file_layout);
            // Layouts cascade: include every ancestor's layout as well.
            while (route.parent.unwrap()) |parent_index| {
                route = dev.router.routePtr(parent_index);
                try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, route.file_layout);
            }
        },
        .html => |*html| {
            try entry_points.append(alloc, html.html_bundle.data.bundle.data.path, .{ .client = true });
        },
    }

    // Tailwind hack: force known CSS files back into the bundle so the plugin
    // can re-scan them (see `has_tailwind_plugin_hack`).
    if (dev.has_tailwind_plugin_hack) |*map| {
        for (map.keys()) |abs_path| {
            const file = dev.client_graph.bundled_files.get(abs_path) orelse
                continue;
            if (file.flags.kind == .css)
                // NOTE(review): `catch bun.outOfMemory()` aborts instead of
                // propagating even though this fn returns bun.OOM!void —
                // presumably intentional, but `try` would match the appends
                // above; confirm.
                entry_points.appendCss(alloc, abs_path) catch bun.outOfMemory();
        }
    }
}
|
|
|
|
/// Invokes the framework's server-side `handleRequest` for a loaded
/// `.framework` route bundle. Re-matches the URL to extract route params,
/// then calls into JS with five arguments (router type main file, module
/// list, client bundle URL, CSS list, params), caching the JS values that do
/// not change between requests on the route/bundle structs.
fn onFrameworkRequestWithBundle(
    dev: *DevServer,
    route_bundle_index: RouteBundle.Index,
    req: bun.jsc.API.SavedRequest.Union,
    resp: AnyResponse,
) bun.JSError!void {
    const route_bundle = dev.routeBundlePtr(route_bundle_index);
    assert(route_bundle.data == .framework);

    const bundle = &route_bundle.data.framework;

    // Extract route params by re-matching the URL
    var params: FrameworkRouter.MatchedParams = undefined;
    const url_bunstr = switch (req) {
        // Live request: borrow the URL bytes without copying.
        .stack => |r| bun.String{
            .tag = .ZigString,
            .value = .{ .ZigString = bun.ZigString.fromUTF8(r.url()) },
        },
        // Previously-saved request: take a ref on its stored URL string.
        .saved => |data| brk: {
            const url = data.request.url;
            url.ref();
            break :brk url;
        },
    };
    defer url_bunstr.deref();
    const url = url_bunstr.toUTF8(bun.default_allocator);
    defer url.deinit();

    // Extract pathname from URL (remove protocol, host, query, hash)
    const pathname = if (std.mem.indexOf(u8, url.byteSlice(), "://")) |proto_end| blk: {
        const after_proto = url.byteSlice()[proto_end + 3 ..];
        if (std.mem.indexOfScalar(u8, after_proto, '/')) |path_start| {
            const path_with_query = after_proto[path_start..];
            // Remove query string and hash
            const end = bun.strings.indexOfAny(path_with_query, "?#") orelse path_with_query.len;
            break :blk path_with_query[0..end];
        }
        break :blk "/";
    } else url.byteSlice();

    // Create params JSValue
    // TODO: lazy structure caching since we are making these objects a lot
    const params_js_value = if (dev.router.matchSlow(pathname, &params)) |_| blk: {
        const global = dev.vm.global;
        const params_array = params.params.slice();

        if (params_array.len == 0) {
            break :blk JSValue.null;
        }

        // Create a JavaScript object with params
        const obj = JSValue.createEmptyObject(global, params_array.len);
        for (params_array) |param| {
            const key_str = bun.String.cloneUTF8(param.key);
            defer key_str.deref();
            const value_str = bun.String.cloneUTF8(param.value);
            defer value_str.deref();

            obj.put(global, key_str, value_str.toJS(global));
        }
        break :blk obj;
    } else JSValue.null;

    const server_request_callback = dev.server_fetch_function_callback.get() orelse
        unreachable; // did not initialize server code

    const router_type = dev.router.typePtr(dev.router.routePtr(bundle.route_index).type);

    dev.server.?.onRequestFromSaved(
        req,
        resp,
        server_request_callback,
        5,
        .{
            // routerTypeMain
            router_type.server_file_string.get() orelse str: {
                const name = dev.server_graph.bundled_files.keys()[fromOpaqueFileId(.server, router_type.server_file).get()];
                const relative_path_buf = bun.path_buffer_pool.get();
                defer bun.path_buffer_pool.put(relative_path_buf);
                const str = try bun.String.createUTF8ForJS(dev.vm.global, dev.relativePath(relative_path_buf, name));
                router_type.server_file_string = .create(str, dev.vm.global);
                break :str str;
            },
            // routeModules
            bundle.cached_module_list.get() orelse arr: {
                const global = dev.vm.global;
                const keys = dev.server_graph.bundled_files.keys();
                // First pass: count entries (page + each ancestor layout).
                var n: usize = 1;
                var route = dev.router.routePtr(bundle.route_index);
                while (true) {
                    if (route.file_layout != .none) n += 1;
                    route = dev.router.routePtr(route.parent.unwrap() orelse break);
                }
                const arr = try JSValue.createEmptyArray(global, n);
                route = dev.router.routePtr(bundle.route_index);
                // Index 0 is always the page file.
                {
                    const relative_path_buf = bun.path_buffer_pool.get();
                    defer bun.path_buffer_pool.put(relative_path_buf);
                    var route_name = bun.String.cloneUTF8(dev.relativePath(relative_path_buf, keys[fromOpaqueFileId(.server, route.file_page.unwrap().?).get()]));
                    try arr.putIndex(global, 0, route_name.transferToJS(global));
                }
                // Second pass: fill in layouts from innermost outward.
                n = 1;
                while (true) {
                    if (route.file_layout.unwrap()) |layout| {
                        const relative_path_buf = bun.path_buffer_pool.get();
                        defer bun.path_buffer_pool.put(relative_path_buf);
                        var layout_name = bun.String.cloneUTF8(dev.relativePath(
                            relative_path_buf,
                            keys[fromOpaqueFileId(.server, layout).get()],
                        ));
                        try arr.putIndex(global, @intCast(n), layout_name.transferToJS(global));
                        n += 1;
                    }
                    route = dev.router.routePtr(route.parent.unwrap() orelse break);
                }
                bundle.cached_module_list = .create(arr, global);
                break :arr arr;
            },
            // clientId
            bundle.cached_client_bundle_url.get() orelse str: {
                // Encoded as bundle-index hex followed by generation hex; see
                // `onJsRequest` for the decoding side.
                const bundle_index: u32 = route_bundle_index.get();
                const generation: u32 = route_bundle.client_script_generation;
                const str = bun.String.createFormat(client_prefix ++ "/route-{}{}.js", .{
                    std.fmt.fmtSliceHexLower(std.mem.asBytes(&bundle_index)),
                    std.fmt.fmtSliceHexLower(std.mem.asBytes(&generation)),
                }) catch bun.outOfMemory();
                defer str.deref();
                const js = str.toJS(dev.vm.global);
                bundle.cached_client_bundle_url = .create(js, dev.vm.global);
                break :str js;
            },
            // styles
            bundle.cached_css_file_array.get() orelse arr: {
                const js = dev.generateCssJSArray(route_bundle) catch bun.outOfMemory();
                bundle.cached_css_file_array = .create(js, dev.vm.global);
                break :arr js;
            },
            // params
            params_js_value,
        },
    );
}
|
|
|
|
/// Serves a loaded `.html` route bundle, generating and caching the injected
/// HTML payload on first hit.
fn onHtmlRequestWithBundle(dev: *DevServer, route_bundle_index: RouteBundle.Index, resp: AnyResponse, method: bun.http.Method) void {
    const route_bundle = dev.routeBundlePtr(route_bundle_index);
    assert(route_bundle.data == .html);
    const html = &route_bundle.data.html;

    const blob = html.cached_response orelse generate: {
        const payload = generateHTMLPayload(dev, route_bundle_index, route_bundle, html) catch bun.outOfMemory();
        errdefer dev.allocator.free(payload);

        // Ownership of `payload` moves into the StaticRoute blob.
        html.cached_response = StaticRoute.initFromAnyBlob(
            &.fromOwnedSlice(dev.allocator, payload),
            .{
                .mime_type = &.html,
                .server = dev.server orelse unreachable,
            },
        );
        break :generate html.cached_response.?;
    };
    blob.onWithMethod(method, resp);
}
|
|
|
|
/// This payload is used to unref the source map weak reference if the page
/// starts loading but the JavaScript code is not reached. The event handler
/// is replaced by the HMR runtime to one that handles things better.
///
/// The argument `a` is the 8-hex-char source-map id sliced out of the dev
/// server script tag's `src` (the 11th-through-4th chars from the end, i.e.
/// the chars before ".js"). NOTE(review): the beacon path "/_bun/unref" is
/// hard-coded rather than built from `internal_prefix` — confirm they stay
/// in sync.
const script_unref_payload = "<script>" ++
    "((a)=>{" ++
    "document.addEventListener('visibilitychange'," ++
    "globalThis[Symbol.for('bun:loadData')]=()=>" ++
    "document.visibilityState==='hidden'&&" ++
    "navigator.sendBeacon('/_bun/unref',a)" ++
    ");" ++
    "})(document.querySelector('[data-bun-dev-server-script]').src.slice(-11,-3))" ++
    "</script>";
|
|
|
|
/// Builds the final HTML response body for an HTML route: the bundled HTML
/// with `<link>` tags for every reachable CSS file and the dev-server client
/// script tag injected at the recorded head offset. The output buffer is
/// sized exactly up front and filled with `appendSliceAssumeCapacity`; the
/// closing assert verifies the size arithmetic. Caller owns the returned
/// slice (allocated with `dev.allocator`).
fn generateHTMLPayload(dev: *DevServer, route_bundle_index: RouteBundle.Index, route_bundle: *RouteBundle, html: *RouteBundle.HTML) bun.OOM![]u8 {
    assert(route_bundle.server_state == .loaded); // if not loaded, following values wont be initialized
    assert(html.html_bundle.data.dev_server_id.unwrap() == route_bundle_index);
    assert(html.cached_response == null);
    const script_injection_offset = (html.script_injection_offset.unwrap() orelse unreachable).get();
    const bundled_html = html.bundled_html_text orelse unreachable;

    // The bundler records an offsets in development mode, splitting the HTML
    // file into two chunks. DevServer is able to insert style/script tags
    // using the information available in IncrementalGraph. This approach
    // allows downstream files to update without re-bundling the HTML file.
    //
    // <!DOCTYPE html>
    // <html>
    // <head>
    //   <title>Single Page Web App</title>
    //   {script_injection_offset}</head>
    // <body>
    //   <div id="root"></div>
    // </body>
    // </html>
    const before_head_end = bundled_html[0..script_injection_offset];
    const after_head_end = bundled_html[script_injection_offset..];

    var display_name = bun.strings.withoutSuffixComptime(bun.path.basename(html.html_bundle.data.bundle.data.path), ".html");
    // TODO: function for URL safe chars
    if (!bun.strings.isAllASCII(display_name)) display_name = "page";

    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();

    // Prepare bitsets for tracing
    var sfa_state = std.heap.stackFallback(65536, dev.allocator);
    const sfa = sfa_state.get();
    var gts = try dev.initGraphTraceState(sfa, 0);
    defer gts.deinit(sfa);
    // Run tracing
    dev.client_graph.reset();
    try dev.traceAllRouteImports(route_bundle, &gts, .find_css);

    const css_ids = dev.client_graph.current_css_files.items;

    // Exact output size; every literal below must be accounted for here.
    const payload_size = bundled_html.len +
        ("<link rel=\"stylesheet\" href=\"" ++ asset_prefix ++ "/0000000000000000.css\">").len * css_ids.len +
        "<script type=\"module\" crossorigin src=\"\" data-bun-dev-server-script></script>".len +
        client_prefix.len + "/".len +
        display_name.len +
        "-0000000000000000.js".len +
        script_unref_payload.len;

    var array: std.ArrayListUnmanaged(u8) = try std.ArrayListUnmanaged(u8).initCapacity(dev.allocator, payload_size);
    errdefer array.deinit(dev.allocator);
    array.appendSliceAssumeCapacity(before_head_end);

    // Insert all link tags before "</head>"
    for (css_ids) |name| {
        array.appendSliceAssumeCapacity("<link rel=\"stylesheet\" href=\"" ++ asset_prefix ++ "/");
        array.appendSliceAssumeCapacity(&std.fmt.bytesToHex(std.mem.asBytes(&name), .lower));
        array.appendSliceAssumeCapacity(".css\">");
    }

    // Script URL: {client_prefix}/{display_name}-{index hex}{generation hex}.js
    // (decoded by `onJsRequest`).
    array.appendSliceAssumeCapacity("<script type=\"module\" crossorigin src=\"");
    array.appendSliceAssumeCapacity(client_prefix);
    array.appendSliceAssumeCapacity("/");
    array.appendSliceAssumeCapacity(display_name);
    array.appendSliceAssumeCapacity("-");
    array.appendSliceAssumeCapacity(&std.fmt.bytesToHex(std.mem.asBytes(&@as(u32, route_bundle_index.get())), .lower));
    array.appendSliceAssumeCapacity(&std.fmt.bytesToHex(std.mem.asBytes(&route_bundle.client_script_generation), .lower));
    array.appendSliceAssumeCapacity(".js\" data-bun-dev-server-script></script>");
    array.appendSliceAssumeCapacity(script_unref_payload);

    // DevServer used to put the script tag before the body end, but to match the regular bundler it does not do this.
    array.appendSliceAssumeCapacity(after_head_end);
    assert(array.items.len == array.capacity); // incorrect memory allocation size
    return array.items;
}
|
|
|
|
/// Generates the HMR module-map entry for a bundled HTML file: a JSON-string
/// key (the file's pretty path) mapping to a stub module whose import list
/// contains only the JavaScript-like imports of the HTML file. Caller owns
/// the returned slice, which is always allocated with `dev.allocator`.
fn generateJavaScriptCodeForHTMLFile(
    dev: *DevServer,
    index: bun.ast.Index,
    import_records: []bun.BabyList(bun.ImportRecord),
    input_file_sources: []bun.logger.Source,
    loaders: []bun.options.Loader,
) bun.OOM![]const u8 {
    var sfa_state = std.heap.stackFallback(65536, dev.allocator);
    const sfa = sfa_state.get();
    var array = std.ArrayListUnmanaged(u8).initCapacity(sfa, 65536) catch bun.outOfMemory();
    // Free only on the error path. On success, either the memory is the stack
    // buffer (no free needed) or ownership transfers to the caller.
    errdefer array.deinit(sfa);
    const w = array.writer(sfa);

    try w.writeAll(" ");
    try bun.js_printer.writeJSONString(input_file_sources[index.get()].path.pretty, @TypeOf(w), w, .utf8);
    try w.writeAll(": [ [");
    var any = false;
    for (import_records[index.get()].slice()) |import| {
        if (import.source_index.isValid()) {
            if (!loaders[import.source_index.get()].isJavaScriptLike())
                continue; // ignore non-JavaScript imports
        } else {
            // Find the in-graph import.
            const file = dev.client_graph.bundled_files.get(import.path.text) orelse
                continue;
            if (file.flags.kind != .js)
                continue;
        }
        if (!any) {
            any = true;
            try w.writeAll("\n");
        }
        try w.writeAll(" ");
        try bun.js_printer.writeJSONString(import.path.pretty, @TypeOf(w), w, .utf8);
        try w.writeAll(", 0,\n");
    }
    if (any) {
        try w.writeAll(" ");
    }
    try w.writeAll("], [], [], () => {}, false],\n");

    // If the bytes still live in the stack buffer they would dangle once this
    // frame returns, so clone them with the long-lived allocator. Otherwise
    // the list already reallocated through the fallback (`dev.allocator`),
    // and ownership can be transferred to the caller directly.
    //
    // Bug fix: the old code ran `defer array.deinit(sfa)` unconditionally,
    // which freed heap-backed results (outputs that outgrew the 64 KiB stack
    // buffer) *before* returning them — a use-after-free on large HTML files.
    if (array.items.ptr == &sfa_state.buffer) {
        return try dev.allocator.dupe(u8, array.items);
    }
    return array.items;
}
|
|
|
|
/// Serves a route's client JavaScript bundle, generating and caching it on
/// first hit, and registers a weak source-map reference for the response.
pub fn onJsRequestWithBundle(dev: *DevServer, bundle_index: RouteBundle.Index, resp: AnyResponse, method: bun.http.Method) void {
    const route_bundle = dev.routeBundlePtr(bundle_index);
    const blob = route_bundle.client_bundle orelse generate: {
        const payload = dev.generateClientBundle(route_bundle) catch bun.outOfMemory();
        errdefer dev.allocator.free(payload);
        // Ownership of `payload` moves into the StaticRoute blob.
        route_bundle.client_bundle = StaticRoute.initFromAnyBlob(
            &.fromOwnedSlice(dev.allocator, payload),
            .{
                .mime_type = &.javascript,
                .server = dev.server orelse unreachable,
            },
        );
        break :generate route_bundle.client_bundle.?;
    };
    // The browser releases this ref via the unref beacon or the HMR runtime.
    dev.source_maps.addWeakRef(route_bundle.sourceMapId());
    blob.onWithMethod(method, resp);
}
|
|
|
|
/// GET `internal_prefix ++ "/src/*"` — intended to open a source file in the
/// user's editor (or eventually view it). Both paths currently respond 501;
/// the previous open-in-editor implementation is kept below for reference.
pub fn onSrcRequest(dev: *DevServer, req: *uws.Request, resp: anytype) void {
    if (req.header("open-in-editor") == null) {
        resp.writeStatus("501 Not Implemented");
        resp.end("Viewing source without opening in editor is not implemented yet!", false);
        return;
    }

    // TODO: better editor detection. on chloe's dev env, this opens apple terminal + vim
    // This is already done in Next.js. we have to port this to Zig so we can use.
    resp.writeStatus("501 Not Implemented");
    resp.end("TODO", false);
    _ = dev;

    // const ctx = &dev.vm.rareData().editor_context;
    // ctx.autoDetectEditor(jsc.VirtualMachine.get().transpiler.env);
    // const line: ?[]const u8 = req.header("editor-line");
    // const column: ?[]const u8 = req.header("editor-column");

    // if (ctx.editor) |editor| {
    //     var url = req.url()[internal_prefix.len + "/src/".len ..];
    //     if (bun.strings.indexOfChar(url, ':')) |colon| {
    //         url = url[0..colon];
    //     }
    //     editor.open(ctx.path, url, line, column, dev.allocator) catch {
    //         resp.writeStatus("202 No Content");
    //         resp.end("", false);
    //         return;
    //     };
    //     resp.writeStatus("202 No Content");
    //     resp.end("", false);
    // } else {
    //     resp.writeStatus("500 Internal Server Error");
    //     resp.end("Please set your editor in bunfig.toml", false);
    // }
}
|
|
|
|
/// When requests are waiting on a bundle, the relevant request information is
|
|
/// prepared and stored in a linked list.
|
|
/// When requests are waiting on a bundle, the relevant request information is
/// prepared and stored in a linked list.
pub const DeferredRequest = struct {
    /// A small maximum is set because development servers are unlikely to
    /// acquire much load, so allocating a ton at the start for no reason
    /// is very silly. This contributes to ~6kb of the initial DevServer allocation.
    const max_preallocated = 16;

    pub const List = std.SinglyLinkedList(DeferredRequest);
    pub const Node = List.Node;

    const RefCount = bun.ptr.RefCount(@This(), "ref_count", deinitImpl, .{});

    route_bundle_index: RouteBundle.Index,
    handler: Handler,
    dev: *DevServer,

    /// This struct can have at most 2 references it:
    /// - The dev server (`dev.current_bundle.requests`)
    /// - uws.Response as a user data pointer
    ref_count: RefCount,

    // expose `ref` and `deref` as public methods
    pub const ref = RefCount.ref;
    pub const deref = RefCount.deref;

    const Handler = union(enum) {
        /// For a .framework route. This says to call and render the page.
        server_handler: bun.jsc.API.SavedRequest,
        /// For a .html route. Serve the bundled HTML page.
        bundled_html_page: ResponseAndMethod,
        /// Do nothing and free this node. To simplify lifetimes,
        /// the `DeferredRequest` is not freed upon abortion. Which
        /// is okay since most requests do not abort.
        aborted,

        /// Does not include `aborted` because branching on that value
        /// has no meaningful purpose, so it is excluded.
        const Kind = enum {
            server_handler,
            bundled_html_page,
        };
    };

    /// uws abort callback (registered in `deferRequest`).
    fn onAbort(this: *DeferredRequest, resp: AnyResponse) void {
        _ = resp;
        this.abort();
        assert(this.handler == .aborted);
    }

    /// *WARNING*: Do not call this directly, instead call `.deref()`
    ///
    /// Calling this is only required if the desired handler is going to be avoided,
    /// such as for bundling failures or aborting the server.
    /// Does not free the underlying `DeferredRequest.Node`
    fn deinitImpl(this: *DeferredRequest) void {
        this.ref_count.assertNoRefs();

        // Return the containing pool node after releasing handler state.
        defer this.dev.deferred_request_pool.put(@fieldParentPtr("data", this));
        switch (this.handler) {
            .server_handler => |*saved| saved.deinit(),
            .bundled_html_page, .aborted => {},
        }
    }

    /// Deinitializes state by aborting the connection.
    fn abort(this: *DeferredRequest) void {
        // Swap to `.aborted` first so re-entrant aborts are no-ops.
        var handler = this.handler;
        this.handler = .aborted;
        switch (handler) {
            .server_handler => |*saved| {
                saved.ctx.onAbort(saved.response);
                saved.js_request.deinit();
            },
            .bundled_html_page => |r| {
                r.response.endWithoutBody(true);
            },
            .aborted => {},
        }
    }
};
|
|
|
|
/// Pairs a response handle with the HTTP method of the originating request,
/// so a deferred `.html` route handler can reply after bundling completes.
const ResponseAndMethod = struct {
    response: AnyResponse,
    method: bun.http.Method,
};
|
|
|
|
/// Starts an asynchronous bundle for `entry_points`, moving all requests
/// queued in `dev.next_bundle` into the new in-flight `dev.current_bundle`.
/// Asserts that no bundle is already running and that there is at least one
/// entry point. On allocation failure the bundler arena is freed via
/// `errdefer`; DevServer state is otherwise untouched.
pub fn startAsyncBundle(
    dev: *DevServer,
    entry_points: EntryPointList,
    had_reload_event: bool,
    timer: std.time.Timer,
) bun.OOM!void {
    assert(dev.current_bundle == null);
    assert(entry_points.set.count() > 0);
    dev.log.clearAndFree();

    // Notify inspector about bundle start
    if (dev.inspector()) |agent| {
        var sfa_state = std.heap.stackFallback(256, dev.allocator);
        const sfa = sfa_state.get();
        var trigger_files = try std.ArrayList(bun.String).initCapacity(sfa, entry_points.set.count());
        defer trigger_files.deinit();
        // Each cloned string is deref'd after `notifyBundleStart` returns.
        defer for (trigger_files.items) |*str| {
            str.deref();
        };
        for (entry_points.set.keys()) |key| {
            try trigger_files.append(bun.String.cloneUTF8(key));
        }

        agent.notifyBundleStart(dev.inspector_server_id, trigger_files.items);
    }

    dev.incremental_result.reset();

    // Ref server to keep it from closing.
    if (dev.server) |server| server.onPendingRequest();

    // All bundle-scoped allocations live in this arena; it is destroyed when
    // the bundle finishes (see `finalizeBundle`) or if init below fails.
    var heap = ThreadLocalArena.init();
    errdefer heap.deinit();
    const allocator = heap.allocator();
    const ast_memory_allocator = try allocator.create(bun.ast.ASTMemoryAllocator);
    var ast_scope = ast_memory_allocator.enter(allocator);
    defer ast_scope.exit();

    const bv2 = try BundleV2.init(
        &dev.server_transpiler,
        .{
            .framework = dev.framework,
            .client_transpiler = &dev.client_transpiler,
            .ssr_transpiler = &dev.ssr_transpiler,
            .plugins = dev.bundler_options.plugin,
        },
        allocator,
        .{ .js = dev.vm.eventLoop() },
        false, // watching is handled separately
        jsc.WorkPool.get(),
        heap,
    );
    bv2.bun_watcher = dev.bun_watcher;
    bv2.asynchronous = true;

    // Reset both incremental graphs under the safety lock before bundling.
    {
        dev.graph_safety_lock.lock();
        defer dev.graph_safety_lock.unlock();

        dev.client_graph.reset();
        dev.server_graph.reset();
    }

    const start_data = try bv2.startFromBakeDevServer(entry_points);
    // Take ownership of the queued requests; `next_bundle` starts fresh.
    dev.current_bundle = .{
        .bv2 = bv2,
        .timer = timer,
        .start_data = start_data,
        .had_reload_event = had_reload_event,
        .requests = dev.next_bundle.requests,
        .resolution_failure_entries = .{},
    };
    dev.next_bundle.requests = .{};
}
|
|
|
|
/// Folds asynchronous resolution failures into the incremental graphs, and
/// prints anything that unexpectedly leaked into `dev.log`.
pub fn prepareAndLogResolutionFailures(dev: *DevServer) !void {
    // Since resolution failures can be asynchronous, their logs are not
    // inserted until the very end of the bundle.
    const failures = dev.current_bundle.?.resolution_failure_entries;
    for (failures.keys(), failures.values()) |owner, *failure_log| {
        if (!failure_log.hasErrors()) continue;
        switch (owner.decode()) {
            .client => |file| try dev.client_graph.insertFailure(.index, file, failure_log, false),
            .server => |file| try dev.server_graph.insertFailure(.index, file, failure_log, true),
            .none, .route => unreachable,
        }
    }

    // Theoretically, it shouldn't be possible for errors to leak into dev.log,
    // but just in case that happens, they can be printed out.
    const log = &dev.log;
    if (log.hasErrors() and log.msgs.items.len > 0) {
        if (Environment.isDebug) {
            Output.debugWarn("dev.log should not be written into when using DevServer", .{});
        }
        log.print(Output.errorWriter()) catch {};
    }
}
|
|
|
|
/// After failures are inserted into the IncrementalGraphs, traces them to
/// their routes and broadcasts an `.errors` message to connected clients.
///
/// Wire format of the payload (little-endian):
///   - 1 byte: `MessageId.errors`
///   - u32: number of removed failures, followed by one encoded owner id each
///   - then, for each added failure: its pre-serialized `data` bytes
fn indexFailures(dev: *DevServer) !void {
    // After inserting failures into the IncrementalGraphs, they are traced to their routes.
    var sfa_state = std.heap.stackFallback(65536, dev.allocator);
    const sfa = sfa_state.get();

    if (dev.incremental_result.failures_added.items.len > 0) {
        // Precompute the exact payload size so `initCapacity` suffices.
        var total_len: usize = @sizeOf(MessageId) + @sizeOf(u32);

        for (dev.incremental_result.failures_added.items) |fail| {
            total_len += fail.data.len;
        }

        total_len += dev.incremental_result.failures_removed.items.len * @sizeOf(u32);

        var gts = try dev.initGraphTraceState(sfa, 0);
        defer gts.deinit(sfa);

        var payload = try std.ArrayList(u8).initCapacity(sfa, total_len);
        defer payload.deinit();
        payload.appendAssumeCapacity(MessageId.errors.char());
        const w = payload.writer();

        try w.writeInt(u32, @intCast(dev.incremental_result.failures_removed.items.len), .little);

        // Removed failures are released as their owner ids are written.
        for (dev.incremental_result.failures_removed.items) |removed| {
            try w.writeInt(u32, @bitCast(removed.getOwner().encode()), .little);
            removed.deinit(dev);
        }

        for (dev.incremental_result.failures_added.items) |added| {
            try w.writeAll(added.data);

            // Trace each failure through its graph so affected routes are flagged.
            switch (added.getOwner()) {
                .none, .route => unreachable,
                .server => |index| try dev.server_graph.traceDependencies(index, &gts, .no_stop, index),
                .client => |index| try dev.client_graph.traceDependencies(index, &gts, .no_stop, index),
            }
        }

        // Mark every affected framework route (and optionally its children)
        // as possibly failing so requests re-check before serving.
        for (dev.incremental_result.framework_routes_affected.items) |entry| {
            if (dev.router.routePtr(entry.route_index).bundle.unwrap()) |index| {
                dev.routeBundlePtr(index).server_state = .possible_bundling_failures;
            }
            if (entry.should_recurse_when_visiting)
                dev.markAllRouteChildrenFailed(entry.route_index);
        }

        for (dev.incremental_result.html_routes_soft_affected.items) |index| {
            dev.routeBundlePtr(index).server_state = .possible_bundling_failures;
        }

        for (dev.incremental_result.html_routes_hard_affected.items) |index| {
            dev.routeBundlePtr(index).server_state = .possible_bundling_failures;
        }

        dev.publish(.errors, payload.items, .binary);
    } else if (dev.incremental_result.failures_removed.items.len > 0) {
        // No additions: only broadcast the removed-owner list.
        var payload = try std.ArrayList(u8).initCapacity(sfa, @sizeOf(MessageId) + @sizeOf(u32) + dev.incremental_result.failures_removed.items.len * @sizeOf(u32));
        defer payload.deinit();
        payload.appendAssumeCapacity(MessageId.errors.char());
        const w = payload.writer();

        try w.writeInt(u32, @intCast(dev.incremental_result.failures_removed.items.len), .little);

        for (dev.incremental_result.failures_removed.items) |removed| {
            try w.writeInt(u32, @bitCast(removed.getOwner().encode()), .little);
            removed.deinit(dev);
        }

        dev.publish(.errors, payload.items, .binary);
    }

    dev.incremental_result.failures_removed.clearRetainingCapacity();
}
|
|
|
|
/// Used to generate the entry point. Unlike incremental patches, this always
/// contains all needed files for a route.
///
/// Caller owns the returned slice. Requires the route to be fully loaded
/// (`server_state == .loaded`) and no cached client bundle to exist.
fn generateClientBundle(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM![]u8 {
    assert(route_bundle.client_bundle == null);
    assert(route_bundle.server_state == .loaded); // page is unfit to trace/load

    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();

    // Prepare bitsets
    var sfa_state = std.heap.stackFallback(65536, dev.allocator);
    const sfa = sfa_state.get();
    var gts = try dev.initGraphTraceState(sfa, 0);
    defer gts.deinit(sfa);

    // Run tracing
    dev.client_graph.reset();
    try dev.traceAllRouteImports(route_bundle, &gts, .find_client_modules);

    // Include React Fast Refresh only if its runtime module is present and
    // not stale; otherwise the bundle is emitted without it.
    var react_fast_refresh_id: []const u8 = "";
    if (dev.framework.react_fast_refresh) |rfr| brk: {
        const rfr_index = dev.client_graph.getFileIndex(rfr.import_source) orelse
            break :brk;
        if (!dev.client_graph.stale_files.isSet(rfr_index.get())) {
            try dev.client_graph.traceImports(rfr_index, &gts, .find_client_modules);
            react_fast_refresh_id = rfr.import_source;
        }
    }

    // Determine the client-side entry point file, if any.
    const client_file: ?IncrementalGraph(.client).FileIndex = switch (route_bundle.data) {
        .framework => |fw| if (dev.router.typePtr(dev.router.routePtr(fw.route_index).type).client_file.unwrap()) |ofi|
            fromOpaqueFileId(.client, ofi)
        else
            null,
        .html => |html| html.bundled_file,
    };

    // Insert the source map
    const script_id = route_bundle.sourceMapId();
    mapLog("inc {x}, 1 for generateClientBundle", .{script_id.get()});
    switch (try dev.source_maps.putOrIncrementRefCount(script_id, 1)) {
        .uninitialized => |entry| {
            // First reference: build and store the source map entry.
            errdefer dev.source_maps.unref(script_id);
            // The trace state is no longer needed; reclaim its memory
            // before building the source map.
            gts.clearAndFree(sfa);
            var arena = std.heap.ArenaAllocator.init(sfa);
            defer arena.deinit();
            try dev.client_graph.takeSourceMap(arena.allocator(), dev.allocator, entry);
        },
        .shared => {},
    }

    const client_bundle = dev.client_graph.takeJSBundle(&.{
        .kind = .initial_response,
        .initial_response_entry_point = if (client_file) |index|
            dev.client_graph.bundled_files.keys()[index.get()]
        else
            "",
        .react_refresh_entry_point = react_fast_refresh_id,
        .script_id = script_id,
        .console_log = dev.shouldReceiveConsoleLogFromBrowser(),
    });

    return client_bundle;
}
|
|
|
|
/// Builds a JS array of `/_bun/asset/<hex>.css`-style URLs for every CSS file
/// reachable from a framework route. Only valid for `.framework` routes whose
/// CSS array is not already cached and whose bundle is fully loaded.
fn generateCssJSArray(dev: *DevServer, route_bundle: *RouteBundle) bun.JSError!jsc.JSValue {
    assert(route_bundle.data == .framework); // a jsc.JSValue has no purpose, and therefore isn't implemented.
    if (Environment.allow_assert) assert(!route_bundle.data.framework.cached_css_file_array.has());
    assert(route_bundle.server_state == .loaded); // page is unfit to load

    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();

    // Prepare bitsets
    var sfa_state = std.heap.stackFallback(65536, dev.allocator);

    const sfa = sfa_state.get();
    var gts = try dev.initGraphTraceState(sfa, 0);
    defer gts.deinit(sfa);

    // Run tracing
    dev.client_graph.reset();
    try dev.traceAllRouteImports(route_bundle, &gts, .find_css);

    const names = dev.client_graph.current_css_files.items;
    const arr = try jsc.JSArray.createEmpty(dev.vm.global, names.len);
    for (names, 0..) |item, i| {
        // Buffer sized for: prefix + 16 hex chars (u64) + "/" and ".css".
        var buf: [asset_prefix.len + @sizeOf(u64) * 2 + "/.css".len]u8 = undefined;
        const path = std.fmt.bufPrint(&buf, asset_prefix ++ "/{s}.css", .{
            &std.fmt.bytesToHex(std.mem.asBytes(&item), .lower),
        }) catch unreachable;
        const str = bun.String.cloneUTF8(path);
        defer str.deref();
        try arr.putIndex(dev.vm.global, @intCast(i), str.toJS(dev.vm.global));
    }
    return arr;
}
|
|
|
|
/// Traces every file a route depends on. For framework routes this covers
/// both framework entry points, the page file, and every ancestor layout;
/// for HTML routes, just the bundled HTML file.
fn traceAllRouteImports(dev: *DevServer, route_bundle: *RouteBundle, gts: *GraphTraceState, comptime goal: TraceImportGoal) !void {
    switch (route_bundle.data) {
        .framework => |fw| {
            var current = dev.router.routePtr(fw.route_index);
            const route_type = dev.router.typePtr(current.type);

            // Both framework entry points are considered.
            try dev.server_graph.traceImports(fromOpaqueFileId(.server, route_type.server_file), gts, .find_css);
            if (route_type.client_file.unwrap()) |client_id| {
                try dev.client_graph.traceImports(fromOpaqueFileId(.client, client_id), gts, goal);
            }

            // The route's page file is considered.
            if (current.file_page.unwrap()) |page_id| {
                try dev.server_graph.traceImports(fromOpaqueFileId(.server, page_id), gts, goal);
            }

            // Walk upward, considering the layout of this route and of
            // every ancestor.
            while (true) {
                if (current.file_layout.unwrap()) |layout_id| {
                    try dev.server_graph.traceImports(fromOpaqueFileId(.server, layout_id), gts, goal);
                }
                const parent_index = current.parent.unwrap() orelse break;
                current = dev.router.routePtr(parent_index);
            }
        },
        .html => |html| try dev.client_graph.traceImports(html.bundled_file, gts, goal),
    }
}
|
|
|
|
/// Converts a list of server-graph file indices into a JS array of paths
/// relative to the project root, or `null` when the list is empty.
fn makeArrayForServerComponentsPatch(dev: *DevServer, global: *jsc.JSGlobalObject, items: []const IncrementalGraph(.server).FileIndex) bun.JSError!JSValue {
    if (items.len == 0) return .null;

    const file_paths = dev.server_graph.bundled_files.keys();
    const result = try jsc.JSArray.createEmpty(global, items.len);
    for (items, 0..) |file_index, slot| {
        // Pooled buffer only lives for this iteration.
        const path_buf = bun.path_buffer_pool.get();
        defer bun.path_buffer_pool.put(path_buf);
        const relative = dev.relativePath(path_buf, file_paths[file_index.get()]);
        const name = bun.String.cloneUTF8(relative);
        defer name.deref();
        try result.putIndex(global, @intCast(slot), name.toJS(global));
    }
    return result;
}
|
|
|
|
/// Shared state threaded through `finalizeBundle` while indexing one bundle's
/// output into the IncrementalGraphs. All slices alias BundleV2's arrays and
/// are only valid for the duration of that bundle.
pub const HotUpdateContext = struct {
    /// bundle_v2.Graph.input_files.items(.source)
    sources: []bun.logger.Source,
    /// bundle_v2.Graph.ast.items(.import_records)
    import_records: []bun.ImportRecord.List,
    /// bundle_v2.Graph.server_component_boundaries.slice()
    scbs: bun.ast.ServerComponentBoundary.List.Slice,
    /// bundle_v2.Graph.input_files.items(.loader)
    loaders: []bun.options.Loader,
    /// Which files have a server-component boundary.
    server_to_client_bitset: DynamicBitSetUnmanaged,
    /// Used to reduce calls to the IncrementalGraph hash table.
    ///
    /// Caller initializes a slice with `sources.len * 2` items
    /// all initialized to `std.math.maxInt(u32)`
    ///
    /// The first half of this slice is for the client graph,
    /// second half is for server. Interact with this via
    /// `getCachedIndex`
    resolved_index_cache: []u32,
    /// Used to tell if the server should replace or append import records.
    server_seen_bit_set: DynamicBitSetUnmanaged,
    // Graph trace state; assigned after construction in `finalizeBundle`.
    gts: *GraphTraceState,

    /// Returns a pointer into `resolved_index_cache` for the given bundler
    /// source index on the given side. The value is `.none` until the file
    /// has been resolved into an IncrementalGraph file index.
    pub fn getCachedIndex(
        rc: *const HotUpdateContext,
        comptime side: bake.Side,
        i: bun.ast.Index,
    ) *IncrementalGraph(side).FileIndex.Optional {
        // Client entries occupy the first half of the cache, server the second.
        const start = switch (side) {
            .client => 0,
            .server => rc.sources.len,
        };

        const subslice = rc.resolved_index_cache[start..][0..rc.sources.len];

        // Safe to reinterpret the u32 cache slots as the typed optional index.
        comptime assert(@alignOf(IncrementalGraph(side).FileIndex.Optional) == @alignOf(u32));
        comptime assert(@sizeOf(IncrementalGraph(side).FileIndex.Optional) == @sizeOf(u32));
        return @ptrCast(&subslice[i.get()]);
    }
};
|
|
|
|
/// Called at the end of BundleV2 to index bundle contents into the `IncrementalGraph`s
|
|
/// This function does not recover DevServer state if it fails (allocation failure)
|
|
pub fn finalizeBundle(
|
|
dev: *DevServer,
|
|
bv2: *bun.bundle_v2.BundleV2,
|
|
result: *const bun.bundle_v2.DevServerOutput,
|
|
) bun.JSError!void {
|
|
assert(dev.magic == .valid);
|
|
var had_sent_hmr_event = false;
|
|
defer {
|
|
var heap = bv2.graph.heap;
|
|
bv2.deinitWithoutFreeingArena();
|
|
dev.current_bundle = null;
|
|
dev.log.clearAndFree();
|
|
heap.deinit();
|
|
|
|
dev.assets.reindexIfNeeded(dev.allocator) catch {
|
|
// not fatal: the assets may be reindexed some time later.
|
|
};
|
|
|
|
// Signal for testing framework where it is in synchronization
|
|
if (dev.testing_batch_events == .enable_after_bundle) {
|
|
dev.testing_batch_events = .{ .enabled = .empty };
|
|
dev.publish(.testing_watch_synchronization, &.{
|
|
MessageId.testing_watch_synchronization.char(),
|
|
0,
|
|
}, .binary);
|
|
} else {
|
|
dev.publish(.testing_watch_synchronization, &.{
|
|
MessageId.testing_watch_synchronization.char(),
|
|
if (had_sent_hmr_event) 4 else 3,
|
|
}, .binary);
|
|
}
|
|
|
|
dev.startNextBundleIfPresent();
|
|
|
|
// Unref the ref added in `startAsyncBundle`
|
|
if (dev.server) |server| server.onStaticRequestComplete();
|
|
}
|
|
const current_bundle = &dev.current_bundle.?;
|
|
defer {
|
|
if (current_bundle.requests.first != null) {
|
|
// cannot be an assertion because in the case of error.OutOfMemory, the request list was not drained.
|
|
Output.debug("current_bundle.requests.first != null. this leaves pending requests without an error page!", .{});
|
|
}
|
|
while (current_bundle.requests.popFirst()) |node| {
|
|
const req = &node.data;
|
|
defer req.deref();
|
|
req.abort();
|
|
}
|
|
}
|
|
|
|
dev.graph_safety_lock.lock();
|
|
defer dev.graph_safety_lock.unlock();
|
|
|
|
const js_chunk = result.jsPseudoChunk();
|
|
const input_file_sources = bv2.graph.input_files.items(.source);
|
|
const input_file_loaders = bv2.graph.input_files.items(.loader);
|
|
const import_records = bv2.graph.ast.items(.import_records);
|
|
const targets = bv2.graph.ast.items(.target);
|
|
const scbs = bv2.graph.server_component_boundaries.slice();
|
|
|
|
var sfa = std.heap.stackFallback(65536, bv2.graph.allocator);
|
|
const stack_alloc = sfa.get();
|
|
var scb_bitset = try bun.bit_set.DynamicBitSetUnmanaged.initEmpty(stack_alloc, input_file_sources.len);
|
|
for (
|
|
scbs.list.items(.source_index),
|
|
scbs.list.items(.ssr_source_index),
|
|
scbs.list.items(.reference_source_index),
|
|
) |source_index, ssr_index, ref_index| {
|
|
scb_bitset.set(source_index);
|
|
scb_bitset.set(ref_index);
|
|
if (ssr_index < scb_bitset.bit_length)
|
|
scb_bitset.set(ssr_index);
|
|
}
|
|
|
|
const resolved_index_cache = try bv2.graph.allocator.alloc(u32, input_file_sources.len * 2);
|
|
@memset(resolved_index_cache, @intFromEnum(IncrementalGraph(.server).FileIndex.Optional.none));
|
|
|
|
var ctx: bun.bake.DevServer.HotUpdateContext = .{
|
|
.import_records = import_records,
|
|
.sources = input_file_sources,
|
|
.loaders = input_file_loaders,
|
|
.scbs = scbs,
|
|
.server_to_client_bitset = scb_bitset,
|
|
.resolved_index_cache = resolved_index_cache,
|
|
.server_seen_bit_set = undefined,
|
|
.gts = undefined,
|
|
};
|
|
|
|
const quoted_source_contents: []?[]u8 = bv2.linker.graph.files.items(.quoted_source_contents);
|
|
// Pass 1, update the graph's nodes, resolving every bundler source
|
|
// index into its `IncrementalGraph(...).FileIndex`
|
|
for (
|
|
js_chunk.content.javascript.parts_in_chunk_in_order,
|
|
js_chunk.compile_results_for_chunk,
|
|
) |part_range, compile_result| {
|
|
const index = part_range.source_index;
|
|
const source_map: SourceMap.Chunk = compile_result.sourceMapChunk() orelse brk: {
|
|
// The source map is `null` if empty
|
|
bun.assert(compile_result.javascript.result == .result);
|
|
bun.assert(dev.server_transpiler.options.source_map != .none);
|
|
bun.assert(!part_range.source_index.isRuntime());
|
|
break :brk .initEmpty();
|
|
};
|
|
// TODO: investigate why linker.files is not indexed by linker's index
|
|
// const linker_index = bv2.linker.graph.stable_source_indices[index.get()];
|
|
// const quoted_contents = quoted_source_contents[linker_index];
|
|
const quoted_contents = quoted_source_contents[part_range.source_index.get()];
|
|
switch (targets[part_range.source_index.get()].bakeGraph()) {
|
|
inline else => |graph| try (switch (graph) {
|
|
.client => dev.client_graph,
|
|
else => dev.server_graph,
|
|
}).receiveChunk(
|
|
&ctx,
|
|
index,
|
|
.{
|
|
.js = .{
|
|
.code = compile_result.javascript.code(),
|
|
.code_allocator = compile_result.javascript.allocator(),
|
|
.source_map = .{
|
|
.chunk = source_map,
|
|
.escaped_source = quoted_contents,
|
|
},
|
|
},
|
|
},
|
|
graph == .ssr,
|
|
),
|
|
}
|
|
}
|
|
|
|
for (result.cssChunks(), result.css_file_list.values()) |*chunk, metadata| {
|
|
assert(chunk.content == .css);
|
|
|
|
const index = bun.ast.Index.init(chunk.entry_point.source_index);
|
|
|
|
const code = try chunk.intermediate_output.code(
|
|
dev.allocator,
|
|
&bv2.graph,
|
|
&bv2.linker.graph,
|
|
"THIS_SHOULD_NEVER_BE_EMITTED_IN_DEV_MODE",
|
|
chunk,
|
|
result.chunks,
|
|
null,
|
|
false,
|
|
);
|
|
|
|
// Create an entry for this file.
|
|
const key = ctx.sources[index.get()].path.keyForIncrementalGraph();
|
|
// const hash = brk: {
|
|
// var hash: ContentHasher.Hash = .init(0x9a4e); // arbitrary seed
|
|
// hash.update(key);
|
|
// hash.update(code.buffer);
|
|
// break :brk hash.final();
|
|
// };
|
|
// TODO: use a hash mix with the first half being a path hash (to identify files) and
|
|
// the second half to be the content hash (to know if the file has changed)
|
|
const hash = bun.hash(key);
|
|
const asset_index = try dev.assets.replacePath(
|
|
key,
|
|
&.fromOwnedSlice(dev.allocator, code.buffer),
|
|
&.css,
|
|
hash,
|
|
);
|
|
// Later code needs to retrieve the CSS content
|
|
// The hack is to use `entry_point_id`, which is otherwise unused, to store an index.
|
|
chunk.entry_point.entry_point_id = asset_index.get();
|
|
|
|
// Track css files that look like tailwind files.
|
|
if (dev.has_tailwind_plugin_hack) |*map| {
|
|
const first_1024 = code.buffer[0..@min(code.buffer.len, 1024)];
|
|
if (std.mem.indexOf(u8, first_1024, "tailwind") != null) {
|
|
const entry = try map.getOrPut(dev.allocator, key);
|
|
if (!entry.found_existing) {
|
|
entry.key_ptr.* = try dev.allocator.dupe(u8, key);
|
|
}
|
|
} else {
|
|
if (map.fetchSwapRemove(key)) |entry| {
|
|
dev.allocator.free(entry.key);
|
|
}
|
|
}
|
|
}
|
|
|
|
try dev.client_graph.receiveChunk(&ctx, index, .{ .css = hash }, false);
|
|
|
|
// If imported on server, there needs to be a server-side file entry
|
|
// so that edges can be attached. When a file is only imported on
|
|
// the server, this file is used to trace the CSS to the route.
|
|
if (metadata.imported_on_server) {
|
|
try dev.server_graph.insertCssFileOnServer(
|
|
&ctx,
|
|
index,
|
|
key,
|
|
);
|
|
}
|
|
}
|
|
|
|
for (result.htmlChunks()) |*chunk| {
|
|
const index = bun.ast.Index.init(chunk.entry_point.source_index);
|
|
const compile_result = chunk.compile_results_for_chunk[0].html;
|
|
const generated_js = try dev.generateJavaScriptCodeForHTMLFile(
|
|
index,
|
|
import_records,
|
|
input_file_sources,
|
|
bv2.graph.input_files.items(.loader),
|
|
);
|
|
try dev.client_graph.receiveChunk(
|
|
&ctx,
|
|
index,
|
|
.{ .js = .{
|
|
.code = generated_js,
|
|
.code_allocator = dev.allocator,
|
|
.source_map = null,
|
|
} },
|
|
false,
|
|
);
|
|
const client_index = ctx.getCachedIndex(.client, index).*.unwrap() orelse @panic("unresolved index");
|
|
const route_bundle_index = dev.client_graph.htmlRouteBundleIndex(client_index);
|
|
const route_bundle = dev.routeBundlePtr(route_bundle_index);
|
|
assert(route_bundle.data.html.bundled_file == client_index);
|
|
const html = &route_bundle.data.html;
|
|
|
|
if (html.cached_response) |blob| {
|
|
blob.deref();
|
|
html.cached_response = null;
|
|
route_bundle.invalidateClientBundle(dev);
|
|
}
|
|
if (html.bundled_html_text) |slice| {
|
|
dev.allocator.free(slice);
|
|
}
|
|
dev.allocation_scope.assertOwned(compile_result.code);
|
|
html.bundled_html_text = compile_result.code;
|
|
html.script_injection_offset = .init(compile_result.script_injection_offset);
|
|
|
|
chunk.entry_point.entry_point_id = @intCast(route_bundle_index.get());
|
|
}
|
|
|
|
var gts = try dev.initGraphTraceState(
|
|
bv2.graph.allocator,
|
|
if (result.cssChunks().len > 0) bv2.graph.input_files.len else 0,
|
|
);
|
|
defer gts.deinit(bv2.graph.allocator);
|
|
ctx.gts = >s;
|
|
ctx.server_seen_bit_set = try bun.bit_set.DynamicBitSetUnmanaged.initEmpty(bv2.graph.allocator, dev.server_graph.bundled_files.count());
|
|
|
|
dev.incremental_result.had_adjusted_edges = false;
|
|
|
|
try prepareAndLogResolutionFailures(dev);
|
|
|
|
// Pass 2, update the graph's edges by performing import diffing on each
|
|
// changed file, removing dependencies. This pass also flags what routes
|
|
// have been modified.
|
|
for (js_chunk.content.javascript.parts_in_chunk_in_order) |part_range| {
|
|
switch (targets[part_range.source_index.get()].bakeGraph()) {
|
|
.server, .ssr => try dev.server_graph.processChunkDependencies(&ctx, .normal, part_range.source_index, bv2.graph.allocator),
|
|
.client => try dev.client_graph.processChunkDependencies(&ctx, .normal, part_range.source_index, bv2.graph.allocator),
|
|
}
|
|
}
|
|
for (result.htmlChunks()) |*chunk| {
|
|
const index = bun.ast.Index.init(chunk.entry_point.source_index);
|
|
try dev.client_graph.processChunkDependencies(&ctx, .normal, index, bv2.graph.allocator);
|
|
}
|
|
for (result.cssChunks()) |*chunk| {
|
|
const entry_index = bun.ast.Index.init(chunk.entry_point.source_index);
|
|
try dev.client_graph.processChunkDependencies(&ctx, .css, entry_index, bv2.graph.allocator);
|
|
}
|
|
|
|
// Index all failed files now that the incremental graph has been updated.
|
|
if (dev.incremental_result.failures_removed.items.len > 0 or
|
|
dev.incremental_result.failures_added.items.len > 0)
|
|
{
|
|
had_sent_hmr_event = true;
|
|
}
|
|
try dev.indexFailures();
|
|
|
|
try dev.client_graph.ensureStaleBitCapacity(false);
|
|
try dev.server_graph.ensureStaleBitCapacity(false);
|
|
|
|
dev.generation +%= 1;
|
|
if (Environment.enable_logs) {
|
|
debug.log("Bundle Round {d}: {d} server, {d} client, {d} ms", .{
|
|
dev.generation,
|
|
dev.server_graph.current_chunk_parts.items.len,
|
|
dev.client_graph.current_chunk_parts.items.len,
|
|
@divFloor(current_bundle.timer.read(), std.time.ns_per_ms),
|
|
});
|
|
}
|
|
|
|
// Load all new chunks into the server runtime.
|
|
if (!dev.frontend_only and dev.server_graph.current_chunk_len > 0) {
|
|
const server_bundle = try dev.server_graph.takeJSBundle(&.{ .kind = .hmr_chunk });
|
|
defer dev.allocator.free(server_bundle);
|
|
|
|
const server_modules = c.BakeLoadServerHmrPatch(@ptrCast(dev.vm.global), bun.String.cloneLatin1(server_bundle)) catch |err| {
|
|
// No user code has been evaluated yet, since everything is to
|
|
// be wrapped in a function clousure. This means that the likely
|
|
// error is going to be a syntax error, or other mistake in the
|
|
// bundler.
|
|
dev.vm.printErrorLikeObjectToConsole(dev.vm.global.takeException(err));
|
|
@panic("Error thrown while evaluating server code. This is always a bug in the bundler.");
|
|
};
|
|
const errors = dev.server_register_update_callback.get().?.call(
|
|
dev.vm.global,
|
|
dev.vm.global.toJSValue(),
|
|
&.{
|
|
server_modules,
|
|
try dev.makeArrayForServerComponentsPatch(dev.vm.global, dev.incremental_result.client_components_added.items),
|
|
try dev.makeArrayForServerComponentsPatch(dev.vm.global, dev.incremental_result.client_components_removed.items),
|
|
},
|
|
) catch |err| {
|
|
// One module replacement error should NOT prevent follow-up
|
|
// module replacements to fail. It is the HMR runtime's
|
|
// responsibility to collect all module load errors, and
|
|
// bubble them up.
|
|
dev.vm.printErrorLikeObjectToConsole(dev.vm.global.takeException(err));
|
|
@panic("Error thrown in Hot-module-replacement code. This is always a bug in the HMR runtime.");
|
|
};
|
|
_ = errors; // TODO:
|
|
}
|
|
|
|
var route_bits = try DynamicBitSetUnmanaged.initEmpty(stack_alloc, dev.route_bundles.items.len);
|
|
defer route_bits.deinit(stack_alloc);
|
|
var route_bits_client = try DynamicBitSetUnmanaged.initEmpty(stack_alloc, dev.route_bundles.items.len);
|
|
defer route_bits_client.deinit(stack_alloc);
|
|
|
|
var has_route_bits_set = false;
|
|
|
|
var hot_update_payload_sfa = std.heap.stackFallback(65536, dev.allocator);
|
|
var hot_update_payload = std.ArrayList(u8).initCapacity(hot_update_payload_sfa.get(), 65536) catch
|
|
unreachable; // enough space
|
|
defer hot_update_payload.deinit();
|
|
hot_update_payload.appendAssumeCapacity(MessageId.hot_update.char());
|
|
|
|
// The writer used for the hot_update payload
|
|
const w = hot_update_payload.writer();
|
|
|
|
// It was discovered that if a tree falls with nobody around it, it does not
|
|
// make any sound. Let's avoid writing into `w` if no sockets are open.
|
|
const hot_update_subscribers = dev.numSubscribers(.hot_update);
|
|
const will_hear_hot_update = hot_update_subscribers > 0;
|
|
|
|
// This list of routes affected excludes client code. This means changing
|
|
// a client component wont count as a route to trigger a reload on.
|
|
//
|
|
// A second trace is required to determine what routes had changed bundles,
|
|
// since changing a layout affects all child routes. Additionally, routes
|
|
// that do not have a bundle will not be cleared (as there is nothing to
|
|
// clear for those)
|
|
if (will_hear_hot_update and
|
|
current_bundle.had_reload_event and
|
|
(dev.incremental_result.framework_routes_affected.items.len +
|
|
dev.incremental_result.html_routes_hard_affected.items.len) > 0 and
|
|
dev.bundling_failures.count() == 0)
|
|
{
|
|
has_route_bits_set = true;
|
|
|
|
// A bit-set is used to avoid duplicate entries. This is not a problem
|
|
// with `dev.incremental_result.framework_routes_affected`
|
|
for (dev.incremental_result.framework_routes_affected.items) |request| {
|
|
const route = dev.router.routePtr(request.route_index);
|
|
if (route.bundle.unwrap()) |id| route_bits.set(id.get());
|
|
if (request.should_recurse_when_visiting) {
|
|
markAllRouteChildren(&dev.router, 1, .{&route_bits}, request.route_index);
|
|
}
|
|
}
|
|
for (dev.incremental_result.html_routes_hard_affected.items) |route_bundle_index| {
|
|
route_bits.set(route_bundle_index.get());
|
|
route_bits_client.set(route_bundle_index.get());
|
|
}
|
|
|
|
// List 1
|
|
var it = route_bits.iterator(.{ .kind = .set });
|
|
while (it.next()) |bundled_route_index| {
|
|
const bundle = &dev.route_bundles.items[bundled_route_index];
|
|
if (bundle.active_viewers == 0) continue;
|
|
try w.writeInt(i32, @intCast(bundled_route_index), .little);
|
|
}
|
|
}
|
|
try w.writeInt(i32, -1, .little);
|
|
|
|
// When client component roots get updated, the `client_components_affected`
|
|
// list contains the server side versions of these roots. These roots are
|
|
// traced to the routes so that the client-side bundles can be properly
|
|
// invalidated.
|
|
if (dev.incremental_result.client_components_affected.items.len > 0) {
|
|
has_route_bits_set = true;
|
|
|
|
dev.incremental_result.framework_routes_affected.clearRetainingCapacity();
|
|
dev.incremental_result.html_routes_hard_affected.clearRetainingCapacity();
|
|
dev.incremental_result.html_routes_soft_affected.clearRetainingCapacity();
|
|
gts.clear();
|
|
|
|
for (dev.incremental_result.client_components_affected.items) |index| {
|
|
try dev.server_graph.traceDependencies(index, >s, .no_stop, index);
|
|
}
|
|
|
|
// A bit-set is used to avoid duplicate entries. This is not a problem
|
|
// with `dev.incremental_result.routes_affected`
|
|
for (dev.incremental_result.framework_routes_affected.items) |request| {
|
|
const route = dev.router.routePtr(request.route_index);
|
|
if (route.bundle.unwrap()) |id| {
|
|
route_bits.set(id.get());
|
|
route_bits_client.set(id.get());
|
|
}
|
|
if (request.should_recurse_when_visiting) {
|
|
markAllRouteChildren(&dev.router, 2, .{ &route_bits, &route_bits_client }, request.route_index);
|
|
}
|
|
}
|
|
|
|
// Free old bundles
|
|
var it = route_bits_client.iterator(.{ .kind = .set });
|
|
while (it.next()) |bundled_route_index| {
|
|
const bundle = &dev.route_bundles.items[bundled_route_index];
|
|
bundle.invalidateClientBundle(dev);
|
|
}
|
|
} else if (dev.incremental_result.html_routes_hard_affected.items.len > 0) {
|
|
// When only HTML routes were affected, there may not be any client
|
|
// components that got affected, but the bundles for these HTML routes
|
|
// are invalid now. That is why HTML routes above writes into
|
|
// `route_bits_client`.
|
|
|
|
// Free old bundles
|
|
var it = route_bits_client.iterator(.{ .kind = .set });
|
|
while (it.next()) |bundled_route_index| {
|
|
const bundle = &dev.route_bundles.items[bundled_route_index];
|
|
bundle.invalidateClientBundle(dev);
|
|
}
|
|
}
|
|
|
|
// Softly affected HTML routes only need the bundle invalidated. WebSocket
|
|
// connections don't have to be told anything.
|
|
if (dev.incremental_result.html_routes_soft_affected.items.len > 0) {
|
|
for (dev.incremental_result.html_routes_soft_affected.items) |index| {
|
|
dev.routeBundlePtr(index).invalidateClientBundle(dev);
|
|
route_bits.set(index.get());
|
|
}
|
|
has_route_bits_set = true;
|
|
}
|
|
|
|
// `route_bits` will have all of the routes that were modified. If any of
|
|
// these have active viewers, DevServer should inform them of CSS attachments. These
|
|
// route bundles also need to be invalidated of their css attachments.
|
|
if (has_route_bits_set and
|
|
(will_hear_hot_update or dev.incremental_result.had_adjusted_edges))
|
|
{
|
|
var it = route_bits.iterator(.{ .kind = .set });
|
|
// List 2
|
|
while (it.next()) |i| {
|
|
const route_bundle = dev.routeBundlePtr(RouteBundle.Index.init(@intCast(i)));
|
|
if (dev.incremental_result.had_adjusted_edges) {
|
|
switch (route_bundle.data) {
|
|
.framework => |*fw_bundle| fw_bundle.cached_css_file_array.clearWithoutDeallocation(),
|
|
.html => |*html| if (html.cached_response) |blob| {
|
|
blob.deref();
|
|
html.cached_response = null;
|
|
},
|
|
}
|
|
}
|
|
if (route_bundle.active_viewers == 0 or !will_hear_hot_update) continue;
|
|
try w.writeInt(i32, @intCast(i), .little);
|
|
|
|
// If no edges were changed, then it is impossible to
|
|
// change the list of CSS files.
|
|
if (dev.incremental_result.had_adjusted_edges) {
|
|
gts.clear();
|
|
dev.client_graph.current_css_files.clearRetainingCapacity();
|
|
try dev.traceAllRouteImports(route_bundle, >s, .find_css);
|
|
const css_ids = dev.client_graph.current_css_files.items;
|
|
|
|
try w.writeInt(i32, @intCast(css_ids.len), .little);
|
|
for (css_ids) |css_id| {
|
|
try w.writeAll(&std.fmt.bytesToHex(std.mem.asBytes(&css_id), .lower));
|
|
}
|
|
} else {
|
|
try w.writeInt(i32, -1, .little);
|
|
}
|
|
}
|
|
}
|
|
try w.writeInt(i32, -1, .little);
|
|
|
|
const css_chunks = result.cssChunks();
|
|
if (will_hear_hot_update) {
|
|
if (dev.client_graph.current_chunk_len > 0 or css_chunks.len > 0) {
|
|
// Send CSS mutations
|
|
const asset_values = dev.assets.files.values();
|
|
try w.writeInt(u32, @intCast(css_chunks.len), .little);
|
|
const sources = bv2.graph.input_files.items(.source);
|
|
for (css_chunks) |chunk| {
|
|
const key = sources[chunk.entry_point.source_index].path.keyForIncrementalGraph();
|
|
try w.writeAll(&std.fmt.bytesToHex(std.mem.asBytes(&bun.hash(key)), .lower));
|
|
const css_data = asset_values[chunk.entry_point.entry_point_id].blob.InternalBlob.bytes.items;
|
|
try w.writeInt(u32, @intCast(css_data.len), .little);
|
|
try w.writeAll(css_data);
|
|
}
|
|
|
|
// Send the JS chunk
|
|
if (dev.client_graph.current_chunk_len > 0) {
|
|
const script_id = hash: {
|
|
var source_map_hash: bun.bundle_v2.ContentHasher.Hash = .init(0x4b12); // arbitrarily different seed than what .initial_response uses
|
|
const keys = dev.client_graph.bundled_files.keys();
|
|
const values = dev.client_graph.bundled_files.values();
|
|
for (dev.client_graph.current_chunk_parts.items) |part| {
|
|
source_map_hash.update(keys[part.get()]);
|
|
const val = &values[part.get()];
|
|
if (val.flags.source_map_state == .ref) {
|
|
source_map_hash.update(val.source_map.ref.data.vlq());
|
|
}
|
|
}
|
|
// Set the bottom bit. This ensures that the resource can never be confused for a route bundle.
|
|
break :hash SourceMapStore.Key.init(source_map_hash.final() | 1);
|
|
};
|
|
var it = dev.active_websocket_connections.keyIterator();
|
|
var sockets: u32 = 0;
|
|
while (it.next()) |socket_ptr_ptr| {
|
|
const socket: *HmrSocket = socket_ptr_ptr.*;
|
|
if (socket.subscriptions.hot_update) {
|
|
const entry = socket.referenced_source_maps.getOrPut(dev.allocator, script_id) catch bun.outOfMemory();
|
|
if (!entry.found_existing) {
|
|
sockets += 1;
|
|
} else {
|
|
// Source maps are hashed, so that creating an exact
|
|
// copy of a previous source map will simply
|
|
// reference the old one.
|
|
}
|
|
entry.value_ptr.* = {};
|
|
}
|
|
}
|
|
mapLog("inc {x}, for {d} sockets", .{ script_id.get(), sockets });
|
|
const entry = switch (try dev.source_maps.putOrIncrementRefCount(script_id, sockets)) {
|
|
.uninitialized => |entry| brk: {
|
|
try dev.client_graph.takeSourceMap(bv2.graph.allocator, dev.allocator, entry);
|
|
break :brk entry;
|
|
},
|
|
.shared => |entry| entry,
|
|
};
|
|
try w.writeInt(u32, entry.overlapping_memory_cost, .little);
|
|
|
|
// Build and send the source chunk
|
|
try dev.client_graph.takeJSBundleToList(&hot_update_payload, &.{
|
|
.kind = .hmr_chunk,
|
|
.script_id = script_id,
|
|
.console_log = dev.shouldReceiveConsoleLogFromBrowser(),
|
|
});
|
|
}
|
|
} else {
|
|
try w.writeInt(i32, 0, .little);
|
|
}
|
|
|
|
dev.publish(.hot_update, hot_update_payload.items, .binary);
|
|
had_sent_hmr_event = true;
|
|
}
|
|
|
|
if (dev.incremental_result.failures_added.items.len > 0) {
|
|
dev.bundles_since_last_error = 0;
|
|
|
|
var inspector_agent = dev.inspector();
|
|
while (current_bundle.requests.popFirst()) |node| {
|
|
const req = &node.data;
|
|
defer req.deref();
|
|
|
|
const rb = dev.routeBundlePtr(req.route_bundle_index);
|
|
rb.server_state = .possible_bundling_failures;
|
|
|
|
const resp: AnyResponse = switch (req.handler) {
|
|
.aborted => continue,
|
|
.server_handler => |*saved| brk: {
|
|
const resp = saved.response;
|
|
saved.deinit();
|
|
break :brk resp;
|
|
},
|
|
.bundled_html_page => |ram| ram.response,
|
|
};
|
|
|
|
try dev.sendSerializedFailures(
|
|
resp,
|
|
dev.bundling_failures.keys(),
|
|
.bundler,
|
|
inspector_agent,
|
|
);
|
|
inspector_agent = null;
|
|
}
|
|
if (inspector_agent) |agent| {
|
|
var buf = std.ArrayList(u8).init(bun.default_allocator);
|
|
defer buf.deinit();
|
|
try dev.encodeSerializedFailures(dev.bundling_failures.keys(), &buf, agent);
|
|
}
|
|
|
|
return;
|
|
}
|
|
|
|
if (dev.bundling_failures.count() == 0) {
|
|
if (current_bundle.had_reload_event) {
|
|
const clear_terminal = !debug.isVisible();
|
|
if (clear_terminal) {
|
|
Output.disableBuffering();
|
|
Output.resetTerminalAll();
|
|
Output.enableBuffering();
|
|
}
|
|
|
|
dev.printMemoryLine();
|
|
|
|
dev.bundles_since_last_error += 1;
|
|
if (dev.bundles_since_last_error > 1) {
|
|
Output.prettyError("<cyan>[x{d}]<r> ", .{dev.bundles_since_last_error});
|
|
}
|
|
} else {
|
|
dev.bundles_since_last_error = 0;
|
|
dev.printMemoryLine();
|
|
}
|
|
|
|
const ms_elapsed = @divFloor(current_bundle.timer.read(), std.time.ns_per_ms);
|
|
|
|
Output.prettyError("<green>{s} in {d}ms<r>", .{
|
|
if (current_bundle.had_reload_event)
|
|
"Reloaded"
|
|
else
|
|
"Bundled page",
|
|
ms_elapsed,
|
|
});
|
|
|
|
// Intentionally creating a new scope here so we can limit the lifetime
|
|
// of the `relative_path_buf`
|
|
{
|
|
const relative_path_buf = bun.path_buffer_pool.get();
|
|
defer bun.path_buffer_pool.put(relative_path_buf);
|
|
|
|
// Compute a file name to display
|
|
const file_name: ?[]const u8 = if (current_bundle.had_reload_event)
|
|
if (bv2.graph.entry_points.items.len > 0)
|
|
dev.relativePath(
|
|
relative_path_buf,
|
|
bv2.graph.input_files.items(.source)[bv2.graph.entry_points.items[0].get()].path.text,
|
|
)
|
|
else
|
|
null // TODO: How does this happen
|
|
else switch (dev.routeBundlePtr(current_bundle.requests.first.?.data.route_bundle_index).data) {
|
|
.html => |html| dev.relativePath(relative_path_buf, html.html_bundle.data.bundle.data.path),
|
|
.framework => |fw| file_name: {
|
|
const route = dev.router.routePtr(fw.route_index);
|
|
const opaque_id = route.file_page.unwrap() orelse
|
|
route.file_layout.unwrap() orelse
|
|
break :file_name null;
|
|
const server_index = fromOpaqueFileId(.server, opaque_id);
|
|
const abs_path = dev.server_graph.bundled_files.keys()[server_index.get()];
|
|
break :file_name dev.relativePath(relative_path_buf, abs_path);
|
|
},
|
|
};
|
|
|
|
const total_count = bv2.graph.entry_points.items.len;
|
|
if (file_name) |name| {
|
|
Output.prettyError("<d>:<r> {s}", .{name});
|
|
if (total_count > 1) {
|
|
Output.prettyError(" <d>+ {d} more<r>", .{total_count - 1});
|
|
}
|
|
}
|
|
}
|
|
Output.prettyError("\n", .{});
|
|
Output.flush();
|
|
|
|
if (dev.inspector()) |agent| {
|
|
agent.notifyBundleComplete(dev.inspector_server_id, @floatFromInt(ms_elapsed));
|
|
}
|
|
}
|
|
|
|
// Release the lock because the underlying handler may acquire one.
|
|
dev.graph_safety_lock.unlock();
|
|
defer dev.graph_safety_lock.lock();
|
|
|
|
while (current_bundle.requests.popFirst()) |node| {
|
|
const req = &node.data;
|
|
defer req.deref();
|
|
|
|
const rb = dev.routeBundlePtr(req.route_bundle_index);
|
|
rb.server_state = .loaded;
|
|
|
|
switch (req.handler) {
|
|
.aborted => continue,
|
|
.server_handler => |saved| try dev.onFrameworkRequestWithBundle(req.route_bundle_index, .{ .saved = saved }, saved.response),
|
|
.bundled_html_page => |ram| dev.onHtmlRequestWithBundle(req.route_bundle_index, ram.response, ram.method),
|
|
}
|
|
}
|
|
}
|
|
|
|
/// Called once the in-flight bundle has completed and `dev.current_bundle`
/// has been cleared. If a reload event or route requests accumulated while
/// the previous bundle was running, this immediately starts the next bundle
/// so queued work is never left waiting.
fn startNextBundleIfPresent(dev: *DevServer) void {
    assert(dev.magic == .valid);
    // Clear the current bundle
    assert(dev.current_bundle == null);
    dev.emitVisualizerMessageIfNeeded();

    // If there were pending requests, begin another bundle.
    if (dev.next_bundle.reload_event != null or dev.next_bundle.requests.first != null) {
        var sfb = std.heap.stackFallback(4096, dev.allocator);
        const temp_alloc = sfb.get();
        var entry_points: EntryPointList = .empty;
        defer entry_points.deinit(temp_alloc);

        const is_reload, const timer = if (dev.next_bundle.reload_event) |event| brk: {
            dev.next_bundle.reload_event = null;

            // Reuse the first event's timer so the reported duration spans
            // from when the file change was first observed.
            const reload_event_timer = event.timer;

            // Drain every queued watcher event, folding each one's changed
            // file list into `entry_points` and recycling the event object
            // back to the watcher thread.
            var current = event;
            while (true) {
                current.processFileList(dev, &entry_points, temp_alloc);
                current = dev.watcher_atomics.recycleEventFromDevServer(current) orelse break;
                if (comptime Environment.isDebug) {
                    // In debug builds, verify exclusive ownership of the event.
                    assert(current.debug_mutex.tryLock());
                }
            }

            break :brk .{ true, reload_event_timer };
        } else .{ false, std.time.Timer.start() catch @panic("timers unsupported") };

        // Queue entry points for every route that requested bundling while
        // the previous bundle was in flight.
        for (dev.next_bundle.route_queue.keys()) |route_bundle_index| {
            const rb = dev.routeBundlePtr(route_bundle_index);
            rb.server_state = .bundling;
            dev.appendRouteEntryPointsIfNotStale(&entry_points, temp_alloc, route_bundle_index) catch bun.outOfMemory();
        }

        if (entry_points.set.count() > 0) {
            dev.startAsyncBundle(entry_points, is_reload, timer) catch bun.outOfMemory();
        }

        dev.next_bundle.route_queue.clearRetainingCapacity();
    }
}
|
|
|
|
/// Records a bundler parse/load failure against the owning IncrementalGraph
/// so the affected routes can surface it. File-not-found errors are treated
/// as deletions rather than failures, since resolution would have failed
/// first for a file that never existed.
///
/// Note: The log is not consumed here
pub fn handleParseTaskFailure(
    dev: *DevServer,
    err: anyerror,
    graph: bake.Graph,
    abs_path: []const u8,
    log: *const Log,
    bv2: *BundleV2,
) bun.OOM!void {
    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();

    debug.log("handleParseTaskFailure({}, .{s}, {}, {d} messages)", .{
        err,
        @tagName(graph),
        bun.fmt.quote(abs_path),
        log.msgs.items.len,
    });

    if (err == error.FileNotFound or err == error.ModuleNotFound) {
        // Special-case files being deleted. Note that if a file had never
        // existed, resolution would fail first.
        switch (graph) {
            .server, .ssr => dev.server_graph.onFileDeleted(abs_path, bv2),
            .client => dev.client_graph.onFileDeleted(abs_path, bv2),
        }
    } else {
        // The boolean flag marks whether the failure belongs to the SSR
        // variant of the server graph.
        switch (graph) {
            .server => try dev.server_graph.insertFailure(.abs_path, abs_path, log, false),
            .ssr => try dev.server_graph.insertFailure(.abs_path, abs_path, log, true),
            .client => try dev.client_graph.insertFailure(.abs_path, abs_path, log, false),
        }
    }
}
|
|
|
|
/// Return a log to write resolution failures into.
/// The log is keyed by the owning file (encoded as a `SerializedFailure.Owner`)
/// and is allocated in the current bundle's arena, so entries live only for
/// the duration of the in-flight bundle. Must only be called while a bundle
/// is active (`dev.current_bundle != null`).
pub fn getLogForResolutionFailures(dev: *DevServer, abs_path: []const u8, graph: bake.Graph) !*bun.logger.Log {
    assert(dev.current_bundle != null);
    const current_bundle = &dev.current_bundle.?;

    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();

    // `switch` on a comptime-known bool so the correct union field name
    // ("client"/"server") and graph can be selected at compile time; the
    // two IncrementalGraph instantiations are distinct types.
    const owner = switch (graph == .client) {
        inline else => |is_client| @unionInit(
            SerializedFailure.Owner,
            if (is_client) "client" else "server",
            try (if (is_client) dev.client_graph else dev.server_graph)
                .insertStale(abs_path, !is_client and graph == .ssr),
        ).encode(),
    };
    const gop = try current_bundle.resolution_failure_entries.getOrPut(current_bundle.bv2.graph.allocator, owner);
    if (!gop.found_existing) {
        gop.value_ptr.* = bun.logger.Log.init(current_bundle.bv2.graph.allocator);
    }
    return gop.value_ptr;
}
|
|
|
|
/// Result of `isFileCached`: metadata about a file already present and
/// up-to-date in an IncrementalGraph.
const CacheEntry = struct {
    kind: FileKind,
};
|
|
|
|
/// Look up `path` in the graph for `side`, returning its kind when the file
/// is present and not stale. Returns null for unknown or stale files, both
/// of which require re-bundling.
pub fn isFileCached(dev: *DevServer, path: []const u8, side: bake.Graph) ?CacheEntry {
    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();

    switch (side) {
        inline else => |comptime_side| {
            // `.server` and `.ssr` share one server-side graph. The two
            // graphs are distinct types, hence the comptime dispatch.
            const graph = switch (comptime_side) {
                .client => &dev.client_graph,
                .server, .ssr => &dev.server_graph,
            };
            const file_index = graph.bundled_files.getIndex(path) orelse
                return null; // a file that was never bundled counts as stale
            if (graph.stale_files.isSet(file_index))
                return null;
            return .{ .kind = graph.bundled_files.values()[file_index].fileKind() };
        },
    }
}
|
|
|
|
/// If the file referenced by `optional_id` is stale on the given side, append
/// it to `entry_points` so the next bundle rebuilds it. Accepts either an
/// `OpaqueFileId` or an `OpaqueFileId.Optional` (no-op when none).
fn appendOpaqueEntryPoint(
    dev: *DevServer,
    file_names: [][]const u8,
    entry_points: *EntryPointList,
    alloc: Allocator,
    comptime side: bake.Side,
    optional_id: anytype,
) !void {
    // Normalize the anytype parameter: unwrap optionals, reject other types
    // at compile time.
    const file = switch (@TypeOf(optional_id)) {
        OpaqueFileId.Optional => optional_id.unwrap() orelse return,
        OpaqueFileId => optional_id,
        else => @compileError("invalid type here"),
    };

    const file_index = fromOpaqueFileId(side, file);
    if (switch (side) {
        .server => dev.server_graph.stale_files.isSet(file_index.get()),
        .client => dev.client_graph.stale_files.isSet(file_index.get()),
    }) {
        try entry_points.appendJs(alloc, file_names[file_index.get()], side.graph());
    }
}
|
|
|
|
/// Resolve a `RouteBundle.Index` into a pointer into `dev.route_bundles`.
/// The pointer is invalidated if `route_bundles` grows.
pub fn routeBundlePtr(dev: *DevServer, idx: RouteBundle.Index) *RouteBundle {
    const bundles: []RouteBundle = dev.route_bundles.items;
    return &bundles[idx.get()];
}
|
|
|
|
/// HTTP request entry point for framework-routed requests. Tries the
/// framework router first, then the user's `Bun.serve` fetch handler,
/// finally falling back to a built-in 404.
fn onRequest(dev: *DevServer, req: *Request, resp: anytype) void {
    var params: FrameworkRouter.MatchedParams = undefined;
    if (dev.router.matchSlow(req.url(), &params)) |route_index| {
        // Route matched: ensure its bundle exists (bundling lazily if
        // needed) before the server handler runs.
        dev.ensureRouteIsBundled(
            dev.getOrPutRouteBundle(.{ .framework = route_index }) catch bun.outOfMemory(),
            .server_handler,
            req,
            AnyResponse.init(resp),
        ) catch bun.outOfMemory();
        return;
    }

    // No framework route; delegate to the user's request handler if one
    // was configured.
    if (dev.server.?.config().onRequest != .zero) {
        dev.server.?.onRequest(req, AnyResponse.init(resp));
        return;
    }

    sendBuiltInNotFound(resp);
}
|
|
|
|
/// Serve a request for an HTML bundle route, creating its RouteBundle on
/// first use and bundling it if it is not yet built.
pub fn respondForHTMLBundle(dev: *DevServer, html: *HTMLBundle.HTMLBundleRoute, req: *uws.Request, resp: AnyResponse) !void {
    const bundle_index = try dev.getOrPutRouteBundle(.{ .html = html });
    try dev.ensureRouteIsBundled(bundle_index, .bundled_html_page, req, resp);
}
|
|
|
|
/// Get the `RouteBundle.Index` for a framework route or HTML bundle,
/// creating and registering a fresh RouteBundle on first use. The index is
/// cached back into the route/HTML bundle so subsequent lookups are O(1).
fn getOrPutRouteBundle(dev: *DevServer, route: RouteBundle.UnresolvedIndex) !RouteBundle.Index {
    // Each route kind stores its cached bundle index in a different place.
    const index_location: *RouteBundle.Index.Optional = switch (route) {
        .framework => |route_index| &dev.router.routePtr(route_index).bundle,
        .html => |html| &html.dev_server_id,
    };
    if (index_location.unwrap()) |bundle_index| {
        return bundle_index;
    }

    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();

    // The new bundle's index is the current length; it is valid once the
    // append below succeeds. The HTML branch relies on knowing this index
    // before the append happens.
    const bundle_index = RouteBundle.Index.init(@intCast(dev.route_bundles.items.len));

    try dev.route_bundles.ensureUnusedCapacity(dev.allocator, 1);
    dev.route_bundles.appendAssumeCapacity(.{
        .data = switch (route) {
            .framework => |route_index| .{ .framework = .{
                .route_index = route_index,
                .evaluate_failure = null,
                .cached_module_list = .empty,
                .cached_client_bundle_url = .empty,
                .cached_css_file_array = .empty,
            } },
            .html => |html| brk: {
                // Register the HTML file in the client graph and link the
                // graph entry back to this route bundle.
                const incremental_graph_index = try dev.client_graph.insertStaleExtra(html.bundle.data.path, false, true);
                const file = &dev.client_graph.bundled_files.values()[incremental_graph_index.get()];
                file.source_map.empty.html_bundle_route_index = .init(bundle_index.get());
                break :brk .{ .html = .{
                    .html_bundle = .initRef(html),
                    .bundled_file = incremental_graph_index,
                    .script_injection_offset = .none,
                    .cached_response = null,
                    .bundled_html_text = null,
                } };
            },
        },
        .client_script_generation = std.crypto.random.int(u32),
        .server_state = .unqueued,
        .client_bundle = null,
        .active_viewers = 0,
    });
    index_location.* = bundle_index.toOptional();
    return bundle_index;
}
|
|
|
|
/// Install `html` as the fallback bundle served when no other HTML route
/// matches a request.
fn registerCatchAllHtmlRoute(dev: *DevServer, html: *HTMLBundle.HTMLBundleRoute) !void {
    const fallback_bundle = try dev.getOrPutRouteBundle(.{ .html = html });
    dev.html_router.fallback = fallback_bundle.toOptional();
}
|
|
|
|
/// Which stage of request handling produced the failures shown on the
/// serialized error page; selects the page title.
const ErrorPageKind = enum {
    /// Modules failed to bundle
    bundler,
    /// Modules failed to evaluate
    evaluation,
    /// Request handler threw
    runtime,
};
|
|
|
|
/// Append all serialized failures to `buf` as a single base64 payload, and
/// optionally forward that payload to the frontend dev-server inspector.
///
/// All failures' raw bytes are concatenated FIRST and then base64-encoded in
/// one pass. Encoding each failure separately and concatenating the base64
/// strings would be incorrect (base64 pads each fragment), which previously
/// produced corrupt payloads.
fn encodeSerializedFailures(
    dev: *const DevServer,
    failures: []const SerializedFailure,
    buf: *std.ArrayList(u8),
    inspector_agent: ?*BunFrontendDevServerAgent,
) bun.OOM!void {
    // Concatenate the raw (pre-encoding) failure bytes.
    var all_failures_len: usize = 0;
    for (failures) |fail| all_failures_len += fail.data.len;
    var all_failures = try std.ArrayListUnmanaged(u8).initCapacity(dev.allocator, all_failures_len);
    defer all_failures.deinit(dev.allocator);
    for (failures) |fail| all_failures.appendSliceAssumeCapacity(fail.data);

    // Remember where the encoded region starts so it can be sliced back out
    // for the inspector below.
    const failures_start_buf_pos = buf.items.len;

    // Single base64 pass over the combined bytes, written directly into
    // `buf`'s spare capacity.
    const len = bun.base64.encodeLen(all_failures.items);
    try buf.ensureUnusedCapacity(len);
    const to_write_into = buf.unusedCapacitySlice();
    buf.items.len += bun.base64.encode(to_write_into, all_failures.items);

    // Re-use the encoded buffer to avoid encoding failures more times than neccecary.
    if (inspector_agent) |agent| {
        assert(agent.isEnabled());
        const failures_encoded = buf.items[failures_start_buf_pos..];
        var str = bun.String.initLatin1OrASCIIView(failures_encoded);
        defer str.deref();
        agent.notifyBundleFailed(dev.inspector_server_id, &str);
    }
}
|
|
|
|
/// Respond with a self-contained HTML error page (HTTP 500) embedding the
/// serialized failures as base64 plus the client-side error overlay script,
/// which decodes and renders them in the browser.
fn sendSerializedFailures(
    dev: *DevServer,
    resp: AnyResponse,
    failures: []const SerializedFailure,
    kind: ErrorPageKind,
    inspector_agent: ?*BunFrontendDevServerAgent,
) !void {
    var buf: std.ArrayList(u8) = try .initCapacity(dev.allocator, 2048);
    errdefer buf.deinit();

    // HTML shell; the page title is chosen at comptime per error kind.
    // The trailing `atob("` is completed by the base64 payload below.
    try buf.appendSlice(switch (kind) {
        inline else => |k| std.fmt.comptimePrint(
            \\<!doctype html>
            \\<html lang="en">
            \\<head>
            \\<meta charset="UTF-8" />
            \\<meta name="viewport" content="width=device-width, initial-scale=1.0" />
            \\<title>Bun - {[page_title]s}</title>
            \\<style>:root{{color-scheme:light dark}}body{{background:light-dark(white,black)}}</style>
            \\</head>
            \\<body>
            \\<noscript><h1 style="font:28px sans-serif;">{[page_title]s}</h1><p style="font:20px sans-serif;">Bun requires JavaScript enabled in the browser to render this error screen, as well as receive hot reloading events.</p></noscript>
            \\<script>let error=Uint8Array.from(atob("
        ,
            .{ .page_title = switch (k) {
                .bundler => "Build Failed",
                .evaluation, .runtime => "Runtime Error",
            } },
        ),
    });

    // Base64 failure payload (also forwarded to the inspector, if any).
    try dev.encodeSerializedFailures(failures, &buf, inspector_agent);

    // Close the atob() call and append the error overlay script.
    const pre = "\"),c=>c.charCodeAt(0));let config={bun:\"" ++ bun.Global.package_json_version_with_canary ++ "\"};";
    const post = "</script></body></html>";

    if (Environment.codegen_embed) {
        try buf.appendSlice(pre ++ @embedFile("bake-codegen/bake.error.js") ++ post);
    } else {
        try buf.appendSlice(pre);
        try buf.appendSlice(bun.runtimeEmbedFile(.codegen_eager, "bake.error.js"));
        try buf.appendSlice(post);
    }

    // Ownership of `buf` transfers to the response blob here.
    StaticRoute.sendBlobThenDeinit(resp, &.fromArrayList(buf), .{
        .mime_type = &.html,
        .server = dev.server.?,
        .status_code = 500,
    });
}
|
|
|
|
/// Minimal built-in 404 response, used when neither a route nor a
/// user-provided request handler matched.
fn sendBuiltInNotFound(resp: anytype) void {
    resp.writeStatus("404 Not Found");
    resp.end("404 Not Found", true);
}
|
|
|
|
/// Print a one-line memory usage summary to stderr. Compiled out entirely
/// unless allocation scopes are enabled, and skipped when the DevServer
/// debug scope is not visible.
fn printMemoryLine(dev: *DevServer) void {
    if (comptime !bun.Environment.enableAllocScopes) return;
    if (!debug.isVisible()) return;

    const scope_state = &dev.allocation_scope.state;
    Output.prettyErrorln("<d>DevServer tracked {}, measured: {} ({}), process: {}<r>", .{
        bun.fmt.size(dev.memoryCost(), .{}),
        scope_state.allocations.count(),
        bun.fmt.size(scope_state.total_memory_allocated, .{}),
        bun.fmt.size(bun.sys.selfProcessMemoryUsage() orelse 0, .{}),
    });
}
|
|
|
|
pub const PackedMap = @import("./DevServer/PackedMap.zig");
|
|
/// Classification of a file tracked by an IncrementalGraph; determines how
/// its `code` field is interpreted.
pub const FileKind = enum(u2) {
    /// Files that failed to bundle or do not exist on disk will appear in the
    /// graph as "unknown".
    unknown,
    /// `code` is JavaScript code. This field is also used for HTML files, where
    /// the associated JS just calls `require` to emulate the script tags.
    js,
    /// `code` is JavaScript code of a module exporting a single file path.
    /// This is used when the loader is not `css` nor `isJavaScriptLike()`
    asset,
    /// `code` is the URL where the CSS file is to be fetched from, ex.
    /// '/_bun/css/0000000000000000.css'
    css,

    /// True when `code` holds an inline JavaScript chunk (`.js`/`.asset`),
    /// as opposed to a URL (`.css`) or nothing (`.unknown`).
    pub fn hasInlinejscodeChunk(self: @This()) bool {
        return switch (self) {
            .js, .asset => true,
            else => false,
        };
    }
};
|
|
|
|
pub const IncrementalGraph = @import("./DevServer/IncrementalGraph.zig").IncrementalGraph;
|
|
|
|
/// Scratch output of one incremental update pass. Lists are reused between
/// updates (`reset` retains capacity) and populated by graph tracing and
/// `receiveChunk`.
pub const IncrementalResult = struct {
    /// When tracing a file's dependencies via `traceDependencies`, this is
    /// populated with the hit `Route.Index`s. To know what framework
    /// `RouteBundle`s are affected, the route graph must be traced downwards.
    /// Tracing is used for multiple purposes.
    framework_routes_affected: ArrayListUnmanaged(RouteIndexAndRecurseFlag),
    /// HTML routes have slightly different anatomy than their
    /// framework-provided counterparts, and get a separate list.
    /// This list is just for when a script/style URL needs to be rewritten.
    html_routes_soft_affected: ArrayListUnmanaged(RouteBundle.Index),
    /// Requires a hard reload of active viewers.
    /// - Changed HTML File
    /// - Non-JS/CSS link between HTML file and import, such as image.
    html_routes_hard_affected: ArrayListUnmanaged(RouteBundle.Index),
    /// Set to true if any IncrementalGraph edges were added or removed.
    had_adjusted_edges: bool,

    // The following three fields are populated during `receiveChunk`

    /// Components to add to the client manifest
    client_components_added: ArrayListUnmanaged(IncrementalGraph(.server).FileIndex),
    /// Components to remove from the client manifest
    client_components_removed: ArrayListUnmanaged(IncrementalGraph(.server).FileIndex),
    /// This list acts as a free list. The contents of these slices must remain
    /// valid; they have to be so the affected routes can be cleared of the
    /// failures and potentially be marked valid. At the end of an
    /// incremental update, the slices are freed.
    failures_removed: ArrayListUnmanaged(SerializedFailure),

    /// Client boundaries that have been added or modified. At the end of a hot
    /// update, these are traced to their route to mark the bundles as stale (to
    /// be generated on Cmd+R)
    ///
    /// Populated during `traceDependencies`
    client_components_affected: ArrayListUnmanaged(IncrementalGraph(.server).FileIndex),

    /// The list of failures which will have to be traced to their route. Such
    /// tracing is deferred until the second pass of finalizeBundle as the
    /// dependency graph may not fully exist at the time the failure is indexed.
    ///
    /// Populated from within the bundler via `handleParseTaskFailure`, as well
    /// as at the start of `indexFailures`.
    ///
    /// This is also populated when calling `traceImports` with `find_errors`
    failures_added: ArrayListUnmanaged(SerializedFailure),

    /// Zero-allocation initial state; lists grow on first use.
    pub const empty: IncrementalResult = .{
        .framework_routes_affected = .{},
        .html_routes_soft_affected = .{},
        .html_routes_hard_affected = .{},
        .had_adjusted_edges = false,
        .failures_removed = .{},
        .failures_added = .{},
        .client_components_added = .{},
        .client_components_removed = .{},
        .client_components_affected = .{},
    };

    /// Clear per-update state while retaining list capacity for reuse.
    /// `failures_removed` must already be empty: its slices must have been
    /// freed by the end of the previous incremental update.
    fn reset(result: *IncrementalResult) void {
        result.framework_routes_affected.clearRetainingCapacity();
        result.html_routes_soft_affected.clearRetainingCapacity();
        result.html_routes_hard_affected.clearRetainingCapacity();
        assert(result.failures_removed.items.len == 0);
        result.failures_added.clearRetainingCapacity();
        result.client_components_added.clearRetainingCapacity();
        result.client_components_removed.clearRetainingCapacity();
        result.client_components_affected.clearRetainingCapacity();
    }
};
|
|
|
|
/// Used during an incremental update to determine what "HMR roots"
/// are affected. Set for all `bundled_files` that have been visited
/// by the dependency tracing logic.
pub const GraphTraceState = struct {
    client_bits: DynamicBitSetUnmanaged,
    server_bits: DynamicBitSetUnmanaged,

    /// Select the bit-set for the given graph side.
    pub fn bits(gts: *GraphTraceState, side: bake.Side) *DynamicBitSetUnmanaged {
        return switch (side) {
            .client => &gts.client_bits,
            .server => &gts.server_bits,
        };
    }

    pub fn deinit(gts: *GraphTraceState, alloc: Allocator) void {
        gts.client_bits.deinit(alloc);
        gts.server_bits.deinit(alloc);
    }

    /// Unset all bits so the state can be reused for another trace without
    /// reallocating.
    pub fn clear(gts: *GraphTraceState) void {
        gts.server_bits.setAll(false);
        gts.client_bits.setAll(false);
    }

    /// Grow one side's bit-set to at least `new_size` bits (new bits are
    /// zeroed). Never shrinks.
    pub fn resize(gts: *GraphTraceState, side: bake.Side, allocator: Allocator, new_size: usize) !void {
        const b = switch (side) {
            .client => &gts.client_bits,
            .server => &gts.server_bits,
        };
        if (b.bit_length < new_size) {
            try b.resize(allocator, new_size, false);
        }
    }

    /// Release both bit-sets' memory by resizing to zero length.
    pub fn clearAndFree(gts: *GraphTraceState, alloc: Allocator) void {
        gts.client_bits.resize(alloc, 0, false) catch
            unreachable; // freeing memory can not fail
        gts.server_bits.resize(alloc, 0, false) catch
            unreachable; // freeing memory can not fail
    }
};
|
|
|
|
/// What the import-tracing walk is looking for; each goal collects a
/// different kind of result (e.g. `find_css` gathers the CSS files reachable
/// from a route's imports).
pub const TraceImportGoal = enum {
    find_css,
    find_client_modules,
    find_errors,
};
|
|
|
|
/// Allocate a fresh, all-zero GraphTraceState sized to the current graphs.
///
/// `extra_client_bits` is specified if it is possible that the client graph may
/// increase in size while the bits are being used. This happens with CSS files,
/// though [TODO] might not actually be necessary.
fn initGraphTraceState(dev: *const DevServer, sfa: Allocator, extra_client_bits: usize) !GraphTraceState {
    var server_bits = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.server_graph.bundled_files.count());
    errdefer server_bits.deinit(sfa);
    const client_bits = try DynamicBitSetUnmanaged.initEmpty(sfa, dev.client_graph.bundled_files.count() + extra_client_bits);
    return .{ .server_bits = server_bits, .client_bits = client_bits };
}
|
|
|
|
pub const DirectoryWatchStore = @import("./DevServer/DirectoryWatchStore.zig");
|
|
|
|
/// Distinguishes a route's first full JS bundle from an incremental
/// hot-module-reload chunk.
pub const ChunkKind = enum(u1) {
    initial_response,
    hmr_chunk,
};
|
|
|
|
pub const SerializedFailure = @import("./DevServer/SerializedFailure.zig");
|
|
|
|
// For debugging, it is helpful to be able to see bundles.
/// Write one bundled chunk to `<dump_dir>/<graph>/<rel_path>` with a header
/// comment identifying origin and time. `wrap` surrounds the chunk in
/// `({...});` to make a single-module chunk parseable on its own.
pub fn dumpBundle(dump_dir: std.fs.Dir, graph: bake.Graph, rel_path: []const u8, chunk: []const u8, wrap: bool) !void {
    const buf = bun.path_buffer_pool.get();
    defer bun.path_buffer_pool.put(buf);
    // Join "<graph>/<rel_path>" and drop the leading '/'.
    const name = bun.path.joinAbsStringBuf("/", buf, &.{
        @tagName(graph),
        rel_path,
    }, .auto)[1..];
    var inner_dir = try dump_dir.makeOpenPath(bun.Dirname.dirname(u8, name).?, .{});
    defer inner_dir.close();

    const file = try inner_dir.createFile(bun.path.basename(name), .{});
    defer file.close();

    var bufw = std.io.bufferedWriter(file.writer());

    // Source maps get no header comment; `//` is not a comment in JSON.
    if (!bun.strings.hasSuffixComptime(rel_path, ".map")) {
        try bufw.writer().print("// {s} bundled for {s}\n", .{
            bun.fmt.quote(rel_path),
            @tagName(graph),
        });
        try bufw.writer().print("// Bundled at {d}, Bun " ++ bun.Global.package_json_version_with_canary ++ "\n", .{
            std.time.nanoTimestamp(),
        });
    }

    // Wrap in an object to make it valid syntax. Regardless, these files
    // are never executable on their own as they contain only a single module.

    if (wrap)
        try bufw.writer().writeAll("({\n");

    try bufw.writer().writeAll(chunk);

    if (wrap)
        try bufw.writer().writeAll("});\n");

    try bufw.flush();
}
|
|
|
|
/// Debug helper wrapping `dumpBundle`: converts the absolute `key` to a
/// path relative to the project root, escapes "../" segments so the dump
/// stays inside the dump directory, and logs (rather than propagates)
/// any write failure.
pub noinline fn dumpBundleForChunk(dev: *DevServer, dump_dir: std.fs.Dir, side: bake.Side, key: []const u8, code: []const u8, wrap: bool, is_ssr_graph: bool) void {
    const cwd = dev.root;
    var a: bun.PathBuffer = undefined;
    var b: [bun.MAX_PATH_BYTES * 2]u8 = undefined;
    const rel_path = bun.path.relativeBufZ(&a, cwd, key);
    // Replace "../" with "_.._" so paths outside the root don't escape the
    // dump directory.
    const size = std.mem.replacementSize(u8, rel_path, ".." ++ std.fs.path.sep_str, "_.._" ++ std.fs.path.sep_str);
    _ = std.mem.replace(u8, rel_path, ".." ++ std.fs.path.sep_str, "_.._" ++ std.fs.path.sep_str, &b);
    const rel_path_escaped = b[0..size];
    dumpBundle(dump_dir, switch (side) {
        .client => .client,
        .server => if (is_ssr_graph) .ssr else .server,
    }, rel_path_escaped, code, wrap) catch |err| {
        // Dumping is best-effort debug tooling; never fail the bundle over it.
        bun.handleErrorReturnTrace(err, @errorReturnTrace());
        Output.warn("Could not dump bundle: {}", .{err});
    };
}
|
|
|
|
/// Publish an incremental-graph visualizer snapshot to subscribed debug
/// clients, if any. The memory visualizer message is emitted (via defer)
/// even when there are no incremental-visualizer subscribers.
pub fn emitVisualizerMessageIfNeeded(dev: *DevServer) void {
    if (!bun.FeatureFlags.bake_debugging_features) return;
    defer dev.emitMemoryVisualizerMessageIfNeeded();
    if (dev.emit_incremental_visualizer_events == 0) return;

    var sfb = std.heap.stackFallback(65536, dev.allocator);
    var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch
        unreachable; // enough capacity on the stack
    defer payload.deinit();

    dev.writeVisualizerMessage(&payload) catch return; // visualizer does not get an update if it OOMs

    dev.publish(.incremental_visualizer, payload.items, .binary);
}
|
|
|
|
/// Event-loop timer callback that emits a memory visualizer snapshot and
/// manually reschedules itself for one second later. Returns `.disarm`
/// because rescheduling is done via `vm.timer.update` rather than the
/// timer's return value.
pub fn emitMemoryVisualizerMessageTimer(timer: *EventLoopTimer, _: *const bun.timespec) EventLoopTimer.Arm {
    if (!bun.FeatureFlags.bake_debugging_features) return .disarm;
    // Recover the owning DevServer from the embedded timer field.
    const dev: *DevServer = @alignCast(@fieldParentPtr("memory_visualizer_timer", timer));
    assert(dev.magic == .valid);
    dev.emitMemoryVisualizerMessage();
    timer.state = .FIRED;
    dev.vm.timer.update(timer, &bun.timespec.msFromNow(1000));
    return .disarm;
}
|
|
|
|
/// Emit a memory visualizer snapshot only when debugging features are built
/// in and at least one client is subscribed.
pub fn emitMemoryVisualizerMessageIfNeeded(dev: *DevServer) void {
    if (!bun.FeatureFlags.bake_debugging_features) return;
    if (dev.emit_memory_visualizer_events != 0) {
        dev.emitMemoryVisualizerMessage();
    }
}
|
|
|
|
/// Build and publish a binary memory visualizer snapshot to subscribers.
/// Must only be called when subscribers exist; the payload is dropped
/// (not retried) if serialization fails.
pub fn emitMemoryVisualizerMessage(dev: *DevServer) void {
    comptime assert(bun.FeatureFlags.bake_debugging_features);
    bun.debugAssert(dev.emit_memory_visualizer_events > 0);

    // Stack-first buffer; typical payloads fit in 64 KiB.
    var sfb = std.heap.stackFallback(65536, dev.allocator);
    var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch
        unreachable; // enough capacity on the stack
    defer payload.deinit();
    payload.appendAssumeCapacity(MessageId.memory_visualizer.char());
    writeMemoryVisualizerMessage(dev, &payload) catch return; // drop packet
    dev.publish(.memory_visualizer, payload.items, .binary);
}
|
|
|
|
/// Serialize the memory visualizer payload: a fixed struct of memory-cost
/// counters followed by one record per source-map store entry.
pub fn writeMemoryVisualizerMessage(dev: *DevServer, payload: *std.ArrayList(u8)) !void {
    const w = payload.writer();
    // Wire layout of the fixed-size header; all sizes truncated to u32.
    const Fields = extern struct {
        incremental_graph_client: u32,
        incremental_graph_server: u32,
        js_code: u32,
        source_maps: u32,
        assets: u32,
        other: u32,
        devserver_tracked: u32,
        process_used: u32,
        system_used: u32,
        system_total: u32,
    };
    const cost = dev.memoryCostDetailed();
    const system_total = bun.api.node.os.totalmem();
    try w.writeStruct(Fields{
        .incremental_graph_client = @truncate(cost.incremental_graph_client),
        .incremental_graph_server = @truncate(cost.incremental_graph_server),
        .js_code = @truncate(cost.js_code),
        .source_maps = @truncate(cost.source_maps),
        .assets = @truncate(cost.assets),
        .other = @truncate(cost.other),
        .devserver_tracked = if (AllocationScope.enabled)
            @truncate(dev.allocation_scope.state.total_memory_allocated)
        else
            0,
        .process_used = @truncate(bun.sys.selfProcessMemoryUsage() orelse 0),
        .system_used = @truncate(system_total -| bun.api.node.os.freemem()),
        .system_total = @truncate(system_total),
    });

    // SourceMapStore is easy to leak refs in.
    {
        const keys = dev.source_maps.entries.keys();
        const values = dev.source_maps.entries.values();
        try w.writeInt(u32, @intCast(keys.len), .little);
        for (keys, values) |key, value| {
            bun.assert(value.ref_count > 0);
            try w.writeAll(std.mem.asBytes(&key.get()));
            try w.writeInt(u32, value.ref_count, .little);
            // Weak-ref info, when present: count plus expiry time.
            if (dev.source_maps.locateWeakRef(key)) |entry| {
                try w.writeInt(u32, entry.ref.count, .little);
                // floats are easier to decode in JS
                try w.writeAll(std.mem.asBytes(&@as(f64, @floatFromInt(entry.ref.expire))));
            } else {
                try w.writeInt(u32, 0, .little);
            }
            try w.writeInt(u32, @truncate(value.files.len), .little);
            try w.writeInt(u32, value.overlapping_memory_cost, .little);
        }
    }
}
|
|
|
|
/// Serializes the incremental-graph visualizer payload, including the leading
/// `MessageId.visualizer` byte. Wire format documented on `MessageId.visualizer`:
/// both file tables (client then server), then both edge tables.
pub fn writeVisualizerMessage(dev: *DevServer, payload: *std.ArrayList(u8)) !void {
    payload.appendAssumeCapacity(MessageId.visualizer.char());
    const w = payload.writer();

    // First pass: file tables. `inline for` is required because the client
    // and server graphs are distinct types with differently-shaped values.
    inline for (
        [2]bake.Side{ .client, .server },
        .{ &dev.client_graph, &dev.server_graph },
    ) |side, g| {
        try w.writeInt(u32, @intCast(g.bundled_files.count()), .little);
        for (
            g.bundled_files.keys(),
            g.bundled_files.values(),
            0..,
        ) |k, v, i| {
            const relative_path_buf = bun.path_buffer_pool.get();
            defer bun.path_buffer_pool.put(relative_path_buf);
            const normalized_key = dev.relativePath(relative_path_buf, k);
            try w.writeInt(u32, @intCast(normalized_key.len), .little);
            // Zero-length name means "no further fields for this file";
            // the decoder must handle that per the protocol docs.
            if (k.len == 0) continue;
            try w.writeAll(normalized_key);
            try w.writeByte(@intFromBool(g.stale_files.isSetAllowOutOfBound(i, true) or switch (side) {
                .server => v.failed,
                .client => v.flags.failed,
            }));
            try w.writeByte(@intFromBool(side == .server and v.is_rsc));
            try w.writeByte(@intFromBool(side == .server and v.is_ssr));
            try w.writeByte(@intFromBool(if (side == .server) v.is_route else v.flags.is_html_route));
            try w.writeByte(@intFromBool(side == .client and v.flags.is_special_framework_file));
            try w.writeByte(@intFromBool(switch (side) {
                .server => v.is_client_component_boundary,
                .client => v.flags.is_hmr_root,
            }));
        }
    }
    // Second pass: edge tables, skipping freed slots so only live edges are sent.
    inline for (.{ &dev.client_graph, &dev.server_graph }) |g| {
        const G = @TypeOf(g.*);

        try w.writeInt(u32, @intCast(g.edges.items.len - g.edges_free_list.items.len), .little);
        for (g.edges.items, 0..) |edge, i| {
            // Linear scan of the free list per edge; O(edges * free). Fine
            // for a debugging payload.
            if (std.mem.indexOfScalar(G.EdgeIndex, g.edges_free_list.items, G.EdgeIndex.init(@intCast(i))) != null)
                continue;

            try w.writeInt(u32, @intCast(edge.dependency.get()), .little);
            try w.writeInt(u32, @intCast(edge.imported.get()), .little);
        }
    }
}
|
|
|
|
/// Upgrades an HTTP request to an HMR websocket connection, allocating an
/// `HmrSocket` for it and registering it in the active connection set.
pub fn onWebSocketUpgrade(
    dev: *DevServer,
    res: anytype,
    req: *Request,
    upgrade_ctx: *uws.SocketContext,
    id: usize,
) void {
    // Only one websocket route is registered, so the route id is always 0.
    assert(id == 0);

    const dw = HmrSocket.new(dev, res);
    dev.active_websocket_connections.put(dev.allocator, dw, {}) catch bun.outOfMemory();
    // NOTE(review): the standard request header is "sec-websocket-extensions"
    // (plural); confirm uws expects this singular key here.
    _ = res.upgrade(
        *HmrSocket,
        dw,
        req.header("sec-websocket-key") orelse "",
        req.header("sec-websocket-protocol") orelse "",
        req.header("sec-websocket-extension") orelse "",
        upgrade_ctx,
    );
}
|
|
|
|
/// Every message is to use `.binary`/`ArrayBuffer` transport mode. The first byte
/// indicates a Message ID; see comments on each type for how to interpret the rest.
/// Avoid changing message ID values, as some of these are hard-coded in tests.
///
/// This format is only intended for communication via the browser and DevServer.
/// Server-side HMR is implemented using a different interface. This API is not
/// versioned alongside Bun; breaking changes may occur at any point.
///
/// All integers are sent in little-endian.
pub const MessageId = enum(u8) {
    /// Version payload. Sent on connection startup. The client should issue a
    /// hard-reload when it mismatches with its `config.version`.
    version = 'V',
    /// Sent on a successful bundle, containing client code, updates routes, and
    /// changed CSS files. Emitted on the `.hot_update` topic.
    ///
    /// - For each server-side updated route:
    ///   - `i32`: Route Bundle ID
    /// - `i32`: -1 to indicate end of list
    /// - For each route stylesheet lists affected:
    ///   - `i32`: Route Bundle ID
    ///   - `u32`: Length of route pattern
    ///   - `[n]u8` UTF-8: Route pattern
    ///   - `u32`: Number of CSS attachments: For Each
    ///     - `[16]u8` ASCII: CSS identifier
    /// - `i32`: -1 to indicate end of list
    /// - `u32`: Number of CSS mutations. For Each:
    ///   - `[16]u8` ASCII: CSS identifier
    ///   - `u32`: Length of CSS code
    ///   - `[n]u8` UTF-8: CSS payload
    /// - `[n]u8` UTF-8: JS Payload. No length, rest of buffer is text.
    ///   Can be empty if no client-side code changed.
    ///
    /// The first list contains route changes that require a page reload, but
    /// frameworks can perform via `onServerSideReload`. Fallback behavior
    /// is to call `location.reload();`
    ///
    /// The second list is sent to inform the current list of CSS files
    /// reachable by a route, recalculated whenever an import is added or
    /// removed as that can inadvertently affect the CSS list.
    ///
    /// The third list contains CSS mutations, which are when the underlying
    /// CSS file itself changes.
    ///
    /// The JS payload is the remaining data. If defined, it can be passed to
    /// `eval`, resulting in an object of new module callables.
    hot_update = 'u',
    /// Sent when the list of errors changes.
    ///
    /// - `u32`: Removed errors. For Each:
    ///   - `u32`: Error owner
    /// - Remainder are added errors. For Each:
    ///   - `SerializedFailure`: Error Data
    errors = 'e',
    /// A message from the browser. This is used to communicate.
    /// - `u32`: Unique ID for the browser tab. Each tab gets a different ID
    /// - `[n]u8`: Opaque bytes, untouched from `IncomingMessageId.browser_error`
    browser_message = 'b',
    /// Sent to clear the messages from `browser_error`
    /// - For each removed ID:
    ///   - `u32`: Unique ID for the browser tab.
    browser_message_clear = 'B',
    /// Sent when a request handler error is emitted. Each route will own at
    /// most 1 error, where sending a new request clears the original one.
    ///
    /// - `u32`: Removed errors. For Each:
    ///   - `u32`: Error owner
    /// - `u32`: Length of route pattern
    /// - `[n]u8`: UTF-8 Route pattern
    /// - `SerializedFailure`: The one error list for the request
    request_handler_error = 'h',
    /// Payload for `incremental_visualizer.html`. Contains both graphs.
    /// This can be accessed via `/_bun/incremental_visualizer`.
    ///
    /// - `u32`: Number of files in `client_graph`. For Each:
    ///   - `u32`: Length of name. If zero then no other fields are provided.
    ///   - `[n]u8`: File path in UTF-8 encoded text
    ///   - `u8`: If file is stale, set 1
    ///   - `u8`: If file is in server graph, set 1
    ///   - `u8`: If file is in ssr graph, set 1
    ///   - `u8`: If file is a server-side route root, set 1
    ///   - `u8`: If file is a server-side component boundary file, set 1
    /// - `u32`: Number of files in the server graph. For Each:
    ///   - Repeat the same parser for the client graph
    /// - `u32`: Number of client edges. For Each:
    ///   - `u32`: File index of the dependency file
    ///   - `u32`: File index of the imported file
    /// - `u32`: Number of server edges. For Each:
    ///   - `u32`: File index of the dependency file
    ///   - `u32`: File index of the imported file
    visualizer = 'v',
    /// Payload for `memory_visualizer.html`.
    /// This can be accessed via `/_bun/memory_visualizer`.
    ///
    /// - u32: incremental_graph_client
    /// - u32: incremental_graph_server
    /// - u32: js_code
    /// - u32: source_maps
    /// - u32: assets
    /// - u32: other
    /// - u32: devserver_tracked
    /// - u32: process_used
    /// - u32: system_used
    /// - u32: system_total
    memory_visualizer = 'M',
    /// Sent in response to `set_url`.
    /// - `u32`: Route index
    set_url_response = 'n',
    /// Used for synchronization in DevServer tests, to identify when a update was
    /// acknowledged by the watcher but intentionally took no action.
    /// - `u8`: See bake-harness.ts WatchSynchronization enum.
    testing_watch_synchronization = 'r',

    /// Returns the raw wire byte for this message ID.
    pub inline fn char(id: MessageId) u8 {
        return @intFromEnum(id);
    }
};
|
|
|
|
/// Message IDs for packets flowing from the browser client to the DevServer
/// (first byte of each websocket payload).
/// Avoid changing message ID values, as some of these are hard-coded in tests.
pub const IncomingMessageId = enum(u8) {
    /// Initialization packet.
    /// - [8]u8: Source Map ID, from the client config. Encoded in HEX
    ///
    /// Responsibilities:
    /// - Clear SourceMapStore's weak reference, move as a strong ref on HmrSocket.
    init = 'i',
    /// Subscribe to an event channel. Payload is a sequence of chars available
    /// in HmrTopic.
    subscribe = 's',
    /// Emitted on client-side navigations.
    /// Rest of payload is a UTF-8 string.
    set_url = 'n',
    /// Tells the DevServer to batch events together.
    testing_batch_events = 'H',

    /// Console log from the client
    console_log = 'l',
    /// Tells the DevServer to unref a source map.
    /// - `u64`: SourceMapStore key
    unref_source_map = 'u',

    /// Invalid data
    _,
};
|
|
|
|
/// Severity tag byte used when forwarding console output from the browser.
pub const ConsoleLogKind = enum(u8) {
    /// Normal log output.
    log = 'l',
    /// Error output.
    err = 'e',
};
|
|
|
|
/// Pub/sub topics for the HMR websocket. The single-byte topic name is the
/// enum value; clients subscribe via `IncomingMessageId.subscribe`.
pub const HmrTopic = enum(u8) {
    hot_update = 'h',
    errors = 'e',
    browser_error = 'E',
    incremental_visualizer = 'v',
    memory_visualizer = 'M',
    testing_watch_synchronization = 'r',

    /// Invalid data
    _,

    pub const max_count = @typeInfo(HmrTopic).@"enum".fields.len;
    /// A packed struct of `bool`s, one per topic, generated at comptime so it
    /// stays in sync with the enum. Used to track per-socket subscriptions.
    pub const Bits = @Type(.{ .@"struct" = .{
        .backing_integer = @Type(.{ .int = .{
            .bits = max_count,
            .signedness = .unsigned,
        } }),
        .fields = &brk: {
            const enum_fields = @typeInfo(HmrTopic).@"enum".fields;
            var fields: [enum_fields.len]std.builtin.Type.StructField = undefined;
            for (enum_fields, &fields) |e, *s| {
                s.* = .{
                    .name = e.name,
                    .type = bool,
                    .default_value_ptr = &false,
                    .is_comptime = false,
                    .alignment = 0,
                };
            }
            break :brk fields;
        },
        .decls = &.{},
        .is_tuple = false,
        .layout = .@"packed",
    } });
};
|
|
|
|
pub const HmrSocket = @import("./DevServer/HmrSocket.zig");
|
|
|
|
/// Resolves a route pattern to its route bundle, creating the bundle entry if
/// needed. Framework routes are checked before HTML routes. "Slow" because it
/// performs a full pattern match rather than a lookup by index.
pub fn routeToBundleIndexSlow(dev: *DevServer, pattern: []const u8) ?RouteBundle.Index {
    var params: FrameworkRouter.MatchedParams = undefined;
    if (dev.router.matchSlow(pattern, &params)) |route_index| {
        return dev.getOrPutRouteBundle(.{ .framework = route_index }) catch bun.outOfMemory();
    }
    if (dev.html_router.get(pattern)) |html| {
        return dev.getOrPutRouteBundle(.{ .html = html }) catch bun.outOfMemory();
    }
    return null;
}
|
|
|
|
/// Thin wrappers over native (C++) entry points for loading server-side code.
const c = struct {
    // BakeSourceProvider.cpp
    extern fn BakeGetDefaultExportFromModule(global: *jsc.JSGlobalObject, module: JSValue) JSValue;

    /// Evaluates a server-side HMR patch. `@extern` + `fromJSHostCall` is used
    /// instead of a plain `extern fn` so JS exceptions propagate as Zig errors.
    fn BakeLoadServerHmrPatch(global: *jsc.JSGlobalObject, code: bun.String) bun.JSError!JSValue {
        const f = @extern(*const fn (*jsc.JSGlobalObject, bun.String) callconv(.c) JSValue, .{ .name = "BakeLoadServerHmrPatch" }).*;
        return bun.jsc.fromJSHostCall(global, @src(), f, .{ global, code });
    }

    /// Evaluates the initial server-side bundle.
    fn BakeLoadInitialServerCode(global: *jsc.JSGlobalObject, code: bun.String, separate_ssr_graph: bool) bun.JSError!JSValue {
        const f = @extern(*const fn (*jsc.JSGlobalObject, bun.String, bool) callconv(.c) JSValue, .{ .name = "BakeLoadInitialServerCode" }).*;
        return bun.jsc.fromJSHostCall(global, @src(), f, .{ global, code, separate_ssr_graph });
    }
};
|
|
|
|
/// Called on DevServer thread via HotReloadTask.
/// Converts a batch of watcher events into bundler entry points and kicks off
/// an async rebuild. A batch that resolves to zero entry points is a no-op.
pub fn startReloadBundle(dev: *DevServer, event: *HotReloadEvent) bun.OOM!void {
    // The event object is reused across reloads; clear its file list on exit.
    defer event.files.clearRetainingCapacity();

    var sfb = std.heap.stackFallback(4096, dev.allocator);
    const temp_alloc = sfb.get();
    var entry_points: EntryPointList = EntryPointList.empty;
    defer entry_points.deinit(temp_alloc);

    event.processFileList(dev, &entry_points, temp_alloc);
    if (entry_points.set.count() == 0) {
        return;
    }

    dev.startAsyncBundle(
        entry_points,
        true,
        event.timer,
    ) catch |err| {
        // Bundle startup failures are logged (via the trace) but do not take
        // down the dev server.
        bun.handleErrorReturnTrace(err, @errorReturnTrace());
        return;
    };
}
|
|
|
|
/// Recursively walks all descendants of `route_index`, setting the bit for
/// each descendant's route-bundle index in every provided bit set. The number
/// of bit sets is comptime so the inner loop unrolls.
fn markAllRouteChildren(router: *FrameworkRouter, comptime n: comptime_int, bits: [n]*DynamicBitSetUnmanaged, route_index: Route.Index) void {
    var next = router.routePtr(route_index).first_child.unwrap();
    while (next) |child_index| {
        const route = router.routePtr(child_index);
        // Only routes that already have a bundle get marked.
        if (route.bundle.unwrap()) |index| {
            inline for (bits) |b|
                b.set(index.get());
        }
        markAllRouteChildren(router, n, bits, child_index);
        next = route.next_sibling.unwrap();
    }
}
|
|
|
|
/// Recursively marks every descendant route bundle of `route_index` as
/// possibly affected by a bundling failure.
fn markAllRouteChildrenFailed(dev: *DevServer, route_index: Route.Index) void {
    var child = dev.router.routePtr(route_index).first_child.unwrap();
    while (child) |child_index| : (child = dev.router.routePtr(child_index).next_sibling.unwrap()) {
        if (dev.router.routePtr(child_index).bundle.unwrap()) |bundle_index| {
            dev.routeBundlePtr(bundle_index).server_state = .possible_bundling_failures;
        }
        markAllRouteChildrenFailed(dev, child_index);
    }
}
|
|
|
|
/// Returns the frontend dev-server inspector agent when a debugger is
/// attached and the agent is enabled; otherwise null. Both conditions are
/// hinted unlikely since inspection is rare in normal operation.
pub fn inspector(dev: *const DevServer) ?*BunFrontendDevServerAgent {
    if (dev.vm.debugger) |*debugger| {
        @branchHint(.unlikely);
        const agent = &debugger.frontend_dev_server_agent;
        if (agent.isEnabled()) {
            @branchHint(.unlikely);
            return agent;
        }
    }
    return null;
}
|
|
|
|
pub const HotReloadEvent = @import("./DevServer/HotReloadEvent.zig");
|
|
pub const WatcherAtomics = @import("./DevServer/WatcherAtomics.zig");
|
|
|
|
/// Called on watcher's thread; Access to dev-server state restricted.
/// Collects file-system events into a `HotReloadEvent` (handed off to the
/// DevServer thread via `watcher_atomics`) and evicts deleted watch entries.
pub fn onFileUpdate(dev: *DevServer, events: []Watcher.Event, changed_files: []?[:0]u8, watchlist: Watcher.ItemList) void {
    assert(dev.magic == .valid);
    debug.log("onFileUpdate start", .{});
    defer debug.log("onFileUpdate end", .{});

    const slice = watchlist.slice();
    const file_paths = slice.items(.file_path);
    const counts = slice.items(.count);
    const kinds = slice.items(.kind);

    // Acquire a reusable event object; released (and submitted to the
    // DevServer thread) when this function returns.
    const ev = dev.watcher_atomics.watcherAcquireEvent();
    defer dev.watcher_atomics.watcherReleaseAndSubmitEvent(ev);

    // Flush eviction of removed entries after all events are processed.
    defer dev.bun_watcher.flushEvictions();

    for (events) |event| {
        // TODO: why does this out of bounds when you delete every file in the directory?
        if (event.index >= file_paths.len) continue;

        const file_path = file_paths[event.index];
        const update_count = counts[event.index] + 1;
        counts[event.index] = update_count;
        const kind = kinds[event.index];

        debug.log("{s} change: {s} {}", .{ @tagName(kind), file_path, event.op });

        switch (kind) {
            .file => {
                if (event.op.delete or event.op.rename) {
                    // TODO: audit this line heavily
                    dev.bun_watcher.removeAtIndex(event.index, 0, &.{}, .file);
                }

                ev.appendFile(dev.allocator, file_path);
            },
            .directory => {
                // INotifyWatcher stores sub paths into `changed_files`
                // the other platforms do not appear to write anything into `changed_files` ever.
                if (Environment.isLinux) {
                    ev.appendDir(dev.allocator, file_path, if (event.name_len > 0) changed_files[event.name_off] else null);
                } else {
                    ev.appendDir(dev.allocator, file_path, null);
                }
            },
        }
    }
}
|
|
|
|
/// Watcher error callback: reports the failure to the terminal. The server
/// keeps running; only hot-reloading is lost until restart.
pub fn onWatchError(_: *DevServer, err: bun.sys.Error) void {
    if (err.path.len == 0) {
        Output.err(err, "failed to watch files for hot-reloading", .{});
    } else {
        Output.err(err, "failed to watch {} for hot-reloading", .{bun.fmt.quote(err.path)});
    }
    Output.warn("The development server is still running, but hot-reloading is disabled until a restart.", .{});
    // TODO: attempt to automatically restart the watcher thread, perhaps wait for next request.
}
|
|
|
|
/// Broadcasts `message` to every websocket subscribed to `topic`.
/// No-op when the underlying server has already shut down.
pub fn publish(dev: *DevServer, topic: HmrTopic, message: []const u8, opcode: uws.Opcode) void {
    const server = dev.server orelse return;
    _ = server.publish(&.{@intFromEnum(topic)}, message, opcode, false);
}
|
|
|
|
/// Returns the number of websockets subscribed to `topic`, or zero when the
/// underlying server has already shut down.
pub fn numSubscribers(dev: *DevServer, topic: HmrTopic) u32 {
    const server = dev.server orelse return 0;
    return server.numSubscribers(&.{@intFromEnum(topic)});
}
|
|
|
|
/// Debug-build encoding of an `OpaqueFileId`: tags the file index with which
/// graph (client/server) it belongs to so `fromOpaqueFileId` can assert the
/// caller asked for the right side.
const SafeFileId = packed struct(u32) {
    side: bake.Side,
    index: u30,
    unused: enum(u1) { unused = 0 } = .unused,
};
|
|
|
|
/// Interface function for FrameworkRouter.
/// Registers a route file in the server graph (as stale, so it bundles on
/// demand) and records which route it belongs to. Layouts are flagged so
/// visiting them recurses into child routes.
pub fn getFileIdForRouter(dev: *DevServer, abs_path: []const u8, associated_route: Route.Index, file_kind: Route.FileKind) !OpaqueFileId {
    const index = try dev.server_graph.insertStaleExtra(abs_path, false, true);
    try dev.route_lookup.put(dev.allocator, index, .{
        .route_index = associated_route,
        .should_recurse_when_visiting = file_kind == .layout,
    });
    return toOpaqueFileId(.server, index);
}
|
|
|
|
/// FrameworkRouter callback for a malformed route filename; currently just
/// prints the log to the terminal.
pub fn onRouterSyntaxError(dev: *DevServer, rel_path: []const u8, log: FrameworkRouter.TinyLog) bun.OOM!void {
    _ = dev; // TODO: maybe this should track the error, send over HmrSocket?
    log.print(rel_path);
}
|
|
|
|
/// FrameworkRouter callback for two files claiming the same route pattern.
/// Prints both conflicting paths to the terminal.
pub fn onRouterCollisionError(dev: *DevServer, rel_path: []const u8, other_id: OpaqueFileId, ty: Route.FileKind) bun.OOM!void {
    // TODO: maybe this should track the error, send over HmrSocket?

    // Fix: "Multiple layout ..." was ungrammatical; both arms now supply a
    // plural noun to match the "Multiple {s} matching" template.
    Output.errGeneric("Multiple {s} matching the same route pattern is ambiguous", .{
        switch (ty) {
            .page => "pages",
            .layout => "layouts",
        },
    });
    Output.prettyErrorln("  - <blue>{s}<r>", .{rel_path});
    const relative_path_buf = bun.path_buffer_pool.get();
    defer bun.path_buffer_pool.put(relative_path_buf);
    // Resolve the colliding file's path from the server graph for display.
    Output.prettyErrorln("  - <blue>{s}<r>", .{
        dev.relativePath(relative_path_buf, dev.server_graph.bundled_files.keys()[fromOpaqueFileId(.server, other_id).get()]),
    });
    Output.flush();
}
|
|
|
|
/// Converts a graph-local file index into the opaque ID handed to
/// FrameworkRouter. Debug builds encode the graph side into the ID (see
/// `SafeFileId`) so `fromOpaqueFileId` can verify round-trips.
fn toOpaqueFileId(comptime side: bake.Side, index: IncrementalGraph(side).FileIndex) OpaqueFileId {
    if (Environment.allow_assert) {
        return OpaqueFileId.init(@bitCast(SafeFileId{
            .side = side,
            .index = index.get(),
        }));
    }

    return OpaqueFileId.init(index.get());
}
|
|
|
|
/// Inverse of `toOpaqueFileId`. Debug builds assert the ID was produced for
/// the same graph side it is being decoded for.
fn fromOpaqueFileId(comptime side: bake.Side, id: OpaqueFileId) IncrementalGraph(side).FileIndex {
    if (Environment.allow_assert) {
        const safe: SafeFileId = @bitCast(id.get());
        assert(side == safe.side);
        return IncrementalGraph(side).FileIndex.init(safe.index);
    }
    return IncrementalGraph(side).FileIndex.init(@intCast(id.get()));
}
|
|
|
|
/// Returns posix style path, suitable for URLs and reproducible hashes.
/// Calculate the relative path from the dev server root.
/// The caller must provide a PathBuffer from the pool.
pub fn relativePath(dev: *DevServer, relative_path_buf: *bun.PathBuffer, path: []const u8) []const u8 {
    // `root` is stored without a trailing slash; the fast path below relies on it.
    bun.assert(dev.root[dev.root.len - 1] != '/');

    // Non-absolute paths (e.g. virtual module names) are passed through untouched.
    if (!std.fs.path.isAbsolute(path)) {
        return path;
    }

    // Fast path: `path` is directly under `root`; slice off the prefix
    // without copying into the buffer.
    if (path.len >= dev.root.len + 1 and
        path[dev.root.len] == '/' and
        bun.strings.startsWith(path, dev.root))
    {
        return path[dev.root.len + 1 ..];
    }

    const rel = bun.path.relativePlatformBuf(relative_path_buf, dev.root, path, .auto, true);
    // @constCast: `rel` is owned by a buffer on `dev`, which is mutable
    bun.path.platformToPosixInPlace(u8, @constCast(rel));
    return rel;
}
|
|
|
|
/// Either of two conditions make this true:
/// - The inspector is enabled
/// - The user passed "console": true in serve({development: {console: true}}) options
///
/// Changing this value at runtime is unsupported. It's expected that the
/// inspector domains are registered at initialization time.
fn shouldReceiveConsoleLogFromBrowser(dev: *const DevServer) bool {
    return dev.inspector() != null or dev.broadcast_console_log_from_browser_to_server;
}
|
|
|
|
/// Writes a standalone HTML crash dump of the incremental graph: the embedded
/// visualizer page with the serialized graph inlined as a base64 string.
fn dumpStateDueToCrash(dev: *DevServer) !void {
    comptime assert(bun.FeatureFlags.bake_debugging_features);

    // being conservative about how much stuff is put on the stack.
    var filepath_buf: [@min(4096, bun.MAX_PATH_BYTES)]u8 = undefined;
    const filepath = std.fmt.bufPrintZ(&filepath_buf, "incremental-graph-crash-dump.{d}.html", .{std.time.timestamp()}) catch "incremental-graph-crash-dump.html";
    const file = std.fs.cwd().createFileZ(filepath, .{}) catch |err| {
        bun.handleErrorReturnTrace(err, @errorReturnTrace());
        Output.warn("Could not open file for dumping incremental graph: {}", .{err});
        return;
    };
    defer file.close();

    // Split the embedded visualizer at its last <script> tag so the payload
    // can be injected between the two halves.
    const start, const end = comptime brk: {
        @setEvalBranchQuota(5000);
        const visualizer = @embedFile("incremental_visualizer.html");
        const i = (std.mem.lastIndexOf(u8, visualizer, "<script>") orelse unreachable) + "<script>".len;
        break :brk .{ visualizer[0..i], visualizer[i..] };
    };
    try file.writeAll(start);
    try file.writeAll("\nlet inlinedData = Uint8Array.from(atob(\"");

    var sfb = std.heap.stackFallback(4096, dev.allocator);
    var payload = try std.ArrayList(u8).initCapacity(sfb.get(), 4096);
    defer payload.deinit();
    try dev.writeVisualizerMessage(&payload);

    // The payload is base64-encoded in fixed windows to bound stack usage,
    // and the per-chunk encodings are concatenated into one string. That is
    // only valid when the chunk size is a multiple of 3: base64 maps 3 input
    // bytes to 4 output characters, so a non-multiple-of-3 chunk ends with
    // '=' padding, and padding in the middle of a stream makes `atob` fail.
    // (The previous chunk size of 4096 had exactly this bug.)
    const chunk_len = 4095; // 3 * 1365
    comptime assert(chunk_len % 3 == 0);
    var buf: [bun.base64.encodeLenFromSize(chunk_len)]u8 = undefined;
    var it = std.mem.window(u8, payload.items, chunk_len, chunk_len);
    while (it.next()) |chunk| {
        try file.writeAll(buf[0..bun.base64.encode(&buf, chunk)]);
    }

    try file.writeAll("\"), c => c.charCodeAt(0));\n");
    try file.writeAll(end);

    Output.note("Dumped incremental bundler graph to {}", .{bun.fmt.quote(filepath)});
}
|
|
|
|
/// Value stored in `route_lookup`: maps a server-graph file back to its route,
/// with a flag controlling traversal behavior.
const RouteIndexAndRecurseFlag = packed struct(u32) {
    route_index: Route.Index,
    /// Set true for layout
    should_recurse_when_visiting: bool,
};
|
|
|
|
/// Bake needs to specify which graph (client/server/ssr) each entry point is.
/// File paths are always absolute paths. Files may be bundled for multiple
/// targets.
pub const EntryPointList = struct {
    set: bun.StringArrayHashMapUnmanaged(Flags),

    pub const empty: EntryPointList = .{ .set = .{} };

    /// One bit per target graph; flags for the same path are OR-ed together
    /// in `append` so a file requested for several graphs has all its bits set.
    pub const Flags = packed struct(u8) {
        client: bool = false,
        server: bool = false,
        ssr: bool = false,
        /// When this is set, also set .client = true
        css: bool = false,

        unused: enum(u4) { unused = 0 } = .unused,
    };

    pub fn deinit(entry_points: *EntryPointList, allocator: std.mem.Allocator) void {
        entry_points.set.deinit(allocator);
    }

    /// Appends a JS entry point for the given graph side.
    pub fn appendJs(
        entry_points: *EntryPointList,
        allocator: std.mem.Allocator,
        abs_path: []const u8,
        side: bake.Graph,
    ) !void {
        return entry_points.append(allocator, abs_path, switch (side) {
            .server => .{ .server = true },
            .client => .{ .client = true },
            .ssr => .{ .ssr = true },
        });
    }

    /// Appends a CSS entry point (always bundled for the client).
    pub fn appendCss(entry_points: *EntryPointList, allocator: std.mem.Allocator, abs_path: []const u8) !void {
        return entry_points.append(allocator, abs_path, .{
            .client = true,
            .css = true,
        });
    }

    /// Deduplicates requests to bundle the same file twice.
    pub fn append(entry_points: *EntryPointList, allocator: std.mem.Allocator, abs_path: []const u8, flags: Flags) !void {
        const gop = try entry_points.set.getOrPut(allocator, abs_path);
        if (gop.found_existing) {
            // Merge flags bitwise so earlier targets are preserved.
            const T = @typeInfo(Flags).@"struct".backing_integer.?;
            gop.value_ptr.* = @bitCast(@as(T, @bitCast(gop.value_ptr.*)) | @as(T, @bitCast(flags)));
        } else {
            gop.value_ptr.* = flags;
        }
    }
};
|
|
|
|
/// This structure does not increment the reference count of its contents, as
/// the lifetime of them are all tied to the underlying Bun.serve instance.
const HTMLRouter = struct {
    map: Map,

    /// If a catch-all route exists, it is not stored in map, but here.
    fallback: ?*HTMLBundle.HTMLBundleRoute,

    pub const Map = bun.StringHashMapUnmanaged(*HTMLBundle.HTMLBundleRoute);
    pub const empty: HTMLRouter = .{
        .map = .empty,
        .fallback = null,
    };

    /// Exact-match lookup, falling back to the catch-all route (if any).
    pub fn get(router: *HTMLRouter, path: []const u8) ?*HTMLBundle.HTMLBundleRoute {
        return router.map.get(path) orelse router.fallback;
    }

    /// Registers a route; the special pattern "/*" installs the catch-all.
    pub fn put(router: *HTMLRouter, alloc: Allocator, path: []const u8, route: *HTMLBundle.HTMLBundleRoute) !void {
        if (bun.strings.eqlComptime(path, "/*")) {
            router.fallback = route;
        } else {
            try router.map.put(alloc, path, route);
        }
    }

    pub fn clear(router: *HTMLRouter) void {
        router.map.clearRetainingCapacity();
        router.fallback = null;
    }

    pub fn deinit(router: *HTMLRouter, alloc: Allocator) void {
        router.map.deinit(alloc);
    }
};
|
|
|
|
/// Inserts or replaces an asset in the asset store, keyed by path.
/// Takes the graph safety lock since the asset map is shared with bundling.
pub fn putOrOverwriteAsset(
    dev: *DevServer,
    path: *const bun.fs.Path,
    /// Ownership is transferred to this function.
    contents: *const AnyBlob,
    content_hash: u64,
) !void {
    dev.graph_safety_lock.lock();
    defer dev.graph_safety_lock.unlock();
    _ = try dev.assets.replacePath(path.text, contents, &.byExtension(path.name.extWithoutLeadingDot()), content_hash);
}
|
|
|
|
pub const Assets = @import("./DevServer/Assets.zig");
|
|
|
|
pub const SourceMapStore = @import("./DevServer/SourceMapStore.zig");
|
|
|
|
/// Callback for successful async plugin resolution: stores the plugin set and
/// resumes any bundle that was waiting on plugins.
pub fn onPluginsResolved(dev: *DevServer, plugins: ?*Plugin) !void {
    dev.bundler_options.plugin = plugins;
    dev.plugin_state = .loaded;
    dev.startNextBundleIfPresent();
}
|
|
|
|
/// Callback for failed async plugin resolution: aborts every queued request
/// since nothing can be bundled without the plugins.
pub fn onPluginsRejected(dev: *DevServer) !void {
    dev.plugin_state = .err;
    while (dev.next_bundle.requests.popFirst()) |item| {
        defer item.data.deref();
        item.data.abort();
    }
    dev.next_bundle.route_queue.clearRetainingCapacity();
    // TODO: allow recovery from this state
}
|
|
|
|
pub const ErrorReportRequest = @import("./DevServer/ErrorReportRequest.zig");
|
|
|
|
/// Problem statement documented on `script_unref_payload`
/// Takes 8 bytes: The generation ID in hex.
const UnrefSourceMapRequest = struct {
    dev: *DevServer,
    // Mixin that buffers the request body and invokes `runWithBody`/`finalize`.
    body: uws.BodyReaderMixin(@This(), "body", runWithBody, finalize),

    /// Entry point: allocates the request context and starts reading the body.
    fn run(dev: *DevServer, _: *Request, resp: anytype) void {
        const ctx = bun.new(UnrefSourceMapRequest, .{
            .dev = dev,
            .body = .init(dev.allocator),
        });
        // Keep the server alive while this request is pending; balanced in
        // `finalize`.
        ctx.dev.server.?.onPendingRequest();
        ctx.body.readBody(resp);
    }

    fn finalize(ctx: *UnrefSourceMapRequest) void {
        ctx.dev.server.?.onStaticRequestComplete();
        bun.destroy(ctx);
    }

    /// Parses the 8-hex-char generation ID and drops the corresponding weak
    /// source map reference. Responds 204 regardless of whether a ref existed.
    fn runWithBody(ctx: *UnrefSourceMapRequest, body: []const u8, r: AnyResponse) !void {
        if (body.len != 8) return error.InvalidRequest;
        var generation: u32 = undefined;
        _ = std.fmt.hexToBytes(std.mem.asBytes(&generation), body) catch
            return error.InvalidRequest;
        // The generation occupies the upper 32 bits of the store key.
        const source_map_key = SourceMapStore.Key.init(@as(u64, generation) << 32);
        _ = ctx.dev.source_maps.removeOrUpgradeWeakRef(source_map_key, .remove);
        r.writeStatus("204 No Content");
        r.end("", false);
    }
};
|
|
|
|
/// Reads a length-prefixed (little-endian u32) byte string from `reader`.
/// Caller owns the returned slice and must free it with `alloc`.
pub fn readString32(reader: anytype, alloc: Allocator) ![]const u8 {
    const byte_count = try reader.readInt(u32, .little);
    const buffer = try alloc.alloc(u8, byte_count);
    errdefer alloc.free(buffer);
    try reader.readNoEof(buffer);
    return buffer;
}
|
|
|
|
/// Accumulator used by `testing_batch_events`: merges several hot-reload
/// entry-point lists into one so tests can flush them in a single bundle.
const TestingBatch = struct {
    entry_points: EntryPointList,

    pub const empty: @This() = .{ .entry_points = .empty };

    /// Merges `entry_points` into the batch; flags for duplicate paths are
    /// OR-ed together by `EntryPointList.append`.
    pub fn append(self: *@This(), dev: *DevServer, entry_points: EntryPointList) !void {
        assert(entry_points.set.count() > 0);
        for (entry_points.set.keys(), entry_points.set.values()) |k, v| {
            try self.entry_points.append(dev.allocator, k, v);
        }
    }
};
|
|
|
|
/// `test/bake/deinitialization.test.ts` checks for this as well as all tests
/// using the dev server test harness (test/bake/bake-harness.ts)
///
/// In debug builds, this will also assert that AllocationScope.deinit was
/// called, validating that all Dev Server tests have no leaks.
var dev_server_deinit_count_for_testing: usize = 0;
/// Exposed to the test harness; returns how many DevServers have fully
/// deinitialized over the lifetime of this process.
pub fn getDeinitCountForTesting() usize {
    return dev_server_deinit_count_for_testing;
}
|
|
|
|
const bun = @import("bun");
|
|
const AllocationScope = bun.AllocationScope;
|
|
const Environment = bun.Environment;
|
|
const Output = bun.Output;
|
|
const SourceMap = bun.sourcemap;
|
|
const Watcher = bun.Watcher;
|
|
const assert = bun.assert;
|
|
const bake = bun.bake;
|
|
const DynamicBitSetUnmanaged = bun.bit_set.DynamicBitSetUnmanaged;
|
|
const Log = bun.logger.Log;
|
|
const MimeType = bun.http.MimeType;
|
|
const ThreadLocalArena = bun.allocators.MimallocArena;
|
|
const Transpiler = bun.transpiler.Transpiler;
|
|
const EventLoopTimer = bun.api.Timer.EventLoopTimer;
|
|
const StaticRoute = bun.api.server.StaticRoute;
|
|
|
|
const FrameworkRouter = bake.FrameworkRouter;
|
|
const OpaqueFileId = FrameworkRouter.OpaqueFileId;
|
|
const Route = FrameworkRouter.Route;
|
|
|
|
const BundleV2 = bun.bundle_v2.BundleV2;
|
|
const Chunk = bun.bundle_v2.Chunk;
|
|
const ContentHasher = bun.bundle_v2.ContentHasher;
|
|
|
|
const jsc = bun.jsc;
|
|
const JSValue = jsc.JSValue;
|
|
const VirtualMachine = jsc.VirtualMachine;
|
|
const HTMLBundle = jsc.API.HTMLBundle;
|
|
const AnyBlob = jsc.WebCore.Blob.Any;
|
|
const Plugin = jsc.API.JSBundler.Plugin;
|
|
|
|
const BunFrontendDevServerAgent = jsc.Debugger.BunFrontendDevServerAgent;
|
|
const DebuggerId = jsc.Debugger.DebuggerId;
|
|
|
|
const VoidFieldTypes = bun.meta.VoidFieldTypes;
|
|
const voidFieldTypeDiscardHelper = bun.meta.voidFieldTypeDiscardHelper;
|
|
|
|
const uws = bun.uws;
|
|
const AnyResponse = bun.uws.AnyResponse;
|
|
const Request = uws.Request;
|
|
|
|
const std = @import("std");
|
|
const ArrayListUnmanaged = std.ArrayListUnmanaged;
|
|
const AutoArrayHashMapUnmanaged = std.AutoArrayHashMapUnmanaged;
|
|
const Allocator = std.mem.Allocator;
|