Split DevServer.zig into multiple files (#21299)

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Zack Radisic
2025-07-22 22:29:22 -07:00
committed by GitHub
parent 07cd45deae
commit 71e2161591
15 changed files with 4794 additions and 4553 deletions

View File

@@ -36,6 +36,18 @@ src/async/stub_event_loop.zig
src/async/windows_event_loop.zig
src/bake.zig
src/bake/DevServer.zig
src/bake/DevServer/Assets.zig
src/bake/DevServer/DirectoryWatchStore.zig
src/bake/DevServer/ErrorReportRequest.zig
src/bake/DevServer/HmrSocket.zig
src/bake/DevServer/HotReloadEvent.zig
src/bake/DevServer/IncrementalGraph.zig
src/bake/DevServer/memory_cost.zig
src/bake/DevServer/PackedMap.zig
src/bake/DevServer/RouteBundle.zig
src/bake/DevServer/SerializedFailure.zig
src/bake/DevServer/SourceMapStore.zig
src/bake/DevServer/WatcherAtomics.zig
src/bake/FrameworkRouter.zig
src/bake/production.zig
src/base64/base64.zig

File diff suppressed because it is too large

View File

@@ -0,0 +1,181 @@
/// Storage for hashed assets on `/_bun/asset/{hash}.ext`
pub const Assets = @This();
/// Keys are absolute paths, sharing memory with the keys in IncrementalGraph(.client)
/// Values are indexes into `files`.
path_map: bun.StringArrayHashMapUnmanaged(EntryIndex),
/// Content-addressable store. Multiple paths can point to the same content
/// hash; reference counts are tracked in the parallel `refs` array. One
/// reference is held on each contained StaticRoute instance while it is stored.
files: AutoArrayHashMapUnmanaged(u64, *StaticRoute),
/// Indexed in parallel with `files`. The value is never `0`.
refs: ArrayListUnmanaged(u32),
/// After mutating `files`' keys in place, the map must be re-indexed before
/// lookups work again.
needs_reindex: bool = false,
pub const EntryIndex = bun.GenericIndex(u30, Assets);
fn owner(assets: *Assets) *DevServer {
return @alignCast(@fieldParentPtr("assets", assets));
}
pub fn getHash(assets: *Assets, path: []const u8) ?u64 {
assert(assets.owner().magic == .valid);
return if (assets.path_map.get(path)) |idx|
assets.files.keys()[idx.get()]
else
null;
}
/// When an asset is overwritten, it receives a new URL to get around browser caching.
/// The old URL is immediately revoked.
pub fn replacePath(
assets: *Assets,
/// not allocated
abs_path: []const u8,
/// Ownership is transferred to this function
contents: *const AnyBlob,
mime_type: *const MimeType,
/// content hash of the asset
content_hash: u64,
) !EntryIndex {
assert(assets.owner().magic == .valid);
defer assert(assets.files.count() == assets.refs.items.len);
const alloc = assets.owner().allocator;
debug.log("replacePath {} {} - {s}/{s} ({s})", .{
bun.fmt.quote(abs_path),
content_hash,
DevServer.asset_prefix,
&std.fmt.bytesToHex(std.mem.asBytes(&content_hash), .lower),
mime_type.value,
});
const gop = try assets.path_map.getOrPut(alloc, abs_path);
if (!gop.found_existing) {
// Locate a stable pointer for the file path
const stable_abs_path = (try assets.owner().client_graph.insertEmpty(abs_path, .unknown)).key;
gop.key_ptr.* = stable_abs_path;
} else {
const entry_index = gop.value_ptr.*;
// When there is one reference to the asset, the entry can be
// replaced in-place with the new asset.
if (assets.refs.items[entry_index.get()] == 1) {
const slice = assets.files.entries.slice();
const prev = slice.items(.value)[entry_index.get()];
prev.deref();
slice.items(.key)[entry_index.get()] = content_hash;
slice.items(.value)[entry_index.get()] = StaticRoute.initFromAnyBlob(contents, .{
.mime_type = mime_type,
.server = assets.owner().server orelse unreachable,
});
comptime assert(@TypeOf(slice.items(.hash)[0]) == void);
assets.needs_reindex = true;
return entry_index;
} else {
assets.refs.items[entry_index.get()] -= 1;
assert(assets.refs.items[entry_index.get()] > 0);
}
}
try assets.reindexIfNeeded(alloc);
const file_index_gop = try assets.files.getOrPut(alloc, content_hash);
if (!file_index_gop.found_existing) {
try assets.refs.append(alloc, 1);
file_index_gop.value_ptr.* = StaticRoute.initFromAnyBlob(contents, .{
.mime_type = mime_type,
.server = assets.owner().server orelse unreachable,
});
} else {
assets.refs.items[file_index_gop.index] += 1;
var contents_mut = contents.*;
contents_mut.detach();
}
gop.value_ptr.* = .init(@intCast(file_index_gop.index));
return gop.value_ptr.*;
}
/// Returns a pointer at which to insert the `*StaticRoute`. A `null` return
/// means content for this hash already exists; its reference count was incremented.
pub fn putOrIncrementRefCount(assets: *Assets, content_hash: u64, ref_count: u32) !?**StaticRoute {
defer assert(assets.files.count() == assets.refs.items.len);
const file_index_gop = try assets.files.getOrPut(assets.owner().allocator, content_hash);
if (!file_index_gop.found_existing) {
try assets.refs.append(assets.owner().allocator, ref_count);
return file_index_gop.value_ptr;
} else {
assets.refs.items[file_index_gop.index] += ref_count;
return null;
}
}
pub fn unrefByHash(assets: *Assets, content_hash: u64, dec_count: u32) void {
const index = assets.files.getIndex(content_hash) orelse
Output.panic("Asset double unref: {s}", .{std.fmt.fmtSliceHexLower(std.mem.asBytes(&content_hash))});
assets.unrefByIndex(.init(@intCast(index)), dec_count);
}
pub fn unrefByIndex(assets: *Assets, index: EntryIndex, dec_count: u32) void {
defer assert(assets.files.count() == assets.refs.items.len);
assert(dec_count > 0);
assets.refs.items[index.get()] -= dec_count;
if (assets.refs.items[index.get()] == 0) {
assets.files.values()[index.get()].deref();
assets.files.swapRemoveAt(index.get());
_ = assets.refs.swapRemove(index.get());
}
}
pub fn unrefByPath(assets: *Assets, path: []const u8) void {
const entry = assets.path_map.fetchSwapRemove(path) orelse return;
assets.unrefByIndex(entry.value, 1);
}
pub fn reindexIfNeeded(assets: *Assets, alloc: Allocator) !void {
if (assets.needs_reindex) {
try assets.files.reIndex(alloc);
assets.needs_reindex = false;
}
}
pub fn get(assets: *Assets, content_hash: u64) ?*StaticRoute {
assert(assets.owner().magic == .valid);
assert(assets.files.count() == assets.refs.items.len);
return assets.files.get(content_hash);
}
pub fn deinit(assets: *Assets, alloc: Allocator) void {
assets.path_map.deinit(alloc);
for (assets.files.values()) |blob| blob.deref();
assets.files.deinit(alloc);
assets.refs.deinit(alloc);
}
pub fn memoryCost(assets: *Assets) usize {
var cost: usize = 0;
cost += memoryCostArrayHashMap(assets.path_map);
for (assets.files.values()) |blob| cost += blob.memoryCost();
cost += memoryCostArrayHashMap(assets.files);
cost += memoryCostArrayList(assets.refs);
return cost;
}
const bun = @import("bun");
const Output = bun.Output;
const assert = bun.assert;
const bake = bun.bake;
const jsc = bun.jsc;
const MimeType = bun.http.MimeType;
const StaticRoute = bun.api.server.StaticRoute;
const AnyBlob = jsc.WebCore.Blob.Any;
const DevServer = bake.DevServer;
const debug = DevServer.debug;
const memoryCostArrayHashMap = DevServer.memoryCostArrayHashMap;
const memoryCostArrayList = DevServer.memoryCostArrayList;
const std = @import("std");
const ArrayListUnmanaged = std.ArrayListUnmanaged;
const AutoArrayHashMapUnmanaged = std.AutoArrayHashMapUnmanaged;
const Allocator = std.mem.Allocator;
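
The refcounting contract above is subtle: `putOrIncrementRefCount` returns `null` when content for a hash already exists, and the last unref removes the entry. Below is a minimal, self-contained sketch of the same content-addressed pattern; `MiniAssets` is illustrative only, since the real store maps hashes to `*StaticRoute` and keeps counts in the parallel `refs` array.

const std = @import("std");

const MiniAssets = struct {
    // hash -> reference count; the real store keys `*StaticRoute` values
    refs: std.AutoArrayHashMapUnmanaged(u64, u32) = .{},

    /// Returns true when the caller must store new content for this hash.
    fn ref(self: *MiniAssets, alloc: std.mem.Allocator, hash: u64) !bool {
        const gop = try self.refs.getOrPut(alloc, hash);
        if (gop.found_existing) {
            gop.value_ptr.* += 1;
            return false; // content already stored; just share it
        }
        gop.value_ptr.* = 1;
        return true;
    }

    fn unref(self: *MiniAssets, hash: u64) void {
        const idx = self.refs.getIndex(hash).?; // a double unref would panic here
        const count = &self.refs.values()[idx];
        count.* -= 1;
        if (count.* == 0) self.refs.swapRemoveAt(idx);
    }
};

test "two paths sharing one content hash" {
    const alloc = std.testing.allocator;
    var assets: MiniAssets = .{};
    defer assets.refs.deinit(alloc);
    try std.testing.expect(try assets.ref(alloc, 0xabc)); // first path stores content
    try std.testing.expect(!try assets.ref(alloc, 0xabc)); // second path shares it
    assets.unref(0xabc);
    assets.unref(0xabc); // last unref removes the entry
    try std.testing.expectEqual(@as(usize, 0), assets.refs.count());
}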

View File

@@ -0,0 +1,268 @@
/// When a file fails to import a relative path, directory watchers are added so
/// that when a matching file is created, the dependencies can be rebuilt. This
/// handles HMR cases where a user writes an import before creating the file,
/// or moves files around. This structure is not thread-safe.
///
/// This structure manages those watchers, including releasing them once
/// import resolution failures are solved.
// TODO: when a file fixes its resolution, there is no code specifically to remove the watchers.
const DirectoryWatchStore = @This();
/// List of active watchers. Can be re-ordered on removal.
watches: bun.StringArrayHashMapUnmanaged(Entry),
dependencies: ArrayListUnmanaged(Dep),
/// Dependencies cannot be re-ordered. This list tracks what indexes are free.
dependencies_free_list: ArrayListUnmanaged(Dep.Index),
pub const empty: DirectoryWatchStore = .{
.watches = .{},
.dependencies = .{},
.dependencies_free_list = .{},
};
pub fn owner(store: *DirectoryWatchStore) *DevServer {
return @alignCast(@fieldParentPtr("directory_watchers", store));
}
pub fn trackResolutionFailure(store: *DirectoryWatchStore, import_source: []const u8, specifier: []const u8, renderer: bake.Graph, loader: bun.options.Loader) bun.OOM!void {
// When it does not resolve to a file path, there is nothing to track.
if (specifier.len == 0) return;
if (!std.fs.path.isAbsolute(import_source)) return;
switch (loader) {
.tsx, .ts, .jsx, .js => {
if (!(bun.strings.startsWith(specifier, "./") or
bun.strings.startsWith(specifier, "../"))) return;
},
// Imports in CSS can resolve to relative files without './'
// Imports in HTML can resolve to project-relative paths by
// prefixing with '/', but that is done in HTMLScanner.
.css, .html => {},
// Multiple parts of DevServer rely on the fact that these
// loaders do not depend on importing other files.
.file,
.json,
.jsonc,
.toml,
.wasm,
.napi,
.base64,
.dataurl,
.text,
.bunsh,
.sqlite,
.sqlite_embedded,
=> bun.debugAssert(false),
}
const buf = bun.path_buffer_pool.get();
defer bun.path_buffer_pool.put(buf);
const joined = bun.path.joinAbsStringBuf(bun.path.dirname(import_source, .auto), buf, &.{specifier}, .auto);
const dir = bun.path.dirname(joined, .auto);
// The `import_source` parameter is not a stable string. Since the
// import source will be added to IncrementalGraph anyway, this is a
// good place to share memory.
const dev = store.owner();
dev.graph_safety_lock.lock();
defer dev.graph_safety_lock.unlock();
const owned_file_path = switch (renderer) {
.client => (try dev.client_graph.insertEmpty(import_source, .unknown)).key,
.server, .ssr => (try dev.server_graph.insertEmpty(import_source, .unknown)).key,
};
store.insert(dir, owned_file_path, specifier) catch |err| switch (err) {
error.Ignore => {}, // ignoring watch errors.
error.OutOfMemory => |e| return e,
};
}
/// `dir_name_to_watch` is cloned
/// `file_path` must have lifetime that outlives the watch
/// `specifier` is cloned
fn insert(
store: *DirectoryWatchStore,
dir_name_to_watch: []const u8,
file_path: []const u8,
specifier: []const u8,
) !void {
assert(specifier.len > 0);
// TODO: watch the parent dir too.
const dev = store.owner();
debug.log("DirectoryWatchStore.insert({}, {}, {})", .{
bun.fmt.quote(dir_name_to_watch),
bun.fmt.quote(file_path),
bun.fmt.quote(specifier),
});
if (store.dependencies_free_list.items.len == 0)
try store.dependencies.ensureUnusedCapacity(dev.allocator, 1);
const gop = try store.watches.getOrPut(dev.allocator, bun.strings.withoutTrailingSlashWindowsPath(dir_name_to_watch));
const specifier_cloned = if (specifier[0] == '.' or std.fs.path.isAbsolute(specifier))
try dev.allocator.dupe(u8, specifier)
else
try std.fmt.allocPrint(dev.allocator, "./{s}", .{specifier});
errdefer dev.allocator.free(specifier_cloned);
if (gop.found_existing) {
const dep = store.appendDepAssumeCapacity(.{
.next = gop.value_ptr.first_dep.toOptional(),
.source_file_path = file_path,
.specifier = specifier_cloned,
});
gop.value_ptr.first_dep = dep;
return;
}
errdefer store.watches.swapRemoveAt(gop.index);
// Try to use an existing open directory handle
const cache_fd = if (dev.server_transpiler.resolver.readDirInfo(dir_name_to_watch) catch null) |cache|
cache.getFileDescriptor().unwrapValid()
else
null;
const fd, const owned_fd = if (Watcher.requires_file_descriptors) if (cache_fd) |fd|
.{ fd, false }
else switch (bun.sys.open(
&(std.posix.toPosixPath(dir_name_to_watch) catch |err| switch (err) {
error.NameTooLong => return error.Ignore, // wouldn't be able to open, ignore
}),
// O_EVTONLY indicates the descriptor is only used for event notifications (watches).
bun.O.DIRECTORY | bun.c.O_EVTONLY,
0,
)) {
.result => |fd| .{ fd, true },
.err => |err| switch (err.getErrno()) {
// If this directory doesn't exist, a watcher should be placed
// on the parent directory. Then, if this directory is later
// created, the watcher can be properly initialized. For example,
// given a specifier like `./dir/whatever/hello.tsx` where `dir`
// does not exist, Bun must place a watcher on `.` to see the
// creation of `dir`, and repeat until it can open a watcher on
// `whatever` to see the creation of `hello.tsx`.
.NOENT => {
// TODO: implement that. for now it ignores (BUN-10968)
return error.Ignore;
},
.NOTDIR => return error.Ignore, // ignore
else => {
bun.todoPanic(@src(), "log watcher error", .{});
},
},
} else .{ bun.invalid_fd, false };
errdefer if (Watcher.requires_file_descriptors) if (owned_fd) fd.close();
if (Watcher.requires_file_descriptors)
debug.log("-> fd: {} ({s})", .{
fd,
if (owned_fd) "owned fd" else "from dir cache",
});
const dir_name = try dev.allocator.dupe(u8, dir_name_to_watch);
errdefer dev.allocator.free(dir_name);
gop.key_ptr.* = bun.strings.withoutTrailingSlashWindowsPath(dir_name);
const watch_index = switch (dev.bun_watcher.addDirectory(fd, dir_name, bun.Watcher.getHash(dir_name), false)) {
.err => return error.Ignore,
.result => |id| id,
};
const dep = store.appendDepAssumeCapacity(.{
.next = .none,
.source_file_path = file_path,
.specifier = specifier_cloned,
});
store.watches.putAssumeCapacity(dir_name, .{
.dir = fd,
.dir_fd_owned = owned_fd,
.first_dep = dep,
.watch_index = watch_index,
});
}
/// Caller must detach the dependency from the linked list it is in.
pub fn freeDependencyIndex(store: *DirectoryWatchStore, alloc: Allocator, index: Dep.Index) !void {
alloc.free(store.dependencies.items[index.get()].specifier);
if (Environment.isDebug) {
store.dependencies.items[index.get()] = undefined;
}
if (index.get() == (store.dependencies.items.len - 1)) {
store.dependencies.items.len -= 1;
} else {
try store.dependencies_free_list.append(alloc, index);
}
}
/// Expects dependency list to be already freed
pub fn freeEntry(store: *DirectoryWatchStore, alloc: Allocator, entry_index: usize) void {
const entry = store.watches.values()[entry_index];
debug.log("DirectoryWatchStore.freeEntry({d}, {})", .{
entry_index,
entry.dir,
});
store.owner().bun_watcher.removeAtIndex(entry.watch_index, 0, &.{}, .file);
defer if (entry.dir_fd_owned) entry.dir.close();
alloc.free(store.watches.keys()[entry_index]);
store.watches.swapRemoveAt(entry_index);
if (store.watches.entries.len == 0) {
assert(store.dependencies.items.len == 0);
store.dependencies_free_list.clearRetainingCapacity();
}
}
fn appendDepAssumeCapacity(store: *DirectoryWatchStore, dep: Dep) Dep.Index {
if (store.dependencies_free_list.pop()) |index| {
store.dependencies.items[index.get()] = dep;
return index;
}
const index = Dep.Index.init(@intCast(store.dependencies.items.len));
store.dependencies.appendAssumeCapacity(dep);
return index;
}
pub const Entry = struct {
/// The directory handle the watch is placed on
dir: bun.FileDescriptor,
dir_fd_owned: bool,
/// Head of the linked list of files that depend on this directory
first_dep: Dep.Index,
/// To pass to Watcher.remove
watch_index: u16,
};
pub const Dep = struct {
next: Index.Optional,
/// The file that contains the failing import.
source_file_path: []const u8,
/// The specifier that failed to resolve. It is re-resolved before a
/// rebuild runs, since creating an unrelated file should not re-emit
/// the error. Allocated memory.
specifier: []u8,
pub const Index = bun.GenericIndex(u32, Dep);
};
const bun = @import("bun");
const Environment = bun.Environment;
const Watcher = bun.Watcher;
const assert = bun.assert;
const bake = bun.bake;
const DevServer = bake.DevServer;
const debug = DevServer.debug;
const std = @import("std");
const ArrayListUnmanaged = std.ArrayListUnmanaged;
const Allocator = std.mem.Allocator;
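
The `Dep` storage above is a pool with stable indices: `freeDependencyIndex` pushes a slot onto `dependencies_free_list`, and `appendDepAssumeCapacity` pops from it first, so `Dep.Index` values held in linked lists never move. A minimal sketch of that free-list pattern, with illustrative names:

const std = @import("std");

const DepPool = struct {
    const Dep = struct { next: ?u32, payload: u8 };

    items: std.ArrayListUnmanaged(Dep) = .{},
    free: std.ArrayListUnmanaged(u32) = .{},

    fn append(p: *DepPool, alloc: std.mem.Allocator, dep: Dep) !u32 {
        if (p.free.pop()) |idx| { // reuse a freed slot first
            p.items.items[idx] = dep;
            return idx;
        }
        const idx: u32 = @intCast(p.items.items.len);
        try p.items.append(alloc, dep);
        return idx;
    }

    fn remove(p: *DepPool, alloc: std.mem.Allocator, idx: u32) !void {
        if (idx == p.items.items.len - 1) {
            p.items.items.len -= 1; // shrink when it is the last slot
        } else {
            try p.free.append(alloc, idx); // otherwise remember the hole
        }
    }
};

test DepPool {
    const alloc = std.testing.allocator;
    var pool: DepPool = .{};
    defer pool.items.deinit(alloc);
    defer pool.free.deinit(alloc);
    const a = try pool.append(alloc, .{ .next = null, .payload = 1 });
    const b = try pool.append(alloc, .{ .next = a, .payload = 2 });
    try pool.remove(alloc, a);
    const c = try pool.append(alloc, .{ .next = null, .payload = 3 });
    try std.testing.expectEqual(a, c); // slot was reused; b is untouched
    _ = b;
}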

View File

@@ -0,0 +1,389 @@
/// Fetched when a client-side error happens. This performs two actions
/// - Logs the remapped stack trace to the console.
/// - Replies with the remapped stack trace.
/// Payload:
/// - `u32`: Responding message ID (echoed back)
/// - `u32`: Length of message
/// - `[n]u8`: Message
/// - `u32`: Length of error name
/// - `[n]u8`: Error name
/// - `u32`: Number of stack frames. For each frame:
/// - `u32`: Line number (0 for unavailable)
/// - `u32`: Column number (0 for unavailable)
/// - `u32`: Length of file name (0 for unavailable)
/// - `[n]u8`: File name
/// - `u32`: Length of function name (0 for unavailable)
/// - `[n]u8`: Function name
const ErrorReportRequest = @This();
dev: *DevServer,
body: uws.BodyReaderMixin(@This(), "body", runWithBody, finalize),
pub fn run(dev: *DevServer, _: *Request, resp: anytype) void {
const ctx = bun.new(ErrorReportRequest, .{
.dev = dev,
.body = .init(dev.allocator),
});
ctx.dev.server.?.onPendingRequest();
ctx.body.readBody(resp);
}
pub fn finalize(ctx: *ErrorReportRequest) void {
ctx.dev.server.?.onStaticRequestComplete();
bun.destroy(ctx);
}
pub fn runWithBody(ctx: *ErrorReportRequest, body: []const u8, r: AnyResponse) !void {
// .finalize has to be called last, but only in the non-error path.
var should_finalize_self = false;
defer if (should_finalize_self) ctx.finalize();
var s = std.io.fixedBufferStream(body);
const reader = s.reader();
var sfa_general = std.heap.stackFallback(65536, ctx.dev.allocator);
var sfa_sourcemap = std.heap.stackFallback(65536, ctx.dev.allocator);
const temp_alloc = sfa_general.get();
var arena = std.heap.ArenaAllocator.init(temp_alloc);
defer arena.deinit();
var source_map_arena = std.heap.ArenaAllocator.init(sfa_sourcemap.get());
defer source_map_arena.deinit();
// Read payload, assemble ZigException
const name = try readString32(reader, temp_alloc);
defer temp_alloc.free(name);
const message = try readString32(reader, temp_alloc);
defer temp_alloc.free(message);
const browser_url = try readString32(reader, temp_alloc);
defer temp_alloc.free(browser_url);
var frames: ArrayListUnmanaged(jsc.ZigStackFrame) = .empty;
defer frames.deinit(temp_alloc);
const stack_count = @min(try reader.readInt(u32, .little), 255); // does not support more than 255
try frames.ensureTotalCapacity(temp_alloc, stack_count);
for (0..stack_count) |_| {
const line = try reader.readInt(i32, .little);
const column = try reader.readInt(i32, .little);
const function_name = try readString32(reader, temp_alloc);
const file_name = try readString32(reader, temp_alloc);
frames.appendAssumeCapacity(.{
.function_name = .init(function_name),
.source_url = .init(file_name),
.position = if (line > 0) .{
.line = .fromOneBased(line + 1),
.column = .fromOneBased(@max(1, column)),
.line_start_byte = 0,
} else .{
.line = .invalid,
.column = .invalid,
.line_start_byte = 0,
},
.code_type = .None,
.remapped = false,
});
}
const runtime_name = "Bun HMR Runtime";
const browser_url_origin = bun.jsc.URL.originFromSlice(browser_url) orelse browser_url;
// All files that DevServer could provide a source map fit the pattern:
// `/_bun/client/<label>-{u64}.js`
// Where the u64 is a unique identifier pointing into sourcemaps.
//
// HMR chunks use this too, but currently do not host their JS code.
var parsed_source_maps: AutoArrayHashMapUnmanaged(SourceMapStore.Key, ?SourceMapStore.GetResult) = .empty;
try parsed_source_maps.ensureTotalCapacity(temp_alloc, 4);
defer for (parsed_source_maps.values()) |*value| {
if (value.*) |*v| v.deinit(temp_alloc);
};
var runtime_lines: ?[5][]const u8 = null;
var first_line_of_interest: usize = 0;
var top_frame_position: jsc.ZigStackFramePosition = undefined;
var region_of_interest_line: u32 = 0;
for (frames.items) |*frame| {
const source_url = frame.source_url.value.ZigString.slice();
// The browser code strips the origin (e.g. "http://localhost:3000")
// when the URL contains /_bun/client, since JS can refer to `location`.
const id = parseId(source_url, browser_url_origin) orelse continue;
// Get and cache the parsed source map
const gop = try parsed_source_maps.getOrPut(temp_alloc, id);
if (!gop.found_existing) {
defer _ = source_map_arena.reset(.retain_capacity);
const psm = ctx.dev.source_maps.getParsedSourceMap(
id,
source_map_arena.allocator(), // arena for parsing
temp_alloc, // store results into first arena
) orelse {
Output.debugWarn("Failed to find mapping for {s}, {d}", .{ source_url, id.get() });
gop.value_ptr.* = null;
continue;
};
gop.value_ptr.* = psm;
}
const result: *const SourceMapStore.GetResult = &(gop.value_ptr.* orelse continue);
// When the frame is before the first generated line, remap it to the HMR runtime.
const generated_mappings = result.mappings.generated();
if (frame.position.line.oneBased() < generated_mappings[1].lines) {
frame.source_url = .init(runtime_name); // matches value in source map
frame.position = .invalid;
continue;
}
// Remap the frame
const remapped = result.mappings.find(
frame.position.line.oneBased(),
frame.position.column.zeroBased(),
);
if (remapped) |remapped_position| {
frame.position = .{
.line = .fromZeroBased(remapped_position.originalLine()),
.column = .fromZeroBased(remapped_position.originalColumn()),
.line_start_byte = 0,
};
const index = remapped_position.source_index;
if (index >= 1 and (index - 1) < result.file_paths.len) {
const abs_path = result.file_paths[@intCast(index - 1)];
frame.source_url = .init(abs_path);
const rel_path = ctx.dev.relativePath(abs_path);
defer ctx.dev.releaseRelativePathBuf();
if (bun.strings.eql(frame.function_name.value.ZigString.slice(), rel_path)) {
frame.function_name = .empty;
}
frame.remapped = true;
if (runtime_lines == null) {
const file = result.entry_files.get(@intCast(index - 1));
if (file != .empty) {
const json_encoded_source_code = file.ref.data.quotedContents();
// First line of interest is two above the target line.
const target_line = @as(usize, @intCast(frame.position.line.zeroBased()));
first_line_of_interest = target_line -| 2;
region_of_interest_line = @intCast(target_line - first_line_of_interest);
runtime_lines = try extractJsonEncodedSourceCode(
json_encoded_source_code,
@intCast(first_line_of_interest),
5,
arena.allocator(),
);
top_frame_position = frame.position;
}
}
} else if (index == 0) {
// Should be picked up by above but just in case.
frame.source_url = .init(runtime_name);
frame.position = .invalid;
}
}
}
// Stack traces can often end with random runtime frames that are not relevant.
trim_runtime_frames: {
// Ensure that trimming will not remove ALL frames.
for (frames.items) |frame| {
if (!frame.position.isInvalid() or frame.source_url.value.ZigString.slice().ptr != runtime_name) {
break;
}
} else break :trim_runtime_frames;
// Move all frames up
var i: usize = 0;
for (frames.items[i..]) |frame| {
if (frame.position.isInvalid() and frame.source_url.value.ZigString.slice().ptr == runtime_name) {
continue; // skip runtime frames
}
frames.items[i] = frame;
i += 1;
}
frames.items.len = i;
}
var exception: jsc.ZigException = .{
.type = .Error,
.runtime_type = .Nothing,
.name = .init(name),
.message = .init(message),
.stack = .fromFrames(frames.items),
.exception = null,
.remapped = false,
.browser_url = .init(browser_url),
};
const stderr = Output.errorWriterBuffered();
defer Output.flush();
switch (Output.enable_ansi_colors_stderr) {
inline else => |ansi_colors| ctx.dev.vm.printExternallyRemappedZigException(
&exception,
null,
@TypeOf(stderr),
stderr,
true,
ansi_colors,
) catch {},
}
var out: std.ArrayList(u8) = .init(ctx.dev.allocator);
errdefer out.deinit();
const w = out.writer();
try w.writeInt(u32, exception.stack.frames_len, .little);
for (exception.stack.frames()) |frame| {
try w.writeInt(i32, frame.position.line.oneBased(), .little);
try w.writeInt(i32, frame.position.column.oneBased(), .little);
const function_name = frame.function_name.value.ZigString.slice();
try w.writeInt(u32, @intCast(function_name.len), .little);
try w.writeAll(function_name);
const src_to_write = frame.source_url.value.ZigString.slice();
if (bun.strings.hasPrefixComptime(src_to_write, "/")) {
const file = ctx.dev.relativePath(src_to_write);
defer ctx.dev.releaseRelativePathBuf();
try w.writeInt(u32, @intCast(file.len), .little);
try w.writeAll(file);
} else {
try w.writeInt(u32, @intCast(src_to_write.len), .little);
try w.writeAll(src_to_write);
}
}
if (runtime_lines) |*lines| {
// trim empty lines
var adjusted_lines: [][]const u8 = lines;
while (adjusted_lines.len > 0 and adjusted_lines[0].len == 0) {
adjusted_lines = adjusted_lines[1..];
region_of_interest_line -|= 1;
first_line_of_interest += 1;
}
while (adjusted_lines.len > 0 and adjusted_lines[adjusted_lines.len - 1].len == 0) {
adjusted_lines.len -= 1;
}
try w.writeInt(u8, @intCast(adjusted_lines.len), .little);
try w.writeInt(u32, @intCast(region_of_interest_line), .little);
try w.writeInt(u32, @intCast(first_line_of_interest + 1), .little);
try w.writeInt(u32, @intCast(top_frame_position.column.oneBased()), .little);
for (adjusted_lines) |line| {
try w.writeInt(u32, @intCast(line.len), .little);
try w.writeAll(line);
}
} else {
try w.writeInt(u8, 0, .little);
}
StaticRoute.sendBlobThenDeinit(r, &.fromArrayList(out), .{
.mime_type = &.other,
.server = ctx.dev.server.?,
});
should_finalize_self = true;
}
pub fn parseId(source_url: []const u8, browser_url: []const u8) ?SourceMapStore.Key {
if (!bun.strings.startsWith(source_url, browser_url))
return null;
const after_host = source_url[bun.strings.withoutTrailingSlash(browser_url).len..];
if (!bun.strings.hasPrefixComptime(after_host, client_prefix ++ "/"))
return null;
const after_prefix = after_host[client_prefix.len + 1 ..];
// Extract the ID
if (!bun.strings.hasSuffixComptime(after_prefix, ".js"))
return null;
const min_len = "00000000FFFFFFFF.js".len;
if (after_prefix.len < min_len)
return null;
const hex = after_prefix[after_prefix.len - min_len ..][0 .. @sizeOf(u64) * 2];
if (hex.len != @sizeOf(u64) * 2)
return null;
return .init(DevServer.parseHexToInt(u64, hex) orelse
return null);
}
/// Instead of decoding the entire file, just decode the desired section.
fn extractJsonEncodedSourceCode(contents: []const u8, target_line: u32, comptime n: usize, arena: Allocator) !?[n][]const u8 {
var line: usize = 0;
var prev: usize = 0;
const index_of_first_line = if (target_line == 0)
0 // no iteration needed
else while (bun.strings.indexOfCharPos(contents, '\\', prev)) |i| : (prev = i + 2) {
if (i >= contents.len - 2) return null;
// Bun's JSON printer will not use a sillier encoding for newline.
if (contents[i + 1] == 'n') {
line += 1;
if (line == target_line)
break i + 2;
}
} else return null;
var rest = contents[index_of_first_line..];
// For decoding JSON escapes, the JS Lexer decoding function has
// `decodeEscapeSequences`, which only supports decoding to UTF-16.
// Alternatively, it appears the TOML lexer has copied this exact
// function but for UTF-8. So the decoder can just use that.
//
// This function expects but does not assume the escape sequences
// given are valid, and does not bubble errors up.
var log = Log.init(arena);
var l: bun.interchange.toml.Lexer = .{
.log = &log,
.source = .initEmptyFile(""),
.allocator = arena,
.should_redact_logs = false,
.prev_error_loc = .Empty,
};
defer log.deinit();
var result: [n][]const u8 = .{""} ** n;
for (&result) |*decoded_line| {
var has_extra_escapes = false;
prev = 0;
// Locate the line slice
const end_of_line = while (bun.strings.indexOfCharPos(rest, '\\', prev)) |i| : (prev = i + 2) {
if (i >= rest.len - 1) return null;
if (rest[i + 1] == 'n') {
break i;
}
has_extra_escapes = true;
} else rest.len;
const encoded_line = rest[0..end_of_line];
// Decode it
if (has_extra_escapes) {
var bytes: std.ArrayList(u8) = try .initCapacity(arena, encoded_line.len);
try l.decodeEscapeSequences(0, encoded_line, false, std.ArrayList(u8), &bytes);
decoded_line.* = bytes.items;
} else {
decoded_line.* = encoded_line;
}
if (end_of_line + 2 >= rest.len) break;
rest = rest[end_of_line + 2 ..];
}
return result;
}
const bun = @import("bun");
const Output = bun.Output;
const bake = bun.bake;
const jsc = bun.jsc;
const Log = bun.logger.Log;
const StaticRoute = bun.api.server.StaticRoute;
const DevServer = bake.DevServer;
const SourceMapStore = DevServer.SourceMapStore;
const client_prefix = DevServer.client_prefix;
const readString32 = DevServer.readString32;
const uws = bun.uws;
const AnyResponse = bun.uws.AnyResponse;
const Request = uws.Request;
const std = @import("std");
const ArrayListUnmanaged = std.ArrayListUnmanaged;
const AutoArrayHashMapUnmanaged = std.AutoArrayHashMapUnmanaged;
const Allocator = std.mem.Allocator;
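
Every string in the wire format documented at the top of this file is a little-endian `u32` length followed by its bytes. Below is a small sketch of that framing; `writeString32` here is a hypothetical client-side counterpart to `DevServer.readString32`, whose real signature may differ.

const std = @import("std");

fn writeString32(w: anytype, s: []const u8) !void {
    try w.writeInt(u32, @intCast(s.len), .little);
    try w.writeAll(s);
}

test "round-trip an error name and one stack frame" {
    var buf: [256]u8 = undefined;
    var stream = std.io.fixedBufferStream(&buf);
    const w = stream.writer();
    try writeString32(w, "TypeError"); // error name
    try w.writeInt(u32, 1, .little); // number of stack frames
    try w.writeInt(i32, 10, .little); // line (0 would mean unavailable)
    try w.writeInt(i32, 4, .little); // column

    stream.reset();
    const r = stream.reader();
    var name_buf: [32]u8 = undefined;
    const len = try r.readInt(u32, .little);
    try r.readNoEof(name_buf[0..len]);
    try std.testing.expectEqualStrings("TypeError", name_buf[0..len]);
    try std.testing.expectEqual(@as(u32, 1), try r.readInt(u32, .little));
    try std.testing.expectEqual(@as(i32, 10), try r.readInt(i32, .little));
}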

View File

@@ -0,0 +1,295 @@
pub const HmrSocket = @This();
dev: *DevServer,
underlying: ?AnyWebSocket = null,
subscriptions: HmrTopic.Bits,
/// Allows actions which inspect or mutate sensitive DevServer state.
is_from_localhost: bool,
/// By telling DevServer the active route, this enables receiving detailed
/// `hot_update` events for when the route is updated.
active_route: RouteBundle.Index.Optional,
referenced_source_maps: std.AutoHashMapUnmanaged(SourceMapStore.Key, void),
inspector_connection_id: i32 = -1,
pub fn new(dev: *DevServer, res: anytype) *HmrSocket {
return bun.create(dev.allocator, HmrSocket, .{
.dev = dev,
.is_from_localhost = if (res.getRemoteSocketInfo()) |addr|
if (addr.is_ipv6)
bun.strings.eqlComptime(addr.ip, "::1")
else
bun.strings.eqlComptime(addr.ip, "127.0.0.1")
else
false,
.subscriptions = .{},
.active_route = .none,
.referenced_source_maps = .empty,
});
}
pub fn onOpen(s: *HmrSocket, ws: AnyWebSocket) void {
const send_status = ws.send(&(.{MessageId.version.char()} ++ s.dev.configuration_hash_key), .binary, false, true);
s.underlying = ws;
if (send_status != .dropped) {
// Notify inspector about client connection
if (s.dev.inspector()) |agent| {
s.inspector_connection_id = agent.nextConnectionID();
agent.notifyClientConnected(s.dev.inspector_server_id, s.inspector_connection_id);
}
}
}
pub fn onMessage(s: *HmrSocket, ws: AnyWebSocket, msg: []const u8, opcode: uws.Opcode) void {
_ = opcode;
if (msg.len == 0)
return ws.close();
switch (@as(IncomingMessageId, @enumFromInt(msg[0]))) {
.init => {
if (msg.len != 9) return ws.close();
var generation: u32 = undefined;
_ = std.fmt.hexToBytes(std.mem.asBytes(&generation), msg[1..]) catch
return ws.close();
const source_map_id = SourceMapStore.Key.init(@as(u64, generation) << 32);
if (s.dev.source_maps.removeOrUpgradeWeakRef(source_map_id, .upgrade)) {
s.referenced_source_maps.put(s.dev.allocator, source_map_id, {}) catch
bun.outOfMemory();
}
},
.subscribe => {
var new_bits: HmrTopic.Bits = .{};
const topics = msg[1..];
if (topics.len > HmrTopic.max_count) return;
outer: for (topics) |char| {
inline for (@typeInfo(HmrTopic).@"enum".fields) |field| {
if (char == field.value) {
@field(new_bits, field.name) = true;
continue :outer;
}
}
}
inline for (comptime std.enums.values(HmrTopic)) |field| {
if (@field(new_bits, @tagName(field)) and !@field(s.subscriptions, @tagName(field))) {
_ = ws.subscribe(&.{@intFromEnum(field)});
// on-subscribe hooks
if (bun.FeatureFlags.bake_debugging_features) switch (field) {
.incremental_visualizer => {
s.dev.emit_incremental_visualizer_events += 1;
s.dev.emitVisualizerMessageIfNeeded();
},
.memory_visualizer => {
s.dev.emit_memory_visualizer_events += 1;
s.dev.emitMemoryVisualizerMessage();
if (s.dev.emit_memory_visualizer_events == 1) {
bun.assert(s.dev.memory_visualizer_timer.state != .ACTIVE);
s.dev.vm.timer.update(
&s.dev.memory_visualizer_timer,
&bun.timespec.msFromNow(1000),
);
}
},
else => {},
};
} else if (!@field(new_bits, @tagName(field)) and @field(s.subscriptions, @tagName(field))) {
_ = ws.unsubscribe(&.{@intFromEnum(field)});
}
}
onUnsubscribe(s, bun.bits.@"and"(
HmrTopic.Bits,
bun.bits.invert(HmrTopic.Bits, new_bits),
s.subscriptions,
));
s.subscriptions = new_bits;
},
.set_url => {
const pattern = msg[1..];
const maybe_rbi = s.dev.routeToBundleIndexSlow(pattern);
if (s.dev.inspector()) |agent| {
if (s.inspector_connection_id > -1) {
var pattern_str = bun.String.init(pattern);
defer pattern_str.deref();
agent.notifyClientNavigated(
s.dev.inspector_server_id,
s.inspector_connection_id,
&pattern_str,
maybe_rbi,
);
}
}
const rbi = maybe_rbi orelse return;
if (s.active_route.unwrap()) |old| {
if (old == rbi) return;
s.dev.routeBundlePtr(old).active_viewers -= 1;
}
s.dev.routeBundlePtr(rbi).active_viewers += 1;
s.active_route = rbi.toOptional();
var response: [5]u8 = .{MessageId.set_url_response.char()} ++ std.mem.toBytes(rbi.get());
_ = ws.send(&response, .binary, false, true);
s.notifyInspectorClientNavigation(pattern, rbi.toOptional());
},
.testing_batch_events => switch (s.dev.testing_batch_events) {
.disabled => {
if (s.dev.current_bundle != null) {
s.dev.testing_batch_events = .enable_after_bundle;
} else {
s.dev.testing_batch_events = .{ .enabled = .empty };
s.dev.publish(.testing_watch_synchronization, &.{
MessageId.testing_watch_synchronization.char(),
0,
}, .binary);
}
},
.enable_after_bundle => {
// do not expose a websocket event that panics a release build
bun.debugAssert(false);
ws.close();
},
.enabled => |event_const| {
var event = event_const;
s.dev.testing_batch_events = .disabled;
if (event.entry_points.set.count() == 0) {
s.dev.publish(.testing_watch_synchronization, &.{
MessageId.testing_watch_synchronization.char(),
2,
}, .binary);
return;
}
s.dev.startAsyncBundle(
event.entry_points,
true,
std.time.Timer.start() catch @panic("timers unsupported"),
) catch bun.outOfMemory();
event.entry_points.deinit(s.dev.allocator);
},
},
.console_log => {
if (msg.len < 2) {
ws.close();
return;
}
const kind: ConsoleLogKind = switch (msg[1]) {
'l' => .log,
'e' => .err,
else => {
ws.close();
return;
},
};
const data = msg[2..];
if (s.dev.inspector()) |agent| {
var log_str = bun.String.init(data);
defer log_str.deref();
agent.notifyConsoleLog(s.dev.inspector_server_id, kind, &log_str);
}
if (s.dev.broadcast_console_log_from_browser_to_server) {
switch (kind) {
.log => {
bun.Output.pretty("<r><d>[browser]<r> {s}<r>\n", .{data});
},
.err => {
bun.Output.prettyError("<r><d>[browser]<r> {s}<r>\n", .{data});
},
}
bun.Output.flush();
}
},
.unref_source_map => {
var fbs = std.io.fixedBufferStream(msg[1..]);
const r = fbs.reader();
const source_map_id = SourceMapStore.Key.init(r.readInt(u64, .little) catch
return ws.close());
const kv = s.referenced_source_maps.fetchRemove(source_map_id) orelse {
bun.Output.debugWarn("unref_source_map: no entry found: {x}\n", .{source_map_id.get()});
return; // no entry may happen.
};
s.dev.source_maps.unref(kv.key);
},
_ => ws.close(),
}
}
fn onUnsubscribe(s: *HmrSocket, field: HmrTopic.Bits) void {
if (bun.FeatureFlags.bake_debugging_features) {
if (field.incremental_visualizer) {
s.dev.emit_incremental_visualizer_events -= 1;
}
if (field.memory_visualizer) {
s.dev.emit_memory_visualizer_events -= 1;
if (s.dev.emit_memory_visualizer_events == 0 and s.dev.memory_visualizer_timer.state == .ACTIVE) {
s.dev.vm.timer.remove(&s.dev.memory_visualizer_timer);
}
}
}
}
pub fn onClose(s: *HmrSocket, ws: AnyWebSocket, exit_code: i32, message: []const u8) void {
_ = ws;
_ = exit_code;
_ = message;
s.onUnsubscribe(s.subscriptions);
if (s.inspector_connection_id > -1) {
// Notify inspector about client disconnection
if (s.dev.inspector()) |agent| {
agent.notifyClientDisconnected(s.dev.inspector_server_id, s.inspector_connection_id);
}
}
if (s.active_route.unwrap()) |old| {
s.dev.routeBundlePtr(old).active_viewers -= 1;
}
var it = s.referenced_source_maps.keyIterator();
while (it.next()) |key| {
s.dev.source_maps.unref(key.*);
}
s.referenced_source_maps.deinit(s.dev.allocator);
bun.debugAssert(s.dev.active_websocket_connections.remove(s));
s.dev.allocator.destroy(s);
}
fn notifyInspectorClientNavigation(s: *const HmrSocket, pattern: []const u8, rbi: RouteBundle.Index.Optional) void {
if (s.inspector_connection_id > -1) {
if (s.dev.inspector()) |agent| {
var pattern_str = bun.String.init(pattern);
defer pattern_str.deref();
agent.notifyClientNavigated(
s.dev.inspector_server_id,
s.inspector_connection_id,
&pattern_str,
rbi.unwrap(),
);
}
}
}
const std = @import("std");
const bun = @import("bun");
const Output = bun.Output;
const assert = bun.assert;
const bake = bun.bake;
const DevServer = bake.DevServer;
const ConsoleLogKind = DevServer.ConsoleLogKind;
const HmrTopic = DevServer.HmrTopic;
const IncomingMessageId = DevServer.IncomingMessageId;
const MessageId = DevServer.MessageId;
const RouteBundle = DevServer.RouteBundle;
const SourceMapStore = DevServer.SourceMapStore;
const uws = bun.uws;
const AnyWebSocket = uws.AnyWebSocket;
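
The `.subscribe` handler above maps each topic byte onto a packed struct of booleans by matching against the enum's field values. A minimal sketch of that bitset-parsing pattern, with illustrative topic names rather than Bun's actual `HmrTopic` definition:

const std = @import("std");

// Each topic's integer value doubles as its wire character.
const Topic = enum(u8) { errors = 'e', memory_visualizer = 'm' };
const Bits = packed struct {
    errors: bool = false,
    memory_visualizer: bool = false,
};

fn parseTopics(msg: []const u8) Bits {
    var bits: Bits = .{};
    outer: for (msg) |char| {
        inline for (@typeInfo(Topic).@"enum".fields) |field| {
            if (char == field.value) {
                @field(bits, field.name) = true;
                continue :outer;
            }
        }
        // unknown topic bytes are ignored, as in the real handler
    }
    return bits;
}

test parseTopics {
    const bits = parseTopics("em");
    try std.testing.expect(bits.errors and bits.memory_visualizer);
    try std.testing.expect(!parseTopics("x").errors);
}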

View File

@@ -0,0 +1,253 @@
/// This task informs the DevServer's thread about new files to be bundled.
pub const HotReloadEvent = @This();
/// Align to cache lines to eliminate false sharing.
_: u0 align(std.atomic.cache_line) = 0,
owner: *DevServer,
/// Initialized in WatcherAtomics.watcherReleaseAndSubmitEvent
concurrent_task: jsc.ConcurrentTask,
/// The watcher is not able to peek into IncrementalGraph to know what files
/// to invalidate, so the watch events are de-duplicated and passed along.
/// The keys are owned by the file watcher.
files: bun.StringArrayHashMapUnmanaged(void),
/// Directories are watched so that resolution failures can be solved.
/// The keys are owned by the file watcher.
dirs: bun.StringArrayHashMapUnmanaged(void),
/// Same purpose as `files` but keys do not have an owner.
extra_files: std.ArrayListUnmanaged(u8),
/// Initialized by the WatcherAtomics.watcherAcquireEvent
timer: std.time.Timer,
/// This event may be referenced by either DevServer or Watcher thread.
/// 1 if referenced, 0 if unreferenced; see WatcherAtomics
contention_indicator: std.atomic.Value(u32),
debug_mutex: if (Environment.isDebug) bun.Mutex else void,
pub fn initEmpty(owner: *DevServer) HotReloadEvent {
return .{
.owner = owner,
.concurrent_task = undefined,
.files = .empty,
.dirs = .empty,
.timer = undefined,
.contention_indicator = .init(0),
.debug_mutex = if (Environment.isDebug) .{} else {},
.extra_files = .empty,
};
}
pub fn reset(ev: *HotReloadEvent) void {
if (Environment.isDebug)
ev.debug_mutex.unlock();
ev.files.clearRetainingCapacity();
ev.dirs.clearRetainingCapacity();
ev.extra_files.clearRetainingCapacity();
ev.timer = undefined;
}
pub fn isEmpty(ev: *const HotReloadEvent) bool {
return (ev.files.count() + ev.dirs.count()) == 0;
}
pub fn appendFile(event: *HotReloadEvent, allocator: Allocator, file_path: []const u8) void {
_ = event.files.getOrPut(allocator, file_path) catch bun.outOfMemory();
}
pub fn appendDir(event: *HotReloadEvent, allocator: Allocator, dir_path: []const u8, maybe_sub_path: ?[]const u8) void {
if (dir_path.len == 0) return;
_ = event.dirs.getOrPut(allocator, dir_path) catch bun.outOfMemory();
const sub_path = maybe_sub_path orelse return;
if (sub_path.len == 0) return;
const platform = bun.path.Platform.auto;
const ends_with_sep = platform.isSeparator(dir_path[dir_path.len - 1]);
const starts_with_sep = platform.isSeparator(sub_path[0]);
const sep_offset: i32 = if (ends_with_sep and starts_with_sep) -1 else 1;
event.extra_files.ensureUnusedCapacity(allocator, @intCast(@as(i32, @intCast(dir_path.len + sub_path.len)) + sep_offset + 1)) catch bun.outOfMemory();
event.extra_files.appendSliceAssumeCapacity(if (ends_with_sep) dir_path[0 .. dir_path.len - 1] else dir_path);
event.extra_files.appendAssumeCapacity(platform.separator());
event.extra_files.appendSliceAssumeCapacity(sub_path);
event.extra_files.appendAssumeCapacity(0);
}
/// Invalidates items in IncrementalGraph, appending all new items to `entry_points`
pub fn processFileList(
event: *HotReloadEvent,
dev: *DevServer,
entry_points: *EntryPointList,
temp_alloc: Allocator,
) void {
dev.graph_safety_lock.lock();
defer dev.graph_safety_lock.unlock();
// First handle directories, because this may mutate `event.files`
if (dev.directory_watchers.watches.count() > 0) for (event.dirs.keys()) |changed_dir_with_slash| {
const changed_dir = bun.strings.withoutTrailingSlashWindowsPath(changed_dir_with_slash);
// Bust the resolution cache. Since Bun does not watch every
// directory in a codebase, this only covers the resolutions handled below.
_ = dev.server_transpiler.resolver.bustDirCache(changed_dir);
// if a directory watch exists for resolution failures, check those now.
if (dev.directory_watchers.watches.getIndex(changed_dir)) |watcher_index| {
const entry = &dev.directory_watchers.watches.values()[watcher_index];
var new_chain: DirectoryWatchStore.Dep.Index.Optional = .none;
var it: ?DirectoryWatchStore.Dep.Index = entry.first_dep;
while (it) |index| {
const dep = &dev.directory_watchers.dependencies.items[index.get()];
it = dep.next.unwrap();
if ((dev.server_transpiler.resolver.resolve(
bun.path.dirname(dep.source_file_path, .auto),
dep.specifier,
.stmt,
) catch null) != null) {
// This resolution result is not preserved, as passing it
// into BundleV2 is too complicated. The resolution is
// cached anyway.
event.appendFile(dev.allocator, dep.source_file_path);
dev.directory_watchers.freeDependencyIndex(dev.allocator, index) catch bun.outOfMemory();
} else {
// rebuild a new linked list for unaffected files
dep.next = new_chain;
new_chain = index.toOptional();
}
}
if (new_chain.unwrap()) |new_first_dep| {
entry.first_dep = new_first_dep;
} else {
// with no files left depending on it, this watcher is freed
dev.directory_watchers.freeEntry(dev.allocator, watcher_index);
}
}
};
var rest_extra = event.extra_files.items;
while (bun.strings.indexOfChar(rest_extra, 0)) |i| {
event.files.put(dev.allocator, rest_extra[0..i], {}) catch bun.outOfMemory();
rest_extra = rest_extra[i + 1 ..];
}
}
if (rest_extra.len > 0) {
event.files.put(dev.allocator, rest_extra, {}) catch bun.outOfMemory();
}
const changed_file_paths = event.files.keys();
inline for (.{ &dev.server_graph, &dev.client_graph }) |g| {
g.invalidate(changed_file_paths, entry_points, temp_alloc) catch bun.outOfMemory();
}
if (entry_points.set.count() == 0) {
Output.debugWarn("nothing to bundle", .{});
if (changed_file_paths.len > 0)
Output.debugWarn("modified files: {s}", .{
bun.fmt.fmtSlice(changed_file_paths, ", "),
});
if (event.dirs.count() > 0)
Output.debugWarn("modified dirs: {s}", .{
bun.fmt.fmtSlice(event.dirs.keys(), ", "),
});
dev.publish(.testing_watch_synchronization, &.{
MessageId.testing_watch_synchronization.char(),
1,
}, .binary);
return;
}
if (dev.has_tailwind_plugin_hack) |*map| {
for (map.keys()) |abs_path| {
const file = dev.client_graph.bundled_files.get(abs_path) orelse
continue;
if (file.flags.kind == .css)
entry_points.appendCss(temp_alloc, abs_path) catch bun.outOfMemory();
}
}
}
pub fn run(first: *HotReloadEvent) void {
assert(first.owner.magic == .valid);
debug.log("HMR Task start", .{});
defer debug.log("HMR Task end", .{});
const dev = first.owner;
if (Environment.isDebug) {
assert(first.debug_mutex.tryLock());
assert(first.contention_indicator.load(.seq_cst) == 0);
}
if (dev.current_bundle != null) {
dev.next_bundle.reload_event = first;
return;
}
var sfb = std.heap.stackFallback(4096, dev.allocator);
const temp_alloc = sfb.get();
var entry_points: EntryPointList = .empty;
defer entry_points.deinit(temp_alloc);
first.processFileList(dev, &entry_points, temp_alloc);
const timer = first.timer;
if (dev.watcher_atomics.recycleEventFromDevServer(first)) |second| {
if (Environment.isDebug) {
assert(second.debug_mutex.tryLock());
}
second.processFileList(dev, &entry_points, temp_alloc);
dev.watcher_atomics.recycleSecondEventFromDevServer(second);
}
if (entry_points.set.count() == 0) {
return;
}
switch (dev.testing_batch_events) {
.disabled => {},
.enabled => |*ev| {
ev.append(dev, entry_points) catch bun.outOfMemory();
dev.publish(.testing_watch_synchronization, &.{
MessageId.testing_watch_synchronization.char(),
1,
}, .binary);
return;
},
.enable_after_bundle => bun.debugAssert(false),
}
dev.startAsyncBundle(
entry_points,
true,
timer,
) catch |err| {
bun.handleErrorReturnTrace(err, @errorReturnTrace());
return;
};
}
const bun = @import("bun");
const Environment = bun.Environment;
const Mutex = bun.Mutex;
const Output = bun.Output;
const Watcher = bun.Watcher;
const assert = bun.assert;
const bake = bun.bake;
const jsc = bun.jsc;
const BundleV2 = bun.bundle_v2.BundleV2;
const DevServer = bake.DevServer;
const DirectoryWatchStore = DevServer.DirectoryWatchStore;
const EntryPointList = DevServer.EntryPointList;
const MessageId = DevServer.MessageId;
const debug = DevServer.debug;
const std = @import("std");
const ArrayListUnmanaged = std.ArrayListUnmanaged;
const Allocator = std.mem.Allocator;
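
`appendDir` packs joined paths into the single `extra_files` byte buffer, each terminated by a 0 byte, and `processFileList` splits them back out on the DevServer thread. A self-contained sketch of that decoding step:

const std = @import("std");

test "decode the NUL-separated extra_files buffer" {
    const alloc = std.testing.allocator;
    const buffer = "src/a.ts\x00src/b.ts\x00";

    var paths: std.ArrayListUnmanaged([]const u8) = .{};
    defer paths.deinit(alloc);

    var rest: []const u8 = buffer;
    while (std.mem.indexOfScalar(u8, rest, 0)) |i| {
        try paths.append(alloc, rest[0..i]);
        rest = rest[i + 1 ..];
    }
    // a trailing unterminated path would be handled here, as in the original

    try std.testing.expectEqual(@as(usize, 2), paths.items.len);
    try std.testing.expectEqualStrings("src/a.ts", paths.items[0]);
    try std.testing.expectEqualStrings("src/b.ts", paths.items[1]);
}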

File diff suppressed because it is too large

View File

@@ -0,0 +1,169 @@
/// Packed source mapping data for a single file.
/// Owned by one IncrementalGraph file and/or multiple SourceMapStore entries.
pub const PackedMap = @This();
const RefCount = bun.ptr.RefCount(@This(), "ref_count", destroy, .{
.destructor_ctx = *DevServer,
});
ref_count: RefCount,
/// Allocated by `dev.allocator`. Access with `.vlq()`
/// This is stored to allow lazy construction of source map files.
vlq_ptr: [*]u8,
vlq_len: u32,
/// The bundler runs quoting on multiple threads, so it only makes
/// sense to preserve that effort for concatenation and
/// re-concatenation.
// TODO: rename to `escaped_source_*`
quoted_contents_ptr: [*]u8,
quoted_contents_len: u32,
/// Used to track the last state of the source map chunk. This
/// is used when concatenating chunks. The generated column is
/// not tracked because it is always zero (all chunks end in a
/// newline because minification is off), and the generated line
/// is recomputed on demand and is different per concatenation.
end_state: struct {
original_line: i32,
original_column: i32,
},
/// There are 32 bits of extra padding in this struct. They are used while
/// implementing `DevServer.memoryCost` to check which PackedMap entries
/// have already been counted.
bits_used_for_memory_cost_dedupe: u32 = 0,
pub fn newNonEmpty(source_map: SourceMap.Chunk, quoted_contents: []u8) bun.ptr.RefPtr(PackedMap) {
assert(source_map.buffer.list.items.len > 0);
return .new(.{
.ref_count = .init(),
.vlq_ptr = source_map.buffer.list.items.ptr,
.vlq_len = @intCast(source_map.buffer.list.items.len),
.quoted_contents_ptr = quoted_contents.ptr,
.quoted_contents_len = @intCast(quoted_contents.len),
.end_state = .{
.original_line = source_map.end_state.original_line,
.original_column = source_map.end_state.original_column,
},
});
}
fn destroy(self: *@This(), dev: *DevServer) void {
dev.allocator.free(self.vlq());
dev.allocator.free(self.quotedContents());
bun.destroy(self);
}
pub fn memoryCost(self: *const @This()) usize {
return self.vlq_len + self.quoted_contents_len + @sizeOf(@This());
}
/// When DevServer iterates everything to calculate memory usage, it passes
/// a generation number along which is different on each sweep, but
/// consistent within one. It is used to avoid counting memory twice.
pub fn memoryCostWithDedupe(self: *@This(), new_dedupe_bits: u32) usize {
if (self.bits_used_for_memory_cost_dedupe == new_dedupe_bits) {
return 0; // already counted.
}
self.bits_used_for_memory_cost_dedupe = new_dedupe_bits;
return self.memoryCost();
}
pub fn vlq(self: *const @This()) []u8 {
return self.vlq_ptr[0..self.vlq_len];
}
// TODO: rename to `escapedSource`
pub fn quotedContents(self: *const @This()) []u8 {
return self.quoted_contents_ptr[0..self.quoted_contents_len];
}
comptime {
if (!Environment.isDebug) {
assert_eql(@sizeOf(@This()), @sizeOf(usize) * 5);
assert_eql(@alignOf(@This()), @alignOf(usize));
}
}
/// HTML, CSS, Assets, and failed files do not have source maps. These cases
/// should never allocate an object. There is still relevant state for these
/// files to encode, so those fields fit within the same 64 bits the pointer
/// would have used.
///
/// The tag is stored out of line with `Untagged`
/// - `IncrementalGraph(.client).File` offloads this bit into `File.Flags`
/// - `SourceMapStore.Entry` uses `MultiArrayList`
pub const RefOrEmpty = union(enum(u1)) {
ref: bun.ptr.RefPtr(PackedMap),
empty: Empty,
pub const Empty = struct {
/// Number of lines to skip when there is an associated JS chunk.
line_count: bun.GenericIndex(u32, u8).Optional,
/// This technically is not source-map related, but
/// all HTML files have no source map, so this can
/// fit in this space.
html_bundle_route_index: RouteBundle.Index.Optional,
};
pub const blank_empty: @This() = .{ .empty = .{
.line_count = .none,
.html_bundle_route_index = .none,
} };
pub fn deref(map: *const @This(), dev: *DevServer) void {
switch (map.*) {
.ref => |ptr| ptr.derefWithContext(dev),
.empty => {},
}
}
pub fn dupeRef(map: *const @This()) @This() {
return switch (map.*) {
.ref => |ptr| .{ .ref = ptr.dupeRef() },
.empty => map.*,
};
}
pub fn untag(map: @This()) Untagged {
return switch (map) {
.ref => |ptr| .{ .ref = ptr },
.empty => |empty| .{ .empty = empty },
};
}
pub const Tag = @typeInfo(@This()).@"union".tag_type.?;
pub const Untagged = brk: {
@setRuntimeSafety(Environment.isDebug); // do not store a union tag in windows release
break :brk union {
ref: bun.ptr.RefPtr(PackedMap),
empty: Empty,
pub const blank_empty = RefOrEmpty.blank_empty.untag();
pub fn decode(untagged: @This(), tag: Tag) RefOrEmpty {
return switch (tag) {
.ref => .{ .ref = untagged.ref },
.empty => .{ .empty = untagged.empty },
};
}
comptime {
if (!Environment.isDebug) {
assert_eql(@sizeOf(@This()), @sizeOf(usize));
assert_eql(@alignOf(@This()), @alignOf(usize));
}
}
};
};
};
const bun = @import("bun");
const Environment = bun.Environment;
const SourceMap = bun.sourcemap;
const assert = bun.assert;
const assert_eql = bun.assert_eql;
const bake = bun.bake;
const Chunk = bun.bundle_v2.Chunk;
const RefPtr = bun.ptr.RefPtr;
const DevServer = bake.DevServer;
const RouteBundle = DevServer.RouteBundle;
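
`RefOrEmpty.Untagged` relies on storing the tag externally (a bit in `File.Flags` or a `MultiArrayList` column), so the union itself is bare and pointer-sized in release builds. Below is a minimal sketch of that external-tag pattern with illustrative field names; note that a bare Zig union may still carry a hidden safety tag in debug builds, which is why the size assertions above are release-only.

const std = @import("std");

const Tag = enum(u1) { ptr, inline_data };
const Value = union {
    ptr: *const u32,
    inline_data: u64,

    // The caller supplies the tag it stored elsewhere.
    fn decode(value: Value, tag: Tag) u64 {
        return switch (tag) {
            .ptr => value.ptr.*,
            .inline_data => value.inline_data,
        };
    }
};

test "decode with an externally stored tag" {
    const x: u32 = 7;
    try std.testing.expectEqual(@as(u64, 7), Value.decode(.{ .ptr = &x }, .ptr));
    try std.testing.expectEqual(@as(u64, 9), Value.decode(.{ .inline_data = 9 }, .inline_data));
}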

View File

@@ -0,0 +1,167 @@
pub const RouteBundle = @This();
pub const Index = bun.GenericIndex(u30, RouteBundle);
server_state: State,
/// There are two distinct types of route bundles.
data: union(enum) {
/// FrameworkRouter provided route
framework: Framework,
/// HTMLBundle provided route
html: HTML,
},
/// Generated lazily when the client JS is requested.
/// Invalidated when a downstream client module updates.
client_bundle: ?*StaticRoute,
/// If the client tries to load a script with the wrong generation, it will
/// receive a bundle that instantly reloads the page, implying a bundle
/// change has occurred while fetching the script.
client_script_generation: u32,
/// Reference count of how many HmrSockets say they are on this route. This
/// allows hot-reloading events to reduce the amount of times it traces the
/// graph.
active_viewers: u32,
pub const Framework = struct {
route_index: Route.Index,
/// Cached to avoid re-creating the array every request.
/// TODO: Invalidated when a layout is added or removed from this route.
cached_module_list: jsc.Strong.Optional,
/// Cached to avoid re-creating the string every request.
/// TODO: Invalidated when any client file associated with the route is updated.
cached_client_bundle_url: jsc.Strong.Optional,
/// Cached to avoid re-creating the array every request.
/// Invalidated when the list of CSS files changes.
cached_css_file_array: jsc.Strong.Optional,
/// When state == .evaluation_failure, this is populated with the route
/// evaluation error mirrored in the dev server hash map
evaluate_failure: ?SerializedFailure,
};
pub const HTML = struct {
/// DevServer increments the ref count of this bundle
html_bundle: RefPtr(HTMLBundle.HTMLBundleRoute),
bundled_file: IncrementalGraph(.client).FileIndex,
/// Invalidated when the HTML file is modified, but not its imports.
/// The style tag is injected here.
script_injection_offset: ByteOffset.Optional,
/// The HTML file bundled, from the bundler.
bundled_html_text: ?[]const u8,
/// Derived from `bundled_html_text` + `client_script_generation`
/// and css information. Invalidated when:
/// - The HTML file itself modified.
/// - The list of CSS files changes.
/// - Any downstream file is rebundled.
cached_response: ?*StaticRoute,
const ByteOffset = bun.GenericIndex(u32, u8);
};
/// A union is not used so that `bundler_failure_logs` can re-use memory, as
/// this state frequently changes between `loaded` and the failure variants.
pub const State = enum {
/// In development mode, routes are lazily built. This state implies a
/// build of this route has never been run. It is possible to bundle the
/// route entry point and still have an unqueued route if another route
/// imports this one. This state is implied if `FrameworkRouter.Route`
/// has no bundle index assigned.
unqueued,
/// A bundle associated with this route is happening
bundling,
/// This route was flagged for bundling failures. There are edge cases
/// where a route can be disconnected from its failures, so the route
/// imports has to be traced to discover if possible failures still
/// exist.
possible_bundling_failures,
/// Loading the module at runtime had a failure. The error can be
/// cleared by editing any file in the same hot-reloading boundary.
evaluation_failure,
/// Calling the request function may error, but that error will not be
/// at fault of bundling, nor would re-bundling change anything.
loaded,
};
pub const UnresolvedIndex = union(enum) {
/// FrameworkRouter provides a fullstack server-side route
framework: FrameworkRouter.Route.Index,
/// HTMLBundle provides a frontend-only route, SPA-style
html: *HTMLBundle.HTMLBundleRoute,
};
pub fn deinit(rb: *RouteBundle, allocator: Allocator) void {
if (rb.client_bundle) |blob| blob.deref();
switch (rb.data) {
.framework => |*fw| {
fw.cached_client_bundle_url.deinit();
fw.cached_css_file_array.deinit();
fw.cached_module_list.deinit();
},
.html => |*html| {
if (html.bundled_html_text) |text| {
allocator.free(text);
}
if (html.cached_response) |cached_response| {
cached_response.deref();
}
html.html_bundle.deref();
},
}
}
pub fn sourceMapId(rb: *RouteBundle) SourceMapStore.Key {
return .init(@as(u64, rb.client_script_generation) << 32);
}
pub fn invalidateClientBundle(rb: *RouteBundle, dev: *DevServer) void {
if (rb.client_bundle) |bundle| {
dev.source_maps.unref(rb.sourceMapId());
bundle.deref();
rb.client_bundle = null;
}
rb.client_script_generation = std.crypto.random.int(u32);
switch (rb.data) {
.framework => |*fw| fw.cached_client_bundle_url.clearWithoutDeallocation(),
.html => |*html| if (html.cached_response) |cached_response| {
cached_response.deref();
html.cached_response = null;
},
}
}
pub fn memoryCost(rb: *const RouteBundle) usize {
var cost: usize = @sizeOf(RouteBundle);
if (rb.client_bundle) |bundle| cost += bundle.memoryCost();
switch (rb.data) {
.framework => {
// The jsc.Strong.Optional children do not support memoryCost; likely not needed.
// .evaluate_failure is not owned
},
.html => |*html| {
if (html.bundled_html_text) |text| cost += text.len;
if (html.cached_response) |cached_response| cost += cached_response.memoryCost();
},
}
return cost;
}
const std = @import("std");
const Allocator = std.mem.Allocator;
const bun = @import("bun");
const bake = bun.bake;
const jsc = bun.jsc;
const RefPtr = bun.ptr.RefPtr;
const HTMLBundle = jsc.API.HTMLBundle;
const StaticRoute = bun.api.server.StaticRoute;
const DevServer = bake.DevServer;
const IncrementalGraph = DevServer.IncrementalGraph;
const SerializedFailure = DevServer.SerializedFailure;
const SourceMapStore = DevServer.SourceMapStore;
const FrameworkRouter = bake.FrameworkRouter;
const Route = FrameworkRouter.Route;
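
`sourceMapId` derives the source map key from `client_script_generation` by shifting it into the high 32 bits, and `invalidateClientBundle` regenerates that value so stale browser caches never match the new script URL. A small sketch of the keying scheme:

const std = @import("std");

fn sourceMapKey(client_script_generation: u32) u64 {
    return @as(u64, client_script_generation) << 32;
}

test sourceMapKey {
    var prng = std.Random.DefaultPrng.init(0);
    const gen_a = prng.random().int(u32);
    const gen_b = prng.random().int(u32); // as after invalidateClientBundle
    try std.testing.expect(gen_a != gen_b);
    try std.testing.expect(sourceMapKey(gen_a) != sourceMapKey(gen_b));
    // the low 32 bits stay free for other uses of the key space
    try std.testing.expectEqual(@as(u64, 0), sourceMapKey(gen_a) & 0xFFFF_FFFF);
}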

View File

@@ -0,0 +1,239 @@
/// Errors sent to the HMR client in the browser are serialized. The same format
/// is used for thrown JavaScript exceptions as well as bundler errors.
/// Serialized failures contain a handle on what file or route they came from,
/// which allows the bundler to dismiss or update stale failures via index as
/// opposed to re-sending a new payload. This also means only changed files are
/// rebuilt, instead of all of the failed files.
///
/// The HMR client in the browser is expected to sort the final list of errors
/// for deterministic output; there is code in DevServer that uses `swapRemove`.
pub const SerializedFailure = @This();
/// Serialized data is always owned by dev.allocator
/// The first 32 bits of this slice contain the owner
data: []u8,
pub fn deinit(f: SerializedFailure, dev: *DevServer) void {
dev.allocator.free(f.data);
}
/// The metaphorical owner of an incremental file error. The packed variant
/// is given to the HMR runtime as an opaque handle.
pub const Owner = union(enum) {
none,
route: RouteBundle.Index,
client: IncrementalGraph(.client).FileIndex,
server: IncrementalGraph(.server).FileIndex,
pub fn encode(owner: Owner) Packed {
return switch (owner) {
.none => .{ .kind = .none, .data = 0 },
.client => |data| .{ .kind = .client, .data = data.get() },
.server => |data| .{ .kind = .server, .data = data.get() },
.route => |data| .{ .kind = .route, .data = data.get() },
};
}
pub const Packed = packed struct(u32) {
data: u30,
kind: enum(u2) { none, route, client, server },
pub fn decode(owner: Packed) Owner {
return switch (owner.kind) {
.none => .none,
.client => .{ .client = IncrementalGraph(.client).FileIndex.init(owner.data) },
.server => .{ .server = IncrementalGraph(.server).FileIndex.init(owner.data) },
.route => .{ .route = RouteBundle.Index.init(owner.data) },
};
}
comptime {
assert(@as(u32, @bitCast(Packed{ .kind = .none, .data = 1 })) == 1);
}
};
};
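// A minimal round-trip sketch of the encoding above (test-only; assumes the
// standard `zig test` harness rather than anything DevServer-specific):
test "Owner survives a round-trip through Owner.Packed" {
const original: Owner = .{ .route = RouteBundle.Index.init(42) };
const bits: u32 = @bitCast(original.encode());
const decoded = @as(Owner.Packed, @bitCast(bits)).decode();
try std.testing.expectEqual(original.route.get(), decoded.route.get());
}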
pub fn getOwner(failure: SerializedFailure) Owner {
return std.mem.bytesAsValue(Owner.Packed, failure.data[0..4]).decode();
}
/// This assumes the hash map contains only one SerializedFailure per owner.
/// This is okay since SerializedFailure can contain more than one error.
pub const ArrayHashContextViaOwner = struct {
pub fn hash(_: ArrayHashContextViaOwner, k: SerializedFailure) u32 {
return std.hash.uint32(@bitCast(k.getOwner().encode()));
}
pub fn eql(_: ArrayHashContextViaOwner, a: SerializedFailure, b: SerializedFailure, _: usize) bool {
return @as(u32, @bitCast(a.getOwner().encode())) == @as(u32, @bitCast(b.getOwner().encode()));
}
};
pub const ArrayHashAdapter = struct {
pub fn hash(_: ArrayHashAdapter, own: Owner) u32 {
return std.hash.uint32(@bitCast(own.encode()));
}
pub fn eql(_: ArrayHashAdapter, a: Owner, b: SerializedFailure, _: usize) bool {
return @as(u32, @bitCast(a.encode())) == @as(u32, @bitCast(b.getOwner().encode()));
}
};
pub const ErrorKind = enum(u8) {
// A log message. The `logger.Kind` is encoded here.
bundler_log_err = 0,
bundler_log_warn = 1,
bundler_log_note = 2,
bundler_log_debug = 3,
bundler_log_verbose = 4,
/// new Error(message)
js_error,
/// new TypeError(message)
js_error_type,
/// new RangeError(message)
js_error_range,
/// Other forms of `Error` objects, including when an error has a
/// `code`, and other fields.
js_error_extra,
/// Non-error with a stack trace
js_primitive_exception,
/// Non-error JS values
js_primitive,
/// new AggregateError(errors, message)
js_aggregate,
};
pub fn initFromJs(dev: *DevServer, owner: Owner, value: JSValue) !SerializedFailure {
{
_ = value;
@panic("TODO");
}
// Avoid small reallocations without requesting too much from the heap up front
var sfb = std.heap.stackFallback(65536, dev.allocator);
var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch
unreachable; // enough space
const w = payload.writer();
try w.writeInt(u32, @bitCast(owner.encode()), .little);
// try writeJsValue(value);
// Avoid re-cloning if it was moved to the heap
const data = if (payload.items.ptr == &sfb.buffer)
try dev.allocator.dupe(u8, payload.items)
else
payload.items;
return .{ .data = data };
}
pub fn initFromLog(
dev: *DevServer,
owner: Owner,
// for .client and .server, these are meant to be relative file paths
owner_display_name: []const u8,
messages: []const bun.logger.Msg,
) !SerializedFailure {
assert(messages.len > 0);
// Avoid small reallocations without requesting too much from the heap up front
var sfb = std.heap.stackFallback(65536, dev.allocator);
var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch
unreachable; // enough space
const w = payload.writer();
try w.writeInt(u32, @bitCast(owner.encode()), .little);
try writeString32(owner_display_name, w);
try w.writeInt(u32, @intCast(messages.len), .little);
for (messages) |*msg| {
try writeLogMsg(msg, w);
}
// Avoid re-cloning if it was moved to the heap
const data = if (payload.items.ptr == &sfb.buffer)
try dev.allocator.dupe(u8, payload.items)
else
payload.items;
return .{ .data = data };
}
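// Wire layout produced by `initFromLog` (all integers little-endian), matching
// the readers in ./client/error.ts:
//
//   u32              owner (Owner.Packed)
//   u32 + bytes      owner_display_name
//   u32              message count
//   per message:
//     u8             ErrorKind
//     log data       text, then either a location (line, column, length,
//                    line text) or a 0 sentinel
//     u32            note count
//     log data * n   notes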
// All "write" functions get a corresponding "read" function in ./client/error.ts
const Writer = std.ArrayList(u8).Writer;
fn writeLogMsg(msg: *const bun.logger.Msg, w: Writer) !void {
try w.writeByte(switch (msg.kind) {
inline else => |k| @intFromEnum(@field(ErrorKind, "bundler_log_" ++ @tagName(k))),
});
try writeLogData(msg.data, w);
const notes = msg.notes;
try w.writeInt(u32, @intCast(notes.len), .little);
for (notes) |note| {
try writeLogData(note, w);
}
}
fn writeLogData(data: bun.logger.Data, w: Writer) !void {
try writeString32(data.text, w);
if (data.location) |loc| {
if (loc.line < 0) {
try w.writeInt(u32, 0, .little);
return;
}
assert(loc.column >= 0); // zero based and not negative
try w.writeInt(i32, @intCast(loc.line), .little);
try w.writeInt(u32, @intCast(loc.column), .little);
try w.writeInt(u32, @intCast(loc.length), .little);
// TODO: syntax highlighted line text + give more context lines
try writeString32(loc.line_text orelse "", w);
// The file is not specified here. Since the transpiler runs every file
// in isolation, it would be impossible to reference any other file
// in this Log. Thus, it is not serialized.
} else {
try w.writeInt(u32, 0, .little);
}
}
fn writeString32(data: []const u8, w: Writer) !void {
try w.writeInt(u32, @intCast(data.len), .little);
try w.writeAll(data);
}
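// For illustration, a hedged Zig sketch of the inverse of `writeString32`; the
// real reader lives in ./client/error.ts, so this function is hypothetical:
fn readString32ForIllustration(r: anytype, alloc: std.mem.Allocator) ![]u8 {
const len = try r.readInt(u32, .little);
const buf = try alloc.alloc(u8, len);
try r.readNoEof(buf);
return buf;
}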
// fn writeJsValue(value: JSValue, global: *jsc.JSGlobalObject, w: *Writer) !void {
// if (value.isAggregateError(global)) {
// //
// }
// if (value.jsType() == .DOMWrapper) {
// if (value.as(bun.api.BuildMessage)) |build_error| {
// _ = build_error; // autofix
// //
// } else if (value.as(bun.api.ResolveMessage)) |resolve_error| {
// _ = resolve_error; // autofix
// @panic("TODO");
// }
// }
// _ = w; // autofix
// @panic("TODO");
// }
const std = @import("std");
const bun = @import("bun");
const assert = bun.assert;
const bake = bun.bake;
const Log = bun.logger.Log;
const DevServer = bake.DevServer;
const IncrementalGraph = DevServer.IncrementalGraph;
const RouteBundle = DevServer.RouteBundle;
const jsc = bun.jsc;
const JSValue = jsc.JSValue;

View File

@@ -0,0 +1,526 @@
/// Storage for source maps on `/_bun/client/{id}.js.map`
///
/// All source maps are referenced counted, so that when a websocket disconnects
/// or a bundle is replaced, the unreachable source map URLs are revoked. Source
/// maps that aren't reachable from IncrementalGraph can still be reached by
/// a browser tab if it has a callback to a previously loaded chunk; so DevServer
/// should be aware of it.
pub const SourceMapStore = @This();
/// See `SourceId` for how the contents of this u64 are laid out.
pub const Key = bun.GenericIndex(u64, .{ "Key of", SourceMapStore });
entries: AutoArrayHashMapUnmanaged(Key, Entry),
/// When a HTML bundle is loaded, it places a "weak reference" to the
/// script's source map. This reference is held until either:
/// - The script loads and moves the ref into "strongly held" by the HmrSocket
/// - The expiry time passes
/// - Too many different weak references exist
weak_refs: bun.LinearFifo(WeakRef, .{ .Static = weak_ref_entry_max }),
/// Single timer shared by all weak refs; armed for the earliest pending expiry.
weak_ref_sweep_timer: EventLoopTimer,
pub const empty: SourceMapStore = .{
.entries = .empty,
.weak_ref_sweep_timer = .initPaused(.DevServerSweepSourceMaps),
.weak_refs = .init(),
};
const weak_ref_expiry_seconds = 10;
const weak_ref_entry_max = 16;
/// Route bundle keys clear the bottom 32 bits of this value, using only the
/// top 32 bits to represent the map. For JS chunks, these bottom 32 bits are
/// used as an index into `dev.route_bundles` to know what route it refers to.
///
/// HMR patches set the bottom bit to `1`, and use the remaining 63 bits as
/// an ID. This is fine since the JS chunks are never served after the update
/// is emitted.
// TODO: Rewrite this `SourceMapStore.Key` and some other places that use bit
// shifts and u64 to use this struct.
pub const SourceId = packed struct(u64) {
kind: ChunkKind,
bits: packed union {
initial_response: packed struct(u63) {
unused: enum(u31) { zero = 0 } = .zero,
generation_id: u32,
},
hmr_chunk: packed struct(u63) {
content_hash: u63,
},
},
};
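// A small sketch of decoding a route bundle key into this struct (it assumes
// the zero bit pattern of ChunkKind is `.initial_response`, as in `joinVLQ`):
test "route bundle keys decode as initial_response SourceIds" {
const generation: u32 = 0xDEADBEEF;
const id: SourceId = @bitCast(@as(u64, generation) << 32);
try std.testing.expectEqual(ChunkKind.initial_response, id.kind);
try std.testing.expectEqual(generation, id.bits.initial_response.generation_id);
}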
/// IncrementalGraph stores partial source maps for each file. A
/// `SourceMapStore.Entry` is the information + refcount holder to
/// construct the actual JSON file associated with a bundle/hot update.
pub const Entry = struct {
/// Sum of:
/// - How many active sockets have code that could reference this source map?
/// - For route bundle client scripts, +1 until invalidation.
ref_count: u32,
/// Indexes are off by one because this excludes the HMR Runtime.
/// Outer slice is owned, inner slice is shared with IncrementalGraph.
paths: []const []const u8,
/// Indexes are off by one because this excludes the HMR Runtime.
files: bun.MultiArrayList(PackedMap.RefOrEmpty),
/// The memory cost can be shared between many entries and IncrementalGraph
/// So this is only used for eviction logic, to pretend this was the only
/// entry. To compute the memory cost of DevServer, this cannot be used.
overlapping_memory_cost: u32,
pub fn sourceContents(entry: @This()) []const bun.StringPointer {
return entry.source_contents[0..entry.file_paths.len];
}
pub fn renderMappings(map: Entry, kind: ChunkKind, arena: Allocator, gpa: Allocator) ![]u8 {
var j: StringJoiner = .{ .allocator = arena };
j.pushStatic("AAAA");
try joinVLQ(&map, kind, &j, arena);
return j.done(gpa);
}
pub fn renderJSON(map: *const Entry, dev: *DevServer, arena: Allocator, kind: ChunkKind, gpa: Allocator) ![]u8 {
const map_files = map.files.slice();
const paths = map.paths;
var j: StringJoiner = .{ .allocator = arena };
j.pushStatic(
\\{"version":3,"sources":["bun://Bun/Bun HMR Runtime"
);
// This buffer is temporary, holding the quoted source paths, joined with commas.
var source_map_strings = std.ArrayList(u8).init(arena);
defer source_map_strings.deinit();
dev.relative_path_buf_lock.lock();
defer dev.relative_path_buf_lock.unlock();
const buf = bun.path_buffer_pool.get();
defer bun.path_buffer_pool.put(buf);
for (paths) |native_file_path| {
try source_map_strings.appendSlice(",");
const path = if (Environment.isWindows)
bun.path.pathToPosixBuf(u8, native_file_path, buf)
else
native_file_path;
if (std.fs.path.isAbsolute(path)) {
const is_windows_drive_path = Environment.isWindows and path[0] != '/';
try source_map_strings.appendSlice(if (is_windows_drive_path)
"\"file:///"
else
"\"file://");
if (Environment.isWindows and !is_windows_drive_path) {
// UNC namespace -> file://server/share/path.ext
bun.strings.percentEncodeWrite(
if (path.len > 2 and path[0] == '/' and path[1] == '/')
path[2..]
else
path, // invalid but must not crash
&source_map_strings,
) catch |err| switch (err) {
error.IncompleteUTF8 => @panic("Unexpected: asset with incomplete UTF-8 as file path"),
error.OutOfMemory => |e| return e,
};
} else {
// posix paths always start with '/'
// -> file:///path/to/file.js
// windows drive letter paths have the extra slash added
// -> file:///C:/path/to/file.js
bun.strings.percentEncodeWrite(path, &source_map_strings) catch |err| switch (err) {
error.IncompleteUTF8 => @panic("Unexpected: asset with incomplete UTF-8 as file path"),
error.OutOfMemory => |e| return e,
};
}
try source_map_strings.appendSlice("\"");
} else {
try source_map_strings.appendSlice("\"bun://");
bun.strings.percentEncodeWrite(path, &source_map_strings) catch |err| switch (err) {
error.IncompleteUTF8 => @panic("Unexpected: asset with incomplete UTF-8 as file path"),
error.OutOfMemory => |e| return e,
};
try source_map_strings.appendSlice("\"");
}
}
j.pushStatic(source_map_strings.items);
j.pushStatic(
\\],"sourcesContent":["// (Bun's internal HMR runtime is minified)"
);
for (map_files.items(.tags), map_files.items(.data)) |tag, chunk| {
// For empty chunks, put a blank entry. This allows HTML
// files to get their stack remapped, despite having no
// actual mappings.
if (tag == .empty) {
j.pushStatic(",\"\"");
continue;
}
j.pushStatic(",");
const quoted_slice = chunk.ref.data.quotedContents();
if (quoted_slice.len == 0) {
bun.debugAssert(false); // vlq without source contents!
j.pushStatic(",\"// Did not have source contents for this file.\n// This is a bug in Bun's bundler and should be reported with a reproduction.\"");
continue;
}
// Store the location of the source file. Since it is going
// to be stored regardless for use by the served source map.
// These 8 bytes per file allow remapping sources without
// reading from disk, as well as ensuring that remaps to
// this exact sourcemap can print the previous state of
// the code when it was modified.
bun.assert(quoted_slice[0] == '"');
bun.assert(quoted_slice[quoted_slice.len - 1] == '"');
j.pushStatic(quoted_slice);
}
// This first mapping covers the bytes from line 0, column 0 up to the next mapping.
j.pushStatic(
\\],"names":[],"mappings":"AAAA
);
try joinVLQ(map, kind, &j, arena);
const json_bytes = try j.doneWithEnd(gpa, "\"}");
errdefer @compileError("last try should be the final alloc");
if (bun.FeatureFlags.bake_debugging_features) if (dev.dump_dir) |dump_dir| {
const rel_path_escaped = "latest_chunk.js.map";
dumpBundle(dump_dir, .client, rel_path_escaped, json_bytes, false) catch |err| {
bun.handleErrorReturnTrace(err, @errorReturnTrace());
Output.warn("Could not dump bundle: {}", .{err});
};
};
return json_bytes;
}
fn joinVLQ(map: *const Entry, kind: ChunkKind, j: *StringJoiner, arena: Allocator) !void {
const map_files = map.files.slice();
const runtime: bake.HmrRuntime = switch (kind) {
.initial_response => bun.bake.getHmrRuntime(.client),
.hmr_chunk => comptime .init("self[Symbol.for(\"bun:hmr\")]({\n"),
};
var prev_end_state: SourceMap.SourceMapState = .{
.generated_line = 0,
.generated_column = 0,
.source_index = 0,
.original_line = 0,
.original_column = 0,
};
// +2 is an empirically determined offset; it is what makes the mappings line up with the runtime.
var lines_between: u32 = runtime.line_count + 2;
// Join all of the mappings together.
for (map_files.items(.tags), map_files.items(.data), 1..) |tag, chunk, source_index| switch (tag) {
.empty => {
lines_between += (chunk.empty.line_count.unwrap() orelse
// NOTE: It is too late to compute this info since the
// bundled text may have been freed already. For example, a
// HMR chunk is never persisted.
@panic("Missing internal precomputed line count.")).get();
// - Empty file has no breakpoints that could remap.
// - Codegen of HTML files cannot throw.
continue;
},
.ref => {
const content = chunk.ref.data;
const start_state: SourceMap.SourceMapState = .{
.source_index = @intCast(source_index),
.generated_line = @intCast(lines_between),
.generated_column = 0,
.original_line = 0,
.original_column = 0,
};
lines_between = 0;
try SourceMap.appendSourceMapChunk(
j,
arena,
prev_end_state,
start_state,
content.vlq(),
);
prev_end_state = .{
.source_index = @intCast(source_index),
.generated_line = 0,
.generated_column = 0,
.original_line = content.end_state.original_line,
.original_column = content.end_state.original_column,
};
},
};
}
pub fn deinit(entry: *Entry, dev: *DevServer) void {
_ = VoidFieldTypes(Entry){
.ref_count = assert(entry.ref_count == 0),
.overlapping_memory_cost = {},
.files = {
for (entry.files.items(.tags), entry.files.items(.data)) |tag, data| {
switch (tag) {
.ref => data.ref.derefWithContext(dev),
.empty => {},
}
}
entry.files.deinit(dev.allocator);
},
.paths = dev.allocator.free(entry.paths),
};
}
};
pub const WeakRef = struct {
/// This encoding only supports route bundle scripts, which do not
/// utilize the bottom 32 bits of their keys. This is because the bottom
/// 32 bits are used for the index of the route bundle. While those bits
/// are present in the JS file's key, it is not present in the source
/// map key. This allows this struct to be cleanly packed to 128 bits.
key_top_bits: u32,
/// When this ref expires, it must subtract this many from `refs`
count: u32,
/// Seconds since epoch. Every time `weak_refs` is incremented, this is
/// updated to the current time plus `weak_ref_expiry_seconds`. When the
/// timer expires, all references are removed.
expire: i64,
pub fn key(ref: WeakRef) Key {
return .init(@as(u64, ref.key_top_bits) << 32);
}
pub fn init(k: Key, count: u32, expire: i64) WeakRef {
return .{
.key_top_bits = @intCast(k.get() >> 32),
.count = count,
.expire = expire,
};
}
};
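// A round-trip sketch for the packing above (route bundle keys only, since the
// bottom 32 bits are deliberately dropped):
test "WeakRef preserves the top 32 bits of a route bundle key" {
const k: Key = .init(@as(u64, 0x1234_5678) << 32);
const ref: WeakRef = .init(k, 1, 0);
try std.testing.expectEqual(k.get(), ref.key().get());
}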
pub fn owner(store: *SourceMapStore) *DevServer {
return @alignCast(@fieldParentPtr("source_maps", store));
}
const PutOrIncrementRefCount = union(enum) {
/// If an *Entry is returned, caller must initialize some
/// fields with the source map data.
uninitialized: *Entry,
/// Already exists, ref count was incremented.
shared: *Entry,
};
pub fn putOrIncrementRefCount(store: *SourceMapStore, script_id: Key, ref_count: u32) !PutOrIncrementRefCount {
const gop = try store.entries.getOrPut(store.owner().allocator, script_id);
if (!gop.found_existing) {
bun.debugAssert(ref_count > 0); // invalid state
gop.value_ptr.* = .{
.ref_count = ref_count,
.overlapping_memory_cost = undefined,
.paths = undefined,
.files = undefined,
};
return .{ .uninitialized = gop.value_ptr };
} else {
bun.debugAssert(ref_count >= 0); // a count of 0 is okay here since the entry already holds a reference
gop.value_ptr.*.ref_count += ref_count;
return .{ .shared = gop.value_ptr };
}
}
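// Typical call-site shape for `putOrIncrementRefCount` above (a sketch;
// `fillEntry` is hypothetical and stands in for the bundler code that
// populates `paths`, `files`, and the memory cost):
//
//   switch (try store.putOrIncrementRefCount(script_id, 1)) {
//       .uninitialized => |entry| fillEntry(entry),
//       .shared => {},
//   }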
pub fn unref(store: *SourceMapStore, key: Key) void {
unrefCount(store, key, 1);
}
pub fn unrefCount(store: *SourceMapStore, key: Key, count: u32) void {
const index = store.entries.getIndex(key) orelse
return bun.debugAssert(false);
unrefAtIndex(store, index, count);
}
fn unrefAtIndex(store: *SourceMapStore, index: usize, count: u32) void {
const e = &store.entries.values()[index];
e.ref_count -= count;
if (bun.Environment.enable_logs) {
mapLog("dec {x}, {d} | {d} -> {d}", .{ store.entries.keys()[index].get(), count, e.ref_count + count, e.ref_count });
}
if (e.ref_count == 0) {
e.deinit(store.owner());
store.entries.swapRemoveAt(index);
}
}
pub fn addWeakRef(store: *SourceMapStore, key: Key) void {
// This function expects that `weak_ref_entry_max` is low.
const entry = store.entries.getPtr(key) orelse
return bun.debugAssert(false);
entry.ref_count += 1;
var new_weak_ref_count: u32 = 1;
for (0..store.weak_refs.count) |i| {
const ref = store.weak_refs.peekItem(i);
if (ref.key() == key) {
new_weak_ref_count += ref.count;
store.weak_refs.orderedRemoveItem(i);
break;
}
} else {
// If full, one must be expired to make room.
if (store.weak_refs.count >= weak_ref_entry_max) {
const first = store.weak_refs.readItem().?;
store.unrefCount(first.key(), first.count);
if (store.weak_ref_sweep_timer.state == .ACTIVE and
store.weak_ref_sweep_timer.next.sec == first.expire)
store.owner().vm.timer.remove(&store.weak_ref_sweep_timer);
}
}
const expire = bun.timespec.msFromNow(weak_ref_expiry_seconds * 1000);
store.weak_refs.writeItem(.init(
key,
new_weak_ref_count,
expire.sec,
)) catch
unreachable; // space has been cleared above
if (store.weak_ref_sweep_timer.state != .ACTIVE) {
mapLog("arming weak ref sweep timer", .{});
store.owner().vm.timer.update(&store.weak_ref_sweep_timer, &expire);
}
mapLog("addWeakRef {x}, ref_count: {d}", .{ key.get(), entry.ref_count });
}
/// Returns true if the ref count was incremented (meaning there was a source map to transfer)
pub fn removeOrUpgradeWeakRef(store: *SourceMapStore, key: Key, mode: enum(u1) {
/// Remove the weak ref entirely
remove = 0,
/// Convert the weak ref into a strong ref
upgrade = 1,
}) bool {
const entry = store.entries.getPtr(key) orelse
return false;
for (0..store.weak_refs.count) |i| {
const ref = store.weak_refs.peekItemMut(i);
if (ref.key() == key) {
ref.count -|= 1;
if (mode == .remove) {
store.unref(key);
}
if (ref.count == 0) {
store.weak_refs.orderedRemoveItem(i);
}
break;
}
} else {
entry.ref_count += @intFromEnum(mode);
}
mapLog("removeOrUpgradeWeakRef {x}, ref_count: {d}", .{
key.get(),
entry.ref_count,
});
return true;
}
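// A lifecycle sketch tying these functions together (function names from this
// file; the triggering events are my reading of the doc comments above):
//
//   serve HTML        -> addWeakRef(key)                        weak ref added
//   script loads      -> removeOrUpgradeWeakRef(key, .upgrade)  weak -> strong
//   socket closes     -> unref(key)                             strong ref released
//   expiry / overflow -> sweepWeakRefs or addWeakRef eviction   weak ref dropped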
pub fn locateWeakRef(store: *SourceMapStore, key: Key) ?struct { index: usize, ref: WeakRef } {
for (0..store.weak_refs.count) |i| {
const ref = store.weak_refs.peekItem(i);
if (ref.key() == key) return .{ .index = i, .ref = ref };
}
return null;
}
pub fn sweepWeakRefs(timer: *EventLoopTimer, now_ts: *const bun.timespec) EventLoopTimer.Arm {
mapLog("sweepWeakRefs", .{});
const store: *SourceMapStore = @fieldParentPtr("weak_ref_sweep_timer", timer);
assert(store.owner().magic == .valid);
const now: u64 = @max(now_ts.sec, 0);
defer store.owner().emitMemoryVisualizerMessageIfNeeded();
while (store.weak_refs.readItem()) |item| {
if (item.expire <= now) {
store.unrefCount(item.key(), item.count);
} else {
store.weak_refs.unget(&.{item}) catch
unreachable; // there is enough space since the last item was just removed.
store.weak_ref_sweep_timer.state = .FIRED;
store.owner().vm.timer.update(
&store.weak_ref_sweep_timer,
&.{ .sec = item.expire + 1, .nsec = 0 },
);
return .disarm;
}
}
store.weak_ref_sweep_timer.state = .CANCELLED;
return .disarm;
}
pub const GetResult = struct {
index: bun.GenericIndex(u32, Entry),
mappings: SourceMap.Mapping.List,
file_paths: []const []const u8,
entry_files: *const bun.MultiArrayList(PackedMap.RefOrEmpty),
pub fn deinit(self: *@This(), allocator: Allocator) void {
self.mappings.deinit(allocator);
// file paths and source contents are borrowed
}
};
/// This is used in exactly one place: remapping errors. In that function,
/// an arena allows reusing memory between different source maps.
pub fn getParsedSourceMap(store: *SourceMapStore, script_id: Key, arena: Allocator, gpa: Allocator) ?GetResult {
const index = store.entries.getIndex(script_id) orelse
return null; // source map was collected.
const entry = &store.entries.values()[index];
const script_id_decoded: SourceMapStore.SourceId = @bitCast(script_id.get());
const vlq_bytes = entry.renderMappings(script_id_decoded.kind, arena, arena) catch bun.outOfMemory();
switch (SourceMap.Mapping.parse(
gpa,
vlq_bytes,
null,
@intCast(entry.paths.len),
0, // unused
.{},
)) {
.fail => |fail| {
Output.debugWarn("Failed to re-parse source map: {s}", .{fail.msg});
return null;
},
.success => |psm| {
return .{
.index = .init(@intCast(index)),
.mappings = psm.mappings,
.file_paths = entry.paths,
.entry_files = &entry.files,
};
},
}
}
const bun = @import("bun");
const Environment = bun.Environment;
const Output = bun.Output;
const SourceMap = bun.sourcemap;
const StringJoiner = bun.StringJoiner;
const assert = bun.assert;
const bake = bun.bake;
const VoidFieldTypes = bun.meta.VoidFieldTypes;
const EventLoopTimer = bun.api.Timer.EventLoopTimer;
const DevServer = bun.bake.DevServer;
const ChunkKind = DevServer.ChunkKind;
const PackedMap = DevServer.PackedMap;
const dumpBundle = DevServer.dumpBundle;
const mapLog = DevServer.mapLog;
const std = @import("std");
const AutoArrayHashMapUnmanaged = std.AutoArrayHashMapUnmanaged;
const Allocator = std.mem.Allocator;

View File

@@ -0,0 +1,177 @@
/// All code working with atomics to communicate watcher <-> DevServer is here.
/// It attempts to recycle as much memory as possible, since files are very
/// frequently updated (the whole point of HMR)
const WatcherAtomics = @This();
/// Only two hot-reload events exist ever, which is possible since only one
/// bundle may be active at once. Memory is reused by swapping between these
/// two. These items are aligned to cache lines to reduce contention, since
/// these structures are carefully passed between two threads.
events: [2]HotReloadEvent align(std.atomic.cache_line),
/// 0 - no pending events
/// 1 - an event has been submitted to the DevServer thread
/// 2+ - additional events arrived while the bundler is still busy
watcher_events_emitted: std.atomic.Value(u32),
/// Which event is the watcher holding on to.
/// This is not atomic because only the watcher thread uses this value.
current: u1 align(std.atomic.cache_line),
watcher_has_event: std.debug.SafetyLock,
dev_server_has_event: std.debug.SafetyLock,
pub fn init(dev: *DevServer) WatcherAtomics {
return .{
.events = .{ .initEmpty(dev), .initEmpty(dev) },
.current = 0,
.watcher_events_emitted = .init(0),
.watcher_has_event = .{},
.dev_server_has_event = .{},
};
}
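// A sketch of the intended handshake between the two threads (function names
// are from this file; the fill step is whatever the watcher appends to the
// event before releasing it):
//
//   watcher thread                        DevServer thread
//   --------------                        ----------------
//   ev = watcherAcquireEvent()
//   ...fill ev with changed files...      (bundling the previous event)
//   watcherReleaseAndSubmitEvent(ev)
//                                         extra = recycleEventFromDevServer(ev)
//                                         if (extra != null)
//                                             recycleSecondEventFromDevServer(extra)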
/// Atomically get a *HotReloadEvent that is not used by the DevServer thread
/// Call `watcherRelease` when it is filled with files.
pub fn watcherAcquireEvent(state: *WatcherAtomics) *HotReloadEvent {
state.watcher_has_event.lock();
var ev: *HotReloadEvent = &state.events[state.current];
switch (ev.contention_indicator.swap(1, .seq_cst)) {
0 => {
// New event is unreferenced by the DevServer thread.
},
1 => {
@branchHint(.unlikely);
// DevServer stole this event. Unlikely but possible when
// the user is saving very heavily (10-30 times per second)
state.current +%= 1;
ev = &state.events[state.current];
if (Environment.allow_assert) {
bun.assert(ev.contention_indicator.swap(1, .seq_cst) == 0);
}
},
else => unreachable,
}
// Initialize the timer if it is empty.
if (ev.isEmpty())
ev.timer = std.time.Timer.start() catch unreachable;
ev.owner.bun_watcher.thread_lock.assertLocked();
if (Environment.isDebug)
assert(ev.debug_mutex.tryLock());
return ev;
}
/// Release the pointer from `watcherAcquireHotReloadEvent`, submitting
/// the event if it contains new files.
pub fn watcherReleaseAndSubmitEvent(state: *WatcherAtomics, ev: *HotReloadEvent) void {
state.watcher_has_event.unlock();
ev.owner.bun_watcher.thread_lock.assertLocked();
if (Environment.isDebug) {
for (std.mem.asBytes(&ev.timer)) |b| {
if (b != 0xAA) break;
} else @panic("timer is undefined memory in watcherReleaseAndSubmitEvent");
}
if (Environment.isDebug)
ev.debug_mutex.unlock();
if (!ev.isEmpty()) {
@branchHint(.likely);
// There are files to be processed, increment this count first.
const prev_count = state.watcher_events_emitted.fetchAdd(1, .seq_cst);
if (prev_count == 0) {
@branchHint(.likely);
// Submit a task to the DevServer, notifying it that there is
// work to do. The watcher will move to the other event.
ev.concurrent_task = .{
.auto_delete = false,
.next = null,
.task = jsc.Task.init(ev),
};
ev.contention_indicator.store(0, .seq_cst);
ev.owner.vm.event_loop.enqueueTaskConcurrent(&ev.concurrent_task);
state.current +%= 1;
} else {
// DevServer thread has already notified once. Sending
// a second task would give ownership of both events to
// them. Instead, DevServer will steal this item since
// it can observe `watcher_events_emitted >= 2`.
ev.contention_indicator.store(0, .seq_cst);
}
} else {
ev.contention_indicator.store(0, .seq_cst);
}
if (Environment.allow_assert) {
bun.assert(ev.contention_indicator.load(.monotonic) == 0); // always must be reset
}
}
/// Called by DevServer after it receives a task callback. If this returns
/// another event, that event must be recycled with `recycleSecondEventFromDevServer`
pub fn recycleEventFromDevServer(state: *WatcherAtomics, first_event: *HotReloadEvent) ?*HotReloadEvent {
first_event.reset();
// Reset the watch count to zero, while detecting if
// the other watch event was submitted.
if (state.watcher_events_emitted.swap(0, .seq_cst) >= 2) {
// Cannot use `state.current` because it will contend with the watcher.
// Since there are only two events, one pointer comparison suffices
const other_event = if (first_event == &state.events[0])
&state.events[1]
else
&state.events[0];
switch (other_event.contention_indicator.swap(1, .seq_cst)) {
0 => {
// DevServer holds the event now.
state.dev_server_has_event.lock();
return other_event;
},
1 => {
// The watcher is currently using this event.
// `watcher_events_emitted` is already zero, so it will
// always submit.
// Not 100% confident in this logic, but the only way
// to hit this is by saving extremely frequently, and
// a follow-up save will just trigger the reload.
return null;
},
else => unreachable,
}
}
// If a watch callback had already acquired the event, that is fine as
// it will now read 0 when deciding if to submit the task.
return null;
}
pub fn recycleSecondEventFromDevServer(state: *WatcherAtomics, second_event: *HotReloadEvent) void {
second_event.reset();
state.dev_server_has_event.unlock();
if (Environment.allow_assert) {
const result = second_event.contention_indicator.swap(0, .seq_cst);
bun.assert(result == 1);
} else {
second_event.contention_indicator.store(0, .seq_cst);
}
}
const std = @import("std");
const bun = @import("bun");
const Environment = bun.Environment;
const assert = bun.assert;
const bake = bun.bake;
const jsc = bun.jsc;
const DevServer = bake.DevServer;
const HotReloadEvent = DevServer.HotReloadEvent;
const debug = DevServer.debug;

View File

@@ -0,0 +1,223 @@
pub const MemoryCost = @This();
incremental_graph_client: usize,
incremental_graph_server: usize,
js_code: usize,
source_maps: usize,
assets: usize,
other: usize,
/// Returns an estimation for how many bytes DevServer is explicitly aware of.
/// If this number stays constant but RSS grows, then there is a memory leak. If
/// this number grows out of control, then incremental garbage collection is not
/// good enough.
///
/// Memory measurements are important as DevServer has a long lifetime, but
/// unlike the HTTP server, it controls a lot of objects that are frequently
/// being added, removed, and changed (as the developer edits source files). It
/// is exponentially easy to mess up memory management.
pub fn memoryCostDetailed(dev: *DevServer) MemoryCost {
var other_bytes: usize = @sizeOf(DevServer);
var incremental_graph_client: usize = 0;
var incremental_graph_server: usize = 0;
var js_code: usize = 0;
var source_maps: usize = 0;
var assets: usize = 0;
const dedupe_bits: u32 = @truncate(@abs(std.time.nanoTimestamp()));
const discard = voidFieldTypeDiscardHelper;
// See https://github.com/ziglang/zig/issues/21879
_ = VoidFieldTypes(DevServer){
// does not contain pointers
.allocator = {},
.assume_perfect_incremental_bundling = {},
.bun_watcher = {},
.bundles_since_last_error = {},
.configuration_hash_key = {},
.inspector_server_id = {},
.deferred_request_pool = {},
.dump_dir = {},
.emit_incremental_visualizer_events = {},
.emit_memory_visualizer_events = {},
.frontend_only = {},
.generation = {},
.graph_safety_lock = {},
.has_pre_crash_handler = {},
.magic = {},
.memory_visualizer_timer = {},
.plugin_state = {},
.relative_path_buf_lock = {},
.server_register_update_callback = {},
.server_fetch_function_callback = {},
.watcher_atomics = {},
.relative_path_buf = {},
// pointers that are not considered a part of DevServer
.vm = {},
.server = {},
.server_transpiler = {},
.client_transpiler = {},
.ssr_transpiler = {},
.log = {},
.framework = {},
.bundler_options = {},
.allocation_scope = {},
.broadcast_console_log_from_browser_to_server = {},
// to be counted.
.root = {
other_bytes += dev.root.len;
},
.router = {
other_bytes += dev.router.memoryCost();
},
.route_bundles = for (dev.route_bundles.items) |*bundle| {
other_bytes += bundle.memoryCost();
},
.server_graph = {
const cost = dev.server_graph.memoryCostDetailed(dedupe_bits);
incremental_graph_server += cost.graph;
js_code += cost.code;
source_maps += cost.source_maps;
},
.client_graph = {
const cost = dev.client_graph.memoryCostDetailed(dedupe_bits);
incremental_graph_client += cost.graph;
js_code += cost.code;
source_maps += cost.source_maps;
},
.assets = {
assets += dev.assets.memoryCost();
},
.active_websocket_connections = {
other_bytes += dev.active_websocket_connections.capacity() * @sizeOf(*HmrSocket);
},
.source_maps = {
other_bytes += memoryCostArrayHashMap(dev.source_maps.entries);
for (dev.source_maps.entries.values()) |entry| {
source_maps += entry.files.memoryCost();
for (entry.files.items(.tags), entry.files.items(.data)) |tag, data| {
switch (tag) {
.ref => source_maps += data.ref.data.memoryCostWithDedupe(dedupe_bits),
.empty => {},
}
}
}
},
.incremental_result = discard(VoidFieldTypes(IncrementalResult){
.had_adjusted_edges = {},
.client_components_added = {
other_bytes += memoryCostArrayList(dev.incremental_result.client_components_added);
},
.framework_routes_affected = {
other_bytes += memoryCostArrayList(dev.incremental_result.framework_routes_affected);
},
.client_components_removed = {
other_bytes += memoryCostArrayList(dev.incremental_result.client_components_removed);
},
.failures_removed = {
other_bytes += memoryCostArrayList(dev.incremental_result.failures_removed);
},
.client_components_affected = {
other_bytes += memoryCostArrayList(dev.incremental_result.client_components_affected);
},
.failures_added = {
other_bytes += memoryCostArrayList(dev.incremental_result.failures_added);
},
.html_routes_soft_affected = {
other_bytes += memoryCostArrayList(dev.incremental_result.html_routes_soft_affected);
},
.html_routes_hard_affected = {
other_bytes += memoryCostArrayList(dev.incremental_result.html_routes_hard_affected);
},
}),
.has_tailwind_plugin_hack = if (dev.has_tailwind_plugin_hack) |hack| {
other_bytes += memoryCostArrayHashMap(hack);
},
.directory_watchers = {
other_bytes += memoryCostArrayList(dev.directory_watchers.dependencies);
other_bytes += memoryCostArrayList(dev.directory_watchers.dependencies_free_list);
other_bytes += memoryCostArrayHashMap(dev.directory_watchers.watches);
for (dev.directory_watchers.dependencies.items) |dep| {
other_bytes += dep.specifier.len;
}
for (dev.directory_watchers.watches.keys()) |dir_name| {
other_bytes += dir_name.len;
}
},
.html_router = {
// std does not provide a way to measure exact allocation size of HashMapUnmanaged
other_bytes += dev.html_router.map.capacity() * (@sizeOf(*HTMLBundle.HTMLBundleRoute) + @sizeOf([]const u8));
// DevServer does not count the referenced HTMLBundle.HTMLBundleRoutes
},
.bundling_failures = {
other_bytes += memoryCostSlice(dev.bundling_failures.keys());
for (dev.bundling_failures.keys()) |failure| {
other_bytes += failure.data.len;
}
},
// All entries are owned by the bundler arena, not DevServer, except for `requests`
.current_bundle = if (dev.current_bundle) |bundle| {
var r = bundle.requests.first;
while (r) |request| : (r = request.next) {
other_bytes += @sizeOf(DeferredRequest.Node);
}
},
.next_bundle = {
var r = dev.next_bundle.requests.first;
while (r) |request| : (r = request.next) {
other_bytes += @sizeOf(DeferredRequest.Node);
}
other_bytes += memoryCostArrayHashMap(dev.next_bundle.route_queue);
},
.route_lookup = {
other_bytes += memoryCostArrayHashMap(dev.route_lookup);
},
.testing_batch_events = switch (dev.testing_batch_events) {
.disabled => {},
.enabled => |batch| {
other_bytes += memoryCostArrayHashMap(batch.entry_points.set);
},
.enable_after_bundle => {},
},
};
return .{
.assets = assets,
.incremental_graph_client = incremental_graph_client,
.incremental_graph_server = incremental_graph_server,
.js_code = js_code,
.other = other_bytes,
.source_maps = source_maps,
};
}
pub fn memoryCost(dev: *DevServer) usize {
const cost = memoryCostDetailed(dev);
var acc: usize = 0;
inline for (@typeInfo(MemoryCost).@"struct".fields) |field| {
acc += @field(cost, field.name);
}
return acc;
}
pub fn memoryCostArrayList(slice: anytype) usize {
return slice.capacity * @sizeOf(@typeInfo(@TypeOf(slice.items)).pointer.child);
}
pub fn memoryCostSlice(slice: anytype) usize {
return slice.len * @sizeOf(@typeInfo(@TypeOf(slice)).pointer.child);
}
pub fn memoryCostArrayHashMap(map: anytype) usize {
return @TypeOf(map.entries).capacityInBytes(map.entries.capacity);
}
const std = @import("std");
const bun = @import("bun");
const jsc = bun.jsc;
const HTMLBundle = jsc.API.HTMLBundle;
const DevServer = bun.bake.DevServer;
const DeferredRequest = DevServer.DeferredRequest;
const HmrSocket = DevServer.HmrSocket;
const IncrementalResult = DevServer.IncrementalResult;
const VoidFieldTypes = bun.meta.VoidFieldTypes;
const voidFieldTypeDiscardHelper = bun.meta.voidFieldTypeDiscardHelper;

View File

@@ -336,5 +336,25 @@ pub fn SliceChild(comptime T: type) type {
return T;
}
/// userland implementation of https://github.com/ziglang/zig/issues/21879
pub fn VoidFieldTypes(comptime T: type) type {
const fields = @typeInfo(T).@"struct".fields;
var new_fields = fields[0..fields.len].*;
for (&new_fields) |*field| {
field.type = void;
field.default_value_ptr = null;
}
return @Type(.{ .@"struct" = .{
.layout = .auto,
.fields = &new_fields,
.decls = &.{},
.is_tuple = false,
} });
}
pub fn voidFieldTypeDiscardHelper(data: anytype) void {
_ = data;
}
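// A minimal usage sketch: listing every field of a `VoidFieldTypes(T)` literal
// turns "a field of T was added" into a compile error at each accounting site
// (`Point` here is a hypothetical type for illustration):
test VoidFieldTypes {
const Point = struct { x: f32, y: f32 };
_ = VoidFieldTypes(Point){
.x = {}, // every field must be named exactly once
.y = {},
};
}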
const bun = @import("bun");
const std = @import("std");