fix a crash in remapping stacks (#12477)

Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
Author: dave caruso
Date: 2024-07-10 16:39:05 -07:00
Committed by: GitHub
Parent: 55d59ebf1f
Commit: 02b589b2ce
9 changed files with 229 additions and 81 deletions

.vscode/launch.json

@@ -145,8 +145,8 @@
"request": "launch",
"name": "bun run [file]",
"program": "${workspaceFolder}/build/bun-debug",
"args": ["run", "${fileBasename}"],
"cwd": "${fileDirname}",
"args": ["run", "/Users/dave/Downloads/pd-api-testnet/dist/app.js"],
"cwd": "/Users/dave/Downloads/pd-api-testnet",
"env": {
"FORCE_COLOR": "0",
"BUN_DEBUG_QUIET_LOGS": "1",


@@ -390,15 +390,15 @@ WTF::String Bun::formatStackTrace(
// If it's not a Zig::GlobalObject, don't bother source-mapping it.
if (globalObject && !sourceURLForFrame.isEmpty()) {
// https://github.com/oven-sh/bun/issues/3595
if (!sourceURLForFrame.isEmpty()) {
remappedFrame.source_url = Bun::toString(sourceURLForFrame);
} else {
// https://github.com/oven-sh/bun/issues/3595
remappedFrame.source_url = BunStringEmpty;
}
remappedFrame.source_url = Bun::toStringRef(sourceURLForFrame);
// This ensures the lifetime of the sourceURL is accounted for correctly
Bun__remapStackFramePositions(globalObject, &remappedFrame, 1);
// This ensures the lifetime of the sourceURL is accounted for correctly
Bun__remapStackFramePositions(globalObject, &remappedFrame, 1);
sourceURLForFrame = remappedFrame.source_url.toWTFString();
}
}
// there is always a newline before each stack frame line, ensuring that the name + message
@@ -493,15 +493,15 @@ WTF::String Bun::formatStackTrace(
// If it's not a Zig::GlobalObject, don't bother source-mapping it.
if (globalObject) {
// https://github.com/oven-sh/bun/issues/3595
if (!sourceURLForFrame.isEmpty()) {
remappedFrame.source_url = Bun::toString(sourceURLForFrame);
} else {
// https://github.com/oven-sh/bun/issues/3595
remappedFrame.source_url = BunStringEmpty;
}
remappedFrame.source_url = Bun::toStringRef(sourceURLForFrame);
// This ensures the lifetime of the sourceURL is accounted for correctly
Bun__remapStackFramePositions(globalObject, &remappedFrame, 1);
// This ensures the lifetime of the sourceURL is accounted for correctly
Bun__remapStackFramePositions(globalObject, &remappedFrame, 1);
sourceURLForFrame = remappedFrame.source_url.toWTFString();
}
}
if (!hasSet) {
@@ -587,7 +587,7 @@ static String computeErrorInfoWithPrepareStackTrace(JSC::VM& vm, Zig::GlobalObje
ZigStackFrame remappedFrames[framesCount];
for (int i = 0; i < framesCount; i++) {
remappedFrames[i] = {};
remappedFrames[i].source_url = Bun::toString(lexicalGlobalObject, stackTrace.at(i).sourceURL());
remappedFrames[i].source_url = Bun::toStringRef(lexicalGlobalObject, stackTrace.at(i).sourceURL());
if (JSCStackFrame::SourcePositions* sourcePositions = stackTrace.at(i).getSourcePositions()) {
remappedFrames[i].position.line_zero_based = sourcePositions->line.zeroBasedInt();
remappedFrames[i].position.column_zero_based = sourcePositions->column.zeroBasedInt();
@@ -2493,7 +2493,7 @@ JSC_DEFINE_HOST_FUNCTION(errorConstructorFuncCaptureStackTrace, (JSC::JSGlobalOb
for (int i = 0; i < framesCount; i++) {
memset(remappedFrames + i, 0, sizeof(ZigStackFrame));
remappedFrames[i].source_url = Bun::toString(lexicalGlobalObject, stackTrace.at(i).sourceURL());
remappedFrames[i].source_url = Bun::toStringRef(lexicalGlobalObject, stackTrace.at(i).sourceURL());
if (JSCStackFrame::SourcePositions* sourcePositions = stackTrace.at(i).getSourcePositions()) {
remappedFrames[i].position.line_zero_based = sourcePositions->line.zeroBasedInt();
remappedFrames[i].position.column_zero_based = sourcePositions->column.zeroBasedInt();


@@ -127,6 +127,16 @@ pub const SavedSourceMap = struct {
pub const vlq_offset = 24;
pub inline fn lock(map: *SavedSourceMap) void {
map.mutex.lock();
map.map.unlockPointers();
}
pub inline fn unlock(map: *SavedSourceMap) void {
map.map.lockPointers();
map.mutex.unlock();
}
// For the runtime, we store the number of mappings and how many bytes the final list is at the beginning of the array
// The first 8 bytes are the length of the array
// The second 8 bytes are the number of mappings
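The new lock/unlock helpers pair the map's mutex with its pointer-stability guard, so any path that touches the table without holding the mutex trips an assertion instead of racing. A minimal standalone Zig sketch of that pairing, assuming a map type with debug pointer-lock toggles (DebugMap and its lockPointers/unlockPointers below are illustrative stand-ins, not bun's hash-map API):

const std = @import("std");

// Stand-in for a hash map with a debug-only pointer-stability guard.
const DebugMap = struct {
    pointers_locked: bool = true,

    fn lockPointers(self: *DebugMap) void {
        self.pointers_locked = true;
    }
    fn unlockPointers(self: *DebugMap) void {
        self.pointers_locked = false;
    }
};

const SavedMapLike = struct {
    mutex: std.Thread.Mutex = .{},
    map: DebugMap = .{},

    // Taking the mutex also releases the pointer guard; dropping the mutex
    // re-arms it, mirroring the helpers added above.
    inline fn lock(self: *SavedMapLike) void {
        self.mutex.lock();
        self.map.unlockPointers();
    }
    inline fn unlock(self: *SavedMapLike) void {
        self.map.lockPointers();
        self.mutex.unlock();
    }
};

test "lock pairs the mutex with the pointer guard" {
    var s = SavedMapLike{};
    s.lock();
    defer s.unlock();
    try std.testing.expect(!s.map.pointers_locked);
}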
@@ -209,8 +219,8 @@ pub const SavedSourceMap = struct {
}
pub fn removeZigSourceProvider(this: *SavedSourceMap, opaque_source_provider: *anyopaque, path: []const u8) void {
this.mutex.lock();
defer this.mutex.unlock();
this.lock();
defer this.unlock();
const entry = this.map.getEntry(bun.hash(path)) orelse return;
const old_value = Value.from(entry.value_ptr.*);
@@ -222,8 +232,8 @@ pub const SavedSourceMap = struct {
} else if (old_value.get(ParsedSourceMap)) |map| {
if (map.underlying_provider.provider()) |prov| {
if (@intFromPtr(prov) == @intFromPtr(opaque_source_provider)) {
map.deinit(default_allocator);
this.map.removeByPtr(entry.key_ptr);
map.deref();
}
}
}
@@ -239,15 +249,14 @@ pub const SavedSourceMap = struct {
pub fn deinit(this: *SavedSourceMap) void {
{
this.mutex.lock();
defer this.mutex.unlock();
this.lock();
defer this.unlock();
var iter = this.map.valueIterator();
while (iter.next()) |val| {
var value = Value.from(val.*);
if (value.get(ParsedSourceMap)) |source_map_| {
var source_map: *ParsedSourceMap = source_map_;
source_map.deinit(default_allocator);
if (value.get(ParsedSourceMap)) |source_map| {
source_map.deref();
} else if (value.get(SavedMappings)) |saved_mappings| {
var saved = SavedMappings{ .data = @as([*]u8, @ptrCast(saved_mappings)) };
saved.deinit();
@@ -257,6 +266,7 @@ pub const SavedSourceMap = struct {
}
}
this.map.unlockPointers();
this.map.deinit();
}
@@ -265,14 +275,15 @@ pub const SavedSourceMap = struct {
}
fn putValue(this: *SavedSourceMap, path: []const u8, value: Value) !void {
this.mutex.lock();
defer this.mutex.unlock();
this.lock();
defer this.unlock();
const entry = try this.map.getOrPut(bun.hash(path));
if (entry.found_existing) {
var old_value = Value.from(entry.value_ptr.*);
if (old_value.get(ParsedSourceMap)) |parsed_source_map| {
var source_map: *ParsedSourceMap = parsed_source_map;
source_map.deinit(default_allocator);
source_map.deref();
} else if (old_value.get(SavedMappings)) |saved_mappings| {
var saved = SavedMappings{ .data = @as([*]u8, @ptrCast(saved_mappings)) };
saved.deinit();
@@ -283,37 +294,59 @@ pub const SavedSourceMap = struct {
entry.value_ptr.* = value.ptr();
}
pub fn getWithContent(
fn getWithContent(
this: *SavedSourceMap,
path: string,
hint: SourceMap.ParseUrlResultHint,
) SourceMap.ParseUrl {
const hash = bun.hash(path);
const mapping = this.map.getEntry(hash) orelse return .{};
// This lock is for the hash table
this.lock();
// This mapping entry is only valid while the mutex is locked
const mapping = this.map.getEntry(hash) orelse {
this.unlock();
return .{};
};
switch (Value.from(mapping.value_ptr.*).tag()) {
Value.Tag.ParsedSourceMap => {
return .{ .map = Value.from(mapping.value_ptr.*).as(ParsedSourceMap) };
defer this.unlock();
const map = Value.from(mapping.value_ptr.*).as(ParsedSourceMap);
map.ref();
return .{ .map = map };
},
Value.Tag.SavedMappings => {
defer this.unlock();
var saved = SavedMappings{ .data = @as([*]u8, @ptrCast(Value.from(mapping.value_ptr.*).as(ParsedSourceMap))) };
defer saved.deinit();
const result = default_allocator.create(ParsedSourceMap) catch unreachable;
result.* = saved.toMapping(default_allocator, path) catch {
const result = ParsedSourceMap.new(saved.toMapping(default_allocator, path) catch {
_ = this.map.remove(mapping.key_ptr.*);
return .{};
};
});
mapping.value_ptr.* = Value.init(result).ptr();
result.ref();
return .{ .map = result };
},
Value.Tag.SourceProviderMap => {
var ptr = Value.from(mapping.value_ptr.*).as(SourceProviderMap);
this.unlock();
if (ptr.getSourceMap(path, .none, hint)) |parse|
// Do not lock the mutex while we're parsing JSON!
if (ptr.getSourceMap(path, .none, hint)) |parse| {
if (parse.map) |map| {
mapping.value_ptr.* = Value.init(map).ptr();
return parse;
};
map.ref();
// The mutex is not locked. We have to check the hash table again.
this.putValue(path, Value.init(map)) catch bun.outOfMemory();
return parse;
}
}
this.lock();
defer this.unlock();
// does not have a valid source map. let's not try again
_ = this.map.remove(hash);
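The comments above spell out the locking protocol: the table lock guards hash-map entries, it must not be held while parsing JSON, and once it has been dropped the table has to be consulted again before publishing a result. A rough standalone Zig sketch of that unlock-compute-relock shape (Cache, getOrCompute, and expensiveCompute are made-up names, not bun APIs; the real putValue also frees a value inserted by a racing thread):

const std = @import("std");

const Cache = struct {
    mutex: std.Thread.Mutex = .{},
    map: std.AutoHashMap(u64, u64),

    fn init(allocator: std.mem.Allocator) Cache {
        return .{ .map = std.AutoHashMap(u64, u64).init(allocator) };
    }
    fn deinit(self: *Cache) void {
        self.map.deinit();
    }

    fn getOrCompute(self: *Cache, key: u64) !u64 {
        self.mutex.lock();
        if (self.map.get(key)) |hit| {
            self.mutex.unlock();
            return hit;
        }
        // Do not hold the lock during the expensive part (parsing, in the real code).
        self.mutex.unlock();
        const value = expensiveCompute(key);

        // The lock was released, so re-acquire it and publish the result;
        // a racing thread may have inserted its own entry in the meantime.
        self.mutex.lock();
        defer self.mutex.unlock();
        try self.map.put(key, value);
        return value;
    }

    fn expensiveCompute(key: u64) u64 {
        return key *% 2654435761;
    }
};

test "drop the lock around expensive work" {
    var cache = Cache.init(std.testing.allocator);
    defer cache.deinit();
    try std.testing.expectEqual(try cache.getOrCompute(42), try cache.getOrCompute(42));
}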
@@ -343,14 +376,12 @@ pub const SavedSourceMap = struct {
column: i32,
source_handling: SourceMap.SourceContentHandling,
) ?SourceMap.Mapping.Lookup {
this.mutex.lock();
defer this.mutex.unlock();
const parse = this.getWithContent(path, switch (source_handling) {
.no_source_contents => .mappings_only,
.source_contents => .{ .all = .{ .line = line, .column = column } },
});
const map = parse.map orelse return null;
const mapping = parse.mapping orelse
SourceMap.Mapping.find(map.mappings, line, column) orelse
return null;
@@ -655,11 +686,12 @@ pub const VirtualMachine = struct {
default_tls_reject_unauthorized: ?bool = null,
default_verbose_fetch: ?bun.http.HTTPVerboseLevel = null,
/// Do not access this field directly
/// It exists in the VirtualMachine struct so that
/// we don't accidentally make a stack copy of it
/// only use it through
/// source_mappings
/// Do not access this field directly!
///
/// It exists in the VirtualMachine struct so that we don't accidentally
/// make a stack copy of it; only use it through source_mappings.
///
/// This proposal could let us safely move it back https://github.com/ziglang/zig/issues/7769
saved_source_map_table: SavedSourceMap.HashTable = undefined,
source_mappings: SavedSourceMap = undefined,
@@ -1447,6 +1479,7 @@ pub const VirtualMachine = struct {
.debug_thread_id = if (Environment.allow_assert) std.Thread.getCurrentId() else {},
};
vm.source_mappings = .{ .map = &vm.saved_source_map_table };
vm.source_mappings.map.lockPointers();
vm.regular_event_loop.tasks = EventLoop.Queue.init(
default_allocator,
);
@@ -1561,6 +1594,7 @@ pub const VirtualMachine = struct {
.debug_thread_id = if (Environment.allow_assert) std.Thread.getCurrentId() else {},
};
vm.source_mappings = .{ .map = &vm.saved_source_map_table };
vm.source_mappings.map.lockPointers();
vm.regular_event_loop.tasks = EventLoop.Queue.init(
default_allocator,
);
@@ -2944,6 +2978,8 @@ pub const VirtualMachine = struct {
@max(frame.position.column.zeroBased(), 0),
.no_source_contents,
)) |lookup| {
const source_map = lookup.source_map;
defer if (source_map) |map| map.deref();
if (lookup.displaySourceURLIfNeeded(sourceURL.slice())) |source_url| {
frame.source_url.deref();
frame.source_url = source_url;
@@ -3055,9 +3091,8 @@ pub const VirtualMachine = struct {
},
.source_index = 0,
},
// undefined is fine, because these two values are never read if `top.remapped == true`
.source_map = undefined,
.prefetched_source_code = undefined,
.source_map = null,
.prefetched_source_code = null,
}
else
this.source_mappings.resolveMapping(
@@ -3069,6 +3104,8 @@ pub const VirtualMachine = struct {
if (maybe_lookup) |lookup| {
const mapping = lookup.mapping;
const source_map = lookup.source_map;
defer if (source_map) |map| map.deref();
if (!top.remapped) {
if (lookup.displaySourceURLIfNeeded(top_source_url.slice())) |src| {
@@ -3078,7 +3115,7 @@ pub const VirtualMachine = struct {
}
const code = code: {
if (!top.remapped and lookup.source_map.isExternal()) {
if (!top.remapped and lookup.source_map != null and lookup.source_map.?.isExternal()) {
if (lookup.getSourceCode(top_source_url.slice())) |src| {
break :code src;
}
@@ -3135,6 +3172,7 @@ pub const VirtualMachine = struct {
@max(frame.position.column.zeroBased(), 0),
.no_source_contents,
)) |lookup| {
defer if (lookup.source_map) |map| map.deref();
if (lookup.displaySourceURLIfNeeded(source_url.slice())) |src| {
frame.source_url.deref();
frame.source_url = src;
@@ -3718,6 +3756,7 @@ pub fn NewHotReloader(comptime Ctx: type, comptime EventLoopType: type, comptime
onAccept: std.ArrayHashMapUnmanaged(GenericWatcher.HashType, bun.BabyList(OnAcceptCallback), bun.ArrayIdentityContext, false) = .{},
ctx: *Ctx,
verbose: bool = false,
pending_count: std.atomic.Value(u32) = std.atomic.Value(u32).init(0),
tombstones: bun.StringHashMapUnmanaged(*bun.fs.FileSystem.RealFS.EntriesOption) = .{},
@@ -3755,7 +3794,18 @@ pub fn NewHotReloader(comptime Ctx: type, comptime EventLoopType: type, comptime
}
pub fn run(this: *HotReloadTask) void {
this.reloader.ctx.reload();
// Since we rely on the event loop for hot reloads, there can be
// a delay before the next reload begins. In the time between the
// last reload and the next one, we shouldn't schedule any more
// hot reloads. Since we reload literally everything, we don't
// need to worry about missing any changes.
//
// Note that we set the count _before_ we reload, so that if we
// get another hot reload request while we're reloading, we'll
// still enqueue it.
while (this.reloader.pending_count.swap(0, .monotonic) > 0) {
this.reloader.ctx.reload();
}
}
pub fn enqueue(this: *HotReloadTask) void {
@@ -3772,6 +3822,8 @@ pub fn NewHotReloader(comptime Ctx: type, comptime EventLoopType: type, comptime
unreachable;
}
_ = this.reloader.pending_count.fetchAdd(1, .monotonic);
BunDebugger__willHotReload();
var that = bun.default_allocator.create(HotReloadTask) catch unreachable;
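Together these two hunks implement the coalescing described in the comment: enqueue bumps an atomic counter, and run drains it with a swap-to-zero loop so requests that arrive mid-reload trigger exactly one more pass instead of one pass each. A self-contained Zig sketch of that counter protocol (reload_count below stands in for ctx.reload()):

const std = @import("std");

var pending_count = std.atomic.Value(u32).init(0);
var reload_count: u32 = 0;

// Producer side: note that a change was seen.
fn enqueueReload() void {
    _ = pending_count.fetchAdd(1, .monotonic);
}

// Consumer side: swapping to zero *before* reloading means a request that
// lands while we are reloading leaves the counter non-zero, so the loop
// runs once more; many queued requests still cost a single reload.
fn runPendingReloads() void {
    while (pending_count.swap(0, .monotonic) > 0) {
        reload_count += 1; // stand-in for this.reloader.ctx.reload()
    }
}

test "coalesce pending reloads" {
    enqueueReload();
    enqueueReload();
    runPendingReloads();
    try std.testing.expectEqual(@as(u32, 1), reload_count);
}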


@@ -208,6 +208,7 @@ fn dumpSourceStringFailiable(vm: *VirtualMachine, specifier: string, written: []
return;
};
if (vm.source_mappings.get(specifier)) |mappings| {
defer mappings.deref();
const map_path = std.mem.concat(bun.default_allocator, u8, &.{ std.fs.path.basename(specifier), ".map" }) catch bun.outOfMemory();
defer bun.default_allocator.free(map_path);
const file = try parent.createFile(map_path, .{});


@@ -3036,7 +3036,7 @@ pub fn NewRefCounted(comptime T: type, comptime deinit_fn: ?fn (self: *T) void)
pub inline fn new(t: T) *T {
const ptr = bun.new(T, t);
if (Environment.allow_assert) {
if (Environment.enable_logs) {
if (ptr.ref_count != 1) {
Output.panic("Expected ref_count to be 1, got {d}", .{ptr.ref_count});
}
@@ -3047,6 +3047,65 @@ pub fn NewRefCounted(comptime T: type, comptime deinit_fn: ?fn (self: *T) void)
};
}
pub fn NewThreadSafeRefCounted(comptime T: type, comptime deinit_fn: ?fn (self: *T) void) type {
if (!@hasField(T, "ref_count")) {
@compileError("Expected a field named \"ref_count\" with a default value of 1 on " ++ @typeName(T));
}
for (std.meta.fields(T)) |field| {
if (strings.eqlComptime(field.name, "ref_count")) {
if (field.default_value == null) {
@compileError("Expected a field named \"ref_count\" with a default value of 1 on " ++ @typeName(T));
}
}
}
const output_name: []const u8 = if (@hasDecl(T, "DEBUG_REFCOUNT_NAME")) T.DEBUG_REFCOUNT_NAME else meta.typeBaseName(@typeName(T));
const log = Output.scoped(output_name, true);
return struct {
pub fn destroy(self: *T) void {
if (Environment.allow_assert) {
assert(self.ref_count.load(.seq_cst) == 0);
}
bun.destroy(self);
}
pub fn ref(self: *T) void {
const ref_count = self.ref_count.fetchAdd(1, .seq_cst);
if (Environment.isDebug) log("0x{x} ref {d} + 1 = {d}", .{ @intFromPtr(self), ref_count, ref_count - 1 });
bun.debugAssert(ref_count > 0);
}
pub fn deref(self: *T) void {
const ref_count = self.ref_count.fetchSub(1, .seq_cst);
if (Environment.isDebug) log("0x{x} deref {d} - 1 = {d}", .{ @intFromPtr(self), ref_count, ref_count -| 1 });
if (ref_count == 1) {
if (comptime deinit_fn) |deinit| {
deinit(self);
} else {
self.destroy();
}
}
}
pub inline fn new(t: T) *T {
const ptr = bun.new(T, t);
if (Environment.enable_logs) {
if (ptr.ref_count.load(.seq_cst) != 1) {
Output.panic("Expected ref_count to be 1, got {d}", .{ptr.ref_count.load(.seq_cst)});
}
}
return ptr;
}
};
}
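NewThreadSafeRefCounted mirrors NewRefCounted but requires ref_count to be a std.atomic.Value(u32) defaulting to 1, so ref/deref are safe across threads and deinit runs exactly once when the last reference drops. A minimal Zig sketch of the same discipline on a standalone type (Thing is illustrative, not a bun type or the mixin itself):

const std = @import("std");

const Thing = struct {
    ref_count: std.atomic.Value(u32) = std.atomic.Value(u32).init(1),
    allocator: std.mem.Allocator,

    fn new(allocator: std.mem.Allocator) !*Thing {
        const self = try allocator.create(Thing);
        self.* = .{ .allocator = allocator }; // ref_count starts at 1
        return self;
    }
    fn ref(self: *Thing) void {
        _ = self.ref_count.fetchAdd(1, .seq_cst);
    }
    fn deref(self: *Thing) void {
        // fetchSub returns the previous value; 1 means this was the last reference.
        if (self.ref_count.fetchSub(1, .seq_cst) == 1) {
            self.allocator.destroy(self);
        }
    }
};

test "last deref frees the object" {
    const t = try Thing.new(std.testing.allocator);
    t.ref();
    t.deref();
    t.deref(); // std.testing.allocator reports a leak if this deref is missing
}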
pub fn exitThread() noreturn {
const exiter = struct {
pub extern "C" fn pthread_exit(?*anyopaque) noreturn;


@@ -117,11 +117,27 @@ pub const Lock = struct {
this.mutex.release();
}
pub inline fn assertUnlocked(this: *Lock, comptime message: []const u8) void {
pub inline fn releaseAssertUnlocked(this: *Lock, comptime message: []const u8) void {
if (this.mutex.state.load(.monotonic) != 0) {
@panic(message);
}
}
pub inline fn assertUnlocked(this: *Lock) void {
if (std.debug.runtime_safety) {
if (this.mutex.state.load(.monotonic) != 0) {
@panic("Mutex is expected to be unlocked");
}
}
}
pub inline fn assertLocked(this: *Lock) void {
if (std.debug.runtime_safety) {
if (this.mutex.state.load(.monotonic) == 0) {
@panic("Mutex is expected to be locked");
}
}
}
};
pub fn spinCycle() void {}
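The renamed releaseAssertUnlocked always panics on a locked mutex, while the new assertLocked/assertUnlocked only check under std.debug.runtime_safety and compile away otherwise. A standalone sketch of those assertions, with a simple spinlock standing in for bun's futex-based Lock (state == 0 meaning unlocked, as above):

const std = @import("std");

const Lock = struct {
    state: std.atomic.Value(u32) = std.atomic.Value(u32).init(0),

    fn lock(self: *Lock) void {
        while (self.state.swap(1, .acquire) != 0) std.atomic.spinLoopHint();
    }
    fn unlock(self: *Lock) void {
        self.state.store(0, .release);
    }

    // Debug-only checks: no-ops when runtime safety is disabled.
    inline fn assertUnlocked(self: *Lock) void {
        if (std.debug.runtime_safety) {
            if (self.state.load(.monotonic) != 0) @panic("Mutex is expected to be unlocked");
        }
    }
    inline fn assertLocked(self: *Lock) void {
        if (std.debug.runtime_safety) {
            if (self.state.load(.monotonic) == 0) @panic("Mutex is expected to be locked");
        }
    }
};

test "assertions observe the lock state" {
    var l = Lock{};
    l.assertUnlocked();
    l.lock();
    l.assertLocked();
    l.unlock();
    l.assertUnlocked();
}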


@@ -17,6 +17,7 @@ const URL = bun.URL;
const FileSystem = bun.fs.FileSystem;
const SourceMap = @This();
const debug = bun.Output.scoped(.SourceMap, false);
/// Coordinates in source maps are stored using relative offsets for size
/// reasons. When joining together chunks of a source map that were emitted
@@ -76,7 +77,9 @@ pub fn parseUrl(
) !ParseUrl {
const json_bytes = json_bytes: {
const data_prefix = "data:application/json";
if (bun.strings.hasPrefixComptime(source, data_prefix) and source.len > (data_prefix.len + 1)) try_data_url: {
debug("parse (data url, {d} bytes)", .{source.len});
switch (source[data_prefix.len]) {
';' => {
const encoding = bun.sliceTo(source[data_prefix.len + 1 ..], ',');
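This branch recognizes an inline data:application/json source map before falling back to treating the value as a file path. A rough Zig sketch of that prefix and encoding split (dataUrlPayload is a made-up helper; bun's parseUrl accepts more variants and also performs the base64 decode):

const std = @import("std");

const DataUrl = struct { base64: bool, payload: []const u8 };

fn dataUrlPayload(source: []const u8) ?DataUrl {
    const prefix = "data:application/json";
    if (!std.mem.startsWith(u8, source, prefix) or source.len <= prefix.len + 1) return null;
    const rest = source[prefix.len..];
    if (std.mem.startsWith(u8, rest, ";base64,")) {
        return .{ .base64 = true, .payload = rest[";base64,".len..] };
    }
    if (rest[0] == ',') {
        return .{ .base64 = false, .payload = rest[1..] };
    }
    return null;
}

test "base64 data url" {
    const url = "data:application/json;base64,eyJ2ZXJzaW9uIjozfQ==";
    const parsed = dataUrlPayload(url).?;
    try std.testing.expect(parsed.base64);
    try std.testing.expectEqualStrings("eyJ2ZXJzaW9uIjozfQ==", parsed.payload);
}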
@@ -118,16 +121,20 @@ pub fn parseJSON(
var log = bun.logger.Log.init(arena);
defer log.deinit();
var json = bun.JSON.ParseJSON(&json_src, &log, arena) catch {
return error.InvalidJSON;
};
// the allocator given to the JS parser is not respected for all parts
// of the parse, so we need to remember to reset the ast store
bun.JSAst.Expr.Data.Store.reset();
bun.JSAst.Stmt.Data.Store.reset();
defer {
// the allocator given to the JS parser is not respected for all parts
// of the parse, so we need to remember to reset the ast store
bun.JSAst.Expr.Data.Store.reset();
bun.JSAst.Stmt.Data.Store.reset();
}
debug("parse (JSON, {d} bytes)", .{source.len});
var json = bun.JSON.ParseJSON(&json_src, &log, arena) catch {
return error.InvalidJSON;
};
if (json.get("version")) |version| {
if (version.data != .e_number or version.data.e_number.value != 3.0) {
@@ -192,12 +199,11 @@ pub fn parseJSON(
.fail => |fail| return fail.err,
};
const ptr = bun.default_allocator.create(Mapping.ParsedSourceMap) catch bun.outOfMemory();
ptr.* = map_data;
const ptr = Mapping.ParsedSourceMap.new(map_data);
ptr.external_source_names = source_paths_slice.?;
break :map ptr;
} else null;
errdefer if (map) |m| m.deinit(bun.default_allocator);
errdefer if (map) |m| m.deref();
const mapping, const source_index = switch (hint) {
.source_only => |index| .{ null, index },
@@ -244,7 +250,7 @@ pub const Mapping = struct {
pub const Lookup = struct {
mapping: Mapping,
source_map: *ParsedSourceMap,
source_map: ?*ParsedSourceMap = null,
/// Owned by default_allocator always
/// use `getSourceCode` to access this as a Slice
prefetched_source_code: ?[]const u8,
@@ -252,17 +258,18 @@ pub const Mapping = struct {
/// This creates a bun.String if the source remap *changes* the source url,
/// a case that happens only when the source map points to another file.
pub fn displaySourceURLIfNeeded(lookup: Lookup, base_filename: []const u8) ?bun.String {
const source_map = lookup.source_map orelse return null;
// See doc comment on `external_source_names`
if (lookup.source_map.external_source_names.len == 0)
if (source_map.external_source_names.len == 0)
return null;
if (lookup.mapping.source_index >= lookup.source_map.external_source_names.len)
if (lookup.mapping.source_index >= source_map.external_source_names.len)
return null;
const name = lookup.source_map.external_source_names[@intCast(lookup.mapping.source_index)];
const name = source_map.external_source_names[@intCast(lookup.mapping.source_index)];
if (std.fs.path.isAbsolute(base_filename)) {
const dir = bun.path.dirname(base_filename, .auto);
return bun.String.init(bun.path.joinAbs(dir, .auto, name));
return bun.String.createUTF8(bun.path.joinAbs(dir, .auto, name));
}
return bun.String.init(name);
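As the doc comment says, a new display string is built only when the mapping points at an external source name; relative names are joined against the directory of the generated file. A standalone sketch of that path rule using std.fs.path in place of bun.path (displaySourceUrl is an illustrative name; POSIX-style paths assumed):

const std = @import("std");

fn displaySourceUrl(allocator: std.mem.Allocator, base_filename: []const u8, external_name: []const u8) !?[]u8 {
    if (external_name.len == 0) return null;
    if (std.fs.path.isAbsolute(base_filename)) {
        const dir = std.fs.path.dirname(base_filename) orelse ".";
        // Join the relative source name against the generated file's directory.
        return try std.fs.path.resolve(allocator, &.{ dir, external_name });
    }
    return try allocator.dupe(u8, external_name);
}

test "join external name against the generated file" {
    const out = (try displaySourceUrl(std.testing.allocator, "/app/dist/app.js", "../src/app.ts")).?;
    defer std.testing.allocator.free(out);
    try std.testing.expectEqualStrings("/app/src/app.ts", out);
}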
@@ -272,28 +279,30 @@ pub const Mapping = struct {
/// This has the possibility of invoking a call to the filesystem.
pub fn getSourceCode(lookup: Lookup, base_filename: []const u8) ?bun.JSC.ZigString.Slice {
const bytes = bytes: {
assert(lookup.source_map.isExternal());
if (lookup.prefetched_source_code) |code| {
break :bytes code;
}
const provider = lookup.source_map.underlying_provider.provider() orelse
const source_map = lookup.source_map orelse return null;
assert(source_map.isExternal());
const provider = source_map.underlying_provider.provider() orelse
return null;
const index = lookup.mapping.source_index;
if (provider.getSourceMap(
base_filename,
lookup.source_map.underlying_provider.load_hint,
source_map.underlying_provider.load_hint,
.{ .source_only = @intCast(index) },
)) |parsed|
if (parsed.source_contents) |contents|
break :bytes contents;
if (index >= lookup.source_map.external_source_names.len)
if (index >= source_map.external_source_names.len)
return null;
const name = lookup.source_map.external_source_names[@intCast(index)];
const name = source_map.external_source_names[@intCast(index)];
var buf: bun.PathBuffer = undefined;
const normalized = bun.path.joinAbsStringBufZ(
@@ -379,6 +388,8 @@ pub const Mapping = struct {
sources_count: i32,
input_line_count: usize,
) ParseResult {
debug("parse mappings ({d} bytes)", .{bytes.len});
var mapping = Mapping.List{};
if (estimated_mapping_count) |count| {
mapping.ensureTotalCapacity(allocator, count) catch unreachable;
@@ -597,7 +608,7 @@ pub const Mapping = struct {
/// maps `source_index` to the correct filename.
external_source_names: []const []const u8 = &.{},
/// In order to load source contents from a source-map after the fact,
/// a handle to the underying source provider is stored. Within this pointer,
/// a handle to the underlying source provider is stored. Within this pointer,
/// a flag is stored if it is known to be an inline or external source map.
///
/// Source contents are large, we don't preserve them in memory. This has
@@ -606,6 +617,10 @@ pub const Mapping = struct {
/// rely on source contents)
underlying_provider: SourceContentPtr = .{ .data = 0 },
ref_count: std.atomic.Value(u32) = std.atomic.Value(u32).init(1),
pub usingnamespace bun.NewThreadSafeRefCounted(ParsedSourceMap, deinitFn);
const SourceContentPtr = packed struct(u64) {
load_hint: SourceMapLoadHint = .none,
data: u62,
@@ -623,7 +638,11 @@ pub const Mapping = struct {
return psm.external_source_names.len != 0;
}
pub fn deinit(this: *ParsedSourceMap, allocator: std.mem.Allocator) void {
fn deinitFn(this: *ParsedSourceMap) void {
this.deinitWithAllocator(bun.default_allocator);
}
fn deinitWithAllocator(this: *ParsedSourceMap, allocator: std.mem.Allocator) void {
this.mappings.deinit(allocator);
if (this.external_source_names.len > 0) {
@@ -632,7 +651,7 @@ pub const Mapping = struct {
allocator.free(this.external_source_names);
}
allocator.destroy(this);
this.destroy();
}
pub fn writeVLQs(map: ParsedSourceMap, writer: anytype) !void {
@@ -726,6 +745,7 @@ pub const SourceProviderMap = opaque {
var sfb = std.heap.stackFallback(65536, bun.default_allocator);
var arena = bun.ArenaAllocator.init(sfb.get());
defer arena.deinit();
const allocator = arena.allocator();
const new_load_hint: SourceMapLoadHint, const parsed = parsed: {
var inline_err: ?anyerror = null;
@@ -737,9 +757,9 @@ pub const SourceProviderMap = opaque {
bun.assert(source.tag == .ZigString);
const found_url = (if (source.is8Bit())
findSourceMappingURL(u8, source.latin1(), arena.allocator())
findSourceMappingURL(u8, source.latin1(), allocator)
else
findSourceMappingURL(u16, source.utf16(), arena.allocator())) orelse
findSourceMappingURL(u16, source.utf16(), allocator)) orelse
break :try_inline;
defer found_url.deinit();
@@ -747,7 +767,7 @@ pub const SourceProviderMap = opaque {
.is_inline_map,
parseUrl(
bun.default_allocator,
arena.allocator(),
allocator,
found_url.slice(),
result,
) catch |err| {
@@ -766,7 +786,7 @@ pub const SourceProviderMap = opaque {
@memcpy(load_path_buf[source_filename.len..][0..4], ".map");
const load_path = load_path_buf[0 .. source_filename.len + 4];
const data = switch (bun.sys.File.readFrom(std.fs.cwd(), load_path, arena.allocator())) {
const data = switch (bun.sys.File.readFrom(std.fs.cwd(), load_path, allocator)) {
.err => break :try_external,
.result => |data| data,
};
@@ -775,7 +795,7 @@ pub const SourceProviderMap = opaque {
.is_external_map,
parseJSON(
bun.default_allocator,
arena.allocator(),
allocator,
data,
result,
) catch |err| {


@@ -137,7 +137,7 @@ pub fn TaggedPointerUnion(comptime Types: anytype) type {
@compileError("TaggedPointerUnion does not have " ++ name ++ ".");
}
}
pub inline fn get(this: This, comptime Type: anytype) ?*Type {
pub inline fn get(this: This, comptime Type: type) ?*Type {
comptime assert_type(Type);
return if (this.is(Type)) this.as(Type) else null;


@@ -595,7 +595,7 @@ pub fn NewWatcher(comptime ContextType: type) type {
this.running = false;
} else {
// if the mutex is locked, then that's now a UAF.
this.mutex.assertUnlocked("Internal consistency error: watcher mutex is locked when it should not be.");
this.mutex.releaseAssertUnlocked("Internal consistency error: watcher mutex is locked when it should not be.");
if (close_descriptors and this.running) {
const fds = this.watchlist.items(.fd);