fix a crash in remapping stacks (#12477)

Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
This commit is contained in:
dave caruso
2024-07-10 16:39:05 -07:00
committed by GitHub
parent 55d59ebf1f
commit 02b589b2ce
9 changed files with 229 additions and 81 deletions

View File

@@ -127,6 +127,16 @@ pub const SavedSourceMap = struct {
// Byte offset into a SavedMappings buffer where the VLQ-encoded mapping data
// begins — presumably past a 24-byte header (see the layout comment below,
// which only accounts for 16 of those bytes); TODO confirm the third 8-byte slot.
pub const vlq_offset = 24;
/// Acquire the source-map mutex and then mark the hash table's pointers as
/// safe to use. NOTE(review): `unlockPointers`/`lockPointers` appear to be a
/// debug-assertion mechanism that catches map access without holding `mutex`
/// (the map is pointer-locked while unlocked elsewhere in this commit) — confirm.
pub inline fn lock(map: *SavedSourceMap) void {
map.mutex.lock();
map.map.unlockPointers();
}
/// Inverse of `lock`: re-arm the pointer-lock assertion first, then release
/// the mutex. Must pair with a prior `lock` on the same map.
pub inline fn unlock(map: *SavedSourceMap) void {
map.map.lockPointers();
map.mutex.unlock();
}
// For the runtime, we store the number of mappings and how many bytes the final list is at the beginning of the array
// The first 8 bytes are the length of the array
// The second 8 bytes are the number of mappings
@@ -209,8 +219,8 @@ pub const SavedSourceMap = struct {
}
pub fn removeZigSourceProvider(this: *SavedSourceMap, opaque_source_provider: *anyopaque, path: []const u8) void {
this.mutex.lock();
defer this.mutex.unlock();
this.lock();
defer this.unlock();
const entry = this.map.getEntry(bun.hash(path)) orelse return;
const old_value = Value.from(entry.value_ptr.*);
@@ -222,8 +232,8 @@ pub const SavedSourceMap = struct {
} else if (old_value.get(ParsedSourceMap)) |map| {
if (map.underlying_provider.provider()) |prov| {
if (@intFromPtr(prov) == @intFromPtr(opaque_source_provider)) {
map.deinit(default_allocator);
this.map.removeByPtr(entry.key_ptr);
map.deref();
}
}
}
@@ -239,15 +249,14 @@ pub const SavedSourceMap = struct {
pub fn deinit(this: *SavedSourceMap) void {
{
this.mutex.lock();
defer this.mutex.unlock();
this.lock();
defer this.unlock();
var iter = this.map.valueIterator();
while (iter.next()) |val| {
var value = Value.from(val.*);
if (value.get(ParsedSourceMap)) |source_map_| {
var source_map: *ParsedSourceMap = source_map_;
source_map.deinit(default_allocator);
if (value.get(ParsedSourceMap)) |source_map| {
source_map.deref();
} else if (value.get(SavedMappings)) |saved_mappings| {
var saved = SavedMappings{ .data = @as([*]u8, @ptrCast(saved_mappings)) };
saved.deinit();
@@ -257,6 +266,7 @@ pub const SavedSourceMap = struct {
}
}
this.map.unlockPointers();
this.map.deinit();
}
@@ -265,14 +275,15 @@ pub const SavedSourceMap = struct {
}
fn putValue(this: *SavedSourceMap, path: []const u8, value: Value) !void {
this.mutex.lock();
defer this.mutex.unlock();
this.lock();
defer this.unlock();
const entry = try this.map.getOrPut(bun.hash(path));
if (entry.found_existing) {
var old_value = Value.from(entry.value_ptr.*);
if (old_value.get(ParsedSourceMap)) |parsed_source_map| {
var source_map: *ParsedSourceMap = parsed_source_map;
source_map.deinit(default_allocator);
source_map.deref();
} else if (old_value.get(SavedMappings)) |saved_mappings| {
var saved = SavedMappings{ .data = @as([*]u8, @ptrCast(saved_mappings)) };
saved.deinit();
@@ -283,37 +294,59 @@ pub const SavedSourceMap = struct {
entry.value_ptr.* = value.ptr();
}
pub fn getWithContent(
fn getWithContent(
this: *SavedSourceMap,
path: string,
hint: SourceMap.ParseUrlResultHint,
) SourceMap.ParseUrl {
const hash = bun.hash(path);
const mapping = this.map.getEntry(hash) orelse return .{};
// This lock is for the hash table
this.lock();
// This mapping entry is only valid while the mutex is locked
const mapping = this.map.getEntry(hash) orelse {
this.unlock();
return .{};
};
switch (Value.from(mapping.value_ptr.*).tag()) {
Value.Tag.ParsedSourceMap => {
return .{ .map = Value.from(mapping.value_ptr.*).as(ParsedSourceMap) };
defer this.unlock();
const map = Value.from(mapping.value_ptr.*).as(ParsedSourceMap);
map.ref();
return .{ .map = map };
},
Value.Tag.SavedMappings => {
defer this.unlock();
var saved = SavedMappings{ .data = @as([*]u8, @ptrCast(Value.from(mapping.value_ptr.*).as(ParsedSourceMap))) };
defer saved.deinit();
const result = default_allocator.create(ParsedSourceMap) catch unreachable;
result.* = saved.toMapping(default_allocator, path) catch {
const result = ParsedSourceMap.new(saved.toMapping(default_allocator, path) catch {
_ = this.map.remove(mapping.key_ptr.*);
return .{};
};
});
mapping.value_ptr.* = Value.init(result).ptr();
result.ref();
return .{ .map = result };
},
Value.Tag.SourceProviderMap => {
var ptr = Value.from(mapping.value_ptr.*).as(SourceProviderMap);
this.unlock();
if (ptr.getSourceMap(path, .none, hint)) |parse|
// Do not lock the mutex while we're parsing JSON!
if (ptr.getSourceMap(path, .none, hint)) |parse| {
if (parse.map) |map| {
mapping.value_ptr.* = Value.init(map).ptr();
return parse;
};
map.ref();
// The mutex is not locked. We have to check the hash table again.
this.putValue(path, Value.init(map)) catch bun.outOfMemory();
return parse;
}
}
this.lock();
defer this.unlock();
// does not have a valid source map. let's not try again
_ = this.map.remove(hash);
@@ -343,14 +376,12 @@ pub const SavedSourceMap = struct {
column: i32,
source_handling: SourceMap.SourceContentHandling,
) ?SourceMap.Mapping.Lookup {
this.mutex.lock();
defer this.mutex.unlock();
const parse = this.getWithContent(path, switch (source_handling) {
.no_source_contents => .mappings_only,
.source_contents => .{ .all = .{ .line = line, .column = column } },
});
const map = parse.map orelse return null;
const mapping = parse.mapping orelse
SourceMap.Mapping.find(map.mappings, line, column) orelse
return null;
@@ -655,11 +686,12 @@ pub const VirtualMachine = struct {
default_tls_reject_unauthorized: ?bool = null,
default_verbose_fetch: ?bun.http.HTTPVerboseLevel = null,
/// Do not access this field directly
/// It exists in the VirtualMachine struct so that
/// we don't accidentally make a stack copy of it
/// only use it through
/// source_mappings
/// Do not access this field directly!
///
/// It exists in the VirtualMachine struct so that we don't accidentally
/// make a stack copy of it; only use it through `source_mappings`.
///
/// This proposal could let us safely move it back https://github.com/ziglang/zig/issues/7769
saved_source_map_table: SavedSourceMap.HashTable = undefined,
source_mappings: SavedSourceMap = undefined,
@@ -1447,6 +1479,7 @@ pub const VirtualMachine = struct {
.debug_thread_id = if (Environment.allow_assert) std.Thread.getCurrentId() else {},
};
vm.source_mappings = .{ .map = &vm.saved_source_map_table };
vm.source_mappings.map.lockPointers();
vm.regular_event_loop.tasks = EventLoop.Queue.init(
default_allocator,
);
@@ -1561,6 +1594,7 @@ pub const VirtualMachine = struct {
.debug_thread_id = if (Environment.allow_assert) std.Thread.getCurrentId() else {},
};
vm.source_mappings = .{ .map = &vm.saved_source_map_table };
vm.source_mappings.map.lockPointers();
vm.regular_event_loop.tasks = EventLoop.Queue.init(
default_allocator,
);
@@ -2944,6 +2978,8 @@ pub const VirtualMachine = struct {
@max(frame.position.column.zeroBased(), 0),
.no_source_contents,
)) |lookup| {
const source_map = lookup.source_map;
defer if (source_map) |map| map.deref();
if (lookup.displaySourceURLIfNeeded(sourceURL.slice())) |source_url| {
frame.source_url.deref();
frame.source_url = source_url;
@@ -3055,9 +3091,8 @@ pub const VirtualMachine = struct {
},
.source_index = 0,
},
// undefined is fine, because these two values are never read if `top.remapped == true`
.source_map = undefined,
.prefetched_source_code = undefined,
.source_map = null,
.prefetched_source_code = null,
}
else
this.source_mappings.resolveMapping(
@@ -3069,6 +3104,8 @@ pub const VirtualMachine = struct {
if (maybe_lookup) |lookup| {
const mapping = lookup.mapping;
const source_map = lookup.source_map;
defer if (source_map) |map| map.deref();
if (!top.remapped) {
if (lookup.displaySourceURLIfNeeded(top_source_url.slice())) |src| {
@@ -3078,7 +3115,7 @@ pub const VirtualMachine = struct {
}
const code = code: {
if (!top.remapped and lookup.source_map.isExternal()) {
if (!top.remapped and lookup.source_map != null and lookup.source_map.?.isExternal()) {
if (lookup.getSourceCode(top_source_url.slice())) |src| {
break :code src;
}
@@ -3135,6 +3172,7 @@ pub const VirtualMachine = struct {
@max(frame.position.column.zeroBased(), 0),
.no_source_contents,
)) |lookup| {
defer if (lookup.source_map) |map| map.deref();
if (lookup.displaySourceURLIfNeeded(source_url.slice())) |src| {
frame.source_url.deref();
frame.source_url = src;
@@ -3718,6 +3756,7 @@ pub fn NewHotReloader(comptime Ctx: type, comptime EventLoopType: type, comptime
onAccept: std.ArrayHashMapUnmanaged(GenericWatcher.HashType, bun.BabyList(OnAcceptCallback), bun.ArrayIdentityContext, false) = .{},
ctx: *Ctx,
verbose: bool = false,
pending_count: std.atomic.Value(u32) = std.atomic.Value(u32).init(0),
tombstones: bun.StringHashMapUnmanaged(*bun.fs.FileSystem.RealFS.EntriesOption) = .{},
@@ -3755,7 +3794,18 @@ pub fn NewHotReloader(comptime Ctx: type, comptime EventLoopType: type, comptime
}
/// Drains all pending hot-reload requests that accumulated since this task
/// was enqueued, coalescing them: `pending_count` is atomically swapped to 0
/// and one full reload is performed per observed batch.
pub fn run(this: *HotReloadTask) void {
// NOTE(review): this unconditional reload() followed by the reload loop
// below would reload at least twice per task run. This looks like the
// diff's *removed* line rendered next to its replacement — confirm that
// only the while-loop survives in the final file.
this.reloader.ctx.reload();
// Since we rely on the event loop for hot reloads, there can be
// a delay before the next reload begins. In the time between the
// last reload and the next one, we shouldn't schedule any more
// hot reloads. Since we reload literally everything, we don't
// need to worry about missing any changes.
//
// Note that we set the count _before_ we reload, so that if we
// get another hot reload request while we're reloading, we'll
// still enqueue it.
// swap(0) atomically claims every request counted so far; .monotonic is
// sufficient because only the count matters, not ordering of other data.
while (this.reloader.pending_count.swap(0, .monotonic) > 0) {
this.reloader.ctx.reload();
}
}
pub fn enqueue(this: *HotReloadTask) void {
@@ -3772,6 +3822,8 @@ pub fn NewHotReloader(comptime Ctx: type, comptime EventLoopType: type, comptime
unreachable;
}
_ = this.reloader.pending_count.fetchAdd(1, .monotonic);
BunDebugger__willHotReload();
var that = bun.default_allocator.create(HotReloadTask) catch unreachable;