From a57dee57219042eee8f78a013aa427316e44a072 Mon Sep 17 00:00:00 2001 From: "taylor.fish" Date: Mon, 11 Aug 2025 13:40:07 -0700 Subject: [PATCH] Various safety improvements (`safety.ThreadLock`, stack traces, `MimallocArena`, `RefCount`, `safety.alloc`) (#21726) * Move `DebugThreadLock` to `bun.safety` * Enable in `ci_assert` builds, but store stack traces only in debug builds * Reduce size of struct by making optional field non-optional * Add `initLockedIfNonComptime` as a workaround for not being able to call `initLocked` in comptime contexts * Add `lockOrAssert` method to acquire the lock if unlocked, or else assert that the current thread acquired the lock * Add stack traces to `CriticalSection` and `AllocPtr` in debug builds * Make `MimallocArena.init` infallible * Make `MimallocArena.heap` non-nullable * Rename `RefCount.active_counts` to `raw_count` and provide read-only `get` method * Add `bun.safety.alloc.assertEq` to assert that two allocators are equal (avoiding comparison of undefined `ptr`s) (For internal tracking: fixes STAB-917, STAB-918, STAB-962, STAB-963, STAB-964, STAB-965) --------- Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- cmake/sources/ZigSources.txt | 4 +- src/Watcher.zig | 2 +- src/allocators/MimallocArena.zig | 21 ++-- src/bake/DevServer.zig | 10 +- src/bake/DevServer/IncrementalGraph.zig | 2 +- src/bake/DevServer/PackedMap.zig | 2 +- src/bake/production.zig | 2 +- src/bun.js.zig | 4 +- src/bun.js/Debugger.zig | 2 +- src/bun.js/api/JSTranspiler.zig | 8 +- src/bun.js/api/bun/socket.zig | 4 +- src/bun.js/api/bun/subprocess.zig | 2 +- src/bun.js/api/server.zig | 2 +- src/bun.js/web_worker.zig | 2 +- src/bun.zig | 75 +++----------- src/bundler/BundleThread.zig | 2 +- src/bundler/Graph.zig | 4 +- src/bundler/ThreadPool.zig | 5 +- src/bundler/bundle_v2.zig | 8 +- src/cli/test_command.zig | 2 +- src/env.zig | 24 ++--- src/http/HTTPThread.zig | 2 +- src/install/install.zig | 4 +- src/main_wasm.zig | 6 +- src/paths/path_buffer_pool.zig | 4 +- src/ptr.zig | 3 +- src/ptr/ref_count.zig | 130 +++++++++++------------- src/safety.zig | 4 +- src/safety/CriticalSection.zig | 81 +++++++++------ src/safety/ThreadLock.zig | 78 ++++++++++++++ src/safety/{alloc_ptr.zig => alloc.zig} | 46 ++++++++- src/safety/thread_id.zig | 5 + src/sql/postgres/PostgresSQLQuery.zig | 2 +- src/threading/guarded_value.zig | 2 +- test/internal/ban-limits.json | 2 +- 35 files changed, 326 insertions(+), 230 deletions(-) create mode 100644 src/safety/ThreadLock.zig rename src/safety/{alloc_ptr.zig => alloc.zig} (63%) create mode 100644 src/safety/thread_id.zig diff --git a/cmake/sources/ZigSources.txt b/cmake/sources/ZigSources.txt index f392249468..4544453ac6 100644 --- a/cmake/sources/ZigSources.txt +++ b/cmake/sources/ZigSources.txt @@ -731,8 +731,10 @@ src/s3/multipart.zig src/s3/simple_request.zig src/s3/storage_class.zig src/safety.zig -src/safety/alloc_ptr.zig +src/safety/alloc.zig src/safety/CriticalSection.zig +src/safety/thread_id.zig +src/safety/ThreadLock.zig src/semver.zig src/semver/ExternalString.zig src/semver/SemverObject.zig diff --git a/src/Watcher.zig b/src/Watcher.zig index 418af3396e..082a4fe115 100644 --- a/src/Watcher.zig +++ b/src/Watcher.zig @@ -32,7 +32,7 @@ ctx: *anyopaque, onFileUpdate: *const fn (this: *anyopaque, events: []WatchEvent, changed_files: []?[:0]u8, watchlist: WatchList) void, onError: *const fn (this: *anyopaque, err: bun.sys.Error) void, -thread_lock: bun.DebugThreadLock = bun.DebugThreadLock.unlocked, +thread_lock: 
bun.safety.ThreadLock = .initUnlocked(), pub const max_count = 128; pub const requires_file_descriptors = switch (Environment.os) { diff --git a/src/allocators/MimallocArena.zig b/src/allocators/MimallocArena.zig index 718949d1db..181933da2f 100644 --- a/src/allocators/MimallocArena.zig +++ b/src/allocators/MimallocArena.zig @@ -1,6 +1,6 @@ const Self = @This(); -heap: ?*mimalloc.Heap = null, +heap: *mimalloc.Heap, const log = bun.Output.scoped(.mimalloc, true); @@ -8,18 +8,17 @@ const log = bun.Output.scoped(.mimalloc, true); /// to get the default heap. /// It uses pthread_getspecific to do that. /// We can save those extra calls if we just do it once in here -pub fn getThreadlocalDefault() Allocator { +pub fn getThreadLocalDefault() Allocator { return Allocator{ .ptr = mimalloc.mi_heap_get_default(), .vtable = &c_allocator_vtable }; } pub fn backingAllocator(self: Self) Allocator { - var arena = Self{ .heap = self.heap.?.backing() }; + var arena = Self{ .heap = self.heap.backing() }; return arena.allocator(); } pub fn allocator(self: Self) Allocator { - @setRuntimeSafety(false); - return Allocator{ .ptr = self.heap.?, .vtable = &c_allocator_vtable }; + return Allocator{ .ptr = self.heap, .vtable = &c_allocator_vtable }; } pub fn dumpThreadStats(self: *Self) void { @@ -47,14 +46,16 @@ pub fn dumpStats(self: *Self) void { } pub fn deinit(self: *Self) void { - mimalloc.mi_heap_destroy(bun.take(&self.heap).?); + mimalloc.mi_heap_destroy(self.heap); + self.* = undefined; } -pub fn init() !Self { - return .{ .heap = mimalloc.mi_heap_new() orelse return error.OutOfMemory }; + +pub fn init() Self { + return .{ .heap = mimalloc.mi_heap_new() orelse bun.outOfMemory() }; } pub fn gc(self: Self) void { - mimalloc.mi_heap_collect(self.heap orelse return, false); + mimalloc.mi_heap_collect(self.heap, false); } pub inline fn helpCatchMemoryIssues(self: Self) void { @@ -65,7 +66,7 @@ pub inline fn helpCatchMemoryIssues(self: Self) void { } pub fn ownsPtr(self: Self, ptr: *const anyopaque) bool { - return mimalloc.mi_heap_check_owned(self.heap.?, ptr); + return mimalloc.mi_heap_check_owned(self.heap, ptr); } pub const supports_posix_memalign = true; diff --git a/src/bake/DevServer.zig b/src/bake/DevServer.zig index 3f47e73ce8..32e137c56c 100644 --- a/src/bake/DevServer.zig +++ b/src/bake/DevServer.zig @@ -63,10 +63,10 @@ server: ?bun.jsc.API.AnyServer, router: FrameworkRouter, /// Every navigatable route has bundling state here. route_bundles: ArrayListUnmanaged(RouteBundle), -/// All access into IncrementalGraph is guarded by a DebugThreadLock. This is +/// All access into IncrementalGraph is guarded by a ThreadLock. This is /// only a debug assertion as contention to this is always a bug; If a bundle is /// active and a file is changed, that change is placed into the next bundle. -graph_safety_lock: bun.DebugThreadLock, +graph_safety_lock: bun.safety.ThreadLock, client_graph: IncrementalGraph(.client), server_graph: IncrementalGraph(.server), /// State populated during bundling and hot updates. Often cleared @@ -282,7 +282,7 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { .server_fetch_function_callback = .empty, .server_register_update_callback = .empty, .generation = 0, - .graph_safety_lock = .unlocked, + .graph_safety_lock = .initUnlocked(), .dump_dir = dump_dir, .framework = options.framework, .bundler_options = options.bundler_options, @@ -1584,7 +1584,7 @@ pub const DeferredRequest = struct { /// such as for bundling failures or aborting the server. 
/// Does not free the underlying `DeferredRequest.Node` fn deinitImpl(this: *DeferredRequest) void { - bun.assert(this.ref_count.active_counts == 0); + this.ref_count.assertNoRefs(); defer this.dev.deferred_request_pool.put(@fieldParentPtr("data", this)); switch (this.handler) { @@ -1646,7 +1646,7 @@ pub fn startAsyncBundle( // Ref server to keep it from closing. if (dev.server) |server| server.onPendingRequest(); - var heap = try ThreadLocalArena.init(); + var heap = ThreadLocalArena.init(); errdefer heap.deinit(); const allocator = heap.allocator(); const ast_memory_allocator = try allocator.create(bun.ast.ASTMemoryAllocator); diff --git a/src/bake/DevServer/IncrementalGraph.zig b/src/bake/DevServer/IncrementalGraph.zig index 9b2c40ac68..4389939589 100644 --- a/src/bake/DevServer/IncrementalGraph.zig +++ b/src/bake/DevServer/IncrementalGraph.zig @@ -182,7 +182,7 @@ pub fn IncrementalGraph(side: bake.Side) type { }; comptime { - if (@import("builtin").mode == .ReleaseFast or @import("builtin").mode == .ReleaseSmall) { + if (!Environment.ci_assert) { bun.assert_eql(@sizeOf(@This()), @sizeOf(u64) * 5); bun.assert_eql(@alignOf(@This()), @alignOf([*]u8)); } diff --git a/src/bake/DevServer/PackedMap.zig b/src/bake/DevServer/PackedMap.zig index 83fb0922e7..1284293676 100644 --- a/src/bake/DevServer/PackedMap.zig +++ b/src/bake/DevServer/PackedMap.zig @@ -80,7 +80,7 @@ pub fn quotedContents(self: *const @This()) []u8 { } comptime { - if (!Environment.isDebug) { + if (!Environment.ci_assert) { assert_eql(@sizeOf(@This()), @sizeOf(usize) * 7); assert_eql(@alignOf(@This()), @alignOf(usize)); } diff --git a/src/bake/production.zig b/src/bake/production.zig index 9129faeb1a..fc630ad22c 100644 --- a/src/bake/production.zig +++ b/src/bake/production.zig @@ -26,7 +26,7 @@ pub fn buildCommand(ctx: bun.cli.Command.Context) !void { bun.ast.Expr.Data.Store.create(); bun.ast.Stmt.Data.Store.create(); - var arena = try bun.MimallocArena.init(); + var arena = bun.MimallocArena.init(); defer arena.deinit(); const vm = try VirtualMachine.initBake(.{ diff --git a/src/bun.js.zig b/src/bun.js.zig index 55051aa7c1..cc84429cfd 100644 --- a/src/bun.js.zig +++ b/src/bun.js.zig @@ -23,7 +23,7 @@ pub const Run = struct { js_ast.Expr.Data.Store.create(); js_ast.Stmt.Data.Store.create(); - const arena = try Arena.init(); + const arena = Arena.init(); if (!ctx.debug.loaded_bunfig) { try bun.cli.Arguments.loadConfigPath(ctx.allocator, true, "bunfig.toml", ctx, .RunCommand); @@ -160,7 +160,7 @@ pub const Run = struct { js_ast.Expr.Data.Store.create(); js_ast.Stmt.Data.Store.create(); - const arena = try Arena.init(); + const arena = Arena.init(); run = .{ .vm = try VirtualMachine.init( diff --git a/src/bun.js/Debugger.zig b/src/bun.js/Debugger.zig index ceb584d00c..2e9cf19e63 100644 --- a/src/bun.js/Debugger.zig +++ b/src/bun.js/Debugger.zig @@ -141,7 +141,7 @@ pub fn create(this: *VirtualMachine, globalObject: *JSGlobalObject) !void { } pub fn startJSDebuggerThread(other_vm: *VirtualMachine) void { - var arena = bun.MimallocArena.init() catch unreachable; + var arena = bun.MimallocArena.init(); Output.Source.configureNamedThread("Debugger"); log("startJSDebuggerThread", .{}); jsc.markBinding(@src()); diff --git a/src/bun.js/api/JSTranspiler.zig b/src/bun.js/api/JSTranspiler.zig index 0cb7c5977d..a80e13db72 100644 --- a/src/bun.js/api/JSTranspiler.zig +++ b/src/bun.js/api/JSTranspiler.zig @@ -482,7 +482,7 @@ pub const TransformTask = struct { const name = this.loader.stdinName(); const source = 
logger.Source.initPathString(name, this.input_code.slice()); - var arena = MimallocArena.init() catch unreachable; + var arena = MimallocArena.init(); defer arena.deinit(); const allocator = arena.allocator(); @@ -788,7 +788,7 @@ pub fn scan(this: *JSTranspiler, globalThis: *jsc.JSGlobalObject, callframe: *js return .zero; } - var arena = MimallocArena.init() catch unreachable; + var arena = MimallocArena.init(); const prev_allocator = this.transpiler.allocator; const allocator = arena.allocator(); this.transpiler.setAllocator(allocator); @@ -888,7 +888,7 @@ pub fn transformSync( return globalThis.throwInvalidArgumentType("transformSync", "code", "string or Uint8Array"); }; - var arena = MimallocArena.init() catch unreachable; + var arena = MimallocArena.init(); defer arena.deinit(); const code_holder = try jsc.Node.StringOrBuffer.fromJS(globalThis, arena.allocator(), code_arg) orelse { return globalThis.throwInvalidArgumentType("transformSync", "code", "string or Uint8Array"); @@ -1066,7 +1066,7 @@ pub fn scanImports(this: *JSTranspiler, globalThis: *jsc.JSGlobalObject, callfra return globalThis.throwInvalidArguments("Only JavaScript-like files support this fast path", .{}); } - var arena = MimallocArena.init() catch unreachable; + var arena = MimallocArena.init(); const prev_allocator = this.transpiler.allocator; const allocator = arena.allocator(); var ast_memory_allocator = allocator.create(JSAst.ASTMemoryAllocator) catch bun.outOfMemory(); diff --git a/src/bun.js/api/bun/socket.zig b/src/bun.js/api/bun/socket.zig index 119bbe17a3..d6316cf302 100644 --- a/src/bun.js/api/bun/socket.zig +++ b/src/bun.js/api/bun/socket.zig @@ -281,7 +281,7 @@ pub fn NewSocket(comptime ssl: bool) type { pub fn handleConnectError(this: *This, errno: c_int) void { const handlers = this.getHandlers(); - log("onConnectError {s} ({d}, {d})", .{ if (handlers.is_server) "S" else "C", errno, this.ref_count.active_counts }); + log("onConnectError {s} ({d}, {d})", .{ if (handlers.is_server) "S" else "C", errno, this.ref_count.get() }); // Ensure the socket is still alive for any defer's we have this.ref(); defer this.deref(); @@ -401,7 +401,7 @@ pub fn NewSocket(comptime ssl: bool) type { } pub fn onOpen(this: *This, socket: Socket) void { - log("onOpen {s} {*} {} {}", .{ if (this.isServer()) "S" else "C", this, this.socket.isDetached(), this.ref_count.active_counts }); + log("onOpen {s} {*} {} {}", .{ if (this.isServer()) "S" else "C", this, this.socket.isDetached(), this.ref_count.get() }); // Ensure the socket remains alive until this is finished this.ref(); defer this.deref(); diff --git a/src/bun.js/api/bun/subprocess.zig b/src/bun.js/api/bun/subprocess.zig index 038ffcd745..b4180bf04d 100644 --- a/src/bun.js/api/bun/subprocess.zig +++ b/src/bun.js/api/bun/subprocess.zig @@ -1402,7 +1402,7 @@ const Writable = union(enum) { const debug_ref_count = if (Environment.isDebug) subprocess.ref_count else 0; pipe.onAttachedProcessExit(&subprocess.process.status); if (Environment.isDebug) { - bun.debugAssert(subprocess.ref_count.active_counts == debug_ref_count.active_counts); + bun.debugAssert(subprocess.ref_count.get() == debug_ref_count.get()); } return pipe.toJS(globalThis); } else { diff --git a/src/bun.js/api/server.zig b/src/bun.js/api/server.zig index ac5d0453a9..5552989545 100644 --- a/src/bun.js/api/server.zig +++ b/src/bun.js/api/server.zig @@ -1642,7 +1642,7 @@ pub fn NewServer(protocol_enum: enum { http, https }, development_kind: enum { d .config = config.*, .base_url_string_for_joining = base_url, .vm = 
jsc.VirtualMachine.get(), - .allocator = Arena.getThreadlocalDefault(), + .allocator = Arena.getThreadLocalDefault(), .dev_server = dev_server, }); diff --git a/src/bun.js/web_worker.zig b/src/bun.js/web_worker.zig index 093fa17664..4eddf44120 100644 --- a/src/bun.js/web_worker.zig +++ b/src/bun.js/web_worker.zig @@ -310,7 +310,7 @@ pub fn start( // this should go through most flags and update the options. } - this.arena = try bun.MimallocArena.init(); + this.arena = bun.MimallocArena.init(); var vm = try jsc.VirtualMachine.initWorker(this, .{ .allocator = this.arena.?.allocator(), .args = transform_options, diff --git a/src/bun.zig b/src/bun.zig index 3bf0528315..ab0d437d6d 100644 --- a/src/bun.zig +++ b/src/bun.zig @@ -603,6 +603,7 @@ pub const Bunfig = @import("./bunfig.zig").Bunfig; pub const HTTPThread = @import("./http.zig").HTTPThread; pub const http = @import("./http.zig"); +pub const ptr = @import("./ptr.zig"); pub const TaggedPointer = ptr.TaggedPointer; pub const TaggedPointerUnion = ptr.TaggedPointerUnion; @@ -1827,9 +1828,9 @@ pub const Futex = threading.Futex; pub const ThreadPool = threading.ThreadPool; pub const UnboundedQueue = threading.UnboundedQueue; -pub fn threadlocalAllocator() std.mem.Allocator { +pub fn threadLocalAllocator() std.mem.Allocator { if (comptime use_mimalloc) { - return MimallocArena.getThreadlocalDefault(); + return MimallocArena.getThreadLocalDefault(); } return default_allocator; @@ -2656,11 +2657,11 @@ pub inline fn new(comptime T: type, init: T) *T { break :pointer pointer; }; - // TODO:: - // if (comptime Environment.allow_assert) { - // const logAlloc = Output.scoped(.alloc, @hasDecl(T, "logAllocations")); - // logAlloc("new({s}) = {*}", .{ meta.typeName(T), ptr }); - // } + if (comptime Environment.allow_assert) { + const enable_logs = @hasDecl(T, "logAllocations"); + const logAlloc = Output.scoped(.alloc, !enable_logs); + logAlloc("new({s}) = {*}", .{ meta.typeName(T), pointer }); + } return pointer; } @@ -2669,21 +2670,22 @@ pub inline fn new(comptime T: type, init: T) *T { /// For single-item heap pointers, prefer bun.new/destroy over default_allocator /// /// Destruction performs additional safety checks: -/// - Generic assertions can be added to T.assertMayDeinit() +/// - Generic assertions can be added to T.assertBeforeDestroy() /// - Automatic integration with `RefCount` pub inline fn destroy(pointer: anytype) void { const T = std.meta.Child(@TypeOf(pointer)); if (Environment.allow_assert) { - const logAlloc = Output.scoped(.alloc, @hasDecl(T, "logAllocations")); + const enable_logs = @hasDecl(T, "logAllocations"); + const logAlloc = Output.scoped(.alloc, !enable_logs); logAlloc("destroy({s}) = {*}", .{ meta.typeName(T), pointer }); // If this type implements a RefCount, make sure it is zero. 
- @import("./ptr/ref_count.zig").maybeAssertNoRefs(T, pointer); + ptr.ref_count.maybeAssertNoRefs(T, pointer); switch (@typeInfo(T)) { - .@"struct", .@"union", .@"enum" => if (@hasDecl(T, "assertMayDeinit")) - pointer.assertMayDeinit(), + .@"struct", .@"union", .@"enum" => if (@hasDecl(T, "assertBeforeDestroy")) + pointer.assertBeforeDestroy(), else => {}, } } @@ -3439,57 +3441,11 @@ pub fn tagName(comptime Enum: type, value: Enum) ?[:0]const u8 { if (@intFromEnum(value) == f.value) break f.name; } else null; } + pub fn getTotalMemorySize() usize { return cpp.Bun__ramSize(); } -pub const DebugThreadLock = if (Environment.isDebug) - struct { - owning_thread: ?std.Thread.Id, - locked_at: crash_handler.StoredTrace, - - pub const unlocked: DebugThreadLock = .{ - .owning_thread = null, - .locked_at = crash_handler.StoredTrace.empty, - }; - - pub fn lock(impl: *@This()) void { - if (impl.owning_thread) |thread| { - Output.err("assertion failure", "Locked by thread {d} here:", .{thread}); - crash_handler.dumpStackTrace(impl.locked_at.trace(), .{ .frame_count = 10, .stop_at_jsc_llint = true }); - Output.panic("Safety lock violated on thread {d}", .{std.Thread.getCurrentId()}); - } - impl.owning_thread = std.Thread.getCurrentId(); - impl.locked_at = crash_handler.StoredTrace.capture(@returnAddress()); - } - - pub fn unlock(impl: *@This()) void { - impl.assertLocked(); - impl.* = unlocked; - } - - pub fn assertLocked(impl: *const @This()) void { - assert(impl.owning_thread != null); // not locked - assert(impl.owning_thread == std.Thread.getCurrentId()); - } - - pub fn initLocked() @This() { - var impl = DebugThreadLock.unlocked; - impl.lock(); - return impl; - } - } -else - struct { - pub const unlocked: @This() = .{}; - pub fn lock(_: *@This()) void {} - pub fn unlock(_: *@This()) void {} - pub fn assertLocked(_: *const @This()) void {} - pub fn initLocked() @This() { - return .{}; - } - }; - pub const bytecode_extension = ".jsc"; /// An typed index into an array or other structure. @@ -3764,7 +3720,6 @@ pub noinline fn throwStackOverflow() StackOverflow!void { const StackOverflow = error{StackOverflow}; pub const S3 = @import("./s3/client.zig"); -pub const ptr = @import("./ptr.zig"); /// Memory is typically not decommitted immediately when freed. /// Sensitive information that's kept in memory can be read in various ways until the OS diff --git a/src/bundler/BundleThread.zig b/src/bundler/BundleThread.zig index 798f604b1c..cc33ea7908 100644 --- a/src/bundler/BundleThread.zig +++ b/src/bundler/BundleThread.zig @@ -102,7 +102,7 @@ pub fn BundleThread(CompletionStruct: type) type { /// This is called from `Bun.build` in JavaScript. 
fn generateInNewThread(completion: *CompletionStruct, generation: bun.Generation) !void { - var heap = try ThreadLocalArena.init(); + var heap = ThreadLocalArena.init(); defer heap.deinit(); const allocator = heap.allocator(); diff --git a/src/bundler/Graph.zig b/src/bundler/Graph.zig index 868256c8c3..0c6ac3321d 100644 --- a/src/bundler/Graph.zig +++ b/src/bundler/Graph.zig @@ -1,10 +1,10 @@ const Graph = @This(); pool: *ThreadPool, -heap: ThreadLocalArena = .{}, +heap: ThreadLocalArena, /// This allocator is thread-local to the Bundler thread /// .allocator == .heap.allocator() -allocator: std.mem.Allocator = undefined, +allocator: std.mem.Allocator, /// Mapping user-specified entry points to their Source Index entry_points: std.ArrayListUnmanaged(Index) = .{}, diff --git a/src/bundler/ThreadPool.zig b/src/bundler/ThreadPool.zig index 26c8871992..08dc31e217 100644 --- a/src/bundler/ThreadPool.zig +++ b/src/bundler/ThreadPool.zig @@ -193,6 +193,7 @@ pub const ThreadPool = struct { worker.* = .{ .ctx = this.v2, + .heap = undefined, .allocator = undefined, .thread = ThreadPoolLib.Thread.current, }; @@ -202,7 +203,7 @@ pub const ThreadPool = struct { } pub const Worker = struct { - heap: ThreadLocalArena = ThreadLocalArena{}, + heap: ThreadLocalArena, /// Thread-local memory allocator /// All allocations are freed in `deinit` at the very end of bundling. @@ -284,7 +285,7 @@ pub const ThreadPool = struct { this.has_created = true; Output.Source.configureThread(); - this.heap = ThreadLocalArena.init() catch unreachable; + this.heap = ThreadLocalArena.init(); this.allocator = this.heap.allocator(); const allocator = this.allocator; diff --git a/src/bundler/bundle_v2.zig b/src/bundler/bundle_v2.zig index 7737ec14a2..29cb182163 100644 --- a/src/bundler/bundle_v2.zig +++ b/src/bundler/bundle_v2.zig @@ -140,7 +140,7 @@ pub const BundleV2 = struct { /// You can find which callbacks are run by looking at the /// `finishFromBakeDevServer(...)` function here asynchronous: bool = false, - thread_lock: bun.DebugThreadLock, + thread_lock: bun.safety.ThreadLock, const BakeOptions = struct { framework: bake.Framework, @@ -817,7 +817,7 @@ pub const BundleV2 = struct { .plugins = null, .completion = null, .source_code_length = 0, - .thread_lock = bun.DebugThreadLock.initLocked(), + .thread_lock = .initLocked(), }; if (bake_options) |bo| { this.client_transpiler = bo.client_transpiler; @@ -1384,7 +1384,7 @@ pub const BundleV2 = struct { event_loop, enable_reloading, null, - try ThreadLocalArena.init(), + .init(), ); this.unique_key = generateUniqueKey(); @@ -1448,7 +1448,7 @@ pub const BundleV2 = struct { event_loop, false, null, - try ThreadLocalArena.init(), + .init(), ); this.unique_key = generateUniqueKey(); diff --git a/src/cli/test_command.zig b/src/cli/test_command.zig index b5ad3df23a..a23dfdcd40 100644 --- a/src/cli/test_command.zig +++ b/src/cli/test_command.zig @@ -1775,7 +1775,7 @@ pub const TestCommand = struct { } }; - var arena = bun.MimallocArena.init() catch @panic("Unexpected error in mimalloc"); + var arena = bun.MimallocArena.init(); vm_.eventLoop().ensureWaker(); vm_.arena = &arena; vm_.allocator = arena.allocator(); diff --git a/src/env.zig b/src/env.zig index 7dc489d466..509284b03d 100644 --- a/src/env.zig +++ b/src/env.zig @@ -1,6 +1,6 @@ pub const BuildTarget = enum { native, wasm, wasi }; pub const build_target: BuildTarget = brk: { - if (@import("builtin").cpu.arch.isWasm()) { + if (builtin.cpu.arch.isWasm()) { break :brk BuildTarget.wasm; } else { break :brk BuildTarget.native; @@ 
-10,24 +10,24 @@ pub const build_target: BuildTarget = brk: { pub const isWasm = build_target == .wasm; pub const isNative = build_target == .native; pub const isWasi = build_target == .wasi; -pub const isMac = build_target == .native and @import("builtin").target.os.tag == .macos; +pub const isMac = build_target == .native and builtin.target.os.tag == .macos; pub const isBrowser = !isWasi and isWasm; -pub const isWindows = @import("builtin").target.os.tag == .windows; +pub const isWindows = builtin.target.os.tag == .windows; pub const isPosix = !isWindows and !isWasm; -pub const isDebug = @import("builtin").mode == .Debug; -pub const isTest = @import("builtin").is_test; -pub const isLinux = @import("builtin").target.os.tag == .linux; -pub const isAarch64 = @import("builtin").target.cpu.arch.isAARCH64(); -pub const isX86 = @import("builtin").target.cpu.arch.isX86(); -pub const isX64 = @import("builtin").target.cpu.arch == .x86_64; +pub const isDebug = builtin.mode == .Debug; +pub const isTest = builtin.is_test; +pub const isLinux = builtin.target.os.tag == .linux; +pub const isAarch64 = builtin.target.cpu.arch.isAARCH64(); +pub const isX86 = builtin.target.cpu.arch.isX86(); +pub const isX64 = builtin.target.cpu.arch == .x86_64; pub const isMusl = builtin.target.abi.isMusl(); -pub const allow_assert = isDebug or isTest or std.builtin.Mode.ReleaseSafe == @import("builtin").mode; -pub const ci_assert = isDebug or isTest or enable_asan or (std.builtin.Mode.ReleaseSafe == @import("builtin").mode and is_canary); +pub const allow_assert = isDebug or isTest or std.builtin.Mode.ReleaseSafe == builtin.mode; +pub const ci_assert = isDebug or isTest or enable_asan or (std.builtin.Mode.ReleaseSafe == builtin.mode and is_canary); pub const show_crash_trace = isDebug or isTest or enable_asan; /// All calls to `@export` should be gated behind this check, so that code /// generators that compile Zig code know not to reference and compile a ton of /// unused code. -pub const export_cpp_apis = if (build_options.override_no_export_cpp_apis) false else (@import("builtin").output_mode == .Obj or isTest); +pub const export_cpp_apis = if (build_options.override_no_export_cpp_apis) false else (builtin.output_mode == .Obj or isTest); /// Whether or not to enable allocation tracking when the `AllocationScope` /// allocator is used. 
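The `allow_assert`/`ci_assert` split above is what lets the new safety types run in canary ReleaseSafe builds while costing nothing elsewhere. A minimal standalone sketch of the gating pattern the new types use (not code from this patch): the checked state becomes a `void` field when disabled, and a comptime-known early return culls the checking code.

    const std = @import("std");
    const builtin = @import("builtin");

    // Stand-in for `bun.Environment.ci_assert`.
    const enabled = builtin.mode == .Debug;

    const ThreadGuard = struct {
        // Zero-sized when checks are compiled out: the field's type is `void`.
        owner: if (enabled) ?std.Thread.Id else void = if (enabled) null else {},

        fn claim(self: *ThreadGuard) void {
            // Comptime-known return: the rest of the body is neither
            // analyzed nor emitted in disabled builds.
            if (comptime !enabled) return;
            if (self.owner == null) self.owner = std.Thread.getCurrentId();
            std.debug.assert(self.owner == std.Thread.getCurrentId());
        }
    };
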
diff --git a/src/http/HTTPThread.zig b/src/http/HTTPThread.zig index 354cf93483..28c1a6e847 100644 --- a/src/http/HTTPThread.zig +++ b/src/http/HTTPThread.zig @@ -195,7 +195,7 @@ pub fn init(opts: *const InitOpts) void { pub fn onStart(opts: InitOpts) void { Output.Source.configureNamedThread("HTTP Client"); - bun.http.default_arena = Arena.init() catch unreachable; + bun.http.default_arena = Arena.init(); bun.http.default_allocator = bun.http.default_arena.allocator(); const loop = bun.jsc.MiniEventLoop.initGlobal(null); diff --git a/src/install/install.zig b/src/install/install.zig index 35cc343bef..8bbf037656 100644 --- a/src/install/install.zig +++ b/src/install/install.zig @@ -71,7 +71,7 @@ pub fn initializeMiniStore() void { if (MiniStore.instance == null) { var mini_store = bun.default_allocator.create(MiniStore) catch bun.outOfMemory(); mini_store.* = .{ - .heap = bun.MimallocArena.init() catch bun.outOfMemory(), + .heap = bun.MimallocArena.init(), .memory_allocator = undefined, }; mini_store.memory_allocator = .{ .allocator = mini_store.heap.allocator() }; @@ -82,7 +82,7 @@ pub fn initializeMiniStore() void { var mini_store = MiniStore.instance.?; if (mini_store.memory_allocator.stack_allocator.fixed_buffer_allocator.end_index >= mini_store.memory_allocator.stack_allocator.fixed_buffer_allocator.buffer.len -| 1) { mini_store.heap.deinit(); - mini_store.heap = bun.MimallocArena.init() catch bun.outOfMemory(); + mini_store.heap = bun.MimallocArena.init(); mini_store.memory_allocator.allocator = mini_store.heap.allocator(); } mini_store.memory_allocator.reset(); diff --git a/src/main_wasm.zig b/src/main_wasm.zig index 9e017bdb3e..4f7d0c15fb 100644 --- a/src/main_wasm.zig +++ b/src/main_wasm.zig @@ -431,7 +431,7 @@ const TestAnalyzer = struct { } }; export fn getTests(opts_array: u64) u64 { - var arena = Arena.init() catch unreachable; + var arena = Arena.init(); var allocator = arena.allocator(); defer arena.deinit(); var log_ = Logger.Log.init(allocator); @@ -485,7 +485,7 @@ export fn getTests(opts_array: u64) u64 { export fn transform(opts_array: u64) u64 { // var arena = bun.ArenaAllocator.init(default_allocator); - var arena = Arena.init() catch unreachable; + var arena = Arena.init(); var allocator = arena.allocator(); defer arena.deinit(); log = Logger.Log.init(allocator); @@ -555,7 +555,7 @@ export fn transform(opts_array: u64) u64 { export fn scan(opts_array: u64) u64 { // var arena = bun.ArenaAllocator.init(default_allocator); - var arena = Arena.init() catch unreachable; + var arena = Arena.init(); var allocator = arena.allocator(); defer arena.deinit(); log = Logger.Log.init(allocator); diff --git a/src/paths/path_buffer_pool.zig b/src/paths/path_buffer_pool.zig index d9a0702aba..4c950f8095 100644 --- a/src/paths/path_buffer_pool.zig +++ b/src/paths/path_buffer_pool.zig @@ -7,8 +7,8 @@ fn PathBufferPoolT(comptime T: type) type { const Pool = ObjectPool(T, null, true, 4); pub fn get() *T { - // use a threadlocal allocator so mimalloc deletes it on thread deinit. - return &Pool.get(bun.threadlocalAllocator()).data; + // use a thread-local allocator so mimalloc deletes it on thread deinit. 
+ return &Pool.get(bun.threadLocalAllocator()).data; } pub fn put(buffer: *const T) void { diff --git a/src/ptr.zig b/src/ptr.zig index f1063e08ea..43e4ee9e46 100644 --- a/src/ptr.zig +++ b/src/ptr.zig @@ -5,6 +5,7 @@ pub const CowSlice = @import("./ptr/CowSlice.zig").CowSlice; pub const CowSliceZ = @import("./ptr/CowSlice.zig").CowSliceZ; pub const CowString = CowSlice(u8); +pub const ref_count = @import("./ptr/ref_count.zig"); pub const RefCount = ref_count.RefCount; pub const ThreadSafeRefCount = ref_count.ThreadSafeRefCount; pub const RefPtr = ref_count.RefPtr; @@ -13,5 +14,3 @@ pub const TaggedPointer = @import("./ptr/tagged_pointer.zig").TaggedPointer; pub const TaggedPointerUnion = @import("./ptr/tagged_pointer.zig").TaggedPointerUnion; pub const WeakPtr = @import("./ptr/weak_ptr.zig").WeakPtr; - -const ref_count = @import("./ptr/ref_count.zig"); diff --git a/src/ptr/ref_count.zig b/src/ptr/ref_count.zig index 0a2c0213f0..2d20dc633b 100644 --- a/src/ptr/ref_count.zig +++ b/src/ptr/ref_count.zig @@ -1,4 +1,4 @@ -pub const RefCountOptions = struct { +pub const Options = struct { /// Defaults to the type basename. debug_name: ?[]const u8 = null, destructor_ctx: ?type = null, @@ -64,18 +64,18 @@ pub const RefCountOptions = struct { /// If you want to enforce usage of RefPtr for memory management, you /// can remove the forwarded `ref` and `deref` methods from `RefCount`. /// If these methods are not forwarded, keep in mind that it should use the wrapper. -pub fn RefCount(T: type, field_name: []const u8, destructor_untyped: anytype, options: RefCountOptions) type { +pub fn RefCount(T: type, field_name: []const u8, destructor: anytype, options: Options) type { return struct { - active_counts: u32, - thread: if (enable_single_threaded_checks) ?bun.DebugThreadLock else void, - debug: if (enable_debug) DebugData(false) else void, + raw_count: u32, + thread: bun.safety.ThreadLock, + debug: if (enable_debug) DebugData(false) else void = if (enable_debug) .empty, const debug_name = options.debug_name orelse bun.meta.typeBaseName(@typeName(T)); pub const scope = bun.Output.Scoped(debug_name, true); - const DEBUG_STACK_TRACE = false; + const debug_stack_trace = false; const Destructor = if (options.destructor_ctx) |ctx| fn (*T, ctx) void else fn (*T) void; - const destructor: Destructor = destructor_untyped; + const typed_destructor: Destructor = destructor; pub fn init() @This() { return .initExactRefs(1); @@ -85,34 +85,33 @@ pub fn RefCount(T: type, field_name: []const u8, destructor_untyped: anytype, op pub fn initExactRefs(count: u32) @This() { assert(count > 0); return .{ - .thread = if (enable_single_threaded_checks) if (@inComptime()) null else .initLocked(), - .active_counts = count, - .debug = if (enable_debug) .empty else undefined, + .raw_count = count, + .thread = .initLockedIfNonComptime(), }; } - // trait implementation + // interface implementation pub fn ref(self: *T) void { - const counter = getCounter(self); + const count = getRefCount(self); if (enable_debug) { - counter.debug.assertValid(); + count.debug.assertValid(); } if (bun.Environment.enable_logs and scope.isVisible()) { scope.log("0x{x} ref {d} -> {d}:", .{ @intFromPtr(self), - counter.active_counts, - counter.active_counts + 1, + count.raw_count, + count.raw_count + 1, }); - if (DEBUG_STACK_TRACE) { + if (debug_stack_trace) { bun.crash_handler.dumpCurrentStackTrace(@returnAddress(), .{ .frame_count = 2, .skip_file_patterns = &.{"ptr/ref_count.zig"}, }); } } - counter.assertNonThreadSafeCountIsSingleThreaded(); - 
counter.active_counts += 1; + count.assertSingleThreaded(); + count.raw_count += 1; } pub fn deref(self: *T) void { @@ -120,28 +119,28 @@ pub fn RefCount(T: type, field_name: []const u8, destructor_untyped: anytype, op } pub fn derefWithContext(self: *T, ctx: (options.destructor_ctx orelse void)) void { - const counter = getCounter(self); + const count = getRefCount(self); if (enable_debug) { - counter.debug.assertValid(); // Likely double deref. + count.debug.assertValid(); // Likely double deref. } if (bun.Environment.enable_logs and scope.isVisible()) { scope.log("0x{x} deref {d} -> {d}:", .{ @intFromPtr(self), - counter.active_counts, - counter.active_counts - 1, + count.raw_count, + count.raw_count - 1, }); - if (DEBUG_STACK_TRACE) { + if (debug_stack_trace) { bun.crash_handler.dumpCurrentStackTrace(@returnAddress(), .{ .frame_count = 2, .skip_file_patterns = &.{"ptr/ref_count.zig"}, }); } } - counter.assertNonThreadSafeCountIsSingleThreaded(); - counter.active_counts -= 1; - if (counter.active_counts == 0) { + count.assertSingleThreaded(); + count.raw_count -= 1; + if (count.raw_count == 0) { if (enable_debug) { - counter.debug.deinit(std.mem.asBytes(self), @returnAddress()); + count.debug.deinit(std.mem.asBytes(self), @returnAddress()); } if (comptime options.destructor_ctx != null) { destructor(self, ctx); @@ -159,35 +158,33 @@ pub fn RefCount(T: type, field_name: []const u8, destructor_untyped: anytype, op // utility functions pub fn hasOneRef(count: *@This()) bool { - count.assertNonThreadSafeCountIsSingleThreaded(); - return count.active_counts == 1; + count.assertSingleThreaded(); + return count.raw_count == 1; + } + + pub fn get(count: *const @This()) u32 { + return count.raw_count; } pub fn dumpActiveRefs(count: *@This()) void { if (enable_debug) { const ptr: *T = @fieldParentPtr(field_name, count); - count.debug.dump(@typeName(T), ptr, count.active_counts); + count.debug.dump(@typeName(T), ptr, count.raw_count); } } - /// The active_counts value is 0 after the destructor is called. + /// The count is 0 after the destructor is called. pub fn assertNoRefs(count: *const @This()) void { if (enable_debug) { - bun.assert(count.active_counts == 0); + bun.assert(count.raw_count == 0); } } - fn assertNonThreadSafeCountIsSingleThreaded(count: *@This()) void { - if (enable_single_threaded_checks) { - const thread = if (count.thread) |*ptr| ptr else { - count.thread = .initLocked(); - return; - }; - thread.assertLocked(); // this counter is not thread-safe - } + fn assertSingleThreaded(count: *@This()) void { + count.thread.lockOrAssert(); } - fn getCounter(self: *T) *@This() { + fn getRefCount(self: *T) *@This() { return &@field(self, field_name); } @@ -205,10 +202,10 @@ pub fn RefCount(T: type, field_name: []const u8, destructor_untyped: anytype, op /// /// Avoid reference counting when an object only has one owner. /// Avoid thread-safe reference counting when only one thread allocates and frees. 
-pub fn ThreadSafeRefCount(T: type, field_name: []const u8, destructor: fn (*T) void, options: RefCountOptions) type { +pub fn ThreadSafeRefCount(T: type, field_name: []const u8, destructor: fn (*T) void, options: Options) type { return struct { - active_counts: std.atomic.Value(u32), - debug: if (enable_debug) DebugData(true) else void, + raw_count: std.atomic.Value(u32), + debug: if (enable_debug) DebugData(true) else void = if (enable_debug) .empty, const debug_name = options.debug_name orelse bun.meta.typeBaseName(@typeName(T)); pub const scope = bun.Output.Scoped(debug_name, true); @@ -220,18 +217,15 @@ pub fn ThreadSafeRefCount(T: type, field_name: []const u8, destructor: fn (*T) v /// Caller will have to call `unref()` exactly `count` times to destroy. pub fn initExactRefs(count: u32) @This() { assert(count > 0); - return .{ - .active_counts = .init(count), - .debug = if (enable_debug) .empty, - }; + return .{ .raw_count = .init(count) }; } - // trait implementation + // interface implementation pub fn ref(self: *T) void { - const counter = getCounter(self); - if (enable_debug) counter.debug.assertValid(); - const old_count = counter.active_counts.fetchAdd(1, .seq_cst); + const count = getRefCount(self); + if (enable_debug) count.debug.assertValid(); + const old_count = count.raw_count.fetchAdd(1, .seq_cst); if (comptime bun.Environment.enable_logs) { scope.log("0x{x} ref {d} -> {d}", .{ @intFromPtr(self), @@ -243,9 +237,9 @@ pub fn ThreadSafeRefCount(T: type, field_name: []const u8, destructor: fn (*T) v } pub fn deref(self: *T) void { - const counter = getCounter(self); - if (enable_debug) counter.debug.assertValid(); - const old_count = counter.active_counts.fetchSub(1, .seq_cst); + const count = getRefCount(self); + if (enable_debug) count.debug.assertValid(); + const old_count = count.raw_count.fetchSub(1, .seq_cst); if (comptime bun.Environment.enable_logs) { scope.log("0x{x} deref {d} -> {d}", .{ @intFromPtr(self), @@ -256,44 +250,44 @@ pub fn ThreadSafeRefCount(T: type, field_name: []const u8, destructor: fn (*T) v bun.debugAssert(old_count > 0); if (old_count == 1) { if (enable_debug) { - counter.debug.deinit(std.mem.asBytes(self), @returnAddress()); + count.debug.deinit(std.mem.asBytes(self), @returnAddress()); } destructor(self); } } pub fn dupeRef(self: anytype) RefPtr(@TypeOf(self)) { - if (enable_debug) getCounter(self).debug.assertValid(); + if (enable_debug) getRefCount(self).debug.assertValid(); _ = @as(*const T, self); // ensure ptr child is T return .initRef(self); } // utility functions - pub fn hasOneRef(counter: *const @This()) bool { - if (enable_debug) counter.debug.assertValid(); - return counter.active_counts.load(.seq_cst) == 1; + pub fn get(count: *const @This()) u32 { + return count.raw_count.load(.seq_cst); } - pub fn getCount(counter: *const @This()) u32 { - return counter.active_counts.load(.seq_cst); + pub fn hasOneRef(count: *const @This()) bool { + if (enable_debug) count.debug.assertValid(); + return count.get() == 1; } pub fn dumpActiveRefs(count: *@This()) void { if (enable_debug) { const ptr: *T = @alignCast(@fieldParentPtr(field_name, count)); - count.debug.dump(@typeName(T), ptr, count.active_counts.load(.seq_cst)); + count.debug.dump(@typeName(T), ptr, count.raw_count.load(.seq_cst)); } } - /// The active_counts value is 0 after the destructor is called. + /// The count is 0 after the destructor is called. 
pub fn assertNoRefs(count: *const @This()) void { if (enable_debug) { - bun.assert(count.active_counts.load(.seq_cst) == 0); + bun.assert(count.raw_count.load(.seq_cst) == 0); } } - fn getCounter(self: *T) *@This() { + fn getRefCount(self: *T) *@This() { return &@field(self, field_name); } @@ -408,7 +402,7 @@ pub fn RefPtr(T: type) type { return .{ .data = raw_ptr, .debug = if (enable_debug) raw_ptr.ref_count.debug.acquire( - &raw_ptr.ref_count.active_counts, + &raw_ptr.ref_count.raw_count, ret_addr orelse @returnAddress(), ), }; @@ -569,6 +563,4 @@ const std = @import("std"); const bun = @import("bun"); const AllocationScope = bun.AllocationScope; const assert = bun.assert; - const enable_debug = bun.Environment.isDebug; -const enable_single_threaded_checks = enable_debug; diff --git a/src/safety.zig b/src/safety.zig index 54c5671f1e..8e930c1a01 100644 --- a/src/safety.zig +++ b/src/safety.zig @@ -1,2 +1,4 @@ -pub const AllocPtr = @import("./safety/alloc_ptr.zig").AllocPtr; +pub const alloc = @import("./safety/alloc.zig"); +pub const AllocPtr = alloc.AllocPtr; pub const CriticalSection = @import("./safety/CriticalSection.zig"); +pub const ThreadLock = @import("./safety/ThreadLock.zig"); diff --git a/src/safety/CriticalSection.zig b/src/safety/CriticalSection.zig index 3d3497503a..d308295b5d 100644 --- a/src/safety/CriticalSection.zig +++ b/src/safety/CriticalSection.zig @@ -30,10 +30,6 @@ const Self = @This(); internal_state: if (enabled) State else void = if (enabled) .{}, -/// A value that does not alias any other thread ID. -/// See `Thread/Mutex/Recursive.zig` in the Zig standard library. -const invalid_thread_id = std.math.maxInt(Thread.Id); - const OptionalThreadId = struct { inner: Thread.Id, @@ -62,6 +58,10 @@ const State = struct { /// The ID of the thread that first acquired the lock (the "owner thread"). thread_id: std.atomic.Value(Thread.Id) = .init(invalid_thread_id), + /// Stack trace of the first time the owner thread acquired the lock (that is, when it became + /// the owner). + owner_trace: if (traces_enabled) StoredTrace else void = if (traces_enabled) StoredTrace.empty, + /// Number of nested calls to `lockShared`/`lockExclusive` performed on the owner thread. /// Only accessed on the owner thread. owned_count: u32 = 0, @@ -74,23 +74,43 @@ const State = struct { /// (read/write) access. const exclusive = std.math.maxInt(u32); - /// Acquire the lock for shared (read-only) access. - fn lockShared(self: *State) void { + fn getOrBecomeOwner(self: *State) Thread.Id { const current_id = Thread.getCurrentId(); // .monotonic is okay because we don't need to synchronize-with other threads; we just need // to make sure that only one thread succeeds in setting the value. - const id = self.thread_id.cmpxchgStrong( + return self.thread_id.cmpxchgStrong( invalid_thread_id, current_id, .monotonic, .monotonic, - ) orelse current_id; - if (id == current_id) { + ) orelse { + if (comptime traces_enabled) { + self.owner_trace = StoredTrace.capture(@returnAddress()); + } + return current_id; + }; + } + + fn showTrace(self: *State) void { + if (comptime !traces_enabled) return; + bun.Output.err("race condition", "`CriticalSection` first entered here:", .{}); + bun.crash_handler.dumpStackTrace( + self.owner_trace.trace(), + .{ .frame_count = 10, .stop_at_jsc_llint = true }, + ); + } + + /// Acquire the lock for shared (read-only) access. 
+ fn lockShared(self: *State) void { + const current_id = Thread.getCurrentId(); + const owner_id = self.getOrBecomeOwner(); + if (owner_id == current_id) { self.owned_count += 1; } else if (self.count.fetchAdd(1, .monotonic) == exclusive) { + self.showTrace(); std.debug.panic( "race condition: thread {} tried to read data being modified by {}", - .{ current_id, OptionalThreadId.init(id) }, + .{ current_id, OptionalThreadId.init(owner_id) }, ); } } @@ -98,28 +118,25 @@ const State = struct { /// Acquire the lock for exclusive (read/write) access. fn lockExclusive(self: *State) void { const current_id = Thread.getCurrentId(); - // .monotonic is okay because we don't need to synchronize-with other threads; we just need - // to make sure that only one thread succeeds in setting the value. - const id = self.thread_id.cmpxchgStrong( - invalid_thread_id, - current_id, - .monotonic, - .monotonic, - ) orelse current_id; - if (id == current_id) { + const owner_id = self.getOrBecomeOwner(); + if (owner_id == current_id) { // .monotonic is okay because concurrent access is an error. switch (self.count.swap(exclusive, .monotonic)) { 0, exclusive => {}, - else => std.debug.panic( - "race condition: thread {} tried to modify data being read by {}", - .{ current_id, OptionalThreadId.init(id) }, - ), + else => { + self.showTrace(); + std.debug.panic( + "race condition: thread {} tried to modify data being read by {}", + .{ current_id, OptionalThreadId.init(owner_id) }, + ); + }, } self.owned_count += 1; } else { + self.showTrace(); std.debug.panic( "race condition: thread {} tried to modify data being accessed by {}", - .{ current_id, OptionalThreadId.init(id) }, + .{ current_id, OptionalThreadId.init(owner_id) }, ); } } @@ -129,13 +146,13 @@ const State = struct { const current_id = Thread.getCurrentId(); // .monotonic is okay because this value shouldn't change until all locks are released, and // we currently hold a lock. - const id = self.thread_id.load(.monotonic); + const owner_id = self.thread_id.load(.monotonic); - // It's possible for this thread to be the owner (`id == current_id`) and for `owned_count` - // to be 0, if this thread originally wasn't the owner, but became the owner when the - // original owner released all of its locks. In this case, some of the lock count for this - // thread is still in `self.count` rather than `self.owned_count`. - if (id == current_id and self.owned_count > 0) { + // It's possible for this thread to be the owner (`owner_id == current_id`) and for + // `owned_count` to be 0, if this thread originally wasn't the owner, but became the owner + // when the original owner released all of its locks. In this case, some of the lock count + // for this thread is still in `self.count` rather than `self.owned_count`. 
+ if (owner_id == current_id and self.owned_count > 0) { self.owned_count -= 1; if (self.owned_count == 0) { // .monotonic is okay because: @@ -181,7 +198,11 @@ pub fn end(self: *Self) void { } const bun = @import("bun"); +const invalid_thread_id = @import("./thread_id.zig").invalid; +const StoredTrace = bun.crash_handler.StoredTrace; + const enabled = bun.Environment.ci_assert; +const traces_enabled = bun.Environment.isDebug; const std = @import("std"); const Thread = std.Thread; diff --git a/src/safety/ThreadLock.zig b/src/safety/ThreadLock.zig new file mode 100644 index 0000000000..143bd1904a --- /dev/null +++ b/src/safety/ThreadLock.zig @@ -0,0 +1,78 @@ +const Self = @This(); + +owning_thread: if (enabled) Thread.Id else void, +locked_at: if (traces_enabled) StoredTrace else void = if (traces_enabled) StoredTrace.empty, + +pub fn initUnlocked() Self { + return .{ .owning_thread = if (comptime enabled) invalid_thread_id }; +} + +pub fn initLocked() Self { + var self = Self.initUnlocked(); + self.lock(); + return self; +} + +pub fn initLockedIfNonComptime() Self { + return if (@inComptime()) .initUnlocked() else .initLocked(); +} + +pub fn lock(self: *Self) void { + if (comptime !enabled) return; + const current = Thread.getCurrentId(); + if (self.owning_thread != invalid_thread_id) { + if (comptime traces_enabled) { + bun.Output.err("assertion failure", "`ThreadLock` was already locked here:", .{}); + bun.crash_handler.dumpStackTrace( + self.locked_at.trace(), + .{ .frame_count = 10, .stop_at_jsc_llint = true }, + ); + } + std.debug.panic( + "tried to lock `ThreadLock` on thread {}, but was already locked by thread {}", + .{ current, self.owning_thread }, + ); + } + self.owning_thread = current; + if (comptime traces_enabled) { + self.locked_at = StoredTrace.capture(@returnAddress()); + } +} + +pub fn unlock(self: *Self) void { + if (comptime !enabled) return; + self.assertLocked(); + self.* = .initUnlocked(); +} + +pub fn assertLocked(self: *const Self) void { + if (comptime !enabled) return; + bun.assertf(self.owning_thread != invalid_thread_id, "`ThreadLock` is not locked", .{}); + const current = Thread.getCurrentId(); + bun.assertf( + self.owning_thread == current, + "`ThreadLock` is locked by thread {}, not thread {}", + .{ self.owning_thread, current }, + ); +} + +/// Acquires the lock if not already locked; otherwise, asserts that the current thread holds the +/// lock. +pub fn lockOrAssert(self: *Self) void { + if (comptime !enabled) return; + if (self.owning_thread == invalid_thread_id) { + self.lock(); + } else { + self.assertLocked(); + } +} + +const bun = @import("bun"); +const invalid_thread_id = @import("./thread_id.zig").invalid; +const StoredTrace = bun.crash_handler.StoredTrace; + +const enabled = bun.Environment.ci_assert; +const traces_enabled = bun.Environment.isDebug; + +const std = @import("std"); +const Thread = std.Thread; diff --git a/src/safety/alloc_ptr.zig b/src/safety/alloc.zig similarity index 63% rename from src/safety/alloc_ptr.zig rename to src/safety/alloc.zig index fb9bd3dd80..8fca18adb8 100644 --- a/src/safety/alloc_ptr.zig +++ b/src/safety/alloc.zig @@ -37,6 +37,26 @@ fn hasPtr(alloc: Allocator) bool { bun.String.isWTFAllocator(alloc); } +/// Asserts that two allocators are equal (in `ci_assert` builds). +/// +/// This function may have false negatives; that is, it may fail to detect that two allocators +/// are different. However, in practice, it's a useful safety check. 
+pub fn assertEq(alloc1: Allocator, alloc2: Allocator) void { + if (comptime !bun.ci_assert) return; + bun.assertf( + alloc1.vtable == alloc2.vtable, + "allocators do not match (vtables differ: {*} and {*})", + .{ alloc1.vtable, alloc2.vtable }, + ); + const ptr1 = if (hasPtr(alloc1)) alloc1.ptr else return; + const ptr2 = if (hasPtr(alloc2)) alloc2.ptr else return; + bun.assertf( + ptr1 == ptr2, + "allocators do not match (vtables are both {*} but pointers differ: {*} and {*})", + .{ alloc1.vtable, ptr1, ptr2 }, + ); +} + fn allocToPtr(alloc: Allocator) *anyopaque { return if (hasPtr(alloc)) alloc.ptr else @ptrCast(@constCast(alloc.vtable)); } @@ -50,6 +70,7 @@ pub const AllocPtr = struct { const Self = @This(); ptr: if (enabled) ?*anyopaque else void = if (enabled) null, + trace: if (traces_enabled) StoredTrace else void = if (traces_enabled) StoredTrace.empty, pub fn init(alloc: Allocator) Self { var self = Self{}; @@ -62,6 +83,9 @@ pub const AllocPtr = struct { const ptr = allocToPtr(alloc); if (self.ptr == null) { self.ptr = ptr; + if (comptime traces_enabled) { + self.trace = StoredTrace.capture(@returnAddress()); + } } else { self.assertPtrEq(ptr); } @@ -73,10 +97,23 @@ pub const AllocPtr = struct { } fn assertPtrEq(self: Self, ptr: *anyopaque) void { - if (self.ptr) |self_ptr| bun.assertf( - ptr == self_ptr, + const old_ptr = self.ptr orelse return; + if (old_ptr == ptr) return; + if (comptime traces_enabled) { + bun.Output.err( + "allocator mismatch", + "collection first used here, with a different allocator:", + .{}, + ); + var trace = self.trace; + bun.crash_handler.dumpStackTrace( + trace.trace(), + .{ .frame_count = 10, .stop_at_jsc_llint = true }, + ); + } + std.debug.panic( "cannot use multiple allocators with the same collection (got {*}, expected {*})", - .{ ptr, self_ptr }, + .{ ptr, old_ptr }, ); } }; @@ -85,4 +122,7 @@ const bun = @import("bun"); const std = @import("std"); const Allocator = std.mem.Allocator; const LinuxMemFdAllocator = bun.allocators.LinuxMemFdAllocator; +const StoredTrace = bun.crash_handler.StoredTrace; + const enabled = bun.Environment.ci_assert; +const traces_enabled = bun.Environment.isDebug; diff --git a/src/safety/thread_id.zig b/src/safety/thread_id.zig new file mode 100644 index 0000000000..ea9195eb8e --- /dev/null +++ b/src/safety/thread_id.zig @@ -0,0 +1,5 @@ +/// A value that does not alias any other thread ID. +/// See `Thread/Mutex/Recursive.zig` in the Zig standard library. 
+pub const invalid = std.math.maxInt(std.Thread.Id); + +const std = @import("std"); diff --git a/src/sql/postgres/PostgresSQLQuery.zig b/src/sql/postgres/PostgresSQLQuery.zig index 7d51065e41..2fa6c66331 100644 --- a/src/sql/postgres/PostgresSQLQuery.zig +++ b/src/sql/postgres/PostgresSQLQuery.zig @@ -52,7 +52,7 @@ pub const Status = enum(u8) { }; pub fn hasPendingActivity(this: *@This()) bool { - return this.ref_count.getCount() > 1; + return this.ref_count.get() > 1; } pub fn deinit(this: *@This()) void { diff --git a/src/threading/guarded_value.zig b/src/threading/guarded_value.zig index ba5640988e..832e4c155a 100644 --- a/src/threading/guarded_value.zig +++ b/src/threading/guarded_value.zig @@ -26,7 +26,7 @@ pub fn GuardedValue(comptime Value: type, comptime Mutex: type) type { } pub fn DebugGuardedValue(comptime Value: type) type { - return GuardedValue(Value, bun.DebugThreadLock); + return GuardedValue(Value, bun.safety.ThreadLock); } const bun = @import("bun"); diff --git a/test/internal/ban-limits.json b/test/internal/ban-limits.json index 9994ceb9d3..5a51441805 100644 --- a/test/internal/ban-limits.json +++ b/test/internal/ban-limits.json @@ -7,7 +7,7 @@ ".stdDir()": 40, ".stdFile()": 18, "// autofix": 168, - ": [^=]+= undefined,$": 261, + ": [^=]+= undefined,$": 260, "== alloc.ptr": 0, "== allocator.ptr": 0, "@import(\"bun\").": 0,
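
For readers skimming the diff: a rough usage sketch of the new `bun.safety.ThreadLock`, mirroring how `Watcher.thread_lock` and `DevServer.graph_safety_lock` use it above. The `Cache` type here is hypothetical; only the lock calls reflect this patch.

    const std = @import("std");
    const bun = @import("bun");

    const Cache = struct {
        thread_lock: bun.safety.ThreadLock = .initUnlocked(),
        entries: std.ArrayListUnmanaged(u32) = .{},

        fn insert(self: *Cache, value: u32) !void {
            // First call claims the lock for the current thread; later calls
            // assert we are still on it. In debug builds a violation also
            // dumps the stack trace captured at the original lock site.
            self.thread_lock.lockOrAssert();
            try self.entries.append(bun.default_allocator, value);
        }

        fn deinit(self: *Cache) void {
            self.thread_lock.assertLocked();
            self.entries.deinit(bun.default_allocator);
            self.thread_lock.unlock();
        }
    };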
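
Likewise, a hedged sketch of `bun.safety.alloc.assertEq` (the `Buffer` type is hypothetical). It compares vtables first and compares `ptr` only for allocator types known to use it, so allocators like `std.heap.c_allocator`, whose `ptr` is undefined, are never misread.

    const std = @import("std");
    const bun = @import("bun");

    const Buffer = struct {
        bytes: []u8,
        allocator: std.mem.Allocator,

        fn deinit(self: *Buffer, allocator: std.mem.Allocator) void {
            // Panics on mismatch in ci_assert builds; no-op otherwise.
            bun.safety.alloc.assertEq(allocator, self.allocator);
            allocator.free(self.bytes);
            self.* = undefined;
        }
    };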
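
And the call-site shape for the now-infallible `MimallocArena.init`, as applied throughout the diff:

    const bun = @import("bun");

    fn withArena() void {
        // Before: `var arena = try bun.MimallocArena.init();` (often
        // `catch unreachable`). Allocation failure now aborts inside
        // `init` via `bun.outOfMemory()`.
        var arena = bun.MimallocArena.init();
        defer arena.deinit(); // deinit also sets the arena to undefined now
        const allocator = arena.allocator();
        _ = allocator;
    }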