diff --git a/cmake/sources/ZigSources.txt b/cmake/sources/ZigSources.txt index a038c91225..25f6e46937 100644 --- a/cmake/sources/ZigSources.txt +++ b/cmake/sources/ZigSources.txt @@ -1,10 +1,11 @@ src/allocators.zig -src/allocators/AllocationScope.zig +src/allocators/allocation_scope.zig src/allocators/basic.zig src/allocators/fallback.zig src/allocators/fallback/z.zig src/allocators/LinuxMemFdAllocator.zig src/allocators/MaxHeapAllocator.zig +src/allocators/maybe_owned.zig src/allocators/MemoryReportingAllocator.zig src/allocators/mimalloc.zig src/allocators/MimallocArena.zig @@ -64,7 +65,6 @@ src/async/windows_event_loop.zig src/bake.zig src/bake/DevServer.zig src/bake/DevServer/Assets.zig -src/bake/DevServer/DevAllocator.zig src/bake/DevServer/DirectoryWatchStore.zig src/bake/DevServer/ErrorReportRequest.zig src/bake/DevServer/HmrSocket.zig @@ -783,6 +783,7 @@ src/macho.zig src/main_test.zig src/main_wasm.zig src/main.zig +src/memory.zig src/meta.zig src/napi/napi.zig src/node_fallbacks.zig @@ -804,8 +805,6 @@ src/ptr/Cow.zig src/ptr/CowSlice.zig src/ptr/meta.zig src/ptr/owned.zig -src/ptr/owned/maybe.zig -src/ptr/owned/scoped.zig src/ptr/ref_count.zig src/ptr/shared.zig src/ptr/tagged_pointer.zig @@ -1036,7 +1035,7 @@ src/threading.zig src/threading/channel.zig src/threading/Condition.zig src/threading/Futex.zig -src/threading/guarded_value.zig +src/threading/guarded.zig src/threading/Mutex.zig src/threading/ThreadPool.zig src/threading/unbounded_queue.zig diff --git a/src/allocators.zig b/src/allocators.zig index 134c260d9e..ccc1d09ac6 100644 --- a/src/allocators.zig +++ b/src/allocators.zig @@ -3,11 +3,16 @@ pub const z_allocator = basic.z_allocator; pub const freeWithoutSize = basic.freeWithoutSize; pub const mimalloc = @import("./allocators/mimalloc.zig"); pub const MimallocArena = @import("./allocators/MimallocArena.zig"); -pub const AllocationScope = @import("./allocators/AllocationScope.zig"); + +pub const allocation_scope = @import("./allocators/allocation_scope.zig"); +pub const AllocationScope = allocation_scope.AllocationScope; +pub const AllocationScopeIn = allocation_scope.AllocationScopeIn; + pub const NullableAllocator = @import("./allocators/NullableAllocator.zig"); pub const MaxHeapAllocator = @import("./allocators/MaxHeapAllocator.zig"); pub const MemoryReportingAllocator = @import("./allocators/MemoryReportingAllocator.zig"); pub const LinuxMemFdAllocator = @import("./allocators/LinuxMemFdAllocator.zig"); +pub const MaybeOwned = @import("./allocators/maybe_owned.zig").MaybeOwned; pub fn isSliceInBufferT(comptime T: type, slice: []const T, buffer: []const T) bool { return (@intFromPtr(buffer.ptr) <= @intFromPtr(slice.ptr) and @@ -228,7 +233,7 @@ pub fn BSSList(comptime ValueType: type, comptime _count: anytype) type { const Self = @This(); - allocator: Allocator, + allocator: std.mem.Allocator, mutex: Mutex = .{}, head: *OverflowBlock, tail: OverflowBlock, @@ -316,7 +321,7 @@ pub fn BSSStringList(comptime _count: usize, comptime _item_length: usize) type backing_buf: [count * item_length]u8, backing_buf_used: u64, overflow_list: Overflow, - allocator: Allocator, + allocator: std.mem.Allocator, slice_buf: [count][]const u8, slice_buf_used: u16, mutex: Mutex = .{}, @@ -499,7 +504,7 @@ pub fn BSSMap(comptime ValueType: type, comptime count: anytype, comptime store_ index: IndexMap, overflow_list: Overflow, - allocator: Allocator, + allocator: std.mem.Allocator, mutex: Mutex = .{}, backing_buf: [count]ValueType, backing_buf_used: u16, @@ -770,36 +775,119 @@ pub fn 
BSSMap(comptime ValueType: type, comptime count: anytype, comptime store_ }; } -pub fn isDefault(allocator: Allocator) bool { +/// Checks whether `allocator` is the default allocator. +pub fn isDefault(allocator: std.mem.Allocator) bool { return allocator.vtable == c_allocator.vtable; } -/// Allocate memory for a value of type `T` using the provided allocator, and initialize the memory -/// with `value`. -/// -/// If `allocator` is `bun.default_allocator`, this will internally use `bun.tryNew` to benefit from -/// the added assertions. -pub fn create(comptime T: type, allocator: Allocator, value: T) OOM!*T { - if ((comptime Environment.allow_assert) and isDefault(allocator)) { - return bun.tryNew(T, value); - } - const ptr = try allocator.create(T); - ptr.* = value; - return ptr; +// The following functions operate on generic allocators. A generic allocator is a type that +// satisfies the `GenericAllocator` interface: +// +// ``` +// const GenericAllocator = struct { +// // Required. +// pub fn allocator(self: Self) std.mem.Allocator; +// +// // Optional, to allow default-initialization. `.{}` will also be tried. +// pub fn init() Self; +// +// // Optional, if this allocator owns auxiliary resources that need to be deinitialized. +// pub fn deinit(self: *Self) void; +// +// // Optional. Defining a borrowed type makes it clear who owns the allocator and prevents +// // `deinit` from being called twice. +// pub const Borrowed: type; +// pub fn borrow(self: Self) Borrowed; +// }; +// ``` +// +// Generic allocators must support being moved. They cannot contain self-references, and they cannot +// serve allocations from a buffer that exists within the allocator itself (have your allocator type +// contain a pointer to the buffer instead). +// +// As an exception, `std.mem.Allocator` is also treated as a generic allocator, and receives +// special handling in the following functions to achieve this. + +/// Gets the `std.mem.Allocator` for a given generic allocator. +pub fn asStd(allocator: anytype) std.mem.Allocator { + return if (comptime @TypeOf(allocator) == std.mem.Allocator) + allocator + else + allocator.allocator(); } -/// Free memory previously allocated by `create`. +/// A borrowed version of an allocator. /// -/// The memory must have been allocated by the `create` function in this namespace, not -/// directly by `allocator.create`. -pub fn destroy(allocator: Allocator, ptr: anytype) void { - if ((comptime Environment.allow_assert) and isDefault(allocator)) { - bun.destroy(ptr); - } else { - allocator.destroy(ptr); - } +/// Some allocators have a `deinit` method that would be invalid to call multiple times (e.g., +/// `AllocationScope` and `MimallocArena`). +/// +/// If multiple structs or functions need access to the same allocator, we want to avoid simply +/// passing the allocator by value, as this could easily lead to `deinit` being called multiple +/// times if we forget who really owns the allocator. +/// +/// Passing a pointer is not always a good approach, as this results in a performance penalty for +/// zero-sized allocators, and adds another level of indirection in all cases. +/// +/// This function allows allocators that have a concept of being "owned" to define a "borrowed" +/// version of the allocator. If no such type is defined, it is assumed the allocator does not +/// own any data, and `Borrowed(Allocator)` is simply the same as `Allocator`. 
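+///
+/// A minimal sketch of the intended pattern (`useArena` is a hypothetical callee):
+///
+/// ```
+/// var arena: bun.MimallocArena = .init();
+/// defer arena.deinit(); // only the owner deinitializes
+/// useArena(arena.borrow()); // callees receive a `Borrowed` copy that cannot deinit the heap
+/// ```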
+pub fn Borrowed(comptime Allocator: type) type {
+    return if (comptime @hasDecl(Allocator, "Borrowed"))
+        Allocator.Borrowed
+    else
+        Allocator;
 }

+/// Borrows an allocator.
+///
+/// See `Borrowed` for the rationale.
+pub fn borrow(allocator: anytype) Borrowed(@TypeOf(allocator)) {
+    return if (comptime @hasDecl(@TypeOf(allocator), "Borrowed"))
+        allocator.borrow()
+    else
+        allocator;
+}
+
+/// A type that behaves like `?Allocator`. This function will either return `?Allocator` itself,
+/// or an optimized type that behaves like `?Allocator`.
+///
+/// Use `initNullable` and `unpackNullable` to work with the returned type.
+pub fn Nullable(comptime Allocator: type) type {
+    return if (comptime Allocator == std.mem.Allocator)
+        NullableAllocator
+    else if (comptime @hasDecl(Allocator, "Nullable"))
+        Allocator.Nullable
+    else
+        ?Allocator;
+}
+
+/// Creates a `Nullable(Allocator)` from an optional `Allocator`.
+pub fn initNullable(comptime Allocator: type, allocator: ?Allocator) Nullable(Allocator) {
+    return if (comptime Allocator == std.mem.Allocator or @hasDecl(Allocator, "Nullable"))
+        .init(allocator)
+    else
+        allocator;
+}
+
+/// Turns a `Nullable(Allocator)` back into an optional `Allocator`.
+pub fn unpackNullable(comptime Allocator: type, allocator: Nullable(Allocator)) ?Allocator {
+    return if (comptime Allocator == std.mem.Allocator or @hasDecl(Allocator, "Nullable"))
+        allocator.get()
+    else
+        allocator;
+}
+
+/// The default allocator. This is a zero-sized type whose `allocator` method returns
+/// `bun.default_allocator`.
+///
+/// This type is a `GenericAllocator`; see `src/allocators.zig`.
+pub const Default = struct {
+    pub fn allocator(self: Default) std.mem.Allocator {
+        _ = self;
+        return c_allocator;
+    }
+};
+
 const basic = if (bun.use_mimalloc)
     @import("./allocators/basic.zig")
 else
@@ -807,7 +895,6 @@
 const Environment = @import("./env.zig");

 const std = @import("std");
-const Allocator = std.mem.Allocator;

 const bun = @import("bun");
 const OOM = bun.OOM;
diff --git a/src/allocators/AllocationScope.zig b/src/allocators/AllocationScope.zig
deleted file mode 100644
index abc8cd9de9..0000000000
--- a/src/allocators/AllocationScope.zig
+++ /dev/null
@@ -1,288 +0,0 @@
-//! AllocationScope wraps another allocator, providing leak and invalid free assertions.
-//! It also allows measuring how much memory a scope has allocated.
-//!
-//! AllocationScope is conceptually a pointer, so it can be moved without invalidating allocations.
-//! Therefore, it isn't necessary to pass an AllocationScope by pointer.
-
-const Self = @This();
-
-pub const enabled = bun.Environment.enableAllocScopes;
-
-internal_state: if (enabled) *State else Allocator,
-
-const State = struct {
-    parent: Allocator,
-    mutex: bun.Mutex,
-    total_memory_allocated: usize,
-    allocations: std.AutoHashMapUnmanaged([*]const u8, Allocation),
-    frees: std.AutoArrayHashMapUnmanaged([*]const u8, Free),
-    /// Once `frees` fills up, entries are overwritten from start to end.
- free_overwrite_index: std.math.IntFittingRange(0, max_free_tracking + 1), -}; - -pub const max_free_tracking = 2048 - 1; - -pub const Allocation = struct { - allocated_at: StoredTrace, - len: usize, - extra: Extra, -}; - -pub const Free = struct { - allocated_at: StoredTrace, - freed_at: StoredTrace, -}; - -pub const Extra = union(enum) { - none, - ref_count: *RefCountDebugData(false), - ref_count_threadsafe: *RefCountDebugData(true), - - const RefCountDebugData = @import("../ptr/ref_count.zig").DebugData; -}; - -pub fn init(parent_alloc: Allocator) Self { - const state = if (comptime enabled) - bun.new(State, .{ - .parent = parent_alloc, - .total_memory_allocated = 0, - .allocations = .empty, - .frees = .empty, - .free_overwrite_index = 0, - .mutex = .{}, - }) - else - parent_alloc; - return .{ .internal_state = state }; -} - -pub fn deinit(scope: Self) void { - if (comptime !enabled) return; - - const state = scope.internal_state; - state.mutex.lock(); - defer bun.destroy(state); - defer state.allocations.deinit(state.parent); - const count = state.allocations.count(); - if (count == 0) return; - Output.errGeneric("Allocation scope leaked {d} allocations ({})", .{ - count, - bun.fmt.size(state.total_memory_allocated, .{}), - }); - var it = state.allocations.iterator(); - var n: usize = 0; - while (it.next()) |entry| { - Output.prettyErrorln("- {any}, len {d}, at:", .{ entry.key_ptr.*, entry.value_ptr.len }); - bun.crash_handler.dumpStackTrace(entry.value_ptr.allocated_at.trace(), trace_limits); - - switch (entry.value_ptr.extra) { - .none => {}, - inline else => |t| t.onAllocationLeak(@constCast(entry.key_ptr.*[0..entry.value_ptr.len])), - } - - n += 1; - if (n >= 8) { - Output.prettyErrorln("(only showing first 10 leaks)", .{}); - break; - } - } - Output.panic("Allocation scope leaked {}", .{bun.fmt.size(state.total_memory_allocated, .{})}); -} - -pub fn allocator(scope: Self) Allocator { - const state = scope.internal_state; - return if (comptime enabled) .{ .ptr = state, .vtable = &vtable } else state; -} - -pub fn parent(scope: Self) Allocator { - const state = scope.internal_state; - return if (comptime enabled) state.parent else state; -} - -pub fn total(self: Self) usize { - if (comptime !enabled) @compileError("AllocationScope must be enabled"); - return self.internal_state.total_memory_allocated; -} - -pub fn numAllocations(self: Self) usize { - if (comptime !enabled) @compileError("AllocationScope must be enabled"); - return self.internal_state.allocations.count(); -} - -const vtable: Allocator.VTable = .{ - .alloc = alloc, - .resize = &std.mem.Allocator.noResize, - .remap = &std.mem.Allocator.noRemap, - .free = free, -}; - -// Smaller traces since AllocationScope prints so many -pub const trace_limits: bun.crash_handler.WriteStackTraceLimits = .{ - .frame_count = 6, - .stop_at_jsc_llint = true, - .skip_stdlib = true, -}; -pub const free_trace_limits: bun.crash_handler.WriteStackTraceLimits = .{ - .frame_count = 3, - .stop_at_jsc_llint = true, - .skip_stdlib = true, -}; - -fn alloc(ctx: *anyopaque, len: usize, alignment: std.mem.Alignment, ret_addr: usize) ?[*]u8 { - const state: *State = @ptrCast(@alignCast(ctx)); - - state.mutex.lock(); - defer state.mutex.unlock(); - state.allocations.ensureUnusedCapacity(state.parent, 1) catch - return null; - const result = state.parent.vtable.alloc(state.parent.ptr, len, alignment, ret_addr) orelse - return null; - trackAllocationAssumeCapacity(state, result[0..len], ret_addr, .none); - return result; -} - -fn 
trackAllocationAssumeCapacity(state: *State, buf: []const u8, ret_addr: usize, extra: Extra) void { - const trace = StoredTrace.capture(ret_addr); - state.allocations.putAssumeCapacityNoClobber(buf.ptr, .{ - .allocated_at = trace, - .len = buf.len, - .extra = extra, - }); - state.total_memory_allocated += buf.len; -} - -fn free(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void { - const state: *State = @ptrCast(@alignCast(ctx)); - state.mutex.lock(); - defer state.mutex.unlock(); - const invalid = trackFreeAssumeLocked(state, buf, ret_addr); - - state.parent.vtable.free(state.parent.ptr, buf, alignment, ret_addr); - - // If asan did not catch the free, panic now. - if (invalid) @panic("Invalid free"); -} - -fn trackFreeAssumeLocked(state: *State, buf: []const u8, ret_addr: usize) bool { - if (state.allocations.fetchRemove(buf.ptr)) |entry| { - state.total_memory_allocated -= entry.value.len; - - free_entry: { - state.frees.put(state.parent, buf.ptr, .{ - .allocated_at = entry.value.allocated_at, - .freed_at = StoredTrace.capture(ret_addr), - }) catch break :free_entry; - // Store a limited amount of free entries - if (state.frees.count() >= max_free_tracking) { - const i = state.free_overwrite_index; - state.free_overwrite_index = @mod(state.free_overwrite_index + 1, max_free_tracking); - state.frees.swapRemoveAt(i); - } - } - return false; - } else { - bun.Output.errGeneric("Invalid free, pointer {any}, len {d}", .{ buf.ptr, buf.len }); - - if (state.frees.get(buf.ptr)) |free_entry_const| { - var free_entry = free_entry_const; - bun.Output.printErrorln("Pointer allocated here:", .{}); - bun.crash_handler.dumpStackTrace(free_entry.allocated_at.trace(), trace_limits); - bun.Output.printErrorln("Pointer first freed here:", .{}); - bun.crash_handler.dumpStackTrace(free_entry.freed_at.trace(), free_trace_limits); - } - - // do not panic because address sanitizer will catch this case better. - // the log message is in case there is a situation where address - // sanitizer does not catch the invalid free. - - return true; - } -} - -pub fn assertOwned(scope: Self, ptr: anytype) void { - if (comptime !enabled) return; - const cast_ptr: [*]const u8 = @ptrCast(switch (@typeInfo(@TypeOf(ptr)).pointer.size) { - .c, .one, .many => ptr, - .slice => if (ptr.len > 0) ptr.ptr else return, - }); - const state = scope.internal_state; - state.mutex.lock(); - defer state.mutex.unlock(); - _ = state.allocations.getPtr(cast_ptr) orelse - @panic("this pointer was not owned by the allocation scope"); -} - -pub fn assertUnowned(scope: Self, ptr: anytype) void { - if (comptime !enabled) return; - const cast_ptr: [*]const u8 = @ptrCast(switch (@typeInfo(@TypeOf(ptr)).pointer.size) { - .c, .one, .many => ptr, - .slice => if (ptr.len > 0) ptr.ptr else return, - }); - const state = scope.internal_state; - state.mutex.lock(); - defer state.mutex.unlock(); - if (state.allocations.getPtr(cast_ptr)) |owned| { - Output.warn("Owned pointer allocated here:"); - bun.crash_handler.dumpStackTrace(owned.allocated_at.trace(), trace_limits, trace_limits); - } - @panic("this pointer was owned by the allocation scope when it was not supposed to be"); -} - -/// Track an arbitrary pointer. Extra data can be stored in the allocation, -/// which will be printed when a leak is detected. 
-pub fn trackExternalAllocation(scope: Self, ptr: []const u8, ret_addr: ?usize, extra: Extra) void { - if (comptime !enabled) return; - const state = scope.internal_state; - state.mutex.lock(); - defer state.mutex.unlock(); - bun.handleOom(state.allocations.ensureUnusedCapacity(state.parent, 1)); - trackAllocationAssumeCapacity(state, ptr, ptr.len, ret_addr orelse @returnAddress(), extra); -} - -/// Call when the pointer from `trackExternalAllocation` is freed. -/// Returns true if the free was invalid. -pub fn trackExternalFree(scope: Self, slice: anytype, ret_addr: ?usize) bool { - if (comptime !enabled) return false; - const ptr: []const u8 = switch (@typeInfo(@TypeOf(slice))) { - .pointer => |p| switch (p.size) { - .slice => brk: { - if (p.child != u8) @compileError("This function only supports []u8 or [:sentinel]u8 types, you passed in: " ++ @typeName(@TypeOf(slice))); - if (p.sentinel_ptr == null) break :brk slice; - // Ensure we include the sentinel value - break :brk slice[0 .. slice.len + 1]; - }, - else => @compileError("This function only supports []u8 or [:sentinel]u8 types, you passed in: " ++ @typeName(@TypeOf(slice))), - }, - else => @compileError("This function only supports []u8 or [:sentinel]u8 types, you passed in: " ++ @typeName(@TypeOf(slice))), - }; - // Empty slice usually means invalid pointer - if (ptr.len == 0) return false; - const state = scope.internal_state; - state.mutex.lock(); - defer state.mutex.unlock(); - return trackFreeAssumeLocked(state, ptr, ret_addr orelse @returnAddress()); -} - -pub fn setPointerExtra(scope: Self, ptr: *anyopaque, extra: Extra) void { - if (comptime !enabled) return; - const state = scope.internal_state; - state.mutex.lock(); - defer state.mutex.unlock(); - const allocation = state.allocations.getPtr(ptr) orelse - @panic("Pointer not owned by allocation scope"); - allocation.extra = extra; -} - -pub inline fn downcast(a: Allocator) ?Self { - return if (enabled and a.vtable == &vtable) - .{ .internal_state = @ptrCast(@alignCast(a.ptr)) } - else - null; -} - -const std = @import("std"); -const Allocator = std.mem.Allocator; - -const bun = @import("bun"); -const Output = bun.Output; -const StoredTrace = bun.crash_handler.StoredTrace; diff --git a/src/allocators/MimallocArena.zig b/src/allocators/MimallocArena.zig index 75a7432ca5..0588a34821 100644 --- a/src/allocators/MimallocArena.zig +++ b/src/allocators/MimallocArena.zig @@ -1,29 +1,95 @@ +//! This type is a `GenericAllocator`; see `src/allocators.zig`. + const Self = @This(); -heap: HeapPtr, +#heap: if (safety_checks) Owned(*DebugHeap) else *mimalloc.Heap, -const HeapPtr = if (safety_checks) *DebugHeap else *mimalloc.Heap; +/// Uses the default thread-local heap. This type is zero-sized. +/// +/// This type is a `GenericAllocator`; see `src/allocators.zig`. +pub const Default = struct { + pub fn allocator(self: Default) std.mem.Allocator { + _ = self; + return Borrowed.getDefault().allocator(); + } +}; + +/// Borrowed version of `MimallocArena`, returned by `MimallocArena.borrow`. +/// Using this type makes it clear who actually owns the `MimallocArena`, and prevents +/// `deinit` from being called twice. +/// +/// This type is a `GenericAllocator`; see `src/allocators.zig`. 
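+///
+/// For example, a sketch of a function that uses the heap without owning it
+/// (the function name is hypothetical):
+///
+/// ```
+/// fn fillBuffer(arena: MimallocArena.Borrowed) ![]u8 {
+///     return arena.allocator().alloc(u8, 1024);
+/// }
+/// ```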
+pub const Borrowed = struct { + #heap: BorrowedHeap, + + pub fn allocator(self: Borrowed) std.mem.Allocator { + return .{ .ptr = self.#heap, .vtable = &c_allocator_vtable }; + } + + pub fn getDefault() Borrowed { + return .{ .#heap = getThreadHeap() }; + } + + pub fn gc(self: Borrowed) void { + mimalloc.mi_heap_collect(self.getMimallocHeap(), false); + } + + pub fn helpCatchMemoryIssues(self: Borrowed) void { + if (comptime bun.FeatureFlags.help_catch_memory_issues) { + self.gc(); + bun.mimalloc.mi_collect(false); + } + } + + pub fn ownsPtr(self: Borrowed, ptr: *const anyopaque) bool { + return mimalloc.mi_heap_check_owned(self.getMimallocHeap(), ptr); + } + + fn fromOpaque(ptr: *anyopaque) Borrowed { + return .{ .#heap = @ptrCast(@alignCast(ptr)) }; + } + + fn getMimallocHeap(self: Borrowed) *mimalloc.Heap { + return if (comptime safety_checks) self.#heap.inner else self.#heap; + } + + fn assertThreadLock(self: Borrowed) void { + if (comptime safety_checks) self.#heap.thread_lock.assertLocked(); + } + + fn alignedAlloc(self: Borrowed, len: usize, alignment: Alignment) ?[*]u8 { + log("Malloc: {d}\n", .{len}); + + const heap = self.getMimallocHeap(); + const ptr: ?*anyopaque = if (mimalloc.mustUseAlignedAlloc(alignment)) + mimalloc.mi_heap_malloc_aligned(heap, len, alignment.toByteUnits()) + else + mimalloc.mi_heap_malloc(heap, len); + + if (comptime bun.Environment.isDebug) { + const usable = mimalloc.mi_malloc_usable_size(ptr); + if (usable < len) { + std.debug.panic("mimalloc: allocated size is too small: {d} < {d}", .{ usable, len }); + } + } + + return if (ptr) |p| + @as([*]u8, @ptrCast(p)) + else + null; + } +}; + +const BorrowedHeap = if (safety_checks) *DebugHeap else *mimalloc.Heap; const DebugHeap = struct { inner: *mimalloc.Heap, thread_lock: bun.safety.ThreadLock, }; -fn getMimallocHeap(self: Self) *mimalloc.Heap { - return if (comptime safety_checks) self.heap.inner else self.heap; -} - -fn fromOpaque(ptr: *anyopaque) Self { - return .{ .heap = bun.cast(HeapPtr, ptr) }; -} - -fn assertThreadLock(self: Self) void { - if (comptime safety_checks) self.heap.thread_lock.assertLocked(); -} - threadlocal var thread_heap: if (safety_checks) ?DebugHeap else void = if (safety_checks) null; -fn getThreadHeap() HeapPtr { +fn getThreadHeap() BorrowedHeap { if (comptime !safety_checks) return mimalloc.mi_heap_get_default(); if (thread_heap == null) { thread_heap = .{ @@ -36,23 +102,27 @@ fn getThreadHeap() HeapPtr { const log = bun.Output.scoped(.mimalloc, .hidden); +pub fn allocator(self: Self) std.mem.Allocator { + return self.borrow().allocator(); +} + +pub fn borrow(self: Self) Borrowed { + return .{ .#heap = if (comptime safety_checks) self.#heap.get() else self.#heap }; +} + /// Internally, mimalloc calls mi_heap_get_default() /// to get the default heap. /// It uses pthread_getspecific to do that. 
/// We can save those extra calls if we just do it once in here -pub fn getThreadLocalDefault() Allocator { - return Allocator{ .ptr = getThreadHeap(), .vtable = &c_allocator_vtable }; +pub fn getThreadLocalDefault() std.mem.Allocator { + return Borrowed.getDefault().allocator(); } -pub fn backingAllocator(_: Self) Allocator { +pub fn backingAllocator(_: Self) std.mem.Allocator { return getThreadLocalDefault(); } -pub fn allocator(self: Self) Allocator { - return Allocator{ .ptr = self.heap, .vtable = &c_allocator_vtable }; -} - -pub fn dumpThreadStats(_: *Self) void { +pub fn dumpThreadStats(_: Self) void { const dump_fn = struct { pub fn dump(textZ: [*:0]const u8, _: ?*anyopaque) callconv(.C) void { const text = bun.span(textZ); @@ -63,7 +133,7 @@ pub fn dumpThreadStats(_: *Self) void { bun.Output.flush(); } -pub fn dumpStats(_: *Self) void { +pub fn dumpStats(_: Self) void { const dump_fn = struct { pub fn dump(textZ: [*:0]const u8, _: ?*anyopaque) callconv(.C) void { const text = bun.span(textZ); @@ -75,9 +145,9 @@ pub fn dumpStats(_: *Self) void { } pub fn deinit(self: *Self) void { - const mimalloc_heap = self.getMimallocHeap(); + const mimalloc_heap = self.borrow().getMimallocHeap(); if (comptime safety_checks) { - bun.destroy(self.heap); + self.#heap.deinit(); } mimalloc.mi_heap_destroy(mimalloc_heap); self.* = undefined; @@ -85,70 +155,43 @@ pub fn deinit(self: *Self) void { pub fn init() Self { const mimalloc_heap = mimalloc.mi_heap_new() orelse bun.outOfMemory(); - const heap = if (comptime safety_checks) - bun.new(DebugHeap, .{ - .inner = mimalloc_heap, - .thread_lock = .initLocked(), - }) - else - mimalloc_heap; - return .{ .heap = heap }; + if (comptime !safety_checks) return .{ .#heap = mimalloc_heap }; + const heap: Owned(*DebugHeap) = .new(.{ + .inner = mimalloc_heap, + .thread_lock = .initLocked(), + }); + return .{ .#heap = heap }; } pub fn gc(self: Self) void { - mimalloc.mi_heap_collect(self.getMimallocHeap(), false); + self.borrow().gc(); } -pub inline fn helpCatchMemoryIssues(self: Self) void { - if (comptime bun.FeatureFlags.help_catch_memory_issues) { - self.gc(); - bun.mimalloc.mi_collect(false); - } +pub fn helpCatchMemoryIssues(self: Self) void { + self.borrow().helpCatchMemoryIssues(); } pub fn ownsPtr(self: Self, ptr: *const anyopaque) bool { - return mimalloc.mi_heap_check_owned(self.getMimallocHeap(), ptr); -} - -fn alignedAlloc(self: Self, len: usize, alignment: Alignment) ?[*]u8 { - log("Malloc: {d}\n", .{len}); - - const heap = self.getMimallocHeap(); - const ptr: ?*anyopaque = if (mimalloc.mustUseAlignedAlloc(alignment)) - mimalloc.mi_heap_malloc_aligned(heap, len, alignment.toByteUnits()) - else - mimalloc.mi_heap_malloc(heap, len); - - if (comptime bun.Environment.isDebug) { - const usable = mimalloc.mi_malloc_usable_size(ptr); - if (usable < len) { - std.debug.panic("mimalloc: allocated size is too small: {d} < {d}", .{ usable, len }); - } - } - - return if (ptr) |p| - @as([*]u8, @ptrCast(p)) - else - null; + return self.borrow().ownsPtr(ptr); } fn alignedAllocSize(ptr: [*]u8) usize { return mimalloc.mi_malloc_usable_size(ptr); } -fn alloc(ptr: *anyopaque, len: usize, alignment: Alignment, _: usize) ?[*]u8 { - const self = fromOpaque(ptr); +fn vtable_alloc(ptr: *anyopaque, len: usize, alignment: Alignment, _: usize) ?[*]u8 { + const self: Borrowed = .fromOpaque(ptr); self.assertThreadLock(); - return alignedAlloc(self, len, alignment); + return self.alignedAlloc(len, alignment); } -fn resize(ptr: *anyopaque, buf: []u8, _: Alignment, new_len: usize, _: 
usize) bool { - const self = fromOpaque(ptr); +fn vtable_resize(ptr: *anyopaque, buf: []u8, _: Alignment, new_len: usize, _: usize) bool { + const self: Borrowed = .fromOpaque(ptr); self.assertThreadLock(); return mimalloc.mi_expand(buf.ptr, new_len) != null; } -fn free( +fn vtable_free( _: *anyopaque, buf: []u8, alignment: Alignment, @@ -187,8 +230,8 @@ fn free( /// `ret_addr` is optionally provided as the first return address of the /// allocation call stack. If the value is `0` it means no return address /// has been provided. -fn remap(ptr: *anyopaque, buf: []u8, alignment: Alignment, new_len: usize, _: usize) ?[*]u8 { - const self = fromOpaque(ptr); +fn vtable_remap(ptr: *anyopaque, buf: []u8, alignment: Alignment, new_len: usize, _: usize) ?[*]u8 { + const self: Borrowed = .fromOpaque(ptr); self.assertThreadLock(); const heap = self.getMimallocHeap(); const aligned_size = alignment.toByteUnits(); @@ -196,23 +239,22 @@ fn remap(ptr: *anyopaque, buf: []u8, alignment: Alignment, new_len: usize, _: us return @ptrCast(value); } -pub fn isInstance(allocator_: Allocator) bool { - return allocator_.vtable == &c_allocator_vtable; +pub fn isInstance(alloc: std.mem.Allocator) bool { + return alloc.vtable == &c_allocator_vtable; } -const c_allocator_vtable = Allocator.VTable{ - .alloc = &Self.alloc, - .resize = &Self.resize, - .remap = &Self.remap, - .free = &Self.free, +const c_allocator_vtable = std.mem.Allocator.VTable{ + .alloc = vtable_alloc, + .resize = vtable_resize, + .remap = vtable_remap, + .free = vtable_free, }; const std = @import("std"); +const Alignment = std.mem.Alignment; const bun = @import("bun"); const assert = bun.assert; const mimalloc = bun.mimalloc; +const Owned = bun.ptr.Owned; const safety_checks = bun.Environment.ci_assert; - -const Alignment = std.mem.Alignment; -const Allocator = std.mem.Allocator; diff --git a/src/allocators/NullableAllocator.zig b/src/allocators/NullableAllocator.zig index 6ebe10d98b..e733d96414 100644 --- a/src/allocators/NullableAllocator.zig +++ b/src/allocators/NullableAllocator.zig @@ -4,8 +4,7 @@ const NullableAllocator = @This(); ptr: *anyopaque = undefined, // Utilize the null pointer optimization on the vtable instead of -// the regular ptr because some allocator implementations might tag their -// `ptr` property. +// the regular `ptr` because `ptr` may be undefined. vtable: ?*const std.mem.Allocator.VTable = null, pub inline fn init(allocator: ?std.mem.Allocator) NullableAllocator { diff --git a/src/allocators/allocation_scope.zig b/src/allocators/allocation_scope.zig new file mode 100644 index 0000000000..2bc93fd3be --- /dev/null +++ b/src/allocators/allocation_scope.zig @@ -0,0 +1,555 @@ +//! AllocationScope wraps another allocator, providing leak and invalid free assertions. +//! It also allows measuring how much memory a scope has allocated. + +const allocation_scope = @This(); + +/// An allocation scope with a dynamically typed parent allocator. Prefer using a concrete type, +/// like `AllocationScopeIn(bun.DefaultAllocator)`. 
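+///
+/// A minimal sketch of the leak-checking workflow:
+///
+/// ```
+/// var scope: AllocationScope = .init(bun.default_allocator);
+/// defer scope.deinit(); // when enabled, reports and panics on leaked allocations
+/// const data = try scope.allocator().alloc(u8, 64);
+/// defer scope.allocator().free(data);
+/// ```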
+pub const AllocationScope = AllocationScopeIn(std.mem.Allocator); + +pub const Allocation = struct { + allocated_at: StoredTrace, + len: usize, + extra: Extra, +}; + +pub const Free = struct { + allocated_at: StoredTrace, + freed_at: StoredTrace, +}; + +pub const Extra = struct { + ptr: *anyopaque, + vtable: ?*const VTable, + + pub const none: Extra = .{ .ptr = undefined, .vtable = null }; + + pub const VTable = struct { + onAllocationLeak: *const fn (*anyopaque, data: []u8) void, + }; +}; + +pub const Stats = struct { + total_memory_allocated: usize, + num_allocations: usize, +}; + +pub const FreeError = error{ + /// Tried to free memory that wasn't allocated by this `AllocationScope`, or was already freed. + NotAllocated, +}; + +pub const enabled = bun.Environment.enableAllocScopes; +pub const max_free_tracking = 2048 - 1; + +const History = struct { + const Self = @This(); + + total_memory_allocated: usize = 0, + /// Allocated by `State.parent`. + allocations: std.AutoHashMapUnmanaged([*]const u8, Allocation) = .empty, + /// Allocated by `State.parent`. + frees: std.AutoArrayHashMapUnmanaged([*]const u8, Free) = .empty, + /// Once `frees` fills up, entries are overwritten from start to end. + free_overwrite_index: std.math.IntFittingRange(0, max_free_tracking + 1) = 0, + + /// `allocator` should be `State.parent`. + fn deinit(self: *Self, allocator: std.mem.Allocator) void { + self.allocations.deinit(allocator); + self.frees.deinit(allocator); + self.* = undefined; + } +}; + +const LockedState = struct { + const Self = @This(); + + /// Should be the same as `State.parent`. + parent: std.mem.Allocator, + history: *History, + + fn alloc(self: Self, len: usize, alignment: std.mem.Alignment, ret_addr: usize) bun.OOM![*]u8 { + const result = self.parent.rawAlloc(len, alignment, ret_addr) orelse + return error.OutOfMemory; + errdefer self.parent.rawFree(result[0..len], alignment, ret_addr); + try self.trackAllocation(result[0..len], ret_addr, .none); + return result; + } + + fn free(self: Self, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void { + const success = if (self.trackFree(buf, ret_addr)) + true + else |err| switch (err) { + error.NotAllocated => false, + }; + if (success or bun.Environment.enable_asan) { + self.parent.rawFree(buf, alignment, ret_addr); + } + if (!success) { + // If asan did not catch the free, panic now. 
+            std.debug.panic("Invalid free: {*}", .{buf});
+        }
+    }
+
+    fn assertOwned(self: Self, ptr: anytype) void {
+        const cast_ptr: [*]const u8 = @ptrCast(switch (@typeInfo(@TypeOf(ptr)).pointer.size) {
+            .c, .one, .many => ptr,
+            .slice => if (ptr.len > 0) ptr.ptr else return,
+        });
+        if (!self.history.allocations.contains(cast_ptr)) {
+            @panic("this pointer was not owned by the allocation scope");
+        }
+    }
+
+    fn assertUnowned(self: Self, ptr: anytype) void {
+        const cast_ptr: [*]const u8 = @ptrCast(switch (@typeInfo(@TypeOf(ptr)).pointer.size) {
+            .c, .one, .many => ptr,
+            .slice => if (ptr.len > 0) ptr.ptr else return,
+        });
+        if (self.history.allocations.getPtr(cast_ptr)) |owned| {
+            Output.warn("Owned pointer allocated here:");
+            bun.crash_handler.dumpStackTrace(
+                owned.allocated_at.trace(),
+                trace_limits,
+            );
+            @panic("this pointer was owned by the allocation scope when it was not supposed to be");
+        }
+    }
+
+    fn trackAllocation(self: Self, buf: []const u8, ret_addr: usize, extra: Extra) bun.OOM!void {
+        const trace = StoredTrace.capture(ret_addr);
+        try self.history.allocations.putNoClobber(self.parent, buf.ptr, .{
+            .allocated_at = trace,
+            .len = buf.len,
+            .extra = extra,
+        });
+        self.history.total_memory_allocated += buf.len;
+    }
+
+    fn trackFree(self: Self, buf: []const u8, ret_addr: usize) FreeError!void {
+        const entry = self.history.allocations.fetchRemove(buf.ptr) orelse {
+            Output.errGeneric("Invalid free, pointer {any}, len {d}", .{ buf.ptr, buf.len });
+
+            if (self.history.frees.getPtr(buf.ptr)) |free_entry| {
+                Output.printErrorln("Pointer allocated here:", .{});
+                bun.crash_handler.dumpStackTrace(free_entry.allocated_at.trace(), trace_limits);
+                Output.printErrorln("Pointer first freed here:", .{});
+                bun.crash_handler.dumpStackTrace(free_entry.freed_at.trace(), free_trace_limits);
+            }
+
+            // do not panic because address sanitizer will catch this case better.
+            // the log message is in case there is a situation where address
+            // sanitizer does not catch the invalid free.
+            return error.NotAllocated;
+        };
+
+        self.history.total_memory_allocated -= entry.value.len;
+
+        // Store a limited amount of free entries
+        if (self.history.frees.count() >= max_free_tracking) {
+            const i = self.history.free_overwrite_index;
+            self.history.free_overwrite_index =
+                @mod(self.history.free_overwrite_index + 1, max_free_tracking);
+            self.history.frees.swapRemoveAt(i);
+        }
+
+        self.history.frees.put(self.parent, buf.ptr, .{
+            .allocated_at = entry.value.allocated_at,
+            .freed_at = StoredTrace.capture(ret_addr),
+        }) catch |err| bun.handleOom(err);
+    }
+};
+
+const State = struct {
+    const Self = @This();
+
+    /// This field should not be modified. Therefore, it doesn't need to be protected by the mutex.
+    parent: std.mem.Allocator,
+    history: bun.threading.Guarded(History),
+
+    fn init(parent_alloc: std.mem.Allocator) Self {
+        return .{
+            .parent = parent_alloc,
+            .history = .init(.{}),
+        };
+    }
+
+    fn lock(self: *Self) LockedState {
+        return .{
+            .parent = self.parent,
+            .history = self.history.lock(),
+        };
+    }
+
+    fn unlock(self: *Self) void {
+        self.history.unlock();
+    }
+
+    fn deinit(self: *Self) void {
+        defer self.* = undefined;
+        var history = self.history.intoUnprotected();
+        defer history.deinit(self.parent);
+
+        const count = history.allocations.count();
+        if (count == 0) return;
+        Output.errGeneric("Allocation scope leaked {d} allocations ({})", .{
+            count,
+            bun.fmt.size(history.total_memory_allocated, .{}),
+        });
+
+        var it = history.allocations.iterator();
+        var n: usize = 0;
+        while (it.next()) |entry| : (n += 1) {
+            if (n >= 10) {
+                Output.prettyErrorln("(only showing first 10 leaks)", .{});
+                break;
+            }
+            Output.prettyErrorln(
+                "- {any}, len {d}, at:",
+                .{ entry.key_ptr.*, entry.value_ptr.len },
+            );
+            bun.crash_handler.dumpStackTrace(
+                entry.value_ptr.allocated_at.trace(),
+                trace_limits,
+            );
+            const extra = entry.value_ptr.extra;
+            if (extra.vtable) |extra_vtable| {
+                extra_vtable.onAllocationLeak(
+                    extra.ptr,
+                    @constCast(entry.key_ptr.*[0..entry.value_ptr.len]),
+                );
+            }
+        }
+
+        Output.panic(
+            "Allocation scope leaked {}",
+            .{bun.fmt.size(history.total_memory_allocated, .{})},
+        );
+    }
+
+    fn trackExternalAllocation(self: *Self, ptr: []const u8, ret_addr: ?usize, extra: Extra) void {
+        const locked = self.lock();
+        defer self.unlock();
+        locked.trackAllocation(ptr, ret_addr orelse @returnAddress(), extra) catch |err|
+            bun.handleOom(err);
+    }
+
+    fn trackExternalFree(self: *Self, slice: anytype, ret_addr: ?usize) FreeError!void {
+        const invalidType = struct {
+            fn invalidType() noreturn {
+                @compileError(std.fmt.comptimePrint(
+                    "This function only supports []u8 or [:sentinel]u8 types, you passed in: {s}",
+                    .{@typeName(@TypeOf(slice))},
+                ));
+            }
+        }.invalidType;
+
+        const ptr: []const u8 = switch (@typeInfo(@TypeOf(slice))) {
+            .pointer => |p| switch (p.size) {
+                .slice => brk: {
+                    if (p.child != u8) invalidType();
+                    if (p.sentinel_ptr == null) break :brk slice;
+                    // Ensure we include the sentinel value
+                    break :brk slice[0 .. slice.len + 1];
+                },
+                else => invalidType(),
+            },
+            else => invalidType(),
+        };
+        // Empty slice usually means invalid pointer
+        if (ptr.len == 0) return;
+        const locked = self.lock();
+        defer self.unlock();
+        return locked.trackFree(ptr, ret_addr orelse @returnAddress());
+    }
+
+    fn setPointerExtra(self: *Self, ptr: *anyopaque, extra: Extra) void {
+        const locked = self.lock();
+        defer self.unlock();
+        const allocation = locked.history.allocations.getPtr(@ptrCast(ptr)) orelse
+            @panic("Pointer not owned by allocation scope");
+        allocation.extra = extra;
+    }
+};
+
+/// An allocation scope that uses a specific kind of parent allocator.
+///
+/// This type is a `GenericAllocator`; see `src/allocators.zig`.
+pub fn AllocationScopeIn(comptime Allocator: type) type {
+    const BorrowedAllocator = bun.allocators.Borrowed(Allocator);
+
+    // Borrowed version of `AllocationScope`. Access this type as `AllocationScope.Borrowed`.
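+    //
+    // For example, `DevServer` in this change owns an `AllocationScopeIn(bun.DefaultAllocator)`
+    // and hands out `allocation_scope.borrow()` (aliased as `DevAllocator`) to its subsystems.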
+ const BorrowedScope = struct { + const Self = @This(); + + #parent: BorrowedAllocator, + #state: if (enabled) *State else void, + + pub fn allocator(self: Self) std.mem.Allocator { + return if (comptime enabled) + .{ .ptr = self.#state, .vtable = &vtable } + else + bun.allocators.asStd(self.#parent); + } + + pub fn parent(self: Self) BorrowedAllocator { + return self.#parent; + } + + /// Deinitializes a borrowed allocation scope. This does not deinitialize the + /// `AllocationScope` itself; only the owner of the `AllocationScope` should do that. + /// + /// This method doesn't need to be called unless `bun.allocators.Borrowed(Allocator)` has + /// a `deinit` method. + pub fn deinit(self: *Self) void { + bun.memory.deinit(&self.#parent); + self.* = undefined; + } + + pub fn stats(self: Self) Stats { + if (comptime !enabled) @compileError("AllocationScope must be enabled"); + const state = self.#state.lock(); + defer self.#state.unlock(); + return .{ + .total_memory_allocated = state.history.total_memory_allocated, + .num_allocations = state.history.allocations.count(), + }; + } + + pub fn assertOwned(self: Self, ptr: anytype) void { + if (comptime !enabled) return; + const state = self.#state.lock(); + defer self.#state.unlock(); + state.assertOwned(ptr); + } + + pub fn assertUnowned(self: Self, ptr: anytype) void { + if (comptime !enabled) return; + const state = self.#state.lock(); + defer self.#state.unlock(); + state.assertUnowned(ptr); + } + + pub fn trackExternalAllocation( + self: Self, + ptr: []const u8, + ret_addr: ?usize, + extra: Extra, + ) void { + if (comptime enabled) self.#state.trackExternalAllocation(ptr, ret_addr, extra); + } + + pub fn trackExternalFree(self: Self, slice: anytype, ret_addr: ?usize) FreeError!void { + return if (comptime enabled) self.#state.trackExternalFree(slice, ret_addr); + } + + pub fn setPointerExtra(self: Self, ptr: *anyopaque, extra: Extra) void { + if (comptime enabled) self.#state.setPointerExtra(ptr, extra); + } + + fn downcastImpl( + std_alloc: std.mem.Allocator, + parent_alloc: if (Allocator == std.mem.Allocator) + ?BorrowedAllocator + else + BorrowedAllocator, + ) Self { + const state = if (comptime enabled) blk: { + bun.assertf( + std_alloc.vtable == &vtable, + "allocator is not an allocation scope (has vtable {*})", + .{std_alloc.vtable}, + ); + const state: *State = @ptrCast(@alignCast(std_alloc.ptr)); + break :blk state; + }; + + const current_std_parent = if (comptime enabled) + state.parent + else + std_alloc; + + const new_parent = if (comptime Allocator == std.mem.Allocator) + parent_alloc orelse current_std_parent + else + parent_alloc; + + const new_std_parent = bun.allocators.asStd(new_parent); + bun.safety.alloc.assertEqFmt( + current_std_parent, + new_std_parent, + "tried to downcast allocation scope with wrong parent allocator", + .{}, + ); + return .{ .#parent = new_parent, .#state = state }; + } + + /// Converts an `std.mem.Allocator` into a borrowed allocation scope, with a given parent + /// allocator. + /// + /// Requirements: + /// + /// * `std_alloc` must have come from `AllocationScopeIn(Allocator).allocator` (or the + /// equivalent method on a `Borrowed` instance). + /// + /// * `parent_alloc` must be equivalent to the (borrowed) parent allocator of the original + /// allocation scope (that is, the return value of `AllocationScopeIn(Allocator).parent`). + /// In particular, `bun.allocators.asStd` must return the same value for each allocator. 
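+        ///
+        /// A sketch of a round trip, assuming `scope` is an
+        /// `AllocationScopeIn(bun.DefaultAllocator)`:
+        ///
+        /// ```
+        /// const std_alloc = scope.allocator();
+        /// // ...later, recover a borrowed scope from the type-erased allocator:
+        /// const borrowed = @TypeOf(scope).Borrowed.downcastIn(std_alloc, scope.parent());
+        /// ```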
+ pub fn downcastIn(std_alloc: std.mem.Allocator, parent_alloc: BorrowedAllocator) Self { + return downcastImpl(std_alloc, parent_alloc); + } + + /// Converts an `std.mem.Allocator` into a borrowed allocation scope. + /// + /// Requirements: + /// + /// * `std_alloc` must have come from `AllocationScopeIn(Allocator).allocator` (or the + /// equivalent method on a `Borrowed` instance). + /// + /// * One of the following must be true: + /// + /// 1. `Allocator` is `std.mem.Allocator`. + /// + /// 2. The parent allocator of the original allocation scope is equivalent to a + /// default-initialized borrowed `Allocator`, as returned by + /// `bun.memory.initDefault(bun.allocators.Borrowed(Allocator))`. This is the case + /// for `bun.DefaultAllocator`. + pub fn downcast(std_alloc: std.mem.Allocator) Self { + return downcastImpl(std_alloc, if (comptime Allocator == std.mem.Allocator) + null + else + bun.memory.initDefault(BorrowedAllocator)); + } + }; + + return struct { + const Self = @This(); + + #parent: Allocator, + #state: if (Self.enabled) Owned(*State) else void, + + pub const enabled = allocation_scope.enabled; + + /// Borrowed version of `AllocationScope`, returned by `AllocationScope.borrow`. + /// Using this type makes it clear who actually owns the `AllocationScope`, and prevents + /// `deinit` from being called twice. + /// + /// This type is a `GenericAllocator`; see `src/allocators.zig`. + pub const Borrowed = BorrowedScope; + + pub fn init(parent_alloc: Allocator) Self { + return .{ + .#parent = parent_alloc, + .#state = if (comptime Self.enabled) .new(.init( + bun.allocators.asStd(parent_alloc), + )), + }; + } + + pub fn initDefault() Self { + return .init(bun.memory.initDefault(Allocator)); + } + + /// Borrows this `AllocationScope`. Use this method instead of copying `self`, as that makes + /// it hard to know who owns the `AllocationScope`, and could lead to `deinit` being called + /// twice. + pub fn borrow(self: Self) Borrowed { + return .{ + .#parent = self.parent(), + .#state = if (comptime Self.enabled) self.#state.get(), + }; + } + + pub fn allocator(self: Self) std.mem.Allocator { + return self.borrow().allocator(); + } + + pub fn deinit(self: *Self) void { + bun.memory.deinit(&self.#parent); + if (comptime Self.enabled) self.#state.deinit(); + self.* = undefined; + } + + pub fn parent(self: Self) BorrowedAllocator { + return bun.allocators.borrow(self.#parent); + } + + pub fn stats(self: Self) Stats { + return self.borrow().stats(); + } + + pub fn assertOwned(self: Self, ptr: anytype) void { + self.borrow().assertOwned(ptr); + } + + pub fn assertUnowned(self: Self, ptr: anytype) void { + self.borrow().assertUnowned(ptr); + } + + /// Track an arbitrary pointer. Extra data can be stored in the allocation, which will be + /// printed when a leak is detected. + pub fn trackExternalAllocation( + self: Self, + ptr: []const u8, + ret_addr: ?usize, + extra: Extra, + ) void { + self.borrow().trackExternalAllocation(ptr, ret_addr, extra); + } + + /// Call when the pointer from `trackExternalAllocation` is freed. 
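+        /// Returns `error.NotAllocated` if the pointer was not tracked by this scope.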
+        pub fn trackExternalFree(self: Self, slice: anytype, ret_addr: ?usize) FreeError!void {
+            return self.borrow().trackExternalFree(slice, ret_addr);
+        }
+
+        pub fn setPointerExtra(self: Self, ptr: *anyopaque, extra: Extra) void {
+            return self.borrow().setPointerExtra(ptr, extra);
+        }
+    };
+}
+
+const vtable: std.mem.Allocator.VTable = .{
+    .alloc = vtable_alloc,
+    .resize = std.mem.Allocator.noResize,
+    .remap = std.mem.Allocator.noRemap,
+    .free = vtable_free,
+};
+
+// Smaller traces since AllocationScope prints so many
+pub const trace_limits: bun.crash_handler.WriteStackTraceLimits = .{
+    .frame_count = 6,
+    .stop_at_jsc_llint = true,
+    .skip_stdlib = true,
+};
+
+pub const free_trace_limits: bun.crash_handler.WriteStackTraceLimits = .{
+    .frame_count = 3,
+    .stop_at_jsc_llint = true,
+    .skip_stdlib = true,
+};
+
+fn vtable_alloc(ctx: *anyopaque, len: usize, alignment: std.mem.Alignment, ret_addr: usize) ?[*]u8 {
+    const raw_state: *State = @ptrCast(@alignCast(ctx));
+    const state = raw_state.lock();
+    defer raw_state.unlock();
+    return state.alloc(len, alignment, ret_addr) catch null;
+}
+
+fn vtable_free(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void {
+    const raw_state: *State = @ptrCast(@alignCast(ctx));
+    const state = raw_state.lock();
+    defer raw_state.unlock();
+    state.free(buf, alignment, ret_addr);
+}
+
+pub inline fn isInstance(allocator: std.mem.Allocator) bool {
+    return (comptime enabled) and allocator.vtable == &vtable;
+}
+
+const std = @import("std");
+
+const bun = @import("bun");
+const Output = bun.Output;
+const Owned = bun.ptr.Owned;
+const StoredTrace = bun.crash_handler.StoredTrace;
diff --git a/src/allocators/maybe_owned.zig b/src/allocators/maybe_owned.zig
new file mode 100644
index 0000000000..efedbf39da
--- /dev/null
+++ b/src/allocators/maybe_owned.zig
@@ -0,0 +1,112 @@
+/// This type can be used with `bun.ptr.Owned` to model "maybe owned" pointers:
+///
+/// ```
+/// // Either owned by the default allocator, or borrowed
+/// const MaybeOwnedFoo = bun.ptr.Owned(*Foo, bun.allocators.MaybeOwned(bun.DefaultAllocator));
+///
+/// var owned_foo: MaybeOwnedFoo = .new(makeFoo());
+/// var borrowed_foo: MaybeOwnedFoo = .fromRawIn(some_foo_ptr, .initBorrowed());
+///
+/// owned_foo.deinit(); // calls `Foo.deinit` and frees the memory
+/// borrowed_foo.deinit(); // no-op
+/// ```
+///
+/// This type is a `GenericAllocator`; see `src/allocators.zig`.
+pub fn MaybeOwned(comptime Allocator: type) type {
+    return struct {
+        const Self = @This();
+
+        _parent: bun.allocators.Nullable(Allocator),
+
+        /// Same as `.initBorrowed()`. This allocator cannot be used to allocate memory; a panic
+        /// will occur.
+        pub const borrowed: Self = .initBorrowed();
+
+        /// Creates a `MaybeOwned` allocator that owns memory.
+        ///
+        /// Allocations are forwarded to a default-initialized `Allocator`.
+        pub fn init() Self {
+            return .initOwned(bun.memory.initDefault(Allocator));
+        }
+
+        /// Creates a `MaybeOwned` allocator that owns memory, and forwards to a specific
+        /// allocator.
+        ///
+        /// Allocations are forwarded to `parent_alloc`.
+        pub fn initOwned(parent_alloc: Allocator) Self {
+            return .initRaw(parent_alloc);
+        }
+
+        /// Creates a `MaybeOwned` allocator that does not own any memory. This allocator cannot
+        /// be used to allocate new memory (a panic will occur), and its implementation of `free`
+        /// is a no-op.
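+        ///
+        /// For example (a sketch; `some_bytes` is an arbitrary borrowed slice):
+        ///
+        /// ```
+        /// const alloc: MaybeOwned(bun.DefaultAllocator) = .initBorrowed();
+        /// alloc.allocator().free(some_bytes); // no-op: the memory is not owned
+        /// ```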
+ pub fn initBorrowed() Self { + return .initRaw(null); + } + + pub fn deinit(self: *Self) void { + var maybe_parent = self.intoParent(); + if (maybe_parent) |*parent_alloc| { + bun.memory.deinit(parent_alloc); + } + } + + pub fn isOwned(self: Self) bool { + return self.rawParent() != null; + } + + pub fn allocator(self: Self) std.mem.Allocator { + const maybe_parent = self.rawParent(); + return if (maybe_parent) |parent_alloc| + bun.allocators.asStd(parent_alloc) + else + .{ .ptr = undefined, .vtable = &null_vtable }; + } + + const BorrowedParent = bun.allocators.Borrowed(Allocator); + + pub fn parent(self: Self) ?BorrowedParent { + const maybe_parent = self.rawParent(); + return if (maybe_parent) |parent_alloc| + bun.allocators.borrow(parent_alloc) + else + null; + } + + pub fn intoParent(self: *Self) ?Allocator { + defer self.* = undefined; + return self.rawParent(); + } + + /// Used by smart pointer types and allocator wrappers. See `bun.allocators.borrow`. + pub const Borrowed = MaybeOwned(BorrowedParent); + + pub fn borrow(self: Self) Borrowed { + return .{ ._parent = bun.allocators.initNullable(BorrowedParent, self.parent()) }; + } + + fn initRaw(parent_alloc: ?Allocator) Self { + return .{ ._parent = bun.allocators.initNullable(Allocator, parent_alloc) }; + } + + fn rawParent(self: Self) ?Allocator { + return bun.allocators.unpackNullable(Allocator, self._parent); + } + }; +} + +fn nullAlloc(ptr: *anyopaque, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8 { + _ = .{ ptr, len, alignment, ret_addr }; + std.debug.panic("cannot allocate with a borrowed `MaybeOwned` allocator", .{}); +} + +const null_vtable: std.mem.Allocator.VTable = .{ + .alloc = nullAlloc, + .resize = std.mem.Allocator.noResize, + .remap = std.mem.Allocator.noRemap, + .free = std.mem.Allocator.noFree, +}; + +const bun = @import("bun"); +const std = @import("std"); +const Alignment = std.mem.Alignment; diff --git a/src/bake/DevServer.zig b/src/bake/DevServer.zig index aec224d777..6a2110b5de 100644 --- a/src/bake/DevServer.zig +++ b/src/bake/DevServer.zig @@ -39,7 +39,8 @@ magic: if (Environment.isDebug) enum(u128) { valid = 0x1ffd363f121f5c12 } else enum { valid } = .valid, -allocation_scope: if (AllocationScope.enabled) AllocationScope else void, +/// No overhead in release builds. +allocation_scope: AllocationScope, /// Absolute path to project root directory. For the HMR /// runtime, its module IDs are strings relative to this. 
root: []const u8, @@ -267,8 +268,7 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { const separate_ssr_graph = if (options.framework.server_components) |sc| sc.separate_ssr_graph else false; const dev = bun.new(DevServer, .{ - .allocation_scope = if (comptime AllocationScope.enabled) - AllocationScope.init(bun.default_allocator), + .allocation_scope = .initDefault(), .root = options.root, .vm = options.vm, .server = null, @@ -679,15 +679,17 @@ pub fn deinit(dev: *DevServer) void { bun.destroy(dev); } +const AllocationScope = bun.allocators.AllocationScopeIn(bun.DefaultAllocator); +pub const DevAllocator = AllocationScope.Borrowed; + pub fn allocator(dev: *const DevServer) Allocator { - return dev.dev_allocator().get(); + return dev.allocation_scope.allocator(); } pub fn dev_allocator(dev: *const DevServer) DevAllocator { - return .{ .maybe_scope = dev.allocation_scope }; + return dev.allocation_scope.borrow(); } -pub const DevAllocator = @import("./DevServer/DevAllocator.zig"); pub const MemoryCost = @import("./DevServer/memory_cost.zig"); pub const memoryCost = MemoryCost.memoryCost; pub const memoryCostDetailed = MemoryCost.memoryCostDetailed; @@ -3001,10 +3003,11 @@ fn printMemoryLine(dev: *DevServer) void { return; } if (!debug.isVisible()) return; + const stats = dev.allocation_scope.stats(); Output.prettyErrorln("DevServer tracked {}, measured: {} ({}), process: {}", .{ bun.fmt.size(dev.memoryCost(), .{}), - dev.allocation_scope.numAllocations(), - bun.fmt.size(dev.allocation_scope.total(), .{}), + stats.num_allocations, + bun.fmt.size(stats.total_memory_allocated, .{}), bun.fmt.size(bun.sys.selfProcessMemoryUsage() orelse 0, .{}), }); } @@ -3296,7 +3299,7 @@ pub fn writeMemoryVisualizerMessage(dev: *DevServer, payload: *std.ArrayList(u8) .assets = @truncate(cost.assets), .other = @truncate(cost.other), .devserver_tracked = if (comptime AllocationScope.enabled) - @truncate(dev.allocation_scope.total()) + @truncate(dev.allocation_scope.stats().total_memory_allocated) else 0, .process_used = @truncate(bun.sys.selfProcessMemoryUsage() orelse 0), @@ -4068,7 +4071,6 @@ pub fn getDeinitCountForTesting() usize { } const bun = @import("bun"); -const AllocationScope = bun.AllocationScope; const Environment = bun.Environment; const Output = bun.Output; const SourceMap = bun.sourcemap; diff --git a/src/bake/DevServer/DevAllocator.zig b/src/bake/DevServer/DevAllocator.zig deleted file mode 100644 index 626e392dc3..0000000000 --- a/src/bake/DevServer/DevAllocator.zig +++ /dev/null @@ -1,19 +0,0 @@ -const Self = @This(); - -maybe_scope: if (AllocationScope.enabled) AllocationScope else void, - -pub fn get(self: Self) Allocator { - return if (comptime AllocationScope.enabled) - self.maybe_scope.allocator() - else - bun.default_allocator; -} - -pub fn scope(self: Self) ?AllocationScope { - return if (comptime AllocationScope.enabled) self.maybe_scope else null; -} - -const bun = @import("bun"); -const std = @import("std"); -const AllocationScope = bun.allocators.AllocationScope; -const Allocator = std.mem.Allocator; diff --git a/src/bake/DevServer/IncrementalGraph.zig b/src/bake/DevServer/IncrementalGraph.zig index fe54b169ea..8d5f56ee85 100644 --- a/src/bake/DevServer/IncrementalGraph.zig +++ b/src/bake/DevServer/IncrementalGraph.zig @@ -314,7 +314,8 @@ pub fn IncrementalGraph(comptime side: bake.Side) type { bun.assertf(side == .client, "freeFileContent requires client graph", .{}); } if (file.source_map.take()) |ptr| { - ptr.deinit(); + var ptr_mut = ptr; + ptr_mut.deinit(); } defer 
file.content = .unknown; switch (file.content) { @@ -444,7 +445,7 @@ pub fn IncrementalGraph(comptime side: bake.Side) type { g: *Self, ctx: *HotUpdateContext, index: bun.ast.Index, - content: union(enum) { + content_: union(enum) { js: struct { code: JsCode, source_map: ?struct { @@ -456,6 +457,7 @@ pub fn IncrementalGraph(comptime side: bake.Side) type { }, is_ssr_graph: bool, ) !void { + var content = content_; const dev = g.owner(); dev.graph_safety_lock.assertLocked(); @@ -538,20 +540,18 @@ pub fn IncrementalGraph(comptime side: bake.Side) type { }, .source_map = switch (content) { .css => .none, - .js => |js| blk: { + .js => |*js| blk: { // Insert new source map or patch existing empty source map. - if (js.source_map) |source_map| { + if (js.source_map) |*source_map| { bun.assert(html_route_bundle_index == null); // suspect behind #17956 - var chunk = source_map.chunk; - var escaped_source = source_map.escaped_source; - if (chunk.buffer.len() > 0) { + if (source_map.chunk.buffer.len() > 0) { break :blk .{ .some = PackedMap.newNonEmpty( - chunk, - escaped_source.take().?, + source_map.chunk, + source_map.escaped_source.take().?, ) }; } - chunk.buffer.deinit(); - escaped_source.deinit(); + source_map.chunk.buffer.deinit(); + source_map.escaped_source.deinit(); } // Must precompute this. Otherwise, source maps won't have @@ -634,9 +634,8 @@ pub fn IncrementalGraph(comptime side: bake.Side) type { if (content == .js) { try g.current_chunk_parts.append(dev.allocator(), content.js.code); g.current_chunk_len += content.js.code.len; - if (content.js.source_map) |source_map| { - var buffer = source_map.chunk.buffer; - buffer.deinit(); + if (content.js.source_map) |*source_map| { + source_map.chunk.buffer.deinit(); source_map.escaped_source.deinit(); } } @@ -1911,12 +1910,13 @@ pub fn IncrementalGraph(comptime side: bake.Side) type { return @alignCast(@fieldParentPtr(@tagName(side) ++ "_graph", g)); } - fn dev_allocator(g: *Self) DevAllocator { - return g.owner().dev_allocator(); + fn allocator(g: *const Self) Allocator { + return g.dev_allocator().allocator(); } - fn allocator(g: *Self) Allocator { - return g.dev_allocator().get(); + fn dev_allocator(g: *const Self) DevAllocator { + const dev_server: *const DevServer = @constCast(g).owner(); + return dev_server.dev_allocator(); } }; } diff --git a/src/bake/DevServer/PackedMap.zig b/src/bake/DevServer/PackedMap.zig index 1626f3cf25..7c0957f668 100644 --- a/src/bake/DevServer/PackedMap.zig +++ b/src/bake/DevServer/PackedMap.zig @@ -4,7 +4,7 @@ const Self = @This(); /// Allocated by `dev.allocator()`. Access with `.vlq()` /// This is stored to allow lazy construction of source map files. -vlq_: ScopedOwned([]u8), +vlq_: OwnedIn([]u8, DevAllocator), /// The bundler runs quoting on multiple threads, so it only makes /// sense to preserve that effort for concatenation and /// re-concatenation. 
@@ -22,8 +22,9 @@ end_state: struct { pub fn newNonEmpty(chunk: SourceMap.Chunk, escaped_source: Owned([]u8)) bun.ptr.Shared(*Self) { var buffer = chunk.buffer; assert(!buffer.isEmpty()); + const dev_allocator = DevAllocator.downcast(buffer.allocator); return .new(.{ - .vlq_ = .fromDynamic(buffer.toDynamicOwned()), + .vlq_ = .fromRawIn(buffer.toOwnedSlice(), dev_allocator), .escaped_source = escaped_source, .end_state = .{ .original_line = chunk.end_state.original_line, @@ -42,12 +43,12 @@ pub fn memoryCost(self: *const Self) usize { } pub fn vlq(self: *const Self) []const u8 { - return self.vlq_.getConst(); + return self.vlq_.get(); } // TODO: rename to `escapedSource` pub fn quotedContents(self: *const Self) []const u8 { - return self.escaped_source.getConst(); + return self.escaped_source.get(); } comptime { @@ -94,9 +95,10 @@ pub const Shared = union(enum) { }; } - pub fn deinit(self: Shared) void { - switch (self) { - .some => |ptr| ptr.deinit(), + pub fn deinit(self: *Shared) void { + defer self.* = undefined; + switch (self.*) { + .some => |*ptr| ptr.deinit(), else => {}, } } @@ -116,6 +118,7 @@ const SourceMap = bun.sourcemap; const assert = bun.assert; const assert_eql = bun.assert_eql; const Chunk = bun.bundle_v2.Chunk; +const DevAllocator = bun.bake.DevServer.DevAllocator; const Owned = bun.ptr.Owned; -const ScopedOwned = bun.ptr.ScopedOwned; +const OwnedIn = bun.ptr.OwnedIn; diff --git a/src/bake/DevServer/SourceMapStore.zig b/src/bake/DevServer/SourceMapStore.zig index ae840474f0..bd109a00bd 100644 --- a/src/bake/DevServer/SourceMapStore.zig +++ b/src/bake/DevServer/SourceMapStore.zig @@ -261,7 +261,8 @@ pub const Entry = struct { .files = { const files = entry.files.slice(); for (0..files.len) |i| { - files.get(i).deinit(); + var file = files.get(i); + file.deinit(); } entry.files.deinit(entry.allocator()); }, @@ -270,7 +271,7 @@ pub const Entry = struct { } fn allocator(entry: *const Entry) Allocator { - return entry.dev_allocator.get(); + return entry.dev_allocator.allocator(); } }; @@ -305,12 +306,13 @@ pub fn owner(store: *Self) *DevServer { return @alignCast(@fieldParentPtr("source_maps", store)); } -fn dev_allocator(store: *Self) DevAllocator { - return store.owner().dev_allocator(); +fn allocator(store: *Self) Allocator { + return store.dev_allocator().allocator(); } -fn allocator(store: *Self) Allocator { - return store.dev_allocator().get(); +fn dev_allocator(store: *const Self) DevAllocator { + const dev_server: *const DevServer = @constCast(store).owner(); + return dev_server.dev_allocator(); } const PutOrIncrementRefCount = union(enum) { diff --git a/src/bun.zig b/src/bun.zig index 015e3fd554..a991c7806a 100644 --- a/src/bun.zig +++ b/src/bun.zig @@ -10,6 +10,8 @@ pub const Environment = @import("./env.zig"); pub const use_mimalloc = true; pub const default_allocator: std.mem.Allocator = allocators.c_allocator; +/// Zero-sized type whose `allocator` method returns `default_allocator`. 
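+///
+/// A sketch of how it plugs into generic-allocator APIs:
+///
+/// ```
+/// var scope = allocators.AllocationScopeIn(DefaultAllocator).initDefault();
+/// defer scope.deinit();
+/// ```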
+pub const DefaultAllocator = allocators.Default; /// Zeroing memory allocator pub const z_allocator: std.mem.Allocator = allocators.z_allocator; @@ -40,16 +42,16 @@ pub const debug_allocator_data = struct { return backing.?.allocator().rawAlloc(new_len, alignment, ret_addr); } - fn resize(_: *anyopaque, memory: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) bool { - return backing.?.allocator().rawResize(memory, alignment, new_len, ret_addr); + fn resize(_: *anyopaque, mem: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) bool { + return backing.?.allocator().rawResize(mem, alignment, new_len, ret_addr); } - fn remap(_: *anyopaque, memory: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) ?[*]u8 { - return backing.?.allocator().rawRemap(memory, alignment, new_len, ret_addr); + fn remap(_: *anyopaque, mem: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) ?[*]u8 { + return backing.?.allocator().rawRemap(mem, alignment, new_len, ret_addr); } - fn free(_: *anyopaque, memory: []u8, alignment: std.mem.Alignment, ret_addr: usize) void { - return backing.?.allocator().rawFree(memory, alignment, ret_addr); + fn free(_: *anyopaque, mem: []u8, alignment: std.mem.Alignment, ret_addr: usize) void { + return backing.?.allocator().rawFree(mem, alignment, ret_addr); } }; @@ -663,17 +665,18 @@ pub fn onceUnsafe(comptime function: anytype, comptime ReturnType: type) ReturnT return Result.execute(); } -pub fn isHeapMemory(memory: anytype) bool { +pub fn isHeapMemory(mem: anytype) bool { if (comptime use_mimalloc) { - const Memory = @TypeOf(memory); + const Memory = @TypeOf(mem); if (comptime std.meta.trait.isSingleItemPtr(Memory)) { - return mimalloc.mi_is_in_heap_region(memory); + return mimalloc.mi_is_in_heap_region(mem); } - return mimalloc.mi_is_in_heap_region(std.mem.sliceAsBytes(memory).ptr); + return mimalloc.mi_is_in_heap_region(std.mem.sliceAsBytes(mem).ptr); } return false; } +pub const memory = @import("./memory.zig"); pub const allocators = @import("./allocators.zig"); pub const mimalloc = allocators.mimalloc; pub const MimallocArena = allocators.MimallocArena; @@ -3127,8 +3130,6 @@ pub fn assertf(ok: bool, comptime format: []const u8, args: anytype) callconv(ca } if (!ok) { - // crash handler has runtime-only code. 
- if (@inComptime()) @compileError(std.fmt.comptimePrint(format, args)); assertionFailureWithMsg(format, args); } } diff --git a/src/bundler/LinkerContext.zig b/src/bundler/LinkerContext.zig index 3fcf33f63a..7b9cd948ed 100644 --- a/src/bundler/LinkerContext.zig +++ b/src/bundler/LinkerContext.zig @@ -151,9 +151,7 @@ pub const LinkerContext = struct { pub fn computeQuotedSourceContents(this: *LinkerContext, _: std.mem.Allocator, source_index: Index.Int) void { debug("Computing Quoted Source Contents: {d}", .{source_index}); const quoted_source_contents = &this.graph.files.items(.quoted_source_contents)[source_index]; - if (quoted_source_contents.take()) |old| { - old.deinit(); - } + quoted_source_contents.reset(); const loader: options.Loader = this.parse_graph.input_files.items(.loader)[source_index]; if (!loader.canHaveSourceMap()) { @@ -163,7 +161,8 @@ const source: *const Logger.Source = &this.parse_graph.input_files.items(.source)[source_index]; var mutable = MutableString.initEmpty(bun.default_allocator); bun.handleOom(js_printer.quoteForJSON(source.contents, &mutable, false)); - quoted_source_contents.* = mutable.toDefaultOwned().toOptional(); + var mutable_owned = mutable.toDefaultOwned(); + quoted_source_contents.* = mutable_owned.toOptional(); } }; @@ -748,12 +747,12 @@ pub const LinkerContext = struct { if (source_indices_for_contents.len > 0) { j.pushStatic("\n "); j.pushStatic( - quoted_source_map_contents[source_indices_for_contents[0]].getConst() orelse "", + quoted_source_map_contents[source_indices_for_contents[0]].get() orelse "", ); for (source_indices_for_contents[1..]) |index| { j.pushStatic(",\n "); - j.pushStatic(quoted_source_map_contents[index].getConst() orelse ""); + j.pushStatic(quoted_source_map_contents[index].get() orelse ""); } } j.pushStatic( diff --git a/src/collections/baby_list.zig b/src/collections/baby_list.zig index f704c33fca..a41a6fd8f8 100644 --- a/src/collections/baby_list.zig +++ b/src/collections/baby_list.zig @@ -4,10 +4,12 @@ pub fn BabyList(comptime Type: type) type { return struct { const Self = @This(); + // NOTE: If you add, remove, or rename any public fields, you need to update + // `looksLikeListContainerType` in `meta.zig`.
ptr: [*]Type = &[_]Type{}, len: u32 = 0, cap: u32 = 0, - alloc_ptr: bun.safety.AllocPtr = .{}, + #allocator: bun.safety.CheckedAllocator = .{}, pub const Elem = Type; @@ -169,7 +171,7 @@ pub fn BabyList(comptime Type: type) type { pub fn initCapacity(allocator: std.mem.Allocator, len: usize) std.mem.Allocator.Error!Self { var this = initWithBuffer(try allocator.alloc(Type, len)); - this.alloc_ptr.set(allocator); + this.#allocator.set(allocator); return this; } @@ -218,7 +220,7 @@ pub fn BabyList(comptime Type: type) type { .ptr = allocated.ptr, .len = @intCast(allocated.len), .cap = @intCast(allocated.len), - .alloc_ptr = .init(allocator), + .#allocator = .init(allocator), }; } @@ -248,7 +250,7 @@ pub fn BabyList(comptime Type: type) type { } pub fn listManaged(this: *Self, allocator: std.mem.Allocator) std.ArrayList(Type) { - this.alloc_ptr.set(allocator); + this.#allocator.set(allocator); var list_ = this.list(); return list_.toManaged(allocator); } @@ -282,7 +284,7 @@ pub fn BabyList(comptime Type: type) type { .ptr = @as([*]Type, @ptrCast(items.ptr)), .len = 1, .cap = 1, - .alloc_ptr = .init(allocator), + .#allocator = .init(allocator), }; } @@ -416,6 +418,20 @@ pub fn BabyList(comptime Type: type) type { pub fn memoryCost(self: *const Self) usize { return self.cap; } + + pub fn format( + self: Self, + comptime fmt: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, + ) !void { + _ = .{ fmt, options }; + return std.fmt.format( + writer, + "BabyList({s}){{{any}}}", + .{ @typeName(Type), self.list() }, + ); + } }; } diff --git a/src/collections/multi_array_list.zig b/src/collections/multi_array_list.zig index 8063252312..9fff6495ae 100644 --- a/src/collections/multi_array_list.zig +++ b/src/collections/multi_array_list.zig @@ -21,7 +21,7 @@ pub fn MultiArrayList(comptime T: type) type { bytes: [*]align(@alignOf(T)) u8 = undefined, len: usize = 0, capacity: usize = 0, - alloc_ptr: bun.safety.AllocPtr = .{}, + #allocator: bun.safety.CheckedAllocator = .{}, pub const empty: Self = .{ .bytes = undefined, @@ -186,7 +186,7 @@ pub fn MultiArrayList(comptime T: type) type { /// Release all allocated memory. pub fn deinit(self: *Self, gpa: Allocator) void { - self.alloc_ptr.assertEq(gpa); + self.#allocator.assertEq(gpa); gpa.free(self.allocatedBytes()); self.* = undefined; } @@ -235,7 +235,7 @@ pub fn MultiArrayList(comptime T: type) type { /// Extend the list by 1 element. Allocates more memory as necessary. pub fn append(self: *Self, gpa: Allocator, elem: T) !void { - self.alloc_ptr.set(gpa); + self.#allocator.set(gpa); try self.ensureUnusedCapacity(gpa, 1); self.appendAssumeCapacity(elem); } @@ -252,7 +252,7 @@ pub fn MultiArrayList(comptime T: type) type { /// index with uninitialized data. /// Allocates more memory as necesasry. pub fn addOne(self: *Self, allocator: Allocator) Allocator.Error!usize { - self.alloc_ptr.set(allocator); + self.#allocator.set(allocator); try self.ensureUnusedCapacity(allocator, 1); return self.addOneAssumeCapacity(); } @@ -281,7 +281,7 @@ pub fn MultiArrayList(comptime T: type) type { /// sets the given index to the specified element. May reallocate /// and invalidate iterators. pub fn insert(self: *Self, gpa: Allocator, index: usize, elem: T) !void { - self.alloc_ptr.set(gpa); + self.#allocator.set(gpa); try self.ensureUnusedCapacity(gpa, 1); self.insertAssumeCapacity(index, elem); } @@ -354,7 +354,7 @@ pub fn MultiArrayList(comptime T: type) type { /// Adjust the list's length to `new_len`. /// Does not initialize added items, if any. 
pub fn resize(self: *Self, gpa: Allocator, new_len: usize) !void { - self.alloc_ptr.set(gpa); + self.#allocator.set(gpa); try self.ensureTotalCapacity(gpa, new_len); self.len = new_len; } @@ -363,7 +363,7 @@ pub fn MultiArrayList(comptime T: type) type { /// If `new_len` is greater than zero, this may fail to reduce the capacity, /// but the data remains intact and the length is updated to new_len. pub fn shrinkAndFree(self: *Self, gpa: Allocator, new_len: usize) void { - self.alloc_ptr.set(gpa); + self.#allocator.set(gpa); if (new_len == 0) return clearAndFree(self, gpa); assert(new_len <= self.capacity); @@ -407,7 +407,7 @@ pub fn MultiArrayList(comptime T: type) type { } pub fn clearAndFree(self: *Self, gpa: Allocator) void { - self.alloc_ptr.set(gpa); + self.#allocator.set(gpa); gpa.free(self.allocatedBytes()); self.* = .{}; } @@ -452,7 +452,7 @@ pub fn MultiArrayList(comptime T: type) type { /// Modify the array so that it can hold at least `additional_count` **more** items. /// Invalidates pointers if additional memory is needed. pub fn ensureUnusedCapacity(self: *Self, gpa: Allocator, additional_count: usize) !void { - self.alloc_ptr.set(gpa); + self.#allocator.set(gpa); return self.ensureTotalCapacity(gpa, self.len + additional_count); } @@ -460,7 +460,7 @@ pub fn MultiArrayList(comptime T: type) type { /// Invalidates pointers if additional memory is needed. /// `new_capacity` must be greater or equal to `len`. pub fn setCapacity(self: *Self, gpa: Allocator, new_capacity: usize) !void { - self.alloc_ptr.set(gpa); + self.#allocator.set(gpa); assert(new_capacity >= self.len); const new_bytes = try gpa.alignedAlloc( u8, diff --git a/src/heap_breakdown.zig b/src/heap_breakdown.zig index 8948a42ece..69a79f5060 100644 --- a/src/heap_breakdown.zig +++ b/src/heap_breakdown.zig @@ -102,7 +102,7 @@ pub const Zone = opaque { pub inline fn tryCreate(zone: *Zone, comptime T: type, data: T) !*T { const alignment: std.mem.Alignment = .fromByteUnits(@alignOf(T)); const ptr: *T = @alignCast(@ptrCast( - rawAlloc(zone, @sizeOf(T), alignment, @returnAddress()) orelse bun.outOfMemory(), + rawAlloc(zone, @sizeOf(T), alignment, @returnAddress()) orelse return error.OutOfMemory, )); ptr.* = data; return ptr; diff --git a/src/memory.zig b/src/memory.zig new file mode 100644 index 0000000000..47e54a7a65 --- /dev/null +++ b/src/memory.zig @@ -0,0 +1,83 @@ +//! Basic utilities for working with memory and objects. + +/// Allocates memory for a value of type `T` using the provided allocator, and initializes the +/// memory with `value`. +/// +/// If `allocator` is `bun.default_allocator`, this will internally use `bun.tryNew` to benefit from +/// the added assertions. +pub fn create(comptime T: type, allocator: std.mem.Allocator, value: T) bun.OOM!*T { + if ((comptime Environment.allow_assert) and isDefault(allocator)) { + return bun.tryNew(T, value); + } + const ptr = try allocator.create(T); + ptr.* = value; + return ptr; +} + +/// Frees memory previously allocated by `create`. +/// +/// The memory must have been allocated by the `create` function in this namespace, not +/// directly by `allocator.create`. +pub fn destroy(allocator: std.mem.Allocator, ptr: anytype) void { + if ((comptime Environment.allow_assert) and isDefault(allocator)) { + bun.destroy(ptr); + } else { + allocator.destroy(ptr); + } +} + +/// Default-initializes a value of type `T`. 
+/// +/// This function tries the following, in order: +/// +/// * `.initDefault()`, if a method with that name exists +/// * `.init()`, if a method with that name exists +/// * `.{}`, otherwise +pub fn initDefault(comptime T: type) T { + return if (comptime std.meta.hasFn(T, "initDefault")) + .initDefault() + else if (comptime std.meta.hasFn(T, "init")) + .init() + else + .{}; +} + +/// Calls `deinit` on `ptr_or_slice`, or on every element of `ptr_or_slice`, if such a `deinit` +/// method exists. +/// +/// This function first does the following: +/// +/// * If `ptr_or_slice` is a single-item pointer, calls `ptr_or_slice.deinit()`, if that method +/// exists. +/// * If `ptr_or_slice` is a slice, calls `deinit` on every element of the slice, if the slice +/// elements have a `deinit` method. +/// +/// Then, if `ptr_or_slice` is non-const and such a `deinit` method exists, this function also +/// sets all memory referenced by the pointer to `undefined`. +/// +/// This function does not free `ptr_or_slice` itself. +pub fn deinit(ptr_or_slice: anytype) void { + const ptr_info = @typeInfo(@TypeOf(ptr_or_slice)); + const Child = ptr_info.pointer.child; + const mutable = !ptr_info.pointer.is_const; + if (comptime std.meta.hasFn(Child, "deinit")) { + switch (comptime ptr_info.pointer.size) { + .one => { + ptr_or_slice.deinit(); + if (comptime mutable) ptr_or_slice.* = undefined; + }, + .slice => for (ptr_or_slice) |*elem| { + elem.deinit(); + if (comptime mutable) elem.* = undefined; + }, + else => @compileError("unsupported pointer type"), + } + } +} + +const std = @import("std"); +const Allocator = std.mem.Allocator; + +const bun = @import("bun"); +const Environment = bun.Environment; +const isDefault = bun.allocators.isDefault; diff --git a/src/meta.zig b/src/meta.zig index 3723d26c61..964235a26f 100644 --- a/src/meta.zig +++ b/src/meta.zig @@ -301,11 +301,10 @@ pub fn looksLikeListContainerType(comptime T: type) ?struct { list: ListContaine return .{ .list = .array_list, .child = std.meta.Child(tyinfo.@"struct".fields[0].type) }; // Looks like babylist - if (tyinfo.@"struct".fields.len == 4 and + if (tyinfo.@"struct".fields.len == 3 and std.mem.eql(u8, tyinfo.@"struct".fields[0].name, "ptr") and std.mem.eql(u8, tyinfo.@"struct".fields[1].name, "len") and - std.mem.eql(u8, tyinfo.@"struct".fields[2].name, "cap") and - std.mem.eql(u8, tyinfo.@"struct".fields[3].name, "alloc_ptr")) + std.mem.eql(u8, tyinfo.@"struct".fields[2].name, "cap")) return .{ .list = .baby_list, .child = std.meta.Child(tyinfo.@"struct".fields[0].type) }; // Looks like SmallList diff --git a/src/ptr.zig b/src/ptr.zig index ed1c7a5a46..608b0efc50 100644 --- a/src/ptr.zig +++ b/src/ptr.zig @@ -7,9 +7,8 @@ pub const CowString = CowSlice(u8); pub const owned = @import("./ptr/owned.zig"); pub const Owned = owned.Owned; // owned pointer allocated with default allocator -pub const DynamicOwned = owned.Dynamic; // owned pointer allocated with any allocator -pub const MaybeOwned = owned.maybe.MaybeOwned; // owned or borrowed pointer -pub const ScopedOwned = owned.scoped.ScopedOwned; // uses `AllocationScope` +pub const OwnedIn = owned.OwnedIn; // owned pointer allocated with specific type of allocator +pub const DynamicOwned = owned.Dynamic; // owned pointer allocated with any `std.mem.Allocator` pub const shared = @import("./ptr/shared.zig"); pub const Shared = shared.Shared; diff --git a/src/ptr/CowSlice.zig b/src/ptr/CowSlice.zig index b3bc7d02c7..c0ee935ee5 100644 --- a/src/ptr/CowSlice.zig +++ b/src/ptr/CowSlice.zig @@ -60,8 +60,10 @@ pub fn CowSliceZ(T: type,
comptime sentinel: ?T) type { /// `data` is transferred into the returned string, and must be freed with /// `.deinit()` when the string and its borrows are done being used. pub fn initOwned(data: []T, allocator: Allocator) Self { - if (AllocationScope.downcast(allocator)) |scope| + if (allocation_scope.isInstance(allocator)) { + const scope = AllocationScope.Borrowed.downcast(allocator); scope.assertOwned(data); + } return .{ .ptr = data.ptr, @@ -306,11 +308,12 @@ test CowSlice { try expectEqualStrings(borrow.slice(), "hello"); } +const bun = @import("bun"); const std = @import("std"); const Allocator = std.mem.Allocator; -const bun = @import("bun"); -const AllocationScope = bun.AllocationScope; - const Environment = bun.Environment; const cow_str_assertions = Environment.isDebug; + +const allocation_scope = bun.allocators.allocation_scope; +const AllocationScope = allocation_scope.AllocationScope; diff --git a/src/ptr/owned.zig b/src/ptr/owned.zig index 702bd9c927..1af997a3d9 100644 --- a/src/ptr/owned.zig +++ b/src/ptr/owned.zig @@ -1,22 +1,5 @@ const owned = @This(); -/// Options for `WithOptions`. -pub const Options = struct { - // Whether to call `deinit` on the data before freeing it, if such a method exists. - deinit: bool = true, - - // If non-null, the owned pointer will always use the provided allocator. This makes it the - // same size as a raw pointer, as it no longer has to store the allocator at runtime, but it - // means it will be a different type from owned pointers that use different allocators. - allocator: ?Allocator = bun.default_allocator, - - fn asDynamic(self: Options) Options { - var new = self; - new.allocator = null; - return new; - } -}; - /// An owned pointer or slice that was allocated using the default allocator. /// /// This type is a wrapper around a pointer or slice of type `Pointer` that was allocated using @@ -26,188 +9,232 @@ pub const Options = struct { /// `Pointer` can be a single-item pointer, a slice, or an optional version of either of those; /// e.g., `Owned(*u8)`, `Owned([]u8)`, `Owned(?*u8)`, or `Owned(?[]u8)`. /// -/// Use the `alloc*` functions to create an `Owned(Pointer)` by allocating memory, or use -/// `fromRawOwned` to create one from a raw pointer. Use `get` to access the inner pointer, and -/// call `deinit` to free the memory. If `Pointer` is optional, use `initNull` to create a null -/// `Owned(Pointer)`. -/// -/// See `Dynamic` for a version that supports any allocator. You can also specify a different -/// fixed allocator using `WithOptions(Pointer, .{ .allocator = some_other_allocator })`. +/// This type is an alias of `OwnedIn(Pointer, bun.DefaultAllocator)`, and thus has no overhead +/// because `bun.DefaultAllocator` is a zero-sized type. pub fn Owned(comptime Pointer: type) type { - return WithOptions(Pointer, .{}); + return OwnedIn(Pointer, bun.DefaultAllocator); } -/// An owned pointer or slice allocated using any allocator. +/// An owned pointer or slice allocated using any `std.mem.Allocator`. /// -/// This type is like `Owned`, but it supports data allocated by any allocator. To do this, it -/// stores the allocator at runtime, which increases the size of the type. An unmanaged version -/// which doesn't store the allocator is available with `Dynamic(Pointer).Unmanaged`. +/// This type is an alias of `OwnedIn(Pointer, std.mem.Allocator)`, and thus stores the +/// `std.mem.Allocator` at runtime. 
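+///
+/// Illustrative usage (assuming `arena` is an `std.heap.ArenaAllocator`, one
+/// case where the allocator is only known at runtime):
+///
+/// ```
+/// var value = try Dynamic(*u32).allocIn(42, arena.allocator());
+/// defer value.deinit();
+/// ```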
pub fn Dynamic(comptime Pointer: type) type { - return WithOptions(Pointer, .{ .allocator = null }); + return OwnedIn(Pointer, std.mem.Allocator); } -/// Like `Owned`, but takes explicit options. +/// An owned pointer or slice, allocated using an instance of `Allocator`. /// -/// `Owned(Pointer)` is simply an alias of `WithOptions(Pointer, .{})`. -pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { +/// `Allocator` must be one of the following: +/// +/// * `std.mem.Allocator` +/// * A type with a method named `allocator` that takes no parameters (except `self`) and returns +/// an instance of `std.mem.Allocator`. +/// +/// If `Allocator` is a zero-sized type, the owned pointer has no overhead compared to a raw +/// pointer. +pub fn OwnedIn(comptime Pointer: type, comptime Allocator: type) type { const info = PointerInfo.parse(Pointer, .{}); const NonOptionalPointer = info.NonOptionalPointer; const Child = info.Child; + const ConstPointer = AddConst(Pointer); return struct { const Self = @This(); - #unsafe_raw_pointer: Pointer, - unsafe_allocator: if (options.allocator == null) Allocator else void, + #pointer: Pointer, + #allocator: Allocator, /// An unmanaged version of this owned pointer. This type doesn't store the allocator and /// is the same size as a raw pointer. /// - /// This type is provided only if `options.allocator` is null, since if it's non-null, - /// the owned pointer is already the size of a raw pointer. - pub const Unmanaged = if (options.allocator == null) owned.Unmanaged(Pointer, options); + /// If `Allocator` is a zero-sized type, there is no advantage to using this type. Just + /// use a normal owned pointer, which has no overhead in this case. + pub const Unmanaged = owned.Unmanaged(Pointer, Allocator); - /// Allocates a new owned pointer. The signature of this function depends on whether the - /// pointer is a single-item pointer or a slice, and whether a fixed allocator was provided - /// in `options`. - pub const alloc = (if (options.allocator) |allocator| switch (info.kind()) { + /// Allocates a new owned pointer with a default-initialized `Allocator`. + pub const alloc = switch (info.kind()) { .single => struct { - /// Allocates memory for a single value using `options.allocator`, and initializes - /// it with `value`. - pub fn alloc(value: Child) Allocator.Error!Self { - return .allocSingle(allocator, value); + pub fn alloc(value: Child) AllocError!Self { + return .allocIn(value, bun.memory.initDefault(Allocator)); } }, .slice => struct { - /// Allocates memory for `count` elements using `options.allocator`, and initializes - /// every element with `elem`. - pub fn alloc(count: usize, elem: Child) Allocator.Error!Self { - return .allocSlice(allocator, count, elem); + pub fn alloc(count: usize, elem: Child) AllocError!Self { + return .allocIn(count, elem, bun.memory.initDefault(Allocator)); } }, - } else switch (info.kind()) { + }.alloc; + + /// Allocates a new owned pointer with the given allocator. + pub const allocIn = switch (info.kind()) { .single => struct { - /// Allocates memory for a single value and initialize it with `value`. 
- pub fn alloc(allocator: Allocator, value: Child) Allocator.Error!Self { - return .allocSingle(allocator, value); + pub fn allocIn(value: Child, allocator_: Allocator) AllocError!Self { + const data = try bun.memory.create( + Child, + bun.allocators.asStd(allocator_), + value, + ); + return .{ + .#pointer = data, + .#allocator = allocator_, + }; } }, .slice => struct { - /// Allocates memory for `count` elements, and initialize every element with `elem`. - pub fn alloc(allocator: Allocator, count: usize, elem: Child) Allocator.Error!Self { - return .allocSlice(allocator, count, elem); + pub fn allocIn(count: usize, elem: Child, allocator_: Allocator) AllocError!Self { + const data = try bun.allocators.asStd(allocator_).alloc(Child, count); + @memset(data, elem); + return .{ + .#pointer = data, + .#allocator = allocator_, + }; } }, - }).alloc; + }.allocIn; - const supports_default_allocator = if (options.allocator) |allocator| - bun.allocators.isDefault(allocator) - else - true; - - /// Allocates an owned pointer using the default allocator. This function calls - /// `bun.outOfMemory` if memory allocation fails. - pub const new = if (info.kind() == .single and supports_default_allocator) struct { + /// Allocates an owned pointer for a single item, and calls `bun.outOfMemory` if allocation + /// fails. + /// + /// It must be possible to default-initialize `Allocator`. + pub const new = if (info.kind() == .single) struct { pub fn new(value: Child) Self { - return bun.handleOom(Self.allocSingle(bun.default_allocator, value)); + return bun.handleOom(Self.alloc(value)); } }.new; - /// Creates an owned pointer by allocating memory and performing a shallow copy of - /// `data`. - pub const allocDupe = (if (options.allocator) |allocator| struct { - pub fn allocDupe(data: NonOptionalPointer) Allocator.Error!Self { - return .allocDupeImpl(data, allocator); - } - } else struct { - pub fn allocDupe(data: NonOptionalPointer, allocator: Allocator) Allocator.Error!Self { - return .allocDupeImpl(data, allocator); - } - }).allocDupe; - - pub const fromRawOwned = (if (options.allocator == null) struct { - /// Creates an owned pointer from a raw pointer and allocator. - /// - /// Requirements: - /// - /// * `data` must have been allocated by `allocator`. - /// * `data` must not be freed for the life of the owned pointer. - pub fn fromRawOwned(data: NonOptionalPointer, allocator: Allocator) Self { - return .{ - .#unsafe_raw_pointer = data, - .unsafe_allocator = allocator, - }; - } - } else struct { - /// Creates an owned pointer from a raw pointer. - /// - /// Requirements: - /// - /// * `data` must have been allocated by `options.allocator`. - /// * `data` must not be freed for the life of the owned pointer. - pub fn fromRawOwned(data: NonOptionalPointer) Self { - return .{ - .#unsafe_raw_pointer = data, - .unsafe_allocator = {}, - }; - } - }).fromRawOwned; - - /// Deinitializes the pointer or slice, freeing its memory. + /// Creates an owned pointer by allocating memory and performing a shallow copy of `data`. /// - /// By default, this will first call `deinit` on the data itself, if such a method exists. - /// (For slices, this will call `deinit` on every element in this slice.) This behavior can - /// be disabled in `options`. - pub fn deinit(self: Self) void { - const data = if (comptime info.isOptional()) - self.#unsafe_raw_pointer orelse return + /// It must be possible to default-initialize `Allocator`. 
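+ ///
+ /// For example (a sketch; `Owned` uses the zero-sized default allocator, so
+ /// no allocator argument is needed):
+ ///
+ /// ```
+ /// var copy = try Owned([]u8).allocDupe("hello");
+ /// defer copy.deinit();
+ /// ```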
+ pub fn allocDupe(data: ConstPointer) AllocError!Self { + return .allocDupeIn(data, bun.memory.initDefault(Allocator)); + } + + /// Creates an owned pointer by allocating memory with the given allocator and performing + /// a shallow copy of `data`. + pub fn allocDupeIn(data: ConstPointer, allocator_: Allocator) AllocError!Self { + const unwrapped = if (comptime info.isOptional()) + data orelse return .initNull() else - self.#unsafe_raw_pointer; - if (comptime options.deinit and std.meta.hasFn(Child, "deinit")) { - switch (comptime info.kind()) { - .single => data.deinit(), - .slice => for (data) |*elem| elem.deinit(), - } - } - switch (comptime info.kind()) { - .single => bun.allocators.destroy(self.getAllocator(), data), - .slice => self.getAllocator().free(data), - } + data; + return switch (comptime info.kind()) { + .single => .allocIn(unwrapped.*, allocator_), + .slice => .{ + .#pointer = try bun.allocators.asStd(allocator_).dupe(Child, unwrapped), + .#allocator = allocator_, + }, + }; } - const SelfOrPtr = if (info.isConst()) Self else *Self; - - /// Returns the inner pointer or slice. - pub fn get(self: SelfOrPtr) Pointer { - return self.#unsafe_raw_pointer; - } - - /// Returns a const version of the inner pointer or slice. + /// Creates an owned pointer from a raw pointer. /// - /// This method is not provided if the pointer is already const; use `get` in that case. - pub const getConst = if (!info.isConst()) struct { - pub fn getConst(self: Self) AddConst(Pointer) { - return self.#unsafe_raw_pointer; - } - }.getConst; + /// Requirements: + /// + /// * It must be permissible to free `data` with a new instance of `Allocator` created + /// with `bun.memory.initDefault(Allocator)`. + /// * `data` must not be freed for the life of the owned pointer. + /// + /// NOTE: If `Allocator` is the default allocator, and `Pointer` is a single-item pointer, + /// `data` must have been allocated with `bun.new`, `bun.tryNew`, or `bun.memory.create`, + /// NOT `bun.default_allocator.create`. If `data` came from an owned pointer, this + /// requirement is satisfied. + /// + /// `Allocator` is the default allocator if `Allocator.allocator` returns + /// `bun.default_allocator` when called on a default-initialized `Allocator` (created with + /// `bun.memory.initDefault`). Most notably, this is true for `bun.DefaultAllocator`. + pub fn fromRaw(data: Pointer) Self { + return .fromRawIn(data, bun.memory.initDefault(Allocator)); + } - /// Converts an owned pointer into a raw pointer. If `options.allocator` is non-null, - /// this method also returns the allocator. + /// Creates an owned pointer from a raw pointer and allocator. + /// + /// Requirements: + /// + /// * It must be permissible to free `data` with `allocator`. + /// * `data` must not be freed for the life of the owned pointer. + /// + /// NOTE: If `allocator` is the default allocator, and `Pointer` is a single-item pointer, + /// `data` must have been allocated with `bun.new`, `bun.tryNew`, or `bun.memory.create`, + /// NOT `bun.default_allocator.create`. If `data` came from `intoRaw` on another owned + /// pointer, this requirement is satisfied. 
+ /// + /// `allocator` is the default allocator if either of the following is true: + /// * `allocator` is `bun.default_allocator` + /// * `allocator.allocator()` returns `bun.default_allocator` + pub fn fromRawIn(data: Pointer, allocator_: Allocator) Self { + return .{ + .#pointer = data, + // Code shouldn't rely on null pointers having a specific allocator, since + // `initNull` necessarily sets this field to undefined. + .#allocator = if ((comptime info.isOptional()) and data == null) + undefined + else + allocator_, + }; + } + + /// Calls `deinit` on the underlying data (pointer target or slice elements) and then + /// frees the memory. + /// + /// `deinit` is also called on the allocator. /// /// This method invalidates `self`. - pub const intoRawOwned = (if (options.allocator != null) struct { - pub fn intoRawOwned(self: Self) Pointer { - return self.#unsafe_raw_pointer; + pub fn deinit(self: *Self) void { + self.deinitImpl(.deep); + } + + /// Frees the memory without calling `deinit` on the underlying data. `deinit` is still + /// called on the allocator. + /// + /// This method invalidates `self`. + pub fn deinitShallow(self: *Self) void { + self.deinitImpl(.shallow); + } + + /// Returns the inner pointer or slice. + pub fn get(self: Self) Pointer { + return self.#pointer; + } + + /// Converts an owned pointer into a raw pointer. This releases ownership of the pointer. + /// + /// This method calls `deinit` on the allocator. If you need to retain access to the + /// allocator, use `intoRawWithAllocator`. + /// + /// NOTE: If the current allocator is the default allocator, and `Pointer` is a single-item + /// pointer, the pointer must be freed with `bun.destroy` or `bun.memory.destroy`, NOT + /// `bun.default_allocator.destroy`. Or it can be turned back into an owned pointer. + /// + /// This method invalidates `self`. + pub fn intoRaw(self: *Self) Pointer { + defer self.* = undefined; + if ((comptime !info.isOptional()) or self.#pointer != null) { + bun.memory.deinit(&self.#allocator); } - } else if (info.isOptional()) struct { - pub fn intoRawOwned(self: Self) ?struct { NonOptionalPointer, Allocator } { - return .{ self.#unsafe_raw_pointer orelse return null, self.unsafe_allocator }; - } - } else struct { - pub fn intoRawOwned(self: Self) struct { Pointer, Allocator } { - return .{ self.#unsafe_raw_pointer, self.unsafe_allocator }; - } - }).intoRawOwned; + return self.#pointer; + } + + const PointerAndAllocator = if (info.isOptional()) + ?struct { NonOptionalPointer, Allocator } + else + struct { Pointer, Allocator }; + + /// Converts an owned pointer into a raw pointer and allocator, releasing ownership of the + /// pointer. + /// + /// NOTE: If the current allocator is the default allocator, and `Pointer` is a single-item + /// pointer, the pointer must be freed with `bun.destroy` or `bun.memory.destroy`, NOT + /// `bun.default_allocator.destroy`. Or it can be turned back into an owned pointer. + /// + /// This method invalidates `self`. + pub fn intoRawWithAllocator(self: *Self) PointerAndAllocator { + defer self.* = undefined; + const data = if (comptime info.isOptional()) + self.#pointer orelse return null + else + self.#pointer; + return .{ data, self.#allocator }; + } /// Returns a null owned pointer. This function is provided only if `Pointer` is an /// optional type. 
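
// Round-tripping between owned and raw pointers is symmetric. A sketch using
// the APIs above (hypothetical values):
//
// ```
// var owned_value = try Owned(*u32).alloc(1);
// const raw = owned_value.intoRaw(); // releases ownership
// var again: Owned(*u32) = .fromRaw(raw); // takes it back
// again.deinit();
// ```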
@@ -216,14 +243,12 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { pub const initNull = if (info.isOptional()) struct { pub fn initNull() Self { return .{ - .#unsafe_raw_pointer = null, - .unsafe_allocator = undefined, + .#pointer = null, + .#allocator = undefined, }; } }.initNull; - const OwnedNonOptional = WithOptions(NonOptionalPointer, options); - /// Converts an `Owned(?T)` into an `?Owned(T)`. /// /// This method sets `self` to null. It is therefore permitted, but not required, to call @@ -231,149 +256,172 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { /// /// This method is provided only if `Pointer` is an optional type. pub const take = if (info.isOptional()) struct { + const OwnedNonOptional = OwnedIn(NonOptionalPointer, Allocator); + pub fn take(self: *Self) ?OwnedNonOptional { defer self.* = .initNull(); return .{ - .#unsafe_raw_pointer = self.#unsafe_raw_pointer orelse return null, - .unsafe_allocator = self.unsafe_allocator, + .#pointer = self.#pointer orelse return null, + .#allocator = self.#allocator, }; } }.take; - const OwnedOptional = WithOptions(?Pointer, options); + /// Like `deinit`, but sets `self` to null instead of invalidating it. + /// + /// This method is provided only if `Pointer` is an optional type. + pub const reset = if (info.isOptional()) struct { + pub fn reset(self: *Self) void { + defer self.* = .initNull(); + self.deinit(); + } + }.reset; /// Converts an `Owned(T)` into a non-null `Owned(?T)`. /// /// This method invalidates `self`. pub const toOptional = if (!info.isOptional()) struct { - pub fn toOptional(self: Self) OwnedOptional { + const OwnedOptional = OwnedIn(?Pointer, Allocator); + + pub fn toOptional(self: *Self) OwnedOptional { + defer self.* = undefined; return .{ - .#unsafe_raw_pointer = self.#unsafe_raw_pointer, - .unsafe_allocator = self.unsafe_allocator, + .#pointer = self.#pointer, + .#allocator = self.#allocator, }; } }.toOptional; /// Converts this owned pointer into an unmanaged variant that doesn't store the allocator. /// - /// This method invalidates `self`. - /// - /// This method is provided only if `options.allocator` is null, since if it's non-null, - /// this type is already the size of a raw pointer. - pub const toUnmanaged = if (options.allocator == null) struct { - pub fn toUnmanaged(self: Self) Self.Unmanaged { - return .{ - .#unsafe_raw_pointer = self.#unsafe_raw_pointer, - }; - } - }.toUnmanaged; - - const DynamicOwned = WithOptions(Pointer, options.asDynamic()); - - /// Converts an owned pointer that uses a fixed allocator into a dynamic one. + /// There is no reason to use this method if `Allocator` is a zero-sized type, as a normal + /// owned pointer has no overhead in this case. /// /// This method invalidates `self`. - /// - /// This method is provided only if `options.allocator` is non-null, and returns - /// a new owned pointer that has `options.allocator` set to null. 
- pub const toDynamic = if (options.allocator) |allocator| struct { - pub fn toDynamic(self: Self) DynamicOwned { - return .{ - .#unsafe_raw_pointer = self.#unsafe_raw_pointer, - .unsafe_allocator = allocator, - }; - } - }.toDynamic; - - fn rawInit(data: NonOptionalPointer, allocator: Allocator) Self { + pub fn toUnmanaged(self: *Self) Self.Unmanaged { + defer self.* = undefined; return .{ - .#unsafe_raw_pointer = data, - .unsafe_allocator = if (comptime options.allocator == null) allocator, + .#pointer = self.#pointer, }; } - fn allocSingle(allocator: Allocator, value: Child) !Self { - const data = try bun.allocators.create(Child, allocator, value); - return .rawInit(data, allocator); + /// Converts an owned pointer that uses a fixed type of allocator into a dynamic one + /// that uses any `std.mem.Allocator`. + /// + /// It must be possible to use the `std.mem.Allocator` returned by `Allocator.allocator` + /// even after deinitializing the `Allocator`. As a safety check, this method will not + /// compile if `Allocator.Borrowed` exists and is a different type from `Allocator`, as + /// this likely indicates a scenario where this invariant will not hold. + /// + /// There is no reason to use this method if `Allocator` is already `std.mem.Allocator`. + /// + /// This method invalidates `self`. + pub fn toDynamic(self: *Self) owned.Dynamic(Pointer) { + if (comptime @hasDecl(Allocator, "Borrowed") and Allocator.Borrowed != Allocator) { + // If this allocator can be borrowed as a different type, it's likely that the + // `std.mem.Allocator` returned by `Allocator.allocator` won't be valid after the + // `Allocator` is dropped. + @compileError("allocator won't live long enough"); + } + + defer self.* = undefined; + const data = if (comptime info.isOptional()) + self.#pointer orelse return .initNull() + else + self.#pointer; + defer bun.memory.deinit(&self.#allocator); + return .fromRawIn(data, self.getStdAllocator()); } - fn allocSlice(allocator: Allocator, count: usize, elem: Child) !Self { - const data = try allocator.alloc(Child, count); - @memset(data, elem); - return .rawInit(data, allocator); + const MaybeAllocator = if (info.isOptional()) + ?bun.allocators.Borrowed(Allocator) + else + bun.allocators.Borrowed(Allocator); + + /// Returns a borrowed version of the allocator. + /// + /// Not all allocators have a separate borrowed type; in this case, the allocator is + /// returned as-is. For example, if `Allocator` is `std.mem.Allocator`, this method also + /// returns `std.mem.Allocator`. 
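+ ///
+ /// Illustrative sketch (hypothetical `dev_alloc` instance):
+ ///
+ /// ```
+ /// var list = try OwnedIn([]u8, DevAllocator).allocIn(16, 0, dev_alloc);
+ /// defer list.deinit();
+ /// const borrowed = list.allocator(); // safe to hand out; no double `deinit`
+ /// _ = borrowed;
+ /// ```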
+ pub fn allocator(self: Self) MaybeAllocator { + return if ((comptime info.isOptional()) and self.#pointer == null) + null + else + bun.allocators.borrow(self.#allocator); } - fn allocDupeImpl(data: NonOptionalPointer, allocator: Allocator) !Self { - return switch (comptime info.kind()) { - .single => .allocSingle(allocator, data.*), - .slice => .rawInit(try allocator.dupe(Child, data), allocator), - }; + fn getStdAllocator(self: Self) std.mem.Allocator { + return bun.allocators.asStd(self.#allocator); } - fn getAllocator(self: Self) Allocator { - return (comptime options.allocator) orelse self.unsafe_allocator; + fn deinitImpl(self: *Self, comptime mode: enum { deep, shallow }) void { + defer self.* = undefined; + const data = if (comptime info.isOptional()) + self.#pointer orelse return + else + self.#pointer; + if (comptime mode == .deep) { + bun.memory.deinit(data); + } + switch (comptime info.kind()) { + .single => bun.memory.destroy(self.getStdAllocator(), data), + .slice => self.getStdAllocator().free(data), + } + bun.memory.deinit(&self.#allocator); } }; } -/// An unmanaged version of `Dynamic(Pointer)` that doesn't store the allocator. -fn Unmanaged(comptime Pointer: type, comptime options: Options) type { +/// An unmanaged version of `OwnedIn(Pointer, Allocator)` that doesn't store the allocator. +/// +/// If `Allocator` is a zero-sized type, there is no benefit to using this type. Just use a +/// normal owned pointer, which has no overhead in this case. +/// +/// This type is accessible as `OwnedIn(Pointer, Allocator).Unmanaged`. +fn Unmanaged(comptime Pointer: type, comptime Allocator: type) type { const info = PointerInfo.parse(Pointer, .{}); - bun.assertf( - options.allocator == null, - "owned.Unmanaged is useless if options.allocator is provided", - .{}, - ); return struct { const Self = @This(); - #unsafe_raw_pointer: Pointer, + #pointer: Pointer, - const Managed = WithOptions(Pointer, options); + const Managed = OwnedIn(Pointer, Allocator); /// Converts this unmanaged owned pointer back into a managed version. /// /// `allocator` must be the allocator that was used to allocate the pointer. - pub fn toManaged(self: Self, allocator: Allocator) Managed { + /// + /// This method invalidates `self`. + pub fn toManaged(self: *Self, allocator: Allocator) Managed { + defer self.* = undefined; const data = if (comptime info.isOptional()) - self.#unsafe_raw_pointer orelse return .initNull() + self.#pointer orelse return .initNull() else - self.#unsafe_raw_pointer; - return .fromRawOwned(data, allocator); + self.#pointer; + return .fromRawIn(data, allocator); } /// Deinitializes the pointer or slice. See `Owned.deinit` for more information. /// /// `allocator` must be the allocator that was used to allocate the pointer. - pub fn deinit(self: Self, allocator: Allocator) void { - self.toManaged(allocator).deinit(); + /// + /// This method invalidates `self`. + pub fn deinit(self: *Self, allocator: Allocator) void { + var managed = self.toManaged(allocator); + managed.deinit(); } - const SelfOrPtr = if (info.isConst()) Self else *Self; - /// Returns the inner pointer or slice. - pub fn get(self: SelfOrPtr) Pointer { - return self.#unsafe_raw_pointer; + pub fn get(self: Self) Pointer { + return self.#pointer; } - - /// Returns a const version of the inner pointer or slice. - /// - /// This method is not provided if the pointer is already const; use `get` in that case. 
- pub const getConst = if (!info.isConst()) struct { - pub fn getConst(self: Self) AddConst(Pointer) { - return self.#unsafe_raw_pointer; - } - }.getConst; }; } -pub const maybe = @import("./owned/maybe.zig"); -pub const scoped = @import("./owned/scoped.zig"); - const bun = @import("bun"); const std = @import("std"); -const Allocator = std.mem.Allocator; +const AllocError = std.mem.Allocator.Error; const meta = @import("./meta.zig"); const AddConst = meta.AddConst; diff --git a/src/ptr/owned/maybe.zig b/src/ptr/owned/maybe.zig deleted file mode 100644 index f940ead971..0000000000 --- a/src/ptr/owned/maybe.zig +++ /dev/null @@ -1,160 +0,0 @@ -/// Options for `WithOptions`. -pub const Options = struct { - // Whether to call `deinit` on the data before freeing it, if such a method exists. - deinit: bool = true, - - fn toOwned(self: Options) owned.Options { - return .{ - .deinit = self.deinit, - .allocator = null, - }; - } -}; - -/// A possibly owned pointer or slice. -/// -/// Memory held by this type is either owned or borrowed. If owned, this type also holds the -/// allocator used to allocate the memory, and calling `deinit` on this type will call `deinit` on -/// the underlying data and then free the memory. If the memory is borrowed, `deinit` is a no-op. -/// -/// `Pointer` can be a single-item pointer, a slice, or an optional version of either of those; -/// e.g., `MaybeOwned(*u8)`, `MaybeOwned([]u8)`, `MaybeOwned(?*u8)`, or `MaybeOwned(?[]u8)`. -/// -/// Use `fromOwned` or `fromBorrowed` to create a `MaybeOwned(Pointer)`. Use `get` to access the -/// inner pointer, and call `deinit` when done with the data. (It's best practice to always call -/// `deinit`, even if the data is borrowed. It's a no-op in that case but doing so will help prevent -/// leaks.) If `Pointer` is optional, use `initNull` to create a null `MaybeOwned(Pointer)`. -pub fn MaybeOwned(comptime Pointer: type) type { - return WithOptions(Pointer, .{}); -} - -/// Like `MaybeOwned`, but takes explicit options. -/// -/// `MaybeOwned(Pointer)` is simply an alias of `WithOptions(Pointer, .{})`. -pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { - const info = PointerInfo.parse(Pointer, .{}); - const NonOptionalPointer = info.NonOptionalPointer; - - return struct { - const Self = @This(); - - unsafe_raw_pointer: Pointer, - unsafe_allocator: NullableAllocator, - - const Owned = owned.WithOptions(Pointer, options.toOwned()); - - /// Creates a `MaybeOwned(Pointer)` from an `Owned(Pointer)`. - /// - /// This method invalidates `owned_ptr`. - pub fn fromOwned(owned_ptr: Owned) Self { - const data, const allocator = if (comptime info.isOptional()) - owned_ptr.intoRawOwned() orelse return .initNull() - else - owned_ptr.intoRawOwned(); - return .{ - .unsafe_raw_pointer = data, - .unsafe_allocator = .init(allocator), - }; - } - - /// Creates a `MaybeOwned(Pointer)` from a raw owned pointer or slice. - /// - /// Requirements: - /// - /// * `data` must have been allocated by `allocator`. - /// * `data` must not be freed for the life of the `MaybeOwned`. - pub fn fromRawOwned(data: NonOptionalPointer, allocator: Allocator) Self { - return .fromOwned(.fromRawOwned(data, allocator)); - } - - /// Creates a `MaybeOwned(Pointer)` from borrowed slice or pointer. - /// - /// `data` must not be freed for the life of the `MaybeOwned`. 
- pub fn fromBorrowed(data: NonOptionalPointer) Self { - return .{ - .unsafe_raw_pointer = data, - .unsafe_allocator = .init(null), - }; - } - - /// Deinitializes the pointer or slice, freeing its memory if owned. - /// - /// By default, if the data is owned, `deinit` will first be called on the data itself. - /// See `Owned.deinit` for more information. - pub fn deinit(self: Self) void { - const data, const maybe_allocator = if (comptime info.isOptional()) - self.intoRaw() orelse return - else - self.intoRaw(); - if (maybe_allocator) |allocator| { - Owned.fromRawOwned(data, allocator).deinit(); - } - } - - const SelfOrPtr = if (info.isConst()) Self else *Self; - - /// Returns the inner pointer or slice. - pub fn get(self: SelfOrPtr) Pointer { - return self.unsafe_raw_pointer; - } - - /// Returns a const version of the inner pointer or slice. - /// - /// This method is not provided if the pointer is already const; use `get` in that case. - pub const getConst = if (!info.isConst()) struct { - pub fn getConst(self: Self) AddConst(Pointer) { - return self.unsafe_raw_pointer; - } - }.getConst; - - /// Converts a `MaybeOwned(Pointer)` into its constituent parts, a raw pointer and an - /// optional allocator. - /// - /// Do not use `self` or call `deinit` after calling this method. - pub const intoRaw = switch (info.isOptional()) { - // Regular, non-optional pointer (e.g., `*u8`, `[]u8`). - false => struct { - pub fn intoRaw(self: Self) struct { Pointer, ?Allocator } { - return .{ self.unsafe_raw_pointer, self.unsafe_allocator.get() }; - } - }, - // Optional pointer (e.g., `?*u8`, `?[]u8`). - true => struct { - pub fn intoRaw(self: Self) ?struct { NonOptionalPointer, ?Allocator } { - return .{ - self.unsafe_raw_pointer orelse return null, - self.unsafe_allocator.get(), - }; - } - }, - }.intoRaw; - - /// Returns whether or not the memory is owned. - pub fn isOwned(self: Self) bool { - return !self.unsafe_allocator.isNull(); - } - - /// Returns a null `MaybeOwned(Pointer)`. This method is provided only if `Pointer` is an - /// optional type. - /// - /// It is permitted, but not required, to call `deinit` on the returned value. - pub const initNull = if (info.isOptional()) struct { - pub fn initNull() Self { - return .{ - .unsafe_raw_pointer = null, - .unsafe_allocator = undefined, - }; - } - }.initNull; - }; -} - -const bun = @import("bun"); -const std = @import("std"); -const Allocator = std.mem.Allocator; -const NullableAllocator = bun.allocators.NullableAllocator; -const owned = bun.ptr.owned; - -const meta = @import("../meta.zig"); -const AddConst = meta.AddConst; -const PointerInfo = meta.PointerInfo; diff --git a/src/ptr/owned/scoped.zig b/src/ptr/owned/scoped.zig deleted file mode 100644 index 2775323bab..0000000000 --- a/src/ptr/owned/scoped.zig +++ /dev/null @@ -1,148 +0,0 @@ -/// Options for `WithOptions`. -pub const Options = struct { - // Whether to call `deinit` on the data before freeing it, if such a method exists. - deinit: bool = true, - - // The owned pointer will always use this allocator. - allocator: Allocator = bun.default_allocator, - - fn toDynamic(self: Options) owned.Options { - return .{ - .deinit = self.deinit, - .allocator = null, - }; - } -}; - -/// An owned pointer that uses `AllocationScope` when enabled. -pub fn ScopedOwned(comptime Pointer: type) type { - return WithOptions(Pointer, .{}); -} - -/// Like `ScopedOwned`, but takes explicit options. -/// -/// `ScopedOwned(Pointer)` is simply an alias of `WithOptions(Pointer, .{})`. 
-pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { - const info = PointerInfo.parse(Pointer, .{}); - const NonOptionalPointer = info.NonOptionalPointer; - - return struct { - const Self = @This(); - - unsafe_raw_pointer: Pointer, - unsafe_scope: if (AllocationScope.enabled) AllocationScope else void, - - const DynamicOwned = owned.WithOptions(Pointer, options.toDynamic()); - - /// Creates a `ScopedOwned` from a `DynamicOwned`. - /// - /// If `AllocationScope` is enabled, `owned_ptr` must have been allocated by an - /// `AllocationScope`. Otherwise, `owned_ptr` must have been allocated by - /// `options.allocator`. - /// - /// This method invalidates `owned_ptr`. - pub fn fromDynamic(owned_ptr: DynamicOwned) Self { - const data, const allocator = if (comptime info.isOptional()) - owned_ptr.intoRawOwned() orelse return .initNull() - else - owned_ptr.intoRawOwned(); - - const scope = if (comptime AllocationScope.enabled) - AllocationScope.downcast(allocator) orelse std.debug.panic( - "expected `AllocationScope` allocator", - .{}, - ); - - const parent = if (comptime AllocationScope.enabled) scope.parent() else allocator; - bun.safety.alloc.assertEq(parent, options.allocator); - return .{ - .unsafe_raw_pointer = data, - .unsafe_scope = if (comptime AllocationScope.enabled) scope, - }; - } - - /// Creates a `ScopedOwned` from a raw pointer and `AllocationScope`. - /// - /// If `AllocationScope` is enabled, `scope` must be non-null, and `data` must have - /// been allocated by `scope`. Otherwise, `data` must have been allocated by - /// `options.default_allocator`, and `scope` is ignored. - pub fn fromRawOwned(data: NonOptionalPointer, scope: ?AllocationScope) Self { - const allocator = if (comptime AllocationScope.enabled) - (scope orelse std.debug.panic( - "AllocationScope should be non-null when enabled", - .{}, - )).allocator() - else - options.allocator; - return .fromDynamic(.fromRawOwned(data, allocator)); - } - - /// Deinitializes the pointer or slice, freeing its memory if owned. - /// - /// By default, if the data is owned, `deinit` will first be called on the data itself. - pub fn deinit(self: Self) void { - self.toDynamic().deinit(); - } - - const SelfOrPtr = if (info.isConst()) Self else *Self; - - /// Returns the inner pointer or slice. - pub fn get(self: SelfOrPtr) Pointer { - return self.unsafe_raw_pointer; - } - - /// Returns a const version of the inner pointer or slice. - /// - /// This method is not provided if the pointer is already const; use `get` in that case. - pub const getConst = if (!info.isConst()) struct { - pub fn getConst(self: Self) AddConst(Pointer) { - return self.unsafe_raw_pointer; - } - }.getConst; - - /// Converts an owned pointer into a raw pointer. - /// - /// This method invalidates `self`. - pub fn intoRawOwned(self: Self) Pointer { - return self.unsafe_raw_pointer; - } - - /// Returns a null `ScopedOwned`. This method is provided only if `Pointer` is an optional - /// type. - /// - /// It is permitted, but not required, to call `deinit` on the returned value. - pub const initNull = if (info.isOptional()) struct { - pub fn initNull() Self { - return .{ - .unsafe_raw_pointer = null, - .unsafe_allocator = undefined, - }; - } - }.initNull; - - /// Converts a `ScopedOwned` into a `DynamicOwned`. - /// - /// This method invalidates `self`. 
- pub fn toDynamic(self: Self) DynamicOwned { - const data = if (comptime info.isOptional()) - self.unsafe_raw_pointer orelse return .initNull() - else - self.unsafe_raw_pointer; - const allocator = if (comptime AllocationScope.enabled) - self.unsafe_scope.allocator() - else - options.allocator; - return .fromRawOwned(data, allocator); - } - }; -} - -const bun = @import("bun"); -const std = @import("std"); -const AllocationScope = bun.allocators.AllocationScope; -const Allocator = std.mem.Allocator; -const owned = bun.ptr.owned; - -const meta = @import("../meta.zig"); -const AddConst = meta.AddConst; -const PointerInfo = meta.PointerInfo; diff --git a/src/ptr/ref_count.zig b/src/ptr/ref_count.zig index 46b17cc048..c0433d5da4 100644 --- a/src/ptr/ref_count.zig +++ b/src/ptr/ref_count.zig @@ -389,12 +389,15 @@ pub fn RefPtr(T: type) type { } fn trackImpl(ref: @This(), scope: *AllocationScope, ret_addr: usize) void { + if (!comptime enable_debug) return; const debug = &ref.data.ref_count.debug; - debug.allocation_scope = &scope; + debug.lock.lock(); + defer debug.lock.unlock(); + debug.allocation_scope = scope; scope.trackExternalAllocation( std.mem.asBytes(ref.data), ret_addr, - .{ .ref_count = debug }, + .{ .ptr = debug, .vtable = debug.getScopeExtraVTable() }, ); } @@ -498,17 +501,25 @@ pub fn DebugData(thread_safe: bool) type { debug.map.clearAndFree(bun.default_allocator); debug.frees.clearAndFree(bun.default_allocator); if (debug.allocation_scope) |scope| { - _ = scope.trackExternalFree(data, ret_addr); + scope.trackExternalFree(data, ret_addr) catch {}; } } - // Trait function for AllocationScope - pub fn onAllocationLeak(debug: *@This(), data: []u8) void { + fn onAllocationLeak(ptr: *anyopaque, data: []u8) void { + const debug: *@This() = @ptrCast(@alignCast(ptr)); debug.lock.lock(); defer debug.lock.unlock(); const count = debug.count_pointer.?; debug.dump(null, data.ptr, if (thread_safe) count.load(.seq_cst) else count.*); } + + fn getScopeExtraVTable(_: *@This()) *const allocation_scope.Extra.VTable { + return &scope_extra_vtable; + } + + const scope_extra_vtable: allocation_scope.Extra.VTable = .{ + .onAllocationLeak = onAllocationLeak, + }; }; } @@ -561,6 +572,8 @@ const unique_symbol = opaque {}; const std = @import("std"); const bun = @import("bun"); -const AllocationScope = bun.AllocationScope; const assert = bun.assert; const enable_debug = bun.Environment.isDebug; + +const allocation_scope = bun.allocators.allocation_scope; +const AllocationScope = allocation_scope.AllocationScope; diff --git a/src/ptr/shared.zig b/src/ptr/shared.zig index 4d4baafed8..c3e8adaa8a 100644 --- a/src/ptr/shared.zig +++ b/src/ptr/shared.zig @@ -2,8 +2,10 @@ const shared = @This(); /// Options for `WithOptions`. pub const Options = struct { - /// Whether to call `deinit` on the data before freeing it, if such a method exists. - deinit: bool = true, + /// The type of allocator the shared pointer uses. If this is a zero-sized type (such as + /// `bun.DefaultAllocator`), the pointer doesn't need to store an allocator at runtime, which + /// saves a small amount of memory, but shared pointers that use different allocator types + /// will themselves be different types. + Allocator: type = bun.DefaultAllocator, /// Whether to use an atomic type to store the ref count. This makes the shared pointer /// thread-safe, assuming the underlying data is also thread-safe. @@ -11,12 +13,14 @@ /// Whether to allow weak pointers to be created. This uses slightly more memory but is often /// negligible due to padding.
- allow_weak: bool = true, + /// + /// There is no point in enabling this if `deinit` is false, or if your data type doesn't have + /// a `deinit` method, since the sole purpose of weak pointers is to allow `deinit` to be called + /// before the memory is freed. + allow_weak: bool = false, - // If non-null, the shared pointer will always use the provided allocator. This saves a small - // amount of memory, but it means the shared pointer will be a different type from shared - // pointers that use different allocators. - allocator: ?Allocator = bun.default_allocator, + /// Whether to call `deinit` on the data before freeing it, if such a method exists. + deinit: bool = true, }; /// A shared pointer, allocated using the default allocator. @@ -27,7 +31,7 @@ pub const Options = struct { /// This type is not thread-safe: all pointers to the same piece of data must live on the same /// thread. See `AtomicShared` for a thread-safe version. pub fn Shared(comptime Pointer: type) type { - return WithOptions(Pointer, .{}); + return SharedIn(Pointer, bun.DefaultAllocator); } /// A thread-safe shared pointer, allocated using the default allocator. @@ -36,24 +40,28 @@ pub fn Shared(comptime Pointer: type) type { /// synchronization of the data itself. You must ensure proper concurrency using mutexes or /// atomics. pub fn AtomicShared(comptime Pointer: type) type { - return WithOptions(Pointer, .{ .atomic = true }); + return AtomicSharedIn(Pointer, bun.DefaultAllocator); } -/// A shared pointer allocated using any allocator. -pub fn Dynamic(comptime Pointer: type) type { - return WithOptions(Pointer, .{ .allocator = null }); +/// A shared pointer allocated using a specific type of allocator. +/// +/// The requirements for `Allocator` are the same as `bun.ptr.OwnedIn`. +/// `Allocator` may be `std.mem.Allocator` to allow any kind of allocator. +pub fn SharedIn(comptime Pointer: type, comptime Allocator: type) type { + return WithOptions(Pointer, .{ .Allocator = Allocator }); } -/// A thread-safe shared pointer allocated using any allocator. -pub fn DynamicAtomic(comptime Pointer: type) type { +/// A thread-safe shared pointer allocated using a specific type of allocator. +pub fn AtomicSharedIn(comptime Pointer: type, comptime Allocator: type) type { return WithOptions(Pointer, .{ + .Allocator = Allocator, .atomic = true, - .allocator = null, }); } /// Like `Shared`, but takes explicit options. pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { + const Allocator = options.Allocator; const info = parsePointer(Pointer); const Child = info.Child; const NonOptionalPointer = info.NonOptionalPointer; @@ -68,17 +76,16 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { "shared.Options.allow_weak is useless if `deinit` is false", .{}, ); - bun.assertf( - std.meta.hasFn(Child, "deinit"), - "shared.Options.allow_weak is useless if type has no `deinit` method", - .{}, - ); + // Weak pointers are useless if `Child` doesn't have a `deinit` method, but don't error + // in this case, as that could break generic code. It should be allowed to use + // `WithOptions(*T, .{ .allow_weak = true }).Weak` if `T` might sometimes have a `deinit` + // method. } return struct { const Self = @This(); - unsafe_pointer: Pointer, + #pointer: Pointer, /// A weak pointer. /// @@ -87,43 +94,35 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { /// data will have been deinitialized in that case. 
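+ ///
+ /// Sketch of weak-pointer usage (hypothetical `Thing` type; `allow_weak` now
+ /// defaults to `false`, so it must be enabled explicitly):
+ ///
+ /// ```
+ /// const SharedThing = WithOptions(*Thing, .{ .allow_weak = true });
+ /// var strong = SharedThing.new(.{});
+ /// var weak = strong.cloneWeak();
+ /// strong.deinit(); // `Thing.deinit` runs here (last strong pointer)
+ /// weak.deinit(); // memory is freed once the last weak pointer is gone
+ /// ```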
pub const Weak = if (options.allow_weak) shared.Weak(Pointer, options); - pub const alloc = (if (options.allocator) |allocator| struct { - /// Allocates a shared value using `options.allocator`. - /// - /// Call `deinit` when done. - pub fn alloc(value: Child) Allocator.Error!Self { - return .allocImpl(allocator, value); - } - } else struct { - /// Allocates a shared value using the provided allocator. - /// - /// Call `deinit` when done. - pub fn alloc(allocator: Allocator, value: Child) Allocator.Error!Self { - return .allocImpl(allocator, value); - } - }).alloc; - - const supports_default_allocator = if (options.allocator) |allocator| - bun.allocators.isDefault(allocator) - else - true; - - /// Allocates a shared value using the default allocator. This function calls - /// `bun.outOfMemory` if memory allocation fails. + /// Allocates a shared value with a default-initialized `Allocator`. /// /// Call `deinit` when done. - pub const new = if (supports_default_allocator) struct { - pub fn new(value: Child) Self { - return bun.handleOom(Self.allocImpl(bun.default_allocator, value)); - } - }.new; + pub fn alloc(value: Child) AllocError!Self { + return .allocImpl(bun.memory.initDefault(Allocator), value); + } + + /// Allocates a shared value using the provided allocator. + /// + /// Call `deinit` when done. + pub fn allocIn(value: Child, allocator: Allocator) AllocError!Self { + return .allocImpl(allocator, value); + } + + /// Allocates a shared value, calling `bun.outOfMemory` if allocation fails. + /// + /// It must be possible to default-initialize `Allocator`. + /// + /// Call `deinit` when done. + pub fn new(value: Child) Self { + return bun.handleOom(Self.alloc(value)); + } /// Returns a pointer to the shared value. /// /// This pointer should usually not be stored directly in a struct, as it could become /// invalid once all the shared pointers are deinitialized. pub fn get(self: Self) Pointer { - return self.unsafe_pointer; + return self.#pointer; } /// Clones this shared pointer. This clones the pointer, not the data; the new pointer @@ -134,24 +133,26 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { else self.getData(); data.incrementStrong(); - return .{ .unsafe_pointer = &data.value }; + return .{ .#pointer = &data.value }; } /// Creates a weak clone of this shared pointer. pub const cloneWeak = if (options.allow_weak) struct { pub fn cloneWeak(self: Self) Self.Weak { - return .{ .unsafe_pointer = self.unsafe_pointer }; + return .{ .#pointer = self.#pointer }; } }.cloneWeak; - /// Deinitializes this shared pointer. + /// Deinitializes this shared pointer. This does not deinitialize the data itself until all + /// other shared pointers have been deinitialized. /// /// When no more (strong) shared pointers point to a given piece of data, the data is /// deinitialized. Once no weak pointers exist either, the memory is freed. /// - /// The default behavior of calling `deinit` on the data before freeing it can be changed in - /// the `options`. - pub fn deinit(self: Self) void { + /// This method invalidates `self`. The default behavior of calling `deinit` on the data can + /// be changed in the `options`. + pub fn deinit(self: *Self) void { + defer self.* = undefined; const data = if (comptime info.isOptional()) self.getData() orelse return else @@ -165,7 +166,7 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { /// It is permitted, but not required, to call `deinit` on the returned value. 
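+        ///
+        /// For example (sketch; `Foo` is a hypothetical payload type):
+        ///
+        /// ```
+        /// var maybe: Shared(?*Foo) = .initNull();
+        /// defer maybe.deinit(); // Permitted even while null: deinit simply returns early.
+        /// ```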
pub const initNull = if (info.isOptional()) struct { pub fn initNull() Self { - return .{ .unsafe_pointer = null }; + return .{ .#pointer = null }; } }.initNull; @@ -177,7 +178,7 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { /// `deinit` on `self`. pub const take = if (info.isOptional()) struct { pub fn take(self: *Self) ?SharedNonOptional { - return .{ .unsafe_pointer = self.unsafe_pointer orelse return null }; + return .{ .#pointer = self.#pointer orelse return null }; } }.take; @@ -187,8 +188,9 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { /// /// This method invalidates `self`. pub const toOptional = if (!info.isOptional()) struct { - pub fn toOptional(self: Self) SharedOptional { - return .{ .unsafe_pointer = self.unsafe_pointer }; + pub fn toOptional(self: *Self) SharedOptional { + defer self.* = undefined; + return .{ .#pointer = self.#pointer }; } }.toOptional; @@ -224,11 +226,11 @@ pub fn WithOptions(comptime Pointer: type, comptime options: Options) type { fn allocImpl(allocator: Allocator, value: Child) !Self { const data = try Data.alloc(allocator, value); - return .{ .unsafe_pointer = &data.value }; + return .{ .#pointer = &data.value }; } fn getData(self: Self) if (info.isOptional()) ?*Data else *Data { - return .fromValuePtr(self.unsafe_pointer); + return .fromValuePtr(self.#pointer); } }; } @@ -240,7 +242,7 @@ fn Weak(comptime Pointer: type, comptime options: Options) type { const Data = FullData(Child, options); bun.assertf( - options.allow_weak and options.deinit and std.meta.hasFn(Child, "deinit"), + options.allow_weak and options.deinit, "options incompatible with shared.Weak", .{}, ); @@ -248,7 +250,7 @@ fn Weak(comptime Pointer: type, comptime options: Options) type { return struct { const Self = @This(); - unsafe_pointer: Pointer, + #pointer: Pointer, const SharedNonOptional = WithOptions(NonOptionalPointer, options); @@ -262,7 +264,7 @@ fn Weak(comptime Pointer: type, comptime options: Options) type { self.getData(); if (!data.tryIncrementStrong()) return null; data.incrementWeak(); - return .{ .unsafe_pointer = &data.value }; + return .{ .#pointer = &data.value }; } /// Clones this weak pointer. @@ -272,11 +274,14 @@ fn Weak(comptime Pointer: type, comptime options: Options) type { else self.getData(); data.incrementWeak(); - return .{ .unsafe_pointer = &data.value }; + return .{ .#pointer = &data.value }; } /// Deinitializes this weak pointer. - pub fn deinit(self: Self) void { + /// + /// This method invalidates `self`. + pub fn deinit(self: *Self) void { + defer self.* = undefined; const data = if (comptime info.isOptional()) self.getData() orelse return else @@ -290,7 +295,7 @@ fn Weak(comptime Pointer: type, comptime options: Options) type { /// It is permitted, but not required, to call `deinit` on the returned value. pub const initNull = if (info.isOptional()) struct { pub fn initNull() Self { - return .{ .unsafe_pointer = null }; + return .{ .#pointer = null }; } }.initNull; @@ -299,7 +304,7 @@ fn Weak(comptime Pointer: type, comptime options: Options) type { /// This method is provided only if `Pointer` is an optional type. 
         pub const isNull = if (info.isOptional()) struct {
             pub fn isNull(self: Self) bool {
-                return self.unsafe_pointer == null;
+                return self.#pointer == null;
             }
         }.isNull;
 
@@ -338,12 +343,14 @@ fn Weak(comptime Pointer: type, comptime options: Options) type {
         }
 
         fn getData(self: Self) if (info.isOptional()) ?*Data else *Data {
-            return .fromValuePtr(self.unsafe_pointer);
+            return .fromValuePtr(self.#pointer);
         }
     };
 }
 
 fn FullData(comptime Child: type, comptime options: Options) type {
+    const Allocator = options.Allocator;
+
     return struct {
         const Self = @This();
 
@@ -352,7 +359,7 @@ fn FullData(comptime Child: type, comptime options: Options) type {
         /// Weak count is always >= 1 as long as strong references exist.
         /// When the last strong pointer is deinitialized, this value is decremented.
         weak_count: if (options.allow_weak) Count else void = if (options.allow_weak) .init(1),
-        allocator: if (options.allocator == null) Allocator else void,
+        allocator: Allocator,
         thread_lock: if (options.atomic) void else bun.safety.ThreadLock,
 
         const Count = if (options.atomic) AtomicCount else NonAtomicCount;
@@ -369,9 +376,9 @@ fn FullData(comptime Child: type, comptime options: Options) type {
         }
 
         pub fn alloc(allocator: Allocator, value: Child) !*Self {
-            return bun.allocators.create(Self, allocator, .{
+            return bun.memory.create(Self, bun.allocators.asStd(allocator), .{
                 .value = value,
-                .allocator = if (comptime options.allocator == null) allocator,
+                .allocator = allocator,
                 .thread_lock = if (comptime !options.atomic) .initLocked(),
             });
         }
@@ -422,18 +429,13 @@ fn FullData(comptime Child: type, comptime options: Options) type {
         }
 
         fn deinitValue(self: *Self) void {
-            if (comptime options.deinit and std.meta.hasFn(Child, "deinit")) {
-                self.value.deinit();
+            if (comptime options.deinit) {
+                bun.memory.deinit(&self.value);
             }
         }
 
-        fn getAllocator(self: Self) Allocator {
-            return (comptime options.allocator) orelse self.allocator;
-        }
-
         fn destroy(self: *Self) void {
-            self.* = undefined;
-            bun.allocators.destroy(self.getAllocator(), self);
+            bun.memory.destroy(bun.allocators.asStd(self.allocator), self);
         }
 
         fn assertThreadSafety(self: Self) void {
@@ -465,6 +467,7 @@ const NonAtomicCount = struct {
     pub fn tryIncrement(self: *Self) bool {
         if (self.value == 0) return false;
         self.increment();
+        return true;
     }
 
     /// Returns the new number of references.
@@ -529,8 +532,8 @@ fn parsePointer(comptime Pointer: type) PointerInfo {
 
 const bun = @import("bun");
 const std = @import("std");
-const Allocator = std.mem.Allocator;
 const AtomicOrder = std.builtin.AtomicOrder;
+const AllocError = std.mem.Allocator.Error;
 
 const meta = @import("./meta.zig");
 const PointerInfo = meta.PointerInfo;
diff --git a/src/safety.zig b/src/safety.zig
index 8e930c1a01..b7691923a6 100644
--- a/src/safety.zig
+++ b/src/safety.zig
@@ -1,4 +1,4 @@
 pub const alloc = @import("./safety/alloc.zig");
-pub const AllocPtr = alloc.AllocPtr;
+pub const CheckedAllocator = alloc.CheckedAllocator;
 pub const CriticalSection = @import("./safety/CriticalSection.zig");
 pub const ThreadLock = @import("./safety/ThreadLock.zig");
diff --git a/src/safety/alloc.zig b/src/safety/alloc.zig
index 16acc0998c..3c544496d8 100644
--- a/src/safety/alloc.zig
+++ b/src/safety/alloc.zig
@@ -24,7 +24,7 @@ const arena_vtable = blk: {
 
 /// Returns true if `alloc` definitely has a valid `.ptr`.
fn hasPtr(alloc: Allocator) bool { return alloc.vtable == arena_vtable or - bun.AllocationScope.downcast(alloc) != null or + bun.allocators.allocation_scope.isInstance(alloc) or bun.MemoryReportingAllocator.isInstance(alloc) or ((comptime bun.Environment.isLinux) and LinuxMemFdAllocator.isInstance(alloc)) or bun.MaxHeapAllocator.isInstance(alloc) or @@ -37,92 +37,114 @@ fn hasPtr(alloc: Allocator) bool { bun.String.isWTFAllocator(alloc); } +/// Returns true if the allocators are definitely different. +fn guaranteedMismatch(alloc1: Allocator, alloc2: Allocator) bool { + if (alloc1.vtable != alloc2.vtable) return true; + const ptr1 = if (hasPtr(alloc1)) alloc1.ptr else return false; + const ptr2 = if (hasPtr(alloc2)) alloc2.ptr else return false; + return ptr1 != ptr2; +} + /// Asserts that two allocators are equal (in `ci_assert` builds). /// /// This function may have false negatives; that is, it may fail to detect that two allocators /// are different. However, in practice, it's a useful safety check. pub fn assertEq(alloc1: Allocator, alloc2: Allocator) void { + assertEqFmt(alloc1, alloc2, "allocators do not match", .{}); +} + +/// Asserts that two allocators are equal, with a formatted message. +pub fn assertEqFmt( + alloc1: Allocator, + alloc2: Allocator, + comptime format: []const u8, + args: anytype, +) void { if (comptime !enabled) return; - bun.assertf( - alloc1.vtable == alloc2.vtable, - "allocators do not match (vtables differ: {*} and {*})", - .{ alloc1.vtable, alloc2.vtable }, - ); - const ptr1 = if (hasPtr(alloc1)) alloc1.ptr else return; - const ptr2 = if (hasPtr(alloc2)) alloc2.ptr else return; - bun.assertf( - ptr1 == ptr2, - "allocators do not match (vtables are both {*} but pointers differ: {*} and {*})", - .{ alloc1.vtable, ptr1, ptr2 }, - ); + blk: { + if (alloc1.vtable != alloc2.vtable) { + bun.Output.err( + "allocator mismatch", + "vtables differ: {*} and {*}", + .{ alloc1.vtable, alloc2.vtable }, + ); + break :blk; + } + const ptr1 = if (hasPtr(alloc1)) alloc1.ptr else return; + const ptr2 = if (hasPtr(alloc2)) alloc2.ptr else return; + if (ptr1 == ptr2) return; + bun.Output.err( + "allocator mismatch", + "vtables are both {*} but pointers differ: {*} and {*}", + .{ alloc1.vtable, ptr1, ptr2 }, + ); + } + bun.assertf(false, format, args); } -fn allocToPtr(alloc: Allocator) *anyopaque { - return if (hasPtr(alloc)) alloc.ptr else @ptrCast(@constCast(alloc.vtable)); -} - -/// Use this in unmanaged containers to ensure multiple allocators aren't being used with the -/// same container. Each method of the container that accepts an allocator parameter should call -/// either `AllocPtr.set` (for non-const methods) or `AllocPtr.assertEq` (for const methods). -/// (Exception: methods like `clone` which explicitly accept any allocator should not call any -/// methods on this type.) -pub const AllocPtr = struct { +/// Use this in unmanaged containers to ensure multiple allocators aren't being used with the same +/// container. Each method of the container that accepts an allocator parameter should call either +/// `CheckedAllocator.set` (for non-const methods) or `CheckedAllocator.assertEq` (for const +/// methods). (Exception: methods like `clone` which explicitly accept any allocator should not call +/// any methods on this type.) 
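+///
+/// A sketch of the intended pattern (a hypothetical unmanaged container, not part of this
+/// change):
+///
+/// ```
+/// const List = struct {
+///     items: std.ArrayListUnmanaged(u8) = .{},
+///     alloc_check: bun.safety.CheckedAllocator = .{},
+///
+///     pub fn append(self: *List, allocator: std.mem.Allocator, byte: u8) !void {
+///         self.alloc_check.set(allocator); // non-const method: record or verify
+///         try self.items.append(allocator, byte);
+///     }
+///
+///     pub fn deinit(self: *List, allocator: std.mem.Allocator) void {
+///         self.alloc_check.set(allocator); // panics in checked builds on a mismatch
+///         self.items.deinit(allocator);
+///     }
+/// };
+/// ```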
+pub const CheckedAllocator = struct { const Self = @This(); - ptr: if (enabled) ?*anyopaque else void = if (enabled) null, - trace: if (traces_enabled) StoredTrace else void = if (traces_enabled) StoredTrace.empty, + #allocator: if (enabled) NullableAllocator else void = if (enabled) .init(null), + #trace: if (traces_enabled) StoredTrace else void = if (traces_enabled) StoredTrace.empty, pub fn init(alloc: Allocator) Self { - var self = Self{}; + var self: Self = .{}; self.set(alloc); return self; } pub fn set(self: *Self, alloc: Allocator) void { if (comptime !enabled) return; - const ptr = allocToPtr(alloc); - if (self.ptr == null) { - self.ptr = ptr; + if (self.#allocator.isNull()) { + self.#allocator = .init(alloc); if (comptime traces_enabled) { - self.trace = StoredTrace.capture(@returnAddress()); + self.#trace = StoredTrace.capture(@returnAddress()); } } else { - self.assertPtrEq(ptr); + self.assertEq(alloc); } } pub fn assertEq(self: Self, alloc: Allocator) void { if (comptime !enabled) return; - self.assertPtrEq(allocToPtr(alloc)); - } + const old_alloc = self.#allocator.get() orelse return; + if (!guaranteedMismatch(old_alloc, alloc)) return; - fn assertPtrEq(self: Self, ptr: *anyopaque) void { - const old_ptr = self.ptr orelse return; - if (old_ptr == ptr) return; + bun.Output.err( + "allocator mismatch", + "cannot use multiple allocators with the same collection", + .{}, + ); if (comptime traces_enabled) { bun.Output.err( "allocator mismatch", "collection first used here, with a different allocator:", .{}, ); - var trace = self.trace; + var trace = self.#trace; bun.crash_handler.dumpStackTrace( trace.trace(), .{ .frame_count = 10, .stop_at_jsc_llint = true }, ); } - std.debug.panic( - "cannot use multiple allocators with the same collection (got {*}, expected {*})", - .{ ptr, old_ptr }, - ); + // Assertion will always fail. We want the error message. 
+ bun.safety.alloc.assertEq(old_alloc, alloc); } }; const bun = @import("bun"); const std = @import("std"); const Allocator = std.mem.Allocator; -const LinuxMemFdAllocator = bun.allocators.LinuxMemFdAllocator; const StoredTrace = bun.crash_handler.StoredTrace; const enabled = bun.Environment.ci_assert; const traces_enabled = bun.Environment.isDebug; + +const LinuxMemFdAllocator = bun.allocators.LinuxMemFdAllocator; +const NullableAllocator = bun.allocators.NullableAllocator; diff --git a/src/shell/AllocScope.zig b/src/shell/AllocScope.zig index e05a6adc1b..4e82e18379 100644 --- a/src/shell/AllocScope.zig +++ b/src/shell/AllocScope.zig @@ -21,7 +21,8 @@ pub fn endScope(this: *AllocScope) void { pub fn leakSlice(this: *AllocScope, memory: anytype) void { if (comptime bun.Environment.enableAllocScopes) { _ = @typeInfo(@TypeOf(memory)).pointer; - bun.assert(!this.__scope.trackExternalFree(memory, null)); + this.__scope.trackExternalFree(memory, null) catch |err| + std.debug.panic("invalid free: {}", .{err}); } } diff --git a/src/shell/states/Base.zig b/src/shell/states/Base.zig index 3add0ec5fc..a72dfebd96 100644 --- a/src/shell/states/Base.zig +++ b/src/shell/states/Base.zig @@ -53,7 +53,8 @@ const AllocScope = union(enum) { pub fn leakSlice(this: *AllocScope, memory: anytype) void { if (comptime bun.Environment.enableAllocScopes) { _ = @typeInfo(@TypeOf(memory)).pointer; - bun.assert(!this.scopedAllocator().trackExternalFree(memory, null)); + this.scopedAllocator().trackExternalFree(memory, null) catch |err| + std.debug.panic("invalid free: {}", .{err}); } } }; diff --git a/src/string/MutableString.zig b/src/string/MutableString.zig index 163808dd91..b4e2da39ea 100644 --- a/src/string/MutableString.zig +++ b/src/string/MutableString.zig @@ -245,13 +245,13 @@ pub fn toOwnedSlice(self: *MutableString) []u8 { } pub fn toDynamicOwned(self: *MutableString) DynamicOwned([]u8) { - return .fromRawOwned(self.toOwnedSlice(), self.allocator); + return .fromRawIn(self.toOwnedSlice(), self.allocator); } /// `self.allocator` must be `bun.default_allocator`. pub fn toDefaultOwned(self: *MutableString) Owned([]u8) { bun.safety.alloc.assertEq(self.allocator, bun.default_allocator); - return .fromRawOwned(self.toOwnedSlice()); + return .fromRaw(self.toOwnedSlice()); } pub fn slice(self: *MutableString) []u8 { diff --git a/src/threading.zig b/src/threading.zig index 504e6af054..90c6579a8f 100644 --- a/src/threading.zig +++ b/src/threading.zig @@ -1,8 +1,10 @@ pub const Mutex = @import("./threading/Mutex.zig"); pub const Futex = @import("./threading/Futex.zig"); pub const Condition = @import("./threading/Condition.zig"); -pub const GuardedValue = @import("./threading/guarded_value.zig").GuardedValue; -pub const DebugGuardedValue = @import("./threading/guarded_value.zig").DebugGuardedValue; +pub const guarded = @import("./threading/guarded.zig"); +pub const Guarded = guarded.Guarded; +pub const GuardedBy = guarded.GuardedBy; +pub const DebugGuarded = guarded.Debug; pub const WaitGroup = @import("./threading/WaitGroup.zig"); pub const ThreadPool = @import("./threading/ThreadPool.zig"); pub const Channel = @import("./threading/channel.zig").Channel; diff --git a/src/threading/guarded.zig b/src/threading/guarded.zig new file mode 100644 index 0000000000..147cc409fa --- /dev/null +++ b/src/threading/guarded.zig @@ -0,0 +1,72 @@ +/// A wrapper around a mutex, and a value protected by the mutex. +/// This type uses `bun.threading.Mutex` internally. 
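+///
+/// A usage sketch (hypothetical counter guarded by the mutex):
+///
+/// ```
+/// var counter: bun.threading.Guarded(u32) = .init(0);
+/// {
+///     const value = counter.lock();
+///     defer counter.unlock();
+///     value.* += 1;
+/// }
+/// ```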
+pub fn Guarded(comptime Value: type) type { + return GuardedBy(Value, bun.threading.Mutex); +} + +/// A wrapper around a mutex, and a value protected by the mutex. +/// `Mutex` should have `lock` and `unlock` methods. +pub fn GuardedBy(comptime Value: type, comptime Mutex: type) type { + return struct { + const Self = @This(); + + /// The raw value. Don't use this if there might be concurrent accesses. + unsynchronized_value: Value, + #mutex: Mutex, + + /// Creates a guarded value with a default-initialized mutex. + pub fn init(value: Value) Self { + return .initWithMutex(value, bun.memory.initDefault(Mutex)); + } + + /// Creates a guarded value with the given mutex. + pub fn initWithMutex(value: Value, mutex: Mutex) Self { + return .{ + .unsynchronized_value = value, + .#mutex = mutex, + }; + } + + /// Locks the mutex and returns a pointer to the value. Remember to call `unlock`! + pub fn lock(self: *Self) *Value { + self.#mutex.lock(); + return &self.unsynchronized_value; + } + + /// Unlocks the mutex. Don't use any pointers returned by `lock` after calling this method! + pub fn unlock(self: *Self) void { + self.#mutex.unlock(); + } + + /// Returns the inner unprotected value. + /// + /// You must ensure that no other threads could be concurrently using `self`. This method + /// invalidates `self`, so you must ensure `self` is not used on any thread after calling + /// this method. + pub fn intoUnprotected(self: *Self) Value { + defer self.* = undefined; + bun.memory.deinit(&self.#mutex); + return self.unsynchronized_value; + } + + /// Deinitializes the inner value and mutex. + /// + /// You must ensure that no other threads could be concurrently using `self`. This method + /// invalidates `self`. + /// + /// If neither `Value` nor `Mutex` has a `deinit` method, it is not necessary to call this + /// method. + pub fn deinit(self: *Self) void { + bun.memory.deinit(&self.unsynchronized_value); + bun.memory.deinit(&self.#mutex); + self.* = undefined; + } + }; +} + +/// Uses `bun.safety.ThreadLock`. +pub fn Debug(comptime Value: type) type { + return GuardedBy(Value, bun.safety.ThreadLock); +} + +const bun = @import("bun"); diff --git a/src/threading/guarded_value.zig b/src/threading/guarded_value.zig deleted file mode 100644 index 832e4c155a..0000000000 --- a/src/threading/guarded_value.zig +++ /dev/null @@ -1,32 +0,0 @@ -/// A wrapper around a mutex, and a value protected by the mutex. -/// `Mutex` should have `lock` and `unlock` methods and should be initializable with `.{}`. -pub fn GuardedValue(comptime Value: type, comptime Mutex: type) type { - return struct { - const Self = @This(); - - /// The raw value. Don't use this if there might be concurrent accesses. - unsynchronized_value: Value, - mutex: Mutex, - - pub fn init(value: Value, mutex: Mutex) Self { - return .{ .unsynchronized_value = value, .mutex = mutex }; - } - - /// Lock the mutex and return a pointer to the value. Remember to call `unlock`! - pub fn lock(self: *Self) *Value { - self.mutex.lock(); - return &self.unsynchronized_value; - } - - /// Unlock the mutex. Don't use any pointers returned by `lock` after calling this method! - pub fn unlock(self: *Self) void { - self.mutex.unlock(); - } - }; -} - -pub fn DebugGuardedValue(comptime Value: type) type { - return GuardedValue(Value, bun.safety.ThreadLock); -} - -const bun = @import("bun");