diff --git a/src/bun.js/event_loop/ConcurrentTask.zig b/src/bun.js/event_loop/ConcurrentTask.zig
index bbb872c51a..bdc56a692c 100644
--- a/src/bun.js/event_loop/ConcurrentTask.zig
+++ b/src/bun.js/event_loop/ConcurrentTask.zig
@@ -28,6 +28,20 @@ pub const PackedNext = packed struct(u64) {
         return .{};
     }
 
+    pub fn initPreserveAutoDelete(self: PackedNext, ptr: ?*ConcurrentTask) PackedNext {
+        if (ptr) |p| {
+            const addr = @intFromPtr(p);
+            return .{
+                .ptr_bits = @as(u63, @truncate(addr)),
+                .auto_delete = self.auto_delete,
+            };
+        }
+        return .{
+            .ptr_bits = 0,
+            .auto_delete = self.auto_delete,
+        };
+    }
+
     pub fn get(self: PackedNext) ?*ConcurrentTask {
         if (self.ptr_bits == 0) return null;
         const ptr: u64 = @as(u64, self.ptr_bits);
@@ -39,6 +53,8 @@ pub const PackedNext = packed struct(u64) {
     }
 
     pub fn setAutoDelete(self: *PackedNext, value: bool) void {
+        // Non-atomic write is safe because this is only called during initialization
+        // before the task is shared with other threads
         self.auto_delete = value;
     }
 
diff --git a/src/threading/unbounded_queue.zig b/src/threading/unbounded_queue.zig
index 5371a8259e..0c83fb51ab 100644
--- a/src/threading/unbounded_queue.zig
+++ b/src/threading/unbounded_queue.zig
@@ -24,8 +24,7 @@ pub fn UnboundedQueuePacked(comptime T: type, comptime next_field: meta.FieldEnu
 
         fn setter(instance: *T, value: ?*T) void {
             if (packed_or_unpacked == .@"packed") {
-                const FieldType = @TypeOf(@field(instance, next_name));
-                @field(instance, next_name) = FieldType.init(value);
+                @field(instance, next_name) = @field(instance, next_name).initPreserveAutoDelete(value);
             } else {
                 @field(instance, next_name) = value;
             }
@@ -72,26 +71,23 @@ pub fn UnboundedQueuePacked(comptime T: type, comptime next_field: meta.FieldEnu
                }
                assertf(item == last, "`last` should be reachable from `first`", .{});
            }
-            const prev_next_ptr = if (self.back.swap(last, .acq_rel)) |old_back| blk: {
-                if (packed_or_unpacked == .@"packed") {
-                    break :blk @as(*u64, @ptrCast(&@field(old_back, next)));
-                } else {
-                    break :blk &@field(old_back, next);
-                }
-            } else blk: {
-                if (packed_or_unpacked == .@"packed") {
-                    break :blk @as(*u64, @ptrCast(&self.front.raw));
-                } else {
-                    break :blk &self.front.raw;
-                }
-            };
-            if (packed_or_unpacked == .@"packed") {
-                const FieldType = @TypeOf(@field(first, next_name));
-                const packed_val = FieldType.init(first);
-                @atomicStore(u64, @as(*u64, @ptrCast(prev_next_ptr)), @bitCast(packed_val), .release);
+            if (self.back.swap(last, .acq_rel)) |old_back| {
+                // There was a previous back item - set its `next` field to point to `first`
+                if (packed_or_unpacked == .@"packed") {
+                    // We need to preserve old_back's auto_delete flag while updating the pointer
+                    const prev_next_ptr = @as(*u64, @ptrCast(&@field(old_back, next)));
+                    const current_packed = @atomicLoad(u64, prev_next_ptr, .monotonic);
+                    const current_typed = @as(@TypeOf(@field(first, next_name)), @bitCast(current_packed));
+                    const new_packed = current_typed.initPreserveAutoDelete(first);
+                    @atomicStore(u64, prev_next_ptr, @bitCast(new_packed), .release);
+                } else {
+                    @atomicStore(?*T, &@field(old_back, next), first, .release);
+                }
            } else {
-                @atomicStore(?*T, prev_next_ptr, first, .release);
+                // Queue was empty - set front to point to `first`
+                // front/back atomics hold plain pointers, not PackedNext
+                @atomicStore(?*T, &self.front.raw, first, .release);
            }
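
Reviewer note (not part of the patch): below is a minimal, self-contained sketch of the
pointer-plus-flag layout that PackedNext relies on, for reviewing the bit manipulation
above. Node, PackedPtr, and withPtr are hypothetical stand-ins for ConcurrentTask,
PackedNext, and initPreserveAutoDelete; the field split (63 pointer bits plus one flag
bit) is assumed from the u63 truncation in the diff, and a 64-bit target is assumed.

    // Sketch only, assuming a 64-bit target; all names here are hypothetical.
    const std = @import("std");

    const Node = struct { value: u32 };

    const PackedPtr = packed struct(u64) {
        // Low 63 bits hold the address; the remaining bit carries the flag,
        // mirroring the assumed ptr_bits/auto_delete split in PackedNext.
        ptr_bits: u63 = 0,
        flag: bool = false,

        fn init(ptr: ?*Node) PackedPtr {
            const addr: usize = if (ptr) |p| @intFromPtr(p) else 0;
            return .{ .ptr_bits = @as(u63, @truncate(addr)) };
        }

        // Analogous to initPreserveAutoDelete: replace the pointer,
        // keep whatever flag value is already stored.
        fn withPtr(self: PackedPtr, ptr: ?*Node) PackedPtr {
            const addr: usize = if (ptr) |p| @intFromPtr(p) else 0;
            return .{ .ptr_bits = @as(u63, @truncate(addr)), .flag = self.flag };
        }

        fn get(self: PackedPtr) ?*Node {
            if (self.ptr_bits == 0) return null;
            const p: *Node = @ptrFromInt(@as(usize, self.ptr_bits));
            return p;
        }
    };

    pub fn main() void {
        var node = Node{ .value = 42 };
        var next = PackedPtr.init(null);
        next.flag = true; // e.g. auto_delete set while the task is still thread-local

        // A plain init(&node) here would silently clear the flag;
        // withPtr swaps the pointer while keeping it, like the patched setter.
        next = next.withPtr(&node);
        std.debug.assert(next.flag);
        std.debug.assert(next.get().? == &node);
        std.debug.print("value={d} flag={}\n", .{ next.get().?.value, next.flag });
    }

Packing the flag into the same word as the pointer is what forces the queue to
read-modify-write the next field instead of storing a freshly built packed value,
which is exactly the flag-clobbering behavior this patch removes.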