Use low alignment bits instead of truncating high pointer bits

Changed PackedNext implementation to preserve ARM TBI/PAC/MTE pointer metadata:
- Store full usize pointer with auto_delete flag in LOW alignment bit (not high bit)
- ConcurrentTask is 8-byte aligned (contains u64 Task), so low bit is guaranteed zero
- Use enum(usize) { zero = 0, _ } for type safety and clean initialization syntax
- All atomic operations use usize (instead of u64) with @intFromEnum/@enumFromInt
- Masks: AUTO_DELETE_MASK=0x1, POINTER_MASK=~0x1

Benefits:
- Preserves all high pointer metadata (ARM pointer authentication, tagging, etc.)
- Uses guaranteed-zero low alignment bits instead of truncating address space
- Type-safe enum wrapper prevents accidental misuse
- Clean .zero initialization syntax
Author: Claude Bot
Date:   2025-11-03 03:18:20 +00:00
Parent: 063a5a9775
Commit: bd90052694

5 changed files with 54 additions and 40 deletions
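
In essence, the change trades high-bit truncation for low-bit tagging: the full address is kept intact and only the guaranteed-zero low alignment bit carries the flag. Below is a minimal, self-contained sketch of that technique; the Node type and the pack/unpack helpers are hypothetical illustrations, not part of the actual change.

    const std = @import("std");

    // Hypothetical node type; 8-byte alignment guarantees the low three bits
    // of any *Node address are zero, so bit 0 is free to hold a flag.
    const Node = struct { value: u64 };

    const FLAG_MASK: usize = 0x1;
    const PTR_MASK: usize = ~FLAG_MASK;

    fn pack(ptr: *Node, flag: bool) usize {
        const addr = @intFromPtr(ptr);
        std.debug.assert((addr & FLAG_MASK) == 0); // alignment keeps bit 0 clear
        // High bits (ARM TBI/PAC/MTE tags) pass through untouched.
        return addr | @intFromBool(flag);
    }

    fn unpack(tagged: usize) struct { ptr: *Node, flag: bool } {
        return .{
            .ptr = @ptrFromInt(tagged & PTR_MASK),
            .flag = (tagged & FLAG_MASK) != 0,
        };
    }

    test "low-bit tag round-trips without touching high pointer bits" {
        var node = Node{ .value = 42 };
        const tagged = pack(&node, true);
        const out = unpack(tagged);
        try std.testing.expect(out.ptr == &node);
        try std.testing.expect(out.flag);
    }

Because only bit 0 is ever masked, any metadata the hardware attached to the high bits of the address survives the round-trip.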

@@ -132,7 +132,7 @@ pub fn watcherReleaseAndSubmitEvent(self: *Self, ev: *HotReloadEvent) void {
         self.dbg_server_event = ev;
     }
     ev.concurrent_task = .{
-        .next = .{},
+        .next = .zero,
         .task = jsc.Task.init(ev),
     };
     ev.concurrent_task.next.setAutoDelete(false);

@@ -11,57 +11,71 @@
 const ConcurrentTask = @This();

 task: Task = undefined,
-next: PackedNext = .{},
+next: PackedNext = .zero,

-pub const PackedNext = packed struct(u64) {
-    ptr_bits: u63 = 0,
-    auto_delete: bool = false,
+pub const PackedNext = enum(usize) {
+    // Store the full usize pointer with the auto_delete flag in the low alignment bit.
+    // ConcurrentTask contains a u64 Task field, so it's at least 8-byte aligned.
+    // This preserves all pointer metadata (ARM TBI/PAC/MTE tags, etc.).
+    zero = 0,
+    _,
+
+    const AUTO_DELETE_MASK: usize = 0x1;
+    const POINTER_MASK: usize = ~AUTO_DELETE_MASK;

     pub fn init(ptr: ?*ConcurrentTask) PackedNext {
         if (ptr) |p| {
             const addr = @intFromPtr(p);
-            return .{
-                .ptr_bits = @as(u63, @truncate(addr)),
-                .auto_delete = false,
-            };
+            // Pointer should be aligned; verify the low bit is zero.
+            if (bun.Environment.allow_assert) {
+                bun.assertf((addr & AUTO_DELETE_MASK) == 0, "ConcurrentTask pointer must be aligned", .{});
+            }
+            // Store the pointer with auto_delete = false (low bit = 0).
+            return @enumFromInt(addr);
         }
-        return .{};
+        return @enumFromInt(0);
     }

     pub fn initPreserveAutoDelete(self: PackedNext, ptr: ?*ConcurrentTask) PackedNext {
+        const self_val = @intFromEnum(self);
         if (ptr) |p| {
             const addr = @intFromPtr(p);
-            return .{
-                .ptr_bits = @as(u63, @truncate(addr)),
-                .auto_delete = self.auto_delete,
-            };
+            // Pointer should be aligned; verify the low bit is zero.
+            if (bun.Environment.allow_assert) {
+                bun.assertf((addr & AUTO_DELETE_MASK) == 0, "ConcurrentTask pointer must be aligned", .{});
+            }
+            // Combine the new pointer with the existing auto_delete flag.
+            return @enumFromInt(addr | (self_val & AUTO_DELETE_MASK));
         }
-        return .{
-            .ptr_bits = 0,
-            .auto_delete = self.auto_delete,
-        };
+        // Null pointer, but preserve the auto_delete flag.
+        return @enumFromInt(self_val & AUTO_DELETE_MASK);
     }

     pub fn get(self: PackedNext) ?*ConcurrentTask {
-        if (self.ptr_bits == 0) return null;
-        // Explicitly zero out bit 63 to avoid any UB
-        const ptr: u64 = @as(u64, self.ptr_bits) & 0x7FFFFFFFFFFFFFFF;
-        return @as(?*ConcurrentTask, @ptrFromInt(ptr));
+        // Mask out the auto_delete bit to get the original pointer.
+        const addr = @intFromEnum(self) & POINTER_MASK;
+        if (addr == 0) return null;
+        return @ptrFromInt(addr);
     }

     pub fn autoDelete(self: PackedNext) bool {
-        return self.auto_delete;
+        return (@intFromEnum(self) & AUTO_DELETE_MASK) != 0;
     }

     pub fn setAutoDelete(self: *PackedNext, value: bool) void {
         // Non-atomic write is safe because this is only called during
         // initialization, before the task is shared with other threads.
-        self.auto_delete = value;
+        const self_val = @intFromEnum(self.*);
+        if (value) {
+            self.* = @enumFromInt(self_val | AUTO_DELETE_MASK);
+        } else {
+            self.* = @enumFromInt(self_val & POINTER_MASK);
+        }
     }

     comptime {
-        if (@sizeOf(PackedNext) != @sizeOf(u64)) {
-            @compileError("PackedNext must be the same size as a u64");
+        if (@sizeOf(PackedNext) != @sizeOf(usize)) {
+            @compileError("PackedNext must be the same size as a usize");
         }
     }
 };
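
A note on the shape of the declaration above: `enum(usize) { zero = 0, _ }` is a non-exhaustive enum, so `@enumFromInt` is well-defined for every usize value, while the distinct type still blocks accidental arithmetic or implicit conversion on the raw bits and gives call sites the clean `.zero` initializer. A standalone sketch of just that pattern, using a hypothetical TaggedAddr in place of PackedNext:

    const std = @import("std");

    // A non-exhaustive enum over usize: `zero` is the one named value and `_`
    // admits every other integer, so wrapping an arbitrary address is legal.
    const TaggedAddr = enum(usize) {
        zero = 0,
        _,
    };

    test "non-exhaustive enum round-trips arbitrary address bits" {
        const raw: usize = 0xdead_bee0 | 0x1; // pointer-like bits plus a low tag bit
        const wrapped: TaggedAddr = @enumFromInt(raw);
        try std.testing.expectEqual(raw, @intFromEnum(wrapped));
        try std.testing.expect(@as(TaggedAddr, @enumFromInt(0)) == .zero);
    }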
@@ -83,7 +97,7 @@ pub const AutoDeinit = enum {
 pub fn create(task: Task) *ConcurrentTask {
     var concurrent_task = ConcurrentTask.new(.{
         .task = task,
-        .next = .{},
+        .next = .zero,
     });
     concurrent_task.next.setAutoDelete(true);
     return concurrent_task;
@@ -105,7 +119,7 @@ pub fn from(this: *ConcurrentTask, of: anytype, auto_deinit: AutoDeinit) *Concur
     this.* = .{
         .task = Task.init(of),
-        .next = .{},
+        .next = .zero,
     };
     this.next.setAutoDelete(auto_deinit == .auto_deinit);
     return this;
 }

@@ -27,7 +27,7 @@ export fn Bun__queueJSCDeferredWorkTaskConcurrently(jsc_vm: *VirtualMachine, tas
     var loop = jsc_vm.eventLoop();
     const concurrent_task = ConcurrentTask.new(.{
         .task = Task.init(task),
-        .next = .{},
+        .next = .zero,
     });
     concurrent_task.next.setAutoDelete(true);
     loop.enqueueTaskConcurrent(concurrent_task);

@@ -225,7 +225,7 @@ pub fn NewHotReloader(comptime Ctx: type, comptime EventLoopType: type, comptime
         .hashes = this.hashes,
         .concurrent_task = undefined,
     });
-    that.concurrent_task = .{ .task = jsc.Task.init(that), .next = .{} };
+    that.concurrent_task = .{ .task = jsc.Task.init(that), .next = .zero };
     that.concurrent_task.next.setAutoDelete(false);
     that.reloader.enqueueTaskConcurrent(&that.concurrent_task);
     this.count = 0;

@@ -76,11 +76,11 @@ pub fn UnboundedQueuePacked(comptime T: type, comptime next_field: meta.FieldEnu
 // There was a previous back item - set its `next` field to point to `first`
 if (packed_or_unpacked == .@"packed") {
     // We need to preserve old_back's auto_delete flag while updating the pointer
-    const prev_next_ptr = @as(*u64, @ptrCast(&@field(old_back, next)));
-    const current_packed = @atomicLoad(u64, prev_next_ptr, .monotonic);
-    const current_typed = @as(@TypeOf(@field(first, next_name)), @bitCast(current_packed));
+    const prev_next_ptr = @as(*usize, @ptrCast(&@field(old_back, next)));
+    const current_packed = @atomicLoad(usize, prev_next_ptr, .monotonic);
+    const current_typed: @TypeOf(@field(first, next_name)) = @enumFromInt(current_packed);
     const new_packed = current_typed.initPreserveAutoDelete(first);
-    @atomicStore(u64, prev_next_ptr, @bitCast(new_packed), .release);
+    @atomicStore(usize, prev_next_ptr, @intFromEnum(new_packed), .release);
 } else {
     @atomicStore(?*T, &@field(old_back, next), first, .release);
 }
@@ -95,8 +95,8 @@ pub fn UnboundedQueuePacked(comptime T: type, comptime next_field: meta.FieldEnu
 var first = self.front.load(.acquire) orelse return null;
 const next_item = while (true) {
     const next_item = if (packed_or_unpacked == .@"packed") blk: {
-        const packed_val = @atomicLoad(u64, @as(*u64, @ptrCast(&@field(first, next))), .acquire);
-        const packed_typed = @as(@TypeOf(@field(first, next_name)), @bitCast(packed_val));
+        const packed_val = @atomicLoad(usize, @as(*usize, @ptrCast(&@field(first, next))), .acquire);
+        const packed_typed: @TypeOf(@field(first, next_name)) = @enumFromInt(packed_val);
         break :blk packed_typed.get();
     } else @atomicLoad(?*T, &@field(first, next), .acquire);
@@ -124,8 +124,8 @@ pub fn UnboundedQueuePacked(comptime T: type, comptime next_field: meta.FieldEnu
 const new_first = while (true) : (atomic.spinLoopHint()) {
     // Wait for push/pushBatch to set `next`.
     if (packed_or_unpacked == .@"packed") {
-        const packed_val = @atomicLoad(u64, @as(*u64, @ptrCast(&@field(first, next))), .acquire);
-        const packed_typed = @as(@TypeOf(@field(first, next_name)), @bitCast(packed_val));
+        const packed_val = @atomicLoad(usize, @as(*usize, @ptrCast(&@field(first, next))), .acquire);
+        const packed_typed: @TypeOf(@field(first, next_name)) = @enumFromInt(packed_val);
         break packed_typed.get() orelse continue;
     } else {
         break @atomicLoad(?*T, &@field(first, next), .acquire) orelse continue;
@@ -153,8 +153,8 @@ pub fn UnboundedQueuePacked(comptime T: type, comptime next_field: meta.FieldEnu
 next_item = while (true) : (atomic.spinLoopHint()) {
     // Wait for push/pushBatch to set `next`.
     if (packed_or_unpacked == .@"packed") {
-        const packed_val = @atomicLoad(u64, @as(*u64, @ptrCast(&@field(next_item, next))), .acquire);
-        const packed_typed = @as(@TypeOf(@field(next_item, next_name)), @bitCast(packed_val));
+        const packed_val = @atomicLoad(usize, @as(*usize, @ptrCast(&@field(next_item, next))), .acquire);
+        const packed_typed: @TypeOf(@field(next_item, next_name)) = @enumFromInt(packed_val);
         break packed_typed.get() orelse continue;
     } else {
         break @atomicLoad(?*T, &@field(next_item, next), .acquire) orelse continue;
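
Throughout the queue, the enum is read and written through its integer representation so the atomic builtins can operate on it directly. A reduced sketch of that load/store pattern, using hypothetical Node and PackedNext stand-ins rather than the real queue types:

    const std = @import("std");

    // Hypothetical stand-ins mirroring the queue's access pattern.
    const PackedNext = enum(usize) { zero = 0, _ };
    const Node = struct { next: PackedNext = .zero };

    // Atomics operate on the integer representation: cast the enum's storage
    // to *usize, load/store with the desired ordering, convert at the boundary.
    fn loadNext(node: *Node) PackedNext {
        const raw = @atomicLoad(usize, @as(*usize, @ptrCast(&node.next)), .acquire);
        return @enumFromInt(raw);
    }

    fn storeNext(node: *Node, value: PackedNext) void {
        @atomicStore(usize, @as(*usize, @ptrCast(&node.next)), @intFromEnum(value), .release);
    }

    test "atomic round-trip through the integer representation" {
        var node = Node{};
        storeNext(&node, @enumFromInt(0x1000));
        try std.testing.expectEqual(@as(usize, 0x1000), @intFromEnum(loadNext(&node)));
    }

Converting with @enumFromInt/@intFromEnum only at the atomic boundary keeps the rest of the code working with the typed wrapper instead of raw integers.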