diff --git a/build.zig b/build.zig index fb90c49ac8..5bb2804182 100644 --- a/build.zig +++ b/build.zig @@ -19,17 +19,17 @@ const OperatingSystem = @import("src/env.zig").OperatingSystem; const pathRel = fs.path.relative; /// Do not rename this constant. It is scanned by some scripts to determine which zig version to install. -const recommended_zig_version = "0.14.0-dev.2987+183bb8b08"; +const recommended_zig_version = "0.14.0"; comptime { if (!std.mem.eql(u8, builtin.zig_version_string, recommended_zig_version)) { @compileError( "" ++ - "Bun requires Zig version " ++ recommended_zig_version ++ " (found " ++ - builtin.zig_version_string ++ "). This is " ++ - "automatically configured via Bun's CMake setup. You likely meant to run " ++ - "`bun setup`. If you are trying to upgrade the Zig compiler, " ++ - "run `./scripts/download-zig.sh master` or comment this message out.", + "Bun requires Zig version " ++ recommended_zig_version ++ ", but you have " ++ + builtin.zig_version_string ++ ". This is automatically configured via Bun's " ++ + "CMake setup. You likely meant to run `bun run build`. 
If you are trying to " ++ + "upgrade the Zig compiler, edit ZIG_COMMIT in cmake/tools/SetupZig.cmake or " ++ + "comment this error out.", ); } } diff --git a/cmake/tools/SetupZig.cmake b/cmake/tools/SetupZig.cmake index c64acc3425..048aa40b3f 100644 --- a/cmake/tools/SetupZig.cmake +++ b/cmake/tools/SetupZig.cmake @@ -20,7 +20,7 @@ else() unsupported(CMAKE_SYSTEM_NAME) endif() -set(ZIG_COMMIT "bb9d6ab2c0bbbf20cc24dad03e88f3b3ffdb7de7") +set(ZIG_COMMIT "cd1995944508e4c946deb75bd70947d302e0db37") optionx(ZIG_TARGET STRING "The zig target to use" DEFAULT ${DEFAULT_ZIG_TARGET}) if(CMAKE_BUILD_TYPE STREQUAL "Release") diff --git a/src/StandaloneModuleGraph.zig b/src/StandaloneModuleGraph.zig index 5dc0804745..64ca13d111 100644 --- a/src/StandaloneModuleGraph.zig +++ b/src/StandaloneModuleGraph.zig @@ -466,11 +466,7 @@ pub const StandaloneModuleGraph = struct { return output_bytes; } - const page_size = if (Environment.isLinux and Environment.isAarch64) - // some linux distros do 64 KB pages on aarch64 - 64 * 1024 - else - std.mem.page_size; + const page_size = std.heap.page_size_max; pub const InjectOptions = struct { windows_hide_console: bool = false, diff --git a/src/allocators/AllocationScope.zig b/src/allocators/AllocationScope.zig index 5c2142f71e..5e2f7e3c8b 100644 --- a/src/allocators/AllocationScope.zig +++ b/src/allocators/AllocationScope.zig @@ -74,16 +74,17 @@ pub fn allocator(scope: *AllocationScope) Allocator { const vtable: Allocator.VTable = .{ .alloc = alloc, .resize = resize, + .remap = remap, .free = free, }; -fn alloc(ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 { +fn alloc(ctx: *anyopaque, len: usize, alignment: std.mem.Alignment, ret_addr: usize) ?[*]u8 { const scope: *AllocationScope = @ptrCast(@alignCast(ctx)); scope.state.mutex.lock(); defer scope.state.mutex.unlock(); scope.state.allocations.ensureUnusedCapacity(scope.parent, 1) catch return null; - const result = scope.parent.vtable.alloc(scope.parent.ptr, len, 
ptr_align, ret_addr) orelse + const result = scope.parent.vtable.alloc(scope.parent.ptr, len, alignment, ret_addr) orelse return null; const trace = StoredTrace.capture(ret_addr); scope.state.allocations.putAssumeCapacityNoClobber(result, .{ @@ -94,12 +95,17 @@ fn alloc(ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 { return result; } -fn resize(ctx: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool { +fn resize(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) bool { const scope: *AllocationScope = @ptrCast(@alignCast(ctx)); - return scope.parent.vtable.resize(scope.parent.ptr, buf, buf_align, new_len, ret_addr); + return scope.parent.vtable.resize(scope.parent.ptr, buf, alignment, new_len, ret_addr); } -fn free(ctx: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void { +fn remap(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) ?[*]u8 { + const scope: *AllocationScope = @ptrCast(@alignCast(ctx)); + return scope.parent.vtable.remap(scope.parent.ptr, buf, alignment, new_len, ret_addr); +} + +fn free(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void { const scope: *AllocationScope = @ptrCast(@alignCast(ctx)); scope.state.mutex.lock(); defer scope.state.mutex.unlock(); @@ -137,7 +143,7 @@ fn free(ctx: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void { // sanitizer does not catch the invalid free. } - scope.parent.vtable.free(scope.parent.ptr, buf, buf_align, ret_addr); + scope.parent.vtable.free(scope.parent.ptr, buf, alignment, ret_addr); // If asan did not catch the free, panic now. 
if (invalid) @panic("Invalid free"); diff --git a/src/allocators/NullableAllocator.zig b/src/allocators/NullableAllocator.zig index ccbf972579..bb78e45e9d 100644 --- a/src/allocators/NullableAllocator.zig +++ b/src/allocators/NullableAllocator.zig @@ -33,7 +33,7 @@ pub fn free(this: *const NullableAllocator, bytes: []const u8) void { if (this.get()) |allocator| { if (bun.String.isWTFAllocator(allocator)) { // workaround for https://github.com/ziglang/zig/issues/4298 - bun.String.StringImplAllocator.free(allocator.ptr, @constCast(bytes), 0, 0); + bun.String.StringImplAllocator.free(allocator.ptr, @constCast(bytes), .fromByteUnits(1), 0); return; } diff --git a/src/allocators/linux_memfd_allocator.zig b/src/allocators/linux_memfd_allocator.zig index 0206fd9398..25ae9ed14f 100644 --- a/src/allocators/linux_memfd_allocator.zig +++ b/src/allocators/linux_memfd_allocator.zig @@ -62,19 +62,15 @@ pub const LinuxMemFdAllocator = struct { } const AllocatorInterface = struct { - fn alloc(_: *anyopaque, _: usize, _: u8, _: usize) ?[*]u8 { + fn alloc(_: *anyopaque, _: usize, _: std.mem.Alignment, _: usize) ?[*]u8 { // it should perform no allocations or resizes return null; } - fn resize(_: *anyopaque, _: []u8, _: u8, _: usize, _: usize) bool { - return false; - } - fn free( ptr: *anyopaque, buf: []u8, - _: u8, + _: std.mem.Alignment, _: usize, ) void { var this: *LinuxMemFdAllocator = @alignCast(@ptrCast(ptr)); @@ -86,7 +82,8 @@ pub const LinuxMemFdAllocator = struct { pub const VTable = &std.mem.Allocator.VTable{ .alloc = &AllocatorInterface.alloc, - .resize = &resize, + .resize = &std.mem.Allocator.noResize, + .remap = &std.mem.Allocator.noRemap, .free = &free, }; }; @@ -95,7 +92,7 @@ pub const LinuxMemFdAllocator = struct { var size = len; // size rounded up to nearest page - size += (size + std.mem.page_size - 1) & std.mem.page_size; + size = std.mem.alignForward(usize, size, std.heap.pageSize()); var flags_mut = flags; flags_mut.TYPE = .SHARED; diff --git 
a/src/allocators/max_heap_allocator.zig b/src/allocators/max_heap_allocator.zig index ea616c9ca8..124a1acc55 100644 --- a/src/allocators/max_heap_allocator.zig +++ b/src/allocators/max_heap_allocator.zig @@ -4,9 +4,10 @@ const std = @import("std"); /// Single allocation only. /// pub const MaxHeapAllocator = struct { - array_list: std.ArrayList(u8), + array_list: std.ArrayListAligned(u8, @alignOf(std.c.max_align_t)), - fn alloc(ptr: *anyopaque, len: usize, _: u8, _: usize) ?[*]u8 { + fn alloc(ptr: *anyopaque, len: usize, alignment: std.mem.Alignment, _: usize) ?[*]u8 { + bun.assert(alignment.toByteUnits() <= @alignOf(std.c.max_align_t)); var this = bun.cast(*MaxHeapAllocator, ptr); this.array_list.items.len = 0; this.array_list.ensureTotalCapacity(len) catch return null; @@ -14,7 +15,7 @@ pub const MaxHeapAllocator = struct { return this.array_list.items.ptr; } - fn resize(_: *anyopaque, buf: []u8, _: u8, new_len: usize, _: usize) bool { + fn resize(_: *anyopaque, buf: []u8, _: std.mem.Alignment, new_len: usize, _: usize) bool { _ = new_len; _ = buf; @panic("not implemented"); @@ -23,7 +24,7 @@ pub const MaxHeapAllocator = struct { fn free( _: *anyopaque, _: []u8, - _: u8, + _: std.mem.Alignment, _: usize, ) void {} @@ -39,9 +40,10 @@ pub const MaxHeapAllocator = struct { .alloc = &alloc, .free = &free, .resize = &resize, + .remap = &std.mem.Allocator.noRemap, }; pub fn init(this: *MaxHeapAllocator, allocator: std.mem.Allocator) std.mem.Allocator { - this.array_list = std.ArrayList(u8).init(allocator); + this.array_list = .init(allocator); return std.mem.Allocator{ .ptr = this, diff --git a/src/allocators/memory_allocator.zig b/src/allocators/memory_allocator.zig index 244f15544b..f3222f0447 100644 --- a/src/allocators/memory_allocator.zig +++ b/src/allocators/memory_allocator.zig @@ -12,7 +12,7 @@ const Environment = @import("../env.zig"); fn mimalloc_free( _: *anyopaque, buf: []u8, - buf_align: u8, + alignment: mem.Alignment, _: usize, ) void { if (comptime 
Environment.enable_logs) @@ -23,8 +23,8 @@ fn mimalloc_free( // let's only enable it in debug mode if (comptime Environment.isDebug) { assert(mimalloc.mi_is_in_heap_region(buf.ptr)); - if (mimalloc.canUseAlignedAlloc(buf.len, buf_align)) - mimalloc.mi_free_size_aligned(buf.ptr, buf.len, buf_align) + if (mimalloc.canUseAlignedAlloc(buf.len, alignment.toByteUnits())) + mimalloc.mi_free_size_aligned(buf.ptr, buf.len, alignment.toByteUnits()) else mimalloc.mi_free_size(buf.ptr, buf.len); } else { @@ -35,12 +35,12 @@ fn mimalloc_free( const CAllocator = struct { pub const supports_posix_memalign = true; - fn alignedAlloc(len: usize, alignment: usize) ?[*]u8 { + fn alignedAlloc(len: usize, alignment: mem.Alignment) ?[*]u8 { if (comptime Environment.enable_logs) - log("mi_alloc({d}, {d})", .{ len, alignment }); + log("mi_alloc({d}, {d})", .{ len, alignment.toByteUnits() }); - const ptr: ?*anyopaque = if (mimalloc.canUseAlignedAlloc(len, alignment)) - mimalloc.mi_malloc_aligned(len, alignment) + const ptr: ?*anyopaque = if (mimalloc.canUseAlignedAlloc(len, alignment.toByteUnits())) + mimalloc.mi_malloc_aligned(len, alignment.toByteUnits()) else mimalloc.mi_malloc(len); @@ -60,16 +60,11 @@ const CAllocator = struct { return mimalloc.mi_malloc_size(ptr); } - fn alloc(_: *anyopaque, len: usize, log2_align: u8, _: usize) ?[*]u8 { - if (comptime FeatureFlags.alignment_tweak) { - return alignedAlloc(len, log2_align); - } - - const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align)); + fn alloc(_: *anyopaque, len: usize, alignment: mem.Alignment, _: usize) ?[*]u8 { return alignedAlloc(len, alignment); } - fn resize(_: *anyopaque, buf: []u8, _: u8, new_len: usize, _: usize) bool { + fn resize(_: *anyopaque, buf: []u8, _: mem.Alignment, new_len: usize, _: usize) bool { if (new_len <= buf.len) { return true; } @@ -93,17 +88,18 @@ pub const c_allocator = Allocator{ const c_allocator_vtable = &Allocator.VTable{ .alloc = &CAllocator.alloc, .resize = 
&CAllocator.resize, + .remap = &std.mem.Allocator.noRemap, .free = &CAllocator.free, }; const ZAllocator = struct { pub const supports_posix_memalign = true; - fn alignedAlloc(len: usize, alignment: usize) ?[*]u8 { + fn alignedAlloc(len: usize, alignment: mem.Alignment) ?[*]u8 { log("ZAllocator.alignedAlloc: {d}\n", .{len}); - const ptr = if (mimalloc.canUseAlignedAlloc(len, alignment)) - mimalloc.mi_zalloc_aligned(len, alignment) + const ptr = if (mimalloc.canUseAlignedAlloc(len, alignment.toByteUnits())) + mimalloc.mi_zalloc_aligned(len, alignment.toByteUnits()) else mimalloc.mi_zalloc(len); @@ -123,11 +119,11 @@ const ZAllocator = struct { return mimalloc.mi_malloc_size(ptr); } - fn alloc(_: *anyopaque, len: usize, ptr_align: u8, _: usize) ?[*]u8 { - return alignedAlloc(len, ptr_align); + fn alloc(_: *anyopaque, len: usize, alignment: mem.Alignment, _: usize) ?[*]u8 { + return alignedAlloc(len, alignment); } - fn resize(_: *anyopaque, buf: []u8, _: u8, new_len: usize, _: usize) bool { + fn resize(_: *anyopaque, buf: []u8, _: mem.Alignment, new_len: usize, _: usize) bool { if (new_len <= buf.len) { return true; } @@ -150,6 +146,7 @@ pub const z_allocator = Allocator{ const z_allocator_vtable = Allocator.VTable{ .alloc = &ZAllocator.alloc, .resize = &ZAllocator.resize, + .remap = &std.mem.Allocator.noRemap, .free = &ZAllocator.free, }; const HugeAllocator = struct { diff --git a/src/allocators/mimalloc_arena.zig b/src/allocators/mimalloc_arena.zig index d249ee6f6e..52277695fd 100644 --- a/src/allocators/mimalloc_arena.zig +++ b/src/allocators/mimalloc_arena.zig @@ -209,11 +209,11 @@ pub const Arena = struct { } pub const supports_posix_memalign = true; - fn alignedAlloc(heap: *mimalloc.Heap, len: usize, alignment: usize) ?[*]u8 { + fn alignedAlloc(heap: *mimalloc.Heap, len: usize, alignment: mem.Alignment) ?[*]u8 { log("Malloc: {d}\n", .{len}); - const ptr: ?*anyopaque = if (mimalloc.canUseAlignedAlloc(len, alignment)) - mimalloc.mi_heap_malloc_aligned(heap, len, 
alignment) + const ptr: ?*anyopaque = if (mimalloc.canUseAlignedAlloc(len, alignment.toByteUnits())) + mimalloc.mi_heap_malloc_aligned(heap, len, alignment.toByteUnits()) else mimalloc.mi_heap_malloc(heap, len); @@ -234,15 +234,10 @@ pub const Arena = struct { return mimalloc.mi_malloc_usable_size(ptr); } - fn alloc(arena: *anyopaque, len: usize, log2_align: u8, _: usize) ?[*]u8 { + fn alloc(arena: *anyopaque, len: usize, alignment: mem.Alignment, _: usize) ?[*]u8 { const this = bun.cast(*mimalloc.Heap, arena); // if (comptime Environment.isDebug) // ArenaRegistry.assert(.{ .heap = this }); - if (comptime FeatureFlags.alignment_tweak) { - return alignedAlloc(this, len, log2_align); - } - - const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align)); return alignedAlloc( this, @@ -251,7 +246,7 @@ pub const Arena = struct { ); } - fn resize(_: *anyopaque, buf: []u8, _: u8, new_len: usize, _: usize) bool { + fn resize(_: *anyopaque, buf: []u8, _: mem.Alignment, new_len: usize, _: usize) bool { if (new_len <= buf.len) { return true; } @@ -267,7 +262,7 @@ pub const Arena = struct { fn free( _: *anyopaque, buf: []u8, - buf_align: u8, + alignment: mem.Alignment, _: usize, ) void { // mi_free_size internally just asserts the size @@ -275,8 +270,8 @@ pub const Arena = struct { // but its good to have that assertion if (comptime Environment.isDebug) { assert(mimalloc.mi_is_in_heap_region(buf.ptr)); - if (mimalloc.canUseAlignedAlloc(buf.len, buf_align)) - mimalloc.mi_free_size_aligned(buf.ptr, buf.len, buf_align) + if (mimalloc.canUseAlignedAlloc(buf.len, alignment.toByteUnits())) + mimalloc.mi_free_size_aligned(buf.ptr, buf.len, alignment.toByteUnits()) else mimalloc.mi_free_size(buf.ptr, buf.len); } else { @@ -288,5 +283,6 @@ pub const Arena = struct { const c_allocator_vtable = Allocator.VTable{ .alloc = &Arena.alloc, .resize = &Arena.resize, + .remap = &std.mem.Allocator.noRemap, .free = &Arena.free, }; diff --git a/src/baby_list.zig 
b/src/baby_list.zig index 60fb5dcf87..1426aa956e 100644 --- a/src/baby_list.zig +++ b/src/baby_list.zig @@ -94,7 +94,7 @@ pub fn BabyList(comptime Type: type) type { this.update(list_); } - pub fn popOrNull(this: *@This()) ?Type { + pub fn pop(this: *@This()) ?Type { if (this.len == 0) return null; this.len -= 1; return this.ptr[this.len]; diff --git a/src/bake/DevServer.zig b/src/bake/DevServer.zig index 392f2b760b..b65c72a9ce 100644 --- a/src/bake/DevServer.zig +++ b/src/bake/DevServer.zig @@ -388,7 +388,7 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { .emit_visualizer_events = 0, .has_pre_crash_handler = bun.FeatureFlags.bake_debugging_features and options.dump_state_on_crash orelse - bun.getRuntimeFeatureFlag("BUN_DUMP_STATE_ON_CRASH"), + bun.getRuntimeFeatureFlag("BUN_DUMP_STATE_ON_CRASH"), .frontend_only = options.framework.file_system_router_types.len == 0, .client_graph = .empty, .server_graph = .empty, @@ -2383,7 +2383,7 @@ pub fn finalizeBundle( if (will_hear_hot_update and current_bundle.had_reload_event and (dev.incremental_result.framework_routes_affected.items.len + - dev.incremental_result.html_routes_hard_affected.items.len) > 0 and + dev.incremental_result.html_routes_hard_affected.items.len) > 0 and dev.bundling_failures.count() == 0) { has_route_bits_set = true; @@ -3756,7 +3756,7 @@ pub fn IncrementalGraph(side: bake.Side) type { } } - while (queue.popOrNull()) |index| { + while (queue.pop()) |index| { for (ctx.import_records[index.get()].slice()) |import_record| { const result = try processEdgeAttachment(g, ctx, temp_alloc, quick_lookup, new_imports, file_index, import_record, .css); if (result == .@"continue" and import_record.source_index.isValid()) { @@ -4986,7 +4986,7 @@ pub fn IncrementalGraph(side: bake.Side) type { } fn newEdge(g: *@This(), edge: Edge) !EdgeIndex { - if (g.edges_free_list.popOrNull()) |index| { + if (g.edges_free_list.pop()) |index| { g.edges.items[index.get()] = edge; return index; } @@ -5363,7 +5363,7 @@ const 
DirectoryWatchStore = struct { } fn appendDepAssumeCapacity(store: *DirectoryWatchStore, dep: Dep) Dep.Index { - if (store.dependencies_free_list.popOrNull()) |index| { + if (store.dependencies_free_list.pop()) |index| { store.dependencies.items[index.get()] = dep; return index; } diff --git a/src/bake/FrameworkRouter.zig b/src/bake/FrameworkRouter.zig index 582a20c1e0..9c5954eafb 100644 --- a/src/bake/FrameworkRouter.zig +++ b/src/bake/FrameworkRouter.zig @@ -810,7 +810,7 @@ fn newRoute(fr: *FrameworkRouter, alloc: Allocator, route_data: Route) !Route.In } fn newEdge(fr: *FrameworkRouter, alloc: Allocator, edge_data: Route.Edge) !Route.Edge.Index { - if (fr.freed_edges.popOrNull()) |i| { + if (fr.freed_edges.pop()) |i| { fr.edges.items[i.get()] = edge_data; return i; } else { diff --git a/src/btjs.zig b/src/btjs.zig index 85e61c9bc4..a1b09085cb 100644 --- a/src/btjs.zig +++ b/src/btjs.zig @@ -182,7 +182,7 @@ fn printLineFromFileAnyOs(out_stream: anytype, source_location: std.debug.Source defer f.close(); // TODO fstat and make sure that the file has the correct size - var buf: [std.mem.page_size]u8 = undefined; + var buf: [4096]u8 = undefined; var amt_read = try f.read(buf[0..]); const line_start = seek: { var current_line_start: usize = 0; diff --git a/src/bun.js/api/BunObject.zig b/src/bun.js/api/BunObject.zig index 37401db957..bcf9566542 100644 --- a/src/bun.js/api/BunObject.zig +++ b/src/bun.js/api/BunObject.zig @@ -1804,7 +1804,10 @@ pub const Crypto = struct { } const hash_options = pwhash.bcrypt.HashOptions{ - .params = pwhash.bcrypt.Params{ .rounds_log = cost }, + .params = pwhash.bcrypt.Params{ + .rounds_log = cost, + .silently_truncate_password = true, + }, .allocator = allocator, .encoding = .crypt, }; @@ -1862,7 +1865,10 @@ pub const Crypto = struct { sha_512.final(&outbuf); password_to_use = &outbuf; } - pwhash.bcrypt.strVerify(previous_hash, password_to_use, .{ .allocator = allocator }) catch |err| { + pwhash.bcrypt.strVerify(previous_hash, 
password_to_use, .{ + .allocator = allocator, + .silently_truncate_password = true, + }) catch |err| { if (err == error.PasswordVerificationFailed) { return false; } @@ -3434,7 +3440,7 @@ pub fn mmapFile(globalThis: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun. if (try opts.get(globalThis, "offset")) |value| { offset = @as(usize, @intCast(value.toInt64())); - offset = std.mem.alignBackwardAnyAlign(usize, offset, std.mem.page_size); + offset = std.mem.alignBackwardAnyAlign(usize, offset, std.heap.pageSize()); } } @@ -3448,7 +3454,7 @@ pub fn mmapFile(globalThis: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun. return JSC.C.JSObjectMakeTypedArrayWithBytesNoCopy(globalThis, JSC.C.JSTypedArrayType.kJSTypedArrayTypeUint8Array, @as(?*anyopaque, @ptrCast(map.ptr)), map.len, struct { pub fn x(ptr: ?*anyopaque, size: ?*anyopaque) callconv(.C) void { - _ = bun.sys.munmap(@as([*]align(std.mem.page_size) u8, @ptrCast(@alignCast(ptr)))[0..@intFromPtr(size)]); + _ = bun.sys.munmap(@as([*]align(std.heap.page_size_min) u8, @ptrCast(@alignCast(ptr)))[0..@intFromPtr(size)]); } }.x, @as(?*anyopaque, @ptrFromInt(map.len)), null).?.value(); } @@ -4530,10 +4536,10 @@ pub const JSZlib = struct { const buffer_value = if (arguments.len > 0) arguments[0] else .undefined; const options_val: ?JSValue = if (arguments.len > 1 and arguments[1].isObject()) - arguments[1] - else if (arguments.len > 1 and !arguments[1].isUndefined()) { - return globalThis.throwInvalidArguments("Expected options to be an object", .{}); - } else null; + arguments[1] + else if (arguments.len > 1 and !arguments[1].isUndefined()) { + return globalThis.throwInvalidArguments("Expected options to be an object", .{}); + } else null; if (JSC.Node.StringOrBuffer.fromJS(globalThis, bun.default_allocator, buffer_value)) |buffer| { return .{ buffer, options_val }; diff --git a/src/bun.js/base.zig b/src/bun.js/base.zig index 452cf15c2e..9e3f42eec9 100644 --- a/src/bun.js/base.zig +++ b/src/bun.js/base.zig @@ -1480,8 
+1480,9 @@ pub const MemoryReportingAllocator = struct { memory_cost: std.atomic.Value(usize) = std.atomic.Value(usize).init(0), const log = Output.scoped(.MEM, false); - fn alloc(this: *MemoryReportingAllocator, n: usize, log2_ptr_align: u8, return_address: usize) ?[*]u8 { - const result = this.child_allocator.rawAlloc(n, log2_ptr_align, return_address) orelse return null; + fn alloc(context: *anyopaque, n: usize, alignment: std.mem.Alignment, return_address: usize) ?[*]u8 { + const this: *MemoryReportingAllocator = @alignCast(@ptrCast(context)); + const result = this.child_allocator.rawAlloc(n, alignment, return_address) orelse return null; _ = this.memory_cost.fetchAdd(n, .monotonic); if (comptime Environment.allow_assert) log("malloc({d}) = {d}", .{ n, this.memory_cost.raw }); @@ -1494,8 +1495,9 @@ pub const MemoryReportingAllocator = struct { log("discard({d}) = {d}", .{ buf.len, this.memory_cost.raw }); } - fn resize(this: *MemoryReportingAllocator, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool { - if (this.child_allocator.rawResize(buf, buf_align, new_len, ret_addr)) { + fn resize(context: *anyopaque, buf: []u8, alignment: std.mem.Alignment, new_len: usize, ret_addr: usize) bool { + const this: *MemoryReportingAllocator = @alignCast(@ptrCast(context)); + if (this.child_allocator.rawResize(buf, alignment, new_len, ret_addr)) { _ = this.memory_cost.fetchAdd(new_len -| buf.len, .monotonic); if (comptime Environment.allow_assert) log("resize() = {d}", .{this.memory_cost.raw}); @@ -1505,8 +1507,9 @@ pub const MemoryReportingAllocator = struct { } } - fn free(this: *MemoryReportingAllocator, buf: []u8, buf_align: u8, ret_addr: usize) void { - this.child_allocator.rawFree(buf, buf_align, ret_addr); + fn free(context: *anyopaque, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void { + const this: *MemoryReportingAllocator = @alignCast(@ptrCast(context)); + this.child_allocator.rawFree(buf, alignment, ret_addr); if (comptime 
Environment.allow_assert) { // check for overflow, racily @@ -1554,9 +1557,10 @@ pub const MemoryReportingAllocator = struct { } pub const VTable = std.mem.Allocator.VTable{ - .alloc = @ptrCast(&MemoryReportingAllocator.alloc), - .resize = @ptrCast(&MemoryReportingAllocator.resize), - .free = @ptrCast(&MemoryReportingAllocator.free), + .alloc = &MemoryReportingAllocator.alloc, + .resize = &MemoryReportingAllocator.resize, + .remap = &std.mem.Allocator.noRemap, + .free = &MemoryReportingAllocator.free, }; }; diff --git a/src/bun.js/bindings/AbortSignal.zig b/src/bun.js/bindings/AbortSignal.zig index eae01694eb..960f6ce1e8 100644 --- a/src/bun.js/bindings/AbortSignal.zig +++ b/src/bun.js/bindings/AbortSignal.zig @@ -5,7 +5,7 @@ const JSValue = JSC.JSValue; const JSGlobalObject = JSC.JSGlobalObject; const CommonAbortReason = @import("CommonAbortReason.zig").CommonAbortReason; -pub const AbortSignal = extern opaque { +pub const AbortSignal = opaque { extern fn WebCore__AbortSignal__aborted(arg0: *AbortSignal) bool; extern fn WebCore__AbortSignal__abortReason(arg0: *AbortSignal) JSValue; extern fn WebCore__AbortSignal__addListener(arg0: *AbortSignal, arg1: ?*anyopaque, ArgFn2: ?*const fn (?*anyopaque, JSValue) callconv(.C) void) *AbortSignal; diff --git a/src/bun.js/bindings/CachedBytecode.zig b/src/bun.js/bindings/CachedBytecode.zig index edd4bb4cbd..08758d8584 100644 --- a/src/bun.js/bindings/CachedBytecode.zig +++ b/src/bun.js/bindings/CachedBytecode.zig @@ -43,31 +43,24 @@ pub const CachedBytecode = opaque { pub const VTable = &std.mem.Allocator.VTable{ .alloc = struct { - pub fn alloc(ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 { - _ = ctx; // autofix - _ = len; // autofix - _ = ptr_align; // autofix - _ = ret_addr; // autofix + pub fn alloc(ctx: *anyopaque, len: usize, alignment: std.mem.Alignment, ret_addr: usize) ?[*]u8 { + _ = ctx; + _ = len; + _ = alignment; + _ = ret_addr; @panic("Unexpectedly called CachedBytecode.alloc"); } }.alloc, 
- .resize = struct { - pub fn resize(ctx: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool { - _ = ctx; // autofix - _ = buf; // autofix - _ = buf_align; // autofix - _ = new_len; // autofix - _ = ret_addr; // autofix - return false; - } - }.resize, .free = struct { - pub fn free(ctx: *anyopaque, buf: []u8, buf_align: u8, _: usize) void { - _ = buf; // autofix - _ = buf_align; // autofix + pub fn free(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void { + _ = buf; + _ = alignment; + _ = ret_addr; CachedBytecode__deref(@ptrCast(ctx)); } }.free, + .resize = &std.mem.Allocator.noResize, + .remap = &std.mem.Allocator.noRemap, }; pub fn allocator(this: *CachedBytecode) std.mem.Allocator { diff --git a/src/bun.js/javascript.zig b/src/bun.js/javascript.zig index fa8392082d..4b046ee5f0 100644 --- a/src/bun.js/javascript.zig +++ b/src/bun.js/javascript.zig @@ -724,7 +724,7 @@ const AutoKiller = struct { fn killProcesses(this: *AutoKiller) u32 { var count: u32 = 0; - while (this.processes.popOrNull()) |process| { + while (this.processes.pop()) |process| { if (!process.key.hasExited()) { log("process.kill {d}", .{process.key.pid}); count += @as(u32, @intFromBool(process.key.kill(@intFromEnum(bun.SignalCode.default)) == .result)); @@ -1033,9 +1033,9 @@ pub const VirtualMachine = struct { pub fn isEventLoopAliveExcludingImmediates(vm: *const VirtualMachine) bool { return vm.unhandled_error_counter == 0 and (@intFromBool(vm.event_loop_handle.?.isActive()) + - vm.active_tasks + - vm.event_loop.tasks.count + - @intFromBool(vm.event_loop.hasPendingRefs()) > 0); + vm.active_tasks + + vm.event_loop.tasks.count + + @intFromBool(vm.event_loop.hasPendingRefs()) > 0); } pub fn isEventLoopAlive(vm: *const VirtualMachine) bool { @@ -2521,7 +2521,7 @@ pub const VirtualMachine = struct { return; } else if (jsc_vm.module_loader.eval_source != null and (strings.endsWithComptime(specifier, bun.pathLiteral("/[eval]")) or - 
strings.endsWithComptime(specifier, bun.pathLiteral("/[stdin]")))) + strings.endsWithComptime(specifier, bun.pathLiteral("/[stdin]")))) { ret.result = null; ret.path = specifier; @@ -3553,7 +3553,7 @@ pub const VirtualMachine = struct { error_instance.toZigException(this.global, exception); const enable_source_code_preview = allow_source_code_preview and !(bun.getRuntimeFeatureFlag("BUN_DISABLE_SOURCE_CODE_PREVIEW") or - bun.getRuntimeFeatureFlag("BUN_DISABLE_TRANSPILED_SOURCE_CODE_PREVIEW")); + bun.getRuntimeFeatureFlag("BUN_DISABLE_TRANSPILED_SOURCE_CODE_PREVIEW")); defer { if (Environment.isDebug) { @@ -4186,13 +4186,13 @@ pub const VirtualMachine = struct { // + 1 to ensure the message is a non-empty string. break :has_prefix msg_chars.len > code.len + ": ".len + 1 and (if (is_utf16) - // there is no existing function to perform this slice comparison - // []const u16, []const u8 - for (code, msg_chars[0..code.len]) |a, b| { - if (a != b) break false; - } else true - else - bun.strings.eqlLong(msg_chars[0..code.len], code, false)) and + // there is no existing function to perform this slice comparison + // []const u16, []const u8 + for (code, msg_chars[0..code.len]) |a, b| { + if (a != b) break false; + } else true + else + bun.strings.eqlLong(msg_chars[0..code.len], code, false)) and msg_chars[code.len] == ':' and msg_chars[code.len + 1] == ' '; }, diff --git a/src/bun.js/javascript_core_c_api.zig b/src/bun.js/javascript_core_c_api.zig index 2d81c70987..c203bb0da3 100644 --- a/src/bun.js/javascript_core_c_api.zig +++ b/src/bun.js/javascript_core_c_api.zig @@ -8,11 +8,11 @@ const bun = @import("root").bun; const std = @import("std"); const cpp = @import("./bindings/bindings.zig"); const generic = opaque { - pub fn value(this: *const @This()) cpp.JSValue { + pub fn value(this: *const generic) cpp.JSValue { return @as(cpp.JSValue, @enumFromInt(@as(cpp.JSValueReprInt, @bitCast(@intFromPtr(this))))); } - pub inline fn bunVM(this: *@This()) *bun.JSC.VirtualMachine { 
+ pub inline fn bunVM(this: *generic) *bun.JSC.VirtualMachine { return this.ptr().bunVM(); } }; diff --git a/src/bun.js/node/fs_events.zig b/src/bun.js/node/fs_events.zig index 2853dc528c..f746f63a34 100644 --- a/src/bun.js/node/fs_events.zig +++ b/src/bun.js/node/fs_events.zig @@ -551,7 +551,7 @@ pub const FSEventsLoop = struct { this.sem.deinit(); if (this.watcher_count > 0) { - while (this.watchers.popOrNull()) |watcher| { + while (this.watchers.pop()) |watcher| { if (watcher) |w| { // unlink watcher w.loop = null; diff --git a/src/bun.js/node/node_fs.zig b/src/bun.js/node/node_fs.zig index 9e9291e66b..c3363fc49d 100644 --- a/src/bun.js/node/node_fs.zig +++ b/src/bun.js/node/node_fs.zig @@ -3724,7 +3724,7 @@ pub const NodeFS = struct { while (true) { // Linux Kernel 5.3 or later // Not supported in gVisor - const written = linux.copy_file_range(src_fd.cast(), &off_in_copy, dest_fd.cast(), &off_out_copy, std.mem.page_size, 0); + const written = linux.copy_file_range(src_fd.cast(), &off_in_copy, dest_fd.cast(), &off_out_copy, std.heap.pageSize(), 0); if (ret.errnoSysP(written, .copy_file_range, dest)) |err| { return switch (err.getErrno()) { .INTR => continue, @@ -6464,7 +6464,7 @@ pub const NodeFS = struct { while (true) { // Linux Kernel 5.3 or later // Not supported in gVisor - const written = linux.copy_file_range(src_fd.cast(), &off_in_copy, dest_fd.cast(), &off_out_copy, std.mem.page_size, 0); + const written = linux.copy_file_range(src_fd.cast(), &off_in_copy, dest_fd.cast(), &off_out_copy, std.heap.pageSize(), 0); if (ret.errnoSysP(written, .copy_file_range, dest)) |err| { return switch (err.getErrno()) { inline .XDEV, .NOSYS => |errno| brk: { diff --git a/src/bun.js/node/path_watcher.zig b/src/bun.js/node/path_watcher.zig index 9e647386c3..c98fed2202 100644 --- a/src/bun.js/node/path_watcher.zig +++ b/src/bun.js/node/path_watcher.zig @@ -417,7 +417,7 @@ pub const PathWatcherManager = struct { this.manager.mutex.lock(); defer this.manager.mutex.unlock(); - 
const watcher = this.watcher_list.popOrNull(); + const watcher = this.watcher_list.pop(); if (watcher == null) { // no more work todo, release the fd and path _ = this.manager.current_fd_task.remove(this.path.fd); @@ -659,7 +659,7 @@ pub const PathWatcherManager = struct { { watcher.mutex.lock(); defer watcher.mutex.unlock(); - while (watcher.file_paths.popOrNull()) |file_path| { + while (watcher.file_paths.pop()) |file_path| { this._decrementPathRefNoLock(file_path); } } @@ -695,7 +695,7 @@ pub const PathWatcherManager = struct { this.main_watcher.deinit(false); if (this.watcher_count > 0) { - while (this.watchers.popOrNull()) |watcher| { + while (this.watchers.pop()) |watcher| { if (watcher) |w| { // unlink watcher w.manager = null; @@ -870,7 +870,7 @@ pub const PathWatcher = struct { const time_diff = time_stamp - this.last_change_event.time_stamp; if (!((this.last_change_event.time_stamp == 0 or time_diff > 1) or this.last_change_event.event_type != event_type and - this.last_change_event.hash != hash)) + this.last_change_event.hash != hash)) { // skip consecutive duplicates return; diff --git a/src/bun.js/webcore/blob.zig b/src/bun.js/webcore/blob.zig index 306ba8ebd2..7ae295211b 100644 --- a/src/bun.js/webcore/blob.zig +++ b/src/bun.js/webcore/blob.zig @@ -1193,11 +1193,11 @@ pub const Blob = struct { if (path_or_blob == .path or // If they try to set an offset, its a little more complicated so let's avoid that (path_or_blob.blob.offset == 0 and !path_or_blob.blob.isS3() and - // Is this a file that is known to be a pipe? Let's avoid blocking the main thread on it. - !(path_or_blob.blob.store != null and - path_or_blob.blob.store.?.data == .file and - path_or_blob.blob.store.?.data.file.mode != 0 and - bun.isRegularFile(path_or_blob.blob.store.?.data.file.mode)))) + // Is this a file that is known to be a pipe? Let's avoid blocking the main thread on it. 
+ !(path_or_blob.blob.store != null and + path_or_blob.blob.store.?.data == .file and + path_or_blob.blob.store.?.data.file.mode != 0 and + bun.isRegularFile(path_or_blob.blob.store.?.data.file.mode)))) { if (data.isString()) { const len = data.getLength(globalThis); @@ -5780,7 +5780,7 @@ pub const Blob = struct { joiner.push(sliced.slice(), sliced.allocator.get()); }, } - current = stack.popOrNull() orelse break; + current = stack.pop() orelse break; } const joined = try joiner.done(bun.default_allocator); diff --git a/src/bun.js/webcore/streams.zig b/src/bun.js/webcore/streams.zig index 8b20569c8f..533738ed25 100644 --- a/src/bun.js/webcore/streams.zig +++ b/src/bun.js/webcore/streams.zig @@ -4154,8 +4154,8 @@ pub const FileReader = struct { if ((file.is_atty orelse false) or (fd.int() < 3 and std.posix.isatty(fd.cast())) or (file.pathlike == .fd and - bun.FDTag.get(file.pathlike.fd) != .none and - std.posix.isatty(file.pathlike.fd.cast()))) + bun.FDTag.get(file.pathlike.fd) != .none and + std.posix.isatty(file.pathlike.fd.cast()))) { // var termios = std.mem.zeroes(std.posix.termios); // _ = std.c.tcgetattr(fd.cast(), &termios); @@ -4996,7 +4996,8 @@ pub const ByteStream = struct { // #define LIBUS_RECV_BUFFER_LENGTH 524288 // For HTTPS, the size is probably quite a bit lower like 64 KB due to TLS transmission. // We add 1 extra page size so that if there's a little bit of excess buffered data, we avoid extra allocations. 
- return .{ .chunk_size = @min(512 * 1024 + std.mem.page_size, @max(this.highWaterMark, std.mem.page_size)) }; + const page_size: Blob.SizeType = @intCast(std.heap.pageSize()); + return .{ .chunk_size = @min(512 * 1024 + page_size, @max(this.highWaterMark, page_size)) }; } pub fn value(this: *@This()) JSValue { diff --git a/src/bun.zig b/src/bun.zig index d532dc8051..9c4b6b4405 100644 --- a/src/bun.zig +++ b/src/bun.zig @@ -2776,10 +2776,10 @@ pub const MakePath = struct { while (true) { const sub_path_w = if (comptime T == u16) try w.wToPrefixedFileW(self.fd, - // TODO: report this bug - // they always copy it - // it doesn't need to be [:0]const u16 - @ptrCast(component.path)) + // TODO: report this bug + // they always copy it + // it doesn't need to be [:0]const u16 + @ptrCast(component.path)) else try w.sliceToPrefixedFileW(self.fd, component.path); var result = makeOpenDirAccessMaskW(self, sub_path_w.span().ptr, access_mask, .{ diff --git a/src/bundler/bundle_v2.zig b/src/bundler/bundle_v2.zig index 5afdd4a7d7..9aff20eb52 100644 --- a/src/bundler/bundle_v2.zig +++ b/src/bundler/bundle_v2.zig @@ -426,7 +426,7 @@ fn genericPathWithPrettyInitialized(path: Fs.Path, target: options.Target, top_l const is_node = bun.strings.eqlComptime(path.namespace, "node"); if (is_node and (bun.strings.hasPrefixComptime(path.text, NodeFallbackModules.import_path) or - !std.fs.path.isAbsolute(path.text))) + !std.fs.path.isAbsolute(path.text))) { return path; } @@ -3060,42 +3060,41 @@ pub const BundleV2 = struct { } const transpiler, const bake_graph: bake.Graph, const target = - if (import_record.tag == .bake_resolve_to_ssr_graph) - brk: { - if (this.framework == null) { - this.logForResolutionFailures(source.path.text, .ssr).addErrorFmt( - source, - import_record.range.loc, - this.graph.allocator, - "The 'bunBakeGraph' import attribute cannot be used outside of a Bun Bake bundle", - .{}, - ) catch @panic("unexpected log error"); - continue; - } + if (import_record.tag == 
.bake_resolve_to_ssr_graph) brk: { + if (this.framework == null) { + this.logForResolutionFailures(source.path.text, .ssr).addErrorFmt( + source, + import_record.range.loc, + this.graph.allocator, + "The 'bunBakeGraph' import attribute cannot be used outside of a Bun Bake bundle", + .{}, + ) catch @panic("unexpected log error"); + continue; + } - const is_supported = this.framework.?.server_components != null and - this.framework.?.server_components.?.separate_ssr_graph; - if (!is_supported) { - this.logForResolutionFailures(source.path.text, .ssr).addErrorFmt( - source, - import_record.range.loc, - this.graph.allocator, - "Framework does not have a separate SSR graph to put this import into", - .{}, - ) catch @panic("unexpected log error"); - continue; - } + const is_supported = this.framework.?.server_components != null and + this.framework.?.server_components.?.separate_ssr_graph; + if (!is_supported) { + this.logForResolutionFailures(source.path.text, .ssr).addErrorFmt( + source, + import_record.range.loc, + this.graph.allocator, + "Framework does not have a separate SSR graph to put this import into", + .{}, + ) catch @panic("unexpected log error"); + continue; + } - break :brk .{ - this.ssr_transpiler, - .ssr, - .bake_server_components_ssr, + break :brk .{ + this.ssr_transpiler, + .ssr, + .bake_server_components_ssr, + }; + } else .{ + this.transpilerForTarget(ast.target), + ast.target.bakeGraph(), + ast.target, }; - } else .{ - this.transpilerForTarget(ast.target), - ast.target.bakeGraph(), - ast.target, - }; var had_busted_dir_cache = false; var resolve_result = inner: while (true) break transpiler.resolver.resolveWithFramework( @@ -5086,7 +5085,7 @@ pub const ParseTask = struct { this.ctx.framework.?.server_components.?.separate_ssr_graph) or // set the target to the client when bundling client-side files ((transpiler.options.server_components or transpiler.options.dev_server != null) and - task.known_target == .browser)) + task.known_target == .browser)) 
{ transpiler = this.ctx.client_transpiler; resolver = &transpiler.resolver; @@ -5102,9 +5101,9 @@ pub const ParseTask = struct { const target = (if (task.source_index.get() == 1) targetFromHashbang(entry.contents) else null) orelse if (task.known_target == .bake_server_components_ssr and transpiler.options.framework.?.server_components.?.separate_ssr_graph) - .bake_server_components_ssr - else - transpiler.options.target; + .bake_server_components_ssr + else + transpiler.options.target; const output_format = transpiler.options.output_format; @@ -7315,7 +7314,7 @@ pub const LinkerContext = struct { // } defer { - _ = visitor.visited.popOrNull(); + _ = visitor.visited.pop(); } // Iterate over the top-level "@import" rules @@ -9089,9 +9088,9 @@ pub const LinkerContext = struct { // if (kind != .require and (kind != .stmt or - record.contains_import_star or - record.contains_default_alias or - record.contains_es_module_alias)) + record.contains_import_star or + record.contains_default_alias or + record.contains_es_module_alias)) { record.wrap_with_to_esm = true; to_esm_uses += 1; @@ -9603,7 +9602,7 @@ pub const LinkerContext = struct { const exports_ref = c.graph.ast.items(.exports_ref)[id]; const all_export_stmts: []js_ast.Stmt = stmts.head[0 .. @as(usize, @intFromBool(needs_exports_variable)) + @as(usize, @intFromBool(properties.items.len > 0) + - @as(usize, @intFromBool(force_include_exports_for_entry_point)))]; + @as(usize, @intFromBool(force_include_exports_for_entry_point)))]; stmts.head = stmts.head[all_export_stmts.len..]; var remaining_stmts = all_export_stmts; defer bun.assert(remaining_stmts.len == 0); // all must be used @@ -15777,8 +15776,8 @@ pub const LinkerContext = struct { // perform tree-shaking on the runtime even if tree-shaking is disabled. 
if (!can_be_removed_if_unused or (!part.force_tree_shaking and - !c.options.tree_shaking and - entry_point_kinds[source_index].isEntryPoint())) + !c.options.tree_shaking and + entry_point_kinds[source_index].isEntryPoint())) { c.markPartLiveForTreeShaking( @intCast(part_index), @@ -16294,9 +16293,9 @@ pub const LinkerContext = struct { // TODO: investigate if this is a bug // It implies there are imports being added without being resolved return .{ - .value = .{}, - .status = .external, - }; + .value = .{}, + .status = .external, + }; // Is this an external file? const record: *const ImportRecord = import_records.at(named_import.import_record_index); @@ -18052,7 +18051,8 @@ const ExternalFreeFunctionAllocator = struct { const vtable: std.mem.Allocator.VTable = .{ .alloc = &alloc, .free = &free, - .resize = &resize, + .resize = &std.mem.Allocator.noResize, + .remap = &std.mem.Allocator.noRemap, }; pub fn create(free_callback: *const fn (ctx: *anyopaque) callconv(.C) void, context: *anyopaque) std.mem.Allocator { @@ -18065,15 +18065,11 @@ const ExternalFreeFunctionAllocator = struct { }; } - fn alloc(_: *anyopaque, _: usize, _: u8, _: usize) ?[*]u8 { + fn alloc(_: *anyopaque, _: usize, _: std.mem.Alignment, _: usize) ?[*]u8 { return null; } - fn resize(_: *anyopaque, _: []u8, _: u8, _: usize, _: usize) bool { - return false; - } - - fn free(ext_free_function: *anyopaque, _: []u8, _: u8, _: usize) void { + fn free(ext_free_function: *anyopaque, _: []u8, _: std.mem.Alignment, _: usize) void { const info: *ExternalFreeFunctionAllocator = @alignCast(@ptrCast(ext_free_function)); info.free_callback(info.context); bun.default_allocator.destroy(info); diff --git a/src/cli/pack_command.zig b/src/cli/pack_command.zig index 35c1ed6883..ccdca7f939 100644 --- a/src/cli/pack_command.zig +++ b/src/cli/pack_command.zig @@ -211,9 +211,9 @@ pub const PackCommand = struct { MissingPackageJSON, } || if (for_publish) error{ - RestrictedUnscopedPackage, - PrivatePackage, - } else 
error{}; + RestrictedUnscopedPackage, + PrivatePackage, + } else error{}; } const package_prefix = "package/"; @@ -291,7 +291,7 @@ pub const PackCommand = struct { defer subpath_dedupe.deinit(); // first find included dirs and files - while (dirs.popOrNull()) |dir_info| { + while (dirs.pop()) |dir_info| { var dir, const dir_subpath, const dir_depth = dir_info; defer { if (dir_depth != 1) { @@ -320,10 +320,10 @@ pub const PackCommand = struct { if (entry.kind == .file and (eql(entry_name, "package.json") or - eql(entry_name, "LICENSE") or - eql(entry_name, "LICENCE") or - eql(entry_name, "README") or - entry_name.len > "README.".len and eql(entry_name[0.."README.".len], "README."))) + eql(entry_name, "LICENSE") or + eql(entry_name, "LICENCE") or + eql(entry_name, "README") or + entry_name.len > "README.".len and eql(entry_name[0.."README.".len], "README."))) included = true; } @@ -394,7 +394,7 @@ pub const PackCommand = struct { var ignores: std.ArrayListUnmanaged(IgnorePatterns) = .{}; defer ignores.deinit(allocator); - while (dirs.popOrNull()) |dir_info| { + while (dirs.pop()) |dir_info| { var dir, const dir_subpath, const dir_depth = dir_info; defer dir.close(); @@ -610,7 +610,7 @@ pub const PackCommand = struct { } } - while (additional_bundled_deps.popOrNull()) |bundled_dir_info| { + while (additional_bundled_deps.pop()) |bundled_dir_info| { const dir_subpath = bundled_dir_info[1]; const maybe_slash = strings.lastIndexOfChar(dir_subpath, '/'); bun.assertWithLocation(maybe_slash != null, @src()); @@ -652,7 +652,7 @@ pub const PackCommand = struct { try dirs.append(ctx.allocator, bundled_dir_info); - while (dirs.popOrNull()) |dir_info| { + while (dirs.pop()) |dir_info| { var dir, const dir_subpath, const dir_depth = dir_info; defer dir.close(); @@ -787,7 +787,7 @@ pub const PackCommand = struct { try dirs.append(allocator, .{ root_dir, "", 1 }); - while (dirs.popOrNull()) |dir_info| { + while (dirs.pop()) |dir_info| { var dir, const dir_subpath, const dir_depth = 
dir_info; defer { if (dir_depth != 1) { @@ -1035,12 +1035,12 @@ pub const PackCommand = struct { // first, check files that can never be ignored. project root directory only if (entry.kind == .file and (eql(entry_name, "package.json") or - eql(entry_name, "LICENSE") or - eql(entry_name, "LICENCE") or - eql(entry_name, "README") or - entry_name.len > "README.".len and eql(entry_name[0.."README.".len], "README.") or - eql(entry_name, "CHANGELOG") or - entry_name.len > "CHANGELOG.".len and eql(entry_name[0.."CHANGELOG.".len], "CHANGELOG."))) + eql(entry_name, "LICENSE") or + eql(entry_name, "LICENCE") or + eql(entry_name, "README") or + entry_name.len > "README.".len and eql(entry_name[0.."README.".len], "README.") or + eql(entry_name, "CHANGELOG") or + entry_name.len > "CHANGELOG.".len and eql(entry_name[0.."CHANGELOG.".len], "CHANGELOG."))) return null; // check default ignores that only apply to the root project directory diff --git a/src/cli/publish_command.zig b/src/cli/publish_command.zig index c6d405d044..9e0d93ff63 100644 --- a/src/cli/publish_command.zig +++ b/src/cli/publish_command.zig @@ -1149,7 +1149,7 @@ pub const PublishCommand = struct { try dirs.append(allocator, .{ bin_dir.asDir(), normalized_bin_dir, false }); - while (dirs.popOrNull()) |dir_info| { + while (dirs.pop()) |dir_info| { var dir, const dir_subpath, const close_dir = dir_info; defer if (close_dir) dir.close(); diff --git a/src/css/properties/background.zig b/src/css/properties/background.zig index 2321fc58e1..69c74b3ac9 100644 --- a/src/css/properties/background.zig +++ b/src/css/properties/background.zig @@ -1014,9 +1014,9 @@ pub const BackgroundHandler = struct { // If the last declaration is prefixed, pop the last value // so it isn't duplicated when we flush. 
if (this.has_prefix) { - var prop = this.decls.popOrNull(); - if (prop != null) { - prop.?.deinit(allocator); + var maybe_prop = this.decls.pop(); + if (maybe_prop) |*prop| { + prop.deinit(allocator); } } diff --git a/src/css/rules/supports.zig b/src/css/rules/supports.zig index e33f6115a8..dd58eb2c35 100644 --- a/src/css/rules/supports.zig +++ b/src/css/rules/supports.zig @@ -234,7 +234,7 @@ pub const SupportsCondition = union(enum) { } if (conditions.items.len == 1) { - const ret = conditions.pop(); + const ret = conditions.pop().?; defer conditions.deinit(input.allocator()); return .{ .result = ret }; } diff --git a/src/css/small_list.zig b/src/css/small_list.zig index a5b523fc74..22b78ebe2e 100644 --- a/src/css/small_list.zig +++ b/src/css/small_list.zig @@ -287,7 +287,7 @@ pub fn SmallList(comptime T: type, comptime N: comptime_int) type { old.deinit(allocator); } } - } else if (res.popOrNull()) |the_last| { + } else if (res.pop()) |the_last| { var old = this.*; // Prefixed property with no unprefixed version. // Replace self with the last prefixed version so that it doesn't diff --git a/src/css/values/percentage.zig b/src/css/values/percentage.zig index d0795ec426..da05ce2c84 100644 --- a/src/css/values/percentage.zig +++ b/src/css/values/percentage.zig @@ -41,17 +41,15 @@ pub const Percentage = struct { } }; if (this.v != 0.0 and @abs(this.v) < 0.01) { - // TODO: is this the max length? 
var buf: [32]u8 = undefined; - var fba = std.heap.FixedBufferAllocator.init(&buf); - var string = std.ArrayList(u8).init(fba.allocator()); - const writer = string.writer(); + var stream = std.io.fixedBufferStream(&buf); + const writer = stream.writer(); percent.toCssGeneric(writer) catch return dest.addFmtError(); if (this.v < 0.0) { try dest.writeChar('-'); - try dest.writeStr(bun.strings.trimLeadingPattern2(string.items, '-', '0')); + try dest.writeStr(bun.strings.trimLeadingPattern2(stream.getWritten(), '-', '0')); } else { - try dest.writeStr(bun.strings.trimLeadingChar(string.items, '0')); + try dest.writeStr(bun.strings.trimLeadingChar(stream.getWritten(), '0')); } } else { try percent.toCss(W, dest); diff --git a/src/deps/uws.zig b/src/deps/uws.zig index f2176a75f5..6aa55f5b69 100644 --- a/src/deps/uws.zig +++ b/src/deps/uws.zig @@ -3398,7 +3398,8 @@ pub const AnyResponse = union(enum) { } }; pub fn NewApp(comptime ssl: bool) type { - return opaque { + // TODO: change to `opaque` when https://github.com/ziglang/zig/issues/22869 is fixed + return struct { pub const is_ssl = ssl; const ssl_flag: i32 = @intFromBool(ssl); const ThisApp = @This(); @@ -3454,7 +3455,7 @@ pub fn NewApp(comptime ssl: bool) type { return us_socket_local_port(ssl_flag, @as(*uws.Socket, @ptrCast(this))); } - pub fn socket(this: *@This()) NewSocketHandler(ssl) { + pub fn socket(this: *ThisApp.ListenSocket) NewSocketHandler(ssl) { return NewSocketHandler(ssl).from(@ptrCast(this)); } }; @@ -4572,75 +4573,73 @@ pub const AnySocket = union(enum) { pub const udp = struct { pub const Socket = opaque { - const This = @This(); - - pub fn create(loop: *Loop, data_cb: *const fn (*This, *PacketBuffer, c_int) callconv(.C) void, drain_cb: *const fn (*This) callconv(.C) void, close_cb: *const fn (*This) callconv(.C) void, host: [*c]const u8, port: c_ushort, options: c_int, err: ?*c_int, user_data: ?*anyopaque) ?*This { + pub fn create(loop: *Loop, data_cb: *const fn (*udp.Socket, *PacketBuffer, c_int) 
callconv(.C) void, drain_cb: *const fn (*udp.Socket) callconv(.C) void, close_cb: *const fn (*udp.Socket) callconv(.C) void, host: [*c]const u8, port: c_ushort, options: c_int, err: ?*c_int, user_data: ?*anyopaque) ?*udp.Socket { return us_create_udp_socket(loop, data_cb, drain_cb, close_cb, host, port, options, err, user_data); } - pub fn send(this: *This, payloads: []const [*]const u8, lengths: []const usize, addresses: []const ?*const anyopaque) c_int { + pub fn send(this: *udp.Socket, payloads: []const [*]const u8, lengths: []const usize, addresses: []const ?*const anyopaque) c_int { bun.assert(payloads.len == lengths.len and payloads.len == addresses.len); return us_udp_socket_send(this, payloads.ptr, lengths.ptr, addresses.ptr, @intCast(payloads.len)); } - pub fn user(this: *This) ?*anyopaque { + pub fn user(this: *udp.Socket) ?*anyopaque { return us_udp_socket_user(this); } - pub fn bind(this: *This, hostname: [*c]const u8, port: c_uint) c_int { + pub fn bind(this: *udp.Socket, hostname: [*c]const u8, port: c_uint) c_int { return us_udp_socket_bind(this, hostname, port); } /// Get the bound port in host byte order - pub fn boundPort(this: *This) c_int { + pub fn boundPort(this: *udp.Socket) c_int { return us_udp_socket_bound_port(this); } - pub fn boundIp(this: *This, buf: [*c]u8, length: *i32) void { + pub fn boundIp(this: *udp.Socket, buf: [*c]u8, length: *i32) void { return us_udp_socket_bound_ip(this, buf, length); } - pub fn remoteIp(this: *This, buf: [*c]u8, length: *i32) void { + pub fn remoteIp(this: *udp.Socket, buf: [*c]u8, length: *i32) void { return us_udp_socket_remote_ip(this, buf, length); } - pub fn close(this: *This) void { + pub fn close(this: *udp.Socket) void { return us_udp_socket_close(this); } - pub fn connect(this: *This, hostname: [*c]const u8, port: c_uint) c_int { + pub fn connect(this: *udp.Socket, hostname: [*c]const u8, port: c_uint) c_int { return us_udp_socket_connect(this, hostname, port); } - pub fn disconnect(this: *This) 
c_int { + pub fn disconnect(this: *udp.Socket) c_int { return us_udp_socket_disconnect(this); } - pub fn setBroadcast(this: *This, enabled: bool) c_int { + pub fn setBroadcast(this: *udp.Socket, enabled: bool) c_int { return us_udp_socket_set_broadcast(this, @intCast(@intFromBool(enabled))); } - pub fn setUnicastTTL(this: *This, ttl: i32) c_int { + pub fn setUnicastTTL(this: *udp.Socket, ttl: i32) c_int { return us_udp_socket_set_ttl_unicast(this, @intCast(ttl)); } - pub fn setMulticastTTL(this: *This, ttl: i32) c_int { + pub fn setMulticastTTL(this: *udp.Socket, ttl: i32) c_int { return us_udp_socket_set_ttl_multicast(this, @intCast(ttl)); } - pub fn setMulticastLoopback(this: *This, enabled: bool) c_int { + pub fn setMulticastLoopback(this: *udp.Socket, enabled: bool) c_int { return us_udp_socket_set_multicast_loopback(this, @intCast(@intFromBool(enabled))); } - pub fn setMulticastInterface(this: *This, iface: *const std.posix.sockaddr.storage) c_int { + pub fn setMulticastInterface(this: *udp.Socket, iface: *const std.posix.sockaddr.storage) c_int { return us_udp_socket_set_multicast_interface(this, iface); } - pub fn setMembership(this: *This, address: *const std.posix.sockaddr.storage, iface: ?*const std.posix.sockaddr.storage, drop: bool) c_int { + pub fn setMembership(this: *udp.Socket, address: *const std.posix.sockaddr.storage, iface: ?*const std.posix.sockaddr.storage, drop: bool) c_int { return us_udp_socket_set_membership(this, address, iface, @intFromBool(drop)); } - pub fn setSourceSpecificMembership(this: *This, source: *const std.posix.sockaddr.storage, group: *const std.posix.sockaddr.storage, iface: ?*const std.posix.sockaddr.storage, drop: bool) c_int { + pub fn setSourceSpecificMembership(this: *udp.Socket, source: *const std.posix.sockaddr.storage, group: *const std.posix.sockaddr.storage, iface: ?*const std.posix.sockaddr.storage, drop: bool) c_int { return us_udp_socket_set_source_specific_membership(this, source, group, iface, 
@intFromBool(drop)); } }; diff --git a/src/env.zig b/src/env.zig index 482d7d5058..3c596a5aad 100644 --- a/src/env.zig +++ b/src/env.zig @@ -4,7 +4,7 @@ const bun = @import("root").bun; pub const BuildTarget = enum { native, wasm, wasi }; pub const build_target: BuildTarget = brk: { - if (@import("builtin").target.isWasm()) { + if (@import("builtin").cpu.arch.isWasm()) { break :brk BuildTarget.wasm; } else { break :brk BuildTarget.native; diff --git a/src/feature_flags.zig b/src/feature_flags.zig index 7a5c7605cc..edeb2c2db8 100644 --- a/src/feature_flags.zig +++ b/src/feature_flags.zig @@ -110,8 +110,6 @@ pub const unwrap_commonjs_to_esm = true; /// https://github.com/source-map/source-map-rfc/pull/20 pub const source_map_debug_id = true; -pub const alignment_tweak = false; - pub const export_star_redirect = false; pub const streaming_file_uploads_for_http_client = true; diff --git a/src/glob/GlobWalker.zig b/src/glob/GlobWalker.zig index 9cf60d5269..e30476f086 100644 --- a/src/glob/GlobWalker.zig +++ b/src/glob/GlobWalker.zig @@ -549,7 +549,7 @@ pub fn GlobWalker_( else => {}, } - while (this.walker.workbuf.popOrNull()) |work_item| { + while (this.walker.workbuf.pop()) |work_item| { if (work_item.fd) |fd| { this.closeDisallowingCwd(fd); } @@ -715,7 +715,7 @@ pub fn GlobWalker_( .get_next => { // Done if (this.walker.workbuf.items.len == 0) return .{ .result = null }; - const work_item = this.walker.workbuf.pop(); + const work_item = this.walker.workbuf.pop().?; switch (work_item.kind) { .directory => { switch (try this.transitionToDirIterState(work_item, false)) { @@ -753,9 +753,9 @@ pub fn GlobWalker_( // So for case A, we just need to check if the pattern is the last pattern. 
if (is_last or (pattern.syntax_hint == .Double and - component_idx + 1 == this.walker.patternComponents.items.len -| 1 and - next_pattern.?.syntax_hint != .Double and - this.walker.matchPatternImpl(next_pattern.?, entry_name))) + component_idx + 1 == this.walker.patternComponents.items.len -| 1 and + next_pattern.?.syntax_hint != .Double and + this.walker.matchPatternImpl(next_pattern.?, entry_name))) { return .{ .result = try this.walker.prepareMatchedPathSymlink(symlink_full_path_z) orelse continue }; } diff --git a/src/heap_breakdown.zig b/src/heap_breakdown.zig index 692018d1f1..5a225871d0 100644 --- a/src/heap_breakdown.zig +++ b/src/heap_breakdown.zig @@ -50,10 +50,10 @@ pub const Zone = opaque { return zone; } - fn alignedAlloc(zone: *Zone, len: usize, alignment: usize) ?[*]u8 { + fn alignedAlloc(zone: *Zone, len: usize, alignment: std.mem.Alignment) ?[*]u8 { // The posix_memalign only accepts alignment values that are a // multiple of the pointer size - const eff_alignment = @max(alignment, @sizeOf(usize)); + const eff_alignment = @max(alignment.toByteUnits(), @sizeOf(usize)); const ptr = malloc_zone_memalign(zone, eff_alignment, len); return @as(?[*]u8, @ptrCast(ptr)); } @@ -62,12 +62,11 @@ pub const Zone = opaque { return std.c.malloc_size(ptr); } - fn rawAlloc(zone: *anyopaque, len: usize, log2_align: u8, _: usize) ?[*]u8 { - const alignment = @as(usize, 1) << @intCast(log2_align); + fn rawAlloc(zone: *anyopaque, len: usize, alignment: std.mem.Alignment, _: usize) ?[*]u8 { return alignedAlloc(@ptrCast(zone), len, alignment); } - fn resize(_: *anyopaque, buf: []u8, _: u8, new_len: usize, _: usize) bool { + fn resize(_: *anyopaque, buf: []u8, _: std.mem.Alignment, new_len: usize, _: usize) bool { if (new_len <= buf.len) { return true; } @@ -80,13 +79,14 @@ pub const Zone = opaque { return false; } - fn rawFree(zone: *anyopaque, buf: []u8, _: u8, _: usize) void { + fn rawFree(zone: *anyopaque, buf: []u8, _: std.mem.Alignment, _: usize) void { 
malloc_zone_free(@ptrCast(zone), @ptrCast(buf.ptr)); } pub const vtable = std.mem.Allocator.VTable{ .alloc = &rawAlloc, .resize = &resize, + .remap = &std.mem.Allocator.noRemap, .free = &rawFree, }; @@ -99,10 +99,9 @@ pub const Zone = opaque { /// Create a single-item pointer with initialized data. pub inline fn create(zone: *Zone, comptime T: type, data: T) *T { - const align_of_t: usize = @alignOf(T); - const log2_align_of_t = @ctz(align_of_t); + const alignment: std.mem.Alignment = .fromByteUnits(@alignOf(T)); const ptr: *T = @alignCast(@ptrCast( - rawAlloc(zone, @sizeOf(T), log2_align_of_t, @returnAddress()) orelse bun.outOfMemory(), + rawAlloc(zone, @sizeOf(T), alignment, @returnAddress()) orelse bun.outOfMemory(), )); ptr.* = data; return ptr; diff --git a/src/http.zig b/src/http.zig index f76db2f8ee..01a39cb813 100644 --- a/src/http.zig +++ b/src/http.zig @@ -1394,7 +1394,7 @@ pub const HTTPThread = struct { this.queued_writes.clearRetainingCapacity(); } - while (this.queued_proxy_deref.popOrNull()) |http| { + while (this.queued_proxy_deref.pop()) |http| { http.deref(); } @@ -3078,6 +3078,9 @@ pub fn start(this: *HTTPClient, body: HTTPRequestBody, body_out_str: *MutableStr fn start_(this: *HTTPClient, comptime is_ssl: bool) void { if (comptime Environment.allow_assert) { + // Comparing `ptr` is safe here because it is only done if the vtable pointers are equal, + // which means they are both mimalloc arenas and therefore have non-undefined context + // pointers. if (this.allocator.vtable == default_allocator.vtable and this.allocator.ptr != default_allocator.ptr) { @panic("HTTPClient used with threadlocal allocator belonging to another thread. 
This will cause crashes."); } diff --git a/src/install/install.zig b/src/install/install.zig index 1f34a62eaf..b4ce83fff6 100644 --- a/src/install/install.zig +++ b/src/install/install.zig @@ -1173,15 +1173,15 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { ) bool { const verified = switch (resolution.tag) { - .git => this.verifyGitResolution(&resolution.value.git, root_node_modules_dir), - .github => this.verifyGitResolution(&resolution.value.github, root_node_modules_dir), - .root => this.verifyTransitiveSymlinkedFolder(root_node_modules_dir), - .folder => if (this.lockfile.isWorkspaceTreeId(this.node_modules.tree_id)) - this.verifyPackageJSONNameAndVersion(root_node_modules_dir, resolution.tag) - else - this.verifyTransitiveSymlinkedFolder(root_node_modules_dir), - else => this.verifyPackageJSONNameAndVersion(root_node_modules_dir, resolution.tag), - }; + .git => this.verifyGitResolution(&resolution.value.git, root_node_modules_dir), + .github => this.verifyGitResolution(&resolution.value.github, root_node_modules_dir), + .root => this.verifyTransitiveSymlinkedFolder(root_node_modules_dir), + .folder => if (this.lockfile.isWorkspaceTreeId(this.node_modules.tree_id)) + this.verifyPackageJSONNameAndVersion(root_node_modules_dir, resolution.tag) + else + this.verifyTransitiveSymlinkedFolder(root_node_modules_dir), + else => this.verifyPackageJSONNameAndVersion(root_node_modules_dir, resolution.tag), + }; if (comptime kind == .patch) return verified; if (this.patch.isNull()) return verified; if (!verified) return false; @@ -4321,7 +4321,7 @@ pub const PackageManager = struct { this.lockfile.isRootDependency(this, dependency_id) and // no need to do a look up if update requests are empty (`bun update` with no args) (this.update_requests.len == 0 or - this.updating_packages.contains(dependency.name.slice(this.lockfile.buffers.string_bytes.items))); + this.updating_packages.contains(dependency.name.slice(this.lockfile.buffers.string_bytes.items))); // 
Was this package already allocated? Let's reuse the existing one. if (this.lockfile.getPackageID( @@ -5120,7 +5120,7 @@ pub const PackageManager = struct { var file = tmpfile.file(); const file_writer = file.writer(); - var buffered_writer = std.io.BufferedWriter(std.mem.page_size, @TypeOf(file_writer)){ + var buffered_writer = std.io.BufferedWriter(std.heap.page_size_min, @TypeOf(file_writer)){ .unbuffered_writer = file_writer, }; const writer = buffered_writer.writer(); @@ -7471,7 +7471,7 @@ pub const PackageManager = struct { if (env.get(registry_key)) |registry_| { if (registry_.len > 0 and (strings.startsWith(registry_, "https://") or - strings.startsWith(registry_, "http://"))) + strings.startsWith(registry_, "http://"))) { const prev_scope = this.scope; var api_registry = std.mem.zeroes(Api.NpmRegistry); @@ -14555,12 +14555,12 @@ pub const PackageManager = struct { manager.options.enable.force_save_lockfile = manager.options.enable.force_save_lockfile or (load_result == .ok and - // if migrated always save a new lockfile - (load_result.ok.was_migrated or + // if migrated always save a new lockfile + (load_result.ok.was_migrated or - // if loaded from binary and save-text-lockfile is passed - (load_result.ok.format == .binary and - manager.options.save_text_lockfile orelse false))); + // if loaded from binary and save-text-lockfile is passed + (load_result.ok.format == .binary and + manager.options.save_text_lockfile orelse false))); // this defaults to false // but we force allowing updates to the lockfile when you do bun add @@ -15255,29 +15255,29 @@ pub const PackageManager = struct { // If the lockfile was frozen, we already checked it !manager.options.enable.frozen_lockfile and if (load_result.loadedFromTextLockfile()) - !try manager.lockfile.eql(lockfile_before_clean, packages_len_before_install, manager.allocator) - else - try manager.lockfile.hasMetaHashChanged( - PackageManager.verbose_install or manager.options.do.print_meta_hash_string, - 
@min(packages_len_before_install, manager.lockfile.packages.len), - ); + !try manager.lockfile.eql(lockfile_before_clean, packages_len_before_install, manager.allocator) + else + try manager.lockfile.hasMetaHashChanged( + PackageManager.verbose_install or manager.options.do.print_meta_hash_string, + @min(packages_len_before_install, manager.lockfile.packages.len), + ); // It's unnecessary work to re-save the lockfile if there are no changes const should_save_lockfile = (load_result == .ok and ((load_result.ok.format == .binary and save_format == .text) or - // make sure old versions are updated - load_result.ok.format == .text and save_format == .text and manager.lockfile.text_lockfile_version != TextLockfile.Version.current)) or + // make sure old versions are updated + load_result.ok.format == .text and save_format == .text and manager.lockfile.text_lockfile_version != TextLockfile.Version.current)) or // check `save_lockfile` after checking if loaded from binary and save format is text // because `save_lockfile` is set to false for `--frozen-lockfile` (manager.options.do.save_lockfile and - (did_meta_hash_change or - had_any_diffs or - manager.update_requests.len > 0 or - (load_result == .ok and load_result.ok.serializer_result.packages_need_update) or - manager.lockfile.isEmpty() or - manager.options.enable.force_save_lockfile)); + (did_meta_hash_change or + had_any_diffs or + manager.update_requests.len > 0 or + (load_result == .ok and load_result.ok.serializer_result.packages_need_update) or + manager.lockfile.isEmpty() or + manager.options.enable.force_save_lockfile)); if (should_save_lockfile) { try manager.saveLockfile(&load_result, save_format, had_any_diffs, lockfile_before_install, packages_len_before_install, log_level); diff --git a/src/install/lockfile.zig b/src/install/lockfile.zig index 8cfbc1df16..b344c12518 100644 --- a/src/install/lockfile.zig +++ b/src/install/lockfile.zig @@ -1578,7 +1578,7 @@ const Cloner = struct { pub fn flush(this: 
*Cloner) anyerror!void { const max_package_id = this.old.packages.len; - while (this.clone_queue.popOrNull()) |to_clone| { + while (this.clone_queue.pop()) |to_clone| { const mapping = this.mapping[to_clone.old_resolution]; if (mapping < max_package_id) { this.lockfile.buffers.resolutions.items[to_clone.resolve_id] = mapping; @@ -5510,7 +5510,7 @@ pub const Package = extern struct { } log.addErrorFmt(&source, dependencies_q.loc, allocator, - // TODO: what if we could comptime call the syntax highlighter + // TODO: what if we could comptime call the syntax highlighter \\Workspaces expects an array of strings, e.g. \\ "workspaces": [ \\ "path/to/package" @@ -5522,7 +5522,7 @@ pub const Package = extern struct { const key = item.key.?.asString(allocator).?; const value = item.value.?.asString(allocator) orelse { log.addErrorFmt(&source, item.value.?.loc, allocator, - // TODO: what if we could comptime call the syntax highlighter + // TODO: what if we could comptime call the syntax highlighter \\{0s} expects a map of specifiers, e.g. \\ "{0s}": {{ \\ "bun": "latest" @@ -5545,7 +5545,7 @@ pub const Package = extern struct { else => { if (group.behavior.isWorkspace()) { log.addErrorFmt(&source, dependencies_q.loc, allocator, - // TODO: what if we could comptime call the syntax highlighter + // TODO: what if we could comptime call the syntax highlighter \\Workspaces expects an array of strings, e.g. 
\\ "workspaces": [ \\ "path/to/package" @@ -6980,10 +6980,10 @@ pub const Serializer = struct { if (next_num == has_patched_dependencies_tag) { var patched_dependencies_name_and_version_hashes = try Lockfile.Buffers.readArray( - stream, - allocator, - std.ArrayListUnmanaged(PackageNameAndVersionHash), - ); + stream, + allocator, + std.ArrayListUnmanaged(PackageNameAndVersionHash), + ); defer patched_dependencies_name_and_version_hashes.deinit(allocator); var map = lockfile.patched_dependencies; diff --git a/src/io/PipeWriter.zig b/src/io/PipeWriter.zig index 2cbad444c4..0d81d90468 100644 --- a/src/io/PipeWriter.zig +++ b/src/io/PipeWriter.zig @@ -396,7 +396,7 @@ pub fn PosixStreamingWriter( } // TODO: configurable? - const chunk_size: usize = std.mem.page_size; + const chunk_size: usize = std.heap.page_size_min; pub fn memoryCost(this: *const @This()) usize { return @sizeOf(@This()) + this.outgoing.memoryCost(); @@ -1064,10 +1064,10 @@ pub const StreamBuffer = struct { } pub fn maybeShrink(this: *StreamBuffer) void { - if (this.list.capacity > std.mem.page_size) { + if (this.list.capacity > std.heap.pageSize()) { // workaround insane zig decision to make it undefined behavior to resize .len < .capacity this.list.expandToCapacity(); - this.list.shrinkAndFree(std.mem.page_size); + this.list.shrinkAndFree(std.heap.pageSize()); } } diff --git a/src/io/io.zig b/src/io/io.zig index 2353a33b26..dc46c10658 100644 --- a/src/io/io.zig +++ b/src/io/io.zig @@ -306,7 +306,8 @@ pub const Loop = struct { timespec.sec = @intCast(sec); timespec.nsec = @intCast(nsec); } else { - std.posix.clock_gettime(std.posix.CLOCK.MONOTONIC, timespec) catch {}; + const updated = std.posix.clock_gettime(std.posix.CLOCK.MONOTONIC) catch return; + timespec.* = updated; } } }; diff --git a/src/io/time.zig b/src/io/time.zig index ce30e31896..4d58360f01 100644 --- a/src/io/time.zig +++ b/src/io/time.zig @@ -40,7 +40,7 @@ pub const Time = struct { // For more detail and why CLOCK_MONOTONIC_RAW is even 
worse than CLOCK_MONOTONIC, // see https://github.com/ziglang/zig/pull/933#discussion_r656021295. var ts: std.posix.timespec = undefined; - std.posix.clock_gettime(std.posix.CLOCK_BOOTTIME, &ts) catch @panic("CLOCK_BOOTTIME required"); + std.posix.clock_gettime(std.posix.CLOCK.BOOTTIME, &ts) catch @panic("CLOCK_BOOTTIME required"); break :blk @as(u64, @intCast(ts.tv_sec)) * std.time.ns_per_s + @as(u64, @intCast(ts.tv_nsec)); }; @@ -57,7 +57,7 @@ pub const Time = struct { // https://opensource.apple.com/source/Libc/Libc-1158.1.2/gen/clock_gettime.3.auto.html var ts: std.posix.timespec = undefined; - std.posix.clock_gettime(std.posix.CLOCK_REALTIME, &ts) catch unreachable; + std.posix.clock_gettime(std.posix.CLOCK.REALTIME, &ts) catch unreachable; return @as(i64, ts.tv_sec) * std.time.ns_per_s + ts.tv_nsec; } diff --git a/src/js_ast.zig b/src/js_ast.zig index 02d40cc204..59aab796e1 100644 --- a/src/js_ast.zig +++ b/src/js_ast.zig @@ -7833,7 +7833,7 @@ pub const Scope = struct { if (Symbol.isKindHoistedOrFunction(new) and Symbol.isKindHoistedOrFunction(existing) and (scope.kind == .entry or scope.kind == .function_body or scope.kind == .function_args or - (new == existing and Symbol.isKindHoisted(existing)))) + (new == existing and Symbol.isKindHoisted(existing)))) { return .replace_with_new; } @@ -8554,7 +8554,7 @@ pub const Macro = struct { }; pub const ASTMemoryAllocator = struct { - const SFA = std.heap.StackFallbackAllocator(@min(8192, std.mem.page_size)); + const SFA = std.heap.StackFallbackAllocator(@min(8192, std.heap.page_size_min)); stack_allocator: SFA = undefined, bump_allocator: std.mem.Allocator = undefined, diff --git a/src/js_parser.zig b/src/js_parser.zig index f2bfc9ba87..081e6ae040 100644 --- a/src/js_parser.zig +++ b/src/js_parser.zig @@ -3029,8 +3029,8 @@ pub const Parser = struct { // - require("foo") import_record.is_unused = import_record.is_unused or (import_record.kind == .stmt and - !import_record.was_originally_bare_import and - 
!import_record.calls_runtime_re_export_fn); + !import_record.was_originally_bare_import and + !import_record.calls_runtime_re_export_fn); } var iter = scan_pass.used_symbols.iterator(); @@ -5224,7 +5224,7 @@ fn NewParser_( // we must also unwrap requires into imports. const should_unwrap_require = p.options.features.unwrap_commonjs_to_esm and (p.unwrap_all_requires or - if (path.packageName()) |pkg| p.options.features.shouldUnwrapRequire(pkg) else false) and + if (path.packageName()) |pkg| p.options.features.shouldUnwrapRequire(pkg) else false) and // We cannot unwrap a require wrapped in a try/catch because // import statements cannot be wrapped in a try/catch and // require cannot return a promise. @@ -6911,14 +6911,14 @@ fn NewParser_( var notes = allocator.alloc(logger.Data, 1) catch unreachable; notes[0] = logger.rangeData( - p.source, - r, - std.fmt.allocPrint( - allocator, - "{s} was originally declared here", - .{name}, - ) catch unreachable, - ); + p.source, + r, + std.fmt.allocPrint( + allocator, + "{s} was originally declared here", + .{name}, + ) catch unreachable, + ); p.log.addRangeErrorFmtWithNotes(p.source, js_lexer.rangeOfIdentifier(p.source, member_in_scope.loc), allocator, notes, "{s} has already been declared", .{name}) catch unreachable; } else if (_scope == scope.parent) { @@ -7765,7 +7765,7 @@ fn NewParser_( p.panic("Internal error", .{}); } - _ = children.popOrNull(); + _ = children.pop(); } fn parseFn(p: *P, name: ?js_ast.LocRef, opts: FnOrArrowDataParse) anyerror!G.Fn { @@ -12166,11 +12166,11 @@ fn NewParser_( } if (!p.lexer.has_newline_before and ( - // Import Assertions are deprecated. - // Import Attributes are the new way to do this. - // But some code may still use "assert" - // We support both and treat them identically. - // Once Prettier & TypeScript support import attributes, we will add runtime support + // Import Assertions are deprecated. + // Import Attributes are the new way to do this. 
+ // But some code may still use "assert" + // We support both and treat them identically. + // Once Prettier & TypeScript support import attributes, we will add runtime support p.lexer.isContextualKeyword("assert") or p.lexer.token == .t_with)) { try p.lexer.next(); @@ -13357,7 +13357,7 @@ fn NewParser_( if ((p.fn_or_arrow_data_parse.allow_await != .allow_ident and strings.eqlComptime(name, "await")) or (p.fn_or_arrow_data_parse.allow_yield != .allow_ident and - strings.eqlComptime(name, "yield"))) + strings.eqlComptime(name, "yield"))) { if (strings.eqlComptime(name, "await")) { p.log.addRangeError(p.source, name_range, "Cannot use \"await\" here") catch unreachable; @@ -16596,7 +16596,7 @@ fn NewParser_( // Process all binary operations from the deepest-visited node back toward // our original top-level binary operation. while (p.binary_expression_stack.items.len > stack_bottom) { - v = p.binary_expression_stack.pop(); + v = p.binary_expression_stack.pop().?; v.e.left = current; current = v.visitRightAndFinish(p); } @@ -17131,10 +17131,11 @@ fn NewParser_( in.assign_target == .none and key.data.isStringValue() and strings.eqlComptime( - // __proto__ is utf8, assume it lives in refs - key.data.e_string.slice(p.allocator), - "__proto__", - )) { + // __proto__ is utf8, assume it lives in refs + key.data.e_string.slice(p.allocator), + "__proto__", + )) + { if (has_proto) { const r = js_lexer.rangeOfIdentifier(p.source, key.loc); p.log.addRangeError(p.source, r, "Cannot specify the \"__proto__\" property more than once per object") catch unreachable; @@ -17250,9 +17251,10 @@ fn NewParser_( if (e_.optional_chain == null and target_was_identifier_before_visit and strings.eqlComptime( - p.symbols.items[e_.target.data.e_identifier.ref.inner_index].original_name, - "eval", - )) { + p.symbols.items[e_.target.data.e_identifier.ref.inner_index].original_name, + "eval", + )) + { e_.is_direct_eval = true; // Pessimistically assume that if this looks like a CommonJS module @@ 
-17910,18 +17912,18 @@ fn NewParser_( .e_if => |ex| { return p.exprCanBeRemovedIfUnusedWithoutDCECheck(&ex.test_) and (p.isSideEffectFreeUnboundIdentifierRef( - ex.yes, - ex.test_, - true, - ) or - p.exprCanBeRemovedIfUnusedWithoutDCECheck(&ex.yes)) and + ex.yes, + ex.test_, + true, + ) or + p.exprCanBeRemovedIfUnusedWithoutDCECheck(&ex.yes)) and (p.isSideEffectFreeUnboundIdentifierRef( - ex.no, - ex.test_, - false, - ) or p.exprCanBeRemovedIfUnusedWithoutDCECheck( - &ex.no, - )); + ex.no, + ex.test_, + false, + ) or p.exprCanBeRemovedIfUnusedWithoutDCECheck( + &ex.no, + )); }, .e_array => |ex| { for (ex.items.slice()) |*item| { @@ -18533,13 +18535,14 @@ fn NewParser_( // just not module.exports = { bar: function() {} } // just not module.exports = { bar() {} } switch (prop.value.?.data) { - .e_commonjs_export_identifier, .e_import_identifier, .e_identifier => false, - .e_call => |call| switch (call.target.data) { .e_commonjs_export_identifier, .e_import_identifier, .e_identifier => false, - else => |call_target| !@as(Expr.Tag, call_target).isPrimitiveLiteral(), - }, - else => !prop.value.?.isPrimitiveLiteral(), - }) { + .e_call => |call| switch (call.target.data) { + .e_commonjs_export_identifier, .e_import_identifier, .e_identifier => false, + else => |call_target| !@as(Expr.Tag, call_target).isPrimitiveLiteral(), + }, + else => !prop.value.?.isPrimitiveLiteral(), + }) + { p.deoptimizeCommonJSNamedExports(); return null; } @@ -23032,7 +23035,7 @@ fn NewParser_( if (stmt.data == .s_local and // Need to re-check lower_using for the k_using case in case lower_await is true ((stmt.data.s_local.kind == .k_using and p.options.features.lower_using) or - (stmt.data.s_local.kind == .k_await_using))) + (stmt.data.s_local.kind == .k_await_using))) { return true; } @@ -23728,8 +23731,8 @@ fn NewParser_( const preserve_strict_mode = p.module_scope.strict_mode == .explicit_strict_mode and !(parts.items.len > 0 and - parts.items[0].stmts.len > 0 and - 
parts.items[0].stmts[0].data == .s_directive); + parts.items[0].stmts.len > 0 and + parts.items[0].stmts[0].data == .s_directive); total_stmts_count += @as(usize, @intCast(@intFromBool(preserve_strict_mode))); @@ -24279,9 +24282,9 @@ const ReactRefresh = struct { return id.len >= 4 and strings.hasPrefixComptime(id, "use") and switch (id[3]) { - 'A'...'Z' => true, - else => false, - }; + 'A'...'Z' => true, + else => false, + }; } pub const built_in_hooks = bun.ComptimeEnumMap(enum { diff --git a/src/js_printer.zig b/src/js_printer.zig index 4e5fcbd960..6e0d531fc3 100644 --- a/src/js_printer.zig +++ b/src/js_printer.zig @@ -3129,7 +3129,7 @@ fn NewPrinter( // Process all binary operations from the deepest-visited node back toward // our original top-level binary operation while (p.binary_expression_stack.items.len > stack_bottom) { - var last = p.binary_expression_stack.pop(); + var last = p.binary_expression_stack.pop().?; last.visitRightAndFinish(p); } }, @@ -5262,9 +5262,9 @@ fn NewPrinter( // This seems silly to cache but the .items() function apparently costs 1ms according to Instruments. printer.source_map_builder.line_offset_table_byte_offset_list = printer - .source_map_builder - .line_offset_tables - .items(.byte_offset_to_start_of_line); + .source_map_builder + .line_offset_tables + .items(.byte_offset_to_start_of_line); } return printer; diff --git a/src/libarchive/libarchive.zig b/src/libarchive/libarchive.zig index f19bcd5705..d64b2284fc 100644 --- a/src/libarchive/libarchive.zig +++ b/src/libarchive/libarchive.zig @@ -437,7 +437,7 @@ pub const Archiver = struct { if (comptime Environment.isWindows) { try bun.MakePath.makePath(u16, dir, path); } else { - std.posix.mkdiratZ(dir_fd, pathname, @as(u32, @intCast(mode))) catch |err| { + std.posix.mkdiratZ(dir_fd, pathname, @intCast(mode)) catch |err| { // It's possible for some tarballs to return a directory twice, with and // without `./` in the beginning. 
So if it already exists, continue to the // next entry. diff --git a/src/linear_fifo.zig b/src/linear_fifo.zig index 62f8f24e03..00ceeeb5b5 100644 --- a/src/linear_fifo.zig +++ b/src/linear_fifo.zig @@ -97,7 +97,7 @@ pub fn LinearFifo( bun.copy(T, self.buf[0..self.count], self.buf[self.head..][0..self.count]); self.head = 0; } else { - var tmp: [mem.page_size / 2 / @sizeOf(T)]T = undefined; + var tmp: [std.heap.page_size_min / 2 / @sizeOf(T)]T = undefined; while (self.head != 0) { const n = @min(self.head, tmp.len); diff --git a/src/multi_array_list.zig b/src/multi_array_list.zig index 087b8a4274..c6bd265ea7 100644 --- a/src/multi_array_list.zig +++ b/src/multi_array_list.zig @@ -264,23 +264,15 @@ pub fn MultiArrayList(comptime T: type) type { return index; } - /// Remove and return the last element from the list. - /// Asserts the list has at least one item. + /// Remove and return the last element from the list, or return null if list is empty. /// Invalidates pointers to fields of the removed element. - pub fn pop(self: *Self) T { + pub fn pop(self: *Self) ?T { + if (self.len == 0) return null; const val = self.get(self.len - 1); self.len -= 1; return val; } - /// Remove and return the last element from the list, or - /// return `null` if list is empty. - /// Invalidates pointers to fields of the removed element, if any. - pub fn popOrNull(self: *Self) ?T { - if (self.len == 0) return null; - return self.pop(); - } - /// Inserts an item into an ordered list. Shifts all elements /// after and including the specified index back by one and /// sets the given index to the specified element. 
May reallocate diff --git a/src/patch.zig b/src/patch.zig index cd531c5caa..f00e929736 100644 --- a/src/patch.zig +++ b/src/patch.zig @@ -1381,26 +1381,27 @@ pub fn gitDiffInternal( try map.put("USERPROFILE", ""); child_proc.env_map = ↦ - var stdout = std.ArrayList(u8).init(allocator); - var stderr = std.ArrayList(u8).init(allocator); + var stdout: std.ArrayListUnmanaged(u8) = .empty; + var stderr: std.ArrayListUnmanaged(u8) = .empty; var deinit_stdout = true; var deinit_stderr = true; defer { - if (deinit_stdout) stdout.deinit(); - if (deinit_stderr) stderr.deinit(); + if (deinit_stdout) stdout.deinit(allocator); + if (deinit_stderr) stderr.deinit(allocator); } try child_proc.spawn(); - try child_proc.collectOutput(&stdout, &stderr, 1024 * 1024 * 4); + try child_proc.collectOutput(allocator, &stdout, &stderr, 1024 * 1024 * 4); _ = try child_proc.wait(); if (stderr.items.len > 0) { deinit_stderr = false; - return .{ .err = stderr }; + return .{ .err = stderr.toManaged(allocator) }; } debug("Before postprocess: {s}\n", .{stdout.items}); - try gitDiffPostprocess(&stdout, old_folder, new_folder); + var stdout_managed = stdout.toManaged(allocator); + try gitDiffPostprocess(&stdout_managed, old_folder, new_folder); deinit_stdout = false; - return .{ .result = stdout }; + return .{ .result = stdout_managed }; } /// Now we need to do the equivalent of these regex subtitutions. 
@@ -1535,9 +1536,9 @@ fn gitDiffPostprocess(stdout: *std.ArrayList(u8), old_folder: []const u8, new_fo fn shouldSkipLine(line: []const u8) bool { return line.len == 0 or (switch (line[0]) { - ' ', '-', '+' => true, - else => false, - } and - // line like: "--- a/numbers.txt" or "+++ b/numbers.txt" we should not skip - (!(line.len >= 4 and (std.mem.eql(u8, line[0..4], "--- ") or std.mem.eql(u8, line[0..4], "+++ "))))); + ' ', '-', '+' => true, + else => false, + } and + // line like: "--- a/numbers.txt" or "+++ b/numbers.txt" we should not skip + (!(line.len >= 4 and (std.mem.eql(u8, line[0..4], "--- ") or std.mem.eql(u8, line[0..4], "+++ "))))); } diff --git a/src/ptr/CowSlice.zig b/src/ptr/CowSlice.zig index 224f6b5a12..e3493d7a65 100644 --- a/src/ptr/CowSlice.zig +++ b/src/ptr/CowSlice.zig @@ -190,7 +190,11 @@ pub fn CowSliceZ(T: type, comptime sentinel: ?T) type { if (comptime cow_str_assertions) if (str.debug) |debug| { debug.mutex.lock(); bun.assertf( - debug.allocator.ptr == allocator.ptr and debug.allocator.vtable == allocator.vtable, + // We cannot compare `ptr` here, because allocator implementations with no + // associated data set the context pointer to `undefined`, therefore comparing + // `ptr` may be undefined behavior. See https://github.com/ziglang/zig/pull/22691 + // and https://github.com/ziglang/zig/issues/23068. 
+ debug.allocator.vtable == allocator.vtable, "CowSlice.deinit called with a different allocator than the one used to create it", .{}, ); diff --git a/src/resolver/resolver.zig b/src/resolver/resolver.zig index 4ca990006d..1b0e9df7cf 100644 --- a/src/resolver/resolver.zig +++ b/src/resolver/resolver.zig @@ -733,17 +733,17 @@ pub const Resolver = struct { // while these rules should not be applied to the entrypoint as it is never external (#12734) if (kind != .entry_point_build and kind != .entry_point_run and (r.isExternalPattern(import_path) or - // "fill: url(#filter);" - (kind.isFromCSS() and strings.startsWith(import_path, "#")) or + // "fill: url(#filter);" + (kind.isFromCSS() and strings.startsWith(import_path, "#")) or - // "background: url(http://example.com/images/image.png);" - strings.startsWith(import_path, "http://") or + // "background: url(http://example.com/images/image.png);" + strings.startsWith(import_path, "http://") or - // "background: url(https://example.com/images/image.png);" - strings.startsWith(import_path, "https://") or + // "background: url(https://example.com/images/image.png);" + strings.startsWith(import_path, "https://") or - // "background: url(//example.com/images/image.png);" - strings.startsWith(import_path, "//"))) + // "background: url(//example.com/images/image.png);" + strings.startsWith(import_path, "//"))) { if (r.debug_logs) |*debug| { debug.addNote("Marking this path as implicitly external"); @@ -1122,9 +1122,9 @@ pub const Resolver = struct { const platform = bun.path.Platform.auto; const ends_with_dir = platform.isSeparator(import_path[import_path.len - 1]) or (import_path.len > 3 and - platform.isSeparator(import_path[import_path.len - 3]) and - import_path[import_path.len - 2] == '.' and - import_path[import_path.len - 1] == '.'); + platform.isSeparator(import_path[import_path.len - 3]) and + import_path[import_path.len - 2] == '.' 
and + import_path[import_path.len - 1] == '.'); const buf = bufs(.relative_abs_path); import_path = r.fs.absBuf(&.{import_path}, buf); if (ends_with_dir) { @@ -1324,7 +1324,7 @@ pub const Resolver = struct { // Always mark "fs" as disabled, matching Webpack v4 behavior if (strings.hasPrefixComptime(import_path_without_node_prefix, "fs") and (import_path_without_node_prefix.len == 2 or - import_path_without_node_prefix[2] == '/')) + import_path_without_node_prefix[2] == '/')) { result.path_pair.primary.namespace = "node"; result.path_pair.primary.text = import_path_without_node_prefix; @@ -3027,7 +3027,7 @@ pub const Resolver = struct { if (strings.startsWith(path, prefix) and strings.endsWith(path, suffix) and (prefix.len > longest_match_prefix_length or - (prefix.len == longest_match_prefix_length and suffix.len > longest_match_suffix_length))) + (prefix.len == longest_match_prefix_length and suffix.len > longest_match_suffix_length))) { longest_match_prefix_length = @as(i32, @intCast(prefix.len)); longest_match_suffix_length = @as(i32, @intCast(suffix.len)); @@ -4168,10 +4168,10 @@ pub const Resolver = struct { } } - var merged_config = parent_configs.pop(); + var merged_config = parent_configs.pop().?; // starting from the base config (end of the list) // successively apply the inheritable attributes to the next config - while (parent_configs.popOrNull()) |parent_config| { + while (parent_configs.pop()) |parent_config| { merged_config.emit_decorator_metadata = merged_config.emit_decorator_metadata or parent_config.emit_decorator_metadata; if (parent_config.base_url.len > 0) { merged_config.base_url = parent_config.base_url; diff --git a/src/string/WTFStringImpl.zig b/src/string/WTFStringImpl.zig index c762f39d73..84d54aa669 100644 --- a/src/string/WTFStringImpl.zig +++ b/src/string/WTFStringImpl.zig @@ -232,7 +232,7 @@ pub const WTFStringImplStruct = extern struct { }; pub const StringImplAllocator = struct { - fn alloc(ptr: *anyopaque, len: usize, _: u8, _: 
usize) ?[*]u8 { + fn alloc(ptr: *anyopaque, len: usize, _: std.mem.Alignment, _: usize) ?[*]u8 { var this = bun.cast(WTFStringImpl, ptr); const len_ = this.byteLength(); @@ -247,14 +247,10 @@ pub const StringImplAllocator = struct { return @constCast(this.m_ptr.latin1); } - fn resize(_: *anyopaque, _: []u8, _: u8, _: usize, _: usize) bool { - return false; - } - pub fn free( ptr: *anyopaque, buf: []u8, - _: u8, + _: std.mem.Alignment, _: usize, ) void { var this = bun.cast(WTFStringImpl, ptr); @@ -265,7 +261,8 @@ pub const StringImplAllocator = struct { pub const VTable = std.mem.Allocator.VTable{ .alloc = &alloc, - .resize = &resize, + .resize = &std.mem.Allocator.noResize, + .remap = &std.mem.Allocator.noRemap, .free = &free, }; diff --git a/src/sys.zig b/src/sys.zig index 6af501973c..b3e31f0ba2 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -10,6 +10,7 @@ const assertIsValidWindowsPath = bun.strings.assertIsValidWindowsPath; const default_allocator = bun.default_allocator; const kernel32 = bun.windows; const mem = std.mem; +const page_size_min = std.heap.page_size_min; const mode_t = posix.mode_t; const libc = std.posix.system; @@ -2766,26 +2767,27 @@ pub fn getFdPath(fd: bun.FileDescriptor, out_buffer: *[MAX_PATH_BYTES]u8) Maybe( /// * SIGSEGV - Attempted write into a region mapped as read-only. 
/// * SIGBUS - Attempted access to a portion of the buffer that does not correspond to the file pub fn mmap( - ptr: ?[*]align(mem.page_size) u8, + ptr: ?[*]align(page_size_min) u8, length: usize, prot: u32, flags: std.posix.MAP, fd: bun.FileDescriptor, offset: u64, -) Maybe([]align(mem.page_size) u8) { +) Maybe([]align(page_size_min) u8) { const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned const rc = std.c.mmap(ptr, length, prot, flags, fd.cast(), ioffset); const fail = std.c.MAP_FAILED; if (rc == fail) { - return Maybe([]align(mem.page_size) u8){ - .err = .{ .errno = @as(Syscall.Error.Int, @truncate(@intFromEnum(bun.C.getErrno(@as(i64, @bitCast(@intFromPtr(fail))))))), .syscall = .mmap }, - }; + return .initErr(.{ + .errno = @as(Syscall.Error.Int, @truncate(@intFromEnum(bun.C.getErrno(@as(i64, @bitCast(@intFromPtr(fail))))))), + .syscall = .mmap, + }); } - return Maybe([]align(mem.page_size) u8){ .result = @as([*]align(mem.page_size) u8, @ptrCast(@alignCast(rc)))[0..length] }; + return .initResult(@as([*]align(page_size_min) u8, @ptrCast(@alignCast(rc)))[0..length]); } -pub fn mmapFile(path: [:0]const u8, flags: std.c.MAP, wanted_size: ?usize, offset: usize) Maybe([]align(mem.page_size) u8) { +pub fn mmapFile(path: [:0]const u8, flags: std.c.MAP, wanted_size: ?usize, offset: usize) Maybe([]align(page_size_min) u8) { assertIsValidWindowsPath(u8, path); const fd = switch (open(path, bun.O.RDWR, 0)) { .result => |fd| fd, @@ -2945,7 +2947,7 @@ pub fn socketpair(domain: socketpair_t, socktype: socketpair_t, protocol: socket return Maybe([2]bun.FileDescriptor){ .result = .{ bun.toFD(fds_i[0]), bun.toFD(fds_i[1]) } }; } -pub fn munmap(memory: []align(mem.page_size) const u8) Maybe(void) { +pub fn munmap(memory: []align(page_size_min) const u8) Maybe(void) { if (Maybe(void).errnoSys(syscall.munmap(memory.ptr, memory.len), .munmap)) |err| { return err; } else return Maybe(void).success; @@ -3346,7 +3348,7 @@ pub fn existsAtType(fd: 
bun.FileDescriptor, subpath: anytype) Maybe(ExistsAtType // from libuv: directories cannot be read-only // https://github.com/libuv/libuv/blob/eb5af8e3c0ea19a6b0196d5db3212dae1785739b/src/win/fs.c#L2144-L2146 (basic_info.FileAttributes & kernel32.FILE_ATTRIBUTE_DIRECTORY == 0 or - basic_info.FileAttributes & kernel32.FILE_ATTRIBUTE_READONLY == 0); + basic_info.FileAttributes & kernel32.FILE_ATTRIBUTE_READONLY == 0); const is_dir = basic_info.FileAttributes != kernel32.INVALID_FILE_ATTRIBUTES and basic_info.FileAttributes & kernel32.FILE_ATTRIBUTE_DIRECTORY != 0 and diff --git a/src/thread_pool.zig b/src/thread_pool.zig index 6cf1a85555..0d432cc336 100644 --- a/src/thread_pool.zig +++ b/src/thread_pool.zig @@ -419,11 +419,11 @@ pub const default_thread_stack_size = brk: { if (!Environment.isMac) break :brk default; - const size = default - (default % std.mem.page_size); + const size = default - (default % std.heap.page_size_max); // stack size must be a multiple of page_size // macOS will fail to spawn a thread if the stack size is not a multiple of page_size - if (size % std.mem.page_size != 0) + if (size % std.heap.page_size_max != 0) @compileError("Thread stack size is not a multiple of page size"); break :brk size; diff --git a/src/walker_skippable.zig b/src/walker_skippable.zig index 434913c9aa..332b6e512d 100644 --- a/src/walker_skippable.zig +++ b/src/walker_skippable.zig @@ -111,7 +111,7 @@ pub fn next(self: *Walker) !?WalkerEntry { .kind = base.kind, }; } else { - var item = self.stack.pop(); + var item = self.stack.pop().?; if (self.stack.items.len != 0) { item.iter.iter.dir.close(); } diff --git a/test/internal/ban-words.test.ts b/test/internal/ban-words.test.ts index 067fe55446..818c81b9e7 100644 --- a/test/internal/ban-words.test.ts +++ b/test/internal/ban-words.test.ts @@ -18,6 +18,10 @@ const words: Record "std.StringHashMap(": { reason: "bun.StringHashMap has a faster `eql`" }, "std.enums.tagName(": { reason: "Use bun.tagName instead", limit: 2 }, 
"std.unicode": { reason: "Use bun.strings instead", limit: 36 }, + "allocator.ptr ==": { reason: "The std.mem.Allocator context pointer can be undefined, which makes this comparison undefined behavior" }, + "allocator.ptr !=": { reason: "The std.mem.Allocator context pointer can be undefined, which makes this comparison undefined behavior", limit: 1 }, + "== allocator.ptr": { reason: "The std.mem.Allocator context pointer can be undefined, which makes this comparison undefined behavior" }, + "!= allocator.ptr": { reason: "The std.mem.Allocator context pointer can be undefined, which makes this comparison undefined behavior" }, [String.raw`: [a-zA-Z0-9_\.\*\?\[\]\(\)]+ = undefined,`]: { reason: "Do not default a struct field to undefined", limit: 251, regex: true }, }; const words_keys = [...Object.keys(words)]; diff --git a/test/js/bun/patch/patch.test.ts b/test/js/bun/patch/patch.test.ts index 2a0afdbca3..327f7099db 100644 --- a/test/js/bun/patch/patch.test.ts +++ b/test/js/bun/patch/patch.test.ts @@ -50,6 +50,33 @@ const join = ? (...strings: string[]): string => __join(...strings.map(s => s.replaceAll("\\", "/"))).replaceAll("\\", "/") : __join; +// Recurse through a nested object, and return a copy where for any object where the only two +// properties are a numeric `capacity` and an array `items`, the capacity has been deleted. Meant to +// be used on serialized Zig structs that contain ArrayLists, because the allocation strategy of +// ArrayList can change and result in a different `capacity` without changing the interpretation of +// the ArrayList's value. 
+function removeCapacity(patch: any): any { + if (Array.isArray(patch)) { + return patch.map(removeCapacity); + } else if (patch !== null && typeof patch == "object") { + const keys = Object.keys(patch); + keys.sort(); + if (keys.length == 2 && keys[0] == "capacity" && keys[1] == "items") { + if (typeof patch.capacity == "number" && Array.isArray(patch.items)) { + // this looks like an ArrayList, so delete the capacity because it is unstable + return { items: patch.items.map(removeCapacity) }; + } + } + // ordinary object, so just apply to all the children + const result = {}; + for (const k of keys) { + result[k] = removeCapacity(patch[k]); + } + return result; + } + return patch; +} + describe("apply", () => { test("edgecase", async () => { const newcontents = "module.exports = x => x % 420 === 0;"; @@ -451,7 +478,7 @@ describe("apply", () => { describe("parse", () => { test("works for a simple case", () => { - expect(JSON.parse(parse(patch))).toEqual({ + expect(removeCapacity(JSON.parse(parse(patch)))).toEqual({ "parts": { "items": [ { @@ -465,37 +492,34 @@ describe("parse", () => { "items": [ { "type": "context", - "lines": { "items": ["this", "is", ""], "capacity": 8 }, + "lines": { "items": ["this", "is", ""] }, "no_newline_at_end_of_file": false, }, { "type": "deletion", - "lines": { "items": ["a"], "capacity": 8 }, + "lines": { "items": ["a"] }, "no_newline_at_end_of_file": false, }, { "type": "insertion", - "lines": { "items": [""], "capacity": 8 }, + "lines": { "items": [""] }, "no_newline_at_end_of_file": false, }, { "type": "context", - "lines": { "items": ["file"], "capacity": 8 }, + "lines": { "items": ["file"] }, "no_newline_at_end_of_file": false, }, ], - "capacity": 8, }, }, ], - "capacity": 8, }, "before_hash": "2de83dd", "after_hash": "842652c", }, }, ], - "capacity": 8, }, }); }); @@ -513,7 +537,7 @@ describe("parse", () => { }); test(`can handle files with CRLF line breaks`, () => { - expect(JSON.parse(parse(crlfLineBreaks))).toEqual({ + 
expect(removeCapacity(JSON.parse(parse(crlfLineBreaks)))).toEqual({ "parts": { "items": [ { @@ -526,24 +550,22 @@ describe("parse", () => { "items": [ { "type": "insertion", - "lines": { "items": ["this is a new file\r"], "capacity": 8 }, + "lines": { "items": ["this is a new file\r"] }, "no_newline_at_end_of_file": false, }, ], - "capacity": 8, }, }, "hash": "3e1267f", }, }, ], - "capacity": 8, }, }); }); test("works", () => { - expect(JSON.parse(parse(modeChangeAndModifyAndRename))).toEqual({ + expect(removeCapacity(JSON.parse(parse(modeChangeAndModifyAndRename)))).toEqual({ "parts": { "items": [ { "file_rename": { "from_path": "numbers.txt", "to_path": "banana.txt" } }, @@ -559,38 +581,35 @@ describe("parse", () => { "items": [ { "type": "deletion", - "lines": { "items": ["one"], "capacity": 8 }, + "lines": { "items": ["one"] }, "no_newline_at_end_of_file": false, }, { "type": "insertion", - "lines": { "items": ["ne"], "capacity": 8 }, + "lines": { "items": ["ne"] }, "no_newline_at_end_of_file": false, }, { "type": "context", - "lines": { "items": ["", "two", ""], "capacity": 8 }, + "lines": { "items": ["", "two", ""] }, "no_newline_at_end_of_file": false, }, ], - "capacity": 8, }, }, ], - "capacity": 8, }, "before_hash": "fbf1785", "after_hash": "92d2c5f", }, }, ], - "capacity": 8, }, }); }); test("parses old-style patches", () => { - expect(JSON.parse(parse(oldStylePatch))).toEqual({ + expect(removeCapacity(JSON.parse(parse(oldStylePatch)))).toEqual({ "parts": { "items": [ { @@ -610,7 +629,6 @@ describe("parse", () => { "function isValidNameError(name, node) {", " !(typeof name === 'string') ?
(0, _invariant2.default)(0, 'Expected string') : void 0;", ], - "capacity": 8, }, "no_newline_at_end_of_file": false, }, @@ -622,7 +640,6 @@ describe("parse", () => { " return new _GraphQLError.GraphQLError('Name \"' + name + '\" must not begin with \"__\", which is reserved by ' + 'GraphQL introspection.', node);", " }", ], - "capacity": 8, }, "no_newline_at_end_of_file": false, }, @@ -634,7 +651,6 @@ describe("parse", () => { " // return new _GraphQLError.GraphQLError('Name \"' + name + '\" must not begin with \"__\", which is reserved by ' + 'GraphQL introspection.', node);", " // }", ], - "capacity": 8, }, "no_newline_at_end_of_file": false, }, @@ -646,26 +662,23 @@ describe("parse", () => { " return new _GraphQLError.GraphQLError('Names must match /^[_a-zA-Z][_a-zA-Z0-9]*$/ but \"' + name + '\" does not.', node);", " }", ], - "capacity": 8, }, "no_newline_at_end_of_file": false, }, { "type": "insertion", - "lines": { "items": [""], "capacity": 8 }, + "lines": { "items": [""] }, "no_newline_at_end_of_file": false, }, { "type": "context", - "lines": { "items": ["}"], "capacity": 8 }, + "lines": { "items": ["}"] }, "no_newline_at_end_of_file": true, }, ], - "capacity": 8, }, }, ], - "capacity": 8, }, "before_hash": null, "after_hash": null, @@ -688,7 +701,6 @@ describe("parse", () => { "export function isValidNameError(name, node) {", " !(typeof name === 'string') ?
invariant(0, 'Expected string') : void 0;", ], - "capacity": 8, }, "no_newline_at_end_of_file": false, }, @@ -700,7 +712,6 @@ describe("parse", () => { " return new GraphQLError('Name \"' + name + '\" must not begin with \"__\", which is reserved by ' + 'GraphQL introspection.', node);", " }", ], - "capacity": 8, }, "no_newline_at_end_of_file": false, }, @@ -712,7 +723,6 @@ describe("parse", () => { " // return new GraphQLError('Name \"' + name + '\" must not begin with \"__\", which is reserved by ' + 'GraphQL introspection.', node);", " // }", ], - "capacity": 8, }, "no_newline_at_end_of_file": false, }, @@ -724,23 +734,19 @@ describe("parse", () => { " return new GraphQLError('Names must match /^[_a-zA-Z][_a-zA-Z0-9]*$/ but \"' + name + '\" does not.', node);", " }", ], - "capacity": 8, }, "no_newline_at_end_of_file": false, }, ], - "capacity": 8, }, }, ], - "capacity": 8, }, "before_hash": null, "after_hash": null, }, }, ], - "capacity": 8, }, }); }); diff --git a/test/js/bun/util/password.test.ts b/test/js/bun/util/password.test.ts index 0d8fd5cc25..3f73068f88 100644 --- a/test/js/bun/util/password.test.ts +++ b/test/js/bun/util/password.test.ts @@ -183,6 +183,15 @@ test("bcrypt uses the SHA-512 of passwords longer than 72 characters", async () expect(await password.verify(boop2, hashed, "bcrypt")).toBeFalse(); }); +test("bcrypt pre-hashing does not break compatibility across Bun versions", async () => { + // hash generated by Bun 1.2.4 + // if we change the mechanism used to pre-hash long passwords so bcrypt doesn't truncate them, + // then this hash will not be considered valid by later versions of Bun.
+ const hash = "$2b$10$PsJ3/W82mzNJoP0rSblfvet2ab9jZg2aH7tIxr1B8uFLJwuWk/jTi"; + const secret = "hello".repeat(100); + expect(await password.verify(secret, hash)).toBeTrue(); +}); + const defaultAlgorithm = "argon2id"; const algorithms = [undefined, "argon2id", "bcrypt"]; const argons = ["argon2i", "argon2id", "argon2d"]; @@ -257,7 +266,7 @@ for (let algorithmValue of algorithms) { test(`${a}`, async () => { await runSlowTest(a); await runSlowTestWithOptions(a); - }) + }); } return; }