From edf13bd91d44cf8609d90d83729aaab849670e3c Mon Sep 17 00:00:00 2001
From: "taylor.fish"
Date: Tue, 9 Sep 2025 20:41:10 -0700
Subject: [PATCH] Refactor `BabyList` (#22502)

(For internal tracking: fixes STAB-1129, STAB-1145, STAB-1146, STAB-1150,
STAB-1126, STAB-1147, STAB-1148, STAB-1149, STAB-1158)

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Jarred Sumner
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
---
 src/HTMLScanner.zig | 4 +-
 src/allocators/MimallocArena.zig | 9 +
 src/ast/Ast.zig | 12 +-
 src/ast/Binding.zig | 11 +-
 src/ast/ConvertESMExportsForHmr.zig | 20 +-
 src/ast/E.zig | 26 +-
 src/ast/Expr.zig | 11 +-
 src/ast/Macro.zig | 39 +-
 src/ast/P.zig | 134 +--
 src/ast/Parser.zig | 22 +-
 src/ast/SideEffects.zig | 14 +-
 src/ast/Symbol.zig | 2 +-
 src/ast/maybe.zig | 8 +-
 src/ast/parse.zig | 34 +-
 src/ast/parseFn.zig | 2 +-
 src/ast/parseJSXElement.zig | 4 +-
 src/ast/parsePrefix.zig | 4 +-
 src/ast/parseProperty.zig | 6 +-
 src/ast/parseStmt.zig | 28 +-
 src/ast/parseTypescript.zig | 13 +-
 src/ast/visit.zig | 19 +-
 src/ast/visitExpr.zig | 40 +-
 src/ast/visitStmt.zig | 18 +-
 src/bun.js/ModuleLoader.zig | 2 +-
 src/bun.js/api/bun/h2_frame_parser.zig | 8 +-
 src/bun.js/api/bun/socket.zig | 34 +-
 src/bun.js/api/html_rewriter.zig | 4 +-
 src/bun.js/api/server/NodeHTTPResponse.zig | 13 +-
 src/bun.js/api/server/RequestContext.zig | 13 +-
 src/bun.js/api/server/ServerConfig.zig | 6 +-
 src/bun.js/ipc.zig | 6 +-
 src/bun.js/node/fs_events.zig | 5 +-
 src/bun.js/node/path_watcher.zig | 19 +-
 src/bun.js/webcore/ArrayBufferSink.zig | 22 +-
 src/bun.js/webcore/Body.zig | 6 +-
 src/bun.js/webcore/ByteBlobLoader.zig | 10 +-
 src/bun.js/webcore/ByteStream.zig | 17 +-
 src/bun.js/webcore/FileReader.zig | 167 ++--
 src/bun.js/webcore/ResumableSink.zig | 24 +-
 src/bun.js/webcore/Sink.zig | 28 +-
 src/bun.js/webcore/fetch.zig | 17 +-
 src/bun.js/webcore/streams.zig | 54 +-
 src/bun.zig | 8 +-
 src/bundler/AstBuilder.zig | 8 +-
 src/bundler/Chunk.zig | 2 +-
 src/bundler/LinkerContext.zig | 14 +-
 src/bundler/LinkerGraph.zig | 39 +-
 src/bundler/ParseTask.zig | 27 +-
 src/bundler/ThreadPool.zig | 29 +-
 src/bundler/bundle_v2.zig | 45 +-
 src/bundler/linker_context/computeChunks.zig | 2 +-
 .../computeCrossChunkDependencies.zig | 16 +-
 .../linker_context/convertStmtsForChunk.zig | 4 +-
 .../convertStmtsForChunkForDevServer.zig | 4 +-
 src/bundler/linker_context/doStep5.zig | 23 +-
 .../findImportedCSSFilesInJSOrder.zig | 2 +-
 .../findImportedFilesInCSSOrder.zig | 30 +-
 .../generateCodeForFileInChunkJS.zig | 16 +-
 .../generateCompileResultForCssChunk.zig | 4 +-
 .../linker_context/postProcessJSChunk.zig | 2 +-
 .../linker_context/prepareCssAstsForChunk.zig | 4 +-
 .../linker_context/scanImportsAndExports.zig | 13 +-
 src/cli/create_command.zig | 24 +-
 src/cli/pm_pkg_command.zig | 5 +-
 src/cli/pm_view_command.zig | 2 +-
 src/cli/publish_command.zig | 12 +-
 src/collections.zig | 8 +-
 src/collections/baby_list.zig | 794 +++++++++++-------
 .../{BoundedArray.zig => bounded_array.zig} | 0
 src/css/css_parser.zig | 22 +-
 src/css/generics.zig | 2 +-
 src/css/properties/grid.zig | 1 +
 src/css/small_list.zig | 13 +-
 src/deps/uws/WindowsNamedPipe.zig | 4 +-
 .../PackageManager/PackageJSONEditor.zig | 73 +-
 .../updatePackageJSONAndInstall.zig | 7 +-
 src/install/PackageManagerTask.zig | 17 +-
 src/interchange/json.zig | 17 +-
 src/interchange/yaml.zig | 14 +-
 src/io/PipeWriter.zig | 29 +-
 src/js_printer.zig | 4 +-
 src/linker.zig | 6 +-
 src/pool.zig | 17 +-
 src/ptr/owned.zig | 2 +
 src/s3/client.zig | 18 +-
 src/s3/multipart.zig | 12 +-
 src/safety/CriticalSection.zig | 4 +-
 src/safety/ThreadLock.zig | 4 +-
 src/safety/alloc.zig | 51 +-
 src/shell/Builtin.zig | 10 +-
 src/shell/IOWriter.zig | 4 +-
 src/shell/interpreter.zig | 8 +-
 src/shell/shell.zig | 6 +-
 src/shell/states/Cmd.zig | 30 +-
 src/shell/subproc.zig | 46 +-
 src/sourcemap/CodeCoverage.zig | 6 +-
 src/sourcemap/LineOffsetTable.zig | 4 +-
 src/sourcemap/sourcemap.zig | 4 +-
 src/sql/mysql/MySQLConnection.zig | 4 +-
 src/sql/postgres/PostgresSQLConnection.zig | 2 +-
 .../protocol/NotificationResponse.zig | 4 +-
 src/sql/shared/Data.zig | 24 +-
 src/string/MutableString.zig | 7 -
 src/string/SmolStr.zig | 6 +-
 src/transpiler.zig | 20 +-
 src/valkey/valkey.zig | 6 +-
 test/bake/bake-harness.ts | 6 +-
 test/internal/ban-limits.json | 2 +-
 108 files changed, 1425 insertions(+), 1163 deletions(-)
 rename src/collections/{BoundedArray.zig => bounded_array.zig} (100%)

diff --git a/src/HTMLScanner.zig b/src/HTMLScanner.zig
index f9edb06d0d..7e305c7b4c 100644
--- a/src/HTMLScanner.zig
+++ b/src/HTMLScanner.zig
@@ -18,7 +18,7 @@ pub fn deinit(this: *HTMLScanner) void {
     for (this.import_records.slice()) |*record| {
         this.allocator.free(record.path.text);
     }
-    this.import_records.deinitWithAllocator(this.allocator);
+    this.import_records.deinit(this.allocator);
 }
 
 fn createImportRecord(this: *HTMLScanner, input_path: []const u8, kind: ImportKind) !void {
@@ -44,7 +44,7 @@ fn createImportRecord(this: *HTMLScanner, input_path: []const u8, kind: ImportKi
         .range = logger.Range.None,
     };
 
-    try this.import_records.push(this.allocator, record);
+    try this.import_records.append(this.allocator, record);
 }
 
 const debug = bun.Output.scoped(.HTMLScanner, .hidden);
diff --git a/src/allocators/MimallocArena.zig b/src/allocators/MimallocArena.zig
index 0588a34821..0b6a646b86 100644
--- a/src/allocators/MimallocArena.zig
+++ b/src/allocators/MimallocArena.zig
@@ -78,6 +78,15 @@ pub const Borrowed = struct {
         else
             null;
     }
+
+    pub fn downcast(std_alloc: std.mem.Allocator) Borrowed {
+        bun.assertf(
+            isInstance(std_alloc),
+            "not a MimallocArena (vtable is {*})",
+            .{std_alloc.vtable},
+        );
+        return .fromOpaque(std_alloc.ptr);
+    }
 };
 
 const BorrowedHeap = if (safety_checks) *DebugHeap else *mimalloc.Heap;
diff --git a/src/ast/Ast.zig b/src/ast/Ast.zig
index 9f619e4a60..9aa1386f1a 100644
--- a/src/ast/Ast.zig
+++ b/src/ast/Ast.zig
@@ -83,14 +83,14 @@ pub const TsEnumsMap = std.ArrayHashMapUnmanaged(Ref, bun.StringHashMapUnmanaged
 
 pub fn fromParts(parts: []Part) Ast {
     return Ast{
-        .parts = Part.List.init(parts),
+        .parts = Part.List.fromOwnedSlice(parts),
         .runtime_imports = .{},
     };
 }
 
-pub fn initTest(parts: []Part) Ast {
+pub fn initTest(parts: []const Part) Ast {
     return Ast{
-        .parts = Part.List.init(parts),
+        .parts = Part.List.fromBorrowedSliceDangerous(parts),
         .runtime_imports = .{},
     };
 }
@@ -107,9 +107,9 @@ pub fn toJSON(self: *const Ast, _: std.mem.Allocator, stream: anytype) !void {
 /// Do not call this if it wasn't globally allocated!
pub fn deinit(this: *Ast) void { // TODO: assert mimalloc-owned memory - if (this.parts.len > 0) this.parts.deinitWithAllocator(bun.default_allocator); - if (this.symbols.len > 0) this.symbols.deinitWithAllocator(bun.default_allocator); - if (this.import_records.len > 0) this.import_records.deinitWithAllocator(bun.default_allocator); + this.parts.deinit(bun.default_allocator); + this.symbols.deinit(bun.default_allocator); + this.import_records.deinit(bun.default_allocator); } pub const Class = G.Class; diff --git a/src/ast/Binding.zig b/src/ast/Binding.zig index 1b484725c7..349bd2ae99 100644 --- a/src/ast/Binding.zig +++ b/src/ast/Binding.zig @@ -56,7 +56,14 @@ pub fn toExpr(binding: *const Binding, wrapper: anytype) Expr { }; } - return Expr.init(E.Array, E.Array{ .items = ExprNodeList.init(exprs), .is_single_line = b.is_single_line }, loc); + return Expr.init( + E.Array, + E.Array{ + .items = ExprNodeList.fromOwnedSlice(exprs), + .is_single_line = b.is_single_line, + }, + loc, + ); }, .b_object => |b| { const properties = wrapper @@ -77,7 +84,7 @@ pub fn toExpr(binding: *const Binding, wrapper: anytype) Expr { return Expr.init( E.Object, E.Object{ - .properties = G.Property.List.init(properties), + .properties = G.Property.List.fromOwnedSlice(properties), .is_single_line = b.is_single_line, }, loc, diff --git a/src/ast/ConvertESMExportsForHmr.zig b/src/ast/ConvertESMExportsForHmr.zig index 561fdeb18c..117b170f0c 100644 --- a/src/ast/ConvertESMExportsForHmr.zig +++ b/src/ast/ConvertESMExportsForHmr.zig @@ -121,7 +121,7 @@ pub fn convertStmt(ctx: *ConvertESMExportsForHmr, p: anytype, stmt: Stmt) !void const temp_id = p.generateTempRef("default_export"); try ctx.last_part.declared_symbols.append(p.allocator, .{ .ref = temp_id, .is_top_level = true }); try ctx.last_part.symbol_uses.putNoClobber(p.allocator, temp_id, .{ .count_estimate = 1 }); - try p.current_scope.generated.push(p.allocator, temp_id); + try p.current_scope.generated.append(p.allocator, temp_id); try ctx.export_props.append(p.allocator, .{ .key = Expr.init(E.String, .{ .data = "default" }, stmt.loc), @@ -395,7 +395,7 @@ fn visitRefToExport( const arg1 = p.generateTempRef(symbol.original_name); try ctx.last_part.declared_symbols.append(p.allocator, .{ .ref = arg1, .is_top_level = true }); try ctx.last_part.symbol_uses.putNoClobber(p.allocator, arg1, .{ .count_estimate = 1 }); - try p.current_scope.generated.push(p.allocator, arg1); + try p.current_scope.generated.append(p.allocator, arg1); // 'get abc() { return abc }' try ctx.export_props.append(p.allocator, .{ @@ -438,7 +438,7 @@ pub fn finalize(ctx: *ConvertESMExportsForHmr, p: anytype, all_parts: []js_ast.P if (ctx.export_props.items.len > 0) { const obj = Expr.init(E.Object, .{ - .properties = G.Property.List.fromList(ctx.export_props), + .properties = G.Property.List.moveFromList(&ctx.export_props), }, logger.Loc.Empty); // `hmr.exports = ...` @@ -466,7 +466,7 @@ pub fn finalize(ctx: *ConvertESMExportsForHmr, p: anytype, all_parts: []js_ast.P .name = "reactRefreshAccept", .name_loc = .Empty, }, .Empty), - .args = .init(&.{}), + .args = .empty, }, .Empty), }, .Empty)); } @@ -474,7 +474,10 @@ pub fn finalize(ctx: *ConvertESMExportsForHmr, p: anytype, all_parts: []js_ast.P // Merge all part metadata into the first part. for (all_parts[0 .. 
all_parts.len - 1]) |*part| { try ctx.last_part.declared_symbols.appendList(p.allocator, part.declared_symbols); - try ctx.last_part.import_record_indices.append(p.allocator, part.import_record_indices.slice()); + try ctx.last_part.import_record_indices.appendSlice( + p.allocator, + part.import_record_indices.slice(), + ); for (part.symbol_uses.keys(), part.symbol_uses.values()) |k, v| { const gop = try ctx.last_part.symbol_uses.getOrPut(p.allocator, k); if (!gop.found_existing) { @@ -487,13 +490,16 @@ pub fn finalize(ctx: *ConvertESMExportsForHmr, p: anytype, all_parts: []js_ast.P part.declared_symbols.entries.len = 0; part.tag = .dead_due_to_inlining; part.dependencies.clearRetainingCapacity(); - try part.dependencies.push(p.allocator, .{ + try part.dependencies.append(p.allocator, .{ .part_index = @intCast(all_parts.len - 1), .source_index = p.source.index, }); } - try ctx.last_part.import_record_indices.append(p.allocator, p.import_records_for_current_part.items); + try ctx.last_part.import_record_indices.appendSlice( + p.allocator, + p.import_records_for_current_part.items, + ); try ctx.last_part.declared_symbols.appendList(p.allocator, p.declared_symbols); ctx.last_part.stmts = ctx.stmts.items; diff --git a/src/ast/E.zig b/src/ast/E.zig index 22cdab5a6b..c6cf7cf413 100644 --- a/src/ast/E.zig +++ b/src/ast/E.zig @@ -18,7 +18,7 @@ pub const Array = struct { close_bracket_loc: logger.Loc = logger.Loc.Empty, pub fn push(this: *Array, allocator: std.mem.Allocator, item: Expr) !void { - try this.items.push(allocator, item); + try this.items.append(allocator, item); } pub inline fn slice(this: Array) []Expr { @@ -30,12 +30,13 @@ pub const Array = struct { allocator: std.mem.Allocator, estimated_count: usize, ) !ExprNodeList { - var out = try allocator.alloc( - Expr, + var out: bun.BabyList(Expr) = try .initCapacity( + allocator, // This over-allocates a little but it's fine estimated_count + @as(usize, this.items.len), ); - var remain = out; + out.expandToCapacity(); + var remain = out.slice(); for (this.items.slice()) |item| { switch (item.data) { .e_spread => |val| { @@ -63,7 +64,8 @@ pub const Array = struct { remain = remain[1..]; } - return ExprNodeList.init(out[0 .. 
out.len - remain.len]); + out.shrinkRetainingCapacity(out.len - remain.len); + return out; } pub fn toJS(this: @This(), allocator: std.mem.Allocator, globalObject: *jsc.JSGlobalObject) ToJSError!jsc.JSValue { @@ -573,7 +575,7 @@ pub const Object = struct { if (asProperty(self, key)) |query| { self.properties.ptr[query.i].value = expr; } else { - try self.properties.push(allocator, .{ + try self.properties.append(allocator, .{ .key = Expr.init(E.String, E.String.init(key), expr.loc), .value = expr, }); @@ -588,7 +590,7 @@ pub const Object = struct { pub fn set(self: *const Object, key: Expr, allocator: std.mem.Allocator, value: Expr) SetError!void { if (self.hasProperty(key.data.e_string.data)) return error.Clobber; - try self.properties.push(allocator, .{ + try self.properties.append(allocator, .{ .key = key, .value = value, }); @@ -642,7 +644,7 @@ pub const Object = struct { value_ = obj; } - try self.properties.push(allocator, .{ + try self.properties.append(allocator, .{ .key = rope.head, .value = value_, }); @@ -683,7 +685,7 @@ pub const Object = struct { if (rope.next) |next| { var obj = Expr.init(E.Object, E.Object{ .properties = .{} }, rope.head.loc); const out = try obj.data.e_object.getOrPutObject(next, allocator); - try self.properties.push(allocator, .{ + try self.properties.append(allocator, .{ .key = rope.head, .value = obj, }); @@ -691,7 +693,7 @@ pub const Object = struct { } const out = Expr.init(E.Object, E.Object{}, rope.head.loc); - try self.properties.push(allocator, .{ + try self.properties.append(allocator, .{ .key = rope.head, .value = out, }); @@ -732,7 +734,7 @@ pub const Object = struct { if (rope.next) |next| { var obj = Expr.init(E.Object, E.Object{ .properties = .{} }, rope.head.loc); const out = try obj.data.e_object.getOrPutArray(next, allocator); - try self.properties.push(allocator, .{ + try self.properties.append(allocator, .{ .key = rope.head, .value = obj, }); @@ -740,7 +742,7 @@ pub const Object = struct { } const out = Expr.init(E.Array, E.Array{}, rope.head.loc); - try self.properties.push(allocator, .{ + try self.properties.append(allocator, .{ .key = rope.head, .value = out, }); diff --git a/src/ast/Expr.zig b/src/ast/Expr.zig index bfd893c37b..b1814aab95 100644 --- a/src/ast/Expr.zig +++ b/src/ast/Expr.zig @@ -273,13 +273,10 @@ pub fn set(expr: *Expr, allocator: std.mem.Allocator, name: string, value: Expr) } } - var new_props = expr.data.e_object.properties.listManaged(allocator); - try new_props.append(.{ + try expr.data.e_object.properties.append(allocator, .{ .key = Expr.init(E.String, .{ .data = name }, logger.Loc.Empty), .value = value, }); - - expr.data.e_object.properties = BabyList(G.Property).fromList(new_props); } /// Don't use this if you care about performance. 
@@ -298,13 +295,10 @@ pub fn setString(expr: *Expr, allocator: std.mem.Allocator, name: string, value: } } - var new_props = expr.data.e_object.properties.listManaged(allocator); - try new_props.append(.{ + try expr.data.e_object.properties.append(allocator, .{ .key = Expr.init(E.String, .{ .data = name }, logger.Loc.Empty), .value = Expr.init(E.String, .{ .data = value }, logger.Loc.Empty), }); - - expr.data.e_object.properties = BabyList(G.Property).fromList(new_props); } pub fn getObject(expr: *const Expr, name: string) ?Expr { @@ -3245,7 +3239,6 @@ const JSPrinter = @import("../js_printer.zig"); const std = @import("std"); const bun = @import("bun"); -const BabyList = bun.BabyList; const Environment = bun.Environment; const JSONParser = bun.json; const MutableString = bun.MutableString; diff --git a/src/ast/Macro.zig b/src/ast/Macro.zig index b4b3f6dbd4..620fa2ed8f 100644 --- a/src/ast/Macro.zig +++ b/src/ast/Macro.zig @@ -386,7 +386,7 @@ pub const Runner = struct { const result = Expr.init( E.Array, E.Array{ - .items = ExprNodeList.init(&[_]Expr{}), + .items = ExprNodeList.empty, .was_originally_macro = true, }, this.caller.loc, @@ -398,7 +398,7 @@ pub const Runner = struct { var out = Expr.init( E.Array, E.Array{ - .items = ExprNodeList.init(array[0..0]), + .items = ExprNodeList.empty, .was_originally_macro = true, }, this.caller.loc, @@ -413,7 +413,7 @@ pub const Runner = struct { continue; i += 1; } - out.data.e_array.items = ExprNodeList.init(array); + out.data.e_array.items = ExprNodeList.fromOwnedSlice(array); _entry.value_ptr.* = out; return out; }, @@ -438,27 +438,37 @@ pub const Runner = struct { .include_value = true, }).init(this.global, obj); defer object_iter.deinit(); - var properties = this.allocator.alloc(G.Property, object_iter.len) catch unreachable; - errdefer this.allocator.free(properties); - var out = Expr.init( + + const out = _entry.value_ptr; + out.* = Expr.init( E.Object, E.Object{ - .properties = BabyList(G.Property).init(properties), + .properties = bun.handleOom( + G.Property.List.initCapacity(this.allocator, object_iter.len), + ), .was_originally_macro = true, }, this.caller.loc, ); - _entry.value_ptr.* = out; + const properties = &out.data.e_object.properties; + errdefer properties.clearAndFree(this.allocator); while (try object_iter.next()) |prop| { - properties[object_iter.i] = G.Property{ - .key = Expr.init(E.String, E.String.init(prop.toOwnedSlice(this.allocator) catch unreachable), this.caller.loc), + bun.assertf( + object_iter.i == properties.len, + "`properties` unexpectedly modified (length {d}, expected {d})", + .{ properties.len, object_iter.i }, + ); + properties.appendAssumeCapacity(G.Property{ + .key = Expr.init( + E.String, + E.String.init(prop.toOwnedSlice(this.allocator) catch unreachable), + this.caller.loc, + ), .value = try this.run(object_iter.value), - }; + }); } - out.data.e_object.properties = BabyList(G.Property).init(properties[0..object_iter.i]); - _entry.value_ptr.* = out; - return out; + return out.*; }, .JSON => { @@ -644,7 +654,6 @@ const Resolver = @import("../resolver/resolver.zig").Resolver; const isPackagePath = @import("../resolver/resolver.zig").isPackagePath; const bun = @import("bun"); -const BabyList = bun.BabyList; const Environment = bun.Environment; const Output = bun.Output; const Transpiler = bun.Transpiler; diff --git a/src/ast/P.zig b/src/ast/P.zig index e3ee2b52a5..3dd1b87160 100644 --- a/src/ast/P.zig +++ b/src/ast/P.zig @@ -536,7 +536,7 @@ pub fn NewParser_( return p.newExpr(E.Call{ .target = 
require_resolve_ref, - .args = ExprNodeList.init(args), + .args = ExprNodeList.fromOwnedSlice(args), }, arg.loc); } @@ -570,7 +570,7 @@ pub fn NewParser_( return p.newExpr( E.Call{ .target = p.valueForRequire(arg.loc), - .args = ExprNodeList.init(args), + .args = ExprNodeList.fromOwnedSlice(args), }, arg.loc, ); @@ -648,7 +648,7 @@ pub fn NewParser_( return p.newExpr( E.Call{ .target = p.valueForRequire(arg.loc), - .args = ExprNodeList.init(args), + .args = ExprNodeList.fromOwnedSlice(args), }, arg.loc, ); @@ -955,7 +955,7 @@ pub fn NewParser_( .e_identifier => |ident| { // is this a require("something") if (strings.eqlComptime(p.loadNameFromRef(ident.ref), "require") and call.args.len == 1 and std.meta.activeTag(call.args.ptr[0].data) == .e_string) { - _ = p.addImportRecord(.require, loc, call.args.first_().data.e_string.string(p.allocator) catch unreachable); + _ = p.addImportRecord(.require, loc, call.args.at(0).data.e_string.string(p.allocator) catch unreachable); } }, else => {}, @@ -971,7 +971,7 @@ pub fn NewParser_( .e_identifier => |ident| { // is this a require("something") if (strings.eqlComptime(p.loadNameFromRef(ident.ref), "require") and call.args.len == 1 and std.meta.activeTag(call.args.ptr[0].data) == .e_string) { - _ = p.addImportRecord(.require, loc, call.args.first_().data.e_string.string(p.allocator) catch unreachable); + _ = p.addImportRecord(.require, loc, call.args.at(0).data.e_string.string(p.allocator) catch unreachable); } }, else => {}, @@ -1250,7 +1250,7 @@ pub fn NewParser_( .ref = namespace_ref, .is_top_level = true, }); - try p.module_scope.generated.push(allocator, namespace_ref); + try p.module_scope.generated.append(allocator, namespace_ref); for (imports, clause_items) |alias, *clause_item| { const ref = symbols.get(alias) orelse unreachable; const alias_name = if (@TypeOf(symbols) == RuntimeImports) RuntimeImports.all[alias] else alias; @@ -1305,7 +1305,7 @@ pub fn NewParser_( parts.append(js_ast.Part{ .stmts = stmts, .declared_symbols = declared_symbols, - .import_record_indices = bun.BabyList(u32).init(import_records), + .import_record_indices = bun.BabyList(u32).fromOwnedSlice(import_records), .tag = .runtime, }) catch unreachable; } @@ -1360,7 +1360,7 @@ pub fn NewParser_( .ref = namespace_ref, .is_top_level = true, }); - try p.module_scope.generated.push(allocator, namespace_ref); + try p.module_scope.generated.append(allocator, namespace_ref); for (clauses) |entry| { if (entry.enabled) { @@ -1374,7 +1374,7 @@ pub fn NewParser_( .name = LocRef{ .ref = entry.ref, .loc = logger.Loc{} }, }); declared_symbols.appendAssumeCapacity(.{ .ref = entry.ref, .is_top_level = true }); - try p.module_scope.generated.push(allocator, entry.ref); + try p.module_scope.generated.append(allocator, entry.ref); try p.is_import_item.put(allocator, entry.ref, {}); try p.named_imports.put(allocator, entry.ref, .{ .alias = entry.name, @@ -2113,7 +2113,7 @@ pub fn NewParser_( // const hoisted_ref = p.newSymbol(.hoisted, symbol.original_name) catch unreachable; symbols = p.symbols.items; - scope.generated.push(p.allocator, hoisted_ref) catch unreachable; + bun.handleOom(scope.generated.append(p.allocator, hoisted_ref)); p.hoisted_ref_for_sloppy_mode_block_fn.put(p.allocator, value.ref, hoisted_ref) catch unreachable; value.ref = hoisted_ref; symbol = &symbols[hoisted_ref.innerIndex()]; @@ -2258,7 +2258,7 @@ pub fn NewParser_( .generated = .{}, }; - try parent.children.push(allocator, scope); + try parent.children.append(allocator, scope); scope.strict_mode = parent.strict_mode; 
p.current_scope = scope; @@ -2569,7 +2569,7 @@ pub fn NewParser_( const name = try strings.append(p.allocator, "import_", try path_name.nonUniqueNameString(p.allocator)); stmt.namespace_ref = try p.newSymbol(.other, name); var scope: *Scope = p.current_scope; - try scope.generated.push(p.allocator, stmt.namespace_ref); + try scope.generated.append(p.allocator, stmt.namespace_ref); } var item_refs = ImportItemForNamespaceMap.init(p.allocator); @@ -2761,7 +2761,7 @@ pub fn NewParser_( var scope = p.current_scope; - try scope.generated.push(p.allocator, name.ref.?); + try scope.generated.append(p.allocator, name.ref.?); return name; } @@ -3067,7 +3067,7 @@ pub fn NewParser_( // this module will be unable to reference this symbol. However, we must // still add the symbol to the scope so it gets minified (automatically- // generated code may still reference the symbol). - try p.module_scope.generated.push(p.allocator, ref); + try p.module_scope.generated.append(p.allocator, ref); return ref; } @@ -3141,7 +3141,7 @@ pub fn NewParser_( entry.key_ptr.* = name; entry.value_ptr.* = js_ast.Scope.Member{ .ref = ref, .loc = loc }; if (comptime is_generated) { - try p.module_scope.generated.push(p.allocator, ref); + try p.module_scope.generated.append(p.allocator, ref); } return ref; } @@ -3448,7 +3448,10 @@ pub fn NewParser_( decls[0] = Decl{ .binding = p.b(B.Identifier{ .ref = ref }, local.loc), }; - try partStmts.append(p.s(S.Local{ .decls = G.Decl.List.init(decls) }, local.loc)); + try partStmts.append(p.s( + S.Local{ .decls = G.Decl.List.fromOwnedSlice(decls) }, + local.loc, + )); try p.declared_symbols.append(p.allocator, .{ .ref = ref, .is_top_level = true }); } } @@ -3463,7 +3466,7 @@ pub fn NewParser_( .symbol_uses = p.symbol_uses, .import_symbol_property_uses = p.import_symbol_property_uses, .declared_symbols = p.declared_symbols.toOwnedSlice(), - .import_record_indices = bun.BabyList(u32).init( + .import_record_indices = bun.BabyList(u32).fromOwnedSlice( p.import_records_for_current_part.toOwnedSlice( p.allocator, ) catch unreachable, @@ -4343,7 +4346,7 @@ pub fn NewParser_( .ref = (p.declareGeneratedSymbol(.other, symbol_name) catch unreachable), }; - p.module_scope.generated.push(p.allocator, loc_ref.ref.?) catch unreachable; + bun.handleOom(p.module_scope.generated.append(p.allocator, loc_ref.ref.?)); p.is_import_item.put(p.allocator, loc_ref.ref.?, {}) catch unreachable; @field(p.jsx_imports, @tagName(field)) = loc_ref; break :brk loc_ref.ref.?; @@ -4445,7 +4448,7 @@ pub fn NewParser_( var local = p.s( S.Local{ .is_export = true, - .decls = Decl.List.init(decls), + .decls = Decl.List.fromOwnedSlice(decls), }, loc, ); @@ -4466,7 +4469,7 @@ pub fn NewParser_( var local = p.s( S.Local{ .is_export = true, - .decls = Decl.List.init(decls), + .decls = Decl.List.fromOwnedSlice(decls), }, loc, ); @@ -4588,7 +4591,7 @@ pub fn NewParser_( stmts.append( p.s(S.Local{ .kind = .k_var, - .decls = G.Decl.List.init(decls), + .decls = G.Decl.List.fromOwnedSlice(decls), .is_export = is_export, }, stmt_loc), ) catch |err| bun.handleOom(err); @@ -4597,7 +4600,7 @@ pub fn NewParser_( stmts.append( p.s(S.Local{ .kind = .k_let, - .decls = G.Decl.List.init(decls), + .decls = G.Decl.List.fromOwnedSlice(decls), }, stmt_loc), ) catch |err| bun.handleOom(err); } @@ -4682,7 +4685,7 @@ pub fn NewParser_( const call = p.newExpr( E.Call{ .target = target, - .args = ExprNodeList.init(args_list), + .args = ExprNodeList.fromOwnedSlice(args_list), // TODO: make these fully tree-shakable. 
this annotation // as-is is incorrect. This would be done by changing all // enum wrappers into `var Enum = ...` instead of two @@ -4737,18 +4740,16 @@ pub fn NewParser_( for (func.func.args, 0..) |arg, i| { for (arg.ts_decorators.ptr[0..arg.ts_decorators.len]) |arg_decorator| { var decorators = if (is_constructor) - class.ts_decorators.listManaged(p.allocator) + &class.ts_decorators else - prop.ts_decorators.listManaged(p.allocator); + &prop.ts_decorators; const args = p.allocator.alloc(Expr, 2) catch unreachable; args[0] = p.newExpr(E.Number{ .value = @as(f64, @floatFromInt(i)) }, arg_decorator.loc); args[1] = arg_decorator; - decorators.append(p.callRuntime(arg_decorator.loc, "__legacyDecorateParamTS", args)) catch unreachable; - if (is_constructor) { - class.ts_decorators.update(decorators); - } else { - prop.ts_decorators.update(decorators); - } + decorators.append( + p.allocator, + p.callRuntime(arg_decorator.loc, "__legacyDecorateParamTS", args), + ) catch |err| bun.handleOom(err); } } }, @@ -4778,7 +4779,7 @@ pub fn NewParser_( target = p.newExpr(E.Dot{ .target = p.newExpr(E.Identifier{ .ref = class.class_name.?.ref.? }, class.class_name.?.loc), .name = "prototype", .name_loc = loc }, loc); } - var array = prop.ts_decorators.listManaged(p.allocator); + var array: std.ArrayList(Expr) = .init(p.allocator); if (p.options.features.emit_decorator_metadata) { switch (prop.kind) { @@ -4803,7 +4804,7 @@ pub fn NewParser_( entry.* = p.serializeMetadata(method_arg.ts_metadata) catch unreachable; } - args[1] = p.newExpr(E.Array{ .items = ExprNodeList.init(args_array) }, logger.Loc.Empty); + args[1] = p.newExpr(E.Array{ .items = ExprNodeList.fromOwnedSlice(args_array) }, logger.Loc.Empty); array.append(p.callRuntime(loc, "__legacyMetadataTS", args)) catch unreachable; } @@ -4828,7 +4829,7 @@ pub fn NewParser_( { var args = p.allocator.alloc(Expr, 2) catch unreachable; args[0] = p.newExpr(E.String{ .data = "design:paramtypes" }, logger.Loc.Empty); - args[1] = p.newExpr(E.Array{ .items = ExprNodeList.init(&[_]Expr{}) }, logger.Loc.Empty); + args[1] = p.newExpr(E.Array{ .items = ExprNodeList.empty }, logger.Loc.Empty); array.append(p.callRuntime(loc, "__legacyMetadataTS", args)) catch unreachable; } } @@ -4848,7 +4849,7 @@ pub fn NewParser_( entry.* = p.serializeMetadata(method_arg.ts_metadata) catch unreachable; } - args[1] = p.newExpr(E.Array{ .items = ExprNodeList.init(args_array) }, logger.Loc.Empty); + args[1] = p.newExpr(E.Array{ .items = ExprNodeList.fromOwnedSlice(args_array) }, logger.Loc.Empty); array.append(p.callRuntime(loc, "__legacyMetadataTS", args)) catch unreachable; } @@ -4865,8 +4866,9 @@ pub fn NewParser_( } } + bun.handleOom(array.insertSlice(0, prop.ts_decorators.slice())); const args = p.allocator.alloc(Expr, 4) catch unreachable; - args[0] = p.newExpr(E.Array{ .items = ExprNodeList.init(array.items) }, loc); + args[0] = p.newExpr(E.Array{ .items = ExprNodeList.moveFromList(&array) }, loc); args[1] = target; args[2] = descriptor_key; args[3] = descriptor_kind; @@ -4928,10 +4930,10 @@ pub fn NewParser_( if (class.extends != null) { const target = p.newExpr(E.Super{}, stmt.loc); const arguments_ref = p.newSymbol(.unbound, arguments_str) catch unreachable; - p.current_scope.generated.push(p.allocator, arguments_ref) catch unreachable; + bun.handleOom(p.current_scope.generated.append(p.allocator, arguments_ref)); const super = p.newExpr(E.Spread{ .value = p.newExpr(E.Identifier{ .ref = arguments_ref }, stmt.loc) }, stmt.loc); - const args = ExprNodeList.one(p.allocator, super) 
catch unreachable; + const args = bun.handleOom(ExprNodeList.initOne(p.allocator, super)); constructor_stmts.append(p.s(S.SExpr{ .value = p.newExpr(E.Call{ .target = target, .args = args }, stmt.loc) }, stmt.loc)) catch unreachable; } @@ -4979,7 +4981,7 @@ pub fn NewParser_( stmts.appendSliceAssumeCapacity(instance_decorators.items); stmts.appendSliceAssumeCapacity(static_decorators.items); if (class.ts_decorators.len > 0) { - var array = class.ts_decorators.listManaged(p.allocator); + var array = class.ts_decorators.moveToListManaged(p.allocator); if (p.options.features.emit_decorator_metadata) { if (constructor_function != null) { @@ -4995,9 +4997,9 @@ pub fn NewParser_( param_array[i] = p.serializeMetadata(constructor_arg.ts_metadata) catch unreachable; } - args[1] = p.newExpr(E.Array{ .items = ExprNodeList.init(param_array) }, logger.Loc.Empty); + args[1] = p.newExpr(E.Array{ .items = ExprNodeList.fromOwnedSlice(param_array) }, logger.Loc.Empty); } else { - args[1] = p.newExpr(E.Array{ .items = ExprNodeList.init(&[_]Expr{}) }, logger.Loc.Empty); + args[1] = p.newExpr(E.Array{ .items = ExprNodeList.empty }, logger.Loc.Empty); } array.append(p.callRuntime(stmt.loc, "__legacyMetadataTS", args)) catch unreachable; @@ -5005,7 +5007,7 @@ pub fn NewParser_( } const args = p.allocator.alloc(Expr, 2) catch unreachable; - args[0] = p.newExpr(E.Array{ .items = ExprNodeList.init(array.items) }, stmt.loc); + args[0] = p.newExpr(E.Array{ .items = ExprNodeList.fromOwnedSlice(array.items) }, stmt.loc); args[1] = p.newExpr(E.Identifier{ .ref = class.class_name.?.ref.? }, class.class_name.?.loc); stmts.appendAssumeCapacity(Stmt.assign( @@ -5415,7 +5417,7 @@ pub fn NewParser_( name, loc_ref.ref.?, ); - p.module_scope.generated.push(p.allocator, loc_ref.ref.?) catch unreachable; + bun.handleOom(p.module_scope.generated.append(p.allocator, loc_ref.ref.?)); return loc_ref.ref.?; } } else { @@ -5439,7 +5441,7 @@ pub fn NewParser_( return p.newExpr( E.Call{ .target = p.runtimeIdentifier(loc, name), - .args = ExprNodeList.init(args), + .args = ExprNodeList.fromOwnedSlice(args), }, loc, ); @@ -5499,7 +5501,7 @@ pub fn NewParser_( for (to_flatten.children.slice()) |item| { item.parent = parent; - parent.children.push(p.allocator, item) catch unreachable; + bun.handleOom(parent.children.append(p.allocator, item)); } } @@ -5520,7 +5522,7 @@ pub fn NewParser_( .ref = ref, }) catch |err| bun.handleOom(err); - bun.handleOom(scope.generated.append(p.allocator, &.{ref})); + bun.handleOom(scope.generated.append(p.allocator, ref)); return ref; } @@ -5710,7 +5712,7 @@ pub fn NewParser_( } const is_top_level = scope == p.module_scope; - scope.generated.append(p.allocator, &.{ + scope.generated.appendSlice(p.allocator, &.{ ctx.stack_ref, caught_ref, err_ref, @@ -5750,7 +5752,7 @@ pub fn NewParser_( const finally_stmts = finally: { if (ctx.has_await_using) { const promise_ref = p.generateTempRef("_promise"); - bun.handleOom(scope.generated.append(p.allocator, &.{promise_ref})); + bun.handleOom(scope.generated.append(p.allocator, promise_ref)); p.declared_symbols.appendAssumeCapacity(.{ .is_top_level = is_top_level, .ref = promise_ref }); const promise_ref_expr = p.newExpr(E.Identifier{ .ref = promise_ref }, loc); @@ -5768,7 +5770,7 @@ pub fn NewParser_( .binding = p.b(B.Identifier{ .ref = promise_ref }, loc), .value = call_dispose, }; - break :decls G.Decl.List.init(decls); + break :decls G.Decl.List.fromOwnedSlice(decls); }, }, loc); @@ -5804,7 +5806,7 @@ pub fn NewParser_( .binding = p.b(B.Identifier{ .ref = ctx.stack_ref 
}, loc), .value = p.newExpr(E.Array{}, loc), }; - break :decls G.Decl.List.init(decls); + break :decls G.Decl.List.fromOwnedSlice(decls); }, .kind = .k_let, }, loc)); @@ -5826,7 +5828,7 @@ pub fn NewParser_( .binding = p.b(B.Identifier{ .ref = has_err_ref }, loc), .value = p.newExpr(E.Number{ .value = 1 }, loc), }; - break :decls G.Decl.List.init(decls); + break :decls G.Decl.List.fromOwnedSlice(decls); }, }, loc); break :catch_body statements; @@ -6103,7 +6105,7 @@ pub fn NewParser_( .body = .{ .stmts = p.allocator.dupe(Stmt, &.{ p.s(S.Return{ .value = p.newExpr(E.Array{ - .items = ExprNodeList.init(ctx.user_hooks.values()), + .items = ExprNodeList.fromBorrowedSliceDangerous(ctx.user_hooks.values()), }, loc) }, loc), }) catch |err| bun.handleOom(err), .loc = loc, @@ -6115,7 +6117,7 @@ pub fn NewParser_( // _s(func, "", force, () => [useCustom]) return p.newExpr(E.Call{ .target = Expr.initIdentifier(ctx.signature_cb, loc), - .args = ExprNodeList.init(args), + .args = ExprNodeList.fromOwnedSlice(args), }, loc); } @@ -6196,11 +6198,14 @@ pub fn NewParser_( } if (part.import_record_indices.len == 0) { - part.import_record_indices = @TypeOf(part.import_record_indices).init( - (p.import_records_for_current_part.clone(p.allocator) catch unreachable).items, - ); + part.import_record_indices = .fromOwnedSlice(bun.handleOom( + p.allocator.dupe(u32, p.import_records_for_current_part.items), + )); } else { - part.import_record_indices.append(p.allocator, p.import_records_for_current_part.items) catch unreachable; + part.import_record_indices.appendSlice( + p.allocator, + p.import_records_for_current_part.items, + ) catch |err| bun.handleOom(err); } parts.items[parts_end] = part; @@ -6341,7 +6346,7 @@ pub fn NewParser_( entry.value_ptr.* = .{}; } - entry.value_ptr.push(ctx.allocator, @as(u32, @truncate(ctx.part_index))) catch unreachable; + bun.handleOom(entry.value_ptr.append(ctx.allocator, @as(u32, @truncate(ctx.part_index)))); } }; @@ -6367,7 +6372,7 @@ pub fn NewParser_( entry.value_ptr.* = .{}; } - entry.value_ptr.push(p.allocator, js_ast.namespace_export_part_index) catch unreachable; + bun.handleOom(entry.value_ptr.append(p.allocator, js_ast.namespace_export_part_index)); } } @@ -6390,17 +6395,12 @@ pub fn NewParser_( break :brk Ref.None; }; - const parts_list = bun.BabyList(js_ast.Part).fromList(parts); - return .{ .runtime_imports = p.runtime_imports, - .parts = parts_list, .module_scope = p.module_scope.*, - .symbols = js_ast.Symbol.List.fromList(p.symbols), .exports_ref = p.exports_ref, .wrapper_ref = wrapper_ref, .module_ref = p.module_ref, - .import_records = ImportRecord.List.fromList(p.import_records), .export_star_import_records = p.export_star_import_records.items, .approximate_newline_count = p.lexer.approximate_newline_count, .exports_kind = exports_kind, @@ -6440,12 +6440,14 @@ pub fn NewParser_( .has_commonjs_export_names = p.has_commonjs_export_names, .hashbang = hashbang, - // TODO: cross-module constant inlining // .const_values = p.const_values, .ts_enums = try p.computeTsEnumsMap(allocator), - .import_meta_ref = p.import_meta_ref, + + .symbols = js_ast.Symbol.List.moveFromList(&p.symbols), + .parts = bun.BabyList(js_ast.Part).moveFromList(parts), + .import_records = ImportRecord.List.moveFromList(&p.import_records), }; } diff --git a/src/ast/Parser.zig b/src/ast/Parser.zig index ff68c2e000..ed8461d8c9 100644 --- a/src/ast/Parser.zig +++ b/src/ast/Parser.zig @@ -188,7 +188,7 @@ pub const Parser = struct { // in the `symbols` array. 
bun.assert(p.symbols.items.len == 0); var symbols_ = symbols; - p.symbols = symbols_.listManaged(p.allocator); + p.symbols = symbols_.moveToListManaged(p.allocator); try p.prepareForVisitPass(); @@ -550,10 +550,7 @@ pub const Parser = struct { var sliced = try ListManaged(Stmt).initCapacity(p.allocator, 1); sliced.items.len = 1; var _local = local.*; - var list = try ListManaged(G.Decl).initCapacity(p.allocator, 1); - list.items.len = 1; - list.items[0] = decl; - _local.decls.update(list); + _local.decls = try .initOne(p.allocator, decl); sliced.items[0] = p.s(_local, stmt.loc); try p.appendPart(&parts, sliced.items); } @@ -686,7 +683,7 @@ pub const Parser = struct { var part_stmts = p.allocator.alloc(Stmt, 1) catch unreachable; part_stmts[0] = p.s(S.Local{ .kind = .k_var, - .decls = Decl.List.init(decls), + .decls = Decl.List.fromOwnedSlice(decls), }, logger.Loc.Empty); before.append(js_ast.Part{ .stmts = part_stmts, @@ -713,7 +710,7 @@ pub const Parser = struct { var import_part_stmts = remaining_stmts[0..1]; remaining_stmts = remaining_stmts[1..]; - bun.handleOom(p.module_scope.generated.push(p.allocator, deferred_import.namespace.ref.?)); + bun.handleOom(p.module_scope.generated.append(p.allocator, deferred_import.namespace.ref.?)); import_part_stmts[0] = Stmt.alloc( S.Import, @@ -835,7 +832,7 @@ pub const Parser = struct { part.symbol_uses = .{}; return js_ast.Result{ .ast = js_ast.Ast{ - .import_records = ImportRecord.List.init(p.import_records.items), + .import_records = ImportRecord.List.moveFromList(&p.import_records), .redirect_import_record_index = id, .named_imports = p.named_imports, .named_exports = p.named_exports, @@ -905,7 +902,10 @@ pub const Parser = struct { break :brk new_stmts.items; }; - part.import_record_indices.push(p.allocator, right.data.e_require_string.import_record_index) catch unreachable; + part.import_record_indices.append( + p.allocator, + right.data.e_require_string.import_record_index, + ) catch |err| bun.handleOom(err); p.symbols.items[p.module_ref.innerIndex()].use_count_estimate = 0; p.symbols.items[namespace_ref.innerIndex()].use_count_estimate -|= 1; _ = part.symbol_uses.swapRemove(namespace_ref); @@ -1165,7 +1165,7 @@ pub const Parser = struct { var part_stmts = p.allocator.alloc(Stmt, 1) catch unreachable; part_stmts[0] = p.s(S.Local{ .kind = .k_var, - .decls = Decl.List.init(decls), + .decls = Decl.List.fromOwnedSlice(decls), }, logger.Loc.Empty); before.append(js_ast.Part{ .stmts = part_stmts, @@ -1245,7 +1245,7 @@ pub const Parser = struct { before.append(js_ast.Part{ .stmts = part_stmts, .declared_symbols = declared_symbols, - .import_record_indices = bun.BabyList(u32).init(import_record_indices), + .import_record_indices = bun.BabyList(u32).fromOwnedSlice(import_record_indices), .tag = .bun_test, }) catch unreachable; diff --git a/src/ast/SideEffects.zig b/src/ast/SideEffects.zig index e67b4f3eeb..1b0f023c3a 100644 --- a/src/ast/SideEffects.zig +++ b/src/ast/SideEffects.zig @@ -273,7 +273,8 @@ pub const SideEffects = enum(u1) { } properties_slice = properties_slice[0..end]; - expr.data.e_object.properties = G.Property.List.init(properties_slice); + expr.data.e_object.properties = + G.Property.List.fromBorrowedSliceDangerous(properties_slice); return expr; } } @@ -311,16 +312,14 @@ pub const SideEffects = enum(u1) { for (items) |item| { if (item.data == .e_spread) { var end: usize = 0; - for (items) |item__| { - const item_ = item__; + for (items) |item_| { if (item_.data != .e_missing) { items[end] = item_; end += 1; } - - 
expr.data.e_array.items = ExprNodeList.init(items[0..end]); - return expr; } + expr.data.e_array.items.shrinkRetainingCapacity(end); + return expr; } } @@ -457,7 +456,7 @@ pub const SideEffects = enum(u1) { findIdentifiers(decl.binding, &decls); } - local.decls.update(decls); + local.decls = .moveFromList(&decls); return true; }, @@ -889,7 +888,6 @@ const js_ast = bun.ast; const Binding = js_ast.Binding; const E = js_ast.E; const Expr = js_ast.Expr; -const ExprNodeList = js_ast.ExprNodeList; const Stmt = js_ast.Stmt; const G = js_ast.G; diff --git a/src/ast/Symbol.zig b/src/ast/Symbol.zig index eada199e7d..1a8d31d5d4 100644 --- a/src/ast/Symbol.zig +++ b/src/ast/Symbol.zig @@ -412,7 +412,7 @@ pub const Map = struct { } pub fn initWithOneList(list: List) Map { - const baby_list = BabyList(List).init((&list)[0..1]); + const baby_list = BabyList(List).fromBorrowedSliceDangerous((&list)[0..1]); return initList(baby_list); } diff --git a/src/ast/maybe.zig b/src/ast/maybe.zig index 6a3b0b243d..1c461b3099 100644 --- a/src/ast/maybe.zig +++ b/src/ast/maybe.zig @@ -68,7 +68,7 @@ pub fn AstMaybe( .loc = name_loc, .ref = p.newSymbol(.import, name) catch unreachable, }; - p.module_scope.generated.push(p.allocator, new_item.ref.?) catch unreachable; + bun.handleOom(p.module_scope.generated.append(p.allocator, new_item.ref.?)); import_items.put(name, new_item) catch unreachable; p.is_import_item.put(p.allocator, new_item.ref.?, {}) catch unreachable; @@ -214,7 +214,7 @@ pub fn AstMaybe( .other, std.fmt.allocPrint(p.allocator, "${any}", .{bun.fmt.fmtIdentifier(key)}) catch unreachable, ) catch unreachable; - p.module_scope.generated.push(p.allocator, new_ref) catch unreachable; + bun.handleOom(p.module_scope.generated.append(p.allocator, new_ref)); named_export_entry.value_ptr.* = .{ .loc_ref = LocRef{ .loc = name_loc, @@ -320,7 +320,7 @@ pub fn AstMaybe( .other, std.fmt.allocPrint(p.allocator, "${any}", .{bun.fmt.fmtIdentifier(name)}) catch unreachable, ) catch unreachable; - p.module_scope.generated.push(p.allocator, new_ref) catch unreachable; + bun.handleOom(p.module_scope.generated.append(p.allocator, new_ref)); named_export_entry.value_ptr.* = .{ .loc_ref = LocRef{ .loc = name_loc, @@ -493,7 +493,7 @@ pub fn AstMaybe( .other, std.fmt.allocPrint(p.allocator, "${any}", .{bun.fmt.fmtIdentifier(name)}) catch unreachable, ) catch unreachable; - p.module_scope.generated.push(p.allocator, new_ref) catch unreachable; + bun.handleOom(p.module_scope.generated.append(p.allocator, new_ref)); named_export_entry.value_ptr.* = .{ .loc_ref = LocRef{ .loc = name_loc, diff --git a/src/ast/parse.zig b/src/ast/parse.zig index c7c026f091..2582d5bd40 100644 --- a/src/ast/parse.zig +++ b/src/ast/parse.zig @@ -200,7 +200,7 @@ pub fn Parse( .class_name = name, .extends = extends, .close_brace_loc = close_brace_loc, - .ts_decorators = ExprNodeList.init(class_opts.ts_decorators), + .ts_decorators = ExprNodeList.fromOwnedSlice(class_opts.ts_decorators), .class_keyword = class_keyword, .body_loc = body_loc, .properties = properties.items, @@ -283,7 +283,7 @@ pub fn Parse( } const close_paren_loc = p.lexer.loc(); try p.lexer.expect(.t_close_paren); - return ExprListLoc{ .list = ExprNodeList.fromList(args), .loc = close_paren_loc }; + return ExprListLoc{ .list = ExprNodeList.moveFromList(&args), .loc = close_paren_loc }; } pub fn parseJSXPropValueIdentifier(noalias p: *P, previous_string_with_backslash_loc: *logger.Loc) !Expr { @@ -474,7 +474,10 @@ pub fn Parse( if (opts.is_async) { p.logExprErrors(&errors); const async_expr = 
p.newExpr(E.Identifier{ .ref = try p.storeNameInRef("async") }, loc); - return p.newExpr(E.Call{ .target = async_expr, .args = ExprNodeList.init(items) }, loc); + return p.newExpr(E.Call{ + .target = async_expr, + .args = ExprNodeList.fromOwnedSlice(items), + }, loc); } // Is this a chain of expressions and comma operators? @@ -621,16 +624,17 @@ pub fn Parse( try p.forbidLexicalDecl(token_range.loc); } - const decls = try p.parseAndDeclareDecls(.other, opts); + var decls_list = try p.parseAndDeclareDecls(.other, opts); + const decls: G.Decl.List = .moveFromList(&decls_list); return ExprOrLetStmt{ .stmt_or_expr = js_ast.StmtOrExpr{ .stmt = p.s(S.Local{ .kind = .k_let, - .decls = G.Decl.List.fromList(decls), + .decls = decls, .is_export = opts.is_export, }, token_range.loc), }, - .decls = decls.items, + .decls = decls.slice(), }; } }, @@ -650,19 +654,20 @@ pub fn Parse( } // p.markSyntaxFeature(.using, token_range.loc); opts.is_using_statement = true; - const decls = try p.parseAndDeclareDecls(.constant, opts); + var decls_list = try p.parseAndDeclareDecls(.constant, opts); + const decls: G.Decl.List = .moveFromList(&decls_list); if (!opts.is_for_loop_init) { - try p.requireInitializers(.k_using, decls.items); + try p.requireInitializers(.k_using, decls.slice()); } return ExprOrLetStmt{ .stmt_or_expr = js_ast.StmtOrExpr{ .stmt = p.s(S.Local{ .kind = .k_using, - .decls = G.Decl.List.fromList(decls), + .decls = decls, .is_export = false, }, token_range.loc), }, - .decls = decls.items, + .decls = decls.slice(), }; } } else if (p.fn_or_arrow_data_parse.allow_await == .allow_expr and strings.eqlComptime(raw, "await")) { @@ -689,19 +694,20 @@ pub fn Parse( } // p.markSyntaxFeature(.using, using_range.loc); opts.is_using_statement = true; - const decls = try p.parseAndDeclareDecls(.constant, opts); + var decls_list = try p.parseAndDeclareDecls(.constant, opts); + const decls: G.Decl.List = .moveFromList(&decls_list); if (!opts.is_for_loop_init) { - try p.requireInitializers(.k_await_using, decls.items); + try p.requireInitializers(.k_await_using, decls.slice()); } return ExprOrLetStmt{ .stmt_or_expr = js_ast.StmtOrExpr{ .stmt = p.s(S.Local{ .kind = .k_await_using, - .decls = G.Decl.List.fromList(decls), + .decls = decls, .is_export = false, }, token_range.loc), }, - .decls = decls.items, + .decls = decls.slice(), }; } break :value Expr{ diff --git a/src/ast/parseFn.zig b/src/ast/parseFn.zig index 9e53368ada..bf27bc2d31 100644 --- a/src/ast/parseFn.zig +++ b/src/ast/parseFn.zig @@ -281,7 +281,7 @@ pub fn ParseFn( } args.append(p.allocator, G.Arg{ - .ts_decorators = ExprNodeList.init(ts_decorators), + .ts_decorators = ExprNodeList.fromOwnedSlice(ts_decorators), .binding = arg, .default = default_value, diff --git a/src/ast/parseJSXElement.zig b/src/ast/parseJSXElement.zig index 3faa412c96..5aadd4bbdb 100644 --- a/src/ast/parseJSXElement.zig +++ b/src/ast/parseJSXElement.zig @@ -148,7 +148,7 @@ pub fn ParseJSXElement( const is_key_after_spread = key_prop_i > -1 and first_spread_prop_i > -1 and key_prop_i > first_spread_prop_i; flags.setPresent(.is_key_after_spread, is_key_after_spread); - properties = G.Property.List.fromList(props); + properties = G.Property.List.moveFromList(&props); if (is_key_after_spread and p.options.jsx.runtime == .automatic and !p.has_classic_runtime_warned) { try p.log.addWarning(p.source, spread_loc, "\"key\" prop after a {...spread} is deprecated in JSX. 
Falling back to classic runtime."); p.has_classic_runtime_warned = true; @@ -268,7 +268,7 @@ pub fn ParseJSXElement( return p.newExpr(E.JSXElement{ .tag = end_tag.data.asExpr(), - .children = ExprNodeList.fromList(children), + .children = ExprNodeList.moveFromList(&children), .properties = properties, .key_prop_index = key_prop_i, .flags = flags, diff --git a/src/ast/parsePrefix.zig b/src/ast/parsePrefix.zig index 1c210f5444..14eae7eb71 100644 --- a/src/ast/parsePrefix.zig +++ b/src/ast/parsePrefix.zig @@ -516,7 +516,7 @@ pub fn ParsePrefix( self_errors.mergeInto(errors.?); } return p.newExpr(E.Array{ - .items = ExprNodeList.fromList(items), + .items = ExprNodeList.moveFromList(&items), .comma_after_spread = comma_after_spread.toNullable(), .is_single_line = is_single_line, .close_bracket_loc = close_bracket_loc, @@ -600,7 +600,7 @@ pub fn ParsePrefix( } return p.newExpr(E.Object{ - .properties = G.Property.List.fromList(properties), + .properties = G.Property.List.moveFromList(&properties), .comma_after_spread = if (comma_after_spread.start > 0) comma_after_spread else diff --git a/src/ast/parseProperty.zig b/src/ast/parseProperty.zig index 9ca95c0b74..01586cdc60 100644 --- a/src/ast/parseProperty.zig +++ b/src/ast/parseProperty.zig @@ -119,7 +119,7 @@ pub fn ParseProperty( } return G.Property{ - .ts_decorators = ExprNodeList.init(opts.ts_decorators), + .ts_decorators = try ExprNodeList.fromSlice(p.allocator, opts.ts_decorators), .kind = kind, .flags = Flags.Property.init(.{ .is_computed = is_computed, @@ -333,7 +333,7 @@ pub fn ParseProperty( ) catch unreachable; block.* = G.ClassStaticBlock{ - .stmts = js_ast.BabyList(Stmt).init(stmts), + .stmts = js_ast.BabyList(Stmt).fromOwnedSlice(stmts), .loc = loc, }; @@ -506,7 +506,7 @@ pub fn ParseProperty( try p.lexer.expectOrInsertSemicolon(); return G.Property{ - .ts_decorators = ExprNodeList.init(opts.ts_decorators), + .ts_decorators = try ExprNodeList.fromSlice(p.allocator, opts.ts_decorators), .kind = kind, .flags = Flags.Property.init(.{ .is_computed = is_computed, diff --git a/src/ast/parseStmt.zig b/src/ast/parseStmt.zig index 274f64eaf9..b8bce67462 100644 --- a/src/ast/parseStmt.zig +++ b/src/ast/parseStmt.zig @@ -493,9 +493,13 @@ pub fn ParseStmt( } fn t_var(p: *P, opts: *ParseStatementOptions, loc: logger.Loc) anyerror!Stmt { try p.lexer.next(); - const decls = try p.parseAndDeclareDecls(.hoisted, opts); + var decls = try p.parseAndDeclareDecls(.hoisted, opts); try p.lexer.expectOrInsertSemicolon(); - return p.s(S.Local{ .kind = .k_var, .decls = Decl.List.fromList(decls), .is_export = opts.is_export }, loc); + return p.s(S.Local{ + .kind = .k_var, + .decls = Decl.List.moveFromList(&decls), + .is_export = opts.is_export, + }, loc); } fn t_const(p: *P, opts: *ParseStatementOptions, loc: logger.Loc) anyerror!Stmt { if (opts.lexical_decl != .allow_all) { @@ -509,14 +513,18 @@ pub fn ParseStmt( return p.parseTypescriptEnumStmt(loc, opts); } - const decls = try p.parseAndDeclareDecls(.constant, opts); + var decls = try p.parseAndDeclareDecls(.constant, opts); try p.lexer.expectOrInsertSemicolon(); if (!opts.is_typescript_declare) { try p.requireInitializers(.k_const, decls.items); } - return p.s(S.Local{ .kind = .k_const, .decls = Decl.List.fromList(decls), .is_export = opts.is_export }, loc); + return p.s(S.Local{ + .kind = .k_const, + .decls = Decl.List.moveFromList(&decls), + .is_export = opts.is_export, + }, loc); } fn t_if(p: *P, _: *ParseStatementOptions, loc: logger.Loc) anyerror!Stmt { var current_loc = loc; @@ -795,15 +803,17 @@ 
pub fn ParseStmt( is_var = true; try p.lexer.next(); var stmtOpts = ParseStatementOptions{}; - decls.update(try p.parseAndDeclareDecls(.hoisted, &stmtOpts)); - init_ = p.s(S.Local{ .kind = .k_var, .decls = Decl.List.fromList(decls) }, init_loc); + var decls_list = try p.parseAndDeclareDecls(.hoisted, &stmtOpts); + decls = .moveFromList(&decls_list); + init_ = p.s(S.Local{ .kind = .k_var, .decls = decls }, init_loc); }, // for (const ) .t_const => { try p.lexer.next(); var stmtOpts = ParseStatementOptions{}; - decls.update(try p.parseAndDeclareDecls(.constant, &stmtOpts)); - init_ = p.s(S.Local{ .kind = .k_const, .decls = Decl.List.fromList(decls) }, init_loc); + var decls_list = try p.parseAndDeclareDecls(.constant, &stmtOpts); + decls = .moveFromList(&decls_list); + init_ = p.s(S.Local{ .kind = .k_const, .decls = decls }, init_loc); }, // for (;) .t_semicolon => {}, @@ -1293,7 +1303,7 @@ pub fn ParseStmt( for (local.decls.slice()) |decl| { try extractDeclsForBinding(decl.binding, &_decls); } - decls.update(_decls); + decls = .moveFromList(&_decls); }, else => {}, } diff --git a/src/ast/parseTypescript.zig b/src/ast/parseTypescript.zig index bf6793aa25..35432904e3 100644 --- a/src/ast/parseTypescript.zig +++ b/src/ast/parseTypescript.zig @@ -201,7 +201,7 @@ pub fn ParseTypescript( // run the renamer. For external-facing things the renamer will avoid // collisions automatically so this isn't important for correctness. arg_ref = p.newSymbol(.hoisted, strings.cat(p.allocator, "_", name_text) catch unreachable) catch unreachable; - p.current_scope.generated.push(p.allocator, arg_ref) catch unreachable; + bun.handleOom(p.current_scope.generated.append(p.allocator, arg_ref)); } else { arg_ref = p.newSymbol(.hoisted, name_text) catch unreachable; } @@ -238,7 +238,7 @@ pub fn ParseTypescript( try p.lexer.expect(.t_string_literal); try p.lexer.expect(.t_close_paren); if (!opts.is_typescript_declare) { - const args = try ExprNodeList.one(p.allocator, path); + const args = try ExprNodeList.initOne(p.allocator, path); value = p.newExpr(E.Call{ .target = target, .close_paren_loc = p.lexer.loc(), .args = args }, loc); } } else { @@ -266,7 +266,12 @@ pub fn ParseTypescript( .binding = p.b(B.Identifier{ .ref = ref }, default_name_loc), .value = value, }; - return p.s(S.Local{ .kind = kind, .decls = Decl.List.init(decls), .is_export = opts.is_export, .was_ts_import_equals = true }, loc); + return p.s(S.Local{ + .kind = kind, + .decls = Decl.List.fromOwnedSlice(decls), + .is_export = opts.is_export, + .was_ts_import_equals = true, + }, loc); } pub fn parseTypescriptEnumStmt(p: *P, loc: logger.Loc, opts: *ParseStatementOptions) anyerror!Stmt { @@ -372,7 +377,7 @@ pub fn ParseTypescript( // run the renamer. For external-facing things the renamer will avoid // collisions automatically so this isn't important for correctness. 
arg_ref = p.newSymbol(.hoisted, strings.cat(p.allocator, "_", name_text) catch unreachable) catch unreachable; - p.current_scope.generated.push(p.allocator, arg_ref) catch unreachable; + bun.handleOom(p.current_scope.generated.append(p.allocator, arg_ref)); } else { arg_ref = p.declareSymbol(.hoisted, name_loc, name_text) catch unreachable; } diff --git a/src/ast/visit.zig b/src/ast/visit.zig index e37b74e4f2..18ee06ec63 100644 --- a/src/ast/visit.zig +++ b/src/ast/visit.zig @@ -567,9 +567,9 @@ pub fn Visit( // Make it an error to use "arguments" in a static class block p.current_scope.forbid_arguments = true; - var list = property.class_static_block.?.stmts.listManaged(p.allocator); + var list = property.class_static_block.?.stmts.moveToListManaged(p.allocator); p.visitStmts(&list, .fn_body) catch unreachable; - property.class_static_block.?.stmts = js_ast.BabyList(Stmt).fromList(list); + property.class_static_block.?.stmts = js_ast.BabyList(Stmt).moveFromList(&list); p.popScope(); p.fn_or_arrow_data_visit = old_fn_or_arrow_data; @@ -912,12 +912,13 @@ pub fn Visit( before.ensureUnusedCapacity(@as(usize, @intFromBool(let_decls.items.len > 0)) + @as(usize, @intFromBool(var_decls.items.len > 0)) + non_fn_stmts.items.len) catch unreachable; if (let_decls.items.len > 0) { + const decls: Decl.List = .moveFromList(&let_decls); before.appendAssumeCapacity(p.s( S.Local{ .kind = .k_let, - .decls = Decl.List.fromList(let_decls), + .decls = decls, }, - let_decls.items[0].value.?.loc, + decls.at(0).value.?.loc, )); } @@ -928,12 +929,13 @@ pub fn Visit( before.appendAssumeCapacity(new); } } else { + const decls: Decl.List = .moveFromList(&var_decls); before.appendAssumeCapacity(p.s( S.Local{ .kind = .k_var, - .decls = Decl.List.fromList(var_decls), + .decls = decls, }, - var_decls.items[0].value.?.loc, + decls.at(0).value.?.loc, )); } } @@ -1166,7 +1168,10 @@ pub fn Visit( if (prev_stmt.data == .s_local and local.canMergeWith(prev_stmt.data.s_local)) { - prev_stmt.data.s_local.decls.append(p.allocator, local.decls.slice()) catch unreachable; + prev_stmt.data.s_local.decls.appendSlice( + p.allocator, + local.decls.slice(), + ) catch |err| bun.handleOom(err); continue; } } diff --git a/src/ast/visitExpr.zig b/src/ast/visitExpr.zig index 3e23434856..a01a185b1d 100644 --- a/src/ast/visitExpr.zig +++ b/src/ast/visitExpr.zig @@ -228,26 +228,30 @@ pub fn VisitExpr( // That would reduce the amount of allocations a little if (runtime == .classic or is_key_after_spread) { // Arguments to createElement() - const args = p.allocator.alloc(Expr, 2 + children_count) catch unreachable; - // There are at least two args: - // - name of the tag - // - props - var i: usize = 2; - args[0] = tag; + var args = bun.BabyList(Expr).initCapacity( + p.allocator, + 2 + children_count, + ) catch |err| bun.handleOom(err); + args.appendAssumeCapacity(tag); const num_props = e_.properties.len; if (num_props > 0) { const props = p.allocator.alloc(G.Property, num_props) catch unreachable; bun.copy(G.Property, props, e_.properties.slice()); - args[1] = p.newExpr(E.Object{ .properties = G.Property.List.init(props) }, expr.loc); + args.appendAssumeCapacity(p.newExpr( + E.Object{ .properties = G.Property.List.fromOwnedSlice(props) }, + expr.loc, + )); } else { - args[1] = p.newExpr(E.Null{}, expr.loc); + args.appendAssumeCapacity(p.newExpr(E.Null{}, expr.loc)); } const children_elements = e_.children.slice()[0..children_count]; for (children_elements) |child| { - args[i] = p.visitExpr(child); - i += @as(usize, 
@intCast(@intFromBool(args[i].data != .e_missing))); + const arg = p.visitExpr(child); + if (arg.data != .e_missing) { + args.appendAssumeCapacity(arg); + } } const target = p.jsxStringsToMemberExpression(expr.loc, p.options.jsx.factory) catch unreachable; @@ -255,7 +259,7 @@ pub fn VisitExpr( // Call createElement() return p.newExpr(E.Call{ .target = if (runtime == .classic) target else p.jsxImport(.createElement, expr.loc), - .args = ExprNodeList.init(args[0..i]), + .args = args, // Enable tree shaking .can_be_unwrapped_if_unused = if (!p.options.ignore_dce_annotations and !p.options.jsx.side_effects) .if_unused else .never, .close_paren_loc = e_.close_tag_loc, @@ -265,7 +269,7 @@ pub fn VisitExpr( else if (runtime == .automatic) { // --- These must be done in all cases -- const allocator = p.allocator; - var props: std.ArrayListUnmanaged(G.Property) = e_.properties.list(); + var props = &e_.properties; const maybe_key_value: ?ExprNodeIndex = if (e_.key_prop_index > -1) props.orderedRemove(@intCast(e_.key_prop_index)).value else null; @@ -296,8 +300,8 @@ pub fn VisitExpr( // -> //
// jsx("div", {...foo}) - while (props.items.len == 1 and props.items[0].kind == .spread and props.items[0].value.?.data == .e_object) { - props = props.items[0].value.?.data.e_object.properties.list(); + while (props.len == 1 and props.at(0).kind == .spread and props.at(0).value.?.data == .e_object) { + props = &props.at(0).value.?.data.e_object.properties; } // Typescript defines static jsx as children.len > 1 or single spread @@ -326,7 +330,7 @@ pub fn VisitExpr( args[0] = tag; args[1] = p.newExpr(E.Object{ - .properties = G.Property.List.fromList(props), + .properties = props.*, }, expr.loc); if (maybe_key_value) |key| { @@ -360,7 +364,7 @@ pub fn VisitExpr( return p.newExpr(E.Call{ .target = p.jsxImportAutomatic(expr.loc, is_static_jsx), - .args = ExprNodeList.init(args), + .args = ExprNodeList.fromOwnedSlice(args), // Enable tree shaking .can_be_unwrapped_if_unused = if (!p.options.ignore_dce_annotations and !p.options.jsx.side_effects) .if_unused else .never, .was_jsx_element = true, @@ -1279,7 +1283,7 @@ pub fn VisitExpr( // the try/catch statement is there to handle the potential run-time // error from the unbundled require() call failing. if (e_.args.len == 1) { - const first = e_.args.first_(); + const first = e_.args.slice()[0]; const state = TransposeState{ .is_require_immediately_assigned_to_decl = in.is_immediately_assigned_to_decl and first.data == .e_string, @@ -1324,7 +1328,7 @@ pub fn VisitExpr( } if (e_.args.len == 1) { - const first = e_.args.first_(); + const first = e_.args.slice()[0]; switch (first.data) { .e_string => { // require.resolve(FOO) => require.resolve(FOO) diff --git a/src/ast/visitStmt.zig b/src/ast/visitStmt.zig index 4de08b6d57..9a6e7b879a 100644 --- a/src/ast/visitStmt.zig +++ b/src/ast/visitStmt.zig @@ -126,7 +126,7 @@ pub fn VisitStmt( const name = p.loadNameFromRef(data.namespace_ref); data.namespace_ref = try p.newSymbol(.other, name); - try p.current_scope.generated.push(p.allocator, data.namespace_ref); + try p.current_scope.generated.append(p.allocator, data.namespace_ref); try p.recordDeclaredSymbol(data.namespace_ref); if (p.options.features.replace_exports.count() > 0) { @@ -146,7 +146,7 @@ pub fn VisitStmt( const _name = p.loadNameFromRef(old_ref); const ref = try p.newSymbol(.import, _name); - try p.current_scope.generated.push(p.allocator, ref); + try p.current_scope.generated.append(p.allocator, ref); try p.recordDeclaredSymbol(ref); data.items[j] = item; data.items[j].name.ref = ref; @@ -163,7 +163,7 @@ pub fn VisitStmt( for (data.items) |*item| { const _name = p.loadNameFromRef(item.name.ref.?); const ref = try p.newSymbol(.import, _name); - try p.current_scope.generated.push(p.allocator, ref); + try p.current_scope.generated.append(p.allocator, ref); try p.recordDeclaredSymbol(ref); item.name.ref = ref; } @@ -176,7 +176,7 @@ pub fn VisitStmt( // "export * from 'path'" const name = p.loadNameFromRef(data.namespace_ref); data.namespace_ref = try p.newSymbol(.other, name); - try p.current_scope.generated.push(p.allocator, data.namespace_ref); + try p.current_scope.generated.append(p.allocator, data.namespace_ref); try p.recordDeclaredSymbol(data.namespace_ref); // "export * as ns from 'path'" @@ -262,7 +262,7 @@ pub fn VisitStmt( }) { // declare a temporary ref for this const temp_id = p.generateTempRef("default_export"); - try p.current_scope.generated.push(p.allocator, temp_id); + try p.current_scope.generated.append(p.allocator, temp_id); try stmts.append(Stmt.alloc(S.Local, .{ .kind = .k_const, @@ -293,7 +293,7 @@ pub fn VisitStmt( 
.value = data.value.expr, }; stmts.appendAssumeCapacity(p.s(S.Local{ - .decls = G.Decl.List.init(decls), + .decls = G.Decl.List.fromOwnedSlice(decls), }, stmt.loc)); const items = bun.handleOom(p.allocator.alloc(js_ast.ClauseItem, 1)); items[0] = js_ast.ClauseItem{ @@ -390,7 +390,7 @@ pub fn VisitStmt( } const temp_id = p.generateTempRef("default_export"); - try p.current_scope.generated.push(p.allocator, temp_id); + try p.current_scope.generated.append(p.allocator, temp_id); break :brk temp_id; }; @@ -865,7 +865,7 @@ pub fn VisitStmt( .kind = .k_var, .is_export = false, .was_commonjs_export = true, - .decls = G.Decl.List.init(decls), + .decls = G.Decl.List.fromOwnedSlice(decls), }, stmt.loc, ), @@ -1205,7 +1205,7 @@ pub fn VisitStmt( .binding = p.b(B.Identifier{ .ref = id.ref }, loc), .value = p.newExpr(E.Identifier{ .ref = temp_ref }, loc), }; - break :bindings G.Decl.List.init(decls); + break :bindings G.Decl.List.fromOwnedSlice(decls); }, }, loc); diff --git a/src/bun.js/ModuleLoader.zig b/src/bun.js/ModuleLoader.zig index c40da5ff99..5bdfc5cb88 100644 --- a/src/bun.js/ModuleLoader.zig +++ b/src/bun.js/ModuleLoader.zig @@ -1111,7 +1111,7 @@ pub fn transpileSourceCode( .allocator = null, .specifier = input_specifier, .source_url = input_specifier.createIfDifferent(path.text), - .jsvalue_for_export = parse_result.ast.parts.@"[0]"().stmts[0].data.s_expr.value.toJS(allocator, globalObject orelse jsc_vm.global) catch |e| panic("Unexpected JS error: {s}", .{@errorName(e)}), + .jsvalue_for_export = parse_result.ast.parts.at(0).stmts[0].data.s_expr.value.toJS(allocator, globalObject orelse jsc_vm.global) catch |e| panic("Unexpected JS error: {s}", .{@errorName(e)}), .tag = .exports_object, }; } diff --git a/src/bun.js/api/bun/h2_frame_parser.zig b/src/bun.js/api/bun/h2_frame_parser.zig index 856d82b853..8f032d36e0 100644 --- a/src/bun.js/api/bun/h2_frame_parser.zig +++ b/src/bun.js/api/bun/h2_frame_parser.zig @@ -4465,13 +4465,9 @@ pub const H2FrameParser = struct { this.detachNativeSocket(); this.readBuffer.deinit(); - - { - var writeBuffer = this.writeBuffer; - this.writeBuffer = .{}; - writeBuffer.deinitWithAllocator(this.allocator); - } + this.writeBuffer.clearAndFree(this.allocator); this.writeBufferOffset = 0; + if (this.hpack) |hpack| { hpack.deinit(); this.hpack = null; diff --git a/src/bun.js/api/bun/socket.zig b/src/bun.js/api/bun/socket.zig index 6574b78fc2..7d8b5bb6a5 100644 --- a/src/bun.js/api/bun/socket.zig +++ b/src/bun.js/api/bun/socket.zig @@ -285,7 +285,7 @@ pub fn NewSocket(comptime ssl: bool) type { // Ensure the socket is still alive for any defer's we have this.ref(); defer this.deref(); - this.buffered_data_for_node_net.deinitWithAllocator(bun.default_allocator); + this.buffered_data_for_node_net.clearAndFree(bun.default_allocator); const needs_deref = !this.socket.isDetached(); this.socket = Socket.detached; @@ -368,7 +368,7 @@ pub fn NewSocket(comptime ssl: bool) type { pub fn closeAndDetach(this: *This, code: uws.Socket.CloseCode) void { const socket = this.socket; - this.buffered_data_for_node_net.deinitWithAllocator(bun.default_allocator); + this.buffered_data_for_node_net.clearAndFree(bun.default_allocator); this.socket.detach(); this.detachNativeCallback(); @@ -883,7 +883,7 @@ pub fn NewSocket(comptime ssl: bool) type { pub fn writeBuffered(this: *This, globalObject: *jsc.JSGlobalObject, callframe: *jsc.CallFrame) bun.JSError!JSValue { if (this.socket.isDetached()) { - this.buffered_data_for_node_net.deinitWithAllocator(bun.default_allocator); + 
this.buffered_data_for_node_net.clearAndFree(bun.default_allocator); // TODO: should we separate unattached and detached? unattached shouldn't throw here const err: jsc.SystemError = .{ .errno = @intFromEnum(bun.sys.SystemErrno.EBADF), @@ -904,7 +904,7 @@ pub fn NewSocket(comptime ssl: bool) type { pub fn endBuffered(this: *This, globalObject: *jsc.JSGlobalObject, callframe: *jsc.CallFrame) bun.JSError!JSValue { if (this.socket.isDetached()) { - this.buffered_data_for_node_net.deinitWithAllocator(bun.default_allocator); + this.buffered_data_for_node_net.clearAndFree(bun.default_allocator); return .false; } @@ -987,8 +987,7 @@ pub fn NewSocket(comptime ssl: bool) type { const written: usize = @intCast(@max(rc, 0)); const leftover = total_to_write -| written; if (leftover == 0) { - this.buffered_data_for_node_net.deinitWithAllocator(bun.default_allocator); - this.buffered_data_for_node_net = .{}; + this.buffered_data_for_node_net.clearAndFree(bun.default_allocator); break :brk rc; } @@ -1004,7 +1003,10 @@ pub fn NewSocket(comptime ssl: bool) type { } if (remaining_in_input_data.len > 0) { - bun.handleOom(this.buffered_data_for_node_net.append(bun.default_allocator, remaining_in_input_data)); + bun.handleOom(this.buffered_data_for_node_net.appendSlice( + bun.default_allocator, + remaining_in_input_data, + )); } break :brk rc; @@ -1012,15 +1014,17 @@ pub fn NewSocket(comptime ssl: bool) type { } // slower-path: clone the data, do one write. - bun.handleOom(this.buffered_data_for_node_net.append(bun.default_allocator, buffer.slice())); + bun.handleOom(this.buffered_data_for_node_net.appendSlice( + bun.default_allocator, + buffer.slice(), + )); const rc = this.writeMaybeCorked(this.buffered_data_for_node_net.slice()); if (rc > 0) { const wrote: usize = @intCast(@max(rc, 0)); // did we write everything? // we can free this temporary buffer. if (wrote == this.buffered_data_for_node_net.len) { - this.buffered_data_for_node_net.deinitWithAllocator(bun.default_allocator); - this.buffered_data_for_node_net = .{}; + this.buffered_data_for_node_net.clearAndFree(bun.default_allocator); } else { // Otherwise, let's move the temporary buffer back. 
const len = @as(usize, @intCast(this.buffered_data_for_node_net.len)) - wrote; @@ -1166,7 +1170,10 @@ pub fn NewSocket(comptime ssl: bool) type { if (buffer_unwritten_data) { const remaining = bytes[uwrote..]; if (remaining.len > 0) { - bun.handleOom(this.buffered_data_for_node_net.append(bun.default_allocator, remaining)); + bun.handleOom(this.buffered_data_for_node_net.appendSlice( + bun.default_allocator, + remaining, + )); } } @@ -1203,8 +1210,7 @@ pub fn NewSocket(comptime ssl: bool) type { _ = bun.c.memmove(this.buffered_data_for_node_net.ptr, remaining.ptr, remaining.len); this.buffered_data_for_node_net.len = @truncate(remaining.len); } else { - this.buffered_data_for_node_net.deinitWithAllocator(bun.default_allocator); - this.buffered_data_for_node_net = .{}; + this.buffered_data_for_node_net.clearAndFree(bun.default_allocator); } } } @@ -1293,7 +1299,7 @@ pub fn NewSocket(comptime ssl: bool) type { this.markInactive(); this.detachNativeCallback(); - this.buffered_data_for_node_net.deinitWithAllocator(bun.default_allocator); + this.buffered_data_for_node_net.deinit(bun.default_allocator); this.poll_ref.unref(jsc.VirtualMachine.get()); // need to deinit event without being attached diff --git a/src/bun.js/api/html_rewriter.zig b/src/bun.js/api/html_rewriter.zig index c2a4f2d720..b928bb7e8d 100644 --- a/src/bun.js/api/html_rewriter.zig +++ b/src/bun.js/api/html_rewriter.zig @@ -277,7 +277,7 @@ pub const HTMLRewriter = struct { return; } - const write_result = this.output.write(.{ .temporary = bun.ByteList.init(bytes) }); + const write_result = this.output.write(.{ .temporary = bun.ByteList.fromBorrowedSliceDangerous(bytes) }); switch (write_result) { .err => |err| { @@ -346,7 +346,7 @@ pub const HTMLRewriter = struct { .path = bun.handleOom(bun.default_allocator.dupe(u8, LOLHTML.HTMLString.lastError().slice())), }; }; - if (comptime deinit_) bytes.listManaged(bun.default_allocator).deinit(); + if (comptime deinit_) bytes.deinit(bun.default_allocator); return null; } diff --git a/src/bun.js/api/server/NodeHTTPResponse.zig b/src/bun.js/api/server/NodeHTTPResponse.zig index 2e0f8883ed..8207977a07 100644 --- a/src/bun.js/api/server/NodeHTTPResponse.zig +++ b/src/bun.js/api/server/NodeHTTPResponse.zig @@ -257,7 +257,7 @@ pub fn shouldRequestBePending(this: *const NodeHTTPResponse) bool { pub fn dumpRequestBody(this: *NodeHTTPResponse, globalObject: *jsc.JSGlobalObject, _: *jsc.CallFrame, thisValue: jsc.JSValue) bun.JSError!jsc.JSValue { if (this.buffered_request_body_data_during_pause.cap > 0) { - this.buffered_request_body_data_during_pause.deinitWithAllocator(bun.default_allocator); + this.buffered_request_body_data_during_pause.clearAndFree(bun.default_allocator); } if (!this.flags.request_has_completed) { this.clearOnDataCallback(thisValue, globalObject); @@ -273,7 +273,7 @@ fn markRequestAsDone(this: *NodeHTTPResponse) void { this.clearOnDataCallback(this.getThisValue(), jsc.VirtualMachine.get().global); this.upgrade_context.deinit(); - this.buffered_request_body_data_during_pause.deinitWithAllocator(bun.default_allocator); + this.buffered_request_body_data_during_pause.clearAndFree(bun.default_allocator); const server = this.server; this.js_ref.unref(jsc.VirtualMachine.get()); this.deref(); @@ -705,7 +705,10 @@ pub fn abort(this: *NodeHTTPResponse, _: *jsc.JSGlobalObject, _: *jsc.CallFrame) fn onBufferRequestBodyWhilePaused(this: *NodeHTTPResponse, chunk: []const u8, last: bool) void { log("onBufferRequestBodyWhilePaused({d}, {})", .{ chunk.len, last }); - 
bun.handleOom(this.buffered_request_body_data_during_pause.append(bun.default_allocator, chunk)); + bun.handleOom(this.buffered_request_body_data_during_pause.appendSlice( + bun.default_allocator, + chunk, + )); if (last) { this.flags.is_data_buffered_during_pause_last = true; if (this.body_read_ref.has) { @@ -743,7 +746,7 @@ fn onDataOrAborted(this: *NodeHTTPResponse, chunk: []const u8, last: bool, event const bytes: jsc.JSValue = brk: { if (chunk.len > 0 and this.buffered_request_body_data_during_pause.len > 0) { const buffer = jsc.JSValue.createBufferFromLength(globalThis, chunk.len + this.buffered_request_body_data_during_pause.len) catch return; // TODO: properly propagate exception upwards - this.buffered_request_body_data_during_pause.deinitWithAllocator(bun.default_allocator); + this.buffered_request_body_data_during_pause.clearAndFree(bun.default_allocator); if (buffer.asArrayBuffer(globalThis)) |array_buffer| { var input = array_buffer.slice(); @memcpy(input[0..this.buffered_request_body_data_during_pause.len], this.buffered_request_body_data_during_pause.slice()); @@ -1134,7 +1137,7 @@ fn deinit(this: *NodeHTTPResponse) void { bun.debugAssert(!this.flags.is_request_pending); bun.debugAssert(this.flags.socket_closed or this.flags.request_has_completed); - this.buffered_request_body_data_during_pause.deinitWithAllocator(bun.default_allocator); + this.buffered_request_body_data_during_pause.deinit(bun.default_allocator); this.js_ref.unref(jsc.VirtualMachine.get()); this.body_read_ref.unref(jsc.VirtualMachine.get()); diff --git a/src/bun.js/api/server/RequestContext.zig b/src/bun.js/api/server/RequestContext.zig index 849303e32e..e1c0097107 100644 --- a/src/bun.js/api/server/RequestContext.zig +++ b/src/bun.js/api/server/RequestContext.zig @@ -1761,7 +1761,7 @@ pub fn NewRequestContext(comptime ssl_enabled: bool, comptime debug_mode: bool, // we can avoid streaming it and just send it all at once. 
if (byte_stream.has_received_last_chunk) { var byte_list = byte_stream.drain(); - this.blob = .fromArrayList(byte_list.listManaged(bun.default_allocator)); + this.blob = .fromArrayList(byte_list.moveToListManaged(bun.default_allocator)); this.readable_stream_ref.deinit(); this.doRenderBlob(); return; @@ -1771,7 +1771,8 @@ pub fn NewRequestContext(comptime ssl_enabled: bool, comptime debug_mode: bool, this.readable_stream_ref = jsc.WebCore.ReadableStream.Strong.init(stream, globalThis); this.byte_stream = byte_stream; - this.response_buf_owned = byte_stream.drain().list(); + var response_buf = byte_stream.drain(); + this.response_buf_owned = response_buf.moveToList(); // we don't set size here because even if we have a hint // uWebSockets won't let us partially write streaming content @@ -1817,8 +1818,8 @@ pub fn NewRequestContext(comptime ssl_enabled: bool, comptime debug_mode: bool, if (is_done) this.deref(); if (stream_needs_deinit) { switch (stream_) { - .owned_and_done => |*owned| owned.listManaged(allocator).deinit(), - .owned => |*owned| owned.listManaged(allocator).deinit(), + .owned_and_done => |*owned| owned.deinit(allocator), + .owned => |*owned| owned.deinit(allocator), else => unreachable, } } @@ -2240,7 +2241,7 @@ pub fn NewRequestContext(comptime ssl_enabled: bool, comptime debug_mode: bool, if (!last) { readable.ptr.Bytes.onData( .{ - .temporary = bun.ByteList.initConst(chunk), + .temporary = bun.ByteList.fromBorrowedSliceDangerous(chunk), }, bun.default_allocator, ); @@ -2256,7 +2257,7 @@ pub fn NewRequestContext(comptime ssl_enabled: bool, comptime debug_mode: bool, readable.value.ensureStillAlive(); readable.ptr.Bytes.onData( .{ - .temporary_and_done = bun.ByteList.initConst(chunk), + .temporary_and_done = bun.ByteList.fromBorrowedSliceDangerous(chunk), }, bun.default_allocator, ); diff --git a/src/bun.js/api/server/ServerConfig.zig b/src/bun.js/api/server/ServerConfig.zig index 907e878bf9..5e347924d5 100644 --- a/src/bun.js/api/server/ServerConfig.zig +++ b/src/bun.js/api/server/ServerConfig.zig @@ -260,11 +260,11 @@ pub fn deinit(this: *ServerConfig) void { ssl_config.deinit(); this.ssl_config = null; } - if (this.sni) |sni| { + if (this.sni) |*sni| { for (sni.slice()) |*ssl_config| { ssl_config.deinit(); } - this.sni.?.deinitWithAllocator(bun.default_allocator); + sni.deinit(bun.default_allocator); this.sni = null; } @@ -939,7 +939,7 @@ pub fn fromJS( args.sni = bun.handleOom(bun.BabyList(SSLConfig).initCapacity(bun.default_allocator, value_iter.len - 1)); } - bun.handleOom(args.sni.?.push(bun.default_allocator, ssl_config)); + bun.handleOom(args.sni.?.append(bun.default_allocator, ssl_config)); } } } diff --git a/src/bun.js/ipc.zig b/src/bun.js/ipc.zig index 6ce31d65c4..660dbe0136 100644 --- a/src/bun.js/ipc.zig +++ b/src/bun.js/ipc.zig @@ -459,7 +459,7 @@ pub const SendQueue = struct { for (self.queue.items) |*item| item.deinit(); self.queue.deinit(); self.internal_msg_queue.deinit(); - self.incoming.deinitWithAllocator(bun.default_allocator); + self.incoming.deinit(bun.default_allocator); if (self.waiting_for_ack) |*waiting| waiting.deinit(); // if there is a close next tick task, cancel it so it doesn't get called and then UAF @@ -1297,10 +1297,10 @@ pub const IPCHandlers = struct { pub const WindowsNamedPipe = struct { fn onReadAlloc(send_queue: *SendQueue, suggested_size: usize) []u8 { - var available = send_queue.incoming.available(); + var available = send_queue.incoming.unusedCapacitySlice(); if (available.len < suggested_size) { 
bun.handleOom(send_queue.incoming.ensureUnusedCapacity(bun.default_allocator, suggested_size)); - available = send_queue.incoming.available(); + available = send_queue.incoming.unusedCapacitySlice(); } log("NewNamedPipeIPCHandler#onReadAlloc {d}", .{suggested_size}); return available.ptr[0..suggested_size]; diff --git a/src/bun.js/node/fs_events.zig b/src/bun.js/node/fs_events.zig index 59df4243ff..22315f8d0a 100644 --- a/src/bun.js/node/fs_events.zig +++ b/src/bun.js/node/fs_events.zig @@ -484,7 +484,7 @@ pub const FSEventsLoop = struct { defer this.mutex.unlock(); if (this.watcher_count == this.watchers.len) { this.watcher_count += 1; - this.watchers.push(bun.default_allocator, watcher) catch unreachable; + bun.handleOom(this.watchers.append(bun.default_allocator, watcher)); } else { var watchers = this.watchers.slice(); for (watchers, 0..) |w, i| { @@ -544,8 +544,7 @@ pub const FSEventsLoop = struct { } } - this.watchers.deinitWithAllocator(bun.default_allocator); - + this.watchers.deinit(bun.default_allocator); bun.default_allocator.destroy(this); } }; diff --git a/src/bun.js/node/path_watcher.zig b/src/bun.js/node/path_watcher.zig index 3556185f40..16c1f5b462 100644 --- a/src/bun.js/node/path_watcher.zig +++ b/src/bun.js/node/path_watcher.zig @@ -113,7 +113,7 @@ pub const PathWatcherManager = struct { const this = bun.handleOom(bun.default_allocator.create(PathWatcherManager)); errdefer bun.default_allocator.destroy(this); var watchers = bun.handleOom(bun.BabyList(?*PathWatcher).initCapacity(bun.default_allocator, 1)); - errdefer watchers.deinitWithAllocator(bun.default_allocator); + errdefer watchers.deinit(bun.default_allocator); const manager = PathWatcherManager{ .file_paths = bun.StringHashMap(PathInfo).init(bun.default_allocator), @@ -348,7 +348,7 @@ pub const PathWatcherManager = struct { routine = entry.value_ptr.*; if (watcher.refPendingDirectory()) { - routine.watcher_list.push(bun.default_allocator, watcher) catch |err| { + routine.watcher_list.append(bun.default_allocator, watcher) catch |err| { watcher.unrefPendingDirectory(); return err; }; @@ -369,7 +369,7 @@ pub const PathWatcherManager = struct { }; errdefer routine.deinit(); if (watcher.refPendingDirectory()) { - routine.watcher_list.push(bun.default_allocator, watcher) catch |err| { + routine.watcher_list.append(bun.default_allocator, watcher) catch |err| { watcher.unrefPendingDirectory(); return err; }; @@ -448,7 +448,7 @@ pub const PathWatcherManager = struct { { watcher.mutex.lock(); defer watcher.mutex.unlock(); - watcher.file_paths.push(bun.default_allocator, child_path.path) catch |err| { + watcher.file_paths.append(bun.default_allocator, child_path.path) catch |err| { manager._decrementPathRef(entry_path_z); return switch (err) { error.OutOfMemory => .{ .err = .{ @@ -541,7 +541,7 @@ pub const PathWatcherManager = struct { if (this.watcher_count == this.watchers.len) { this.watcher_count += 1; - this.watchers.push(bun.default_allocator, watcher) catch |err| { + this.watchers.append(bun.default_allocator, watcher) catch |err| { this.watcher_count -= 1; return err; }; @@ -687,11 +687,8 @@ pub const PathWatcherManager = struct { } this.file_paths.deinit(); - - this.watchers.deinitWithAllocator(bun.default_allocator); - + this.watchers.deinit(bun.default_allocator); this.current_fd_task.deinit(); - bun.default_allocator.destroy(this); } }; @@ -889,11 +886,11 @@ pub const PathWatcher = struct { manager.unregisterWatcher(this); } else { manager.unregisterWatcher(this); - 
this.file_paths.deinitWithAllocator(bun.default_allocator); + this.file_paths.deinit(bun.default_allocator); } } else { manager.unregisterWatcher(this); - this.file_paths.deinitWithAllocator(bun.default_allocator); + this.file_paths.deinit(bun.default_allocator); } } diff --git a/src/bun.js/webcore/ArrayBufferSink.zig b/src/bun.js/webcore/ArrayBufferSink.zig index d6ba0bd7c1..dd50843d34 100644 --- a/src/bun.js/webcore/ArrayBufferSink.zig +++ b/src/bun.js/webcore/ArrayBufferSink.zig @@ -16,15 +16,13 @@ pub fn connect(this: *ArrayBufferSink, signal: Signal) void { } pub fn start(this: *ArrayBufferSink, stream_start: streams.Start) bun.sys.Maybe(void) { - this.bytes.len = 0; - var list = this.bytes.listManaged(this.allocator); - list.clearRetainingCapacity(); + this.bytes.clearRetainingCapacity(); switch (stream_start) { .ArrayBufferSink => |config| { if (config.chunk_size > 0) { - list.ensureTotalCapacityPrecise(config.chunk_size) catch return .{ .err = Syscall.Error.oom }; - this.bytes.update(list); + this.bytes.ensureTotalCapacityPrecise(this.allocator, config.chunk_size) catch + return .{ .err = Syscall.Error.oom }; } this.as_uint8array = config.as_uint8array; @@ -63,7 +61,7 @@ pub fn finalize(this: *ArrayBufferSink) void { pub fn init(allocator: std.mem.Allocator, next: ?Sink) !*ArrayBufferSink { return bun.new(ArrayBufferSink, .{ - .bytes = bun.ByteList.init(&.{}), + .bytes = bun.ByteList.empty, .allocator = allocator, .next = next, }); @@ -121,7 +119,7 @@ pub fn end(this: *ArrayBufferSink, err: ?Syscall.Error) bun.sys.Maybe(void) { return .success; } pub fn destroy(this: *ArrayBufferSink) void { - this.bytes.deinitWithAllocator(this.allocator); + this.bytes.deinit(this.allocator); bun.destroy(this); } pub fn toJS(this: *ArrayBufferSink, globalThis: *JSGlobalObject, as_uint8array: bool) JSValue { @@ -134,10 +132,9 @@ pub fn toJS(this: *ArrayBufferSink, globalThis: *JSGlobalObject, as_uint8array: return value; } - var list = this.bytes.listManaged(this.allocator); - this.bytes = bun.ByteList.init(""); + defer this.bytes = bun.ByteList.empty; return ArrayBuffer.fromBytes( - try list.toOwnedSlice(), + try this.bytes.toOwnedSlice(this.allocator), if (as_uint8array) .Uint8Array else @@ -151,12 +148,11 @@ pub fn endFromJS(this: *ArrayBufferSink, _: *JSGlobalObject) bun.sys.Maybe(Array } bun.assert(this.next == null); - var list = this.bytes.listManaged(this.allocator); - this.bytes = bun.ByteList.init(""); this.done = true; this.signal.close(null); + defer this.bytes = bun.ByteList.empty; return .{ .result = ArrayBuffer.fromBytes( - bun.handleOom(list.toOwnedSlice()), + bun.handleOom(this.bytes.toOwnedSlice(this.allocator)), if (this.as_uint8array) .Uint8Array else diff --git a/src/bun.js/webcore/Body.zig b/src/bun.js/webcore/Body.zig index 9dc2de6f1f..fc47b6570c 100644 --- a/src/bun.js/webcore/Body.zig +++ b/src/bun.js/webcore/Body.zig @@ -1441,8 +1441,8 @@ pub const ValueBufferer = struct { defer { if (stream_needs_deinit) { switch (stream_) { - .owned_and_done => |*owned| owned.listManaged(allocator).deinit(), - .owned => |*owned| owned.listManaged(allocator).deinit(), + .owned_and_done => |*owned| owned.deinit(allocator), + .owned => |*owned| owned.deinit(allocator), else => unreachable, } } @@ -1503,7 +1503,7 @@ pub const ValueBufferer = struct { var globalThis = sink.global; buffer_stream.* = ArrayBufferSink.JSSink{ .sink = ArrayBufferSink{ - .bytes = bun.ByteList.init(&.{}), + .bytes = bun.ByteList.empty, .allocator = allocator, .next = null, }, diff --git 
a/src/bun.js/webcore/ByteBlobLoader.zig b/src/bun.js/webcore/ByteBlobLoader.zig index 350cab17f0..6f6016ca4b 100644 --- a/src/bun.js/webcore/ByteBlobLoader.zig +++ b/src/bun.js/webcore/ByteBlobLoader.zig @@ -166,12 +166,12 @@ pub fn drain(this: *ByteBlobLoader) bun.ByteList { temporary = temporary[this.offset..]; temporary = temporary[0..@min(16384, @min(temporary.len, this.remain))]; - var byte_list = bun.ByteList.init(temporary); - const cloned = bun.handleOom(byte_list.listManaged(bun.default_allocator).clone()); - this.offset +|= @as(Blob.SizeType, @truncate(cloned.items.len)); - this.remain -|= @as(Blob.SizeType, @truncate(cloned.items.len)); + var byte_list = bun.ByteList.fromBorrowedSliceDangerous(temporary); + const cloned = bun.handleOom(byte_list.clone(bun.default_allocator)); + this.offset +|= @as(Blob.SizeType, cloned.len); + this.remain -|= @as(Blob.SizeType, cloned.len); - return bun.ByteList.fromList(cloned); + return cloned; } pub fn toBufferedValue(this: *ByteBlobLoader, globalThis: *JSGlobalObject, action: streams.BufferAction.Tag) bun.JSError!JSValue { diff --git a/src/bun.js/webcore/ByteStream.zig b/src/bun.js/webcore/ByteStream.zig index 25d4d3036a..95a2017230 100644 --- a/src/bun.js/webcore/ByteStream.zig +++ b/src/bun.js/webcore/ByteStream.zig @@ -43,7 +43,8 @@ pub fn onStart(this: *@This()) streams.Start { } if (this.has_received_last_chunk) { - return .{ .owned_and_done = bun.ByteList.fromList(this.buffer.moveToUnmanaged()) }; + var buffer = this.buffer.moveToUnmanaged(); + return .{ .owned_and_done = bun.ByteList.moveFromList(&buffer) }; } if (this.highWaterMark == 0) { @@ -230,11 +231,11 @@ pub fn append( if (this.buffer.capacity == 0) { switch (stream_) { .owned => |*owned| { - this.buffer = owned.listManaged(allocator); + this.buffer = owned.moveToListManaged(allocator); this.offset += offset; }, .owned_and_done => |*owned| { - this.buffer = owned.listManaged(allocator); + this.buffer = owned.moveToListManaged(allocator); this.offset += offset; }, .temporary_and_done, .temporary => { @@ -390,16 +391,8 @@ pub fn deinit(this: *@This()) void { pub fn drain(this: *@This()) bun.ByteList { if (this.buffer.items.len > 0) { - const out = bun.ByteList.fromList(this.buffer); - this.buffer = .{ - .allocator = bun.default_allocator, - .items = &.{}, - .capacity = 0, - }; - - return out; + return bun.ByteList.moveFromList(&this.buffer); } - return .{}; } diff --git a/src/bun.js/webcore/FileReader.zig b/src/bun.js/webcore/FileReader.zig index 0807d0bf54..33dcc82036 100644 --- a/src/bun.js/webcore/FileReader.zig +++ b/src/bun.js/webcore/FileReader.zig @@ -264,9 +264,7 @@ pub fn onStart(this: *FileReader) streams.Start { if (this.reader.isDone()) { this.consumeReaderBuffer(); if (this.buffered.items.len > 0) { - const buffered = this.buffered; - this.buffered = .{}; - return .{ .owned_and_done = bun.ByteList.fromList(buffered) }; + return .{ .owned_and_done = bun.ByteList.moveFromList(&this.buffered) }; } } else if (comptime Environment.isPosix) { if (!was_lazy and this.reader.flags.pollable) { @@ -331,6 +329,7 @@ pub fn onReadChunk(this: *@This(), init_buf: []const u8, state: bun.io.ReadState } } + const reader_buffer = this.reader.buffer(); if (this.read_inside_on_pull != .none) { switch (this.read_inside_on_pull) { .js => |in_progress| { @@ -352,35 +351,30 @@ pub fn onReadChunk(this: *@This(), init_buf: []const u8, state: bun.io.ReadState else => @panic("Invalid state"), } } else if (this.pending.state == .pending) { - if (buf.len == 0) { - { - if (this.buffered.items.len 
== 0) { - if (this.buffered.capacity > 0) { - this.buffered.clearAndFree(bun.default_allocator); - } - - if (this.reader.buffer().items.len != 0) { - this.buffered = this.reader.buffer().moveToUnmanaged(); - } - } - - var buffer = &this.buffered; - defer buffer.clearAndFree(bun.default_allocator); - if (buffer.items.len > 0) { - if (this.pending_view.len >= buffer.items.len) { - @memcpy(this.pending_view[0..buffer.items.len], buffer.items); - this.pending.result = .{ .into_array_and_done = .{ .value = this.pending_value.get() orelse .zero, .len = @truncate(buffer.items.len) } }; - } else { - this.pending.result = .{ .owned_and_done = bun.ByteList.fromList(buffer.*) }; - buffer.* = .{}; - } - } else { - this.pending.result = .{ .done = {} }; - } - } + defer { this.pending_value.clearWithoutDeallocation(); this.pending_view = &.{}; this.pending.run(); + } + + if (buf.len == 0) { + if (this.buffered.items.len == 0) { + this.buffered.clearAndFree(bun.default_allocator); + this.buffered = reader_buffer.moveToUnmanaged(); + } + + var buffer = &this.buffered; + defer buffer.clearAndFree(bun.default_allocator); + if (buffer.items.len > 0) { + if (this.pending_view.len >= buffer.items.len) { + @memcpy(this.pending_view[0..buffer.items.len], buffer.items); + this.pending.result = .{ .into_array_and_done = .{ .value = this.pending_value.get() orelse .zero, .len = @truncate(buffer.items.len) } }; + } else { + this.pending.result = .{ .owned_and_done = bun.ByteList.moveFromList(buffer) }; + } + } else { + this.pending.result = .{ .done = {} }; + } return false; } @@ -388,78 +382,63 @@ pub fn onReadChunk(this: *@This(), init_buf: []const u8, state: bun.io.ReadState if (this.pending_view.len >= buf.len) { @memcpy(this.pending_view[0..buf.len], buf); - this.reader.buffer().clearRetainingCapacity(); + reader_buffer.clearRetainingCapacity(); this.buffered.clearRetainingCapacity(); - if (was_done) { - this.pending.result = .{ - .into_array_and_done = .{ - .value = this.pending_value.get() orelse .zero, - .len = @truncate(buf.len), - }, - }; - } else { - this.pending.result = .{ - .into_array = .{ - .value = this.pending_value.get() orelse .zero, - .len = @truncate(buf.len), - }, - }; - } + const into_array: streams.Result.IntoArray = .{ + .value = this.pending_value.get() orelse .zero, + .len = @truncate(buf.len), + }; - this.pending_value.clearWithoutDeallocation(); - this.pending_view = &.{}; - this.pending.run(); + this.pending.result = if (was_done) + .{ .into_array_and_done = into_array } + else + .{ .into_array = into_array }; + return !was_done; + } + + if (bun.isSliceInBuffer(buf, reader_buffer.allocatedSlice())) { + if (this.reader.isDone()) { + bun.assert_eql(buf.ptr, reader_buffer.items.ptr); + var buffer = reader_buffer.moveToUnmanaged(); + buffer.shrinkRetainingCapacity(buf.len); + this.pending.result = .{ .owned_and_done = .moveFromList(&buffer) }; + } else { + reader_buffer.clearRetainingCapacity(); + this.pending.result = .{ .temporary = .fromBorrowedSliceDangerous(buf) }; + } return !was_done; } if (!bun.isSliceInBuffer(buf, this.buffered.allocatedSlice())) { - if (this.reader.isDone()) { - if (bun.isSliceInBuffer(buf, this.reader.buffer().allocatedSlice())) { - this.reader.buffer().* = std.ArrayList(u8).init(bun.default_allocator); - } - this.pending.result = .{ - .temporary_and_done = bun.ByteList.init(buf), - }; - } else { - this.pending.result = .{ - .temporary = bun.ByteList.init(buf), - }; - - if (bun.isSliceInBuffer(buf, this.reader.buffer().allocatedSlice())) { - 
this.reader.buffer().clearRetainingCapacity(); - } - } - - this.pending_value.clearWithoutDeallocation(); - this.pending_view = &.{}; - this.pending.run(); + this.pending.result = if (this.reader.isDone()) + .{ .temporary_and_done = .fromBorrowedSliceDangerous(buf) } + else + .{ .temporary = .fromBorrowedSliceDangerous(buf) }; return !was_done; } - if (this.reader.isDone()) { - this.pending.result = .{ - .owned_and_done = bun.ByteList.init(buf), - }; - } else { - this.pending.result = .{ - .owned = bun.ByteList.init(buf), - }; - } + bun.assert_eql(buf.ptr, this.buffered.items.ptr); + var buffered = this.buffered; this.buffered = .{}; - this.pending_value.clearWithoutDeallocation(); - this.pending_view = &.{}; - this.pending.run(); + buffered.shrinkRetainingCapacity(buf.len); + + this.pending.result = if (this.reader.isDone()) + .{ .owned_and_done = .moveFromList(&buffered) } + else + .{ .owned = .moveFromList(&buffered) }; return !was_done; } else if (!bun.isSliceInBuffer(buf, this.buffered.allocatedSlice())) { bun.handleOom(this.buffered.appendSlice(bun.default_allocator, buf)); - if (bun.isSliceInBuffer(buf, this.reader.buffer().allocatedSlice())) { - this.reader.buffer().clearRetainingCapacity(); + if (bun.isSliceInBuffer(buf, reader_buffer.allocatedSlice())) { + reader_buffer.clearRetainingCapacity(); } } // For pipes, we have to keep pulling or the other process will block. - return this.read_inside_on_pull != .temporary and !(this.buffered.items.len + this.reader.buffer().items.len >= this.highwater_mark and !this.reader.flags.pollable); + return this.read_inside_on_pull != .temporary and + !(this.buffered.items.len + reader_buffer.items.len >= this.highwater_mark and + !this.reader.flags.pollable); } fn isPulling(this: *const FileReader) bool { @@ -525,20 +504,17 @@ pub fn onPull(this: *FileReader, buffer: []u8, array: jsc.JSValue) streams.Resul .temporary => |buf| { log("onPull({d}) = {d}", .{ buffer.len, buf.len }); if (this.reader.isDone()) { - return .{ .temporary_and_done = bun.ByteList.init(buf) }; + return .{ .temporary_and_done = bun.ByteList.fromBorrowedSliceDangerous(buf) }; } - return .{ .temporary = bun.ByteList.init(buf) }; + return .{ .temporary = bun.ByteList.fromBorrowedSliceDangerous(buf) }; }, .use_buffered => { - const buffered = this.buffered; - this.buffered = .{}; - log("onPull({d}) = {d}", .{ buffer.len, buffered.items.len }); + log("onPull({d}) = {d}", .{ buffer.len, this.buffered.items.len }); if (this.reader.isDone()) { - return .{ .owned_and_done = bun.ByteList.fromList(buffered) }; + return .{ .owned_and_done = bun.ByteList.moveFromList(&this.buffered) }; } - - return .{ .owned = bun.ByteList.fromList(buffered) }; + return .{ .owned = bun.ByteList.moveFromList(&this.buffered) }; }, else => {}, } @@ -560,8 +536,7 @@ pub fn onPull(this: *FileReader, buffer: []u8, array: jsc.JSValue) streams.Resul pub fn drain(this: *FileReader) bun.ByteList { if (this.buffered.items.len > 0) { - const out = bun.ByteList.fromList(this.buffered); - this.buffered = .{}; + const out = bun.ByteList.moveFromList(&this.buffered); if (comptime Environment.allow_assert) { bun.assert(this.reader.buffer().items.ptr != out.ptr); } @@ -572,9 +547,7 @@ pub fn drain(this: *FileReader) bun.ByteList { return .{}; } - const out = this.reader.buffer().*; - this.reader.buffer().* = std.ArrayList(u8).init(bun.default_allocator); - return bun.ByteList.fromList(out); + return bun.ByteList.moveFromList(this.reader.buffer()); } pub fn setRefOrUnref(this: *FileReader, enable: bool) void { @@ -594,7 
+567,7 @@ pub fn onReaderDone(this: *FileReader) void { this.consumeReaderBuffer(); if (this.pending.state == .pending) { if (this.buffered.items.len > 0) { - this.pending.result = .{ .owned_and_done = bun.ByteList.fromList(this.buffered) }; + this.pending.result = .{ .owned_and_done = bun.ByteList.moveFromList(&this.buffered) }; } else { this.pending.result = .{ .done = {} }; } diff --git a/src/bun.js/webcore/ResumableSink.zig b/src/bun.js/webcore/ResumableSink.zig index 11df2ea90c..ddbe325c40 100644 --- a/src/bun.js/webcore/ResumableSink.zig +++ b/src/bun.js/webcore/ResumableSink.zig @@ -91,25 +91,23 @@ pub fn ResumableSink( break :brk_err null; }; - var byte_list = byte_stream.drain(); - const bytes = byte_list.listManaged(bun.default_allocator); - defer bytes.deinit(); - log("onWrite {}", .{bytes.items.len}); - _ = onWrite(this.context, bytes.items); + var bytes = byte_stream.drain(); + defer bytes.deinit(bun.default_allocator); + log("onWrite {}", .{bytes.len}); + _ = onWrite(this.context, bytes.slice()); onEnd(this.context, err); this.deref(); return this; } // We can pipe but we also wanna to drain as much as possible first - var byte_list = byte_stream.drain(); - const bytes = byte_list.listManaged(bun.default_allocator); - defer bytes.deinit(); + var bytes = byte_stream.drain(); + defer bytes.deinit(bun.default_allocator); // lets write and see if we can still pipe or if we have backpressure - if (bytes.items.len > 0) { - log("onWrite {}", .{bytes.items.len}); + if (bytes.len > 0) { + log("onWrite {}", .{bytes.len}); // we ignore the return value here because we dont want to pause the stream // if we pause will just buffer in the pipe and we can do the buffer in one place - _ = onWrite(this.context, bytes.items); + _ = onWrite(this.context, bytes.slice()); } this.status = .piped; byte_stream.pipe = jsc.WebCore.Pipe.Wrap(@This(), onStreamPipe).init(this); @@ -292,8 +290,8 @@ pub fn ResumableSink( defer { if (stream_needs_deinit) { switch (stream_) { - .owned_and_done => |*owned| owned.listManaged(allocator).deinit(), - .owned => |*owned| owned.listManaged(allocator).deinit(), + .owned_and_done => |*owned| owned.deinit(allocator), + .owned => |*owned| owned.deinit(allocator), else => unreachable, } } diff --git a/src/bun.js/webcore/Sink.zig b/src/bun.js/webcore/Sink.zig index 765b71a919..f26a613feb 100644 --- a/src/bun.js/webcore/Sink.zig +++ b/src/bun.js/webcore/Sink.zig @@ -53,10 +53,10 @@ pub const UTF8Fallback = struct { bun.strings.replaceLatin1WithUTF8(buf[0..str.len]); if (input.isDone()) { - const result = writeFn(ctx, .{ .temporary_and_done = bun.ByteList.init(buf[0..str.len]) }); + const result = writeFn(ctx, .{ .temporary_and_done = bun.ByteList.fromBorrowedSliceDangerous(buf[0..str.len]) }); return result; } else { - const result = writeFn(ctx, .{ .temporary = bun.ByteList.init(buf[0..str.len]) }); + const result = writeFn(ctx, .{ .temporary = bun.ByteList.fromBorrowedSliceDangerous(buf[0..str.len]) }); return result; } } @@ -67,9 +67,9 @@ pub const UTF8Fallback = struct { bun.strings.replaceLatin1WithUTF8(slice[0..str.len]); if (input.isDone()) { - return writeFn(ctx, .{ .owned_and_done = bun.ByteList.init(slice) }); + return writeFn(ctx, .{ .owned_and_done = bun.ByteList.fromOwnedSlice(slice) }); } else { - return writeFn(ctx, .{ .owned = bun.ByteList.init(slice) }); + return writeFn(ctx, .{ .owned = bun.ByteList.fromOwnedSlice(slice) }); } } } @@ -83,10 +83,10 @@ pub const UTF8Fallback = struct { bun.assert(copied.written <= stack_size); bun.assert(copied.read <= 
stack_size); if (input.isDone()) { - const result = writeFn(ctx, .{ .temporary_and_done = bun.ByteList.init(buf[0..copied.written]) }); + const result = writeFn(ctx, .{ .temporary_and_done = bun.ByteList.fromBorrowedSliceDangerous(buf[0..copied.written]) }); return result; } else { - const result = writeFn(ctx, .{ .temporary = bun.ByteList.init(buf[0..copied.written]) }); + const result = writeFn(ctx, .{ .temporary = bun.ByteList.fromBorrowedSliceDangerous(buf[0..copied.written]) }); return result; } } @@ -94,9 +94,9 @@ pub const UTF8Fallback = struct { { const allocated = bun.strings.toUTF8Alloc(bun.default_allocator, str) catch return .{ .err = Syscall.Error.oom }; if (input.isDone()) { - return writeFn(ctx, .{ .owned_and_done = bun.ByteList.init(allocated) }); + return writeFn(ctx, .{ .owned_and_done = bun.ByteList.fromOwnedSlice(allocated) }); } else { - return writeFn(ctx, .{ .owned = bun.ByteList.init(allocated) }); + return writeFn(ctx, .{ .owned = bun.ByteList.fromOwnedSlice(allocated) }); } } } @@ -394,7 +394,9 @@ pub fn JSSink(comptime SinkType: type, comptime abi_name: []const u8) type { return jsc.JSValue.jsNumber(0); } - return this.sink.writeBytes(.{ .temporary = bun.ByteList.init(slice) }).toJS(globalThis); + return this.sink.writeBytes( + .{ .temporary = bun.ByteList.fromBorrowedSliceDangerous(slice) }, + ).toJS(globalThis); } if (!arg.isString()) { @@ -414,10 +416,14 @@ pub fn JSSink(comptime SinkType: type, comptime abi_name: []const u8) type { defer str.ensureStillAlive(); if (view.is16Bit()) { - return this.sink.writeUTF16(.{ .temporary = bun.ByteList.initConst(std.mem.sliceAsBytes(view.utf16SliceAligned())) }).toJS(globalThis); + return this.sink.writeUTF16(.{ .temporary = bun.ByteList.fromBorrowedSliceDangerous( + std.mem.sliceAsBytes(view.utf16SliceAligned()), + ) }).toJS(globalThis); } - return this.sink.writeLatin1(.{ .temporary = bun.ByteList.initConst(view.slice()) }).toJS(globalThis); + return this.sink.writeLatin1( + .{ .temporary = bun.ByteList.fromBorrowedSliceDangerous(view.slice()) }, + ).toJS(globalThis); } pub fn writeUTF8(globalThis: *JSGlobalObject, callframe: *jsc.CallFrame) bun.JSError!jsc.JSValue { diff --git a/src/bun.js/webcore/fetch.zig b/src/bun.js/webcore/fetch.zig index 46b06fea9b..633f17d16c 100644 --- a/src/bun.js/webcore/fetch.zig +++ b/src/bun.js/webcore/fetch.zig @@ -407,14 +407,14 @@ pub const FetchTasklet = struct { if (readable.ptr == .Bytes) { readable.ptr.Bytes.size_hint = this.getSizeHint(); // body can be marked as used but we still need to pipe the data - const scheduled_response_buffer = this.scheduled_response_buffer.list; + const scheduled_response_buffer = &this.scheduled_response_buffer.list; const chunk = scheduled_response_buffer.items; if (this.result.has_more) { readable.ptr.Bytes.onData( .{ - .temporary = bun.ByteList.initConst(chunk), + .temporary = bun.ByteList.fromBorrowedSliceDangerous(chunk), }, bun.default_allocator, ); @@ -424,16 +424,9 @@ pub const FetchTasklet = struct { defer prev.deinit(); buffer_reset = false; this.memory_reporter.discard(scheduled_response_buffer.allocatedSlice()); - this.scheduled_response_buffer = .{ - .allocator = bun.default_allocator, - .list = .{ - .items = &.{}, - .capacity = 0, - }, - }; readable.ptr.Bytes.onData( .{ - .owned_and_done = bun.ByteList.initConst(chunk), + .owned_and_done = bun.ByteList.moveFromList(scheduled_response_buffer), }, bun.default_allocator, ); @@ -456,7 +449,7 @@ pub const FetchTasklet = struct { if (this.result.has_more) { readable.ptr.Bytes.onData( .{ - 
.temporary = bun.ByteList.initConst(chunk), + .temporary = bun.ByteList.fromBorrowedSliceDangerous(chunk), }, bun.default_allocator, ); @@ -468,7 +461,7 @@ pub const FetchTasklet = struct { readable.value.ensureStillAlive(); readable.ptr.Bytes.onData( .{ - .temporary_and_done = bun.ByteList.initConst(chunk), + .temporary_and_done = bun.ByteList.fromBorrowedSliceDangerous(chunk), }, bun.default_allocator, ); diff --git a/src/bun.js/webcore/streams.zig b/src/bun.js/webcore/streams.zig index b88ae0708c..ad1bab017c 100644 --- a/src/bun.js/webcore/streams.zig +++ b/src/bun.js/webcore/streams.zig @@ -207,8 +207,8 @@ pub const Result = union(Tag) { pub fn deinit(this: *Result) void { switch (this.*) { - .owned => |*owned| owned.deinitWithAllocator(bun.default_allocator), - .owned_and_done => |*owned_and_done| owned_and_done.deinitWithAllocator(bun.default_allocator), + .owned => |*owned| owned.clearAndFree(bun.default_allocator), + .owned_and_done => |*owned_and_done| owned_and_done.clearAndFree(bun.default_allocator), .err => |err| { if (err == .JSValue) { err.JSValue.unprotect(); @@ -910,17 +910,13 @@ pub fn HTTPServerWritable(comptime ssl: bool) type { else => {}, } - var list = this.buffer.listManaged(this.allocator); - list.clearRetainingCapacity(); - list.ensureTotalCapacityPrecise(this.highWaterMark) catch return .{ .err = Syscall.Error.oom }; - this.buffer.update(list); + this.buffer.clearRetainingCapacity(); + this.buffer.ensureTotalCapacityPrecise(this.allocator, this.highWaterMark) catch + return .{ .err = Syscall.Error.oom }; this.done = false; - this.signal.start(); - log("start({d})", .{this.highWaterMark}); - return .success; } @@ -1260,12 +1256,7 @@ pub fn HTTPServerWritable(comptime ssl: bool) type { pub fn destroy(this: *@This()) void { log("destroy()", .{}); - var bytes = this.buffer.listManaged(this.allocator); - if (bytes.capacity > 0) { - this.buffer = bun.ByteList.init(""); - bytes.deinit(); - } - + this.buffer.deinit(this.allocator); this.unregisterAutoFlusher(); this.allocator.destroy(this); } @@ -1298,19 +1289,18 @@ pub fn HTTPServerWritable(comptime ssl: bool) type { if (this.pooled_buffer) |pooled| { this.buffer.len = 0; if (this.buffer.cap > 64 * 1024) { - this.buffer.deinitWithAllocator(bun.default_allocator); - this.buffer = bun.ByteList.init(""); + this.buffer.clearAndFree(bun.default_allocator); } pooled.data = this.buffer; - this.buffer = bun.ByteList.init(""); + this.buffer = bun.ByteList.empty; this.pooled_buffer = null; pooled.release(); } else if (this.buffer.cap == 0) { // } else if (FeatureFlags.http_buffer_pooling and !WebCore.ByteListPool.full()) { const buffer = this.buffer; - this.buffer = bun.ByteList.init(""); + this.buffer = bun.ByteList.empty; WebCore.ByteListPool.push(this.allocator, buffer); } else { // Don't release this buffer until destroy() is called @@ -1621,9 +1611,9 @@ pub const ReadResult = union(enum) { const done = is_done or (close_on_empty and slice.len == 0); break :brk if (owned and done) - Result{ .owned_and_done = bun.ByteList.init(slice) } + Result{ .owned_and_done = bun.ByteList.fromOwnedSlice(slice) } else if (owned) - Result{ .owned = bun.ByteList.init(slice) } + Result{ .owned = bun.ByteList.fromOwnedSlice(slice) } else if (done) Result{ .into_array_and_done = .{ .len = @as(Blob.SizeType, @truncate(slice.len)), .value = view } } else @@ -1633,28 +1623,6 @@ pub const ReadResult = union(enum) { } }; -pub const AutoSizer = struct { - buffer: *bun.ByteList, - allocator: std.mem.Allocator, - max: usize, - - pub fn resize(this: 
*AutoSizer, size: usize) ![]u8 { - const available = this.buffer.cap - this.buffer.len; - if (available >= size) return this.buffer.ptr[this.buffer.len..this.buffer.cap][0..size]; - const to_grow = size -| available; - if (to_grow + @as(usize, this.buffer.cap) > this.max) - return this.buffer.ptr[this.buffer.len..this.buffer.cap]; - - var list = this.buffer.listManaged(this.allocator); - const prev_len = list.items.len; - try list.ensureTotalCapacity(to_grow + @as(usize, this.buffer.cap)); - this.buffer.update(list); - return this.buffer.ptr[prev_len..@as(usize, this.buffer.cap)]; - } -}; - -const string = []const u8; - const std = @import("std"); const bun = @import("bun"); diff --git a/src/bun.zig b/src/bun.zig index a991c7806a..9091ba0f8a 100644 --- a/src/bun.zig +++ b/src/bun.zig @@ -414,14 +414,12 @@ pub const StringHashMapUnowned = struct { pub const collections = @import("./collections.zig"); pub const MultiArrayList = bun.collections.MultiArrayList; pub const BabyList = collections.BabyList; -pub const OffsetList = collections.OffsetList; +pub const ByteList = collections.ByteList; // alias of BabyList(u8) +pub const OffsetByteList = collections.OffsetByteList; pub const bit_set = collections.bit_set; pub const HiveArray = collections.HiveArray; pub const BoundedArray = collections.BoundedArray; -pub const ByteList = BabyList(u8); -pub const OffsetByteList = OffsetList(u8); - pub fn DebugOnly(comptime Type: type) type { if (comptime Environment.isDebug) { return Type; @@ -3745,7 +3743,7 @@ pub const S3 = @import("./s3/client.zig"); /// decommits it or the memory allocator reuses it for a new allocation. /// So if we're about to free something sensitive, we should zero it out first. pub fn freeSensitive(allocator: std.mem.Allocator, slice: anytype) void { - @memset(@constCast(slice), 0); + std.crypto.secureZero(std.meta.Child(@TypeOf(slice)), @constCast(slice)); allocator.free(slice); } diff --git a/src/bundler/AstBuilder.zig b/src/bundler/AstBuilder.zig index 419285bbe7..b42ac70c55 100644 --- a/src/bundler/AstBuilder.zig +++ b/src/bundler/AstBuilder.zig @@ -101,7 +101,7 @@ pub const AstBuilder = struct { .source_index = p.source_index, .tag = .symbol, }; - try p.current_scope.generated.push(p.allocator, ref); + try p.current_scope.generated.append(p.allocator, ref); try p.declared_symbols.append(p.allocator, .{ .ref = ref, .is_top_level = p.scopes.items.len == 0 or p.current_scope == p.scopes.items[0], @@ -260,16 +260,16 @@ pub const AstBuilder = struct { parts.mut(1).declared_symbols = p.declared_symbols; parts.mut(1).scopes = p.scopes.items; - parts.mut(1).import_record_indices = BabyList(u32).fromList(p.import_records_for_current_part); + parts.mut(1).import_record_indices = BabyList(u32).moveFromList(&p.import_records_for_current_part); return .{ .parts = parts, .module_scope = module_scope.*, - .symbols = js_ast.Symbol.List.fromList(p.symbols), + .symbols = js_ast.Symbol.List.moveFromList(&p.symbols), .exports_ref = Ref.None, .wrapper_ref = Ref.None, .module_ref = p.module_ref, - .import_records = ImportRecord.List.fromList(p.import_records), + .import_records = ImportRecord.List.moveFromList(&p.import_records), .export_star_import_records = &.{}, .approximate_newline_count = 1, .exports_kind = .esm, diff --git a/src/bundler/Chunk.zig b/src/bundler/Chunk.zig index 2abc1e462b..ac17d003f3 100644 --- a/src/bundler/Chunk.zig +++ b/src/bundler/Chunk.zig @@ -528,7 +528,7 @@ pub const Chunk = struct { pub fn deinit(self: *Self, a: std.mem.Allocator) void { // do shallow deinit since 
`LayerName` has // allocations in arena - self.deinitWithAllocator(a); + self.clearAndFree(a); } }); diff --git a/src/bundler/LinkerContext.zig b/src/bundler/LinkerContext.zig index 7b9cd948ed..a8109b3240 100644 --- a/src/bundler/LinkerContext.zig +++ b/src/bundler/LinkerContext.zig @@ -307,7 +307,7 @@ pub const LinkerContext = struct { @panic("Assertion failed: HTML import file not found in pathToSourceIndexMap"); }; - bun.handleOom(html_source_indices.push(this.allocator(), source_index)); + bun.handleOom(html_source_indices.append(this.allocator(), source_index)); // S.LazyExport is a call to __jsonParse. const original_ref = parts[html_import] @@ -454,7 +454,7 @@ pub const LinkerContext = struct { var parts_list = this.allocator().alloc(u32, 1) catch unreachable; parts_list[0] = part_index; - top_level.put(this.allocator(), ref, BabyList(u32).init(parts_list)) catch unreachable; + top_level.put(this.allocator(), ref, BabyList(u32).fromOwnedSlice(parts_list)) catch unreachable; var resolved_exports = &this.graph.meta.items(.resolved_exports)[source_index]; resolved_exports.put(this.allocator(), alias, ExportData{ @@ -2074,7 +2074,7 @@ pub const LinkerContext = struct { .{ .ref = c.graph.ast.items(.wrapper_ref)[source_index], .is_top_level = true }, }, ) catch unreachable, - .dependencies = Dependency.List.init(dependencies), + .dependencies = Dependency.List.fromOwnedSlice(dependencies), }, ) catch unreachable; bun.assert(part_index != js_ast.namespace_export_part_index); @@ -2126,7 +2126,7 @@ pub const LinkerContext = struct { .declared_symbols = js_ast.DeclaredSymbol.List.fromSlice(c.allocator(), &[_]js_ast.DeclaredSymbol{ .{ .ref = wrapper_ref, .is_top_level = true }, }) catch unreachable, - .dependencies = Dependency.List.init(dependencies), + .dependencies = Dependency.List.fromOwnedSlice(dependencies), }, ) catch unreachable; bun.assert(part_index != js_ast.namespace_export_part_index); @@ -2315,7 +2315,7 @@ pub const LinkerContext = struct { c.allocator(), import_ref, .{ - .re_exports = bun.BabyList(js_ast.Dependency).init(re_exports.items), + .re_exports = bun.BabyList(js_ast.Dependency).fromOwnedSlice(re_exports.items), .data = .{ .source_index = Index.source(result.source_index), .import_ref = result.ref, @@ -2334,7 +2334,7 @@ pub const LinkerContext = struct { c.allocator(), import_ref, .{ - .re_exports = bun.BabyList(js_ast.Dependency).init(re_exports.items), + .re_exports = bun.BabyList(js_ast.Dependency).fromOwnedSlice(re_exports.items), .data = .{ .source_index = Index.source(result.source_index), .import_ref = result.ref, @@ -2497,7 +2497,7 @@ pub const LinkerContext = struct { try pieces.append(OutputPiece.init(output, OutputPiece.Query.none)); return .{ - .pieces = bun.BabyList(Chunk.OutputPiece).init(pieces.items), + .pieces = bun.BabyList(Chunk.OutputPiece).fromOwnedSlice(pieces.items), }; } }; diff --git a/src/bundler/LinkerGraph.zig b/src/bundler/LinkerGraph.zig index e9a2705848..b29fdaa47f 100644 --- a/src/bundler/LinkerGraph.zig +++ b/src/bundler/LinkerGraph.zig @@ -59,15 +59,16 @@ pub fn generateNewSymbol(this: *LinkerGraph, source_index: u32, kind: Symbol.Kin ref.tag = .symbol; // TODO: will this crash on resize due to using threadlocal mimalloc heap? 
- source_symbols.push( + source_symbols.append( this.allocator, .{ .kind = kind, .original_name = original_name, }, - ) catch unreachable; + ) catch |err| bun.handleOom(err); - this.ast.items(.module_scope)[source_index].generated.push(this.allocator, ref) catch unreachable; + this.ast.items(.module_scope)[source_index].generated.append(this.allocator, ref) catch |err| + bun.handleOom(err); return ref; } @@ -98,7 +99,7 @@ pub fn addPartToFile( ) !u32 { var parts: *Part.List = &graph.ast.items(.parts)[id]; const part_id = @as(u32, @truncate(parts.len)); - try parts.push(graph.allocator, part); + try parts.append(graph.allocator, part); var top_level_symbol_to_parts_overlay: ?*TopLevelSymbolToParts = null; const Iterator = struct { @@ -127,12 +128,12 @@ pub fn addPartToFile( list.appendSliceAssumeCapacity(original_parts.slice()); list.appendAssumeCapacity(self.part_id); - entry.value_ptr.* = .init(list.items); + entry.value_ptr.* = .fromOwnedSlice(list.items); } else { entry.value_ptr.* = BabyList(u32).fromSlice(self.graph.allocator, &.{self.part_id}) catch |err| bun.handleOom(err); } } else { - entry.value_ptr.push(self.graph.allocator, self.part_id) catch unreachable; + bun.handleOom(entry.value_ptr.append(self.graph.allocator, self.part_id)); } } }; @@ -144,7 +145,7 @@ pub fn addPartToFile( .top_level_symbol_to_parts_overlay = &top_level_symbol_to_parts_overlay, }; - js_ast.DeclaredSymbol.forEachTopLevelSymbol(&parts.ptr[part_id].declared_symbols, &ctx, Iterator.next); + js_ast.DeclaredSymbol.forEachTopLevelSymbol(&parts.mut(part_id).declared_symbols, &ctx, Iterator.next); return part_id; } @@ -352,7 +353,9 @@ pub fn load( } { - var input_symbols = js_ast.Symbol.Map.initList(js_ast.Symbol.NestedList.init(this.ast.items(.symbols))); + var input_symbols = js_ast.Symbol.Map.initList( + js_ast.Symbol.NestedList.fromBorrowedSliceDangerous(this.ast.items(.symbols)), + ); var symbols = bun.handleOom(input_symbols.symbols_for_source.clone(this.allocator)); for (symbols.slice(), input_symbols.symbols_for_source.slice()) |*dest, src| { dest.* = bun.handleOom(src.clone(this.allocator)); @@ -412,6 +415,26 @@ pub fn load( } } +/// Transfers ownership of the AST to the graph allocator. +/// This is valid only if all allocators are `MimallocArena`s. 
+pub fn takeAstOwnership(this: *LinkerGraph) void { + const ast = this.ast.slice(); + const heap: bun.allocators.MimallocArena.Borrowed = .downcast(this.allocator); + if (comptime !bun.collections.baby_list.safety_checks) return; + for (ast.items(.import_records)) |*import_records| { + import_records.transferOwnership(heap); + } + for (ast.items(.parts)) |*parts| { + parts.transferOwnership(heap); + for (parts.slice()) |*part| { + part.dependencies.transferOwnership(heap); + } + } + for (ast.items(.symbols)) |*symbols| { + symbols.transferOwnership(heap); + } +} + pub const File = struct { entry_bits: AutoBitSet = undefined, diff --git a/src/bundler/ParseTask.zig b/src/bundler/ParseTask.zig index 9f9cc14173..b59ee29ff8 100644 --- a/src/bundler/ParseTask.zig +++ b/src/bundler/ParseTask.zig @@ -419,12 +419,12 @@ fn getAST( }, Logger.Loc{ .start = 0 }), }; require_args[1] = Expr.init(E.Object, E.Object{ - .properties = G.Property.List.init(object_properties), + .properties = G.Property.List.fromOwnedSlice(object_properties), .is_single_line = true, }, Logger.Loc{ .start = 0 }); const require_call = Expr.init(E.Call, E.Call{ .target = require_property, - .args = BabyList(Expr).init(require_args), + .args = BabyList(Expr).fromOwnedSlice(require_args), }, Logger.Loc{ .start = 0 }); const root = Expr.init(E.Dot, E.Dot{ @@ -460,7 +460,7 @@ fn getAST( const root = Expr.init(E.Call, E.Call{ .target = .{ .data = .{ .e_require_call_target = {} }, .loc = .{ .start = 0 } }, - .args = BabyList(Expr).init(require_args), + .args = BabyList(Expr).fromOwnedSlice(require_args), }, Logger.Loc{ .start = 0 }); unique_key_for_additional_file.* = .{ @@ -1075,7 +1075,7 @@ fn runWithSourceCode( var transpiler = this.transpilerForTarget(task.known_target); errdefer transpiler.resetStore(); - var resolver: *Resolver = &transpiler.resolver; + const resolver: *Resolver = &transpiler.resolver; const file_path = &task.path; const loader = task.loader orelse file_path.loader(&transpiler.options.loaders) orelse options.Loader.file; @@ -1130,19 +1130,14 @@ fn runWithSourceCode( else .none; - if ( - // separate_ssr_graph makes boundaries switch to client because the server file uses that generated file as input. - // this is not done when there is one server graph because it is easier for plugins to deal with. - (use_directive == .client and + if (use_directive == .client and task.known_target != .bake_server_components_ssr and - this.ctx.framework.?.server_components.?.separate_ssr_graph) or - // set the target to the client when bundling client-side files - ((transpiler.options.server_components or transpiler.options.dev_server != null) and - task.known_target == .browser)) + this.ctx.framework.?.server_components.?.separate_ssr_graph and + task.known_target != .browser) { - transpiler = this.ctx.client_transpiler.?; - resolver = &transpiler.resolver; - bun.assert(transpiler.options.target == .browser); + // separate_ssr_graph makes boundaries switch to client because the server file uses that generated file as input. + // this is not done when there is one server graph because it is easier for plugins to deal with. 
+ transpiler = this.transpilerForTarget(.browser); } const source = &Logger.Source{ @@ -1163,7 +1158,7 @@ fn runWithSourceCode( var opts = js_parser.Parser.Options.init(task.jsx, loader); opts.bundle = true; opts.warn_about_unbundled_modules = false; - opts.macro_context = &this.data.macro_context; + opts.macro_context = &transpiler.macro_context.?; opts.package_version = task.package_version; opts.features.allow_runtime = !source.index.isRuntime(); diff --git a/src/bundler/ThreadPool.zig b/src/bundler/ThreadPool.zig index 693e1d05ee..fb4a8c1db7 100644 --- a/src/bundler/ThreadPool.zig +++ b/src/bundler/ThreadPool.zig @@ -269,10 +269,8 @@ pub const ThreadPool = struct { pub const WorkerData = struct { log: *Logger.Log, estimated_input_lines_of_code: usize = 0, - macro_context: js_ast.Macro.MacroContext, - transpiler: Transpiler = undefined, - other_transpiler: Transpiler = undefined, - has_loaded_other_transpiler: bool = false, + transpiler: Transpiler, + other_transpiler: ?Transpiler = null, }; pub fn init(worker: *Worker, v2: *BundleV2) void { @@ -294,9 +292,8 @@ pub const ThreadPool = struct { this.ast_memory_allocator.reset(); this.data = WorkerData{ - .log = allocator.create(Logger.Log) catch unreachable, - .estimated_input_lines_of_code = 0, - .macro_context = undefined, + .log = bun.handleOom(allocator.create(Logger.Log)), + .transpiler = undefined, }; this.data.log.* = Logger.Log.init(allocator); this.ctx = ctx; @@ -313,20 +310,22 @@ pub const ThreadPool = struct { transpiler.setAllocator(allocator); transpiler.linker.resolver = &transpiler.resolver; transpiler.macro_context = js_ast.Macro.MacroContext.init(transpiler); - this.data.macro_context = transpiler.macro_context.?; const CacheSet = @import("../cache.zig"); transpiler.resolver.caches = CacheSet.Set.init(allocator); } pub fn transpilerForTarget(this: *Worker, target: bun.options.Target) *Transpiler { if (target == .browser and this.data.transpiler.options.target != target) { - if (!this.data.has_loaded_other_transpiler) { - this.data.has_loaded_other_transpiler = true; - this.initializeTranspiler(&this.data.other_transpiler, this.ctx.client_transpiler.?, this.allocator); - } - - bun.debugAssert(this.data.other_transpiler.options.target == target); - return &this.data.other_transpiler; + const other_transpiler = if (this.data.other_transpiler) |*other| + other + else blk: { + this.data.other_transpiler = undefined; + const other = &this.data.other_transpiler.?; + this.initializeTranspiler(other, this.ctx.client_transpiler.?, this.allocator); + break :blk other; + }; + bun.debugAssert(other_transpiler.options.target == target); + return other_transpiler; } return &this.data.transpiler; diff --git a/src/bundler/bundle_v2.zig b/src/bundler/bundle_v2.zig index 8cf22fc9f1..f19971e026 100644 --- a/src/bundler/bundle_v2.zig +++ b/src/bundler/bundle_v2.zig @@ -758,7 +758,7 @@ pub const BundleV2 = struct { if (!this.enqueueOnLoadPluginIfNeeded(task)) { if (loader.shouldCopyForBundling()) { var additional_files: *BabyList(AdditionalFile) = &this.graph.input_files.items(.additional_files)[source_index.get()]; - additional_files.push(this.allocator(), .{ .source_index = task.source_index.get() }) catch unreachable; + bun.handleOom(additional_files.append(this.allocator(), .{ .source_index = task.source_index.get() })); this.graph.input_files.items(.side_effects)[source_index.get()] = .no_side_effects__pure_data; this.graph.estimated_file_loader_count += 1; } @@ -824,7 +824,7 @@ pub const BundleV2 = struct { if 
(!this.enqueueOnLoadPluginIfNeeded(task)) { if (loader.shouldCopyForBundling()) { var additional_files: *BabyList(AdditionalFile) = &this.graph.input_files.items(.additional_files)[source_index.get()]; - additional_files.push(this.allocator(), .{ .source_index = task.source_index.get() }) catch unreachable; + bun.handleOom(additional_files.append(this.allocator(), .{ .source_index = task.source_index.get() })); this.graph.input_files.items(.side_effects)[source_index.get()] = _resolver.SideEffects.no_side_effects__pure_data; this.graph.estimated_file_loader_count += 1; } @@ -1138,8 +1138,8 @@ pub const BundleV2 = struct { bun.safety.alloc.assertEq(this.allocator(), this.transpiler.allocator); bun.safety.alloc.assertEq(this.allocator(), this.linker.graph.allocator); this.linker.graph.ast = try this.graph.ast.clone(this.allocator()); - var ast = this.linker.graph.ast.slice(); - for (ast.items(.module_scope)) |*module_scope| { + + for (this.linker.graph.ast.items(.module_scope)) |*module_scope| { for (module_scope.children.slice()) |child| { child.parent = module_scope; } @@ -1150,6 +1150,10 @@ pub const BundleV2 = struct { module_scope.generated = try module_scope.generated.clone(this.allocator()); } + + // Some parts of the AST are owned by worker allocators at this point. + // Transfer ownership to the graph heap. + this.linker.graph.takeAstOwnership(); } /// This generates the two asts for 'bun:bake/client' and 'bun:bake/server'. Both are generated @@ -1249,7 +1253,7 @@ pub const BundleV2 = struct { try client_manifest_props.append(alloc, .{ .key = client_path, .value = server.newExpr(E.Object{ - .properties = G.Property.List.init(client_manifest_items), + .properties = G.Property.List.fromOwnedSlice(client_manifest_items), }), }); } else { @@ -1264,7 +1268,7 @@ pub const BundleV2 = struct { .ref = try server.newSymbol(.other, "serverManifest"), }, Logger.Loc.Empty), .value = server.newExpr(E.Object{ - .properties = G.Property.List.fromList(server_manifest_props), + .properties = G.Property.List.moveFromList(&server_manifest_props), }), }}), .is_export = true, @@ -1276,7 +1280,7 @@ pub const BundleV2 = struct { .ref = try server.newSymbol(.other, "ssrManifest"), }, Logger.Loc.Empty), .value = server.newExpr(E.Object{ - .properties = G.Property.List.fromList(client_manifest_props), + .properties = G.Property.List.moveFromList(&client_manifest_props), }), }}), .is_export = true, @@ -1316,7 +1320,7 @@ pub const BundleV2 = struct { if (!this.enqueueOnLoadPluginIfNeeded(task)) { if (loader.shouldCopyForBundling()) { var additional_files: *BabyList(AdditionalFile) = &this.graph.input_files.items(.additional_files)[source_index.get()]; - additional_files.push(this.allocator(), .{ .source_index = task.source_index.get() }) catch unreachable; + bun.handleOom(additional_files.append(this.allocator(), .{ .source_index = task.source_index.get() })); this.graph.input_files.items(.side_effects)[source_index.get()] = _resolver.SideEffects.no_side_effects__pure_data; this.graph.estimated_file_loader_count += 1; } @@ -1370,7 +1374,7 @@ pub const BundleV2 = struct { if (!this.enqueueOnLoadPluginIfNeeded(task)) { if (loader.shouldCopyForBundling()) { var additional_files: *BabyList(AdditionalFile) = &this.graph.input_files.items(.additional_files)[source_index.get()]; - additional_files.push(this.allocator(), .{ .source_index = task.source_index.get() }) catch unreachable; + bun.handleOom(additional_files.append(this.allocator(), .{ .source_index = task.source_index.get() })); 
this.graph.input_files.items(.side_effects)[source_index.get()] = _resolver.SideEffects.no_side_effects__pure_data; this.graph.estimated_file_loader_count += 1; } @@ -1679,9 +1683,9 @@ pub const BundleV2 = struct { .entry_point_index = null, .is_executable = false, })) catch unreachable; - additional_files[index].push(this.allocator(), AdditionalFile{ + additional_files[index].append(this.allocator(), AdditionalFile{ .output_file = @as(u32, @truncate(additional_output_files.items.len - 1)), - }) catch unreachable; + }) catch |err| bun.handleOom(err); } } @@ -2259,7 +2263,7 @@ pub const BundleV2 = struct { if (should_copy_for_bundling) { const source_index = load.source_index; var additional_files: *BabyList(AdditionalFile) = &this.graph.input_files.items(.additional_files)[source_index.get()]; - additional_files.push(this.allocator(), .{ .source_index = source_index.get() }) catch unreachable; + bun.handleOom(additional_files.append(this.allocator(), .{ .source_index = source_index.get() })); this.graph.input_files.items(.side_effects)[source_index.get()] = .no_side_effects__pure_data; this.graph.estimated_file_loader_count += 1; } @@ -2458,7 +2462,7 @@ pub const BundleV2 = struct { if (!this.enqueueOnLoadPluginIfNeeded(task)) { if (loader.shouldCopyForBundling()) { var additional_files: *BabyList(AdditionalFile) = &this.graph.input_files.items(.additional_files)[source_index.get()]; - additional_files.push(this.allocator(), .{ .source_index = task.source_index.get() }) catch unreachable; + bun.handleOom(additional_files.append(this.allocator(), .{ .source_index = task.source_index.get() })); this.graph.input_files.items(.side_effects)[source_index.get()] = _resolver.SideEffects.no_side_effects__pure_data; this.graph.estimated_file_loader_count += 1; } @@ -2492,7 +2496,7 @@ pub const BundleV2 = struct { if (!entry.found_existing) { entry.value_ptr.* = .{}; } - entry.value_ptr.push( + entry.value_ptr.append( this.allocator(), .{ .to_source_index = source_index, @@ -3529,7 +3533,7 @@ pub const BundleV2 = struct { import_record.source_index = fake_input_file.source.index; try this.pathToSourceIndexMap(target).put(this.allocator(), path_text, fake_input_file.source.index.get()); - try graph.html_imports.server_source_indices.push(this.allocator(), fake_input_file.source.index.get()); + try graph.html_imports.server_source_indices.append(this.allocator(), fake_input_file.source.index.get()); this.ensureClientTranspiler(); } @@ -3710,7 +3714,7 @@ pub const BundleV2 = struct { if (loader.shouldCopyForBundling()) { var additional_files: *BabyList(AdditionalFile) = &graph.input_files.items(.additional_files)[result.source.index.get()]; - additional_files.push(this.allocator(), .{ .source_index = new_task.source_index.get() }) catch unreachable; + bun.handleOom(additional_files.append(this.allocator(), .{ .source_index = new_task.source_index.get() })); new_input_file.side_effects = _resolver.SideEffects.no_side_effects__pure_data; graph.estimated_file_loader_count += 1; } @@ -3719,7 +3723,7 @@ pub const BundleV2 = struct { } else { if (loader.shouldCopyForBundling()) { var additional_files: *BabyList(AdditionalFile) = &graph.input_files.items(.additional_files)[result.source.index.get()]; - additional_files.push(this.allocator(), .{ .source_index = existing.value_ptr.* }) catch unreachable; + bun.handleOom(additional_files.append(this.allocator(), .{ .source_index = existing.value_ptr.* })); graph.estimated_file_loader_count += 1; } @@ -3735,16 +3739,15 @@ pub const BundleV2 = struct { 
result.loader.isCSS(); if (this.resolve_tasks_waiting_for_import_source_index.fetchSwapRemove(result.source.index.get())) |pending_entry| { - for (pending_entry.value.slice()) |to_assign| { + var value = pending_entry.value; + for (value.slice()) |to_assign| { if (save_import_record_source_index or input_file_loaders[to_assign.to_source_index.get()].isCSS()) { import_records.slice()[to_assign.import_record_index].source_index = to_assign.to_source_index; } } - - var list = pending_entry.value.list(); - list.deinit(this.allocator()); + value.deinit(this.allocator()); } if (result.ast.css != null) { diff --git a/src/bundler/linker_context/computeChunks.zig b/src/bundler/linker_context/computeChunks.zig index 7f73e32e9d..266d71c35a 100644 --- a/src/bundler/linker_context/computeChunks.zig +++ b/src/bundler/linker_context/computeChunks.zig @@ -286,7 +286,7 @@ pub noinline fn computeChunks( } // We don't care about the order of the HTML chunks that have no JS chunks. - try sorted_chunks.append(this.allocator(), html_chunks.values()); + try sorted_chunks.appendSlice(this.allocator(), html_chunks.values()); break :sort_chunks sorted_chunks.slice(); }; diff --git a/src/bundler/linker_context/computeCrossChunkDependencies.zig b/src/bundler/linker_context/computeCrossChunkDependencies.zig index 111281f41e..8e4c3ba3ac 100644 --- a/src/bundler/linker_context/computeCrossChunkDependencies.zig +++ b/src/bundler/linker_context/computeCrossChunkDependencies.zig @@ -237,7 +237,7 @@ fn computeCrossChunkDependenciesWithChunkMetas(c: *LinkerContext, chunks: []Chun var entry = try js .imports_from_other_chunks .getOrPutValue(c.allocator(), other_chunk_index, .{}); - try entry.value_ptr.push(c.allocator(), .{ + try entry.value_ptr.append(c.allocator(), .{ .ref = import_ref, }); } @@ -272,12 +272,10 @@ fn computeCrossChunkDependenciesWithChunkMetas(c: *LinkerContext, chunks: []Chun const dynamic_chunk_indices = chunk_meta.dynamic_imports.keys(); std.sort.pdq(Index.Int, dynamic_chunk_indices, {}, std.sort.asc(Index.Int)); - var imports = chunk.cross_chunk_imports.listManaged(c.allocator()); - defer chunk.cross_chunk_imports.update(imports); - imports.ensureUnusedCapacity(dynamic_chunk_indices.len) catch unreachable; - const prev_len = imports.items.len; - imports.items.len += dynamic_chunk_indices.len; - for (dynamic_chunk_indices, imports.items[prev_len..]) |dynamic_chunk_index, *item| { + const new_imports = bun.handleOom( + chunk.cross_chunk_imports.writableSlice(c.allocator(), dynamic_chunk_indices.len), + ); + for (dynamic_chunk_indices, new_imports) |dynamic_chunk_index, *item| { item.* = .{ .import_kind = .dynamic, .chunk_index = dynamic_chunk_index, @@ -387,7 +385,7 @@ fn computeCrossChunkDependenciesWithChunkMetas(c: *LinkerContext, chunks: []Chun }); } - cross_chunk_imports.push(c.allocator(), .{ + cross_chunk_imports.append(c.allocator(), .{ .import_kind = .stmt, .chunk_index = cross_chunk_import.chunk_index, }) catch unreachable; @@ -397,7 +395,7 @@ fn computeCrossChunkDependenciesWithChunkMetas(c: *LinkerContext, chunks: []Chun .import_record_index = import_record_index, .namespace_ref = Ref.None, }; - cross_chunk_prefix_stmts.push( + cross_chunk_prefix_stmts.append( c.allocator(), .{ .data = .{ diff --git a/src/bundler/linker_context/convertStmtsForChunk.zig b/src/bundler/linker_context/convertStmtsForChunk.zig index d25ec13780..eb9eb2a3f8 100644 --- a/src/bundler/linker_context/convertStmtsForChunk.zig +++ b/src/bundler/linker_context/convertStmtsForChunk.zig @@ -188,7 +188,7 @@ pub fn 
convertStmtsForChunk( }, stmt.loc, ), - .args = bun.BabyList(Expr).init(args), + .args = bun.BabyList(Expr).fromOwnedSlice(args), }, stmt.loc, ), @@ -272,7 +272,7 @@ pub fn convertStmtsForChunk( }, stmt.loc, ), - .args = js_ast.ExprNodeList.init(args), + .args = js_ast.ExprNodeList.fromOwnedSlice(args), }, stmt.loc, ), diff --git a/src/bundler/linker_context/convertStmtsForChunkForDevServer.zig b/src/bundler/linker_context/convertStmtsForChunkForDevServer.zig index ca36be7531..144dc62ea2 100644 --- a/src/bundler/linker_context/convertStmtsForChunkForDevServer.zig +++ b/src/bundler/linker_context/convertStmtsForChunkForDevServer.zig @@ -72,7 +72,7 @@ pub fn convertStmtsForChunkForDevServer( .name = if (record.tag == .runtime) "require" else "builtin", .name_loc = stmt.loc, }, stmt.loc), - .args = .init(try allocator.dupe(Expr, &.{Expr.init(E.String, .{ + .args = .fromOwnedSlice(try allocator.dupe(Expr, &.{Expr.init(E.String, .{ .data = if (record.tag == .runtime) "bun:wrap" else record.path.pretty, }, record.range.loc)})), }, stmt.loc); @@ -144,7 +144,7 @@ pub fn convertStmtsForChunkForDevServer( .name_loc = .Empty, }, .Empty), .right = Expr.init(E.Array, .{ - .items = .fromList(esm_callbacks), + .items = .moveFromList(&esm_callbacks), .is_single_line = esm_callbacks.items.len <= 2, }, .Empty), }, .Empty), diff --git a/src/bundler/linker_context/doStep5.zig b/src/bundler/linker_context/doStep5.zig index 4b1d2520ac..440c224e1a 100644 --- a/src/bundler/linker_context/doStep5.zig +++ b/src/bundler/linker_context/doStep5.zig @@ -86,6 +86,10 @@ pub fn doStep5(c: *LinkerContext, source_index_: Index, _: usize) void { const our_imports_to_bind = imports_to_bind[id]; outer: for (parts_slice, 0..) |*part, part_index| { + // Previously owned by `c.allocator()`, which is a `MimallocArena` (from + // `BundleV2.graph.heap`). 
+ part.dependencies.transferOwnership(&worker.heap); + // Now that all files have been parsed, determine which property // accesses off of imported symbols are inlined enum values and // which ones aren't @@ -188,7 +192,7 @@ pub fn doStep5(c: *LinkerContext, source_index_: Index, _: usize) void { if (!local.found_existing or local.value_ptr.* != part_index) { local.value_ptr.* = @as(u32, @intCast(part_index)); // note: if we crash on append, it is due to threadlocal heaps in mimalloc - part.dependencies.push( + part.dependencies.append( allocator, .{ .source_index = Index.source(source_index), @@ -200,7 +204,7 @@ pub fn doStep5(c: *LinkerContext, source_index_: Index, _: usize) void { // Also map from imports to parts that use them if (named_imports.getPtr(ref)) |existing| { - existing.local_parts_with_uses.push(allocator, @intCast(part_index)) catch unreachable; + bun.handleOom(existing.local_parts_with_uses.append(allocator, @intCast(part_index))); } } } @@ -360,7 +364,7 @@ pub fn createExportsForFile( allocator, js_ast.S.Local, .{ - .decls = G.Decl.List.init(decls), + .decls = G.Decl.List.fromOwnedSlice(decls), }, loc, ); @@ -375,7 +379,12 @@ pub fn createExportsForFile( var args = allocator.alloc(js_ast.Expr, 2) catch unreachable; args[0..2].* = [_]js_ast.Expr{ js_ast.Expr.initIdentifier(exports_ref, loc), - js_ast.Expr.allocate(allocator, js_ast.E.Object, .{ .properties = js_ast.G.Property.List.fromList(properties) }, loc), + js_ast.Expr.allocate( + allocator, + js_ast.E.Object, + .{ .properties = .moveFromList(&properties) }, + loc, + ), }; remaining_stmts[0] = js_ast.Stmt.allocate( allocator, @@ -386,7 +395,7 @@ pub fn createExportsForFile( js_ast.E.Call, .{ .target = js_ast.Expr.initIdentifier(export_ref, loc), - .args = js_ast.ExprNodeList.init(args), + .args = js_ast.ExprNodeList.fromOwnedSlice(args), }, loc, ), @@ -433,7 +442,7 @@ pub fn createExportsForFile( E.Call, E.Call{ .target = Expr.initIdentifier(toCommonJSRef, Loc.Empty), - .args = js_ast.ExprNodeList.init(call_args), + .args = js_ast.ExprNodeList.fromOwnedSlice(call_args), }, Loc.Empty, ), @@ -451,7 +460,7 @@ pub fn createExportsForFile( c.graph.ast.items(.parts)[id].slice()[js_ast.namespace_export_part_index] = .{ .stmts = if (c.options.output_format != .internal_bake_dev) all_export_stmts else &.{}, .symbol_uses = ns_export_symbol_uses, - .dependencies = js_ast.Dependency.List.fromList(ns_export_dependencies), + .dependencies = js_ast.Dependency.List.moveFromList(&ns_export_dependencies), .declared_symbols = declared_symbols, // This can be removed if nothing uses it diff --git a/src/bundler/linker_context/findImportedCSSFilesInJSOrder.zig b/src/bundler/linker_context/findImportedCSSFilesInJSOrder.zig index 5d4e995e80..6a60363634 100644 --- a/src/bundler/linker_context/findImportedCSSFilesInJSOrder.zig +++ b/src/bundler/linker_context/findImportedCSSFilesInJSOrder.zig @@ -68,7 +68,7 @@ pub fn findImportedCSSFilesInJSOrder(this: *LinkerContext, temp_allocator: std.m } if (is_css and source_index.isValid()) { - bun.handleOom(o.push(temp, source_index)); + bun.handleOom(o.append(temp, source_index)); } } }.visit; diff --git a/src/bundler/linker_context/findImportedFilesInCSSOrder.zig b/src/bundler/linker_context/findImportedFilesInCSSOrder.zig index 926179da68..2384be6932 100644 --- a/src/bundler/linker_context/findImportedFilesInCSSOrder.zig +++ b/src/bundler/linker_context/findImportedFilesInCSSOrder.zig @@ -63,7 +63,7 @@ pub fn findImportedFilesInCSSOrder(this: *LinkerContext, temp_allocator: std.mem } } - 
visitor.visited.push( + visitor.visited.append( visitor.temp_allocator, source_index, ) catch |err| bun.handleOom(err); @@ -103,7 +103,7 @@ pub fn findImportedFilesInCSSOrder(this: *LinkerContext, temp_allocator: std.mem var nested_import_records = bun.handleOom(wrapping_import_records.clone(visitor.allocator)); // Clone these import conditions and append them to the state - bun.handleOom(nested_conditions.push(visitor.allocator, rule.import.conditionsWithImportRecords(visitor.allocator, &nested_import_records))); + bun.handleOom(nested_conditions.append(visitor.allocator, rule.import.conditionsWithImportRecords(visitor.allocator, &nested_import_records))); visitor.visit(record.source_index, &nested_conditions, wrapping_import_records); continue; } @@ -121,8 +121,8 @@ pub fn findImportedFilesInCSSOrder(this: *LinkerContext, temp_allocator: std.mem // merged. When this happens we need to generate a nested imported // CSS file using a data URL. if (rule.import.hasConditions()) { - bun.handleOom(all_conditions.push(visitor.allocator, rule.import.conditionsWithImportRecords(visitor.allocator, &all_import_records))); - visitor.order.push( + bun.handleOom(all_conditions.append(visitor.allocator, rule.import.conditionsWithImportRecords(visitor.allocator, &all_import_records))); + visitor.order.append( visitor.allocator, Chunk.CssImportOrder{ .kind = .{ @@ -133,7 +133,7 @@ pub fn findImportedFilesInCSSOrder(this: *LinkerContext, temp_allocator: std.mem }, ) catch |err| bun.handleOom(err); } else { - visitor.order.push( + visitor.order.append( visitor.allocator, Chunk.CssImportOrder{ .kind = .{ @@ -169,7 +169,7 @@ pub fn findImportedFilesInCSSOrder(this: *LinkerContext, temp_allocator: std.mem ); } // Accumulate imports in depth-first postorder - visitor.order.push(visitor.allocator, Chunk.CssImportOrder{ + visitor.order.append(visitor.allocator, Chunk.CssImportOrder{ .kind = .{ .source_index = source_index }, .conditions = wrapping_conditions.*, }) catch |err| bun.handleOom(err); @@ -208,7 +208,7 @@ pub fn findImportedFilesInCSSOrder(this: *LinkerContext, temp_allocator: std.mem var is_at_layer_prefix = true; for (order.slice()) |*entry| { if ((entry.kind == .layers and is_at_layer_prefix) or entry.kind == .external_path) { - bun.handleOom(wip_order.push(temp_allocator, entry.*)); + bun.handleOom(wip_order.append(temp_allocator, entry.*)); } if (entry.kind != .layers) { is_at_layer_prefix = false; @@ -219,7 +219,7 @@ pub fn findImportedFilesInCSSOrder(this: *LinkerContext, temp_allocator: std.mem is_at_layer_prefix = true; for (order.slice()) |*entry| { if ((entry.kind != .layers or !is_at_layer_prefix) and entry.kind != .external_path) { - bun.handleOom(wip_order.push(temp_allocator, entry.*)); + bun.handleOom(wip_order.append(temp_allocator, entry.*)); } if (entry.kind != .layers) { is_at_layer_prefix = false; @@ -261,7 +261,7 @@ pub fn findImportedFilesInCSSOrder(this: *LinkerContext, temp_allocator: std.mem continue :next_backward; } } - bun.handleOom(gop.value_ptr.push(temp_allocator, i)); + bun.handleOom(gop.value_ptr.append(temp_allocator, i)); }, .external_path => |p| { const gop = bun.handleOom(external_path_duplicates.getOrPut(p.text)); @@ -279,7 +279,7 @@ pub fn findImportedFilesInCSSOrder(this: *LinkerContext, temp_allocator: std.mem continue :next_backward; } } - bun.handleOom(gop.value_ptr.push(temp_allocator, i)); + bun.handleOom(gop.value_ptr.append(temp_allocator, i)); }, .layers => {}, } @@ -405,7 +405,7 @@ pub fn findImportedFilesInCSSOrder(this: *LinkerContext, temp_allocator: 
std.mem if (index == layer_duplicates.len) { // This is the first time we've seen this combination of layer names. // Allocate a new set of duplicate indices to track this combination. - layer_duplicates.push(temp_allocator, DuplicateEntry{ + layer_duplicates.append(temp_allocator, DuplicateEntry{ .layers = layers_key, }) catch |err| bun.handleOom(err); } @@ -449,7 +449,7 @@ pub fn findImportedFilesInCSSOrder(this: *LinkerContext, temp_allocator: std.mem // Non-layer entries still need to be present because they have // other side effects beside inserting things in the layer order - bun.handleOom(wip_order.push(temp_allocator, entry.*)); + bun.handleOom(wip_order.append(temp_allocator, entry.*)); } // Don't add this to the duplicate list below because it's redundant @@ -457,11 +457,11 @@ pub fn findImportedFilesInCSSOrder(this: *LinkerContext, temp_allocator: std.mem } } - layer_duplicates.mut(index).indices.push( + layer_duplicates.mut(index).indices.append( temp_allocator, wip_order.len, ) catch |err| bun.handleOom(err); - bun.handleOom(wip_order.push(temp_allocator, entry.*)); + bun.handleOom(wip_order.append(temp_allocator, entry.*)); } debugCssOrder(this, &wip_order, .WHILE_OPTIMIZING_REDUNDANT_LAYER_RULES); @@ -484,7 +484,7 @@ pub fn findImportedFilesInCSSOrder(this: *LinkerContext, temp_allocator: std.mem did_clone = @intCast(prev_index); } // need to clone the layers here as they could be references to css ast - wip_order.mut(prev_index).kind.layers.toOwned(temp_allocator).append( + wip_order.mut(prev_index).kind.layers.toOwned(temp_allocator).appendSlice( temp_allocator, entry.kind.layers.inner().sliceConst(), ) catch |err| bun.handleOom(err); diff --git a/src/bundler/linker_context/generateCodeForFileInChunkJS.zig b/src/bundler/linker_context/generateCodeForFileInChunkJS.zig index a3486a0214..514abc7e0b 100644 --- a/src/bundler/linker_context/generateCodeForFileInChunkJS.zig +++ b/src/bundler/linker_context/generateCodeForFileInChunkJS.zig @@ -365,7 +365,7 @@ pub fn generateCodeForFileInChunkJS( }, Logger.Loc.Empty, ), - .args = bun.BabyList(Expr).init(cjs_args), + .args = bun.BabyList(Expr).fromOwnedSlice(cjs_args), }, Logger.Loc.Empty, ); @@ -388,7 +388,7 @@ pub fn generateCodeForFileInChunkJS( Stmt.alloc( S.Local, S.Local{ - .decls = G.Decl.List.init(decls), + .decls = G.Decl.List.fromOwnedSlice(decls), }, Logger.Loc.Empty, ), @@ -502,7 +502,7 @@ pub fn generateCodeForFileInChunkJS( Stmt.alloc( S.Local, S.Local{ - .decls = G.Decl.List.fromList(hoist.decls), + .decls = G.Decl.List.moveFromList(&hoist.decls), }, Logger.Loc.Empty, ), @@ -529,7 +529,7 @@ pub fn generateCodeForFileInChunkJS( // "var init_foo = __esm(...);" const value = Expr.init(E.Call, .{ .target = Expr.initIdentifier(c.esm_runtime_ref, Logger.Loc.Empty), - .args = bun.BabyList(Expr).init(esm_args), + .args = bun.BabyList(Expr).fromOwnedSlice(esm_args), }, Logger.Loc.Empty); var decls = bun.handleOom(temp_allocator.alloc(G.Decl, 1)); @@ -546,7 +546,7 @@ pub fn generateCodeForFileInChunkJS( stmts.outside_wrapper_prefix.append( Stmt.alloc(S.Local, .{ - .decls = G.Decl.List.init(decls), + .decls = G.Decl.List.fromOwnedSlice(decls), }, Logger.Loc.Empty), ) catch |err| bun.handleOom(err); } else { @@ -642,12 +642,12 @@ fn mergeAdjacentLocalStmts(stmts: *std.ArrayList(Stmt), allocator: std.mem.Alloc if (did_merge_with_previous_local) { // Avoid O(n^2) behavior for repeated variable declarations // Appending to this decls list is safe because did_merge_with_previous_local is true - before.decls.append(allocator, 
after.decls.slice()) catch unreachable; + before.decls.appendSlice(allocator, after.decls.slice()) catch unreachable; } else { // Append the declarations to the previous variable statement did_merge_with_previous_local = true; - var clone = std.ArrayList(G.Decl).initCapacity(allocator, before.decls.len + after.decls.len) catch unreachable; + var clone = bun.BabyList(G.Decl).initCapacity(allocator, before.decls.len + after.decls.len) catch unreachable; clone.appendSliceAssumeCapacity(before.decls.slice()); clone.appendSliceAssumeCapacity(after.decls.slice()); // we must clone instead of overwrite in-place incase the same S.Local is used across threads @@ -656,7 +656,7 @@ fn mergeAdjacentLocalStmts(stmts: *std.ArrayList(Stmt), allocator: std.mem.Alloc allocator, S.Local, S.Local{ - .decls = BabyList(G.Decl).fromList(clone), + .decls = clone, .is_export = before.is_export, .was_commonjs_export = before.was_commonjs_export, .was_ts_import_equals = before.was_ts_import_equals, diff --git a/src/bundler/linker_context/generateCompileResultForCssChunk.zig b/src/bundler/linker_context/generateCompileResultForCssChunk.zig index 86546e0658..5ef7dce172 100644 --- a/src/bundler/linker_context/generateCompileResultForCssChunk.zig +++ b/src/bundler/linker_context/generateCompileResultForCssChunk.zig @@ -68,7 +68,9 @@ fn generateCompileResultForCssChunkImpl(worker: *ThreadPool.Worker, c: *LinkerCo }; }, .external_path => { - var import_records = BabyList(ImportRecord).init(css_import.condition_import_records.sliceConst()); + var import_records = BabyList(ImportRecord).fromBorrowedSliceDangerous( + css_import.condition_import_records.sliceConst(), + ); const printer_options = bun.css.PrinterOptions{ // TODO: make this more configurable .minify = c.options.minify_whitespace, diff --git a/src/bundler/linker_context/postProcessJSChunk.zig b/src/bundler/linker_context/postProcessJSChunk.zig index 2328af78f4..ccbc8754d5 100644 --- a/src/bundler/linker_context/postProcessJSChunk.zig +++ b/src/bundler/linker_context/postProcessJSChunk.zig @@ -43,7 +43,7 @@ pub fn postProcessJSChunk(ctx: GenerateChunkCtx, worker: *ThreadPool.Worker, chu }; var cross_chunk_import_records = ImportRecord.List.initCapacity(worker.allocator, chunk.cross_chunk_imports.len) catch unreachable; - defer cross_chunk_import_records.deinitWithAllocator(worker.allocator); + defer cross_chunk_import_records.deinit(worker.allocator); for (chunk.cross_chunk_imports.slice()) |import_record| { cross_chunk_import_records.appendAssumeCapacity( .{ diff --git a/src/bundler/linker_context/prepareCssAstsForChunk.zig b/src/bundler/linker_context/prepareCssAstsForChunk.zig index 3c217eb563..c92b3cdc8f 100644 --- a/src/bundler/linker_context/prepareCssAstsForChunk.zig +++ b/src/bundler/linker_context/prepareCssAstsForChunk.zig @@ -50,7 +50,7 @@ fn prepareCssAstsForChunkImpl(c: *LinkerContext, chunk: *Chunk, allocator: std.m var conditions: ?*bun.css.ImportConditions = null; if (entry.conditions.len > 0) { conditions = entry.conditions.mut(0); - entry.condition_import_records.push( + entry.condition_import_records.append( allocator, bun.ImportRecord{ .kind = .at, .path = p.*, .range = Logger.Range{} }, ) catch |err| bun.handleOom(err); @@ -118,7 +118,7 @@ fn prepareCssAstsForChunkImpl(c: *LinkerContext, chunk: *Chunk, allocator: std.m var empty_conditions = bun.css.ImportConditions{}; const actual_conditions = if (conditions) |cc| cc else &empty_conditions; - entry.condition_import_records.push(allocator, bun.ImportRecord{ + 
entry.condition_import_records.append(allocator, bun.ImportRecord{ .kind = .at, .path = p.*, .range = Logger.Range.none, diff --git a/src/bundler/linker_context/scanImportsAndExports.zig b/src/bundler/linker_context/scanImportsAndExports.zig index 64bd1f6cb4..921f4fd74c 100644 --- a/src/bundler/linker_context/scanImportsAndExports.zig +++ b/src/bundler/linker_context/scanImportsAndExports.zig @@ -372,6 +372,10 @@ pub fn scanImportsAndExports(this: *LinkerContext) !void { LinkerContext.doStep5, this.graph.reachable_files, ); + + // Some parts of the AST may now be owned by worker allocators. Transfer ownership back + // to the graph allocator. + this.graph.takeAstOwnership(); } if (comptime FeatureFlags.help_catch_memory_issues) { @@ -537,10 +541,7 @@ pub fn scanImportsAndExports(this: *LinkerContext) !void { const total_len = parts_declaring_symbol.len + @as(usize, import.re_exports.len) + @as(usize, part.dependencies.len); if (part.dependencies.cap < total_len) { - var list = std.ArrayList(Dependency).init(this.allocator()); - list.ensureUnusedCapacity(total_len) catch unreachable; - list.appendSliceAssumeCapacity(part.dependencies.slice()); - part.dependencies.update(list); + bun.handleOom(part.dependencies.ensureTotalCapacity(this.allocator(), total_len)); } // Depend on the file containing the imported symbol @@ -618,7 +619,7 @@ pub fn scanImportsAndExports(this: *LinkerContext) !void { const entry_point_part_index = this.graph.addPartToFile( id, .{ - .dependencies = js_ast.Dependency.List.fromList(dependencies), + .dependencies = js_ast.Dependency.List.moveFromList(&dependencies), .can_be_removed_if_unused = false, }, ) catch |err| bun.handleOom(err); @@ -1020,7 +1021,7 @@ const ExportStarContext = struct { }) catch |err| bun.handleOom(err); } else if (gop.value_ptr.data.source_index.get() != other_source_index) { // Two different re-exports colliding makes it potentially ambiguous - gop.value_ptr.potentially_ambiguous_export_star_refs.push(this.allocator, .{ + gop.value_ptr.potentially_ambiguous_export_star_refs.append(this.allocator, .{ .data = .{ .source_index = Index.source(other_source_index), .import_ref = name.ref, diff --git a/src/cli/create_command.zig b/src/cli/create_command.zig index 854d90d78e..2fdca20a9d 100644 --- a/src/cli/create_command.zig +++ b/src/cli/create_command.zig @@ -666,7 +666,7 @@ pub const CreateCommand = struct { break :process_package_json; } - const properties_list = std.ArrayList(js_ast.G.Property).fromOwnedSlice(default_allocator, package_json_expr.data.e_object.properties.slice()); + var properties_list = std.ArrayList(js_ast.G.Property).fromOwnedSlice(default_allocator, package_json_expr.data.e_object.properties.slice()); if (ctx.log.errors > 0) { try ctx.log.print(Output.errorWriter()); @@ -744,7 +744,7 @@ pub const CreateCommand = struct { // has_react_scripts = has_react_scripts or property.hasAnyPropertyNamed(&.{"react-scripts"}); // has_relay = has_relay or property.hasAnyPropertyNamed(&.{ "react-relay", "relay-runtime", "babel-plugin-relay" }); - // property.data.e_object.properties = js_ast.G.Property.List.init(Prune.prune(property.data.e_object.properties.slice())); + // property.data.e_object.properties = js_ast.G.Property.List.fromBorrowedSliceDangerous(Prune.prune(property.data.e_object.properties.slice())); if (property.data.e_object.properties.len > 0) { has_dependencies = true; dev_dependencies = q.expr; @@ -765,8 +765,7 @@ pub const CreateCommand = struct { // has_react_scripts = has_react_scripts or 
property.hasAnyPropertyNamed(&.{"react-scripts"}); // has_relay = has_relay or property.hasAnyPropertyNamed(&.{ "react-relay", "relay-runtime", "babel-plugin-relay" }); - // property.data.e_object.properties = js_ast.G.Property.List.init(Prune.prune(property.data.e_object.properties.slice())); - property.data.e_object.properties = js_ast.G.Property.List.init(property.data.e_object.properties.slice()); + // property.data.e_object.properties = js_ast.G.Property.List.fromBorrowedSliceDangerous(Prune.prune(property.data.e_object.properties.slice())); if (property.data.e_object.properties.len > 0) { has_dependencies = true; @@ -1052,9 +1051,12 @@ pub const CreateCommand = struct { pub const bun_bun_for_nextjs_task: string = "bun bun --use next"; }; - InjectionPrefill.bun_macro_relay_object.properties = js_ast.G.Property.List.init(InjectionPrefill.bun_macro_relay_properties[0..]); - InjectionPrefill.bun_macros_relay_object.properties = js_ast.G.Property.List.init(&InjectionPrefill.bun_macros_relay_object_properties); - InjectionPrefill.bun_macros_relay_only_object.properties = js_ast.G.Property.List.init(&InjectionPrefill.bun_macros_relay_only_object_properties); + InjectionPrefill.bun_macro_relay_object.properties = js_ast.G.Property.List + .fromBorrowedSliceDangerous(InjectionPrefill.bun_macro_relay_properties[0..]); + InjectionPrefill.bun_macros_relay_object.properties = js_ast.G.Property.List + .fromBorrowedSliceDangerous(&InjectionPrefill.bun_macros_relay_object_properties); + InjectionPrefill.bun_macros_relay_only_object.properties = js_ast.G.Property.List + .fromBorrowedSliceDangerous(&InjectionPrefill.bun_macros_relay_only_object_properties); // if (needs_to_inject_dev_dependency and dev_dependencies == null) { // var e_object = try ctx.allocator.create(E.Object); @@ -1264,7 +1266,7 @@ pub const CreateCommand = struct { package_json_expr.data.e_object.is_single_line = false; - package_json_expr.data.e_object.properties = js_ast.G.Property.List.fromList(properties_list); + package_json_expr.data.e_object.properties = js_ast.G.Property.List.moveFromList(&properties_list); { var i: usize = 0; var property_i: usize = 0; @@ -1303,7 +1305,9 @@ pub const CreateCommand = struct { script_property_out_i += 1; } - property.value.?.data.e_object.properties = js_ast.G.Property.List.init(scripts_properties[0..script_property_out_i]); + property.value.?.data.e_object.properties = js_ast.G.Property.List.fromBorrowedSliceDangerous( + scripts_properties[0..script_property_out_i], + ); } } @@ -1382,7 +1386,7 @@ pub const CreateCommand = struct { } } } - package_json_expr.data.e_object.properties = js_ast.G.Property.List.init(package_json_expr.data.e_object.properties.ptr[0..property_i]); + package_json_expr.data.e_object.properties.shrinkRetainingCapacity(property_i); } const file: bun.FD = .fromStdFile(package_json_file.?); diff --git a/src/cli/pm_pkg_command.zig b/src/cli/pm_pkg_command.zig index a694398c0f..cde17580f6 100644 --- a/src/cli/pm_pkg_command.zig +++ b/src/cli/pm_pkg_command.zig @@ -713,7 +713,7 @@ pub const PmPkgCommand = struct { } if (!found) return false; - var new_props: std.ArrayList(js_ast.G.Property) = try .initCapacity(allocator, old_props.len - 1); + var new_props: bun.BabyList(js_ast.G.Property) = try .initCapacity(allocator, old_props.len - 1); for (old_props) |prop| { if (prop.key) |k| { switch (k.data) { @@ -727,8 +727,7 @@ pub const PmPkgCommand = struct { } new_props.appendAssumeCapacity(prop); } - const new_list = js_ast.G.Property.List.fromList(new_props); - 
obj.data.e_object.properties = new_list; + obj.data.e_object.properties = new_props; return true; } diff --git a/src/cli/pm_view_command.zig b/src/cli/pm_view_command.zig index ec06f94be6..3ab8552edf 100644 --- a/src/cli/pm_view_command.zig +++ b/src/cli/pm_view_command.zig @@ -193,7 +193,7 @@ pub fn view(allocator: std.mem.Allocator, manager: *PackageManager, spec_: strin const versions_array = bun.ast.Expr.init( bun.ast.E.Array, bun.ast.E.Array{ - .items = .init(keys), + .items = .fromOwnedSlice(keys), }, .{ .start = -1 }, ); diff --git a/src/cli/publish_command.zig b/src/cli/publish_command.zig index 015b66735a..1c9fcf9f8d 100644 --- a/src/cli/publish_command.zig +++ b/src/cli/publish_command.zig @@ -900,7 +900,7 @@ pub const PublishCommand = struct { try json.set(allocator, "dist", Expr.init( E.Object, - .{ .properties = G.Property.List.init(dist_props) }, + .{ .properties = G.Property.List.fromOwnedSlice(dist_props) }, logger.Loc.Empty, )); @@ -988,7 +988,7 @@ pub const PublishCommand = struct { json.data.e_object.properties.ptr[bin_query.i].value = Expr.init( E.Object, .{ - .properties = G.Property.List.fromList(bin_props), + .properties = G.Property.List.moveFromList(&bin_props), }, logger.Loc.Empty, ); @@ -1064,7 +1064,7 @@ pub const PublishCommand = struct { json.data.e_object.properties.ptr[bin_query.i].value = Expr.init( E.Object, - .{ .properties = G.Property.List.fromList(bin_props) }, + .{ .properties = G.Property.List.moveFromList(&bin_props) }, logger.Loc.Empty, ); }, @@ -1153,7 +1153,11 @@ pub const PublishCommand = struct { } } - try json.set(allocator, "bin", Expr.init(E.Object, .{ .properties = G.Property.List.fromList(bin_props) }, logger.Loc.Empty)); + try json.set(allocator, "bin", Expr.init( + E.Object, + .{ .properties = G.Property.List.moveFromList(&bin_props) }, + logger.Loc.Empty, + )); } } diff --git a/src/collections.zig b/src/collections.zig index be939b0e03..5cfbc74de1 100644 --- a/src/collections.zig +++ b/src/collections.zig @@ -1,6 +1,8 @@ pub const MultiArrayList = @import("./collections/multi_array_list.zig").MultiArrayList; -pub const BabyList = @import("./collections/baby_list.zig").BabyList; -pub const OffsetList = @import("./collections/baby_list.zig").OffsetList; +pub const baby_list = @import("./collections/baby_list.zig"); +pub const BabyList = baby_list.BabyList; +pub const ByteList = baby_list.ByteList; // alias of BabyList(u8) +pub const OffsetByteList = baby_list.OffsetByteList; pub const bit_set = @import("./collections/bit_set.zig"); pub const HiveArray = @import("./collections/hive_array.zig").HiveArray; -pub const BoundedArray = @import("./collections/BoundedArray.zig").BoundedArray; +pub const BoundedArray = @import("./collections/bounded_array.zig").BoundedArray; diff --git a/src/collections/baby_list.zig b/src/collections/baby_list.zig index a41a6fd8f8..57feab74a6 100644 --- a/src/collections/baby_list.zig +++ b/src/collections/baby_list.zig @@ -1,62 +1,288 @@ /// This is like ArrayList except it stores the length and capacity as u32 /// In practice, it is very unusual to have lengths above 4 GiB pub fn BabyList(comptime Type: type) type { + const Origin = union(enum) { + owned, + borrowed: struct { + trace: if (traces_enabled) StoredTrace else void, + }, + }; + return struct { const Self = @This(); // NOTE: If you add, remove, or rename any public fields, you need to update // `looksLikeListContainerType` in `meta.zig`. - ptr: [*]Type = &[_]Type{}, + + /// Don't access this field directly, as it's not safety-checked. 
Use `.slice()`, `.at()`, + /// or `.mut()`. + ptr: [*]Type = &.{}, len: u32 = 0, cap: u32 = 0, + #origin: if (safety_checks) Origin else void = if (safety_checks) .owned, #allocator: bun.safety.CheckedAllocator = .{}, pub const Elem = Type; - pub fn parse(input: *bun.css.Parser) bun.css.Result(Self) { - return switch (input.parseCommaSeparated(Type, bun.css.generic.parseFor(Type))) { - .result => |v| return .{ .result = Self{ - .ptr = v.items.ptr, - .len = @intCast(v.items.len), - .cap = @intCast(v.capacity), - } }, - .err => |e| return .{ .err = e }, + pub const empty: Self = .{}; + + pub fn initCapacity(allocator: std.mem.Allocator, len: usize) OOM!Self { + var this = initWithBuffer(try allocator.alloc(Type, len)); + this.#allocator.set(allocator); + return this; + } + + pub fn initOne(allocator: std.mem.Allocator, value: Type) OOM!Self { + var items = try allocator.alloc(Type, 1); + items[0] = value; + return .{ + .ptr = @as([*]Type, @ptrCast(items.ptr)), + .len = 1, + .cap = 1, + .#allocator = .init(allocator), }; } - pub fn toCss(this: *const Self, comptime W: type, dest: *bun.css.Printer(W)) bun.css.PrintErr!void { - return bun.css.to_css.fromBabyList(Type, this, W, dest); - } + pub fn moveFromList(list_ptr: anytype) Self { + const ListType = std.meta.Child(@TypeOf(list_ptr)); - pub fn eql(lhs: *const Self, rhs: *const Self) bool { - if (lhs.len != rhs.len) return false; - for (lhs.sliceConst(), rhs.sliceConst()) |*a, *b| { - if (!bun.css.generic.eql(Type, a, b)) return false; + if (comptime ListType == Self) { + @compileError("unnecessary call to `moveFromList`"); } - return true; + + const unsupported_arg_msg = "unsupported argument to `moveFromList`: *" ++ + @typeName(ListType); + + const items = if (comptime @hasField(ListType, "items")) + list_ptr.items + else if (comptime std.meta.hasFn(ListType, "slice")) + list_ptr.slice() + else + @compileError(unsupported_arg_msg); + + const capacity = if (comptime @hasField(ListType, "capacity")) + list_ptr.capacity + else if (comptime @hasField(ListType, "cap")) + list_ptr.cap + else if (comptime std.meta.hasFn(ListType, "capacity")) + list_ptr.capacity() + else + @compileError(unsupported_arg_msg); + + if (comptime Environment.allow_assert) { + bun.assert(items.len <= capacity); + } + + var this: Self = .{ + .ptr = items.ptr, + .len = @intCast(items.len), + .cap = @intCast(capacity), + }; + + const allocator = if (comptime @hasField(ListType, "allocator")) + list_ptr.allocator + else if (comptime std.meta.hasFn(ListType, "allocator")) + list_ptr.allocator(); + + if (comptime @TypeOf(allocator) == void) { + list_ptr.* = .empty; + } else { + this.#allocator.set(bun.allocators.asStd(allocator)); + list_ptr.* = .init(allocator); + } + return this; } - pub fn set(this: *@This(), slice_: []Type) void { - this.ptr = slice_.ptr; - this.len = @intCast(slice_.len); - this.cap = @intCast(slice_.len); + /// Requirements: + /// + /// * `items` must be owned memory, allocated with some allocator. That same allocator must + /// be passed to methods that expect it, like `append`. + /// + /// * `items` must be the *entire* region of allocated memory. It cannot be a subslice. + /// If you really need an owned subslice, use `shrinkRetainingCapacity` followed by + /// `toOwnedSlice` on an `ArrayList`. 
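+        ///
+        /// Illustrative sketch (assumes `allocator` is the `std.mem.Allocator` that produced
+        /// `items`):
+        ///
+        ///     const items = try allocator.alloc(u32, 3);
+        ///     var list: BabyList(u32) = .fromOwnedSlice(items);
+        ///     try list.append(allocator, 42); // same allocator that allocated `items`
+        ///     list.deinit(allocator);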
+ pub fn fromOwnedSlice(items: []Type) Self { + return .{ + .ptr = items.ptr, + .len = @intCast(items.len), + .cap = @intCast(items.len), + }; } - pub fn available(this: *Self) []Type { - return this.ptr[this.len..this.cap]; + /// Same requirements as `fromOwnedSlice`. + pub fn initWithBuffer(buffer: []Type) Self { + return .{ + .ptr = buffer.ptr, + .len = 0, + .cap = @intCast(buffer.len), + }; } - pub fn deinitWithAllocator(this: *Self, allocator: std.mem.Allocator) void { + /// Copies all elements of `items` into new memory. Creates shallow copies. + pub fn fromSlice(allocator: std.mem.Allocator, items: []const Type) OOM!Self { + const allocated = try allocator.alloc(Type, items.len); + bun.copy(Type, allocated, items); + + return Self{ + .ptr = allocated.ptr, + .len = @intCast(allocated.len), + .cap = @intCast(allocated.len), + .#allocator = .init(allocator), + }; + } + + /// This method invalidates the `BabyList`. Use `clearAndFree` if you want to empty the + /// list instead. + pub fn deinit(this: *Self, allocator: std.mem.Allocator) void { + this.assertOwned(); this.listManaged(allocator).deinit(); + this.* = undefined; + } + + pub fn clearAndFree(this: *Self, allocator: std.mem.Allocator) void { + this.deinit(allocator); this.* = .{}; } - pub fn shrinkAndFree(this: *Self, allocator: std.mem.Allocator, size: usize) void { + pub fn clearRetainingCapacity(this: *Self) void { + this.len = 0; + } + + pub fn slice(this: Self) callconv(bun.callconv_inline) []Type { + return this.ptr[0..this.len]; + } + + /// Same as `.slice()`, with an explicit coercion to const. + pub fn sliceConst(this: Self) callconv(bun.callconv_inline) []const Type { + return this.slice(); + } + + pub fn at(this: Self, index: usize) callconv(bun.callconv_inline) *const Type { + bun.assert(index < this.len); + return &this.ptr[index]; + } + + pub fn mut(this: Self, index: usize) callconv(bun.callconv_inline) *Type { + bun.assert(index < this.len); + return &this.ptr[index]; + } + + pub fn first(this: Self) callconv(bun.callconv_inline) ?*Type { + return if (this.len > 0) &this.ptr[0] else null; + } + + pub fn last(this: Self) callconv(bun.callconv_inline) ?*Type { + return if (this.len > 0) &this.ptr[this.len - 1] else null; + } + + /// Empties the `BabyList`. 
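+        ///
+        /// Illustrative round trip (a sketch; the returned slice must be freed with the same
+        /// `allocator`):
+        ///
+        ///     var list: BabyList(u8) = try .initCapacity(allocator, 4);
+        ///     list.appendSliceAssumeCapacity("abcd");
+        ///     const bytes = try list.toOwnedSlice(allocator); // `list` is `.empty` afterwards
+        ///     defer allocator.free(bytes);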
+ pub fn toOwnedSlice(this: *Self, allocator: std.mem.Allocator) OOM![]Type { + if ((comptime safety_checks) and this.len != this.cap) this.assertOwned(); var list_ = this.listManaged(allocator); - list_.shrinkAndFree(size); + const result = try list_.toOwnedSlice(); + this.* = .empty; + return result; + } + + pub fn moveToList(this: *Self) std.ArrayListUnmanaged(Type) { + this.assertOwned(); + defer this.* = .empty; + return this.list(); + } + + pub fn moveToListManaged(this: *Self, allocator: std.mem.Allocator) std.ArrayList(Type) { + this.assertOwned(); + defer this.* = .empty; + return this.listManaged(allocator); + } + + pub fn expandToCapacity(this: *Self) void { + this.len = this.cap; + } + + pub fn ensureTotalCapacity( + this: *Self, + allocator: std.mem.Allocator, + new_capacity: usize, + ) !void { + if ((comptime safety_checks) and new_capacity > this.cap) this.assertOwned(); + var list_ = this.listManaged(allocator); + try list_.ensureTotalCapacity(new_capacity); this.update(list_); } + pub fn ensureTotalCapacityPrecise( + this: *Self, + allocator: std.mem.Allocator, + new_capacity: usize, + ) !void { + if ((comptime safety_checks) and new_capacity > this.cap) this.assertOwned(); + var list_ = this.listManaged(allocator); + try list_.ensureTotalCapacityPrecise(new_capacity); + this.update(list_); + } + + pub fn ensureUnusedCapacity( + this: *Self, + allocator: std.mem.Allocator, + count: usize, + ) OOM!void { + if ((comptime safety_checks) and count > this.cap - this.len) this.assertOwned(); + var list_ = this.listManaged(allocator); + try list_.ensureUnusedCapacity(count); + this.update(list_); + } + + pub fn shrinkAndFree(this: *Self, allocator: std.mem.Allocator, new_len: usize) void { + if ((comptime safety_checks) and new_len < this.cap) this.assertOwned(); + var list_ = this.listManaged(allocator); + list_.shrinkAndFree(new_len); + this.update(list_); + } + + pub fn shrinkRetainingCapacity(this: *Self, new_len: usize) void { + bun.assertf( + new_len <= this.len, + "shrinkRetainingCapacity: new len ({d}) cannot exceed old ({d})", + .{ new_len, this.len }, + ); + this.len = @intCast(new_len); + } + + pub fn append(this: *Self, allocator: std.mem.Allocator, value: Type) OOM!void { + if ((comptime safety_checks) and this.len == this.cap) this.assertOwned(); + var list_ = this.listManaged(allocator); + try list_.append(value); + this.update(list_); + } + + pub fn appendAssumeCapacity(this: *Self, value: Type) void { + bun.assert(this.cap > this.len); + this.ptr[this.len] = value; + this.len += 1; + } + + pub fn appendSlice(this: *Self, allocator: std.mem.Allocator, vals: []const Type) !void { + if ((comptime safety_checks) and this.cap - this.len < vals.len) this.assertOwned(); + var list_ = this.listManaged(allocator); + try list_.appendSlice(vals); + this.update(list_); + } + + pub fn appendSliceAssumeCapacity(this: *Self, values: []const Type) void { + bun.assert(this.cap >= this.len + @as(u32, @intCast(values.len))); + const tail = this.ptr[this.len .. 
this.len + values.len]; + bun.copy(Type, tail, values); + this.len += @intCast(values.len); + bun.assert(this.cap >= this.len); + } + + pub fn pop(this: *Self) ?Type { + if (this.len == 0) return null; + this.len -= 1; + return this.ptr[this.len]; + } + pub fn orderedRemove(this: *Self, index: usize) Type { var l = this.list(); defer this.update(l); @@ -69,70 +295,23 @@ pub fn BabyList(comptime Type: type) type { return l.swapRemove(index); } - pub fn sortAsc(this: *Self) void { - bun.strings.sortAsc(this.slice()); - } - - pub fn contains(this: Self, item: []const Type) bool { - return this.len > 0 and @intFromPtr(item.ptr) >= @intFromPtr(this.ptr) and @intFromPtr(item.ptr) < @intFromPtr(this.ptr) + this.len; - } - - pub fn initConst(items: []const Type) callconv(bun.callconv_inline) Self { - @setRuntimeSafety(false); - return Self{ - // Remove the const qualifier from the items - .ptr = @constCast(items.ptr), - .len = @intCast(items.len), - .cap = @intCast(items.len), - }; - } - - pub fn ensureUnusedCapacity(this: *Self, allocator: std.mem.Allocator, count: usize) !void { + pub fn insert(this: *Self, allocator: std.mem.Allocator, index: usize, val: Type) OOM!void { + if ((comptime safety_checks) and this.len == this.cap) this.assertOwned(); var list_ = this.listManaged(allocator); - try list_.ensureUnusedCapacity(count); + try list_.insert(index, val); this.update(list_); } - pub fn pop(this: *Self) ?Type { - if (this.len == 0) return null; - this.len -= 1; - return this.ptr[this.len]; - } - - pub fn clone(this: Self, allocator: std.mem.Allocator) !Self { - const copy = try this.list().clone(allocator); - return Self{ - .ptr = copy.items.ptr, - .len = @intCast(copy.items.len), - .cap = @intCast(copy.capacity), - }; - } - - pub fn deepClone(this: Self, allocator: std.mem.Allocator) !Self { - if (!@hasDecl(Type, "deepClone")) { - @compileError("Unsupported type for BabyList.deepClone(): " ++ @typeName(Type)); - } - - var list_ = try initCapacity(allocator, this.len); - for (this.slice()) |item| { - const clone_result = item.deepClone(allocator); - const cloned_item = switch (comptime @typeInfo(@TypeOf(clone_result))) { - .error_union => try clone_result, - else => clone_result, - }; - list_.appendAssumeCapacity(cloned_item); - } - return list_; - } - - /// Same as `deepClone` but calls `bun.outOfMemory` instead of returning an error. - /// `Type.deepClone` must not return any error except `error.OutOfMemory`. 
- pub fn deepCloneInfallible(this: Self, allocator: std.mem.Allocator) Self { - return bun.handleOom(this.deepClone(allocator)); - } - - pub fn clearRetainingCapacity(this: *Self) void { - this.len = 0; + pub fn insertSlice( + this: *Self, + allocator: std.mem.Allocator, + index: usize, + vals: []const Type, + ) OOM!void { + if ((comptime safety_checks) and this.cap - this.len < vals.len) this.assertOwned(); + var list_ = this.listManaged(allocator); + try list_.insertSlice(index, vals); + this.update(list_); } pub fn replaceRange( @@ -141,201 +320,70 @@ pub fn BabyList(comptime Type: type) type { start: usize, len_: usize, new_items: []const Type, - ) !void { + ) OOM!void { var list_ = this.listManaged(allocator); try list_.replaceRange(start, len_, new_items); } - pub fn appendAssumeCapacity(this: *Self, value: Type) void { - bun.assert(this.cap > this.len); - this.ptr[this.len] = value; - this.len += 1; + pub fn clone(this: Self, allocator: std.mem.Allocator) OOM!Self { + var copy = try this.list().clone(allocator); + return .moveFromList(©); } - pub fn writableSlice(this: *Self, allocator: std.mem.Allocator, cap: usize) ![]Type { + pub fn unusedCapacitySlice(this: Self) []Type { + return this.ptr[this.len..this.cap]; + } + + pub fn contains(this: Self, item: []const Type) bool { + return this.len > 0 and + @intFromPtr(item.ptr) >= @intFromPtr(this.ptr) and + @intFromPtr(item.ptr) < @intFromPtr(this.ptr) + this.len; + } + + pub fn sortAsc(this: *Self) void { + bun.strings.sortAsc(this.slice()); + } + + pub fn writableSlice( + this: *Self, + allocator: std.mem.Allocator, + additional: usize, + ) OOM![]Type { + if ((comptime safety_checks) and additional > this.cap - this.len) this.assertOwned(); var list_ = this.listManaged(allocator); - try list_.ensureUnusedCapacity(cap); - const writable = list_.items.ptr[this.len .. this.len + @as(u32, @intCast(cap))]; - list_.items.len += cap; + try list_.ensureUnusedCapacity(additional); + const prev_len = list_.items.len; + list_.items.len += additional; + const writable = list_.items[prev_len..]; this.update(list_); return writable; } - pub fn appendSliceAssumeCapacity(this: *Self, values: []const Type) void { - const tail = this.ptr[this.len .. 
this.len + values.len]; - bun.assert(this.cap >= this.len + @as(u32, @intCast(values.len))); - bun.copy(Type, tail, values); - this.len += @intCast(values.len); - bun.assert(this.cap >= this.len); - } - - pub fn initCapacity(allocator: std.mem.Allocator, len: usize) std.mem.Allocator.Error!Self { - var this = initWithBuffer(try allocator.alloc(Type, len)); - this.#allocator.set(allocator); - return this; - } - - pub fn initWithBuffer(buffer: []Type) Self { - return Self{ - .ptr = buffer.ptr, - .len = 0, - .cap = @intCast(buffer.len), - }; - } - - pub fn init(items: []const Type) Self { - @setRuntimeSafety(false); - return Self{ - .ptr = @constCast(items.ptr), - .len = @intCast(items.len), - .cap = @intCast(items.len), - }; - } - - pub fn fromList(list_: anytype) Self { - if (comptime @TypeOf(list_) == Self) { - return list_; - } - - if (comptime @TypeOf(list_) == []const Type) { - return init(list_); - } - - if (comptime Environment.allow_assert) { - bun.assert(list_.items.len <= list_.capacity); - } - - return Self{ - .ptr = list_.items.ptr, - .len = @intCast(list_.items.len), - .cap = @intCast(list_.capacity), - }; - } - - pub fn fromSlice(allocator: std.mem.Allocator, items: []const Type) !Self { - const allocated = try allocator.alloc(Type, items.len); - bun.copy(Type, allocated, items); - - return Self{ - .ptr = allocated.ptr, - .len = @intCast(allocated.len), - .cap = @intCast(allocated.len), - .#allocator = .init(allocator), - }; - } - - pub fn allocatedSlice(this: *const Self) []u8 { - if (this.cap == 0) return &.{}; - + pub fn allocatedSlice(this: Self) []Type { return this.ptr[0..this.cap]; } - pub fn update(this: *Self, list_: anytype) void { - this.* = .{ - .ptr = list_.items.ptr, - .len = @intCast(list_.items.len), - .cap = @intCast(list_.capacity), - }; - - if (comptime Environment.allow_assert) { - bun.assert(this.len <= this.cap); - } + pub fn memoryCost(this: Self) usize { + return this.cap; } - pub fn list(this: Self) std.ArrayListUnmanaged(Type) { - return std.ArrayListUnmanaged(Type){ - .items = this.ptr[0..this.len], - .capacity = this.cap, - }; - } - - pub fn listManaged(this: *Self, allocator: std.mem.Allocator) std.ArrayList(Type) { - this.#allocator.set(allocator); - var list_ = this.list(); - return list_.toManaged(allocator); - } - - pub fn first(this: Self) callconv(bun.callconv_inline) ?*Type { - return if (this.len > 0) this.ptr[0] else @as(?*Type, null); - } - - pub fn last(this: Self) callconv(bun.callconv_inline) ?*Type { - return if (this.len > 0) &this.ptr[this.len - 1] else @as(?*Type, null); - } - - pub fn first_(this: Self) callconv(bun.callconv_inline) Type { - return this.ptr[0]; - } - - pub fn at(this: Self, index: usize) callconv(bun.callconv_inline) *const Type { - bun.assert(index < this.len); - return &this.ptr[index]; - } - - pub fn mut(this: Self, index: usize) callconv(bun.callconv_inline) *Type { - bun.assert(index < this.len); - return &this.ptr[index]; - } - - pub fn one(allocator: std.mem.Allocator, value: Type) !Self { - var items = try allocator.alloc(Type, 1); - items[0] = value; - return Self{ - .ptr = @as([*]Type, @ptrCast(items.ptr)), - .len = 1, - .cap = 1, - .#allocator = .init(allocator), - }; - } - - pub fn @"[0]"(this: Self) callconv(bun.callconv_inline) Type { - return this.ptr[0]; - } - const OOM = error{OutOfMemory}; - - pub fn push(this: *Self, allocator: std.mem.Allocator, value: Type) OOM!void { - var list_ = this.listManaged(allocator); - try list_.append(value); - this.update(list_); - } - - pub fn appendFmt(this: 
*Self, allocator: std.mem.Allocator, comptime fmt: []const u8, args: anytype) !void { + /// This method is available only for `BabyList(u8)`. + pub fn appendFmt( + this: *Self, + allocator: std.mem.Allocator, + comptime fmt: []const u8, + args: anytype, + ) OOM!void { + if ((comptime safety_checks) and this.len == this.cap) this.assertOwned(); var list_ = this.listManaged(allocator); const writer = list_.writer(); try writer.print(fmt, args); - this.update(list_); } - pub fn insert(this: *Self, allocator: std.mem.Allocator, index: usize, val: Type) !void { - var list_ = this.listManaged(allocator); - try list_.insert(index, val); - this.update(list_); - } - - pub fn insertSlice(this: *Self, allocator: std.mem.Allocator, index: usize, vals: []const Type) !void { - var list_ = this.listManaged(allocator); - try list_.insertSlice(index, vals); - this.update(list_); - } - - pub fn append(this: *Self, allocator: std.mem.Allocator, value: []const Type) !void { - var list_ = this.listManaged(allocator); - try list_.appendSlice(value); - this.update(list_); - } - - pub fn slice(this: Self) callconv(bun.callconv_inline) []Type { - @setRuntimeSafety(false); - return this.ptr[0..this.len]; - } - - pub fn sliceConst(this: *const Self) callconv(bun.callconv_inline) []const Type { - @setRuntimeSafety(false); - return this.ptr[0..this.len]; - } - - pub fn write(this: *Self, allocator: std.mem.Allocator, str: []const u8) !u32 { + /// This method is available only for `BabyList(u8)`. + pub fn write(this: *Self, allocator: std.mem.Allocator, str: []const u8) OOM!u32 { + if ((comptime safety_checks) and this.cap - this.len < str.len) this.assertOwned(); if (comptime Type != u8) @compileError("Unsupported for type " ++ @typeName(Type)); const initial = this.len; @@ -345,7 +393,9 @@ pub fn BabyList(comptime Type: type) type { return this.len - initial; } + /// This method is available only for `BabyList(u8)`. pub fn writeLatin1(this: *Self, allocator: std.mem.Allocator, str: []const u8) OOM!u32 { + if ((comptime safety_checks) and str.len > 0) this.assertOwned(); if (comptime Type != u8) @compileError("Unsupported for type " ++ @typeName(Type)); const initial = this.len; @@ -355,7 +405,9 @@ pub fn BabyList(comptime Type: type) type { return this.len - initial; } + /// This method is available only for `BabyList(u8)`. pub fn writeUTF16(this: *Self, allocator: std.mem.Allocator, str: []const u16) OOM!u32 { + if ((comptime safety_checks) and str.len > 0) this.assertOwned(); if (comptime Type != u8) @compileError("Unsupported for type " ++ @typeName(Type)); @@ -407,6 +459,7 @@ pub fn BabyList(comptime Type: type) type { return this.len - initial; } + /// This method is available only for `BabyList(u8)`. 
pub fn writeTypeAsBytesAssumeCapacity(this: *Self, comptime Int: type, int: Int) void { if (comptime Type != u8) @compileError("Unsupported for type " ++ @typeName(Type)); @@ -415,12 +468,95 @@ pub fn BabyList(comptime Type: type) type { this.len += @sizeOf(Int); } - pub fn memoryCost(self: *const Self) usize { - return self.cap; + pub fn parse(input: *bun.css.Parser) bun.css.Result(Self) { + return switch (input.parseCommaSeparated(Type, bun.css.generic.parseFor(Type))) { + .result => |v| return .{ .result = Self{ + .ptr = v.items.ptr, + .len = @intCast(v.items.len), + .cap = @intCast(v.capacity), + } }, + .err => |e| return .{ .err = e }, + }; + } + + pub fn toCss(this: *const Self, comptime W: type, dest: *bun.css.Printer(W)) bun.css.PrintErr!void { + return bun.css.to_css.fromBabyList(Type, this, W, dest); + } + + pub fn eql(lhs: *const Self, rhs: *const Self) bool { + if (lhs.len != rhs.len) return false; + for (lhs.sliceConst(), rhs.sliceConst()) |*a, *b| { + if (!bun.css.generic.eql(Type, a, b)) return false; + } + return true; + } + + pub fn deepClone(this: Self, allocator: std.mem.Allocator) !Self { + if (!@hasDecl(Type, "deepClone")) { + @compileError("Unsupported type for BabyList.deepClone(): " ++ @typeName(Type)); + } + + var list_ = try initCapacity(allocator, this.len); + for (this.slice()) |item| { + const clone_result = item.deepClone(allocator); + const cloned_item = switch (comptime @typeInfo(@TypeOf(clone_result))) { + .error_union => try clone_result, + else => clone_result, + }; + list_.appendAssumeCapacity(cloned_item); + } + return list_; + } + + /// Same as `deepClone` but calls `bun.outOfMemory` instead of returning an error. + /// `Type.deepClone` must not return any error except `error.OutOfMemory`. + pub fn deepCloneInfallible(this: Self, allocator: std.mem.Allocator) Self { + return bun.handleOom(this.deepClone(allocator)); + } + + /// Avoid using this function. It creates a `BabyList` that will immediately invoke + /// illegal behavior if you call any method that could allocate or free memory. On top of + /// that, if `items` points to read-only memory, any attempt to modify a list element (which + /// is very easy given how many methods return non-const pointers and slices) will also + /// invoke illegal behavior. + /// + /// To find an alternative: + /// + /// 1. Determine how the resulting `BabyList` is being used. Is it stored in a struct field? + /// Is it passed to a function? + /// + /// 2. Determine whether that struct field or function parameter expects the list to be + /// mutable. Does it potentially call any methods that could allocate or free, like + /// `append` or `deinit`? + /// + /// 3. If the list is expected to be mutable, don't use this function, because the returned + /// list will invoke illegal behavior if mutated. Use `fromSlice` or another allocating + /// function instead. + /// + /// 4. If the list is *not* expected to be mutable, don't use a `BabyList` at all. Change + /// the field or parameter to be a plain slice instead. + /// + /// Requirements: + /// + /// * Methods that could potentially free, remap, or resize `items` cannot be called. + pub fn fromBorrowedSliceDangerous(items: []const Type) Self { + var this: Self = .fromOwnedSlice(@constCast(items)); + if (comptime safety_checks) this.#origin = .{ .borrowed = .{ + .trace = if (traces_enabled) .capture(@returnAddress()), + } }; + return this; + } + + /// Transfers ownership of this `BabyList` to a new allocator. 
+ /// + /// This method is valid only if both the old allocator and new allocator are + /// `MimallocArena`s. See `bun.safety.CheckedAllocator.transferOwnership`. + pub fn transferOwnership(this: *Self, new_allocator: anytype) void { + this.#allocator.transferOwnership(new_allocator); } pub fn format( - self: Self, + this: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype, @@ -429,65 +565,113 @@ pub fn BabyList(comptime Type: type) type { return std.fmt.format( writer, "BabyList({s}){{{any}}}", - .{ @typeName(Type), self.list() }, + .{ @typeName(Type), this.list() }, ); } - }; -} -pub fn OffsetList(comptime Type: type) type { - return struct { - head: u32 = 0, - byte_list: List = .{}, + fn assertOwned(this: *Self) void { + if ((comptime !safety_checks) or this.#origin == .owned) return; + if (comptime traces_enabled) { + bun.Output.note("borrowed BabyList created here:", .{}); + bun.crash_handler.dumpStackTrace( + this.#origin.borrowed.trace.trace(), + .{ .frame_count = 10, .stop_at_jsc_llint = true }, + ); + } + std.debug.panic( + "cannot perform this operation on a BabyList that doesn't own its data", + .{}, + ); + } - const List = BabyList(Type); - const Self = @This(); - - pub fn init(head: u32, byte_list: List) Self { + fn list(this: Self) std.ArrayListUnmanaged(Type) { return .{ - .head = head, - .byte_list = byte_list, + .items = this.slice(), + .capacity = this.cap, }; } - pub fn write(self: *Self, allocator: std.mem.Allocator, bytes: []const u8) !void { - _ = try self.byte_list.write(allocator, bytes); + fn listManaged(this: *Self, allocator: std.mem.Allocator) std.ArrayList(Type) { + this.#allocator.set(allocator); + var list_ = this.list(); + return list_.toManaged(allocator); } - pub fn slice(this: *Self) []u8 { - return this.byte_list.slice()[0..this.head]; - } - - pub fn remaining(this: *Self) []u8 { - return this.byte_list.slice()[this.head..]; - } - - pub fn consume(self: *Self, bytes: u32) void { - self.head +|= bytes; - if (self.head >= self.byte_list.len) { - self.head = 0; - self.byte_list.len = 0; + fn update(this: *Self, list_: anytype) void { + this.ptr = list_.items.ptr; + this.len = @intCast(list_.items.len); + this.cap = @intCast(list_.capacity); + if (comptime Environment.allow_assert) { + bun.assert(this.len <= this.cap); } } - - pub fn len(self: *const Self) u32 { - return self.byte_list.len - self.head; - } - - pub fn clear(self: *Self) void { - self.head = 0; - self.byte_list.len = 0; - } - - pub fn deinit(self: *Self, allocator: std.mem.Allocator) void { - self.byte_list.deinitWithAllocator(allocator); - self.* = .{}; - } }; } +pub const ByteList = BabyList(u8); + +pub const OffsetByteList = struct { + const Self = @This(); + + head: u32 = 0, + byte_list: ByteList = .{}, + + pub fn init(head: u32, byte_list: ByteList) Self { + return .{ + .head = head, + .byte_list = byte_list, + }; + } + + pub fn write(self: *Self, allocator: std.mem.Allocator, bytes: []const u8) !void { + _ = try self.byte_list.write(allocator, bytes); + } + + pub fn slice(self: *const Self) []u8 { + return self.byte_list.slice()[0..self.head]; + } + + pub fn remaining(self: *const Self) []u8 { + return self.byte_list.slice()[self.head..]; + } + + pub fn consume(self: *Self, bytes: u32) void { + self.head +|= bytes; + if (self.head >= self.byte_list.len) { + self.head = 0; + self.byte_list.len = 0; + } + } + + pub fn len(self: *const Self) u32 { + return self.byte_list.len - self.head; + } + + pub fn clear(self: *Self) void { + self.head = 0; + 
self.byte_list.len = 0; + } + + /// This method invalidates `self`. Use `clearAndFree` to reset to empty instead. + pub fn deinit(self: *Self, allocator: std.mem.Allocator) void { + self.byte_list.deinit(allocator); + self.* = undefined; + } + + pub fn clearAndFree(self: *Self, allocator: std.mem.Allocator) void { + self.deinit(allocator); + self.* = .{}; + } +}; + +pub const safety_checks = Environment.ci_assert; + const std = @import("std"); const bun = @import("bun"); -const Environment = bun.Environment; +const OOM = bun.OOM; const strings = bun.strings; +const StoredTrace = bun.crash_handler.StoredTrace; + +const Environment = bun.Environment; +const traces_enabled = Environment.isDebug; diff --git a/src/collections/BoundedArray.zig b/src/collections/bounded_array.zig similarity index 100% rename from src/collections/BoundedArray.zig rename to src/collections/bounded_array.zig diff --git a/src/css/css_parser.zig b/src/css/css_parser.zig index 6e09de4202..3ec785d33b 100644 --- a/src/css/css_parser.zig +++ b/src/css/css_parser.zig @@ -1416,7 +1416,7 @@ pub const BundlerAtRuleParser = struct { pub fn onImportRule(this: *This, import_rule: *ImportRule, start_position: u32, end_position: u32) void { const import_record_index = this.import_records.len; import_rule.import_record_idx = import_record_index; - this.import_records.push(this.allocator, ImportRecord{ + this.import_records.append(this.allocator, ImportRecord{ .path = bun.fs.Path.init(import_rule.url), .kind = if (import_rule.supports != null) .at_conditional else .at, .range = bun.logger.Range{ @@ -1439,9 +1439,9 @@ pub const BundlerAtRuleParser = struct { cloned.v.ensureTotalCapacity(this.allocator, this.enclosing_layer.v.len() + layer.v.len()); cloned.v.appendSliceAssumeCapacity(this.enclosing_layer.v.slice()); cloned.v.appendSliceAssumeCapacity(layer.v.slice()); - bun.handleOom(this.layer_names.push(this.allocator, cloned)); + bun.handleOom(this.layer_names.append(this.allocator, cloned)); } else { - bun.handleOom(this.layer_names.push(this.allocator, layer.deepClone(this.allocator))); + bun.handleOom(this.layer_names.append(this.allocator, layer.deepClone(this.allocator))); } } } @@ -2688,7 +2688,7 @@ pub fn NestedRuleParser(comptime T: type) type { if (!entry.found_existing) { entry.value_ptr.* = ComposesEntry{}; } - bun.handleOom(entry.value_ptr.*.composes.push(allocator, composes.deepClone(allocator))); + bun.handleOom(entry.value_ptr.*.composes.append(allocator, composes.deepClone(allocator))); } } @@ -3017,7 +3017,7 @@ pub fn fillPropertyBitSet(allocator: Allocator, bitset: *PropertyBitset, block: for (block.declarations.items) |*prop| { const tag = switch (prop.*) { .custom => { - bun.handleOom(custom_properties.push(allocator, prop.custom.name.asStr())); + bun.handleOom(custom_properties.append(allocator, prop.custom.name.asStr())); continue; }, .unparsed => |u| @as(PropertyIdTag, u.property_id), @@ -3030,7 +3030,7 @@ pub fn fillPropertyBitSet(allocator: Allocator, bitset: *PropertyBitset, block: for (block.important_declarations.items) |*prop| { const tag = switch (prop.*) { .custom => { - bun.handleOom(custom_properties.push(allocator, prop.custom.name.asStr())); + bun.handleOom(custom_properties.append(allocator, prop.custom.name.asStr())); continue; }, .unparsed => |u| @as(PropertyIdTag, u.property_id), @@ -3426,7 +3426,7 @@ pub fn StyleSheet(comptime AtRule: type) type { out.v.appendAssumeCapacity(rule.*); const import_record_idx = new_import_records.len; import_rule.import_record_idx = import_record_idx; - 
new_import_records.push(allocator, ImportRecord{ + new_import_records.append(allocator, ImportRecord{ .path = bun.fs.Path.init(import_rule.url), .kind = if (import_rule.supports != null) .at_conditional else .at, .range = bun.logger.Range.None, @@ -3790,7 +3790,7 @@ const ParseUntilErrorBehavior = enum { // return switch (this.*) { // .list => |list| { // const len = list.len; -// bun.handleOom(list.push(allocator, record)); +// bun.handleOom(list.append(allocator, record)); // return len; // }, // // .dummy => |*d| { @@ -3835,7 +3835,7 @@ pub const Parser = struct { }, .loc = loc, }; - extra.symbols.push(this.allocator(), bun.ast.Symbol{ + extra.symbols.append(this.allocator(), bun.ast.Symbol{ .kind = .local_css, .original_name = name, }) catch |err| bun.handleOom(err); @@ -3854,7 +3854,7 @@ pub const Parser = struct { pub fn addImportRecord(this: *Parser, url: []const u8, start_position: usize, kind: ImportKind) Result(u32) { if (this.import_records) |import_records| { const idx = import_records.len; - import_records.push(this.allocator(), ImportRecord{ + import_records.append(this.allocator(), ImportRecord{ .path = bun.fs.Path.init(url), .kind = kind, .range = bun.logger.Range{ @@ -6975,7 +6975,7 @@ pub const parse_utility = struct { ) Result(T) { // I hope this is okay var import_records = bun.BabyList(bun.ImportRecord){}; - defer import_records.deinitWithAllocator(allocator); + defer import_records.deinit(allocator); var i = ParserInput.new(allocator, input); var parser = Parser.new(&i, &import_records, .{}, null); const result = switch (parse_one(&parser)) { diff --git a/src/css/generics.zig b/src/css/generics.zig index 43503b3469..1a85c08ab3 100644 --- a/src/css/generics.zig +++ b/src/css/generics.zig @@ -483,7 +483,7 @@ pub inline fn deepClone(comptime T: type, this: *const T, allocator: Allocator) @compileError(@typeName(T) ++ " does not have a deepClone() function"); } - return T.deepClone(this, allocator); + return this.deepClone(allocator); } pub inline fn tryFromAngle(comptime T: type, angle: Angle) ?T { diff --git a/src/css/properties/grid.zig b/src/css/properties/grid.zig index db3c595619..39b0abcc7a 100644 --- a/src/css/properties/grid.zig +++ b/src/css/properties/grid.zig @@ -309,6 +309,7 @@ pub const TrackRepeat = struct { if (i.expectComma().asErr()) |e| return .{ .err = e }; + // TODO: this code will not compile if used var line_names = bun.BabyList(CustomIdentList).init(i.allocator); var track_sizes = bun.BabyList(TrackSize).init(i.allocator); diff --git a/src/css/small_list.zig b/src/css/small_list.zig index 132b1609b7..1696a1fa33 100644 --- a/src/css/small_list.zig +++ b/src/css/small_list.zig @@ -117,12 +117,13 @@ pub fn SmallList(comptime T: type, comptime N: comptime_int) type { .data = .{ .heap = .{ .len = list.len, .ptr = list.ptr } }, }; } - defer list.deinitWithAllocator(allocator); + var list_ = list; + defer list_.deinit(allocator); var this: @This() = .{ - .capacity = list.len, + .capacity = list_.len, .data = .{ .inlined = undefined }, }; - @memcpy(this.data.inlined[0..list.len], list.items[0..list.len]); + @memcpy(this.data.inlined[0..list_.len], list_.items[0..list_.len]); return this; } @@ -237,7 +238,7 @@ pub fn SmallList(comptime T: type, comptime N: comptime_int) type { break :images images; }; if (!images.isEmpty()) { - bun.handleOom(res.push(allocator, images)); + bun.handleOom(res.append(allocator, images)); } } @@ -250,7 +251,7 @@ pub fn SmallList(comptime T: type, comptime N: comptime_int) type { const image = in.getImage().getPrefixed(alloc, 
css.VendorPrefix.fromName(prefix)); out.* = in.withImage(alloc, image); } - bun.handleOom(r.push(alloc, images)); + bun.handleOom(r.append(alloc, images)); } } }.helper; @@ -261,7 +262,7 @@ pub fn SmallList(comptime T: type, comptime N: comptime_int) type { if (prefixes.none) { if (rgb) |r| { - bun.handleOom(res.push(allocator, r)); + bun.handleOom(res.append(allocator, r)); } if (fallbacks.p3) { diff --git a/src/deps/uws/WindowsNamedPipe.zig b/src/deps/uws/WindowsNamedPipe.zig index f45ff568db..bf4238e0c4 100644 --- a/src/deps/uws/WindowsNamedPipe.zig +++ b/src/deps/uws/WindowsNamedPipe.zig @@ -79,10 +79,10 @@ fn onPipeClose(this: *WindowsNamedPipe) void { } fn onReadAlloc(this: *WindowsNamedPipe, suggested_size: usize) []u8 { - var available = this.incoming.available(); + var available = this.incoming.unusedCapacitySlice(); if (available.len < suggested_size) { bun.handleOom(this.incoming.ensureUnusedCapacity(bun.default_allocator, suggested_size)); - available = this.incoming.available(); + available = this.incoming.unusedCapacitySlice(); } return available.ptr[0..suggested_size]; } diff --git a/src/install/PackageManager/PackageJSONEditor.zig b/src/install/PackageManager/PackageJSONEditor.zig index 9b157767d8..959bc17871 100644 --- a/src/install/PackageManager/PackageJSONEditor.zig +++ b/src/install/PackageManager/PackageJSONEditor.zig @@ -93,8 +93,8 @@ pub fn editTrustedDependencies(allocator: std.mem.Allocator, package_json: *Expr } const trusted_dependencies_to_add = len; - const new_trusted_deps = brk: { - var deps = try allocator.alloc(Expr, trusted_dependencies.len + trusted_dependencies_to_add); + const new_trusted_deps: JSAst.ExprNodeList = brk: { + const deps = try allocator.alloc(Expr, trusted_dependencies.len + trusted_dependencies_to_add); @memcpy(deps[0..trusted_dependencies.len], trusted_dependencies); @memset(deps[trusted_dependencies.len..], Expr.empty); @@ -127,7 +127,7 @@ pub fn editTrustedDependencies(allocator: std.mem.Allocator, package_json: *Expr for (deps) |dep| bun.assert(dep.data != .e_missing); } - break :brk deps; + break :brk .fromOwnedSlice(deps); }; var needs_new_trusted_dependencies_list = true; @@ -141,20 +141,18 @@ pub fn editTrustedDependencies(allocator: std.mem.Allocator, package_json: *Expr break :brk Expr.init( E.Array, - E.Array{ - .items = JSAst.ExprNodeList.init(new_trusted_deps), - }, + E.Array{ .items = new_trusted_deps }, logger.Loc.Empty, ); }; if (trusted_dependencies_to_add > 0 and new_trusted_deps.len > 0) { - trusted_dependencies_array.data.e_array.items = JSAst.ExprNodeList.init(new_trusted_deps); + trusted_dependencies_array.data.e_array.items = new_trusted_deps; trusted_dependencies_array.data.e_array.alphabetizeStrings(); } if (package_json.data != .e_object or package_json.data.e_object.properties.len == 0) { - var root_properties = try allocator.alloc(JSAst.G.Property, 1); + const root_properties = try allocator.alloc(JSAst.G.Property, 1); root_properties[0] = JSAst.G.Property{ .key = Expr.init( E.String, @@ -169,12 +167,12 @@ pub fn editTrustedDependencies(allocator: std.mem.Allocator, package_json: *Expr package_json.* = Expr.init( E.Object, E.Object{ - .properties = JSAst.G.Property.List.init(root_properties), + .properties = JSAst.G.Property.List.fromOwnedSlice(root_properties), }, logger.Loc.Empty, ); } else if (needs_new_trusted_dependencies_list) { - var root_properties = try allocator.alloc(G.Property, package_json.data.e_object.properties.len + 1); + const root_properties = try allocator.alloc(G.Property, 
package_json.data.e_object.properties.len + 1); @memcpy(root_properties[0..package_json.data.e_object.properties.len], package_json.data.e_object.properties.slice()); root_properties[root_properties.len - 1] = .{ .key = Expr.init( @@ -189,7 +187,7 @@ pub fn editTrustedDependencies(allocator: std.mem.Allocator, package_json: *Expr package_json.* = Expr.init( E.Object, E.Object{ - .properties = JSAst.G.Property.List.init(root_properties), + .properties = JSAst.G.Property.List.fromOwnedSlice(root_properties), }, logger.Loc.Empty, ); @@ -501,9 +499,12 @@ pub fn edit( } } - var new_dependencies = try allocator.alloc(G.Property, dependencies.len + remaining - replacing); - bun.copy(G.Property, new_dependencies, dependencies); - @memset(new_dependencies[dependencies.len..], G.Property{}); + var new_dependencies = try std.ArrayListUnmanaged(G.Property) + .initCapacity(allocator, dependencies.len + remaining - replacing); + new_dependencies.expandToCapacity(); + + bun.copy(G.Property, new_dependencies.items, dependencies); + @memset(new_dependencies.items[dependencies.len..], G.Property{}); var trusted_dependencies: []Expr = &[_]Expr{}; if (options.add_trusted_dependencies) { @@ -515,10 +516,10 @@ pub fn edit( } const trusted_dependencies_to_add = manager.trusted_deps_to_add_to_package_json.items.len; - const new_trusted_deps = brk: { - if (!options.add_trusted_dependencies or trusted_dependencies_to_add == 0) break :brk &[_]Expr{}; + const new_trusted_deps: JSAst.ExprNodeList = brk: { + if (!options.add_trusted_dependencies or trusted_dependencies_to_add == 0) break :brk .empty; - var deps = try allocator.alloc(Expr, trusted_dependencies.len + trusted_dependencies_to_add); + const deps = try allocator.alloc(Expr, trusted_dependencies.len + trusted_dependencies_to_add); @memcpy(deps[0..trusted_dependencies.len], trusted_dependencies); @memset(deps[trusted_dependencies.len..], Expr.empty); @@ -547,7 +548,7 @@ pub fn edit( for (deps) |dep| bun.assert(dep.data != .e_missing); } - break :brk deps; + break :brk .fromOwnedSlice(deps); }; for (updates.*) |*request| { @@ -555,31 +556,31 @@ pub fn edit( defer if (comptime Environment.allow_assert) bun.assert(request.e_string != null); var k: usize = 0; - while (k < new_dependencies.len) : (k += 1) { - if (new_dependencies[k].key) |key| { + while (k < new_dependencies.items.len) : (k += 1) { + if (new_dependencies.items[k].key) |key| { const name = request.getName(); if (!key.data.e_string.eql(string, name)) continue; if (request.package_id == invalid_package_id) { // Duplicate dependency (e.g., "react" in both "dependencies" and // "optionalDependencies"). Remove the old dependency. - new_dependencies[k] = .{}; - new_dependencies = new_dependencies[0 .. 
new_dependencies.len - 1]; + new_dependencies.items[k] = .{}; + new_dependencies.items.len -= 1; } } - new_dependencies[k].key = JSAst.Expr.allocate( + new_dependencies.items[k].key = JSAst.Expr.allocate( allocator, JSAst.E.String, .{ .data = try allocator.dupe(u8, request.getResolvedName(manager.lockfile)) }, logger.Loc.Empty, ); - new_dependencies[k].value = JSAst.Expr.allocate(allocator, JSAst.E.String, .{ + new_dependencies.items[k].value = JSAst.Expr.allocate(allocator, JSAst.E.String, .{ // we set it later .data = "", }, logger.Loc.Empty); - request.e_string = new_dependencies[k].value.?.data.e_string; + request.e_string = new_dependencies.items[k].value.?.data.e_string; break; } } @@ -595,12 +596,12 @@ pub fn edit( } break :brk JSAst.Expr.allocate(allocator, JSAst.E.Object, .{ - .properties = JSAst.G.Property.List.init(new_dependencies), + .properties = .empty, }, logger.Loc.Empty); }; - dependencies_object.data.e_object.properties = JSAst.G.Property.List.init(new_dependencies); - if (new_dependencies.len > 1) + dependencies_object.data.e_object.properties = JSAst.G.Property.List.moveFromList(&new_dependencies); + if (dependencies_object.data.e_object.properties.len > 1) dependencies_object.data.e_object.alphabetizeProperties(); var needs_new_trusted_dependencies_list = true; @@ -617,19 +618,19 @@ pub fn edit( } break :brk Expr.allocate(allocator, E.Array, .{ - .items = JSAst.ExprNodeList.init(new_trusted_deps), + .items = new_trusted_deps, }, logger.Loc.Empty); }; if (options.add_trusted_dependencies and trusted_dependencies_to_add > 0) { - trusted_dependencies_array.data.e_array.items = JSAst.ExprNodeList.init(new_trusted_deps); + trusted_dependencies_array.data.e_array.items = new_trusted_deps; if (new_trusted_deps.len > 1) { trusted_dependencies_array.data.e_array.alphabetizeStrings(); } } if (current_package_json.data != .e_object or current_package_json.data.e_object.properties.len == 0) { - var root_properties = try allocator.alloc(JSAst.G.Property, if (options.add_trusted_dependencies) 2 else 1); + const root_properties = try allocator.alloc(JSAst.G.Property, if (options.add_trusted_dependencies) 2 else 1); root_properties[0] = JSAst.G.Property{ .key = JSAst.Expr.allocate(allocator, JSAst.E.String, .{ .data = dependency_list, @@ -647,11 +648,11 @@ pub fn edit( } current_package_json.* = JSAst.Expr.allocate(allocator, JSAst.E.Object, .{ - .properties = JSAst.G.Property.List.init(root_properties), + .properties = JSAst.G.Property.List.fromOwnedSlice(root_properties), }, logger.Loc.Empty); } else { if (needs_new_dependency_list and needs_new_trusted_dependencies_list) { - var root_properties = try allocator.alloc(G.Property, current_package_json.data.e_object.properties.len + 2); + const root_properties = try allocator.alloc(G.Property, current_package_json.data.e_object.properties.len + 2); @memcpy(root_properties[0..current_package_json.data.e_object.properties.len], current_package_json.data.e_object.properties.slice()); root_properties[root_properties.len - 2] = .{ .key = Expr.allocate(allocator, E.String, E.String{ @@ -666,10 +667,10 @@ pub fn edit( .value = trusted_dependencies_array, }; current_package_json.* = Expr.allocate(allocator, E.Object, .{ - .properties = G.Property.List.init(root_properties), + .properties = G.Property.List.fromOwnedSlice(root_properties), }, logger.Loc.Empty); } else if (needs_new_dependency_list or needs_new_trusted_dependencies_list) { - var root_properties = try allocator.alloc(JSAst.G.Property, 
current_package_json.data.e_object.properties.len + 1); + const root_properties = try allocator.alloc(JSAst.G.Property, current_package_json.data.e_object.properties.len + 1); @memcpy(root_properties[0..current_package_json.data.e_object.properties.len], current_package_json.data.e_object.properties.slice()); root_properties[root_properties.len - 1] = .{ .key = JSAst.Expr.allocate(allocator, JSAst.E.String, .{ @@ -678,7 +679,7 @@ pub fn edit( .value = if (needs_new_dependency_list) dependencies_object else trusted_dependencies_array, }; current_package_json.* = JSAst.Expr.allocate(allocator, JSAst.E.Object, .{ - .properties = JSAst.G.Property.List.init(root_properties), + .properties = JSAst.G.Property.List.fromOwnedSlice(root_properties), }, logger.Loc.Empty); } } diff --git a/src/install/PackageManager/updatePackageJSONAndInstall.zig b/src/install/PackageManager/updatePackageJSONAndInstall.zig index 9407add0fe..da973a2e26 100644 --- a/src/install/PackageManager/updatePackageJSONAndInstall.zig +++ b/src/install/PackageManager/updatePackageJSONAndInstall.zig @@ -165,9 +165,10 @@ fn updatePackageJSONAndInstallWithManagerWithUpdates( // If the dependencies list is now empty, remove it from the package.json // since we're swapRemove, we have to re-sort it if (query.expr.data.e_object.properties.len == 0) { - var arraylist = current_package_json.root.data.e_object.properties.list(); - _ = arraylist.swapRemove(query.i); - current_package_json.root.data.e_object.properties.update(arraylist); + // TODO: Theoretically we could change these two lines to + // `.orderedRemove(query.i)`, but would that change user-facing + // behavior? + _ = current_package_json.root.data.e_object.properties.swapRemove(query.i); current_package_json.root.data.e_object.packageJSONSort(); } else { var obj = query.expr.data.e_object; diff --git a/src/install/PackageManagerTask.zig b/src/install/PackageManagerTask.zig index a78a7fefa2..c324dc8246 100644 --- a/src/install/PackageManagerTask.zig +++ b/src/install/PackageManagerTask.zig @@ -94,17 +94,15 @@ pub fn callback(task: *ThreadPool.Task) void { .package_manifest => { const allocator = bun.default_allocator; var manifest = &this.request.package_manifest; - const body = manifest.network.response_buffer.move(); - defer { - bun.default_allocator.free(body); - } + const body = &manifest.network.response_buffer; + defer body.deinit(); const package_manifest = Npm.Registry.getPackageMetadata( allocator, manager.scopeForPackageName(manifest.name.slice()), (manifest.network.response.metadata orelse @panic("Assertion failure: Expected metadata to be set")).response, - body, + body.slice(), &this.log, manifest.name.slice(), manifest.network.callback.package_manifest.loaded_manifest, @@ -135,15 +133,12 @@ pub fn callback(task: *ThreadPool.Task) void { } }, .extract => { - const bytes = this.request.extract.network.response_buffer.move(); - - defer { - bun.default_allocator.free(bytes); - } + const buffer = &this.request.extract.network.response_buffer; + defer buffer.deinit(); const result = this.request.extract.tarball.run( &this.log, - bytes, + buffer.slice(), ) catch |err| { bun.handleErrorReturnTrace(err, @errorReturnTrace()); diff --git a/src/interchange/json.zig b/src/interchange/json.zig index 3109d8c600..bec03a2501 100644 --- a/src/interchange/json.zig +++ b/src/interchange/json.zig @@ -194,7 +194,7 @@ fn JSONLikeParser_( } try p.lexer.expect(.t_close_bracket); return newExpr(E.Array{ - .items = ExprNodeList.fromList(exprs), + .items = ExprNodeList.moveFromList(&exprs), 
.is_single_line = is_single_line, .was_originally_macro = comptime opts.was_originally_macro, }, loc); @@ -266,7 +266,7 @@ fn JSONLikeParser_( } try p.lexer.expect(.t_close_brace); return newExpr(E.Object{ - .properties = G.Property.List.fromList(properties), + .properties = G.Property.List.moveFromList(&properties), .is_single_line = is_single_line, .was_originally_macro = comptime opts.was_originally_macro, }, loc); @@ -552,21 +552,20 @@ pub fn toAST( }, .@"struct" => |Struct| { const fields: []const std.builtin.Type.StructField = Struct.fields; - var properties = try allocator.alloc(js_ast.G.Property, fields.len); - var property_i: usize = 0; + var properties = try BabyList(js_ast.G.Property).initCapacity(allocator, fields.len); + inline for (fields) |field| { - properties[property_i] = G.Property{ + properties.appendAssumeCapacity(G.Property{ .key = Expr.init(E.String, E.String{ .data = field.name }, logger.Loc.Empty), .value = try toAST(allocator, field.type, @field(value, field.name)), - }; - property_i += 1; + }); } return Expr.init( js_ast.E.Object, js_ast.E.Object{ - .properties = BabyList(G.Property).init(properties[0..property_i]), - .is_single_line = property_i <= 1, + .properties = properties, + .is_single_line = properties.len <= 1, }, logger.Loc.Empty, ); diff --git a/src/interchange/yaml.zig b/src/interchange/yaml.zig index eeba0420ab..b76a0af3a8 100644 --- a/src/interchange/yaml.zig +++ b/src/interchange/yaml.zig @@ -19,13 +19,13 @@ pub const YAML = struct { // multi-document yaml streams are converted into arrays - var items: std.ArrayList(Expr) = try .initCapacity(allocator, stream.docs.items.len); + var items: bun.BabyList(Expr) = try .initCapacity(allocator, stream.docs.items.len); for (stream.docs.items) |doc| { items.appendAssumeCapacity(doc.root); } - return .init(E.Array, .{ .items = .fromList(items) }, .Empty); + return .init(E.Array, .{ .items = items }, .Empty); }, }; } @@ -756,7 +756,7 @@ pub fn Parser(comptime enc: Encoding) type { try self.scan(.{}); - return .init(E.Array, .{ .items = .fromList(seq) }, sequence_start.loc()); + return .init(E.Array, .{ .items = .moveFromList(&seq) }, sequence_start.loc()); } fn parseFlowMapping(self: *@This()) ParseError!Expr { @@ -866,7 +866,7 @@ pub fn Parser(comptime enc: Encoding) type { try self.scan(.{}); - return .init(E.Object, .{ .properties = .fromList(props) }, mapping_start.loc()); + return .init(E.Object, .{ .properties = .moveFromList(&props) }, mapping_start.loc()); } fn parseBlockSequence(self: *@This()) ParseError!Expr { @@ -941,7 +941,7 @@ pub fn Parser(comptime enc: Encoding) type { } } - return .init(E.Array, .{ .items = .fromList(seq) }, sequence_start.loc()); + return .init(E.Array, .{ .items = .moveFromList(&seq) }, sequence_start.loc()); } fn parseBlockMapping( @@ -1022,7 +1022,7 @@ pub fn Parser(comptime enc: Encoding) type { } if (self.context.get() == .flow_in) { - return .init(E.Object, .{ .properties = .fromList(props) }, mapping_start.loc()); + return .init(E.Object, .{ .properties = .moveFromList(&props) }, mapping_start.loc()); } try self.context.set(.block_in); @@ -1126,7 +1126,7 @@ pub fn Parser(comptime enc: Encoding) type { } } - return .init(E.Object, .{ .properties = .fromList(props) }, mapping_start.loc()); + return .init(E.Object, .{ .properties = .moveFromList(&props) }, mapping_start.loc()); } const NodeProperties = struct { diff --git a/src/io/PipeWriter.zig b/src/io/PipeWriter.zig index b172cf133d..30ac28f95e 100644 --- a/src/io/PipeWriter.zig +++ b/src/io/PipeWriter.zig @@ -1127,16 
+1127,11 @@ pub const StreamBuffer = struct { } pub fn writeAssumeCapacity(this: *StreamBuffer, buffer: []const u8) void { - var byte_list = bun.ByteList.fromList(this.list); - defer this.list = byte_list.listManaged(this.list.allocator); - byte_list.appendSliceAssumeCapacity(buffer); + this.list.appendSliceAssumeCapacity(buffer); } pub fn ensureUnusedCapacity(this: *StreamBuffer, capacity: usize) OOM!void { - var byte_list = bun.ByteList.fromList(this.list); - defer this.list = byte_list.listManaged(this.list.allocator); - - _ = try byte_list.ensureUnusedCapacity(this.list.allocator, capacity); + return this.list.ensureUnusedCapacity(capacity); } pub fn writeTypeAsBytes(this: *StreamBuffer, comptime T: type, data: *const T) OOM!void { @@ -1144,8 +1139,8 @@ pub const StreamBuffer = struct { } pub fn writeTypeAsBytesAssumeCapacity(this: *StreamBuffer, comptime T: type, data: T) void { - var byte_list = bun.ByteList.fromList(this.list); - defer this.list = byte_list.listManaged(this.list.allocator); + var byte_list = bun.ByteList.moveFromList(&this.list); + defer this.list = byte_list.moveToListManaged(this.list.allocator); byte_list.writeTypeAsBytesAssumeCapacity(T, data); } @@ -1156,16 +1151,16 @@ pub const StreamBuffer = struct { } { - var byte_list = bun.ByteList.fromList(this.list); - defer this.list = byte_list.listManaged(this.list.allocator); + var byte_list = bun.ByteList.moveFromList(&this.list); + defer this.list = byte_list.moveToListManaged(this.list.allocator); _ = try byte_list.writeLatin1(this.list.allocator, buffer); } return this.list.items[this.cursor..]; } else if (comptime @TypeOf(writeFn) == @TypeOf(&writeUTF16) and writeFn == &writeUTF16) { { - var byte_list = bun.ByteList.fromList(this.list); - defer this.list = byte_list.listManaged(this.list.allocator); + var byte_list = bun.ByteList.moveFromList(&this.list); + defer this.list = byte_list.moveToListManaged(this.list.allocator); _ = try byte_list.writeUTF16(this.list.allocator, buffer); } @@ -1185,15 +1180,15 @@ pub const StreamBuffer = struct { } } - var byte_list = bun.ByteList.fromList(this.list); - defer this.list = byte_list.listManaged(this.list.allocator); + var byte_list = bun.ByteList.moveFromList(&this.list); + defer this.list = byte_list.moveToListManaged(this.list.allocator); _ = try byte_list.writeLatin1(this.list.allocator, buffer); } pub fn writeUTF16(this: *StreamBuffer, buffer: []const u16) OOM!void { - var byte_list = bun.ByteList.fromList(this.list); - defer this.list = byte_list.listManaged(this.list.allocator); + var byte_list = bun.ByteList.moveFromList(&this.list); + defer this.list = byte_list.moveToListManaged(this.list.allocator); _ = try byte_list.writeUTF16(this.list.allocator, buffer); } diff --git a/src/js_printer.zig b/src/js_printer.zig index 385ca62d13..b005a6cb12 100644 --- a/src/js_printer.zig +++ b/src/js_printer.zig @@ -5865,8 +5865,8 @@ pub fn printJSON( var stmts = [_]js_ast.Stmt{stmt}; var parts = [_]js_ast.Part{.{ .stmts = &stmts }}; const ast = Ast.initTest(&parts); - const list = js_ast.Symbol.List.init(ast.symbols.slice()); - const nested_list = js_ast.Symbol.NestedList.init(&[_]js_ast.Symbol.List{list}); + const list = js_ast.Symbol.List.fromBorrowedSliceDangerous(ast.symbols.slice()); + const nested_list = js_ast.Symbol.NestedList.fromBorrowedSliceDangerous(&.{list}); var renamer = rename.NoOpRenamer.init(js_ast.Symbol.Map.initList(nested_list), source); var printer = PrinterType.init( diff --git a/src/linker.zig b/src/linker.zig index 13c1316405..a71015523a 100644 --- 
a/src/linker.zig +++ b/src/linker.zig @@ -112,14 +112,10 @@ pub const Linker = struct { const is_deferred = result.pending_imports.len > 0; - const import_records = result.ast.import_records.listManaged(linker.allocator); - defer { - result.ast.import_records = ImportRecord.List.fromList(import_records); - } // Step 1. Resolve imports & requires switch (result.loader) { .jsx, .js, .ts, .tsx => { - for (import_records.items, 0..) |*import_record, record_i| { + for (result.ast.import_records.slice(), 0..) |*import_record, record_i| { if (import_record.is_unused or (is_bun and is_deferred and !result.isPendingImport(@intCast(record_i)))) continue; diff --git a/src/pool.zig b/src/pool.zig index 8e2b538ae8..85d7b8f043 100644 --- a/src/pool.zig +++ b/src/pool.zig @@ -214,8 +214,7 @@ pub fn ObjectPool( if (comptime max_count > 0) { if (data().count >= max_count) { if (comptime log_allocations) std.io.getStdErr().writeAll(comptime std.fmt.comptimePrint("Free {s} - {d} bytes\n", .{ @typeName(Type), @sizeOf(Type) })) catch {}; - if (std.meta.hasFn(Type, "deinit")) node.data.deinit(); - node.allocator.destroy(node); + destroyNode(node); return; } } @@ -242,10 +241,20 @@ pub fn ObjectPool( dat.list.first = null; while (next) |node| { next = node.next; - if (std.meta.hasFn(Type, "deinit")) node.data.deinit(); - node.allocator.destroy(node); + destroyNode(node); } } + + fn destroyNode(node: *LinkedList.Node) void { + // TODO: Once a generic-allocator version of `BabyList` is added, change + // `ByteListPool` in `bun.js/webcore.zig` to use a managed default-allocator + // `ByteList` instead, and then get rid of the special-casing for `ByteList` + // here. This will fix a memory leak. + if (comptime Type != bun.ByteList) { + bun.memory.deinit(&node.data); + } + node.allocator.destroy(node); + } }; } diff --git a/src/ptr/owned.zig b/src/ptr/owned.zig index 1af997a3d9..3dd2b36d2c 100644 --- a/src/ptr/owned.zig +++ b/src/ptr/owned.zig @@ -60,6 +60,7 @@ pub fn OwnedIn(comptime Pointer: type, comptime Allocator: type) type { } }, .slice => struct { + /// Note: this creates *shallow* copies of `elem`. pub fn alloc(count: usize, elem: Child) AllocError!Self { return .allocIn(count, elem, bun.memory.initDefault(Allocator)); } @@ -82,6 +83,7 @@ pub fn OwnedIn(comptime Pointer: type, comptime Allocator: type) type { } }, .slice => struct { + /// Note: this creates *shallow* copies of `elem`. 
pub fn allocIn(count: usize, elem: Child, allocator_: Allocator) AllocError!Self { const data = try bun.allocators.asStd(allocator_).alloc(Child, count); @memset(data, elem); diff --git a/src/s3/client.zig b/src/s3/client.zig index 8225d211ab..1117409b8a 100644 --- a/src/s3/client.zig +++ b/src/s3/client.zig @@ -104,7 +104,7 @@ pub fn listObjects( ) void { var search_params: bun.ByteList = .{}; - bun.handleOom(search_params.append(bun.default_allocator, "?")); + bun.handleOom(search_params.appendSlice(bun.default_allocator, "?")); if (listOptions.continuation_token) |continuation_token| { var buff: [1024]u8 = undefined; @@ -127,9 +127,9 @@ pub fn listObjects( if (listOptions.encoding_type != null) { if (listOptions.continuation_token != null or listOptions.delimiter != null) { - bun.handleOom(search_params.append(bun.default_allocator, "&encoding-type=url")); + bun.handleOom(search_params.appendSlice(bun.default_allocator, "&encoding-type=url")); } else { - bun.handleOom(search_params.append(bun.default_allocator, "encoding-type=url")); + bun.handleOom(search_params.appendSlice(bun.default_allocator, "encoding-type=url")); } } @@ -142,9 +142,9 @@ pub fn listObjects( } if (listOptions.continuation_token != null or listOptions.delimiter != null or listOptions.encoding_type != null or listOptions.fetch_owner != null) { - bun.handleOom(search_params.append(bun.default_allocator, "&list-type=2")); + bun.handleOom(search_params.appendSlice(bun.default_allocator, "&list-type=2")); } else { - bun.handleOom(search_params.append(bun.default_allocator, "list-type=2")); + bun.handleOom(search_params.appendSlice(bun.default_allocator, "list-type=2")); } if (listOptions.max_keys) |max_keys| { @@ -170,7 +170,7 @@ pub fn listObjects( .method = .GET, .search_params = search_params.slice(), }, true, null) catch |sign_err| { - search_params.deinitWithAllocator(bun.default_allocator); + search_params.deinit(bun.default_allocator); const error_code_and_message = Error.getSignErrorCodeAndMessage(sign_err); callback(.{ .failure = .{ .code = error_code_and_message.code, .message = error_code_and_message.message } }, callback_context); @@ -178,7 +178,7 @@ pub fn listObjects( return; }; - search_params.deinitWithAllocator(bun.default_allocator); + search_params.deinit(bun.default_allocator); const headers = bun.handleOom(bun.http.Headers.fromPicoHttpHeaders(result.headers(), bun.default_allocator)); @@ -631,14 +631,14 @@ pub fn readableStream( } if (has_more) { readable.ptr.Bytes.onData( - .{ .temporary = bun.ByteList.initConst(chunk.list.items) }, + .{ .temporary = bun.ByteList.fromBorrowedSliceDangerous(chunk.list.items) }, bun.default_allocator, ); return; } readable.ptr.Bytes.onData( - .{ .temporary_and_done = bun.ByteList.initConst(chunk.list.items) }, + .{ .temporary_and_done = bun.ByteList.fromBorrowedSliceDangerous(chunk.list.items) }, bun.default_allocator, ); return; diff --git a/src/s3/multipart.zig b/src/s3/multipart.zig index 18fe0a8308..acc303469b 100644 --- a/src/s3/multipart.zig +++ b/src/s3/multipart.zig @@ -284,7 +284,7 @@ pub const MultiPartUpload = struct { if (this.multipart_etags.capacity > 0) this.multipart_etags.deinit(bun.default_allocator); if (this.multipart_upload_list.cap > 0) - this.multipart_upload_list.deinitWithAllocator(bun.default_allocator); + this.multipart_upload_list.deinit(bun.default_allocator); bun.destroy(this); } @@ -438,7 +438,10 @@ pub const MultiPartUpload = struct { // sort the etags std.sort.block(UploadPart.UploadPartResult, this.multipart_etags.items, this, 
UploadPart.sortEtags); // start the multipart upload list - bun.handleOom(this.multipart_upload_list.append(bun.default_allocator, "<CompleteMultipartUpload>")); + bun.handleOom(this.multipart_upload_list.appendSlice( + bun.default_allocator, + "<CompleteMultipartUpload>", + )); for (this.multipart_etags.items) |tag| { bun.handleOom(this.multipart_upload_list.appendFmt(bun.default_allocator, "<Part><PartNumber>{}</PartNumber><ETag>{s}</ETag></Part>", .{ tag.number, tag.etag })); @@ -446,7 +449,10 @@ } this.multipart_etags.deinit(bun.default_allocator); this.multipart_etags = .{}; - bun.handleOom(this.multipart_upload_list.append(bun.default_allocator, "</CompleteMultipartUpload>")); + bun.handleOom(this.multipart_upload_list.appendSlice( + bun.default_allocator, + "</CompleteMultipartUpload>", + )); // will deref and ends after commit this.commitMultiPartRequest(); } else if (this.state == .singlefile_started) { diff --git a/src/safety/CriticalSection.zig b/src/safety/CriticalSection.zig index d308295b5d..39604d9233 100644 --- a/src/safety/CriticalSection.zig +++ b/src/safety/CriticalSection.zig @@ -197,11 +197,11 @@ pub fn end(self: *Self) void { if (comptime enabled) self.internal_state.unlock(); } +pub const enabled = bun.Environment.ci_assert; + const bun = @import("bun"); const invalid_thread_id = @import("./thread_id.zig").invalid; const StoredTrace = bun.crash_handler.StoredTrace; - -const enabled = bun.Environment.ci_assert; const traces_enabled = bun.Environment.isDebug; const std = @import("std"); diff --git a/src/safety/ThreadLock.zig b/src/safety/ThreadLock.zig index 143bd1904a..8d8798a7bb 100644 --- a/src/safety/ThreadLock.zig +++ b/src/safety/ThreadLock.zig @@ -67,11 +67,11 @@ pub fn lockOrAssert(self: *Self) void { } } +pub const enabled = bun.Environment.ci_assert; + const bun = @import("bun"); const invalid_thread_id = @import("./thread_id.zig").invalid; const StoredTrace = bun.crash_handler.StoredTrace; - -const enabled = bun.Environment.ci_assert; const traces_enabled = bun.Environment.isDebug; const std = @import("std"); diff --git a/src/safety/alloc.zig b/src/safety/alloc.zig index 3c544496d8..6a0c6eec48 100644 --- a/src/safety/alloc.zig +++ b/src/safety/alloc.zig @@ -30,7 +30,7 @@ fn hasPtr(alloc: Allocator) bool { bun.MaxHeapAllocator.isInstance(alloc) or alloc.vtable == bun.allocators.c_allocator.vtable or alloc.vtable == bun.allocators.z_allocator.vtable or - bun.MimallocArena.isInstance(alloc) or + MimallocArena.isInstance(alloc) or bun.jsc.CachedBytecode.isInstance(alloc) or bun.bundle_v2.allocatorHasPointer(alloc) or ((comptime bun.heap_breakdown.enabled) and bun.heap_breakdown.Zone.isInstance(alloc)) or @@ -93,7 +93,7 @@ pub const CheckedAllocator = struct { #allocator: if (enabled) NullableAllocator else void = if (enabled) .init(null), #trace: if (traces_enabled) StoredTrace else void = if (traces_enabled) StoredTrace.empty, - pub fn init(alloc: Allocator) Self { + pub inline fn init(alloc: Allocator) Self { var self: Self = .{}; self.set(alloc); return self; @@ -136,15 +136,58 @@ pub const CheckedAllocator = struct { // Assertion will always fail. We want the error message. bun.safety.alloc.assertEq(old_alloc, alloc); } + + /// Transfers ownership of the collection to a new allocator. + /// + /// This method is valid only if both the old allocator and new allocator are `MimallocArena`s. + /// This is okay because data allocated by one `MimallocArena` can always be freed by another + /// (this includes `resize` and `remap`).
+ /// + /// `new_allocator` should be one of the following: + /// + /// * `*MimallocArena` + /// * `*const MimallocArena` + /// * `MimallocArena.Borrowed` + /// + /// If you only have an `std.mem.Allocator`, see `MimallocArena.Borrowed.downcast`. + pub inline fn transferOwnership(self: *Self, new_allocator: anytype) void { + if (comptime !enabled) return; + const ArgType = @TypeOf(new_allocator); + const new_std = switch (comptime ArgType) { + *MimallocArena, + *const MimallocArena, + MimallocArena.Borrowed, + => new_allocator.allocator(), + else => @compileError("unsupported argument: " ++ @typeName(ArgType)), + }; + + defer self.* = .init(new_std); + const old_allocator = self.#allocator.get() orelse return; + if (MimallocArena.isInstance(old_allocator)) return; + + if (comptime traces_enabled) { + bun.Output.errGeneric("collection first used here:", .{}); + var trace = self.#trace; + bun.crash_handler.dumpStackTrace( + trace.trace(), + .{ .frame_count = 10, .stop_at_jsc_llint = true }, + ); + } + std.debug.panic( + "cannot transfer ownership from non-MimallocArena (old vtable is {*})", + .{old_allocator.vtable}, + ); + } }; +pub const enabled = bun.Environment.ci_assert; + const bun = @import("bun"); const std = @import("std"); const Allocator = std.mem.Allocator; const StoredTrace = bun.crash_handler.StoredTrace; - -const enabled = bun.Environment.ci_assert; const traces_enabled = bun.Environment.isDebug; const LinuxMemFdAllocator = bun.allocators.LinuxMemFdAllocator; +const MimallocArena = bun.allocators.MimallocArena; const NullableAllocator = bun.allocators.NullableAllocator; diff --git a/src/shell/Builtin.zig b/src/shell/Builtin.zig index 1fe2761be0..8578485590 100644 --- a/src/shell/Builtin.zig +++ b/src/shell/Builtin.zig @@ -619,11 +619,17 @@ pub fn done(this: *Builtin, exit_code: anytype) Yield { // Aggregate output data if shell state is piped and this cmd is piped if (cmd.io.stdout == .pipe and cmd.io.stdout == .pipe and this.stdout == .buf) { - bun.handleOom(cmd.base.shell.buffered_stdout().append(bun.default_allocator, this.stdout.buf.items[0..])); + bun.handleOom(cmd.base.shell.buffered_stdout().appendSlice( + bun.default_allocator, + this.stdout.buf.items[0..], + )); } // Aggregate output data if shell state is piped and this cmd is piped if (cmd.io.stderr == .pipe and cmd.io.stderr == .pipe and this.stderr == .buf) { - bun.handleOom(cmd.base.shell.buffered_stderr().append(bun.default_allocator, this.stderr.buf.items[0..])); + bun.handleOom(cmd.base.shell.buffered_stderr().appendSlice( + bun.default_allocator, + this.stderr.buf.items[0..], + )); } return cmd.parent.childDone(cmd, this.exit_code.?); diff --git a/src/shell/IOWriter.zig b/src/shell/IOWriter.zig index a594687ae7..26d0bb9f9b 100644 --- a/src/shell/IOWriter.zig +++ b/src/shell/IOWriter.zig @@ -323,7 +323,7 @@ pub fn doFileWrite(this: *IOWriter) Yield { }; if (child.bytelist) |bl| { const written_slice = this.buf.items[this.total_bytes_written .. this.total_bytes_written + amt]; - bun.handleOom(bl.append(bun.default_allocator, written_slice)); + bun.handleOom(bl.appendSlice(bun.default_allocator, written_slice)); } child.written += amt; if (!child.wroteEverything()) { @@ -347,7 +347,7 @@ pub fn onWritePollable(this: *IOWriter, amount: usize, status: bun.io.WriteStatu } else { if (child.bytelist) |bl| { const written_slice = this.buf.items[this.total_bytes_written .. 
this.total_bytes_written + amount]; - bun.handleOom(bl.append(bun.default_allocator, written_slice)); + bun.handleOom(bl.appendSlice(bun.default_allocator, written_slice)); } this.total_bytes_written += amount; child.written += amount; diff --git a/src/shell/interpreter.zig b/src/shell/interpreter.zig index e6d1a1db50..b14bcb5b99 100644 --- a/src/shell/interpreter.zig +++ b/src/shell/interpreter.zig @@ -417,10 +417,10 @@ pub const Interpreter = struct { if (comptime free_buffered_io) { if (this._buffered_stdout == .owned) { - this._buffered_stdout.owned.deinitWithAllocator(bun.default_allocator); + this._buffered_stdout.owned.deinit(bun.default_allocator); } if (this._buffered_stderr == .owned) { - this._buffered_stderr.owned.deinitWithAllocator(bun.default_allocator); + this._buffered_stderr.owned.deinit(bun.default_allocator); } } @@ -1181,10 +1181,10 @@ pub const Interpreter = struct { fn deinitFromFinalizer(this: *ThisInterpreter) void { if (this.root_shell._buffered_stderr == .owned) { - this.root_shell._buffered_stderr.owned.deinitWithAllocator(bun.default_allocator); + this.root_shell._buffered_stderr.owned.deinit(bun.default_allocator); } if (this.root_shell._buffered_stdout == .owned) { - this.root_shell._buffered_stdout.owned.deinitWithAllocator(bun.default_allocator); + this.root_shell._buffered_stdout.owned.deinit(bun.default_allocator); } this.this_jsvalue = .zero; this.allocator.destroy(this); diff --git a/src/shell/shell.zig b/src/shell/shell.zig index 270fe85148..e8ded9f1ae 100644 --- a/src/shell/shell.zig +++ b/src/shell/shell.zig @@ -4098,8 +4098,8 @@ pub fn SmolList(comptime T: type, comptime INLINED_MAX: comptime_int) type { pub fn promote(this: *Inlined, n: usize, new: T) bun.BabyList(T) { var list = bun.handleOom(bun.BabyList(T).initCapacity(bun.default_allocator, n)); - bun.handleOom(list.append(bun.default_allocator, this.items[0..INLINED_MAX])); - bun.handleOom(list.push(bun.default_allocator, new)); + bun.handleOom(list.appendSlice(bun.default_allocator, this.items[0..INLINED_MAX])); + bun.handleOom(list.append(bun.default_allocator, new)); return list; } @@ -4244,7 +4244,7 @@ pub fn SmolList(comptime T: type, comptime INLINED_MAX: comptime_int) type { this.inlined.len += 1; }, .heap => { - bun.handleOom(this.heap.push(bun.default_allocator, new)); + bun.handleOom(this.heap.append(bun.default_allocator, new)); }, } } diff --git a/src/shell/states/Cmd.zig b/src/shell/states/Cmd.zig index 4bda507b16..8a732f27d6 100644 --- a/src/shell/states/Cmd.zig +++ b/src/shell/states/Cmd.zig @@ -116,12 +116,9 @@ const BufferedIoClosed = struct { } = .open, owned: bool = false, - /// BufferedInput/Output uses jsc vm allocator - pub fn deinit(this: *BufferedIoState, jsc_vm_allocator: Allocator) void { + pub fn deinit(this: *BufferedIoState) void { if (this.state == .closed and this.owned) { - var list = this.state.closed.listManaged(jsc_vm_allocator); - list.deinit(); - this.state.closed = .{}; + this.state.closed.clearAndFree(bun.default_allocator); } } @@ -130,13 +127,13 @@ const BufferedIoClosed = struct { } }; - fn deinit(this: *BufferedIoClosed, jsc_vm_allocator: Allocator) void { + fn deinit(this: *BufferedIoClosed) void { if (this.stdout) |*io| { - io.deinit(jsc_vm_allocator); + io.deinit(); } if (this.stderr) |*io| { - io.deinit(jsc_vm_allocator); + io.deinit(); } } @@ -157,10 +154,11 @@ const BufferedIoClosed = struct { // If the shell state is piped (inside a cmd substitution) aggregate the output of this command if (cmd.io.stdout == .pipe and cmd.io.stdout == .pipe and 
!cmd.node.redirect.redirectsElsewhere(.stdout)) { const the_slice = readable.pipe.slice(); - bun.handleOom(cmd.base.shell.buffered_stdout().append(bun.default_allocator, the_slice)); + bun.handleOom(cmd.base.shell.buffered_stdout().appendSlice(bun.default_allocator, the_slice)); } - stdout.state = .{ .closed = bun.ByteList.fromList(readable.pipe.takeBuffer()) }; + var buffer = readable.pipe.takeBuffer(); + stdout.state = .{ .closed = bun.ByteList.moveFromList(&buffer) }; } }, .stderr => { @@ -170,10 +168,11 @@ const BufferedIoClosed = struct { // If the shell state is piped (inside a cmd substitution) aggregate the output of this command if (cmd.io.stderr == .pipe and cmd.io.stderr == .pipe and !cmd.node.redirect.redirectsElsewhere(.stderr)) { const the_slice = readable.pipe.slice(); - bun.handleOom(cmd.base.shell.buffered_stderr().append(bun.default_allocator, the_slice)); + bun.handleOom(cmd.base.shell.buffered_stderr().appendSlice(bun.default_allocator, the_slice)); } - stderr.state = .{ .closed = bun.ByteList.fromList(readable.pipe.takeBuffer()) }; + var buffer = readable.pipe.takeBuffer(); + stderr.state = .{ .closed = bun.ByteList.moveFromList(&buffer) }; } }, .stdin => { @@ -706,7 +705,7 @@ pub fn deinit(this: *Cmd) void { cmd.deinit(); } - this.exec.subproc.buffered_closed.deinit(this.base.eventLoop().allocator()); + this.exec.subproc.buffered_closed.deinit(); } else { this.exec.bltn.deinit(); } @@ -767,7 +766,7 @@ pub fn bufferedOutputCloseStdout(this: *Cmd, err: ?jsc.SystemError) void { if (this.io.stdout == .fd and this.io.stdout.fd.captured != null and !this.node.redirect.redirectsElsewhere(.stdout)) { var buf = this.io.stdout.fd.captured.?; const the_slice = this.exec.subproc.child.stdout.pipe.slice(); - bun.handleOom(buf.append(bun.default_allocator, the_slice)); + bun.handleOom(buf.appendSlice(bun.default_allocator, the_slice)); } this.exec.subproc.buffered_closed.close(this, .{ .stdout = &this.exec.subproc.child.stdout }); this.exec.subproc.child.closeIO(.stdout); @@ -783,14 +782,13 @@ pub fn bufferedOutputCloseStderr(this: *Cmd, err: ?jsc.SystemError) void { } if (this.io.stderr == .fd and this.io.stderr.fd.captured != null and !this.node.redirect.redirectsElsewhere(.stderr)) { var buf = this.io.stderr.fd.captured.?; - bun.handleOom(buf.append(bun.default_allocator, this.exec.subproc.child.stderr.pipe.slice())); + bun.handleOom(buf.appendSlice(bun.default_allocator, this.exec.subproc.child.stderr.pipe.slice())); } this.exec.subproc.buffered_closed.close(this, .{ .stderr = &this.exec.subproc.child.stderr }); this.exec.subproc.child.closeIO(.stderr); } const std = @import("std"); -const Allocator = std.mem.Allocator; const bun = @import("bun"); const assert = bun.assert; diff --git a/src/shell/subproc.zig b/src/shell/subproc.zig index 9a8f838be8..dd308b13e7 100644 --- a/src/shell/subproc.zig +++ b/src/shell/subproc.zig @@ -724,6 +724,9 @@ pub const ShellSubprocess = struct { event_loop: jsc.EventLoopHandle, shellio: *ShellIO, spawn_args_: SpawnArgs, + // We have to use an out pointer because this function may invoke callbacks that expect a + // fully initialized parent object. Writing to this out pointer may be the last step needed + // to initialize the object. 
out: **@This(), notify_caller_process_already_exited: *bool, ) bun.shell.Result(void) { @@ -732,10 +735,7 @@ pub const ShellSubprocess = struct { var spawn_args = spawn_args_; - _ = switch (spawnMaybeSyncImpl( - .{ - .is_sync = false, - }, + return switch (spawnMaybeSyncImpl( event_loop, arena.allocator(), &spawn_args, @@ -743,25 +743,23 @@ pub const ShellSubprocess = struct { out, notify_caller_process_already_exited, )) { - .result => |subproc| subproc, + .result => .success, .err => |err| return .{ .err = err }, }; - - return .success; } fn spawnMaybeSyncImpl( - comptime config: struct { - is_sync: bool, - }, event_loop: jsc.EventLoopHandle, allocator: Allocator, spawn_args: *SpawnArgs, shellio: *ShellIO, + // We have to use an out pointer because this function may invoke callbacks that expect a + // fully initialized parent object. Writing to this out pointer may be the last step needed + // to initialize the object. out_subproc: **@This(), notify_caller_process_already_exited: *bool, - ) bun.shell.Result(*@This()) { - const is_sync = config.is_sync; + ) bun.shell.Result(void) { + const is_sync = false; if (!spawn_args.override_env and spawn_args.env_array.items.len == 0) { // spawn_args.env_array.items = bun.handleOom(jsc_vm.transpiler.env.map.createNullDelimitedEnvMap(allocator)); @@ -873,14 +871,12 @@ pub const ShellSubprocess = struct { subprocess.stdin.pipe.signal = bun.webcore.streams.Signal.init(&subprocess.stdin); } - if (comptime !is_sync) { - switch (subprocess.process.watch()) { - .result => {}, - .err => { - notify_caller_process_already_exited.* = true; - spawn_args.lazy = false; - }, - } + switch (subprocess.process.watch()) { + .result => {}, + .err => { + notify_caller_process_already_exited.* = true; + spawn_args.lazy = false; + }, } if (subprocess.stdin == .buffer) { @@ -889,7 +885,7 @@ pub const ShellSubprocess = struct { if (subprocess.stdout == .pipe) { subprocess.stdout.pipe.start(subprocess, event_loop).assert(); - if ((is_sync or !spawn_args.lazy) and subprocess.stdout == .pipe) { + if (!spawn_args.lazy and subprocess.stdout == .pipe) { subprocess.stdout.pipe.readAll(); } } @@ -897,7 +893,7 @@ pub const ShellSubprocess = struct { if (subprocess.stderr == .pipe) { subprocess.stderr.pipe.start(subprocess, event_loop).assert(); - if ((is_sync or !spawn_args.lazy) and subprocess.stderr == .pipe) { + if (!spawn_args.lazy and subprocess.stderr == .pipe) { subprocess.stderr.pipe.readAll(); } } @@ -906,7 +902,7 @@ pub const ShellSubprocess = struct { log("returning", .{}); - return .{ .result = subprocess }; + return .{ .result = {} }; } pub fn wait(this: *@This(), sync: bool) void { @@ -985,7 +981,7 @@ pub const PipeReader = struct { pub fn append(this: *BufferedOutput, bytes: []const u8) void { switch (this.*) { .bytelist => { - bun.handleOom(this.bytelist.append(bun.default_allocator, bytes)); + bun.handleOom(this.bytelist.appendSlice(bun.default_allocator, bytes)); }, .array_buffer => { const array_buf_slice = this.array_buffer.buf.slice(); @@ -1001,7 +997,7 @@ pub const PipeReader = struct { pub fn deinit(this: *BufferedOutput) void { switch (this.*) { .bytelist => { - this.bytelist.deinitWithAllocator(bun.default_allocator); + this.bytelist.deinit(bun.default_allocator); }, .array_buffer => { // FIXME: SHOULD THIS BE HERE? 
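The hunks throughout this patch apply one mechanical migration to `BabyList` call sites: single-element `push` becomes `append`, slice-appending `append` becomes `appendSlice`, `deinitWithAllocator` becomes `deinit`, and lists built as `std.ArrayListUnmanaged` are handed off with `moveFromList`. Below is a minimal sketch of a migrated call site; it assumes only the method names visible in this diff, and the `example` function and its locals are hypothetical rather than code from this patch.

    const std = @import("std");
    const bun = @import("bun");

    fn example(allocator: std.mem.Allocator) !void {
        // Owned list: `append` takes one element (formerly `push`), `appendSlice`
        // takes a slice (formerly `append`), and `deinit` replaces `deinitWithAllocator`.
        var bytes: bun.ByteList = .{};
        defer bytes.deinit(allocator);
        try bytes.append(allocator, 'a');
        try bytes.appendSlice(allocator, "bc");

        // Hand an ArrayListUnmanaged off to a BabyList without copying.
        var scratch: std.ArrayListUnmanaged(u8) = .{};
        try scratch.appendSlice(allocator, "hello");
        var moved = bun.ByteList.moveFromList(&scratch);
        defer moved.deinit(allocator);

        // Borrowed view: reading is fine, but any method that could allocate or
        // free trips `assertOwned` when `safety_checks` is enabled.
        const borrowed = bun.ByteList.fromBorrowedSliceDangerous("read-only");
        _ = borrowed.slice();
    }
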
diff --git a/src/sourcemap/CodeCoverage.zig b/src/sourcemap/CodeCoverage.zig index eebaa4a7ea..58923ba0f8 100644 --- a/src/sourcemap/CodeCoverage.zig +++ b/src/sourcemap/CodeCoverage.zig @@ -264,7 +264,7 @@ pub const Report = struct { pub fn deinit(this: *Report, allocator: std.mem.Allocator) void { this.executable_lines.deinit(allocator); this.lines_which_have_executed.deinit(allocator); - this.line_hits.deinitWithAllocator(allocator); + this.line_hits.deinit(allocator); this.functions.deinit(allocator); this.stmts.deinit(allocator); this.functions_which_have_executed.deinit(allocator); @@ -445,7 +445,7 @@ pub const ByteRangeMapping = struct { const line_hits_slice = line_hits.slice(); @memset(line_hits_slice, 0); - errdefer line_hits.deinitWithAllocator(allocator); + errdefer line_hits.deinit(allocator); for (blocks, 0..) |block, i| { if (block.endOffset < 0 or block.startOffset < 0) continue; // does not map to anything @@ -535,7 +535,7 @@ pub const ByteRangeMapping = struct { line_hits.len = line_count; const line_hits_slice = line_hits.slice(); @memset(line_hits_slice, 0); - errdefer line_hits.deinitWithAllocator(allocator); + errdefer line_hits.deinit(allocator); for (blocks, 0..) |block, i| { if (block.endOffset < 0 or block.startOffset < 0) continue; // does not map to anything diff --git a/src/sourcemap/LineOffsetTable.zig b/src/sourcemap/LineOffsetTable.zig index 06b4a01db7..59d02ea313 100644 --- a/src/sourcemap/LineOffsetTable.zig +++ b/src/sourcemap/LineOffsetTable.zig @@ -171,7 +171,7 @@ pub fn generate(allocator: std.mem.Allocator, contents: []const u8, approximate_ list.append(allocator, .{ .byte_offset_to_start_of_line = line_byte_offset, .byte_offset_to_first_non_ascii = byte_offset_to_first_non_ascii, - .columns_for_non_ascii = BabyList(i32).init(owned), + .columns_for_non_ascii = BabyList(i32).fromOwnedSlice(owned), }) catch unreachable; column = 0; @@ -213,7 +213,7 @@ pub fn generate(allocator: std.mem.Allocator, contents: []const u8, approximate_ list.append(allocator, .{ .byte_offset_to_start_of_line = line_byte_offset, .byte_offset_to_first_non_ascii = byte_offset_to_first_non_ascii, - .columns_for_non_ascii = BabyList(i32).init(owned), + .columns_for_non_ascii = BabyList(i32).fromOwnedSlice(owned), }) catch unreachable; } diff --git a/src/sourcemap/sourcemap.zig b/src/sourcemap/sourcemap.zig index 4b8378ef7b..7a75934db7 100644 --- a/src/sourcemap/sourcemap.zig +++ b/src/sourcemap/sourcemap.zig @@ -203,7 +203,7 @@ pub fn parseJSON( } map_data.mappings.names = names_list.items; - map_data.mappings.names_buffer = .fromList(names_buffer); + map_data.mappings.names_buffer = .moveFromList(&names_buffer); } } } @@ -427,7 +427,7 @@ pub const Mapping = struct { inline else => |*list| list.deinit(allocator), } - self.names_buffer.deinitWithAllocator(allocator); + self.names_buffer.deinit(allocator); allocator.free(self.names); } diff --git a/src/sql/mysql/MySQLConnection.zig b/src/sql/mysql/MySQLConnection.zig index 82bce2824e..6e73f95521 100644 --- a/src/sql/mysql/MySQLConnection.zig +++ b/src/sql/mysql/MySQLConnection.zig @@ -309,7 +309,7 @@ pub fn getConnected(this: *MySQLConnection, _: *jsc.JSGlobalObject) JSValue { pub fn doClose(this: *MySQLConnection, globalObject: *jsc.JSGlobalObject, _: *jsc.CallFrame) bun.JSError!JSValue { _ = globalObject; this.disconnect(); - this.write_buffer.deinit(bun.default_allocator); + this.write_buffer.clearAndFree(bun.default_allocator); return .js_undefined; } @@ -1913,7 +1913,7 @@ pub fn handleResultSet(this: *MySQLConnection, comptime 
Context: type, reader: N fn close(this: *@This()) void { this.disconnect(); this.unregisterAutoFlusher(); - this.write_buffer.deinit(bun.default_allocator); + this.write_buffer.clearAndFree(bun.default_allocator); } pub fn closeStatement(this: *MySQLConnection, statement: *MySQLStatement) !void { diff --git a/src/sql/postgres/PostgresSQLConnection.zig b/src/sql/postgres/PostgresSQLConnection.zig index be6f899198..d2c2a31b67 100644 --- a/src/sql/postgres/PostgresSQLConnection.zig +++ b/src/sql/postgres/PostgresSQLConnection.zig @@ -871,7 +871,7 @@ pub fn doFlush(this: *PostgresSQLConnection, _: *jsc.JSGlobalObject, _: *jsc.Cal fn close(this: *@This()) void { this.disconnect(); this.unregisterAutoFlusher(); - this.write_buffer.deinit(bun.default_allocator); + this.write_buffer.clearAndFree(bun.default_allocator); } pub fn doClose(this: *@This(), globalObject: *jsc.JSGlobalObject, _: *jsc.CallFrame) bun.JSError!JSValue { diff --git a/src/sql/postgres/protocol/NotificationResponse.zig b/src/sql/postgres/protocol/NotificationResponse.zig index 8b319e09cd..17229e596d 100644 --- a/src/sql/postgres/protocol/NotificationResponse.zig +++ b/src/sql/postgres/protocol/NotificationResponse.zig @@ -5,8 +5,8 @@ channel: bun.ByteList = .{}, payload: bun.ByteList = .{}, pub fn deinit(this: *@This()) void { - this.channel.deinitWithAllocator(bun.default_allocator); - this.payload.deinitWithAllocator(bun.default_allocator); + this.channel.clearAndFree(bun.default_allocator); + this.payload.clearAndFree(bun.default_allocator); } pub fn decodeInternal(this: *@This(), comptime Container: type, reader: NewReader(Container)) !void { diff --git a/src/sql/shared/Data.zig b/src/sql/shared/Data.zig index 964cc11525..f63540b93e 100644 --- a/src/sql/shared/Data.zig +++ b/src/sql/shared/Data.zig @@ -20,21 +20,27 @@ pub const Data = union(enum) { inline_storage.len = @truncate(possibly_inline_bytes.len); return .{ .inline_storage = inline_storage }; } - return .{ .owned = bun.ByteList.init(try allocator.dupe(u8, possibly_inline_bytes)) }; + return .{ + .owned = bun.ByteList.fromOwnedSlice(try allocator.dupe(u8, possibly_inline_bytes)), + }; } pub fn toOwned(this: @This()) !bun.ByteList { return switch (this) { .owned => this.owned, - .temporary => bun.ByteList.init(try bun.default_allocator.dupe(u8, this.temporary)), - .empty => bun.ByteList.init(&.{}), - .inline_storage => bun.ByteList.init(try bun.default_allocator.dupe(u8, this.inline_storage.slice())), + .temporary => bun.ByteList.fromOwnedSlice( + try bun.default_allocator.dupe(u8, this.temporary), + ), + .empty => bun.ByteList.empty, + .inline_storage => bun.ByteList.fromOwnedSlice( + try bun.default_allocator.dupe(u8, this.inline_storage.slice()), + ), }; } pub fn deinit(this: *@This()) void { switch (this.*) { - .owned => this.owned.deinitWithAllocator(bun.default_allocator), + .owned => |*owned| owned.clearAndFree(bun.default_allocator), .temporary => {}, .empty => {}, .inline_storage => {}, @@ -45,12 +51,10 @@ pub const Data = union(enum) { /// Generally, for security reasons. 
pub fn zdeinit(this: *@This()) void { switch (this.*) { - .owned => { - + .owned => |*owned| { // Zero bytes before deinit - @memset(this.owned.slice(), 0); - - this.owned.deinitWithAllocator(bun.default_allocator); + bun.freeSensitive(bun.default_allocator, owned.slice()); + owned.deinit(bun.default_allocator); }, .temporary => {}, .empty => {}, diff --git a/src/string/MutableString.zig b/src/string/MutableString.zig index b4e2da39ea..48a0346f12 100644 --- a/src/string/MutableString.zig +++ b/src/string/MutableString.zig @@ -258,13 +258,6 @@ pub fn slice(self: *MutableString) []u8 { return self.list.items; } -/// Take ownership of the existing value without discarding excess capacity. -pub fn move(self: *MutableString) []u8 { - const out = self.list.items; - self.list = .{}; - return out; -} - /// Appends `0` if needed pub fn sliceWithSentinel(self: *MutableString) [:0]u8 { if (self.list.items.len > 0 and self.list.items[self.list.items.len - 1] != 0) { diff --git a/src/string/SmolStr.zig b/src/string/SmolStr.zig index 55560abd54..77ac00c562 100644 --- a/src/string/SmolStr.zig +++ b/src/string/SmolStr.zig @@ -169,7 +169,7 @@ pub const SmolStr = packed struct(u128) { if (inlined.len() + 1 > Inlined.max_len) { var baby_list = try BabyList(u8).initCapacity(allocator, inlined.len() + 1); baby_list.appendSliceAssumeCapacity(inlined.slice()); - try baby_list.push(allocator, char); + try baby_list.append(allocator, char); this.__len = baby_list.len; this.__ptr = baby_list.ptr; this.cap = baby_list.cap; @@ -188,7 +188,7 @@ pub const SmolStr = packed struct(u128) { .len = this.__len, .cap = this.cap, }; - try baby_list.push(allocator, char); + try baby_list.append(allocator, char); this.__len = baby_list.len; this.__ptr = baby_list.ptr; @@ -217,7 +217,7 @@ pub const SmolStr = packed struct(u128) { .len = this.__len, .cap = this.cap, }; - try baby_list.append(allocator, values); + try baby_list.appendSlice(allocator, values); this.* = SmolStr.fromBabyList(baby_list); return; diff --git a/src/transpiler.zig b/src/transpiler.zig index ecbbd382a5..2f020eeac8 100644 --- a/src/transpiler.zig +++ b/src/transpiler.zig @@ -775,7 +775,7 @@ pub const Transpiler = struct { bun.perf.trace("JSPrinter.print"); defer tracer.end(); - const symbols = js_ast.Symbol.NestedList.init(&[_]js_ast.Symbol.List{ast.symbols}); + const symbols = js_ast.Symbol.NestedList.fromBorrowedSliceDangerous(&.{ast.symbols}); return switch (format) { .cjs => try js_printer.printCommonJS( @@ -1199,13 +1199,18 @@ pub const Transpiler = struct { const properties: []js_ast.G.Property = expr.data.e_object.properties.slice(); if (properties.len > 0) { var stmts = allocator.alloc(js_ast.Stmt, 3) catch return null; - var decls = allocator.alloc(js_ast.G.Decl, properties.len) catch return null; + var decls = std.ArrayListUnmanaged(js_ast.G.Decl).initCapacity( + allocator, + properties.len, + ) catch |err| bun.handleOom(err); + decls.expandToCapacity(); + symbols = allocator.alloc(js_ast.Symbol, properties.len) catch return null; var export_clauses = allocator.alloc(js_ast.ClauseItem, properties.len) catch return null; var duplicate_key_checker = bun.StringHashMap(u32).init(allocator); defer duplicate_key_checker.deinit(); var count: usize = 0; - for (properties, decls, symbols, 0..) |*prop, *decl, *symbol, i| { + for (properties, decls.items, symbols, 0..) 
|*prop, *decl, *symbol, i| { const name = prop.key.?.data.e_string.slice(allocator); // Do not make named exports for "default" exports if (strings.eqlComptime(name, "default")) @@ -1213,7 +1218,7 @@ pub const Transpiler = struct { const visited = duplicate_key_checker.getOrPut(name) catch continue; if (visited.found_existing) { - decls[visited.value_ptr.*].value = prop.value.?; + decls.items[visited.value_ptr.*].value = prop.value.?; continue; } visited.value_ptr.* = @truncate(i); @@ -1241,10 +1246,11 @@ pub const Transpiler = struct { count += 1; } + decls.shrinkRetainingCapacity(count); stmts[0] = js_ast.Stmt.alloc( js_ast.S.Local, js_ast.S.Local{ - .decls = js_ast.G.Decl.List.init(decls[0..count]), + .decls = js_ast.G.Decl.List.moveFromList(&decls), .kind = .k_var, }, logger.Loc{ @@ -1297,7 +1303,7 @@ pub const Transpiler = struct { } }; var ast = js_ast.Ast.fromParts(parts); - ast.symbols = js_ast.Symbol.List.init(symbols); + ast.symbols = js_ast.Symbol.List.fromOwnedSlice(symbols); return ParseResult{ .ast = ast, @@ -1324,7 +1330,7 @@ pub const Transpiler = struct { parts[0] = js_ast.Part{ .stmts = stmts }; return ParseResult{ - .ast = js_ast.Ast.initTest(parts), + .ast = js_ast.Ast.fromParts(parts), .source = source.*, .loader = loader, .input_fd = input_fd, diff --git a/src/valkey/valkey.zig b/src/valkey/valkey.zig index 97bd11c2be..87b9cea495 100644 --- a/src/valkey/valkey.zig +++ b/src/valkey/valkey.zig @@ -431,7 +431,7 @@ pub const ValkeyClient = struct { /// Handle connection closed event pub fn onClose(this: *ValkeyClient) void { this.unregisterAutoFlusher(); - this.write_buffer.deinit(this.allocator); + this.write_buffer.clearAndFree(this.allocator); // If manually closing, don't attempt to reconnect if (this.flags.is_manually_closed) { @@ -794,8 +794,8 @@ pub const ValkeyClient = struct { /// Handle socket open event pub fn onOpen(this: *ValkeyClient, socket: uws.AnySocket) void { this.socket = socket; - this.write_buffer.deinit(this.allocator); - this.read_buffer.deinit(this.allocator); + this.write_buffer.clearAndFree(this.allocator); + this.read_buffer.clearAndFree(this.allocator); this.start(); } diff --git a/test/bake/bake-harness.ts b/test/bake/bake-harness.ts index 8e91c328c5..f7aa1fd988 100644 --- a/test/bake/bake-harness.ts +++ b/test/bake/bake-harness.ts @@ -1563,7 +1563,11 @@ class OutputLineStream extends EventEmitter { this.lines.push(line); if ( line.includes("============================================================") || - line.includes("Allocation scope leaked") + line.includes("Allocation scope leaked") || + line.includes("collection first used here") || + line.includes("allocator mismatch") || + line.includes("assertion failure") || + line.includes("race condition") ) { // Tell consumers to wait for the process to exit this.panicked = true; diff --git a/test/internal/ban-limits.json b/test/internal/ban-limits.json index af4ce32d94..2766870d4b 100644 --- a/test/internal/ban-limits.json +++ b/test/internal/ban-limits.json @@ -10,7 +10,7 @@ ".stdDir()": 41, ".stdFile()": 18, "// autofix": 168, - ": [^=]+= undefined,$": 260, + ": [^=]+= undefined,$": 258, "== alloc.ptr": 0, "== allocator.ptr": 0, "@import(\"bun\").": 0,
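
For reference, the call-site renames above appear to follow std.ArrayListUnmanaged naming conventions (push -> append, append -> appendSlice, deinitWithAllocator -> deinit, init -> fromOwnedSlice, plus clearAndFree for "keep the struct, free the buffer"). Below is a standalone sketch of the same call shapes against std.ArrayListUnmanaged itself, since BabyList/ByteList are internal to this repository; it is an illustration of the shared API shape, not Bun's implementation.

    const std = @import("std");

    pub fn main() !void {
        const allocator = std.heap.page_allocator;

        var list: std.ArrayListUnmanaged(u8) = .{};
        defer list.deinit(allocator); // was deinitWithAllocator on BabyList

        try list.append(allocator, 'a'); // single element; was `push`
        try list.appendSlice(allocator, "bc"); // a slice; was `append`

        // Adopting an existing heap allocation, as the fromOwnedSlice call
        // sites in this patch do:
        const owned = try allocator.dupe(u8, "hello");
        var adopted = std.ArrayListUnmanaged(u8).fromOwnedSlice(owned);
        defer adopted.deinit(allocator);

        // Releasing the buffer while keeping the struct reusable, as the
        // write_buffer call sites now do with clearAndFree:
        list.clearAndFree(allocator);

        std.debug.print("{s} / {d} items\n", .{ adopted.items, list.items.len });
    }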