bun.sh/src/allocators.zig
Jarred Sumner c9fe57fa63 wip use wrapper for managing process (#8456)
* WIP sync close (shows ref count bug in stream)

* fix closing on PipeWriter and PipeReader

* remove old todos

* join

* Some shell changes

at least it compiles

* fix some compile errors

* fix ref/unref server on windows

* actually use the ref count in these places

* make windows compile again

* more tests passing

* Make shell compile again

* Slowly remove some `@panic("TODO SHELL")`

* Eliminate `@panic("TODO SHELL")` for BufferedWriter

* Holy cleansing of `@panic("TODO SHELL")`

at least it compiles now

* Okay now the shell compiles, but segfaults

* Fix compiler errors

* more stable stream and now Content-Range pass

* make windows compile again

* revert stuff until the fix is actually ready

* revert onDone thing

* Fix buffered writer for shell

* Fix buffered writer + shell/subproc.zig and windows build

* Fix for #8982 got lost in the merge

* Actually buffer subproc output

* Fix some stuff shell

* oops

* fix context deinit

* fix renderMissing

* shell: Fix array buffer

* more stable streams (#9053)

fix stream ref counting

* wip

* Remove `@panic("TODO")` on shell event loop tasks; restore Redirect open flags that got lost in the merge

* Support redirects

* fixes

cc @cirospaciari

* Update ReadableStreamInternals.ts

* Fix spurious error

* Update stream.js

* leak

* Fix UAF

cc @cirospaciari

* Fix memory leaks

* HOLY FUCK big refactor

* misc cleanup

* shell: Fix a bunch of tests

* clean up

* gitignore: fix ending newline

* get windows compiling again

* tidy

* hide linker warn with icu

* closeIfPossible

* Better leak test

* Fix forgetting to decrement reference count

* Update stdio.zig

* Fix shell windows build

* Stupid unreachable

* Woops

* basic echo hi works on windows

* Fix flaky test on Windows

* Fix windows regression in Bun.main (#9156)

* Fix windows regression in Bun.main

* Handle invalid handles

* Fix flaky test

* Better launch config

* Fixup

* Make this test less flaky on Windows

* Fixup

* Cygwin

* Support signal codes in subprocess.kill(), resolve file path

* Treat null as ignore

* Ignore carriage returns

* Fixup

* shell: Fix IOWriter bug

* shell: Use custom `open()`/`openat()`

* windows shell subproc works

* zack commit

* I think I understand WindowsStreamingWriter

* fix thing

* why were we doing this in tests

* shell: Fix rm

* shell: Add rm -rf node_modules/ test

* shell: use `.runAsTest()` in some places to make it easier to determine which test failed

* [autofix.ci] apply automated fixes

* woopsie

* Various changes

* Fix

* shell: abstract output task logic

* shell: mkdir builtin

* fixup

* stuff

* shell: Make writing length of 0 in IOWriter immediately resolve

* shell: Implement `touch`

* shell: basic `cat` working

* Make it compile on windows

* shell: Fix IOReader bug

* [autofix.ci] apply automated fixes

* fix windows kill on subprocess/process

* fix dns tests to match behavior on windows (same as nodejs)

* fix windows ci

* again

* move `close_handle` to flags in `PipeWriter` and fix shell hanging

* Fix `ls` not giving non-zero exit code on error

* Handle edge case in is_atty

* Fix writer.flush() when there's no data

* Fix some tests

* Disable uv_unref on uv_process_t on Windows, for now.

* fix writer.end

* fix stdout.write

* fix child-process on win32

* Make this test less flaky on Windows

* Add assertion

* Make these the same

* Make it pass on windows

* Don't commit

* Log the test name

* Make this test less flaky on windows

* Make this test less flaky on windows

* Print which test is taking a while in the runner

* fixups

* Fixups

* Add some assertions

* Bring back test concurrency

* shell: bring back redirect stdin

* make it compile again cc @zackradisic

* initialize env map with capacity

* some fixes

* cleanup

* oops

* fix leak, fix done

* fix unconsumedPromises on events

* always run expect

* Update child_process.test.ts

* fix reading special files

* Fix a test

* Deflake this test

* Make these comparisons easier

* Won't really fix it but slightly cleaner

* Update serve.test.ts

* Make the checks for if the body is already used more resilient

* Move this to the harness

* Make this test not hang in development

* Fix this test

* Make the logs better

* zero init some things

* Make this test better

* Fix readSocket

* Parallelize this test

* Handle EPipe and avoid big data

* This was a mistake

* Fix a bunch of things

* Fix memory leak

* Avoid sigpipe + optimize + delete dead code

* Make this take less time

* Make it bigger

* Remove some redundant code

* Update process.zig

* Merge and hopefully don't break things along the way

* Silence build warning

* Uncomment on posix

* Skip test on windows

* windows

* Cleanup test

* Update

* Deflake

* always

* less flaky test

* [autofix.ci] apply automated fixes

* logs

* fix uaf on shell IOReader

* stuff to make it work with mini event loop

* fix 2 double free scenarios, support redirections on windows

* shell: Make `1>&2` and `2>&1` work with libuv

* yoops

* Partial fix

* Partial fix

* fix build

* fix build

* ok

* Make a couple shell tests pass

* More logging

* fix

* fix

* Fix build issue

* more tests pass

* Deflake

* Deflake

* Use Output.panic instead of garbled text

* Formatting

* Introduce `bun.sys.File`, use it for `Output.Source.StreamType`, fix nested Output.scoped() calls, and use the Win32 `ReadFile` API for reading when it's not a libuv file descriptor.

This lets us avoid the subtle usages of `unreachable` in std.os when writing to stdout/stderr.

Previously, we were initializing the libuv loop immediately at launch due to checking for the existence of a bun build --compile'd executable. When the file descriptor is not from libuv, using libuv is just overhead.

cc @paperdave, please tell me if any of that is incorrect or if you think this is a bad idea.

* Fix closing undefined memory file descriptors in spawn

cc @zackradisic

* pause instead of close

* Fix poorly-written test

* We don't need big numbers for this test

* sad workaround

* fixup

* Clearer error handling for this test

* Fix incorrect test

@electroid when a ReadableStream isn't closed, hanging is the correct behavior when consuming buffered data. We cannot know whether the buffered data is complete if the stream never closes.

* Fix build

* Remove known failing on windows

* Deflake

* Mark no longer failing

* show all the failing tests

* Sort the list of tests

* fix argument handling

* don't show "posix_spawn" as an error code on windows

* make bun-upgrade.test.ts pass on windows

* fix bunx and bun create again sorry

* a

* fix invalidexe because we should not be running javascript files as if they were exes

* Concurrency in test runner + better logging

* Revert "fix invalidexe because we should not be running javascript files as if they were exes"

This reverts commit da47cf8247.

* WIP: Unix fixes (#9322)

* wip

* [autofix.ci] apply automated fixes

* wip 2

* [autofix.ci] apply automated fixes

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>

* Update runner.node.mjs

* Update runner.node.mjs

* Document some environment variables

* shell: Make `Response` work with builtins

* Make it compile

* make pwd test pass

* [autofix.ci] apply automated fixes

* Fix printing garbage for source code previews

* Update javascript.zig

* Fix posix test failures

* Fix signal dispatch

cc @paperdave. Signals can be run from any thread. This causes an assertion failure when the receiving thread happens to not be the main thread. Easiest to reproduce on linux when you spawn 100 short-lived processes at once.

* windows

---------

Co-authored-by: cirospaciari <ciro.spaciari@gmail.com>
Co-authored-by: Zack Radisic <56137411+zackradisic@users.noreply.github.com>
Co-authored-by: Zack Radisic <zackradisic@Zacks-MBP-2.attlocal.net>
Co-authored-by: Jarred Sumner <709451+Jarred-Sumner@users.noreply.github.com>
Co-authored-by: Meghan Denny <meghan@bun.sh>
Co-authored-by: Zack Radisic <zack@theradisic.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: dave caruso <me@paperdave.net>
Co-authored-by: Dylan Conway <dylan.conway567@gmail.com>
2024-03-11 08:24:30 -07:00

728 lines
25 KiB
Zig

const std = @import("std");
const FeatureFlags = @import("./feature_flags.zig");
const Environment = @import("./env.zig");
const FixedBufferAllocator = std.heap.FixedBufferAllocator;
const bun = @import("root").bun;
pub fn isSliceInBufferT(comptime T: type, slice: []const T, buffer: []const T) bool {
    // Scale lengths by @sizeOf(T) so the comparison is in bytes, not elements.
    return (@intFromPtr(buffer.ptr) <= @intFromPtr(slice.ptr) and
        (@intFromPtr(slice.ptr) + slice.len * @sizeOf(T)) <= (@intFromPtr(buffer.ptr) + buffer.len * @sizeOf(T)));
}

/// Checks if a slice's pointer is contained within another slice.
/// If you need to make this generic, use isSliceInBufferT.
pub fn isSliceInBuffer(slice: []const u8, buffer: []const u8) bool {
    return isSliceInBufferT(u8, slice, buffer);
}
pub fn sliceRange(slice: []const u8, buffer: []const u8) ?[2]u32 {
    return if (@intFromPtr(buffer.ptr) <= @intFromPtr(slice.ptr) and
        (@intFromPtr(slice.ptr) + slice.len) <= (@intFromPtr(buffer.ptr) + buffer.len))
        [2]u32{
            @as(u32, @truncate(@intFromPtr(slice.ptr) - @intFromPtr(buffer.ptr))),
            @as(u32, @truncate(slice.len)),
        }
    else
        null;
}
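
// Illustrative sketch, not part of the original file: both helpers compare raw
// pointer ranges, so a subslice of a buffer is "in" it while an equal-length
// copy elsewhere is not. Assumes this runs as an ordinary `zig test` block.
test "isSliceInBuffer / sliceRange (illustrative)" {
    var buffer: [16]u8 = undefined;
    @memset(&buffer, 0xAA);
    const inside: []const u8 = buffer[4..8];
    try std.testing.expect(isSliceInBuffer(inside, &buffer));

    // sliceRange reports the (offset, length) of the slice within the buffer.
    const range = sliceRange(inside, &buffer).?;
    try std.testing.expectEqual(@as(u32, 4), range[0]);
    try std.testing.expectEqual(@as(u32, 4), range[1]);

    // A separate array lives at a different address range, so it is not "in" buffer.
    const copy = [_]u8{ 0, 0, 0, 0 };
    try std.testing.expect(!isSliceInBuffer(&copy, &buffer));
    try std.testing.expect(sliceRange(&copy, &buffer) == null);
}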
pub const IndexType = packed struct {
    index: u31,
    is_overflow: bool = false,
};

const HashKeyType = u64;
const IndexMapContext = struct {
    pub fn hash(_: @This(), key: HashKeyType) HashKeyType {
        return key;
    }

    pub fn eql(_: @This(), a: HashKeyType, b: HashKeyType) bool {
        return a == b;
    }
};

pub const IndexMap = std.HashMapUnmanaged(HashKeyType, IndexType, IndexMapContext, 80);
pub const IndexMapManaged = std.HashMap(HashKeyType, IndexType, IndexMapContext, 80);

pub const Result = struct {
    hash: HashKeyType,
    index: IndexType,
    status: ItemStatus,

    pub fn hasCheckedIfExists(r: *const Result) bool {
        return r.index.index != Unassigned.index;
    }

    pub fn isOverflowing(r: *const Result, comptime count: usize) bool {
        return r.index.index >= count;
    }
};

pub const NotFound = IndexType{
    .index = std.math.maxInt(u31),
};
pub const Unassigned = IndexType{
    .index = std.math.maxInt(u31) - 1,
};

pub const ItemStatus = enum(u3) {
    unknown,
    exists,
    not_found,
};
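
// Illustrative sketch, not part of the original file: IndexType packs a 31-bit
// index plus an overflow flag into 32 bits, and the two highest u31 values are
// reserved as the NotFound / Unassigned sentinels checked throughout this file.
test "IndexType sentinels (illustrative)" {
    try std.testing.expectEqual(@as(usize, 32), @bitSizeOf(IndexType));
    try std.testing.expectEqual(@as(u31, std.math.maxInt(u31)), NotFound.index);
    try std.testing.expectEqual(@as(u31, std.math.maxInt(u31) - 1), Unassigned.index);
}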
fn OverflowGroup(comptime Block: type) type {
    return struct {
        const Overflow = @This();
        // 16 million files should be good enough for anyone
        // ...right?
        const max = 4095;
        const UsedSize = std.math.IntFittingRange(0, max + 1);
        const default_allocator = @import("root").bun.default_allocator;

        used: UsedSize = 0,
        allocated: UsedSize = 0,
        ptrs: [max]*Block = undefined,

        pub fn tail(this: *Overflow) *Block {
            if (this.allocated > 0 and this.ptrs[this.used].isFull()) {
                this.used +%= 1;
                if (this.allocated > this.used) {
                    this.ptrs[this.used].used = 0;
                }
            }

            if (this.allocated <= this.used) {
                this.ptrs[this.allocated] = default_allocator.create(Block) catch unreachable;
                this.ptrs[this.allocated].* = Block{};
                this.allocated +%= 1;
            }

            return this.ptrs[this.used];
        }

        pub inline fn slice(this: *Overflow) []*Block {
            return this.ptrs[0..this.used];
        }
    };
}
pub fn OverflowList(comptime ValueType: type, comptime count: comptime_int) type {
    return struct {
        const This = @This();
        const SizeType = std.math.IntFittingRange(0, count);

        const Block = struct {
            used: SizeType = 0,
            items: [count]ValueType = undefined,

            pub inline fn isFull(block: *const Block) bool {
                return block.used >= @as(SizeType, count);
            }

            pub fn append(block: *Block, value: ValueType) *ValueType {
                if (comptime Environment.allow_assert) std.debug.assert(block.used < count);
                const index = block.used;
                block.items[index] = value;
                block.used +%= 1;
                return &block.items[index];
            }
        };

        const Overflow = OverflowGroup(Block);

        list: Overflow = Overflow{},
        count: u31 = 0,

        pub inline fn len(this: *const This) u31 {
            return this.count;
        }

        pub inline fn append(this: *This, value: ValueType) *ValueType {
            this.count += 1;
            return this.list.tail().append(value);
        }

        fn reset(this: *This) void {
            for (this.list.slice()) |block| {
                block.used = 0;
            }
            this.list.used = 0;
        }

        pub inline fn atIndex(this: *const This, index: IndexType) *const ValueType {
            const block_id = if (index.index > 0)
                index.index / count
            else
                0;
            if (comptime Environment.allow_assert) std.debug.assert(index.is_overflow);
            if (comptime Environment.allow_assert) std.debug.assert(this.list.used >= block_id);
            if (comptime Environment.allow_assert) std.debug.assert(this.list.ptrs[block_id].used > (index.index % count));
            return &this.list.ptrs[block_id].items[index.index % count];
        }

        pub inline fn atIndexMut(this: *This, index: IndexType) *ValueType {
            const block_id = if (index.index > 0)
                index.index / count
            else
                0;
            if (comptime Environment.allow_assert) std.debug.assert(index.is_overflow);
            if (comptime Environment.allow_assert) std.debug.assert(this.list.used >= block_id);
            if (comptime Environment.allow_assert) std.debug.assert(this.list.ptrs[block_id].used > (index.index % count));
            return &this.list.ptrs[block_id].items[index.index % count];
        }
    };
}
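
// Illustrative sketch, not part of the original file: OverflowList appends into
// heap-allocated fixed-size blocks that never move, so returned pointers stay
// stable across appends. Assumes bun.default_allocator is usable from a plain
// `zig test` block; blocks are deliberately never freed (append-only design).
test "OverflowList append (illustrative)" {
    var list = OverflowList(u32, 4){};
    const first = list.append(42);
    _ = list.append(43);
    try std.testing.expectEqual(@as(u31, 2), list.len());
    // The pointer from the first append is still valid after the second one.
    try std.testing.expectEqual(@as(u32, 42), first.*);
}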
pub fn BSSList(comptime ValueType: type, comptime _count: anytype) type {
    const count = _count * 2;
    const max_index = count - 1;
    return struct {
        const ChunkSize = 256;
        const OverflowBlock = struct {
            used: std.atomic.Value(u16) = std.atomic.Value(u16).init(0),
            data: [ChunkSize]ValueType = undefined,
            prev: ?*OverflowBlock = null,

            pub fn append(this: *OverflowBlock, item: ValueType) !*ValueType {
                const index = this.used.fetchAdd(1, .AcqRel);
                if (index >= ChunkSize) return error.OutOfMemory;
                this.data[index] = item;
                return &this.data[index];
            }
        };

        const Allocator = std.mem.Allocator;
        const Self = @This();

        allocator: Allocator,
        mutex: Mutex = Mutex.init(),
        head: *OverflowBlock = undefined,
        tail: OverflowBlock = OverflowBlock{},
        backing_buf: [count]ValueType = undefined,
        used: u32 = 0,

        pub var instance: Self = undefined;
        pub var loaded = false;

        pub inline fn blockIndex(index: u31) usize {
            return index / ChunkSize;
        }

        pub fn init(allocator: std.mem.Allocator) *Self {
            if (!loaded) {
                instance = Self{
                    .allocator = allocator,
                    .tail = OverflowBlock{},
                };
                instance.head = &instance.tail;
                loaded = true;
            }

            return &instance;
        }

        pub fn isOverflowing() bool {
            return instance.used >= @as(u16, count);
        }

        pub fn exists(_: *Self, value: ValueType) bool {
            return isSliceInBuffer(value, instance.backing_buf);
        }

        fn appendOverflow(self: *Self, value: ValueType) !*ValueType {
            instance.used += 1;
            return self.head.append(value) catch brk: {
                var new_block = try self.allocator.create(OverflowBlock);
                new_block.* = OverflowBlock{};
                new_block.prev = self.head;
                self.head = new_block;
                break :brk self.head.append(value);
            };
        }

        pub fn append(self: *Self, value: ValueType) !*ValueType {
            self.mutex.lock();
            defer self.mutex.unlock();
            if (instance.used > max_index) {
                return self.appendOverflow(value);
            } else {
                const index = instance.used;
                instance.backing_buf[index] = value;
                instance.used += 1;
                return &instance.backing_buf[index];
            }
        }

        pub const Pair = struct { index: IndexType, value: *ValueType };
    };
}
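
// Illustrative sketch, not part of the original file: BSSList is a process-wide
// singleton whose first `count * 2` values land in the static backing buffer;
// only later appends touch the heap. std.heap.page_allocator is used purely to
// keep the sketch self-contained, and it assumes bun's Lock works under `zig test`.
test "BSSList append (illustrative)" {
    const List = BSSList(u32, 8);
    const list = List.init(std.heap.page_allocator);
    const first = try list.append(1);
    const second = try list.append(2);
    try std.testing.expectEqual(@as(u32, 1), first.*);
    try std.testing.expectEqual(@as(u32, 2), second.*);
    // Two entries is far below the static capacity, so nothing spilled to the heap.
    try std.testing.expect(!List.isOverflowing());
}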
const Mutex = @import("./lock.zig").Lock;
/// Append-only list.
/// Stores an initial count in .bss section of the object file
/// Overflows to heap when count is exceeded.
pub fn BSSStringList(comptime _count: usize, comptime _item_length: usize) type {
    // I experimented with string interning here and it was around...maybe 1% when generating a .bun?
    // I tried:
    // - arraybacked list
    // - hashmap list

    // + 1 for sentinel
    const item_length = _item_length + 1;
    const count = _count * 2;
    const max_index = count - 1;
    const ValueType = []const u8;

    return struct {
        pub const Overflow = OverflowList([]const u8, count / 4);
        const Allocator = std.mem.Allocator;
        const Self = @This();

        backing_buf: [count * item_length]u8 = undefined,
        backing_buf_used: u64 = undefined,
        overflow_list: Overflow = Overflow{},
        allocator: Allocator,
        slice_buf: [count][]const u8 = undefined,
        slice_buf_used: u16 = 0,
        mutex: Mutex = Mutex.init(),

        pub var instance: Self = undefined;
        var loaded: bool = false;

        // only need the mutex on append
        const EmptyType = struct {
            len: usize = 0,
        };

        pub fn init(allocator: std.mem.Allocator) *Self {
            if (!loaded) {
                instance = Self{
                    .allocator = allocator,
                    .backing_buf_used = 0,
                };
                loaded = true;
            }

            return &instance;
        }

        pub inline fn isOverflowing() bool {
            return instance.slice_buf_used >= @as(u16, count);
        }

        pub fn exists(self: *const Self, value: ValueType) bool {
            return isSliceInBuffer(value, &self.backing_buf);
        }

        pub fn editableSlice(slice: []const u8) []u8 {
            return @constCast(slice);
        }

        pub fn appendMutable(self: *Self, comptime AppendType: type, _value: AppendType) ![]u8 {
            const appended = try @call(bun.callmod_inline, append, .{ self, AppendType, _value });
            return @constCast(appended);
        }

        pub fn getMutable(self: *Self, len: usize) ![]u8 {
            return try self.appendMutable(EmptyType, EmptyType{ .len = len });
        }

        pub fn printWithType(self: *Self, comptime fmt: []const u8, comptime Args: type, args: Args) ![]const u8 {
            var buf = try self.appendMutable(EmptyType, EmptyType{ .len = std.fmt.count(fmt, args) + 1 });
            buf[buf.len - 1] = 0;
            return std.fmt.bufPrint(buf.ptr[0 .. buf.len - 1], fmt, args) catch unreachable;
        }

        pub fn print(self: *Self, comptime fmt: []const u8, args: anytype) ![]const u8 {
            return try printWithType(self, fmt, @TypeOf(args), args);
        }

        pub fn append(self: *Self, comptime AppendType: type, _value: AppendType) ![]const u8 {
            self.mutex.lock();
            defer self.mutex.unlock();

            return try self.doAppend(AppendType, _value);
        }

        threadlocal var lowercase_append_buf: [bun.MAX_PATH_BYTES]u8 = undefined;
        pub fn appendLowerCase(self: *Self, comptime AppendType: type, _value: AppendType) ![]const u8 {
            self.mutex.lock();
            defer self.mutex.unlock();

            for (_value, 0..) |c, i| {
                lowercase_append_buf[i] = std.ascii.toLower(c);
            }
            const slice = lowercase_append_buf[0.._value.len];

            return self.doAppend(
                @TypeOf(slice),
                slice,
            );
        }

        inline fn doAppend(
            self: *Self,
            comptime AppendType: type,
            _value: AppendType,
        ) ![]const u8 {
            const value_len: usize = brk: {
                switch (comptime AppendType) {
                    EmptyType, []const u8, []u8, [:0]const u8, [:0]u8 => {
                        break :brk _value.len;
                    },
                    else => {
                        var len: usize = 0;
                        for (_value) |val| {
                            len += val.len;
                        }
                        break :brk len;
                    },
                }
                unreachable;
            } + 1;

            var value: [:0]u8 = undefined;
            if (value_len + instance.backing_buf_used < instance.backing_buf.len - 1) {
                const start = instance.backing_buf_used;
                instance.backing_buf_used += value_len;

                switch (AppendType) {
                    EmptyType => {
                        instance.backing_buf[instance.backing_buf_used - 1] = 0;
                    },
                    []const u8, []u8, [:0]const u8, [:0]u8 => {
                        bun.copy(u8, instance.backing_buf[start .. instance.backing_buf_used - 1], _value);
                        instance.backing_buf[instance.backing_buf_used - 1] = 0;
                    },
                    else => {
                        var remainder = instance.backing_buf[start..];
                        for (_value) |val| {
                            bun.copy(u8, remainder, val);
                            remainder = remainder[val.len..];
                        }
                        remainder[0] = 0;
                    },
                }

                value = instance.backing_buf[start .. instance.backing_buf_used - 1 :0];
            } else {
                var value_buf = try self.allocator.alloc(u8, value_len);

                switch (comptime AppendType) {
                    EmptyType => {},
                    []const u8, []u8, [:0]const u8, [:0]u8 => {
                        bun.copy(u8, value_buf, _value);
                    },
                    else => {
                        var remainder = value_buf;
                        for (_value) |val| {
                            bun.copy(u8, remainder, val);
                            remainder = remainder[val.len..];
                        }
                    },
                }

                value_buf[value_len - 1] = 0;
                value = value_buf[0 .. value_len - 1 :0];
            }

            var result = IndexType{ .index = std.math.maxInt(u31), .is_overflow = instance.slice_buf_used > max_index };

            if (result.is_overflow) {
                result.index = @as(u31, @intCast(self.overflow_list.len()));
            } else {
                result.index = instance.slice_buf_used;
                instance.slice_buf_used += 1;
            }

            if (result.is_overflow) {
                if (self.overflow_list.len() == result.index) {
                    _ = self.overflow_list.append(value);
                } else {
                    self.overflow_list.atIndexMut(result).* = value;
                }

                return value;
            } else {
                instance.slice_buf[result.index] = value;

                return instance.slice_buf[result.index];
            }
        }
    };
}
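
// Illustrative sketch, not part of the original file: BSSStringList copies short
// strings into a static, NUL-terminated backing buffer and hands out slices into
// it; print() formats straight into that same storage. std.heap.page_allocator is
// only there to keep the sketch self-contained.
test "BSSStringList append and print (illustrative)" {
    const Strings = BSSStringList(8, 16);
    const strings = Strings.init(std.heap.page_allocator);

    const hello = try strings.append([]const u8, "hello");
    try std.testing.expectEqualStrings("hello", hello);
    // The returned slice points into the static backing buffer:
    try std.testing.expect(strings.exists(hello));

    const formatted = try strings.print("{d}!", .{42});
    try std.testing.expectEqualStrings("42!", formatted);
}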
pub fn BSSMap(comptime ValueType: type, comptime count: anytype, comptime store_keys: bool, comptime estimated_key_length: usize, comptime remove_trailing_slashes: bool) type {
    const max_index = count - 1;
    const BSSMapType = struct {
        const Allocator = std.mem.Allocator;
        const Self = @This();
        const Overflow = OverflowList(ValueType, count / 4);

        index: IndexMap,
        overflow_list: Overflow = Overflow{},
        allocator: Allocator,
        mutex: Mutex = Mutex.init(),
        backing_buf: [count]ValueType = undefined,
        backing_buf_used: u16 = 0,

        pub var instance: Self = undefined;
        var loaded: bool = false;

        pub fn init(allocator: std.mem.Allocator) *Self {
            if (!loaded) {
                instance = Self{
                    .index = IndexMap{},
                    .allocator = allocator,
                };
                loaded = true;
            }

            return &instance;
        }

        pub fn isOverflowing() bool {
            return instance.backing_buf_used >= @as(u16, count);
        }

        pub fn getOrPut(self: *Self, denormalized_key: []const u8) !Result {
            const key = if (comptime remove_trailing_slashes) std.mem.trimRight(u8, denormalized_key, "/") else denormalized_key;
            const _key = bun.hash(key);
            self.mutex.lock();
            defer self.mutex.unlock();

            const index = try self.index.getOrPut(self.allocator, _key);

            if (index.found_existing) {
                return Result{
                    .hash = _key,
                    .index = index.value_ptr.*,
                    .status = switch (index.value_ptr.index) {
                        NotFound.index => .not_found,
                        Unassigned.index => .unknown,
                        else => .exists,
                    },
                };
            }
            index.value_ptr.* = Unassigned;

            return Result{
                .hash = _key,
                .index = Unassigned,
                .status = .unknown,
            };
        }

        pub fn get(self: *Self, denormalized_key: []const u8) ?*ValueType {
            const key = if (comptime remove_trailing_slashes) std.mem.trimRight(u8, denormalized_key, "/") else denormalized_key;
            const _key = bun.hash(key);
            self.mutex.lock();
            defer self.mutex.unlock();
            const index = self.index.get(_key) orelse return null;
            return self.atIndex(index);
        }

        pub fn markNotFound(self: *Self, result: Result) void {
            self.mutex.lock();
            defer self.mutex.unlock();
            self.index.put(self.allocator, result.hash, NotFound) catch unreachable;
        }

        pub fn atIndex(self: *Self, index: IndexType) ?*ValueType {
            if (index.index == NotFound.index or index.index == Unassigned.index) return null;

            if (index.is_overflow) {
                return self.overflow_list.atIndexMut(index);
            } else {
                return &instance.backing_buf[index.index];
            }
        }

        pub fn put(self: *Self, result: *Result, value: ValueType) !*ValueType {
            self.mutex.lock();
            defer self.mutex.unlock();

            if (result.index.index == NotFound.index or result.index.index == Unassigned.index) {
                result.index.is_overflow = instance.backing_buf_used > max_index;
                if (result.index.is_overflow) {
                    result.index.index = self.overflow_list.len();
                } else {
                    result.index.index = instance.backing_buf_used;
                    instance.backing_buf_used += 1;
                }
            }

            try self.index.put(self.allocator, result.hash, result.index);

            if (result.index.is_overflow) {
                if (self.overflow_list.len() == result.index.index) {
                    return self.overflow_list.append(value);
                } else {
                    const ptr = self.overflow_list.atIndexMut(result.index);
                    ptr.* = value;
                    return ptr;
                }
            } else {
                instance.backing_buf[result.index.index] = value;

                return &instance.backing_buf[result.index.index];
            }
        }

        /// Returns true if the entry was removed
        pub fn remove(self: *Self, denormalized_key: []const u8) bool {
            self.mutex.lock();
            defer self.mutex.unlock();
            const key = if (comptime remove_trailing_slashes)
                std.mem.trimRight(u8, denormalized_key, "/")
            else
                denormalized_key;
            const _key = bun.hash(key);
            return self.index.remove(_key);

            // const index = self.index.get(_key) orelse return;
            // switch (index) {
            //     Unassigned.index, NotFound.index => {
            //         self.index.remove(_key);
            //     },
            //     0...max_index => {
            //         if (comptime hasDeinit(ValueType)) {
            //             instance.backing_buf[index].deinit();
            //         }
            //         instance.backing_buf[index] = undefined;
            //     },
            //     else => {
            //         const i = index - count;
            //         if (hasDeinit(ValueType)) {
            //             self.overflow_list.items[i].deinit();
            //         }
            //         self.overflow_list.items[index - count] = undefined;
            //     },
            // }
        }
    };
    if (!store_keys) {
        return BSSMapType;
    }

    return struct {
        map: *BSSMapType,
        key_list_buffer: [count * estimated_key_length]u8 = undefined,
        key_list_buffer_used: usize = 0,
        key_list_slices: [count][]u8 = undefined,
        key_list_overflow: OverflowList([]u8, count / 4) = OverflowList([]u8, count / 4){},

        const Self = @This();
        pub var instance: Self = undefined;
        pub var instance_loaded = false;

        pub fn init(allocator: std.mem.Allocator) *Self {
            if (!instance_loaded) {
                instance = Self{
                    .map = BSSMapType.init(allocator),
                };
                instance_loaded = true;
            }

            return &instance;
        }

        pub fn isOverflowing() bool {
            return instance.map.backing_buf_used >= count;
        }

        pub fn getOrPut(self: *Self, key: []const u8) !Result {
            return try self.map.getOrPut(key);
        }

        pub fn get(self: *Self, key: []const u8) ?*ValueType {
            return @call(bun.callmod_inline, BSSMapType.get, .{ self.map, key });
        }

        pub fn atIndex(self: *Self, index: IndexType) ?*ValueType {
            return @call(bun.callmod_inline, BSSMapType.atIndex, .{ self.map, index });
        }

        pub fn keyAtIndex(_: *Self, index: IndexType) ?[]const u8 {
            return switch (index.index) {
                Unassigned.index, NotFound.index => null,
                else => {
                    if (!index.is_overflow) {
                        return instance.key_list_slices[index.index];
                    } else {
                        return instance.key_list_overflow.atIndex(index).*;
                    }
                },
            };
        }

        pub fn put(self: *Self, key: anytype, comptime store_key: bool, result: *Result, value: ValueType) !*ValueType {
            const ptr = try self.map.put(result, value);
            if (store_key) {
                try self.putKey(key, result);
            }

            return ptr;
        }

        pub fn isKeyStaticallyAllocated(key: anytype) bool {
            return isSliceInBuffer(key, &instance.key_list_buffer);
        }

        // There are two parts to this.
        // 1. Storing the underlying string.
        // 2. Making the key accessible at the index.
        pub fn putKey(self: *Self, key: anytype, result: *Result) !void {
            self.map.mutex.lock();
            defer self.map.mutex.unlock();
            var slice: []u8 = undefined;

            // Is this actually a slice into the map? Don't free it.
            if (isKeyStaticallyAllocated(key)) {
                slice = key;
            } else if (instance.key_list_buffer_used + key.len < instance.key_list_buffer.len) {
                const start = instance.key_list_buffer_used;
                instance.key_list_buffer_used += key.len;
                slice = instance.key_list_buffer[start..instance.key_list_buffer_used];
                bun.copy(u8, slice, key);
            } else {
                slice = try self.map.allocator.dupe(u8, key);
            }

            if (comptime remove_trailing_slashes) {
                // trimRight returns []const u8; the memory is mutable, so cast back.
                slice = @constCast(std.mem.trimRight(u8, slice, "/"));
            }

            if (!result.index.is_overflow) {
                instance.key_list_slices[result.index.index] = slice;
            } else {
                if (instance.key_list_overflow.len() > result.index.index) {
                    const existing_slice = instance.key_list_overflow.atIndexMut(result.index).*;
                    if (!isKeyStaticallyAllocated(existing_slice)) {
                        self.map.allocator.free(existing_slice);
                    }
                    instance.key_list_overflow.atIndexMut(result.index).* = slice;
                } else {
                    _ = instance.key_list_overflow.append(slice);
                }
            }
        }

        pub fn markNotFound(self: *Self, result: Result) void {
            self.map.markNotFound(result);
        }

        /// This does not free the keys.
        /// Returns `true` if an entry had previously existed.
        pub fn remove(self: *Self, key: []const u8) bool {
            return self.map.remove(key);
        }
    };
}
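
// Illustrative sketch, not part of the original file: with store_keys = true the
// wrapper stores a copy of each key, and remove_trailing_slashes makes "a/b/" and
// "a/b" hash identically. The key is passed as a mutable []u8 because putKey
// stores []u8 slices; std.heap.page_allocator just keeps the sketch self-contained.
test "BSSMap getOrPut / put / get (illustrative)" {
    const Map = BSSMap(u32, 16, true, 32, true);
    const map = Map.init(std.heap.page_allocator);

    var key_buf = "some/path/".*;
    const key: []u8 = &key_buf;

    var result = try map.getOrPut(key);
    try std.testing.expect(result.status == .unknown);

    _ = try map.put(key, true, &result, 42);
    // The trailing slash is normalized away, so both spellings find the entry:
    try std.testing.expectEqual(@as(u32, 42), map.get("some/path").?.*);
    try std.testing.expectEqual(@as(u32, 42), map.get("some/path/").?.*);
}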