Compare commits

...

5 Commits

Author SHA1 Message Date
Jarred Sumner
3a6b9928f3 experiment: use libpas 2024-12-24 04:19:38 -08:00
Jarred Sumner
f5e874e1b5 Update BuildMimalloc.cmake 2024-12-24 03:58:29 -08:00
Jarred Sumner
4eefe8eb29 Bump mimalloc 2024-12-24 03:48:14 -08:00
Jarred Sumner
6c73395d04 Merge branch 'main' into jarred/experiment-fragmentation 2024-12-24 02:27:17 -08:00
Jarred Sumner
1ce2a10f2d experiment: tweak GC timings 2024-12-24 00:55:43 -08:00
13 changed files with 486 additions and 70 deletions

View File

@@ -0,0 +1,17 @@
import { readFile } from "node:fs/promises";
import { writeFileSync } from "node:fs";

// RSS benchmark: repeatedly read a large file and watch resident memory.
// Create an 8 MiB fixture filled with a repeating pattern.
const path = "/tmp/bun-bench-large.text";
writeFileSync(path, Buffer.alloc(8 * 1024 * 1024, "abcdefg!"));

// When running under Bun, force a synchronous GC so the baseline RSS is clean.
if (globalThis.Bun) {
  Bun.gc(true);
}

// Current resident set size in whole megabytes.
const rssMB = () => (process.memoryUsage.rss() / 1024 / 1024) | 0;

console.log("Before:", "RSS", rssMB(), "MB");
// Read the fixture 1024 times; allocator fragmentation shows up as RSS growth.
for (let iteration = 0; iteration < 1024; iteration++) {
  await readFile(path);
}
console.log("After:", "RSS", rssMB(), "MB");

View File

@@ -4,7 +4,7 @@ register_repository(
REPOSITORY
oven-sh/mimalloc
COMMIT
82b2c2277a4d570187c07b376557dc5bde81d848
7085b6cec31641fddaca3d40932cda82e91baf07
)
set(MIMALLOC_CMAKE_ARGS

View File

@@ -433,7 +433,7 @@ pub const ArrayBuffer = extern struct {
}
pub fn toJSUnchecked(this: ArrayBuffer, ctx: JSC.C.JSContextRef, exception: JSC.C.ExceptionRef) JSC.JSValue {
// The reason for this is
// The reason for this is
// JSC C API returns a detached arraybuffer
// if you pass it a zero-length TypedArray
@@ -457,7 +457,7 @@ pub const ArrayBuffer = extern struct {
this.ptr,
this.byte_len,
MarkedArrayBuffer_deallocator,
@as(*anyopaque, @ptrFromInt(@intFromPtr(&bun.default_allocator))),
@constCast(@ptrCast(bun.default_allocator.vtable)),
exception,
));
}
@@ -468,7 +468,7 @@ pub const ArrayBuffer = extern struct {
this.ptr,
this.byte_len,
MarkedArrayBuffer_deallocator,
@as(*anyopaque, @ptrFromInt(@intFromPtr(&bun.default_allocator))),
@constCast(@ptrCast(bun.default_allocator.vtable)),
exception,
));
}
@@ -480,32 +480,6 @@ pub const ArrayBuffer = extern struct {
return this.value;
}
// If it's not a mimalloc heap buffer, we're not going to call a deallocator
if (this.len > 0 and !bun.Mimalloc.mi_is_in_heap_region(this.ptr)) {
log("toJS but will never free: {d} bytes", .{this.len});
if (this.typed_array_type == .ArrayBuffer) {
return JSC.JSValue.fromRef(JSC.C.JSObjectMakeArrayBufferWithBytesNoCopy(
ctx,
this.ptr,
this.byte_len,
null,
null,
exception,
));
}
return JSC.JSValue.fromRef(JSC.C.JSObjectMakeTypedArrayWithBytesNoCopy(
ctx,
this.typed_array_type.toC(),
this.ptr,
this.byte_len,
null,
null,
exception,
));
}
return this.toJSUnchecked(ctx, exception);
}
@@ -640,7 +614,22 @@ pub const MarkedArrayBuffer = struct {
}
pub fn toNodeBuffer(this: *const MarkedArrayBuffer, ctx: js.JSContextRef) JSC.JSValue {
return JSValue.createBufferWithCtx(ctx, this.buffer.byteSlice(), this.buffer.ptr, MarkedArrayBuffer_deallocator);
return JSValue.createBufferWithCtx(
ctx,
this.buffer.byteSlice(),
@constCast(@ptrCast(if (this.allocator) |allocator| allocator.vtable else bun.default_allocator.vtable)),
@ptrCast(&Bun__freeTypedArrayWithAllocatorVTable),
);
}
fn Bun__freeTypedArrayWithAllocatorVTable(ptr: *anyopaque, vtable: *const std.mem.Allocator.VTable) callconv(.C) void {
if (vtable == bun.default_allocator.vtable or vtable == bun.MimallocArena.VTable) {
bun.Mimalloc.mi_free(ptr);
} else if (vtable == bun.libpas_allocator.vtable) {
bun.libpas.bun_libpas_free(ptr);
} else {
@panic("Unknown allocator used");
}
}
pub fn toJSObjectRef(this: *const MarkedArrayBuffer, ctx: js.JSContextRef, exception: js.ExceptionRef) js.JSObjectRef {
@@ -662,8 +651,8 @@ pub const MarkedArrayBuffer = struct {
this.buffer.ptr,
this.buffer.byte_len,
MarkedArrayBuffer_deallocator,
this.buffer.ptr,
@ptrCast(&Bun__freeTypedArrayWithAllocatorVTable),
@constCast(@ptrCast(if (this.allocator) |allocator| allocator.vtable else bun.default_allocator.vtable)),
exception,
);
}

View File

@@ -218,6 +218,18 @@ extern "C" unsigned getJSCBytecodeCacheVersion()
return getWebKitBytecodeCacheVersion();
}
extern "C" void Bun__onEachMicrotaskTick(void* bun_vm, JSC::VM* vm);
static void defaultOnEachMicrotaskTick(JSC::VM& vm)
{
Bun__onEachMicrotaskTick(clientData(vm)->bunVM, &vm);
}
static void defaultOnEachMicrotaskTickWithVM(void* bun_vm, JSC::VM& vm)
{
Bun__onEachMicrotaskTick(bun_vm, &vm);
}
extern "C" void JSCInitialize(const char* envp[], size_t envc, void (*onCrash)(const char* ptr, size_t length), bool evalMode)
{
static bool has_loaded_jsc = false;
@@ -799,6 +811,8 @@ static void checkIfNextTickWasCalledDuringMicrotask(JSC::VM& vm)
globalObject->resetOnEachMicrotaskTick();
queue->drain(vm, globalObject);
}
Bun__onEachMicrotaskTick(globalObject->m_bunVM, &vm);
}
static void cleanupAsyncHooksData(JSC::VM& vm)
@@ -810,7 +824,8 @@ static void cleanupAsyncHooksData(JSC::VM& vm)
vm.setOnEachMicrotaskTick(&checkIfNextTickWasCalledDuringMicrotask);
checkIfNextTickWasCalledDuringMicrotask(vm);
} else {
vm.setOnEachMicrotaskTick(nullptr);
vm.setOnEachMicrotaskTick(&defaultOnEachMicrotaskTick);
Bun__onEachMicrotaskTick(globalObject->m_bunVM, &vm);
}
}
@@ -856,7 +871,7 @@ void Zig::GlobalObject::resetOnEachMicrotaskTick()
vm.setOnEachMicrotaskTick(&cleanupAsyncHooksData);
} else {
if (this->m_nextTickQueue) {
vm.setOnEachMicrotaskTick(nullptr);
vm.setOnEachMicrotaskTick(&defaultOnEachMicrotaskTick);
} else {
vm.setOnEachMicrotaskTick(&checkIfNextTickWasCalledDuringMicrotask);
}
@@ -923,8 +938,10 @@ extern "C" JSC__JSGlobalObject* Zig__GlobalObject__create(void* console_client,
globalObject->resetOnEachMicrotaskTick();
Bun::JSNextTickQueue* queue = jsCast<Bun::JSNextTickQueue*>(nextTickQueue);
queue->drain(vm, globalObject);
Bun__onEachMicrotaskTick(globalObject->m_bunVM, &vm);
return;
}
Bun__onEachMicrotaskTick(globalObject->m_bunVM, &vm);
});
if (executionContextId > -1) {

View File

@@ -5709,7 +5709,7 @@ extern "C" JSC__JSValue JSC__JSValue__createRopeString(JSC__JSValue JSValue0, JS
extern "C" size_t JSC__VM__blockBytesAllocated(JSC__VM* vm)
{
#if ENABLE(RESOURCE_USAGE)
return vm->heap.blockBytesAllocated() + vm->heap.extraMemorySize();
return vm->heap.blockBytesAllocated() + vm->heap.extraMemorySize() + vm->heap.externalMemorySize();
#else
return 0;
#endif

View File

@@ -0,0 +1,192 @@
#include "root.h"
#if USE(SYSTEM_MALLOC)
#include <wtf/OSAllocator.h>
#include <wtf/FastMalloc.h>
extern "C" {
// Core allocation functions
// Infallible allocation: WTF::FastMalloc::malloc crashes the process on OOM.
// Use bun_libpas_try_malloc for a null-returning path.
void* bun_libpas_malloc(size_t size)
{
    return FastMalloc::malloc(size);
}

// Fallible variant: returns nullptr on allocation failure.
void* bun_libpas_try_malloc(size_t size)
{
    return FastMalloc::tryMalloc(size);
}
// calloc-style allocation: `count` elements of `size` bytes each, zero-filled.
// BUG FIX: the original multiplied count * size unchecked; an overflowing
// product would silently allocate a short buffer and invite a heap overflow.
// Return nullptr on overflow, matching C calloc semantics.
// NOTE(review): this (non-try) flavor uses tryZeroedMalloc, unlike the
// bmalloc branch which uses the crashing zeroedMalloc — confirm which failure
// behavior callers of bun_libpas_calloc expect.
void* bun_libpas_calloc(size_t count, size_t size)
{
    if (size && count > SIZE_MAX / size)
        return nullptr;
    return FastMalloc::tryZeroedMalloc(count * size);
}

// Fallible zeroed allocation with the same overflow guard.
void* bun_libpas_try_calloc(size_t count, size_t size)
{
    if (size && count > SIZE_MAX / size)
        return nullptr;
    return FastMalloc::tryZeroedMalloc(count * size);
}
// Infallible realloc: crashes on OOM.
void* bun_libpas_realloc(void* ptr, size_t size)
{
    return FastMalloc::realloc(ptr, size);
}

// Fallible realloc. BUG FIX: the original stub always returned nullptr,
// which silently made every try-realloc caller fail (degrading them to
// alloc+copy+free). Delegate to FastMalloc::tryRealloc instead.
// NOTE: like any realloc, this may MOVE the allocation — callers must use
// the returned pointer and must not treat success as in-place growth.
void* bun_libpas_try_realloc(void* ptr, size_t size)
{
    return FastMalloc::tryRealloc(ptr, size);
}

void bun_libpas_free(void* ptr)
{
    // NOTE(review): allocations above come from FastMalloc::malloc/tryMalloc,
    // but this frees with fastAlignedFree (the aligned-allocation pair). Both
    // funnel to the same underlying free on the supported configurations, but
    // FastMalloc::free would be the symmetric call — confirm before shipping.
    WTF::fastAlignedFree(ptr);
}
// Aligned allocation functions
// NOTE(review): these use the *Compact* aligned-malloc flavor while the plain
// malloc path above is non-compact — confirm the mix is intentional.
void* bun_libpas_memalign(size_t alignment, size_t size)
{
    return WTF::fastCompactAlignedMalloc(alignment, size);
}

// Fallible aligned allocation: returns nullptr on failure.
void* bun_libpas_try_memalign(size_t alignment, size_t size)
{
    return WTF::tryFastCompactAlignedMalloc(alignment, size);
}

// Memory size query
// Usable size of an allocation made by this allocator.
size_t bun_libpas_malloc_size(const void* ptr)
{
    return WTF::fastMallocSize(ptr);
}

// Rounds `size` up to a size the allocator can serve without waste.
size_t bun_libpas_malloc_good_size(size_t size)
{
    return WTF::fastMallocGoodSize(size);
}

// Memory management functions
// Return free memory to the OS. Nothing to do when the system allocator owns
// the heap.
void bun_libpas_scavenge()
{
    // No-op for system malloc
}

void bun_libpas_scavenge_this_thread()
{
    // No-op for system malloc
}
// Virtual memory functions
// Reserve and commit `size` bytes rounded up to whole pages; returns nullptr
// on failure.
void* bun_libpas_try_allocate_zeroed_virtual_pages(size_t size)
{
    const size_t pageSize = WTF::pageSize();
    // Round the request up to page granularity.
    size_t alignedSize = (size + pageSize - 1) & ~(pageSize - 1);
    void* result = OSAllocator::tryReserveAndCommit(alignedSize);
    if (result) {
        // NOTE(review): freshly committed anonymous pages are already zeroed
        // by the OS; this memset forces every page to be physically committed
        // up front — confirm that defeating lazy commit is intended.
        memset(result, 0, alignedSize);
    }
    return result;
}

// Releases pages obtained from bun_libpas_try_allocate_zeroed_virtual_pages.
// `size` must match the (page-rounded) size of the mapping.
void bun_libpas_free_virtual_pages(void* ptr, size_t size)
{
    if (!ptr) return;
    OSAllocator::decommitAndRelease(ptr, size);
}
}
#else
#include <bmalloc/bmalloc.h>
#include <bmalloc/CompactAllocationMode.h>
extern "C" {
// Core allocation functions
// Infallible allocation via bmalloc/libpas; crashes on OOM. Compact mode lets
// libpas place the allocation where compact (32-bit-encodable) pointers work.
void* bun_libpas_malloc(size_t size)
{
    return bmalloc::api::malloc(size, bmalloc::CompactAllocationMode::Compact);
}

// Fallible variant: returns nullptr on allocation failure.
void* bun_libpas_try_malloc(size_t size)
{
    return bmalloc::api::tryMalloc(size, bmalloc::CompactAllocationMode::Compact);
}
// calloc-style zeroed allocation (crashes on OOM, like bun_libpas_malloc).
// BUG FIX: the original multiplied count * size unchecked; an overflowing
// product would silently allocate a short buffer and invite a heap overflow.
// Return nullptr on overflow, matching C calloc semantics.
void* bun_libpas_calloc(size_t count, size_t size)
{
    if (size && count > SIZE_MAX / size)
        return nullptr;
    return bmalloc::api::zeroedMalloc(count * size, bmalloc::CompactAllocationMode::Compact);
}

// Fallible zeroed allocation with the same overflow guard.
void* bun_libpas_try_calloc(size_t count, size_t size)
{
    if (size && count > SIZE_MAX / size)
        return nullptr;
    return bmalloc::api::tryZeroedMalloc(count * size, bmalloc::CompactAllocationMode::Compact);
}
// Infallible realloc: crashes on OOM.
void* bun_libpas_realloc(void* ptr, size_t size)
{
    return bmalloc::api::realloc(ptr, size, bmalloc::CompactAllocationMode::Compact);
}

// Fallible realloc: returns nullptr on failure.
// NOTE: like any realloc, a successful call may MOVE the allocation; callers
// must use the returned pointer and must not treat success as in-place growth.
void* bun_libpas_try_realloc(void* ptr, size_t size)
{
    return bmalloc::api::tryRealloc(ptr, size, bmalloc::CompactAllocationMode::Compact);
}

// Frees memory from any of the allocation entry points above.
void bun_libpas_free(void* ptr)
{
    bmalloc::api::free(ptr);
}
// Aligned allocation functions
// Infallible aligned allocation; crashes on OOM.
void* bun_libpas_memalign(size_t alignment, size_t size)
{
    return bmalloc::api::memalign(alignment, size, bmalloc::CompactAllocationMode::Compact);
}

// Fallible aligned allocation: returns nullptr on failure.
void* bun_libpas_try_memalign(size_t alignment, size_t size)
{
    return bmalloc::api::tryMemalign(alignment, size, bmalloc::CompactAllocationMode::Compact);
}

// Memory size query
// Usable size of an allocation; 0 when this bmalloc build cannot report it.
size_t bun_libpas_malloc_size(const void* ptr)
{
#if BENABLE(MALLOC_SIZE)
    return bmalloc::api::mallocSize(ptr);
#else
    return 0;
#endif
}

// Rounds `size` up to a size the allocator can serve without waste; falls back
// to the identity when the build lacks mallocGoodSize.
size_t bun_libpas_malloc_good_size(size_t size)
{
#if BENABLE(MALLOC_GOOD_SIZE)
    return bmalloc::api::mallocGoodSize(size);
#else
    return size;
#endif
}

// Memory management functions
// Return free memory held by bmalloc's caches to the OS (all threads).
void bun_libpas_scavenge()
{
    bmalloc::api::scavenge();
}

// Return only the current thread's cached memory to the OS.
void bun_libpas_scavenge_this_thread()
{
    bmalloc::api::scavengeThisThread();
}
// Virtual memory functions
// BUG FIX: bun_libpas_try_allocate_zeroed_virtual_pages was left commented
// out in the bmalloc branch, yet the Zig side (src/libpas.zig) declares and
// calls the extern symbol — every bmalloc build would fail at link time.
// Implement it with bmalloc's large-virtual API, which pairs with the
// freeLargeVirtual call already used below.
void* bun_libpas_try_allocate_zeroed_virtual_pages(size_t size)
{
    if (!size)
        return nullptr;
    // 16 KiB covers the largest page size among supported targets (e.g. Apple
    // Silicon); large virtual allocations are page-aligned regardless.
    // NonCompact: raw page ranges never need compact-pointer placement.
    constexpr size_t alignment = 16 * 1024;
    return bmalloc::api::tryLargeZeroedMemalignVirtual(alignment, size, bmalloc::CompactAllocationMode::NonCompact);
}

// Releases pages from bun_libpas_try_allocate_zeroed_virtual_pages.
void bun_libpas_free_virtual_pages(void* ptr, size_t size)
{
    if (!ptr) return;
    bmalloc::api::freeLargeVirtual(ptr, size);
}
}
#endif

View File

@@ -543,6 +543,9 @@ pub const GarbageCollectionController = struct {
gc_repeating_timer_fast: bool = true,
disabled: bool = false,
pub export fn Bun__onEachMicrotaskTick(vm: *VirtualMachine, jsc_vm: *JSC.VM) void {
vm.gc_controller.processGCTimerWithHeapSize(jsc_vm, jsc_vm.blockBytesAllocated());
}
pub fn init(this: *GarbageCollectionController, vm: *VirtualMachine) void {
const actual = uws.Loop.get();
this.gc_timer = uws.Timer.createFallthrough(actual, this);
@@ -612,7 +615,7 @@ pub const GarbageCollectionController = struct {
pub fn onGCRepeatingTimer(timer: *uws.Timer) callconv(.C) void {
var this = timer.as(*GarbageCollectionController);
const prev_heap_size = this.gc_last_heap_size_on_repeating_timer;
this.performGC();
this.performGC(this.bunVM().jsc);
this.gc_last_heap_size_on_repeating_timer = this.gc_last_heap_size;
if (prev_heap_size == this.gc_last_heap_size_on_repeating_timer) {
this.heap_size_didnt_change_for_repeating_timer_ticks_count +|= 1;
@@ -652,7 +655,7 @@ pub const GarbageCollectionController = struct {
this.updateGCRepeatTimer(.fast);
if (this_heap_size > prev * 2) {
this.performGC();
this.performGC(vm);
} else {
this.scheduleGCTimer();
}
@@ -661,17 +664,16 @@ pub const GarbageCollectionController = struct {
.scheduled => {
if (this_heap_size > prev * 2) {
this.updateGCRepeatTimer(.fast);
this.performGC();
this.performGC(vm);
}
},
}
}
pub fn performGC(this: *GarbageCollectionController) void {
pub fn performGC(this: *GarbageCollectionController, js_vm: *JSC.VM) void {
if (this.disabled) return;
var vm = this.bunVM().jsc;
vm.collectAsync();
this.gc_last_heap_size = vm.blockBytesAllocated();
js_vm.collectAsync();
this.gc_last_heap_size = js_vm.blockBytesAllocated();
}
pub const GCTimerState = enum {
@@ -1618,7 +1620,7 @@ pub const EventLoop = struct {
/// Asynchronously run the garbage collector and track how much memory is now allocated
pub fn performGC(this: *EventLoop) void {
this.virtual_machine.gc_controller.performGC();
this.virtual_machine.gc_controller.performGC(this.virtual_machine.jsc);
}
pub fn wakeup(this: *EventLoop) void {

View File

@@ -4728,7 +4728,7 @@ pub const NodeFS = struct {
};
},
.string => brk: {
const str = bun.SliceWithUnderlyingString.transcodeFromOwnedSlice(@constCast(ret.result.string), args.encoding);
const str = bun.SliceWithUnderlyingString.transcodeFromOwnedSlice(bun.libpas_allocator, @constCast(ret.result.string), args.encoding);
if (str.underlying.tag == .Dead and str.utf8.len == 0) {
return .{ .err = Syscall.Error.fromCode(.NOMEM, .read).withPathLike(args.path) };
@@ -4873,10 +4873,10 @@ pub const NodeFS = struct {
return .{
.result = .{
.buffer = Buffer.fromBytes(
bun.default_allocator.dupe(u8, temporary_read_buffer) catch return .{
bun.libpas_allocator.dupe(u8, temporary_read_buffer) catch return .{
.err = Syscall.Error.fromCode(.NOMEM, .read).withPathLike(args.path),
},
bun.default_allocator,
bun.libpas_allocator,
.Uint8Array,
),
},
@@ -4940,7 +4940,7 @@ pub const NodeFS = struct {
}
}
var buf = std.ArrayList(u8).init(bun.default_allocator);
var buf = std.ArrayList(u8).init(bun.libpas_allocator);
defer if (!did_succeed) buf.clearAndFree();
buf.ensureTotalCapacityPrecise(
@min(
@@ -5051,7 +5051,7 @@ pub const NodeFS = struct {
return switch (args.encoding) {
.buffer => .{
.result = .{
.buffer = Buffer.fromBytes(buf.items, bun.default_allocator, .Uint8Array),
.buffer = Buffer.fromBytes(buf.items, bun.libpas_allocator, .Uint8Array),
},
},
else => brk: {

View File

@@ -1056,18 +1056,18 @@ pub const Encoder = struct {
};
}
pub fn toBunStringFromOwnedSlice(input: []u8, encoding: JSC.Node.Encoding) bun.String {
pub fn toBunStringFromOwnedSlice(allocator: std.mem.Allocator, input: []u8, encoding: JSC.Node.Encoding) bun.String {
if (input.len == 0)
return bun.String.empty;
switch (encoding) {
.ascii => {
if (strings.isAllASCII(input)) {
return bun.String.createExternalGloballyAllocated(.latin1, input);
return bun.String.createExternalWithKnownAllocator(allocator, input, .latin1);
}
const str, const chars = bun.String.createUninitialized(.latin1, input.len);
defer bun.default_allocator.free(input);
defer allocator.free(input);
if (str.tag == .Dead) {
return str;
}
@@ -1075,35 +1075,35 @@ pub const Encoder = struct {
return str;
},
.latin1 => {
return bun.String.createExternalGloballyAllocated(.latin1, input);
return bun.String.createExternalWithKnownAllocator(allocator, input, .latin1);
},
.buffer, .utf8 => {
const converted = strings.toUTF16Alloc(bun.default_allocator, input, false, false) catch {
bun.default_allocator.free(input);
const converted = strings.toUTF16Alloc(allocator, input, false, false) catch {
allocator.free(input);
return bun.String.dead;
};
if (converted) |utf16| {
defer bun.default_allocator.free(input);
return bun.String.createExternalGloballyAllocated(.utf16, utf16);
defer allocator.free(input);
return bun.String.createExternalWithKnownAllocator(allocator, utf16, .utf16);
}
// If we get here, it means we can safely assume the string is 100% ASCII characters
return bun.String.createExternalGloballyAllocated(.latin1, input);
return bun.String.createExternalWithKnownAllocator(allocator, input, .latin1);
},
.ucs2, .utf16le => {
// Avoid incomplete characters
if (input.len / 2 == 0) {
bun.default_allocator.free(input);
allocator.free(input);
return bun.String.empty;
}
const as_u16 = std.mem.bytesAsSlice(u16, input);
return bun.String.createExternalGloballyAllocated(.utf16, @alignCast(as_u16));
return bun.String.createExternalWithKnownAllocator(allocator, as_u16, .utf16);
},
.hex => {
defer bun.default_allocator.free(input);
defer allocator.free(input);
const str, const chars = bun.String.createUninitialized(.latin1, input.len * 2);
if (str.tag == .Dead) {
@@ -1125,7 +1125,7 @@ pub const Encoder = struct {
// be addressed separately because constructFromU8's base64url also
// appears inconsistent with Node.js.
.base64url => {
defer bun.default_allocator.free(input);
defer allocator.free(input);
const out, const chars = bun.String.createUninitialized(.latin1, bun.base64.urlSafeEncodeLen(input));
if (out.tag != .Dead) {
_ = bun.base64.encodeURLSafe(chars, input);
@@ -1134,11 +1134,11 @@ pub const Encoder = struct {
},
.base64 => {
defer bun.default_allocator.free(input);
defer allocator.free(input);
const to_len = bun.base64.encodeLen(input);
const to = bun.default_allocator.alloc(u8, to_len) catch return bun.String.dead;
const to = allocator.alloc(u8, to_len) catch return bun.String.dead;
const wrote = bun.base64.encode(to, input);
return bun.String.createExternalGloballyAllocated(.latin1, to[0..wrote]);
return bun.String.createExternalWithKnownAllocator(allocator, to[0..wrote], .latin1);
},
}
}

View File

@@ -4122,3 +4122,6 @@ pub inline fn isComptimeKnown(x: anytype) bool {
pub inline fn itemOrNull(comptime T: type, slice: []const T, index: usize) ?T {
return if (index < slice.len) slice[index] else null;
}
pub const libpas = @import("./libpas.zig");
pub const libpas_allocator = libpas.libpas_allocator;

173
src/libpas.zig Normal file
View File

@@ -0,0 +1,173 @@
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
// External C functions from bun-libpas.cpp
pub extern fn bun_libpas_malloc(size: usize) ?*anyopaque;
pub extern fn bun_libpas_try_malloc(size: usize) ?*anyopaque;
pub extern fn bun_libpas_calloc(count: usize, size: usize) ?*anyopaque;
pub extern fn bun_libpas_try_calloc(count: usize, size: usize) ?*anyopaque;
pub extern fn bun_libpas_realloc(ptr: ?*anyopaque, size: usize) ?*anyopaque;
pub extern fn bun_libpas_try_realloc(ptr: ?*anyopaque, size: usize) ?*anyopaque;
pub extern fn bun_libpas_free(ptr: ?*anyopaque) void;
pub extern fn bun_libpas_memalign(alignment: usize, size: usize) ?*anyopaque;
pub extern fn bun_libpas_try_memalign(alignment: usize, size: usize) ?*anyopaque;
pub extern fn bun_libpas_malloc_size(ptr: *const anyopaque) usize;
pub extern fn bun_libpas_malloc_good_size(size: usize) usize;
pub extern fn bun_libpas_scavenge() void;
pub extern fn bun_libpas_scavenge_this_thread() void;
pub extern fn bun_libpas_try_allocate_zeroed_virtual_pages(size: usize) ?*anyopaque;
pub extern fn bun_libpas_free_virtual_pages(ptr: ?*anyopaque, size: usize) void;
/// std.mem.Allocator backend over the bun_libpas_* C entry points.
/// The context pointer is unused; all state lives in the C allocator.
pub const LibPasAllocator = struct {
    pub const supports_malloc_size = true;

    // Fast path for allocations that need no special alignment.
    inline fn fastAlloc(len: usize) ?[*]u8 {
        return @as([*]u8, @ptrCast(bun_libpas_try_malloc(len)));
    }

    inline fn alignedAlloc(len: usize, log2_align: u8) ?[*]u8 {
        // Fast path: if alignment is small enough, use regular malloc
        // since libpas guarantees certain minimum alignments
        if (log2_align <= 3) { // 8-byte alignment or less
            return fastAlloc(len);
        }
        const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align));
        return @as([*]u8, @ptrCast(bun_libpas_try_memalign(alignment, len)));
    }

    inline fn alignedFree(ptr: [*]u8) void {
        bun_libpas_free(ptr);
    }

    // Usable size of an existing allocation, as reported by the C allocator.
    inline fn alignedAllocSize(ptr: [*]u8) usize {
        return bun_libpas_malloc_size(ptr);
    }

    /// Allocator vtable: allocate `len` bytes at 2^log2_align alignment.
    fn alloc(
        _: *anyopaque,
        len: usize,
        log2_align: u8,
        return_address: usize,
    ) ?[*]u8 {
        _ = return_address;
        assert(len > 0);
        return alignedAlloc(len, log2_align);
    }

    /// Allocator vtable: attempt to resize IN PLACE. Returning true promises
    /// the caller may keep using `buf.ptr` unchanged.
    fn resize(
        _: *anyopaque,
        buf: []u8,
        log2_buf_align: u8,
        new_len: usize,
        return_address: usize,
    ) bool {
        _ = log2_buf_align;
        _ = return_address;

        // Shrinking always succeeds in place.
        if (new_len <= buf.len) {
            return true;
        }

        // Growing succeeds only when the usable size of the existing block
        // already covers the request.
        if (new_len <= alignedAllocSize(buf.ptr)) {
            return true;
        }

        // BUG FIX: the previous implementation called
        // bun_libpas_try_realloc(buf.ptr, new_len) here and returned true on
        // any non-null result. realloc is allowed to MOVE the block, but the
        // resize contract requires strictly in-place growth, so a moved block
        // left callers holding a freed pointer (use-after-free). No
        // in-place-only grow primitive is exposed by the C side, so report
        // failure and let the caller alloc+copy+free.
        return false;
    }

    /// Allocator vtable: release an allocation made by `alloc`.
    fn free(
        _: *anyopaque,
        buf: []u8,
        log2_buf_align: u8,
        return_address: usize,
    ) void {
        _ = log2_buf_align;
        _ = return_address;
        alignedFree(buf.ptr);
    }

    // Additional utility functions for direct usage

    /// Round `size` up to an allocation size the allocator serves exactly.
    pub fn goodSize(size: usize) usize {
        return bun_libpas_malloc_good_size(size);
    }

    /// Usable size of an allocation returned by this allocator.
    pub fn allocSize(ptr: *const anyopaque) usize {
        return bun_libpas_malloc_size(ptr);
    }
};
/// Supports the full Allocator interface, including alignment, and exploiting
/// malloc_size functionality. This allocator uses libpas (bmalloc) under the hood.
pub const libpas_allocator = Allocator{
    // The vtable functions ignore their context argument, so `ptr` is never read.
    .ptr = undefined,
    .vtable = &libpas_allocator_vtable,
};

// Vtable wiring for std.mem.Allocator; the implementations live in
// LibPasAllocator above. Note: other code compares `Allocator.vtable` pointers
// against `libpas_allocator.vtable` to identify this allocator, so this must
// remain the single shared VTable instance.
const libpas_allocator_vtable = Allocator.VTable{
    .alloc = LibPasAllocator.alloc,
    .resize = LibPasAllocator.resize,
    .free = LibPasAllocator.free,
};
/// Virtual memory management functions
pub const virtual = struct {
    /// Allocates zeroed virtual memory pages aligned to the system page size.
    /// The size will be rounded up to the nearest page size.
    /// Returns null on failure.
    pub fn allocatePages(size: usize) ?[*]u8 {
        return @as([*]u8, @ptrCast(bun_libpas_try_allocate_zeroed_virtual_pages(size)));
    }

    /// Frees virtual memory pages previously allocated with allocatePages.
    /// `size` must match the size passed to allocatePages.
    pub fn freePages(ptr: [*]u8, size: usize) void {
        bun_libpas_free_virtual_pages(ptr, size);
    }

    /// Allocates zeroed virtual memory pages and returns them as a slice of
    /// `count` items of T. Returns null on allocation failure or when
    /// count * @sizeOf(T) overflows usize (the original multiplied unchecked,
    /// which panics in safe builds and wraps — under-allocating — in
    /// ReleaseFast).
    pub fn allocatePagesSlice(comptime T: type, count: usize) ?[]T {
        const size = std.math.mul(usize, count, @sizeOf(T)) catch return null;
        const ptr = allocatePages(size) orelse return null;
        return @as([*]T, @ptrCast(@alignCast(ptr)))[0..count];
    }
};
/// Utility functions for memory management
pub const memory = struct {
    /// Return free memory held by the allocator's caches to the OS
    /// (all threads).
    pub fn scavenge() void {
        bun_libpas_scavenge();
    }

    /// Return only the current thread's cached memory to the OS.
    pub fn scavengeThisThread() void {
        bun_libpas_scavenge_this_thread();
    }

    /// Usable size of an allocation made by the libpas allocator.
    pub fn mallocSize(ptr: *const anyopaque) usize {
        return bun_libpas_malloc_size(ptr);
    }

    /// Round `size` up to an allocation size the allocator serves exactly.
    pub fn mallocGoodSize(size: usize) usize {
        return bun_libpas_malloc_good_size(size);
    }

    /// Allocate zeroed memory directly
    /// (fallible: returns null on failure; count * size is multiplied on the
    /// C side — see bun_libpas_try_calloc).
    pub fn calloc(count: usize, size: usize) ?*anyopaque {
        return bun_libpas_try_calloc(count, size);
    }

    /// Try to resize memory in place
    /// NOTE(review): the underlying C realloc may MOVE the allocation and
    /// return a different pointer — callers must use the returned pointer,
    /// not the original. Confirm all call sites honor this.
    pub fn tryRealloc(ptr: *anyopaque, size: usize) ?*anyopaque {
        return bun_libpas_try_realloc(ptr, size);
    }
};

View File

@@ -136,7 +136,7 @@ pub const Arena = struct {
/// It uses pthread_getspecific to do that.
/// We can save those extra calls if we just do it once in here
pub fn getThreadlocalDefault() Allocator {
return Allocator{ .ptr = mimalloc.mi_heap_get_default(), .vtable = &c_allocator_vtable };
return Allocator{ .ptr = mimalloc.mi_heap_get_default(), .vtable = c_allocator_vtable };
}
pub fn backingAllocator(this: Arena) Allocator {
@@ -146,7 +146,7 @@ pub const Arena = struct {
pub fn allocator(this: Arena) Allocator {
@setRuntimeSafety(false);
return Allocator{ .ptr = this.heap.?, .vtable = &c_allocator_vtable };
return Allocator{ .ptr = this.heap.?, .vtable = c_allocator_vtable };
}
pub fn deinit(this: *Arena) void {
@@ -283,9 +283,11 @@ pub const Arena = struct {
mimalloc.mi_free(buf.ptr);
}
}
pub const VTable = c_allocator_vtable;
};
const c_allocator_vtable = Allocator.VTable{
const c_allocator_vtable = &Allocator.VTable{
.alloc = &Arena.alloc,
.resize = &Arena.resize,
.free = &Arena.free,

View File

@@ -625,6 +625,27 @@ pub const String = extern struct {
/// len is the number of characters in that buffer.
pub const ExternalStringImplFreeFunction = fn (ctx: *anyopaque, buffer: *anyopaque, len: u32) callconv(.C) void;
fn freeExternalStringImplFromVTable(vtable: *const std.mem.Allocator.VTable, buffer: *anyopaque, _: u32) callconv(.C) void {
if (vtable == bun.default_allocator.vtable or vtable == bun.MimallocArena.VTable) {
bun.Mimalloc.mi_free(buffer);
} else if (vtable == bun.libpas_allocator.vtable) {
bun.libpas.bun_libpas_free(buffer);
} else {
@panic("Unknown allocator");
}
}
pub fn createExternalWithKnownAllocator(allocator: std.mem.Allocator, bytes: anytype, latin1_or_utf16: String.WTFStringEncoding) String {
JSC.markBinding(@src());
bun.assert(bytes.len > 0);
if (bytes.len > max_length()) {
allocator.free(bytes);
return dead;
}
return BunString__createExternal(@ptrCast(bytes.ptr), bytes.len, latin1_or_utf16 == .latin1, @constCast(allocator.vtable), @ptrCast(&freeExternalStringImplFromVTable));
}
pub fn createExternal(bytes: []const u8, isLatin1: bool, ctx: *anyopaque, callback: ?*const ExternalStringImplFreeFunction) String {
JSC.markBinding(@src());
bun.assert(bytes.len > 0);
@@ -1395,7 +1416,7 @@ pub const SliceWithUnderlyingString = struct {
/// Transcode a byte array to an encoded String, avoiding unnecessary copies.
///
/// owned_input_bytes ownership is transferred to this function
pub fn transcodeFromOwnedSlice(owned_input_bytes: []u8, encoding: JSC.Node.Encoding) SliceWithUnderlyingString {
pub fn transcodeFromOwnedSlice(allocator: std.mem.Allocator, owned_input_bytes: []u8, encoding: JSC.Node.Encoding) SliceWithUnderlyingString {
if (owned_input_bytes.len == 0) {
return .{
.utf8 = ZigString.Slice.empty,
@@ -1404,7 +1425,7 @@ pub const SliceWithUnderlyingString = struct {
}
return .{
.underlying = JSC.WebCore.Encoder.toBunStringFromOwnedSlice(owned_input_bytes, encoding),
.underlying = JSC.WebCore.Encoder.toBunStringFromOwnedSlice(allocator, owned_input_bytes, encoding),
};
}