Compare commits

...

11 Commits

Author         SHA1        Message                                             Date
Jarred Sumner  073a87a344  Update bindings.cpp                                 2025-04-13 12:12:28 -07:00
Jarred Sumner  284048648b  Try using reportAbandonedObjectGraph a lot          2025-04-13 11:29:49 -07:00
Jarred Sumner  9db14f5db5  Delete our custom GC logic                          2025-04-13 10:56:54 -07:00
Jarred Sumner  f53639da88  move these files around                             2025-04-13 10:37:12 -07:00
Jarred Sumner  c134d8628f  Merge branch 'main' into jarred/revive-gc-timer-pr  2025-04-13 09:43:53 -07:00
Jarred Sumner  32340622fc  run it even more                                    2025-04-13 08:52:09 -07:00
Jarred Sumner  d69425d4ed  various cleanup                                     2025-04-13 08:48:26 -07:00
Jarred Sumner  4bf1737005  Update epoll_kqueue.c                               2025-04-13 08:16:14 -07:00
Jarred Sumner  8491c20e06  Revive #17919                                       2025-04-13 08:12:46 -07:00
Jarred Sumner  4353cc0940  Update BunClientData.cpp                            2025-04-13 08:04:16 -07:00
Jarred Sumner  acab7ac2f5  Update BunClientData.h                              2025-04-13 08:03:11 -07:00
13 changed files with 256 additions and 341 deletions

View File

@@ -19,6 +19,7 @@
#include "internal/internal.h"
#include <stdlib.h>
#include <time.h>
#include <stdbool.h>
#if defined(LIBUS_USE_EPOLL) || defined(LIBUS_USE_KQUEUE)
@@ -247,9 +248,9 @@ void us_loop_run(struct us_loop_t *loop) {
}
}
extern void Bun__JSC_onBeforeWait(void*);
extern void Bun__JSC_onAfterWait(void*);
extern void Bun__JSC_onBeforeWait(void* _Nonnull jsc_vm);
extern void Bun__JSC_onAfterWait(void* _Nonnull jsc_vm, bool hasMoreEventLoopWorkToDo);
extern void Bun__JSC_onDidRunCallbacks(void* _Nonnull jsc_vm);
void us_loop_run_bun_tick(struct us_loop_t *loop, const struct timespec* timeout) {
if (loop->num_polls == 0)
return;
@@ -265,8 +266,10 @@ void us_loop_run_bun_tick(struct us_loop_t *loop, const struct timespec* timeout
/* Emit pre callback */
us_internal_loop_pre(loop);
/* Safe if jsc_vm is NULL */
Bun__JSC_onBeforeWait(loop->data.jsc_vm);
void* jsc_vm = loop->data.jsc_vm;
if (jsc_vm) {
Bun__JSC_onBeforeWait(jsc_vm);
}
/* Fetch ready polls */
#ifdef LIBUS_USE_EPOLL
@@ -276,8 +279,9 @@ void us_loop_run_bun_tick(struct us_loop_t *loop, const struct timespec* timeout
loop->num_ready_polls = kevent64(loop->fd, NULL, 0, loop->ready_polls, 1024, 0, timeout);
} while (IS_EINTR(loop->num_ready_polls));
#endif
Bun__JSC_onAfterWait(loop->data.jsc_vm);
if (jsc_vm) {
Bun__JSC_onAfterWait(jsc_vm, loop->num_ready_polls > 0);
}
/* Iterate ready polls, dispatching them by type */
for (loop->current_ready_poll = 0; loop->current_ready_poll < loop->num_ready_polls; loop->current_ready_poll++) {
@@ -317,6 +321,10 @@ void us_loop_run_bun_tick(struct us_loop_t *loop, const struct timespec* timeout
}
}
if (jsc_vm) {
Bun__JSC_onDidRunCallbacks(jsc_vm);
}
/* Emit post callback */
us_internal_loop_post(loop);
}

View File

@@ -189,7 +189,7 @@ extern "C" GlobalObject* BakeCreateProdGlobal(void* console)
vm.heap.acquireAccess();
JSC::JSLockHolder locker(vm);
BunVirtualMachine* bunVM = Bun__getVM();
WebCore::JSVMClientData::create(&vm, bunVM);
WebCore::JSVMClientData::create(vm, bunVM, JSC::HeapType::Large);
JSC::Structure* structure = Bake::GlobalObject::createStructure(vm);
Bake::GlobalObject* global = Bake::GlobalObject::create(

View File

@@ -0,0 +1,67 @@
pub fn ConcurrentPromiseTask(comptime Context: type) type {
    return struct {
        const This = @This();
        ctx: *Context,
        task: WorkPoolTask = .{ .callback = &runFromThreadPool },
        event_loop: *JSC.EventLoop,
        allocator: std.mem.Allocator,
        promise: JSC.JSPromise.Strong = .{},
        globalThis: *JSC.JSGlobalObject,
        concurrent_task: JSC.ConcurrentTask = .{},

        // This is a poll because we want it to enter the uSockets loop
        ref: Async.KeepAlive = .{},

        pub const new = bun.TrivialNew(@This());

        pub fn createOnJSThread(allocator: std.mem.Allocator, globalThis: *JSC.JSGlobalObject, value: *Context) !*This {
            var this = This.new(.{
                .event_loop = VirtualMachine.get().event_loop,
                .ctx = value,
                .allocator = allocator,
                .globalThis = globalThis,
            });
            var promise = JSC.JSPromise.create(globalThis);
            this.promise.strong.set(globalThis, promise.asValue(globalThis));
            this.ref.ref(this.event_loop.virtual_machine);

            return this;
        }

        pub fn runFromThreadPool(task: *WorkPoolTask) void {
            var this: *This = @fieldParentPtr("task", task);
            Context.run(this.ctx);
            this.onFinish();
        }

        pub fn runFromJS(this: *This) void {
            const promise = this.promise.swap();
            this.ref.unref(this.event_loop.virtual_machine);

            var ctx = this.ctx;
            ctx.then(promise);
        }

        pub fn schedule(this: *This) void {
            WorkPool.schedule(&this.task);
        }

        pub fn onFinish(this: *This) void {
            this.event_loop.enqueueTaskConcurrent(this.concurrent_task.from(this, .manual_deinit));
        }

        pub fn deinit(this: *This) void {
            this.promise.deinit();
            bun.destroy(this);
        }
    };
}

const bun = @import("root").bun;
const JSC = bun.JSC;
const WorkPool = JSC.WorkPool;
const Async = bun.Async;
const WorkPoolTask = JSC.WorkPoolTask;
const std = @import("std");
const VirtualMachine = JSC.VirtualMachine;
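The Context contract for ConcurrentPromiseTask is implied rather than documented: the task calls Context.run(ctx) on a WorkPool thread, then ctx.then(promise) on the JS thread once onFinish() has enqueued the concurrent task. A minimal sketch of a hypothetical consumer follows; HashJob, startHash, and the omission of cleanup are invented for illustration, and JSC.JSPromise.resolve, JSC.JSValue.jsNumber, and bun.default_allocator are assumed from the surrounding Bun codebase.

const std = @import("std");
const bun = @import("root").bun;
const JSC = bun.JSC;
const ConcurrentPromiseTask = @import("./ConcurrentPromiseTask.zig").ConcurrentPromiseTask;

// Hypothetical context: hash a buffer off the JS thread, then resolve a promise.
const HashJob = struct {
    input: []const u8,
    globalThis: *JSC.JSGlobalObject,
    result: u64 = 0,

    // Called on a WorkPool thread; must not touch JS values here.
    pub fn run(this: *HashJob) void {
        this.result = std.hash.Wyhash.hash(0, this.input);
    }

    // Called back on the JS thread with the promise created in createOnJSThread.
    pub fn then(this: *HashJob, promise: *JSC.JSPromise) void {
        promise.resolve(this.globalThis, JSC.JSValue.jsNumber(this.result));
    }
};

// On the JS thread: allocate the context, wrap it in a task, and schedule it.
fn startHash(globalThis: *JSC.JSGlobalObject, input: []const u8) !*ConcurrentPromiseTask(HashJob) {
    const job = try bun.default_allocator.create(HashJob);
    job.* = .{ .input = input, .globalThis = globalThis };
    const task = try ConcurrentPromiseTask(HashJob).createOnJSThread(bun.default_allocator, globalThis, job);
    task.schedule();
    return task; // task.promise holds the pending JSPromise to hand back to JavaScript
}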

src/bun.js/WorkTask.zig (new file, 73 lines)
View File

@@ -0,0 +1,73 @@
pub fn WorkTask(comptime Context: type) type {
    return struct {
        const TaskType = WorkPoolTask;
        const This = @This();
        ctx: *Context,
        task: TaskType = .{ .callback = &runFromThreadPool },
        event_loop: *JSC.EventLoop,
        allocator: std.mem.Allocator,
        globalThis: *JSC.JSGlobalObject,
        concurrent_task: ConcurrentTask = .{},
        async_task_tracker: JSC.AsyncTaskTracker,

        // This is a poll because we want it to enter the uSockets loop
        ref: Async.KeepAlive = .{},

        pub fn createOnJSThread(allocator: std.mem.Allocator, globalThis: *JSC.JSGlobalObject, value: *Context) !*This {
            var vm = globalThis.bunVM();
            var this = bun.new(This, .{
                .event_loop = vm.eventLoop(),
                .ctx = value,
                .allocator = allocator,
                .globalThis = globalThis,
                .async_task_tracker = JSC.AsyncTaskTracker.init(vm),
            });
            this.ref.ref(this.event_loop.virtual_machine);

            return this;
        }

        pub fn deinit(this: *This) void {
            this.ref.unref(this.event_loop.virtual_machine);
            bun.destroy(this);
        }

        pub fn runFromThreadPool(task: *TaskType) void {
            JSC.markBinding(@src());

            const this: *This = @fieldParentPtr("task", task);
            Context.run(this.ctx, this);
        }

        pub fn runFromJS(this: *This) void {
            var ctx = this.ctx;
            const tracker = this.async_task_tracker;
            const vm = this.event_loop.virtual_machine;
            const globalThis = this.globalThis;
            this.ref.unref(vm);

            tracker.willDispatch(globalThis);
            ctx.then(globalThis);
            tracker.didDispatch(globalThis);
        }

        pub fn schedule(this: *This) void {
            const vm = this.event_loop.virtual_machine;
            this.ref.ref(vm);
            this.async_task_tracker.didSchedule(this.globalThis);
            WorkPool.schedule(&this.task);
        }

        pub fn onFinish(this: *This) void {
            this.event_loop.enqueueTaskConcurrent(this.concurrent_task.from(this, .manual_deinit));
        }
    };
}

const bun = @import("root").bun;
const JSC = bun.JSC;
const WorkPool = JSC.WorkPool;
const Async = bun.Async;
const WorkPoolTask = JSC.WorkPoolTask;
const std = @import("std");
const ConcurrentTask = JSC.ConcurrentTask;
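WorkTask's Context contract differs from ConcurrentPromiseTask in two ways that are easy to miss: run() receives the task pointer and must call task.onFinish() itself, and then() is handed the global object rather than a promise, wrapped by the AsyncTaskTracker willDispatch/didDispatch hooks. A brief sketch of a hypothetical context, with StatJob invented for illustration:

const std = @import("std");
const bun = @import("root").bun;
const JSC = bun.JSC;
const WorkTask = @import("./WorkTask.zig").WorkTask;

const StatJob = struct {
    path: []const u8,
    err: ?anyerror = null,

    // Thread-pool side: do the blocking work, then hand control back explicitly.
    pub fn run(this: *StatJob, task: *WorkTask(StatJob)) void {
        std.fs.cwd().access(this.path, .{}) catch |e| {
            this.err = e;
        };
        task.onFinish();
    }

    // JS-thread side, invoked between the tracker's willDispatch/didDispatch calls.
    pub fn then(this: *StatJob, globalThis: *JSC.JSGlobalObject) void {
        _ = globalThis;
        if (this.err) |e| {
            // reject a stored promise or surface the error here
            _ = e;
        }
    }
};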

View File

@@ -1409,7 +1409,7 @@ pub const WTFTimer = struct {
}
pub export fn WTFTimer__runIfImminent(vm: *VirtualMachine) void {
vm.eventLoop().runImminentGCTimer();
_ = vm.eventLoop().runImminentGCTimer();
}
pub fn run(this: *WTFTimer, vm: *VirtualMachine) void {

View File

@@ -44,7 +44,7 @@ JSHeapData::JSHeapData(Heap& heap)
#define CLIENT_ISO_SUBSPACE_INIT(subspace) subspace(m_heapData->subspace)
JSVMClientData::JSVMClientData(VM& vm, RefPtr<SourceProvider> sourceProvider)
JSVMClientData::JSVMClientData(VM& vm, void* bunVM, RefPtr<SourceProvider> sourceProvider, JSC::HeapType heapType)
: m_builtinNames(vm)
, m_builtinFunctions(vm, sourceProvider, m_builtinNames)
, m_heapData(JSHeapData::ensureHeapData(vm.heap))
@@ -52,6 +52,7 @@ JSVMClientData::JSVMClientData(VM& vm, RefPtr<SourceProvider> sourceProvider)
, CLIENT_ISO_SUBSPACE_INIT(m_domConstructorSpace)
, CLIENT_ISO_SUBSPACE_INIT(m_domNamespaceObjectSpace)
, m_clientSubspaces(makeUnique<ExtendedDOMClientIsoSubspaces>())
, bunVM(bunVM)
{
}
@@ -77,26 +78,25 @@ JSVMClientData::~JSVMClientData()
ASSERT(m_normalWorld->hasOneRef());
m_normalWorld = nullptr;
}
void JSVMClientData::create(VM* vm, void* bunVM)
void JSVMClientData::create(VM& vm, void* bunVM, JSC::HeapType heapType)
{
auto provider = WebCore::createBuiltinsSourceProvider();
JSVMClientData* clientData = new JSVMClientData(*vm, provider);
clientData->bunVM = bunVM;
vm->deferredWorkTimer->onAddPendingWork = [clientData](Ref<JSC::DeferredWorkTimer::TicketData>&& ticket, JSC::DeferredWorkTimer::WorkType kind) -> void {
JSVMClientData* clientData = new JSVMClientData(vm, bunVM, provider, heapType);
vm.deferredWorkTimer->onAddPendingWork = [clientData](Ref<JSC::DeferredWorkTimer::TicketData>&& ticket, JSC::DeferredWorkTimer::WorkType kind) -> void {
Bun::JSCTaskScheduler::onAddPendingWork(clientData, WTFMove(ticket), kind);
};
vm->deferredWorkTimer->onScheduleWorkSoon = [clientData](JSC::DeferredWorkTimer::Ticket ticket, JSC::DeferredWorkTimer::Task&& task) -> void {
vm.deferredWorkTimer->onScheduleWorkSoon = [clientData](JSC::DeferredWorkTimer::Ticket ticket, JSC::DeferredWorkTimer::Task&& task) -> void {
Bun::JSCTaskScheduler::onScheduleWorkSoon(clientData, ticket, WTFMove(task));
};
vm->deferredWorkTimer->onCancelPendingWork = [clientData](JSC::DeferredWorkTimer::Ticket ticket) -> void {
vm.deferredWorkTimer->onCancelPendingWork = [clientData](JSC::DeferredWorkTimer::Ticket ticket) -> void {
Bun::JSCTaskScheduler::onCancelPendingWork(clientData, ticket);
};
vm->clientData = clientData; // ~VM deletes this pointer.
clientData->m_normalWorld = DOMWrapperWorld::create(*vm, DOMWrapperWorld::Type::Normal);
vm.clientData = clientData; // ~VM deletes this pointer.
clientData->m_normalWorld = DOMWrapperWorld::create(vm, DOMWrapperWorld::Type::Normal);
vm->heap.addMarkingConstraint(makeUnique<WebCore::DOMGCOutputConstraint>(*vm, clientData->heapData()));
vm->m_typedArrayController = adoptRef(new WebCoreTypedArrayController(true));
vm.heap.addMarkingConstraint(makeUnique<WebCore::DOMGCOutputConstraint>(vm, clientData->heapData()));
vm.m_typedArrayController = adoptRef(new WebCoreTypedArrayController(true));
clientData->builtinFunctions().exportNames();
}

View File

@@ -24,6 +24,7 @@ class DOMWrapperWorld;
#include "WebCoreJSBuiltins.h"
#include "JSCTaskScheduler.h"
#include "HTTPHeaderIdentifiers.h"
namespace Zig {
}
@@ -79,11 +80,9 @@ class JSVMClientData : public JSC::VM::ClientData {
WTF_MAKE_FAST_ALLOCATED_WITH_HEAP_IDENTIFIER(JSVMClientData);
public:
explicit JSVMClientData(JSC::VM&, RefPtr<JSC::SourceProvider>);
virtual ~JSVMClientData();
static void create(JSC::VM*, void*);
static void create(JSC::VM& vm, void* bunVM, JSC::HeapType heapType);
JSHeapData& heapData() { return *m_heapData; }
BunBuiltinNames& builtinNames() { return m_builtinNames; }
@@ -116,6 +115,8 @@ public:
Bun::JSCTaskScheduler deferredWorkTimer;
private:
explicit JSVMClientData(JSC::VM&, void* bunVM, RefPtr<JSC::SourceProvider>, JSC::HeapType heapType);
bool isWebCoreJSClientData() const final { return true; }
BunBuiltinNames m_builtinNames;

View File

@@ -11,23 +11,23 @@ static thread_local std::optional<JSC::JSLock::DropAllLocks> drop_all_locks { st
extern "C" void WTFTimer__runIfImminent(void* bun_vm);
// Safe if VM is nullptr
extern "C" void Bun__JSC_onBeforeWait(JSC::VM* vm)
extern "C" void Bun__JSC_onBeforeWait(JSC::VM* _Nonnull vm)
{
ASSERT(!drop_all_locks.has_value());
if (vm) {
bool previouslyHadAccess = vm->heap.hasHeapAccess();
drop_all_locks.emplace(*vm);
if (previouslyHadAccess) {
vm->heap.releaseAccess();
}
bool previouslyHadAccess = vm->heap.hasHeapAccess();
drop_all_locks.emplace(*vm);
if (previouslyHadAccess) {
vm->heap.releaseAccess();
}
}
extern "C" void Bun__JSC_onAfterWait(JSC::VM* vm)
extern "C" void Bun__JSC_onAfterWait(JSC::VM* _Nonnull vm, bool hasMoreEventLoopWorkToDo)
{
vm->heap.acquireAccess();
drop_all_locks.reset();
}
extern "C" void Bun__JSC_onDidRunCallbacks(JSC::VM* _Nonnull vm)
{
if (vm) {
vm->heap.acquireAccess();
drop_all_locks.reset();
}
}

View File

@@ -36,6 +36,12 @@ pub const VM = opaque {
JSC__VM__holdAPILock(this, ctx, callback);
}
pub fn reportAbandonedObjectGraph(vm: *VM) void {
JSC__VM__reportAbandonedObjectGraph(vm);
}
extern fn JSC__VM__reportAbandonedObjectGraph(vm: *VM) void;
extern fn JSC__VM__getAPILock(vm: *VM) void;
extern fn JSC__VM__releaseAPILock(vm: *VM) void;

View File

@@ -173,7 +173,7 @@
#include "ProcessBindingBuffer.h"
#include "NodeValidator.h"
#include "ProcessBindingFs.h"
#include <JavaScriptCore/GCActivityCallback.h>
#include "JSBunRequest.h"
#include "ServerRouteList.h"
@@ -815,6 +815,7 @@ static JSValue computeErrorInfoWrapperToJSValue(JSC::VM& vm, Vector<StackFrame>&
static void checkIfNextTickWasCalledDuringMicrotask(JSC::VM& vm)
{
auto* globalObject = defaultGlobalObject();
if (auto nextTickQueueValue = globalObject->m_nextTickQueue.get()) {
auto* queue = jsCast<Bun::JSNextTickQueue*>(nextTickQueueValue);
@@ -825,6 +826,7 @@ static void checkIfNextTickWasCalledDuringMicrotask(JSC::VM& vm)
static void cleanupAsyncHooksData(JSC::VM& vm)
{
auto* globalObject = defaultGlobalObject();
globalObject->m_asyncContextData.get()->putInternalField(vm, 0, jsUndefined());
globalObject->asyncHooksNeedsCleanup = false;
@@ -891,6 +893,10 @@ void Zig::GlobalObject::resetOnEachMicrotaskTick()
extern "C" JSC__JSGlobalObject* Zig__GlobalObject__create(void* console_client, int32_t executionContextId, bool miniMode, bool evalMode, void* worker_ptr)
{
auto heapSize = miniMode ? JSC::HeapType::Small : JSC::HeapType::Large;
// We're going to create the timers ourselves.
JSC::GCActivityCallback::s_shouldCreateGCTimer = false;
RefPtr<JSC::VM> vmPtr = JSC::VM::tryCreate(heapSize);
if (UNLIKELY(!vmPtr)) {
BUN_PANIC("Failed to allocate JavaScriptCore Virtual Machine. Did your computer run out of memory? Or maybe you compiled Bun with a mismatching libc++ version or compiler?");
@@ -925,7 +931,7 @@ extern "C" JSC__JSGlobalObject* Zig__GlobalObject__create(void* console_client,
// Every JS VM's RunLoop should use Bun's RunLoop implementation
ASSERT(vmPtr->runLoop().kind() == WTF::RunLoop::Kind::Bun);
WebCore::JSVMClientData::create(&vm, Bun__getVM());
WebCore::JSVMClientData::create(vm, Bun__getVM(), heapSize);
const auto createGlobalObject = [&]() -> Zig::GlobalObject* {
if (UNLIKELY(executionContextId == std::numeric_limits<int32_t>::max() || executionContextId > 1)) {

View File

@@ -6187,6 +6187,16 @@ CPP_DECL void JSC__VM__setControlFlowProfiler(JSC__VM* vm, bool isEnabled)
}
}
extern "C" void JSC__VM__reportAbandonedObjectGraph(JSC__VM* vm)
{
static size_t previousAllocatedThisCycle = 0;
size_t allocatedThisCycle = vm->heap.blockBytesAllocated();
if (allocatedThisCycle != previousAllocatedThisCycle) {
previousAllocatedThisCycle = allocatedThisCycle;
vm->heap.collectAsync();
}
}
CPP_DECL void JSC__VM__performOpportunisticallyScheduledTasks(JSC__VM* vm, double until)
{
vm->performOpportunisticallyScheduledTasks(MonotonicTime::now() + Seconds(until), {});

View File

@@ -29,131 +29,8 @@ pub const WorkPoolTask = @import("../work_pool.zig").Task;
const uws = bun.uws;
const Async = bun.Async;
pub fn ConcurrentPromiseTask(comptime Context: type) type {
return struct {
const This = @This();
ctx: *Context,
task: WorkPoolTask = .{ .callback = &runFromThreadPool },
event_loop: *JSC.EventLoop,
allocator: std.mem.Allocator,
promise: JSC.JSPromise.Strong = .{},
globalThis: *JSC.JSGlobalObject,
concurrent_task: JSC.ConcurrentTask = .{},
// This is a poll because we want it to enter the uSockets loop
ref: Async.KeepAlive = .{},
pub const new = bun.TrivialNew(@This());
pub fn createOnJSThread(allocator: std.mem.Allocator, globalThis: *JSC.JSGlobalObject, value: *Context) !*This {
var this = This.new(.{
.event_loop = VirtualMachine.get().event_loop,
.ctx = value,
.allocator = allocator,
.globalThis = globalThis,
});
var promise = JSC.JSPromise.create(globalThis);
this.promise.strong.set(globalThis, promise.asValue(globalThis));
this.ref.ref(this.event_loop.virtual_machine);
return this;
}
pub fn runFromThreadPool(task: *WorkPoolTask) void {
var this: *This = @fieldParentPtr("task", task);
Context.run(this.ctx);
this.onFinish();
}
pub fn runFromJS(this: *This) void {
const promise = this.promise.swap();
this.ref.unref(this.event_loop.virtual_machine);
var ctx = this.ctx;
ctx.then(promise);
}
pub fn schedule(this: *This) void {
WorkPool.schedule(&this.task);
}
pub fn onFinish(this: *This) void {
this.event_loop.enqueueTaskConcurrent(this.concurrent_task.from(this, .manual_deinit));
}
pub fn deinit(this: *This) void {
this.promise.deinit();
bun.destroy(this);
}
};
}
pub fn WorkTask(comptime Context: type) type {
return struct {
const TaskType = WorkPoolTask;
const This = @This();
ctx: *Context,
task: TaskType = .{ .callback = &runFromThreadPool },
event_loop: *JSC.EventLoop,
allocator: std.mem.Allocator,
globalThis: *JSC.JSGlobalObject,
concurrent_task: ConcurrentTask = .{},
async_task_tracker: JSC.AsyncTaskTracker,
// This is a poll because we want it to enter the uSockets loop
ref: Async.KeepAlive = .{},
pub fn createOnJSThread(allocator: std.mem.Allocator, globalThis: *JSC.JSGlobalObject, value: *Context) !*This {
var vm = globalThis.bunVM();
var this = bun.new(This, .{
.event_loop = vm.eventLoop(),
.ctx = value,
.allocator = allocator,
.globalThis = globalThis,
.async_task_tracker = JSC.AsyncTaskTracker.init(vm),
});
this.ref.ref(this.event_loop.virtual_machine);
return this;
}
pub fn deinit(this: *This) void {
this.ref.unref(this.event_loop.virtual_machine);
bun.destroy(this);
}
pub fn runFromThreadPool(task: *TaskType) void {
JSC.markBinding(@src());
const this: *This = @fieldParentPtr("task", task);
Context.run(this.ctx, this);
}
pub fn runFromJS(this: *This) void {
var ctx = this.ctx;
const tracker = this.async_task_tracker;
const vm = this.event_loop.virtual_machine;
const globalThis = this.globalThis;
this.ref.unref(vm);
tracker.willDispatch(globalThis);
ctx.then(globalThis);
tracker.didDispatch(globalThis);
}
pub fn schedule(this: *This) void {
const vm = this.event_loop.virtual_machine;
this.ref.ref(vm);
this.async_task_tracker.didSchedule(this.globalThis);
WorkPool.schedule(&this.task);
}
pub fn onFinish(this: *This) void {
this.event_loop.enqueueTaskConcurrent(this.concurrent_task.from(this, .manual_deinit));
}
};
}
pub const ConcurrentPromiseTask = @import("./ConcurrentPromiseTask.zig").ConcurrentPromiseTask;
pub const WorkTask = @import("./WorkTask.zig").WorkTask;
pub const AnyTask = struct {
ctx: ?*anyopaque,
@@ -589,156 +466,6 @@ pub const ConcurrentTask = struct {
}
};
// This type must be unique per JavaScript thread
pub const GarbageCollectionController = struct {
gc_timer: *uws.Timer = undefined,
gc_last_heap_size: usize = 0,
gc_last_heap_size_on_repeating_timer: usize = 0,
heap_size_didnt_change_for_repeating_timer_ticks_count: u8 = 0,
gc_timer_state: GCTimerState = GCTimerState.pending,
gc_repeating_timer: *uws.Timer = undefined,
gc_timer_interval: i32 = 0,
gc_repeating_timer_fast: bool = true,
disabled: bool = false,
pub fn init(this: *GarbageCollectionController, vm: *VirtualMachine) void {
const actual = uws.Loop.get();
this.gc_timer = uws.Timer.createFallthrough(actual, this);
this.gc_repeating_timer = uws.Timer.createFallthrough(actual, this);
actual.internal_loop_data.jsc_vm = vm.jsc;
if (comptime Environment.isDebug) {
if (bun.getenvZ("BUN_TRACK_LAST_FN_NAME") != null) {
vm.eventLoop().debug.track_last_fn_name = true;
}
}
var gc_timer_interval: i32 = 1000;
if (vm.transpiler.env.get("BUN_GC_TIMER_INTERVAL")) |timer| {
if (std.fmt.parseInt(i32, timer, 10)) |parsed| {
if (parsed > 0) {
gc_timer_interval = parsed;
}
} else |_| {}
}
this.gc_timer_interval = gc_timer_interval;
this.disabled = vm.transpiler.env.has("BUN_GC_TIMER_DISABLE");
if (!this.disabled)
this.gc_repeating_timer.set(this, onGCRepeatingTimer, gc_timer_interval, gc_timer_interval);
}
pub fn scheduleGCTimer(this: *GarbageCollectionController) void {
this.gc_timer_state = .scheduled;
this.gc_timer.set(this, onGCTimer, 16, 0);
}
pub fn bunVM(this: *GarbageCollectionController) *VirtualMachine {
return @alignCast(@fieldParentPtr("gc_controller", this));
}
pub fn onGCTimer(timer: *uws.Timer) callconv(.C) void {
var this = timer.as(*GarbageCollectionController);
if (this.disabled) return;
this.gc_timer_state = .run_on_next_tick;
}
// We want to always run GC once in awhile
// But if you have a long-running instance of Bun, you don't want the
// program constantly using CPU doing GC for no reason
//
// So we have two settings for this GC timer:
//
// - Fast: GC runs every 1 second
// - Slow: GC runs every 30 seconds
//
// When the heap size is increasing, we always switch to fast mode
// When the heap size has been the same or less for 30 seconds, we switch to slow mode
pub fn updateGCRepeatTimer(this: *GarbageCollectionController, comptime setting: @Type(.enum_literal)) void {
if (setting == .fast and !this.gc_repeating_timer_fast) {
this.gc_repeating_timer_fast = true;
this.gc_repeating_timer.set(this, onGCRepeatingTimer, this.gc_timer_interval, this.gc_timer_interval);
this.heap_size_didnt_change_for_repeating_timer_ticks_count = 0;
} else if (setting == .slow and this.gc_repeating_timer_fast) {
this.gc_repeating_timer_fast = false;
this.gc_repeating_timer.set(this, onGCRepeatingTimer, 30_000, 30_000);
this.heap_size_didnt_change_for_repeating_timer_ticks_count = 0;
}
}
pub fn onGCRepeatingTimer(timer: *uws.Timer) callconv(.C) void {
var this = timer.as(*GarbageCollectionController);
const prev_heap_size = this.gc_last_heap_size_on_repeating_timer;
this.performGC();
this.gc_last_heap_size_on_repeating_timer = this.gc_last_heap_size;
if (prev_heap_size == this.gc_last_heap_size_on_repeating_timer) {
this.heap_size_didnt_change_for_repeating_timer_ticks_count +|= 1;
if (this.heap_size_didnt_change_for_repeating_timer_ticks_count >= 30) {
// make the timer interval longer
this.updateGCRepeatTimer(.slow);
}
} else {
this.heap_size_didnt_change_for_repeating_timer_ticks_count = 0;
this.updateGCRepeatTimer(.fast);
}
}
pub fn processGCTimer(this: *GarbageCollectionController) void {
if (this.disabled) return;
var vm = this.bunVM().jsc;
this.processGCTimerWithHeapSize(vm, vm.blockBytesAllocated());
}
fn processGCTimerWithHeapSize(this: *GarbageCollectionController, vm: *JSC.VM, this_heap_size: usize) void {
const prev = this.gc_last_heap_size;
switch (this.gc_timer_state) {
.run_on_next_tick => {
// When memory usage is not stable, run the GC more.
if (this_heap_size != prev) {
this.scheduleGCTimer();
this.updateGCRepeatTimer(.fast);
} else {
this.gc_timer_state = .pending;
}
vm.collectAsync();
this.gc_last_heap_size = this_heap_size;
},
.pending => {
if (this_heap_size != prev) {
this.updateGCRepeatTimer(.fast);
if (this_heap_size > prev * 2) {
this.performGC();
} else {
this.scheduleGCTimer();
}
}
},
.scheduled => {
if (this_heap_size > prev * 2) {
this.updateGCRepeatTimer(.fast);
this.performGC();
}
},
}
}
pub fn performGC(this: *GarbageCollectionController) void {
if (this.disabled) return;
var vm = this.bunVM().jsc;
vm.collectAsync();
this.gc_last_heap_size = vm.blockBytesAllocated();
}
pub const GCTimerState = enum {
pending,
scheduled,
run_on_next_tick,
};
};
export fn Bun__tickWhilePaused(paused: *bool) void {
JSC.markBinding(@src());
VirtualMachine.get().eventLoop().tickWhilePaused(paused);
@@ -839,7 +566,7 @@ pub const EventLoop = struct {
entered_event_loop_count: isize = 0,
concurrent_ref: std.atomic.Value(i32) = std.atomic.Value(i32).init(0),
imminent_gc_timer: std.atomic.Value(?*JSC.BunTimer.WTFTimer) = .{ .raw = null },
is_doing_something_important: bool = false,
signal_handler: if (Environment.isPosix) ?*PosixSignalHandle else void = if (Environment.isPosix) null,
pub export fn Bun__ensureSignalHandler() void {
@@ -854,6 +581,36 @@ pub const EventLoop = struct {
}
}
pub fn important(this: *EventLoop) ImportantScope {
return .{ .previous_important = this.is_doing_something_important, .event_loop = this };
}
pub const ImportantScope = struct {
previous_important: bool = false,
event_loop: *EventLoop,
pub fn enter(this: *const ImportantScope) void {
this.event_loop.is_doing_something_important = true;
}
pub fn exit(this: *const ImportantScope) void {
this.event_loop.is_doing_something_important = this.previous_important;
}
};
fn enterActiveLoop(loop: *uws.Loop, ctx: *VirtualMachine) void {
var deadline: bun.timespec = undefined;
var event_loop_sleep_timer = if (comptime Environment.isDebug) std.time.Timer.start() catch unreachable;
const timeout = ctx.timer.getTimeout(&deadline, ctx);
loop.tickWithTimeout(if (timeout) &deadline else null);
if (comptime Environment.isDebug) {
log("tick {}, timeout: {}", .{ std.fmt.fmtDuration(event_loop_sleep_timer.read()), std.fmt.fmtDuration(deadline.ns()) });
}
}
pub const Debug = if (Environment.isDebug) struct {
is_inside_tick_queue: bool = false,
js_call_count_outside_tick_queue: usize = 0,
@@ -932,6 +689,10 @@ pub const EventLoop = struct {
if (comptime bun.Environment.isDebug) {
this.debug.drain_microtasks_count_outside_tick_queue += @as(usize, @intFromBool(!this.debug.is_inside_tick_queue));
}
if (!this.runImminentGCTimer()) {
this.performGC();
}
}
pub fn drainMicrotasks(this: *EventLoop) void {
@@ -1456,10 +1217,12 @@ pub const EventLoop = struct {
}
}
pub fn runImminentGCTimer(this: *EventLoop) void {
pub fn runImminentGCTimer(this: *EventLoop) bool {
if (this.imminent_gc_timer.swap(null, .seq_cst)) |timer| {
timer.run(this.virtual_machine);
return true;
}
return false;
}
pub fn tickConcurrentWithCount(this: *EventLoop) usize {
@@ -1471,7 +1234,7 @@ pub const EventLoop = struct {
}
}
this.runImminentGCTimer();
_ = this.runImminentGCTimer();
var concurrent = this.concurrent_tasks.popBatch();
const count = concurrent.count;
@@ -1546,18 +1309,10 @@ pub const EventLoop = struct {
}
}
this.runImminentGCTimer();
_ = this.runImminentGCTimer();
if (loop.isActive()) {
this.processGCTimer();
var event_loop_sleep_timer = if (comptime Environment.isDebug) std.time.Timer.start() catch unreachable;
// for the printer, this is defined:
var timespec: bun.timespec = if (Environment.isDebug) .{ .sec = 0, .nsec = 0 } else undefined;
loop.tickWithTimeout(if (ctx.timer.getTimeout(&timespec, ctx)) &timespec else null);
if (comptime Environment.isDebug) {
log("tick {}, timeout: {}", .{ std.fmt.fmtDuration(event_loop_sleep_timer.read()), std.fmt.fmtDuration(timespec.ns()) });
}
enterActiveLoop(loop, ctx);
} else {
loop.tickWithoutIdle();
if (comptime Environment.isDebug) {
@@ -1593,7 +1348,6 @@ pub const EventLoop = struct {
}
}
this.processGCTimer();
this.processGCTimer();
loop.tick();
@@ -1626,10 +1380,7 @@ pub const EventLoop = struct {
}
if (loop.isActive()) {
this.processGCTimer();
var timespec: bun.timespec = undefined;
loop.tickWithTimeout(if (ctx.timer.getTimeout(&timespec, ctx)) &timespec else null);
enterActiveLoop(loop, ctx);
} else {
loop.tickWithoutIdle();
}
@@ -1642,7 +1393,7 @@ pub const EventLoop = struct {
}
pub fn processGCTimer(this: *EventLoop) void {
this.virtual_machine.gc_controller.processGCTimer();
this.virtual_machine.jsc.reportAbandonedObjectGraph();
}
pub fn tick(this: *EventLoop) void {
@@ -1743,18 +1494,12 @@ pub const EventLoop = struct {
} else {
this.virtual_machine.event_loop_handle = bun.Async.Loop.get();
}
this.virtual_machine.gc_controller.init(this.virtual_machine);
// _ = actual.addPostHandler(*JSC.EventLoop, this, JSC.EventLoop.afterUSocketsTick);
// _ = actual.addPreHandler(*JSC.VM, this.virtual_machine.jsc, JSC.VM.drainMicrotasks);
}
bun.uws.Loop.get().internal_loop_data.setParentEventLoop(bun.JSC.EventLoopHandle.init(this));
}
/// Asynchronously run the garbage collector and track how much memory is now allocated
pub fn performGC(this: *EventLoop) void {
this.virtual_machine.gc_controller.performGC();
}
pub fn performGC(_: *EventLoop) void {}
pub fn wakeup(this: *EventLoop) void {
if (comptime Environment.isWindows) {

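One addition in this file has no call sites anywhere in the compare: EventLoop.important(), which returns the new ImportantScope. A sketch of the intended enter/exit pattern, inferred from the struct definition above; the surrounding function is hypothetical and assumes the JSC alias used throughout this file:

// Because exit() restores the previous flag instead of clearing it, scopes nest safely.
fn doLatencySensitiveWork(event_loop: *JSC.EventLoop) void {
    const scope = event_loop.important();
    scope.enter();
    defer scope.exit();

    // ... work that should keep is_doing_something_important set ...
}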
View File

@@ -887,7 +887,6 @@ pub const VirtualMachine = struct {
module_loader: ModuleLoader = .{},
gc_controller: JSC.GarbageCollectionController = .{},
worker: ?*JSC.WebWorker = null,
ipc: ?IPCInstanceUnion = null,
@@ -1368,7 +1367,7 @@ pub const VirtualMachine = struct {
};
}
pub inline fn eventLoop(this: *VirtualMachine) *EventLoop {
pub inline fn eventLoop(this: *const VirtualMachine) *EventLoop {
return this.event_loop;
}