mirror of https://github.com/oven-sh/bun (synced 2026-02-03 07:28:53 +00:00)

Comparing dylan/pyth...jarred/gc (27 commits)
0f64e56efd
3eea78a8e0
c63e8215bd
f5f1aae80d
b139b351c9
8d9583dbec
acdbbfcd9a
7a28e013d3
0308c83713
87be346736
e81d01bb04
798df5e3fd
8fd538cd8a
5a8783e818
7064339760
0e2bac82c5
3fe48f6535
3183401956
c3765a4239
c6f7000df9
75ca134707
599986cd6b
794b4e6fcb
50199657d4
98467da4ae
bb8022860c
b016940508
@@ -7,4 +7,5 @@ src/react-refresh.js
 *.min.js
 test/snippets
 test/js/node/test
 test/napi/node-napi-tests
+bun.lock
@@ -733,6 +733,7 @@ struct us_socket_t *us_socket_context_connect_unix(int ssl, struct us_socket_con
connect_socket->long_timeout = 255;
connect_socket->low_prio_state = 0;
connect_socket->connect_state = NULL;
connect_socket->connect_next = NULL;
connect_socket->allow_half_open = (options & LIBUS_SOCKET_ALLOW_HALF_OPEN);
us_internal_socket_context_link_socket(context, connect_socket);
@@ -19,6 +19,7 @@
 #include "internal/internal.h"
 #include <stdlib.h>
 #include <time.h>
+#include <stdbool.h>

 #if defined(LIBUS_USE_EPOLL) || defined(LIBUS_USE_KQUEUE)
@@ -248,7 +249,8 @@ void us_loop_run(struct us_loop_t *loop) {
 }

 extern void Bun__JSC_onBeforeWait(void*);
-extern void Bun__JSC_onAfterWait(void*);
+extern void Bun__JSC_onAfterWait(void*, bool hasMoreEventLoopWorkToDo);
+extern void Bun__JSC_onDidRunCallbacks(void*);

 void us_loop_run_bun_tick(struct us_loop_t *loop, const struct timespec* timeout) {
     if (loop->num_polls == 0)
@@ -265,8 +267,10 @@ void us_loop_run_bun_tick(struct us_loop_t *loop, const struct timespec* timeout
     /* Emit pre callback */
     us_internal_loop_pre(loop);

-    /* Safe if jsc_vm is NULL */
-    Bun__JSC_onBeforeWait(loop->data.jsc_vm);
+    void* jsc_vm = loop->data.jsc_vm;
+    if (jsc_vm) {
+        Bun__JSC_onBeforeWait(jsc_vm);
+    }

     /* Fetch ready polls */
 #ifdef LIBUS_USE_EPOLL
@@ -278,7 +282,9 @@ void us_loop_run_bun_tick(struct us_loop_t *loop, const struct timespec* timeout
     } while (IS_EINTR(loop->num_ready_polls));
 #endif

-    Bun__JSC_onAfterWait(loop->data.jsc_vm);
+    if (jsc_vm) {
+        Bun__JSC_onAfterWait(jsc_vm, loop->num_ready_polls > 0);
+    }

     /* Iterate ready polls, dispatching them by type */
     for (loop->current_ready_poll = 0; loop->current_ready_poll < loop->num_ready_polls; loop->current_ready_poll++) {
@@ -318,6 +324,10 @@ void us_loop_run_bun_tick(struct us_loop_t *loop, const struct timespec* timeout
         }
     }

+    if (jsc_vm) {
+        Bun__JSC_onDidRunCallbacks(jsc_vm);
+    }
+
     /* Emit post callback */
     us_internal_loop_post(loop);
 }
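As an aside, here is a minimal, self-contained C++ sketch of the bracketing pattern in us_loop_run_bun_tick above. The names (WaitHooks, runTick, fakeWait) are hypothetical, not Bun's API; the point is the ordering: drop VM/heap access before the blocking wait, reacquire it afterwards along with a flag saying whether the wakeup brought ready polls, then fire a final hook once the ready callbacks have run.

#include <cstdio>
#include <functional>

struct WaitHooks {
    std::function<void()> onBeforeWait;       // e.g. release heap access
    std::function<void(bool)> onAfterWait;    // bool: were any polls ready?
    std::function<void()> onDidRunCallbacks;  // e.g. clear the "more work" flag
};

// Stand-in for epoll_wait/kevent: pretend two events are ready.
static int fakeWait() { return 2; }

static void runTick(const WaitHooks& hooks) {
    if (hooks.onBeforeWait) hooks.onBeforeWait();
    int ready = fakeWait(); // the blocking point
    if (hooks.onAfterWait) hooks.onAfterWait(ready > 0);
    for (int i = 0; i < ready; i++)
        std::printf("dispatch poll %d\n", i);
    if (hooks.onDidRunCallbacks) hooks.onDidRunCallbacks();
}

int main() {
    WaitHooks hooks;
    hooks.onBeforeWait = [] { std::puts("before wait: drop locks"); };
    hooks.onAfterWait = [](bool more) { std::printf("after wait: hasMoreWork=%d\n", more); };
    hooks.onDidRunCallbacks = [] { std::puts("callbacks done: clear flag"); };
    runTick(hooks);
    return 0;
}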
@@ -312,7 +312,7 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int eof, in
s->long_timeout = 255;
s->low_prio_state = 0;
s->allow_half_open = listen_socket->s.allow_half_open;

s->connect_next = 0;

/* We always use nodelay */
bsd_socket_nodelay(client_fd, 1);
@@ -317,18 +317,17 @@ struct us_socket_t *us_socket_from_fd(struct us_socket_context_t *ctx, int socke

 struct us_socket_t *s = (struct us_socket_t *) p1;
 s->context = ctx;
-s->timeout = 0;
-s->long_timeout = 0;
+s->timeout = 255;
+s->long_timeout = 255;
 s->low_prio_state = 0;
 s->allow_half_open = 0;
 s->connect_next = 0;
 s->connect_state = 0;

 /* We always use nodelay */
 bsd_socket_nodelay(fd, 1);

-int flags = fcntl(fd, F_GETFL, 0);
-if (flags != -1) {
-    flags |= O_NONBLOCK;
-    fcntl(fd, F_SETFL, flags);
-}
+apple_no_sigpipe(fd);
+bsd_set_nonblocking(fd);

 us_internal_socket_context_link_socket(ctx, s);
@@ -189,7 +189,7 @@ extern "C" GlobalObject* BakeCreateProdGlobal(void* console)
     vm.heap.acquireAccess();
     JSC::JSLockHolder locker(vm);
     BunVirtualMachine* bunVM = Bun__getVM();
-    WebCore::JSVMClientData::create(&vm, bunVM);
+    WebCore::JSVMClientData::create(vm, bunVM, JSC::HeapType::Large);

     JSC::Structure* structure = Bake::GlobalObject::createStructure(vm);
     Bake::GlobalObject* global = Bake::GlobalObject::create(
src/bun.js/GCController.zig (new file, 41 lines)
@@ -0,0 +1,41 @@
const std = @import("std");
const bun = @import("root").bun;
const JSC = bun.JSC;
const VM = JSC.VM;

pub export fn Bun__isBusyDoingImportantWork(vm: *JSC.VirtualMachine) bool {
    const loop = vm.eventLoop();
    return loop.is_doing_something_important or
        loop.tasks.count > 0 or
        loop.immediate_tasks.count > 0 or loop.next_immediate_tasks.count > 0 or
        loop.concurrent_tasks.peek() > 0;
}

// Wrapper for the Bun::GCController C++ class
pub const GCController = opaque {
    pub export fn Bun__GCController__setup(ptr: *GCController) void {
        const vm = JSC.VirtualMachine.get();
        vm.gc_controller = ptr;
    }

    pub fn performGC(this: *GCController) void {
        this.performOpportunisticGC();
    }

    extern "c" fn Bun__GCController__initialize(controller: *GCController) void;
    extern "c" fn Bun__GCController__get(vm: *VM) *GCController;
    extern "c" fn Bun__GCController__performOpportunisticGC(controller: *GCController) void;
    extern "c" fn Bun__GCController__getMetrics(controller: *GCController, incrementalSweepCount: ?*usize, edenGCCount: ?*usize, fullGCCount: ?*usize, totalSweepTimeMs: ?*f64, maxSweepTimeMs: ?*f64) void;

    fn get(vm: *VM) *GCController {
        return Bun__GCController__get(vm);
    }

    pub fn performOpportunisticGC(this: *GCController) void {
        Bun__GCController__performOpportunisticGC(this);
    }

    pub fn getMetrics(this: *GCController, incrementalSweepCount: ?*usize, edenGCCount: ?*usize, fullGCCount: ?*usize, totalSweepTimeMs: ?*f64, maxSweepTimeMs: ?*f64) void {
        Bun__GCController__getMetrics(this, incrementalSweepCount, edenGCCount, fullGCCount, totalSweepTimeMs, maxSweepTimeMs);
    }
};
@@ -26,6 +26,38 @@ pub const TimeoutMap = std.AutoArrayHashMapUnmanaged(

 const TimerHeap = heap.Intrusive(EventLoopTimer, void, EventLoopTimer.less);

+pub const TimerRef = struct {
+    state: enum {
+        unset,
+        ref,
+        unref,
+    } = .unset,
+
+    fn setRef(this: *TimerRef, enable: bool, vm: *JSC.VirtualMachine) void {
+        if (enable and this.state == .ref) {
+            return;
+        } else if (!enable and this.state != .ref) {
+            return;
+        }
+
+        if (enable) {
+            this.state = .ref;
+            vm.timer.incrementTimerRef(1);
+        } else {
+            this.state = .unref;
+            vm.timer.incrementTimerRef(-1);
+        }
+    }
+
+    pub fn unref(this: *TimerRef, vm: *JSC.VirtualMachine) void {
+        setRef(this, false, vm);
+    }
+
+    pub fn ref(this: *TimerRef, vm: *JSC.VirtualMachine) void {
+        setRef(this, true, vm);
+    }
+};
+
 pub const All = struct {
     last_id: i32 = 1,
     lock: bun.Mutex = .{},
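A minimal C++ analogue of the TimerRef state machine added above, assuming only what the Zig code shows (names hypothetical): the tri-state guard guarantees each TimerRef moves the shared active-timer counter by at most one step, so repeated ref()/unref() calls stay balanced.

#include <cassert>

struct Counter { int active = 0; };

struct TimerRef {
    enum class State { Unset, Ref, Unref } state = State::Unset;

    void ref(Counter& c) {
        if (state == State::Ref) return;   // already counted, no-op
        state = State::Ref;
        c.active += 1;
    }
    void unref(Counter& c) {
        if (state != State::Ref) return;   // nothing to give back, no-op
        state = State::Unref;
        c.active -= 1;
    }
};

int main() {
    Counter c;
    TimerRef r;
    r.ref(c); r.ref(c);      // second call does not double-count
    assert(c.active == 1);
    r.unref(c); r.unref(c);  // second call does not double-decrement
    assert(c.active == 0);
}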
@@ -173,9 +205,6 @@ pub const All = struct {
     }

     pub fn getTimeout(this: *All, spec: *timespec, vm: *VirtualMachine) bool {
         if (this.active_timer_count == 0) {
             return false;
         }
-        if (vm.event_loop.immediate_tasks.count > 0 or vm.event_loop.next_immediate_tasks.count > 0) {
-            spec.* = .{ .nsec = 0, .sec = 0 };
-            return true;
@@ -1774,6 +1774,7 @@ pub const DNSResolver = struct {
     vm: *JSC.VirtualMachine,
     polls: PollsMap,
     options: c_ares.ChannelOptions = .{},
+    timer_ref: JSC.BunTimer.TimerRef = .{},

     ref_count: u32 = 1,
     event_loop_timer: EventLoopTimer = .{
@@ -1889,8 +1890,8 @@ pub const DNSResolver = struct {
     const NameInfoPendingCache = bun.HiveArray(GetNameInfoRequest.PendingCacheKey, 32);

     pub fn checkTimeouts(this: *DNSResolver, now: *const timespec, vm: *JSC.VirtualMachine) EventLoopTimer.Arm {
+        this.timer_ref.unref(vm);
         defer {
             vm.timer.incrementTimerRef(-1);
             this.deref();
         }
@@ -1933,25 +1934,27 @@ pub const DNSResolver = struct {
     }

     fn addTimer(this: *DNSResolver, now: ?*const timespec) bool {
+        this.timer_ref.ref(this.vm);
+
         if (this.event_loop_timer.state == .ACTIVE) {
             return false;
         }

         this.ref();
         this.event_loop_timer.next = (now orelse &timespec.now()).addMs(1000);
         this.vm.timer.incrementTimerRef(1);
         this.vm.timer.insert(&this.event_loop_timer);
         return true;
     }

     fn removeTimer(this: *DNSResolver) void {
+        this.timer_ref.unref(this.vm);
+
         if (this.event_loop_timer.state != .ACTIVE) {
             return;
         }

         // Normally checkTimeouts does this, so we have to be sure to do it ourself if we cancel the timer
         defer {
             this.vm.timer.incrementTimerRef(-1);
             this.deref();
         }
@@ -2377,7 +2377,7 @@ pub fn spawnMaybeSync(
             subprocess.stdout.pipe.watch();
         }

-        jsc_vm.tick();
+        jsc_vm.eventLoop().tick();
         jsc_vm.eventLoop().autoTick();
     }
 }
@@ -7556,7 +7556,11 @@ pub fn NewServer(comptime NamespaceType: type, comptime ssl_enabled_: bool, comp

     pub fn onUserRouteRequest(user_route: *UserRoute, req: *uws.Request, resp: *App.Response) void {
         const server = user_route.server;
         const vm = server.vm;
         const index = user_route.id;
+        const important = vm.eventLoop().important();
+        important.enter();
+        defer important.exit();

         var should_deinit_context = false;
         var prepared = server.prepareJsRequestContext(req, resp, &should_deinit_context, false) orelse return;
@@ -7603,7 +7607,9 @@ pub fn NewServer(comptime NamespaceType: type, comptime ssl_enabled_: bool, comp
     pub fn onRequest(this: *ThisServer, req: *uws.Request, resp: *App.Response) void {
         var should_deinit_context = false;
         const prepared = this.prepareJsRequestContext(req, resp, &should_deinit_context, true) orelse return;

+        const important = this.vm.eventLoop().important();
+        important.enter();
+        defer important.exit();
         bun.assert(this.config.onRequest != .zero);

         const js_value = this.jsValueAssertAlive();
@@ -7804,6 +7810,10 @@ pub fn NewServer(comptime NamespaceType: type, comptime ssl_enabled_: bool, comp
     fn upgradeWebSocketUserRoute(this: *UserRoute, resp: *App.Response, req: *uws.Request, upgrade_ctx: *uws.uws_socket_context_t) void {
         const server = this.server;
         const index = this.id;
         const vm = server.vm;
+        const important = vm.eventLoop().important();
+        important.enter();
+        defer important.exit();

         var should_deinit_context = false;
         var prepared = server.prepareJsRequestContext(req, resp, &should_deinit_context, false) orelse return;
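A small C++ RAII analogue of the important()/enter()/exit() scope used in these three handlers (hypothetical names; the Zig version uses an explicit defer rather than a destructor). The key detail is that exit restores the previous flag value instead of unconditionally clearing it, so nested scopes compose.

#include <cassert>

struct EventLoopFlag { bool busy = false; };

class ImportantScope {
public:
    explicit ImportantScope(EventLoopFlag& f) : m_flag(f), m_previous(f.busy) {
        m_flag.busy = true;
    }
    ~ImportantScope() { m_flag.busy = m_previous; } // restore, not reset

private:
    EventLoopFlag& m_flag;
    bool m_previous;
};

int main() {
    EventLoopFlag flag;
    {
        ImportantScope outer(flag);
        assert(flag.busy);
        {
            ImportantScope inner(flag);
            assert(flag.busy);
        }
        assert(flag.busy); // inner exit restored the outer "true", not "false"
    }
    assert(!flag.busy);
}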
@@ -44,7 +44,7 @@ JSHeapData::JSHeapData(Heap& heap)

 #define CLIENT_ISO_SUBSPACE_INIT(subspace) subspace(m_heapData->subspace)

-JSVMClientData::JSVMClientData(VM& vm, RefPtr<SourceProvider> sourceProvider)
+JSVMClientData::JSVMClientData(VM& vm, void* bunVM, RefPtr<SourceProvider> sourceProvider, JSC::HeapType heapType)
     : m_builtinNames(vm)
     , m_builtinFunctions(vm, sourceProvider, m_builtinNames)
     , m_heapData(JSHeapData::ensureHeapData(vm.heap))
@@ -52,6 +52,8 @@ JSVMClientData::JSVMClientData(VM& vm, RefPtr<SourceProvider> sourceProvider)
     , CLIENT_ISO_SUBSPACE_INIT(m_domConstructorSpace)
     , CLIENT_ISO_SUBSPACE_INIT(m_domNamespaceObjectSpace)
     , m_clientSubspaces(makeUnique<ExtendedDOMClientIsoSubspaces>())
+    , m_gcController(vm, bunVM, heapType)
+    , bunVM(bunVM)
 {
 }
@@ -77,26 +79,25 @@ JSVMClientData::~JSVMClientData()
     ASSERT(m_normalWorld->hasOneRef());
     m_normalWorld = nullptr;
 }

-void JSVMClientData::create(VM* vm, void* bunVM)
+void JSVMClientData::create(VM& vm, void* bunVM, JSC::HeapType heapType)
 {
     auto provider = WebCore::createBuiltinsSourceProvider();
-    JSVMClientData* clientData = new JSVMClientData(*vm, provider);
-    clientData->bunVM = bunVM;
-    vm->deferredWorkTimer->onAddPendingWork = [clientData](Ref<JSC::DeferredWorkTimer::TicketData>&& ticket, JSC::DeferredWorkTimer::WorkType kind) -> void {
+    JSVMClientData* clientData = new JSVMClientData(vm, bunVM, provider, heapType);
+    vm.deferredWorkTimer->onAddPendingWork = [clientData](Ref<JSC::DeferredWorkTimer::TicketData>&& ticket, JSC::DeferredWorkTimer::WorkType kind) -> void {
         Bun::JSCTaskScheduler::onAddPendingWork(clientData, WTFMove(ticket), kind);
     };
-    vm->deferredWorkTimer->onScheduleWorkSoon = [clientData](JSC::DeferredWorkTimer::Ticket ticket, JSC::DeferredWorkTimer::Task&& task) -> void {
+    vm.deferredWorkTimer->onScheduleWorkSoon = [clientData](JSC::DeferredWorkTimer::Ticket ticket, JSC::DeferredWorkTimer::Task&& task) -> void {
         Bun::JSCTaskScheduler::onScheduleWorkSoon(clientData, ticket, WTFMove(task));
     };
-    vm->deferredWorkTimer->onCancelPendingWork = [clientData](JSC::DeferredWorkTimer::Ticket ticket) -> void {
+    vm.deferredWorkTimer->onCancelPendingWork = [clientData](JSC::DeferredWorkTimer::Ticket ticket) -> void {
         Bun::JSCTaskScheduler::onCancelPendingWork(clientData, ticket);
     };

-    vm->clientData = clientData; // ~VM deletes this pointer.
-    clientData->m_normalWorld = DOMWrapperWorld::create(*vm, DOMWrapperWorld::Type::Normal);
+    vm.clientData = clientData; // ~VM deletes this pointer.
+    clientData->m_normalWorld = DOMWrapperWorld::create(vm, DOMWrapperWorld::Type::Normal);

-    vm->heap.addMarkingConstraint(makeUnique<WebCore::DOMGCOutputConstraint>(*vm, clientData->heapData()));
-    vm->m_typedArrayController = adoptRef(new WebCoreTypedArrayController(true));
+    vm.heap.addMarkingConstraint(makeUnique<WebCore::DOMGCOutputConstraint>(vm, clientData->heapData()));
+    vm.m_typedArrayController = adoptRef(new WebCoreTypedArrayController(true));
     clientData->builtinFunctions().exportNames();
 }
@@ -23,7 +23,7 @@ class DOMWrapperWorld;
 #include <wtf/StdLibExtras.h>
 #include "WebCoreJSBuiltins.h"
 #include "JSCTaskScheduler.h"

+#include "BunGCController.h"
 namespace Zig {
 }
@@ -79,11 +79,9 @@ class JSVMClientData : public JSC::VM::ClientData {
     WTF_MAKE_FAST_ALLOCATED_WITH_HEAP_IDENTIFIER(JSVMClientData);

 public:
-    explicit JSVMClientData(JSC::VM&, RefPtr<JSC::SourceProvider>);
-
     virtual ~JSVMClientData();

-    static void create(JSC::VM*, void*);
+    static void create(JSC::VM& vm, void* bunVM, JSC::HeapType heapType);

     JSHeapData& heapData() { return *m_heapData; }
     BunBuiltinNames& builtinNames() { return m_builtinNames; }
@@ -113,7 +111,11 @@ public:
     void* bunVM;
     Bun::JSCTaskScheduler deferredWorkTimer;

+    Bun::GCController& gcController() { return m_gcController; }
+
 private:
+    explicit JSVMClientData(JSC::VM&, void* bunVM, RefPtr<JSC::SourceProvider>, JSC::HeapType heapType);
+
     bool isWebCoreJSClientData() const final { return true; }

     BunBuiltinNames m_builtinNames;
@@ -128,6 +130,8 @@ private:

     std::unique_ptr<ExtendedDOMClientIsoSubspaces> m_clientSubspaces;
     Vector<JSC::IsoSubspace*> m_outputConstraintSpaces;

+    Bun::GCController m_gcController;
 };

 } // namespace WebCore
src/bun.js/bindings/BunGCController.cpp (new file, 397 lines)
@@ -0,0 +1,397 @@
#include "root.h"

#include "BunGCController.h"
#include <JavaScriptCore/VM.h>
#include <JavaScriptCore/Heap.h>
#include <JavaScriptCore/IncrementalSweeper.h>
#include <wtf/SystemTracing.h>
#include <JavaScriptCore/GCActivityCallback.h>
#include <JavaScriptCore/FullGCActivityCallback.h>
#include <JavaScriptCore/EdenGCActivityCallback.h>
#include <JavaScriptCore/JSRunLoopTimer.h>
#include "BunClientData.h"
#include <JavaScriptCore/ObjectConstructor.h>
#include "mimalloc.h"
#include "BunProcess.h"

namespace Bun {
extern "C" bool Bun__isBusyDoingImportantWork(void* bunVM);
static size_t ramSize()
{
    // Note: `forceRAMSize() || WTF::ramSize()` would collapse to a bool (0 or 1);
    // prefer the forced size when it is set, otherwise use the real RAM size.
    if (size_t forced = JSC::Options::forceRAMSize())
        return forced;
    return WTF::ramSize();
}
// Based on WebKit's WebCore::OpportunisticTaskScheduler::FullGCActivityCallback
class FullGCActivityCallback final : public JSC::FullGCActivityCallback {
public:
    using Base = JSC::FullGCActivityCallback;

    static Ref<FullGCActivityCallback> create(JSC::Heap& heap, void* bunVM)
    {
        return adoptRef(*new FullGCActivityCallback(heap, bunVM));
    }

    void doCollection(JSC::VM&) final;
    void doCollectionEvenIfBusy(JSC::VM&);
    bool isDeferred() const { return m_deferCount > 0; }
    bool scheduleCollection(JSC::VM&);
    bool scheduleCollectionToReclaimMemoryOnIdle(JSC::VM&);
    JSC::HeapVersion m_version { 0 };

private:
    FullGCActivityCallback(JSC::Heap&, void* bunVM);

    void* m_bunVM = nullptr;
    JSC::VM& m_vm;
    bool m_isIdleCollection { false };

    unsigned m_deferCount { 0 };
};

// Based on WebKit's WebCore::OpportunisticTaskScheduler::EdenGCActivityCallback
class EdenGCActivityCallback final : public JSC::EdenGCActivityCallback {
public:
    using Base = JSC::EdenGCActivityCallback;

    static Ref<EdenGCActivityCallback> create(JSC::Heap& heap, void* bunVM)
    {
        return adoptRef(*new EdenGCActivityCallback(heap, bunVM));
    }

    void doCollection(JSC::VM&) final;
    void doCollectionIfNeeded(JSC::VM&);
    void doCollectionEvenIfBusy(JSC::VM&);

    bool isDeferred() const { return m_deferCount > 0; }
    JSC::HeapVersion m_version { 0 };
    bool scheduleCollection(JSC::VM&, bool soon);

private:
    EdenGCActivityCallback(JSC::Heap&, void* bunVM);

    JSC::VM& m_vm;
    void* m_bunVM = nullptr;

    unsigned m_deferCount { 0 };
};

FullGCActivityCallback::FullGCActivityCallback(JSC::Heap& heap, void* bunVM)
    : Base(heap, JSC::Synchronousness::Async)
    , m_vm(heap.vm())
    , m_bunVM(bunVM)
{
}

// Timer-based GC callback
void FullGCActivityCallback::doCollection(JSC::VM& vm)
{
    doCollectionEvenIfBusy(vm);
}

void FullGCActivityCallback::doCollectionEvenIfBusy(JSC::VM& vm)
{
    m_version = 0;
    m_deferCount = 0;
    bool releaseCriticalMemory = false;
    if (m_isIdleCollection) {
        size_t rss = 0;

        m_isIdleCollection = false;

        if (vm.heap.blockBytesAllocated() > 1024 * 1024 * 512) {
            // getRSS is kind of expensive so we only check this if we're using a lot of memory
            if (getRSS(&rss)) {
                // If we're using more than 70% of the RAM, attempt to free up as much memory as possible
                if (rss > (ramSize() * 7 / 10)) {
                    releaseCriticalMemory = true;
                    vm.deleteAllCode(JSC::DeleteAllCodeEffort::DeleteAllCodeIfNotCollecting);
                }
            }
        }
    }

    Base::doCollection(vm);

    if (releaseCriticalMemory) {
        // After GC, we release memory to try to reclaim as much memory as possible
        WTF::releaseFastMallocFreeMemory();
        mi_collect(false);
    }
}

EdenGCActivityCallback::EdenGCActivityCallback(JSC::Heap& heap, void* bunVM)
    : Base(heap, JSC::Synchronousness::Async)
    , m_vm(heap.vm())
    , m_bunVM(bunVM)
{
}

bool EdenGCActivityCallback::scheduleCollection(JSC::VM& vm, bool soon)
{
    constexpr WTF::Seconds normalDelay { 60_ms };
    constexpr WTF::Seconds aggressiveDelay { 16_ms };
    constexpr unsigned deferCountThreshold = 4;

    // Check if we should be more aggressive based on the `soon` parameter
    bool underHighMemoryPressure = soon;

    if (!m_version || m_version != vm.heap.objectSpace().edenVersion()) {
        m_version = vm.heap.objectSpace().edenVersion();
        m_deferCount = 0;
        m_delay = underHighMemoryPressure ? aggressiveDelay : normalDelay;
        setTimeUntilFire(m_delay);
        return true;
    }

    if (++m_deferCount < (underHighMemoryPressure ? deferCountThreshold / 2 : deferCountThreshold)) {
        m_delay = underHighMemoryPressure ? aggressiveDelay : normalDelay;
        setTimeUntilFire(m_delay);
        return true;
    }

    return false;
}

bool FullGCActivityCallback::scheduleCollectionToReclaimMemoryOnIdle(JSC::VM& vm)
{
    constexpr WTF::Seconds delay { 3000_ms };
    constexpr unsigned deferCountThreshold = 10;
    if (!m_version || m_version != vm.heap.objectSpace().markingVersion()) {
        m_version = vm.heap.objectSpace().markingVersion();
        m_deferCount = 0;
        m_delay = delay;
        setTimeUntilFire(delay);
        m_isIdleCollection = true;
        return true;
    }

    if (++m_deferCount < deferCountThreshold) {
        m_delay = delay;
        setTimeUntilFire(delay);
        m_isIdleCollection = true;
        return true;
    }

    return false;
}

bool FullGCActivityCallback::scheduleCollection(JSC::VM& vm)
{
    // Servers can tolerate slightly larger pauses for better overall throughput
    constexpr WTF::Seconds delay { 300_ms };
    constexpr unsigned deferCountThreshold = 3;

    // Detect idle periods based on event loop activity (if possible)
    bool inIdlePeriod = !WebCore::clientData(vm)->gcController().hasMoreEventLoopWorkToDo();

    if (!m_version || m_version != vm.heap.objectSpace().markingVersion()) {
        m_version = vm.heap.objectSpace().markingVersion();
        m_deferCount = 0;
        m_delay = delay;
        m_isIdleCollection = false;
        setTimeUntilFire(inIdlePeriod ? delay / 2 : delay); // Run sooner during idle periods
        return true;
    }

    if (++m_deferCount < deferCountThreshold) {
        m_delay = delay;
        m_isIdleCollection = false;
        setTimeUntilFire(inIdlePeriod ? delay / 2 : delay);
        return true;
    }

    return false;
}

// Timer-based GC callback
void EdenGCActivityCallback::doCollection(JSC::VM& vm)
{
    doCollectionEvenIfBusy(vm);
}

void EdenGCActivityCallback::doCollectionEvenIfBusy(JSC::VM& vm)
{
    m_version = 0;
    m_deferCount = 0;
    Base::doCollection(vm);
}

extern "C" void Bun__GCController__setup(Bun::GCController* controller);

GCController::GCController(JSC::VM& vm, void* bunVM, JSC::HeapType heapType)
    : m_vm(vm)
    , bunVM(bunVM)
    , m_edenCallback(EdenGCActivityCallback::create(vm.heap, bunVM))
    , m_fullCallback(FullGCActivityCallback::create(vm.heap, bunVM))
{
    // Set them as active callbacks in the heap
    m_vm.heap.setEdenActivityCallback(Ref(m_edenCallback));
    m_vm.heap.setFullActivityCallback(Ref(m_fullCallback));

    {
        const char* disable_stop_if_necessary_timer = getenv("BUN_DISABLE_STOP_IF_NECESSARY_TIMER");
        // Keep stopIfNecessaryTimer enabled by default when either:
        // - `--smol` is passed
        // - The machine has less than 4GB of RAM
        bool shouldDisableStopIfNecessaryTimer = heapType == JSC::HeapType::Large;
        if (ramSize() < 1024ull * 1024ull * 1024ull * 4ull) {
            shouldDisableStopIfNecessaryTimer = false;
        }

        if (disable_stop_if_necessary_timer) {
            const char value = disable_stop_if_necessary_timer[0];
            if (value == '0') {
                shouldDisableStopIfNecessaryTimer = false;
            } else if (value == '1') {
                shouldDisableStopIfNecessaryTimer = true;
            }
        }

        if (shouldDisableStopIfNecessaryTimer) {
            m_vm.heap.disableStopIfNecessaryTimer();
        }
    }

    Bun__GCController__setup(this);
}

GCController::~GCController()
{
}

void GCController::performOpportunisticGC()
{
    // runs after an HTTP request has completed
    // note: there may be other in-flight requests

    // Check if under memory pressure - be more aggressive if needed
    bool underPressure = checkMemoryPressure();
    size_t previousBlockBytesAllocated = m_lastBlockBytesAllocated;
    size_t blockBytesAllocated = m_vm.heap.blockBytesAllocated();
    m_lastBlockBytesAllocated = blockBytesAllocated;

    if (blockBytesAllocated > previousBlockBytesAllocated || underPressure) {
        m_hasStayedTheSameFor = 0;

        if (!Bun__isBusyDoingImportantWork(bunVM)) {
            // Always schedule an Eden GC if memory is growing
            m_edenCallback->scheduleCollection(m_vm, true);
        }

        // Only schedule full GC if under pressure or memory growing significantly
        if (underPressure && !m_fullCallback->isScheduled()) {
            m_fullCallback->scheduleCollection(m_vm);
        }
    } else if (m_hasStayedTheSameFor < 10) {
        // If memory usage plateaus, still do Eden collections
        if (!hasMoreEventLoopWorkToDo() && !Bun__isBusyDoingImportantWork(bunVM)) {
            if (m_edenCallback->scheduleCollection(m_vm, false)) {
                m_hasStayedTheSameFor++;
            }
        }
    } else {
        // After long plateau, occasionally do full collection to compact memory
        if (!hasMoreEventLoopWorkToDo() && !Bun__isBusyDoingImportantWork(bunVM)) {
            m_fullCallback->scheduleCollectionToReclaimMemoryOnIdle(m_vm);
        }
    }
}

void GCController::configureEdenGC(bool enabled, unsigned intervalMs)
{
    if (enabled) {
        m_edenCallback->setEnabled(true);
        m_edenCallback->setTimeUntilFire(WTF::Seconds::fromMilliseconds(intervalMs));
    } else {
        m_edenCallback->setEnabled(false);
        m_edenCallback->cancel();
    }
}

void GCController::configureFullGC(bool enabled, unsigned intervalMs)
{
    if (enabled) {
        m_fullCallback->setEnabled(true);
        m_fullCallback->setTimeUntilFire(WTF::Seconds::fromMilliseconds(intervalMs));
    } else {
        m_fullCallback->setEnabled(false);
        m_fullCallback->cancel();
    }
}

bool GCController::hasPendingGCWork() const
{
    return Bun__isBusyDoingImportantWork(bunVM);
}

bool GCController::checkMemoryPressure() const
{
    // vm.heap.size() is slow. It makes Express 1/3 the requests per second.
    // We use blockBytesAllocated() instead.
    size_t currentHeapSize = m_vm.heap.blockBytesAllocated();

    bool highMemoryUsage = currentHeapSize > (ramSize() * 7 / 10);

    // Check allocation rate (is memory growing rapidly?)
    bool rapidMemoryGrowth = m_lastBlockBytesAllocated > 0 && (currentHeapSize > m_lastBlockBytesAllocated * 1.5);

    // Memory is considered under pressure if either condition is true
    return highMemoryUsage || // Using more than 70% of available RAM
        (rapidMemoryGrowth && m_hasStayedTheSameFor < 5) || // Rapid memory growth
        (currentHeapSize > 1024ull * 1024ull * 1024ull); // Over 1GB allocated
}

} // namespace Bun

extern "C" {

Bun::GCController* Bun__GCController__get(JSC::VM* vm)
{
    auto* clientData = WebCore::clientData(*vm);
    auto& gcController = clientData->gcController();
    return &gcController;
}

void Bun__GCController__performOpportunisticGC(Bun::GCController* controller)
{
    controller->performOpportunisticGC();
}

// TODO: expose to JS
void Bun__GCController__getMetrics(
    Bun::GCController* controller,
    size_t* incrementalSweepCount,
    size_t* edenGCCount,
    size_t* fullGCCount,
    double* totalSweepTimeMs,
    double* maxSweepTimeMs)
{
    if (!controller)
        return;

    const auto& metrics = controller->metrics();

    if (incrementalSweepCount)
        *incrementalSweepCount = metrics.incrementalSweepCount;
    if (edenGCCount)
        *edenGCCount = metrics.edenGCCount;
    if (fullGCCount)
        *fullGCCount = metrics.fullGCCount;
    if (totalSweepTimeMs)
        *totalSweepTimeMs = metrics.totalSweepTimeMs;
    if (maxSweepTimeMs)
        *maxSweepTimeMs = metrics.maxSweepTimeMs;
}

JSC::JSObject* createGCStatsObject(JSC::VM& vm, JSC::JSGlobalObject* globalObject)
{
    auto* object = JSC::constructEmptyObject(globalObject, globalObject->objectPrototype());
    return object;
}
}
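The scheduling logic in scheduleCollection() above distills to a version-plus-defer-count heuristic. Here is a standalone C++ sketch, with hypothetical types standing in for JSC's heap: collection is deferred while the heap "version" keeps advancing, but only deferCountThreshold times, so a continuously busy process still collects eventually.

#include <cstdio>

struct HeapView { unsigned version = 0; }; // stand-in for edenVersion()/markingVersion()

struct DeferredGC {
    unsigned lastVersion = 0;
    unsigned deferCount = 0;
    static constexpr unsigned deferCountThreshold = 4;

    // Returns true if the GC timer was (re)armed, false if we should collect now.
    bool schedule(const HeapView& heap) {
        if (lastVersion == 0 || lastVersion != heap.version) {
            lastVersion = heap.version; // heap changed: restart the countdown
            deferCount = 0;
            return true;                // arm the timer for the normal delay
        }
        if (++deferCount < deferCountThreshold)
            return true;                // same version, but keep deferring
        return false;                   // deferred too many times: collect now
    }
};

int main() {
    DeferredGC gc;
    HeapView heap { 7 };
    for (int i = 0; i < 6; i++)
        std::printf("attempt %d -> %s\n", i, gc.schedule(heap) ? "defer" : "collect");
}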
src/bun.js/bindings/BunGCController.h (new file, 80 lines)
@@ -0,0 +1,80 @@
#pragma once

#include "root.h"

namespace JSC {
class VM;
class JSObject;
}

namespace WTF {
class MonotonicTime;
}

namespace Bun {

class EdenGCActivityCallback;
class FullGCActivityCallback;

// Implemented in C++ to properly integrate with JSC's FullGCActivityCallback & EdenGCActivityCallback
// The lifetime of this is tied to the JSVMClientData instance, which is tied to the JSC::VM instance
class GCController {
public:
    GCController(JSC::VM&, void* bunVM, JSC::HeapType heapType);
    ~GCController();

    // Configure the Eden GC for smaller, more frequent collections
    void configureEdenGC(bool enabled, unsigned intervalMs = 30);

    // Configure the Full GC for larger, less frequent collections
    void configureFullGC(bool enabled, unsigned intervalMs = 300);

    // Utility method to check for pending GC work
    bool hasPendingGCWork() const;

    // Check if the system is under memory pressure
    bool checkMemoryPressure() const;

    // Call this to maybe schedule a GC to run sometimes.
    void performOpportunisticGC();

    // Metrics
    class Metrics {
    public:
        size_t incrementalSweepCount = 0;
        size_t edenGCCount = 0;
        size_t fullGCCount = 0;
        size_t blocksSwept = 0;
        double totalSweepTimeMs = 0;
        double maxSweepTimeMs = 0;

        void reset()
        {
            incrementalSweepCount = 0;
            edenGCCount = 0;
            fullGCCount = 0;
            blocksSwept = 0;
            totalSweepTimeMs = 0;
            maxSweepTimeMs = 0;
        }
    };

    Metrics& metrics() { return m_metrics; }
    void* bunVM = nullptr;

    bool hasMoreEventLoopWorkToDo() const { return m_hasMoreEventLoopWorkToDo; }
    void setHasMoreEventLoopWorkToDo(bool hasMoreEventLoopWorkToDo) { m_hasMoreEventLoopWorkToDo = hasMoreEventLoopWorkToDo; }

private:
    JSC::VM& m_vm;
    Ref<EdenGCActivityCallback> m_edenCallback;
    Ref<FullGCActivityCallback> m_fullCallback;
    Metrics m_metrics = {};
    bool m_hasMoreEventLoopWorkToDo = false;
    size_t m_lastBlockBytesAllocated = 0;
    size_t m_hasStayedTheSameFor = 0;
};
JSC::JSObject* createGCStatsObject(JSC::VM& vm, JSC::JSGlobalObject* globalObject);

} // namespace Bun
@@ -11,23 +11,30 @@ static thread_local std::optional<JSC::JSLock::DropAllLocks> drop_all_locks { st

 extern "C" void WTFTimer__runIfImminent(void* bun_vm);

-// Safe if VM is nullptr
-extern "C" void Bun__JSC_onBeforeWait(JSC::VM* vm)
+extern "C" void Bun__JSC_onBeforeWait(JSC::VM* _Nonnull vm)
 {
     ASSERT(!drop_all_locks.has_value());
-    if (vm) {
-        bool previouslyHadAccess = vm->heap.hasHeapAccess();
-        drop_all_locks.emplace(*vm);
-        if (previouslyHadAccess) {
-            vm->heap.releaseAccess();
-        }
-    }
+    bool previouslyHadAccess = vm->heap.hasHeapAccess();
+    drop_all_locks.emplace(*vm);
+    if (previouslyHadAccess) {
+        vm->heap.releaseAccess();
+    }
 }

-extern "C" void Bun__JSC_onAfterWait(JSC::VM* vm)
+extern "C" void Bun__JSC_onAfterWait(JSC::VM* _Nonnull vm, bool hasMoreEventLoopWorkToDo)
 {
-    if (vm) {
-        vm->heap.acquireAccess();
-        drop_all_locks.reset();
-    }
+    vm->heap.acquireAccess();
+    drop_all_locks.reset();
+
+    if (hasMoreEventLoopWorkToDo) {
+        auto& gcController = WebCore::clientData(*vm)->gcController();
+        gcController.setHasMoreEventLoopWorkToDo(true);
+    }
+}
+
+extern "C" void Bun__JSC_onDidRunCallbacks(JSC::VM* _Nonnull vm)
+{
+    auto& gcController = WebCore::clientData(*vm)->gcController();
+    gcController.setHasMoreEventLoopWorkToDo(false);
 }
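A minimal model of the thread_local drop_all_locks dance above, with a hypothetical lock type in place of JSC::JSLock::DropAllLocks: emplace() drops the locks on entry to the wait, reset() re-takes them on return, and the optional enforces at most one outstanding drop per thread.

#include <cassert>
#include <cstdio>
#include <optional>

struct DropAllLocks {
    DropAllLocks() { std::puts("locks dropped"); }
    ~DropAllLocks() { std::puts("locks re-acquired"); }
};

static thread_local std::optional<DropAllLocks> drop_all_locks;

void onBeforeWait() {
    assert(!drop_all_locks.has_value()); // a second drop would be a bug
    drop_all_locks.emplace();
}

void onAfterWait() {
    drop_all_locks.reset(); // destructor re-acquires
}

int main() {
    onBeforeWait();
    // ... the blocking wait would happen here ...
    onAfterWait();
}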
@@ -169,7 +169,7 @@
 #include "ProcessBindingBuffer.h"
 #include "NodeValidator.h"
 #include "ProcessBindingFs.h"

+#include <JavaScriptCore/GCActivityCallback.h>
 #include "JSBunRequest.h"
 #include "ServerRouteList.h"
@@ -886,7 +886,12 @@ void Zig::GlobalObject::resetOnEachMicrotaskTick()
 extern "C" JSC__JSGlobalObject* Zig__GlobalObject__create(void* console_client, int32_t executionContextId, bool miniMode, bool evalMode, void* worker_ptr)
 {
     auto heapSize = miniMode ? JSC::HeapType::Small : JSC::HeapType::Large;

+    // We're going to create the timers ourselves.
+    JSC::GCActivityCallback::s_shouldCreateGCTimer = false;
+
     RefPtr<JSC::VM> vmPtr = JSC::VM::tryCreate(heapSize);

     if (UNLIKELY(!vmPtr)) {
         BUN_PANIC("Failed to allocate JavaScriptCore Virtual Machine. Did your computer run out of memory? Or maybe you compiled Bun with a mismatching libc++ version or compiler?");
     }
@@ -896,34 +901,10 @@ extern "C" JSC__JSGlobalObject* Zig__GlobalObject__create(void* console_client,
     vm.heap.acquireAccess();
     JSC::JSLockHolder locker(vm);

-    {
-        const char* disable_stop_if_necessary_timer = getenv("BUN_DISABLE_STOP_IF_NECESSARY_TIMER");
-        // Keep stopIfNecessaryTimer enabled by default when either:
-        // - `--smol` is passed
-        // - The machine has less than 4GB of RAM
-        bool shouldDisableStopIfNecessaryTimer = !miniMode;
-        if (WTF::ramSize() < 1024ull * 1024ull * 1024ull * 4ull) {
-            shouldDisableStopIfNecessaryTimer = false;
-        }
-
-        if (disable_stop_if_necessary_timer) {
-            const char value = disable_stop_if_necessary_timer[0];
-            if (value == '0') {
-                shouldDisableStopIfNecessaryTimer = false;
-            } else if (value == '1') {
-                shouldDisableStopIfNecessaryTimer = true;
-            }
-        }
-
-        if (shouldDisableStopIfNecessaryTimer) {
-            vm.heap.disableStopIfNecessaryTimer();
-        }
-    }
-
     // Every JS VM's RunLoop should use Bun's RunLoop implementation
     ASSERT(vmPtr->runLoop().kind() == WTF::RunLoop::Kind::Bun);

-    WebCore::JSVMClientData::create(&vm, Bun__getVM());
+    WebCore::JSVMClientData::create(vm, Bun__getVM(), heapSize);

     const auto createGlobalObject = [&]() -> Zig::GlobalObject* {
         if (UNLIKELY(executionContextId == std::numeric_limits<int32_t>::max() || executionContextId > -1)) {
@@ -31,7 +31,6 @@ pub const ZigGlobalObject = extern struct {
) *JSGlobalObject {
    vm.eventLoop().ensureWaker();
    const global = shim.cppFn("create", .{ console, context_id, mini_mode, eval_mode, worker_ptr });

    // JSC might mess with the stack size.
    bun.StackCheck.configureThread();
@@ -592,156 +592,6 @@ pub const ConcurrentTask = struct {
     }
 };

-// This type must be unique per JavaScript thread
-pub const GarbageCollectionController = struct {
-    gc_timer: *uws.Timer = undefined,
-    gc_last_heap_size: usize = 0,
-    gc_last_heap_size_on_repeating_timer: usize = 0,
-    heap_size_didnt_change_for_repeating_timer_ticks_count: u8 = 0,
-    gc_timer_state: GCTimerState = GCTimerState.pending,
-    gc_repeating_timer: *uws.Timer = undefined,
-    gc_timer_interval: i32 = 0,
-    gc_repeating_timer_fast: bool = true,
-    disabled: bool = false,
-
-    pub fn init(this: *GarbageCollectionController, vm: *VirtualMachine) void {
-        const actual = uws.Loop.get();
-        this.gc_timer = uws.Timer.createFallthrough(actual, this);
-        this.gc_repeating_timer = uws.Timer.createFallthrough(actual, this);
-        actual.internal_loop_data.jsc_vm = vm.jsc;
-
-        if (comptime Environment.isDebug) {
-            if (bun.getenvZ("BUN_TRACK_LAST_FN_NAME") != null) {
-                vm.eventLoop().debug.track_last_fn_name = true;
-            }
-        }
-
-        var gc_timer_interval: i32 = 1000;
-        if (vm.transpiler.env.get("BUN_GC_TIMER_INTERVAL")) |timer| {
-            if (std.fmt.parseInt(i32, timer, 10)) |parsed| {
-                if (parsed > 0) {
-                    gc_timer_interval = parsed;
-                }
-            } else |_| {}
-        }
-        this.gc_timer_interval = gc_timer_interval;
-
-        this.disabled = vm.transpiler.env.has("BUN_GC_TIMER_DISABLE");
-
-        if (!this.disabled)
-            this.gc_repeating_timer.set(this, onGCRepeatingTimer, gc_timer_interval, gc_timer_interval);
-    }
-
-    pub fn scheduleGCTimer(this: *GarbageCollectionController) void {
-        this.gc_timer_state = .scheduled;
-        this.gc_timer.set(this, onGCTimer, 16, 0);
-    }
-
-    pub fn bunVM(this: *GarbageCollectionController) *VirtualMachine {
-        return @alignCast(@fieldParentPtr("gc_controller", this));
-    }
-
-    pub fn onGCTimer(timer: *uws.Timer) callconv(.C) void {
-        var this = timer.as(*GarbageCollectionController);
-        if (this.disabled) return;
-        this.gc_timer_state = .run_on_next_tick;
-    }
-
-    // We want to always run GC once in a while
-    // But if you have a long-running instance of Bun, you don't want the
-    // program constantly using CPU doing GC for no reason
-    //
-    // So we have two settings for this GC timer:
-    //
-    // - Fast: GC runs every 1 second
-    // - Slow: GC runs every 30 seconds
-    //
-    // When the heap size is increasing, we always switch to fast mode
-    // When the heap size has been the same or less for 30 seconds, we switch to slow mode
-    pub fn updateGCRepeatTimer(this: *GarbageCollectionController, comptime setting: @Type(.enum_literal)) void {
-        if (setting == .fast and !this.gc_repeating_timer_fast) {
-            this.gc_repeating_timer_fast = true;
-            this.gc_repeating_timer.set(this, onGCRepeatingTimer, this.gc_timer_interval, this.gc_timer_interval);
-            this.heap_size_didnt_change_for_repeating_timer_ticks_count = 0;
-        } else if (setting == .slow and this.gc_repeating_timer_fast) {
-            this.gc_repeating_timer_fast = false;
-            this.gc_repeating_timer.set(this, onGCRepeatingTimer, 30_000, 30_000);
-            this.heap_size_didnt_change_for_repeating_timer_ticks_count = 0;
-        }
-    }
-
-    pub fn onGCRepeatingTimer(timer: *uws.Timer) callconv(.C) void {
-        var this = timer.as(*GarbageCollectionController);
-        const prev_heap_size = this.gc_last_heap_size_on_repeating_timer;
-        this.performGC();
-        this.gc_last_heap_size_on_repeating_timer = this.gc_last_heap_size;
-        if (prev_heap_size == this.gc_last_heap_size_on_repeating_timer) {
-            this.heap_size_didnt_change_for_repeating_timer_ticks_count +|= 1;
-            if (this.heap_size_didnt_change_for_repeating_timer_ticks_count >= 30) {
-                // make the timer interval longer
-                this.updateGCRepeatTimer(.slow);
-            }
-        } else {
-            this.heap_size_didnt_change_for_repeating_timer_ticks_count = 0;
-            this.updateGCRepeatTimer(.fast);
-        }
-    }
-
-    pub fn processGCTimer(this: *GarbageCollectionController) void {
-        if (this.disabled) return;
-        var vm = this.bunVM().jsc;
-        this.processGCTimerWithHeapSize(vm, vm.blockBytesAllocated());
-    }
-
-    fn processGCTimerWithHeapSize(this: *GarbageCollectionController, vm: *JSC.VM, this_heap_size: usize) void {
-        const prev = this.gc_last_heap_size;
-
-        switch (this.gc_timer_state) {
-            .run_on_next_tick => {
-                // When memory usage is not stable, run the GC more.
-                if (this_heap_size != prev) {
-                    this.scheduleGCTimer();
-                    this.updateGCRepeatTimer(.fast);
-                } else {
-                    this.gc_timer_state = .pending;
-                }
-                vm.collectAsync();
-                this.gc_last_heap_size = this_heap_size;
-            },
-            .pending => {
-                if (this_heap_size != prev) {
-                    this.updateGCRepeatTimer(.fast);
-
-                    if (this_heap_size > prev * 2) {
-                        this.performGC();
-                    } else {
-                        this.scheduleGCTimer();
-                    }
-                }
-            },
-            .scheduled => {
-                if (this_heap_size > prev * 2) {
-                    this.updateGCRepeatTimer(.fast);
-                    this.performGC();
-                }
-            },
-        }
-    }
-
-    pub fn performGC(this: *GarbageCollectionController) void {
-        if (this.disabled) return;
-        var vm = this.bunVM().jsc;
-        vm.collectAsync();
-        this.gc_last_heap_size = vm.blockBytesAllocated();
-    }
-
-    pub const GCTimerState = enum {
-        pending,
-        scheduled,
-        run_on_next_tick,
-    };
-};

 export fn Bun__tickWhilePaused(paused: *bool) void {
     JSC.markBinding(@src());
     VirtualMachine.get().eventLoop().tickWhilePaused(paused);
@@ -842,9 +692,26 @@ pub const EventLoop = struct {
     entered_event_loop_count: isize = 0,
     concurrent_ref: std.atomic.Value(i32) = std.atomic.Value(i32).init(0),
     imminent_gc_timer: std.atomic.Value(?*JSC.BunTimer.WTFTimer) = .{ .raw = null },

+    is_doing_something_important: bool = false,
     signal_handler: if (Environment.isPosix) ?*PosixSignalHandle else void = if (Environment.isPosix) null,

+    pub fn important(this: *EventLoop) ImportantScope {
+        return .{ .previous_important = this.is_doing_something_important, .event_loop = this };
+    }
+
+    pub const ImportantScope = struct {
+        previous_important: bool = false,
+        event_loop: *EventLoop,
+
+        pub fn enter(this: *const ImportantScope) void {
+            this.event_loop.is_doing_something_important = true;
+        }
+
+        pub fn exit(this: *const ImportantScope) void {
+            this.event_loop.is_doing_something_important = this.previous_important;
+        }
+    };
+
     pub export fn Bun__ensureSignalHandler() void {
         if (Environment.isPosix) {
             if (VirtualMachine.getMainThreadVM()) |vm| {
@@ -1478,6 +1345,18 @@ pub const EventLoop = struct {

         return this.virtual_machine.event_loop_handle.?;
     }

+    fn enterActiveLoop(loop: *uws.Loop, ctx: *VirtualMachine) void {
+        var deadline: bun.timespec = undefined;
+
+        var event_loop_sleep_timer = if (comptime Environment.isDebug) std.time.Timer.start() catch unreachable;
+
+        const timeout = ctx.timer.getTimeout(&deadline, ctx);
+        loop.tickWithTimeout(if (timeout) &deadline else null);
+
+        if (comptime Environment.isDebug) {
+            log("tick {}, timeout: {}", .{ std.fmt.fmtDuration(event_loop_sleep_timer.read()), std.fmt.fmtDuration(deadline.ns()) });
+        }
+    }

     pub fn autoTick(this: *EventLoop) void {
         var ctx = this.virtual_machine;
@@ -1503,15 +1382,7 @@ pub const EventLoop = struct {
         this.runImminentGCTimer();

         if (loop.isActive()) {
-            this.processGCTimer();
-            var event_loop_sleep_timer = if (comptime Environment.isDebug) std.time.Timer.start() catch unreachable;
-            // for the printer, this is defined:
-            var timespec: bun.timespec = if (Environment.isDebug) .{ .sec = 0, .nsec = 0 } else undefined;
-            loop.tickWithTimeout(if (ctx.timer.getTimeout(&timespec, ctx)) &timespec else null);
-
-            if (comptime Environment.isDebug) {
-                log("tick {}, timeout: {}", .{ std.fmt.fmtDuration(event_loop_sleep_timer.read()), std.fmt.fmtDuration(timespec.ns()) });
-            }
+            enterActiveLoop(loop, ctx);
         } else {
             loop.tickWithoutIdle();
             if (comptime Environment.isDebug) {
@@ -1590,10 +1461,7 @@ pub const EventLoop = struct {
         }

         if (loop.isActive()) {
-            this.processGCTimer();
-            var timespec: bun.timespec = undefined;
-
-            loop.tickWithTimeout(if (ctx.timer.getTimeout(&timespec, ctx)) &timespec else null);
+            enterActiveLoop(loop, ctx);
         } else {
             loop.tickWithoutIdle();
         }
@@ -1607,7 +1475,7 @@ pub const EventLoop = struct {
     }

     pub fn processGCTimer(this: *EventLoop) void {
-        this.virtual_machine.gc_controller.processGCTimer();
+        this.virtual_machine.gc_controller.performOpportunisticGC();
     }

     pub fn tick(this: *EventLoop) void {
@@ -1708,10 +1576,6 @@ pub const EventLoop = struct {
         } else {
             this.virtual_machine.event_loop_handle = bun.Async.Loop.get();
         }

-        this.virtual_machine.gc_controller.init(this.virtual_machine);
         // _ = actual.addPostHandler(*JSC.EventLoop, this, JSC.EventLoop.afterUSocketsTick);
         // _ = actual.addPreHandler(*JSC.VM, this.virtual_machine.jsc, JSC.VM.drainMicrotasks);
     }
     bun.uws.Loop.get().internal_loop_data.setParentEventLoop(bun.JSC.EventLoopHandle.init(this));
 }
@@ -48,6 +48,7 @@ const MarkedArrayBuffer = @import("./base.zig").MarkedArrayBuffer;
 const getAllocator = @import("./base.zig").getAllocator;
 const JSValue = bun.JSC.JSValue;
 const NewClass = @import("./base.zig").NewClass;
+const GCController = @import("./GCController.zig").GCController;

 const JSGlobalObject = bun.JSC.JSGlobalObject;
 const ExceptionValueRef = bun.JSC.ExceptionValueRef;
@@ -886,7 +887,7 @@ pub const VirtualMachine = struct {

     module_loader: ModuleLoader = .{},

-    gc_controller: JSC.GarbageCollectionController = .{},
+    gc_controller: *GCController = undefined,
     worker: ?*JSC.WebWorker = null,
     ipc: ?IPCInstanceUnion = null,
@@ -1331,7 +1332,7 @@ pub const VirtualMachine = struct {
         };
     }

-    pub inline fn eventLoop(this: *VirtualMachine) *EventLoop {
+    pub inline fn eventLoop(this: *const VirtualMachine) *EventLoop {
         return this.event_loop;
     }
@@ -103,11 +103,13 @@ pub const TestRunner = struct {

     unhandled_errors_between_tests: u32 = 0,

+    test_timeout_ref: JSC.BunTimer.TimerRef = .{},
+
     pub const Drainer = JSC.AnyTask.New(TestRunner, drain);

     pub fn onTestTimeout(this: *TestRunner, now: *const bun.timespec, vm: *VirtualMachine) void {
-        _ = vm; // autofix
         this.event_loop_timer.state = .FIRED;
+        this.test_timeout_ref.unref(vm);

         if (this.pending_test) |pending_test| {
             if (!pending_test.reported and (this.active_test_for_timeout orelse return) == pending_test.test_id) {
@@ -132,13 +134,10 @@ pub const TestRunner = struct {
         const then = bun.timespec.msFromNow(@intCast(milliseconds));
         const vm = JSC.VirtualMachine.get();

-        this.event_loop_timer.tag = .TestRunner;
-        if (this.event_loop_timer.state == .ACTIVE) {
-            vm.timer.remove(&this.event_loop_timer);
-        }
+        this.test_timeout_ref.ref(vm);

-        this.event_loop_timer.next = then;
-        vm.timer.insert(&this.event_loop_timer);
+        this.event_loop_timer.tag = .TestRunner;
+        vm.timer.update(&this.event_loop_timer, &then);
     }

     pub fn enqueue(this: *TestRunner, task: *TestRunnerTask) void {
@@ -5494,8 +5494,8 @@ pub fn NewReadyWatcher(
     }

     pub fn onPoll(this: *Context, sizeOrOffset: i64, _: u16) void {
-        defer JSC.VirtualMachine.get().drainMicrotasks();
         ready(this, sizeOrOffset);
+        JSC.VirtualMachine.get().drainMicrotasks();
     }

     pub fn unwatch(this: *Context, fd_: anytype) void {
@@ -5,7 +5,7 @@ import path from "path";
 if (isFlaky && isLinux) {
   test.todo("processes get killed");
 } else {
-  test.each([true, false])("processes get killed", async sync => {
+  test.each([true, false])("processes get killed (sync: %p)", async sync => {
     const { exited, stdout, stderr } = Bun.spawn({
       cmd: [
         bunExe(),
@@ -20,7 +20,7 @@ if (isFlaky && isLinux) {
     const [out, err, exitCode] = await Promise.all([new Response(stdout).text(), new Response(stderr).text(), exited]);
     console.log(out);
     console.log(err);
-    // TODO: figure out how to handle terminatio nexception from spawn sync properly.
+    // TODO: figure out how to handle termination exception from spawn sync properly.
     expect(exitCode).not.toBe(0);
     expect(out).not.toContain("This should not be printed!");
     expect(err).toContain("killed 1 dangling process");
@@ -18,7 +18,7 @@ const words: Record<string, { reason: string; limit?: number; regex?: boolean }>
   "std.StringHashMap(": { reason: "bun.StringHashMap has a faster `eql`" },
   "std.enums.tagName(": { reason: "Use bun.tagName instead", limit: 2 },
   "std.unicode": { reason: "Use bun.strings instead", limit: 36 },
-  [String.raw`: [a-zA-Z0-9_\.\*\?\[\]\(\)]+ = undefined,`]: { reason: "Do not default a struct field to undefined", limit: 251, regex: true },
+  [String.raw`: [a-zA-Z0-9_\.\*\?\[\]\(\)]+ = undefined,`]: { reason: "Do not default a struct field to undefined", limit: 249, regex: true },
 };
 const words_keys = [...Object.keys(words)];
@@ -215,7 +215,8 @@ static napi_value testAdjustExternalMemory(napi_env env, napi_callback_info info
   napi_value result;
   int64_t adjustedValue;

-  NODE_API_CALL(env, napi_adjust_external_memory(env, 1, &adjustedValue));
+  // Some JavaScript engines may ignore small increments passed to napi_adjust_external_memory
+  NODE_API_CALL(env, napi_adjust_external_memory(env, 10000, &adjustedValue));
   NODE_API_CALL(env, napi_create_double(env, (double)adjustedValue, &result));

   return result;