Compare commits

..

2 Commits

Author SHA1 Message Date
Claude Bot
ed2b323578 Add regression test for NAPI finalizer segfault (#24552)
Added test coverage to prevent regression of the NAPI finalizer crash
that occurs when finalizers run during env teardown.

Test components:
1. New NAPI addon (test_finalizer_on_teardown.c) - creates objects with
   finalizers that attempt to access the env during cleanup
2. Regression test (24552.test.ts) - spawns subprocesses to reproduce
   the crash scenario, simulating the rspack/rsbuild usage pattern

The tests verify:
- Finalizers don't crash when env is being torn down
- Multiple subprocess spawns in a loop don't cause crashes
- No segfault or panic messages in output

Without the fix in src/napi/napi.zig, these tests would segfault.
With the fix, they pass by safely skipping finalizer execution when
the env is no longer valid.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-11 03:36:01 +00:00
Claude Bot
fa198b1441 Fix NAPI finalizer segfault when running rspack/rsbuild
Fixes #24552

When NAPI modules (like rspack/rsbuild via SWC) are used in subprocesses,
finalizers can be enqueued during garbage collection but may attempt to run
after the NapiEnv has started tearing down, causing segmentation faults at
address 0x0.

The crash occurs because:
1. NAPI objects with finalizers are garbage collected
2. Finalizers are enqueued to run on the next tick
3. The NapiEnv begins cleanup (m_globalObject becomes null/invalid or VM terminates)
4. When the finalizer runs, it dereferences the invalid globalObject pointer

The fix:
- Add NapiEnv__canRunFinalizer() to check that the env and its globalObject are valid and that the VM is not terminating
- Call this check in Finalizer.run() before accessing the env
- Skip finalizer execution if the env is no longer valid

This prevents the segfault while ensuring finalizers run when safe to do so.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-11 03:29:11 +00:00
8 changed files with 184 additions and 86 deletions

View File

@@ -2970,4 +2970,10 @@ extern "C" void NapiEnv__deref(napi_env env)
env->deref();
}
// Returns true only when it is safe to run a NAPI finalizer against this env:
// the env pointer, its global object, and a non-terminating VM are all required.
extern "C" bool NapiEnv__canRunFinalizer(napi_env env)
{
    if (!env) {
        return false;
    }
    if (!env->globalObject()) {
        return false;
    }
    return !env->isVMTerminating();
}
}

View File

@@ -1366,6 +1366,7 @@ pub export fn napi_internal_suppress_crash_on_abort_if_desired() void {
}
extern fn napi_internal_remove_finalizer(env: napi_env, fun: napi_finalize, hint: ?*anyopaque, data: ?*anyopaque) callconv(.C) void;
extern fn NapiEnv__canRunFinalizer(*NapiEnv) bool;
pub const Finalizer = struct {
env: NapiEnv.Ref,
@@ -1375,6 +1376,14 @@ pub const Finalizer = struct {
pub fn run(this: *Finalizer) void {
const env = this.env.get();
// Safety check: Ensure the env is still valid before running the finalizer.
// This prevents crashes when finalizers are enqueued but the NapiEnv is being
// torn down (e.g., when a subprocess using NAPI modules is terminating).
if (!NapiEnv__canRunFinalizer(env)) {
return;
}
const handle_scope = NapiHandleScope.open(env, false);
defer if (handle_scope) |scope| scope.close(env);

View File

@@ -15,7 +15,6 @@ pub const AnyPostgresError = error{
InvalidTimeFormat,
JSError,
JSTerminated,
MessageTooLarge,
MultidimensionalArrayNotSupportedYet,
NullsInArrayNotSupportedYet,
OutOfMemory,
@@ -94,7 +93,6 @@ pub fn postgresErrorToJS(globalObject: *jsc.JSGlobalObject, message: ?[]const u8
error.InvalidServerKey => "ERR_POSTGRES_INVALID_SERVER_KEY",
error.InvalidServerSignature => "ERR_POSTGRES_INVALID_SERVER_SIGNATURE",
error.InvalidTimeFormat => "ERR_POSTGRES_INVALID_TIME_FORMAT",
error.MessageTooLarge => "ERR_POSTGRES_MESSAGE_TOO_LARGE",
error.MultidimensionalArrayNotSupportedYet => "ERR_POSTGRES_MULTIDIMENSIONAL_ARRAY_NOT_SUPPORTED_YET",
error.NullsInArrayNotSupportedYet => "ERR_POSTGRES_NULLS_IN_ARRAY_NOT_SUPPORTED_YET",
error.Overflow => "ERR_POSTGRES_OVERFLOW",
@@ -126,11 +124,8 @@ pub fn postgresErrorToJS(globalObject: *jsc.JSGlobalObject, message: ?[]const u8
},
};
var buffer_message = [_]u8{0} ** 512;
const msg = message orelse switch (err) {
error.MessageTooLarge => "Query message exceeds PostgreSQL protocol limit of 2GB. Try reducing the number of rows in your bulk insert or split it into smaller batches.",
else => std.fmt.bufPrint(&buffer_message, "Failed to bind query: {s}", .{@errorName(err)}) catch "Failed to bind query",
};
var buffer_message = [_]u8{0} ** 256;
const msg = message orelse std.fmt.bufPrint(buffer_message[0..], "Failed to bind query: {s}", .{@errorName(err)}) catch "Failed to bind query";
return createPostgresError(globalObject, msg, .{ .code = code }) catch |e| globalObject.takeError(e);
}

View File

@@ -23,23 +23,11 @@ pub fn NewWriterWrap(
context: WrappedWriter,
pub fn write(this: LengthWriter) AnyPostgresError!void {
const message_length = this.context.offset() - this.index;
// PostgreSQL protocol uses 32-bit signed integers for message lengths
// Maximum value is 2,147,483,647 bytes (~2GB)
if (message_length > std.math.maxInt(i32)) {
return error.MessageTooLarge;
}
try this.context.pwrite(&Int32(message_length), this.index);
try this.context.pwrite(&Int32(this.context.offset() - this.index), this.index);
}
pub fn writeExcludingSelf(this: LengthWriter) AnyPostgresError!void {
const message_length = this.context.offset() -| (this.index + 4);
// PostgreSQL protocol uses 32-bit signed integers for message lengths
// Maximum value is 2,147,483,647 bytes (~2GB)
if (message_length > std.math.maxInt(i32)) {
return error.MessageTooLarge;
}
try this.context.pwrite(&Int32(message_length), this.index);
try this.context.pwrite(&Int32(this.context.offset() -| (this.index + 4)), this.index);
}
};

View File

@@ -232,5 +232,16 @@
"NAPI_VERSION_EXPERIMENTAL=1",
],
},
{
"target_name": "test_finalizer_on_teardown",
"sources": ["test_finalizer_on_teardown.c"],
"include_dirs": ["<!@(node -p \"require('node-addon-api').include\")"],
"libraries": [],
"dependencies": ["<!(node -p \"require('node-addon-api').gyp\")"],
"defines": [
"NAPI_DISABLE_CPP_EXCEPTIONS",
"NODE_API_EXPERIMENTAL_NOGC_ENV_OPT_OUT=1",
],
},
]
}

View File

@@ -0,0 +1,74 @@
#include <node_api.h>
#include <stdio.h>
#include <stdlib.h>
// This test reproduces issue #24552 where finalizers crash when run during
// env teardown (e.g., when a subprocess using NAPI modules terminates).
static int finalize_count = 0;
// Finalizer deliberately written to poke at the env during GC/teardown.
// Before the fix for #24552, dereferencing the env here could segfault.
static void finalizer_that_uses_env(napi_env env, void* finalize_data, void* finalize_hint) {
    (void)finalize_hint; // unused
    finalize_count++;

    // napi_get_global dereferences the env's global object; this is exactly the
    // access that crashed when the env was mid-teardown.
    napi_value global;
    const napi_status status = napi_get_global(env, &global);
    if (status != napi_ok) {
        printf("Finalizer %d: Could not access global (expected during teardown)\n", finalize_count);
    } else {
        printf("Finalizer %d: Successfully accessed global object\n", finalize_count);
    }

    // Release the per-object payload allocated in create_objects_with_finalizers.
    free(finalize_data);
}
// JS-callable: createObjects(count?) -> array of `count` plain objects, each
// wrapped with a heap-allocated int payload and finalizer_that_uses_env.
static napi_value create_objects_with_finalizers(napi_env env, napi_callback_info info) {
    size_t argc = 1;
    napi_value args[1];
    napi_get_cb_info(env, info, &argc, args, NULL, NULL);

    // Default to 10 objects unless the caller passed an explicit count.
    int count = 10;
    if (argc >= 1) {
        napi_get_value_int32(env, args[0], &count);
    }

    napi_value result_array;
    napi_create_array_with_length(env, count, &result_array);

    int i = 0;
    while (i < count) {
        napi_value obj;
        napi_create_object(env, &obj);

        // Payload is freed by the finalizer when the object is collected.
        int* data = (int*)malloc(sizeof(int));
        *data = i;
        // Attach the finalizer that will try to touch the env at teardown.
        napi_wrap(env, obj, data, finalizer_that_uses_env, NULL, NULL);
        napi_set_element(env, result_array, i, obj);
        i++;
    }
    return result_array;
}
// JS-callable: getFinalizeCount() -> number of finalizers that have run so far.
static napi_value get_finalize_count(napi_env env, napi_callback_info info) {
    (void)info; // unused
    napi_value count_value;
    napi_create_int32(env, finalize_count, &count_value);
    return count_value;
}
// Module entry point: exposes createObjects and getFinalizeCount on `exports`.
static napi_value init(napi_env env, napi_value exports) {
    const napi_property_descriptor properties[] = {
        { "createObjects", 0, create_objects_with_finalizers, 0, 0, 0, napi_default, 0 },
        { "getFinalizeCount", 0, get_finalize_count, 0, 0, 0, napi_default, 0 },
    };
    napi_define_properties(env, exports, sizeof(properties) / sizeof(properties[0]), properties);
    return exports;
}
NAPI_MODULE(NODE_GYP_MODULE_NAME, init)

View File

@@ -0,0 +1,80 @@
// Test for https://github.com/oven-sh/bun/issues/24552
// Segmentation fault when NAPI finalizers run during env teardown
import { expect, test } from "bun:test";
import { bunEnv, bunExe } from "harness";
import { join } from "path";
// Regression test for https://github.com/oven-sh/bun/issues/24552: a subprocess
// that loads a NAPI addon with finalizers must not segfault when those
// finalizers run during env teardown at process exit.
test("NAPI finalizers should not crash during subprocess teardown", async () => {
  const addonPath = join(__dirname, "../../napi/napi-app/build/Debug/test_finalizer_on_teardown.node");
  // Spawn a subprocess that loads NAPI modules with finalizers.
  // NOTE: the addon path is embedded via JSON.stringify so it forms a valid JS
  // string literal even when it contains backslashes (Windows paths) or quotes;
  // raw '${addonPath}' interpolation would mangle such paths inside the -e script.
  await using proc = Bun.spawn({
    cmd: [
      bunExe(),
      "-e",
      `
        // Load the NAPI addon
        const addon = require(${JSON.stringify(addonPath)});
        // Create objects with finalizers
        const objects = addon.createObjects(20);
        // Let them be garbage collected
        // Force GC if available
        if (global.gc) {
          global.gc();
        }
        // Log that we're exiting normally
        console.log("Exiting normally");
        // Process will exit here, triggering finalizers
        // Before the fix, this would crash with segfault at address 0x0
      `,
    ],
    env: bunEnv,
    stdout: "pipe",
    stderr: "pipe",
  });
  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
  // The process should exit successfully without crashing.
  expect(exitCode).toBe(0);
  expect(stdout).toContain("Exiting normally");
  // Should not have segfault or crash messages.
  expect(stderr).not.toContain("Segmentation fault");
  expect(stderr).not.toContain("panic");
});
// Simulates the rspack/rsbuild usage pattern from #24552: running several
// short-lived subprocesses in a loop, each loading the NAPI addon, and
// verifying none of them crashes at teardown.
test("NAPI finalizers in loop scenario (like rspack)", async () => {
  const addonPath = join(__dirname, "../../napi/napi-app/build/Debug/test_finalizer_on_teardown.node");
  // Simulate running rspack/rsbuild for multiple configs in a loop.
  const configs = ["config1", "config2", "config3"];
  for (const config of configs) {
    // NOTE: JSON.stringify makes the embedded path a valid JS string literal on
    // all platforms (backslashes in Windows paths would otherwise be treated as
    // escape sequences inside the single-quoted require argument).
    await using proc = Bun.spawn({
      cmd: [
        bunExe(),
        "-e",
        `
          const addon = require(${JSON.stringify(addonPath)});
          const objects = addon.createObjects(10);
          console.log('Processing ${config}');
        `,
      ],
      env: bunEnv,
      stdout: "pipe",
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    // Every iteration must exit cleanly with no crash signatures.
    expect(exitCode).toBe(0);
    expect(stdout).toContain(`Processing ${config}`);
    expect(stderr).not.toContain("Segmentation fault");
    expect(stderr).not.toContain("panic");
  }
});

View File

@@ -1,65 +0,0 @@
// https://github.com/oven-sh/bun/issues/24640
// Test that large bulk inserts return a proper error instead of panicking
import { describe, expect, test } from "bun:test";
import { getSecret } from "harness";
import postgres from "postgres";
const databaseUrl = getSecret("TLS_POSTGRES_DATABASE_URL");
describe.skipIf(!databaseUrl)("postgres large bulk insert", () => {
test("should throw error instead of panic when message exceeds protocol limit", async () => {
const sql = postgres(databaseUrl!);
try {
// Create a test table
await sql`DROP TABLE IF EXISTS test_bulk_insert_24640`;
await sql`CREATE TABLE test_bulk_insert_24640 (
id serial PRIMARY KEY,
data TEXT
)`;
// Create a large array that will exceed the protocol limit
// Each row will have ~300KB of data to trigger the overflow with fewer rows
const largeString = "x".repeat(300 * 1024); // 300KB per row
const rows = Array.from({ length: 8000 }, (_, i) => ({
data: largeString,
}));
// This should throw an error instead of panicking
await expect(async () => {
await sql`INSERT INTO test_bulk_insert_24640 ${sql(rows)}`;
}).toThrow();
} finally {
try {
await sql`DROP TABLE IF EXISTS test_bulk_insert_24640`;
} catch {}
await sql.end();
}
}, 60000); // 60 second timeout for this test
test("should work with smaller batches", async () => {
const sql = postgres(databaseUrl!);
try {
// Create a test table
await sql`DROP TABLE IF EXISTS test_bulk_insert_24640_small`;
await sql`CREATE TABLE test_bulk_insert_24640_small (
id serial PRIMARY KEY,
data TEXT
)`;
// Create smaller batches that should work
const rows = Array.from({ length: 100 }, (_, i) => ({
data: `row ${i}`,
}));
await sql`INSERT INTO test_bulk_insert_24640_small ${sql(rows)}`;
const result = await sql`SELECT COUNT(*) as count FROM test_bulk_insert_24640_small`;
expect(result[0].count).toBe("100");
} finally {
try {
await sql`DROP TABLE IF EXISTS test_bulk_insert_24640_small`;
} catch {}
await sql.end();
}
});
});