Fix PostgreSQL bulk insert panic with proper overflow handling

Previously, when a bulk insert operation exceeded the PostgreSQL
protocol's 2GB message size limit, Bun would panic with "integer
does not fit in destination type". This typically happened at
roughly 8,150 or more rows, depending on row size.
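
For a rough sense of scale (illustrative numbers only, not taken from the
issue), the protocol's length field is a signed 32-bit integer, so the row
count at which a single message overflows depends on the per-row payload:

// Hedged illustration: estimate how many rows fit in one message under the
// PostgreSQL protocol's signed 32-bit length limit. approxRowBytes is a
// made-up per-row payload size; real sizes depend on column data and encoding.
const MAX_MESSAGE_BYTES = 2 ** 31 - 1; // 2,147,483,647 bytes (~2GB)
const approxRowBytes = 260 * 1024;     // hypothetical ~260KB per row
const maxRows = Math.floor(MAX_MESSAGE_BYTES / approxRowBytes);
console.log(maxRows); // => 8066, roughly the "~8,150 rows" scale reported above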

This change:
- Adds a dedicated error.MessageTooLarge to the AnyPostgresError error set
- Checks the message size before casting it to i32 in NewWriter.zig
- Returns ERR_POSTGRES_MESSAGE_TOO_LARGE with a clear, actionable message
- Keeps error.Overflow generic for future numeric overflow cases

The error message now clearly explains:
"Query message exceeds PostgreSQL protocol limit of 2GB. Try reducing
the number of rows in your bulk insert or split it into smaller batches."
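
A minimal sketch of the batching workaround the message suggests, using the
same postgres client API exercised by the new test; the connection string,
table name, and batch size are placeholders:

import postgres from "postgres";

const sql = postgres(process.env.DATABASE_URL!); // placeholder connection string

// Send several small INSERTs instead of one message that approaches the 2GB limit.
async function insertInBatches(rows: { data: string }[], batchSize = 1000) {
  for (let i = 0; i < rows.length; i += batchSize) {
    const batch = rows.slice(i, i + batchSize);
    await sql`INSERT INTO items ${sql(batch)}`; // "items" is a placeholder table
  }
}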

Fixes #24640

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
Claude Bot
2025-11-12 14:52:17 +00:00
parent 0a307ed880
commit 840143f6ca
3 changed files with 86 additions and 4 deletions


@@ -15,6 +15,7 @@ pub const AnyPostgresError = error{
     InvalidTimeFormat,
     JSError,
     JSTerminated,
+    MessageTooLarge,
     MultidimensionalArrayNotSupportedYet,
     NullsInArrayNotSupportedYet,
     OutOfMemory,
@@ -93,6 +94,7 @@ pub fn postgresErrorToJS(globalObject: *jsc.JSGlobalObject, message: ?[]const u8
         error.InvalidServerKey => "ERR_POSTGRES_INVALID_SERVER_KEY",
         error.InvalidServerSignature => "ERR_POSTGRES_INVALID_SERVER_SIGNATURE",
         error.InvalidTimeFormat => "ERR_POSTGRES_INVALID_TIME_FORMAT",
+        error.MessageTooLarge => "ERR_POSTGRES_MESSAGE_TOO_LARGE",
         error.MultidimensionalArrayNotSupportedYet => "ERR_POSTGRES_MULTIDIMENSIONAL_ARRAY_NOT_SUPPORTED_YET",
         error.NullsInArrayNotSupportedYet => "ERR_POSTGRES_NULLS_IN_ARRAY_NOT_SUPPORTED_YET",
         error.Overflow => "ERR_POSTGRES_OVERFLOW",
@@ -124,8 +126,11 @@ pub fn postgresErrorToJS(globalObject: *jsc.JSGlobalObject, message: ?[]const u8
         },
     };
-    var buffer_message = [_]u8{0} ** 256;
-    const msg = message orelse std.fmt.bufPrint(buffer_message[0..], "Failed to bind query: {s}", .{@errorName(err)}) catch "Failed to bind query";
+    var buffer_message = [_]u8{0} ** 512;
+    const msg = message orelse switch (err) {
+        error.MessageTooLarge => "Query message exceeds PostgreSQL protocol limit of 2GB. Try reducing the number of rows in your bulk insert or split it into smaller batches.",
+        else => std.fmt.bufPrint(&buffer_message, "Failed to bind query: {s}", .{@errorName(err)}) catch "Failed to bind query",
+    };
     return createPostgresError(globalObject, msg, .{ .code = code }) catch |e| globalObject.takeError(e);
 }
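
For reference, a hedged sketch of how a caller might react to the new code,
assuming the thrown error exposes the `code` value passed to
createPostgresError above; the connection, table name, and fallback are
placeholders:

import postgres from "postgres";

const sql = postgres(process.env.DATABASE_URL!); // placeholder connection string

async function bulkInsert(rows: { data: string }[]): Promise<boolean> {
  try {
    await sql`INSERT INTO items ${sql(rows)}`; // "items" is a placeholder table
    return true;
  } catch (err) {
    // Assumption: the error carries the code set via createPostgresError above.
    if ((err as { code?: string }).code === "ERR_POSTGRES_MESSAGE_TOO_LARGE") {
      return false; // caller can retry with smaller batches
    }
    throw err;
  }
}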


@@ -23,11 +23,23 @@ pub fn NewWriterWrap(
         context: WrappedWriter,
         pub fn write(this: LengthWriter) AnyPostgresError!void {
-            try this.context.pwrite(&Int32(this.context.offset() - this.index), this.index);
+            const message_length = this.context.offset() - this.index;
+            // PostgreSQL protocol uses 32-bit signed integers for message lengths
+            // Maximum value is 2,147,483,647 bytes (~2GB)
+            if (message_length > std.math.maxInt(i32)) {
+                return error.MessageTooLarge;
+            }
+            try this.context.pwrite(&Int32(message_length), this.index);
         }
         pub fn writeExcludingSelf(this: LengthWriter) AnyPostgresError!void {
-            try this.context.pwrite(&Int32(this.context.offset() -| (this.index + 4)), this.index);
+            const message_length = this.context.offset() -| (this.index + 4);
+            // PostgreSQL protocol uses 32-bit signed integers for message lengths
+            // Maximum value is 2,147,483,647 bytes (~2GB)
+            if (message_length > std.math.maxInt(i32)) {
+                return error.MessageTooLarge;
+            }
+            try this.context.pwrite(&Int32(message_length), this.index);
         }
     };
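
The same guard, sketched in TypeScript purely for illustration (not Bun's
implementation): the protocol encodes message lengths as big-endian signed
32-bit integers, so any computed length above 2,147,483,647 bytes must be
rejected before it is written:

const MAX_I32 = 2_147_483_647;

// Hedged sketch: validate a computed frame length before encoding it into the
// 4-byte length field, mirroring the error.MessageTooLarge check above.
function writeLengthPrefix(buf: Buffer, offset: number, messageLength: number): void {
  if (messageLength > MAX_I32) {
    throw new RangeError("message exceeds the PostgreSQL protocol's 2GB limit");
  }
  buf.writeInt32BE(messageLength, offset); // PostgreSQL lengths are big-endian int32
}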


@@ -0,0 +1,65 @@
// https://github.com/oven-sh/bun/issues/24640
// Test that large bulk inserts return a proper error instead of panicking
import { describe, expect, test } from "bun:test";
import { getSecret } from "harness";
import postgres from "postgres";
const databaseUrl = getSecret("TLS_POSTGRES_DATABASE_URL");
describe.skipIf(!databaseUrl)("postgres large bulk insert", () => {
test("should throw error instead of panic when message exceeds protocol limit", async () => {
const sql = postgres(databaseUrl!);
try {
// Create a test table
await sql`DROP TABLE IF EXISTS test_bulk_insert_24640`;
await sql`CREATE TABLE test_bulk_insert_24640 (
id serial PRIMARY KEY,
data TEXT
)`;
// Create a large array that will exceed the protocol limit
// Each row will have ~300KB of data to trigger the overflow with fewer rows
const largeString = "x".repeat(300 * 1024); // 300KB per row
const rows = Array.from({ length: 8000 }, (_, i) => ({
data: largeString,
}));
// This should throw an error instead of panicking
await expect(async () => {
await sql`INSERT INTO test_bulk_insert_24640 ${sql(rows)}`;
}).toThrow();
} finally {
try {
await sql`DROP TABLE IF EXISTS test_bulk_insert_24640`;
} catch {}
await sql.end();
}
}, 60000); // 60 second timeout for this test
test("should work with smaller batches", async () => {
const sql = postgres(databaseUrl!);
try {
// Create a test table
await sql`DROP TABLE IF EXISTS test_bulk_insert_24640_small`;
await sql`CREATE TABLE test_bulk_insert_24640_small (
id serial PRIMARY KEY,
data TEXT
)`;
// Create smaller batches that should work
const rows = Array.from({ length: 100 }, (_, i) => ({
data: `row ${i}`,
}));
await sql`INSERT INTO test_bulk_insert_24640_small ${sql(rows)}`;
const result = await sql`SELECT COUNT(*) as count FROM test_bulk_insert_24640_small`;
expect(result[0].count).toBe("100");
} finally {
try {
await sql`DROP TABLE IF EXISTS test_bulk_insert_24640_small`;
} catch {}
await sql.end();
}
});
});