fix(io): Prevent data corruption in Bun.write for files >2GB (#25720)
Closes #8254

Fixes a data corruption bug in `Bun.write()` where files larger than 2GB would have chunks skipped, producing corrupted output with missing data.

`doWriteLoop` effectively applied the offset twice for each 2GB chunk:

- it first sliced the buffer by `total_written`: `remain = remain[@min(this.total_written, remain.len)..]`
- it then also incremented `bytes_blob.offset`: `this.bytes_blob.offset += @truncate(wrote)`

Because `sharedView()` already applies the blob offset (`slice_ = slice_[this.offset..]`), the offset ended up doubled. In a local reproduction writing a 16GB file whose eight 2GB chunks were filled with the incrementing values `[1, 2, 3, 4, 5, 6, 7, 8]`, the buggy version produced `[1, 3, 5, 7, …]`, skipping every other chunk.

The fix is simply to remove the redundant manual offset and rely on `total_written` alone to track write progress.
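To make the double-offset mechanism concrete, here is a minimal TypeScript sketch of the behavior described above. It is a model, not the actual implementation (the real loop is Zig inside Bun's file writer): `simulate` and `MAX_WRITE` are hypothetical stand-ins for `doWriteLoop` and the 2GB per-iteration write cap, and a plain number array stands in for the blob's bytes.

```ts
// Hypothetical model of doWriteLoop: MAX_WRITE stands in for the 2GB
// per-iteration write cap; the real code operates on byte slices in Zig.
const MAX_WRITE = 2;

function simulate(data: number[], buggy: boolean): number[] {
  const out: number[] = [];
  let offset = 0; // models bytes_blob.offset
  let totalWritten = 0; // models this.total_written

  while (true) {
    // sharedView() already slices the buffer by the blob offset...
    const view = data.slice(offset);
    // ...and the write loop then slices that view by total_written as well.
    const remain = view.slice(Math.min(totalWritten, view.length));
    if (remain.length === 0) break;

    const wrote = Math.min(remain.length, MAX_WRITE); // partial write
    out.push(...remain.slice(0, wrote));
    totalWritten += wrote;
    if (buggy) offset += wrote; // the redundant manual offset (the bug)
  }
  return out;
}

// Four "chunks" of MAX_WRITE bytes each, filled 1..4 like the 16GB repro:
const data = [1, 1, 2, 2, 3, 3, 4, 4];
console.log(simulate(data, true)); // [1, 1, 3, 3] -> every other chunk skipped
console.log(simulate(data, false)); // [1, 1, 2, 2, 3, 3, 4, 4] -> correct once
// progress is tracked via total_written alone
```

In the buggy path, each iteration advances both `offset` and `totalWritten` by the amount written, so the next slice starts twice as far into the buffer and the loop also terminates early; dropping the `offset` increment restores correct output.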
test/regression/issue/8254.test.ts (new file, 40 lines)
@@ -0,0 +1,40 @@
```ts
// https://github.com/oven-sh/bun/issues/8254
// Bun.write() should correctly write files larger than 2GB without data corruption

import { expect, test } from "bun:test";
import { tempDir } from "harness";
import { join } from "path";

test("Bun.write() should write past 2GB boundary without corruption", async () => {
  using tmpbase = tempDir("issue-8254", {});

  const TWO_GB = 2 ** 31;
  const CHUNK_SIZE = 1024 * 1024; // 1MB
  // Force a second write iteration by crossing the 2GB boundary
  const NUM_CHUNKS = Math.floor(TWO_GB / CHUNK_SIZE) + 1;
  const path = join(tmpbase, "large-file.bin");

  const chunks: Uint8Array<ArrayBuffer>[] = [];
  for (let i = 0; i < NUM_CHUNKS; i++) {
    const chunk = new Uint8Array(CHUNK_SIZE);
    chunk.fill(i % 256);
    chunks.push(chunk);
  }

  const blob = new Blob(chunks);
  const written = await Bun.write(path, blob);

  expect(written).toBeGreaterThan(TWO_GB);

  const file = Bun.file(path);

  // Check bytes just before and after 2GB boundary
  const positions = [TWO_GB - 1, TWO_GB, TWO_GB + 1];

  for (const pos of positions) {
    const buf = new Uint8Array(await file.slice(pos, pos + 1).arrayBuffer());

    const expected = Math.floor(pos / CHUNK_SIZE) % 256;
    expect(buf[0]).toBe(expected);
  }
});
```