Compare commits


1 Commit

Author: Claude Bot
SHA1: 435516605e

fix(blob): read file-backed blobs when constructing Blob from multiple parts

When a `Bun.file()` blob was combined with other parts in `new Blob([...])`,
its contents were silently dropped because `sharedView()` returns empty for
file-backed blobs whose data hasn't been read into memory yet.

Now synchronously reads file contents when file-backed blobs appear as parts
in the Blob constructor, matching the behavior when `Bun.file()` is the sole
part.

Closes #27071

Co-Authored-By: Claude <noreply@anthropic.com>
Date: 2026-02-17 04:23:20 +00:00
6 changed files with 121 additions and 88 deletions
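
The user-visible behavior described in the commit message, as a minimal sketch (the temp path and file contents below are made up for illustration and are not part of this change):

// Hypothetical repro of #27071. Before this change, the file-backed part of a
// multi-part Blob was silently dropped; Bun.file() as the sole part already worked.
await Bun.write("/tmp/repro-27071.txt", "FILE_PART");
const f = Bun.file("/tmp/repro-27071.txt");

console.log(await new Blob([f]).text());          // "FILE_PART" (worked before and after)
console.log(await new Blob([f, "EXTRA"]).text()); // was "EXTRA"; now "FILE_PARTEXTRA"
console.log(new Blob([f, "EXTRA"]).size);         // 14 (9 + 5)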

View File

@@ -1087,32 +1087,10 @@ pub const WindowsSpawnOptions = struct {
         dup2: struct { out: bun.jsc.Subprocess.StdioKind, to: bun.jsc.Subprocess.StdioKind },
         pub fn deinit(this: *const Stdio) void {
-            switch (this.*) {
-                .buffer => |pipe| closePipeAndDestroy(pipe),
-                .ipc => |pipe| closePipeAndDestroy(pipe),
-                else => {},
+            if (this.* == .buffer) {
+                bun.default_allocator.destroy(this.buffer);
             }
         }
-        /// Close a pipe that may have been initialized with uv_pipe_init.
-        /// After uv_pipe_init, the pipe is registered in the event loop's
-        /// handle_queue. Freeing it without uv_close corrupts the queue's
-        /// linked list, causing segfaults on subsequent handle insertions.
-        pub fn closePipeAndDestroy(pipe: *bun.windows.libuv.Pipe) void {
-            if (pipe.loop == null or pipe.isClosed()) {
-                // Never initialized or already fully closed — safe to free directly.
-                bun.default_allocator.destroy(pipe);
-            } else if (!pipe.isClosing()) {
-                // Initialized and not yet closing — must uv_close to remove from handle queue.
-                pipe.close(&onPipeCloseForDeinit);
-            }
-            // else: isClosing — uv_close was already called, the pending close
-            // callback owns the lifetime.
-        }
-        fn onPipeCloseForDeinit(pipe: *bun.windows.libuv.Pipe) callconv(.c) void {
-            bun.default_allocator.destroy(pipe);
-        }
     };
     pub fn deinit(this: *const WindowsSpawnOptions) void {
@@ -1652,9 +1630,8 @@ pub fn spawnProcessWindows(
                 stdio.data.fd = fd_i;
             },
             .ipc => |my_pipe| {
-                // ipc option inside stdin, stderr or stdout are not supported.
-                // Must close properly since the pipe may have been initialized.
-                WindowsSpawnOptions.Stdio.closePipeAndDestroy(my_pipe);
+                // ipc option inside stdin, stderr or stdout are not supported
+                bun.default_allocator.destroy(my_pipe);
                 stdio.flags = uv.UV_IGNORE;
             },
             .ignore => {

View File

@@ -235,10 +235,10 @@ pub const Stdio = union(enum) {
                     return .{ .err = .blob_used_as_out };
                 }
-                break :brk .{ .buffer = createZeroedPipe() };
+                break :brk .{ .buffer = bun.handleOom(bun.default_allocator.create(uv.Pipe)) };
             },
-            .ipc => .{ .ipc = createZeroedPipe() },
-            .capture, .pipe, .array_buffer, .readable_stream => .{ .buffer = createZeroedPipe() },
+            .ipc => .{ .ipc = bun.handleOom(bun.default_allocator.create(uv.Pipe)) },
+            .capture, .pipe, .array_buffer, .readable_stream => .{ .buffer = bun.handleOom(bun.default_allocator.create(uv.Pipe)) },
             .fd => |fd| .{ .pipe = fd },
             .dup2 => .{ .dup2 = .{ .out = stdio.dup2.out, .to = stdio.dup2.to } },
             .path => |pathlike| .{ .path = pathlike.slice() },
@@ -487,15 +487,6 @@ pub const Stdio = union(enum) {
     }
 };
-/// Allocate a zero-initialized uv.Pipe. Zero-init ensures `pipe.loop` is null
-/// for pipes that were never passed to `uv_pipe_init`, which
-/// `closePipeAndDestroy` relies on to decide whether `uv_close` is needed.
-fn createZeroedPipe() *uv.Pipe {
-    const pipe = bun.default_allocator.create(uv.Pipe) catch |err| bun.handleOom(err);
-    pipe.* = std.mem.zeroes(uv.Pipe);
-    return pipe;
-}
 const std = @import("std");
 const bun = @import("bun");

View File

@@ -3836,6 +3836,33 @@ fn fromJSMovable(
     return FromJSFunction(global, arg);
 }
+/// Synchronously read a file-backed blob's contents and push them to the joiner.
+/// Used when constructing a new Blob from parts that include file-backed blobs.
+fn readFileIntoJoiner(blob: *Blob, global: *JSGlobalObject, joiner: *StringJoiner) bun.JSError!void {
+    const store = blob.store orelse return;
+    if (store.data != .file) return;
+    const file = store.data.file;
+    const res = jsc.Node.fs.NodeFS.readFile(
+        global.bunVM().nodeFS(),
+        .{
+            .encoding = .buffer,
+            .path = file.pathlike,
+            .offset = blob.offset,
+            .max_size = blob.size,
+        },
+        .sync,
+    );
+    switch (res) {
+        .err => |err| {
+            return global.throwValue(try err.toJS(global));
+        },
+        .result => |result| {
+            joiner.push(result.slice(), result.buffer.allocator);
+        },
+    }
+}
 fn fromJSWithoutDeferGC(
     global: *JSGlobalObject,
     arg: JSValue,
@@ -4022,7 +4049,11 @@ fn fromJSWithoutDeferGC(
             .DOMWrapper => {
                 if (item.as(Blob)) |blob| {
                     could_have_non_ascii = could_have_non_ascii or blob.charset != .all_ascii;
-                    joiner.pushStatic(blob.sharedView());
+                    if (blob.needsToReadFile()) {
+                        try readFileIntoJoiner(blob, global, &joiner);
+                    } else {
+                        joiner.pushStatic(blob.sharedView());
+                    }
                     continue;
                 } else {
                     const sliced = try current.toSliceClone(global);
@@ -4042,7 +4073,11 @@ fn fromJSWithoutDeferGC(
             .DOMWrapper => {
                 if (current.as(Blob)) |blob| {
                     could_have_non_ascii = could_have_non_ascii or blob.charset != .all_ascii;
-                    joiner.pushStatic(blob.sharedView());
+                    if (blob.needsToReadFile()) {
+                        try readFileIntoJoiner(blob, global, &joiner);
+                    } else {
+                        joiner.pushStatic(blob.sharedView());
+                    }
                 } else {
                     const sliced = try current.toSliceClone(global);
                     const allocator = sliced.allocator.get();
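
Mapping the two branches above back to JavaScript: an in-memory Blob part keeps the zero-copy sharedView() path, while a Bun.file() part now takes needsToReadFile() into readFileIntoJoiner() and is read synchronously during construction. A short sketch (hypothetical path; the sliced-part behavior is inferred from the offset/max_size arguments in the diff, not verified here):

// In-memory part -> sharedView(); file-backed part -> needsToReadFile() -> readFileIntoJoiner()
await Bun.write("/tmp/parts-27071.txt", "0123456789");
const mem = new Blob(["mem"]);
const disk = Bun.file("/tmp/parts-27071.txt");

console.log(await new Blob([mem, disk]).text()); // "mem0123456789"

// readFileIntoJoiner forwards blob.offset and blob.size, so a sliced file-backed
// part should contribute only its slice (an inference from the diff):
console.log(await new Blob([mem, disk.slice(2, 5)]).text()); // expected "mem234"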

View File

@@ -222,11 +222,7 @@ pub const Source = union(enum) {
        switch (pipe.open(fd)) {
            .err => |err| {
-                // The pipe was already registered in the event loop's handle_queue
-                // by uv_pipe_init above. We must call uv_close to properly remove
-                // it from the queue before freeing the memory, otherwise the
-                // handle_queue linked list becomes corrupted (dangling pointers).
-                pipe.close(&onPipeOpenFailClose);
+                bun.default_allocator.destroy(pipe);
                return .{
                    .err = err,
                };
@@ -237,10 +233,6 @@
        return .{ .result = pipe };
    }
-    fn onPipeOpenFailClose(pipe: *Pipe) callconv(.c) void {
-        bun.default_allocator.destroy(pipe);
-    }
    pub const StdinTTY = struct {
        var data: uv.uv_tty_t = undefined;
        var lock: bun.Mutex = .{};

View File

@@ -1,38 +0,0 @@
-import { expect, test } from "bun:test";
-import { bunEnv, bunExe } from "harness";
-
-// https://github.com/oven-sh/bun/issues/27063
-// On Windows, when Bun.spawn fails (e.g., ENOENT for a nonexistent executable),
-// pipes initialized with uv_pipe_init were freed without calling uv_close first.
-// This corrupted libuv's internal handle_queue linked list, causing segfaults
-// on subsequent spawn calls.
-test("spawning nonexistent executables repeatedly does not crash", async () => {
-  // Spawn a nonexistent executable multiple times. Before the fix, on Windows
-  // this would corrupt the libuv handle queue and crash on a subsequent spawn.
-  for (let i = 0; i < 5; i++) {
-    try {
-      const proc = Bun.spawn({
-        cmd: ["this-executable-does-not-exist-27063"],
-        stdout: "pipe",
-        stderr: "pipe",
-      });
-      await proc.exited;
-    } catch {
-      // Expected to fail - we're testing that it doesn't crash
-    }
-  }
-
-  // If we get here without crashing, the handle queue is intact.
-  // Verify a valid spawn still works after the failed ones.
-  await using proc = Bun.spawn({
-    cmd: [bunExe(), "-e", "console.log('ok')"],
-    env: bunEnv,
-    stdout: "pipe",
-    stderr: "pipe",
-  });
-  const [stdout, exitCode] = await Promise.all([proc.stdout.text(), proc.exited]);
-  expect(stdout.trim()).toBe("ok");
-  expect(exitCode).toBe(0);
-});

View File

@@ -0,0 +1,76 @@
+import { expect, test } from "bun:test";
+import { tempDir } from "harness";
+
+test("new Blob([Bun.file(), buffer]) includes file contents", async () => {
+  using dir = tempDir("blob-file-concat", {
+    "testfile.txt": "HELLO_FROM_FILE",
+  });
+  const file = Bun.file(`${dir}/testfile.txt`);
+  const buffer = Buffer.from("BUFFER_DATA");
+
+  // file + buffer
+  const r1 = await new Blob([file, buffer]).text();
+  expect(r1).toBe("HELLO_FROM_FILEBUFFER_DATA");
+
+  // buffer + file
+  const r2 = await new Blob([buffer, file]).text();
+  expect(r2).toBe("BUFFER_DATAHELLO_FROM_FILE");
+
+  // file + file
+  const r3 = await new Blob([file, file]).text();
+  expect(r3).toBe("HELLO_FROM_FILEHELLO_FROM_FILE");
+
+  // single file still works
+  const r4 = await new Blob([file]).text();
+  expect(r4).toBe("HELLO_FROM_FILE");
+
+  // size should be correct
+  expect(new Blob([file, buffer]).size).toBe(26);
+  expect(new Blob([buffer, file]).size).toBe(26);
+  expect(new Blob([file, file]).size).toBe(30);
+});
+
+test("new Blob([Bun.file(), string]) includes file contents", async () => {
+  using dir = tempDir("blob-file-string", {
+    "testfile.txt": "FILE_CONTENT",
+  });
+  const file = Bun.file(`${dir}/testfile.txt`);
+
+  const r1 = await new Blob([file, "STRING_DATA"]).text();
+  expect(r1).toBe("FILE_CONTENTSTRING_DATA");
+  const r2 = await new Blob(["STRING_DATA", file]).text();
+  expect(r2).toBe("STRING_DATAFILE_CONTENT");
+});
+
+test("new Blob([Bun.file(), Uint8Array]) includes file contents", async () => {
+  using dir = tempDir("blob-file-uint8", {
+    "testfile.txt": "FILE_DATA",
+  });
+  const file = Bun.file(`${dir}/testfile.txt`);
+  const uint8 = new Uint8Array([65, 66, 67]); // "ABC"
+
+  const r1 = await new Blob([file, uint8]).text();
+  expect(r1).toBe("FILE_DATAABC");
+  const r2 = await new Blob([uint8, file]).text();
+  expect(r2).toBe("ABCFILE_DATA");
+});
+
+test("new Blob([Bun.file(), Blob]) includes file contents", async () => {
+  using dir = tempDir("blob-file-blob", {
+    "testfile.txt": "FILE_DATA",
+  });
+  const file = Bun.file(`${dir}/testfile.txt`);
+  const otherBlob = new Blob(["BLOB_DATA"]);
+
+  const r1 = await new Blob([file, otherBlob]).text();
+  expect(r1).toBe("FILE_DATABLOB_DATA");
+  const r2 = await new Blob([otherBlob, file]).text();
+  expect(r2).toBe("BLOB_DATAFILE_DATA");
+});