Compare commits

...

2 Commits

Author SHA1 Message Date
Ciro Spaciari
9d93f55551 Merge branch 'main' into claude/fix-sendfile-race-26406 2026-01-26 11:17:22 -08:00
Claude Bot
d0948e9a5c fix(server): wait for headers to flush before sendfile
When responding with `Response(Bun.file(...))`, the sendfile syscall
could execute before HTTP headers were fully flushed to the kernel
socket buffer. Under network backpressure (more common over LAN due
to latency), headers might still be in userspace buffer when sendfile
writes directly to the kernel socket, causing file content to arrive
before HTTP headers. Clients would see HTTP/0.9 responses.

The fix checks `getBufferedAmount()` after writing headers. If there's
buffered data, we wait for the socket to become writable (via onWritable
callback) before calling sendfile, ensuring headers are fully flushed.

Fixes #26406

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-24 02:10:44 +00:00
4 changed files with 121 additions and 5 deletions

View File

@@ -1015,13 +1015,26 @@ pub fn NewRequestContext(comptime ssl_enabled: bool, comptime debug_mode: bool,
return;
}
// If there's buffered data (headers not fully flushed to kernel socket),
// we must wait for the socket to become writable before calling sendfile.
// Otherwise, sendfile would write file content before the headers, causing
// clients to see HTTP/0.9 responses (content without headers).
if (resp.getBufferedAmount() > 0) {
this.sendfile.has_set_on_writable = true;
this.flags.has_marked_pending = true;
resp.onWritable(*RequestContext, onWritableSendfile, this);
return;
}
_ = this.onSendfile();
}
/// Writes the HTTP status line + headers and the terminating CRLF, then
/// uncorks so the header bytes begin flushing toward the kernel socket.
/// NOTE(review): this is a rendered diff with the +/- markers stripped — the
/// first `resp.prepareForSendfile()` call below is the REMOVED pre-fix line;
/// only the discarded `_ = resp.prepareForSendfile();` form exists after the
/// change (the C shim now returns the buffered byte count, per the companion
/// hunk). Do not read this as two sequential calls.
pub fn renderMetadataAndNewline(this: *RequestContext) void {
if (this.resp) |resp| {
this.renderMetadata();
// (removed line from the old revision:)
resp.prepareForSendfile();
// Discard the buffered amount here - we'll check getBufferedAmount()
// after the corked call completes in renderSendFile().
_ = resp.prepareForSendfile();
}
}

View File

@@ -1739,7 +1739,7 @@ __attribute__((callback (corker, ctx)))
}
}
void uws_res_prepare_for_sendfile(int ssl, uws_res_r res)
size_t uws_res_prepare_for_sendfile(int ssl, uws_res_r res)
{
if (ssl)
{
@@ -1750,6 +1750,7 @@ __attribute__((callback (corker, ctx)))
ptr[0] = '\r';
ptr[1] = '\n';
uwsRes->uncork();
return uwsRes->getBufferedAmount();
}
else
{
@@ -1760,6 +1761,7 @@ __attribute__((callback (corker, ctx)))
ptr[0] = '\r';
ptr[1] = '\n';
uwsRes->uncork();
return uwsRes->getBufferedAmount();
}
}

View File

@@ -56,8 +56,8 @@ pub fn NewResponse(ssl_flag: i32) type {
return this.state().isHttpConnectionClose();
}
// NOTE(review): rendered diff with markers stripped — the `void` signature and
// bare call are the REMOVED revision; the `usize` signature that follows is the
// replacement, which forwards the post-uncork buffered byte count from the C
// shim so the caller can decide whether headers are fully flushed.
pub fn prepareForSendfile(res: *Response) void {
c.uws_res_prepare_for_sendfile(ssl_flag, res.downcast());
// Replacement: returns bytes still buffered in userspace after uncork.
pub fn prepareForSendfile(res: *Response) usize {
return c.uws_res_prepare_for_sendfile(ssl_flag, res.downcast());
}
pub fn uncork(res: *Response) void {
@@ -706,7 +706,7 @@ const c = struct {
close: bool,
) bool;
pub extern fn uws_res_end_stream(ssl: i32, res: *c.uws_res, close_connection: bool) void;
pub extern fn uws_res_prepare_for_sendfile(ssl: i32, res: *c.uws_res) void;
pub extern fn uws_res_prepare_for_sendfile(ssl: i32, res: *c.uws_res) usize;
pub extern fn uws_res_get_native_handle(ssl: i32, res: *c.uws_res) *Socket;
pub extern fn uws_res_get_remote_address_as_text(ssl: i32, res: *c.uws_res, dest: *[*]const u8) usize;

View File

@@ -0,0 +1,101 @@
import { describe, expect, it } from "bun:test";
import { tempDirWithFiles } from "harness";
import { join } from "node:path";
// Test for https://github.com/oven-sh/bun/issues/26406
// Response(Bun.file(...)) would fail when accessed over LAN (192.168.x.x) but work on localhost.
// The issue was that sendfile() could execute before HTTP headers were fully flushed to kernel socket buffer.
// When there was network backpressure, headers might still be in userspace buffer when sendfile writes directly
// to kernel socket, causing file content to arrive BEFORE HTTP headers (appearing as HTTP/0.9).
// Regression suite for the sendfile/header-flush race: every response served
// via Response(Bun.file(...)) must begin with a valid HTTP status line and
// headers, even when the socket is under backpressure.
describe("Response(Bun.file()) headers are sent before file content", () => {
  it("concurrent requests to file response all receive valid HTTP headers", async () => {
    // A multi-KB payload raises the odds of the kernel socket buffer filling.
    const dir = tempDirWithFiles("sendfile-test", {
      "test.txt": "Hello from sendfile test!\n".repeat(1000),
    });
    using server = Bun.serve({
      port: 0,
      fetch() {
        return new Response(Bun.file(join(dir, "test.txt")));
      },
    });
    // Fire a burst of parallel fetches; if the race fired, a response would
    // arrive header-less (HTTP/0.9 style) or with a bogus status.
    const inFlight: Promise<void>[] = [];
    for (let i = 0; i < 100; i++) {
      inFlight.push(
        (async () => {
          const res = await fetch(server.url);
          expect(res.status).toBe(200);
          expect(res.headers.get("Content-Type")).toContain("text/plain");
          expect(res.headers.get("Content-Length")).toBe("26000"); // 26 bytes/line * 1000 lines
          const body = await res.text();
          expect(body.length).toBe(26000);
          expect(body.startsWith("Hello from sendfile test!")).toBe(true);
        })(),
      );
    }
    await Promise.all(inFlight);
  });

  it("large file response maintains correct headers under load", async () => {
    const payload = Buffer.alloc(1024 * 1024, "x").toString(); // 1 MiB of 'x'
    const dir = tempDirWithFiles("sendfile-large-test", {
      "large.txt": payload,
    });
    using server = Bun.serve({
      port: 0,
      fetch() {
        return new Response(Bun.file(join(dir, "large.txt")));
      },
    });
    // Ten back-to-back large transfers exercise sustained sendfile operation.
    let remaining = 10;
    while (remaining-- > 0) {
      const res = await fetch(server.url);
      expect(res.status).toBe(200);
      expect(res.headers.get("Content-Length")).toBe("1048576");
      const body = await res.text();
      expect(body.length).toBe(1048576);
    }
  });

  it("file response with custom headers preserves all headers", async () => {
    const dir = tempDirWithFiles("sendfile-headers-test", {
      "test.txt": "Content here",
    });
    using server = Bun.serve({
      port: 0,
      fetch() {
        return new Response(Bun.file(join(dir, "test.txt")), {
          headers: {
            "X-Custom-Header": "custom-value",
            "Cache-Control": "no-cache",
          },
        });
      },
    });
    // Fifty parallel fetches: every response must carry the custom headers
    // alongside the inferred Content-Type.
    const checks: Promise<void>[] = [];
    for (let i = 0; i < 50; i++) {
      checks.push(
        fetch(server.url).then(async res => {
          expect(res.status).toBe(200);
          expect(res.headers.get("X-Custom-Header")).toBe("custom-value");
          expect(res.headers.get("Cache-Control")).toBe("no-cache");
          expect(res.headers.get("Content-Type")).toContain("text/plain");
          await res.text();
        }),
      );
    }
    await Promise.all(checks);
  });
});