Mirror of https://github.com/oven-sh/bun, synced 2026-02-18 23:01:58 +00:00

Compare commits: claude/fix...claude/fix (4 commits)

| Author | SHA1 | Date |
|---|---|---|
| | f4cd7389b0 | |
| | 0d2e08fde1 | |
| | 5156fb9b3f | |
| | 3f04bc02a6 | |
@@ -1707,15 +1707,6 @@ pub fn NewWrappedHandler(comptime tls: bool) type {
pub fn onClose(this: WrappedSocket, socket: Socket, err: c_int, data: ?*anyopaque) bun.JSError!void {
    if (comptime tls) {
        // Clean up the raw TCP socket from upgradeTLS() — its onClose
        // never fires because uws closes through the TLS context only.
        defer {
            if (!this.tcp.socket.isDetached()) {
                this.tcp.socket.detach();
                this.tcp.has_pending_activity.store(false, .release);
                this.tcp.deref();
            }
        }
        try TLSSocket.onClose(this.tls, socket, err, data);
    } else {
        try TLSSocket.onClose(this.tcp, socket, err, data);
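For context, a minimal TypeScript sketch of the JS-level pattern this hunk cleans up after. It mirrors the regression test further down in this diff; the host and port are placeholders, not values from the change itself.

```ts
// Minimal sketch: upgrading a plain TCP socket to TLS. In Bun this creates
// both a TLS wrapper and a raw TCP wrapper natively; the defer block above
// releases the raw wrapper when the TLS side closes.
import net from "node:net";
import tls from "node:tls";

const tcpSocket = net.createConnection({ host: "127.0.0.1", port: 4433 }); // placeholder endpoint
tcpSocket.once("connect", () => {
  const tlsSocket = tls.connect({ socket: tcpSocket, rejectUnauthorized: false });
  tlsSocket.once("secureConnect", () => {
    // Destroying the upgraded socket should also drop the raw TCP wrapper,
    // otherwise it (and everything it retains) leaks.
    tlsSocket.destroy();
  });
});
```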
@@ -165,6 +165,19 @@ pub fn write(index: u32, graph: *const Graph, linker_graph: *const LinkerGraph,
var already_visited_output_file = try bun.bit_set.AutoBitSet.initEmpty(bun.default_allocator, additional_output_files.len);
defer already_visited_output_file.deinit(bun.default_allocator);

// Build a set of source indices directly referenced by the HTML file's import records.
// Assets referenced only via HTML tags (e.g. <img src>, <link rel="icon">) use .url
// import records which don't propagate entry_bits through the linker's code splitting
// graph. We need to include these files in the manifest so they are served correctly.
const import_records = graph.ast.items(.import_records);
var html_referenced_sources = try bun.bit_set.AutoBitSet.initEmpty(bun.default_allocator, graph.input_files.len);
defer html_referenced_sources.deinit(bun.default_allocator);
for (import_records[browser_source_index].slice()) |*record| {
    if (record.source_index.isValid()) {
        html_referenced_sources.set(record.source_index.get());
    }
}

// Write all chunks that have files associated with this entry point.
// Also include browser chunks from server builds (lazy-loaded chunks from dynamic imports).
// When there's only one HTML import, all browser chunks belong to that manifest.
@@ -219,7 +232,7 @@ pub fn write(index: u32, graph: *const Graph, linker_graph: *const LinkerGraph,
if (source_index.get() == server_source_index) continue;
const bits: *const AutoBitSet = &file_entry_bits[source_index.get()];

if (bits.hasIntersection(&entry_point_bits)) {
if (bits.hasIntersection(&entry_point_bits) or html_referenced_sources.isSet(source_index.get())) {
    already_visited_output_file.set(i);
    if (!first) try writer.writeAll(",");
    first = false;
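To make the intent concrete, here is a hedged TypeScript sketch of the consumer-side view this hunk fixes. The field names (`files`, `input`, `loader`) follow the regression test added below rather than any additional API surface.

```ts
// Hedged sketch: in a Bun server build, importing an HTML file yields a
// manifest. Assets referenced only by HTML tags (<img src>, <link rel="icon">)
// should now show up in manifest.files alongside JS-imported assets.
import html from "./index.html";

const pngEntries = html.files.filter(f => f.loader === "file" && f.input.endsWith(".png"));
// Before this change, entries reachable only through .url import records were
// missing here, because they never picked up the entry point's entry_bits.
console.log(pngEntries.map(f => f.input));
```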
@@ -816,12 +816,6 @@ fn BaseWindowsPipeWriter(
pub fn close(this: *WindowsPipeWriter) void {
    this.is_done = true;
    if (this.source) |source| {
        // Check if there's a pending async write before closing.
        // If so, we must defer the onCloseSource notification to
        // onWriteComplete, because the parent's onClose handler may
        // free the writer's StreamBuffer resources, and the pending
        // write callback would then access freed memory.
        const has_pending_async_write = this.hasPendingAsyncWrite();
        switch (source) {
            .sync_file, .file => |file| {
                // Use state machine to handle close after operation completes
@@ -843,12 +837,7 @@ fn BaseWindowsPipeWriter(
            },
        }
        this.source = null;
        if (!has_pending_async_write) {
            this.onCloseSource();
        }
        // When has_pending_async_write is true, onCloseSource() will
        // be called from onWriteComplete/onFsWriteComplete after the
        // pending write callback completes.
        this.onCloseSource();
    }
}

@@ -1000,22 +989,7 @@ pub fn WindowsBufferedWriter(Parent: type, function_table: anytype) type {
    return .success;
}

/// Returns true if there is an outstanding async write request
/// (uv_write or uv_fs_write) that hasn't completed yet.
pub fn hasPendingAsyncWrite(this: *const WindowsWriter) bool {
    return this.pending_payload_size > 0;
}

fn onWriteComplete(this: *WindowsWriter, status: uv.ReturnCode) void {
    // If the source was closed (e.g. close() was called while a write
    // was in-flight), clean up and notify the parent via onCloseSource
    // (which was deferred by close()).
    if (this.source == null) {
        this.pending_payload_size = 0;
        this.onCloseSource();
        return;
    }

    const written = this.pending_payload_size;
    this.pending_payload_size = 0;
    if (status.toError(.write)) |err| {
@@ -1318,28 +1292,12 @@ pub fn WindowsStreamingWriter(comptime Parent: type, function_table: anytype) ty
    return (this.outgoing.isNotEmpty() or this.current_payload.isNotEmpty());
}

/// Returns true if there is an outstanding async write request
/// (uv_write or uv_fs_write) that hasn't completed yet.
pub fn hasPendingAsyncWrite(this: *const WindowsWriter) bool {
    return this.current_payload.isNotEmpty();
}

fn isDone(this: *WindowsWriter) bool {
    // Done means the flag is set and no more data is queued.
    return this.is_done and !this.hasPendingData();
}

fn onWriteComplete(this: *WindowsWriter, status: uv.ReturnCode) void {
    // If the source was closed (e.g. close() was called while a write
    // was in-flight), clean up buffers and notify the parent via
    // onCloseSource (which was deferred by close()).
    if (this.source == null) {
        this.current_payload.reset();
        this.outgoing.reset();
        this.onCloseSource();
        return;
    }

    if (status.toError(.write)) |err| {
        this.last_write_result = .{ .err = err };
        log("onWrite() = {s}", .{err.name()});

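A hedged TypeScript sketch of the userland sequence that exercises this close-while-a-write-is-pending path. It mirrors the deleted regression test at the end of this diff; `bunExe` and `bunEnv` come from Bun's test harness.

```ts
// Hedged sketch: write a large chunk to a child's stdin and close it right
// away, so a uv_write may still be in flight when close() runs. With the
// deferred onCloseSource() ordering above, the completion callback must not
// touch freed StreamBuffer memory.
import { bunEnv, bunExe } from "harness";

await using proc = Bun.spawn({
  cmd: [bunExe(), "-e", "process.stdin.resume();"],
  env: bunEnv,
  stdin: "pipe",
  stdout: "ignore",
  stderr: "ignore",
});

proc.stdin.write("x".repeat(65536)); // large write: likely still pending...
proc.stdin.end(); // ...when stdin is closed here
await proc.exited;
```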
@@ -51,15 +51,6 @@ function onError(msg, err, callback) {
  process.nextTick(emitErrorNt, msg, err, callback);
}

function isHTTPHeaderStateSentOrAssigned(state) {
  return state === NodeHTTPHeaderState.sent || state === NodeHTTPHeaderState.assigned;
}
function throwHeadersSentIfNecessary(self, action) {
  if (self._header != null || isHTTPHeaderStateSentOrAssigned(self[headerStateSymbol])) {
    throw $ERR_HTTP_HEADERS_SENT(action);
  }
}

function write_(msg, chunk, encoding, callback, fromEnd) {
  if (typeof callback !== "function") callback = nop;

@@ -261,14 +252,18 @@ const OutgoingMessagePrototype = {

  removeHeader(name) {
    validateString(name, "name");
    throwHeadersSentIfNecessary(this, "remove");
    if ((this._header !== undefined && this._header !== null) || this[headerStateSymbol] === NodeHTTPHeaderState.sent) {
      throw $ERR_HTTP_HEADERS_SENT("remove");
    }
    const headers = this[headersSymbol];
    if (!headers) return;
    headers.delete(name);
  },

  setHeader(name, value) {
    throwHeadersSentIfNecessary(this, "set");
    if ((this._header !== undefined && this._header !== null) || this[headerStateSymbol] == NodeHTTPHeaderState.sent) {
      throw $ERR_HTTP_HEADERS_SENT("set");
    }
    validateHeaderName(name);
    validateHeaderValue(name, value);
    const headers = (this[headersSymbol] ??= new Headers());
@@ -276,7 +271,9 @@ const OutgoingMessagePrototype = {
    return this;
  },
  setHeaders(headers) {
    throwHeadersSentIfNecessary(this, "set");
    if (this._header != null || this[headerStateSymbol] === NodeHTTPHeaderState.sent) {
      throw $ERR_HTTP_HEADERS_SENT("set");
    }

    if (!headers || $isArray(headers) || typeof headers.keys !== "function" || typeof headers.get !== "function") {
      throw $ERR_INVALID_ARG_TYPE("headers", ["Headers", "Map"], headers);

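For reference, a hedged TypeScript sketch of the observable behavior these guards centralize (standard node:http semantics; the server and request below are illustrative only, not part of the diff).

```ts
// Hedged sketch: once response headers have been written, further header
// mutations on the OutgoingMessage are rejected with ERR_HTTP_HEADERS_SENT,
// which is what throwHeadersSentIfNecessary() enforces in one place.
import http from "node:http";

const server = http.createServer((req, res) => {
  res.setHeader("x-early", "ok"); // fine: headers not sent yet
  res.writeHead(200); // header state becomes assigned/sent
  try {
    res.setHeader("x-late", "nope"); // rejected by the headers-sent guard
  } catch (err) {
    console.log((err as NodeJS.ErrnoException).code); // "ERR_HTTP_HEADERS_SENT"
  }
  res.end("done");
});

server.listen(0, async () => {
  const { port } = server.address() as import("node:net").AddressInfo;
  await fetch(`http://127.0.0.1:${port}/`); // trigger the handler once
  server.close();
});
```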
@@ -1,63 +0,0 @@
// Regression test for TLS upgrade raw socket leak (#12117, #24118, #25948)
// When a TCP socket is upgraded to TLS via tls.connect({ socket }),
// both a TLS wrapper and a raw TCP wrapper are created in Zig.
// Previously, the raw socket's has_pending_activity was never set to
// false on close, causing it (and all its retained objects) to leak.

import { describe, expect, it } from "bun:test";
import { tls as COMMON_CERT, expectMaxObjectTypeCount } from "harness";
import { once } from "node:events";
import net from "node:net";
import tls from "node:tls";

describe("TLS upgrade", () => {
  it("should not leak TLSSocket objects after close", async () => {
    // Create a TLS server that echoes data and closes
    const server = tls.createServer(
      {
        key: COMMON_CERT.key,
        cert: COMMON_CERT.cert,
      },
      socket => {
        socket.end("hello");
      },
    );

    await once(server.listen(0, "127.0.0.1"), "listening");
    const port = (server.address() as net.AddressInfo).port;

    // Simulate the MongoDB driver pattern: create a plain TCP socket,
    // then upgrade it to TLS via tls.connect({ socket }).
    // Do this multiple times to accumulate leaked objects.
    const iterations = 50;

    try {
      for (let i = 0; i < iterations; i++) {
        const tcpSocket = net.createConnection({ host: "127.0.0.1", port });
        await once(tcpSocket, "connect");

        const tlsSocket = tls.connect({
          socket: tcpSocket,
          ca: COMMON_CERT.cert,
          rejectUnauthorized: false,
        });
        await once(tlsSocket, "secureConnect");

        // Read any data and destroy the TLS socket (simulates SDAM close)
        tlsSocket.on("data", () => {});
        tlsSocket.destroy();

        await once(tlsSocket, "close");
      }
    } finally {
      server.close();
      await once(server, "close");
    }

    // After all connections are closed and GC runs, the TLSSocket count
    // should be low. Before the fix, each iteration would leak 1 raw
    // TLSSocket (the TCP wrapper from upgradeTLS), accumulating over time.
    // Allow some slack for prototypes/structures (typically 2-3 baseline).
    await expectMaxObjectTypeCount(expect, "TLSSocket", 10, 1000);
  });
});
test/regression/issue/27031.test.ts (Normal file, 214 lines)
@@ -0,0 +1,214 @@
import { describe, expect } from "bun:test";
import { itBundled } from "../../bundler/expectBundled";

// Small valid PNG bytes for test assets
const pngBytes = Buffer.from([
  0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, // PNG header
  0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, // IHDR chunk
  0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, // 16x16
  0x08, 0x02, 0x00, 0x00, 0x00, 0x90, 0x91, 0x68, // 8-bit RGB
  0x36, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e, // IEND chunk
  0x44, 0xae, 0x42, 0x60, 0x82,
]);

// Different content so each file gets a unique hash
const pngBytes2 = Buffer.from([
  0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,
  0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52,
  0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, // 32x32
  0x08, 0x02, 0x00, 0x00, 0x00, 0xfc, 0x18, 0xed,
  0xa3, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e,
  0x44, 0xae, 0x42, 0x60, 0x82,
]);

const pngBytes3 = Buffer.from([
  0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,
  0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52,
  0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x08, // 8x8
  0x08, 0x02, 0x00, 0x00, 0x00, 0x4b, 0x6d, 0x29,
  0xde, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4e,
  0x44, 0xae, 0x42, 0x60, 0x82,
]);

describe.concurrent("bundler", () => {
  // Regression test: Images referenced only via HTML tags should appear in
  // HTMLBundle.files array, not just images imported via JavaScript.
  // https://github.com/oven-sh/bun/issues/27031
  itBundled("html-import/html-only-asset-references", {
    outdir: "out/",
    files: {
      "/server.js": `
        import html from "./index.html";

        const manifest = html;

        // All three images should be in the files array
        const fileLoaders = manifest.files.map(f => f.loader);
        const fileInputs = manifest.files.map(f => f.input);

        // logo.png and banner.png are only referenced via HTML tags, not JS imports
        // icon.png is imported via both HTML and JS
        const hasLogo = fileInputs.some(i => i === "logo.png");
        const hasBanner = fileInputs.some(i => i === "banner.png");
        const hasIcon = fileInputs.some(i => i === "icon.png");

        if (!hasLogo) throw new Error("logo.png missing from manifest files (referenced via <img src>)");
        if (!hasBanner) throw new Error("banner.png missing from manifest files (referenced via <img src>)");
        if (!hasIcon) throw new Error("icon.png missing from manifest files (referenced via <link rel=icon> and JS import)");

        // All image files should have loader "file"
        const imageFiles = manifest.files.filter(f => f.path.includes(".png"));
        for (const img of imageFiles) {
          if (img.loader !== "file") throw new Error("Expected loader 'file' for " + img.path + ", got " + img.loader);
          if (!img.headers || !img.headers["content-type"]) throw new Error("Missing content-type header for " + img.path);
        }

        console.log("OK: " + imageFiles.length + " image files in manifest");
      `,
      "/index.html": `
        <!DOCTYPE html>
        <html>
          <head>
            <link rel="icon" href="./icon.png" />
          </head>
          <body>
            <img src="./logo.png" alt="Logo" />
            <img src="./banner.png" alt="Banner" />
            <script type="module" src="./app.js"></script>
          </body>
        </html>`,
      "/app.js": `
        import icon from './icon.png';
        console.log("Icon imported via JS:", icon);
      `,
      "/logo.png": pngBytes,
      "/banner.png": pngBytes2,
      "/icon.png": pngBytes3,
    },
    entryPoints: ["/server.js"],
    target: "bun",

    run: {
      validate({ stdout }) {
        expect(stdout).toContain("OK: 3 image files in manifest");
      },
    },
  });
});
@@ -1,89 +0,0 @@
import { expect, test } from "bun:test";
import { bunEnv, bunExe } from "harness";

// https://github.com/oven-sh/bun/issues/27097
// Segfault when pipe write completion callback fires after close() freed StreamBuffer
// The bug: on Windows, close() called onCloseSource() synchronously, which could
// free the writer's StreamBuffer resources. If a uv_write was pending, its callback
// would later access the freed memory, causing a segfault at 0xFFFFFFFFFFFFFFFF.

test("closing spawn stdin while write is pending should not crash", async () => {
  // Spawn a process that reads from stdin.
  // Write data to stdin, then immediately close.
  // This creates a scenario where a pipe write may be pending when close() is called.
  for (let i = 0; i < 5; i++) {
    await using proc = Bun.spawn({
      cmd: [bunExe(), "-e", "process.stdin.resume(); process.stdin.on('close', () => process.exit(0));"],
      env: bunEnv,
      stdin: "pipe",
      stdout: "ignore",
      stderr: "ignore",
    });

    // Write a large amount of data to stdin - this makes it more likely that
    // a write will be pending when we close
    try {
      proc.stdin.write("x".repeat(65536));
      proc.stdin.flush();
    } catch {
      // Write may fail if process already exited - that's fine
    }

    // Close stdin while the write may still be pending
    proc.stdin.end();

    // Wait for the process to exit
    await proc.exited;
  }
}, 30_000);

test("rapid spawn and close cycles should not corrupt pipe state", async () => {
  // Simulate the pattern from the bug report: many spawn operations over time.
  // Each spawn creates pipes, writes some data, and tears down.
  const iterations = 10;

  for (let i = 0; i < iterations; i++) {
    await using proc = Bun.spawn({
      cmd: [bunExe(), "-e", "console.log('ok');"],
      env: bunEnv,
      stdout: "pipe",
      stderr: "ignore",
    });

    const stdout = await new Response(proc.stdout).text();
    const exitCode = await proc.exited;
    expect(stdout.trim()).toBe("ok");
    expect(exitCode).toBe(0);
  }
}, 30_000);

test("FileSink write and close race should not crash", async () => {
  // Test the FileSink (StreamingWriter) path by using spawn with a ReadableStream
  // as stdin, which creates a FileSink internally.
  for (let i = 0; i < 5; i++) {
    const data = "hello ".repeat(1000);
    const stream = new ReadableStream({
      start(controller) {
        controller.enqueue(new TextEncoder().encode(data));
        controller.close();
      },
    });

    await using proc = Bun.spawn({
      cmd: [
        bunExe(),
        "-e",
        "let t=0; process.stdin.on('data',(c)=>{t+=c.length}); process.stdin.on('end',()=>{console.log(t)})",
      ],
      env: bunEnv,
      stdin: stream,
      stdout: "pipe",
      stderr: "ignore",
    });

    const stdout = await new Response(proc.stdout).text();
    const exitCode = await proc.exited;
    expect(Number(stdout.trim())).toBeGreaterThan(0);
    expect(exitCode).toBe(0);
  }
}, 30_000);