Compare commits

...

2 Commits

Author SHA1 Message Date
Claude Bot
ef6986b7d4 fix(fetch): use AtomicShared for SSLConfig cache to fix UAF on redirects
Replace the linear-scan SSL context cache with a proper content-hashing
HashMap using AtomicShared(*SSLConfig) for thread-safe refcounting.

The previous approach had a UAF: on cache hit, the caller's config was
freed, then client.tls_props was reassigned to the cached key. On
redirect, connect() would be called again with client.tls_props pointing
to the cached key, and the code would free it—destroying the cache's
own key and corrupting the map.

Changes:
- Add SSLConfig.Ref (AtomicShared) and content-based MapContext
- Add SSLConfig.contentHash() for HashMap key hashing
- Replace AutoArrayHashMap with content-hashing ArrayHashMap + getOrPut
- Use Ref.clone()/leak()/adoptRawUnsafe()/deinit() for proper ownership
- Release client's tls_props ref in HTTPClient.deinit()
- Allocate SSLConfig via Ref.new() in fetch() for shared ownership

Co-Authored-By: Claude <noreply@anthropic.com>
2026-02-23 02:36:36 +00:00
Claude Bot
05327256e5 fix(fetch): free SSLConfig on SSL context cache hit to prevent memory leak
When fetch() is called with TLS client certificates (mTLS), each request
allocates a new SSLConfig on the heap. The SSL context cache in
HTTPThread.connect() checks for an equivalent cached context via
isSame(), but the guard `if (requested_config != client.tls_props)` was
always false because requested_config was initialized from
client.tls_props.?, so the duplicate config was never freed.

This caused ~3KB of leaked memory per mTLS fetch() request (the
SSLConfig struct plus its cert/key string data), growing linearly and
never reclaimed.

The fix removes the incorrect pointer identity check and always frees
the caller's SSLConfig when a matching cached context is found.

Closes #27358

Co-Authored-By: Claude <noreply@anthropic.com>
2026-02-22 21:46:27 +00:00
5 changed files with 166 additions and 24 deletions

View File

@@ -25,6 +25,20 @@ requires_custom_request_ctx: bool = false,
is_using_default_ciphers: bool = true,
low_memory_mode: bool = false,
/// Thread-safe shared pointer for SSLConfig used by the HTTP client cache.
/// NOTE(review): assumes `bun.ptr.AtomicShared` provides refcounted
/// clone()/leak()/deinit() semantics — its definition is not visible here;
/// confirm against bun.ptr.
pub const Ref = bun.ptr.AtomicShared(*SSLConfig);
/// HashMap context for content-based hashing/equality of `Ref` keys.
/// Compares the underlying SSLConfig values, not pointer addresses.
/// Invariant required by std.ArrayHashMap: any two keys for which `eql`
/// returns true MUST produce the same `hash`. That holds only if
/// `contentHash` folds in exactly the fields `isSame` compares —
/// TODO(review): keep the two in sync whenever SSLConfig gains a field.
pub const MapContext = struct {
// ArrayHashMap contexts use a 32-bit hash; delegate to the config's
// content hash so lookups match by value.
pub fn hash(_: @This(), key: Ref) u32 {
return key.get().contentHash();
}
// Deep content comparison via SSLConfig.isSame; the trailing index
// parameter (b_index) is unused for this context.
pub fn eql(_: @This(), a: Ref, b: Ref, _: usize) bool {
return a.get().isSame(b.get());
}
};
const ReadFromBlobError = bun.JSError || error{
NullStore,
NotAFile,
@@ -143,6 +157,39 @@ pub fn isSame(this: *const SSLConfig, other: *const SSLConfig) bool {
return true;
}
/// Content-based hash for use in HashMap lookups.
/// Walks every SSLConfig field at comptime and folds its *contents*
/// (not pointer identity) into a Wyhash, so two configs holding equal
/// cert/key data hash identically even when heap-allocated separately.
/// Must agree with `isSame`: if `isSame` says two configs are equal,
/// this function must return the same value for both —
/// TODO(review): confirm field coverage matches isSame's comparisons.
pub fn contentHash(this: *const SSLConfig) u32 {
var hasher = std.hash.Wyhash.init(0);
// Unrolled at comptime over every struct field.
inline for (comptime std.meta.fields(SSLConfig)) |field| {
const value = @field(this, field.name);
switch (field.type) {
// Optional NUL-terminated string: hash the byte contents,
// then a 1/0 presence tag so null and "" hash differently.
?[*:0]const u8 => {
if (value) |s| {
const slice = bun.asByteSlice(s);
hasher.update(slice);
hasher.update(&.{1}); // present marker
} else {
hasher.update(&.{0}); // null marker
}
},
// Optional list of NUL-terminated strings: hash the count,
// then each string followed by a 0 separator, then a
// presence tag (prevents ambiguity between adjacent entries).
?[][*:0]const u8 => {
if (value) |slice| {
hasher.update(std.mem.asBytes(&slice.len));
for (slice) |s| {
hasher.update(bun.asByteSlice(s));
hasher.update(&.{0}); // entry separator
}
hasher.update(&.{1});
} else {
hasher.update(&.{0});
}
},
// All remaining fields are hashed by their raw value bytes.
// NOTE(review): this is content-based only for scalar fields
// (bools/ints/enums). If SSLConfig ever gains another pointer
// or slice field, this branch would hash the address rather
// than the pointed-to data — add a dedicated case for it.
else => hasher.update(std.mem.asBytes(&value)),
}
}
// Wyhash produces a u64; the map context wants u32.
return @truncate(hasher.final());
}
fn stringsEqual(a: [*:0]const u8, b: [*:0]const u8) bool {
const lhs = bun.asByteSlice(a);
const rhs = bun.asByteSlice(b);

View File

@@ -275,8 +275,8 @@ fn fetchImpl(
if (ssl_config) |conf| {
ssl_config = null;
conf.deinit();
bun.default_allocator.destroy(conf);
var ref = SSLConfig.Ref.adoptRawUnsafe(conf);
ref.deinit();
}
}
@@ -466,9 +466,8 @@ fn fetchImpl(
is_error = true;
return .zero;
}) |config| {
const ssl_config_object = bun.handleOom(bun.default_allocator.create(SSLConfig));
ssl_config_object.* = config;
break :extract_ssl_config ssl_config_object;
var shared = SSLConfig.Ref.new(config);
break :extract_ssl_config shared.leak();
}
}
}

View File

@@ -514,6 +514,11 @@ pub fn deinit(this: *HTTPClient) void {
this.proxy_tunnel = null;
tunnel.detachAndDeref();
}
if (this.tls_props) |tls| {
this.tls_props = null;
var ref = SSLConfig.Ref.adoptRawUnsafe(tls);
ref.deinit();
}
this.unix_socket_path.deinit();
this.unix_socket_path = jsc.ZigString.Slice.empty;
}

View File

@@ -1,6 +1,11 @@
const HTTPThread = @This();
var custom_ssl_context_map = std.AutoArrayHashMap(*SSLConfig, *NewHTTPContext(true)).init(bun.default_allocator);
var custom_ssl_context_map = std.ArrayHashMap(
SSLConfig.Ref,
*NewHTTPContext(true),
SSLConfig.MapContext,
true,
).init(bun.default_allocator);
loop: *jsc.MiniEventLoop,
http_context: NewHTTPContext(false),
@@ -226,29 +231,33 @@ pub fn connect(this: *@This(), client: *HTTPClient, comptime is_ssl: bool) !NewH
if (comptime is_ssl) {
const needs_own_context = client.tls_props != null and client.tls_props.?.requires_custom_request_ctx;
if (needs_own_context) {
var requested_config = client.tls_props.?;
for (custom_ssl_context_map.keys()) |other_config| {
if (requested_config.isSame(other_config)) {
// we free the callers config since we have a existing one
if (requested_config != client.tls_props) {
requested_config.deinit();
bun.default_allocator.destroy(requested_config);
}
client.tls_props = other_config;
if (client.http_proxy) |url| {
return try custom_ssl_context_map.get(other_config).?.connect(client, url.hostname, url.getPortAuto());
} else {
return try custom_ssl_context_map.get(other_config).?.connect(client, client.url.hostname, client.url.getPortAuto());
}
const requested_config = client.tls_props.?;
// Wrap the raw pointer as a Ref for lookup without changing the refcount.
const lookup_ref = SSLConfig.Ref.adoptRawUnsafe(requested_config);
const gop = try custom_ssl_context_map.getOrPut(lookup_ref);
if (gop.found_existing) {
// Cache hit: release the caller's config and point client at the cached one.
var caller_ref = lookup_ref;
caller_ref.deinit();
var new_ref = gop.key_ptr.*.clone();
client.tls_props = new_ref.leak();
const cached_context = gop.value_ptr.*;
if (client.http_proxy) |url| {
return try cached_context.connect(client, url.hostname, url.getPortAuto());
} else {
return try cached_context.connect(client, client.url.hostname, client.url.getPortAuto());
}
}
// we need the config so dont free it
// Cache miss: create a new SSL context for this config.
var custom_context = try bun.default_allocator.create(NewHTTPContext(is_ssl));
custom_context.initWithClientConfig(client) catch |err| {
client.tls_props = null;
requested_config.deinit();
bun.default_allocator.destroy(requested_config);
// Remove the incomplete entry that getOrPut inserted.
custom_ssl_context_map.swapRemoveAt(gop.index);
var caller_ref = lookup_ref;
caller_ref.deinit();
bun.default_allocator.destroy(custom_context);
// TODO: these error names reach js. figure out how they should be handled
@@ -259,7 +268,10 @@ pub fn connect(this: *@This(), client: *HTTPClient, comptime is_ssl: bool) !NewH
error.LoadCAFile => error.FailedToOpenSocket,
};
};
try custom_ssl_context_map.put(requested_config, custom_context);
// getOrPut inserted lookup_ref (the caller's ref) as the key.
// Clone it so the cache owns an independent reference.
gop.key_ptr.* = lookup_ref.clone();
gop.value_ptr.* = custom_context;
// We might deinit the socket context, so we disable keepalive to make sure we don't
// free it while in use.
client.flags.disable_keepalive = true;

View File

@@ -0,0 +1,79 @@
import { expect, test } from "bun:test";
import { bunEnv, bunExe, tempDir } from "harness";
// Regression test for https://github.com/oven-sh/bun/issues/27358
// fetch() with TLS client certificates (mTLS) leaked SSLConfig objects
// because the SSL context cache hit path didn't free the duplicate config.
// End-to-end regression test: spins up an mTLS-enabled Bun.serve in a child
// process and issues 500 fetch() requests with client certificates,
// exercising the HTTPThread SSL-context cache hit path that previously
// leaked an SSLConfig allocation per request.
test("fetch() with mTLS client certs reuses SSL context correctly", async () => {
// Temp dir holding the cert-generation script and the child script.
// `using` disposes of the directory when the test scope exits.
using dir = tempDir("mtls-leak-test", {
"generate-certs.sh": `#!/bin/bash
set -e
openssl req -x509 -newkey rsa:2048 -keyout ca.key -out ca.crt -days 1 -nodes -subj "/CN=TestCA" 2>/dev/null
openssl req -newkey rsa:2048 -keyout server.key -out server.csr -nodes -subj "/CN=localhost" 2>/dev/null
openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt -days 1 2>/dev/null
openssl req -newkey rsa:2048 -keyout client.key -out client.csr -nodes -subj "/CN=client" 2>/dev/null
openssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client.crt -days 1 2>/dev/null
`,
"test.ts": `
import fs from "fs";
import path from "path";
const dir = process.argv[2];
const serverCert = fs.readFileSync(path.join(dir, "server.crt"), "utf-8");
const serverKey = fs.readFileSync(path.join(dir, "server.key"), "utf-8");
const clientCert = fs.readFileSync(path.join(dir, "client.crt"), "utf-8");
const clientKey = fs.readFileSync(path.join(dir, "client.key"), "utf-8");
const caCert = fs.readFileSync(path.join(dir, "ca.crt"), "utf-8");
const server = Bun.serve({
port: 0,
tls: { cert: serverCert, key: serverKey, ca: caCert },
fetch() { return new Response("ok"); },
});
const url = "https://localhost:" + String(server.port) + "/";
// Make many requests with client certs to exercise SSL context caching.
// Before the fix, each request leaked an SSLConfig allocation (~3KB).
let successCount = 0;
for (let i = 0; i < 500; i++) {
const res = await fetch(url, {
tls: { cert: clientCert, key: clientKey, rejectUnauthorized: false },
});
const text = await res.text();
if (text === "ok") successCount++;
}
server.stop();
console.log(JSON.stringify({ successCount }));
process.exit(0);
`,
});
// Generate the CA, server, and client certs synchronously with openssl;
// a nonzero exit here means the environment lacks openssl or cert
// generation failed, so fail fast before spawning the child.
const genProc = Bun.spawnSync({
cmd: ["bash", "generate-certs.sh"],
cwd: String(dir),
stderr: "pipe",
});
expect(genProc.exitCode).toBe(0);
// Run the fetch loop in a separate bun process so its allocator state is
// isolated from the test runner. `await using` ensures the child is
// cleaned up when the scope exits.
await using proc = Bun.spawn({
cmd: [bunExe(), "test.ts", String(dir)],
cwd: String(dir),
env: bunEnv,
stderr: "pipe",
});
const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
// Surface the child's stderr on failure to make CI diagnostics useful.
if (exitCode !== 0) {
console.error("stderr:", stderr);
}
// The child prints a JSON summary; every one of the 500 requests must
// have received the "ok" body for the cache path to be considered sound.
const result = JSON.parse(stdout.trim());
expect(result.successCount).toBe(500);
expect(exitCode).toBe(0);
}, 60_000);