Compare commits

...

1 Commit

Author SHA1 Message Date
Claude Bot
3013e10861 harden(http): enforce max decompressed output size for fetch responses
Add a 128 MB limit on HTTP response decompression output, matching the
existing WebSocket MAX_DECOMPRESSED_SIZE protection. This applies to all
compression formats (gzip, deflate, brotli, zstd) across both the
libdeflate fast path and the streaming decompression slow path.

Responses that decompress within the limit are unaffected. The limit
only triggers when the decompressed output exceeds 128 MB, at which
point the decompression is aborted with an error. The `decompress: false`
fetch option bypasses decompression entirely and is not affected.

Co-Authored-By: Claude <noreply@anthropic.com>
2026-02-27 06:07:11 +00:00
6 changed files with 202 additions and 4 deletions

View File

@@ -96,6 +96,10 @@ pub const BrotliReaderArrayList = struct {
}
pub fn readAll(this: *BrotliReaderArrayList, is_done: bool) !void {
return this.readAllWithLimit(is_done, null);
}
pub fn readAllWithLimit(this: *BrotliReaderArrayList, is_done: bool, max_output_size: ?usize) !void {
defer this.list_ptr.* = this.list;
if (this.state == .End or this.state == .Error) {
@@ -134,6 +138,14 @@ pub const BrotliReaderArrayList = struct {
this.list.items.len += bytes_written;
this.total_in += bytes_read;
// Check decompressed output size limit
if (max_output_size) |limit| {
if (this.list.items.len > limit) {
this.state = .Error;
return error.DecompressionOutputTooLarge;
}
}
switch (result) {
.success => {
if (comptime bun.Environment.allow_assert) {

View File

@@ -172,6 +172,10 @@ pub const ZstdReaderArrayList = struct {
}
pub fn readAll(this: *ZstdReaderArrayList, is_done: bool) !void {
return this.readAllWithLimit(is_done, null);
}
pub fn readAllWithLimit(this: *ZstdReaderArrayList, is_done: bool, max_output_size: ?usize) !void {
defer this.list_ptr.* = this.list;
if (this.state == .End or this.state == .Error) return;
@@ -221,6 +225,14 @@ pub const ZstdReaderArrayList = struct {
this.total_in += bytes_read;
this.total_out += bytes_written;
// Check decompressed output size limit
if (max_output_size) |limit| {
if (this.total_out > limit) {
this.state = .Error;
return error.DecompressionOutputTooLarge;
}
}
if (rc == 0) {
// Frame is complete
this.state = .Uninitialized; // Reset state since frame is complete

View File

@@ -104,10 +104,14 @@ pub const Decompressor = union(enum) {
}
pub fn readAll(this: *Decompressor, is_done: bool) !void {
return this.readAllWithLimit(is_done, null);
}
pub fn readAllWithLimit(this: *Decompressor, is_done: bool, max_output_size: ?usize) !void {
switch (this.*) {
.zlib => |zlib| try zlib.readAll(is_done),
.brotli => |brotli| try brotli.readAll(is_done),
.zstd => |reader| try reader.readAll(is_done),
.zlib => |zlib| try zlib.readAllWithLimit(is_done, max_output_size),
.brotli => |brotli| try brotli.readAllWithLimit(is_done, max_output_size),
.zstd => |reader| try reader.readAllWithLimit(is_done, max_output_size),
.none => {},
}
}

View File

@@ -115,6 +115,12 @@ pub fn isDone(this: *InternalState) bool {
return this.flags.received_last_chunk;
}
/// Maximum decompressed output size for HTTP responses (128 MB).
/// Matches the WebSocket MAX_DECOMPRESSED_SIZE limit.
/// This guards against responses with disproportionate compression ratios
/// that would otherwise cause unbounded memory growth during decompression.
const max_http_decompressed_size: usize = 128 * 1024 * 1024;
pub fn decompressBytes(this: *InternalState, buffer: []const u8, body_out_str: *MutableString, is_final_chunk: bool) !void {
defer this.compressed_body.reset();
var gzip_timer: std.time.Timer = undefined;
@@ -145,6 +151,9 @@ pub fn decompressBytes(this: *InternalState, buffer: []const u8, body_out_str: *
const result = deflater.decompressor.decompress(buffer, body_out_str.list.allocatedSlice(), .gzip);
if (result.status == .success) {
if (result.written > max_http_decompressed_size) {
return error.DecompressionOutputTooLarge;
}
body_out_str.list.items.len = result.written;
still_needs_to_decompress = false;
}
@@ -160,6 +169,9 @@ pub fn decompressBytes(this: *InternalState, buffer: []const u8, body_out_str: *
});
if (result.status == .success) {
if (result.written > max_http_decompressed_size) {
return error.DecompressionOutputTooLarge;
}
try body_out_str.list.ensureTotalCapacityPrecise(body_out_str.allocator, result.written);
body_out_str.list.appendSliceAssumeCapacity(deflater.shared_buffer[0..result.written]);
still_needs_to_decompress = false;
@@ -177,7 +189,7 @@ pub fn decompressBytes(this: *InternalState, buffer: []const u8, body_out_str: *
try this.decompressor.updateBuffers(this.encoding, buffer, body_out_str);
this.decompressor.readAll(this.isDone()) catch |err| {
this.decompressor.readAllWithLimit(this.isDone(), max_http_decompressed_size) catch |err| {
if (this.isDone() or error.ShortRead != err) {
Output.prettyErrorln("<r><red>Decompression error: {s}<r>", .{bun.asByteSlice(@errorName(err))});
Output.flush();

View File

@@ -301,6 +301,7 @@ pub const ZlibError = error{
InvalidArgument,
ZlibError,
ShortRead,
DecompressionOutputTooLarge,
};
const ZlibAllocator = struct {
@@ -431,6 +432,10 @@ pub const ZlibReaderArrayList = struct {
}
pub fn readAll(this: *ZlibReader, is_done: bool) ZlibError!void {
return this.readAllWithLimit(is_done, null);
}
pub fn readAllWithLimit(this: *ZlibReader, is_done: bool, max_output_size: ?usize) ZlibError!void {
defer {
if (this.list.items.len > this.zlib.total_out) {
this.list.shrinkRetainingCapacity(this.zlib.total_out);
@@ -480,6 +485,14 @@ pub const ZlibReaderArrayList = struct {
const rc = inflate(&this.zlib, FlushValue.NoFlush);
this.state = State.Inflating;
// Check decompressed output size limit
if (max_output_size) |limit| {
if (this.zlib.total_out > limit) {
this.state = State.Error;
return error.DecompressionOutputTooLarge;
}
}
switch (rc) {
ReturnCode.StreamEnd => {
this.end();

View File

@@ -0,0 +1,145 @@
import { serve } from "bun";
import { describe, expect, test } from "bun:test";
import { brotliCompressSync } from "node:zlib";
/**
 * Tests that HTTP response decompression enforces output size limits.
 * This hardens the decompression path against responses with
 * disproportionate compression ratios ("decompression bombs") that would
 * otherwise cause unbounded memory growth.
 *
 * The limit is 128 MB, matching the WebSocket decompression limit.
 */

// 128 MB + 1 byte, just over the limit.
const OVER_LIMIT_SIZE = 128 * 1024 * 1024 + 1;

// Buffer.alloc() zero-fills, and all-zero data compresses extremely well —
// a few hundred bytes compressed for 128 MB+ decompressed — which is exactly
// the disproportionate ratio the limit is meant to catch.
const overLimitData = Buffer.alloc(OVER_LIMIT_SIZE);

// Pre-compute compressed payloads at module scope to avoid test timeouts.
// gzip/deflate/zstd are fast; brotli is slower but still manageable here.
// NOTE: the 128 MB source buffer stays reachable through `overLimitData`
// until the module is unloaded; there is no way to explicitly free it, and
// overwriting its contents would not release the memory.
const compressedPayloads = {
  gzip: Bun.gzipSync(overLimitData),
  deflate: Bun.deflateSync(overLimitData),
  zstd: Bun.zstdCompressSync(overLimitData),
  br: brotliCompressSync(overLimitData),
};
describe("fetch decompression output limits", () => {
  for (const encoding of ["gzip", "br", "zstd", "deflate"] as const) {
    test(`rejects ${encoding} response exceeding decompression limit`, async () => {
      const compressed = compressedPayloads[encoding];

      // Sanity-check the compression ratio: the on-the-wire payload must be
      // far smaller than what it decompresses to, or this test proves nothing
      // about disproportionate-ratio protection.
      expect(compressed.length).toBeLessThan(1024 * 1024);

      using server = serve({
        port: 0,
        fetch() {
          return new Response(compressed, {
            headers: {
              "Content-Encoding": encoding,
              "Content-Type": "application/octet-stream",
              "Content-Length": compressed.length.toString(),
            },
          });
        },
      });

      // BUGFIX: previously this called expect.unreachable() inside the try
      // block with a catch that only checked `expect(e).toBeDefined()`.
      // expect.unreachable() works by throwing, so its own throw was swallowed
      // by the catch and the test passed even when the limit was NOT enforced.
      // Capture the error and assert on it after the try/catch instead, so the
      // test fails when decompression unexpectedly succeeds.
      let caught: unknown;
      try {
        const resp = await fetch(server.url);
        await resp.arrayBuffer();
      } catch (e) {
        caught = e;
      }
      expect(caught).toBeDefined();
    }, 30_000);

    test(`allows ${encoding} response within decompression limit`, async () => {
      // 1 MB of data — well within the 128 MB limit.
      const originalData = Buffer.alloc(1024 * 1024, 0x41);
      let compressed: Uint8Array;
      switch (encoding) {
        case "gzip":
          compressed = Bun.gzipSync(originalData);
          break;
        case "br":
          compressed = brotliCompressSync(originalData);
          break;
        case "zstd":
          compressed = Bun.zstdCompressSync(originalData);
          break;
        case "deflate":
          compressed = Bun.deflateSync(originalData);
          break;
      }

      using server = serve({
        port: 0,
        fetch() {
          return new Response(compressed, {
            headers: {
              "Content-Encoding": encoding,
              "Content-Type": "application/octet-stream",
              "Content-Length": compressed.length.toString(),
            },
          });
        },
      });

      const resp = await fetch(server.url);
      const body = await resp.arrayBuffer();
      // The full decompressed body must come through untouched.
      expect(body.byteLength).toBe(originalData.length);
    });
  }

  test("allows gzip response exactly at the limit", async () => {
    // Boundary case: exactly 128 MB must be allowed — the limit check is
    // `> limit`, not `>= limit`.
    const exactLimitSize = 128 * 1024 * 1024;
    const data = Buffer.alloc(exactLimitSize);
    const compressed = Bun.gzipSync(data);

    using server = serve({
      port: 0,
      fetch() {
        return new Response(compressed, {
          headers: {
            "Content-Encoding": "gzip",
            "Content-Type": "application/octet-stream",
            "Content-Length": compressed.length.toString(),
          },
        });
      },
    });

    const resp = await fetch(server.url);
    const body = await resp.arrayBuffer();
    expect(body.byteLength).toBe(exactLimitSize);
  }, 30_000);

  test("decompress: false bypasses the limit", async () => {
    // When decompress is false, the body is returned as raw compressed bytes
    // and no decompression (hence no output-size limit) applies.
    const compressed = compressedPayloads.gzip;

    using server = serve({
      port: 0,
      fetch() {
        return new Response(compressed, {
          headers: {
            "Content-Encoding": "gzip",
            "Content-Type": "application/octet-stream",
            "Content-Length": compressed.length.toString(),
          },
        });
      },
    });

    const resp = await fetch(server.url, { decompress: false });
    const body = await resp.arrayBuffer();
    // Should get the raw compressed bytes back, byte-for-byte in length.
    expect(body.byteLength).toBe(compressed.length);
  }, 30_000);
});