mirror of https://github.com/oven-sh/bun
synced 2026-02-19 23:31:45 +00:00

Compare commits
7 Commits
claude/fix...claude/elf
| Author | SHA1 | Date |
|---|---|---|
| | dcb816e899 | |
| | 919939762a | |
| | 24d6f5ea7b | |
| | 39801b1a9a | |
| | b9ac0a5b42 | |
| | 09198b16d8 | |
| | b7d505b6c1 | |
@@ -36,7 +36,7 @@ namespace uWS {
constexpr uint64_t STATE_IS_ERROR = ~0ull;//0xFFFFFFFFFFFFFFFF;
constexpr uint64_t STATE_SIZE_OVERFLOW = 0x0Full << (sizeof(uint64_t) * 8 - 8);//0x0F00000000000000;

inline uint64_t chunkSize(uint64_t state) {
inline unsigned int chunkSize(uint64_t state) {
return state & STATE_SIZE_MASK;
}

@@ -135,7 +135,7 @@ namespace uWS {
// short read
}

inline void decChunkSize(uint64_t &state, uint64_t by) {
inline void decChunkSize(uint64_t &state, unsigned int by) {

//unsigned int bits = state & STATE_IS_CHUNKED;

@@ -204,7 +204,7 @@ namespace uWS {
}

// do we have data to emit all?
uint64_t remaining = chunkSize(state);
unsigned int remaining = chunkSize(state);
if (data.length() >= remaining) {
// emit all but 2 bytes then reset state to 0 and goto beginning
// not fin
@@ -244,7 +244,7 @@ namespace uWS {
} else {
/* We will consume all our input data */
std::string_view emitSoon;
uint64_t size = chunkSize(state);
unsigned int size = chunkSize(state);
size_t len = data.length();
if (size > 2) {
uint64_t maximalAppEmit = size - 2;
@@ -280,7 +280,7 @@ namespace uWS {
return std::nullopt;
}
}
decChunkSize(state, (uint64_t) len);
decChunkSize(state, (unsigned int) len);
state |= STATE_IS_CHUNKED;
data.remove_prefix(len);
if (emitSoon.length()) {
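The pairs above switch `chunkSize`, `decChunkSize`, and their call sites between `uint64_t` and `unsigned int`. A self-contained sketch of why the width matters (hypothetical helper name, not uWS code): a hex chunk size just over 2^32 silently wraps when narrowed to a 32-bit integer, which is exactly the truncation exercised by the deleted `100000005` test at the end of this compare.

```cpp
#include <cstdint>
#include <cstdio>
#include <string>

// Hypothetical helper: parse a chunked-encoding size field as a full 64-bit value.
static uint64_t parseHexChunkSize(const std::string& hex) {
    return std::stoull(hex, nullptr, 16);
}

int main() {
    const uint64_t full = parseHexChunkSize("100000005"); // 4 GiB + 5
    // On the common LP64/LLP64 models, unsigned int is 32 bits, so narrowing
    // keeps only the low 32 bits: 0x100000005 -> 5.
    const unsigned int narrowed = static_cast<unsigned int>(full);

    // full     = 4294967301 -> a parser storing this waits for ~4 GiB of body
    // narrowed = 5          -> a parser storing this consumes 5 bytes and then
    //                          treats the remaining bytes as a new request
    std::printf("full=%llu narrowed=%u\n", (unsigned long long)full, narrowed);
    return 0;
}
```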
@@ -154,6 +154,22 @@ pub const StandaloneModuleGraph = struct {
}
};

const ELF = struct {
pub extern "C" fn Bun__getStandaloneModuleGraphELFVaddr() ?*align(1) u64;

pub fn getData() ?[]const u8 {
const vaddr = (Bun__getStandaloneModuleGraphELFVaddr() orelse return null).*;
if (vaddr == 0) return null;
// BUN_COMPILED.size holds the virtual address of the appended data.
// The kernel mapped it via PT_LOAD, so we can dereference directly.
// Format at target: [u64 payload_len][payload bytes]
const target: [*]const u8 = @ptrFromInt(vaddr);
const payload_len = std.mem.readInt(u64, target[0..8], .little);
if (payload_len < 8) return null;
return target[8..][0..payload_len];
}
};

pub const File = struct {
name: []const u8 = "",
loader: bun.options.Loader,
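`ELF.getData()` treats `BUN_COMPILED.size` as a virtual address and reads a length-prefixed blob there. A small sketch of that `[u64 little-endian length][payload]` layout against an ordinary byte buffer rather than a kernel-mapped segment (buffer and function names here are illustrative, not bun APIs):

```cpp
#include <cstdint>
#include <cstring>
#include <string_view>
#include <vector>

// Build a stand-in for the appended .bun data: [u64 LE length][payload bytes].
static std::vector<uint8_t> makeBlob(std::string_view payload) {
    std::vector<uint8_t> blob(8 + payload.size());
    const uint64_t len = payload.size();
    std::memcpy(blob.data(), &len, 8); // little-endian on x64/arm64, bun's only targets
    std::memcpy(blob.data() + 8, payload.data(), payload.size());
    return blob;
}

// Mirrors getData(): read the u64 header, then slice the payload that follows it.
static std::string_view readBlob(const uint8_t* target) {
    uint64_t payload_len = 0;
    std::memcpy(&payload_len, target, 8);
    return {reinterpret_cast<const char*>(target) + 8,
            static_cast<size_t>(payload_len)};
}

int main() {
    const auto blob = makeBlob("standalone module graph bytes");
    return readBlob(blob.data()).size() == 29 ? 0 : 1;
}
```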
@@ -885,6 +901,56 @@ pub const StandaloneModuleGraph = struct {
}
return cloned_executable_fd;
},
.linux => {
// ELF section approach: find .bun section and expand it
const input_result = bun.sys.File.readToEnd(.{ .handle = cloned_executable_fd }, bun.default_allocator);
if (input_result.err) |err| {
Output.prettyErrorln("Error reading executable: {f}", .{err});
cleanup(zname, cloned_executable_fd);
return bun.invalid_fd;
}

const elf_file = bun.elf.ElfFile.init(bun.default_allocator, input_result.bytes.items) catch |err| {
Output.prettyErrorln("Error initializing ELF file: {}", .{err});
cleanup(zname, cloned_executable_fd);
return bun.invalid_fd;
};
defer elf_file.deinit();

elf_file.writeBunSection(bytes) catch |err| {
Output.prettyErrorln("Error writing .bun section to ELF: {}", .{err});
cleanup(zname, cloned_executable_fd);
return bun.invalid_fd;
};
input_result.bytes.deinit();

switch (Syscall.setFileOffset(cloned_executable_fd, 0)) {
.err => |err| {
Output.prettyErrorln("Error seeking to start of temporary file: {f}", .{err});
cleanup(zname, cloned_executable_fd);
return bun.invalid_fd;
},
else => {},
}

// Write the modified ELF data back to the file
const write_file = bun.sys.File{ .handle = cloned_executable_fd };
switch (write_file.writeAll(elf_file.data.items)) {
.err => |err| {
Output.prettyErrorln("Error writing ELF file: {f}", .{err});
cleanup(zname, cloned_executable_fd);
return bun.invalid_fd;
},
.result => {},
}
// Truncate the file to the exact size of the modified ELF
_ = Syscall.ftruncate(cloned_executable_fd, @intCast(elf_file.data.items.len));

if (comptime !Environment.isWindows) {
_ = bun.c.fchmod(cloned_executable_fd.native(), 0o777);
}
return cloned_executable_fd;
},
else => {
var total_byte_count: usize = undefined;
if (Environment.isWindows) {
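The `.linux` branch rewrites the cloned temporary executable in place: read it fully, let `ElfFile` append the payload, seek back to offset 0, write the new image, and truncate to its exact length. A plain POSIX sketch of that rewrite step, assuming `fd` already refers to the cloned file and `image` holds the modified ELF bytes (names are placeholders, not bun's wrappers):

```cpp
#include <sys/stat.h>
#include <unistd.h>
#include <cerrno>
#include <cstdint>
#include <vector>

// Hypothetical: overwrite an open file with a freshly modified ELF image,
// mirroring the seek(0) + writeAll + ftruncate + fchmod sequence above.
static int rewriteExecutable(int fd, const std::vector<uint8_t>& image) {
    if (lseek(fd, 0, SEEK_SET) < 0) return -1;            // seek to start

    size_t written = 0;
    while (written < image.size()) {                       // write the whole buffer
        const ssize_t n = write(fd, image.data() + written, image.size() - written);
        if (n < 0) {
            if (errno == EINTR) continue;
            return -1;
        }
        written += static_cast<size_t>(n);
    }

    if (ftruncate(fd, static_cast<off_t>(image.size())) < 0) return -1; // drop any old tail
    if (fchmod(fd, 0777) < 0) return -1;                   // keep it executable
    return 0;
}
```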
@@ -1261,99 +1327,23 @@ pub const StandaloneModuleGraph = struct {
return try fromBytesAlloc(allocator, @constCast(pe_bytes), offsets);
}

// Do not invoke libuv here.
const self_exe = openSelf() catch return null;
defer self_exe.close();

var trailer_bytes: [4096]u8 = undefined;
std.posix.lseek_END(self_exe.cast(), -4096) catch return null;

var read_amount: usize = 0;
while (read_amount < trailer_bytes.len) {
switch (Syscall.read(self_exe, trailer_bytes[read_amount..])) {
.result => |read| {
if (read == 0) return null;

read_amount += read;
},
.err => {
return null;
},
if (comptime Environment.isLinux) {
const elf_bytes = ELF.getData() orelse return null;
if (elf_bytes.len < @sizeOf(Offsets) + trailer.len) {
Output.debugWarn("bun standalone module graph is too small to be valid", .{});
return null;
}
}

if (read_amount < trailer.len + @sizeOf(usize) + @sizeOf(Offsets))
// definitely missing data
return null;

var end = @as([]u8, &trailer_bytes).ptr + read_amount - @sizeOf(usize);
const total_byte_count: usize = @as(usize, @bitCast(end[0..8].*));

if (total_byte_count > std.math.maxInt(u32) or total_byte_count < 4096) {
// sanity check: the total byte count should never be more than 4 GB
// bun is at least like 30 MB so if it reports a size less than 4096 bytes then something is wrong
return null;
}
end -= trailer.len;

if (!bun.strings.hasPrefixComptime(end[0..trailer.len], trailer)) {
// invalid trailer
return null;
}

end -= @sizeOf(Offsets);

const offsets: Offsets = std.mem.bytesAsValue(Offsets, end[0..@sizeOf(Offsets)]).*;
if (offsets.byte_count >= total_byte_count) {
// if we hit this branch then the file is corrupted and we should just give up
return null;
}

var to_read = try bun.default_allocator.alloc(u8, offsets.byte_count);
var to_read_from = to_read;

// Reading the data and making sure it's page-aligned + won't crash due
// to out of bounds using mmap() is very complicated.
// we just read the whole thing into memory for now.
// at the very least
// if you have not a ton of code, we only do a single read() call
if (Environment.allow_assert or offsets.byte_count > 1024 * 3) {
const offset_from_end = trailer_bytes.len - (@intFromPtr(end) - @intFromPtr(@as([]u8, &trailer_bytes).ptr));
std.posix.lseek_END(self_exe.cast(), -@as(i64, @intCast(offset_from_end + offsets.byte_count))) catch return null;

if (comptime Environment.allow_assert) {
// actually we just want to verify this logic is correct in development
if (offsets.byte_count <= 1024 * 3) {
to_read_from = try bun.default_allocator.alloc(u8, offsets.byte_count);
}
}

var remain = to_read_from;
while (remain.len > 0) {
switch (Syscall.read(self_exe, remain)) {
.result => |read| {
if (read == 0) return null;

remain = remain[read..];
},
.err => {
bun.default_allocator.free(to_read);
return null;
},
}
const elf_bytes_slice = elf_bytes[elf_bytes.len - @sizeOf(Offsets) - trailer.len ..];
const trailer_bytes = elf_bytes[elf_bytes.len - trailer.len ..][0..trailer.len];
if (!bun.strings.eqlComptime(trailer_bytes, trailer)) {
Output.debugWarn("bun standalone module graph has invalid trailer", .{});
return null;
}
const offsets = std.mem.bytesAsValue(Offsets, elf_bytes_slice).*;
return try fromBytesAlloc(allocator, @constCast(elf_bytes), offsets);
}

if (offsets.byte_count <= 1024 * 3) {
// we already have the bytes
end -= offsets.byte_count;
@memcpy(to_read[0..offsets.byte_count], end[0..offsets.byte_count]);
if (comptime Environment.allow_assert) {
bun.assert(bun.strings.eqlLong(to_read, end[0..offsets.byte_count], true));
}
}

return try fromBytesAlloc(allocator, to_read, offsets);
comptime unreachable;
}
/// Allocates a StandaloneModuleGraph on the heap, populates it from bytes, sets it globally, and returns the pointer.
@@ -1364,107 +1354,6 @@ pub const StandaloneModuleGraph = struct {
return graph_ptr;
}

/// heuristic: `bun build --compile` won't be supported if the name is "bun", "bunx", or "node".
/// this is a cheap way to avoid the extra overhead of opening the executable, and also just makes sense.
fn isBuiltInExe(comptime T: type, argv0: []const T) bool {
if (argv0.len == 0) return false;

if (argv0.len == 3) {
if (bun.strings.eqlComptimeCheckLenWithType(T, argv0, bun.strings.literal(T, "bun"), false)) {
return true;
}
}

if (argv0.len == 4) {
if (bun.strings.eqlComptimeCheckLenWithType(T, argv0, bun.strings.literal(T, "bunx"), false)) {
return true;
}

if (bun.strings.eqlComptimeCheckLenWithType(T, argv0, bun.strings.literal(T, "node"), false)) {
return true;
}
}

if (comptime Environment.isDebug) {
if (bun.strings.eqlComptimeCheckLenWithType(T, argv0, bun.strings.literal(T, "bun-debug"), true)) {
return true;
}
if (bun.strings.eqlComptimeCheckLenWithType(T, argv0, bun.strings.literal(T, "bun-debugx"), true)) {
return true;
}
}

return false;
}

fn openSelf() std.fs.OpenSelfExeError!bun.FileDescriptor {
if (!Environment.isWindows) {
const argv = bun.argv;
if (argv.len > 0) {
if (isBuiltInExe(u8, argv[0])) {
return error.FileNotFound;
}
}
}

switch (Environment.os) {
.linux => {
if (std.fs.openFileAbsoluteZ("/proc/self/exe", .{})) |easymode| {
return .fromStdFile(easymode);
} else |_| {
if (bun.argv.len > 0) {
// The user doesn't have /proc/ mounted, so now we just guess and hope for the best.
var whichbuf: bun.PathBuffer = undefined;
if (bun.which(
&whichbuf,
bun.env_var.PATH.get() orelse return error.FileNotFound,
"",
bun.argv[0],
)) |path| {
return .fromStdFile(try std.fs.cwd().openFileZ(path, .{}));
}
}

return error.FileNotFound;
}
},
.mac => {
// Use of MAX_PATH_BYTES here is valid as the resulting path is immediately
// opened with no modification.
const self_exe_path = try bun.selfExePath();
const file = try std.fs.openFileAbsoluteZ(self_exe_path.ptr, .{});
return .fromStdFile(file);
},
.windows => {
const image_path_unicode_string = std.os.windows.peb().ProcessParameters.ImagePathName;
const image_path = image_path_unicode_string.Buffer.?[0 .. image_path_unicode_string.Length / 2];

var nt_path_buf: bun.WPathBuffer = undefined;
const nt_path = bun.strings.addNTPathPrefixIfNeeded(&nt_path_buf, image_path);

const basename_start = std.mem.lastIndexOfScalar(u16, nt_path, '\\') orelse
return error.FileNotFound;
const basename = nt_path[basename_start + 1 .. nt_path.len - ".exe".len];
if (isBuiltInExe(u16, basename)) {
return error.FileNotFound;
}

return bun.sys.openFileAtWindows(
.cwd(),
nt_path,
.{
.access_mask = w.SYNCHRONIZE | w.GENERIC_READ,
.disposition = w.FILE_OPEN,
.options = w.FILE_SYNCHRONOUS_IO_NONALERT | w.FILE_OPEN_REPARSE_POINT,
},
).unwrap() catch {
return error.FileNotFound;
};
},
.wasm => @compileError("TODO"),
}
}

/// Source map serialization in the bundler is specially designed to be
/// loaded in memory as is. Source contents are compressed with ZSTD to
/// reduce the file size, and mappings are stored as uncompressed VLQ.
@@ -910,6 +910,10 @@ extern "C" void Bun__signpost_emit(os_log_t log, os_signpost_type_t type, os_sig
#undef EMIT_SIGNPOST
#undef FOR_EACH_TRACE_EVENT

#endif // OS(DARWIN) signpost code

#if OS(DARWIN) || defined(__linux__)

#define BLOB_HEADER_ALIGNMENT 16 * 1024

extern "C" {
@@ -919,6 +923,8 @@ struct BlobHeader {
} __attribute__((aligned(BLOB_HEADER_ALIGNMENT)));
}

#if OS(DARWIN)

extern "C" BlobHeader __attribute__((section("__BUN,__bun"))) BUN_COMPILED = { 0, 0 };

extern "C" uint64_t* Bun__getStandaloneModuleGraphMachoLength()
@@ -926,6 +932,17 @@ extern "C" uint64_t* Bun__getStandaloneModuleGraphMachoLength()
return &BUN_COMPILED.size;
}

#else // __linux__

extern "C" BlobHeader __attribute__((section(".bun"), aligned(BLOB_HEADER_ALIGNMENT), used)) BUN_COMPILED = { 0 };

extern "C" uint64_t* Bun__getStandaloneModuleGraphELFVaddr()
{
return &BUN_COMPILED.size;
}

#endif // OS(DARWIN) / __linux__

#elif defined(_WIN32)
// Windows PE section handling
#include <windows.h>
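On Linux the diff pins `BUN_COMPILED` into a dedicated `.bun` section so a post-link step (elf.zig's `writeBunSection`) can find it by name and patch its `size` field. A minimal standalone sketch of that pattern with placeholder names (`.demo`, `DEMO_COMPILED`), compilable with GCC or Clang:

```cpp
#include <cstdint>
#include <cstdio>

// A zero-initialized struct placed in its own named section. In the linked
// binary the section appears in the section header table, so an external tool
// can locate it by name and overwrite this symbol's bytes on disk.
struct DemoHeader {
    uint64_t size;
};

extern "C" DemoHeader
    __attribute__((section(".demo"), aligned(16 * 1024), used))
    DEMO_COMPILED = { 0 };

int main() {
    // Unpatched binaries read 0 here ("no payload"); a patched binary would
    // read the virtual address of the appended data instead.
    std::printf("DEMO_COMPILED.size = %llu\n",
                (unsigned long long)DEMO_COMPILED.size);
    return 0;
}
```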
@@ -3704,6 +3704,7 @@ pub fn freeSensitive(allocator: std.mem.Allocator, slice: anytype) void {

pub const macho = @import("./macho.zig");
pub const pe = @import("./pe.zig");
pub const elf = @import("./elf.zig");
pub const valkey = @import("./valkey/index.zig");
pub const highway = @import("./highway.zig");

src/elf.zig (new file, 230 lines)
@@ -0,0 +1,230 @@
/// ELF file manipulation for `bun build --compile` on Linux.
///
/// Analogous to `macho.zig` (macOS) and `pe.zig` (Windows).
/// Finds the `.bun` ELF section (placed by a linker symbol in c-bindings.cpp)
/// and expands it to hold the standalone module graph data.
///
/// Must work on any host platform (macOS, Windows, Linux) for cross-compilation.
pub const ElfFile = struct {
data: std.array_list.Managed(u8),
allocator: Allocator,

pub fn init(allocator: Allocator, elf_data: []const u8) !*ElfFile {
if (elf_data.len < @sizeOf(Elf64_Ehdr)) return error.InvalidElfFile;

const ehdr = readEhdr(elf_data);

// Validate ELF magic
if (!std.mem.eql(u8, ehdr.e_ident[0..4], "\x7fELF")) return error.InvalidElfFile;

// Must be 64-bit
if (ehdr.e_ident[elf.EI_CLASS] != elf.ELFCLASS64) return error.Not64Bit;

// Must be little-endian (bun only supports x64 + arm64, both LE)
if (ehdr.e_ident[elf.EI_DATA] != elf.ELFDATA2LSB) return error.NotLittleEndian;

var data = try std.array_list.Managed(u8).initCapacity(allocator, elf_data.len);
try data.appendSlice(elf_data);

const self = try allocator.create(ElfFile);
errdefer allocator.destroy(self);

self.* = .{
.data = data,
.allocator = allocator,
};

return self;
}

pub fn deinit(self: *ElfFile) void {
self.data.deinit();
self.allocator.destroy(self);
}

/// Find the `.bun` section and write `payload` to the end of the ELF file,
/// creating a new PT_LOAD segment (from PT_GNU_STACK) to map it. Stores the
/// new segment's vaddr at the original BUN_COMPILED location so the runtime
/// can dereference it directly.
///
/// We always append rather than writing in-place because .bun is in the middle
/// of a PT_LOAD segment — sections like .dynamic, .got, .got.plt come after it,
/// and expanding in-place would invalidate their absolute virtual addresses.
pub fn writeBunSection(self: *ElfFile, payload: []const u8) !void {
const ehdr = readEhdr(self.data.items);
const bun_section_offset = try self.findBunSection(ehdr);
const page_size = pageSize(ehdr);

const header_size: u64 = @sizeOf(u64);
const new_content_size: u64 = header_size + payload.len;
const aligned_new_size = alignUp(new_content_size, page_size);

// Find the highest virtual address across all PT_LOAD segments
var max_vaddr_end: u64 = 0;
const phdr_size = @sizeOf(Elf64_Phdr);
for (0..ehdr.e_phnum) |i| {
const phdr_offset = @as(usize, @intCast(ehdr.e_phoff)) + i * phdr_size;
const phdr = std.mem.bytesAsValue(Elf64_Phdr, self.data.items[phdr_offset..][0..phdr_size]).*;
if (phdr.p_type == elf.PT_LOAD) {
const vaddr_end = phdr.p_vaddr + phdr.p_memsz;
if (vaddr_end > max_vaddr_end) {
max_vaddr_end = vaddr_end;
}
}
}

// The new segment's virtual address: after all existing mappings, page-aligned
const new_vaddr = alignUp(max_vaddr_end, page_size);

// The new data goes at the end of the file, page-aligned
const new_file_offset = alignUp(self.data.items.len, page_size);

// Grow the buffer to hold the new data + section header table after it
const shdr_table_size = @as(u64, ehdr.e_shnum) * @sizeOf(Elf64_Shdr);
const new_shdr_offset = new_file_offset + aligned_new_size;
const total_new_size = new_shdr_offset + shdr_table_size;

const old_file_size = self.data.items.len;
try self.data.ensureTotalCapacity(total_new_size);
self.data.items.len = total_new_size;

// Zero the gap between old file end and new data (alignment padding).
// Without this, uninitialized allocator memory would leak into the output.
if (new_file_offset > old_file_size) {
@memset(self.data.items[old_file_size..new_file_offset], 0);
}

// Copy the section header table to its new location
const old_shdr_offset = ehdr.e_shoff;
bun.memmove(
self.data.items[new_shdr_offset..][0..shdr_table_size],
self.data.items[old_shdr_offset..][0..shdr_table_size],
);

// Update e_shoff to the new section header table location
self.writeEhdrShoff(new_shdr_offset);

// Write the payload at the new location: [u64 LE size][data][zero padding]
std.mem.writeInt(u64, self.data.items[new_file_offset..][0..8], @intCast(payload.len), .little);
@memcpy(self.data.items[new_file_offset + header_size ..][0..payload.len], payload);

// Zero the padding between payload end and section header table
const padding_start = new_file_offset + new_content_size;
if (new_shdr_offset > padding_start) {
@memset(self.data.items[padding_start..new_shdr_offset], 0);
}

// Write the vaddr of the appended data at the ORIGINAL .bun section location
// (where BUN_COMPILED symbol points). At runtime, BUN_COMPILED.size will be
// this vaddr (always non-zero), which the runtime dereferences as a pointer.
// Non-standalone binaries have BUN_COMPILED.size = 0, so 0 means "no data".
std.mem.writeInt(u64, self.data.items[bun_section_offset..][0..8], new_vaddr, .little);

// Find PT_GNU_STACK and convert it to PT_LOAD for the new .bun data.
// PT_GNU_STACK only controls stack executability; on modern kernels the
// stack defaults to non-executable without it, so repurposing is safe.
var found_gnu_stack = false;
for (0..ehdr.e_phnum) |i| {
const phdr_offset = @as(usize, @intCast(ehdr.e_phoff)) + i * phdr_size;
const phdr = std.mem.bytesAsValue(Elf64_Phdr, self.data.items[phdr_offset..][0..phdr_size]).*;

if (phdr.p_type == elf.PT_GNU_STACK) {
// Convert to PT_LOAD
const new_phdr: Elf64_Phdr = .{
.p_type = elf.PT_LOAD,
.p_flags = elf.PF_R, // read-only
.p_offset = new_file_offset,
.p_vaddr = new_vaddr,
.p_paddr = new_vaddr,
.p_filesz = aligned_new_size,
.p_memsz = aligned_new_size,
.p_align = page_size,
};
@memcpy(self.data.items[phdr_offset..][0..phdr_size], std.mem.asBytes(&new_phdr));
found_gnu_stack = true;
break;
}
}

if (!found_gnu_stack) {
return error.NoGnuStackSegment;
}
}

pub fn write(self: *const ElfFile, writer: anytype) !void {
try writer.writeAll(self.data.items);
}

// --- Internal helpers ---

/// Returns the file offset (sh_offset) of the `.bun` section.
fn findBunSection(self: *const ElfFile, ehdr: Elf64_Ehdr) !u64 {
const shdr_size = @sizeOf(Elf64_Shdr);
const shdr_table_offset = ehdr.e_shoff;
const shnum = ehdr.e_shnum;

if (shnum == 0) return error.BunSectionNotFound;
if (shdr_table_offset + @as(u64, shnum) * shdr_size > self.data.items.len)
return error.InvalidElfFile;

// Read the .shstrtab section to get section names
const shstrtab_shdr = self.readShdr(shdr_table_offset, ehdr.e_shstrndx);
const strtab_offset = shstrtab_shdr.sh_offset;
const strtab_size = shstrtab_shdr.sh_size;

if (strtab_offset + strtab_size > self.data.items.len) return error.InvalidElfFile;
const strtab = self.data.items[strtab_offset..][0..strtab_size];

// Search for .bun section
for (0..shnum) |i| {
const shdr = self.readShdr(shdr_table_offset, @intCast(i));
const name_offset = shdr.sh_name;

if (name_offset < strtab.len) {
const name = std.mem.sliceTo(strtab[name_offset..], 0);
if (std.mem.eql(u8, name, ".bun")) {
return shdr.sh_offset;
}
}
}

return error.BunSectionNotFound;
}

fn readShdr(self: *const ElfFile, table_offset: u64, index: u16) Elf64_Shdr {
const offset = table_offset + @as(u64, index) * @sizeOf(Elf64_Shdr);
return std.mem.bytesAsValue(Elf64_Shdr, self.data.items[offset..][0..@sizeOf(Elf64_Shdr)]).*;
}

fn writeEhdrShoff(self: *ElfFile, new_shoff: u64) void {
// e_shoff is at offset 40 in Elf64_Ehdr
std.mem.writeInt(u64, self.data.items[40..][0..8], new_shoff, .little);
}

fn pageSize(ehdr: Elf64_Ehdr) u64 {
return switch (ehdr.e_machine) {
.AARCH64, .PPC64 => 0x10000, // 64KB
else => 0x1000, // 4KB
};
}
};

fn readEhdr(data: []const u8) Elf64_Ehdr {
return std.mem.bytesAsValue(Elf64_Ehdr, data[0..@sizeOf(Elf64_Ehdr)]).*;
}

fn alignUp(value: u64, alignment: u64) u64 {
if (alignment == 0) return value;
const mask = alignment - 1;
return (value + mask) & ~mask;
}

const bun = @import("bun");

const std = @import("std");
const Allocator = std.mem.Allocator;

const elf = std.elf;
const Elf64_Ehdr = elf.Elf64_Ehdr;
const Elf64_Phdr = elf.Elf64_Phdr;
const Elf64_Shdr = elf.Elf64_Shdr;
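`writeBunSection` places the appended blob with page-aligned arithmetic: the payload goes after the current end of the file, the new PT_LOAD segment is mapped just past the highest existing PT_LOAD end, and the section header table moves behind the blob. A compact sketch of only that placement math, with made-up input values and `alignUp` transliterated from the Zig helper above:

```cpp
#include <cstdint>
#include <cstdio>

// Same rounding as elf.zig's alignUp: round up to a power-of-two alignment.
static uint64_t alignUp(uint64_t value, uint64_t alignment) {
    if (alignment == 0) return value;
    const uint64_t mask = alignment - 1;
    return (value + mask) & ~mask;
}

int main() {
    // Illustrative inputs only.
    const uint64_t page_size     = 0x1000;     // 4 KiB pages (x86_64)
    const uint64_t max_vaddr_end = 0x1234567;  // end of the highest PT_LOAD mapping
    const uint64_t old_file_size = 90000000;   // executable size before patching
    const uint64_t payload_len   = 20000;      // standalone module graph bytes
    const uint64_t shdr_bytes    = 40 * 64;    // e_shnum * sizeof(Elf64_Shdr), made up

    const uint64_t new_content_size = 8 + payload_len;                    // [u64 len][payload]
    const uint64_t aligned_new_size = alignUp(new_content_size, page_size);
    const uint64_t new_vaddr        = alignUp(max_vaddr_end, page_size);  // segment p_vaddr
    const uint64_t new_file_offset  = alignUp(old_file_size, page_size);  // segment p_offset
    const uint64_t new_shdr_offset  = new_file_offset + aligned_new_size; // e_shoff after patch
    const uint64_t total_new_size   = new_shdr_offset + shdr_bytes;

    std::printf("p_offset=%llu p_vaddr=%llu p_filesz=%llu file_size=%llu\n",
                (unsigned long long)new_file_offset, (unsigned long long)new_vaddr,
                (unsigned long long)aligned_new_size, (unsigned long long)total_new_size);
    return 0;
}
```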
@@ -260,14 +260,35 @@ devTest("hmr handles rapid consecutive edits", {
await Bun.sleep(1);
}

// Wait event-driven for "render 10" to appear. Intermediate renders may
// be skipped (watcher coalescing) and the final render may fire multiple
// times (duplicate reloads), so we just listen for any occurrence.
const finalRender = "render 10";
while (true) {
const message = await client.getStringMessage();
if (message === finalRender) break;
if (typeof message === "string" && message.includes("HMR_ERROR")) {
throw new Error("Unexpected HMR error message: " + message);
}
}
await new Promise<void>((resolve, reject) => {
const check = () => {
for (const msg of client.messages) {
if (typeof msg === "string" && msg.includes("HMR_ERROR")) {
cleanup();
reject(new Error("Unexpected HMR error message: " + msg));
return;
}
if (msg === finalRender) {
cleanup();
resolve();
return;
}
}
};
const cleanup = () => {
client.off("message", check);
};
client.on("message", check);
// Check messages already buffered.
check();
});
// Drain all buffered messages — intermediate renders and possible
// duplicates of the final render are expected and harmless.
client.messages.length = 0;

const hmrErrors = await client.js`return globalThis.__hmrErrors ? [...globalThis.__hmrErrors] : [];`;
if (hmrErrors.length > 0) {
@@ -1,5 +1,6 @@
import { describe, expect, test } from "bun:test";
import { isArm64, isLinux, isMacOS, isMusl, isWindows, tempDir } from "harness";
import { chmodSync } from "node:fs";
import { join } from "path";

describe("Bun.build compile", () => {
@@ -188,4 +189,156 @@ describe("compiled binary validity", () => {
});
});

if (isLinux) {
describe("ELF section", () => {
test("compiled binary runs with execute-only permissions", async () => {
using dir = tempDir("build-compile-exec-only", {
"app.js": `console.log("exec-only-output");`,
});

const outfile = join(dir + "", "app-exec-only");
const result = await Bun.build({
entrypoints: [join(dir + "", "app.js")],
compile: {
outfile,
},
});

expect(result.success).toBe(true);

chmodSync(result.outputs[0].path, 0o111);

await using proc = Bun.spawn({
cmd: [result.outputs[0].path],
stdout: "pipe",
stderr: "pipe",
});

const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

expect(stdout.trim()).toBe("exec-only-output");
expect(exitCode).toBe(0);
});

test("compiled binary with large payload runs correctly", async () => {
// Generate a string payload >16KB to exceed the initial .bun section allocation
// (BUN_COMPILED is aligned to 16KB). This forces the expansion path in elf.zig
// which appends data to the end of the file and converts PT_GNU_STACK to PT_LOAD.
const largeString = Buffer.alloc(20000, "x").toString();
using dir = tempDir("build-compile-large-payload", {
"app.js": `const data = "${largeString}"; console.log("large-payload-" + data.length);`,
});

const outfile = join(dir + "", "app-large");
const result = await Bun.build({
entrypoints: [join(dir + "", "app.js")],
compile: {
outfile,
},
});

expect(result.success).toBe(true);

await using proc = Bun.spawn({
cmd: [result.outputs[0].path],
stdout: "pipe",
stderr: "pipe",
});

const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

expect(stdout).toContain("large-payload-20000");
expect(exitCode).toBe(0);
});

test("compiled binary with large payload runs with execute-only permissions", async () => {
// Same as above but also verifies execute-only works with the expansion path
const largeString = Buffer.alloc(20000, "y").toString();
using dir = tempDir("build-compile-large-exec-only", {
"app.js": `const data = "${largeString}"; console.log("large-exec-only-" + data.length);`,
});

const outfile = join(dir + "", "app-large-exec-only");
const result = await Bun.build({
entrypoints: [join(dir + "", "app.js")],
compile: {
outfile,
},
});

expect(result.success).toBe(true);

chmodSync(result.outputs[0].path, 0o111);

await using proc = Bun.spawn({
cmd: [result.outputs[0].path],
stdout: "pipe",
stderr: "pipe",
});

const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

expect(stdout).toContain("large-exec-only-20000");
expect(exitCode).toBe(0);
});

test("compiled binary has .bun ELF section", async () => {
using dir = tempDir("build-compile-elf-section", {
"app.js": `console.log("elf-section-test");`,
});

const outfile = join(dir + "", "app-elf-section");
const result = await Bun.build({
entrypoints: [join(dir + "", "app.js")],
compile: {
outfile,
},
});

expect(result.success).toBe(true);

// Verify .bun ELF section exists by reading section headers
const file = Bun.file(result.outputs[0].path);
const bytes = new Uint8Array(await file.arrayBuffer());

// Parse ELF header to find section headers
const view = new DataView(bytes.buffer);
// e_shoff at offset 40 (little-endian u64)
const shoff = Number(view.getBigUint64(40, true));
// e_shentsize at offset 58
const shentsize = view.getUint16(58, true);
// e_shnum at offset 60
const shnum = view.getUint16(60, true);
// e_shstrndx at offset 62
const shstrndx = view.getUint16(62, true);

// Read .shstrtab section header to get string table
const strtabOff = shoff + shstrndx * shentsize;
const strtabFileOffset = Number(view.getBigUint64(strtabOff + 24, true));
const strtabSize = Number(view.getBigUint64(strtabOff + 32, true));

const decoder = new TextDecoder();
let foundBunSection = false;
for (let i = 0; i < shnum; i++) {
const hdrOff = shoff + i * shentsize;
const nameIdx = view.getUint32(hdrOff, true);
if (nameIdx < strtabSize) {
// Read null-terminated string from strtab
let end = strtabFileOffset + nameIdx;
while (end < bytes.length && bytes[end] !== 0) end++;
const name = decoder.decode(bytes.slice(strtabFileOffset + nameIdx, end));
if (name === ".bun") {
foundBunSection = true;
// Verify the section has non-zero size
const shSize = Number(view.getBigUint64(hdrOff + 32, true));
expect(shSize).toBeGreaterThan(0);
break;
}
}
}
expect(foundBunSection).toBe(true);
});
});
}

// file command test works well
@@ -36,8 +36,8 @@
"std.enums.tagName(": 2,
"std.fs.Dir": 164,
"std.fs.File": 93,
"std.fs.cwd": 110,
"std.fs.openFileAbsolute": 10,
"std.fs.cwd": 109,
"std.fs.openFileAbsolute": 8,
"std.log": 1,
"std.mem.indexOfAny(u8": 0,
"std.unicode": 27,
@@ -1,278 +0,0 @@
import { describe, expect, test } from "bun:test";
import net from "net";

describe("chunked encoding size truncation", () => {
test("does not truncate chunk sizes that exceed 32-bit range", async () => {
// A chunk size of 0x100000002 (4GB + 2) would be truncated to 2
// if chunkSize() returns unsigned int (32-bit).
// With the fix, the parser correctly stores the full 64-bit value,
// so it waits for ~4GB of data rather than reading just 2 bytes.
// This means the smuggled request after the 2 bytes is NOT parsed.
let smuggled = false;
let requestCount = 0;

await using server = Bun.serve({
port: 0,
async fetch(req) {
requestCount++;
const url = new URL(req.url);
if (url.pathname === "/smuggled") {
smuggled = true;
}
try {
await req.text();
} catch {
// body read failure is acceptable
}
return new Response("OK");
},
});

const client = net.connect(server.port, "127.0.0.1");

// The attack payload: chunk size = 0x100000002.
// If truncated to 32 bits -> size = 2, parser reads "AB", then sees
// "GET /smuggled" as a new pipelined HTTP request (VULNERABLE).
// If correctly stored as 4GB+2, parser waits for more data (SAFE).
const smuggleAttempt =
"POST / HTTP/1.1\r\n" +
"Host: localhost\r\n" +
"Transfer-Encoding: chunked\r\n" +
"\r\n" +
"100000002\r\n" +
"AB\r\n" +
"0\r\n" +
"\r\n" +
"GET /smuggled HTTP/1.1\r\n" +
"Host: localhost\r\n" +
"\r\n";

await new Promise<void>(resolve => {
client.on("error", () => {
// Connection error is acceptable (server may close it)
resolve();
});
client.on("close", () => {
resolve();
});
// We give it a short window - if the parser truncated the size,
// the smuggled request would be processed almost instantly
client.setTimeout(2000, () => {
client.destroy();
resolve();
});
client.write(smuggleAttempt);
});

// The critical assertion: the /smuggled request must NEVER be processed
expect(smuggled).toBe(false);
// At most one request should have been handled (the POST /)
// With the fix, even that one may not complete since the parser
// is waiting for 4GB of chunk data that will never arrive
expect(requestCount).toBeLessThanOrEqual(1);
});

test("rejects chunk sizes in the overflow detection range", async () => {
// STATE_SIZE_OVERFLOW = 0x0F00000000000000. When chunkSize() returned
// unsigned int (32-bit), the AND with STATE_SIZE_OVERFLOW always yielded 0
// because STATE_SIZE_OVERFLOW has no bits in the lower 32 positions.
// With uint64_t return type, the overflow check works correctly.

await using server = Bun.serve({
port: 0,
async fetch(req) {
try {
await req.text();
} catch {
// body read failure is acceptable
}
return new Response("OK");
},
});

const client = net.connect(server.port, "127.0.0.1");

// Chunk size that should trigger overflow: 16 hex digits (max uint64)
// This exercises the STATE_SIZE_OVERFLOW check in consumeHexNumber()
const maliciousRequest =
"POST / HTTP/1.1\r\n" +
"Host: localhost\r\n" +
"Transfer-Encoding: chunked\r\n" +
"\r\n" +
"FFFFFFFFFFFFFFFF\r\n" +
"AB\r\n" +
"0\r\n" +
"\r\n";

const response = await new Promise<string>(resolve => {
let data = "";
client.on("error", () => {
resolve(data);
});
client.on("data", chunk => {
data += chunk.toString();
});
client.on("close", () => {
resolve(data);
});
client.setTimeout(5000, () => {
client.destroy();
resolve(data);
});
client.write(maliciousRequest);
});

// Must be rejected - connection should be closed with 400
expect(response).toContain("400");
});

test("overflow check catches values above 0x0F00000000000000", async () => {
// A chunk size of 0x1000000000000001 (> STATE_SIZE_OVERFLOW threshold)
// should be caught by the overflow check.
// Before the fix, chunkSize() returned 32-bit, making this check dead code.

await using server = Bun.serve({
port: 0,
async fetch(req) {
try {
await req.text();
} catch {}
return new Response("OK");
},
});

const client = net.connect(server.port, "127.0.0.1");

// 0x1000000000000001 - has bits in the STATE_SIZE_OVERFLOW range
const maliciousRequest =
"POST / HTTP/1.1\r\n" +
"Host: localhost\r\n" +
"Transfer-Encoding: chunked\r\n" +
"\r\n" +
"1000000000000001\r\n" +
"X\r\n" +
"0\r\n" +
"\r\n";

const response = await new Promise<string>(resolve => {
let data = "";
client.on("error", () => {
resolve(data);
});
client.on("data", chunk => {
data += chunk.toString();
});
client.on("close", () => {
resolve(data);
});
client.setTimeout(5000, () => {
client.destroy();
resolve(data);
});
client.write(maliciousRequest);
});

// Must be rejected
expect(response).toContain("400");
});

test("smuggled request via 32-bit wraparound is prevented", async () => {
// This is the most direct test: with truncation, 0x100000005 becomes 5,
// which would read exactly "Hello" as the chunk body, complete the request,
// and then parse the smuggled GET request.
let smuggled = false;
let capturedPaths: string[] = [];

await using server = Bun.serve({
port: 0,
async fetch(req) {
const url = new URL(req.url);
capturedPaths.push(url.pathname);
if (url.pathname === "/smuggled") {
smuggled = true;
}
try {
await req.text();
} catch {}
return new Response("OK");
},
});

const client = net.connect(server.port, "127.0.0.1");

// With truncation: chunk size = 0x100000005 -> truncated to 5.
// Parser reads "Hello" (5 bytes), then "\r\n" terminator, then "0\r\n\r\n"
// completes the chunked body. Then "GET /smuggled..." is parsed as new request.
//
// Without truncation: chunk size = 0x100000005 = ~4GB, parser waits for
// 4GB+ of data, never processes the smuggled request.
const smuggleAttempt =
"POST / HTTP/1.1\r\n" +
"Host: localhost\r\n" +
"Transfer-Encoding: chunked\r\n" +
"\r\n" +
"100000005\r\n" +
"Hello\r\n" +
"0\r\n" +
"\r\n" +
"GET /smuggled HTTP/1.1\r\n" +
"Host: localhost\r\n" +
"\r\n";

await new Promise<void>(resolve => {
client.on("error", () => resolve());
client.on("close", () => resolve());
client.setTimeout(2000, () => {
client.destroy();
resolve();
});
client.write(smuggleAttempt);
});

// The smuggled request must NEVER be processed
expect(smuggled).toBe(false);
expect(capturedPaths).not.toContain("/smuggled");
});

test("valid small chunk sizes still work correctly", async () => {
// Ensure the fix doesn't break normal chunked encoding
let receivedBody = "";

await using server = Bun.serve({
port: 0,
async fetch(req) {
receivedBody = await req.text();
return new Response("Success");
},
});

const client = net.connect(server.port, "127.0.0.1");

const validRequest =
"POST / HTTP/1.1\r\n" +
"Host: localhost\r\n" +
"Transfer-Encoding: chunked\r\n" +
"\r\n" +
"5\r\n" +
"Hello\r\n" +
"6\r\n" +
" World\r\n" +
"0\r\n" +
"\r\n";

const response = await new Promise<string>((resolve, reject) => {
client.on("error", reject);
client.on("data", chunk => {
resolve(chunk.toString());
});
client.setTimeout(5000, () => {
client.destroy();
reject(new Error("timeout"));
});
client.write(validRequest);
});

expect(response).toContain("HTTP/1.1 200");
expect(receivedBody).toBe("Hello World");
});
});