Mirror of https://github.com/oven-sh/bun
Synced 2026-03-01 13:01:06 +01:00

Compare commits: claude/fix ... claude/fix (9 commits)
Commits:

- b200ec7f3e
- 57c0a6d099
- 7848648e09
- 379daff22d
- 5b0db0191e
- 9ef9ac1db1
- f5d98191b7
- 83bca9bea8
- 7794cc866e
@@ -198,13 +198,16 @@ const myPlugin: BunPlugin = {
 };
 ```

-The builder object provides some methods for hooking into parts of the bundling process. Bun implements `onResolve` and `onLoad`; it does not yet implement the esbuild hooks `onStart`, `onEnd`, and `onDispose`, or the `resolve` utilities. `initialOptions` is partially implemented: it is read-only and exposes only a subset of esbuild's options; use `config` (the same data in Bun's `BuildConfig` format) instead.
+The builder object provides some methods for hooking into parts of the bundling process. Bun implements `onStart`, `onEnd`, `onResolve`, and `onLoad`. It does not yet implement the esbuild hooks `onDispose` and `resolve`. `initialOptions` is partially implemented: it is read-only and exposes only a subset of esbuild's options; use `config` (the same data in Bun's `BuildConfig` format) instead.

 ```ts title="myPlugin.ts" icon="/icons/typescript.svg"
 import type { BunPlugin } from "bun";
 const myPlugin: BunPlugin = {
   name: "my-plugin",
   setup(builder) {
+    builder.onStart(() => {
+      /* called when the bundle starts */
+    });
     builder.onResolve(
       {
         /* onResolve.options */
@@ -225,6 +228,9 @@ const myPlugin: BunPlugin = {
         };
       },
     );
+    builder.onEnd(result => {
+      /* called when the bundle is complete */
+    });
   },
 };
 ```
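
One point from the paragraph above is worth a concrete illustration: plugins should read build options from `builder.config` (Bun's `BuildConfig`) rather than `initialOptions`. A minimal sketch; the plugin name and logged field are illustrative, not from the diff:

```ts
import type { BunPlugin } from "bun";

// Sketch: inspect the build's configuration via builder.config.
const inspectConfigPlugin: BunPlugin = {
  name: "inspect-config",
  setup(builder) {
    // builder.config is the BuildConfig for the current build,
    // e.g. entrypoints, outdir, target. It should be treated as read-only.
    console.log("bundling to", builder.config.outdir);
  },
};
```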
@@ -15,6 +15,7 @@ Plugins can register callbacks to be run at various points in the lifecycle of a bundle:

 - `onResolve()`: Run before a module is resolved
 - `onLoad()`: Run before a module is loaded
 - `onBeforeParse()`: Run zero-copy native addons in the parser thread before a file is parsed
+- `onEnd()`: Run after the bundle is complete
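
Taken together, one plugin can register several of these lifecycle callbacks. A minimal sketch under assumed inputs; the module name, namespace, and hook bodies are placeholders, not part of this diff:

```ts
import type { BunPlugin } from "bun";

// Sketch: a single plugin wiring up resolve, load, and end-of-bundle hooks.
const lifecyclePlugin: BunPlugin = {
  name: "lifecycle",
  setup(build) {
    // Run before a matching import specifier is resolved.
    build.onResolve({ filter: /^env$/ }, () => ({
      path: "env",
      namespace: "env-ns",
    }));
    // Run before a matching module is loaded; supplies virtual contents.
    build.onLoad({ filter: /.*/, namespace: "env-ns" }, () => ({
      contents: `export default ${JSON.stringify(process.env)};`,
      loader: "js",
    }));
    // Run once the bundle is complete.
    build.onEnd(result => {
      console.log("done, success =", result.success);
    });
  },
};
```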

## Reference

@@ -39,6 +40,7 @@ type PluginBuilder = {
       exports?: Record<string, any>;
     },
   ) => void;
+  onEnd(callback: (result: BuildOutput) => void | Promise<void>): void;
   config: BuildConfig;
 };
@@ -423,3 +425,53 @@ This lifecycle callback is run immediately before a file is parsed by Bun's bundler.

 As input, it receives the file's contents and can optionally return new source code.

 <Info>This callback can be called from any thread and so the napi module implementation must be thread-safe.</Info>
+
+### onEnd
+
+```ts
+onEnd(callback: (result: BuildOutput) => void | Promise<void>): void;
+```
+
+Registers a callback to be run after the bundle is complete. The callback receives the [`BuildOutput`](/docs/bundler#outputs) object containing the build results, including output files and any build messages.
+
+```ts title="index.ts" icon="/icons/typescript.svg"
+const result = await Bun.build({
+  entrypoints: ["./app.ts"],
+  outdir: "./dist",
+  plugins: [
+    {
+      name: "onEnd example",
+      setup(build) {
+        build.onEnd(result => {
+          console.log(`Build completed with ${result.outputs.length} files`);
+          for (const log of result.logs) {
+            console.log(log);
+          }
+        });
+      },
+    },
+  ],
+});
+```
+
+The callback can return a `Promise`. The build output promise from `Bun.build()` will not resolve until all `onEnd()` callbacks have completed.
+
+```ts title="index.ts" icon="/icons/typescript.svg"
+const result = await Bun.build({
+  entrypoints: ["./app.ts"],
+  outdir: "./dist",
+  plugins: [
+    {
+      name: "Upload to S3",
+      setup(build) {
+        build.onEnd(async result => {
+          if (!result.success) return;
+          for (const output of result.outputs) {
+            await uploadToS3(output);
+          }
+        });
+      },
+    },
+  ],
+});
+```
@@ -1707,6 +1707,15 @@ pub fn NewWrappedHandler(comptime tls: bool) type {

 pub fn onClose(this: WrappedSocket, socket: Socket, err: c_int, data: ?*anyopaque) bun.JSError!void {
     if (comptime tls) {
+        // Clean up the raw TCP socket from upgradeTLS() — its onClose
+        // never fires because uws closes through the TLS context only.
+        defer {
+            if (!this.tcp.socket.isDetached()) {
+                this.tcp.socket.detach();
+                this.tcp.has_pending_activity.store(false, .release);
+                this.tcp.deref();
+            }
+        }
         try TLSSocket.onClose(this.tls, socket, err, data);
     } else {
         try TLSSocket.onClose(this.tcp, socket, err, data);
@@ -1154,6 +1154,14 @@ pub const FetchTasklet = struct {
         }
     }

+    /// Whether the request body should skip chunked transfer encoding framing.
+    /// True for upgraded connections (e.g. WebSocket) or when the user explicitly
+    /// set Content-Length without setting Transfer-Encoding.
+    fn skipChunkedFraming(this: *const FetchTasklet) bool {
+        return this.upgraded_connection or
+            (this.request_headers.get("content-length") != null and this.request_headers.get("transfer-encoding") == null);
+    }
+
     pub fn writeRequestData(this: *FetchTasklet, data: []const u8) ResumableSinkBackpressure {
         log("writeRequestData {}", .{data.len});
         if (this.signal) |signal| {
@@ -1175,7 +1183,7 @@ pub const FetchTasklet = struct {
         // dont have backpressure so we will schedule the data to be written
         // if we have backpressure the onWritable will drain the buffer
         needs_schedule = stream_buffer.isEmpty();
-        if (this.upgraded_connection) {
+        if (this.skipChunkedFraming()) {
             bun.handleOom(stream_buffer.write(data));
         } else {
             //16 is the max size of a hex number size that represents 64 bits + 2 for the \r\n
@@ -1209,15 +1217,14 @@ pub const FetchTasklet = struct {
             }
             this.abortTask();
         } else {
-            if (!this.upgraded_connection) {
-                // If is not upgraded we need to send the terminating chunk
+            if (!this.skipChunkedFraming()) {
+                // Using chunked transfer encoding, send the terminating chunk
                 const thread_safe_stream_buffer = this.request_body_streaming_buffer orelse return;
                 const stream_buffer = thread_safe_stream_buffer.acquire();
                 defer thread_safe_stream_buffer.release();
                 bun.handleOom(stream_buffer.write(http.end_of_chunked_http1_1_encoding_response_body));
             }
             if (this.http) |http_| {
-                // just tell to write the end of the chunked encoding aka 0\r\n\r\n
                 http.http_thread.scheduleRequestWrite(http_, .end);
             }
         }
@@ -3683,7 +3683,20 @@ pub const BundleV2 = struct {
         }
     }

-    const import_record_loader = import_record.loader orelse path.loader(&transpiler.options.loaders) orelse .file;
+    const import_record_loader = brk: {
+        const resolved_loader = import_record.loader orelse path.loader(&transpiler.options.loaders) orelse .file;
+        // When an HTML file references a URL asset (e.g. <link rel="manifest" href="./manifest.json" />),
+        // the file must be copied to the output directory as-is. If the resolved loader would
+        // parse/transform the file (e.g. .json, .toml) rather than copy it, force the .file loader
+        // so that `shouldCopyForBundling()` returns true and the asset is emitted.
+        // Only do this for HTML sources — CSS url() imports should retain their original behavior.
+        if (loader == .html and import_record.kind == .url and !resolved_loader.shouldCopyForBundling() and
+            !resolved_loader.isJavaScriptLike() and !resolved_loader.isCSS() and resolved_loader != .html)
+        {
+            break :brk Loader.file;
+        }
+        break :brk resolved_loader;
+    };
     import_record.loader = import_record_loader;

     const is_html_entrypoint = import_record_loader == .html and target.isServerSide() and this.transpiler.options.dev_server == null;
src/http.zig (16 changed lines)
@@ -719,7 +719,21 @@ pub fn buildRequest(this: *HTTPClient, body_len: usize) picohttp.Request {

 if (body_len > 0 or this.method.hasRequestBody()) {
     if (this.flags.is_streaming_request_body) {
-        if (add_transfer_encoding and this.flags.upgrade_state == .none) {
+        if (original_content_length) |content_length| {
+            if (add_transfer_encoding) {
+                // User explicitly set Content-Length and did not set Transfer-Encoding;
+                // preserve Content-Length instead of using chunked encoding.
+                // This matches Node.js behavior where an explicit Content-Length is always honored.
+                request_headers_buf[header_count] = .{
+                    .name = content_length_header_name,
+                    .value = content_length,
+                };
+                header_count += 1;
+            }
+            // If !add_transfer_encoding, the user explicitly set Transfer-Encoding,
+            // which was already added to request_headers_buf. We respect that and
+            // do not add Content-Length (they are mutually exclusive per HTTP/1.1).
+        } else if (add_transfer_encoding and this.flags.upgrade_state == .none) {
             request_headers_buf[header_count] = chunked_encoded_header;
             header_count += 1;
         }
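
The user-visible effect of this branch is easiest to see from JavaScript. A minimal sketch; the port is an assumed local server and the body sizes are arbitrary (the regression tests later in this diff exercise the same behavior end-to-end):

```ts
import http from "node:http";

// Sketch: with an explicit Content-Length, a streamed body is sent as-is
// and no Transfer-Encoding: chunked framing is added.
const chunk1 = Buffer.alloc(100, "a");
const chunk2 = Buffer.alloc(100, "b");

const req = http.request({
  hostname: "127.0.0.1",
  port: 8080, // assumed local server
  method: "POST",
  headers: { "Content-Length": String(chunk1.length + chunk2.length) },
});

req.write(chunk1); // body streamed in two chunks...
req.end(chunk2); // ...but still framed by Content-Length, matching Node.js
```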
@@ -383,6 +383,7 @@ pub const PackageInstall = struct {
     &[_]bun.OSPathSlice{},
     &[_]bun.OSPathSlice{},
 ) catch |err| return Result.fail(err, .opening_cache_dir, @errorReturnTrace());
+walker_.resolve_unknown_entry_types = true;
 defer walker_.deinit();

 const FileCopier = struct {
@@ -520,6 +521,7 @@ pub const PackageInstall = struct {
 else
     &[_]bun.OSPathSlice{},
 ) catch |err| bun.handleOom(err);
+state.walker.resolve_unknown_entry_types = true;

 if (!Environment.isWindows) {
     state.subdir = destbase.makeOpenPath(bun.span(destpath), .{
@@ -623,6 +623,17 @@ pub const PackageInstaller = struct {
     // else => unreachable,
     // };

+    // If a newly computed integrity hash is available (e.g. for a GitHub
+    // tarball) and the lockfile doesn't already have one, persist it so
+    // the lockfile gets re-saved with the hash.
+    if (data.integrity.tag.isSupported()) {
+        var pkg_metas = this.lockfile.packages.items(.meta);
+        if (!pkg_metas[package_id].integrity.tag.isSupported()) {
+            pkg_metas[package_id].integrity = data.integrity;
+            this.manager.options.enable.force_save_lockfile = true;
+        }
+    }
+
     if (this.manager.task_queue.fetchRemove(task_id)) |removed| {
         var callbacks = removed.value;
         defer callbacks.deinit(this.manager.allocator);
@@ -133,6 +133,12 @@ pub fn processExtractedTarballPackage(
         break :package pkg;
     };

+    // Store the tarball integrity hash so the lockfile can pin the
+    // exact content downloaded from the remote (GitHub) server.
+    if (data.integrity.tag.isSupported()) {
+        package.meta.integrity = data.integrity;
+    }
+
     package = manager.lockfile.appendPackage(package) catch unreachable;
     package_id.* = package.meta.id;
@@ -23,7 +23,26 @@ pub inline fn run(this: *const ExtractTarball, log: *logger.Log, bytes: []const u8)
             return error.IntegrityCheckFailed;
         }
     }
-    return this.extract(log, bytes);
+    var result = try this.extract(log, bytes);
+
+    // Compute and store SHA-512 integrity hash for GitHub tarballs so the
+    // lockfile can pin the exact tarball content. On subsequent installs the
+    // hash stored in the lockfile is forwarded via this.integrity and verified
+    // above, preventing a compromised server from silently swapping the tarball.
+    if (this.resolution.tag == .github) {
+        if (this.integrity.tag.isSupported()) {
+            // Re-installing with an existing lockfile: integrity was already
+            // verified above, propagate the known value to ExtractData so that
+            // the lockfile keeps it on re-serialisation.
+            result.integrity = this.integrity;
+        } else {
+            // First install (no integrity in the lockfile yet): compute it.
+            result.integrity = .{ .tag = .sha512 };
+            Crypto.SHA512.hash(bytes, result.integrity.value[0..Crypto.SHA512.digest]);
+        }
+    }
+
+    return result;
 }

 pub fn buildURL(
@@ -547,6 +566,7 @@ const string = []const u8;

 const Npm = @import("./npm.zig");
 const std = @import("std");
+const Crypto = @import("../sha.zig").Hashers;
 const FileSystem = @import("../fs.zig").FileSystem;
 const Integrity = @import("./integrity.zig").Integrity;
 const Resolution = @import("./resolution.zig").Resolution;
@@ -209,6 +209,7 @@ pub const ExtractData = struct {
         path: string = "",
         buf: []u8 = "",
     } = null,
+    integrity: Integrity = .{},
 };

 pub const DependencyInstallContext = struct {
@@ -271,6 +272,7 @@ pub const VersionSlice = external.VersionSlice;

 pub const Dependency = @import("./dependency.zig");
 pub const Behavior = @import("./dependency.zig").Behavior;
+pub const Integrity = @import("./integrity.zig").Integrity;

 pub const Lockfile = @import("./lockfile.zig");
 pub const PatchedDep = Lockfile.PatchedDep;
@@ -12,12 +12,16 @@ pub const FileCopier = struct {
     return .{
         .src_path = src_path,
         .dest_subpath = dest_subpath,
-        .walker = try .walk(
-            src_dir,
-            bun.default_allocator,
-            &.{},
-            skip_dirnames,
-        ),
+        .walker = walker: {
+            var w = try Walker.walk(
+                src_dir,
+                bun.default_allocator,
+                &.{},
+                skip_dirnames,
+            );
+            w.resolve_unknown_entry_types = true;
+            break :walker w;
+        },
     };
 }

@@ -15,12 +15,16 @@ pub fn init(
     .src_dir = folder_dir,
     .src = src,
     .dest = dest,
-    .walker = try .walk(
-        folder_dir,
-        bun.default_allocator,
-        &.{},
-        skip_dirnames,
-    ),
+    .walker = walker: {
+        var w = try Walker.walk(
+            folder_dir,
+            bun.default_allocator,
+            &.{},
+            skip_dirnames,
+        );
+        w.resolve_unknown_entry_types = true;
+        break :walker w;
+    },
 };
 }
@@ -644,9 +644,16 @@ pub const Stringifier = struct {
         &path_buf,
     );

-    try writer.print(", {f}]", .{
-        repo.resolved.fmtJson(buf, .{}),
-    });
+    if (pkg_meta.integrity.tag.isSupported()) {
+        try writer.print(", {f}, \"{f}\"]", .{
+            repo.resolved.fmtJson(buf, .{}),
+            pkg_meta.integrity,
+        });
+    } else {
+        try writer.print(", {f}]", .{
+            repo.resolved.fmtJson(buf, .{}),
+        });
+    }
 },
 else => unreachable,
 }
@@ -1885,6 +1892,15 @@ pub fn parseIntoBinaryLockfile(
     };

     @field(res.value, @tagName(tag)).resolved = try string_buf.append(bun_tag_str);

+    // Optional integrity hash (added to pin tarball content)
+    if (i < pkg_info.len) {
+        const integrity_expr = pkg_info.at(i);
+        if (integrity_expr.asString(allocator)) |integrity_str| {
+            pkg.meta.integrity = Integrity.parse(integrity_str);
+            i += 1;
+        }
+    }
 },
 else => {},
 }
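
For reference, a GitHub package entry in `bun.lock` gains an optional trailing integrity element. The shape below follows the test fixtures later in this diff; the hash value is a placeholder:

```ts
// Sketch of a bun.lock "packages" entry for a GitHub dependency:
const entry = [
  "is-number@github:jonschlinkert/is-number#98e8ff1", // resolved specifier
  {}, // dependencies
  "jonschlinkert-is-number-98e8ff1", // resolved commit tag
  "sha512-<base64-hash>", // optional integrity (new in this change)
];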
@@ -51,6 +51,15 @@ function onError(msg, err, callback) {
     process.nextTick(emitErrorNt, msg, err, callback);
 }

+function isHTTPHeaderStateSentOrAssigned(state) {
+  return state === NodeHTTPHeaderState.sent || state === NodeHTTPHeaderState.assigned;
+}
+function throwHeadersSentIfNecessary(self, action) {
+  if (self._header != null || isHTTPHeaderStateSentOrAssigned(self[headerStateSymbol])) {
+    throw $ERR_HTTP_HEADERS_SENT(action);
+  }
+}
+
 function write_(msg, chunk, encoding, callback, fromEnd) {
     if (typeof callback !== "function") callback = nop;

@@ -252,18 +261,14 @@ const OutgoingMessagePrototype = {

 removeHeader(name) {
     validateString(name, "name");
-    if ((this._header !== undefined && this._header !== null) || this[headerStateSymbol] === NodeHTTPHeaderState.sent) {
-        throw $ERR_HTTP_HEADERS_SENT("remove");
-    }
+    throwHeadersSentIfNecessary(this, "remove");
     const headers = this[headersSymbol];
     if (!headers) return;
     headers.delete(name);
 },

 setHeader(name, value) {
-    if ((this._header !== undefined && this._header !== null) || this[headerStateSymbol] == NodeHTTPHeaderState.sent) {
-        throw $ERR_HTTP_HEADERS_SENT("set");
-    }
+    throwHeadersSentIfNecessary(this, "set");
     validateHeaderName(name);
     validateHeaderValue(name, value);
     const headers = (this[headersSymbol] ??= new Headers());
@@ -271,9 +276,7 @@ const OutgoingMessagePrototype = {
     return this;
 },
 setHeaders(headers) {
-    if (this._header || this[headerStateSymbol] !== NodeHTTPHeaderState.none) {
-        throw $ERR_HTTP_HEADERS_SENT("set");
-    }
+    throwHeadersSentIfNecessary(this, "set");

     if (!headers || $isArray(headers) || typeof headers.keys !== "function" || typeof headers.get !== "function") {
         throw $ERR_INVALID_ARG_TYPE("headers", ["Headers", "Map"], headers);
@@ -185,7 +185,7 @@ pub const Tag = enum(short) {
 }

 fn PostgresBinarySingleDimensionArray(comptime T: type) type {
-    return struct {
+    return extern struct {
         // struct array_int4 {
         //   int4_t ndim; /* Number of dimensions */
         //   int4_t _ign; /* offset for data, removed by libpq */
@@ -197,51 +197,44 @@ pub const Tag = enum(short) {
         //   int4_t first_value; /* Beginning of integer data */
         // };

+        // Header is 5 x i32 = 20 bytes (ndim, offset_for_data, element_type, len, index)
+        const header_size = 20;
+        // Each array element is preceded by a 4-byte length prefix
+        const elem_stride = @sizeOf(T) + 4;
+
+        const Int = std.meta.Int(.unsigned, @bitSizeOf(T));
-        ndim: i32,
-        offset_for_data: i32,
-        element_type: i32,
-
         len: i32,
+        bytes: []const u8,
-        index: i32,
-        first_value: T,

+        /// Parses the binary array header from a raw (potentially unaligned) byte slice.
+        /// Uses std.mem.readInt to safely handle unaligned network data.
+        pub fn init(bytes: []const u8) @This() {
+            // Read the len field at offset 12 (after ndim + offset_for_data + element_type)
+            const len: i32 = @bitCast(std.mem.readInt(u32, bytes[12..16], .big));
+            return .{
+                .len = len,
+                .bytes = bytes,
+            };
+        }
+
+        /// Reads array elements from the data portion, byte-swapping each value.
+        /// WARNING: This destructively mutates `this.bytes` (via `@constCast`) by
+        /// writing decoded elements densely into the header region starting at
+        /// offset `header_size`. Each element is read from its original position
+        /// (at `header_size + i * elem_stride + 4`) and written to `header_size +
+        /// i * @sizeOf(T)`. The returned slice points into this modified buffer.
+        pub fn slice(this: @This()) []align(1) T {
+            if (this.len <= 0) return &.{};
-        pub fn slice(this: *@This()) []T {
-            if (this.len == 0) return &.{};

-            var head = @as([*]T, @ptrCast(&this.first_value));
-            var current = head;
             const len: usize = @intCast(this.len);
+            const data = @constCast(this.bytes);

+            // Data starts after the 20-byte header. Each element has a 4-byte
+            // length prefix followed by the element bytes.
+            // We write the decoded elements densely starting at the data region.
+            const out: [*]align(1) T = @ptrCast(data.ptr + header_size);

             for (0..len) |i| {
+                const elem_offset = header_size + i * elem_stride + 4;
+                const val = std.mem.readInt(Int, data[elem_offset..][0..@sizeOf(T)], .big);
+                out[i] = @bitCast(val);
-                // Skip every other value as it contains the size of the element
-                current = current[1..];
-
-                const val = current[0];
-                const Int = std.meta.Int(.unsigned, @bitSizeOf(T));
-                const swapped = @byteSwap(@as(Int, @bitCast(val)));
-
-                head[i] = @bitCast(swapped);
-
-                current = current[1..];
             }

+            return out[0..len];
-            return head[0..len];
         }
-
-        pub fn init(bytes: []const u8) *@This() {
-            const this: *@This() = @ptrCast(@alignCast(@constCast(bytes.ptr)));
-            this.ndim = @byteSwap(this.ndim);
-            this.offset_for_data = @byteSwap(this.offset_for_data);
-            this.element_type = @byteSwap(this.element_type);
-            this.len = @byteSwap(this.len);
-            this.index = @byteSwap(this.index);
-            return this;
-        }
     };
 }
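
The wire layout this code parses can be sketched in TypeScript for clarity. This is an illustrative decoder based on the header comments above, not Bun's implementation:

```ts
// Sketch: decode a one-dimensional PostgreSQL binary int4[] payload.
// Layout: 5 x i32 header (ndim, offset/has_nulls, element OID, length,
// lower bound), then per element a 4-byte length prefix followed by the
// big-endian element bytes.
function decodeInt4Array(buf: Buffer): number[] {
  const len = buf.readInt32BE(12); // element count lives at offset 12
  const out: number[] = [];
  let offset = 20; // skip the 20-byte header
  for (let i = 0; i < len; i++) {
    offset += 4; // skip the per-element length prefix
    out.push(buf.readInt32BE(offset));
    offset += 4; // advance past the element itself
  }
  return out;
}
```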
@@ -6,6 +6,7 @@ skip_filenames: []const u64 = &[_]u64{},
 skip_dirnames: []const u64 = &[_]u64{},
 skip_all: []const u64 = &[_]u64{},
 seed: u64 = 0,
+resolve_unknown_entry_types: bool = false,

 const NameBufferList = std.array_list.Managed(bun.OSPathChar);
@@ -38,7 +39,22 @@ pub fn next(self: *Walker) bun.sys.Maybe(?WalkerEntry) {
     .err => |err| return .initErr(err),
     .result => |res| {
         if (res) |base| {
-            switch (base.kind) {
+            // Some filesystems (NFS, FUSE, bind mounts) don't provide
+            // d_type and return DT_UNKNOWN. Optionally resolve via
+            // fstatat so callers get accurate types for recursion.
+            // This only affects POSIX; Windows always provides types.
+            const kind: std.fs.Dir.Entry.Kind = if (comptime !Environment.isWindows)
+                (if (base.kind == .unknown and self.resolve_unknown_entry_types) brk: {
+                    const dir_fd = top.iter.iter.dir;
+                    break :brk switch (bun.sys.fstatat(dir_fd, base.name.sliceAssumeZ())) {
+                        .result => |stat_buf| bun.sys.kindFromMode(stat_buf.mode),
+                        .err => continue, // skip entries we can't stat
+                    };
+                } else base.kind)
+            else
+                base.kind;
+
+            switch (kind) {
                 .directory => {
                     if (std.mem.indexOfScalar(
                         u64,
@@ -78,7 +94,7 @@ pub fn next(self: *Walker) bun.sys.Maybe(?WalkerEntry) {
     const cur_len = self.name_buffer.items.len;
     bun.handleOom(self.name_buffer.append(0));

-    if (base.kind == .directory) {
+    if (kind == .directory) {
         const new_dir = switch (bun.openDirForIterationOSPath(top.iter.iter.dir, base.name.slice())) {
             .result => |fd| fd,
             .err => |err| return .initErr(err),
@@ -95,7 +111,7 @@ pub fn next(self: *Walker) bun.sys.Maybe(?WalkerEntry) {
     .dir = top.iter.iter.dir,
     .basename = self.name_buffer.items[dirname_len..cur_len :0],
     .path = self.name_buffer.items[0..cur_len :0],
-    .kind = base.kind,
+    .kind = kind,
 });
 } else {
     var item = self.stack.pop().?;
@@ -899,4 +899,75 @@ body {
     expect(entry2Html).toMatch(/src=".*\.js"/);
   },
 });
+
+// Test manifest.json is copied as an asset and link href is rewritten
+itBundled("html/manifest-json", {
+  outdir: "out/",
+  files: {
+    "/index.html": `
+<!DOCTYPE html>
+<html>
+  <head>
+    <link rel="manifest" href="./manifest.json" />
+  </head>
+  <body>
+    <h1>App</h1>
+    <script src="./app.js"></script>
+  </body>
+</html>`,
+    "/manifest.json": JSON.stringify({
+      name: "My App",
+      short_name: "App",
+      start_url: "/",
+      display: "standalone",
+      background_color: "#ffffff",
+      theme_color: "#000000",
+    }),
+    "/app.js": "console.log('hello')",
+  },
+  entryPoints: ["/index.html"],
+  onAfterBundle(api) {
+    const htmlContent = api.readFile("out/index.html");
+
+    // The original manifest.json reference should be rewritten to a hashed filename
+    expect(htmlContent).not.toContain('manifest.json"');
+    expect(htmlContent).toMatch(/href="(?:\.\/|\/)?manifest-[a-zA-Z0-9]+\.json"/);
+
+    // Extract the hashed manifest filename and verify its content
+    const manifestMatch = htmlContent.match(/href="(?:\.\/|\/)?(manifest-[a-zA-Z0-9]+\.json)"/);
+    expect(manifestMatch).not.toBeNull();
+    const manifestContent = api.readFile("out/" + manifestMatch![1]);
+    expect(manifestContent).toContain('"name"');
+    expect(manifestContent).toContain('"My App"');
+  },
+});
+
+// Test that other non-JS/CSS file types referenced via URL imports are copied as assets
+itBundled("html/xml-asset", {
+  outdir: "out/",
+  files: {
+    "/index.html": `
+<!DOCTYPE html>
+<html>
+  <head>
+    <link rel="manifest" href="./site.webmanifest" />
+  </head>
+  <body>
+    <h1>App</h1>
+  </body>
+</html>`,
+    "/site.webmanifest": JSON.stringify({
+      name: "My App",
+      icons: [{ src: "/icon.png", sizes: "192x192" }],
+    }),
+  },
+  entryPoints: ["/index.html"],
+  onAfterBundle(api) {
+    const htmlContent = api.readFile("out/index.html");
+
+    // The webmanifest reference should be rewritten to a hashed filename
+    expect(htmlContent).not.toContain("site.webmanifest");
+    expect(htmlContent).toMatch(/href=".*\.webmanifest"/);
+  },
+});
 });
test/cli/install/GHSA-pfwx-36v6-832x.test.ts (new file, 255 lines)

@@ -0,0 +1,255 @@

```ts
import { file } from "bun";
import { describe, expect, test } from "bun:test";
import { rm } from "fs/promises";
import { bunEnv, bunExe, tempDir } from "harness";
import { join } from "path";

// Each test uses its own BUN_INSTALL_CACHE_DIR inside the temp dir for full
// isolation. This avoids interfering with the global cache or other tests.
function envWithCache(dir: string) {
  return { ...bunEnv, BUN_INSTALL_CACHE_DIR: join(String(dir), ".bun-cache") };
}

describe.concurrent("GitHub tarball integrity", () => {
  test("should store integrity hash in lockfile for GitHub dependencies", async () => {
    using dir = tempDir("github-integrity", {
      "package.json": JSON.stringify({
        name: "test-github-integrity",
        dependencies: {
          "is-number": "jonschlinkert/is-number#98e8ff1",
        },
      }),
    });

    const env = envWithCache(dir);

    await using proc = Bun.spawn({
      cmd: [bunExe(), "install"],
      cwd: String(dir),
      env,
      stdout: "pipe",
      stderr: "pipe",
    });

    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

    expect(stderr).toContain("Saved lockfile");
    expect(exitCode).toBe(0);

    const lockfileContent = await file(join(String(dir), "bun.lock")).text();

    // The lockfile should contain a sha512 integrity hash for the GitHub dependency
    expect(lockfileContent).toContain("sha512-");
    // The resolved commit hash should be present
    expect(lockfileContent).toContain("jonschlinkert-is-number-98e8ff1");
    // Verify the format: the integrity appears after the resolved commit hash
    expect(lockfileContent).toMatch(/"jonschlinkert-is-number-98e8ff1",\s*"sha512-/);
  });

  test("should verify integrity passes on re-install with matching hash", async () => {
    using dir = tempDir("github-integrity-match", {
      "package.json": JSON.stringify({
        name: "test-github-integrity-match",
        dependencies: {
          "is-number": "jonschlinkert/is-number#98e8ff1",
        },
      }),
    });

    const env = envWithCache(dir);

    // First install to generate lockfile with correct integrity
    await using proc1 = Bun.spawn({
      cmd: [bunExe(), "install"],
      cwd: String(dir),
      env,
      stdout: "pipe",
      stderr: "pipe",
    });

    const [stdout1, stderr1, exitCode1] = await Promise.all([proc1.stdout.text(), proc1.stderr.text(), proc1.exited]);
    expect(stderr1).not.toContain("error:");
    expect(exitCode1).toBe(0);

    // Read the generated lockfile and extract the integrity hash adjacent to
    // the GitHub resolved entry to avoid accidentally matching an npm hash.
    const lockfileContent = await file(join(String(dir), "bun.lock")).text();
    const integrityMatch = lockfileContent.match(/"jonschlinkert-is-number-98e8ff1",\s*"(sha512-[A-Za-z0-9+/]+=*)"/);
    expect(integrityMatch).not.toBeNull();
    const integrityHash = integrityMatch![1];

    // Clear cache and node_modules, then re-install with the same lockfile
    await rm(join(String(dir), ".bun-cache"), { recursive: true, force: true });
    await rm(join(String(dir), "node_modules"), { recursive: true, force: true });

    await using proc2 = Bun.spawn({
      cmd: [bunExe(), "install"],
      cwd: String(dir),
      env,
      stdout: "pipe",
      stderr: "pipe",
    });

    const [stdout2, stderr2, exitCode2] = await Promise.all([proc2.stdout.text(), proc2.stderr.text(), proc2.exited]);

    // Should succeed because the integrity matches
    expect(stderr2).not.toContain("Integrity check failed");
    expect(exitCode2).toBe(0);

    // Lockfile should still contain the same integrity hash
    const lockfileContent2 = await file(join(String(dir), "bun.lock")).text();
    expect(lockfileContent2).toContain(integrityHash);
  });

  test("should reject GitHub tarball when integrity check fails", async () => {
    using dir = tempDir("github-integrity-reject", {
      "package.json": JSON.stringify({
        name: "test-github-integrity-reject",
        dependencies: {
          "is-number": "jonschlinkert/is-number#98e8ff1",
        },
      }),
      // Pre-create a lockfile with an invalid integrity hash (valid base64, 64 zero bytes)
      "bun.lock": JSON.stringify({
        lockfileVersion: 1,
        configVersion: 1,
        workspaces: {
          "": {
            name: "test-github-integrity-reject",
            dependencies: {
              "is-number": "jonschlinkert/is-number#98e8ff1",
            },
          },
        },
        packages: {
          "is-number": [
            "is-number@github:jonschlinkert/is-number#98e8ff1",
            {},
            "jonschlinkert-is-number-98e8ff1",
            "sha512-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
          ],
        },
      }),
    });

    // Fresh per-test cache ensures the tarball must be downloaded from the network
    const env = envWithCache(dir);

    await using proc = Bun.spawn({
      cmd: [bunExe(), "install"],
      cwd: String(dir),
      env,
      stdout: "pipe",
      stderr: "pipe",
    });

    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

    expect(stderr).toContain("Integrity check failed");
    expect(exitCode).not.toBe(0);
  });

  test("should update lockfile with integrity when old format has none", async () => {
    using dir = tempDir("github-integrity-upgrade", {
      "package.json": JSON.stringify({
        name: "test-github-integrity-upgrade",
        dependencies: {
          "is-number": "jonschlinkert/is-number#98e8ff1",
        },
      }),
      // Pre-create a lockfile in the old format (no integrity hash)
      "bun.lock": JSON.stringify({
        lockfileVersion: 1,
        configVersion: 1,
        workspaces: {
          "": {
            name: "test-github-integrity-upgrade",
            dependencies: {
              "is-number": "jonschlinkert/is-number#98e8ff1",
            },
          },
        },
        packages: {
          "is-number": ["is-number@github:jonschlinkert/is-number#98e8ff1", {}, "jonschlinkert-is-number-98e8ff1"],
        },
      }),
    });

    // Fresh per-test cache ensures the tarball must be downloaded
    const env = envWithCache(dir);

    await using proc = Bun.spawn({
      cmd: [bunExe(), "install"],
      cwd: String(dir),
      env,
      stdout: "pipe",
      stderr: "pipe",
    });

    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

    // Should succeed without errors
    expect(stderr).not.toContain("Integrity check failed");
    expect(stderr).not.toContain("error:");
    // The lockfile should be re-saved with the new integrity hash
    expect(stderr).toContain("Saved lockfile");
    expect(exitCode).toBe(0);

    // Verify the lockfile now contains the integrity hash
    const lockfileContent = await file(join(String(dir), "bun.lock")).text();
    expect(lockfileContent).toContain("sha512-");
    expect(lockfileContent).toMatch(/"jonschlinkert-is-number-98e8ff1",\s*"sha512-/);
  });

  test("should accept GitHub dependency from cache without re-downloading", async () => {
    // Use a shared cache dir for both installs so the second is a true cache hit
    using dir = tempDir("github-integrity-cached", {
      "package.json": JSON.stringify({
        name: "test-github-integrity-cached",
        dependencies: {
          "is-number": "jonschlinkert/is-number#98e8ff1",
        },
      }),
    });

    const env = envWithCache(dir);

    // First install warms the per-test cache
    await using proc1 = Bun.spawn({
      cmd: [bunExe(), "install"],
      cwd: String(dir),
      env,
      stdout: "pipe",
      stderr: "pipe",
    });

    const [stdout1, stderr1, exitCode1] = await Promise.all([proc1.stdout.text(), proc1.stderr.text(), proc1.exited]);
    expect(stderr1).not.toContain("error:");
    expect(exitCode1).toBe(0);

    // Remove node_modules but keep the cache
    await rm(join(String(dir), "node_modules"), { recursive: true, force: true });

    // Strip the integrity from the lockfile to simulate an old-format lockfile
    // that should still work when the cache already has the package
    const lockfileContent = await file(join(String(dir), "bun.lock")).text();
    const stripped = lockfileContent.replace(/,\s*"sha512-[^"]*"/, "");
    await Bun.write(join(String(dir), "bun.lock"), stripped);

    // Second install should hit the cache and succeed without re-downloading
    await using proc2 = Bun.spawn({
      cmd: [bunExe(), "install"],
      cwd: String(dir),
      env,
      stdout: "pipe",
      stderr: "pipe",
    });

    const [stdout2, stderr2, exitCode2] = await Promise.all([proc2.stdout.text(), proc2.stderr.text(), proc2.exited]);

    // Should succeed without integrity errors (package served from cache)
    expect(stderr2).not.toContain("Integrity check failed");
    expect(stderr2).not.toContain("error:");
    expect(exitCode2).toBe(0);
  });
});
```
test/regression/issue/12117.test.ts (new file, 63 lines)

@@ -0,0 +1,63 @@

```ts
// Regression test for TLS upgrade raw socket leak (#12117, #24118, #25948)
// When a TCP socket is upgraded to TLS via tls.connect({ socket }),
// both a TLS wrapper and a raw TCP wrapper are created in Zig.
// Previously, the raw socket's has_pending_activity was never set to
// false on close, causing it (and all its retained objects) to leak.

import { describe, expect, it } from "bun:test";
import { tls as COMMON_CERT, expectMaxObjectTypeCount } from "harness";
import { once } from "node:events";
import net from "node:net";
import tls from "node:tls";

describe("TLS upgrade", () => {
  it("should not leak TLSSocket objects after close", async () => {
    // Create a TLS server that echoes data and closes
    const server = tls.createServer(
      {
        key: COMMON_CERT.key,
        cert: COMMON_CERT.cert,
      },
      socket => {
        socket.end("hello");
      },
    );

    await once(server.listen(0, "127.0.0.1"), "listening");
    const port = (server.address() as net.AddressInfo).port;

    // Simulate the MongoDB driver pattern: create a plain TCP socket,
    // then upgrade it to TLS via tls.connect({ socket }).
    // Do this multiple times to accumulate leaked objects.
    const iterations = 50;

    try {
      for (let i = 0; i < iterations; i++) {
        const tcpSocket = net.createConnection({ host: "127.0.0.1", port });
        await once(tcpSocket, "connect");

        const tlsSocket = tls.connect({
          socket: tcpSocket,
          ca: COMMON_CERT.cert,
          rejectUnauthorized: false,
        });
        await once(tlsSocket, "secureConnect");

        // Read any data and destroy the TLS socket (simulates SDAM close)
        tlsSocket.on("data", () => {});
        tlsSocket.destroy();

        await once(tlsSocket, "close");
      }
    } finally {
      server.close();
      await once(server, "close");
    }

    // After all connections are closed and GC runs, the TLSSocket count
    // should be low. Before the fix, each iteration would leak 1 raw
    // TLSSocket (the TCP wrapper from upgradeTLS), accumulating over time.
    // Allow some slack for prototypes/structures (typically 2-3 baseline).
    await expectMaxObjectTypeCount(expect, "TLSSocket", 10, 1000);
  });
});
```
test/regression/issue/27049.test.ts (new file, 89 lines)

@@ -0,0 +1,89 @@

```ts
import { expect, test } from "bun:test";
import http from "node:http";

test("ClientRequest.setHeaders should not throw ERR_HTTP_HEADERS_SENT on new request", async () => {
  await using server = Bun.serve({
    port: 0,
    fetch(req) {
      return new Response(req.headers.get("x-test") ?? "missing");
    },
  });

  const { resolve, reject, promise } = Promise.withResolvers<string>();

  const req = http.request(`http://localhost:${server.port}/test`, { method: "GET" }, res => {
    let data = "";
    res.on("data", (chunk: Buffer) => {
      data += chunk.toString();
    });
    res.on("end", () => resolve(data));
  });

  req.on("error", reject);

  // This should not throw - headers haven't been sent yet
  req.setHeaders(new Headers({ "x-test": "value" }));

  req.end();

  const body = await promise;
  expect(body).toBe("value");
});

test("ClientRequest.setHeaders works with Map", async () => {
  await using server = Bun.serve({
    port: 0,
    fetch(req) {
      return new Response(req.headers.get("x-map-test") ?? "missing");
    },
  });

  const { resolve, reject, promise } = Promise.withResolvers<string>();

  const req = http.request(`http://localhost:${server.port}/test`, { method: "GET" }, res => {
    let data = "";
    res.on("data", (chunk: Buffer) => {
      data += chunk.toString();
    });
    res.on("end", () => resolve(data));
  });

  req.on("error", reject);

  req.setHeaders(new Map([["x-map-test", "map-value"]]));

  req.end();

  const body = await promise;
  expect(body).toBe("map-value");
});

test("ServerResponse.setHeaders should not throw before headers are sent", async () => {
  const { resolve, reject, promise } = Promise.withResolvers<string>();

  const server = http.createServer((req, res) => {
    // This should not throw - headers haven't been sent yet
    res.setHeaders(new Headers({ "x-custom": "server-value" }));
    res.writeHead(200);
    res.end("ok");
  });

  try {
    server.listen(0, () => {
      const port = (server.address() as any).port;
      try {
        const req = http.request(`http://localhost:${port}/test`, res => {
          resolve(res.headers["x-custom"] as string);
        });
        req.on("error", reject);
        req.end();
      } catch (e) {
        reject(e);
      }
    });

    expect(await promise).toBe("server-value");
  } finally {
    server.close();
  }
});
```
test/regression/issue/27061.test.ts (new file, 336 lines)

@@ -0,0 +1,336 @@

```ts
import { describe, expect, test } from "bun:test";
import http from "node:http";

// Regression test for https://github.com/oven-sh/bun/issues/27061
// When http.ClientRequest.write() is called more than once (streaming data in chunks),
// Bun was stripping the explicitly-set Content-Length header and switching to
// Transfer-Encoding: chunked. Node.js preserves Content-Length in all cases.

describe("node:http ClientRequest preserves explicit Content-Length", () => {
  test("with multiple req.write() calls", async () => {
    const { promise, resolve, reject } = Promise.withResolvers<{
      contentLength: string | undefined;
      transferEncoding: string | undefined;
      bodyLength: number;
    }>();

    const server = http.createServer((req, res) => {
      const chunks: Buffer[] = [];
      req.on("data", (chunk: Buffer) => chunks.push(chunk));
      req.on("end", () => {
        resolve({
          contentLength: req.headers["content-length"],
          transferEncoding: req.headers["transfer-encoding"],
          bodyLength: Buffer.concat(chunks).length,
        });
        res.writeHead(200);
        res.end("ok");
      });
    });

    await new Promise<void>(res => server.listen(0, "127.0.0.1", res));
    const port = (server.address() as any).port;

    try {
      const chunk1 = Buffer.alloc(100, "a");
      const chunk2 = Buffer.alloc(100, "b");
      const totalLength = chunk1.length + chunk2.length;

      const req = http.request({
        hostname: "127.0.0.1",
        port,
        method: "POST",
        headers: {
          "Content-Length": totalLength.toString(),
        },
      });

      await new Promise<void>((res, rej) => {
        req.on("error", rej);
        req.on("response", () => res());
        req.write(chunk1);
        req.write(chunk2);
        req.end();
      });

      const result = await promise;
      expect(result.contentLength).toBe("200");
      expect(result.transferEncoding).toBeUndefined();
      expect(result.bodyLength).toBe(200);
    } finally {
      server.close();
    }
  });

  test("with req.write() + req.end(data)", async () => {
    const { promise, resolve, reject } = Promise.withResolvers<{
      contentLength: string | undefined;
      transferEncoding: string | undefined;
      bodyLength: number;
    }>();

    const server = http.createServer((req, res) => {
      const chunks: Buffer[] = [];
      req.on("data", (chunk: Buffer) => chunks.push(chunk));
      req.on("end", () => {
        resolve({
          contentLength: req.headers["content-length"],
          transferEncoding: req.headers["transfer-encoding"],
          bodyLength: Buffer.concat(chunks).length,
        });
        res.writeHead(200);
        res.end("ok");
      });
    });

    await new Promise<void>(res => server.listen(0, "127.0.0.1", res));
    const port = (server.address() as any).port;

    try {
      const chunk1 = Buffer.alloc(100, "a");
      const chunk2 = Buffer.alloc(100, "b");
      const totalLength = chunk1.length + chunk2.length;

      const req = http.request({
        hostname: "127.0.0.1",
        port,
        method: "POST",
        headers: {
          "Content-Length": totalLength.toString(),
        },
      });

      await new Promise<void>((res, rej) => {
        req.on("error", rej);
        req.on("response", () => res());
        req.write(chunk1);
        req.end(chunk2);
      });

      const result = await promise;
      expect(result.contentLength).toBe("200");
      expect(result.transferEncoding).toBeUndefined();
      expect(result.bodyLength).toBe(200);
    } finally {
      server.close();
    }
  });

  test("with three req.write() calls", async () => {
    const { promise, resolve, reject } = Promise.withResolvers<{
      contentLength: string | undefined;
      transferEncoding: string | undefined;
      bodyLength: number;
    }>();

    const server = http.createServer((req, res) => {
      const chunks: Buffer[] = [];
      req.on("data", (chunk: Buffer) => chunks.push(chunk));
      req.on("end", () => {
        resolve({
          contentLength: req.headers["content-length"],
          transferEncoding: req.headers["transfer-encoding"],
          bodyLength: Buffer.concat(chunks).length,
        });
        res.writeHead(200);
        res.end("ok");
      });
    });

    await new Promise<void>(res => server.listen(0, "127.0.0.1", res));
    const port = (server.address() as any).port;

    try {
      const chunk1 = Buffer.alloc(100, "a");
      const chunk2 = Buffer.alloc(100, "b");
      const chunk3 = Buffer.alloc(100, "c");
      const totalLength = chunk1.length + chunk2.length + chunk3.length;

      const req = http.request({
        hostname: "127.0.0.1",
        port,
        method: "POST",
        headers: {
          "Content-Length": totalLength.toString(),
        },
      });

      await new Promise<void>((res, rej) => {
        req.on("error", rej);
        req.on("response", () => res());
        req.write(chunk1);
        req.write(chunk2);
        req.write(chunk3);
        req.end();
      });

      const result = await promise;
      expect(result.contentLength).toBe("300");
      expect(result.transferEncoding).toBeUndefined();
      expect(result.bodyLength).toBe(300);
    } finally {
      server.close();
    }
  });

  test("single req.write() still works", async () => {
    const { promise, resolve, reject } = Promise.withResolvers<{
      contentLength: string | undefined;
      transferEncoding: string | undefined;
      bodyLength: number;
    }>();

    const server = http.createServer((req, res) => {
      const chunks: Buffer[] = [];
      req.on("data", (chunk: Buffer) => chunks.push(chunk));
      req.on("end", () => {
        resolve({
          contentLength: req.headers["content-length"],
          transferEncoding: req.headers["transfer-encoding"],
          bodyLength: Buffer.concat(chunks).length,
        });
        res.writeHead(200);
        res.end("ok");
      });
    });

    await new Promise<void>(res => server.listen(0, "127.0.0.1", res));
    const port = (server.address() as any).port;

    try {
      const data = Buffer.alloc(200, "x");

      const req = http.request({
        hostname: "127.0.0.1",
        port,
        method: "POST",
        headers: {
          "Content-Length": data.length.toString(),
        },
      });

      await new Promise<void>((res, rej) => {
        req.on("error", rej);
        req.on("response", () => res());
        req.write(data);
        req.end();
      });

      const result = await promise;
      expect(result.contentLength).toBe("200");
      expect(result.transferEncoding).toBeUndefined();
      expect(result.bodyLength).toBe(200);
    } finally {
      server.close();
    }
  });

  test("without explicit Content-Length still uses chunked encoding", async () => {
    const { promise, resolve, reject } = Promise.withResolvers<{
      contentLength: string | undefined;
      transferEncoding: string | undefined;
      bodyLength: number;
    }>();

    const server = http.createServer((req, res) => {
      const chunks: Buffer[] = [];
      req.on("data", (chunk: Buffer) => chunks.push(chunk));
      req.on("end", () => {
        resolve({
          contentLength: req.headers["content-length"],
          transferEncoding: req.headers["transfer-encoding"],
          bodyLength: Buffer.concat(chunks).length,
        });
        res.writeHead(200);
        res.end("ok");
      });
    });

    await new Promise<void>(res => server.listen(0, "127.0.0.1", res));
    const port = (server.address() as any).port;

    try {
      const chunk1 = Buffer.alloc(100, "a");
      const chunk2 = Buffer.alloc(100, "b");

      const req = http.request({
        hostname: "127.0.0.1",
        port,
        method: "POST",
        // No Content-Length header
      });

      await new Promise<void>((res, rej) => {
        req.on("error", rej);
        req.on("response", () => res());
        req.write(chunk1);
        req.write(chunk2);
        req.end();
      });

      const result = await promise;
      // Without explicit Content-Length, chunked encoding should be used
      expect(result.transferEncoding).toBe("chunked");
      expect(result.bodyLength).toBe(200);
    } finally {
      server.close();
    }
  });

  test("explicit Transfer-Encoding takes precedence over Content-Length", async () => {
    const { promise, resolve } = Promise.withResolvers<{
      contentLength: string | undefined;
      transferEncoding: string | undefined;
      bodyLength: number;
    }>();

    const server = http.createServer((req, res) => {
      const chunks: Buffer[] = [];
      req.on("data", (chunk: Buffer) => chunks.push(chunk));
      req.on("end", () => {
        resolve({
          contentLength: req.headers["content-length"],
          transferEncoding: req.headers["transfer-encoding"],
          bodyLength: Buffer.concat(chunks).length,
        });
        res.writeHead(200);
        res.end("ok");
      });
    });

    await new Promise<void>(res => server.listen(0, "127.0.0.1", res));
    const port = (server.address() as any).port;

    try {
      const chunk1 = Buffer.alloc(100, "a");
      const chunk2 = Buffer.alloc(100, "b");

      const req = http.request({
        hostname: "127.0.0.1",
        port,
        method: "POST",
        headers: {
          "Content-Length": "200",
          "Transfer-Encoding": "chunked",
        },
      });

      await new Promise<void>((res, rej) => {
        req.on("error", rej);
        req.on("response", () => res());
        req.write(chunk1);
        req.write(chunk2);
        req.end();
      });

      const result = await promise;
      // When user explicitly sets Transfer-Encoding, it should be used
      // and Content-Length should not be added
      expect(result.transferEncoding).toBe("chunked");
      expect(result.contentLength).toBeUndefined();
      expect(result.bodyLength).toBe(200);
    } finally {
      server.close();
    }
  });
});
```
@@ -1,231 +0,0 @@
|
||||
import { SQL } from "bun";
|
||||
import { expect, test } from "bun:test";
|
||||
import net from "net";
|
||||
|
||||
// Regression test for https://github.com/oven-sh/bun/issues/27079
|
||||
// Bun crashes with "incorrect alignment" panic when processing binary-format
|
||||
// PostgreSQL int4[] or float4[] arrays from a network buffer whose alignment
|
||||
// doesn't match the struct's natural alignment (4 bytes).
|
||||
test("PostgreSQL binary int4_array should not crash on unaligned data", async () => {
|
||||
// We build a mock PostgreSQL server that returns a binary int4_array column.
|
||||
// The server introduces a 1-byte padding before the DataRow payload to ensure
|
||||
// the array data is NOT 4-byte aligned, which triggered the original panic.
|
||||
|
||||
const server = net.createServer(socket => {
|
||||
let gotStartup = false;
|
||||
|
||||
socket.on("data", data => {
|
||||
if (!gotStartup) {
|
||||
gotStartup = true;
|
||||
// Client sent startup message. Respond with:
|
||||
// 1. AuthenticationOk
|
||||
// 2. ParameterStatus (server_encoding = UTF8)
|
||||
// 3. BackendKeyData
|
||||
// 4. ReadyForQuery (idle)
|
||||
const authOk = pgMsg("R", int32BE(0)); // AuthOk
|
||||
        const paramStatus = pgMsg("S", Buffer.concat([cstr("client_encoding"), cstr("UTF8")]));
        const backendKey = pgMsg("K", Buffer.concat([int32BE(1234), int32BE(5678)]));
        const ready = pgMsg("Z", Buffer.from([0x49])); // 'I' = idle

        socket.write(Buffer.concat([authOk, paramStatus, backendKey, ready]));
        return;
      }

      // Assume any subsequent data is a query. Respond with a result set
      // containing one row with one column: an int4[] array in binary format.

      // RowDescription: 1 field
      //   name = "arr"
      //   table_oid = 0, column_index = 0
      //   type_oid = 1007 (int4_array)
      //   type_size = -1, type_modifier = -1
      //   format = 1 (binary)
      const fieldName = cstr("arr");
      const rowDesc = pgMsg(
        "T",
        Buffer.concat([
          int16BE(1), // number of fields
          fieldName,
          int32BE(0), // table OID
          int16BE(0), // column index
          int32BE(1007), // type OID = int4_array
          int16BE(-1), // type size
          int32BE(-1), // type modifier
          int16BE(1), // format code = binary
        ]),
      );

      // Build the binary int4 array payload.
      // PostgreSQL binary array format:
      //   ndim (4 bytes) = 1
      //   has_nulls (4 bytes) = 0
      //   element_type (4 bytes) = 23 (int4)
      //   dim_length (4 bytes) = 3 (3 elements)
      //   dim_lower_bound (4 bytes) = 1
      //   For each element: length (4 bytes) + value (4 bytes)
      const arrayData = Buffer.concat([
        int32BE(1), // ndim = 1
        int32BE(0), // has_nulls = 0
        int32BE(23), // element_type = int4
        int32BE(3), // length = 3 elements
        int32BE(1), // lower bound = 1
        // Element 0: length=4, value=10
        int32BE(4),
        int32BE(10),
        // Element 1: length=4, value=20
        int32BE(4),
        int32BE(20),
        // Element 2: length=4, value=30
        int32BE(4),
        int32BE(30),
      ]);

      // DataRow: 1 column
      const dataRow = pgMsg(
        "D",
        Buffer.concat([
          int16BE(1), // number of columns
          int32BE(arrayData.length), // column data length
          arrayData,
        ]),
      );

      // CommandComplete
      const cmdComplete = pgMsg("C", cstr("SELECT 1"));

      // ReadyForQuery (idle)
      const ready2 = pgMsg("Z", Buffer.from([0x49]));

      socket.write(Buffer.concat([rowDesc, dataRow, cmdComplete, ready2]));
    });
  });

  await new Promise<void>(r => server.listen(0, "127.0.0.1", () => r()));
  const port = (server.address() as net.AddressInfo).port;

  const sql = new SQL({
    url: `postgres://test@127.0.0.1:${port}/test`,
    max: 1,
    idle_timeout: 1,
  });

  try {
    const rows = await sql`SELECT 1`;
    // The query should succeed without an alignment panic.
    // Verify we got an Int32Array with the correct values.
    expect(rows.length).toBe(1);
    const arr = rows[0].arr;
    expect(arr).toBeInstanceOf(Int32Array);
    expect(Array.from(arr)).toEqual([10, 20, 30]);
  } finally {
    await sql.close();
    server.close();
  }
});

test("PostgreSQL binary float4_array should not crash on unaligned data", async () => {
  const server = net.createServer(socket => {
    let gotStartup = false;

    socket.on("data", data => {
      if (!gotStartup) {
        gotStartup = true;
        const authOk = pgMsg("R", int32BE(0));
        const paramStatus = pgMsg("S", Buffer.concat([cstr("client_encoding"), cstr("UTF8")]));
        const backendKey = pgMsg("K", Buffer.concat([int32BE(1234), int32BE(5678)]));
        const ready = pgMsg("Z", Buffer.from([0x49]));
        socket.write(Buffer.concat([authOk, paramStatus, backendKey, ready]));
        return;
      }

      // RowDescription: 1 field with float4_array (OID 1021) in binary format
      const fieldName = cstr("arr");
      const rowDesc = pgMsg(
        "T",
        Buffer.concat([
          int16BE(1),
          fieldName,
          int32BE(0),
          int16BE(0),
          int32BE(1021), // type OID = float4_array
          int16BE(-1),
          int32BE(-1),
          int16BE(1), // binary format
        ]),
      );

      // Binary float4 array: [1.5, 2.5]
      const arrayData = Buffer.concat([
        int32BE(1), // ndim = 1
        int32BE(0), // has_nulls = 0
        int32BE(700), // element_type = float4
        int32BE(2), // length = 2 elements
        int32BE(1), // lower bound = 1
        // Element 0: length=4, value=1.5
        int32BE(4),
        float32BE(1.5),
        // Element 1: length=4, value=2.5
        int32BE(4),
        float32BE(2.5),
      ]);

      const dataRow = pgMsg("D", Buffer.concat([int16BE(1), int32BE(arrayData.length), arrayData]));

      const cmdComplete = pgMsg("C", cstr("SELECT 1"));
      const ready2 = pgMsg("Z", Buffer.from([0x49]));
      socket.write(Buffer.concat([rowDesc, dataRow, cmdComplete, ready2]));
    });
  });

  await new Promise<void>(r => server.listen(0, "127.0.0.1", () => r()));
  const port = (server.address() as net.AddressInfo).port;

  const sql = new SQL({
    url: `postgres://test@127.0.0.1:${port}/test`,
    max: 1,
    idle_timeout: 1,
  });

  try {
    const rows = await sql`SELECT 1`;
    expect(rows.length).toBe(1);
    const arr = rows[0].arr;
    expect(arr).toBeInstanceOf(Float32Array);
    expect(Array.from(arr)).toEqual([1.5, 2.5]);
  } finally {
    await sql.close();
    server.close();
  }
});

// Helper functions
function pgMsg(type: string, payload: Buffer): Buffer {
  const len = payload.length + 4;
  const buf = Buffer.alloc(5 + payload.length);
  buf.write(type, 0, 1, "ascii");
  buf.writeInt32BE(len, 1);
  payload.copy(buf, 5);
  return buf;
}

function int32BE(val: number): Buffer {
  const buf = Buffer.alloc(4);
  buf.writeInt32BE(val, 0);
  return buf;
}

function int16BE(val: number): Buffer {
  const buf = Buffer.alloc(2);
  buf.writeInt16BE(val, 0);
  return buf;
}

function float32BE(val: number): Buffer {
  const buf = Buffer.alloc(4);
  buf.writeFloatBE(val, 0);
  return buf;
}

function cstr(s: string): Buffer {
  return Buffer.concat([Buffer.from(s, "utf8"), Buffer.from([0])]);
}
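The `pgMsg` helper above frames each backend message the way PostgreSQL does on the wire: a one-byte type tag followed by a big-endian int32 length that counts itself but not the tag. Because such a message can start at any byte offset inside a socket buffer, the array payload is generally not 4-byte aligned, which is exactly the hazard these regression tests exercise. As a minimal sketch of the decode side (this is not Bun's internal decoder), a `DataView`-based reader handles arbitrary offsets safely, since it takes explicit byte offsets instead of requiring an aligned typed-array view:

```ts
// Sketch: decode the 1-D binary int4[] payload the tests above construct.
// Assumes no NULL elements, matching the test data.
function decodeInt4Array(payload: Uint8Array): number[] {
  const view = new DataView(payload.buffer, payload.byteOffset, payload.byteLength);
  let off = 0;
  const ndim = view.getInt32(off); // getInt32 defaults to big-endian
  off += 4;
  off += 4; // has_nulls (ignored here)
  off += 4; // element_type (23 = int4)
  if (ndim !== 1) throw new Error("sketch handles 1-D arrays only");
  const count = view.getInt32(off);
  off += 4;
  off += 4; // dimension lower bound (ignored)
  const out: number[] = [];
  for (let i = 0; i < count; i++) {
    const elemLen = view.getInt32(off);
    off += 4;
    if (elemLen === -1) throw new Error("NULL elements not handled in this sketch");
    out.push(view.getInt32(off));
    off += elemLen;
  }
  return out;
}
```

Run against the `arrayData` buffer built in the first test, this sketch would yield `[10, 20, 30]`.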
test/regression/issue/27095.test.ts
Normal file
@@ -0,0 +1,123 @@
import { file, spawn, write } from "bun";
import { afterAll, beforeAll, expect, test } from "bun:test";
import { existsSync } from "fs";
import { readdir } from "fs/promises";
import { VerdaccioRegistry, bunEnv, bunExe } from "harness";
import { join } from "path";

// Issue #27095: bun install silently skips files when linking packages from
// cache to node_modules on NFS/FUSE/bind-mount filesystems that return
// DT_UNKNOWN for d_type.
//
// The fix adds resolve_unknown_entry_types to the walker so it falls back to
// fstatat() for unknown entries. This test verifies that all backends
// (clonefile, hardlink, copyfile) correctly install every file from a package
// with a deeply nested directory structure.

const registry = new VerdaccioRegistry();

beforeAll(async () => {
  await registry.start();
});

afterAll(() => {
  registry.stop();
});

/** Recursively count all files and directories under `dir`. */
async function countEntriesRecursive(dir: string): Promise<number> {
  let count = 0;
  const entries = await readdir(dir, { withFileTypes: true });
  for (const entry of entries) {
    count++;
    if (entry.isDirectory()) {
      count += await countEntriesRecursive(join(dir, entry.name));
    }
  }
  return count;
}

for (const backend of ["clonefile", "hardlink", "copyfile"]) {
  test(`all files installed with backend: ${backend} (#27095)`, async () => {
    const { packageJson, packageDir } = await registry.createTestDir({
      bunfigOpts: { linker: "isolated" },
    });

    // Create a file dependency with a nested directory tree.
    // This mimics what happens with packages like typescript that have
    // many files in deeply nested directories - the exact scenario
    // where DT_UNKNOWN would cause silent skipping.
    const files: Record<string, string> = {
      "package.json": JSON.stringify({ name: "nested-pkg", version: "1.0.0" }),
      "index.js": "module.exports = 'root';",
      "lib/a.js": "module.exports = 'a';",
      "lib/b.js": "module.exports = 'b';",
      "lib/types/a.d.ts": "export declare const a: string;",
      "lib/types/b.d.ts": "export declare const b: string;",
      "lib/types/nested/c.d.ts": "export declare const c: string;",
      "lib/types/nested/d.d.ts": "export declare const d: string;",
      "lib/types/nested/deep/e.d.ts": "export declare const e: string;",
    };

    // Write the nested package files
    await Promise.all(
      Object.entries(files).map(([path, content]) => write(join(packageDir, "nested-pkg", path), content)),
    );

    await write(
      packageJson,
      JSON.stringify({
        name: "test-27095",
        dependencies: {
          "nested-pkg": "file:./nested-pkg",
        },
      }),
    );

    const { stdout, stderr, exited } = spawn({
      cmd: [bunExe(), "install", "--backend", backend],
      cwd: packageDir,
      env: bunEnv,
      stdout: "pipe",
      stderr: "pipe",
    });

    const err = await stderr.text();
    const out = await stdout.text();

    expect(err).not.toContain("error");

    // Verify every single file was installed
    const installedBase = join(
      packageDir,
      "node_modules",
      ".bun",
      "nested-pkg@file+nested-pkg",
      "node_modules",
      "nested-pkg",
    );

    // Check each expected file exists and has correct content
    for (const [path, expectedContent] of Object.entries(files)) {
      const fullPath = join(installedBase, path);
      expect(existsSync(fullPath)).toBe(true);
      if (path.endsWith(".json")) {
        expect(await file(fullPath).json()).toEqual(JSON.parse(expectedContent));
      } else {
        expect(await file(fullPath).text()).toBe(expectedContent);
      }
    }

    // Verify the nested directories exist
    expect(existsSync(join(installedBase, "lib"))).toBe(true);
    expect(existsSync(join(installedBase, "lib", "types"))).toBe(true);
    expect(existsSync(join(installedBase, "lib", "types", "nested"))).toBe(true);
    expect(existsSync(join(installedBase, "lib", "types", "nested", "deep"))).toBe(true);

    // Verify total count matches (9 files + 4 directories = 13 entries)
    const totalEntries = await countEntriesRecursive(installedBase);
    expect(totalEntries).toBe(13);

    expect(await exited).toBe(0);
  });
}
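The comment at the top of this test explains the underlying bug: on filesystems that return DT_UNKNOWN, a walker that trusts d_type alone treats unknown entries as neither files nor directories and drops them. Bun's actual walker is native code, and `resolve_unknown_entry_types` / `fstatat()` are its internals; as a rough Node-flavored illustration of the same fallback idea (not the real implementation), an explicit `lstat` can recover the type of any entry whose `Dirent` predicates all report false:

```ts
import { lstatSync, readdirSync, type Dirent } from "fs";
import { join } from "path";

// Resolve an entry whose type the filesystem did not report. On NFS, FUSE,
// and bind mounts, readdir can return DT_UNKNOWN, in which case none of the
// Dirent type predicates are true; an explicit lstat recovers the type.
function entryIsDirectory(dir: string, entry: Dirent): boolean {
  if (entry.isDirectory()) return true;
  if (!entry.isFile() && !entry.isSymbolicLink()) {
    return lstatSync(join(dir, entry.name)).isDirectory();
  }
  return false;
}

// A walker using the fallback never silently drops unknown-type entries.
function* walk(dir: string): Generator<string> {
  for (const entry of readdirSync(dir, { withFileTypes: true })) {
    const full = join(dir, entry.name);
    if (entryIsDirectory(dir, entry)) {
      yield* walk(full);
    } else {
      yield full; // files, symlinks, and anything lstat says is not a directory
    }
  }
}
```

The extra `lstat` runs only for entries the filesystem failed to classify, so the common case stays a single `readdir` per directory.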