Compare commits


6 Commits

Author        SHA1        Message                                                            Date
Dylan Conway  a0da7377f7  update                                                             2025-07-09 20:42:18 -07:00
Dylan Conway  b31bee1e48  update                                                             2025-07-09 20:30:19 -07:00
dylan-conway  a4e9a31b94  bun run prettier                                                   2025-07-10 00:54:08 +00:00
Dylan Conway  01c97bee80  test                                                               2025-07-09 17:47:46 -07:00
Meghan Denny  392acbee5a  js: internal/validators: define simple validators in js (#20897)  2025-07-09 16:45:40 -07:00
190n          8b7888aeee  [publish images] upload encrypted core dumps from CI (#19189)     2025-07-09 15:42:11 -07:00
                          Co-authored-by: 190n <7763597+190n@users.noreply.github.com>
                          Co-authored-by: Ashcon Partovi <ashcon@partovi.net>
11 changed files with 583 additions and 53 deletions

View File

@@ -951,14 +951,22 @@ endif()
if(APPLE)
target_link_options(${bun} PUBLIC
-   -dead_strip
-   -dead_strip_dylibs
-Wl,-ld_new
-Wl,-no_compact_unwind
-Wl,-stack_size,0x1200000
-fno-keep-static-consts
-Wl,-map,${bun}.linker-map
)
+ # don't strip in debug, this seems to be needed so that the Zig std library
+ # `*dbHelper` DWARF symbols (used by LLDB for pretty printing) are in the
+ # output executable
+ if(NOT DEBUG)
+   target_link_options(${bun} PUBLIC
+     -dead_strip
+     -dead_strip_dylibs
+   )
+ endif()
endif()
if(LINUX)
@@ -995,7 +1003,6 @@ if(LINUX)
-Wl,-no-pie
-Wl,-icf=safe
-Wl,--as-needed
-   -Wl,--gc-sections
-Wl,-z,stack-size=12800000
-Wl,--compress-debug-sections=zlib
-Wl,-z,lazy
@@ -1011,6 +1018,15 @@ if(LINUX)
-Wl,--build-id=sha1 # Better for debugging than default
-Wl,-Map=${bun}.linker-map
)
+ # don't strip in debug, this seems to be needed so that the Zig std library
+ # `*dbHelper` DWARF symbols (used by LLDB for pretty printing) are in the
+ # output executable
+ if(NOT DEBUG)
+   target_link_options(${bun} PUBLIC
+     -Wl,--gc-sections
+   )
+ endif()
endif()
# --- Symbols list ---
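
A quick way to sanity-check the intent of this change: after a debug link, the Zig pretty-printer helper symbols should still be present in the binary. A minimal sketch, assuming a debug binary at build/debug/bun (path illustrative):

nm build/debug/bun | grep dbHelper   # expect matches in debug builds; release builds may strip them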

View File

@@ -124,7 +124,7 @@ const argv0 = argv0_stdout.toString().trim();
console.log(`Testing ${argv0} v${revision}`);
- const ntStatusPath = "C:\\Program Files (x86)\\Windows Kits\\10\\Include\\10.0.22621.0\\shared\\ntstatus.h";
+ const ntStatusPath = "C:\\Program Files (x86)\\Windows Kits\\10\\Include\\10.0.26100.0\\shared\\ntstatus.h";
let ntstatus_header_cache = null;
function lookupWindowsError(code) {
if (ntstatus_header_cache === null) {

View File

@@ -1,5 +1,5 @@
#!/bin/sh
- # Version: 13
+ # Version: 14
# A script that installs the dependencies needed to build and test Bun.
# This should work on macOS and Linux with a POSIX shell.
@@ -195,6 +195,17 @@ download_file() {
print "$file_tmp_path"
}
+ # path=$(download_and_verify_file URL sha256)
+ download_and_verify_file() {
+   file_url="$1"
+   hash="$2"
+   path=$(download_file "$file_url")
+   execute sh -c 'echo "'"$hash $path"'" | sha256sum -c' >/dev/null 2>&1
+   print "$path"
+ }
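
A hypothetical call site for the new helper, mirroring the usage comment above (URL and checksum variable are placeholders; the function prints the downloaded path only when sha256sum accepts it):

tool_path="$(download_and_verify_file "https://example.com/tool.tar.gz" "$expected_sha256")"
execute tar -zxf "$tool_path"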
append_to_profile() {
content="$1"
profiles=".profile .zprofile .bash_profile .bashrc .zshrc"
@@ -400,7 +411,7 @@ check_package_manager() {
pm="brew"
;;
linux)
if [ -f "$(which apt)" ]; then
if [ -f "$(which apt-get)" ]; then
pm="apt"
elif [ -f "$(which dnf)" ]; then
pm="dnf"
@@ -470,10 +481,8 @@ check_ulimit() {
print "Checking ulimits..."
systemd_conf="/etc/systemd/system.conf"
if [ -f "$systemd_conf" ]; then
limits_conf="/etc/security/limits.d/99-unlimited.conf"
create_file "$limits_conf"
fi
limits_conf="/etc/security/limits.d/99-unlimited.conf"
create_file "$limits_conf"
limits="core data fsize memlock nofile rss stack cpu nproc as locks sigpending msgqueue"
for limit in $limits; do
@@ -495,6 +504,10 @@ check_ulimit() {
fi
if [ -f "$systemd_conf" ]; then
+ # in systemd's configuration you need to say "infinity" when you mean "unlimited"
+ if [ "$limit_value" = "unlimited" ]; then
+   limit_value="infinity"
+ fi
append_file "$systemd_conf" "DefaultLimit$limit_upper=$limit_value"
fi
done
@@ -549,7 +562,7 @@ check_ulimit() {
package_manager() {
case "$pm" in
apt)
- execute_sudo apt "$@"
+ execute_sudo apt-get "$@"
;;
dnf)
case "$distro" in
@@ -598,6 +611,7 @@ install_packages() {
package_manager install \
--yes \
--no-install-recommends \
+ --fix-missing \
"$@"
;;
dnf)
@@ -673,7 +687,7 @@ install_common_software() {
esac
case "$distro" in
- amzn)
+ amzn | alpine)
install_packages \
tar
;;
@@ -1362,6 +1376,58 @@ install_chromium() {
esac
}
install_age() {
# we only use this to encrypt core dumps, which we only have on Linux
case "$os" in
linux)
age_tarball=""
case "$arch" in
x64)
age_tarball="$(download_and_verify_file https://github.com/FiloSottile/age/releases/download/v1.2.1/age-v1.2.1-linux-amd64.tar.gz 7df45a6cc87d4da11cc03a539a7470c15b1041ab2b396af088fe9990f7c79d50)"
;;
aarch64)
age_tarball="$(download_and_verify_file https://github.com/FiloSottile/age/releases/download/v1.2.1/age-v1.2.1-linux-arm64.tar.gz 57fd79a7ece5fe501f351b9dd51a82fbee1ea8db65a8839db17f5c080245e99f)"
;;
esac
age_extract_dir="$(create_tmp_directory)"
execute tar -C "$age_extract_dir" -zxf "$age_tarball" age/age
move_to_bin "$age_extract_dir/age/age"
;;
esac
}
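
For context, age encrypts to a public "recipient" and decrypts with the matching secret identity; a minimal round trip looks like this (keys illustrative; age-keygen ships in the same release tarball but is not installed by this function):

age-keygen -o identity.txt                        # prints the matching public recipient (age1...)
age -e -r age1examplerecipient0000 -o secret.age file
age -d -i identity.txt secret.age > file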
configure_core_dumps() {
# we only have core dumps on Linux
case "$os" in
linux)
# set up a directory that the test runner will look in after running tests
cores_dir="/var/bun-cores-$distro-$release-$arch"
sysctl_file="/etc/sysctl.d/local.conf"
create_directory "$cores_dir"
# ensure core_pattern will point there
# %e = executable filename
# %p = pid
append_file "$sysctl_file" "kernel.core_pattern = $cores_dir/%e-%p.core"
# disable apport.service if it exists since it will override the core_pattern
if which systemctl >/dev/null; then
if systemctl list-unit-files apport.service >/dev/null; then
execute_sudo "$systemctl" disable --now apport.service
fi
fi
# load the new configuration
execute_sudo sysctl -p "$sysctl_file"
# ensure that a regular user will be able to run sysctl
if [ -d /sbin ]; then
append_to_path /sbin
fi
;;
esac
}
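
A quick sanity check for this configuration, as a sketch (the directory name depends on $distro, $release, and $arch):

sysctl -n kernel.core_pattern          # e.g. /var/bun-cores-ubuntu-22.04-x64/%e-%p.core
ulimit -c unlimited
sh -c 'kill -ABRT $$' || true          # crash a throwaway shell
ls /var/bun-cores-*/                   # expect something like sh-12345.core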
clean_system() {
if ! [ "$ci" = "1" ]; then
return
@@ -1387,6 +1453,8 @@ main() {
install_build_essentials
install_chromium
install_fuse_python
+ install_age
+ configure_core_dumps
clean_system
}

scripts/debug-coredump.ts (new file, 63 lines)
View File

@@ -0,0 +1,63 @@
import fs from "node:fs";
import { tmpdir } from "node:os";
import { basename, join } from "node:path";
import { parseArgs } from "node:util";
// usage: bun debug-coredump.ts
// -p <PID of the test that crashed> (buildkite should show this)
// -b <URL to the bun-profile.zip artifact for the appropriate platform>
// -c <URL to the bun-cores.tar.gz.age artifact for the appropriate platform>
// -d <debugger> (default: lldb)
const {
values: { pid: stringPid, ["build-url"]: buildUrl, ["cores-url"]: coresUrl, debugger: debuggerPath },
} = parseArgs({
options: {
pid: { type: "string", short: "p" },
["build-url"]: { type: "string", short: "b" },
["cores-url"]: { type: "string", short: "c" },
debugger: { type: "string", short: "d", default: "lldb" },
},
});
if (stringPid === undefined) throw new Error("no PID given");
const pid = parseInt(stringPid);
if (buildUrl === undefined) throw new Error("no build-url given");
if (coresUrl === undefined) throw new Error("no cores-url given");
if (!process.env.AGE_CORES_IDENTITY?.startsWith("AGE-SECRET-KEY-"))
throw new Error("no identity given in $AGE_CORES_IDENTITY");
const id = Bun.hash(buildUrl + coresUrl).toString(36);
const dir = join(tmpdir(), `debug-coredump-${id}.tmp`);
fs.mkdirSync(dir, { recursive: true });
if (!fs.existsSync(join(dir, "bun-profile")) || !fs.existsSync(join(dir, `bun-${pid}.core`))) {
console.log("downloading bun-profile.zip");
const zip = await (await fetch(buildUrl)).arrayBuffer();
await Bun.write(join(dir, "bun-profile.zip"), zip);
// -j: junk paths (don't create directories when extracting)
// -o: overwrite without prompting
// -d: extract to this directory instead of cwd
await Bun.$`unzip -j -o ${join(dir, "bun-profile.zip")} -d ${dir}`;
console.log("downloading cores");
const cores = await (await fetch(coresUrl)).arrayBuffer();
await Bun.$`bash -c ${`age -d -i <(echo "$AGE_CORES_IDENTITY")`} < ${cores} | tar -zxvC ${dir}`;
console.log("moving cores out of nested directory");
for await (const file of new Bun.Glob("bun-cores-*/bun-*.core").scan(dir)) {
fs.renameSync(join(dir, file), join(dir, basename(file)));
}
} else {
console.log(`already downloaded in ${dir}`);
}
console.log("launching debugger:");
console.log(`${debuggerPath} --core ${join(dir, `bun-${pid}.core`)} ${join(dir, "bun-profile")}`);
const proc = await Bun.spawn([debuggerPath, "--core", join(dir, `bun-${pid}.core`), join(dir, "bun-profile")], {
stdin: "inherit",
stdout: "inherit",
stderr: "inherit",
});
await proc.exited;
process.exit(proc.exitCode);
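
A hypothetical invocation of this script (PID and artifact URLs are placeholders; AGE_CORES_IDENTITY must be the secret key matching the recipient CI encrypts to):

export AGE_CORES_IDENTITY="AGE-SECRET-KEY-1..."   # placeholder
bun scripts/debug-coredump.ts \
  -p 12345 \
  -b "https://buildkite.example/artifacts/bun-profile.zip" \
  -c "https://buildkite.example/artifacts/bun-cores-linux-x64.tar.gz.age"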

View File

@@ -51,6 +51,7 @@ import {
isBuildkite,
isCI,
isGithubAction,
+ isLinux,
isMacOS,
isWindows,
isX64,
@@ -59,6 +60,7 @@ import {
startGroup,
tmpdir,
unzip,
+ uploadArtifact,
} from "./utils.mjs";
let isQuiet = false;
const cwd = import.meta.dirname ? dirname(import.meta.dirname) : process.cwd();
@@ -146,6 +148,10 @@ const { values: options, positionals: filters } = parseArgs({
type: "boolean",
default: isBuildkite,
},
["coredump-upload"]: {
type: "boolean",
default: isBuildkite && isLinux,
},
},
});
@@ -605,6 +611,78 @@ async function runTests() {
}
}
if (options["coredump-upload"]) {
try {
// this sysctl is set in bootstrap.sh to /var/bun-cores-$distro-$release-$arch
const sysctl = await spawnSafe({ command: "sysctl", args: ["-n", "kernel.core_pattern"] });
let coresDir = sysctl.stdout;
if (sysctl.ok) {
if (coresDir.startsWith("|")) {
throw new Error("cores are being piped not saved");
}
// change /foo/bar/%e-%p.core to /foo/bar
coresDir = dirname(sysctl.stdout);
} else {
throw new Error(`Failed to check core_pattern: ${sysctl.error}`);
}
const coresDirBase = dirname(coresDir);
const coresDirName = basename(coresDir);
const coreFileNames = readdirSync(coresDir);
if (coreFileNames.length > 0) {
console.log(`found ${coreFileNames.length} cores in ${coresDir}`);
let totalBytes = 0;
let totalBlocks = 0;
for (const f of coreFileNames) {
const stat = statSync(join(coresDir, f));
totalBytes += stat.size;
totalBlocks += stat.blocks;
}
console.log(`total apparent size = ${totalBytes} bytes`);
console.log(`total size on disk = ${512 * totalBlocks} bytes`);
const outdir = mkdtempSync(join(tmpdir(), "cores-upload"));
const outfileName = `${coresDirName}.tar.gz.age`;
const outfileAbs = join(outdir, outfileName);
// This matches an age identity known by Bun employees. Core dumps from CI have to be kept
// secret since they will contain API keys.
const ageRecipient = "age1eunsrgxwjjpzr48hm0y98cw2vn5zefjagt4r0qj4503jg2nxedqqkmz6fu"; // reject external PRs changing this, see above
// Run tar in the parent directory of coresDir so that it creates archive entries with
// coresDirName in them. This way when you extract the tarball you get a folder named
// bun-cores-XYZ containing core files, instead of a bunch of core files strewn in your
// current directory
const before = Date.now();
const zipAndEncrypt = await spawnSafe({
command: "bash",
args: [
"-c",
// tar -S: handle sparse files efficiently
`set -euo pipefail && tar -Sc "$0" | gzip -1 | age -e -r ${ageRecipient} -o "$1"`,
// $0
coresDirName,
// $1
outfileAbs,
],
cwd: coresDirBase,
stdout: () => {},
timeout: 60_000,
});
const elapsed = Date.now() - before;
if (!zipAndEncrypt.ok) {
throw new Error(zipAndEncrypt.error);
}
console.log(`saved core dumps to ${outfileAbs} (${statSync(outfileAbs).size} bytes) in ${elapsed} ms`);
await uploadArtifact(outfileAbs);
} else {
console.log(`no cores found in ${coresDir}`);
}
} catch (err) {
console.error("Error collecting and uploading core dumps:", err);
}
}
if (!isCI && !isQuiet) {
console.table({
"Total Tests": okResults.length + failedResults.length + flakyResults.length,
@@ -780,6 +858,7 @@ async function spawnSafe(options) {
const [, message] = error || [];
error = message ? message.split("\n")[0].toLowerCase() : "crash";
error = error.indexOf("\\n") !== -1 ? error.substring(0, error.indexOf("\\n")) : error;
+ error = `pid ${subprocess.pid} ${error}`;
} else if (signalCode) {
if (signalCode === "SIGTERM" && duration >= timeout) {
error = "timeout";
@@ -871,7 +950,7 @@ async function spawnBun(execPath, { args, cwd, timeout, env, stdout, stderr }) {
};
if (basename(execPath).includes("asan")) {
- bunEnv.ASAN_OPTIONS = "allow_user_segv_handler=1";
+ bunEnv.ASAN_OPTIONS = "allow_user_segv_handler=1:disable_coredump=0";
}
if (isWindows && bunEnv.Path) {
@@ -1023,7 +1102,7 @@ function getTestTimeout(testPath) {
if (/integration|3rd_party|docker|bun-install-registry|v8/i.test(testPath)) {
return integrationTimeout;
}
- if (/napi/i.test(testPath)) {
+ if (/napi/i.test(testPath) || /v8/i.test(testPath)) {
return napiTimeout;
}
return testTimeout;
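
On the receiving end, someone holding the matching identity can unpack an uploaded cores artifact by hand, roughly as follows (filename illustrative; scripts/debug-coredump.ts above automates this):

age -d -i identity.txt bun-cores-ubuntu-22.04-x64.tar.gz.age | tar -zxvf -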

View File

@@ -16,7 +16,7 @@ import {
} from "node:fs";
import { connect } from "node:net";
import { hostname, homedir as nodeHomedir, tmpdir as nodeTmpdir, release, userInfo } from "node:os";
- import { dirname, join, relative, resolve } from "node:path";
+ import { basename, dirname, join, relative, resolve } from "node:path";
import { normalize as normalizeWindows } from "node:path/win32";
export const isWindows = process.platform === "win32";
@@ -1370,13 +1370,16 @@ export async function getLastSuccessfulBuild() {
}
/**
- * @param {string} filename
- * @param {string} [cwd]
+ * @param {string} filename Absolute path to file to upload
*/
- export async function uploadArtifact(filename, cwd) {
+ export async function uploadArtifact(filename) {
if (isBuildkite) {
-   const relativePath = relative(cwd ?? process.cwd(), filename);
-   await spawnSafe(["buildkite-agent", "artifact", "upload", relativePath], { cwd, stdio: "inherit" });
+   await spawnSafe(["buildkite-agent", "artifact", "upload", basename(filename)], {
+     cwd: dirname(filename),
+     stdio: "inherit",
+   });
} else {
console.warn(`not in buildkite. artifact ${filename} not uploaded.`);
}
}
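
The rewritten helper always runs the upload from the file's own directory, so Buildkite registers the artifact under its bare filename rather than a path that varies by machine. The rough shell equivalent:

cd "$(dirname "$file")" && buildkite-agent artifact upload "$(basename "$file")"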

View File

@@ -167,6 +167,7 @@ pub const Features = struct {
};
pub const workspace = Features{
.check_for_duplicate_dependencies = true,
+ .dev_dependencies = true,
.optional_dependencies = true,
.trusted_dependencies = true,

View File

@@ -1064,11 +1064,11 @@ pub const Package = extern struct {
else => external_alias.hash,
};
- var workspace_path: ?String = null;
- var workspace_version = workspace_ver;
+ var has_workspace_path: ?String = null;
+ var has_workspace_version = workspace_ver;
if (comptime tag == null) {
- workspace_path = lockfile.workspace_paths.get(name_hash);
- workspace_version = lockfile.workspace_versions.get(name_hash);
+ has_workspace_path = lockfile.workspace_paths.get(name_hash);
+ has_workspace_version = lockfile.workspace_versions.get(name_hash);
}
if (comptime tag != null) {
@@ -1093,9 +1093,9 @@ pub const Package = extern struct {
},
.npm => {
const npm = dependency_version.value.npm;
- if (workspace_version != null) {
-   if (pm.options.link_workspace_packages and npm.version.satisfies(workspace_version.?, buf, buf)) {
-     const path = workspace_path.?.sliced(buf);
+ if (has_workspace_version) |workspace_version| {
+   if (pm.options.link_workspace_packages and npm.version.satisfies(workspace_version, buf, buf)) {
+     const path = has_workspace_path.?.sliced(buf);
if (Dependency.parseWithTag(
allocator,
external_alias.value,
@@ -1112,7 +1112,7 @@ pub const Package = extern struct {
} else {
// It doesn't satisfy, but a workspace shares the same name. Override the workspace with the other dependency
for (package_dependencies[0..dependencies_count]) |*dep| {
- if (dep.name_hash == name_hash and dep.version.tag == .workspace) {
+ if (dep.name_hash == name_hash and dep.behavior.isWorkspaceOnly()) {
dep.* = .{
.behavior = if (in_workspace) group.behavior.add(.workspace) else group.behavior,
.name = external_alias.value,
@@ -1126,11 +1126,11 @@ pub const Package = extern struct {
}
},
.workspace => workspace: {
- if (workspace_path) |path| {
+ if (has_workspace_path) |workspace_path| {
if (workspace_range) |range| {
- if (workspace_version) |ver| {
-   if (range.satisfies(ver, buf, buf)) {
-     dependency_version.value.workspace = path;
+ if (has_workspace_version) |workspace_version| {
+   if (range.satisfies(workspace_version, buf, buf)) {
+     dependency_version.value.workspace = workspace_path;
break :workspace;
}
}
@@ -1138,7 +1138,7 @@ pub const Package = extern struct {
// important to trim before len == 0 check. `workspace:foo@ ` should install successfully
const version_literal = strings.trim(range.input, &strings.whitespace_chars);
if (version_literal.len == 0 or range.@"is *"() or Semver.Version.isTaggedVersionOnly(version_literal)) {
- dependency_version.value.workspace = path;
+ dependency_version.value.workspace = workspace_path;
break :workspace;
}
@@ -1157,7 +1157,7 @@ pub const Package = extern struct {
return error.InstallFailed;
}
- dependency_version.value.workspace = path;
+ dependency_version.value.workspace = workspace_path;
} else {
const workspace = dependency_version.value.workspace.slice(buf);
const path = string_builder.append(String, if (strings.eqlComptime(workspace, "*")) "*" else brk: {
@@ -1190,13 +1190,13 @@ pub const Package = extern struct {
const workspace_entry = try lockfile.workspace_paths.getOrPut(allocator, name_hash);
const found_matching_workspace = workspace_entry.found_existing;
- if (workspace_version) |ver| {
-   try lockfile.workspace_versions.put(allocator, name_hash, ver);
+ if (has_workspace_version) |workspace_version| {
+   try lockfile.workspace_versions.put(allocator, name_hash, workspace_version);
for (package_dependencies[0..dependencies_count]) |*package_dep| {
if (switch (package_dep.version.tag) {
// `dependencies` & `workspaces` defined within the same `package.json`
.npm => String.Builder.stringHash(package_dep.realname().slice(buf)) == name_hash and
- package_dep.version.value.npm.version.satisfies(ver, buf, buf),
+ package_dep.version.value.npm.version.satisfies(workspace_version, buf, buf),
// `workspace:*`
.workspace => found_matching_workspace and
String.Builder.stringHash(package_dep.realname().slice(buf)) == name_hash,
@@ -1234,19 +1234,25 @@ pub const Package = extern struct {
// `peerDependencies` may be specified on existing dependencies. Packages in `workspaces` are deduplicated when
// the array is processed
- if (comptime features.check_for_duplicate_dependencies and !group.behavior.isPeer() and !group.behavior.isWorkspace()) {
-   const entry = lockfile.scratch.duplicate_checker_map.getOrPutAssumeCapacity(external_alias.hash);
-   if (entry.found_existing) {
-     // duplicate dependencies are allowed in optionalDependencies
-     if (comptime group.behavior.isOptional()) {
+ if (comptime features.check_for_duplicate_dependencies) {
+   if (!this_dep.behavior.isWorkspaceOnly()) {
+     const entry = lockfile.scratch.duplicate_checker_map.getOrPutAssumeCapacity(external_alias.hash);
+     if (entry.found_existing) {
+       // duplicate dependencies are allowed in optionalDependencies and devDependencies. choose dev over others
for (package_dependencies[0..dependencies_count]) |*package_dep| {
if (package_dep.name_hash == this_dep.name_hash) {
-         package_dep.* = this_dep;
-         break;
+         if (comptime group.behavior.isOptional() or group.behavior.isDev()) {
+           package_dep.* = this_dep;
+           return null;
+         }
+         if (package_dep.behavior.isDev()) {
+           // choose the existing one.
+           return null;
+         }
}
}
return null;
} else {
var notes = try allocator.alloc(logger.Data, 1);
notes[0] = .{
@@ -1263,9 +1269,9 @@ pub const Package = extern struct {
.{external_alias.slice(buf)},
);
}
}
- entry.value_ptr.* = value_loc;
+ entry.value_ptr.* = value_loc;
}
}
return this_dep;
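
In package.json terms, the new precedence means a manifest shaped like this (illustrative) resolves my-dep from the devDependencies entry, so the workspace copy is linked even though the peer range points at a published version; the tests below exercise exactly this shape:

{
  "devDependencies": { "my-dep": "workspace:*" },
  "peerDependencies": { "my-dep": "^1.0.0" }
}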

View File

@@ -65,12 +65,30 @@ function validateLinkHeaderValue(hints) {
);
}
+ function validateString(value, name) {
+   if (typeof value !== "string") throw $ERR_INVALID_ARG_TYPE(name, "string", value);
+ }
+ function validateFunction(value, name) {
+   if (typeof value !== "function") throw $ERR_INVALID_ARG_TYPE(name, "function", value);
+ }
+ function validateBoolean(value, name) {
+   if (typeof value !== "boolean") throw $ERR_INVALID_ARG_TYPE(name, "boolean", value);
+ }
+ function validateUndefined(value, name) {
+   if (value !== undefined) throw $ERR_INVALID_ARG_TYPE(name, "undefined", value);
+ }
+ function validateInternalField(object, fieldKey, className) {
+   if (typeof object !== "object" || object === null || !ObjectPrototypeHasOwnProperty.$call(object, fieldKey)) {
+     throw $ERR_INVALID_ARG_TYPE("this", className, object);
+   }
+ }
+ hideFromStack(validateLinkHeaderValue, validateInternalField);
+ hideFromStack(validateString, validateFunction, validateBoolean, validateUndefined);
export default {
/** (value, name) */
@@ -82,15 +100,15 @@ export default {
/** `(value, name, min, max)` */
validateNumber: $newCppFunction("NodeValidator.cpp", "jsFunction_validateNumber", 0),
/** `(value, name)` */
- validateString: $newCppFunction("NodeValidator.cpp", "jsFunction_validateString", 0),
+ validateString,
/** `(number, name)` */
validateFiniteNumber: $newCppFunction("NodeValidator.cpp", "jsFunction_validateFiniteNumber", 0),
/** `(number, name, lower, upper, def)` */
checkRangesOrGetDefault: $newCppFunction("NodeValidator.cpp", "jsFunction_checkRangesOrGetDefault", 0),
/** `(value, name)` */
- validateFunction: $newCppFunction("NodeValidator.cpp", "jsFunction_validateFunction", 0),
+ validateFunction,
/** `(value, name)` */
- validateBoolean: $newCppFunction("NodeValidator.cpp", "jsFunction_validateBoolean", 0),
+ validateBoolean,
/** `(port, name = 'Port', allowZero = true)` */
validatePort: $newCppFunction("NodeValidator.cpp", "jsFunction_validatePort", 0),
/** `(signal, name)` */
@@ -108,7 +126,7 @@ export default {
/** `(value, name)` */
validatePlainFunction: $newCppFunction("NodeValidator.cpp", "jsFunction_validatePlainFunction", 0),
/** `(value, name)` */
- validateUndefined: $newCppFunction("NodeValidator.cpp", "jsFunction_validateUndefined", 0),
+ validateUndefined,
/** `(buffer, name = 'buffer')` */
validateBuffer: $newCppFunction("NodeValidator.cpp", "jsFunction_validateBuffer", 0),
/** `(value, name, oneOf)` */
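
For reference, a sketch of the behavior the JS implementations above must preserve (matching Node's validators; error details abbreviated):

validateString("abc", "name");        // ok
validateString(42, "name");           // throws ERR_INVALID_ARG_TYPE
validateBoolean(true, "flag");        // ok
validateUndefined(undefined, "x");    // ok
validateUndefined(null, "x");         // throws ERR_INVALID_ARG_TYPE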

View File

@@ -0,0 +1,276 @@
import { expect, test } from "bun:test";
import { bunEnv, bunExe, tempDirWithFiles } from "harness";
import { join } from "path";
test("workspace devDependencies should take priority over peerDependencies for resolution", async () => {
const dir = tempDirWithFiles("dev-peer-priority", {
"package.json": JSON.stringify({
name: "test-monorepo",
version: "1.0.0",
workspaces: {
packages: ["packages/*"],
nodeLinker: "isolated",
},
}),
"packages/lib/package.json": JSON.stringify({
name: "lib",
version: "1.0.0",
dependencies: {},
devDependencies: {
"my-dep": "workspace:*", // Use workspace protocol for dev
},
peerDependencies: {
"my-dep": "^1.0.0", // Range that wants 1.x
},
}),
"packages/lib/test.js": `const dep = require("my-dep"); console.log(dep.version);`,
// Only provide workspace package with version 2.0.0
"packages/my-dep/package.json": JSON.stringify({
name: "my-dep",
version: "2.0.0",
main: "index.js",
}),
"packages/my-dep/index.js": `module.exports = { version: "2.0.0" };`,
});
// Run bun install with a dead registry to ensure no network requests
const { stdout, stderr, exitCode } = await new Promise<{ stdout: string; stderr: string; exitCode: number }>(
resolve => {
const proc = Bun.spawn({
cmd: [bunExe(), "install", "--no-progress", "--no-summary"],
cwd: dir,
env: {
...bunEnv,
NPM_CONFIG_REGISTRY: "http://localhost:9999/", // Dead URL - will fail if used
},
stdout: "pipe",
stderr: "pipe",
});
proc.exited.then(exitCode => {
Promise.all([new Response(proc.stdout).text(), new Response(proc.stderr).text()]).then(([stdout, stderr]) => {
resolve({ stdout, stderr, exitCode });
});
});
},
);
if (exitCode !== 0) {
console.error("Install failed with exit code:", exitCode);
console.error("stdout:", stdout);
console.error("stderr:", stderr);
}
expect(exitCode).toBe(0);
// Check that no network requests were made for packages that should be resolved locally
expect(stderr).not.toContain("GET");
expect(stderr).not.toContain("http");
// Check that the lockfile was created correctly
const lockfilePath = join(dir, "bun.lock");
expect(await Bun.file(lockfilePath).exists()).toBe(true);
// Verify that version 2.0.0 (devDependency) was linked
// If peerDependency range ^1.0.0 was used, it would try to fetch from npm and fail
const testResult = await new Promise<string>(resolve => {
const proc = Bun.spawn({
cmd: [bunExe(), "packages/lib/test.js"],
cwd: dir,
env: bunEnv,
stdout: "pipe",
});
new Response(proc.stdout).text().then(resolve);
});
expect(testResult.trim()).toBe("2.0.0");
});
test("devDependencies and peerDependencies with different versions should coexist", async () => {
const dir = tempDirWithFiles("dev-peer-different-versions", {
"package.json": JSON.stringify({
name: "test-monorepo",
version: "1.0.0",
workspaces: {
packages: ["packages/*"],
nodeLinker: "isolated",
},
}),
"packages/lib/package.json": JSON.stringify({
name: "lib",
version: "1.0.0",
dependencies: {},
devDependencies: {
"utils": "1.0.0",
},
peerDependencies: {
"utils": "^1.0.0",
},
}),
"packages/lib/index.js": `console.log("lib");`,
"packages/utils/package.json": JSON.stringify({
name: "utils",
version: "1.0.0",
main: "index.js",
}),
"packages/utils/index.js": `console.log("utils");`,
});
// Run bun install in the monorepo
const { stdout, stderr, exitCode } = await new Promise<{ stdout: string; stderr: string; exitCode: number }>(
resolve => {
const proc = Bun.spawn({
cmd: [bunExe(), "install", "--no-progress", "--no-summary"],
cwd: dir,
env: bunEnv,
stdout: "pipe",
stderr: "pipe",
});
proc.exited.then(exitCode => {
Promise.all([new Response(proc.stdout).text(), new Response(proc.stderr).text()]).then(([stdout, stderr]) => {
resolve({ stdout, stderr, exitCode });
});
});
},
);
if (exitCode !== 0) {
console.error("Install failed with exit code:", exitCode);
console.error("stdout:", stdout);
console.error("stderr:", stderr);
}
expect(exitCode).toBe(0);
// Check that the lockfile was created correctly
const lockfilePath = join(dir, "bun.lock");
expect(await Bun.file(lockfilePath).exists()).toBe(true);
});
test("dependency behavior comparison prioritizes devDependencies", async () => {
const dir = tempDirWithFiles("behavior-comparison", {
"package.json": JSON.stringify({
name: "test-app",
version: "1.0.0",
dependencies: {},
devDependencies: {
"typescript": "^5.0.0",
},
peerDependencies: {
"typescript": "^4.0.0 || ^5.0.0",
},
}),
"index.js": `console.log("app");`,
});
// Run bun install
const { stdout, stderr, exitCode } = await new Promise<{ stdout: string; stderr: string; exitCode: number }>(
resolve => {
const proc = Bun.spawn({
cmd: [bunExe(), "install", "--no-progress", "--no-summary"],
cwd: dir,
env: bunEnv,
stdout: "pipe",
stderr: "pipe",
});
proc.exited.then(exitCode => {
Promise.all([new Response(proc.stdout).text(), new Response(proc.stderr).text()]).then(([stdout, stderr]) => {
resolve({ stdout, stderr, exitCode });
});
});
},
);
if (exitCode !== 0) {
console.error("Install failed with exit code:", exitCode);
console.error("stdout:", stdout);
console.error("stderr:", stderr);
}
expect(exitCode).toBe(0);
// Check that the lockfile was created correctly
const lockfilePath = join(dir, "bun.lock");
expect(await Bun.file(lockfilePath).exists()).toBe(true);
});
test("Next.js monorepo scenario should not make unnecessary network requests", async () => {
const dir = tempDirWithFiles("nextjs-monorepo", {
"package.json": JSON.stringify({
name: "nextjs-monorepo",
version: "1.0.0",
workspaces: {
packages: ["packages/*"],
nodeLinker: "isolated",
},
}),
"packages/web/package.json": JSON.stringify({
name: "web",
version: "1.0.0",
dependencies: {},
devDependencies: {
"next": "15.0.0-canary.119", // Specific canary version for dev
},
peerDependencies: {
"next": "^14.0.0 || ^15.0.0", // Range that would accept 14.x or 15.x stable
},
}),
"packages/web/test.js": `const next = require("next/package.json"); console.log(next.version);`,
// Only provide the canary version that matches devDependencies
"packages/next/package.json": JSON.stringify({
name: "next",
version: "15.0.0-canary.119",
main: "index.js",
}),
"packages/next/index.js": `console.log("next workspace");`,
});
// Run bun install with dead registry
const { stdout, stderr, exitCode } = await new Promise<{ stdout: string; stderr: string; exitCode: number }>(
resolve => {
const proc = Bun.spawn({
cmd: [bunExe(), "install", "--no-progress", "--no-summary"],
cwd: dir,
env: {
...bunEnv,
NPM_CONFIG_REGISTRY: "http://localhost:9999/", // Dead URL
},
stdout: "pipe",
stderr: "pipe",
});
proc.exited.then(exitCode => {
Promise.all([new Response(proc.stdout).text(), new Response(proc.stderr).text()]).then(([stdout, stderr]) => {
resolve({ stdout, stderr, exitCode });
});
});
},
);
expect(exitCode).toBe(0);
// The key test: should not make network requests for packages that exist in workspace
// When devDependencies are prioritized over peerDependencies, the workspace version should be used
expect(stderr).not.toContain("GET");
expect(stderr).not.toContain("404");
expect(stderr).not.toContain("http");
// Check that the lockfile was created correctly
const lockfilePath = join(dir, "bun.lock");
expect(await Bun.file(lockfilePath).exists()).toBe(true);
// Verify that version 15.0.0-canary.119 (devDependency) was used
// If peer range was used, it would try to fetch a stable version from npm and fail
const testResult = await new Promise<string>(resolve => {
const proc = Bun.spawn({
cmd: [bunExe(), "packages/web/test.js"],
cwd: dir,
env: bunEnv,
stdout: "pipe",
});
new Response(proc.stdout).text().then(resolve);
});
expect(testResult.trim()).toBe("15.0.0-canary.119");
});

View File

@@ -64,7 +64,7 @@ export const bunEnv: NodeJS.Dict<string> = {
const ciEnv = { ...bunEnv };
if (isASAN) {
- bunEnv.ASAN_OPTIONS ??= "allow_user_segv_handler=1";
+ bunEnv.ASAN_OPTIONS ??= "allow_user_segv_handler=1:disable_coredump=0";
}
if (isWindows) {