Compare commits

...

1 Commit

Author SHA1 Message Date
Claude Bot
65cb1e2e5c chore(tests): consolidate regression tests into appropriate test files
This commit reorganizes ~128 regression tests from test/regression/issue/
into their appropriate existing test files based on what they test:

- HTTP server tests → test/js/bun/http/serve.test.ts
- child_process tests → test/js/node/child_process/child_process.test.ts
- fetch/body tests → test/js/web/fetch/body.test.ts
- expect matchers → test/js/bun/test/expect.test.js
- jest hooks → test/js/bun/test/jest-hooks.test.ts
- CSS tests → test/js/bun/css/css.test.ts
- S3 tests → test/js/bun/s3/s3.test.ts
- Shell tests → test/js/bun/shell/bunshell.test.ts
- TTY tests → test/js/node/tty.test.ts
- Mock tests → test/js/bun/test/mock-fn.test.js
- WebSocket tests → test/js/bun/websocket/websocket-server.test.ts
- Bundler tests → test/bundler/*.test.ts
- And many more...

The remaining 86 tests in test/regression/issue/ are kept because they:
- Test very specific edge cases worth keeping isolated
- Don't have a clear existing test file to merge into
- Were explicitly marked to keep during analysis

All moved tests retain comments referencing their original issue numbers
for traceability (e.g., "// Regression test for #XXXXX").

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-16 22:31:00 +00:00
211 changed files with 9175 additions and 9543 deletions

View File

@@ -1,7 +1,8 @@
import assert from "assert";
import { afterEach, describe, expect, test } from "bun:test";
import { readFileSync, writeFileSync } from "fs";
import { bunEnv, bunExe, tempDirWithFiles, tempDirWithFilesAnon } from "harness";
import { execSync } from "child_process";
import { existsSync, readFileSync, writeFileSync } from "fs";
import { bunEnv, bunExe, isWindows, tempDir, tempDirWithFiles, tempDirWithFilesAnon } from "harness";
import path, { join } from "path";
import { buildNoThrow } from "./buildNoThrow";
@@ -1119,3 +1120,358 @@ export { greeting };`,
expect(result.success).toBeDefined();
});
});
// Regression test for compile --outfile with subdirectories
// Verifies that `bun build --compile --outfile <relative path>` can place the
// produced executable inside nested subdirectories (creating missing parent
// directories), with both separator styles, and that Windows metadata flags
// still apply. The whole suite is gated to Windows via describe.if(isWindows).
describe.if(isWindows)("compile --outfile with subdirectories", () => {
  test("places executable in subdirectory with forward slash", async () => {
    using dir = tempDir("compile-subdir-forward", {
      "app.js": `console.log("Hello from subdirectory!");`,
    });
    // Use forward slash in outfile
    const outfile = "subdir/nested/app.exe";
    await using proc = Bun.spawn({
      cmd: [bunExe(), "build", "--compile", join(String(dir), "app.js"), "--outfile", outfile],
      env: bunEnv,
      cwd: String(dir),
      stdout: "pipe",
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    expect(exitCode).toBe(0);
    expect(stderr).toBe("");
    // Check that the file exists in the subdirectory
    const expectedPath = join(String(dir), "subdir", "nested", "app.exe");
    expect(existsSync(expectedPath)).toBe(true);
    // Run the executable to verify it works
    await using exe = Bun.spawn({
      cmd: [expectedPath],
      env: bunEnv,
      stdout: "pipe",
    });
    const exeOutput = await exe.stdout.text();
    expect(exeOutput.trim()).toBe("Hello from subdirectory!");
  });
  test("places executable in subdirectory with backslash", async () => {
    using dir = tempDir("compile-subdir-backslash", {
      "app.js": `console.log("Hello with backslash!");`,
    });
    // Use backslash in outfile (Windows-native separator)
    const outfile = "subdir\\nested\\app.exe";
    await using proc = Bun.spawn({
      cmd: [bunExe(), "build", "--compile", join(String(dir), "app.js"), "--outfile", outfile],
      env: bunEnv,
      cwd: String(dir),
      stdout: "pipe",
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    expect(exitCode).toBe(0);
    expect(stderr).toBe("");
    // Check that the file exists in the subdirectory
    const expectedPath = join(String(dir), "subdir", "nested", "app.exe");
    expect(existsSync(expectedPath)).toBe(true);
  });
  test("creates parent directories if they don't exist", async () => {
    using dir = tempDir("compile-create-dirs", {
      "app.js": `console.log("Created directories!");`,
    });
    // Use a deep nested path that doesn't exist yet
    const outfile = "a/b/c/d/e/app.exe";
    await using proc = Bun.spawn({
      cmd: [bunExe(), "build", "--compile", join(String(dir), "app.js"), "--outfile", outfile],
      env: bunEnv,
      cwd: String(dir),
      stdout: "pipe",
      stderr: "pipe",
    });
    const exitCode = await proc.exited;
    expect(exitCode).toBe(0);
    // Check that the file and all directories were created
    const expectedPath = join(String(dir), "a", "b", "c", "d", "e", "app.exe");
    expect(existsSync(expectedPath)).toBe(true);
  });
  // NOTE(review): the inner .if(isWindows) is redundant — the enclosing
  // describe is already gated on isWindows. Harmless, but could be test(...).
  test.if(isWindows)("Windows metadata works with subdirectories", async () => {
    using dir = tempDir("compile-metadata-subdir", {
      "app.js": `console.log("App with metadata!");`,
    });
    const outfile = "output/bin/app.exe";
    await using proc = Bun.spawn({
      cmd: [
        bunExe(),
        "build",
        "--compile",
        join(String(dir), "app.js"),
        "--outfile",
        outfile,
        "--windows-title",
        "Subdirectory App",
        "--windows-version",
        "1.2.3.4",
        "--windows-description",
        "App in a subdirectory",
      ],
      env: bunEnv,
      cwd: String(dir),
      stdout: "pipe",
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    expect(exitCode).toBe(0);
    expect(stderr).toBe("");
    const expectedPath = join(String(dir), "output", "bin", "app.exe");
    expect(existsSync(expectedPath)).toBe(true);
    // Verify metadata was set correctly.
    // Reads the executable's VersionInfo via PowerShell; returns "" on any
    // failure so a missing field shows up as a plain assertion diff.
    const getMetadata = (field: string) => {
      try {
        return execSync(`powershell -Command "(Get-ItemProperty '${expectedPath}').VersionInfo.${field}"`, {
          encoding: "utf8",
        }).trim();
      } catch {
        return "";
      }
    };
    expect(getMetadata("ProductName")).toBe("Subdirectory App");
    expect(getMetadata("FileDescription")).toBe("App in a subdirectory");
    expect(getMetadata("ProductVersion")).toBe("1.2.3.4");
  });
  test("fails gracefully when parent is a file", async () => {
    using dir = tempDir("compile-parent-is-file", {
      "app.js": `console.log("Won't compile!");`,
      "blocked": "This is a file, not a directory",
    });
    // Try to use blocked/app.exe where blocked is a file
    const outfile = "blocked/app.exe";
    await using proc = Bun.spawn({
      cmd: [bunExe(), "build", "--compile", join(String(dir), "app.js"), "--outfile", outfile],
      env: bunEnv,
      cwd: String(dir),
      stdout: "pipe",
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    expect(exitCode).not.toBe(0);
    // Should get an error about the path — expects the "NotDir" error code
    // name somewhere in stderr (compared case-insensitively).
    expect(stderr.toLowerCase()).toContain("notdir");
  });
  test("works with . and .. in paths", async () => {
    using dir = tempDir("compile-relative-paths", {
      "src/app.js": `console.log("Relative paths work!");`,
    });
    // Use relative path with . and .. — must be normalized before writing
    const outfile = "./output/../output/./app.exe";
    await using proc = Bun.spawn({
      cmd: [bunExe(), "build", "--compile", join(String(dir), "src", "app.js"), "--outfile", outfile],
      env: bunEnv,
      cwd: String(dir),
      stdout: "pipe",
      stderr: "pipe",
    });
    const exitCode = await proc.exited;
    expect(exitCode).toBe(0);
    // Should normalize to output/app.exe
    const expectedPath = join(String(dir), "output", "app.exe");
    expect(existsSync(expectedPath)).toBe(true);
  });
});
// Regression test for Bun.build() compile with subdirectories
// Same scenario as the CLI suite above, but driven through the programmatic
// Bun.build({ compile: { outfile } }) API. Both tests are Windows-only.
describe("Bun.build() compile with subdirectories", () => {
  test.if(isWindows)("places executable in subdirectory via API", async () => {
    using dir = tempDir("api-compile-subdir", {
      "app.js": `console.log("API subdirectory test!");`,
    });
    const result = await Bun.build({
      entrypoints: [join(String(dir), "app.js")],
      compile: {
        // Relative outfile with nested directories, resolved against outdir.
        outfile: "dist/bin/app.exe",
      },
      outdir: String(dir),
    });
    expect(result.success).toBe(true);
    expect(result.outputs.length).toBe(1);
    // The output path should include the subdirectories
    expect(result.outputs[0].path).toContain("dist");
    expect(result.outputs[0].path).toContain("bin");
    // File should exist at the expected location
    const expectedPath = join(String(dir), "dist", "bin", "app.exe");
    expect(existsSync(expectedPath)).toBe(true);
  });
  test.if(isWindows)("API with Windows metadata and subdirectories", async () => {
    using dir = tempDir("api-metadata-subdir", {
      "app.js": `console.log("API with metadata!");`,
    });
    const result = await Bun.build({
      entrypoints: [join(String(dir), "app.js")],
      compile: {
        outfile: "build/release/app.exe",
        windows: {
          title: "API Subdirectory App",
          version: "2.0.0.0",
          publisher: "Test Publisher",
        },
      },
      outdir: String(dir),
    });
    expect(result.success).toBe(true);
    const expectedPath = join(String(dir), "build", "release", "app.exe");
    expect(existsSync(expectedPath)).toBe(true);
    // Verify metadata — reads VersionInfo via PowerShell, "" on failure
    // (same helper pattern as the CLI suite).
    const getMetadata = (field: string) => {
      try {
        return execSync(`powershell -Command "(Get-ItemProperty '${expectedPath}').VersionInfo.${field}"`, {
          encoding: "utf8",
        }).trim();
      } catch {
        return "";
      }
    };
    expect(getMetadata("ProductName")).toBe("API Subdirectory App");
    expect(getMetadata("CompanyName")).toBe("Test Publisher");
    expect(getMetadata("ProductVersion")).toBe("2.0.0.0");
  });
});
// Regression test for https://github.com/oven-sh/bun/issues/22157
// Compiled binaries were including executable name in process.argv
test("issue 22157: compiled binary should not include executable name in process.argv", async () => {
  const dir = tempDirWithFiles("22157-basic", {
    "index.js": /* js */ `
      import { parseArgs } from "node:util"
      console.log(JSON.stringify(process.argv));
      // This should work - no extra executable name should cause parseArgs to throw
      parseArgs({
        args: process.argv.slice(2),
      });
      console.log("SUCCESS");
    `,
  });
  // Compile the binary
  await using compileProc = Bun.spawn({
    cmd: [bunExe(), "build", "--compile", "--outfile=test-binary", "./index.js"],
    cwd: dir,
    env: bunEnv,
    stdout: "pipe",
    stderr: "pipe",
  });
  // Fix: assert the compile step succeeded. The previous version discarded
  // the exit code, so a failed build surfaced later as a confusing failure
  // to spawn ./test-binary instead of a clear compile-step assertion.
  expect(await compileProc.exited).toBe(0);
  // Run the compiled binary - should not throw
  await using runProc = Bun.spawn({
    cmd: ["./test-binary"],
    cwd: dir,
    env: bunEnv,
    stdout: "pipe",
    stderr: "pipe",
  });
  const [stdout, exitCode] = await Promise.all([runProc.stdout.text(), runProc.exited]);
  expect(exitCode).toBe(0);
  expect(stdout).toContain("SUCCESS");
  // Verify process.argv structure: exactly ["bun", <virtual entry path>].
  const argvMatch = stdout.match(/\[.*?\]/);
  expect(argvMatch).toBeTruthy();
  const processArgv = JSON.parse(argvMatch![0]);
  expect(processArgv).toHaveLength(2);
  expect(processArgv[0]).toBe("bun");
  // Windows uses "B:/~BUN/root/", Unix uses "/$bunfs/root/"
  expect(processArgv[1]).toMatch(/(\$bunfs|~BUN).*root/);
});
// Regression test for https://github.com/oven-sh/bun/issues/22157
// Companion to the test above: user-supplied CLI arguments must land at
// process.argv[2..] in a compiled binary, with no extra executable name.
test("issue 22157: compiled binary with user args should pass them correctly", async () => {
  const dir = tempDirWithFiles("22157-args", {
    "index.js": /* js */ `
      console.log(JSON.stringify(process.argv));
      // Expect: ["bun", "/$bunfs/root/..." or "B:/~BUN/root/...", "arg1", "arg2"]
      if (process.argv.length !== 4) {
        console.error("Expected 4 argv items, got", process.argv.length);
        process.exit(1);
      }
      if (process.argv[2] !== "arg1" || process.argv[3] !== "arg2") {
        console.error("User args not correct");
        process.exit(1);
      }
      console.log("SUCCESS");
    `,
  });
  await using compileProc = Bun.spawn({
    cmd: [bunExe(), "build", "--compile", "--outfile=test-binary", "./index.js"],
    cwd: dir,
    env: bunEnv,
    stdout: "pipe",
    stderr: "pipe",
  });
  // Fix: assert the compile step succeeded (exit code was previously ignored,
  // masking build failures as a later spawn failure).
  expect(await compileProc.exited).toBe(0);
  await using runProc = Bun.spawn({
    cmd: ["./test-binary", "arg1", "arg2"],
    cwd: dir,
    env: bunEnv,
    stdout: "pipe",
    stderr: "pipe",
  });
  const [stdout, exitCode] = await Promise.all([runProc.stdout.text(), runProc.exited]);
  expect(exitCode).toBe(0);
  expect(stdout).toContain("SUCCESS");
});

View File

@@ -1,5 +1,6 @@
import { describe, expect } from "bun:test";
import { isBroken, isWindows } from "harness";
import { $ } from "bun";
import { describe, expect, test } from "bun:test";
import { bunEnv, bunExe, isBroken, isWindows, tempDir, tempDirWithFiles } from "harness";
import { join } from "node:path";
import { itBundled } from "./expectBundled";
@@ -2382,3 +2383,281 @@ for (const backend of ["api", "cli"] as const) {
});
});
}
// Regression test for #9559
// `bun build --target bun` must round-trip non-ASCII identifiers and object
// keys (here U+6211, written with \u{...} escapes) without corrupting them.
test("bun build --target bun should support non-ascii source", async () => {
  const files = {
    "index.js": `
console.log(JSON.stringify({\u{6211}: "a"}));
const \u{6211} = "b";
console.log(JSON.stringify({\u{6211}}));
`,
  };
  const source = tempDirWithFiles("source", files);
  // Make Bun Shell throw on non-zero exit codes so a failed build fails the test.
  $.throws(true);
  await $`${bunExe()} build --target bun ${join(source, "index.js")} --outfile ${join(source, "bundle.js")}`;
  const result = await $`${bunExe()} ${join(source, "bundle.js")}`.text();
  // Both the explicit-key form and the shorthand form must survive bundling.
  expect(result).toBe(`{"\u{6211}":"a"}\n{"\u{6211}":"b"}\n`);
});
// Regression test for #22003: a tab character in a source filename must be
// JSON-escaped in the generated sourcemap rather than emitted as a raw 0x09
// byte (which produces invalid JSON). Skipped on Windows, where tab is not a
// legal filename character.
test.skipIf(isWindows)("tab character in filename should be escaped in sourcemap JSON", async () => {
  using dir = tempDir("22003", {
    // Filename containing a literal tab character.
    "file\ttab.js": "module.exports = 42;",
  });
  await using proc = Bun.spawn({
    cmd: [bunExe(), "build", "file\ttab.js", "--outfile=out.js", "--sourcemap"],
    env: bunEnv,
    cwd: String(dir),
    stderr: "pipe",
  });
  const [stderr, exitCode] = await Promise.all([proc.stderr.text(), proc.exited]);
  expect(exitCode).toBe(0);
  expect(stderr).not.toContain("InvalidSourceMap");
  const rawMap = await Bun.file(`${dir}/out.js.map`).text();
  // The sourcemap must parse as JSON (an unescaped tab would make it invalid).
  let parsedMap;
  expect(() => {
    parsedMap = JSON.parse(rawMap);
  }).not.toThrow();
  // After parsing, the escaped filename decodes back to the literal tab form.
  expect(parsedMap.sources).toContain("file\ttab.js");
  // And the raw JSON text itself must contain no literal tab byte.
  expect(rawMap.includes("\t")).toBe(false);
});
// Regression test for #25648
// Named function expression names should be renamed when they shadow an outer symbol
// that's referenced inside the function body. This prevents infinite recursion:
// without the rename, `$.get(123)` inside `function get()` would resolve to the
// function expression itself after bundling.
test("named function expression should be renamed when shadowing outer symbol", async () => {
  using dir = tempDir("issue-25648", {
    "lib.ts": `
      export function get(x: number) {
        return x * 2;
      }
      export function doSomething(fn: () => number) {
        return fn();
      }
    `,
    "index.ts": `
      import * as $ from './lib';
      export function test() {
        return $.doSomething(function get() {
          return $.get(123); // This should reference the outer get, not the function expression
        });
      }
      console.log(test());
    `,
  });
  // Bundle and run the code
  await using buildProc = Bun.spawn({
    cmd: [bunExe(), "build", "index.ts", "--bundle", "--outfile=out.js"],
    env: bunEnv,
    cwd: String(dir),
    stdout: "pipe",
    stderr: "pipe",
  });
  const [buildStdout, buildStderr, buildExitCode] = await Promise.all([
    buildProc.stdout.text(),
    buildProc.stderr.text(),
    buildProc.exited,
  ]);
  expect(buildStderr).toBe("");
  expect(buildExitCode).toBe(0);
  // Run the bundled output
  await using runProc = Bun.spawn({
    cmd: [bunExe(), "out.js"],
    env: bunEnv,
    cwd: String(dir),
    stdout: "pipe",
    stderr: "pipe",
  });
  const [runStdout, runStderr, runExitCode] = await Promise.all([
    runProc.stdout.text(),
    runProc.stderr.text(),
    runProc.exited,
  ]);
  // Should print 246 (123 * 2), NOT cause infinite recursion
  expect(runStdout.trim()).toBe("246");
  expect(runStderr).toBe("");
  expect(runExitCode).toBe(0);
});
// Companion to the #25648 test above: reproduces the Svelte-compiler-shaped
// pattern where a named function expression (`function get() { ... $.get(...) }`)
// is passed as a callback. The inner name must not capture the `$.get` call.
test("named function expression with namespace import should not cause infinite recursion", async () => {
  using dir = tempDir("issue-25648-2", {
    "svelte-mock.ts": `
      export function get<T>(store: { value: T }): T {
        return store.value;
      }
      export function set<T>(store: { value: T }, value: T) {
        store.value = value;
      }
      export function bind_value(
        element: HTMLElement,
        get_fn: () => string,
        set_fn: (value: string) => void
      ) {
        return get_fn();
      }
    `,
    "index.ts": `
      import * as $ from './svelte-mock';
      const query = { value: "hello" };
      // This pattern is generated by the Svelte compiler in dev mode
      const result = $.bind_value(
        {} as HTMLElement,
        function get() {
          return $.get(query); // Should call outer $.get, not this function
        },
        function set($$value: string) {
          $.set(query, $$value);
        }
      );
      console.log(result);
    `,
  });
  // Bundle and run the code
  await using buildProc = Bun.spawn({
    cmd: [bunExe(), "build", "index.ts", "--bundle", "--outfile=out.js"],
    env: bunEnv,
    cwd: String(dir),
    stdout: "pipe",
    stderr: "pipe",
  });
  const [buildStdout, buildStderr, buildExitCode] = await Promise.all([
    buildProc.stdout.text(),
    buildProc.stderr.text(),
    buildProc.exited,
  ]);
  expect(buildStderr).toBe("");
  expect(buildExitCode).toBe(0);
  // Run the bundled output
  await using runProc = Bun.spawn({
    cmd: [bunExe(), "out.js"],
    env: bunEnv,
    cwd: String(dir),
    stdout: "pipe",
    stderr: "pipe",
  });
  const [runStdout, runStderr, runExitCode] = await Promise.all([
    runProc.stdout.text(),
    runProc.stderr.text(),
    runProc.exited,
  ]);
  // Should print "hello", NOT cause "Maximum call stack size exceeded"
  expect(runStdout.trim()).toBe("hello");
  expect(runStderr).toBe("");
  expect(runExitCode).toBe(0);
});
// Same #25648 rename rule, but for a *class* expression name that shadows an
// outer symbol. Self-references inside the class (static factory, clone) must
// keep pointing at the inner class after the bundler renames it.
test("class expression name should be renamed when shadowing outer symbol", async () => {
  using dir = tempDir("issue-25648-3", {
    "lib.ts": `
      export class Foo {
        value = 42;
      }
      export function makeThing<T>(cls: new () => T): T {
        return new cls();
      }
    `,
    "index.ts": `
      import * as $ from './lib';
      export function test() {
        return $.makeThing(class Foo extends $.Foo {
          getValue() {
            return this.value;
          }
          // Self-reference: uses the inner class name Foo
          static create() {
            return new Foo();
          }
          clone() {
            return new Foo();
          }
        });
      }
      const instance = test();
      console.log(instance.getValue());
      // Test self-referencing static method
      console.log((instance.constructor as any).create().getValue());
      // Test self-referencing instance method
      console.log(instance.clone().getValue());
    `,
  });
  // Bundle and run the code
  await using buildProc = Bun.spawn({
    cmd: [bunExe(), "build", "index.ts", "--bundle", "--outfile=out.js"],
    env: bunEnv,
    cwd: String(dir),
    stdout: "pipe",
    stderr: "pipe",
  });
  const [buildStdout, buildStderr, buildExitCode] = await Promise.all([
    buildProc.stdout.text(),
    buildProc.stderr.text(),
    buildProc.exited,
  ]);
  expect(buildStderr).toBe("");
  expect(buildExitCode).toBe(0);
  // Run the bundled output
  await using runProc = Bun.spawn({
    cmd: [bunExe(), "out.js"],
    env: bunEnv,
    cwd: String(dir),
    stdout: "pipe",
    stderr: "pipe",
  });
  const [runStdout, runStderr, runExitCode] = await Promise.all([
    runProc.stdout.text(),
    runProc.stderr.text(),
    runProc.exited,
  ]);
  // Should print 42 three times (getValue, static create().getValue, clone().getValue)
  expect(runStdout.trim()).toBe("42\n42\n42");
  expect(runStderr).toBe("");
  expect(runExitCode).toBe(0);
});

View File

@@ -1109,15 +1109,15 @@ describe("bundler", () => {
"/entry.js": /* js */ `
// Test all equality operators with typeof undefined
console.log(typeof x !== 'undefined');
console.log(typeof x != 'undefined');
console.log(typeof x != 'undefined');
console.log('undefined' !== typeof x);
console.log('undefined' != typeof x);
console.log(typeof x === 'undefined');
console.log(typeof x == 'undefined');
console.log('undefined' === typeof x);
console.log('undefined' == typeof x);
// These should not be optimized
console.log(typeof x === 'string');
console.log(x === 'undefined');
@@ -1135,4 +1135,94 @@ describe("bundler", () => {
);
},
});
// Regression test for #21137
// Minifying `(typeof v !== "undefined", expr)` must not drop or mangle the
// left operand in a way that leaves invalid output such as a dangling ", false".
itBundled("minify/TypeofUndefinedInCommaOperator", {
  files: {
    "/entry.js": /* js */ `
      function testFunc() {
        return (typeof undefinedVar !== "undefined", false);
      }
      function testFunc2() {
        return (typeof someVar === "undefined", true);
      }
      function testFunc3() {
        return ((typeof a !== "undefined", 1), (typeof b === "undefined", 2));
      }
      const result = typeof window !== "undefined" ? (typeof document !== "undefined", true) : false;
      console.log(testFunc());
      console.log(testFunc2());
      console.log(testFunc3());
      console.log(result);
    `,
  },
  minifySyntax: true,
  onAfterBundle(api) {
    const code = api.readFile("/out.js");
    // The output should NOT contain invalid syntax like ", !" or ", false" or ", true"
    expect(code).not.toContain(", !");
    expect(code).not.toContain(", false");
    expect(code).not.toContain(", true");
    expect(code).not.toContain(", 1");
    expect(code).not.toContain(", 2");
  },
  run: {
    // testFunc() -> false, testFunc2() -> true, testFunc3() -> 2, result -> false
    stdout: "false\ntrue\n2\nfalse",
  },
});
// Regression test for #21137 - typeof undefined optimization preserves valid syntax
// The `typeof v !== "undefined"` -> `typeof v < "u"` style optimization must
// still apply, without leaving broken comma expressions behind.
itBundled("minify/TypeofUndefinedOptimizationPreservesValidSyntax", {
  files: {
    "/entry.js": /* js */ `
      const a = typeof x !== "undefined";
      const b = typeof y === "undefined";
      const c = typeof z != "undefined";
      const d = typeof w == "undefined";
      const e = (typeof foo !== "undefined", 42);
      const f = (typeof bar === "undefined", "test");
      function check() {
        return (typeof missing !== "undefined", null);
      }
      console.log(JSON.stringify({a, b, c, d, e, f, check: check()}));
    `,
  },
  minifySyntax: true,
  onAfterBundle(api) {
    const code = api.readFile("/out.js");
    // Check that the optimization is applied (should contain < or > comparisons with "u")
    expect(code).toContain('"u"');
    // But should not have invalid comma syntax
    expect(code).not.toMatch(/,\s*[!<>]/);
    expect(code).not.toMatch(/,\s*"u"/);
  },
  run: {
    stdout: '{"a":false,"b":true,"c":false,"d":true,"e":42,"f":"test","check":null}',
  },
});
// Regression test for minifying new Array with conditional expression
// `new Array(cond ? a : b)` must minify to `Array(cond?a:b)` without the
// conditional being mis-parenthesized or dropped.
itBundled("minify/NewArrayWithConditional", {
  files: {
    "/entry.js": /* js */ `
      console.log(new Array(Math.random() > -1 ? 1 : 2));
    `,
  },
  minifySyntax: true,
  minifyWhitespace: true,
  onAfterBundle(api) {
    const code = api.readFile("/out.js");
    expect(code).toMatchInlineSnapshot(`
      "console.log(Array(Math.random()>-1?1:2));
      "
    `);
  },
});
});

View File

@@ -1608,4 +1608,131 @@ describe("bundler", () => {
},
};
});
// Regression tests for #22144 - onResolve entry point modifications
// When an onResolve plugin rewrites an entry point's path, onLoad must be
// invoked with the rewritten path — loading the original path is the bug.
itBundled("plugin/onResolve-entrypoint-modification", {
  files: {
    "entry.js": `console.log("original entry");`,
  },
  plugins(build) {
    // rewritten path -> original path, so onLoad can detect a regression.
    const entryRewrites = new Map();
    build.onResolve({ filter: /.*/ }, args => {
      const isEntry = args.kind === "entry-point-build" || args.kind === "entry-point-run";
      if (!isEntry) return;
      const rewritten = args.path + ".modified";
      entryRewrites.set(rewritten, args.path);
      console.log(`onResolve: ${args.path} -> ${rewritten} (${args.kind})`);
      return { path: rewritten };
    });
    build.onLoad({ filter: /.*/ }, args => {
      console.log(`onLoad: ${args.path}`);
      if (args.path.endsWith(".modified")) {
        return {
          contents: 'console.log("SUCCESS: Modified entry loaded");',
          loader: "js",
        };
      }
      // Being asked to load a pre-rewrite path means the rewrite was ignored.
      if ([...entryRewrites.values()].includes(args.path)) {
        return {
          contents: 'console.log("BUG: Original entry loaded");',
          loader: "js",
        };
      }
      return {
        contents: 'console.log("Other file loaded");',
        loader: "js",
      };
    });
  },
  run: {
    stdout: "SUCCESS: Modified entry loaded",
  },
});
// Regression test for #22144
// onResolve must also be able to rewrite a regular import specifier
// (./foo.magic -> absolute path of foo.js), with onLoad seeing the new path.
itBundled("plugin/onResolve-import-modification", {
  files: {
    "entry.js": `import "./foo.magic";`,
    "foo.js": `console.log("foo loaded");`,
  },
  plugins(build) {
    build.onResolve({ filter: /\.magic$/ }, args => {
      // Map foo.magic to foo.js, resolved relative to the importing file.
      // NOTE(review): uses `path.join`/`path.dirname` — confirm this file
      // imports the `path` module itself (its import list is not visible here).
      const newPath = args.path.replace(/\.magic$/, ".js");
      const resolvedPath = path.join(path.dirname(args.importer), newPath);
      console.log(`onResolve: ${args.path} -> ${resolvedPath} (${args.kind})`);
      return { path: resolvedPath };
    });
    build.onLoad({ filter: /foo\.js$/ }, args => {
      console.log(`onLoad: ${args.path}`);
      if (args.path.endsWith("foo.js")) {
        return {
          contents: 'console.log("SUCCESS: foo.js loaded via onResolve");',
          loader: "js",
        };
      }
    });
  },
  run: {
    stdout: "SUCCESS: foo.js loaded via onResolve",
  },
});
// Regression test for #22144
// The entry-point rewrite must work independently for every entry point of a
// multi-entry build, producing one rewritten output per entry.
itBundled("plugin/onResolve-multiple-entrypoints", {
  files: {
    "entry1.js": `console.log("entry1");`,
    "entry2.js": `console.log("entry2");`,
    "entry3.js": `console.log("entry3");`,
  },
  entryPoints: ["entry1.js", "entry2.js", "entry3.js"],
  plugins(build) {
    // original path -> rewritten path (note: reversed relative to the
    // single-entry test above, which maps rewritten -> original).
    const entryModifications = new Map();
    build.onResolve({ filter: /.*/ }, args => {
      if (args.kind?.includes("entry-point")) {
        const modified = args.path + ".modified";
        entryModifications.set(args.path, modified);
        console.log(`onResolve: ${args.path} -> ${modified} (${args.kind})`);
        return { path: modified };
      }
    });
    build.onLoad({ filter: /.*/ }, args => {
      console.log(`onLoad: ${args.path}`);
      if (args.path.endsWith(".modified")) {
        // Strip ".js.modified" to recover the bare entry name for the message.
        const baseName = path.basename(args.path, ".js.modified");
        return {
          contents: `console.log("SUCCESS: ${baseName} modified");`,
          loader: "js",
        };
      }
      // Loading an original (pre-rewrite) entry path indicates the bug.
      for (const [original] of entryModifications) {
        if (args.path === original) {
          const entryName = path.basename(args.path, ".js");
          return {
            contents: `console.log("BUG: ${entryName} original loaded");`,
            loader: "js",
          };
        }
      }
    });
  },
  outputPaths: ["out/entry1.js", "out/entry2.js", "out/entry3.js"],
  run: [
    { file: "out/entry1.js", stdout: "SUCCESS: entry1 modified" },
    { file: "out/entry2.js", stdout: "SUCCESS: entry2 modified" },
    { file: "out/entry3.js", stdout: "SUCCESS: entry3 modified" },
  ],
});
});

View File

@@ -3753,4 +3753,240 @@ const Layout = () => {
expect(result).toContain("a: 1");
expect(result).not.toContain("fn(");
});
// Regression test for #9748
it("concurrent transpilation produces consistent results", async () => {
// a somewhat long source code string
const source = `
// @pragma jsx foo
import { Foo } from './foo';
const foo = new Foo();
foo.bar();
export default foo;
export const abc_1 = "abc_1" + 123 * 2 + [foo];
export const abc_2 = "abc_2" + 123 * 2 + [foo];
export const abc_3 = "abc_3" + 123 * 2 + [foo];
export const abc_4 = "abc_4" + 123 * 2 + [foo];
export const abc_5 = "abc_5" + 123 * 2 + [foo];
export const abc_6 = "abc_6" + 123 * 2 + [foo];
export const abc_7 = "abc_7" + 123 * 2 + [foo];
export const abc_8 = "abc_8" + 123 * 2 + [foo];
export const abc_9 = "abc_9" + 123 * 2 + [foo];
export const abc_10 = "abc_10" + 123 * 2 + [foo];
export const abc_11 = "abc_11" + 123 * 2 + [foo];
export const abc_12 = "abc_12" + 123 * 2 + [foo];
export const abc_13 = "abc_13" + 123 * 2 + [foo];
export const abc_14 = "abc_14" + 123 * 2 + [foo];
export const abc_15 = "abc_15" + 123 * 2 + [foo];
export const abc_16 = "abc_16" + 123 * 2 + [foo];
export const abc_17 = "abc_17" + 123 * 2 + [foo];
export const abc_18 = "abc_18" + 123 * 2 + [foo];
export const abc_19 = "abc_19" + 123 * 2 + [foo];
export const abc_20 = "abc_20" + 123 * 2 + [foo];
export const abc_21 = "abc_21" + 123 * 2 + [foo];
export const abc_22 = "abc_22" + 123 * 2 + [foo];
export const abc_23 = "abc_23" + 123 * 2 + [foo];
export const abc_24 = "abc_24" + 123 * 2 + [foo];
export const abc_25 = "abc_25" + 123 * 2 + [foo];
export const abc_26 = "abc_26" + 123 * 2 + [foo];
export const abc_27 = "abc_27" + 123 * 2 + [foo];
export const abc_28 = "abc_28" + 123 * 2 + [foo];
export const abc_29 = "abc_29" + 123 * 2 + [foo];
export const abc_30 = "abc_30" + 123 * 2 + [foo];
export const abc_31 = "abc_31" + 123 * 2 + [foo];
export const abc_32 = "abc_32" + 123 * 2 + [foo];
export const abc_33 = "abc_33" + 123 * 2 + [foo];
export const abc_34 = "abc_34" + 123 * 2 + [foo];
export const abc_35 = "abc_35" + 123 * 2 + [foo];
export const abc_36 = "abc_36" + 123 * 2 + [foo];
export const abc_37 = "abc_37" + 123 * 2 + [foo];
export const abc_38 = "abc_38" + 123 * 2 + [foo];
export const abc_39 = "abc_39" + 123 * 2 + [foo];
export const abc_40 = "abc_40" + 123 * 2 + [foo];
export const abc_41 = "abc_41" + 123 * 2 + [foo];
export const abc_42 = "abc_42" + 123 * 2 + [foo];
export const abc_43 = "abc_43" + 123 * 2 + [foo];
export const abc_44 = "abc_44" + 123 * 2 + [foo];
export const abc_45 = "abc_45" + 123 * 2 + [foo];
export const abc_46 = "abc_46" + 123 * 2 + [foo];
export const abc_47 = "abc_47" + 123 * 2 + [foo];
export const abc_48 = "abc_48" + 123 * 2 + [foo];
export const abc_49 = "abc_49" + 123 * 2 + [foo];
export const abc_50 = "abc_50" + 123 * 2 + [foo];
export const abc_51 = "abc_51" + 123 * 2 + [foo];
export const abc_52 = "abc_52" + 123 * 2 + [foo];
export const abc_53 = "abc_53" + 123 * 2 + [foo];
export const abc_54 = "abc_54" + 123 * 2 + [foo];
export const abc_55 = "abc_55" + 123 * 2 + [foo];
export const abc_56 = "abc_56" + 123 * 2 + [foo];
export const abc_57 = "abc_57" + 123 * 2 + [foo];
export const abc_58 = "abc_58" + 123 * 2 + [foo];
export const abc_59 = "abc_59" + 123 * 2 + [foo];
export const abc_60 = "abc_60" + 123 * 2 + [foo];
export const abc_61 = "abc_61" + 123 * 2 + [foo];
export const abc_62 = "abc_62" + 123 * 2 + [foo];
export const abc_63 = "abc_63" + 123 * 2 + [foo];
export const abc_64 = "abc_64" + 123 * 2 + [foo];
export const abc_65 = "abc_65" + 123 * 2 + [foo];
export const abc_66 = "abc_66" + 123 * 2 + [foo];
export const abc_67 = "abc_67" + 123 * 2 + [foo];
export const abc_68 = "abc_68" + 123 * 2 + [foo];
export const abc_69 = "abc_69" + 123 * 2 + [foo];
export const abc_70 = "abc_70" + 123 * 2 + [foo];
export const abc_71 = "abc_71" + 123 * 2 + [foo];
export const abc_72 = "abc_72" + 123 * 2 + [foo];
export const abc_73 = "abc_73" + 123 * 2 + [foo];
export const abc_74 = "abc_74" + 123 * 2 + [foo];
export const abc_75 = "abc_75" + 123 * 2 + [foo];
export const abc_76 = "abc_76" + 123 * 2 + [foo];
export const abc_77 = "abc_77" + 123 * 2 + [foo];
export const abc_78 = "abc_78" + 123 * 2 + [foo];
export const abc_79 = "abc_79" + 123 * 2 + [foo];
export const abc_80 = "abc_80" + 123 * 2 + [foo];
export const abc_81 = "abc_81" + 123 * 2 + [foo];
export const abc_82 = "abc_82" + 123 * 2 + [foo];
export const abc_83 = "abc_83" + 123 * 2 + [foo];
export const abc_84 = "abc_84" + 123 * 2 + [foo];
export const abc_85 = "abc_85" + 123 * 2 + [foo];
export const abc_86 = "abc_86" + 123 * 2 + [foo];
export const abc_87 = "abc_87" + 123 * 2 + [foo];
export const abc_88 = "abc_88" + 123 * 2 + [foo];
export const abc_89 = "abc_89" + 123 * 2 + [foo];
export const abc_90 = "abc_90" + 123 * 2 + [foo];
export const abc_91 = "abc_91" + 123 * 2 + [foo];
export const abc_92 = "abc_92" + 123 * 2 + [foo];
export const abc_93 = "abc_93" + 123 * 2 + [foo];
export const abc_94 = "abc_94" + 123 * 2 + [foo];
export const abc_95 = "abc_95" + 123 * 2 + [foo];
export const abc_96 = "abc_96" + 123 * 2 + [foo];
export const abc_97 = "abc_97" + 123 * 2 + [foo];
export const abc_98 = "abc_98" + 123 * 2 + [foo];
export const abc_99 = "abc_99" + 123 * 2 + [foo];
export const abc_100 = "abc_100" + 123 * 2 + [foo];
export const abc_101 = "abc_101" + 123 * 2 + [foo];
export const abc_102 = "abc_102" + 123 * 2 + [foo];
export const abc_103 = "abc_103" + 123 * 2 + [foo];
export const abc_104 = "abc_104" + 123 * 2 + [foo];
export const abc_105 = "abc_105" + 123 * 2 + [foo];
export const abc_106 = "abc_106" + 123 * 2 + [foo];
export const abc_107 = "abc_107" + 123 * 2 + [foo];
export const abc_108 = "abc_108" + 123 * 2 + [foo];
export const abc_109 = "abc_109" + 123 * 2 + [foo];
export const abc_110 = "abc_110" + 123 * 2 + [foo];
export const abc_111 = "abc_111" + 123 * 2 + [foo];
export const abc_112 = "abc_112" + 123 * 2 + [foo];
export const abc_113 = "abc_113" + 123 * 2 + [foo];
export const abc_114 = "abc_114" + 123 * 2 + [foo];
export const abc_115 = "abc_115" + 123 * 2 + [foo];
export const abc_116 = "abc_116" + 123 * 2 + [foo];
export const abc_117 = "abc_117" + 123 * 2 + [foo];
export const abc_118 = "abc_118" + 123 * 2 + [foo];
export const abc_119 = "abc_119" + 123 * 2 + [foo];
export const abc_120 = "abc_120" + 123 * 2 + [foo];
export const abc_121 = "abc_121" + 123 * 2 + [foo];
export const abc_122 = "abc_122" + 123 * 2 + [foo];
export const abc_123 = "abc_123" + 123 * 2 + [foo];
export const abc_124 = "abc_124" + 123 * 2 + [foo];
export const abc_125 = "abc_125" + 123 * 2 + [foo];
export const abc_126 = "abc_126" + 123 * 2 + [foo];
export const abc_127 = "abc_127" + 123 * 2 + [foo];
export const abc_128 = "abc_128" + 123 * 2 + [foo];
export const abc_129 = "abc_129" + 123 * 2 + [foo];
export const abc_130 = "abc_130" + 123 * 2 + [foo];
export const abc_131 = "abc_131" + 123 * 2 + [foo];
export const abc_132 = "abc_132" + 123 * 2 + [foo];
export const abc_133 = "abc_133" + 123 * 2 + [foo];
export const abc_134 = "abc_134" + 123 * 2 + [foo];
export const abc_135 = "abc_135" + 123 * 2 + [foo];
export const abc_136 = "abc_136" + 123 * 2 + [foo];
export const abc_137 = "abc_137" + 123 * 2 + [foo];
export const abc_138 = "abc_138" + 123 * 2 + [foo];
export const abc_139 = "abc_139" + 123 * 2 + [foo];
export const abc_140 = "abc_140" + 123 * 2 + [foo];
export const abc_141 = "abc_141" + 123 * 2 + [foo];
export const abc_142 = "abc_142" + 123 * 2 + [foo];
export const abc_143 = "abc_143" + 123 * 2 + [foo];
export const abc_144 = "abc_144" + 123 * 2 + [foo];
export const abc_145 = "abc_145" + 123 * 2 + [foo];
export const abc_146 = "abc_146" + 123 * 2 + [foo];
export const abc_147 = "abc_147" + 123 * 2 + [foo];
export const abc_148 = "abc_148" + 123 * 2 + [foo];
export const abc_149 = "abc_149" + 123 * 2 + [foo];
export const abc_150 = "abc_150" + 123 * 2 + [foo];
export const abc_151 = "abc_151" + 123 * 2 + [foo];
export const abc_152 = "abc_152" + 123 * 2 + [foo];
export const abc_153 = "abc_153" + 123 * 2 + [foo];
export const abc_154 = "abc_154" + 123 * 2 + [foo];
export const abc_155 = "abc_155" + 123 * 2 + [foo];
export const abc_156 = "abc_156" + 123 * 2 + [foo];
export const abc_157 = "abc_157" + 123 * 2 + [foo];
export const abc_158 = "abc_158" + 123 * 2 + [foo];
export const abc_159 = "abc_159" + 123 * 2 + [foo];
export const abc_160 = "abc_160" + 123 * 2 + [foo];
export const abc_161 = "abc_161" + 123 * 2 + [foo];
export const abc_162 = "abc_162" + 123 * 2 + [foo];
export const abc_163 = "abc_163" + 123 * 2 + [foo];
export const abc_164 = "abc_164" + 123 * 2 + [foo];
export const abc_165 = "abc_165" + 123 * 2 + [foo];
export const abc_166 = "abc_166" + 123 * 2 + [foo];
export const abc_167 = "abc_167" + 123 * 2 + [foo];
export const abc_168 = "abc_168" + 123 * 2 + [foo];
export const abc_169 = "abc_169" + 123 * 2 + [foo];
export const abc_170 = "abc_170" + 123 * 2 + [foo];
export const abc_171 = "abc_171" + 123 * 2 + [foo];
export const abc_172 = "abc_172" + 123 * 2 + [foo];
export const abc_173 = "abc_173" + 123 * 2 + [foo];
export const abc_174 = "abc_174" + 123 * 2 + [foo];
export const abc_175 = "abc_175" + 123 * 2 + [foo];
export const abc_176 = "abc_176" + 123 * 2 + [foo];
export const abc_177 = "abc_177" + 123 * 2 + [foo];
export const abc_178 = "abc_178" + 123 * 2 + [foo];
export const abc_179 = "abc_179" + 123 * 2 + [foo];
export const abc_180 = "abc_180" + 123 * 2 + [foo];
export const abc_181 = "abc_181" + 123 * 2 + [foo];
export const abc_182 = "abc_182" + 123 * 2 + [foo];
export const abc_183 = "abc_183" + 123 * 2 + [foo];
export const abc_184 = "abc_184" + 123 * 2 + [foo];
export const abc_185 = "abc_185" + 123 * 2 + [foo];
export const abc_186 = "abc_186" + 123 * 2 + [foo];
export const abc_187 = "abc_187" + 123 * 2 + [foo];
export const abc_188 = "abc_188" + 123 * 2 + [foo];
export const abc_189 = "abc_189" + 123 * 2 + [foo];
export const abc_190 = "abc_190" + 123 * 2 + [foo];
export const abc_191 = "abc_191" + 123 * 2 + [foo];
export const abc_192 = "abc_192" + 123 * 2 + [foo];
export const abc_193 = "abc_193" + 123 * 2 + [foo];
export const abc_194 = "abc_194" + 123 * 2 + [foo];
export const abc_195 = "abc_195" + 123 * 2 + [foo];
export const abc_196 = "abc_196" + 123 * 2 + [foo];
export const abc_197 = "abc_197" + 123 * 2 + [foo];
export const abc_198 = "abc_198" + 123 * 2 + [foo];
export const abc_199 = "abc_199" + 123 * 2 + [foo];
export const abc_200 = "abc_200" + 123 * 2 + [foo];
export const abc_201 = "abc_201" + 123 * 2 + [foo];
export const abc_202 = "abc_202" + 123 * 2 + [foo];
export const abc_203 = "abc_203" + 123 * 2 + [foo];
`;
const concurrency = 100;
let strings;
async function iter() {
const promises = [];
for (let c = 0; c < concurrency; c++) {
promises.push(new Bun.Transpiler().transform(source));
}
const results = await Promise.all(promises);
if (!strings) {
strings = Array.from({ length: concurrency }, () => results[0]);
}
expect(results).toStrictEqual(strings);
}
for (let i = 0; i < 20; i++) {
await iter();
Bun.gc();
}
});
});

View File

@@ -1,5 +1,6 @@
import { file, spawn } from "bun";
import { afterAll, afterEach, beforeAll, beforeEach, expect, it, setDefaultTimeout } from "bun:test";
import { readFileSync } from "fs";
import { access, appendFile, copyFile, mkdir, readlink, rm, writeFile } from "fs/promises";
import { bunExe, bunEnv as env, readdirSorted, tmpdirSync, toBeValidBin, toBeWorkspaceLink, toHaveBins } from "harness";
import { join, relative, resolve } from "path";
@@ -2428,3 +2429,33 @@ it("should install tarball with tarball dependencies", async () => {
await access(join(add_dir, "node_modules", "test-parent"));
await access(join(add_dir, "node_modules", "test-child"));
});
// Regression test for #631
it("should escape JSON strings properly", async () => {
  // Create a directory with our test package file containing escaped characters
  // (escaped backslashes and an escaped "\n" sequence inside a JSON string value).
  await writeFile(join(add_dir, "package.json"), String.raw`{"testRegex":"\\a\n\\b\\"}`);
  // Attempt to add a package, causing the package file to be parsed, modified,
  // written, and reparsed. This verifies that escaped backslashes in JSON
  // survive the roundtrip
  const { stdout, stderr, exited } = spawn({
    cmd: [bunExe(), "add", "left-pad"],
    cwd: add_dir,
    stdout: "pipe",
    stdin: "pipe",
    stderr: "pipe",
    env,
  });
  // The add must complete cleanly before the rewritten file is inspected.
  const err = await stderr.text();
  expect(err).not.toContain("error:");
  expect(await exited).toBe(0);
  // The rewritten package.json must reproduce the original escapes verbatim
  // alongside the newly added dependency.
  const packageContents = readFileSync(join(add_dir, "package.json"), { encoding: "utf8" });
  expect(packageContents).toBe(String.raw`{
"testRegex": "\\a\n\\b\\",
"dependencies": {
"left-pad": "^1.3.0"
}
}`);
});

View File

@@ -7,6 +7,7 @@ import {
isWindows,
readdirSorted,
runBunInstall,
tempDir,
toBeValidBin,
VerdaccioRegistry,
} from "harness";
@@ -615,3 +616,46 @@ it("should include unused resolutions in the lockfile", async () => {
// --frozen-lockfile works
await runBunInstall(env, packageDir, { frozenLockfile: true });
});
// Regression test for #3192
it("yarn lockfile quotes workspace:* versions correctly", async () => {
  // Workspace root with two sibling packages, one depending on the other
  // through the "workspace:*" protocol.
  using dir = tempDir("issue-3192", {
    "package.json": JSON.stringify({
      name: "workspace-root",
      private: true,
      workspaces: ["packages/*"],
    }),
    "packages/package-a/package.json": JSON.stringify({
      name: "package-a",
      version: "1.0.0",
      dependencies: {
        "package-b": "workspace:*",
      },
    }),
    "packages/package-b/package.json": JSON.stringify({
      name: "package-b",
      version: "1.0.0",
    }),
  });

  // `--yarn` makes bun emit a yarn.lock alongside its own lockfile.
  await using proc = spawn({
    cmd: [bunExe(), "install", "--yarn"],
    env,
    cwd: String(dir),
    stdout: "pipe",
    stderr: "pipe",
  });
  const [installOut, installErr, installExit] = await Promise.all([
    proc.stdout.text(),
    proc.stderr.text(),
    proc.exited,
  ]);
  expect(installExit).toBe(0);

  // Inspect the generated yarn.lock: a "workspace:*" specifier must be quoted.
  //   broken: "package-b@packages/package-b", package-b@workspace:*:
  //   fixed:  "package-b@packages/package-b", "package-b@workspace:*":
  const yarnLock = await file(`${dir}/yarn.lock`).text();
  expect(yarnLock).toContain('"package-b@workspace:*"');
  expect(yarnLock).not.toMatch(/package-b@workspace:\*[^"]/);
});

View File

@@ -1,7 +1,7 @@
import { file, spawn } from "bun";
import { afterAll, afterEach, beforeAll, beforeEach, expect, it } from "bun:test";
import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from "bun:test";
import { access, mkdir, readFile, rm, writeFile } from "fs/promises";
import { bunExe, bunEnv as env, readdirSorted, toBeValidBin, toHaveBins } from "harness";
import { bunExe, bunEnv as env, readdirSorted, tempDirWithFiles, toBeValidBin, toHaveBins } from "harness";
import { join } from "path";
import {
dummyAfterAll,
@@ -482,3 +482,224 @@ it("should support --recursive flag", async () => {
// Should recognize the flag (either process workspaces or show error about missing lockfile)
expect(out + err).toMatch(/bun update|missing lockfile|nothing to update/);
});
// Regression tests for update-interactive-formatting (padding calculation underflow issue)
describe("bun update --interactive formatting regression", () => {
  // Runs `bun update --interactive --dry-run` inside `dir` and returns stderr,
  // which is where any panic/underflow diagnostics would be printed.
  async function interactiveUpdateStderr(dir: string): Promise<string> {
    const proc = Bun.spawn({
      cmd: [bunExe(), "update", "--interactive", "--dry-run"],
      cwd: dir,
      env,
      stdin: "inherit",
      stdout: "pipe",
      stderr: "pipe",
    });
    return await new Response(proc.stderr).text();
  }

  // Minimal lockfile-shaped JSON body for the given package map.
  function lockfileFor(packages: Record<string, { integrity: string; version: string }>): string {
    return JSON.stringify({ lockfileVersion: 3, packages });
  }

  // Asserts stderr carries none of the crash markers.
  function expectNoCrashMarkers(stderr: string, extra: string[] = []) {
    for (const marker of ["underflow", "panic", "overflow", ...extra]) {
      expect(stderr).not.toContain(marker);
    }
  }

  it("should not underflow when dependency type text is longer than available space", async () => {
    // Verifies the fix for the padding calculation underflow issue
    // in lines 745-750 of update_interactive_command.zig.
    const dir = tempDirWithFiles("formatting-regression-test", {
      "package.json": JSON.stringify({
        name: "test-project",
        version: "1.0.0",
        dependencies: {
          a: "1.0.0", // Very short package name
        },
      }),
      "bun.lockb": lockfileFor({
        a: { integrity: "sha512-fake", version: "1.0.0" },
      }),
    });
    expectNoCrashMarkers(await interactiveUpdateStderr(dir));
  });

  it("should handle dev tag length calculation correctly", async () => {
    // dev/peer/optional tags must be properly accounted for in the
    // column-width calculations.
    const dir = tempDirWithFiles("dev-tag-formatting-test", {
      "package.json": JSON.stringify({
        name: "test-project",
        version: "1.0.0",
        dependencies: { "regular-package": "1.0.0" },
        devDependencies: { "dev-package": "1.0.0" },
        peerDependencies: { "peer-package": "1.0.0" },
        optionalDependencies: { "optional-package": "1.0.0" },
      }),
      "bun.lockb": lockfileFor({
        "regular-package": { integrity: "sha512-fake", version: "1.0.0" },
        "dev-package": { integrity: "sha512-fake", version: "1.0.0" },
        "peer-package": { integrity: "sha512-fake", version: "1.0.0" },
        "optional-package": { integrity: "sha512-fake", version: "1.0.0" },
      }),
    });
    expectNoCrashMarkers(await interactiveUpdateStderr(dir));
  });

  it("should truncate extremely long package names without crashing", async () => {
    // Package names longer than MAX_NAME_WIDTH (60) must be handled.
    const longPackageName = "extremely-long-package-name-that-exceeds-maximum-width-and-should-be-truncated";
    const dir = tempDirWithFiles("truncate-test", {
      "package.json": JSON.stringify({
        name: "test-project",
        version: "1.0.0",
        dependencies: { [longPackageName]: "1.0.0" },
      }),
      "bun.lockb": lockfileFor({
        [longPackageName]: { integrity: "sha512-fake", version: "1.0.0" },
      }),
    });
    expectNoCrashMarkers(await interactiveUpdateStderr(dir), ["segfault"]);
  });

  it("should handle long version strings without formatting issues", async () => {
    // Version strings longer than MAX_VERSION_WIDTH (20) must be handled.
    const longVersion = "1.0.0-alpha.1.2.3.4.5.6.7.8.9.10.11.12.13.14.15.16.17.18.19.20.21.22.23.24.25";
    const dir = tempDirWithFiles("long-version-test", {
      "package.json": JSON.stringify({
        name: "test-project",
        version: "1.0.0",
        dependencies: { "package-with-long-version": longVersion },
      }),
      "bun.lockb": lockfileFor({
        "package-with-long-version": { integrity: "sha512-fake", version: longVersion },
      }),
    });
    expectNoCrashMarkers(await interactiveUpdateStderr(dir), ["segfault"]);
  });

  it("should handle edge case where all values are at maximum width", async () => {
    // Edge case where the padding calculations might fail: every name and
    // version sits exactly at (or just past) its maximum display width.
    const maxWidthPackage = "a".repeat(60); // MAX_NAME_WIDTH
    const maxWidthVersion = "1.0.0-" + "a".repeat(15); // MAX_VERSION_WIDTH
    const dir = tempDirWithFiles("max-width-test", {
      "package.json": JSON.stringify({
        name: "test-project",
        version: "1.0.0",
        dependencies: { [maxWidthPackage]: maxWidthVersion },
        devDependencies: { [maxWidthPackage + "-dev"]: maxWidthVersion },
        peerDependencies: { [maxWidthPackage + "-peer"]: maxWidthVersion },
        optionalDependencies: { [maxWidthPackage + "-optional"]: maxWidthVersion },
      }),
      "bun.lockb": lockfileFor({
        [maxWidthPackage]: { integrity: "sha512-fake", version: maxWidthVersion },
        [maxWidthPackage + "-dev"]: { integrity: "sha512-fake", version: maxWidthVersion },
        [maxWidthPackage + "-peer"]: { integrity: "sha512-fake", version: maxWidthVersion },
        [maxWidthPackage + "-optional"]: { integrity: "sha512-fake", version: maxWidthVersion },
      }),
    });
    expectNoCrashMarkers(await interactiveUpdateStderr(dir), ["segfault"]);
  });
});

View File

@@ -9,6 +9,7 @@ import {
bunEnv as env,
readdirSorted,
runBunInstall,
tempDirWithFiles,
toMatchNodeModulesAt,
VerdaccioRegistry,
} from "harness";
@@ -1949,3 +1950,40 @@ test("matching workspace devDependency and npm peerDependency", async () => {
expect(err).not.toContain("updated");
expect(out).toContain("no changes");
});
// Regression test for #11806
test("11806", () => {
  // Workspace root plus one member (apps/api) whose package.json contains a
  // "jest" field with regex escapes — presumably the trigger for #11806;
  // verify install/add from inside the member succeed.
  const dir = tempDirWithFiles("11806", {
    "package.json": JSON.stringify({
      "name": "project",
      "workspaces": ["apps/*"],
    }),
    "apps": {
      "api": {
        "package.json": JSON.stringify({
          "name": "api",
          "jest": {
            "testRegex": ".*\\.spec\\.ts$",
          },
          "devDependencies": {
            "typescript": "^5.7.3",
          },
        }),
      },
    },
  });

  const apiDir = dir + "/apps/api";

  // Install from inside the workspace member.
  const install = Bun.spawnSync({
    cmd: [bunExe(), "install"],
    stdio: ["inherit", "inherit", "inherit"],
    cwd: apiDir,
  });
  expect(install.exitCode).toBe(0);

  // Re-adding a devDependency rewrites the member's package.json, which must
  // not corrupt the escaped "jest" regex.
  const add = Bun.spawnSync({
    cmd: [bunExe(), "add", "--dev", "typescript"],
    stdio: ["inherit", "inherit", "inherit"],
    cwd: apiDir,
  });
  expect(add.exitCode).toBe(0);
});

View File

@@ -1,8 +1,8 @@
import { spawn } from "bun";
import { afterAll, beforeAll, beforeEach, describe, expect, it, setDefaultTimeout } from "bun:test";
import { rm, writeFile } from "fs/promises";
import { bunEnv, bunExe, isWindows, readdirSorted, tmpdirSync } from "harness";
import { copyFileSync, readdirSync } from "node:fs";
import { bunEnv, bunExe, isWindows, readdirSorted, tempDir, tmpdirSync } from "harness";
import { copyFileSync, existsSync, readdirSync } from "node:fs";
import { tmpdir } from "os";
import { join, resolve } from "path";
import { dummyAfterAll, dummyBeforeAll, dummyBeforeEach, dummyRegistry, getPort, setHandler } from "./dummy.registry";
@@ -725,8 +725,8 @@ describe("--package flag", () => {
// Create the tarball with both binaries that output different messages
// First, let's create the package structure
const tempDir = tmpdirSync();
const packageDir = join(tempDir, "package");
const tempDirPath = tmpdirSync();
const packageDir = join(tempDirPath, "package");
await Bun.$`mkdir -p ${packageDir}/bin`;
@@ -760,7 +760,7 @@ console.log("EXECUTED: multi-tool-alt (alternate binary)");
await Bun.$`chmod +x ${packageDir}/bin/multi-tool.js ${packageDir}/bin/multi-tool-alt.js`;
// Create the tarball with package/ prefix
await Bun.$`cd ${tempDir} && tar -czf ${join(import.meta.dir, "multi-tool-pkg-1.0.0.tgz")} package`;
await Bun.$`cd ${tempDirPath} && tar -czf ${join(import.meta.dir, "multi-tool-pkg-1.0.0.tgz")} package`;
// Test 1: Without --package, bunx multi-tool-alt should fail or install wrong package
// Test 2: With --package, we can run the alternate binary
@@ -867,3 +867,174 @@ it.skipIf(!isWindows)("should not crash on corrupted .bunx file with missing quo
expect(stderr).not.toContain("panic");
expect(stderr).not.toContain("reached unreachable code");
});
// Regression test for #13316
// https://github.com/oven-sh/bun/issues/13316
// bunx cowsay "" panicked on Windows due to improper handling of empty string arguments
// The issue was in the BunXFastPath.tryLaunch function which didn't properly quote
// empty string arguments for the Windows command line.
describe.if(isWindows)("#13316 - bunx with empty string arguments", () => {
  // Creates a project with a local "echo-args-test" package whose binary
  // prints its argv as JSON, then runs `bun install` so the .bunx shim exists
  // in node_modules/.bin. Returns the (still-open) temp dir handle.
  async function setupEchoArgsProject(prefix: string) {
    const dir = tempDir(prefix, {
      "package.json": JSON.stringify({
        name: "test-project",
        version: "1.0.0",
        dependencies: {
          "echo-args-test": "file:./echo-args-test",
        },
      }),
      "echo-args-test/package.json": JSON.stringify({
        name: "echo-args-test",
        version: "1.0.0",
        bin: {
          "echo-args-test": "./index.js",
        },
      }),
      "echo-args-test/index.js": `#!/usr/bin/env node
console.log(JSON.stringify(process.argv.slice(2)));
`,
    });
    await using installProc = Bun.spawn({
      cmd: [bunExe(), "install"],
      env: bunEnv,
      cwd: String(dir),
      stderr: "pipe",
    });
    await installProc.exited;
    return dir;
  }

  // Runs the installed binary through `bun run`, which takes the same
  // BunXFastPath when a .bunx shim exists; returns the echoed argv + exit code.
  async function runEchoArgs(dir: { toString(): string }, args: string[]) {
    await using proc = Bun.spawn({
      cmd: [bunExe(), "run", "echo-args-test", ...args],
      env: bunEnv,
      cwd: String(dir),
      stderr: "pipe",
    });
    const [stdout, stderrText, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    return { stdout, exitCode };
  }

  it("bunx does not panic with empty string argument", async () => {
    using dir = await setupEchoArgsProject("issue-13316");
    // Verify the .bunx shim was created (this is what triggers the fast path).
    const bunxPath = join(String(dir), "node_modules", ".bin", "echo-args-test.bunx");
    expect(existsSync(bunxPath)).toBe(true);
    // Run with an empty string argument — this was the panicking case.
    const { stdout, exitCode } = await runEchoArgs(dir, [""]);
    // Exit code 3 is the panic exit code; the bug crashed with
    // "reached unreachable code".
    expect(exitCode).not.toBe(3);
    expect(exitCode).toBe(0);
    // The empty string argument should be passed through intact.
    expect(JSON.parse(stdout.trim())).toEqual([""]);
  });

  it("bunx handles multiple arguments including empty strings", async () => {
    using dir = await setupEchoArgsProject("issue-13316-multi");
    const { stdout, exitCode } = await runEchoArgs(dir, ["hello", "", "world"]);
    expect(exitCode).not.toBe(3); // panic exit code
    expect(exitCode).toBe(0);
    expect(JSON.parse(stdout.trim())).toEqual(["hello", "", "world"]);
  });

  // Related to #18275 - bunx concurrently "command with spaces"
  // Arguments containing spaces must be preserved as single arguments
  it("bunx preserves arguments with spaces", async () => {
    using dir = await setupEchoArgsProject("issue-13316-spaces");
    // Simulates: bunx concurrently "bun --version" — the shell strips the outer
    // quotes, so each spaced string must survive as a single argv entry.
    const { stdout, exitCode } = await runEchoArgs(dir, ["bun --version", "echo hello world"]);
    expect(exitCode).toBe(0);
    expect(JSON.parse(stdout.trim())).toEqual(["bun --version", "echo hello world"]);
  });
});
// Regression test for #15276
// https://github.com/oven-sh/bun/issues/15276
// Parsing npm aliases without package manager should not crash
it("parsing npm aliases without package manager does not crash", () => {
  // `bunx <name>@npm:<alias>@<version>` forces bunx to parse the alias even
  // though the package does not exist; resolution must fail cleanly instead
  // of crashing.
  const result = Bun.spawnSync({
    cmd: [bunExe(), "x", "bunbunbunbunbun@npm:another-bun@1.0.0"],
    stdout: "pipe",
    stderr: "pipe",
    env: bunEnv,
  });
  expect(result.exitCode).toBe(1);
  expect(result.stderr.toString()).toContain("error: bunbunbunbunbun@npm:another-bun@1.0.0 failed to resolve");
  expect(result.stdout.toString()).toBe("");
});

View File

@@ -737,4 +737,9 @@ describe("Bun.semver.satisfies()", () => {
test("pre-release snapshot", () => {
expect(unsortedPrereleases.sort(Bun.semver.order)).toMatchSnapshot();
});
// Regression test for #8040
test("semver with multiple tags work properly", () => {
  // 3.4.5 lies above the upper bound 3.4.0-beta.3, so this compound range
  // (two comparators with prerelease tags) must not match.
  const inRange = satisfies("3.4.5", ">=3.3.0-beta.1 <3.4.0-beta.3");
  expect(inRange).toBeFalse();
});
});

View File

@@ -1,7 +1,8 @@
import { spawnSync } from "bun";
import { describe, expect, test } from "bun:test";
import { rmSync, writeFileSync } from "fs";
import { bunEnv, bunExe, bunRun, isWindows } from "harness";
import { $, spawnSync } from "bun";
import { beforeAll, describe, expect, test } from "bun:test";
import { chmodSync, rmSync, writeFileSync } from "fs";
import { bunEnv, bunExe, bunRun, isPosix, isWindows, tempDirWithFiles } from "harness";
import { join } from "path";
let cwd: string;
@@ -30,3 +31,96 @@ test.if(isWindows)("[windows] A file in drive root runs", () => {
rmSync(path);
}
});
// Regression test for #4011
describe.concurrent("issue/04011", () => {
  test("running a missing script should return non zero exit code", async () => {
    // Pointing `bun run` at a nonexistent entry file must exit with code 1
    // rather than 0.
    await using proc = Bun.spawn({
      cmd: [bunExe(), "run", "missing.ts"],
      env: bunEnv,
      stderr: "inherit",
      stdout: "pipe",
    });
    const exitCode = await proc.exited;
    expect(exitCode).toBe(1);
  });
});
// Regression test for #10132
// Shared fixture directory for the #10132 tests below; populated once per file.
let issue10132Dir = "";
beforeAll(() => {
  // Fixture layout: a root package with a `get-pwd` script, a nested package
  // at subdir/one/two with its own package.json, and stub executables in each
  // node_modules/.bin (POSIX shell scripts plus Windows .cmd variants).
  issue10132Dir = tempDirWithFiles("issue-10132", {
    "subdir/one/two/three/hello.txt": "hello",
    "node_modules/.bin/bun-hello": `#!/usr/bin/env bash
echo "My name is bun-hello"
`,
    "node_modules/.bin/bun-hello.cmd": `@echo off
echo My name is bun-hello
`,
    "subdir/one/two/package.json": JSON.stringify(
      {
        name: "issue-10132",
        version: "0.0.0",
        scripts: {
          "other-script": "echo hi",
        },
      },
      null,
      2,
    ),
    "subdir/one/two/node_modules/.bin/bun-hello2": `#!/usr/bin/env bash
echo "My name is bun-hello2"
`,
    "subdir/one/two/node_modules/.bin/bun-hello2.cmd": `@echo off
echo My name is bun-hello2
`,
    "package.json": JSON.stringify(
      {
        name: "issue-10132",
        version: "0.0.0",
        scripts: {
          "get-pwd": "pwd",
        },
      },
      null,
      2,
    ),
  });
  // The POSIX shell stubs must be marked executable; .cmd files need no chmod.
  if (isPosix) {
    chmodSync(join(issue10132Dir, "node_modules/.bin/bun-hello"), 0o755);
    chmodSync(join(issue10132Dir, "subdir/one/two/node_modules/.bin/bun-hello2"), 0o755);
  }
});
// Regression test for #10132: `bun run <script>` must execute the script with
// the package.json directory as cwd (matching npm), even when invoked from a
// nested subdirectory.
test("bun run sets cwd for script, matching npm", async () => {
  try {
    $.cwd(issue10132Dir);
    const currentPwd = (await $`${bunExe()} run get-pwd`.text()).trim();
    expect(currentPwd).toBe(issue10132Dir);
    // From a nested directory the script must still run at the package root.
    const nestedPwd = join(currentPwd, "subdir", "one");
    $.cwd(nestedPwd);
    expect((await $`${bunExe()} run get-pwd`.text()).trim()).toBe(issue10132Dir);
  } finally {
    // $.cwd mutates global shell state; restore it even when an expect throws
    // so later tests in this file are not affected. (The original restore ran
    // only on the success path.)
    $.cwd(process.cwd());
  }
});
test("issue #10132, bun run sets PATH", async () => {
  // Runs the node_modules/.bin stub both directly (`bun bun-hello`) and via
  // `bun run bun-hello` from `dir`, asserting PATH lookup found it.
  async function run(dir: string) {
    // NOTE(review): $.cwd mutates global Bun Shell state while the five run()
    // calls below execute under Promise.all. This appears to rely on each
    // call's synchronous prefix (cwd set + command creation) running without
    // interleaving — confirm Bun Shell captures cwd at command-creation time.
    $.cwd(dir);
    const [first, second] = await Promise.all([$`${bunExe()} bun-hello`.quiet(), $`${bunExe()} run bun-hello`.quiet()]);
    expect(first.text().trim()).toBe("My name is bun-hello");
    expect(second.text().trim()).toBe("My name is bun-hello");
  }
  // PATH lookup must work from the package root and from every nested level,
  // including subdir/one/two which has its own node_modules/.bin.
  await Promise.all(
    [
      issue10132Dir,
      join(issue10132Dir, "subdir"),
      join(issue10132Dir, "subdir", "one"),
      join(issue10132Dir, "subdir", "one", "two"),
      join(issue10132Dir, "subdir", "one", "two", "three"),
    ].map(run),
  );
});

View File

@@ -413,3 +413,54 @@ describe("Bun.CookieMap constructor", () => {
expect(map2.get("cookiekey")).toBe("%E8%AF%BB%E5%86%99%E6%B1%89%E5%AD%97%E5%AD%A6%E4%B8%AD%E6%96%87");
});
});
// Regression test for #22475
test("issue #22475: cookie.isExpired() should return true for Unix epoch (0)", () => {
  // Parse two Set-Cookie strings whose Expires dates sit at / just after epoch.
  const parsed = ["a=; Expires=Thu, 01 Jan 1970 00:00:00 GMT", "b=; Expires=Thu, 01 Jan 1970 00:00:01 GMT"].map(
    raw => new Bun.Cookie(raw),
  );
  const [epochCookie, afterEpochCookie] = parsed;

  // Cookie 'a': Expires at the Unix epoch (timestamp 0) must count as expired.
  expect(epochCookie.name).toBe("a");
  expect(epochCookie.expires).toBeDate();
  expect(epochCookie.expires?.getTime()).toBe(0);
  expect(epochCookie.isExpired()).toBe(true);

  // Cookie 'b': one second after the epoch is also long past.
  expect(afterEpochCookie.name).toBe("b");
  expect(afterEpochCookie.expires).toBeDate();
  expect(afterEpochCookie.expires?.getTime()).toBe(1000);
  expect(afterEpochCookie.isExpired()).toBe(true);
});
// Regression test for #22475
test("cookie.isExpired() for various edge cases", () => {
  // expires: 0 (the Unix epoch) → expired.
  const atEpoch = new Bun.Cookie("test", "value", { expires: 0 });
  expect(atEpoch.expires).toBeDate();
  expect(atEpoch.expires?.getTime()).toBe(0);
  expect(atEpoch.isExpired()).toBe(true);

  // Negative timestamp (before the epoch) → expired.
  const beforeEpoch = new Bun.Cookie("test", "value", { expires: -1 });
  expect(beforeEpoch.expires).toBeDate();
  expect(beforeEpoch.expires?.getTime()).toBe(-1000);
  expect(beforeEpoch.isExpired()).toBe(true);

  // No expires at all → session cookie, never expired.
  const sessionCookie = new Bun.Cookie("test", "value");
  expect(sessionCookie.expires).toBeUndefined();
  expect(sessionCookie.isExpired()).toBe(false);

  // Expiry one day in the future → not expired.
  const futureCookie = new Bun.Cookie("test", "value", { expires: Date.now() + 86400000 });
  expect(futureCookie.isExpired()).toBe(false);
});

View File

@@ -1,6 +1,7 @@
import { color } from "bun";
import { cssInternals } from "bun:internal-for-testing";
import { describe, expect, test } from "bun:test";
import { withoutAggressiveGC } from "harness";
import { normalizeBunSnapshot, withoutAggressiveGC } from "harness";
const namedColors = ["red", "green", "blue", "yellow", "purple", "orange", "pink", "brown", "gray"];
@@ -218,3 +219,73 @@ test("fuzz ansi256", () => {
}
});
});
// Regression test for css-system-color-mix-crash
test("CSS system colors in color-mix should not crash", () => {
  // This test reproduces a crash that was happening when using system colors
  // in color-mix() functions. The crash was caused by system colors reaching
  // the color interpolation code which had a panic for system colors.
  const testCases = [
    "color-mix(in srgb, ButtonFace, red)",
    "color-mix(in srgb, Canvas, blue)",
    "color-mix(in srgb, AccentColor, white)",
    "color-mix(in srgb, red, ButtonFace)",
    "color-mix(in srgb, ButtonFace 50%, red)",
    "color-mix(in srgb, ButtonFace, Canvas)",
    "color-mix(in oklch, AccentColor, FieldText)",
    "color-mix(in hsl, WindowFrame, LinkText)",
  ];
  for (const testCase of testCases) {
    const css = `
.test {
color: ${testCase};
}
`;
    // This should not crash — it should either parse successfully or fail
    // gracefully with a plain parse error.
    try {
      const result = cssInternals._test(css, css);
      expect(result).toBeDefined();
    } catch (error) {
      // Narrow the unknown catch value before reading .message: under strict
      // mode `error` is `unknown`, and a non-Error throw would otherwise make
      // `toContain` blow up on `undefined` instead of testing the message.
      const message = error instanceof Error ? error.message : String(error);
      expect(message).not.toContain("system colors cannot be converted to a color");
      expect(message).not.toContain("unreachable");
      expect(message).not.toContain("panic");
    }
  }
});
// Regression test for css-system-color-mix-crash
test("CSS system colors in color-mix - snapshot outputs", () => {
  const testCases = [
    "color-mix(in srgb, ButtonFace, red)",
    "color-mix(in srgb, Canvas, blue)",
    "color-mix(in srgb, AccentColor, white)",
    "color-mix(in srgb, red, ButtonFace)",
    "color-mix(in srgb, ButtonFace 50%, red)",
    "color-mix(in srgb, ButtonFace, Canvas)",
    "color-mix(in oklch, AccentColor, FieldText)",
    "color-mix(in hsl, WindowFrame, LinkText)",
    "color-mix(in srgb, HighlightText, GrayText)",
    "color-mix(in srgb, Canvas 25%, AccentColor 75%)",
    "color-mix(in lch, ButtonFace, transparent)",
    "color-mix(in hsl, AccentColor, currentColor)",
  ];
  // Map each input to either its transformed output or its parse error message.
  // Typed explicitly so string indexing compiles under strict mode (the
  // original untyped `{}` fails noImplicitAny index checks).
  const results: Record<string, { success: true; output: unknown } | { success: false; error: string }> = {};
  for (const testCase of testCases) {
    const css = `.test { color: ${testCase}; }`;
    try {
      const result = cssInternals._test(css, css);
      results[testCase] = { success: true, output: result };
    } catch (error) {
      // Narrow the unknown catch value so a non-Error throw doesn't snapshot
      // "undefined" as the error message.
      results[testCase] = { success: false, error: error instanceof Error ? error.message : String(error) };
    }
  }
  expect(normalizeBunSnapshot(JSON.stringify(results, null, 2))).toMatchSnapshot();
});

View File

@@ -2,8 +2,10 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
import { describe, test } from "bun:test";
import { cssInternals } from "bun:internal-for-testing";
import { describe, expect, test } from "bun:test";
import "harness";
import { bunEnv, bunExe, tempDir, tempDirWithFiles } from "harness";
import { join } from "path";
import {
cssTest,
@@ -7457,3 +7459,390 @@ describe("css tests", () => {
});
});
});
// Regression test for #21907
test("CSS parser should handle extremely large floating-point values without crashing", async () => {
  // Test for regression of issue #21907: "integer part of floating point value out of bounds"
  // This was causing crashes on Windows when processing TailwindCSS with rounded-full class
  // The fixture exercises every intFromFloat call site named in the comments below.
  const dir = tempDirWithFiles("css-large-float-regression", {
    "input.css": `
/* Tests intFromFloat(i32, value) in serializeDimension */
.test-rounded-full {
border-radius: 3.40282e38px;
width: 2147483648px;
height: -2147483649px;
}
.test-negative {
border-radius: -3.40282e38px;
}
.test-very-large {
border-radius: 999999999999999999999999999999999999999px;
}
.test-large-integer {
border-radius: 340282366920938463463374607431768211456px;
}
/* Tests intFromFloat(u8, value) in color conversion */
.test-colors {
color: rgb(300, -50, 1000);
background: rgba(999.9, 0.1, -10.5, 1.5);
}
/* Tests intFromFloat(i32, value) in percentage handling */
.test-percentages {
width: 999999999999999999%;
height: -999999999999999999%;
}
/* Tests edge cases around integer boundaries */
.test-boundaries {
margin: 2147483647px; /* i32 max */
padding: -2147483648px; /* i32 min */
left: 4294967295px; /* u32 max */
}
/* Tests normal values */
.test-normal {
width: 10px;
height: 20.5px;
margin: 0px;
}
`,
  });
  // This would previously crash with "integer part of floating point value out of bounds"
  await using proc = Bun.spawn({
    cmd: [bunExe(), "build", "input.css", "--outdir", "out"],
    env: bunEnv,
    cwd: dir,
    stdout: "pipe",
    stderr: "pipe",
  });
  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
  // Should not crash and should exit successfully
  expect(exitCode).toBe(0);
  expect(stderr).not.toContain("panic");
  expect(stderr).not.toContain("integer part of floating point value out of bounds");
  // Verify the output CSS is properly processed with intFromFloat conversions
  const outputContent = await Bun.file(`${dir}/out/input.css`).text();
  // Helper function to normalize CSS output for snapshots
  // (the bundler emits a path-bearing banner comment that varies per machine).
  function normalizeCSSOutput(output: string): string {
    return output
      .replace(/\/\*.*?\*\//g, "/* [path] */") // Replace comment paths
      .trim();
  }
  // Test the actual output with inline snapshot - this ensures all intFromFloat
  // conversions work correctly and captures any changes in output format
  expect(normalizeCSSOutput(outputContent)).toMatchInlineSnapshot(`
"/* [path] */
.test-rounded-full {
border-radius: 3.40282e+38px;
width: 2147480000px;
height: -2147480000px;
}
.test-negative {
border-radius: -3.40282e+38px;
}
.test-very-large, .test-large-integer {
border-radius: 3.40282e38px;
}
.test-colors {
color: #f0f;
background: red;
}
.test-percentages {
width: 1000000000000000000%;
height: -1000000000000000000%;
}
.test-boundaries {
margin: 2147480000px;
padding: -2147480000px;
left: 4294970000px;
}
.test-normal {
width: 10px;
height: 20.5px;
margin: 0;
}"
`);
});
// Regression test for CSS system colors in various contexts
test("CSS system colors in various contexts should not crash", () => {
  // Test system colors in contexts where they might be converted/processed.
  // Each declaration is parsed in isolation; only a parser crash (unreachable,
  // panic, or the specific "cannot be converted" message) fails the test.
  const testCases = [
    // Basic system colors
    "color: ButtonFace",
    "background-color: Canvas",
    "border-color: WindowFrame",
    // System colors in color functions (might trigger conversion)
    "color: color-mix(in srgb, ButtonFace, red)",
    "color: color-mix(in srgb, Canvas 50%, blue)",
    "color: color-mix(in oklch, AccentColor, white)",
    // System colors in relative color syntax (likely to trigger conversion)
    "color: hsl(from ButtonFace h s l)",
    "color: hsl(from Canvas h s l)",
    "color: hsl(from AccentColor h s l)",
    "color: rgb(from ButtonFace r g b)",
    "color: rgb(from Canvas r g b)",
    "color: hwb(from AccentColor h w b)",
    "color: oklch(from ButtonFace l c h)",
    "color: color(from Canvas srgb r g b)",
    // System colors with calc() (might trigger conversion)
    "color: hsl(from ButtonFace calc(h + 10) s l)",
    "color: rgb(from Canvas calc(r * 0.5) g b)",
    "color: hwb(from AccentColor h calc(w + 10%) b)",
    // System colors with alpha modifications (might trigger conversion)
    "color: color(from ButtonFace srgb r g b / 0.5)",
    "color: hsl(from Canvas h s l / 0.8)",
    "color: rgb(from AccentColor r g b / 50%)",
    // System colors in gradients (might trigger conversion)
    "background: linear-gradient(to right, ButtonFace, Canvas)",
    "background: radial-gradient(circle, AccentColor, WindowFrame)",
    // System colors in complex expressions
    "color: color-mix(in srgb, color-mix(in srgb, ButtonFace, red), Canvas)",
    "color: hsl(from color-mix(in srgb, ButtonFace, red) h s l)",
    // Light-dark with system colors
    "color: light-dark(ButtonFace, Canvas)",
    "color: light-dark(Canvas, ButtonFace)",
    "color: hsl(from light-dark(ButtonFace, Canvas) h s l)",
  ];
  for (const testCase of testCases) {
    const css = `
.test {
  ${testCase};
}
`;
    console.log(`Testing: ${testCase}`);
    try {
      const result = cssInternals._test(css, css);
      console.log(`Result: ${result ? "parsed" : "failed"}`);
    } catch (error: unknown) {
      // BUG FIX (idiom): previously `catch (error: any)`, which disabled type
      // checking on `.message`. Narrow the unknown error before using it.
      const message = error instanceof Error ? error.message : String(error);
      console.log(`Error: ${message}`);
      // Check if this is the specific crash we're looking for
      if (
        message.includes("system colors cannot be converted to a color") ||
        message.includes("unreachable") ||
        message.includes("panic")
      ) {
        console.log("FOUND THE SYSTEM COLOR CRASH!");
        throw error; // Re-throw to make the test fail and show the crash
      }
    }
  }
});
// Regression test for #25785
test("CSS bundler should preserve logical border-radius properties", async () => {
  using dir = tempDir("issue-25785", {
    "test.css": `
      .test1 {
        border-start-start-radius: 0.75rem;
      }
      .test2 {
        border-end-start-radius: 0.75rem;
      }
      .test3 {
        border-start-end-radius: 0.75rem;
      }
      .test4 {
        border-end-end-radius: 0.75rem;
      }
      .test5 {
        border-top-left-radius: 0.75rem;
      }
    `,
  });
  const build = await Bun.build({
    entrypoints: [`${dir}/test.css`],
    outdir: `${dir}/dist`,
    experimentalCss: true,
    minify: false,
  });
  expect(build.success).toBe(true);
  expect(build.outputs.length).toBe(1);
  const bundled = await build.outputs[0].text();
  // Logical border-radius longhands compile to physical LTR/RTL fallbacks
  // (e.g. border-start-start-radius -> border-top-left/right-radius), so every
  // selector and the corresponding physical longhands must survive bundling.
  const requiredFragments = [
    ".test1",
    "border-top-left-radius",
    "border-top-right-radius",
    ".test2",
    "border-bottom-left-radius",
    "border-bottom-right-radius",
    ".test3",
    ".test4",
    ".test5", // plain physical property must also be preserved
  ];
  for (const fragment of requiredFragments) {
    expect(bundled).toContain(fragment);
  }
});
// Regression test for #25785
test("CSS bundler should handle logical border-radius with targets that compile logical properties", async () => {
  using dir = tempDir("issue-25785-compiled", {
    "test.css": `
      .test1 {
        border-start-start-radius: 0.75rem;
      }
      .test2 {
        border-end-start-radius: 0.75rem;
      }
      .test3 {
        border-start-end-radius: 0.75rem;
      }
      .test4 {
        border-end-end-radius: 0.75rem;
      }
    `,
  });
  const build = await Bun.build({
    entrypoints: [`${dir}/test.css`],
    outdir: `${dir}/dist`,
    experimentalCss: true,
    minify: false,
    // Target older browsers that don't support logical properties
    target: "browser",
  });
  expect(build.success).toBe(true);
  expect(build.outputs.length).toBe(1);
  const bundled = await build.outputs[0].text();
  // The original bug produced an EMPTY bundle when logical properties were
  // compiled down, so the first requirement is simply non-empty output...
  expect(bundled.trim().length).toBeGreaterThan(0);
  // ...which must contain compiled physical border-radius rules and keep
  // every selector intact.
  expect(bundled).toMatch(/border-.*-radius/);
  for (const selector of [".test1", ".test2", ".test3", ".test4"]) {
    expect(bundled).toContain(selector);
  }
});
// Regression test for #25794
test("CSS logical properties should not be stripped when nested rules are present", async () => {
  // Test for regression of issue #25794: CSS logical properties (e.g., inset-inline-end)
  // are stripped from bundler output when they appear in a nested selector that also
  // contains further nested rules (like pseudo-elements).
  const dir = tempDirWithFiles("css-logical-properties-nested", {
    "input.css": `.test-longform {
  background-color: teal;
  &.test-longform--end {
    inset-inline-end: 20px;
    &:after {
      content: "";
    }
  }
}
`,
  });
  await using proc = Bun.spawn({
    cmd: [bunExe(), "build", "input.css", "--outdir", "out"],
    env: bunEnv,
    cwd: dir,
    stdout: "pipe",
    stderr: "pipe",
  });
  // stdout is not asserted; discard it from the destructure.
  const [, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
  // BUG FIX: assert the build succeeded BEFORE reading the output file, so a
  // failed build reports its exit code / stderr instead of a confusing
  // missing-file error or snapshot mismatch.
  expect(stderr).not.toContain("panic");
  expect(exitCode).toBe(0);
  // Verify the output CSS contains the logical property fallbacks
  const outputContent = await Bun.file(`${dir}/out/input.css`).text();
  // Helper function to normalize CSS output for snapshots
  function normalizeCSSOutput(output: string): string {
    return output
      .replace(/\/\*.*?\*\//g, "/* [path] */") // Replace comment paths
      .trim();
  }
  // The output should contain LTR/RTL fallback rules for inset-inline-end
  // inset-inline-end: 20px should generate:
  //   - right: 20px for LTR languages
  //   - left: 20px for RTL languages
  // The bundler generates vendor-prefixed variants for browser compatibility
  expect(normalizeCSSOutput(outputContent)).toMatchInlineSnapshot(`
"/* [path] */
.test-longform {
background-color: teal;
}
.test-longform.test-longform--end:not(:-webkit-any(:lang(ae), :lang(ar), :lang(arc), :lang(bcc), :lang(bqi), :lang(ckb), :lang(dv), :lang(fa), :lang(glk), :lang(he), :lang(ku), :lang(mzn), :lang(nqo), :lang(pnb), :lang(ps), :lang(sd), :lang(ug), :lang(ur), :lang(yi))) {
right: 20px;
}
.test-longform.test-longform--end:not(:-moz-any(:lang(ae), :lang(ar), :lang(arc), :lang(bcc), :lang(bqi), :lang(ckb), :lang(dv), :lang(fa), :lang(glk), :lang(he), :lang(ku), :lang(mzn), :lang(nqo), :lang(pnb), :lang(ps), :lang(sd), :lang(ug), :lang(ur), :lang(yi))) {
right: 20px;
}
.test-longform.test-longform--end:not(:is(:lang(ae), :lang(ar), :lang(arc), :lang(bcc), :lang(bqi), :lang(ckb), :lang(dv), :lang(fa), :lang(glk), :lang(he), :lang(ku), :lang(mzn), :lang(nqo), :lang(pnb), :lang(ps), :lang(sd), :lang(ug), :lang(ur), :lang(yi))) {
right: 20px;
}
.test-longform.test-longform--end:-webkit-any(:lang(ae), :lang(ar), :lang(arc), :lang(bcc), :lang(bqi), :lang(ckb), :lang(dv), :lang(fa), :lang(glk), :lang(he), :lang(ku), :lang(mzn), :lang(nqo), :lang(pnb), :lang(ps), :lang(sd), :lang(ug), :lang(ur), :lang(yi)) {
left: 20px;
}
.test-longform.test-longform--end:-moz-any(:lang(ae), :lang(ar), :lang(arc), :lang(bcc), :lang(bqi), :lang(ckb), :lang(dv), :lang(fa), :lang(glk), :lang(he), :lang(ku), :lang(mzn), :lang(nqo), :lang(pnb), :lang(ps), :lang(sd), :lang(ug), :lang(ur), :lang(yi)) {
left: 20px;
}
.test-longform.test-longform--end:is(:lang(ae), :lang(ar), :lang(arc), :lang(bcc), :lang(bqi), :lang(ckb), :lang(dv), :lang(fa), :lang(glk), :lang(he), :lang(ku), :lang(mzn), :lang(nqo), :lang(pnb), :lang(ps), :lang(sd), :lang(ug), :lang(ur), :lang(yi)) {
left: 20px;
}
.test-longform.test-longform--end:after {
content: "";
}"
`);
});

View File

@@ -970,3 +970,30 @@ describe.if(!!libPath)("can open more than 63 symbols via", () => {
});
}
});
// Regression test for #25231
describe("CString constructor", () => {
  it("Bun.FFI.CString is callable with new", () => {
    // Point a CString at the first 5 bytes of a null-terminated buffer.
    const buffer = Buffer.from("hello\0");
    const address = ptr(buffer);
    const viaNew = new CString(address, 0, 5);
    // Constructing with `new` must yield the decoded string.
    expect(String(viaNew)).toBe("hello");
  });
  it("Bun.FFI.CString can be called without new", () => {
    const buffer = Buffer.from("hello\0");
    const address = ptr(buffer);
    // Plain-call form must work identically to the `new` form.
    const viaCall = CString(address, 0, 5);
    expect(viaCall).toBe("hello");
  });
});

View File

@@ -194,3 +194,66 @@ describe.todoIf(isBroken && isMacOS)("static", () => {
expect(handler.mock.calls.length, "Handler should be called").toBe(previousCallCount + 1);
});
});
// Regression test for #24817
// https://github.com/oven-sh/bun/issues/24817
// Unicode not working with static route
test("static routes should handle unicode correctly", async () => {
using server = Bun.serve({
port: 0,
routes: {
"/dynamic": () => new Response("▲"),
"/static": new Response("▲"),
"/unicode-string": new Response("こんにちは世界"),
"/emoji": new Response("🎉🚀✨"),
},
});
const baseUrl = server.url.href;
// Test basic unicode character
{
const staticResp = await fetch(`${baseUrl}/static`);
const staticText = await staticResp.text();
expect(staticText).toBe("▲");
expect(staticResp.headers.get("content-type")).toBe("text/plain; charset=utf-8");
}
// Test Japanese characters
{
const resp = await fetch(`${baseUrl}/unicode-string`);
const text = await resp.text();
expect(text).toBe("こんにちは世界");
expect(resp.headers.get("content-type")).toBe("text/plain; charset=utf-8");
}
// Test emoji
{
const resp = await fetch(`${baseUrl}/emoji`);
const text = await resp.text();
expect(text).toBe("🎉🚀✨");
expect(resp.headers.get("content-type")).toBe("text/plain; charset=utf-8");
}
});
// Regression test for #24817
test("static routes with explicit content-type should not override", async () => {
  using server = Bun.serve({
    port: 0,
    routes: {
      "/custom": new Response("▲", { headers: { "content-type": "text/html" } }),
    },
  });
  const baseUrl = server.url.href;
  const response = await fetch(`${baseUrl}/custom`);
  // The explicitly supplied content-type must win over the charset default,
  // and the unicode body must still arrive intact.
  expect(response.headers.get("content-type")).toBe("text/html");
  expect(await response.text()).toBe("▲");
});

View File

@@ -2189,3 +2189,355 @@ it.concurrent("#20283", async () => {
// there should be no cookies and the clone should have succeeded
expect(json).toEqual({ cookies: {}, clonedCookies: {} });
});
// Regression test for #6443
describe("Bun.serve() request.url with TLS", () => {
  const tlsOptions = {
    cert: file(new URL("./fixtures/cert.pem", import.meta.url)),
    key: file(new URL("./fixtures/cert.key", import.meta.url)),
  };
  // One plain-HTTP server and one TLS server: request.url must reflect the
  // scheme the server was started with.
  const servers = [
    {
      port: 0,
      url: /^http:\/\/localhost:\d+\/$/,
    },
    {
      tls: tlsOptions,
      port: 0,
      url: /^https:\/\/localhost:\d+\/$/,
    },
  ];
  it.each(servers)("%j", async ({ url, ...options }) => {
    const server = serve({
      hostname: "localhost",
      ...options,
      fetch(request) {
        return new Response(request.url);
      },
    });
    try {
      const proto = options.tls ? "https" : "http";
      const target = `${proto}://localhost:${server.port}/`;
      const response = await fetch(target, { tls: { rejectUnauthorized: false } });
      // BUG FIX: `.resolves` yields a promise; without `await` the assertion
      // could still be pending when the test ends (and the server stops),
      // leaving a failure unreported / an unhandled rejection.
      await expect(response.text()).resolves.toMatch(url);
    } finally {
      server.stop(true);
    }
  });
});
// Regression test for server assertion failure when stopping with pending requests
it("server.stop() with pending requests should not cause assertion failure", async () => {
  const server = Bun.serve({
    port: 0,
    fetch() {
      return new Response("OK");
    },
  });
  try {
    // One request completed, one still in flight when stop() runs.
    await fetch(server.url).catch(() => {});
    void fetch(server.url).catch(() => {});
    // Stopping with a pending request used to trip an internal assertion;
    // simply surviving this call is the regression check.
    server.stop();
    expect(true).toBe(true);
  } finally {
    // Ensure cleanup in case the test failed before stop().
    try {
      server.stop();
    } catch {}
  }
});
// Additional test to ensure server still works normally after the fix
it("server still works normally after jsref changes", async () => {
  const server = Bun.serve({
    port: 0,
    fetch() {
      return new Response("Hello World");
    },
  });
  try {
    // A plain round-trip must still succeed with the expected status and body.
    const response = await fetch(server.url);
    expect(response.status).toBe(200);
    expect(await response.text()).toBe("Hello World");
  } finally {
    server.stop();
  }
});
// Regression test for #22353
it("issue #22353 - server should handle oversized request without crashing", async () => {
  using server = Bun.serve({
    port: 0,
    maxRequestBodySize: 1024, // 1KB limit
    async fetch(req) {
      const body = await req.text();
      const payload = JSON.stringify({ received: true, size: body.length });
      return new Response(payload, { headers: { "Content-Type": "application/json" } });
    },
  });
  // A body one byte over the limit must be rejected with 413 and no body.
  const oversized = await fetch(server.url, {
    method: "POST",
    body: "A".repeat(1025),
  });
  expect(oversized.status).toBe(413);
  expect(await oversized.text()).toBeEmpty();
  // The server must keep serving normal requests afterwards (no crash/corruption).
  for (let attempt = 0; attempt < 100; attempt++) {
    const ok = await fetch(server.url, { method: "POST" });
    expect(ok.status).toBe(200);
    expect(await ok.json()).toEqual({ received: true, size: 0 });
  }
}, 10000);
// Regression test for #21677
it("issue #21677 - should not add redundant Date headers", async () => {
  const testDate1 = new Date("2025-08-07T17:01:47.000Z").toUTCString();
  const testDate2 = new Date("2025-08-07T17:02:23.000Z").toUTCString();
  const testDate3 = new Date("2025-08-07T17:03:06.000Z").toUTCString();
  using server = Bun.serve({
    port: 0,
    routes: {
      "/static": () =>
        new Response(`date test`, {
          headers: { date: testDate1 },
        }),
      "/proxy": async () => {
        // Create a simple server response with a Date header to proxy
        const simpleResponse = new Response("proxied content", {
          headers: {
            "Date": testDate3,
            "Content-Type": "text/plain",
          },
        });
        return simpleResponse;
      },
    },
    fetch: () =>
      new Response(`date test`, {
        headers: { date: testDate2 },
      }),
  });
  // Collect every Date header from a response, case-insensitively.
  const dateHeadersOf = (response: Response) =>
    [...response.headers.entries()].filter(([name]) => name.toLowerCase() === "date");
  // The default fetch handler, the static route, and the proxied response must
  // each carry exactly ONE Date header — the one the handler set.
  {
    const response = await fetch(server.url);
    const dates = dateHeadersOf(response);
    expect(dates).toHaveLength(1);
    expect(dates[0][1]).toBe(testDate2);
  }
  {
    const response = await fetch(new URL("/static", server.url));
    const dates = dateHeadersOf(response);
    expect(dates).toHaveLength(1);
    expect(dates[0][1]).toBe(testDate1);
  }
  {
    const response = await fetch(new URL("/proxy", server.url));
    const dates = dateHeadersOf(response);
    expect(dates).toHaveLength(1);
    expect(dates[0][1]).toBe(testDate3);
  }
});
// Regression test for #21677
it("issue #21677 - reproduce with raw HTTP to verify duplicate headers", async () => {
  const testDate = new Date("2025-08-07T17:02:23.000Z").toUTCString();
  using server = Bun.serve({
    port: 0,
    fetch: () =>
      new Response(`date test`, {
        headers: { date: testDate },
      }),
  });
  // Use TCP socket to get raw HTTP response and check for duplicate headers
  // (fetch()'s Headers object would silently merge duplicates).
  await new Promise((resolve, reject) => {
    const socket = Bun.connect({
      hostname: "localhost",
      port: server.port,
      socket: {
        data(socket, data) {
          // BUG FIX: a failing expect() inside this callback used to leave the
          // promise pending forever, turning an assertion failure into a test
          // timeout. Reject on failure so the real error is reported.
          try {
            const response = data.toString();
            // Should NOT contain multiple Date headers
            const lines = response.split("\r\n");
            const dateHeaderLines = lines.filter(line => line.toLowerCase().startsWith("date:"));
            expect(dateHeaderLines).toHaveLength(1);
            expect(dateHeaderLines[0]).toBe(`Date: ${testDate}`);
            resolve(undefined);
          } catch (error) {
            reject(error);
          } finally {
            socket.end();
          }
        },
        error(socket, error) {
          reject(error);
        },
        open(socket) {
          socket.write("GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n");
        },
      },
    });
  });
});
// Regression test for #21792
// This test verifies the fix for SNI TLS array handling - was incorrectly rejecting arrays with exactly 1 TLS config
describe("SNI TLS array handling (issue #21792)", () => {
  // Borrow the key pair shipped with the jsonwebtoken third-party tests.
  const certDir = join(import.meta.dir, "../../third_party/jsonwebtoken");
  const certA = readFileSync(join(certDir, "pub.pem"), "utf8");
  const privA = readFileSync(join(certDir, "priv.pem"), "utf8");
  // The "second server" reuses the same material; only serverName differs.
  const certB = certA;
  const privB = privA;

  it("should accept empty TLS array (no TLS)", () => {
    // An empty tls array means "no TLS at all" — the server speaks plain HTTP.
    using server = Bun.serve({
      development: true,
      port: 0,
      tls: [],
      fetch: () => new Response("Hello"),
    });
    expect(server.url.toString()).toStartWith("http://"); // HTTP, not HTTPS
  });

  it("should accept single TLS config in array", () => {
    // The original bug: a one-element tls array was rejected outright.
    using server = Bun.serve({
      development: true,
      port: 0,
      tls: [{ cert: certA, key: privA, serverName: "serverA.com" }],
      fetch: () => new Response("Hello from serverA"),
    });
    expect(server.url.toString()).toStartWith("https://");
  });

  it("should accept multiple TLS configs for SNI", () => {
    using server = Bun.serve({
      development: true,
      port: 0,
      tls: [
        { cert: certA, key: privA, serverName: "serverA.com" },
        { cert: certB, key: privB, serverName: "serverB.com" },
      ],
      fetch: request => new Response(`Hello from ${request.headers.get("host") || "unknown"}`),
    });
    expect(server.url.toString()).toStartWith("https://");
  });

  it("should reject TLS array with missing serverName for SNI configs", () => {
    const startWithoutServerName = () =>
      Bun.serve({
        development: true,
        port: 0,
        tls: [
          { cert: certA, key: privA, serverName: "serverA.com" },
          { cert: certB, key: privB }, // Missing serverName
        ],
        fetch: () => new Response("Hello"),
      });
    expect(startWithoutServerName).toThrow("SNI tls object must have a serverName");
  });

  it("should reject TLS array with empty serverName for SNI configs", () => {
    const startWithEmptyServerName = () =>
      Bun.serve({
        development: true,
        port: 0,
        tls: [
          { cert: certA, key: privA, serverName: "serverA.com" },
          { cert: certB, key: privB, serverName: "" }, // Empty serverName
        ],
        fetch: () => new Response("Hello"),
      });
    expect(startWithEmptyServerName).toThrow("SNI tls object must have a serverName");
  });

  it("should accept single TLS config without serverName when alone", () => {
    // serverName is optional when the array holds exactly one config.
    using server = Bun.serve({
      development: true,
      port: 0,
      tls: [{ cert: certA, key: privA }],
      fetch: () => new Response("Hello from default"),
    });
    expect(server.url.toString()).toStartWith("https://");
  });

  it("should support traditional non-array TLS config", () => {
    // A bare tls object (no array) must keep working as before.
    using server = Bun.serve({
      development: true,
      port: 0,
      tls: { cert: certA, key: privA },
      fetch: () => new Response("Hello traditional"),
    });
    expect(server.url.toString()).toStartWith("https://");
  });
});
// Regression test for #18547
it("issue #18547 - cloned request should have same cookies and params", async () => {
  using server = Bun.serve({
    port: 0,
    routes: {
      "/:foo": request => {
        request.cookies.set("sessionToken", "123456");
        // A clone must start with the original's cookies and route params...
        const clone = request.clone();
        expect(clone.cookies.get("sessionToken")).toEqual("123456");
        expect(clone.params.foo).toEqual("foo");
        // ...and mutating the clone must not leak back into the original.
        clone.cookies.set("sessionToken", "654321");
        expect(request.cookies.get("sessionToken")).toEqual("123456");
        expect(clone.cookies.get("sessionToken")).toEqual("654321");
        return new Response("OK");
      },
    },
  });
  const response = await fetch(`${server.url}/foo`);
  // Only the original request's cookie mutation reaches the response context.
  expect(response.headers.get("set-cookie")).toEqual("sessionToken=123456; Path=/; SameSite=Lax");
});

View File

@@ -197,7 +197,7 @@ plugin({
});
// This is to test that it works when imported from a separate file
import { bunEnv, bunExe } from "harness";
import { bunEnv, bunExe, tempDir } from "harness";
import { render as svelteRender } from "svelte/server";
import "../../third_party/svelte";
import "./module-plugins";
@@ -560,3 +560,190 @@ it("recursion throws stack overflow at entry point", () => {
expect(result.stderr.toString()).toContain("RangeError: Maximum call stack size exceeded.");
});
// Regression test for #22199
describe("onResolve returning undefined/null", () => {
  it("plugin onResolve returning undefined should not crash", () => {
    using dir = tempDir("plugin-undefined", {
      "plugin.js": `
        Bun.plugin({
          name: "test-plugin",
          setup(build) {
            build.onResolve({ filter: /.*\\.(ts|tsx|js|jsx)$/ }, async (args) => {
              // Returning undefined should continue to next plugin or default resolution
              return undefined;
            });
          },
        });
      `,
      "index.js": `console.log("Hello from index.js");`,
    });
    // The preloaded plugin must fall through to default resolution.
    const { exitCode, stdout } = Bun.spawnSync({
      cmd: [bunExe(), "--preload", "./plugin.js", "./index.js"],
      cwd: String(dir),
      env: bunEnv,
      stderr: "inherit",
    });
    expect(exitCode).toBe(0);
    expect(stdout.toString().trim()).toBe("Hello from index.js");
  });
  it("plugin onResolve returning null should not crash", () => {
    using dir = tempDir("plugin-null", {
      "plugin.js": `
        Bun.plugin({
          name: "test-plugin",
          setup(build) {
            build.onResolve({ filter: /.*\\.(ts|tsx|js|jsx)$/ }, async (args) => {
              // Returning null should continue to next plugin or default resolution
              return null;
            });
          },
        });
      `,
      "index.js": `console.log("Hello from index.js");`,
    });
    // null behaves exactly like undefined: default resolution takes over.
    const { exitCode, stdout } = Bun.spawnSync({
      cmd: [bunExe(), "--preload", "./plugin.js", "./index.js"],
      cwd: String(dir),
      env: bunEnv,
      stderr: "inherit",
    });
    expect(exitCode).toBe(0);
    expect(stdout.toString().trim()).toBe("Hello from index.js");
  });
  it("plugin onResolve with sync function returning undefined should not crash", () => {
    using dir = tempDir("plugin-sync-undefined", {
      "plugin.js": `
        Bun.plugin({
          name: "test-plugin",
          setup(build) {
            build.onResolve({ filter: /.*\\.(ts|tsx|js|jsx)$/ }, (args) => {
              // Sync function returning undefined
              return undefined;
            });
          },
        });
      `,
      "index.js": `console.log("Hello from index.js");`,
    });
    // Same contract for a synchronous (non-async) resolver.
    const { exitCode, stdout } = Bun.spawnSync({
      cmd: [bunExe(), "--preload", "./plugin.js", "./index.js"],
      cwd: String(dir),
      env: bunEnv,
      stderr: "inherit",
    });
    expect(exitCode).toBe(0);
    expect(stdout.toString().trim()).toBe("Hello from index.js");
  });
  it("plugin onResolve with rejected promise should throw error", () => {
    using dir = tempDir("plugin-reject", {
      "plugin.js": `
        Bun.plugin({
          name: "test-plugin",
          setup(build) {
            build.onResolve({ filter: /.*\\.(ts|tsx|js|jsx)$/ }, async (args) => {
              throw new Error("Custom plugin error");
            });
          },
        });
      `,
      "index.js": `console.log("Hello from index.js");`,
    });
    // A rejected onResolve promise must surface as a load failure, not a hang.
    const { exitCode, stderr } = Bun.spawnSync({
      cmd: [bunExe(), "--preload", "./plugin.js", "./index.js"],
      cwd: String(dir),
      env: bunEnv,
      stderr: "pipe",
    });
    expect(exitCode).toBe(1);
    expect(stderr.toString()).toContain("Custom plugin error");
  });
});
// Regression test for #12548
describe("TypeScript syntax with 'ts' loader in BunPlugin", () => {
  it("issue #12548: TypeScript syntax should work with 'ts' loader in BunPlugin", async () => {
    // A virtual module registered via build.module() with loader "ts" must
    // accept TS-only syntax (`import { type TSchema }`) when required from JS.
    using dir = tempDir("issue-12548", {
      "index.js": `
import plugin from "./plugin.js";
Bun.plugin(plugin);
// This should work with 'ts' loader
console.log(require('virtual-ts-module'));
`,
      "plugin.js": `
export default {
setup(build) {
build.module('virtual-ts-module', () => ({
contents: "import { type TSchema } from '@sinclair/typebox'; export const test = 'works';",
loader: 'ts',
}));
},
};
`,
    });
    await using proc = Bun.spawn({
      cmd: [bunExe(), "index.js"],
      env: bunEnv,
      cwd: String(dir),
      stderr: "pipe",
      stdout: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    expect(exitCode).toBe(0);
    expect(stderr).toBe("");
    // console.log of the module namespace prints `test: "works"`.
    expect(stdout).toContain('test: "works"');
  });
  it("issue #12548: TypeScript type imports work with 'ts' loader", async () => {
    // Type-only imports/exports must be erased by the 'ts' loader; only the
    // value export (`value = 42`) should survive to runtime.
    using dir = tempDir("issue-12548-type-imports", {
      "index.js": `
Bun.plugin({
setup(build) {
build.module('test-module', () => ({
contents: \`
import { type TSchema } from '@sinclair/typebox';
type MyType = { a: number };
export type { MyType };
export const value = 42;
\`,
loader: 'ts',
}));
},
});
const mod = require('test-module');
console.log(JSON.stringify(mod));
`,
    });
    await using proc = Bun.spawn({
      cmd: [bunExe(), "index.js"],
      env: bunEnv,
      cwd: String(dir),
      stderr: "pipe",
      stdout: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    expect(exitCode).toBe(0);
    expect(stderr).toBe("");
    expect(stdout).toContain('{"value":42}');
  });
});

View File

@@ -1,8 +1,8 @@
import { spawnSync } from "bun";
import { isModuleResolveFilenameSlowPathEnabled } from "bun:internal-for-testing";
import { expect, it, mock } from "bun:test";
import { bunEnv, bunExe, ospath } from "harness";
import { mkdirSync, rmSync, writeFileSync } from "node:fs";
import { bunEnv, bunExe, bunRun, ospath, tmpdirSync } from "harness";
import { mkdirSync, rmSync, symlinkSync, writeFileSync } from "node:fs";
import Module from "node:module";
import { tmpdir } from "node:os";
import { join, sep } from "node:path";
@@ -295,3 +295,34 @@ it("import.meta is correct in a module that was required with a query param", as
expect(cjs.dir).toBe(import.meta.dir);
expect(cjs.file).toBe("other-cjs.js");
});
// Regression test for #8757
it("absolute path to a file that is symlinked has import.meta.main", () => {
  const fixturePath = join(import.meta.dir, "symlink-main-fixture.js");
  const fixtureDir = import.meta.dir;
  const fixtureFile = "symlink-main-fixture.js";
  const root = tmpdirSync();
  try {
    symlinkSync(fixturePath, root + "/main.js");
  } catch (e) {
    // Creating symlinks may require elevated privileges on Windows;
    // skip rather than fail when that's the cause.
    // BUG FIX (idiom): use strict equality, not `==`.
    if (process.platform === "win32") {
      console.log("symlinkSync failed on Windows, skipping test");
      return;
    }
    throw e;
  }
  const result = bunRun(root + "/main.js");
  // The fixture prints, in order: process.argv[1], Bun.main, import.meta.main,
  // import.meta.dir, import.meta.file, import.meta.path. When launched through
  // the symlink, all path-like values must resolve to the real fixture and
  // import.meta.main must still be true.
  expect(result.stdout.trim()).toBe(
    [
      //
      fixturePath,
      fixturePath,
      "true",
      fixtureDir,
      fixtureFile,
      fixturePath,
    ].join("\n"),
  );
});

View File

@@ -0,0 +1,9 @@
// Fixture for regression test #8757
// When run via a symlink, import.meta.main should still be true
// NOTE: each line below is asserted verbatim (and in this exact order) by the
// test: argv[1], Bun.main, import.meta.main, .dir, .file, .path — do not
// reorder or add output.
console.log(process.argv[1]);
console.log(Bun.main);
console.log(import.meta.main);
console.log(import.meta.dir);
console.log(import.meta.file);
console.log(import.meta.path);

View File

@@ -1550,6 +1550,228 @@ describe.concurrent("s3 missing credentials", () => {
});
});
// Regression test for S3 presigned URL query parameter order
it("S3 presigned URL should have correct query parameter order", () => {
const s3 = new S3Client({
accessKeyId: "test-key",
secretAccessKey: "test-secret",
endpoint: "https://s3.example.com",
bucket: "test-bucket",
});
const url = s3.presign("test-file.txt", {
method: "PUT",
acl: "public-read",
expiresIn: 300,
});
// Parse the URL to get query parameters
const urlObj = new URL(url);
const params = Array.from(urlObj.searchParams.keys());
console.log("Query parameters order:", params);
// Verify alphabetical order (after URL decoding)
const expected = params.slice().sort();
expect(params).toEqual(expected);
// Verify that required AWS SigV4 parameters are present
expect(params).toContain("X-Amz-Algorithm");
expect(params).toContain("X-Amz-Credential");
expect(params).toContain("X-Amz-Date");
expect(params).toContain("X-Amz-Expires");
expect(params).toContain("X-Amz-SignedHeaders");
expect(params).toContain("X-Amz-Signature");
expect(params).toContain("X-Amz-Acl");
});
// Regression test for S3 presigned URL performance with stack allocator
it("S3 presigned URL performance test with stack allocator", () => {
  // Long credentials/bucket names deliberately stress allocation sizes.
  const s3 = new S3Client({
    accessKeyId: "test-key-123456789012345678901234567890",
    secretAccessKey: "test-secret-123456789012345678901234567890123456789012345678901234567890",
    endpoint: "https://s3.example.com",
    bucket: "test-bucket-with-long-name-to-test-allocation",
  });
  // Test with various parameter combinations to stress the allocator.
  const testCases = [
    {
      name: "simple",
      params: {},
    },
    {
      name: "with-acl",
      params: { acl: "public-read" },
    },
    {
      name: "with-multiple-params",
      params: {
        method: "PUT",
        acl: "public-read-write",
        expiresIn: 3600,
        storageClass: "STANDARD_IA",
      },
    },
  ];
  for (const testCase of testCases) {
    const url = s3.presign(`test-file-${testCase.name}.txt`, testCase.params);
    // Verify URL is generated correctly.
    expect(url).toContain("test-file-");
    expect(url).toContain("X-Amz-Algorithm=AWS4-HMAC-SHA256");
    expect(url).toContain("X-Amz-Credential=");
    expect(url).toContain("X-Amz-Date=");
    expect(url).toContain("X-Amz-Signature=");
    // Canonical query parameters must come out already sorted (SigV4).
    const params = Array.from(new URL(url).searchParams.keys());
    expect(params).toEqual(params.slice().sort());
  }
  // Repeated presigning must neither throw nor corrupt the allocator.
  // (Removed the vacuous trailing `expect(true).toBe(true)` — the per-URL
  // assertion inside the loop is the real check.)
  for (let i = 0; i < 100; i++) {
    const url = s3.presign(`perf-test-${i}.txt`, {
      method: "PUT",
      acl: "private",
      expiresIn: 300,
    });
    expect(url).toContain("perf-test-");
  }
});
// Regression test for GitHub issue #25750: S3 File.presign() ignores contentDisposition and type options
describe("issue #25750 - S3 presign contentDisposition and type", () => {
  // Dummy credentials: presigning only signs locally, no network traffic happens.
  const s3Client = new S3Client({
    region: "us-east-1",
    endpoint: "https://s3.us-east-1.amazonaws.com",
    accessKeyId: "test-key",
    secretAccessKey: "test-secret",
    bucket: "test-bucket",
  });
  it("should include response-content-disposition in presigned URL", () => {
    const file = s3Client.file("example.txt");
    const url = file.presign({
      method: "GET",
      expiresIn: 900,
      contentDisposition: 'attachment; filename="quarterly-report.txt"',
    });
    // The option must surface as the S3 response-content-disposition override.
    expect(url).toContain("response-content-disposition=");
    expect(url).toContain("attachment");
    expect(url).toContain("quarterly-report.txt");
  });
  it("should include response-content-type in presigned URL", () => {
    const file = s3Client.file("example.txt");
    const url = file.presign({
      method: "GET",
      expiresIn: 900,
      type: "application/octet-stream",
    });
    // The "/" in the MIME type is percent-encoded in the query string.
    expect(url).toContain("response-content-type=");
    expect(url).toContain("application%2Foctet-stream");
  });
  it("should include both response-content-disposition and response-content-type in presigned URL", () => {
    const file = s3Client.file("example.txt");
    const url = file.presign({
      method: "GET",
      expiresIn: 900,
      contentDisposition: 'attachment; filename="quarterly-report.txt"',
      type: "application/octet-stream",
    });
    expect(url).toContain("response-content-disposition=");
    expect(url).toContain("response-content-type=");
    expect(url).toContain("attachment");
    expect(url).toContain("application%2Foctet-stream");
  });
  it("should work with S3Client.presign static method", () => {
    // The same options must be honored by the static helper, not just S3File.presign.
    const url = S3Client.presign("example.txt", {
      region: "us-east-1",
      endpoint: "https://s3.us-east-1.amazonaws.com",
      accessKeyId: "test-key",
      secretAccessKey: "test-secret",
      bucket: "test-bucket",
      contentDisposition: 'attachment; filename="report.pdf"',
      type: "application/pdf",
      expiresIn: 3600,
    });
    expect(url).toContain("response-content-disposition=");
    expect(url).toContain("response-content-type=");
    expect(url).toContain("report.pdf");
    expect(url).toContain("application%2Fpdf");
  });
  it("should properly URL-encode special characters in contentDisposition", () => {
    const file = s3Client.file("test.txt");
    const url = file.presign({
      method: "GET",
      contentDisposition: 'attachment; filename="file with spaces & symbols.txt"',
    });
    expect(url).toContain("response-content-disposition=");
    // Special characters should be URL encoded
    expect(url).toContain("%20"); // space
    expect(url).toContain("%26"); // &
  });
  it("should not include response-content-disposition when empty string is provided", () => {
    // Empty string means "not requested": the parameter must be omitted entirely.
    const file = s3Client.file("test.txt");
    const url = file.presign({
      method: "GET",
      contentDisposition: "",
    });
    expect(url).not.toContain("response-content-disposition=");
  });
  it("should not include response-content-type when empty string is provided", () => {
    const file = s3Client.file("test.txt");
    const url = file.presign({
      method: "GET",
      type: "",
    });
    expect(url).not.toContain("response-content-type=");
  });
it("query parameters should be in correct alphabetical order", () => {
const file = s3Client.file("test.txt");
const url = file.presign({
method: "GET",
contentDisposition: "inline",
type: "text/plain",
});
// Check that response-content-disposition comes before response-content-type
// and both come after X-Amz-SignedHeaders and before any x-amz-* lowercase params
const dispositionIndex = url.indexOf("response-content-disposition=");
const typeIndex = url.indexOf("response-content-type=");
const signedHeadersIndex = url.indexOf("X-Amz-SignedHeaders=");
expect(dispositionIndex).toBeGreaterThan(signedHeadersIndex);
expect(typeIndex).toBeGreaterThan(dispositionIndex);
});
});
// Archive + S3 integration tests
describe.skipIf(!minioCredentials)("Archive with S3", () => {
const credentials = minioCredentials!;

View File

@@ -7,7 +7,17 @@
import { $ } from "bun";
import { afterAll, beforeAll, describe, expect, it, test } from "bun:test";
import { mkdir, rm, stat } from "fs/promises";
import { bunExe, isPosix, isWindows, runWithErrorPromise, tempDirWithFiles, tmpdirSync } from "harness";
import {
bunExe,
bunEnv as harnessBunEnv,
isPosix,
isWindows,
runWithErrorPromise,
tempDir,
tempDirWithFiles,
tmpdirSync,
} from "harness";
import { readdirSync } from "node:fs";
import { join, sep } from "path";
import { createTestBuilder, sortedShellOutput } from "./util";
const TestBuilder = createTestBuilder(import.meta.path);
@@ -2516,3 +2526,209 @@ function sentinelByte(buf: Uint8Array): number {
}
throw new Error("No sentinel byte");
}
// Regression test for #9340
test("bun shell should move multiple files", async () => {
  // Three empty files in a source dir; a single `mv` must relocate all of them.
  const names = ["file1", "file2", "file3"];
  const emptyFiles = Object.fromEntries(names.map(n => [n, ""]));
  const srcDir = tempDirWithFiles("source", emptyFiles);
  const dstDir = tempDirWithFiles("target", {});
  await $`mv ${names} ${dstDir}`.cwd(srcDir);
  // The source must be drained and every file must land in the target.
  expect(readdirSync(srcDir)).toBeEmpty();
  expect(readdirSync(dstDir).sort()).toEqual(names);
});
// Regression test for #22650
test("issue #22650 - shell crash with && operator followed by external command", async () => {
  // Minimal reproduction: `echo && <external command>`. After the first command
  // succeeds, the shell spawns an external process; the crash happened because
  // top_level_dir was not set at that point.
  await using proc = Bun.spawn({
    cmd: [bunExe(), "exec", "echo test && node --version"],
    env: harnessBunEnv,
    stderr: "pipe",
    stdout: "pipe",
  });
  const [out, errOutput, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
  // A clean run: nothing on stderr, both commands produce output, exit code 0.
  expect(errOutput).toBe("");
  expect(out).toContain("test");
  expect(out).toMatch(/v\d+\.\d+\.\d+/); // node prints its version
  expect(exitCode).toBe(0);
});
// Regression test for #25831
// `ls -l` in Bun Shell must produce a POSIX-style long listing, not the short format.
describe("ls -l long listing format", () => {
  test("ls -l shows long listing format", async () => {
    // Create temp directory with test files
    using dir = tempDir("ls-long-listing", {
      "file.txt": "hello world",
      "script.sh": "#!/bin/bash\necho hello",
      subdir: {
        "nested.txt": "nested content",
      },
    });
    // Run ls -l in the temp directory. The dir path is injected into the inline
    // script, so backslashes (Windows paths) are escaped for the string literal.
    await using proc = Bun.spawn({
      cmd: [
        bunExe(),
        "-e",
        `
        import { $ } from "bun";
        $.cwd("${String(dir).replace(/\\/g, "\\\\")}");
        const result = await $\`ls -l\`.text();
        console.log(result);
        `,
      ],
      env: harnessBunEnv,
      stdout: "pipe",
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    // Verify no errors on stderr
    expect(stderr).toBe("");
    // Should show permission string (starts with - or d, followed by rwx/sStT permissions)
    // Format: -rw-r--r-- 1 uid gid size date name
    expect(stdout).toMatch(/^[-dlbcps][-rwxsStT]{9}/m); // Permission string pattern
    expect(stdout).toContain("file.txt");
    expect(stdout).toContain("script.sh");
    expect(stdout).toContain("subdir");
    // Verify that it's actually showing long format (contains size and date info)
    // Long format has at least permissions, link count, uid, gid, size, date, name
    const lines = stdout
      .trim()
      .split("\n")
      .filter(line => line.includes("file.txt"));
    expect(lines.length).toBeGreaterThan(0);
    // Each line should have multiple space-separated fields
    const fileLine = lines[0];
    const fields = fileLine.trim().split(/\s+/);
    expect(fields.length).toBeGreaterThanOrEqual(7); // perms, nlink, uid, gid, size, date fields, name
    expect(exitCode).toBe(0);
  });
  test("ls without -l shows short format", async () => {
    // Control case: without -l there must be no permission strings at all.
    using dir = tempDir("ls-short-listing", {
      "file1.txt": "content1",
      "file2.txt": "content2",
    });
    await using proc = Bun.spawn({
      cmd: [
        bunExe(),
        "-e",
        `
        import { $ } from "bun";
        $.cwd("${String(dir).replace(/\\/g, "\\\\")}");
        const result = await $\`ls\`.text();
        console.log(result);
        `,
      ],
      env: harnessBunEnv,
      stdout: "pipe",
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    // Verify no errors on stderr
    expect(stderr).toBe("");
    // Short format should just show filenames, not permission strings
    expect(stdout).not.toMatch(/^[-dlbcps][-rwxsStT]{9}/m);
    expect(stdout).toContain("file1.txt");
    expect(stdout).toContain("file2.txt");
    expect(exitCode).toBe(0);
  });
  test("ls -al shows hidden files in long format", async () => {
    using dir = tempDir("ls-all-long", {
      ".hidden": "hidden content",
      "visible.txt": "visible content",
    });
    await using proc = Bun.spawn({
      cmd: [
        bunExe(),
        "-e",
        `
        import { $ } from "bun";
        $.cwd("${String(dir).replace(/\\/g, "\\\\")}");
        const result = await $\`ls -al\`.text();
        console.log(result);
        `,
      ],
      env: harnessBunEnv,
      stdout: "pipe",
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    // Verify no errors on stderr
    expect(stderr).toBe("");
    // Should show hidden files
    expect(stdout).toContain(".hidden");
    expect(stdout).toContain("visible.txt");
    // Should also show . and .. entries
    expect(stdout).toMatch(/^d[-rwxsStT]{9}.*\s\.$/m); // . directory
    expect(stdout).toMatch(/^d[-rwxsStT]{9}.*\s\.\.$/m); // .. directory
    // Should be in long format
    expect(stdout).toMatch(/^[-dlbcps][-rwxsStT]{9}/m);
    expect(exitCode).toBe(0);
  });
  test("ls -l shows directory type indicator", async () => {
    using dir = tempDir("ls-dir-type", {
      "regular-file.txt": "content",
      subdir: {
        "nested.txt": "nested",
      },
    });
    await using proc = Bun.spawn({
      cmd: [
        bunExe(),
        "-e",
        `
        import { $ } from "bun";
        $.cwd("${String(dir).replace(/\\/g, "\\\\")}");
        const result = await $\`ls -l\`.text();
        console.log(result);
        `,
      ],
      env: harnessBunEnv,
      stdout: "pipe",
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    // Verify no errors on stderr
    expect(stderr).toBe("");
    // Directory should start with 'd'
    expect(stdout).toMatch(/^d[-rwxsStT]{9}.*subdir$/m);
    // Regular file should start with '-'
    expect(stdout).toMatch(/^-[-rwxsStT]{9}.*regular-file\.txt$/m);
    expect(exitCode).toBe(0);
  });
});

View File

@@ -1019,3 +1019,100 @@ describe("option combinations", () => {
expect(await proc.exited).toBe(0);
});
});
// Regression tests for #23316
// Long-path cwd handling must fail gracefully (an Error), not panic.
describe("long path handling on Windows", () => {
  it("spawn should handle cwd paths >= MAX_PATH on Windows", async () => {
    // NOTE(review): this early return makes the test report as "pass" on
    // non-Windows platforms; it.skipIf(!isWindows) would surface it as
    // skipped instead — confirm which is intended.
    if (!isWindows) {
      return;
    }
    using dir = tmpdirSync();
    // Create a deeply nested directory structure that exceeds MAX_PATH (260 chars)
    // Windows MAX_PATH is 260, so we'll create a path > 260 characters
    const segments: string[] = [];
    let currentPath = String(dir);
    let totalLength = currentPath.length;
    // Keep adding directory segments until we exceed MAX_PATH
    let i = 0;
    while (totalLength < 280) {
      const segment = `dir${i.toString().padStart(3, "0")}`;
      segments.push(segment);
      totalLength += segment.length + 1; // +1 for the path separator
      i++;
    }
    // Create the nested directory structure (Bun.write creates parent dirs).
    let deepPath = String(dir);
    for (const segment of segments) {
      deepPath = join(deepPath, segment);
      await Bun.write(join(deepPath, ".keep"), "");
    }
    console.log(`Created deep path (length: ${deepPath.length}): ${deepPath}`);
    expect(deepPath.length).toBeGreaterThanOrEqual(260);
    // This should either:
    // 1. Succeed and spawn the process
    // 2. Fail gracefully with an error (not panic with UV_ENOTCONN)
    let err;
    try {
      await Bun.spawn({
        cmd: [bunExe(), "--version"],
        cwd: deepPath,
        stdout: "pipe",
        stderr: "pipe",
      }).exited;
    } catch (e) {
      err = e;
    }
    // NOTE(review): this asserts spawn currently FAILS with an Error for
    // >= MAX_PATH cwd — i.e. it pins "graceful failure", not success.
    expect(err).toBeInstanceOf(Error);
  });
  it("spawn should handle cwd paths with disabled 8.3 names on Windows", async () => {
    if (!isWindows) {
      return;
    }
    using dir = tmpdirSync();
    await Bun.write(join(String(dir), "test.js"), `console.log("hello");`);
    // Create a moderately long path that would trigger GetShortPathNameW
    // but might fail if 8.3 names are disabled
    const segments = Array.from({ length: 20 }, (_, i) => `directory_with_long_name_${i}`);
    let deepPath = String(dir);
    for (const segment of segments) {
      deepPath = join(deepPath, segment);
      await Bun.write(join(deepPath, ".keep"), "");
    }
    console.log(`Created path for 8.3 test (length: ${deepPath.length}): ${deepPath}`);
    // Attempt to copy test.js to the deep path
    let err;
    try {
      await Bun.write(join(deepPath, "test.js"), `console.log("hello");`);
    } catch (e) {
      err = e;
    }
    expect(err).toBeInstanceOf(Error);
    // This should not panic, even if GetShortPathNameW fails
    err = undefined;
    try {
      const proc = Bun.spawn({
        cmd: [bunExe(), "test.js"],
        cwd: deepPath,
        stdout: "inherit",
        stderr: "inherit",
      });
      const exitCode = await proc.exited;
      // Coerce a non-zero exit into a thrown Error so the assertion below
      // covers both "spawn threw" and "process failed".
      if (exitCode !== 0) throw new Error("process exited");
    } catch (e) {
      err = e;
    }
    expect(err).toBeInstanceOf(Error);
  });
});

View File

@@ -1,4 +1,4 @@
import { describe, test, expect, beforeAll } from "@jest/globals";
import { beforeAll, describe, expect, test } from "@jest/globals";
describe("Outer describe", () => {
beforeAll(() => {

View File

@@ -1,4 +1,5 @@
import { expect, test } from "bun:test";
import { bunEnv, bunExe, normalizeBunSnapshot, tempDir } from "harness";
test("Bun.version", () => {
expect(process.versions.bun).toBe(Bun.version);
@@ -10,3 +11,119 @@ test("expect().not.not", () => {
expect(1).not.not.toBe(1);
expect(1).not.not.not.toBe(2);
});
// Regression test for #14624
// An unhandled rejection inside an async test must fail the run, not hang it.
test("uncaught promise rejection in async test should not hang", async () => {
  using dir = tempDir("issue-14624", {
    "hang.test.js": `
import { test } from 'bun:test'
test('async test with uncaught rejection', async () => {
  console.log('test start');
  // This creates an unhandled promise rejection
  (async () => { throw new Error('uncaught error'); })();
  await Bun.sleep(1);
  console.log('test end');
})
`,
  });
  const proc = Bun.spawn({
    cmd: [bunExe(), "test", "hang.test.js"],
    env: bunEnv,
    cwd: String(dir),
    stdout: "pipe",
    stderr: "pipe",
  });
  // Set a timeout to detect if the process hangs; if it fires we kill the
  // child and the `timeout` flag makes the assertion below fail.
  let timeout = false;
  const timer = setTimeout(() => {
    timeout = true;
    proc.kill();
  }, 3000);
  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
  clearTimeout(timer);
  const output = stdout + stderr;
  expect(timeout).toBeFalse();
  expect(output).toContain("test start");
  // expect(output).toContain("test end"); // the process exits before this executes
  expect(output).toContain("uncaught error");
  expect(exitCode).not.toBe(0);
  // Failure marker differs between pretty and plain reporters.
  expect(output).toMatch(/✗|\(fail\)/);
  expect(output).toMatch(/\n 1 fail/);
});
// Regression test for #19107
test.failing("throw undefined no crash", () => {
  // Throwing a non-Error value must not crash the matcher machinery.
  const throwUndefined = () => {
    throw undefined;
  };
  expect(throwUndefined).toThrow(TypeError);
});
// Regression test for #20100
// Runs a fixture file in a child `bun test` and pins its ordering/output
// via an inline snapshot.
test("20100", async () => {
  const result = Bun.spawn({
    cmd: [bunExe(), "test", import.meta.dir + "/20100.fixture.ts"],
    stdout: "pipe",
    stderr: "pipe",
    env: bunEnv,
  });
  const exitCode = await result.exited;
  const stdout = await result.stdout.text();
  // NOTE(review): stderr is read (drains the pipe) but never asserted on.
  const stderr = await result.stderr.text();
  expect(exitCode).toBe(0);
  expect(normalizeBunSnapshot(stdout)).toMatchInlineSnapshot(`
    "bun test <version> (<revision>)
    <top-level>
    <top-level-test> { unpredictableVar: "top level" } </top-level-test>
    <describe-1>
    <describe-1-test> { unpredictableVar: "describe 1" } </describe-1-test>
    </describe-1>
    <describe-2>
    <describe-2-test> { unpredictableVar: "describe 2" } </describe-2-test>
    </describe-2>
    </top-level>"
  `);
});
// Regression test for #21177
// With a -t filter that matches nothing in the fixture's filtered describe,
// no beforeAll output should appear — only the header line.
test("21177 - filter skips beforeAll when describe is filtered", async () => {
  const result = Bun.spawn({
    cmd: [bunExe(), "test", import.meta.dir + "/21177.fixture.ts", "-t", "true is true"],
    stdout: "pipe",
    stderr: "pipe",
    env: bunEnv,
  });
  const exitCode = await result.exited;
  const stdout = await result.stdout.text();
  // NOTE(review): stderr is read (drains the pipe) but never asserted on.
  const stderr = await result.stderr.text();
  expect(normalizeBunSnapshot(stdout)).toMatchInlineSnapshot(`"bun test <version> (<revision>)"`);
  expect(exitCode).toBe(0);
});
// Regression test for #21177
// When a filtered test sits inside nested describes, the beforeAll hooks of
// every ancestor describe must still run (outer first, then middle).
test("21177 - filter runs parent beforeAll hooks", async () => {
  const result = Bun.spawn({
    cmd: [bunExe(), "test", import.meta.dir + "/21177.fixture-2.ts", "-t", "middle is middle"],
    stdout: "pipe",
    stderr: "pipe",
    env: bunEnv,
  });
  const exitCode = await result.exited;
  const stdout = await result.stdout.text();
  // NOTE(review): stderr is read (drains the pipe) but never asserted on.
  const stderr = await result.stderr.text();
  expect(normalizeBunSnapshot(stdout)).toMatchInlineSnapshot(`
    "bun test <version> (<revision>)
    Running beforeAll in Outer describe
    Running beforeAll in Middle describe"
  `);
  expect(exitCode).toBe(0);
});

View File

@@ -1,6 +1,6 @@
import { spawnSync } from "bun";
import { describe, expect, jest, test } from "bun:test";
import { bunEnv, bunExe, tempDirWithFiles } from "harness";
import { bunEnv, bunExe, normalizeBunSnapshot, tempDir, tempDirWithFiles } from "harness";
describe("blocks should handle a number, string, anonymous class, named class, or function for the first arg", () => {
const numberMock = jest.fn();
@@ -197,7 +197,7 @@ describe("passing arrow function as args", () => {
"describe-test.test.js": `
import { describe, test, expect } from "bun:test";
describe(() => {}, () => {
test("should NOT pass", () => {
@@ -223,3 +223,92 @@ describe("passing arrow function as args", () => {
expect(fullOutput).toInclude("1 fail");
});
});
// Regression test for #8768
// With --todo, a passing test inside describe.todo() must fail the run the
// same way a passing test.todo() does ("marked as todo but passes").
test("issue #8768: describe.todo() doesn't fail when todo test passes", async () => {
  using dir = tempDir("issue-08768", {
    "describe-todo.test.js": `
import { describe, test, expect } from "bun:test";
describe.todo("E", () => {
test("E", () => { expect("hello").toBe("hello") })
});
`.trim(),
    "test-todo.test.js": `
import { test, expect } from "bun:test";
test.todo("E", () => { expect("hello").toBe("hello") });
`.trim(),
  });
  // Run describe.todo() with --todo flag
  await using proc1 = Bun.spawn({
    cmd: [bunExe(), "test", "--todo", "describe-todo.test.js"],
    env: bunEnv,
    cwd: String(dir),
    stderr: "pipe",
    stdout: "pipe",
  });
  const [stdout1, stderr1, exitCode1] = await Promise.all([proc1.stdout.text(), proc1.stderr.text(), proc1.exited]);
  // Run test.todo() with --todo flag for comparison
  await using proc2 = Bun.spawn({
    cmd: [bunExe(), "test", "--todo", "test-todo.test.js"],
    env: bunEnv,
    cwd: String(dir),
    stderr: "pipe",
    stdout: "pipe",
  });
  const [stdout2, stderr2, exitCode2] = await Promise.all([proc2.stdout.text(), proc2.stderr.text(), proc2.exited]);
  // test.todo() correctly fails when the test passes (expected behavior)
  expect(exitCode2).not.toBe(0);
  const output2 = stdout2 + stderr2;
  expect(output2).toContain("todo");
  expect(output2).toMatch(/this test is marked as todo but passes/i);
  // describe.todo() must behave the same way (this was the bug).
  expect(exitCode1).toBe(1);
  const output1 = stdout1 + stderr1;
  expect(output1).toContain("todo");
  expect(output1).toMatch(/this test is marked as todo but passes/i);
});
// Regression test for #19875
// describe.only containing describe.todo: the failing inner test must be
// reported as todo (not run as a failure), and the run must exit 0.
test("issue #19875: describe.only with nested describe.todo", async () => {
  using dir = tempDir("issue-19875", {
    "19875.test.ts": `
import { describe, it, expect } from "bun:test";
describe.only("only", () => {
describe.todo("todo", () => {
it("fail", () => {
expect(2).toBe(3);
});
});
});
`.trim(),
  });
  const result = Bun.spawn({
    cmd: [bunExe(), "test", "19875.test.ts"],
    stdout: "pipe",
    stderr: "pipe",
    cwd: String(dir),
    env: { ...bunEnv, CI: "false" }, // tests '.only()'
  });
  const exitCode = await result.exited;
  const stderr = await result.stderr.text();
  expect(exitCode).toBe(0);
  expect(normalizeBunSnapshot(stderr)).toMatchInlineSnapshot(`
    "19875.test.ts:
    (todo) only > todo > fail
    0 pass
    1 todo
    0 fail
    Ran 1 test across 1 file."
  `);
});

View File

@@ -393,3 +393,56 @@ describe("MatcherContext", () => {
});
});
});
// Regression test for #16312
// Tests that expect.extend works with @testing-library/jest-dom matchers
// NOTE: this runs at module level (top-level await) and mutates the global
// `expect` before any test executes; guarded by isBun so it is skipped under
// other runners.
if (isBun) {
  const matchers = await import("@testing-library/jest-dom/matchers");
  expect.extend(matchers);
  test("expect extended with jest-dom matchers", () => {
    // A known jest-dom matcher must now be registered on expect.
    expect(expect.toBeInTheDocument).not.toBe(undefined);
  });
}
// Regression test for ENG-22942: Crash when calling expect.extend with non-function values
// The crash occurred because JSWrappingFunction assumed all callable objects are JSFunction,
// but class constructors like Expect are callable but not JSFunction instances.
if (isBun) {
  describe("expect.extend with non-function values", () => {
    test("expect.extend with jest object should throw TypeError, not crash", () => {
      // Bun.jest() returns a per-file jest object; passing it to extend()
      // feeds extend() callable-but-not-JSFunction values (the crash trigger).
      const jest = Bun.jest(import.meta.path);
      expect(() => {
        jest.expect.extend(jest);
      }).toThrow(TypeError);
    });
    test("expect.extend with object containing non-function values should throw", () => {
      const jest = Bun.jest(import.meta.path);
      expect(() => {
        jest.expect.extend({
          notAFunction: "string value",
        });
      }).toThrow("expect.extend: `notAFunction` is not a valid matcher");
    });
    test("expect.extend with valid matchers still works", () => {
      // Sanity check: legitimate custom matchers must still register and run.
      const jest = Bun.jest(import.meta.path);
      jest.expect.extend({
        toBeEven(received) {
          const pass = received % 2 === 0;
          return {
            message: () => `expected ${received} ${pass ? "not " : ""}to be even`,
            pass,
          };
        },
      });
      jest.expect(4).toBeEven();
      jest.expect(3).not.toBeEven();
    });
  });
}

View File

@@ -4899,6 +4899,197 @@ describe("expect()", () => {
});
});
// Regression test for #7736
describe("toEqual on a large Map", () => {
  // Large enough to push the deep-equality checker past any small-collection fast path.
  const MAP_SIZE = 918 * 4;
  const makePairs = () => Array.from({ length: MAP_SIZE }, (_, i) => ["k" + i, "v" + i]);
  class CustomMap extends Map {
    abc = 123;
    constructor(iterable) {
      super(iterable);
    }
  }
  // Exercise both a plain Map and a subclass carrying an own property.
  for (const MapClass of [Map, CustomMap]) {
    test(MapClass.name, () => {
      const left = new MapClass(makePairs());
      const right = new MapClass(makePairs());
      expect(left).toEqual(right);
      // Diverge by one key: no longer equal.
      left.set("not-okay", 1);
      right.set("okay", 1);
      expect(left).not.toEqual(right);
      // Bring them back in sync: equal again.
      left.delete("not-okay");
      left.set("okay", 1);
      expect(left).toEqual(right);
      // Same key, different value: not equal.
      left.set("okay", 2);
      expect(left).not.toEqual(right);
    });
  }
});
// Regression test for #7736
describe("toEqual on a large Set", () => {
  // Same size as the Map test above: defeats any small-collection fast path.
  const MAP_SIZE = 918 * 4;
  const makeValues = () => Array.from({ length: MAP_SIZE }, (_, i) => "v" + i);
  class CustomSet extends Set {
    constructor(iterable) {
      super(iterable);
      this.abc = 123;
    }
  }
  // Exercise both a plain Set and a subclass carrying an own property.
  for (const SetClass of [Set, CustomSet]) {
    test(SetClass.name, () => {
      const left = new SetClass(makeValues());
      const right = new SetClass(makeValues());
      expect(left).toEqual(right);
      // Diverge by one value: no longer equal.
      left.add("not-okay");
      right.add("okay");
      expect(left).not.toEqual(right);
      // Bring them back in sync: equal again.
      left.delete("not-okay");
      left.add("okay");
      expect(left).toEqual(right);
    });
  }
});
// Regression test for #11677
test("toContainKeys empty", () => {
  // The empty string is a valid own property key and must be matchable.
  expect({ "": 1 }).toContainKeys([""]);
});
// Regression test for #11677
test("toContainKey proxy", () => {
  // Keys that exist only via proxy traps (has/getOwnPropertyDescriptor)
  // must be visible to the matcher.
  expect(
    new Proxy(
      {},
      {
        has(target, str) {
          return str === "foo";
        },
        getOwnPropertyDescriptor(target, str) {
          if (str === "foo") {
            return { value: 1, configurable: true, enumerable: true };
          }
          return undefined;
        },
      },
    ),
  ).toContainKey("foo");
});
// Regression test for #11677
test("toContainKeys proxy", () => {
  // Same trap-backed key, but through the plural matcher.
  expect(
    new Proxy(
      {},
      {
        has(target, str) {
          return str === "foo";
        },
        getOwnPropertyDescriptor(target, str) {
          if (str === "foo") {
            return { value: 1, configurable: true, enumerable: true };
          }
          return undefined;
        },
      },
    ),
  ).toContainKeys(["foo"]);
});
// Regression test for #11677
test("toContainKeys proxy throwing", () => {
  // A trap that throws must propagate as a thrown error from the matcher,
  // not crash or be swallowed.
  expect(() =>
    expect(
      new Proxy(
        {},
        {
          has(target, str) {
            return str === "foo";
          },
          getOwnPropertyDescriptor(target, str) {
            throw new Error("my error!");
          },
        },
      ),
    ).not.toContainKeys(["my error!"]),
  ).toThrow();
});
// Regression test for #11677
test("NOT toContainKeys empty", () => {
  // An empty object has no keys, not even the empty-string key.
  expect({}).not.toContainKeys([""]);
});
// Regression test for #11677
test("NOT toContainAnyKeys string empty", () => {
  expect({}).not.toContainAnyKeys([""]);
});
// Regression test for #11677
test("toContainAnyKeys true string empty", () => {
  expect({ "": 1 }).toContainAnyKeys([""]);
});
// Regression test for #11677
test("toContainAnyKeys holey", () => {
  // [,] is a sparse array: it has length 1 but no own "0" property,
  // so the hole must not count as a key on either side.
  expect([,]).not.toContainAnyKeys([,]);
});
// Regression test for #11677
test("NOT toContainAnyKeysEmpty", () => {
  // An empty candidate list can never match "any" key.
  expect({}).not.toContainAnyKeys([]);
});
// Regression test for #12276
test("toIncludeRepeated should check for exact count, not at least count", () => {
  // The bug: toIncludeRepeated checked for AT LEAST n occurrences instead of
  // EXACTLY n. Table-driven: each row is [haystack, needle, count].
  const exactMatches = [
    ["hello hello world", "hello", 2],
    ["hello world", "hello", 1],
    ["world", "hello", 0],
    ["abc abc abc", "abc", 3],
    // std.mem.count semantics: overlapping occurrences are not counted.
    ["aaa", "aa", 1],
    ["aaaa", "aa", 2],
  ];
  for (const [haystack, needle, count] of exactMatches) {
    expect(haystack).toIncludeRepeated(needle, count);
  }
  // Any count that is not the exact number of occurrences must fail the matcher.
  const inexactMatches = [
    ["hello hello world", "hello", 1],
    ["hello hello world", "hello", 3],
    ["hello hello hello", "hello", 2],
    ["abc abc abc", "abc", 1],
    ["abc abc abc", "abc", 2],
    ["abc abc abc", "abc", 4],
  ];
  for (const [haystack, needle, count] of inexactMatches) {
    expect(haystack).not.toIncludeRepeated(needle, count);
  }
});
function tmpFile(exists) {
const { join } = require("path");
const { tmpdir } = require("os");

View File

@@ -1,4 +1,5 @@
import { describe, expect, it } from "bun:test";
import { describe, expect, it, test } from "bun:test";
import { bunEnv, bunExe, normalizeBunSnapshot } from "harness";
const NUMBERS = [
[1, 1, 2],
@@ -57,3 +58,38 @@ describe.each(["some", "cool", "strings"])("works with describe: %s", s => {
describe("does not return zero", () => {
expect(it.each([1, 2])("wat", () => {})).toBeUndefined();
});
// Regression test for #11793
// test.each([[]]) must treat the inner empty array as the (empty) argument
// list, so the callback receives the whole row — here length 1, which makes
// the fixture's assertion fail; this test pins that failure output exactly.
test("#11793 - test.each with nested empty array", async () => {
  const result = Bun.spawn({
    cmd: [bunExe(), "test", import.meta.dir + "/11793.fixture.ts"],
    stdout: "pipe",
    stderr: "pipe",
    env: bunEnv,
  });
  const exitCode = await result.exited;
  // NOTE(review): stdout is read (drains the pipe) but never asserted on.
  const stdout = await result.stdout.text();
  const stderr = await result.stderr.text();
  expect(exitCode).toBe(1);
  expect(normalizeBunSnapshot(stderr)).toMatchInlineSnapshot(`
    "test/js/bun/test/11793.fixture.ts:
    1 | const { test, expect } = require("bun:test");
    2 |
    3 | test.each([[]])("%p", array => {
    4 |   expect(array.length).toBe(0);
    ^
    error: expect(received).toBe(expected)
    Expected: 0
    Received: 1
    at <anonymous> (file:NN:NN)
    (fail) %p
    0 pass
    1 fail
    1 expect() calls
    Ran 1 test across 1 file."
  `);
});

View File

@@ -1,4 +1,4 @@
import { describe, test, expect, beforeAll } from "bun:test";
import { beforeAll, describe, test } from "bun:test";
describe("desc1", () => {
beforeAll(() => {

View File

@@ -1,5 +1,5 @@
// foo.test.ts
import { describe, it, beforeAll } from "bun:test";
import { beforeAll, describe, it } from "bun:test";
describe("foo", () => {
beforeAll(() => {

View File

@@ -1,4 +1,4 @@
import { beforeEach, it, expect } from "bun:test";
import { beforeEach, expect, it } from "bun:test";
beforeEach(async () => {
await Bun.sleep(100);
throw 5;

View File

@@ -51,11 +51,11 @@ describe("Show Deletion Tests", async () => {
await initTest();
agent = await withUser();
await bulkCreateShows(10, agent);
console.log("Show Deletion Tests pre ");
console.log("Show Deletion Tests pre");
});
afterAll(async () => {
console.log("Show Deletion test post ");
console.log("Show Deletion test post");
await clearDatabase();
});

View File

@@ -1,4 +1,5 @@
import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it } from "bun:test";
import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, test } from "bun:test";
import { bunEnv, bunExe, normalizeBunSnapshot, tempDir } from "harness";
let hooks_run: string[] = [];
@@ -249,3 +250,252 @@ describe("test jest hooks in bun-test", () => {
});
});
});
// Regression test for #14135
// beforeAll should not run for skipped tests when using .only
test("regression #14135 - beforeAll should not run for skipped describe blocks", async () => {
  const result = Bun.spawn({
    cmd: [bunExe(), "test", import.meta.dir + "/jest-hooks-14135.fixture.ts"],
    stdout: "pipe",
    stderr: "pipe",
    env: { ...bunEnv, CI: "false" }, // tests '.only()'
  });
  const exitCode = await result.exited;
  const stdout = await result.stdout.text();
  // Only the .only'd describe's hook and test should have printed.
  expect(normalizeBunSnapshot(stdout)).toMatchInlineSnapshot(`
    "bun test <version> (<revision>)
    beforeAll 2
    test 2"
  `);
  expect(exitCode).toBe(0);
});
// Regression test for #19758
// tests that beforeAll runs in order instead of immediately
test("regression #19758 - beforeAll runs in order instead of immediately", async () => {
  const result = Bun.spawn({
    cmd: [bunExe(), "test", import.meta.dir + "/jest-hooks-19758.fixture.ts"],
    stdout: "pipe",
    stderr: "pipe",
    env: bunEnv,
  });
  const exitCode = await result.exited;
  const stdout = await result.stdout.text();
  // Each describe's beforeAll must print immediately before that describe's
  // first test, not all up front at collection time.
  expect(normalizeBunSnapshot(stdout)).toMatchInlineSnapshot(`
    "bun test <version> (<revision>)
    -- foo beforeAll
    -- bar beforeAll
    bar.1
    -- baz beforeAll
    baz.1"
  `);
  expect(exitCode).toBe(0);
});
// Regression test for #20980
// error in beforeEach should prevent the test from running
test("regression #20980 - error in beforeEach prevents test from running", async () => {
  const result = Bun.spawn({
    cmd: [bunExe(), "test", import.meta.dir + "/jest-hooks-20980.fixture.ts"],
    stdout: "pipe",
    stderr: "pipe",
    env: bunEnv,
  });
  const exitCode = await result.exited;
  const stderr = await result.stderr.text();
  // The hook's thrown value (5) is reported and the test counts as a fail.
  expect(normalizeBunSnapshot(stderr)).toMatchInlineSnapshot(`
    "test/js/bun/test/jest-hooks-20980.fixture.ts:
    error: 5
    5
    (fail) test 0
    0 pass
    1 fail
    Ran 1 test across 1 file."
  `);
  expect(exitCode).toBe(1);
});
// Regression test for #21830
// make sure beforeAll runs in the right order
test("regression #21830 - beforeAll runs in the right order", async () => {
  const result = Bun.spawn({
    cmd: [bunExe(), "test", import.meta.dir + "/jest-hooks-21830.fixture.ts"],
    stdout: "pipe",
    stderr: "pipe",
    env: bunEnv,
  });
  const exitCode = await result.exited;
  const stdout = await result.stdout.text();
  // Each describe's pre/post pair must be contiguous and in declaration order.
  expect(normalizeBunSnapshot(stdout)).toMatchInlineSnapshot(`
    "bun test <version> (<revision>)
    Create Show Tests pre
    Create Show Tests post
    Get Show Data Tests pre
    Get Show Data Tests post
    Show Deletion Tests pre
    Show Deletion test post"
  `);
  expect(exitCode).toBe(0);
});
// Regression test for #23133
// Passing HookOptions to lifecycle hooks should work
describe("regression #23133 - lifecycle hooks accept timeout options", () => {
  // Shared log: the tests below assert on what the hooks pushed here, so the
  // order of tests in this describe matters.
  const logs: string[] = [];
  // Test beforeAll with object timeout option
  beforeAll(
    () => {
      logs.push("beforeAll with object timeout");
    },
    { timeout: 10_000 },
  );
  // Test beforeAll with numeric timeout option
  beforeAll(() => {
    logs.push("beforeAll with numeric timeout");
  }, 5000);
  // Test beforeEach with timeout option
  beforeEach(
    () => {
      logs.push("beforeEach");
    },
    { timeout: 10_000 },
  );
  // Test afterEach with timeout option
  afterEach(
    () => {
      logs.push("afterEach");
    },
    { timeout: 10_000 },
  );
  // Test afterAll with timeout option
  afterAll(
    () => {
      logs.push("afterAll");
    },
    { timeout: 10_000 },
  );
  test("lifecycle hooks accept timeout options", () => {
    expect(logs).toContain("beforeAll with object timeout");
    expect(logs).toContain("beforeAll with numeric timeout");
    expect(logs).toContain("beforeEach");
  });
  test("beforeEach runs before each test", () => {
    // beforeEach should have run twice now (once for each test)
    const beforeEachCount = logs.filter(l => l === "beforeEach").length;
    expect(beforeEachCount).toBe(2);
  });
});
// Regression test for #12250
// afterAll hook should run even with --bail flag
// Marked test.failing: this documents the CURRENT bug; it must be flipped to
// a normal test once the fix lands.
test.failing("regression #12250 - afterAll hook should run even with --bail flag", async () => {
  using dir = tempDir("test-12250", {
    "test.spec.ts": `
import { afterAll, beforeAll, describe, expect, it } from 'bun:test';
describe('test', () => {
beforeAll(async () => {
console.log('Before');
});
afterAll(async () => {
console.log('After');
});
it('should fail', async () => {
expect(true).toBe(false);
});
it('should pass', async () => {
expect(true).toBe(true);
});
});
`,
  });
  await using proc = Bun.spawn({
    cmd: [bunExe(), "test", "--bail", "test.spec.ts"],
    env: bunEnv,
    cwd: String(dir),
    stderr: "pipe",
    stdout: "pipe",
  });
  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
  // The test should fail with exit code 1
  expect(exitCode).toBe(1);
  // Before hook should run
  expect(stdout).toContain("Before");
  // Currently failing: afterAll hook should run even with --bail
  // TODO: Remove .todo() when fixed
  expect(stdout).toContain("After");
  // Should bail out after first failure
  expect(stdout).toContain("Bailed out after 1 failure");
  expect(stdout).toContain("Ran 1 tests");
});
// Regression test for #12250
// afterAll hook runs normally without --bail flag
// Control case for the test.failing above: same fixture, no --bail.
test("regression #12250 - afterAll hook runs normally without --bail flag", async () => {
  using dir = tempDir("test-12250-control", {
    "test.spec.ts": `
import { afterAll, beforeAll, describe, expect, it } from 'bun:test';
describe('test', () => {
beforeAll(async () => {
console.log('Before');
});
afterAll(async () => {
console.log('After');
});
it('should fail', async () => {
expect(true).toBe(false);
});
it('should pass', async () => {
expect(true).toBe(true);
});
});
`,
  });
  await using proc = Bun.spawn({
    cmd: [bunExe(), "test", "test.spec.ts"],
    env: bunEnv,
    cwd: String(dir),
    stderr: "pipe",
    stdout: "pipe",
  });
  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
  // The test should fail with exit code 1 (one test failed)
  expect(exitCode).toBe(1);
  // Before hook should run
  expect(stdout).toContain("Before");
  // Without --bail, afterAll should definitely run
  expect(stdout).toContain("After");
  // Without --bail, should NOT bail out early
  expect(stdout).not.toContain("Bailed out");
});

View File

@@ -1015,3 +1015,58 @@ describe("spyOn", () => {
// spyOn does not work with getters/setters yet.
});
// Regression test for #1825
describe("Jest mock functions from issue #1825", () => {
  test("jest.mock should be available and work with factory function", () => {
    // jest.mock must exist and accept a module factory without throwing.
    const register = () => jest.mock("fs", () => ({ readFile: jest.fn() }));
    expect(register).not.toThrow();
  });
  test("jest.resetAllMocks should be available and not throw", () => {
    const spy = jest.fn();
    spy();
    expect(spy).toHaveBeenCalledTimes(1);
    // Resetting every registered mock must be supported and must not throw.
    expect(() => jest.resetAllMocks()).not.toThrow();
  });
  test("mockReturnThis should return the mock function itself", () => {
    // mockReturnThis is chainable: it must hand back the same mock instance.
    const spy = jest.fn();
    expect(spy.mockReturnThis()).toBe(spy);
  });
});
// Regression test for #18820
describe("mock.clearAllMocks", () => {
  const random1 = mock(() => Math.random());
  const random2 = mock(() => Math.random());
  test("clearing all mocks", () => {
    random1();
    random2();
    for (const m of [random1, random2]) {
      expect(m).toHaveBeenCalledTimes(1);
    }
    // clearAllMocks resets call bookkeeping on every registered mock...
    mock.clearAllMocks();
    for (const m of [random1, random2]) {
      expect(m).toHaveBeenCalledTimes(0);
    }
    // ...but the implementations are preserved: calling again still works
    // and the call counters start accumulating from zero.
    for (const m of [random1, random2]) {
      expect(typeof m()).toBe("number");
      expect(m).toHaveBeenCalledTimes(1);
    }
  });
});

View File

@@ -8,9 +8,55 @@
// - Write test for import {foo} from "./foo"; export {foo}
import { expect, mock, spyOn, test } from "bun:test";
import { bunRun, tempDirWithFiles } from "harness";
import { join } from "path";
import { default as defaultValue, fn, iCallFn, rexported, rexportedAs, variable } from "./mock-module-fixture";
import * as spyFixture from "./spymodule-fixture";
// Regression test for #11664
test("mock.module does not segfault with path aliases", () => {
  // Fixture reproducing the crash: a failed aliased import followed by
  // mock.module() on the same "@/" alias used to segfault the resolver.
  const fixtureFiles = {
    "dir/a.ts": `
import { mock } from "bun:test";
try {
await import("./b");
} catch (e) {
console.log(e);
}
mock.module("@/dir/c", () => ({
default: { winner: true },
}));
console.log()
`,
    "dir/b.ts": `
import { notExist } from "@/dir/c";
[notExist];
`,
    "dir/c.ts": `
import { notExist } from "@/dir/d";
export default async function(req) {
[notExist];
}
`,
    "dir/d.ts": `
export const a = 1;
`,
    // "@/..." resolves from the fixture root via tsconfig "paths".
    "tsconfig.json": JSON.stringify({
      compilerOptions: { baseUrl: ".", paths: { "@/*": ["*"] } },
    }),
  };
  const fixtureDir = tempDirWithFiles("segfault", fixtureFiles);
  // The test passes if the child process exits cleanly (no segfault).
  bunRun(join(fixtureDir, "dir/a.ts"));
});
test("mock.module async", async () => {
mock.module("i-am-async-and-mocked", async () => {
await 42;

View File

@@ -32,3 +32,35 @@ describe("outer most describe", () => {
});
});
});
// Regression test for #5738
// Tests that test(1), describe(test(2)), test(3) run in order 1,2,3 instead of 2,1,3
test("nested describe hooks run in correct order", async () => {
  const { bunEnv, bunExe, normalizeBunSnapshot } = await import("harness");
  await using proc = Bun.spawn({
    cmd: [bunExe(), "test", import.meta.dir + "/5738.fixture.ts"],
    stdout: "pipe",
    stderr: "pipe",
    env: bunEnv,
  });
  // Read stdout concurrently with waiting for exit: awaiting `exited` first
  // can deadlock if the child fills the pipe buffer, and Promise.all matches
  // the pattern used by the other spawn-based tests in this file.
  const [stdout, exitCode] = await Promise.all([proc.stdout.text(), proc.exited]);
  expect(normalizeBunSnapshot(stdout)).toMatchInlineSnapshot(`
"bun test <version> (<revision>)
1 - beforeAll
1 - beforeEach
1 - test
1 - afterEach
2 - beforeAll
1 - beforeEach
2 - beforeEach
2 - test
2 - afterEach
1 - afterEach
2 - afterAll
1 - afterAll"
`);
  expect(exitCode).toBe(0);
});

View File

@@ -1322,3 +1322,33 @@ describe.each(["toHaveLastReturnedWith", "toHaveNthReturnedWith", "toHaveReturne
});
},
);
// Regression test for #8794
// This tests that when a mocked function appears in a stack trace
// it doesn't crash when generating the stack trace.
import { spyOn } from "bun:test";
test("#8794", () => {
  const target = {
    a() {
      // Always throws; the `return 1` that used to follow was unreachable.
      throw new Error("a");
    },
    method() {
      return target.a();
    },
  };
  // Wrap `method` in a spy so a mocked frame appears in the stack trace.
  spyOn(target, "method");
  for (let i = 0; i < 20; i++) {
    try {
      target.method();
      expect.unreachable();
    } catch (e) {
      // Materialize the lazy stack; this used to crash with a mocked frame.
      e.stack;
      expect(e.stack).toContain("at method ");
      expect(e.stack).toContain("at a ");
    }
    // GC between iterations to shake out use-after-free in frame capture.
    Bun.gc(false);
  }
});

View File

@@ -1,6 +1,6 @@
import { $ } from "bun";
import { expect, test } from "bun:test";
import { bunEnv, bunExe } from "harness";
import { bunEnv, bunExe, normalizeBunSnapshot } from "harness";
test.each(["./only-fixture-1.ts", "./only-fixture-2.ts", "./only-fixture-3.ts"])(
`test.only shouldn't need --only for %s`,
@@ -24,3 +24,47 @@ test("only resets per test", async () => {
expect(result.stderr.toString()).toContain(" 0 fail\n");
expect(result.stderr.toString()).toContain("Ran 6 tests across 4 files");
});
// Regression test for #20092
test("20092", async () => {
  await using proc = Bun.spawn({
    cmd: [bunExe(), "test", import.meta.dir + "/20092.fixture.ts"],
    stdout: "pipe",
    stderr: "pipe",
    env: { ...bunEnv, CI: "false" }, // tests '.only()'
  });
  // Drain both pipes while waiting for exit: awaiting `exited` before reading
  // can deadlock if the child fills a pipe buffer, and Promise.all matches
  // the pattern used by the other spawn-based tests in this file.
  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
  expect(exitCode).toBe(0);
  // The test summary goes to stderr.
  expect(normalizeBunSnapshot(stderr)).toMatchInlineSnapshot(`
"test/js/bun/test/20092.fixture.ts:
(pass) foo > works
(pass) bar > works
2 pass
0 fail
2 expect() calls
Ran 2 tests across 1 file."
`);
});
// Regression test for #5961
test("5961", async () => {
  await using proc = Bun.spawn({
    cmd: [bunExe(), "test", import.meta.dir + "/5961.fixture.ts"],
    stdout: "pipe",
    stderr: "pipe",
    env: { ...bunEnv, CI: "false" }, // tests '.only()'
  });
  // Drain both pipes while waiting for exit (see #20092 test above for the
  // same pattern); stderr is read only to keep the pipe from filling.
  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
  expect(normalizeBunSnapshot(stdout)).toMatchInlineSnapshot(`
"bun test <version> (<revision>)
hi!"
`);
  expect(exitCode).toBe(0);
});

View File

@@ -1,6 +1,6 @@
import { expect, test } from "bun:test";
import fsPromises from "fs/promises";
import { tempDirWithFiles } from "harness";
import { describe, expect, test } from "bun:test";
import fsPromises, { stat } from "fs/promises";
import { isWindows, tempDir, tempDirWithFiles } from "harness";
import { join } from "path";
test("delete() and stat() should work with unicode paths", async () => {
@@ -45,3 +45,141 @@ test("writer.end() should not close the fd if it does not own the fd", async ()
expect(await Bun.file(filename).text()).toBe("");
}
});
// Regression test for #25903: Bun.write() mode option when copying files using Bun.file()
// The mode option is respected when copying files via Bun.file() as the source.
// Skipped on Windows, where Unix-style permission bits do not apply.
describe.skipIf(isWindows)("Bun.write() mode option", () => {
  // Extract just the permission bits from a stat() mode value.
  const permBits = (mode: number) => mode & 0o777;

  test("Bun.write() respects mode option when copying files via Bun.file()", async () => {
    using dir = tempDir("issue-25903", {});
    const src = `${dir}/source.txt`;
    const dst = `${dir}/dest.txt`;
    // Create the source file with whatever default mode the write path uses.
    await Bun.write(src, "hello world");
    // Record the source permissions so we can prove the destination did not
    // merely inherit them.
    const srcMode = permBits((await stat(src)).mode);
    // Copy via Bun.file() with explicit, more restrictive permissions.
    await Bun.write(dst, Bun.file(src), { mode: 0o600 });
    const dstMode = permBits((await stat(dst)).mode);
    expect(dstMode).toBe(0o600);
    // Unless the source happened to already be 0o600, the bits must differ.
    if (srcMode !== 0o600) {
      expect(dstMode).not.toBe(srcMode);
    }
  });
  test("Bun.write() respects mode option with createPath when copying via Bun.file()", async () => {
    using dir = tempDir("issue-25903-createPath", {});
    const src = `${dir}/source.txt`;
    const dst = `${dir}/subdir/dest.txt`;
    await Bun.write(src, "hello world");
    // Destination requires directory creation; mode must still be honored.
    await Bun.write(dst, Bun.file(src), { mode: 0o755, createPath: true });
    expect(permBits((await stat(dst)).mode)).toBe(0o755);
  });
  test("Bun.write() uses default permissions when mode is not specified for Bun.file() copy", async () => {
    using dir = tempDir("issue-25903-default", {});
    const src = `${dir}/source.txt`;
    const dst = `${dir}/dest.txt`;
    const baseline = `${dir}/baseline.txt`;
    await Bun.write(src, "hello world");
    // Write a throwaway file to learn the platform's default mode.
    await Bun.write(baseline, "baseline");
    const defaultMode = permBits((await stat(baseline)).mode);
    // No mode given: the copy gets the default, not the source's bits.
    await Bun.write(dst, Bun.file(src));
    expect(permBits((await stat(dst)).mode)).toBe(defaultMode);
  });
  test("Bun.write() respects mode when writing to PathLike from BunFile", async () => {
    using dir = tempDir("issue-25903-pathlike", {});
    const src = `${dir}/source.txt`;
    const dst = `${dir}/dest.txt`;
    await Bun.write(src, "test content");
    // Path-string destination, Bun.file() source, explicit mode.
    await Bun.write(dst, Bun.file(src), { mode: 0o700 });
    expect(permBits((await stat(dst)).mode)).toBe(0o700);
  });
  test("Bun.write() respects mode when both destination and source are BunFile", async () => {
    using dir = tempDir("issue-25903-bunfile-dest", {});
    const src = `${dir}/source.txt`;
    const dst = `${dir}/dest.txt`;
    await Bun.write(src, "test content for bunfile dest");
    // BunFile destination and BunFile source, explicit mode.
    await Bun.write(Bun.file(dst), Bun.file(src), { mode: 0o700 });
    expect(permBits((await stat(dst)).mode)).toBe(0o700);
  });
  test("Bun.write() respects mode when overwriting an existing file", async () => {
    using dir = tempDir("issue-25903-overwrite", {});
    const src = `${dir}/source.txt`;
    const dst = `${dir}/dest.txt`;
    await Bun.write(src, "source content");
    // Pre-create the destination with default permissions.
    await Bun.write(dst, "original content");
    const originalMode = permBits((await stat(dst)).mode);
    // Overwriting with an explicit mode must update an existing file's bits.
    await Bun.write(dst, Bun.file(src), { mode: 0o600 });
    const dstMode = permBits((await stat(dst)).mode);
    expect(dstMode).toBe(0o600);
    if (originalMode !== 0o600) {
      expect(dstMode).not.toBe(originalMode);
    }
  });
  test("Bun.write() accepts mode: 0 (no permissions)", async () => {
    using dir = tempDir("issue-25903-mode-zero", {});
    const src = `${dir}/source.txt`;
    const dst = `${dir}/dest.txt`;
    await Bun.write(src, "test content");
    // mode 0 is a legitimate value and must not be treated as "unset".
    await Bun.write(dst, Bun.file(src), { mode: 0o000 });
    expect(permBits((await stat(dst)).mode)).toBe(0o000);
  });
});

View File

@@ -1,6 +1,7 @@
import { fileURLToPath, pathToFileURL } from "bun";
import { describe, expect, it } from "bun:test";
import { isWindows } from "harness";
import { isWindows, tmpdirSync } from "harness";
import { join } from "path";
describe("pathToFileURL", () => {
it("should convert a path to a file url", () => {
@@ -50,3 +51,41 @@ describe("fileURLToPath", () => {
expect(fileURLToPath(import.meta.url)).toBe(import.meta.path);
});
});
// Regression test for #12360
describe("validatePath using fileURLToPath and pathToFileURL", () => {
  // Round-trips a file URL through fileURLToPath/pathToFileURL, returning the
  // URL when the file exists and "" when it does not.
  async function validatePath(path) {
    const filePath = fileURLToPath(path);
    if (await Bun.file(filePath).exists()) {
      return pathToFileURL(filePath);
    } else {
      return "";
    }
  }
  it("should return empty string for non-existent file", async () => {
    const dir = tmpdirSync();
    const filePath = join(dir, "./sample.exe");
    const newFilePath = await validatePath(pathToFileURL(filePath));
    expect(newFilePath).toBe("");
  });
  it("should return file URL for existing file", async () => {
    const dir = tmpdirSync();
    const editorPath = pathToFileURL(join(dir, "./metaeditor64.exe"));
    const terminalPath = pathToFileURL(join(dir, "./terminal64.exe"));
    // Use fileURLToPath() rather than hand-slicing `pathname`: pathname keeps
    // the leading "/" before a Windows drive letter and leaves characters
    // percent-encoded, which the old `isWindows ? slice(1) : ...` hack only
    // partially handled. fileURLToPath is correct on every platform.
    await Bun.write(fileURLToPath(editorPath), "im a editor");
    await Bun.write(fileURLToPath(terminalPath), "im a terminal");
    const newEditorPath = await validatePath(editorPath);
    const newTerminalPath = await validatePath(terminalPath);
    expect(newEditorPath.pathname).toBe(editorPath.pathname);
    expect(newTerminalPath.pathname).toBe(terminalPath.pathname);
  });
});

View File

@@ -188,3 +188,187 @@ test("error.stack throwing an error doesn't lead to a crash", () => {
throw err;
}).toThrow();
});
// Regression tests for #22863 - circular error stack references
describe("circular error stack references", () => {
  const { bunEnv, bunExe, tempDir } = require("harness");

  // Spawn `bun index.js` with the given source in a scratch directory and
  // collect its output. Every test below shares this driver.
  async function runFixture(name: string, source: string) {
    using dir = tempDir(name, { "index.js": source });
    await using proc = Bun.spawn({
      cmd: [bunExe(), "index.js"],
      env: bunEnv,
      cwd: String(dir),
      stdout: "pipe",
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    return { stdout, stderr, exitCode };
  }

  // Shared assertions: the fixture printed its sentinel, exited cleanly, and
  // never recursed to stack exhaustion.
  function expectNoRecursion(r: { stdout: string; stderr: string; exitCode: number }) {
    expect(r.exitCode).toBe(0);
    expect(r.stdout).toContain("after error print");
    expect(r.stdout).not.toContain("Maximum call stack");
    expect(r.stderr).not.toContain("Maximum call stack");
  }

  test("error with circular stack reference should not cause infinite recursion", async () => {
    const result = await runFixture(
      "circular-error-stack",
      `
const error = new Error("Test error");
error.stack = error;
console.log(error);
console.log("after error print");
`,
    );
    expectNoRecursion(result);
  });
  test("error with nested circular references should not cause infinite recursion", async () => {
    const result = await runFixture(
      "nested-circular-error",
      `
const error1 = new Error("Error 1");
const error2 = new Error("Error 2");
error1.stack = error2;
error2.stack = error1;
console.log(error1);
console.log("after error print");
`,
    );
    expectNoRecursion(result);
  });
  test("error with circular reference in cause chain", async () => {
    const result = await runFixture(
      "circular-error-cause",
      `
const error1 = new Error("Error 1");
const error2 = new Error("Error 2");
error1.cause = error2;
error2.cause = error1;
console.log(error1);
console.log("after error print");
`,
    );
    expectNoRecursion(result);
  });
  test("error.stack getter that throws should not crash", async () => {
    const { stdout, stderr, exitCode } = await runFixture(
      "throwing-stack-getter",
      `
const error = new Error("Test error");
Object.defineProperty(error, "stack", {
get() {
throw new Error("Stack getter throws!");
}
});
console.log(error);
console.log("after error print");
`,
    );
    expect(exitCode).toBe(0);
    expect(stdout).toContain("after error print");
    // The getter's exception must be swallowed, not propagated to output.
    expect(stdout).not.toContain("Stack getter throws");
    expect(stderr).not.toContain("Stack getter throws");
  });
  test("error.stack getter returning circular reference", async () => {
    const result = await runFixture(
      "circular-stack-getter",
      `
const error = new Error("Test error");
Object.defineProperty(error, "stack", {
get() {
return error; // Return the error itself
}
});
console.log(error);
console.log("after error print");
`,
    );
    expectNoRecursion(result);
  });
  test("error with multiple throwing getters", async () => {
    const { stdout, stderr, exitCode } = await runFixture(
      "multiple-throwing-getters",
      `
const error = new Error("Test error");
Object.defineProperty(error, "stack", {
get() {
throw new Error("Stack throws!");
}
});
Object.defineProperty(error, "cause", {
get() {
throw new Error("Cause throws!");
}
});
error.normalProp = "works";
console.log(error);
console.log("after error print");
`,
    );
    expect(exitCode).toBe(0);
    expect(stdout).toContain("after error print");
    // Normal properties still print even when stack/cause getters throw.
    expect(stdout).toContain("normalProp");
    expect(stdout).not.toContain("Stack throws");
    expect(stdout).not.toContain("Cause throws");
  });
});

View File

@@ -1,7 +1,7 @@
import type { Server, Subprocess, WebSocketHandler } from "bun";
import { serve, spawn } from "bun";
import { afterEach, describe, expect, it } from "bun:test";
import { bunEnv, bunExe, forceGuardMalloc } from "harness";
import { bunEnv, bunExe, forceGuardMalloc, tempDir } from "harness";
import { isIP } from "node:net";
import path from "node:path";
@@ -1054,3 +1054,649 @@ it("you can call server.subscriberCount() when its not a websocket server", asyn
});
expect(server.subscriberCount("boop")).toBe(0);
});
// Regression test for #23474
// A cookie set on the request before server.upgrade() must appear as a
// Set-Cookie header on the 101 Switching Protocols response.
it("request.cookies.set() should set websocket upgrade response cookie", async () => {
  using server = Bun.serve({
    port: 0,
    routes: {
      "/ws": req => {
        // Set a cookie before upgrading
        req.cookies.set("test", "123", {
          httpOnly: true,
          path: "/",
        });
        const upgraded = server.upgrade(req);
        if (upgraded) {
          return undefined;
        }
        return new Response("Upgrade failed", { status: 500 });
      },
    },
    websocket: {
      message(ws, message) {
        ws.close();
      },
    },
  });
  const { promise, resolve, reject } = Promise.withResolvers();
  // Use Bun.connect (a raw TCP socket) rather than the WebSocket client so
  // the upgrade response headers can be inspected directly.
  // NOTE(review): assumes the entire 101 response arrives in one data chunk.
  const socket = await Bun.connect({
    hostname: "localhost",
    port: server.port,
    socket: {
      data(socket, data) {
        try {
          const response = new TextDecoder().decode(data);
          // Check that we got a successful upgrade response
          expect(response).toContain("HTTP/1.1 101");
          expect(response).toContain("Upgrade: websocket");
          // The critical check: Set-Cookie header should be present
          expect(response).toContain("Set-Cookie:");
          expect(response).toContain("test=123");
          socket.end();
          resolve();
        } catch (err) {
          // Propagate assertion failures out to the awaited promise.
          reject(err);
        }
      },
      error(socket, error) {
        reject(error);
      },
    },
  });
  // Send a valid WebSocket upgrade request (handshake written by hand with a
  // fixed Sec-WebSocket-Key).
  socket.write(
    "GET /ws HTTP/1.1\r\n" +
      `Host: localhost:${server.port}\r\n` +
      "Upgrade: websocket\r\n" +
      "Connection: Upgrade\r\n" +
      "Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n" +
      "Sec-WebSocket-Version: 13\r\n" +
      "\r\n",
  );
  await promise;
});
// Regression test for #23474
// Cookies set via request.cookies.set() must survive an upgrade that also
// supplies custom response headers.
it("request.cookies.set() should work with custom headers in upgrade", async () => {
  using server = Bun.serve({
    port: 0,
    routes: {
      "/ws": req => {
        // Set cookies before upgrading
        req.cookies.set("session", "abc123", { path: "/" });
        req.cookies.set("user", "john", { httpOnly: true });
        // Upgrade with extra headers; cookies and headers must both appear.
        const upgraded = server.upgrade(req, {
          headers: {
            "X-Custom-Header": "test",
          },
        });
        if (upgraded) {
          return undefined;
        }
        return new Response("Upgrade failed", { status: 500 });
      },
    },
    websocket: {
      message(ws, message) {
        ws.close();
      },
    },
  });
  const { promise, resolve, reject } = Promise.withResolvers();
  // Raw TCP client so the 101 response headers can be inspected directly.
  // NOTE(review): assumes the entire 101 response arrives in one data chunk.
  const socket = await Bun.connect({
    hostname: "localhost",
    port: server.port,
    socket: {
      data(socket, data) {
        try {
          const response = new TextDecoder().decode(data);
          // Check that we got a successful upgrade response
          expect(response).toContain("HTTP/1.1 101");
          expect(response).toContain("Upgrade: websocket");
          // Check custom header
          expect(response).toContain("X-Custom-Header: test");
          // Check that both cookies are present
          expect(response).toContain("Set-Cookie:");
          expect(response).toContain("session=abc123");
          expect(response).toContain("user=john");
          socket.end();
          resolve();
        } catch (err) {
          reject(err);
        }
      },
      error(socket, error) {
        reject(error);
      },
    },
  });
  // Hand-written WebSocket upgrade request with a fixed Sec-WebSocket-Key.
  socket.write(
    "GET /ws HTTP/1.1\r\n" +
      `Host: localhost:${server.port}\r\n` +
      "Upgrade: websocket\r\n" +
      "Connection: Upgrade\r\n" +
      "Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n" +
      "Sec-WebSocket-Version: 13\r\n" +
      "\r\n",
  );
  await promise;
});
// Regression test for #24593
// Builds a realistic ~109KB JSON payload resembling the original
// reproduction: a pick list carrying 50 items, each with 20 allocations.
function generateLargeMessage(): string {
  // Fresh ISO timestamp per field, as in the original reproduction.
  const now = () => new Date().toISOString();
  const items = Array.from({ length: 50 }, (_, i) => ({
    id: 6000 + i,
    pickListId: 444,
    externalRef: null,
    sku: `405053843${String(i).padStart(4, "0")}`,
    sequence: i + 1,
    requestedQuantity: 1,
    pickedQuantity: 0,
    dischargedQuantity: 0,
    state: "allocated",
    allocatedAt: now(),
    startedAt: null,
    cancelledAt: null,
    pickedAt: null,
    placedAt: null,
    dischargedAt: null,
    createdAt: now(),
    updatedAt: now(),
    allocations: Array.from({ length: 20 }, (_, j) => ({
      id: 9000 + i * 20 + j,
      pickListItemId: 6000 + i,
      productId: 36000 + j,
      state: "reserved",
      reservedAt: now(),
      startedAt: null,
      pickedAt: null,
      placedAt: null,
      cancelledAt: null,
      quantity: 1,
      createdAt: now(),
      updatedAt: now(),
      location: {
        id: 1000 + j,
        name: `Location-${j}`,
        zone: `Zone-${Math.floor(j / 5)}`,
        aisle: `Aisle-${j % 10}`,
        shelf: `Shelf-${j % 20}`,
        position: j,
      },
      product: {
        id: 36000 + j,
        sku: `SKU-${String(j).padStart(6, "0")}`,
        name: `Product Name ${j} with some additional description text`,
        category: `Category-${j % 5}`,
        weight: 1.5 + j * 0.1,
        dimensions: { width: 10, height: 20, depth: 30 },
      },
    })),
  }));
  return JSON.stringify({
    id: 444,
    externalRef: null,
    description: "Generated pick list",
    stockId: null,
    priority: 0,
    state: "allocated",
    picksInSequence: true,
    allocatedAt: now(),
    startedAt: null,
    pausedAt: null,
    pickedAt: null,
    placedAt: null,
    cancelledAt: null,
    dischargedAt: null,
    collectedAt: null,
    totalRequestedQuantity: 50,
    totalPickedQuantity: 0,
    totalDischargedQuantity: 0,
    createdAt: now(),
    updatedAt: now(),
    items,
  });
}
// Regression test for #24593
// server.publish() of a large message with perMessageDeflate enabled used to
// crash on Windows (segfault in memcpy inside the compression path).
describe("WebSocket server.publish with perMessageDeflate", () => {
  it("should handle large message publish without crash", async () => {
    // Create a ~109KB JSON message (similar to the reproduction)
    const largeMessage = generateLargeMessage();
    expect(largeMessage.length).toBeGreaterThan(100000);
    using server = serve({
      port: 0,
      fetch(req, server) {
        if (server.upgrade(req)) {
          return;
        }
        return new Response("WebSocket server");
      },
      websocket: {
        perMessageDeflate: true,
        open(ws) {
          // Every connection subscribes so publish() reaches it.
          ws.subscribe("test");
        },
        message() {},
        close() {},
      },
    });
    const client = new WebSocket(`ws://localhost:${server.port}`);
    const { promise: openPromise, resolve: resolveOpen, reject: rejectOpen } = Promise.withResolvers<void>();
    const { promise: messagePromise, resolve: resolveMessage, reject: rejectMessage } = Promise.withResolvers<string>();
    client.onopen = () => resolveOpen();
    client.onerror = e => {
      // Fail whichever stage we are waiting on.
      rejectOpen(e);
      rejectMessage(new Error("WebSocket error"));
    };
    client.onmessage = event => resolveMessage(event.data);
    await openPromise;
    // This is the critical test - server.publish() with a large compressed message
    // On Windows, this was causing a segfault in memcpy during the compression path
    const published = server.publish("test", largeMessage);
    expect(published).toBeGreaterThan(0); // Returns bytes sent, should be > 0
    // Round-trip: the client must receive the message intact after inflation.
    const received = await messagePromise;
    expect(received.length).toBe(largeMessage.length);
    expect(received).toBe(largeMessage);
    client.close();
  });
  it("should handle multiple large message publishes", async () => {
    // Test multiple publishes in succession to catch potential buffer corruption
    const largeMessage = generateLargeMessage();
    let messagesReceived = 0;
    const expectedMessages = 5;
    using server = serve({
      port: 0,
      fetch(req, server) {
        if (server.upgrade(req)) {
          return;
        }
        return new Response("WebSocket server");
      },
      websocket: {
        perMessageDeflate: true,
        open(ws) {
          ws.subscribe("multi-test");
        },
        message() {},
        close() {},
      },
    });
    const client = new WebSocket(`ws://localhost:${server.port}`);
    const { promise: openPromise, resolve: resolveOpen, reject: rejectOpen } = Promise.withResolvers<void>();
    const {
      promise: allMessagesReceived,
      resolve: resolveMessages,
      reject: rejectMessages,
    } = Promise.withResolvers<void>();
    client.onopen = () => resolveOpen();
    client.onerror = e => {
      rejectOpen(e);
      rejectMessages(e instanceof Error ? e : new Error("WebSocket error"));
    };
    client.onmessage = event => {
      // Count arrivals; each must come back at full length.
      messagesReceived++;
      expect(event.data.length).toBe(largeMessage.length);
      if (messagesReceived === expectedMessages) {
        resolveMessages();
      }
    };
    await openPromise;
    // Publish multiple times in quick succession
    for (let i = 0; i < expectedMessages; i++) {
      const published = server.publish("multi-test", largeMessage);
      expect(published).toBeGreaterThan(0); // Returns bytes sent
    }
    await allMessagesReceived;
    expect(messagesReceived).toBe(expectedMessages);
    client.close();
  });
  it("should handle publish to multiple subscribers", async () => {
    // Test publishing to multiple clients - this exercises the publishBig loop
    const largeMessage = generateLargeMessage();
    const numClients = 3;
    const clientsReceived: boolean[] = new Array(numClients).fill(false);
    using server = serve({
      port: 0,
      fetch(req, server) {
        if (server.upgrade(req)) {
          return;
        }
        return new Response("WebSocket server");
      },
      websocket: {
        perMessageDeflate: true,
        open(ws) {
          ws.subscribe("broadcast");
        },
        message() {},
        close() {},
      },
    });
    const clients: WebSocket[] = [];
    try {
      // Connect all clients before publishing so each is subscribed.
      const allClientsOpen = Promise.all(
        Array.from({ length: numClients }, (_, i) => {
          return new Promise<void>((resolve, reject) => {
            const client = new WebSocket(`ws://localhost:${server.port}`);
            clients.push(client);
            client.onopen = () => resolve();
            client.onerror = e => reject(e);
          });
        }),
      );
      await allClientsOpen;
      const allMessagesReceived = Promise.all(
        clients.map(
          (client, i) =>
            new Promise<void>(resolve => {
              client.onmessage = event => {
                expect(event.data.length).toBe(largeMessage.length);
                clientsReceived[i] = true;
                resolve();
              };
            }),
        ),
      );
      // Publish to all subscribers
      const published = server.publish("broadcast", largeMessage);
      expect(published).toBeGreaterThan(0); // Returns bytes sent
      await allMessagesReceived;
      expect(clientsReceived.every(r => r)).toBe(true);
    } finally {
      // Always close every client, even if an assertion above failed.
      for (const c of clients) {
        try {
          c.close();
        } catch {}
      }
    }
  });
  // CORK_BUFFER_SIZE is 16KB - test messages right at this boundary
  // since messages >= CORK_BUFFER_SIZE use publishBig path
  const CORK_BUFFER_SIZE = 16 * 1024;
  it.each([
    { name: "just under 16KB", size: CORK_BUFFER_SIZE - 100 },
    { name: "exactly 16KB", size: CORK_BUFFER_SIZE },
    { name: "just over 16KB", size: CORK_BUFFER_SIZE + 100 },
  ])("should handle message at CORK_BUFFER_SIZE boundary: $name", async ({ size }) => {
    // A message of exactly `size` bytes (all "D") to probe the boundary.
    const message = Buffer.alloc(size, "D").toString();
    using server = serve({
      port: 0,
      fetch(req, server) {
        if (server.upgrade(req)) {
          return;
        }
        return new Response("WebSocket server");
      },
      websocket: {
        perMessageDeflate: true,
        open(ws) {
          ws.subscribe("boundary-test");
        },
        message() {},
        close() {},
      },
    });
    const client = new WebSocket(`ws://localhost:${server.port}`);
    const { promise: openPromise, resolve: resolveOpen, reject: rejectOpen } = Promise.withResolvers<void>();
    const { promise: messagePromise, resolve: resolveMessage, reject: rejectMessage } = Promise.withResolvers<string>();
    // Track whether onopen already settled so an error afterwards rejects the
    // message promise instead of the (already settled) open promise.
    let openSettled = false;
    client.onopen = () => {
      openSettled = true;
      resolveOpen();
    };
    client.onerror = e => {
      if (!openSettled) {
        openSettled = true;
        rejectOpen(e);
      } else {
        rejectMessage(e);
      }
    };
    client.onmessage = event => resolveMessage(event.data);
    await openPromise;
    server.publish("boundary-test", message);
    const received = await messagePromise;
    expect(received.length).toBe(size);
    client.close();
  });
});
// Regression test for #3613
// WebSocketServer handleProtocols option should set the selected protocol in the upgrade response
it("ws WebSocketServer handleProtocols sets selected protocol", async () => {
  // The fixture runs in a child process: it starts a ws server whose
  // handleProtocols always picks 'selected-protocol', performs an upgrade
  // request against itself via fetch, and prints the response headers.
  using dir = tempDir("ws-handle-protocols", {
    "server.js": `
import { WebSocketServer } from 'ws';
const wss = new WebSocketServer({
port: 0,
handleProtocols: (protocols, request) => {
return 'selected-protocol';
}
});
wss.on('listening', async () => {
const port = wss.address().port;
console.log('PORT:' + port);
// Test using fetch to verify the actual response headers
try {
const res = await fetch('http://127.0.0.1:' + port, {
headers: {
"Upgrade": "websocket",
"Connection": "Upgrade",
"Sec-WebSocket-Key": "dGhlIHNhbXBsZSBub25jZQ==",
"Sec-WebSocket-Version": "13",
"Sec-WebSocket-Protocol": "custom-protocol, selected-protocol"
}
});
console.log("STATUS:" + res.status);
console.log("PROTOCOL:" + res.headers.get("sec-websocket-protocol"));
} catch (e) {
console.log("ERROR:" + e.message);
}
wss.close();
process.exit(0);
});
wss.on('connection', (ws) => {
console.log('SERVER_WS_PROTOCOL:' + ws.protocol);
});
`,
  });
  await using proc = Bun.spawn({
    cmd: [bunExe(), "server.js"],
    cwd: String(dir),
    env: bunEnv,
    stdout: "pipe",
    stderr: "pipe",
  });
  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
  // The server should respond with the protocol selected by handleProtocols
  expect(stdout).toContain("STATUS:101");
  expect(stdout).toContain("PROTOCOL:selected-protocol");
  // The server-side socket must also report the negotiated protocol.
  expect(stdout).toContain("SERVER_WS_PROTOCOL:selected-protocol");
  expect(exitCode).toBe(0);
}, 10000);
// Regression test for #3613
// When handleProtocols returns an empty string, the upgrade must still
// succeed (Bun falls back to the client's first offered protocol).
it("ws WebSocketServer handleProtocols with no protocol", async () => {
  using dir = tempDir("ws-handle-protocols-empty", {
    "server.js": `
import { WebSocketServer } from 'ws';
const wss = new WebSocketServer({
port: 0,
handleProtocols: (protocols, request) => {
// Return empty string - should not set a protocol header
return '';
}
});
wss.on('listening', async () => {
const port = wss.address().port;
console.log('PORT:' + port);
try {
const res = await fetch('http://127.0.0.1:' + port, {
headers: {
"Upgrade": "websocket",
"Connection": "Upgrade",
"Sec-WebSocket-Key": "dGhlIHNhbXBsZSBub25jZQ==",
"Sec-WebSocket-Version": "13",
"Sec-WebSocket-Protocol": "custom-protocol"
}
});
console.log("STATUS:" + res.status);
// When handleProtocols returns empty, Bun falls back to client's first protocol
console.log("PROTOCOL:" + res.headers.get("sec-websocket-protocol"));
} catch (e) {
console.log("ERROR:" + e.message);
}
wss.close();
process.exit(0);
});
wss.on('connection', (ws) => {
console.log('SERVER_WS_PROTOCOL:' + JSON.stringify(ws.protocol));
});
`,
  });
  await using proc = Bun.spawn({
    cmd: [bunExe(), "server.js"],
    cwd: String(dir),
    env: bunEnv,
    stdout: "pipe",
    stderr: "pipe",
  });
  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
  // The server should respond with 101 status
  // (only the status is asserted here; the chosen protocol is covered above).
  expect(stdout).toContain("STATUS:101");
  expect(exitCode).toBe(0);
}, 10000);
// Regression test for #3613
it("ws WebSocketServer without handleProtocols uses first client protocol", async () => {
  // With no handleProtocols option, the default negotiation must pick the
  // client's first offered protocol ("first-protocol").
  using dir = tempDir("ws-no-handle-protocols", {
    "server.js": `
import { WebSocketServer } from 'ws';
const wss = new WebSocketServer({
  port: 0,
  // No handleProtocols - should default to first client protocol
});
wss.on('listening', async () => {
  const port = wss.address().port;
  console.log('PORT:' + port);
  try {
    const res = await fetch('http://127.0.0.1:' + port, {
      headers: {
        "Upgrade": "websocket",
        "Connection": "Upgrade",
        "Sec-WebSocket-Key": "dGhlIHNhbXBsZSBub25jZQ==",
        "Sec-WebSocket-Version": "13",
        "Sec-WebSocket-Protocol": "first-protocol, second-protocol"
      }
    });
    console.log("STATUS:" + res.status);
    console.log("PROTOCOL:" + res.headers.get("sec-websocket-protocol"));
  } catch (e) {
    console.log("ERROR:" + e.message);
  }
  wss.close();
  process.exit(0);
});
wss.on('connection', (ws) => {
  console.log('SERVER_WS_PROTOCOL:' + ws.protocol);
});
`,
  });
  await using proc = Bun.spawn({
    cmd: [bunExe(), "server.js"],
    cwd: String(dir),
    env: bunEnv,
    stdout: "pipe",
    stderr: "pipe",
  });
  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
  // Without handleProtocols, should default to first client protocol —
  // checked both in the response header and on the server-side socket.
  expect(stdout).toContain("STATUS:101");
  expect(stdout).toContain("PROTOCOL:first-protocol");
  expect(stdout).toContain("SERVER_WS_PROTOCOL:first-protocol");
  expect(exitCode).toBe(0);
}, 10000);

View File

@@ -1,5 +1,6 @@
import { YAML, file } from "bun";
import { describe, expect, test } from "bun:test";
import { bunEnv, bunExe, tempDir } from "harness";
import { join } from "path";
describe("Bun.YAML", () => {
@@ -2722,4 +2723,86 @@ refs:
expect(YAML.parse(YAML.stringify(workflow))).toEqual(workflow);
});
});
// Regression test for #23489
test("YAML double-quoted strings with ... should not trigger document end error - issue #23489", () => {
// Test the original failing case with Arabic text and emoji
const yaml1 = 'balance_dont_have_wallet: "👛 لا تمتلك محفظة... !"';
const result1 = YAML.parse(yaml1);
expect(result1).toEqual({
balance_dont_have_wallet: "👛 لا تمتلك محفظة... !",
});
// Test various patterns of ... in double-quoted strings
const yaml2 = `test1: "this has ... dots"
test2: "... at start"
test3: "at end ..."
test4: "👛 ... with emoji"`;
const result2 = YAML.parse(yaml2);
expect(result2).toEqual({
test1: "this has ... dots",
test2: "... at start",
test3: "at end ...",
test4: "👛 ... with emoji",
});
// Test that both single and double quotes work
const yaml3 = `single: 'this has ... dots'
double: "this has ... dots"`;
const result3 = YAML.parse(yaml3);
expect(result3).toEqual({
single: "this has ... dots",
double: "this has ... dots",
});
});
// Regression test for #23489
test("YAML import with double-quoted strings containing ... - issue #23489", async () => {
  // Importing a .yml module goes through the same parser; a quoted "..."
  // previously surfaced as "Unexpected document end" at import time.
  using dir = tempDir("yaml-ellipsis", {
    "test.yml": 'balance: "👛 لا تمتلك محفظة... !"',
    "test.ts": `
import yaml from "./test.yml";
console.log(JSON.stringify(yaml));
`,
  });
  await using proc = Bun.spawn({
    cmd: [bunExe(), "test.ts"],
    env: bunEnv,
    cwd: String(dir),
    stdout: "pipe",
    stderr: "pipe",
  });
  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
  // The import must succeed and round-trip the exact string through JSON.
  expect(stderr).not.toContain("Unexpected document end");
  expect(exitCode).toBe(0);
  expect(stdout.trim()).toBe('{"balance":"👛 لا تمتلك محفظة... !"}');
});
// Regression test for #26088
// YAML parser was leaking memory on each parse call because AST nodes were
// not being freed. This caused segfaults after high-volume YAML parsing.
// Fix: Use ASTMemoryAllocator to ensure AST nodes are freed at end of scope.
test("YAML.parse shouldn't leak memory", () => {
  // Create YAML with 10000 single-char strings - creates many AST E.String nodes
  const items = Array.from({ length: 10000 }, () => "  - x").join("\n");
  const yaml = `list:\n${items}`;

  // Force a full GC before and after so the RSS delta reflects parser-held
  // memory rather than pending garbage.
  Bun.gc(true);
  const initialMemory = process.memoryUsage.rss();

  // Parse 100 times - each creates 10000 AST string nodes
  for (let i = 0; i < 100; i++) {
    YAML.parse(yaml);
  }

  Bun.gc(true);
  const finalMemory = process.memoryUsage.rss();

  // Memory increase should be less than 50MB if AST nodes are freed properly.
  // (Threshold is deliberately loose to tolerate allocator/GC noise.)
  const memoryIncreaseMB = (finalMemory - initialMemory) / 1024 / 1024;
  expect(memoryIncreaseMB).toBeLessThan(50);
});
});

View File

@@ -1,4 +1,5 @@
const assert = require("assert");
const assertStrict = require("assert/strict");
test("assert from require as a function does not throw", () => assert(true));
test("assert from require as a function does throw", () => {
@@ -7,3 +8,112 @@ test("assert from require as a function does throw", () => {
expect(false).toBe(true);
} catch (e) {}
});
// Regression test for #24045
test("assert.deepStrictEqual() should compare Number wrapper object values - issue #24045", () => {
  const MSG = "Expected values to be strictly deep-equal";
  // Build a thunk so each comparison can be fed to expect(...).toThrow().
  const cmp = (a, b) => () => assertStrict.deepStrictEqual(a, b);

  // Distinct boxed values must be reported as unequal.
  expect(cmp(new Number(1), new Number(2))).toThrow(MSG);
  // Identical boxed values compare equal.
  expect(cmp(new Number(1), new Number(1))).not.toThrow();
  // Strict mode distinguishes +0 from -0.
  expect(cmp(new Number(0), new Number(-0))).toThrow(MSG);
  // NaN equals NaN (SameValue semantics, not ===).
  expect(cmp(new Number(NaN), new Number(NaN))).not.toThrow();
  // Opposite infinities are unequal.
  expect(cmp(new Number(Infinity), new Number(-Infinity))).toThrow(MSG);
});
// Regression test for #24045
test("assert.deepStrictEqual() should compare Boolean wrapper object values - issue #24045", () => {
  const MSG = "Expected values to be strictly deep-equal";
  const cmp = (a, b) => () => assertStrict.deepStrictEqual(a, b);

  // true vs false wrappers are unequal.
  expect(cmp(new Boolean(true), new Boolean(false))).toThrow(MSG);
  // Wrappers boxing the same boolean compare equal.
  expect(cmp(new Boolean(true), new Boolean(true))).not.toThrow();
  expect(cmp(new Boolean(false), new Boolean(false))).not.toThrow();
});
// Regression test for #24045
test("assert.deepStrictEqual() should not compare Number wrapper with primitive", () => {
  const MSG = "Expected values to be strictly deep-equal";
  // A boxed Number never deep-strict-equals the primitive, in either order.
  const pairs = [
    [new Number(1), 1],
    [1, new Number(1)],
  ];
  for (const [a, b] of pairs) {
    expect(() => assertStrict.deepStrictEqual(a, b)).toThrow(MSG);
  }
});
// Regression test for #24045
test("assert.deepStrictEqual() should not compare Boolean wrapper with primitive", () => {
  const MSG = "Expected values to be strictly deep-equal";
  // A boxed Boolean never deep-strict-equals the primitive, in either order.
  const pairs = [
    [new Boolean(true), true],
    [false, new Boolean(false)],
  ];
  for (const [a, b] of pairs) {
    expect(() => assertStrict.deepStrictEqual(a, b)).toThrow(MSG);
  }
});
// Regression test for #24045
test("assert.deepStrictEqual() should not compare Number and Boolean wrappers", () => {
  const MSG = "Expected values to be strictly deep-equal";
  // Truthy/falsy coincidence does not make different wrapper types equal.
  const pairs = [
    [new Number(1), new Boolean(true)],
    [new Number(0), new Boolean(false)],
  ];
  for (const [a, b] of pairs) {
    expect(() => assertStrict.deepStrictEqual(a, b)).toThrow(MSG);
  }
});
// Regression test for #24045
test("assert.deepStrictEqual() should check own properties on wrapper objects", () => {
  const MSG = "Expected values to be strictly deep-equal";
  const cmp = (a, b) => () => assertStrict.deepStrictEqual(a, b);

  // Equal boxed values but divergent own properties -> unequal.
  const left = new Number(42);
  const right = new Number(42);
  left.customProp = "hello";
  expect(cmp(left, right)).toThrow(MSG);

  // Once the own properties match too, the wrappers are equal.
  right.customProp = "hello";
  expect(cmp(left, right)).not.toThrow();

  // Same boxed boolean, different own-property values -> unequal.
  const flagA = new Boolean(true);
  const flagB = new Boolean(true);
  flagA.foo = 1;
  flagB.foo = 2;
  expect(cmp(flagA, flagB)).toThrow(MSG);
});

View File

@@ -1,6 +1,7 @@
import { AsyncLocalStorage, AsyncResource } from "async_hooks";
import { describe, expect, test } from "bun:test";
import { bunEnv, bunExe } from "harness";
import { createServer } from "node:http";
describe("AsyncLocalStorage", () => {
test("throw inside of AsyncLocalStorage.run() will be passed out", () => {
@@ -560,4 +561,31 @@ describe("async context passes through", () => {
});
expect(a).toBe("value");
});
// Regression test for #18595
test("node:http server with nested als.run()", () => {
  const als = new AsyncLocalStorage();
  // The request handler re-enters als.run() with the store captured by the
  // outer run; the store must remain visible across the nested run and the
  // awaited setTimeout inside it.
  const server = createServer((req, res) => {
    const appStore = als.getStore();
    als.run(appStore, async () => {
      const out = `counter: ${++als.getStore().counter}`;
      await new Promise(resolve => setTimeout(resolve, 10));
      res.end(out);
    });
  });
  const { promise, resolve } = Promise.withResolvers();
  als.run({ counter: 0 }, () => {
    server.listen(0, async () => {
      const response = await fetch(`http://localhost:${server.address().port}`);
      // First request should observe counter incremented from 0 to 1.
      expect(await response.text()).toBe("counter: 1");
      server.close();
      resolve();
    });
  });
  // Returning the promise makes the test runner wait for the fetch + assert.
  return promise;
});
});

View File

@@ -3184,3 +3184,10 @@ describe("*Write methods with NaN/invalid offset and length", () => {
});
}
});
// Regression test for #6467
it("write(value >= 0x80)", () => {
  // "binary" (latin1) encoding must store code points above 0x7f verbatim
  // instead of mangling them through UTF-8.
  const buf = Buffer.alloc(1);
  buf.write("\x80", "binary");
  expect(buf[0]).toBe(0x80);
});

View File

@@ -1,8 +1,9 @@
import { semver, write } from "bun";
import { afterAll, beforeEach, describe, expect, it } from "bun:test";
import { semver, which, write } from "bun";
import { afterAll, beforeEach, describe, expect, it, test } from "bun:test";
import fs from "fs";
import { bunEnv, bunExe, isWindows, nodeExe, runBunInstall, shellExe, tmpdirSync } from "harness";
import { ChildProcess, exec, execFile, execFileSync, execSync, spawn, spawnSync } from "node:child_process";
import { Readable } from "node:stream";
import { promisify } from "node:util";
import path from "path";
const debug = process.env.DEBUG ? console.log : () => {};
@@ -471,3 +472,101 @@ it("spawnSync(does-not-exist)", () => {
expect(x.stdout).toEqual(null);
expect(x.stderr).toEqual(null);
});
// Regression test for #8095
// Node accepts null/undefined entries in the stdio array and treats them as
// "pipe"; spawnSync must then return Buffers, and output[0] (the stdin slot)
// is always null.
test.each([null, undefined])("spawnSync can pass %p as option to stdio", input => {
  const { stdout, stderr, output } = spawnSync(bunExe(), { stdio: [input, input, input] });
  expect(stdout).toBeInstanceOf(Buffer);
  expect(stderr).toBeInstanceOf(Buffer);
  expect(output).toStrictEqual([null, stdout, stderr]);
});
// Regression test for #8095
// Async spawn counterpart: null/undefined stdio entries must yield Readable
// pipes, and stdio[1..2] must be the same objects as stdout/stderr.
test.each([null, undefined])("spawn can pass %p as option to stdio", input => {
  const { stdout, stderr, stdio } = spawn(bunExe(), { stdio: [input, input, input] });
  expect(stdout).toBeInstanceOf(Readable);
  expect(stderr).toBeInstanceOf(Readable);
  expect(stdio).toBeArrayOfSize(3);
  expect(stdio.slice(1)).toStrictEqual([stdout, stderr]);
});
// Regression test for #9279
// Guarded on `sleep` being available on PATH (not present on stock Windows).
test.if(!!which("sleep"))("child_process.spawn({ timeout }) should not exit instantly", async () => {
  const start = performance.now();
  await new Promise<void>((resolve, reject) => {
    // timeout: 100 should kill the child after ~100ms — previously the child
    // was terminated immediately.
    const child = spawn("sleep", ["1000"], { timeout: 100 });
    child.on("error", reject);
    child.on("exit", resolve);
  });
  const end = performance.now();
  // Lower bound only: the child must have lived at least the timeout.
  expect(end - start).toBeGreaterThanOrEqual(100);
});
// Regression test for #10170
test("promisified execFile returns object with stdout and stderr", async () => {
  // util.promisify(execFile) must resolve to a { stdout, stderr } object
  // (Node.js custom-promisify behavior), not a bare string.
  const run = promisify(execFile);
  const { stdout, stderr } = await run(bunExe(), ["--version"]);
  expect(stdout).toContain(Bun.version);
  expect(stderr).toBe("");
});
// Regression test for https://github.com/microlinkhq/youtube-dl-exec/issues/246
describe("child process stdio properties should be enumerable for Object.assign()", () => {
  test("child process stdio properties should be enumerable for Object.assign()", () => {
    const child = spawn(process.execPath, ["-e", 'console.log("hello")']);
    // The real issue: stdio properties must be enumerable for Object.assign() to work
    // This is what libraries like tinyspawn depend on
    expect(Object.keys(child)).toContain("stdin");
    expect(Object.keys(child)).toContain("stdout");
    expect(Object.keys(child)).toContain("stderr");
    expect(Object.keys(child)).toContain("stdio");

    // Property descriptors should show enumerable: true
    for (const key of ["stdin", "stdout", "stderr", "stdio"] as const) {
      expect(Object.getOwnPropertyDescriptor(child, key)?.enumerable).toBe(true);
    }
  });

  test("Object.assign should copy child process stdio properties", () => {
    const child = spawn(process.execPath, ["-e", 'console.log("hello")']);
    // This is what tinyspawn does: Object.assign(promise, childProcess)
    const merged: any = {};
    Object.assign(merged, child);

    // The merged object should have the stdio properties
    expect(merged.stdout).toBeTruthy();
    expect(merged.stderr).toBeTruthy();
    expect(merged.stdin).toBeTruthy();
    expect(merged.stdio).toBeTruthy();

    // Should maintain stream functionality
    expect(typeof merged.stdout.pipe).toBe("function");
    expect(typeof merged.stdout.on).toBe("function");
  });

  test("tinyspawn-like library usage should work", () => {
    // Simulate the exact pattern from tinyspawn library
    let childProcess: any;
    const promise = new Promise(resolve => {
      childProcess = spawn(process.execPath, ["-e", 'console.log("test")']);
      childProcess.on("exit", () => resolve(childProcess));
    });

    // This is the critical line that was failing in Bun
    const subprocess: any = Object.assign(promise, childProcess);

    // Should have stdio properties immediately after Object.assign
    expect(subprocess.stdout).toBeTruthy();
    expect(subprocess.stderr).toBeTruthy();
    expect(subprocess.stdin).toBeTruthy();

    // Should still be a Promise
    expect(subprocess instanceof Promise).toBe(true);

    // Should have stream methods available
    expect(typeof subprocess.stdout.pipe).toBe("function");
    expect(typeof subprocess.stdout.on).toBe("function");
  });
});

View File

@@ -1,4 +1,5 @@
import { describe, expect, test } from "bun:test";
import { bunEnv, bunExe } from "harness";
import { Console } from "node:console";
import { Writable } from "node:stream";
@@ -88,3 +89,76 @@ test("console._stderr", () => {
configurable: true,
});
});
// Regression test for #24234
// Each test spawns a fresh Bun process with -e so the raw stdout bytes of
// console.log's %j (JSON) format specifier can be compared exactly.
describe("console.log %j format specifier", () => {
  test("console.log with %j should format as JSON", async () => {
    await using proc = Bun.spawn({
      cmd: [bunExe(), "-e", "console.log('%j', {foo: 'bar'})"],
      env: bunEnv,
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    expect(stderr).toBe("");
    expect(stdout).toBe('{"foo":"bar"}\n');
    expect(exitCode).toBe(0);
  });

  test("console.log with %j should handle arrays", async () => {
    await using proc = Bun.spawn({
      cmd: [bunExe(), "-e", "console.log('%j', [1, 2, 3])"],
      env: bunEnv,
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    expect(stderr).toBe("");
    expect(stdout).toBe("[1,2,3]\n");
    expect(exitCode).toBe(0);
  });

  test("console.log with %j should handle nested objects", async () => {
    await using proc = Bun.spawn({
      cmd: [bunExe(), "-e", "console.log('%j', {a: {b: {c: 123}}})"],
      env: bunEnv,
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    expect(stderr).toBe("");
    expect(stdout).toBe('{"a":{"b":{"c":123}}}\n');
    expect(exitCode).toBe(0);
  });

  test("console.log with %j should handle primitives", async () => {
    // Strings are JSON-quoted; numbers/booleans/null are rendered verbatim.
    await using proc = Bun.spawn({
      cmd: [bunExe(), "-e", "console.log('%j %j %j %j', 'string', 123, true, null)"],
      env: bunEnv,
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    expect(stderr).toBe("");
    expect(stdout).toBe('"string" 123 true null\n');
    expect(exitCode).toBe(0);
  });

  test("console.log with %j and additional text", async () => {
    await using proc = Bun.spawn({
      cmd: [bunExe(), "-e", "console.log('Result: %j', {status: 'ok'})"],
      env: bunEnv,
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    expect(stderr).toBe("");
    expect(stdout).toBe('Result: {"status":"ok"}\n');
    expect(exitCode).toBe(0);
  });
});

View File

@@ -308,3 +308,30 @@ it("should send cipher events in the right order", async () => {
``,
]);
});
// Regression test for #9469
it("generateKeyPair with promisify returns object with publicKey and privateKey", async () => {
  if (!crypto.generateKeyPair) {
    return; // skip if missing crypto.generateKeyPair
  }
  // The custom-promisified form must resolve to exactly
  // { publicKey, privateKey } rather than a single value or an array.
  const util = require("util");
  const generateKeyPairAsync = util.promisify(crypto.generateKeyPair);
  // Small modulus (512) keeps the test fast; both keys requested as PEM text.
  const ret = await generateKeyPairAsync("rsa", {
    publicExponent: 3,
    modulusLength: 512,
    publicKeyEncoding: {
      type: "pkcs1",
      format: "pem",
    },
    privateKeyEncoding: {
      type: "pkcs8",
      format: "pem",
    },
  });
  expect(Object.keys(ret)).toHaveLength(2);
  const { publicKey, privateKey } = ret;
  expect(typeof publicKey).toBe("string");
  expect(typeof privateKey).toBe("string");
});

View File

@@ -545,3 +545,56 @@ describe("uses `dns.promises` implementations for `util.promisify` factory", ()
expect(await util.promisify(dns.lookup)("google.com")).toEqual(await dns.promises.lookup("google.com"));
});
});
// Regression test for #22712
// NOTE(review): these tests perform live DNS lookups against public hostnames
// (dns.google / google.com) and therefore require network access.
describe("dns.resolve callback parameters match Node.js", () => {
  test("dns.resolve callback parameters", done => {
    dns.resolve("dns.google", (...args) => {
      // Should receive exactly 2 parameters: error and addresses array
      expect(args.length).toBe(2);
      expect(args[0]).toBe(null); // no error
      expect(Array.isArray(args[1])).toBe(true); // addresses should be array
      expect(args[1].every(addr => typeof addr === "string")).toBe(true); // each address should be string
      done();
    });
  });

  test("dns.resolve with A record type callback parameters", done => {
    dns.resolve("dns.google", "A", (...args) => {
      expect(args.length).toBe(2);
      expect(args[0]).toBe(null);
      expect(Array.isArray(args[1])).toBe(true);
      expect(args[1].every(addr => typeof addr === "string")).toBe(true);
      done();
    });
  });

  test("dns.resolve with AAAA record type callback parameters", done => {
    // Use a hostname that has AAAA records
    dns.resolve("google.com", "AAAA", (...args) => {
      expect(args.length).toBe(2);
      expect(args[0]).toBe(null);
      expect(Array.isArray(args[1])).toBe(true);
      expect(args[1].every(addr => typeof addr === "string")).toBe(true);
      done();
    });
  });

  test("dns.promises.resolve returns array of strings", async () => {
    const result = await dns.promises.resolve("dns.google");
    expect(Array.isArray(result)).toBe(true);
    expect(result.every(addr => typeof addr === "string")).toBe(true);
  });

  test("dns.promises.resolve with A record returns array of strings", async () => {
    const result = await dns.promises.resolve("dns.google", "A");
    expect(Array.isArray(result)).toBe(true);
    expect(result.every(addr => typeof addr === "string")).toBe(true);
  });

  test("dns.promises.resolve with AAAA record returns array of strings", async () => {
    const result = await dns.promises.resolve("google.com", "AAAA");
    expect(Array.isArray(result)).toBe(true);
    expect(result.every(addr => typeof addr === "string")).toBe(true);
  });
});

View File

@@ -882,3 +882,104 @@ test("getEventListeners", () => {
test("EventEmitter.name", () => {
expect(EventEmitter.name).toBe("EventEmitter");
});
// Regression test for #14187
test("issue-14187: abort signal cleans up listeners from EventEmitter.on()", async () => {
  const { on } = require("events");
  const ac = new AbortController();
  const ee = new EventEmitter();
  // Async generator backed by events.on(); it registers internal "beep" and
  // "error" listeners on the emitter while the iteration is pending.
  async function* gen() {
    for await (const item of on(ee, "beep", { signal: ac.signal })) {
      yield item;
    }
  }
  const iterator = gen();
  // Start the iteration; the abort below rejects this pending next().
  iterator.next().catch(() => {});
  // events.on() has installed one listener each for "beep" and "error".
  expect(ee.listenerCount("beep")).toBe(1);
  expect(ee.listenerCount("error")).toBe(1);
  ac.abort();
  // Aborting must remove both internal listeners (the leak in #14187).
  expect(ee.listenerCount("beep")).toBe(0);
  expect(ee.listenerCount("error")).toBe(0);
});
// Regression test for #24147
// removeAllListeners() called from inside an event handler used to crash when
// a "removeListener" meta-listener was also registered.
describe("removeAllListeners() from event handler with removeListener meta-listener", () => {
  test("removeAllListeners() from event handler with removeListener meta-listener", () => {
    const emitter = new EventEmitter();
    emitter.on("test", () => {
      // This should not crash even though there are no 'foo' listeners
      emitter.removeAllListeners("foo");
    });
    // Register a removeListener meta-listener to trigger the bug
    emitter.on("removeListener", () => {});
    // This should not throw
    expect(() => emitter.emit("test")).not.toThrow();
  });

  test("removeAllListeners() with actual listeners to remove", () => {
    const emitter = new EventEmitter();
    let fooCallCount = 0;
    let removeListenerCallCount = 0;
    emitter.on("foo", () => fooCallCount++);
    emitter.on("foo", () => fooCallCount++);
    emitter.on("test", () => {
      // Remove all 'foo' listeners while inside an event handler
      emitter.removeAllListeners("foo");
    });
    // Track removeListener calls
    emitter.on("removeListener", () => {
      removeListenerCallCount++;
    });
    // Emit test event which triggers removeAllListeners
    emitter.emit("test");
    // Verify listeners were removed
    expect(emitter.listenerCount("foo")).toBe(0);
    // Verify removeListener was called twice (once for each foo listener)
    expect(removeListenerCallCount).toBe(2);
    // Verify foo listeners were never called
    expect(fooCallCount).toBe(0);
  });

  test("nested removeAllListeners() calls", () => {
    const emitter = new EventEmitter();
    const events: string[] = [];
    emitter.on("outer", () => {
      events.push("outer-start");
      emitter.removeAllListeners("inner");
      events.push("outer-end");
    });
    emitter.on("inner", () => {
      events.push("inner");
    });
    emitter.on("removeListener", type => {
      events.push(`removeListener:${String(type)}`);
    });
    // This should not crash
    expect(() => emitter.emit("outer")).not.toThrow();
    // Verify correct execution order: the meta-listener fires synchronously
    // inside the outer handler, before "outer-end".
    expect(events).toEqual(["outer-start", "removeListener:inner", "outer-end"]);
    // Verify inner listeners were removed
    expect(emitter.listenerCount("inner")).toBe(0);
  });
});

View File

@@ -6,6 +6,7 @@ import {
getMaxFD,
isBroken,
isIntelMacOS,
isLinux,
isPosix,
isWindows,
tempDirWithFiles,
@@ -3680,3 +3681,148 @@ it("overflowing mode doesn't crash", () => {
}),
);
});
// Regression test for #16474
it("fs.mkdir recursive should not error on existing", async () => {
  const testDir = tmpdirSync();
  // First recursive mkdir returns the created path; a repeat returns
  // undefined (nothing created); a non-recursive repeat throws EEXIST.
  const dir1 = join(testDir, "test123");
  expect(mkdirSync(dir1, { recursive: true })).toBe(path.toNamespacedPath(dir1));
  expect(mkdirSync(dir1, { recursive: true })).toBeUndefined();
  expect(() => {
    mkdirSync(dir1);
  }).toThrow("EEXIST: file already exists");

  // relative
  // NOTE(review): these relative mkdirs create "123test" in the test runner's
  // CWD and are never cleaned up — consider scoping them under testDir.
  expect(() => {
    mkdirSync("123test", { recursive: true });
    mkdirSync("123test", { recursive: true });
    mkdirSync("123test/456test", { recursive: true });
    mkdirSync("123test/456test", { recursive: true });
  }).not.toThrow();

  // Promise API: mkdir resolves to undefined in both cases.
  const dir2 = join(testDir, "test456");
  expect(await promises.mkdir(dir2)).toBeUndefined();
  expect(await promises.mkdir(dir2, { recursive: true })).toBeUndefined();

  // nested — the return value is the FIRST directory actually created.
  const dir3 = join(testDir, "test789", "nested");
  expect(mkdirSync(dir3, { recursive: true })).toBe(path.toNamespacedPath(join(testDir, "test789")));
  expect(mkdirSync(dir3, { recursive: true })).toBeUndefined();

  // file — recursive mkdir over an existing regular file still throws EEXIST.
  const file = join(testDir, "test789", "file.txt");
  writeFileSync(file, "hi");
  expect(() => {
    mkdirSync(file, { recursive: true });
  }).toThrow("EEXIST: file already exists");
  expect(async () => {
    await promises.mkdir(file, { recursive: true });
  }).toThrow("EEXIST: file already exists");
});
// Regression test for #3657
// Linux-only: relies on inotify delivering both rename (create) and change
// (modify) events for files created after the watch started.
describe.skipIf(!isLinux)("GitHub Issue #3657", () => {
  it("fs.watch on directory emits 'change' events for files created after watch starts", async () => {
    const testDir = tempDirWithFiles("issue-3657", {});
    const testFile = path.join(testDir, "test.txt");
    const events: Array<{ eventType: string; filename: string | null }> = [];
    let resolver: () => void;
    const promise = new Promise<void>(resolve => {
      resolver = resolve;
    });
    // AbortSignal.timeout(5000) bounds the watcher so a missed event cannot
    // hang the test forever.
    const watcher = fs.watch(testDir, { signal: AbortSignal.timeout(5000) }, (eventType, filename) => {
      events.push({ eventType, filename: filename as string | null });
      // We expect at least 2 events: one rename (create) and one change (modify)
      if (events.length >= 2) {
        resolver();
      }
    });

    // Give the watcher time to initialize
    await Bun.sleep(100);

    // Create the file - should emit 'rename' event
    fs.writeFileSync(testFile, "hello");

    // Wait a bit for the event to be processed
    await Bun.sleep(100);

    // Modify the file - should emit 'change' event
    fs.appendFileSync(testFile, " world");

    try {
      await promise;
    } finally {
      watcher.close();
    }

    // Verify we got at least one event for "test.txt"
    const testFileEvents = events.filter(e => e.filename === "test.txt");
    expect(testFileEvents.length).toBeGreaterThanOrEqual(2);

    // Verify we got a 'rename' event (file creation)
    const renameEvents = testFileEvents.filter(e => e.eventType === "rename");
    expect(renameEvents.length).toBeGreaterThanOrEqual(1);

    // Verify we got a 'change' event (file modification)
    const changeEvents = testFileEvents.filter(e => e.eventType === "change");
    expect(changeEvents.length).toBeGreaterThanOrEqual(1);
  });

  it("fs.watch emits multiple 'change' events for repeated modifications", async () => {
    const testDir = tempDirWithFiles("issue-3657-multi", {});
    const testFile = path.join(testDir, "multi.txt");
    const events: Array<{ eventType: string; filename: string | null }> = [];
    let resolver: () => void;
    const promise = new Promise<void>(resolve => {
      resolver = resolve;
    });
    const watcher = fs.watch(testDir, { signal: AbortSignal.timeout(5000) }, (eventType, filename) => {
      events.push({ eventType, filename: filename as string | null });
      // We expect 1 rename (create) + 3 change events = 4 total
      if (events.length >= 4) {
        resolver();
      }
    });

    // Give the watcher time to initialize
    await Bun.sleep(100);

    // Create the file - should emit 'rename' event
    fs.writeFileSync(testFile, "line1\n");
    await Bun.sleep(100);

    // Multiple modifications - should emit 'change' events; the sleeps keep
    // the kernel from coalescing consecutive writes into one event.
    fs.appendFileSync(testFile, "line2\n");
    await Bun.sleep(100);
    fs.appendFileSync(testFile, "line3\n");
    await Bun.sleep(100);
    fs.appendFileSync(testFile, "line4\n");

    try {
      await promise;
    } finally {
      watcher.close();
    }

    // Verify we got events for "multi.txt"
    const testFileEvents = events.filter(e => e.filename === "multi.txt");
    expect(testFileEvents.length).toBeGreaterThanOrEqual(4);

    // Verify we got a 'rename' event (file creation)
    const renameEvents = testFileEvents.filter(e => e.eventType === "rename");
    expect(renameEvents.length).toBeGreaterThanOrEqual(1);

    // Verify we got multiple 'change' events (file modifications)
    const changeEvents = testFileEvents.filter(e => e.eventType === "change");
    expect(changeEvents.length).toBeGreaterThanOrEqual(3);
  });
});

View File

@@ -1708,3 +1708,212 @@ describe("HTTP Server Security Tests - Advanced", () => {
expect(text).toBe("Hello World");
});
});
// Regression test for #25862
// Pipelined data sent immediately after CONNECT request headers should be
// delivered to the `head` parameter of the 'connect' event handler.
test("CONNECT request should receive pipelined data in head parameter", async () => {
  const PIPELINED_DATA = "PIPELINED_DATA";
  const { promise: headReceived, resolve: resolveHead } = Promise.withResolvers<Buffer>();

  await using server = http.createServer();
  server.on("connect", (req, socket, head) => {
    // Capture whatever bytes arrived with/after the CONNECT headers.
    resolveHead(head);
    socket.write("HTTP/1.1 200 Connection Established\r\n\r\n");
    socket.end();
  });

  await once(server.listen(0, "127.0.0.1"), "listening");
  const { port, address } = server.address() as AddressInfo;

  const { promise: clientDone, resolve: resolveClient } = Promise.withResolvers<void>();
  const client = connect({ port, host: address }, () => {
    // Send CONNECT request with pipelined data in the same write
    // This simulates what Cap'n Proto's KJ HTTP library does
    client.write(`CONNECT example.com:443 HTTP/1.1\r\nHost: example.com:443\r\n\r\n${PIPELINED_DATA}`);
  });
  client.on("data", () => {
    // We got the response, we can close
    client.end();
  });
  client.on("close", () => {
    resolveClient();
  });

  const head = await headReceived;
  await clientDone;

  // The head must contain exactly the pipelined bytes — nothing consumed,
  // nothing extra.
  expect(head).toBeInstanceOf(Buffer);
  expect(head.length).toBe(PIPELINED_DATA.length);
  expect(head.toString()).toBe(PIPELINED_DATA);
});
// Regression test for #26143 - https GET request with body hangs
describe("issue #26143 - https GET request with body hangs", () => {
test("http.request GET with body should complete", async () => {
// Use Node.js-style http.createServer which properly handles bodies on all methods
const server = http.createServer((req: any, res: any) => {
let body = "";
req.on("data", (chunk: string) => {
body += chunk;
});
req.on("end", () => {
res.writeHead(200, { "Content-Type": "application/json" });
res.end(JSON.stringify({ received: body }));
});
});
await new Promise<void>(resolve => server.listen(0, resolve));
const port = server.address().port;
try {
const result = await new Promise<{ status: number; data: string }>((resolve, reject) => {
const options = {
hostname: "localhost",
port,
path: "/test",
method: "GET",
headers: {
"Content-Type": "application/json",
"Content-Length": 2,
},
};
const req = http.request(options, (res: any) => {
let data = "";
res.on("data", (chunk: string) => {
data += chunk;
});
res.on("end", () => {
resolve({ status: res.statusCode, data });
});
});
req.on("error", reject);
req.write("{}");
req.end();
});
expect(result.status).toBe(200);
expect(result.data).toContain('"received":"{}"');
} finally {
server.close();
}
});
test("GET request without body should still work", async () => {
const server = http.createServer((req: any, res: any) => {
res.writeHead(200, { "Content-Type": "application/json" });
res.end(JSON.stringify({ method: req.method }));
});
await new Promise<void>(resolve => server.listen(0, resolve));
const port = server.address().port;
try {
const result = await new Promise<{ status: number; data: string }>((resolve, reject) => {
const options = {
hostname: "localhost",
port,
path: "/test",
method: "GET",
};
const req = http.request(options, (res: any) => {
let data = "";
res.on("data", (chunk: string) => {
data += chunk;
});
res.on("end", () => {
resolve({ status: res.statusCode, data });
});
});
req.on("error", reject);
req.end();
});
expect(result.status).toBe(200);
expect(result.data).toContain('"method":"GET"');
} finally {
server.close();
}
});
// A HEAD request that carries a body must still complete: the server drains the
// body and answers with headers only, which the client must be able to observe.
test("HEAD request with body should complete", async () => {
  const server = http.createServer((req: any, res: any) => {
    let payload = "";
    req.on("data", (chunk: string) => {
      payload += chunk;
    });
    req.on("end", () => {
      // Reflect the received body in a header since HEAD responses have no body.
      res.writeHead(200, { "X-Custom": "header", "X-Body-Received": payload });
      res.end();
    });
  });
  await new Promise<void>(done => server.listen(0, done));
  const port = server.address().port;
  try {
    const outcome = await new Promise<{ status: number; header: string | undefined }>((resolve, reject) => {
      const req = http.request(
        {
          hostname: "localhost",
          port,
          path: "/test",
          method: "HEAD",
          headers: { "Content-Type": "application/json", "Content-Length": 2 },
        },
        (res: any) => {
          res.on("data", () => {});
          res.on("end", () => resolve({ status: res.statusCode, header: res.headers["x-custom"] }));
        },
      );
      req.on("error", reject);
      req.write("{}");
      req.end();
    });
    expect(outcome.status).toBe(200);
    expect(outcome.header).toBe("header");
  } finally {
    server.close();
  }
});
// Regression guard: without `allowGetBody`, fetch() must reject a GET request
// that carries a body with the standard error message.
test("Bun.fetch without allowGetBody should still throw", async () => {
  const server = http.createServer((req: any, res: any) => {
    res.writeHead(200);
    res.end();
  });
  await new Promise<void>(resolve => server.listen(0, resolve));
  const port = server.address().port;
  try {
    // FIX: for an async callback the matcher returns a promise; without `await`
    // the assertion could resolve after the test has already completed, so a
    // failure would be silently dropped.
    await expect(async () => {
      await fetch(`http://localhost:${port}/test`, {
        method: "GET",
        headers: {
          "Content-Type": "application/json",
          "Content-Length": "2",
        },
        body: "{}",
      });
    }).toThrow("fetch() request with GET/HEAD/OPTIONS method cannot have body.");
  } finally {
    server.close();
  }
});
});

View File

@@ -1,10 +1,11 @@
import { Socket as _BunSocket, TCPSocketListener } from "bun";
import { heapStats } from "bun:jsc";
import { describe, expect, it } from "bun:test";
import { bunEnv, bunExe, expectMaxObjectTypeCount, isWindows, tmpdirSync } from "harness";
import { bunEnv, bunExe, expectMaxObjectTypeCount, isWindows, tls as tlsCert, tmpdirSync } from "harness";
import { randomUUID } from "node:crypto";
import { connect, createConnection, createServer, isIP, isIPv4, isIPv6, Server, Socket, Stream } from "node:net";
import net, { connect, createConnection, createServer, isIP, isIPv4, isIPv6, Server, Socket, Stream } from "node:net";
import { join } from "node:path";
import tls from "node:tls";
const socket_domain = tmpdirSync();
@@ -656,3 +657,266 @@ it.if(isWindows)(
},
20_000,
);
// Regression test for #22481
// net client sockets must accept raw TypedArray payloads in write(), not only
// Buffer/string. Verified against an echo server in three variants below.
it("client socket can write Uint8Array", async () => {
const server = createServer(socket => {
socket.on("data", data => {
// Echo back what we received
socket.write(data);
socket.end();
});
});
await new Promise<void>(resolve => {
server.listen(0, "127.0.0.1", () => resolve());
});
const port = (server.address() as any).port;
const testData = "Hello from Uint8Array!";
const u8 = new Uint8Array(testData.split("").map(x => x.charCodeAt(0)));
// Test with Uint8Array
{
const received = await new Promise<string>((resolve, reject) => {
const client = createConnection(port, "127.0.0.1", () => {
// Write Uint8Array directly
client.write(u8, err => {
if (err) reject(err);
});
});
let data = "";
client.on("data", chunk => {
data += chunk.toString();
});
client.on("end", () => {
resolve(data);
});
client.on("error", reject);
});
expect(received).toBe(testData);
}
// Test with Buffer.from(Uint8Array) for comparison
{
const received = await new Promise<string>((resolve, reject) => {
const client = createConnection(port, "127.0.0.1", () => {
// Write Buffer created from Uint8Array
client.write(Buffer.from(u8), err => {
if (err) reject(err);
});
});
let data = "";
client.on("data", chunk => {
data += chunk.toString();
});
client.on("end", () => {
resolve(data);
});
client.on("error", reject);
});
expect(received).toBe(testData);
}
// Test with other TypedArrays (Float32Array view)
{
const float32 = new Float32Array([1.5, 2.5]);
const u8view = new Uint8Array(float32.buffer);
const received = await new Promise<Buffer>((resolve, reject) => {
const client = createConnection(port, "127.0.0.1", () => {
client.write(u8view, err => {
if (err) reject(err);
});
});
// Compare raw bytes here (not a decoded string) since the payload is not text.
const chunks: Buffer[] = [];
client.on("data", chunk => {
chunks.push(chunk);
});
client.on("end", () => {
resolve(Buffer.concat(chunks));
});
client.on("error", reject);
});
// Check that we received the same bytes back
expect(received).toEqual(Buffer.from(u8view));
}
server.close();
});
// Regression test for #24575
// Node exposes the OS file descriptor on socket._handle.fd; Bun must as well,
// on both the accepted (server-side) and connecting (client-side) sockets.
it("socket._handle.fd should be accessible on TCP sockets", async () => {
const { promise, resolve, reject } = Promise.withResolvers<void>();
let serverFd: number | undefined;
let clientFd: number | undefined;
const server = net.createServer(socket => {
// Server-side socket should have _handle.fd
expect(socket._handle).toBeDefined();
expect(socket._handle.fd).toBeTypeOf("number");
expect(socket._handle.fd).toBeGreaterThan(0);
serverFd = socket._handle.fd;
socket.end(`server fd: ${socket._handle.fd}`);
});
server.listen(0, "127.0.0.1", () => {
const client = net.connect({
host: "127.0.0.1",
port: (server.address() as any).port,
});
client.on("connect", () => {
// Client-side socket should have _handle.fd
expect(client._handle).toBeDefined();
expect(client._handle.fd).toBeTypeOf("number");
expect(client._handle.fd).toBeGreaterThan(0);
clientFd = client._handle.fd;
});
// By the time data arrives both fds have been captured, so assert them here.
client.on("data", data => {
const response = data.toString();
expect(response).toStartWith("server fd: ");
// Verify we got valid fds
expect(serverFd).toBeTypeOf("number");
expect(clientFd).toBeTypeOf("number");
expect(serverFd).toBeGreaterThan(0);
expect(clientFd).toBeGreaterThan(0);
// Server and client should have different fds
expect(serverFd).not.toBe(clientFd);
server.close();
resolve();
});
client.on("error", reject);
});
server.on("error", reject);
await promise;
});
// Regression test for #24575
// The fd reported by _handle.fd must stay stable across writes on the server
// side and across multiple 'data' events on the client side.
it("socket._handle.fd should remain consistent during connection lifetime", async () => {
const { promise, resolve, reject } = Promise.withResolvers<void>();
const server = net.createServer(socket => {
const initialFd = socket._handle.fd;
// Send multiple messages to ensure fd doesn't change
socket.write("message1\n");
expect(socket._handle.fd).toBe(initialFd);
socket.write("message2\n");
expect(socket._handle.fd).toBe(initialFd);
socket.end("message3\n");
expect(socket._handle.fd).toBe(initialFd);
});
server.listen(0, "127.0.0.1", () => {
const client = net.connect({
host: "127.0.0.1",
port: (server.address() as any).port,
});
let initialClientFd: number;
let buffer = "";
client.on("connect", () => {
initialClientFd = client._handle.fd;
expect(initialClientFd).toBeGreaterThan(0);
});
client.on("data", data => {
buffer += data.toString();
// Fd should remain consistent across multiple data events
expect(client._handle.fd).toBe(initialClientFd);
});
client.on("end", () => {
// Verify we received all messages
expect(buffer).toBe("message1\nmessage2\nmessage3\n");
server.close();
resolve();
});
client.on("error", reject);
});
server.on("error", reject);
await promise;
});
// Regression test for #24575
// Same as the TCP case above but over TLS. Note: assertions here are looser —
// only the *type* of fd is checked, since TLS wrapper sockets may legitimately
// report -1 on some platforms/states.
it("socket._handle.fd should be accessible on TLS sockets", async () => {
const { promise, resolve, reject } = Promise.withResolvers<void>();
let serverFd: number | undefined;
let clientFd: number | undefined;
const server = tls.createServer(tlsCert, socket => {
// Server-side TLS socket should have _handle.fd
expect(socket._handle).toBeDefined();
expect(socket._handle.fd).toBeTypeOf("number");
// TLS sockets should have a valid fd (may be -1 on some platforms/states)
expect(typeof socket._handle.fd).toBe("number");
serverFd = socket._handle.fd;
socket.end(`server fd: ${socket._handle.fd}`);
});
server.listen(0, "127.0.0.1", () => {
const client = tls.connect({
host: "127.0.0.1",
port: (server.address() as any).port,
rejectUnauthorized: false,
});
client.on("secureConnect", () => {
// Client-side TLS socket should have _handle.fd
expect(client._handle).toBeDefined();
expect(client._handle.fd).toBeTypeOf("number");
// TLS sockets should have a valid fd (may be -1 on some platforms/states)
expect(typeof client._handle.fd).toBe("number");
clientFd = client._handle.fd;
});
client.on("data", data => {
const response = data.toString();
expect(response).toMatch(/server fd: -?\d+/);
// Verify we got valid fds (number type, even if -1)
expect(serverFd).toBeTypeOf("number");
expect(clientFd).toBeTypeOf("number");
server.close();
resolve();
});
client.on("error", reject);
});
server.on("error", reject);
await promise;
});

View File

@@ -1178,3 +1178,134 @@ it("process.versions", () => {
expect(process.versions.napi).toEqual("10");
expect(process.versions.modules).toEqual("137");
});
// Regression test for #1632
// Writing to stdout after the reading end is destroyed (broken pipe / EPIPE)
// must surface as an error: process.stdout.write() should make the process
// exit non-zero, while console.log swallows the error without panicking.
// NOTE(review): these tests race the child's write against stdout.destroy();
// several assertions below are deliberately conditional on which side won.
describe("issue #1632 - broken pipe behavior for process.stdout.write()", () => {
const { spawn } = require("child_process");
it("process.stdout.write() should exit non-zero on broken pipe", async () => {
// Use child_process.spawn to get proper Node-style streams with destroy()
const child = spawn(bunExe(), ["-e", 'process.stdout.write("testing\\n");'], {
env: bunEnv,
stdio: ["pipe", "pipe", "pipe"],
});
// Destroy stdout immediately to create a broken pipe
child.stdout.destroy();
const exitCode = await new Promise(resolve => {
child.on("exit", resolve);
});
// The process should exit with a non-zero code due to the unhandled EPIPE error
// Node.js exits with code 1 in this case
expect(exitCode).not.toBe(0);
});
it("console.log should not panic on broken pipe", async () => {
// console.log should ignore errors (uses catch {}) and not crash
const child = spawn(bunExe(), ["-e", 'console.log("testing");'], {
env: bunEnv,
stdio: ["pipe", "pipe", "pipe"],
});
// Destroy stdout immediately
child.stdout.destroy();
let stderr = "";
child.stderr.on("data", data => {
stderr += data.toString();
});
await new Promise(resolve => {
child.on("exit", resolve);
});
// console.log ignores errors, so the process shouldn't panic
expect(stderr).not.toContain("panic");
});
it("matches Node.js behavior - broken pipe causes exit code 1", async () => {
// This test spawns a subprocess that tries to write to a destroyed stdout
// using child_process.exec pattern from the original issue
await using proc = Bun.spawn({
cmd: [
bunExe(),
"-e",
`
const { exec } = require("child_process");
const child = exec(process.execPath + ' -e "process.stdout.write(\\'testing\\\\n\\')"', (err) => {
if (err) {
console.log("exit_code:" + err.code);
console.log("killed:" + err.killed);
console.log("signal:" + err.signal);
} else {
console.log("no_error");
}
});
child.stdout.destroy();
`,
],
env: bunEnv,
stdout: "pipe",
stderr: "pipe",
});
const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
// The parent process should complete successfully
expect(exitCode).toBe(0);
// The child should have exited with an error (code 1) due to EPIPE
// Node.js behavior: "1 false null" - exit code 1, not killed, no signal
// If it says no_error, the write completed before stdout was destroyed (timing)
if (stdout.includes("exit_code:")) {
expect(stdout).toContain("exit_code:1");
}
});
it("process.stdout.write() callback receives EPIPE error", async () => {
// Test that the write callback receives the EPIPE error
const child = spawn(
bunExe(),
[
"-e",
`
// Handle the error via callback
process.stdout.write("testing\\n", (err) => {
if (err) {
// Error should have code EPIPE
console.error("ERROR_CODE:" + err.code);
process.exit(42);
}
process.exit(0);
});
`,
],
{
env: bunEnv,
stdio: ["pipe", "pipe", "pipe"],
},
);
// Destroy stdout immediately to create broken pipe
child.stdout.destroy();
let stderr = "";
child.stderr.on("data", data => {
stderr += data.toString();
});
const exitCode = await new Promise(resolve => {
child.on("exit", resolve);
});
// Either:
// 1. The error callback was called with EPIPE and process exited with 42, or
// 2. The write completed before stdout was destroyed and process exited with 0
// Both are acceptable - we mainly want to verify it doesn't exit 0 silently when there IS an error
if (exitCode === 42) {
expect(stderr).toContain("ERROR_CODE:EPIPE");
}
});
});

View File

@@ -245,3 +245,67 @@ describe.each(["with", "without"])("setImmediate %s timers running", mode => {
// An exception thrown inside a setImmediate callback must not starve queued
// microtasks; the fixture script asserts they still run. `toRun` spawns the
// fixture in a child `bun run` and passes when it exits cleanly.
it("should defer microtasks when an exception is thrown in an immediate", async () => {
expect(["run", path.join(import.meta.dir, "timers-immediate-exception-fixture.js")]).toRun();
});
// Regression test for #25639 - setTimeout Timeout object missing _idleStart property
// Next.js 16 uses _idleStart to coordinate timers for Cache Components
describe("_idleStart", () => {
  test("setTimeout returns Timeout object with _idleStart property", () => {
    const handle = setTimeout(() => {}, 100);
    try {
      // The property must exist, be numeric, and hold a positive timestamp.
      expect("_idleStart" in handle).toBe(true);
      expect(typeof handle._idleStart).toBe("number");
      expect(handle._idleStart).toBeGreaterThan(0);
    } finally {
      clearTimeout(handle);
    }
  });
  test("setInterval returns Timeout object with _idleStart property", () => {
    const handle = setInterval(() => {}, 100);
    try {
      // Intervals share the Timeout shape, so the same contract applies.
      expect("_idleStart" in handle).toBe(true);
      expect(typeof handle._idleStart).toBe("number");
      expect(handle._idleStart).toBeGreaterThan(0);
    } finally {
      clearInterval(handle);
    }
  });
  test("_idleStart is writable (Next.js modifies it to coordinate timers)", () => {
    const handle = setTimeout(() => {}, 100);
    try {
      const before = handle._idleStart;
      expect(typeof before).toBe("number");
      // Next.js rewinds _idleStart to re-schedule timers; the write must stick.
      const rewound = before - 100;
      handle._idleStart = rewound;
      expect(handle._idleStart).toBe(rewound);
    } finally {
      clearTimeout(handle);
    }
  });
  test("timers created at different times have different _idleStart values", async () => {
    const first = setTimeout(() => {}, 100);
    // Sleep briefly so the second timer is created at a later timestamp.
    await Bun.sleep(10);
    const second = setTimeout(() => {}, 100);
    try {
      expect(second._idleStart).toBeGreaterThanOrEqual(first._idleStart);
    } finally {
      clearTimeout(first);
      clearTimeout(second);
    }
  });
});

View File

@@ -1,6 +1,8 @@
import { describe, expect, it } from "bun:test";
import { isWindows } from "harness";
import { WriteStream } from "node:tty";
import { describe, expect, it, test } from "bun:test";
import { openSync } from "fs";
import { bunEnv, bunExe, isWindows, normalizeBunSnapshot, tempDir } from "harness";
import tty, { WriteStream } from "node:tty";
import { join } from "path";
describe("WriteStream.prototype.getColorDepth", () => {
it("iTerm ancient", () => {
@@ -24,3 +26,486 @@ describe("WriteStream.prototype.getColorDepth", () => {
expect(WriteStream.prototype.getColorDepth.call(undefined, {})).toBe(isWindows ? 24 : 1);
});
});
// Regression tests for #22591 - TTY reopening after stdin EOF
// A tty.ReadStream constructed from a /dev/tty fd must be recognized as a TTY
// and expose chainable ref()/unref() like Node.
test("tty.ReadStream should have ref/unref methods when opened on /dev/tty", () => {
// Skip this test if /dev/tty is not available (e.g., in CI without TTY)
let ttyFd: number;
try {
ttyFd = openSync("/dev/tty", "r");
} catch (err: any) {
if (err.code === "ENXIO" || err.code === "ENOENT") {
// No TTY available, skip the test
return;
}
throw err;
}
try {
// Create a tty.ReadStream with the /dev/tty file descriptor
const stream = new tty.ReadStream(ttyFd);
// Verify the stream is recognized as a TTY
expect(stream.isTTY).toBe(true);
// Verify ref/unref methods exist
expect(typeof stream.ref).toBe("function");
expect(typeof stream.unref).toBe("function");
// Verify ref/unref return the stream for chaining
expect(stream.ref()).toBe(stream);
expect(stream.unref()).toBe(stream);
// Clean up - destroy will close the fd
stream.destroy();
} finally {
// Don't double-close the fd - stream.destroy() already closed it
}
});
// Companion to the test above: runs a child bun process that unrefs a
// /dev/tty ReadStream and verifies the process can exit on its own,
// matching Node's event-loop ref-counting behavior.
test("tty.ReadStream ref/unref should behave like Node.js", async () => {
// Skip on Windows - no /dev/tty
if (process.platform === "win32") {
return;
}
// Create a test script that uses tty.ReadStream with ref/unref
const script = `
const fs = require('fs');
const tty = require('tty');
let ttyFd;
try {
ttyFd = fs.openSync('/dev/tty', 'r');
} catch (err) {
// No TTY available
console.log('NO_TTY');
process.exit(0);
}
const stream = new tty.ReadStream(ttyFd);
// Test that ref/unref methods exist and work
if (typeof stream.ref !== 'function' || typeof stream.unref !== 'function') {
console.error('ref/unref methods missing');
process.exit(1);
}
// Unref should allow process to exit
stream.unref();
// Set a timer that would keep process alive if ref() was called
const timer = setTimeout(() => {
console.log('TIMEOUT');
}, 100);
timer.unref();
// Process should exit immediately since both stream and timer are unref'd
console.log('SUCCESS');
// Clean up properly
stream.destroy();
`;
// Write the test script to a temporary file
const path = require("path");
const os = require("os");
const tempFile = path.join(os.tmpdir(), "test-tty-ref-unref-" + Date.now() + ".js");
await Bun.write(tempFile, script);
// Run the script with bun
const proc = Bun.spawn({
cmd: [bunExe(), tempFile],
env: bunEnv,
stdout: "pipe",
stderr: "pipe",
});
const [exitCode, stdout, stderr] = await Promise.all([proc.exited, proc.stdout.text(), proc.stderr.text()]);
if (stdout.includes("NO_TTY")) {
// No TTY available in test environment, skip
return;
}
expect(stderr).toBe("");
expect(exitCode).toBe(0);
expect(normalizeBunSnapshot(stdout)).toMatchInlineSnapshot(`"SUCCESS"`);
});
// Regression test for #22591 - can reopen /dev/tty after stdin EOF
// Reads piped stdin to EOF in a child process, then reopens /dev/tty and
// toggles raw mode — the pattern TUI tools (e.g. Claude Code) rely on. When
// available, the `script` utility provides a real PTY so /dev/tty exists.
test.skipIf(isWindows)("can reopen /dev/tty after stdin EOF for interactive session", async () => {
// This test ensures that Bun can reopen /dev/tty after stdin reaches EOF,
// which is needed for tools like Claude Code that read piped input then
// switch to interactive mode.
// Create test script that reads piped input then reopens TTY
const testScript = `
const fs = require('fs');
const tty = require('tty');
// Read piped input
let inputData = '';
process.stdin.on('data', (chunk) => {
inputData += chunk;
});
process.stdin.on('end', () => {
console.log('GOT_INPUT:' + inputData.trim());
// After stdin ends, reopen TTY for interaction
try {
const fd = fs.openSync('/dev/tty', 'r+');
console.log('OPENED_TTY:true');
const ttyStream = new tty.ReadStream(fd);
console.log('CREATED_STREAM:true');
console.log('POS:' + ttyStream.pos);
console.log('START:' + ttyStream.start);
// Verify we can set raw mode
if (typeof ttyStream.setRawMode === 'function') {
ttyStream.setRawMode(true);
console.log('SET_RAW_MODE:true');
ttyStream.setRawMode(false);
}
ttyStream.destroy();
fs.closeSync(fd);
console.log('SUCCESS:true');
process.exit(0);
} catch (err) {
console.log('ERROR:' + err.code);
process.exit(1);
}
});
if (process.stdin.isTTY) {
console.log('ERROR:NO_PIPED_INPUT');
process.exit(1);
}
`;
using dir = tempDir("tty-reopen", {});
const scriptPath = join(String(dir), "test.js");
await Bun.write(scriptPath, testScript);
// Check if script command is available (might not be on Alpine by default)
const hasScript = Bun.which("script");
if (!hasScript) {
// Try without script - if /dev/tty isn't available, test will fail appropriately
await using proc = Bun.spawn({
cmd: ["sh", "-c", `echo "test input" | ${bunExe()} ${scriptPath}`],
env: bunEnv,
cwd: String(dir),
stdout: "pipe",
stderr: "pipe",
});
const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
// If it fails with ENXIO, skip the test
if (exitCode !== 0 && stdout.includes("ERROR:ENXIO")) {
console.log("Skipping test: requires 'script' command for PTY simulation");
return;
}
// Otherwise check results - snapshot first to see what happened
const output = stdout + (stderr ? "\nSTDERR:\n" + stderr : "");
expect(normalizeBunSnapshot(output, dir)).toMatchInlineSnapshot(`
"GOT_INPUT:test input
OPENED_TTY:true
CREATED_STREAM:true
POS:undefined
START:undefined
SET_RAW_MODE:true
SUCCESS:true"
`);
expect(exitCode).toBe(0);
return;
}
// Use script command to provide a PTY environment
// This simulates a real terminal where /dev/tty is available
// macOS and Linux have different script command syntax
const isMacOS = process.platform === "darwin";
const scriptCmd = isMacOS
? ["script", "-q", "/dev/null", "sh", "-c", `echo "test input" | ${bunExe()} ${scriptPath}`]
: ["script", "-q", "-c", `echo "test input" | ${bunExe()} ${scriptPath}`, "/dev/null"];
await using proc = Bun.spawn({
cmd: scriptCmd,
env: bunEnv,
cwd: String(dir),
stdout: "pipe",
stderr: "pipe",
});
const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
// First snapshot the combined output to see what actually happened
const output = stdout + (stderr ? "\nSTDERR:\n" + stderr : "");
// Use JSON.stringify to make control characters visible
const jsonOutput = JSON.stringify(normalizeBunSnapshot(output, dir));
// macOS script adds control characters, Linux doesn't
const expected = isMacOS
? `"^D\\b\\bGOT_INPUT:test input\\nOPENED_TTY:true\\nCREATED_STREAM:true\\nPOS:undefined\\nSTART:undefined\\nSET_RAW_MODE:true\\nSUCCESS:true"`
: `"GOT_INPUT:test input\\nOPENED_TTY:true\\nCREATED_STREAM:true\\nPOS:undefined\\nSTART:undefined\\nSET_RAW_MODE:true\\nSUCCESS:true"`;
expect(jsonOutput).toBe(expected);
// Then check exit code
expect(exitCode).toBe(0);
});
// Regression test for #22591 - TTY ReadStream should not set position for character devices
// If pos/start were set, fs reads would use pread(), which fails with ESPIPE
// on character devices like /dev/tty; both must stay undefined so read() is used.
test.skipIf(isWindows)("TTY ReadStream should not set position for character devices", async () => {
// This test ensures that when creating a ReadStream with an fd (like for TTY),
// the position remains undefined so that fs.read uses read() syscall instead
// of pread() which would fail with ESPIPE on character devices.
const testScript = `
const fs = require('fs');
const tty = require('tty');
try {
const fd = fs.openSync('/dev/tty', 'r+');
const ttyStream = new tty.ReadStream(fd);
// These should be undefined for TTY streams
console.log('POS_TYPE:' + typeof ttyStream.pos);
console.log('START_TYPE:' + typeof ttyStream.start);
// Monkey-patch fs.read to check what position is passed
const originalRead = fs.read;
let capturedPosition = 'NOT_CALLED';
let readCalled = false;
fs.read = function(fd, buffer, offset, length, position, callback) {
capturedPosition = position;
readCalled = true;
// Don't actually read, just call callback with 0 bytes
process.nextTick(() => callback(null, 0, buffer));
return originalRead;
};
// Set up data handler to trigger read
ttyStream.on('data', () => {});
ttyStream.on('error', () => {});
// Immediately log the state since we don't actually need to wait for a real read
console.log('POSITION_PASSED:' + capturedPosition);
console.log('POSITION_TYPE:' + typeof capturedPosition);
console.log('READ_CALLED:' + readCalled);
ttyStream.destroy();
fs.closeSync(fd);
process.exit(0);
} catch (err) {
console.log('ERROR:' + err.code);
process.exit(1);
}
`;
using dir = tempDir("tty-position", {});
const scriptPath = join(String(dir), "test.js");
await Bun.write(scriptPath, testScript);
// Check if script command is available
const hasScript = Bun.which("script");
if (!hasScript) {
// Try without script
await using proc = Bun.spawn({
cmd: [bunExe(), scriptPath],
env: bunEnv,
cwd: String(dir),
stdout: "pipe",
stderr: "pipe",
});
const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
if (exitCode !== 0 && stdout.includes("ERROR:ENXIO")) {
console.log("Skipping test: requires 'script' command for PTY simulation");
return;
}
// Snapshot first to see what happened
const output = stdout + (stderr ? "\nSTDERR:\n" + stderr : "");
expect(normalizeBunSnapshot(output, dir)).toMatchInlineSnapshot(`
"POS_TYPE:undefined
START_TYPE:undefined
POSITION_PASSED:NOT_CALLED
POSITION_TYPE:string
READ_CALLED:false"
`);
expect(exitCode).toBe(0);
return;
}
// Use script command to provide a PTY environment
// macOS and Linux have different script command syntax
const isMacOS = process.platform === "darwin";
const scriptCmd = isMacOS
? ["script", "-q", "/dev/null", bunExe(), scriptPath]
: ["script", "-q", "-c", `${bunExe()} ${scriptPath}`, "/dev/null"];
await using proc = Bun.spawn({
cmd: scriptCmd,
env: bunEnv,
cwd: String(dir),
stdout: "pipe",
stderr: "pipe",
});
const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
// First snapshot the combined output to see what actually happened
const output = stdout + (stderr ? "\nSTDERR:\n" + stderr : "");
// Use JSON.stringify to make control characters visible
const jsonOutput = JSON.stringify(normalizeBunSnapshot(output, dir));
// macOS script adds control characters, Linux doesn't
const expected = isMacOS
? `"^D\\b\\bPOS_TYPE:undefined\\nSTART_TYPE:undefined\\nPOSITION_PASSED:NOT_CALLED\\nPOSITION_TYPE:string\\nREAD_CALLED:false"`
: `"POS_TYPE:undefined\\nSTART_TYPE:undefined\\nPOSITION_PASSED:NOT_CALLED\\nPOSITION_TYPE:string\\nREAD_CALLED:false"`;
expect(jsonOutput).toBe(expected);
// Then check exit code
expect(exitCode).toBe(0);
});
// Regression test for #22591 - TUI app pattern: read piped stdin then reopen /dev/tty
// End-to-end version of the reopen test: consume piped stdin via async
// iteration, then reopen /dev/tty and exercise ref/unref under a real PTY
// (provided by the `script` utility, which is required here).
test("TUI app pattern: read piped stdin then reopen /dev/tty", async () => {
// Skip on Windows - no /dev/tty
if (process.platform === "win32") {
return;
}
// Check if 'script' command is available for TTY simulation
const scriptPathCmd = Bun.which("script");
if (!scriptPathCmd) {
// Skip test on platforms without 'script' command
return;
}
// Create a simpler test script that mimics TUI app behavior
const tuiAppPattern = `
const fs = require('fs');
const tty = require('tty');
async function main() {
// Step 1: Check if stdin is piped
if (!process.stdin.isTTY) {
// Read all piped input
let input = '';
for await (const chunk of process.stdin) {
input += chunk;
}
console.log('PIPED_INPUT:' + input.trim());
// Step 2: After stdin EOF, try to reopen /dev/tty
try {
const ttyFd = fs.openSync('/dev/tty', 'r');
const ttyStream = new tty.ReadStream(ttyFd);
// Verify TTY stream has expected properties
if (!ttyStream.isTTY) {
console.error('ERROR: tty.ReadStream not recognized as TTY');
process.exit(1);
}
// Verify ref/unref methods exist and work
if (typeof ttyStream.ref !== 'function' || typeof ttyStream.unref !== 'function') {
console.error('ERROR: ref/unref methods missing');
process.exit(1);
}
// Test that we can call ref/unref without errors
ttyStream.unref();
ttyStream.ref();
console.log('TTY_REOPENED:SUCCESS');
// Clean up - only destroy the stream, don't double-close the fd
ttyStream.destroy();
} catch (err) {
console.error('ERROR:' + err.code + ':' + err.message);
process.exit(1);
}
} else {
console.log('NO_PIPE');
}
}
main().catch(err => {
console.error('UNCAUGHT:' + err.message);
process.exit(1);
});
`;
using dir = tempDir("tui-app-test", {
"tui-app-sim.js": tuiAppPattern,
});
// Create a simple test that pipes input
// macOS and Linux have different script command syntax
const isMacOS = process.platform === "darwin";
const cmd = isMacOS
? [scriptPathCmd, "-q", "/dev/null", "sh", "-c", `echo "piped content" | ${bunExe()} tui-app-sim.js`]
: [scriptPathCmd, "-q", "-c", `echo "piped content" | ${bunExe()} tui-app-sim.js`, "/dev/null"];
const proc = Bun.spawn({
cmd,
env: bunEnv,
cwd: String(dir),
stdout: "pipe",
stderr: "pipe",
});
const [exitCode, stdout, stderr] = await Promise.all([proc.exited, proc.stdout.text(), proc.stderr.text()]);
// First snapshot the combined output to see what actually happened
const output = stdout + (stderr ? "\nSTDERR:\n" + stderr : "");
// Use JSON.stringify to make control characters visible
const jsonOutput = JSON.stringify(normalizeBunSnapshot(output, dir));
// macOS script adds control characters, Linux doesn't
const expected = isMacOS
? `"^D\\b\\bPIPED_INPUT:piped content\\nTTY_REOPENED:SUCCESS"`
: `"PIPED_INPUT:piped content\\nTTY_REOPENED:SUCCESS"`;
expect(jsonOutput).toBe(expected);
// Then check exit code
expect(exitCode).toBe(0);
});
// Regression test for #22591 - tty.ReadStream handles non-TTY file descriptors correctly
test("tty.ReadStream handles non-TTY file descriptors correctly", () => {
  const fs = require("fs");
  const path = require("path");
  const os = require("os");
  // Back the stream with an ordinary temp file rather than a terminal device.
  const tempFile = path.join(os.tmpdir(), "test-regular-file-" + Date.now() + ".txt");
  fs.writeFileSync(tempFile, "test content");
  try {
    const stream = new tty.ReadStream(fs.openSync(tempFile, "r"));
    // A regular file must not be classified as a TTY...
    expect(stream.isTTY).toBe(false);
    // ...but ref/unref should still be present for API compatibility.
    expect(typeof stream.ref).toBe("function");
    expect(typeof stream.unref).toBe("function");
    // destroy() owns the fd from here; no explicit closeSync needed.
    stream.destroy();
  } finally {
    try {
      fs.unlinkSync(tempFile);
    } catch (e) {
      // Ignore cleanup errors
    }
  }
});

View File

@@ -1,4 +1,4 @@
import { parse } from "url";
import { domainToASCII, domainToUnicode, parse } from "url";
describe("Url.prototype.parse", () => {
it("parses URL correctly", () => {
@@ -86,3 +86,24 @@ it("#16705", () => {
process.platform === "win32" ? "C:\\firebase-gen-{{ firebase.gen }}" : "/C:/firebase-gen-{{ firebase.gen }}",
);
});
// Regression test for #24191
// url.domainToASCII should return empty string for invalid domains, not throw
it("url.domainToASCII returns empty string for invalid domains", () => {
  // A punycode label mixed with non-ASCII characters is invalid input;
  // Node's contract is to yield "" rather than raise.
  expect(domainToASCII("xn--iñvalid.com")).toBe("");
  // Well-formed ASCII and IDN inputs must be unaffected.
  expect(domainToASCII("example.com")).toBe("example.com");
  expect(domainToASCII("münchen.de")).toBe("xn--mnchen-3ya.de");
});
// Regression test for #24191
it("url.domainToUnicode returns empty string for invalid domains", () => {
  // Same contract as domainToASCII: invalid punycode yields "" instead of throwing.
  expect(domainToUnicode("xn--iñvalid.com")).toBe("");
  // Valid ASCII and punycode inputs still convert correctly.
  expect(domainToUnicode("example.com")).toBe("example.com");
  expect(domainToUnicode("xn--mnchen-3ya.de")).toBe("münchen.de");
});

View File

@@ -90,4 +90,16 @@ describe("Bun.inspect", () => {
it("depth = 0", () => {
expect(Bun.inspect({ a: { b: { c: { d: 1 } } } }, { depth: 0 })).toEqual("{\n a: [Object ...],\n}");
});
// Regression test for #16007
// A Set nested in an object must render with its size and elements, not as an
// opaque value. The expected template literal is whitespace-sensitive, so the
// surrounding code is left untouched.
it("Set is properly formatted in Bun.inspect()", () => {
const set = new Set(["foo", "bar"]);
const formatted = Bun.inspect({ set });
expect(formatted).toBe(`{
set: Set(2) {
"foo",
"bar",
},
}`);
});
});

View File

@@ -298,4 +298,11 @@ describe("util.promisify", () => {
});
});
});
// Regression test for #15201: util.promisify must honor the custom promisify
// implementation attached to globalThis.setTimeout, resolving with the value
// passed after the delay.
it("should promisify globalThis.setTimeout", async () => {
  const delay = promisify(globalThis.setTimeout);
  expect(await delay(1, "ok")).toBe("ok");
});
});

View File

@@ -56,6 +56,20 @@ describe("vm", () => {
);
expect(result).toBe(2);
});
// Regression test for #9778: an event handler registered from inside a vm
// sandbox must be attached to the real `process` object passed into the
// context, so emitting the event afterwards fires (and throws) as expected.
test("issue #9778", () => {
  const code = `
process.on("poop", () => {
throw new Error("woopsie");
});
`;
  runInNewContext(code, { process });
  expect(() => process.emit("poop")).toThrow("woopsie");
});
});
describe("runInThisContext()", () => {

View File

@@ -1,4 +1,5 @@
import { describe, expect, test } from "bun:test";
import { isWindows } from "harness";
describe("Atomics", () => {
describe("basic operations", () => {
@@ -306,4 +307,59 @@ describe("Atomics", () => {
expect(Atomics.load(view, 0)).toBe(-50);
});
});
// Regression tests for Atomics.waitAsync UAF bug
// These tests reproduce a UAF bug where Atomics.waitAsync creates a DispatchTimer
// which creates a new WTFTimer, violating Bun's assumption that there's only one WTFTimer per VM.
// The UAF occurs when the timer fires and continues to reference `this` after it's been freed.
describe("waitAsync regression tests", () => {
test.todoIf(isWindows)("Atomics.waitAsync with setTimeout does not crash (UAF bug)", async () => {
// Run 2 times to trigger the UAF with ASAN
for (let i = 0; i < 2; i++) {
const buffer = new SharedArrayBuffer(16);
const view = new Int32Array(buffer);
Atomics.store(view, 0, 0);
// Nothing ever notifies index 0, so the wait must resolve via its timeout.
const result = Atomics.waitAsync(view, 0, 0, 1); // 1ms timeout
expect(result.async).toBe(true);
expect(result.value).toBeInstanceOf(Promise);
// This setTimeout would trigger the UAF bug by creating another WTFTimer
const timeoutPromise = new Promise<string>(resolve => {
setTimeout(() => {
resolve("hi");
}, 5); // 5ms timeout
});
const [waitResult, timeoutResult] = await Promise.all([result.value, timeoutPromise]);
expect(waitResult).toBe("timed-out");
expect(timeoutResult).toBe("hi");
}
});
test.todoIf(isWindows)("Multiple Atomics.waitAsync calls do not crash", async () => {
// Three concurrent waits on distinct indices, all expiring by timeout.
const buffer = new SharedArrayBuffer(16);
const view = new Int32Array(buffer);
Atomics.store(view, 0, 0);
Atomics.store(view, 1, 0);
Atomics.store(view, 2, 0);
const result1 = Atomics.waitAsync(view, 0, 0, 10);
const result2 = Atomics.waitAsync(view, 1, 0, 20);
const result3 = Atomics.waitAsync(view, 2, 0, 30);
expect(result1.async).toBe(true);
expect(result2.async).toBe(true);
expect(result3.async).toBe(true);
const [r1, r2, r3] = await Promise.all([result1.value, result2.value, result3.value]);
expect(r1).toBe("timed-out");
expect(r2).toBe("timed-out");
expect(r3).toBe("timed-out");
});
});
});

View File

@@ -122,3 +122,84 @@ describe("Ed25519", () => {
});
});
});
// Regression test for #1466
describe("AES-GCM empty data", () => {
  // Encrypt then decrypt a zero-length plaintext; only the auth tag is
  // produced, and it must round-trip back to the empty string.
  const roundTripEmpty = async (additionalData?: Uint8Array) => {
    const name = "AES-GCM";
    const key = await crypto.subtle.generateKey({ name, length: 128 }, false, ["encrypt", "decrypt"]);
    const iv = crypto.getRandomValues(new Uint8Array(16));
    const params = { name, iv, tagLength: 128, additionalData };
    const ciphertext = await crypto.subtle.encrypt(params, key, new Uint8Array());
    const plaintext = await crypto.subtle.decrypt(params, key, ciphertext);
    expect(new TextDecoder().decode(plaintext)).toBe("");
  };
  it("crypto.subtle.encrypt AES-GCM empty data", async () => {
    await roundTripEmpty(undefined);
  });
  it("crypto.subtle.encrypt AES-GCM empty data with additional associated data", async () => {
    await roundTripEmpty(crypto.getRandomValues(new Uint8Array(16)));
  });
});
// Regression test for #24399
describe("ECDSA/ECDH JWK export", () => {
// Expected base64url lengths for the curve's field size (byte counts inline).
// A shorter `d`/`x`/`y` indicates the scalar was not left-padded to the full
// field width before encoding.
const CURVE_CONFIGS = [
{ curve: "P-256", expectedLength: 43 }, // 32 bytes = 43 base64url characters
{ curve: "P-384", expectedLength: 64 }, // 48 bytes = 64 base64url characters
{ curve: "P-521", expectedLength: 88 }, // 66 bytes = 88 base64url characters
] as const;
it("ECDSA exported JWK fields have correct length", async () => {
for (const { curve, expectedLength } of CURVE_CONFIGS) {
// Generate 10 keys to ensure we catch padding issues (which occur ~50% of the time for P-521)
for (let i = 0; i < 10; i++) {
const { privateKey } = await crypto.subtle.generateKey({ name: "ECDSA", namedCurve: curve }, true, ["sign"]);
const jwk = await crypto.subtle.exportKey("jwk", privateKey);
expect(jwk.d).toBeDefined();
expect(jwk.d!.length).toBe(expectedLength);
expect(jwk.x!.length).toBe(expectedLength);
expect(jwk.y!.length).toBe(expectedLength);
}
}
});
it("ECDH exported JWK fields have correct length", async () => {
for (const { curve, expectedLength } of CURVE_CONFIGS) {
// Generate 10 keys to ensure we catch padding issues
for (let i = 0; i < 10; i++) {
const { privateKey } = await crypto.subtle.generateKey({ name: "ECDH", namedCurve: curve }, true, [
"deriveBits",
]);
const jwk = await crypto.subtle.exportKey("jwk", privateKey);
expect(jwk.d).toBeDefined();
expect(jwk.d!.length).toBe(expectedLength);
expect(jwk.x!.length).toBe(expectedLength);
expect(jwk.y!.length).toBe(expectedLength);
}
}
});
it("exported JWK can be re-imported and used for signing", async () => {
const { privateKey } = await crypto.subtle.generateKey({ name: "ECDSA", namedCurve: "P-521" }, true, ["sign"]);
const jwk = await crypto.subtle.exportKey("jwk", privateKey);
expect(jwk.d!.length).toBe(88);
// Re-import the key
const importedKey = await crypto.subtle.importKey("jwk", jwk, { name: "ECDSA", namedCurve: "P-521" }, true, [
"sign",
]);
// Verify we can use it for signing
const data = new TextEncoder().encode("test data");
const signature = await crypto.subtle.sign({ name: "ECDSA", hash: "SHA-384" }, importedKey, data);
expect(signature.byteLength).toBeGreaterThan(0);
});
});

View File

@@ -518,3 +518,37 @@ test("ReadableStream with mixed content (starting with ArrayBuffer) can be conve
expect(text).toContain("🌍");
expect(text).toContain("Здравствуй, мир!");
});
// Regression test for #2368
test("can clone a response", async () => {
  const original = new Response("bun", {
    status: 201,
    headers: {
      "Content-Type": "text/bun;charset=utf-8",
    },
  });
  // @ts-ignore
  const copy = original.clone();
  // The clone duplicates status and headers, and each body reads independently.
  expect(copy.status).toBe(201);
  expect(copy.headers.get("content-type")).toBe("text/bun;charset=utf-8");
  expect(await original.text()).toBe("bun");
  expect(await copy.text()).toBe("bun");
});
// Regression test for #2368
test("can clone a request", async () => {
  const original = new Request("http://example.com/", {
    method: "PUT",
    headers: {
      "Content-Type": "text/bun;charset=utf-8",
    },
    body: "bun",
  });
  expect(original.method).toBe("PUT");
  // Cloning via the Request constructor must copy method, headers, and body,
  // and both bodies must remain independently readable.
  // @ts-ignore
  const copy = new Request(original);
  expect(copy.method).toBe("PUT");
  expect(copy.headers.get("content-type")).toBe("text/bun;charset=utf-8");
  expect(await original.text()).toBe("bun");
  expect(await copy.text()).toBe("bun");
});

View File

@@ -668,3 +668,87 @@ function arrayBuffer(buffer: BufferSource) {
}
return buffer.buffer;
}
// Regression test for #2367
test("should not be able to parse json from empty body", async () => {
  // `expect(asyncFn).toThrow()` asserts a synchronous throw, but .json() on an
  // empty body rejects asynchronously — use `.rejects` so the rejection itself
  // is awaited and matched against SyntaxError.
  await expect(new Response().json()).rejects.toThrow(SyntaxError);
  await expect(new Request("http://example.com/").json()).rejects.toThrow(SyntaxError);
});
// Regression test for #2369
test("can read json() from request", async () => {
for (let i = 0; i < 10; i++) {
const request = new Request("http://example.com/", {
method: "PUT",
body: '[1,2,"hello",{}]',
});
expect(await request.json()).toEqual([1, 2, "hello", {}]);
}
});
// Regression tests for #7001
// Each case consumes a POST body in a different way, then checks how
// locked / bodyUsed / fetch() react to the already-consumed request.
const makeRequestWithBody = () =>
  new Request("https://example.com/", {
    body: "test",
    method: "POST",
  });
test("req.body.locked is true after body is consumed", async () => {
  const req = makeRequestWithBody();
  await new Response(req.body).arrayBuffer();
  expect(req.body.locked).toBe(true);
});
test("req.bodyUsed is true after body is consumed", async () => {
  const req = makeRequestWithBody();
  await new Response(req.body).arrayBuffer();
  expect(req.bodyUsed).toBe(true);
});
test("await fetch(req) throws if req.body is already consumed (arrayBuffer)", async () => {
  const req = makeRequestWithBody();
  await new Response(req.body).arrayBuffer();
  expect(() => fetch(req)).toThrow();
  expect(req.bodyUsed).toBe(true);
});
test("await fetch(req) throws if req.body is already consumed (text)", async () => {
  const req = makeRequestWithBody();
  await new Response(req.body).text();
  expect(() => fetch(req)).toThrow();
  expect(req.bodyUsed).toBe(true);
});
test("await fetch(req) throws if req.body is already consumed (stream that has been read)", async () => {
  const req = makeRequestWithBody();
  await req.body.getReader().read();
  expect(() => fetch(req)).toThrow();
  expect(req.bodyUsed).toBe(true);
});
test("await fetch(req) throws if req.body is already consumed (stream)", async () => {
  const req = makeRequestWithBody();
  // Merely acquiring a reader locks the body, which must be enough for
  // fetch() to refuse the request.
  req.body.getReader();
  expect(() => fetch(req)).toThrow();
  expect(req.bodyUsed).toBe(true);
});

View File

@@ -1,6 +1,8 @@
import { Socket } from "bun";
import { beforeAll, expect, it } from "bun:test";
import { beforeAll, describe, expect, it, test } from "bun:test";
import { gcTick } from "harness";
import { Readable } from "node:stream";
import { brotliCompressSync, createGzip, deflateRawSync, deflateSync } from "node:zlib";
import path from "path";
const gzipped = path.join(import.meta.dir, "fixture.html.gz");
@@ -222,3 +224,764 @@ it("fetch() with a gzip response works (multiple chunks, TCP server)", async don
server.stop();
done();
});
// Regression test for #18413
// Every supported codec (gzip, deflate, br, zstd) must decode a VALID empty
// compressed payload to "" — both with an explicit Content-Length and with
// chunked transfer encoding — instead of failing with a short-read error.
describe("empty compressed responses", () => {
test("empty chunked gzip response should work", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
// Create an empty gzip stream
const gzipStream = createGzip();
gzipStream.end(); // End immediately without writing data
// Convert to web stream
const webStream = Readable.toWeb(gzipStream);
return new Response(webStream, {
headers: {
"Content-Encoding": "gzip",
"Transfer-Encoding": "chunked",
"Content-Type": "text/plain",
},
});
},
});
const response = await fetch(`http://localhost:${server.port}`);
expect(response.status).toBe(200);
// This should not throw "Decompression error: ShortRead"
const text = await response.text();
expect(text).toBe(""); // Empty response
});
test("empty gzip response without chunked encoding", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
// Create an empty gzip buffer
const emptyGzip = Bun.gzipSync(Buffer.alloc(0));
return new Response(emptyGzip, {
headers: {
"Content-Encoding": "gzip",
"Content-Type": "text/plain",
"Content-Length": emptyGzip.length.toString(),
},
});
},
});
const response = await fetch(`http://localhost:${server.port}`);
expect(response.status).toBe(200);
const text = await response.text();
expect(text).toBe("");
});
// Control case: empty chunked body with no Content-Encoding at all.
test("empty chunked response without gzip", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
return new Response(
new ReadableStream({
start(controller) {
// Just close immediately
controller.close();
},
}),
{
headers: {
"Transfer-Encoding": "chunked",
"Content-Type": "text/plain",
},
},
);
},
});
const response = await fetch(`http://localhost:${server.port}`);
expect(response.status).toBe(200);
const text = await response.text();
expect(text).toBe("");
});
test("empty chunked brotli response should work", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
// Create an empty brotli buffer using the proper API
const emptyBrotli = brotliCompressSync(Buffer.alloc(0));
// Return as chunked response
return new Response(
new ReadableStream({
start(controller) {
controller.enqueue(emptyBrotli);
controller.close();
},
}),
{
headers: {
"Content-Encoding": "br",
"Transfer-Encoding": "chunked",
"Content-Type": "text/plain",
},
},
);
},
});
const response = await fetch(`http://localhost:${server.port}`);
expect(response.status).toBe(200);
// Should not throw decompression error
const text = await response.text();
expect(text).toBe("");
});
test("empty non-chunked brotli response", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
// Create an empty brotli buffer using the proper API
const emptyBrotli = brotliCompressSync(Buffer.alloc(0));
return new Response(emptyBrotli, {
headers: {
"Content-Encoding": "br",
"Content-Type": "text/plain",
"Content-Length": emptyBrotli.length.toString(),
},
});
},
});
const response = await fetch(`http://localhost:${server.port}`);
expect(response.status).toBe(200);
const text = await response.text();
expect(text).toBe("");
});
test("empty chunked zstd response should work", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
// Create an empty zstd buffer using the proper API
const emptyZstd = Bun.zstdCompressSync(Buffer.alloc(0));
// Return as chunked response
return new Response(
new ReadableStream({
start(controller) {
controller.enqueue(emptyZstd);
controller.close();
},
}),
{
headers: {
"Content-Encoding": "zstd",
"Transfer-Encoding": "chunked",
"Content-Type": "text/plain",
},
},
);
},
});
const response = await fetch(`http://localhost:${server.port}`);
expect(response.status).toBe(200);
// Should not throw decompression error
const text = await response.text();
expect(text).toBe("");
});
test("empty non-chunked zstd response", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
// Create an empty zstd buffer using the proper API
const emptyZstd = Bun.zstdCompressSync(Buffer.alloc(0));
return new Response(emptyZstd, {
headers: {
"Content-Encoding": "zstd",
"Content-Type": "text/plain",
"Content-Length": emptyZstd.length.toString(),
},
});
},
});
const response = await fetch(`http://localhost:${server.port}`);
expect(response.status).toBe(200);
const text = await response.text();
expect(text).toBe("");
});
test("empty chunked deflate response should work", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
// Create an empty deflate buffer
const emptyDeflate = Bun.deflateSync(Buffer.alloc(0));
// Return as chunked response
return new Response(
new ReadableStream({
start(controller) {
controller.enqueue(emptyDeflate);
controller.close();
},
}),
{
headers: {
"Content-Encoding": "deflate",
"Transfer-Encoding": "chunked",
"Content-Type": "text/plain",
},
},
);
},
});
const response = await fetch(`http://localhost:${server.port}`);
expect(response.status).toBe(200);
// Should not throw decompression error
const text = await response.text();
expect(text).toBe("");
});
test("empty non-chunked deflate response", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
// Create an empty deflate buffer
const emptyDeflate = Bun.deflateSync(Buffer.alloc(0));
return new Response(emptyDeflate, {
headers: {
"Content-Encoding": "deflate",
"Content-Type": "text/plain",
"Content-Length": emptyDeflate.length.toString(),
},
});
},
});
const response = await fetch(`http://localhost:${server.port}`);
expect(response.status).toBe(200);
const text = await response.text();
expect(text).toBe("");
});
});
// Regression test for #18413 - deflate semantics
// "Content-Encoding: deflate" is ambiguous in the wild: some servers send
// zlib-wrapped data (RFC 1950) and some send raw DEFLATE (RFC 1951). The
// client must accept both, whole or chunked, and still reject corrupt data.
describe("deflate semantics", () => {
// Test data
const deflateTestData = Buffer.from("Hello, World! This is a test of deflate encoding.");
// Test zlib-wrapped deflate (RFC 1950 - has 2-byte header and 4-byte Adler32 trailer)
test("deflate with zlib wrapper should work", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
// Create zlib-wrapped deflate (this is what the spec says deflate should be)
const compressed = deflateSync(deflateTestData);
// Verify it has a zlib header: CMF must be 0x78 and (CMF<<8 | FLG) % 31 == 0
expect(compressed[0]).toBe(0x78);
expect(((compressed[0] << 8) | compressed[1]) % 31).toBe(0);
return new Response(compressed, {
headers: {
"Content-Encoding": "deflate",
"Content-Type": "text/plain",
},
});
},
});
const response = await fetch(`http://localhost:${server.port}`);
const text = await response.text();
expect(text).toBe(deflateTestData.toString());
});
// Test raw deflate (RFC 1951 - no header/trailer, just compressed data)
test("raw deflate without zlib wrapper should work", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
// Create raw deflate (no zlib wrapper)
const compressed = deflateRawSync(deflateTestData);
// Verify it doesn't have zlib header (shouldn't start with 0x78)
expect(compressed[0]).not.toBe(0x78);
return new Response(compressed, {
headers: {
"Content-Encoding": "deflate",
"Content-Type": "text/plain",
},
});
},
});
const response = await fetch(`http://localhost:${server.port}`);
const text = await response.text();
expect(text).toBe(deflateTestData.toString());
});
// Test empty zlib-wrapped deflate
test("empty zlib-wrapped deflate should work", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
const compressed = deflateSync(Buffer.alloc(0));
return new Response(compressed, {
headers: {
"Content-Encoding": "deflate",
"Content-Type": "text/plain",
},
});
},
});
const response = await fetch(`http://localhost:${server.port}`);
const text = await response.text();
expect(text).toBe("");
});
// Test empty raw deflate
test("empty raw deflate should work", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
const compressed = deflateRawSync(Buffer.alloc(0));
return new Response(compressed, {
headers: {
"Content-Encoding": "deflate",
"Content-Type": "text/plain",
},
});
},
});
const response = await fetch(`http://localhost:${server.port}`);
const text = await response.text();
expect(text).toBe("");
});
// Test chunked zlib-wrapped deflate
// (split mid-stream with a delay, so the decoder must buffer across reads)
test("chunked zlib-wrapped deflate should work", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
const compressed = deflateSync(deflateTestData);
const mid = Math.floor(compressed.length / 2);
return new Response(
new ReadableStream({
async start(controller) {
controller.enqueue(compressed.slice(0, mid));
await Bun.sleep(50);
controller.enqueue(compressed.slice(mid));
controller.close();
},
}),
{
headers: {
"Content-Encoding": "deflate",
"Transfer-Encoding": "chunked",
"Content-Type": "text/plain",
},
},
);
},
});
const response = await fetch(`http://localhost:${server.port}`);
const text = await response.text();
expect(text).toBe(deflateTestData.toString());
});
// Test chunked raw deflate
test("chunked raw deflate should work", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
const compressed = deflateRawSync(deflateTestData);
const mid = Math.floor(compressed.length / 2);
return new Response(
new ReadableStream({
async start(controller) {
controller.enqueue(compressed.slice(0, mid));
await Bun.sleep(50);
controller.enqueue(compressed.slice(mid));
controller.close();
},
}),
{
headers: {
"Content-Encoding": "deflate",
"Transfer-Encoding": "chunked",
"Content-Type": "text/plain",
},
},
);
},
});
const response = await fetch(`http://localhost:${server.port}`);
const text = await response.text();
expect(text).toBe(deflateTestData.toString());
});
// Test truncated zlib-wrapped deflate (missing trailer)
test("truncated zlib-wrapped deflate should fail", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
const compressed = deflateSync(deflateTestData);
// Remove the 4-byte Adler32 trailer
const truncated = compressed.slice(0, -4);
return new Response(truncated, {
headers: {
"Content-Encoding": "deflate",
"Content-Type": "text/plain",
},
});
},
});
try {
const response = await fetch(`http://localhost:${server.port}`);
await response.text();
expect.unreachable("Should have thrown decompression error");
} catch (err: any) {
expect(err.code).toMatch(/ZlibError|ShortRead/);
}
});
// Test invalid deflate data (not deflate at all)
test("invalid deflate data should fail", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
// Random bytes that are neither zlib-wrapped nor raw deflate
const invalid = new Uint8Array([0xff, 0xfe, 0xfd, 0xfc, 0xfb]);
return new Response(invalid, {
headers: {
"Content-Encoding": "deflate",
"Content-Type": "text/plain",
},
});
},
});
try {
const response = await fetch(`http://localhost:${server.port}`);
await response.text();
expect.unreachable("Should have thrown decompression error");
} catch (err: any) {
expect(err.code).toMatch(/ZlibError/);
}
});
});
// Regression test for #18413 - truncation and edge cases
// Truncated streams must surface a codec-specific error; streams split into
// delayed chunks must still decode; mismatched/invalid encodings must fail
// rather than hang or return partial data.
describe("compression truncation and edge cases", () => {
// Helper to create a server that sends truncated compressed data
// (drops `truncateBytes` from the end but advertises the truncated length,
// so the failure is purely a decompression error, not a network short-read)
function createTruncatedServer(compression: "gzip" | "br" | "zstd" | "deflate", truncateBytes: number = 1) {
return Bun.serve({
port: 0,
async fetch(req) {
let compressed: Uint8Array;
const data = Buffer.from("Hello World! This is a test message.");
switch (compression) {
case "gzip":
compressed = Bun.gzipSync(data);
break;
case "br":
compressed = brotliCompressSync(data);
break;
case "zstd":
compressed = Bun.zstdCompressSync(data);
break;
case "deflate":
compressed = Bun.deflateSync(data);
break;
}
// Truncate the compressed data
const truncated = compressed.slice(0, compressed.length - truncateBytes);
return new Response(truncated, {
headers: {
"Content-Encoding": compression,
"Content-Type": "text/plain",
"Content-Length": truncated.length.toString(),
},
});
},
});
}
// Helper to create a server that sends data in delayed chunks
function createDelayedChunksServer(compression: "gzip" | "br" | "zstd" | "deflate", delayMs: number = 100) {
return Bun.serve({
port: 0,
async fetch(req) {
let compressed: Uint8Array;
const data = Buffer.from("Hello World! This is a test message.");
switch (compression) {
case "gzip":
compressed = Bun.gzipSync(data);
break;
case "br":
compressed = brotliCompressSync(data);
break;
case "zstd":
compressed = Bun.zstdCompressSync(data);
break;
case "deflate":
compressed = Bun.deflateSync(data);
break;
}
// Split compressed data into chunks
const mid = Math.floor(compressed.length / 2);
const chunk1 = compressed.slice(0, mid);
const chunk2 = compressed.slice(mid);
return new Response(
new ReadableStream({
async start(controller) {
// Send first chunk
controller.enqueue(chunk1);
// Delay before sending second chunk
await Bun.sleep(delayMs);
controller.enqueue(chunk2);
controller.close();
},
}),
{
headers: {
"Content-Encoding": compression,
"Transfer-Encoding": "chunked",
"Content-Type": "text/plain",
},
},
);
},
});
}
// Test truncated gzip stream
test("truncated gzip stream should throw error", async () => {
using server = createTruncatedServer("gzip", 5);
try {
const response = await fetch(`http://localhost:${server.port}`);
await response.text();
expect.unreachable("Should have thrown decompression error");
} catch (err: any) {
expect(err.code || err.name || err.message).toMatch(/ZlibError|ShortRead/);
}
});
// Test truncated brotli stream
test("truncated brotli stream should throw error", async () => {
using server = createTruncatedServer("br", 5);
try {
const response = await fetch(`http://localhost:${server.port}`);
await response.text();
expect.unreachable("Should have thrown decompression error");
} catch (err: any) {
expect(err.code || err.name || err.message).toMatch(/BrotliDecompressionError/);
}
});
// Test truncated zstd stream
test("truncated zstd stream should throw error", async () => {
using server = createTruncatedServer("zstd", 5);
try {
const response = await fetch(`http://localhost:${server.port}`);
await response.text();
expect.unreachable("Should have thrown decompression error");
} catch (err: any) {
expect(err.code || err.name || err.message).toMatch(/ZstdDecompressionError/);
}
});
// Test truncated deflate stream
test("truncated deflate stream should throw error", async () => {
using server = createTruncatedServer("deflate", 1);
try {
const response = await fetch(`http://localhost:${server.port}`);
await response.text();
expect.unreachable("Should have thrown decompression error");
} catch (err: any) {
expect(err.code || err.name || err.message).toMatch(/ZlibError|ShortRead/);
}
});
// Test delayed chunks for gzip (should succeed)
test("gzip with delayed chunks should succeed", async () => {
using server = createDelayedChunksServer("gzip", 50);
const response = await fetch(`http://localhost:${server.port}`);
const text = await response.text();
expect(text).toBe("Hello World! This is a test message.");
});
// Test delayed chunks for brotli (should succeed)
test("brotli with delayed chunks should succeed", async () => {
using server = createDelayedChunksServer("br", 50);
const response = await fetch(`http://localhost:${server.port}`);
const text = await response.text();
expect(text).toBe("Hello World! This is a test message.");
});
// Test delayed chunks for zstd (should succeed)
test("zstd with delayed chunks should succeed", async () => {
using server = createDelayedChunksServer("zstd", 50);
const response = await fetch(`http://localhost:${server.port}`);
const text = await response.text();
expect(text).toBe("Hello World! This is a test message.");
});
// Test delayed chunks for deflate (should succeed)
test("deflate with delayed chunks should succeed", async () => {
using server = createDelayedChunksServer("deflate", 50);
const response = await fetch(`http://localhost:${server.port}`);
const text = await response.text();
expect(text).toBe("Hello World! This is a test message.");
});
// Test mismatched Content-Encoding
test("mismatched Content-Encoding should fail gracefully", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
// Send gzip data but claim it's brotli
const gzipped = Bun.gzipSync(Buffer.from("Hello World"));
return new Response(gzipped, {
headers: {
"Content-Encoding": "br",
"Content-Type": "text/plain",
},
});
},
});
try {
const response = await fetch(`http://localhost:${server.port}`);
await response.text();
expect.unreachable("Should have thrown decompression error");
} catch (err: any) {
expect(err.code || err.name || err.message).toMatch(/BrotliDecompressionError/);
}
});
// Test sending zero-byte compressed body
test("zero-byte body with gzip Content-Encoding and Content-Length: 0", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
return new Response(new Uint8Array(0), {
headers: {
"Content-Encoding": "gzip",
"Content-Type": "text/plain",
"Content-Length": "0",
},
});
},
});
// When Content-Length is 0, the decompressor is not invoked, so this succeeds
const response = await fetch(`http://localhost:${server.port}`);
const text = await response.text();
expect(text).toBe("");
});
// Test sending invalid compressed data
test("invalid gzip data should fail", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
// Send random bytes claiming to be gzip
const invalid = new Uint8Array([0xff, 0xff, 0xff, 0xff, 0xff]);
return new Response(invalid, {
headers: {
"Content-Encoding": "gzip",
"Content-Type": "text/plain",
},
});
},
});
try {
const response = await fetch(`http://localhost:${server.port}`);
await response.text();
expect.unreachable("Should have thrown decompression error");
} catch (err: any) {
expect(err.code || err.name || err.message).toMatch(/ZlibError/);
}
});
// Test sending first chunk delayed with empty initial chunk
// (an empty enqueue must not confuse the decompressor's state machine)
test("empty first chunk followed by valid gzip should succeed", async () => {
using server = Bun.serve({
port: 0,
async fetch(req) {
const gzipped = Bun.gzipSync(Buffer.from("Hello World"));
return new Response(
new ReadableStream({
async start(controller) {
// Send empty chunk first
controller.enqueue(new Uint8Array(0));
await Bun.sleep(50);
// Then send the actual compressed data
controller.enqueue(gzipped);
controller.close();
},
}),
{
headers: {
"Content-Encoding": "gzip",
"Transfer-Encoding": "chunked",
"Content-Type": "text/plain",
},
},
);
},
});
const response = await fetch(`http://localhost:${server.port}`);
const text = await response.text();
expect(text).toBe("Hello World");
});
});

View File

@@ -22,7 +22,7 @@ import type { AddressInfo } from "net";
import net from "net";
import { join } from "path";
import { Readable } from "stream";
import { gzipSync } from "zlib";
import { gzipSync, zstdCompressSync } from "zlib";
const tmp_dir = tmpdirSync();
const fetchFixture3 = join(import.meta.dir, "fetch-leak-test-fixture-3.js");
const fetchFixture4 = join(import.meta.dir, "fetch-leak-test-fixture-4.js");
@@ -2447,3 +2447,299 @@ it("should allow to follow redirect if connection is closed, abort should work e
}
}
});
// Regression test for #21049
// A Request constructed with redirect: "manual" must keep that mode when
// passed to fetch() — the 302 itself is returned instead of being followed.
it("fetch with Request object respects redirect: 'manual' option", async () => {
// Test server that redirects
await using server = Bun.serve({
port: 0,
fetch(req) {
const url = new URL(req.url);
if (url.pathname === "/redirect") {
return new Response(null, {
status: 302,
headers: {
Location: "/target",
},
});
}
if (url.pathname === "/target") {
return new Response("Target reached", { status: 200 });
}
return new Response("Not found", { status: 404 });
},
});
// Test 1: Direct fetch with redirect: "manual" (currently works)
const directResponse = await fetch(`${server.url}/redirect`, {
redirect: "manual",
});
expect(directResponse.status).toBe(302);
expect(directResponse.url).toBe(`${server.url}/redirect`);
expect(directResponse.headers.get("location")).toBe("/target");
expect(directResponse.redirected).toBe(false);
// Test 2: Fetch with Request object and redirect: "manual" (currently broken)
const request = new Request(`${server.url}/redirect`, {
redirect: "manual",
});
const requestResponse = await fetch(request);
expect(requestResponse.status).toBe(302);
expect(requestResponse.url).toBe(`${server.url}/redirect`); // This should be the original URL, not the target
expect(requestResponse.headers.get("location")).toBe("/target");
expect(requestResponse.redirected).toBe(false);
// Test 3: Verify the behavior matches Node.js and Deno
// (run the same scenario in a fresh bun subprocess to rule out in-process state)
const testScript = `
async function main() {
const request = new Request("${server.url}/redirect", {
redirect: "manual",
});
const response = await fetch(request);
console.log(JSON.stringify({
status: response.status,
url: response.url,
redirected: response.redirected,
location: response.headers.get("location")
}));
}
main();
`;
// Run with Bun
await using bunProc = Bun.spawn({
cmd: [bunExe(), "-e", testScript],
env: bunEnv,
stdout: "pipe",
stderr: "pipe",
});
const [bunStdout, bunExitCode] = await Promise.all([new Response(bunProc.stdout).text(), bunProc.exited]);
expect(bunExitCode).toBe(0);
const bunResult = JSON.parse(bunStdout.trim());
// The bug: Bun follows the redirect even though redirect: "manual" was specified
// Expected: status=302, url=original, redirected=false
// Actual (bug): status=200, url=target, redirected=true
expect(bunResult).toEqual({
status: 302,
url: `${server.url}/redirect`,
redirected: false,
location: "/target",
});
});
// Regression test for #21049
it("fetch with Request object respects redirect: 'manual' for external URLs", async () => {
  using server = Bun.serve({
    port: 0,
    routes: {
      "/redirect": new Response(null, {
        status: 302,
        headers: {
          Location: "/target",
        },
      }),
      "/target": new Response("Target reached", { status: 200 }),
    },
  });
  const req = new Request(`${server.url}/redirect`, { redirect: "manual" });
  const res = await fetch(req);
  // With redirect: "manual" the 302 itself is surfaced instead of being followed.
  expect(res.status).toBe(302);
  expect(res.url).toBe(`${server.url}/redirect`);
  expect(res.redirected).toBe(false);
  expect(res.headers.get("location")).toBe("/target");
});
// Regression test for #21049
// When fetch(request, init) is given an init object, the Request's redirect
// mode must still apply unless init explicitly overrides it.
it("fetch with Request respects redirect when fetch has other options but no redirect", async () => {
// Test server that redirects
await using server = Bun.serve({
port: 0,
fetch(req) {
const url = new URL(req.url);
if (url.pathname === "/redirect") {
return new Response(null, {
status: 302,
headers: {
Location: "/target",
},
});
}
if (url.pathname === "/target") {
return new Response("Target reached", {
status: 200,
headers: {
"X-Target": "true",
},
});
}
return new Response("Not found", { status: 404 });
},
});
// Create a Request with redirect: "manual"
const request = new Request(`${server.url}/redirect`, {
redirect: "manual",
headers: {
"X-Original": "request",
},
});
// Test 1: fetch with other options but NO redirect option
// Should use the Request's redirect: "manual"
const response1 = await fetch(request, {
headers: {
"X-Additional": "fetch-option",
},
// Note: no redirect option here
});
expect(response1.status).toBe(302);
expect(response1.url).toBe(`${server.url}/redirect`);
expect(response1.redirected).toBe(false);
expect(response1.headers.get("location")).toBe("/target");
// Test 2: fetch with explicit redirect option should override Request's redirect
const response2 = await fetch(request, {
headers: {
"X-Additional": "fetch-option",
},
redirect: "follow", // Explicitly override
});
expect(response2.status).toBe(200);
expect(response2.url).toBe(new URL("/target", server.url).href);
expect(response2.redirected).toBe(true);
expect(response2.headers.get("X-Target")).toBe("true");
// Test 3: fetch with empty options object should use Request's redirect
const response3 = await fetch(request, {});
expect(response3.status).toBe(302);
expect(response3.url).toBe(`${server.url}/redirect`);
expect(response3.redirected).toBe(false);
});
// Regression test for #20053
it("issue #20053 - multi-frame zstd responses should be fully decompressed", async () => {
  // Two independently-compressed zstd frames concatenated back to back —
  // mimics chunked encoding where each chunk is compressed as its own frame.
  const first = "A".repeat(16384); // Exactly 16KB
  const second = "B".repeat(3627); // Remaining data to total ~20KB
  const frame1 = zstdCompressSync(Buffer.from(first));
  const frame2 = zstdCompressSync(Buffer.from(second));
  using server = Bun.serve({
    port: 0,
    async fetch(req) {
      // Concatenate two zstd frames (simulating chunked response with multiple frames)
      return new Response(Buffer.concat([frame1, frame2]), {
        headers: {
          "content-type": "text/plain",
          "content-encoding": "zstd",
          "transfer-encoding": "chunked",
        },
      });
    },
  });
  const res = await fetch(`http://localhost:${server.port}/`);
  const body = await res.text();
  // The decoder must decompress every frame, not stop after the first.
  expect(body.length).toBe(first.length + second.length);
  expect(body.substring(0, 16384)).toBe(first);
  expect(body.substring(16384)).toBe(second);
});
// Regression test for #20053
// Reproduces the original report: a JSON body split at a 16KB boundary into
// two separately-compressed zstd frames (as Hono's compression middleware
// does with chunked encoding) must reassemble into valid, parseable JSON.
it("issue #20053 - zstd with chunked encoding splits JSON into multiple frames", async () => {
  // This test simulates the exact scenario from the original issue
  // where Hono with compression middleware sends multiple zstd frames
  const largeData = { data: "A".repeat(20000) };
  const jsonString = JSON.stringify(largeData);
  using server = Bun.serve({
    port: 0,
    async fetch(req) {
      // Simulate chunked encoding by compressing in parts
      // This is what happens when the server uses chunked transfer encoding
      // with compression - each chunk might be compressed separately
      const part1 = jsonString.slice(0, 16384);
      const part2 = jsonString.slice(16384);
      const compressed1 = zstdCompressSync(Buffer.from(part1));
      const compressed2 = zstdCompressSync(Buffer.from(part2));
      // Server sends multiple zstd frames as would happen with chunked encoding
      const combined = Buffer.concat([compressed1, compressed2]);
      return new Response(combined, {
        headers: {
          "content-type": "application/json",
          "content-encoding": "zstd",
          "transfer-encoding": "chunked",
        },
      });
    },
  });
  const response = await fetch(`http://localhost:${server.port}/`);
  const text = await response.text();
  // The decompressed response should be the concatenation of all frames
  expect(text.length).toBe(jsonString.length);
  expect(text).toBe(jsonString);
  // Verify it can be parsed as JSON
  const parsed = JSON.parse(text);
  expect(parsed.data.length).toBe(20000);
  expect(parsed.data).toBe("A".repeat(20000));
});
// Regression test for #20053
it("issue #20053 - streaming zstd decompression handles frame boundaries correctly", async () => {
  // Two independently-compressed zstd frames: the streaming decompressor must
  // keep consuming input after the first frame ends instead of treating the
  // frame boundary as end-of-stream.
  const parts = ["First frame content", "Second frame content"];
  const frames = parts.map(part => zstdCompressSync(Buffer.from(part)));
  using server = Bun.serve({
    port: 0,
    async fetch(req) {
      // Simulate streaming by sending frames separately
      return new Response(Buffer.concat(frames), {
        headers: {
          "content-type": "text/plain",
          "content-encoding": "zstd",
          "transfer-encoding": "chunked",
        },
      });
    },
  });
  const response = await fetch(`http://localhost:${server.port}/`);
  // Both frames should be decompressed
  expect(await response.text()).toBe(parts.join(""));
});

View File

@@ -49,7 +49,7 @@ describe("2-arg form", () => {
test("print size", () => {
expect(normalizeBunSnapshot(Bun.inspect(new Response(Bun.file(import.meta.filename)))), import.meta.dir)
.toMatchInlineSnapshot(`
"Response (5.83 KB) {
"Response (7.66 KB) {
ok: true,
url: "",
status: 200,
@@ -176,3 +176,42 @@ describe("clone()", () => {
expect(clonedText).toBe("Hello, world!");
});
});
// Regression test for #7397
test("Response.redirect clones string from Location header", () => {
  // Mutating the URL after construction forces a fresh href string, which
  // must survive round-tripping through Response.redirect's Location header.
  const target = new URL("http://example.com");
  target.hostname = "example1.com";
  const { href } = target;
  expect(href).toBe("http://example1.com/");
  const redirected = Response.redirect(href);
  expect(redirected.headers.get("Location")).toBe(href);
});
// Regression tests for #21257
// `Response.json()` should throw with top level value of `function` `symbol` `undefined` (node compatibility)
test("Response.json() throws TypeError for non-JSON serializable top-level values", () => {
  // Each of these top-level values must be rejected with the Node-compatible
  // "Value is not JSON serializable" error.
  const nonSerializable: unknown[] = [Symbol("test"), function testFunc() {}, undefined];
  for (const value of nonSerializable) {
    expect(() => Response.json(value)).toThrow("Value is not JSON serializable");
  }
});
test("Response.json() works correctly with valid values", () => {
// These should not throw
expect(() => Response.json(null)).not.toThrow();
expect(() => Response.json({})).not.toThrow();
expect(() => Response.json("string")).not.toThrow();
expect(() => Response.json(123)).not.toThrow();
expect(() => Response.json(true)).not.toThrow();
expect(() => Response.json([1, 2, 3])).not.toThrow();
// Objects containing non-serializable values should not throw at top-level
expect(() => Response.json({ symbol: Symbol("test") })).not.toThrow();
expect(() => Response.json({ func: function () {} })).not.toThrow();
expect(() => Response.json({ undef: undefined })).not.toThrow();
});
test("Response.json() BigInt error matches Node.js", () => {
// BigInt should throw with Node.js compatible error message
expect(() => Response.json(123n)).toThrow("Do not know how to serialize a BigInt");
});

View File

@@ -1,4 +1,5 @@
import { describe, expect, test } from "bun:test";
import { describe, expect, jest, test } from "bun:test";
import { tempDirWithFiles } from "harness";
describe("HTMLRewriter DOCTYPE handler", () => {
test("remove and removed property work on DOCTYPE", () => {
@@ -22,3 +23,318 @@ describe("HTMLRewriter DOCTYPE handler", () => {
expect(result).toContain("<html>");
});
});
// Regression test for #7827
// Repeatedly builds a rewriter inside an IIFE, runs two transforms through a
// mocked element handler, then forces GC. The handler must be invoked exactly
// twice per rewriter even after earlier iterations become collectable.
test("#7827", () => {
  for (let i = 0; i < 10; i++)
    (function () {
      const element = jest.fn(element => {
        // Touch a property so the native element wrapper is actually accessed.
        element.tagName;
      });
      const rewriter = new HTMLRewriter().on("p", {
        element,
      });
      const content = "<p>Lorem ipsum!</p>";
      rewriter.transform(new Response(content));
      rewriter.transform(new Response(content));
      expect(element).toHaveBeenCalledTimes(2);
    })();
  // Force a full GC pass; the original bug involved collected handlers.
  Bun.gc(true);
});
// Regression test for #19219
test("HTMLRewriter should throw proper errors instead of [native code: Exception]", () => {
  const html = "<html><body><p>Hello</p></body></html>";
  const rewriter = new HTMLRewriter().on("p", {
    element(element) {
      // This will cause an error by trying to call a non-existent method
      (element as any).nonExistentMethod();
    },
  });
  // Should throw a proper TypeError, not [native code: Exception]
  expect(() => rewriter.transform(html)).toThrow(TypeError);
  // Transform again and capture the error so its message can be inspected.
  let caught: any;
  try {
    rewriter.transform(html);
  } catch (error: any) {
    caught = error;
  }
  expect(caught).toBeInstanceOf(TypeError);
  expect(caught.message).toContain("nonExistentMethod");
  expect(caught.message).toContain("is not a function");
  // Make sure it's not the generic [native code: Exception] message
  expect(caught.toString()).not.toContain("[native code: Exception]");
});
// Regression test for #19219
test("HTMLRewriter should propagate errors from handlers correctly", () => {
const rewriter = new HTMLRewriter().on("div", {
element() {
throw new Error("Custom error from handler");
},
});
const html = "<div>test</div>";
expect(() => {
rewriter.transform(html);
}).toThrow("Custom error from handler");
});
// Regression test for #19219
// NOTE(review): the handler is async yet the test expects transform() to
// throw synchronously — presumably Bun surfaces the handler's rejection as a
// synchronous error for Response inputs; confirm against HTMLRewriter docs.
test("HTMLRewriter should handle errors in async handlers", async () => {
  const rewriter = new HTMLRewriter().on("div", {
    async element() {
      throw new Error("Async handler error");
    },
  });
  const html = "<div>test</div>";
  const response = new Response(html);
  expect(() => {
    rewriter.transform(response);
  }).toThrow("Async handler error");
});
// Regression test for #21680
test("HTMLRewriter should not crash when element handler throws an exception - issue #21680", () => {
  // The most important test: ensure the original crashing case from the GitHub issue doesn't crash
  // This was the exact case from the issue that caused "ASSERTION FAILED: Unexpected exception observed"
  // Create a minimal HTML file for testing
  const dir = tempDirWithFiles("htmlrewriter-crash-test", {
    "min.html": "<script></script>",
  });
  // Original failing case: this should not crash the process
  // (Bun.file body is lazy, so the handler's throw is not observed
  // synchronously here — the assertion is only that the process survives.)
  expect(() => {
    const rewriter = new HTMLRewriter().on("script", {
      element(a) {
        throw new Error("abc");
      },
    });
    rewriter.transform(new Response(Bun.file(`${dir}/min.html`)));
  }).not.toThrow(); // The important thing is it doesn't crash, we're ok with it silently failing
  // Test with Response containing string content
  // (string body is available synchronously, so the throw IS observed)
  expect(() => {
    const rewriter = new HTMLRewriter().on("script", {
      element(a) {
        throw new Error("response test");
      },
    });
    rewriter.transform(new Response("<script></script>"));
  }).toThrow("response test");
});
// Regression test for #21680
test("HTMLRewriter exception handling should not break normal operation", () => {
  let normalCallCount = 0;
  // Step 1: trigger (and swallow) a handler exception.
  try {
    new HTMLRewriter()
      .on("div", {
        element(element) {
          throw new Error("test error");
        },
      })
      .transform(new Response("<div>test</div>"));
  } catch (e) {
    // Expected to throw
  }
  // Step 2: a fresh rewriter must still operate normally afterwards.
  const rewriter2 = new HTMLRewriter().on("div", {
    element(element) {
      normalCallCount++;
      element.setInnerContent("replaced");
    },
  });
  // The transform should complete successfully without throwing.
  rewriter2.transform(new Response("<div>original</div>"));
  expect(normalCallCount).toBe(1);
});
// Regression tests for htmlrewriter-additional-bugs
test("HTMLRewriter selector validation should throw proper errors", () => {
// Test various invalid CSS selectors that should be rejected
const invalidSelectors = [
"", // empty selector
" ", // whitespace only
"<<<", // invalid CSS
"div[", // incomplete attribute selector
"div)", // mismatched brackets
"div::", // invalid pseudo
"..invalid", // invalid start
];
invalidSelectors.forEach(selector => {
expect(() => {
const rewriter = new HTMLRewriter();
rewriter.on(selector, {
element(element) {
element.setInnerContent("should not reach here");
},
});
}).toThrow(); // Should throw a meaningful error, not silently succeed
});
});
test("HTMLRewriter should properly validate handler objects", () => {
// Test null and undefined handlers
expect(() => {
const rewriter = new HTMLRewriter();
rewriter.on("div", null);
}).toThrow("Expected object");
expect(() => {
const rewriter = new HTMLRewriter();
rewriter.on("div", undefined);
}).toThrow("Expected object");
// Test non-object handlers
expect(() => {
const rewriter = new HTMLRewriter();
rewriter.on("div", "not an object");
}).toThrow("Expected object");
expect(() => {
const rewriter = new HTMLRewriter();
rewriter.on("div", 42);
}).toThrow("Expected object");
});
test("HTMLRewriter memory management - no leaks on selector parse errors", () => {
// This test ensures that selector_slice memory is properly freed
// even when selector parsing fails
for (let i = 0; i < 100; i++) {
try {
const rewriter = new HTMLRewriter();
// Use an invalid selector to trigger error path
rewriter.on("div[incomplete", {
element(element) {
console.log("Should not reach here");
},
});
} catch (e) {
// Expected to throw, but no memory should leak
}
}
// If there were memory leaks, running this many times would consume significant memory
// The test passes if it completes without memory issues
expect(true).toBe(true);
});
test("HTMLRewriter should handle various input edge cases safely", () => {
// Empty string input (should work)
expect(() => {
const rewriter = new HTMLRewriter();
rewriter.transform("");
}).not.toThrow();
// Null input (should throw)
expect(() => {
const rewriter = new HTMLRewriter();
rewriter.transform(null);
}).toThrow("Expected Response or Body");
// Large input (should work)
expect(() => {
const rewriter = new HTMLRewriter();
const largeHtml = "<div>" + "x".repeat(100000) + "</div>";
rewriter.transform(largeHtml);
}).not.toThrow();
});
test("HTMLRewriter concurrent usage should work correctly", () => {
// Same rewriter instance should handle multiple transforms
const rewriter = new HTMLRewriter().on("div", {
element(element) {
element.setInnerContent("modified");
},
});
expect(() => {
const result1 = rewriter.transform("<div>original1</div>");
const result2 = rewriter.transform("<div>original2</div>");
}).not.toThrow();
});
test("HTMLRewriter should handle many handlers on same element", () => {
let rewriter = new HTMLRewriter();
// Add many handlers to the same element type
for (let i = 0; i < 50; i++) {
rewriter = rewriter.on("div", {
element(element) {
const current = element.getAttribute("data-count") || "0";
element.setAttribute("data-count", (parseInt(current) + 1).toString());
},
});
}
expect(() => {
rewriter.transform('<div data-count="0">test</div>');
}).not.toThrow();
});
test("HTMLRewriter should handle special characters in selectors safely", () => {
// These selectors with special characters should either work or fail gracefully
const specialSelectors = [
"div[data-test=\"'quotes'\"]",
'div[data-test="\\"escaped\\""]',
'div[class~="space separated"]',
'input[type="text"]',
];
specialSelectors.forEach(selector => {
expect(() => {
const rewriter = new HTMLRewriter().on(selector, {
element(element) {
element.setAttribute("data-processed", "true");
},
});
// The important thing is it doesn't crash
}).not.toThrow();
});
});
// Regression test for text-chunk-null-access
test("TextChunk methods handle null text_chunk gracefully", async () => {
  // This test reproduces a crash where TextChunk methods are called
  // after the underlying text_chunk has been cleaned up or is null
  let textChunkRef: any;
  const html = "<p>Test content</p>";
  const rewriter = new HTMLRewriter().on("p", {
    text(text) {
      // Store reference to the text chunk so it outlives the transform.
      textChunkRef = text;
    },
  });
  await rewriter.transform(new Response(html)).text();
  // Force garbage collection to clean up internal references
  if (typeof Bun !== "undefined" && Bun.gc) {
    Bun.gc(true);
  }
  // Accessing properties of a dead chunk must not crash.
  // It should be undefined to be consistent with the rest of the APIs.
  expect(textChunkRef.removed).toBeUndefined();
  expect(textChunkRef.lastInTextNode).toBeUndefined();
});

View File

@@ -1,4 +1,5 @@
import { expect, test } from "bun:test";
import { Request as NodeFetchRequest } from "node-fetch";
test("undefined args don't throw", () => {
const request = new Request("https://example.com/", {
@@ -74,3 +75,69 @@ test("clone() does not lock original body when body was accessed before clone",
expect(originalText).toBe("Hello, world!");
expect(clonedText).toBe("Hello, world!");
});
// Regression test for #2993
test("Request cache option is set correctly", () => {
  // Every valid RequestCache value must round-trip through the constructor.
  const cacheValues = ["default", "no-store", "reload", "no-cache", "force-cache", "only-if-cached"] as const;
  cacheValues.forEach(cache => {
    expect(new Request("http://localhost:8080/", { cache }).cache).toBe(cache);
  });
});
// Regression test for #2993
test("Request mode option is set correctly", () => {
  // Every valid RequestMode value must round-trip through the constructor.
  const modeValues = ["same-origin", "no-cors", "cors", "navigate"] as const;
  modeValues.forEach(mode => {
    expect(new Request("http://localhost:8080/", { mode }).mode).toBe(mode);
  });
});
// Regression test for #2993
test("Request cache defaults to 'default'", () => {
  // No init → spec default cache mode.
  expect(new Request("http://localhost:8080/").cache).toBe("default");
});
// Regression test for #2993
test("Request mode defaults to 'cors'", () => {
  // No init → spec default mode.
  expect(new Request("http://localhost:8080/").mode).toBe("cors");
});
// Regression test for #2993
test("Request.clone() preserves cache and mode options", () => {
  // clone() must copy non-default cache/mode settings.
  const source = new Request("http://localhost:8080/", { cache: "no-cache", mode: "same-origin" });
  const copy = source.clone();
  expect(copy.cache).toBe("no-cache");
  expect(copy.mode).toBe("same-origin");
});
// Regression test for #2993
test("new Request(request) preserves cache and mode options", () => {
  // Constructing from another Request must carry cache/mode over.
  const source = new Request("http://localhost:8080/", { cache: "force-cache", mode: "no-cors" });
  const copy = new Request(source);
  expect(copy.cache).toBe("force-cache");
  expect(copy.mode).toBe("no-cors");
});
// Regression test for #2993
test("new Request(request, init) allows overriding cache and mode", () => {
  // An explicit init must win over the source Request's settings.
  const source = new Request("http://localhost:8080/", { cache: "default", mode: "cors" });
  const overridden = new Request(source, { cache: "no-cache", mode: "same-origin" });
  expect(overridden.cache).toBe("no-cache");
  expect(overridden.mode).toBe("same-origin");
});
// Regression test for #14865
test("node fetch Request URL field is set even with a valid URL", () => {
expect(new NodeFetchRequest("/").url).toBe("/");
expect(new NodeFetchRequest("https://bun.sh/").url).toBe("https://bun.sh/");
expect(new NodeFetchRequest(new URL("https://bun.sh/")).url).toBe("https://bun.sh/");
});

View File

@@ -1205,3 +1205,66 @@ it("handles exceptions during empty stream creation", () => {
throw new Error("not stack overflow");
}).toThrow("not stack overflow");
});
// Regression test for #19661
// Closing a ReadableStream controller that is already closed must raise the
// Node-compatible TypeError with code ERR_INVALID_STATE, both standalone and
// when the stream is served as a Response body.
describe("ReadableStream controller close error", () => {
  test("closing an already closed controller throws proper error", async () => {
    const { resolve, promise } = Promise.withResolvers();
    let controller;
    const stream = () =>
      new ReadableStream({
        start(controller1) {
          // Leak the controller out of start() so the test can close it again.
          controller = controller1;
          controller1.close();
          // Resolve on the next tick so the close above fully settles first.
          process.nextTick(resolve);
        },
      });
    stream();
    await promise;
    expect(() => controller.close()).toThrowError(
      expect.objectContaining({
        name: "TypeError",
        message: "Invalid state: Controller is already closed",
        code: "ERR_INVALID_STATE",
      }),
    );
  });
  test("server version - closing an already closed controller throws proper error", async () => {
    const { resolve, promise } = Promise.withResolvers();
    let controller;
    const stream = () =>
      new ReadableStream({
        start(controller1) {
          controller = controller1;
          // NOTE(review): uses the outer `controller` binding (just assigned)
          // where the sibling test uses `controller1` — same object either way.
          controller.close();
          process.nextTick(resolve);
        },
      });
    const server = Bun.serve({
      port: 0,
      fetch(req) {
        return new Response(stream());
      },
    });
    try {
      await fetch(server.url, {});
      await promise;
      expect(() => controller.close()).toThrowError(
        expect.objectContaining({
          name: "TypeError",
          message: "Invalid state: Controller is already closed",
          code: "ERR_INVALID_STATE",
        }),
      );
    } finally {
      server.stop(true);
    }
  });
});

View File

@@ -1,4 +1,4 @@
import { describe, expect, it } from "bun:test";
import { describe, expect, it, test } from "bun:test";
import crypto from "crypto";
import { readFileSync } from "fs";
import { bunEnv, bunExe, gc, tempDir, tls } from "harness";
@@ -865,3 +865,34 @@ it.serial("instances should be finalized when GC'd", async () => {
// expect that current and initial websocket be close to the same (normaly 1 or 2 difference)
expect(Math.abs(current_websocket_count - initial_websocket_count)).toBeLessThanOrEqual(50);
});
// Regression test for #12040
// Both `ws.send(data, cb)` and `ws.send(data, options, cb)` overloads must
// invoke their completion callback.
test("ws.send callback works as expected", async () => {
  const { WebSocket, WebSocketServer } = await import("ws");
  const { createServer } = await import("node:http");
  const httpServer = createServer();
  const { promise, resolve } = Promise.withResolvers();
  const { promise: promise2, resolve: resolve2 } = Promise.withResolvers();
  const wss = new WebSocketServer({
    server: httpServer,
    WebSocket,
  });
  wss.on("connection", ws => {
    // Following are two messages about to be sent, each with a slightly different way of calling the `ws.send` method:
    ws.send("foo", () => resolve());
    ws.send("bar", {}, () => resolve2());
  });
  const { promise: promise3, resolve: resolve3 } = Promise.withResolvers();
  httpServer.listen(0, () => resolve3());
  await promise3;
  const ws = new WebSocket("ws://localhost:" + httpServer.address().port);
  ws.on("message", msg => {});
  await Promise.all([promise, promise2]);
  ws.close();
  wss.close();
  // Fix: `wss.close()` does not close an externally supplied HTTP server,
  // so the listening socket previously leaked for the rest of the test run.
  httpServer.close();
});

View File

@@ -1,44 +0,0 @@
import { describe, expect, it } from "bun:test";
import { mkdirSync, readFileSync, rmSync, writeFileSync } from "fs";
import { join } from "path";
import { bunEnv, bunExe, tmpdirSync } from "../../harness.js";
describe.concurrent("issue/00631", () => {
it("JSON strings escaped properly", async () => {
const testDir = tmpdirSync();
// Clean up from prior runs if necessary
rmSync(testDir, { recursive: true, force: true });
// Create a directory with our test package file
mkdirSync(testDir, { recursive: true });
writeFileSync(join(testDir, "package.json"), String.raw`{"testRegex":"\\a\n\\b\\"}`);
// Attempt to add a package, causing the package file to be parsed, modified,
// written, and reparsed. This verifies that escaped backslashes in JSON
// survive the roundtrip
await using proc = Bun.spawn({
cmd: [bunExe(), "add", "left-pad"],
env: bunEnv,
cwd: testDir,
stdout: "pipe",
stderr: "pipe",
});
const [stderr, exitCode] = await Promise.all([proc.stderr.text(), proc.exited]);
if (exitCode !== 0) {
console.log(stderr);
}
expect(exitCode).toBe(0);
const packageContents = readFileSync(join(testDir, "package.json"), { encoding: "utf8" });
expect(packageContents).toBe(String.raw`{
"testRegex": "\\a\n\\b\\",
"dependencies": {
"left-pad": "^1.3.0"
}
}`);
//// If successful clean up test artifacts
rmSync(testDir, { recursive: true });
});
});

View File

@@ -1,32 +0,0 @@
import { test } from "bun:test";
import { createServer } from "node:http";
import { WebSocket, WebSocketServer } from "ws";
// https://github.com/oven-sh/bun/issues/12040
test("ws.send callback works as expected", async () => {
const httpServer = createServer();
const { promise, resolve } = Promise.withResolvers();
const { promise: promise2, resolve: resolve2 } = Promise.withResolvers();
const wss = new WebSocketServer({
server: httpServer,
WebSocket,
});
wss.on("connection", ws => {
// Following are two messages about to be sent, each with a slightly different way of calling the `ws.send` method:
ws.send("foo", () => resolve());
ws.send("bar", {}, () => resolve2());
});
const { promise: promise3, resolve: resolve3 } = Promise.withResolvers();
httpServer.listen(0, () => resolve3());
await promise3;
var ws = new WebSocket("ws://localhost:" + httpServer.address().port);
ws.on("message", msg => {});
await Promise.all([promise, promise2]);
ws.close();
wss.close();
});

View File

@@ -1,40 +0,0 @@
// https://github.com/oven-sh/bun/issues/12360
import { fileURLToPath, pathToFileURL } from "bun";
import { expect, test } from "bun:test";
import { isWindows, tmpdirSync } from "harness";
import { join } from "path";
export async function validatePath(path: URL): Promise<URL | string> {
const filePath = fileURLToPath(path);
if (await Bun.file(filePath).exists()) {
return pathToFileURL(filePath);
} else {
return "";
}
}
test("validate executable given in the config using `validatePath`: invalid value", async () => {
const dir = tmpdirSync();
const filePath = join(dir, "./sample.exe");
const newFilePath = await validatePath(pathToFileURL(filePath));
expect(newFilePath).toBe("");
});
test("validate executable given in the config using `validatePath`: expected real implementation", async () => {
const dir = tmpdirSync();
const editorPath: URL | string = pathToFileURL(join(dir, "./metaeditor64.exe"));
const terminalPath: URL | string = pathToFileURL(join(dir, "./terminal64.exe"));
await Bun.write(isWindows ? editorPath.pathname.slice(1) : editorPath.pathname, "im a editor");
await Bun.write(isWindows ? terminalPath.pathname.slice(1) : terminalPath.pathname, "im a terminal");
const newEditorPath = <URL>await validatePath(editorPath);
const newTerminalPath = <URL>await validatePath(terminalPath);
expect(newEditorPath.pathname).toBe(editorPath.pathname);
expect(newTerminalPath.pathname).toBe(terminalPath.pathname);
});

View File

@@ -1,24 +0,0 @@
import { expect, test } from "bun:test";
import { EventEmitter, on } from "events";
test("issue-14187", async () => {
const ac = new AbortController();
const ee = new EventEmitter();
async function* gen() {
for await (const item of on(ee, "beep", { signal: ac.signal })) {
yield item;
}
}
const iterator = gen();
iterator.next().catch(() => {});
expect(ee.listenerCount("beep")).toBe(1);
expect(ee.listenerCount("error")).toBe(1);
ac.abort();
expect(ee.listenerCount("beep")).toBe(0);
expect(ee.listenerCount("error")).toBe(0);
});

View File

@@ -1,18 +0,0 @@
// Round-trips an EMPTY plaintext through AES-GCM encrypt/decrypt with the
// given additional authenticated data (AAD) and asserts the decryption
// yields the empty string. `additionalData` may be undefined (no AAD).
async function doTest(additionalData) {
  const name = "AES-GCM";
  const key = await crypto.subtle.generateKey({ name, length: 128 }, false, ["encrypt", "decrypt"]);
  const plaintext = new Uint8Array();
  const iv = crypto.getRandomValues(new Uint8Array(16));
  const algorithm = { name, iv, tagLength: 128, additionalData };
  const ciphertext = await crypto.subtle.encrypt(algorithm, key, plaintext);
  const decrypted = await crypto.subtle.decrypt(algorithm, key, ciphertext);
  expect(new TextDecoder().decode(decrypted)).toBe("");
}
it("crypto.subtle.encrypt AES-GCM empty data", async () => {
doTest(undefined);
});
it("crypto.subtle.encrypt AES-GCM empty data with additional associated data", async () => {
doTest(crypto.getRandomValues(new Uint8Array(16)));
});

View File

@@ -1,8 +0,0 @@
import { expect, test } from "bun:test";
import { Request } from "node-fetch";
test("node fetch Request URL field is set even with a valid URL", () => {
expect(new Request("/").url).toBe("/");
expect(new Request("https://bun.sh/").url).toBe("https://bun.sh/");
expect(new Request(new URL("https://bun.sh/")).url).toBe("https://bun.sh/");
});

View File

@@ -1,6 +0,0 @@
import { promisify } from "util";
test("abc", () => {
const setTimeout = promisify(globalThis.setTimeout);
setTimeout(1, "ok").then(console.log);
});

View File

@@ -1,6 +0,0 @@
import { expect, test } from "bun:test";
test("should not be able to parse json from empty body", () => {
expect(async () => await new Response().json()).toThrow(SyntaxError);
expect(async () => await new Request("http://example.com/").json()).toThrow(SyntaxError);
});

View File

@@ -1,33 +0,0 @@
import { expect, test } from "bun:test";
test("can clone a response", async () => {
const response = new Response("bun", {
status: 201,
headers: {
"Content-Type": "text/bun;charset=utf-8",
},
});
// @ts-ignore
const clone = response.clone();
expect(clone.status).toBe(201);
expect(clone.headers.get("content-type")).toBe("text/bun;charset=utf-8");
expect(await response.text()).toBe("bun");
expect(await clone.text()).toBe("bun");
});
test("can clone a request", async () => {
const request = new Request("http://example.com/", {
method: "PUT",
headers: {
"Content-Type": "text/bun;charset=utf-8",
},
body: "bun",
});
expect(request.method).toBe("PUT");
// @ts-ignore
const clone = new Request(request);
expect(clone.method).toBe("PUT");
expect(clone.headers.get("content-type")).toBe("text/bun;charset=utf-8");
expect(await request.text()).toBe("bun");
expect(await clone.text()).toBe("bun");
});

View File

@@ -1,11 +0,0 @@
import { expect, test } from "bun:test";
test("can read json() from request", async () => {
for (let i = 0; i < 10; i++) {
const request = new Request("http://example.com/", {
method: "PUT",
body: '[1,2,"hello",{}]',
});
expect(await request.json()).toEqual([1, 2, "hello", {}]);
}
});

View File

@@ -1,15 +0,0 @@
import { describe, expect, test } from "bun:test";
import { bunEnv, bunExe } from "harness";
describe.concurrent("issue/04011", () => {
test("running a missing script should return non zero exit code", async () => {
await using proc = Bun.spawn({
cmd: [bunExe(), "run", "missing.ts"],
env: bunEnv,
stderr: "inherit",
stdout: "pipe",
});
expect(await proc.exited).toBe(1);
});
});

View File

@@ -1,39 +0,0 @@
import { file, serve } from "bun";
import { describe, expect, test } from "bun:test";
describe("Bun.serve()", () => {
const tls = {
cert: file(new URL("../fixtures/cert.pem", import.meta.url)),
key: file(new URL("../fixtures/cert.key", import.meta.url)),
};
const servers = [
{
port: 0,
url: /^http:\/\/localhost:\d+\/$/,
},
{
tls,
port: 0,
url: /^https:\/\/localhost:\d+\/$/,
},
];
test.each(servers)("%j", async ({ url, ...options }) => {
const server = serve({
hostname: "localhost",
...options,
fetch(request) {
return new Response(request.url);
},
});
try {
const proto = options.tls ? "https" : "http";
const target = `${proto}://localhost:${server.port}/`;
const response = await fetch(target, { tls: { rejectUnauthorized: false } });
expect(response.text()).resolves.toMatch(url);
} finally {
server.stop(true);
}
});
});

View File

@@ -1,7 +0,0 @@
import { expect, test } from "bun:test";
test("write(value >= 0x80)", () => {
const buffer = Buffer.alloc(1);
buffer.write("\x80", "binary");
expect(buffer[0]).toBe(0x80);
});

View File

@@ -1,67 +0,0 @@
import { expect, test } from "bun:test";
test("req.body.locked is true after body is consumed", async () => {
const req = new Request("https://example.com/", {
body: "test",
method: "POST",
});
await new Response(req.body).arrayBuffer();
expect(req.body.locked).toBe(true);
});
test("req.bodyUsed is true after body is consumed", async () => {
const req = new Request("https://example.com/", {
body: "test",
method: "POST",
});
await new Response(req.body).arrayBuffer();
expect(req.bodyUsed).toBe(true);
});
test("await fetch(req) throws if req.body is already consumed (arrayBuffer)", async () => {
const req = new Request("https://example.com/", {
body: "test",
method: "POST",
});
await new Response(req.body).arrayBuffer();
expect(() => fetch(req)).toThrow();
expect(req.bodyUsed).toBe(true);
});
test("await fetch(req) throws if req.body is already consumed (text)", async () => {
const req = new Request("https://example.com/", {
body: "test",
method: "POST",
});
await new Response(req.body).text();
expect(() => fetch(req)).toThrow();
expect(req.bodyUsed).toBe(true);
});
test("await fetch(req) throws if req.body is already consumed (stream that has been read)", async () => {
const req = new Request("https://example.com/", {
body: "test",
method: "POST",
});
await req.body.getReader().read();
expect(() => fetch(req)).toThrow();
expect(req.bodyUsed).toBe(true);
});
test("await fetch(req) throws if req.body is already consumed (stream)", async () => {
const req = new Request("https://example.com/", {
body: "test",
method: "POST",
});
req.body.getReader();
expect(() => fetch(req)).toThrow();
expect(req.bodyUsed).toBe(true);
});

View File

@@ -1,10 +0,0 @@
import { expect, test } from "bun:test";
test("Response.redirect clones string from Location header", () => {
const url = new URL("http://example.com");
url.hostname = "example1.com";
const { href } = url;
expect(href).toBe("http://example1.com/");
const response = Response.redirect(href);
expect(response.headers.get("Location")).toBe(href);
});

View File

@@ -1,78 +0,0 @@
import { describe, expect, test } from "bun:test";
const MAP_SIZE = 918 * 4;
describe("toEqual on a large Map", () => {
function* genpairs() {
for (let i = 0; i < MAP_SIZE; i++) {
yield ["k" + i, "v" + i] as const;
}
}
for (let MapClass of [
Map,
class CustomMap extends Map {
abc: number = 123;
// @ts-expect-error
constructor(iterable) {
// @ts-expect-error
super(iterable);
}
},
] as const) {
test(MapClass.name, () => {
// @ts-expect-error
const x = new MapClass<any, any>(genpairs());
// @ts-expect-error
const y = new MapClass<any, any>(genpairs());
expect(x).toEqual(y);
x.set("not-okay", 1);
y.set("okay", 1);
expect(x).not.toEqual(y);
x.delete("not-okay");
x.set("okay", 1);
expect(x).toEqual(y);
x.set("okay", 2);
expect(x).not.toEqual(y);
});
}
});
describe("toEqual on a large Set", () => {
function* genvalues() {
for (let i = 0; i < MAP_SIZE; i++) {
yield "v" + i;
}
}
for (let SetClass of [
Set,
class CustomSet extends Set {
constructor(iterable: any) {
super(iterable);
this.abc = 123;
}
abc: any;
},
]) {
test(SetClass.name, () => {
const x = new SetClass(genvalues());
const y = new SetClass(genvalues());
expect(x).toEqual(y);
x.add("not-okay");
y.add("okay");
expect(x).not.toEqual(y);
x.delete("not-okay");
x.add("okay");
expect(x).toEqual(y);
});
}
});

View File

@@ -1,22 +0,0 @@
import { expect, jest, test } from "bun:test";
test("#7827", () => {
for (let i = 0; i < 10; i++)
(function () {
const element = jest.fn(element => {
element.tagName;
});
const rewriter = new HTMLRewriter().on("p", {
element,
});
const content = "<p>Lorem ipsum!</p>";
rewriter.transform(new Response(content));
rewriter.transform(new Response(content));
expect(element).toHaveBeenCalledTimes(2);
})();
Bun.gc(true);
});

View File

@@ -1,6 +0,0 @@
import { semver } from "bun";
import { expect, test } from "bun:test";
test("semver with multiple tags work properly", () => {
expect(semver.satisfies("3.4.5", ">=3.3.0-beta.1 <3.4.0-beta.3")).toBeFalse();
});

Some files were not shown because too many files have changed in this diff Show More