mirror of https://github.com/oven-sh/bun
synced 2026-02-02 15:08:46 +00:00

Compare commits: bun-v1.3.4...claude/imp (3 commits)

Commits: 8d86931cc8, 52a2b441ca, 54087b4b5c
@@ -2338,7 +2338,7 @@ extern fn Bun__CallFrame__getLineNumber(callframe: *JSC.CallFrame, globalObject:
 fn captureTestLineNumber(callframe: *JSC.CallFrame, globalThis: *JSGlobalObject) u32 {
     if (Jest.runner) |runner| {
-        if (runner.test_options.file_reporter == .junit) {
+        if (runner.test_options.file_reporter == .junit or runner.test_options.agent) {
             return Bun__CallFrame__getLineNumber(callframe, globalThis);
         }
     }
@@ -662,7 +662,8 @@ pub const Bunfig = struct {
         if (console_expr.get("depth")) |depth| {
             if (depth.data == .e_number) {
                 const depth_value = @as(u16, @intFromFloat(depth.data.e_number.value));
-                this.ctx.runtime_options.console_depth = depth_value;
+                // Treat depth=0 as maxInt(u16) for infinite depth
+                this.ctx.runtime_options.console_depth = if (depth_value == 0) std.math.maxInt(u16) else depth_value;
             } else {
                 try this.addError(depth.loc, "Expected number");
             }
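For reference, the normalization this hunk introduces (and which the CLI parser below repeats) is a simple saturating map; a minimal TypeScript sketch, where MAX_U16 stands in for Zig's std.math.maxInt(u16):

// Sketch of the depth=0 normalization, assuming the stored depth is a u16.
const MAX_U16 = 65535; // std.math.maxInt(u16)

function normalizeConsoleDepth(depth: number): number {
  // depth=0 means "no limit": saturate to the largest representable depth.
  return depth === 0 ? MAX_U16 : depth;
}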
@@ -362,6 +362,7 @@ pub const Command = struct {

         file_reporter: ?TestCommand.FileReporter = null,
         reporter_outfile: ?[]const u8 = null,
+        agent: bool = false,
     };

     pub const Debugger = union(enum) {
@@ -192,6 +192,7 @@ pub const test_only_params = [_]ParamType{
     clap.parseParam("-t, --test-name-pattern <STR> Run only tests with a name that matches the given regex.") catch unreachable,
     clap.parseParam("--reporter <STR> Specify the test reporter. Currently --reporter=junit is the only supported format.") catch unreachable,
     clap.parseParam("--reporter-outfile <STR> The output file used for the format from --reporter.") catch unreachable,
+    clap.parseParam("--agent Use agent reporter (only prints errors and summary).") catch unreachable,
 };
 pub const test_params = test_only_params ++ runtime_params_ ++ transpiler_params_ ++ base_params_;
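A quick way to exercise the new flag from a script, sketched with Bun's spawn API (the agent.test.ts file added at the end of this compare does the same with harness helpers):

// Hypothetical smoke test; assumes a "bun" binary with this patch is on PATH.
const proc = Bun.spawn({
  cmd: ["bun", "test", "--agent"],
  stdout: "pipe",
  stderr: "pipe",
});
console.log("exit code:", await proc.exited); // 1 on failure or when no tests ran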
@@ -481,6 +482,7 @@ pub fn parse(allocator: std.mem.Allocator, ctx: Command.Context, comptime cmd: C
         ctx.test_options.update_snapshots = args.flag("--update-snapshots");
         ctx.test_options.run_todo = args.flag("--todo");
         ctx.test_options.only = args.flag("--only");
+        ctx.test_options.agent = args.flag("--agent");
     }

     ctx.args.absolute_working_dir = cwd;
@@ -674,7 +676,8 @@ pub fn parse(allocator: std.mem.Allocator, ctx: Command.Context, comptime cmd: C
             Output.errGeneric("Invalid value for --console-depth: \"{s}\". Must be a positive integer\n", .{depth_str});
             Global.exit(1);
         };
-        ctx.runtime_options.console_depth = depth;
+        // Treat depth=0 as maxInt(u16) for infinite depth
+        ctx.runtime_options.console_depth = if (depth == 0) std.math.maxInt(u16) else depth;
     }

     if (args.option("--dns-result-order")) |order| {
@@ -824,15 +824,18 @@ pub const CommandLineReporter = struct {
     }

     pub fn handleTestPass(cb: *TestRunner.Callback, id: Test.ID, file: string, label: string, expectations: u32, elapsed_ns: u64, parent: ?*jest.DescribeScope) void {
-        const writer = Output.errorWriterBuffered();
-        defer Output.flush();
-
         var this: *CommandLineReporter = @fieldParentPtr("callback", cb);

-        writeTestStatusLine(.pass, &writer);
+        // In agent mode, don't print pass status
+        if (!this.jest.test_options.agent) {
+            const writer = Output.errorWriterBuffered();
+            defer Output.flush();

-        const line_number = this.jest.tests.items(.line_number)[id];
-        printTestLine(.pass, label, elapsed_ns, parent, expectations, false, writer, file, this.file_reporter, line_number);
+            writeTestStatusLine(.pass, &writer);
+
+            const line_number = this.jest.tests.items(.line_number)[id];
+            printTestLine(.pass, label, elapsed_ns, parent, expectations, false, writer, file, this.file_reporter, line_number);
+        }

         this.jest.tests.items(.status)[id] = TestRunner.Test.Status.pass;
         this.summary().pass += 1;
@@ -840,26 +843,50 @@ pub const CommandLineReporter = struct {
     }

     pub fn handleTestFail(cb: *TestRunner.Callback, id: Test.ID, file: string, label: string, expectations: u32, elapsed_ns: u64, parent: ?*jest.DescribeScope) void {
-        var writer_ = Output.errorWriterBuffered();
-        defer Output.flush();
         var this: *CommandLineReporter = @fieldParentPtr("callback", cb);

-        // when the tests fail, we want to repeat the failures at the end
-        // so that you can see them better when there are lots of tests that ran
-        const initial_length = this.failures_to_repeat_buf.items.len;
-        var writer = this.failures_to_repeat_buf.writer(bun.default_allocator);
-
-        writeTestStatusLine(.fail, &writer);
         const line_number = this.jest.tests.items(.line_number)[id];
-        printTestLine(.fail, label, elapsed_ns, parent, expectations, false, writer, file, this.file_reporter, line_number);

-        // We must always reset the colors because (skip) will have set them to <d>
-        if (Output.enable_ansi_colors_stderr) {
-            writer.writeAll(Output.prettyFmt("<r>", true)) catch {};
+        // In agent mode, print failures immediately with clean format
+        if (this.jest.test_options.agent) {
+            var writer_ = Output.errorWriterBuffered();
+            defer Output.flush();
+
+            // Add source_url:line above the failing test output
+            const filename = brk: {
+                if (strings.hasPrefix(file, bun.fs.FileSystem.instance.top_level_dir)) {
+                    break :brk strings.withoutLeadingPathSeparator(file[bun.fs.FileSystem.instance.top_level_dir.len..]);
+                } else {
+                    break :brk file;
+                }
+            };
+
+            if (line_number > 0) {
+                writer_.print("{s}:{d}\n", .{ filename, line_number }) catch {};
+            } else {
+                writer_.print("{s}\n", .{filename}) catch {};
+            }
+
+            writer_.print("FAIL: {s}\n", .{label}) catch {};
+        } else {
+            // In normal mode, build the repeat buffer for later display
+            const initial_length = this.failures_to_repeat_buf.items.len;
+            var writer = this.failures_to_repeat_buf.writer(bun.default_allocator);
+
+            writeTestStatusLine(.fail, &writer);
+            printTestLine(.fail, label, elapsed_ns, parent, expectations, false, writer, file, this.file_reporter, line_number);
+
+            // We must always reset the colors because (skip) will have set them to <d>
+            if (Output.enable_ansi_colors_stderr) {
+                writer.writeAll(Output.prettyFmt("<r>", true)) catch {};
+            }
+
+            // Output to stderr immediately in normal mode
+            var writer_ = Output.errorWriterBuffered();
+            defer Output.flush();
+            writer_.writeAll(this.failures_to_repeat_buf.items[initial_length..]) catch {};
         }

-        writer_.writeAll(this.failures_to_repeat_buf.items[initial_length..]) catch {};
-
         // this.updateDots();
         this.summary().fail += 1;
         this.summary().expectations += expectations;
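The agent branch above emits a plain file:line header followed by FAIL: <label>; the same formatting in a TypeScript sketch (the helper name is illustrative, not from the diff):

// Builds the header agent mode prints above each failure's details.
function agentFailureHeader(file: string, lineNumber: number, label: string): string {
  // line_number is 0 when no line could be captured; fall back to the bare path.
  const location = lineNumber > 0 ? `${file}:${lineNumber}` : file;
  return `${location}\nFAIL: ${label}\n`;
}

// agentFailureHeader("fail.test.js", 4, "failing test")
// => "fail.test.js:4\nFAIL: failing test\n"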
@@ -876,7 +903,8 @@ pub const CommandLineReporter = struct {
         var this: *CommandLineReporter = @fieldParentPtr("callback", cb);

         // If you do it.only, don't report the skipped tests because its pretty noisy
-        if (jest.Jest.runner != null and !jest.Jest.runner.?.only) {
+        // In agent mode, don't print skipped tests
+        if (jest.Jest.runner != null and !jest.Jest.runner.?.only and !this.jest.test_options.agent) {
             var writer_ = Output.errorWriterBuffered();
             defer Output.flush();
             // when the tests skip, we want to repeat the failures at the end
@@ -921,21 +949,24 @@ pub const CommandLineReporter = struct {
     }

     pub fn handleTestTodo(cb: *TestRunner.Callback, id: Test.ID, file: string, label: string, expectations: u32, elapsed_ns: u64, parent: ?*jest.DescribeScope) void {
-        var writer_ = Output.errorWriterBuffered();
-
         var this: *CommandLineReporter = @fieldParentPtr("callback", cb);

-        // when the tests skip, we want to repeat the failures at the end
-        // so that you can see them better when there are lots of tests that ran
-        const initial_length = this.todos_to_repeat_buf.items.len;
-        var writer = this.todos_to_repeat_buf.writer(bun.default_allocator);
+        // In agent mode, don't print todo status
+        if (!this.jest.test_options.agent) {
+            var writer_ = Output.errorWriterBuffered();

-        writeTestStatusLine(.todo, &writer);
-        const line_number = this.jest.tests.items(.line_number)[id];
-        printTestLine(.todo, label, elapsed_ns, parent, expectations, true, writer, file, this.file_reporter, line_number);
+            // when the tests skip, we want to repeat the failures at the end
+            // so that you can see them better when there are lots of tests that ran
+            const initial_length = this.todos_to_repeat_buf.items.len;
+            var writer = this.todos_to_repeat_buf.writer(bun.default_allocator);

-        writer_.writeAll(this.todos_to_repeat_buf.items[initial_length..]) catch {};
-        Output.flush();
+            writeTestStatusLine(.todo, &writer);
+            const line_number = this.jest.tests.items(.line_number)[id];
+            printTestLine(.todo, label, elapsed_ns, parent, expectations, true, writer, file, this.file_reporter, line_number);
+
+            writer_.writeAll(this.todos_to_repeat_buf.items[initial_length..]) catch {};
+            Output.flush();
+        }

         // this.updateDots();
         this.summary().todo += 1;
@@ -1307,6 +1338,12 @@ pub const TestCommand = struct {
     pub fn exec(ctx: Command.Context) !void {
         Output.is_github_action = Output.isGithubAction();

+        // Disable ANSI colors in agent mode
+        if (ctx.test_options.agent) {
+            Output.enable_ansi_colors_stderr = false;
+            Output.enable_ansi_colors_stdout = false;
+        }
+
         // print the version so you know its doing stuff if it takes a sec
         Output.prettyln("<r><b>bun test <r><d>v" ++ Global.package_json_version_with_sha ++ "<r>", .{});
         Output.flush();
@@ -1530,7 +1567,8 @@ pub const TestCommand = struct {
         const write_snapshots_success = try jest.Jest.runner.?.snapshots.writeInlineSnapshots();
         try jest.Jest.runner.?.snapshots.writeSnapshotFile();
         var coverage_options = ctx.test_options.coverage;
-        if (reporter.summary().pass > 20) {
+        // In agent mode, don't print repeat buffers since errors are printed immediately
+        if (reporter.summary().pass > 20 and !ctx.test_options.agent) {
             if (reporter.summary().skip > 0) {
                 Output.prettyError("\n<r><d>{d} tests skipped:<r>\n", .{reporter.summary().skip});
                 Output.flush();
@@ -1737,7 +1775,11 @@ pub const TestCommand = struct {
         }
         const summary = reporter.summary();

-        if (failed_to_find_any_tests or summary.didLabelFilterOutAllTests() or summary.fail > 0 or (coverage_options.enabled and coverage_options.fractions.failing and coverage_options.fail_on_low_coverage) or !write_snapshots_success) {
+        // In agent mode, exit with code 1 when no tests are run
+        const no_tests_run = (summary.pass + summary.fail + summary.skip + summary.todo) == 0;
+        const should_exit_with_error = failed_to_find_any_tests or summary.didLabelFilterOutAllTests() or summary.fail > 0 or (coverage_options.enabled and coverage_options.fractions.failing and coverage_options.fail_on_low_coverage) or !write_snapshots_success or (ctx.test_options.agent and no_tests_run);
+
+        if (should_exit_with_error) {
             Global.exit(1);
         } else if (reporter.jest.unhandled_errors_between_tests > 0) {
             Global.exit(reporter.jest.unhandled_errors_between_tests);
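Reduced to the agent-relevant terms (the coverage and snapshot conditions are omitted), the new exit rule reads as below; the names are illustrative, not from the diff:

// Agent mode treats "zero tests ran" as an error so an empty run cannot pass silently.
function shouldExitWithError(opts: {
  failedToFindAnyTests: boolean;
  failCount: number;
  testsRun: number; // pass + fail + skip + todo
  agent: boolean;
}): boolean {
  return opts.failedToFindAnyTests || opts.failCount > 0 || (opts.agent && opts.testsRun === 0);
}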
@@ -1841,12 +1883,15 @@ pub const TestCommand = struct {
         vm.onUnhandledRejection = jest.TestRunnerTask.onUnhandledRejection;

         while (repeat_index < repeat_count) : (repeat_index += 1) {
-            if (repeat_count > 1) {
-                Output.prettyErrorln("<r>\n{s}{s}: <d>(run #{d})<r>\n", .{ file_prefix, file_title, repeat_index + 1 });
-            } else {
-                Output.prettyErrorln("<r>\n{s}{s}:\n", .{ file_prefix, file_title });
+            // In agent mode, don't print the file name header
+            if (!reporter.jest.test_options.agent) {
+                if (repeat_count > 1) {
+                    Output.prettyErrorln("<r>\n{s}{s}: <d>(run #{d})<r>\n", .{ file_prefix, file_title, repeat_index + 1 });
+                } else {
+                    Output.prettyErrorln("<r>\n{s}{s}:\n", .{ file_prefix, file_title });
+                }
+                Output.flush();
             }
-            Output.flush();

             var promise = try vm.loadEntryPointForTestRunner(file_path);
             reporter.summary().files += 1;
@@ -82,7 +82,7 @@
     "tsyringe": "4.8.0",
     "type-graphql": "2.0.0-rc.2",
     "typeorm": "0.3.20",
-    "typescript": "^5.8.3",
+    "typescript": "5.8.3",
     "undici": "5.20.0",
     "unzipper": "0.12.3",
     "uuid": "11.1.0",
@@ -229,7 +229,7 @@ describe("console depth", () => {
     );
   });

-  test("edge case: depth 0 should show only top level structure", async () => {
+  test("edge case: depth 0 should show infinite depth", async () => {
     const dir = tempDirWithFiles("console-depth-zero", {
       "test.js": testScript,
     });
@@ -250,7 +250,70 @@ describe("console depth", () => {
     expect(stderr).toBe("");
     expect(normalizeOutput(stdout)).toMatchInlineSnapshot(`
       "{
-        level1: [Object ...],
+        level1: {
+          level2: {
+            level3: {
+              level4: {
+                level5: {
+                  level6: {
+                    level7: {
+                      level8: {
+                        level9: {
+                          level10: \"deep value\",
+                        },
+                      },
+                    },
+                  },
+                },
+              },
+            },
+          },
+        },
       }"
     `);
   });
+
+  test("bunfig.toml depth=0 should show infinite depth", async () => {
+    const dir = tempDirWithFiles("console-depth-bunfig-zero", {
+      "test.js": testScript,
+      "bunfig.toml": `[console]\ndepth = 0`,
+    });
+
+    await using proc = Bun.spawn({
+      cmd: [bunExe(), "test.js"],
+      env: bunEnv,
+      cwd: dir,
+    });
+
+    const [stdout, stderr, exitCode] = await Promise.all([
+      new Response(proc.stdout).text(),
+      new Response(proc.stderr).text(),
+      proc.exited,
+    ]);
+
+    expect(exitCode).toBe(0);
+    expect(stderr).toBe("");
+    expect(normalizeOutput(stdout)).toMatchInlineSnapshot(`
+      "{
+        level1: {
+          level2: {
+            level3: {
+              level4: {
+                level5: {
+                  level6: {
+                    level7: {
+                      level8: {
+                        level9: {
+                          level10: \"deep value\",
+                        },
+                      },
+                    },
+                  },
+                },
+              },
+            },
+          },
+        },
+      }"
+    `);
+  });

test/cli/test/agent.test.ts (new file, 337 lines)
@@ -0,0 +1,337 @@
import { expect, test } from "bun:test";
import { bunExe, tempDirWithFiles } from "harness";

test("--agent flag: only prints errors and summary", async () => {
  const dir = tempDirWithFiles("agent-test-1", {
    "pass.test.js": `
      import { test, expect } from "bun:test";

      test("passing test", () => {
        expect(1 + 1).toBe(2);
      });
    `,
    "fail.test.js": `
      import { test, expect } from "bun:test";

      test("failing test", () => {
        expect(1 + 1).toBe(3);
      });
    `,
    "skip.test.js": `
      import { test, expect } from "bun:test";

      test.skip("skipped test", () => {
        expect(1 + 1).toBe(2);
      });
    `,
    "todo.test.js": `
      import { test, expect } from "bun:test";

      test.todo("todo test", () => {
        expect(1 + 1).toBe(2);
      });
    `,
  });

  await using proc = Bun.spawn({
    cmd: [bunExe(), "test", "--agent"],
    cwd: dir,
    stderr: "pipe",
    stdout: "pipe",
  });

  const [stdout, stderr, exitCode] = await Promise.all([
    new Response(proc.stdout).text(),
    new Response(proc.stderr).text(),
    proc.exited,
  ]);

  // Should exit with code 1 because tests failed
  expect(exitCode).toBe(1);

  // Should not contain ANSI color codes
  expect(stderr).not.toContain("\u001b[");
  expect(stdout).not.toContain("\u001b[");

  // Should contain failure output
  expect(stderr).toContain("failing test");
  expect(stderr).toContain("Expected:");
  expect(stderr).toContain("Received:");

  // Should NOT contain pass/skip/todo individual test output
  expect(stderr).not.toContain("passing test");
  expect(stderr).not.toContain("skipped test");
  expect(stderr).not.toContain("todo test");

  // Should contain summary with counts
  expect(stderr).toContain("1 pass");
  expect(stderr).toContain("1 skip");
  expect(stderr).toContain("1 todo");
  expect(stderr).toContain("1 fail");

  // Should contain total test count
  expect(stderr).toContain("Ran 4 test");
});
test("--agent flag: exits with code 1 when no tests are run", async () => {
|
||||
const dir = tempDirWithFiles("agent-test-2", {
|
||||
"not-a-test.js": `console.log("not a test");`,
|
||||
});
|
||||
|
||||
await using proc = Bun.spawn({
|
||||
cmd: [bunExe(), "test", "--agent"],
|
||||
cwd: dir,
|
||||
stderr: "pipe",
|
||||
stdout: "pipe",
|
||||
});
|
||||
|
||||
const [stdout, stderr, exitCode] = await Promise.all([
|
||||
new Response(proc.stdout).text(),
|
||||
new Response(proc.stderr).text(),
|
||||
proc.exited,
|
||||
]);
|
||||
|
||||
// Should exit with code 1 when no tests are found
|
||||
expect(exitCode).toBe(1);
|
||||
|
||||
// Should not contain ANSI color codes
|
||||
expect(stderr).not.toContain("\u001b[");
|
||||
expect(stdout).not.toContain("\u001b[");
|
||||
});
|
||||
|
||||
test("--agent flag: with only passing tests", async () => {
|
||||
const dir = tempDirWithFiles("agent-test-3", {
|
||||
"pass1.test.js": `
|
||||
import { test, expect } from "bun:test";
|
||||
|
||||
test("passing test 1", () => {
|
||||
expect(1 + 1).toBe(2);
|
||||
});
|
||||
`,
|
||||
"pass2.test.js": `
|
||||
import { test, expect } from "bun:test";
|
||||
|
||||
test("passing test 2", () => {
|
||||
expect(2 + 2).toBe(4);
|
||||
});
|
||||
`,
|
||||
});
|
||||
|
||||
await using proc = Bun.spawn({
|
||||
cmd: [bunExe(), "test", "--agent"],
|
||||
cwd: dir,
|
||||
stderr: "pipe",
|
||||
stdout: "pipe",
|
||||
});
|
||||
|
||||
const [stdout, stderr, exitCode] = await Promise.all([
|
||||
new Response(proc.stdout).text(),
|
||||
new Response(proc.stderr).text(),
|
||||
proc.exited,
|
||||
]);
|
||||
|
||||
// Should exit with code 0 when all tests pass
|
||||
expect(exitCode).toBe(0);
|
||||
|
||||
// Should not contain ANSI color codes
|
||||
expect(stderr).not.toContain("\u001b[");
|
||||
expect(stdout).not.toContain("\u001b[");
|
||||
|
||||
// Should NOT contain individual test pass output
|
||||
expect(stderr).not.toContain("passing test 1");
|
||||
expect(stderr).not.toContain("passing test 2");
|
||||
|
||||
// Should contain summary with counts
|
||||
expect(stderr).toContain("2 pass");
|
||||
expect(stderr).toContain("0 fail");
|
||||
|
||||
// Should contain total test count
|
||||
expect(stderr).toContain("Ran 2 test");
|
||||
});
|
||||
|
||||
test("--agent flag: with test filters", async () => {
|
||||
const dir = tempDirWithFiles("agent-test-4", {
|
||||
"test1.test.js": `
|
||||
import { test, expect } from "bun:test";
|
||||
|
||||
test("matching test", () => {
|
||||
expect(1 + 1).toBe(2);
|
||||
});
|
||||
|
||||
test("other test", () => {
|
||||
expect(2 + 2).toBe(4);
|
||||
});
|
||||
`,
|
||||
});
|
||||
|
||||
await using proc = Bun.spawn({
|
||||
cmd: [bunExe(), "test", "--agent", "-t", "matching"],
|
||||
cwd: dir,
|
||||
stderr: "pipe",
|
||||
stdout: "pipe",
|
||||
});
|
||||
|
||||
const [stdout, stderr, exitCode] = await Promise.all([
|
||||
new Response(proc.stdout).text(),
|
||||
new Response(proc.stderr).text(),
|
||||
proc.exited,
|
||||
]);
|
||||
|
||||
// Should exit with code 0 when filtered tests pass
|
||||
expect(exitCode).toBe(0);
|
||||
|
||||
// Should not contain ANSI color codes
|
||||
expect(stderr).not.toContain("\u001b[");
|
||||
expect(stdout).not.toContain("\u001b[");
|
||||
|
||||
// Should contain summary with counts (only 1 test should run)
|
||||
expect(stderr).toContain("1 pass");
|
||||
expect(stderr).toContain("0 fail");
|
||||
|
||||
// Should contain total test count
|
||||
expect(stderr).toContain("Ran 1 test");
|
||||
});
|
||||
|
||||
test("--agent flag: with many failures (tests immediate output)", async () => {
|
||||
const dir = tempDirWithFiles("agent-test-5", {
|
||||
"fail1.test.js": `
|
||||
import { test, expect } from "bun:test";
|
||||
|
||||
test("fail 1", () => {
|
||||
expect(1).toBe(2);
|
||||
});
|
||||
`,
|
||||
"fail2.test.js": `
|
||||
import { test, expect } from "bun:test";
|
||||
|
||||
test("fail 2", () => {
|
||||
expect(2).toBe(3);
|
||||
});
|
||||
`,
|
||||
"fail3.test.js": `
|
||||
import { test, expect } from "bun:test";
|
||||
|
||||
test("fail 3", () => {
|
||||
expect(3).toBe(4);
|
||||
});
|
||||
`,
|
||||
// Add many passing tests to trigger the repeat buffer logic
|
||||
...Array.from({ length: 25 }, (_, i) => ({
|
||||
[`pass${i}.test.js`]: `
|
||||
import { test, expect } from "bun:test";
|
||||
|
||||
test("pass ${i}", () => {
|
||||
expect(${i}).toBe(${i});
|
||||
});
|
||||
`,
|
||||
})).reduce((acc, obj) => ({ ...acc, ...obj }), {}),
|
||||
});
|
||||
|
||||
await using proc = Bun.spawn({
|
||||
cmd: [bunExe(), "test", "--agent"],
|
||||
cwd: dir,
|
||||
stderr: "pipe",
|
||||
stdout: "pipe",
|
||||
});
|
||||
|
||||
const [stdout, stderr, exitCode] = await Promise.all([
|
||||
new Response(proc.stdout).text(),
|
||||
new Response(proc.stderr).text(),
|
||||
proc.exited,
|
||||
]);
|
||||
|
||||
// Should exit with code 1 because tests failed
|
||||
expect(exitCode).toBe(1);
|
||||
|
||||
// Should not contain ANSI color codes
|
||||
expect(stderr).not.toContain("\u001b[");
|
||||
expect(stdout).not.toContain("\u001b[");
|
||||
|
||||
// Should contain failure output (printed immediately)
|
||||
expect(stderr).toContain("fail 1");
|
||||
expect(stderr).toContain("fail 2");
|
||||
expect(stderr).toContain("fail 3");
|
||||
|
||||
// Should NOT contain repeat buffer headers (since agent mode disables them)
|
||||
expect(stderr).not.toContain("tests failed:");
|
||||
|
||||
// Should contain summary with counts
|
||||
expect(stderr).toContain("25 pass");
|
||||
expect(stderr).toContain("3 fail");
|
||||
|
||||
// Should contain total test count
|
||||
expect(stderr).toContain("Ran 28 test");
|
||||
});
|
||||
|
||||
test("normal mode vs agent mode comparison", async () => {
|
||||
const dir = tempDirWithFiles("agent-test-6", {
|
||||
"test.test.js": `
|
||||
import { test, expect } from "bun:test";
|
||||
|
||||
test("passing test", () => {
|
||||
expect(1 + 1).toBe(2);
|
||||
});
|
||||
|
||||
test("failing test", () => {
|
||||
expect(1 + 1).toBe(3);
|
||||
});
|
||||
|
||||
test.skip("skipped test", () => {
|
||||
expect(1 + 1).toBe(2);
|
||||
});
|
||||
`,
|
||||
});
|
||||
|
||||
// Run in normal mode
|
||||
await using normalProc = Bun.spawn({
|
||||
cmd: [bunExe(), "test"],
|
||||
cwd: dir,
|
||||
stderr: "pipe",
|
||||
stdout: "pipe",
|
||||
});
|
||||
|
||||
const [normalStdout, normalStderr, normalExitCode] = await Promise.all([
|
||||
new Response(normalProc.stdout).text(),
|
||||
new Response(normalProc.stderr).text(),
|
||||
normalProc.exited,
|
||||
]);
|
||||
|
||||
// Run in agent mode
|
||||
await using agentProc = Bun.spawn({
|
||||
cmd: [bunExe(), "test", "--agent"],
|
||||
cwd: dir,
|
||||
stderr: "pipe",
|
||||
stdout: "pipe",
|
||||
});
|
||||
|
||||
const [agentStdout, agentStderr, agentExitCode] = await Promise.all([
|
||||
new Response(agentProc.stdout).text(),
|
||||
new Response(agentProc.stderr).text(),
|
||||
agentProc.exited,
|
||||
]);
|
||||
|
||||
// Both should exit with the same code
|
||||
expect(normalExitCode).toBe(agentExitCode);
|
||||
expect(normalExitCode).toBe(1); // Because tests failed
|
||||
|
||||
// Agent mode should not contain ANSI color codes (even if normal mode might not have them in CI)
|
||||
expect(agentStderr).not.toContain("\u001b[");
|
||||
|
||||
// Normal mode should show individual test results, agent mode should not
|
||||
expect(normalStderr).toContain("(pass) passing test");
|
||||
expect(normalStderr).toContain("(skip) skipped test");
|
||||
expect(agentStderr).not.toContain("(pass) passing test");
|
||||
expect(agentStderr).not.toContain("(skip) skipped test");
|
||||
|
||||
// Both should contain failure output
|
||||
expect(normalStderr).toContain("failing test");
|
||||
expect(agentStderr).toContain("failing test");
|
||||
|
||||
// Both should contain summary counts
|
||||
expect(normalStderr).toContain("1 pass");
|
||||
expect(normalStderr).toContain("1 fail");
|
||||
expect(normalStderr).toContain("1 skip");
|
||||
expect(agentStderr).toContain("1 pass");
|
||||
expect(agentStderr).toContain("1 fail");
|
||||
expect(agentStderr).toContain("1 skip");
|
||||
});
|
||||