Mirror of https://github.com/oven-sh/bun (synced 2026-02-02 15:08:46 +00:00)
Add --only-failures flag to bun:test (#23312)
## Summary

Adds a new `--only-failures` flag to `bun test` that only displays test failures, similar to `--dots` but without printing dots for each test.

## Motivation

When running large test suites or in CI environments, users often only care about test failures. The existing `--dots` reporter reduces verbosity by showing dots, but still requires visual scanning to find failures. The `--only-failures` flag provides a cleaner output by completely suppressing passing tests.

## Changes

- Added `--only-failures` CLI flag in `Arguments.zig`
- Added `only_failures` boolean to the test reporters struct in `cli.zig`
- Updated test output logic in `test_command.zig` to skip non-failures when the flag is set
- Updated `jest.zig` and `bun_test.zig` to handle the new flag
- Added comprehensive tests in `only-failures.test.ts`

## Usage

```bash
bun test --only-failures
```

Example output (only shows failures):

```
test/example.test.ts:
(fail) failing test
error: expect(received).toBe(expected)

Expected: 3
Received: 2

 5 pass
 1 skip
 2 fail
Ran 8 tests across 1 file.
```

## Test Plan

- Verified `--only-failures` flag only shows failing tests
- Verified normal test output still works without the flag
- Verified `--dots` reporter still works correctly
- Added regression tests with snapshot comparisons

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: pfg <pfg@pfg.pw>
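Beyond the CLI flag, the diff below also wires the option into `bunfig.toml` (see the `Bunfig.zig` hunk and the docs changes), so the same behavior can be enabled per project instead of passing the flag on every run:

```toml
# bunfig.toml — per the docs added in this PR
[test]
onlyFailures = true
```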
@@ -249,6 +249,41 @@ This is useful for:

The `--concurrent` CLI flag will override this setting when specified.

### `test.onlyFailures`

When enabled, only failed tests are displayed in the output. This helps reduce noise in large test suites by hiding passing tests. Default `false`.

```toml
[test]
onlyFailures = true
```

This is equivalent to using the `--only-failures` flag when running `bun test`.

### `test.reporter`

Configure the test reporter settings.

#### `test.reporter.dots`

Enable the dots reporter, which displays a compact output showing a dot for each test. Default `false`.

```toml
[test.reporter]
dots = true
```

#### `test.reporter.junit`

Enable JUnit XML reporting and specify the output file path.

```toml
[test.reporter]
junit = "test-results.xml"
```

This generates a JUnit XML report that can be consumed by CI systems and other tools.

### `test.randomize`

Run tests in random order. Default `false`.
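The docs hunk above does not include a config snippet for `test.randomize`; by analogy with the sibling `[test]` options it would presumably look like the following (an assumed example, not part of the diff):

```toml
[test]
randomize = true
```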
@@ -173,7 +173,7 @@ pub const BunTestRoot = struct {
pub fn onBeforePrint(this: *BunTestRoot) void {
if (this.active_file.get()) |active_file| {
if (active_file.reporter) |reporter| {
if (reporter.last_printed_dot and reporter.reporters.dots) {
if (reporter.reporters.dots and reporter.last_printed_dot) {
bun.Output.prettyError("<r>\n", .{});
bun.Output.flush();
reporter.last_printed_dot = false;

@@ -15,7 +15,7 @@ const CurrentFile = struct {
repeat_index: u32,
reporter: *CommandLineReporter,
) void {
if (Output.isAIAgent() or reporter.reporters.dots) {
if (reporter.reporters.dots or reporter.reporters.only_failures) {
this.freeAndClear();
this.title = bun.handleOom(bun.default_allocator.dupe(u8, title));
this.prefix = bun.handleOom(bun.default_allocator.dupe(u8, prefix));

@@ -239,6 +239,11 @@ pub const Bunfig = struct {
this.ctx.test_options.coverage.enabled = expr.data.e_boolean.value;
}

if (test_.get("onlyFailures")) |expr| {
try this.expect(expr, .e_boolean);
this.ctx.test_options.reporters.only_failures = expr.data.e_boolean.value;
}

if (test_.get("reporter")) |expr| {
try this.expect(expr, .e_object);
if (expr.get("junit")) |junit_expr| {

@@ -355,6 +355,7 @@ pub const Command = struct {

reporters: struct {
dots: bool = false,
only_failures: bool = false,
junit: bool = false,
} = .{},
reporter_outfile: ?[]const u8 = null,

@@ -209,6 +209,7 @@ pub const test_only_params = [_]ParamType{
clap.parseParam("--reporter <STR> Test output reporter format. Available: 'junit' (requires --reporter-outfile), 'dots'. Default: console output.") catch unreachable,
clap.parseParam("--reporter-outfile <STR> Output file path for the reporter format (required with --reporter).") catch unreachable,
clap.parseParam("--dots Enable dots reporter. Shorthand for --reporter=dots.") catch unreachable,
clap.parseParam("--only-failures Only display test failures, hiding passing tests.") catch unreachable,
clap.parseParam("--max-concurrency <NUMBER> Maximum number of concurrent tests to execute at once. Default is 20.") catch unreachable,
};
pub const test_params = test_only_params ++ runtime_params_ ++ transpiler_params_ ++ base_params_;
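For reference, a sketch of how the reporter-related flags from this parameter list combine on the command line; this is based only on the help text above, not on output captured in this PR:

```bash
# show only failing tests (added by this PR)
bun test --only-failures

# compact dots reporter
bun test --dots

# JUnit XML report; per the help text, --reporter requires --reporter-outfile
bun test --reporter=junit --reporter-outfile=test-results.xml
```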
@@ -463,6 +464,11 @@ pub fn parse(allocator: std.mem.Allocator, ctx: Command.Context, comptime cmd: C
ctx.test_options.reporters.dots = true;
}

// Handle --only-failures flag
if (args.flag("--only-failures")) {
ctx.test_options.reporters.only_failures = true;
}

if (args.option("--coverage-dir")) |dir| {
ctx.test_options.coverage.reports_directory = dir;
}

@@ -579,6 +579,7 @@ pub const CommandLineReporter = struct {

reporters: struct {
dots: bool = false,
only_failures: bool = false,
junit: ?*JunitReporter = null,
} = .{},

@@ -874,8 +875,8 @@ pub const CommandLineReporter = struct {
},
}
buntest.reporter.?.last_printed_dot = true;
} else if (Output.isAIAgent() and (comptime result.basicResult()) != .fail) {
// when using AI agents, only print failures
} else if (((comptime result.basicResult()) != .fail) and (buntest.reporter != null and buntest.reporter.?.reporters.only_failures)) {
// when using --only-failures, only print failures
} else {
buntest.bun_test_root.onBeforePrint();

@@ -900,7 +901,7 @@ pub const CommandLineReporter = struct {

var this: *CommandLineReporter = buntest.reporter orelse return; // command line reporter is missing! uh oh!

if (!this.reporters.dots) switch (sequence.result.basicResult()) {
if (!this.reporters.dots and !this.reporters.only_failures) switch (sequence.result.basicResult()) {
.skip => bun.handleOom(this.skips_to_repeat_buf.appendSlice(bun.default_allocator, output_buf.items[initial_length..])),
.todo => bun.handleOom(this.todos_to_repeat_buf.appendSlice(bun.default_allocator, output_buf.items[initial_length..])),
.fail => bun.handleOom(this.failures_to_repeat_buf.appendSlice(bun.default_allocator, output_buf.items[initial_length..])),

@@ -1362,6 +1363,11 @@ pub const TestCommand = struct {
if (ctx.test_options.reporters.dots) {
reporter.reporters.dots = true;
}
if (ctx.test_options.reporters.only_failures) {
reporter.reporters.only_failures = true;
} else if (Output.isAIAgent()) {
reporter.reporters.only_failures = true; // only-failures defaults to true for ai agents
}

js_ast.Expr.Data.Store.create();
js_ast.Stmt.Data.Store.create();
test/js/bun/test/only-failures.fixture.ts (new file, 27 lines)
@@ -0,0 +1,27 @@
import { expect, test } from "bun:test";

test("passing test 1", () => {
expect(1 + 1).toBe(2);
});

test("passing test 2", () => {
expect(2 + 2).toBe(4);
});

test("failing test", () => {
expect(1 + 1).toBe(3);
});

test("passing test 3", () => {
expect(3 + 3).toBe(6);
});

test.skip("skipped test", () => {
expect(true).toBe(false);
});

test.todo("todo test");

test("another failing test", () => {
throw new Error("This test fails");
});
test/js/bun/test/only-failures.test.ts (new file, 120 lines)
@@ -0,0 +1,120 @@
import { expect, test } from "bun:test";
import { bunEnv, bunExe, normalizeBunSnapshot, tempDir } from "harness";

test.concurrent("only-failures flag should show only failures", async () => {
const result = await Bun.spawn({
cmd: [bunExe(), "test", import.meta.dir + "/only-failures.fixture.ts", "--only-failures"],
stdout: "pipe",
stderr: "pipe",
env: bunEnv,
});
const exitCode = await result.exited;
const stdout = await result.stdout.text();
const stderr = await result.stderr.text();
expect({
exitCode,
stdout: normalizeBunSnapshot(stdout),
stderr: normalizeBunSnapshot(stderr),
}).toMatchInlineSnapshot(`
{
"exitCode": 1,
"stderr":
"test/js/bun/test/only-failures.fixture.ts:
7 | test("passing test 2", () => {
8 | expect(2 + 2).toBe(4);
9 | });
10 |
11 | test("failing test", () => {
12 | expect(1 + 1).toBe(3);
^
error: expect(received).toBe(expected)

Expected: 3
Received: 2
at <anonymous> (file:NN:NN)
(fail) failing test
21 | });
22 |
23 | test.todo("todo test");
24 |
25 | test("another failing test", () => {
26 | throw new Error("This test fails");
^
error: This test fails
at <anonymous> (file:NN:NN)
(fail) another failing test

3 pass
1 skip
1 todo
2 fail
4 expect() calls
Ran 7 tests across 1 file."
,
"stdout": "bun test <version> (<revision>)",
}
`);
});

test.concurrent("only-failures flag should work with multiple files", async () => {
const result = await Bun.spawn({
cmd: [
bunExe(),
"test",
import.meta.dir + "/printing/dots/dots1.fixture.ts",
import.meta.dir + "/only-failures.fixture.ts",
"--only-failures",
],
stdout: "pipe",
stderr: "pipe",
env: bunEnv,
});
const exitCode = await result.exited;
const stdout = await result.stdout.text();
const stderr = await result.stderr.text();
expect(exitCode).toBe(1);
expect(normalizeBunSnapshot(stderr)).toContain("(fail) failing test");
expect(normalizeBunSnapshot(stderr)).toContain("(fail) another failing test");
expect(normalizeBunSnapshot(stderr)).not.toContain("(pass)");
});

test.concurrent("only-failures should work via bunfig.toml", async () => {
using dir = tempDir("bunfig-only-failures", {
"bunfig.toml": `
[test]
onlyFailures = true
`,
"my.test.ts": `
import { test, expect } from "bun:test";

test("passing test", () => {
expect(1 + 1).toBe(2);
});

test("failing test", () => {
expect(1 + 1).toBe(3);
});

test("another passing test", () => {
expect(true).toBe(true);
});
`,
});

const result = await Bun.spawn({
cmd: [bunExe(), "test"],
stdout: "pipe",
stderr: "pipe",
env: bunEnv,
cwd: String(dir),
});

const exitCode = await result.exited;
const stderr = await result.stderr.text();

expect(exitCode).toBe(1);
// Should only show the failing test
expect(normalizeBunSnapshot(stderr, dir)).toContain("(fail) failing test");
// Should not show passing tests
expect(normalizeBunSnapshot(stderr, dir)).not.toContain("(pass)");
});