Compare commits


1 Commit

Author       SHA1         Message   Date
Claude Bot   40df7144da   wip       2025-07-17 12:59:24 +00:00
8 changed files with 63 additions and 426 deletions

View File

@@ -3,8 +3,8 @@ register_repository(
   libuv
   REPOSITORY
     libuv/libuv
-  TAG
-    v1.51.0
+  COMMIT
+    da527d8d2a908b824def74382761566371439003
 )
 if(WIN32)

View File

@@ -248,34 +248,4 @@ $ bun test foo
 Any test file in the directory with an _absolute path_ that contains one of the targets will run. Glob patterns are not yet supported. -->
-
-## AI Agent Integration
-
-When using Bun's test runner with AI coding assistants, you can enable quieter output to improve readability and reduce context noise. This feature minimizes test output verbosity while preserving essential failure information.
-
-### Environment Variables
-
-Set any of the following environment variables to enable AI-friendly output:
-
-- `CLAUDECODE=1` - For Claude Code
-- `REPL_ID=1` - For Replit
-- `IS_CODE_AGENT=1` - Generic AI agent flag
-
-### Behavior
-
-When an AI agent environment is detected:
-
-- Only test failures are displayed in detail
-- Passing, skipped, and todo test indicators are hidden
-- Summary statistics remain intact
-- JUnit XML reporting is preserved
-
-```bash
-# Example: Enable quiet output for Claude Code
-$ CLAUDECODE=1 bun test
-# Still shows failures and summary, but hides verbose passing test output
-```
-
-This feature is particularly useful in AI-assisted development workflows where reduced output verbosity improves context efficiency while maintaining visibility into test failures.
-
 {% bunCLIUsage command="test" /%}

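The docs removed above are exercised end to end by the test suite deleted at the bottom of this diff. As a quick reference, a minimal TypeScript sketch of the documented behavior, assuming Bun's `Bun.spawn` API and a hypothetical `example.test.ts` fixture:

```ts
// Run `bun test` with CLAUDECODE=1 and check that per-test pass lines
// are suppressed while summary statistics survive.
// "example.test.ts" is a hypothetical fixture path.
const proc = Bun.spawn({
  cmd: ["bun", "test", "example.test.ts"],
  env: { ...process.env, CLAUDECODE: "1" },
  stdout: "pipe",
  stderr: "pipe",
});
const [stdout, stderr] = await Promise.all([proc.stdout.text(), proc.stderr.text()]);
const output = stderr + stdout;

console.log(output.includes("(pass)")); // false: passing tests are hidden
console.log(/\d+ pass/.test(output)); // true: the summary remains
```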
View File

@@ -39,61 +39,8 @@ pub const Tag = enum(u3) {
     skipped_because_label,
 };

 const debug = Output.scoped(.jest, false);
 var max_test_id_for_debugger: u32 = 0;

-const CurrentFile = struct {
-    title: string = "",
-    prefix: string = "",
-    repeat_info: struct {
-        count: u32 = 0,
-        index: u32 = 0,
-    } = .{},
-    has_printed_filename: bool = false,
-
-    pub fn set(this: *CurrentFile, title: string, prefix: string, repeat_count: u32, repeat_index: u32) void {
-        if (Output.isAIAgent()) {
-            this.freeAndClear();
-            this.title = bun.default_allocator.dupe(u8, title) catch bun.outOfMemory();
-            this.prefix = bun.default_allocator.dupe(u8, prefix) catch bun.outOfMemory();
-            this.repeat_info.count = repeat_count;
-            this.repeat_info.index = repeat_index;
-            this.has_printed_filename = false;
-            return;
-        }
-
-        this.has_printed_filename = true;
-        print(title, prefix, repeat_count, repeat_index);
-    }
-
-    fn freeAndClear(this: *CurrentFile) void {
-        bun.default_allocator.free(this.title);
-        bun.default_allocator.free(this.prefix);
-    }
-
-    fn print(title: string, prefix: string, repeat_count: u32, repeat_index: u32) void {
-        if (repeat_count > 0) {
-            if (repeat_count > 1) {
-                Output.prettyErrorln("<r>\n{s}{s}: <d>(run #{d})<r>\n", .{ prefix, title, repeat_index + 1 });
-            } else {
-                Output.prettyErrorln("<r>\n{s}{s}:\n", .{ prefix, title });
-            }
-        } else {
-            Output.prettyErrorln("<r>\n{s}{s}:\n", .{ prefix, title });
-        }
-        Output.flush();
-    }
-
-    pub fn printIfNeeded(this: *CurrentFile) void {
-        if (this.has_printed_filename) return;
-        this.has_printed_filename = true;
-        print(this.title, this.prefix, this.repeat_info.count, this.repeat_info.index);
-    }
-};
-
 pub const TestRunner = struct {
-    current_file: CurrentFile = CurrentFile{},
     tests: TestRunner.Test.List = .{},
     log: *logger.Log,
     files: File.List = .{},

@@ -1380,10 +1327,6 @@ pub const TestRunnerTask = struct {
                 deduped = true;
             } else {
                 if (is_unhandled and Jest.runner != null) {
-                    if (Output.isAIAgent()) {
-                        Jest.runner.?.current_file.printIfNeeded();
-                    }
                     Output.prettyErrorln(
                         \\<r>
                         \\<b><d>#<r> <red><b>Unhandled error<r><d> between tests<r>

@@ -1392,12 +1335,7 @@ pub const TestRunnerTask = struct {
                     , .{});
                     Output.flush();
-                } else if (!is_unhandled and Jest.runner != null) {
-                    if (Output.isAIAgent()) {
-                        Jest.runner.?.current_file.printIfNeeded();
-                    }
-                }
+                }

                 jsc_vm.runErrorHandlerWithDedupe(rejection, jsc_vm.onUnhandledRejectionExceptionList);

                 if (is_unhandled and Jest.runner != null) {
                     Output.prettyError("<r><d>-------------------------------<r>\n\n", .{});

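The deleted `CurrentFile` struct implements a defer-until-needed header: in AI-agent mode `set` stores the filename instead of printing it, and `printIfNeeded` emits it at most once, just before the first failure that needs the context. A rough TypeScript rendering of the pattern (the names mirror the Zig; the rest is illustrative):

```ts
class CurrentFile {
  private title = "";
  private hasPrintedFilename = false;

  set(title: string, quiet: boolean) {
    this.title = title;
    this.hasPrintedFilename = !quiet; // quiet mode defers the header
    if (!quiet) console.error(`\n${title}:`);
  }

  // Called right before printing a failure, so headers only appear
  // for files that actually produce visible output.
  printIfNeeded() {
    if (this.hasPrintedFilename) return;
    this.hasPrintedFilename = true;
    console.error(`\n${this.title}:`);
  }
}

const current = new CurrentFile();
current.set("math.test.ts", true); // quiet: nothing printed yet
current.printIfNeeded(); // first failure: prints "math.test.ts:"
current.printIfNeeded(); // no-op; the header prints at most once
```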
View File

@@ -97,11 +97,6 @@ fn fmtStatusTextLine(comptime status: @Type(.enum_literal), comptime emoji_or_co
 }

 fn writeTestStatusLine(comptime status: @Type(.enum_literal), writer: anytype) void {
-    // When using AI agents, only print failures
-    if (Output.isAIAgent() and status != .fail) {
-        return;
-    }
-
     if (Output.enable_ansi_colors_stderr)
         writer.print(fmtStatusTextLine(status, true), .{}) catch unreachable
     else
@@ -658,54 +653,52 @@ pub const CommandLineReporter = struct {
     }

     const scopes: []*jest.DescribeScope = scopes_stack.slice();
     const display_label = if (label.len > 0) label else "test";

-    // Quieter output when claude code is in use.
-    if (!Output.isAIAgent() or status == .fail) {
-        const color_code = comptime if (skip) "<d>" else "";
-        if (Output.enable_ansi_colors_stderr) {
-            for (scopes, 0..) |_, i| {
-                const index = (scopes.len - 1) - i;
-                const scope = scopes[index];
-                if (scope.label.len == 0) continue;
-                writer.writeAll(" ") catch unreachable;
-                writer.print(comptime Output.prettyFmt("<r>" ++ color_code, true), .{}) catch unreachable;
-                writer.writeAll(scope.label) catch unreachable;
-                writer.print(comptime Output.prettyFmt("<d>", true), .{}) catch unreachable;
-                writer.writeAll(" >") catch unreachable;
-            }
-        } else {
-            for (scopes, 0..) |_, i| {
-                const index = (scopes.len - 1) - i;
-                const scope = scopes[index];
-                if (scope.label.len == 0) continue;
-                writer.writeAll(" ") catch unreachable;
-                writer.writeAll(scope.label) catch unreachable;
-                writer.writeAll(" >") catch unreachable;
-            }
-        }
-        const line_color_code = if (comptime skip) "<r><d>" else "<r><b>";
-        if (Output.enable_ansi_colors_stderr)
-            writer.print(comptime Output.prettyFmt(line_color_code ++ " {s}<r>", true), .{display_label}) catch unreachable
-        else
-            writer.print(comptime Output.prettyFmt(" {s}", false), .{display_label}) catch unreachable;
-        if (elapsed_ns > (std.time.ns_per_us * 10)) {
-            writer.print(" {any}", .{
-                Output.ElapsedFormatter{
-                    .colors = Output.enable_ansi_colors_stderr,
-                    .duration_ns = elapsed_ns,
-                },
-            }) catch unreachable;
-        }
-        writer.writeAll("\n") catch unreachable;
-    }
+    const color_code = comptime if (skip) "<d>" else "";
+    if (Output.enable_ansi_colors_stderr) {
+        for (scopes, 0..) |_, i| {
+            const index = (scopes.len - 1) - i;
+            const scope = scopes[index];
+            if (scope.label.len == 0) continue;
+            writer.writeAll(" ") catch unreachable;
+            writer.print(comptime Output.prettyFmt("<r>" ++ color_code, true), .{}) catch unreachable;
+            writer.writeAll(scope.label) catch unreachable;
+            writer.print(comptime Output.prettyFmt("<d>", true), .{}) catch unreachable;
+            writer.writeAll(" >") catch unreachable;
+        }
+    } else {
+        for (scopes, 0..) |_, i| {
+            const index = (scopes.len - 1) - i;
+            const scope = scopes[index];
+            if (scope.label.len == 0) continue;
+            writer.writeAll(" ") catch unreachable;
+            writer.writeAll(scope.label) catch unreachable;
+            writer.writeAll(" >") catch unreachable;
+        }
+    }
+    const line_color_code = if (comptime skip) "<r><d>" else "<r><b>";
+    if (Output.enable_ansi_colors_stderr)
+        writer.print(comptime Output.prettyFmt(line_color_code ++ " {s}<r>", true), .{display_label}) catch unreachable
+    else
+        writer.print(comptime Output.prettyFmt(" {s}", false), .{display_label}) catch unreachable;
+    if (elapsed_ns > (std.time.ns_per_us * 10)) {
+        writer.print(" {any}", .{
+            Output.ElapsedFormatter{
+                .colors = Output.enable_ansi_colors_stderr,
+                .duration_ns = elapsed_ns,
+            },
+        }) catch unreachable;
+    }
+    writer.writeAll("\n") catch unreachable;

     if (file_reporter) |reporter| {
         switch (reporter) {
             .junit => |junit| {
@@ -851,8 +844,6 @@ pub const CommandLineReporter = struct {
     defer Output.flush();

     var this: *CommandLineReporter = @fieldParentPtr("callback", cb);
-    this.jest.current_file.printIfNeeded();
-
     // when the tests fail, we want to repeat the failures at the end
     // so that you can see them better when there are lots of tests that ran
     const initial_length = this.failures_to_repeat_buf.items.len;
@@ -1539,7 +1530,7 @@ pub const TestCommand = struct {
     const write_snapshots_success = try jest.Jest.runner.?.snapshots.writeInlineSnapshots();
     try jest.Jest.runner.?.snapshots.writeSnapshotFile();
     var coverage_options = ctx.test_options.coverage;
-    if (reporter.summary().pass > 20 and !Output.isAIAgent()) {
+    if (reporter.summary().pass > 20) {
         if (reporter.summary().skip > 0) {
             Output.prettyError("\n<r><d>{d} tests skipped:<r>\n", .{reporter.summary().skip});
             Output.flush();
@@ -1580,24 +1571,16 @@ pub const TestCommand = struct {
     if (test_files.len == 0) {
         failed_to_find_any_tests = true;

-        // "bun test" - positionals[0] == "test"
-        // Therefore positionals starts at [1].
-        if (ctx.positionals.len < 2) {
-            if (Output.isAIAgent()) {
-                // Be very clear to ai.
-                Output.errGeneric("0 test files matching **{{.test,.spec,_test_,_spec_}}.{{js,ts,jsx,tsx}} in --cwd={}", .{bun.fmt.quote(bun.fs.FileSystem.instance.top_level_dir)});
-            } else {
-                // Be friendlier to humans.
-                Output.prettyErrorln(
-                    \\<yellow>No tests found!<r>
-                    \\
-                    \\Tests need ".test", "_test_", ".spec" or "_spec_" in the filename <d>(ex: "MyApp.test.ts")<r>
-                    \\
-                , .{});
-            }
+        if (ctx.positionals.len == 0) {
+            Output.prettyErrorln(
+                \\<yellow>No tests found!<r>
+                \\Tests need ".test", "_test_", ".spec" or "_spec_" in the filename <d>(ex: "MyApp.test.ts")<r>
+                \\
+            , .{});
         } else {
             Output.prettyErrorln("<yellow>The following filters did not match any test files:<r>", .{});
             var has_file_like: ?usize = null;
             Output.prettyError(" ", .{});
             for (ctx.positionals[1..], 1..) |filter, i| {
                 Output.prettyError(" {s}", .{filter});
@@ -1628,12 +1611,10 @@ pub const TestCommand = struct {
                 , .{ ctx.positionals[i], ctx.positionals[i] });
             }
         }

-        if (!Output.isAIAgent()) {
-            Output.prettyError(
-                \\
-                \\Learn more about bun test: <magenta>https://bun.com/docs/cli/test<r>
-            , .{});
-        }
+        Output.prettyError(
+            \\
+            \\Learn more about the test runner: <magenta>https://bun.com/docs/cli/test<r>
+        , .{});
     } else {
         Output.prettyError("\n", .{});
@@ -1860,7 +1841,12 @@ pub const TestCommand = struct {
     vm.onUnhandledRejection = jest.TestRunnerTask.onUnhandledRejection;

     while (repeat_index < repeat_count) : (repeat_index += 1) {
-        reporter.jest.current_file.set(file_title, file_prefix, repeat_count, repeat_index);
+        if (repeat_count > 1) {
+            Output.prettyErrorln("<r>\n{s}{s}: <d>(run #{d})<r>\n", .{ file_prefix, file_title, repeat_index + 1 });
+        } else {
+            Output.prettyErrorln("<r>\n{s}{s}:\n", .{ file_prefix, file_title });
+        }
+        Output.flush();

         var promise = try vm.loadEntryPointForTestRunner(file_path);
         reporter.summary().files += 1;

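Taken together, these hunks strip one gating rule from the reporter: when an AI agent is detected, per-test status lines are printed only for failures, while summaries and JUnit output are untouched. A condensed TypeScript sketch of that rule (the `quiet` flag stands in for `Output.isAIAgent()`):

```ts
type Status = "pass" | "fail" | "skip" | "todo";

// quiet corresponds to Output.isAIAgent() in the removed Zig code.
function writeTestStatusLine(status: Status, label: string, quiet: boolean) {
  if (quiet && status !== "fail") return; // only failures break through
  console.error(`(${status}) ${label}`);
}

writeTestStatusLine("pass", "adds numbers", true); // suppressed
writeTestStatusLine("fail", "divides by zero", true); // "(fail) divides by zero"
```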
View File

@@ -590,8 +590,8 @@ pub const UpdateInteractiveCommand = struct {
     var max_latest_len: usize = "Latest".len;

     // Set reasonable limits to prevent excessive column widths
-    const MAX_NAME_WIDTH = 60;
-    const MAX_VERSION_WIDTH = 20;
+    const MAX_NAME_WIDTH = 80;
+    const MAX_VERSION_WIDTH = 30;

     for (packages) |pkg| {
         // Include dev tag length in max calculation

@@ -870,8 +870,8 @@ pub const UpdateInteractiveCommand = struct {
     defer if (package_url.len > 0) bun.default_allocator.free(package_url);

     // Truncate package name if it's too long
-    const display_name = if (pkg.name.len > 60)
-        try std.fmt.allocPrint(bun.default_allocator, "{s}...", .{pkg.name[0..57]})
+    const display_name = if (pkg.name.len > MAX_NAME_WIDTH)
+        try std.fmt.allocPrint(bun.default_allocator, "{s}...", .{pkg.name[0..MAX_NAME_WIDTH-3]})
     else
         pkg.name;
     defer if (display_name.ptr != pkg.name.ptr) bun.default_allocator.free(display_name);

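This hunk widens the column caps and, in the render path, swaps the hardcoded 60/57 pair for the named constant, so the cap and the slice length cannot drift apart. The clamp itself, sketched in TypeScript:

```ts
const MAX_NAME_WIDTH = 80;

// Names over the cap are cut to (cap - 3) characters plus "...",
// so a rendered cell never exceeds MAX_NAME_WIDTH columns.
function truncateName(name: string): string {
  if (name.length <= MAX_NAME_WIDTH) return name;
  return name.slice(0, MAX_NAME_WIDTH - 3) + "...";
}

console.log(truncateName("left-pad")); // short names pass through unchanged
console.log(truncateName("x".repeat(100)).length); // 80
```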
View File

@@ -457,62 +457,11 @@ pub inline fn isEmojiEnabled() bool {
 pub fn isGithubAction() bool {
     if (bun.getenvZ("GITHUB_ACTIONS")) |value| {
-        return strings.eqlComptime(value, "true") and
-            // Do not print github annotations for AI agents because that wastes the context window.
-            !isAIAgent();
+        return strings.eqlComptime(value, "true");
     }
     return false;
 }

-pub fn isAIAgent() bool {
-    const get_is_agent = struct {
-        var value = false;
-
-        fn evaluate() bool {
-            if (bun.getenvZ("IS_CODE_AGENT")) |env| {
-                return strings.eqlComptime(env, "1");
-            }
-
-            if (isVerbose()) {
-                return false;
-            }
-
-            // Claude Code.
-            if (bun.getenvTruthy("CLAUDECODE")) {
-                return true;
-            }
-
-            // Replit.
-            if (bun.getenvTruthy("REPL_ID")) {
-                return true;
-            }
-
-            // TODO: add environment variable for Gemini
-            // Gemini does not appear to add any environment variables to identify it.
-
-            // TODO: add environment variable for Codex
-            // codex does not appear to add any environment variables to identify it.
-
-            // TODO: add environment variable for Cursor Background Agents
-            // cursor does not appear to add any environment variables to identify it.
-
-            return false;
-        }
-
-        fn setValue() void {
-            value = evaluate();
-        }
-
-        var once = std.once(setValue);
-
-        pub fn isEnabled() bool {
-            once.call();
-            return value;
-        }
-    };
-
-    return get_is_agent.isEnabled();
-}
-
 pub fn isVerbose() bool {
     // Set by Github Actions when a workflow is run using debug mode.
     if (bun.getenvZ("RUNNER_DEBUG")) |value| {

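The removed `isAIAgent` runs its environment checks once and caches the result (via `std.once`): an explicit `IS_CODE_AGENT` setting wins outright, verbose mode opts out, and otherwise known agent variables are consulted. A rough TypeScript approximation (the exact truthiness rules of `getenvTruthy` are assumed):

```ts
let cached: boolean | undefined;

function isAIAgent(env = process.env): boolean {
  if (cached !== undefined) return cached; // evaluated once, like std.once

  cached = (() => {
    // An explicit IS_CODE_AGENT setting overrides all other checks.
    if (env.IS_CODE_AGENT !== undefined) return env.IS_CODE_AGENT === "1";
    // Verbose runs (e.g. RUNNER_DEBUG) opt out of quiet output.
    if (env.RUNNER_DEBUG === "1") return false;
    if (env.CLAUDECODE && env.CLAUDECODE !== "0") return true; // Claude Code
    if (env.REPL_ID && env.REPL_ID !== "0") return true; // Replit
    return false;
  })();

  return cached;
}
```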
View File

@@ -1,67 +0,0 @@
-// Bun Snapshot v1, https://bun.sh/docs/test/snapshots
-
-exports[`CLAUDECODE=1 shows quiet test output (only failures) 1`] = `
-"test2.test.js:
-4 | test("passing test", () => {
-5 |   expect(1).toBe(1);
-6 | });
-7 |
-8 | test("failing test", () => {
-9 |   expect(1).toBe(2);
-      ^
-error: expect(received).toBe(expected)
-Expected: 2
-Received: 1
-      at <anonymous> (file:NN:NN)
-(fail) failing test
-
-1 pass
-1 skip
-1 todo
-1 fail
-2 expect() calls
-Ran 4 tests across 1 file.
-bun test <version> (<revision>)"
-`;
-
-exports[`CLAUDECODE=1 vs CLAUDECODE=0 comparison: normal 1`] = `
-"test3.test.js:
-(pass) passing test
-(pass) another passing test
-(skip) skipped test
-(todo) todo test
-
-2 pass
-1 skip
-1 todo
-0 fail
-2 expect() calls
-Ran 4 tests across 1 file.
-bun test <version> (<revision>)"
-`;
-
-exports[`CLAUDECODE=1 vs CLAUDECODE=0 comparison: quiet 1`] = `
-"2 pass
-1 skip
-1 todo
-0 fail
-2 expect() calls
-Ran 4 tests across 1 file.
-bun test <version> (<revision>)"
-`;
-
-exports[`CLAUDECODE flag handles no test files found: no-tests-normal 1`] = `
-"No tests found!
-Tests need ".test", "_test_", ".spec" or "_spec_" in the filename (ex: "MyApp.test.ts")
-
-Learn more about bun test: https://bun.com/docs/cli/test
-bun test <version> (<revision>)"
-`;
-
-exports[`CLAUDECODE flag handles no test files found: no-tests-quiet 1`] = `
-"error: 0 test files matching **{.test,.spec,_test_,_spec_}.{js,ts,jsx,tsx} in --cwd="<dir>"
-bun test <version> (<revision>)"
-`;

View File

@@ -1,139 +0,0 @@
-import { spawnSync } from "bun";
-import { expect, test } from "bun:test";
-import { bunEnv, bunExe, normalizeBunSnapshot, tempDirWithFiles } from "harness";
-
-test("CLAUDECODE=1 shows quiet test output (only failures)", async () => {
-  const dir = tempDirWithFiles("claudecode-test-quiet", {
-    "test2.test.js": `
-      import { test, expect } from "bun:test";
-
-      test("passing test", () => {
-        expect(1).toBe(1);
-      });
-
-      test("failing test", () => {
-        expect(1).toBe(2);
-      });
-
-      test.skip("skipped test", () => {
-        expect(1).toBe(1);
-      });
-
-      test.todo("todo test");
-    `,
-  });
-
-  await using proc = Bun.spawn({
-    cmd: [bunExe(), "test", "test2.test.js"],
-    env: { ...bunEnv, CLAUDECODE: "1" },
-    cwd: dir,
-    stderr: "pipe",
-    stdout: "pipe",
-  });
-
-  const [stdout, stderr] = await Promise.all([proc.stdout.text(), proc.stderr.text()]);
-  const output = stderr + stdout;
-  const normalized = normalizeBunSnapshot(output, dir);
-  expect(normalized).toMatchSnapshot();
-});
-
-test("CLAUDECODE=1 vs CLAUDECODE=0 comparison", async () => {
-  const dir = tempDirWithFiles("claudecode-test-compare", {
-    "test3.test.js": `
-      import { test, expect } from "bun:test";
-
-      test("passing test", () => {
-        expect(1).toBe(1);
-      });
-
-      test("another passing test", () => {
-        expect(2).toBe(2);
-      });
-
-      test.skip("skipped test", () => {
-        expect(1).toBe(1);
-      });
-
-      test.todo("todo test");
-    `,
-  });
-
-  // Run with CLAUDECODE=0 (normal output)
-  const result1 = spawnSync({
-    cmd: [bunExe(), "test", "test3.test.js"],
-    env: { ...bunEnv, CLAUDECODE: "0" },
-    cwd: dir,
-    stderr: "pipe",
-    stdout: "pipe",
-  });
-
-  // Run with CLAUDECODE=1 (quiet output)
-  const result2 = spawnSync({
-    cmd: [bunExe(), "test", "test3.test.js"],
-    env: { ...bunEnv, CLAUDECODE: "1" },
-    cwd: dir,
-    stderr: "pipe",
-    stdout: "pipe",
-  });
-
-  const normalOutput = result1.stderr.toString() + result1.stdout.toString();
-  const quietOutput = result2.stderr.toString() + result2.stdout.toString();
-
-  // Normal output should contain pass/skip/todo indicators
-  expect(normalOutput).toContain("(pass)"); // pass indicator
-  expect(normalOutput).toContain("(skip)"); // skip indicator
-  expect(normalOutput).toContain("(todo)"); // todo indicator
-
-  // Quiet output should NOT contain pass/skip/todo indicators (only failures)
-  expect(quietOutput).not.toContain("(pass)"); // pass indicator
-  expect(quietOutput).not.toContain("(skip)"); // skip indicator
-  expect(quietOutput).not.toContain("(todo)"); // todo indicator
-
-  // Both should contain the summary at the end
-  expect(normalOutput).toContain("2 pass");
-  expect(normalOutput).toContain("1 skip");
-  expect(normalOutput).toContain("1 todo");
-  expect(quietOutput).toContain("2 pass");
-  expect(quietOutput).toContain("1 skip");
-  expect(quietOutput).toContain("1 todo");
-
-  expect(normalizeBunSnapshot(normalOutput, dir)).toMatchSnapshot("normal");
-  expect(normalizeBunSnapshot(quietOutput, dir)).toMatchSnapshot("quiet");
-});
-
-test("CLAUDECODE flag handles no test files found", () => {
-  const dir = tempDirWithFiles("empty-project", {
-    "package.json": `{
-      "name": "empty-project",
-      "version": "1.0.0"
-    }`,
-    "src/index.js": `console.log("hello world");`,
-  });
-
-  // Run with CLAUDECODE=0 (normal output) - no test files
-  const result1 = spawnSync({
-    cmd: [bunExe(), "test"],
-    env: { ...bunEnv, CLAUDECODE: "0" },
-    cwd: dir,
-    stderr: "pipe",
-    stdout: "pipe",
-  });
-
-  // Run with CLAUDECODE=1 (quiet output) - no test files
-  const result2 = spawnSync({
-    cmd: [bunExe(), "test"],
-    env: { ...bunEnv, CLAUDECODE: "1" },
-    cwd: dir,
-    stderr: "pipe",
-    stdout: "pipe",
-  });
-
-  const normalOutput = result1.stderr.toString() + result1.stdout.toString();
-  const quietOutput = result2.stderr.toString() + result2.stdout.toString();
-
-  expect(normalizeBunSnapshot(normalOutput, dir)).toMatchSnapshot("no-tests-normal");
-  expect(normalizeBunSnapshot(quietOutput, dir)).toMatchSnapshot("no-tests-quiet");
-});