diff --git a/src/bun.js/test/jest.zig b/src/bun.js/test/jest.zig
index fff501544c..f8e85979b7 100644
--- a/src/bun.js/test/jest.zig
+++ b/src/bun.js/test/jest.zig
@@ -39,8 +39,61 @@ pub const Tag = enum(u3) {
     skipped_because_label,
 };
 const debug = Output.scoped(.jest, false);
 var max_test_id_for_debugger: u32 = 0;
+
+const CurrentFile = struct {
+    title: string = "",
+    prefix: string = "",
+    repeat_info: struct {
+        count: u32 = 0,
+        index: u32 = 0,
+    } = .{},
+    has_printed_filename: bool = false,
+
+    pub fn set(this: *CurrentFile, title: string, prefix: string, repeat_count: u32, repeat_index: u32) void {
+        if (Output.isAIAgent()) {
+            this.freeAndClear();
+            this.title = bun.default_allocator.dupe(u8, title) catch bun.outOfMemory();
+            this.prefix = bun.default_allocator.dupe(u8, prefix) catch bun.outOfMemory();
+            this.repeat_info.count = repeat_count;
+            this.repeat_info.index = repeat_index;
+            this.has_printed_filename = false;
+            return;
+        }
+
+        this.has_printed_filename = true;
+        print(title, prefix, repeat_count, repeat_index);
+    }
+
+    fn freeAndClear(this: *CurrentFile) void {
+        bun.default_allocator.free(this.title);
+        bun.default_allocator.free(this.prefix);
+    }
+
+    fn print(title: string, prefix: string, repeat_count: u32, repeat_index: u32) void {
+        if (repeat_count > 1) {
+            Output.prettyErrorln("\n{s}{s}: (run #{d})\n", .{ prefix, title, repeat_index + 1 });
+        } else {
+            Output.prettyErrorln("\n{s}{s}:\n", .{ prefix, title });
+        }
+
+        Output.flush();
+    }
+
+    pub fn printIfNeeded(this: *CurrentFile) void {
+        if (this.has_printed_filename) return;
+        this.has_printed_filename = true;
+        print(this.title, this.prefix, this.repeat_info.count, this.repeat_info.index);
+    }
+};
+
 pub const TestRunner = struct {
+    current_file: CurrentFile = CurrentFile{},
     tests: TestRunner.Test.List = .{},
     log: *logger.Log,
     files: File.List = .{},
@@ -1327,6 +1380,10 @@ pub const TestRunnerTask = struct {
             deduped = true;
         } else {
             if (is_unhandled and Jest.runner != null) {
+                if (Output.isAIAgent()) {
+                    Jest.runner.?.current_file.printIfNeeded();
+                }
+
                 Output.prettyErrorln(
                     \\
                     \\# Unhandled error between tests
@@ -1335,7 +1392,12 @@
                     \\
                 , .{});
                 Output.flush();
+            } else if (!is_unhandled and Jest.runner != null) {
+                if (Output.isAIAgent()) {
+                    Jest.runner.?.current_file.printIfNeeded();
+                }
             }
+
             jsc_vm.runErrorHandlerWithDedupe(rejection, jsc_vm.onUnhandledRejectionExceptionList);
             if (is_unhandled and Jest.runner != null) {
                 Output.prettyError("-------------------------------\n\n", .{});
diff --git a/src/cli/test_command.zig b/src/cli/test_command.zig
index a5b3693993..cd7a89feb8 100644
--- a/src/cli/test_command.zig
+++ b/src/cli/test_command.zig
@@ -97,6 +97,11 @@ fn fmtStatusTextLine(comptime status: @Type(.enum_literal), comptime emoji_or_co
 }
 
 fn writeTestStatusLine(comptime status: @Type(.enum_literal), writer: anytype) void {
+    // When an AI agent is in use, only print failures.
+    if (Output.isAIAgent() and status != .fail) {
+        return;
+    }
+
     if (Output.enable_ansi_colors_stderr)
         writer.print(fmtStatusTextLine(status, true), .{}) catch unreachable
     else
@@ -653,52 +658,54 @@ pub const CommandLineReporter = struct {
         }
 
         const scopes: []*jest.DescribeScope = scopes_stack.slice();
         const display_label = if (label.len > 0) label else "test";
-        const color_code = comptime if (skip) "" else "";
 
+        // Quieter output when an AI agent (e.g. Claude Code) is in use.
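+        // Failures are always printed in full; pass/skip/todo status lines are
+        // suppressed so they do not waste the agent's context window.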
+        if (!Output.isAIAgent() or status == .fail) {
+            const color_code = comptime if (skip) "" else "";
 
-        if (Output.enable_ansi_colors_stderr) {
-            for (scopes, 0..) |_, i| {
-                const index = (scopes.len - 1) - i;
-                const scope = scopes[index];
-                if (scope.label.len == 0) continue;
-                writer.writeAll(" ") catch unreachable;
+            if (Output.enable_ansi_colors_stderr) {
+                for (scopes, 0..) |_, i| {
+                    const index = (scopes.len - 1) - i;
+                    const scope = scopes[index];
+                    if (scope.label.len == 0) continue;
+                    writer.writeAll(" ") catch unreachable;
 
-                writer.print(comptime Output.prettyFmt("" ++ color_code, true), .{}) catch unreachable;
-                writer.writeAll(scope.label) catch unreachable;
-                writer.print(comptime Output.prettyFmt("", true), .{}) catch unreachable;
-                writer.writeAll(" >") catch unreachable;
+                    writer.print(comptime Output.prettyFmt("" ++ color_code, true), .{}) catch unreachable;
+                    writer.writeAll(scope.label) catch unreachable;
+                    writer.print(comptime Output.prettyFmt("", true), .{}) catch unreachable;
+                    writer.writeAll(" >") catch unreachable;
+                }
+            } else {
+                for (scopes, 0..) |_, i| {
+                    const index = (scopes.len - 1) - i;
+                    const scope = scopes[index];
+                    if (scope.label.len == 0) continue;
+                    writer.writeAll(" ") catch unreachable;
+                    writer.writeAll(scope.label) catch unreachable;
+                    writer.writeAll(" >") catch unreachable;
+                }
             }
-        } else {
-            for (scopes, 0..) |_, i| {
-                const index = (scopes.len - 1) - i;
-                const scope = scopes[index];
-                if (scope.label.len == 0) continue;
-                writer.writeAll(" ") catch unreachable;
-                writer.writeAll(scope.label) catch unreachable;
-                writer.writeAll(" >") catch unreachable;
+
+            const line_color_code = if (comptime skip) "" else "";
+
+            if (Output.enable_ansi_colors_stderr)
+                writer.print(comptime Output.prettyFmt(line_color_code ++ " {s}", true), .{display_label}) catch unreachable
+            else
+                writer.print(comptime Output.prettyFmt(" {s}", false), .{display_label}) catch unreachable;
+
+            if (elapsed_ns > (std.time.ns_per_us * 10)) {
+                writer.print(" {any}", .{
+                    Output.ElapsedFormatter{
+                        .colors = Output.enable_ansi_colors_stderr,
+                        .duration_ns = elapsed_ns,
+                    },
+                }) catch unreachable;
             }
+
+            writer.writeAll("\n") catch unreachable;
         }
 
-        const line_color_code = if (comptime skip) "" else "";
-
-        if (Output.enable_ansi_colors_stderr)
-            writer.print(comptime Output.prettyFmt(line_color_code ++ " {s}", true), .{display_label}) catch unreachable
-        else
-            writer.print(comptime Output.prettyFmt(" {s}", false), .{display_label}) catch unreachable;
-
-        if (elapsed_ns > (std.time.ns_per_us * 10)) {
-            writer.print(" {any}", .{
-                Output.ElapsedFormatter{
-                    .colors = Output.enable_ansi_colors_stderr,
-                    .duration_ns = elapsed_ns,
-                },
-            }) catch unreachable;
-        }
-
-        writer.writeAll("\n") catch unreachable;
-
         if (file_reporter) |reporter| {
             switch (reporter) {
                 .junit => |junit| {
@@ -844,6 +851,8 @@ pub const CommandLineReporter = struct {
         defer Output.flush();
         var this: *CommandLineReporter = @fieldParentPtr("callback", cb);
 
+        this.jest.current_file.printIfNeeded();
+
         // when the tests fail, we want to repeat the failures at the end
         // so that you can see them better when there are lots of tests that ran
         const initial_length = this.failures_to_repeat_buf.items.len;
@@ -1530,7 +1539,7 @@ pub const TestCommand = struct {
         const write_snapshots_success = try jest.Jest.runner.?.snapshots.writeInlineSnapshots();
         try jest.Jest.runner.?.snapshots.writeSnapshotFile();
         var coverage_options = ctx.test_options.coverage;
-        if (reporter.summary().pass > 20) {
+        if (reporter.summary().pass > 20 and !Output.isAIAgent()) {
             if (reporter.summary().skip > 0) {
                 Output.prettyError("\n{d} tests skipped:\n", .{reporter.summary().skip});
                 Output.flush();
@@ -1571,16 +1580,24 @@
         if (test_files.len == 0) {
             failed_to_find_any_tests = true;
-            if (ctx.positionals.len == 0) {
-                Output.prettyErrorln(
-                    \\No tests found!
-                    \\Tests need ".test", "_test_", ".spec" or "_spec_" in the filename (ex: "MyApp.test.ts")
-                    \\
-                , .{});
+            // For "bun test", positionals[0] is the subcommand name ("test"),
+            // so user-supplied filters start at positionals[1].
+            if (ctx.positionals.len < 2) {
+                if (Output.isAIAgent()) {
+                    // Be very clear for AI agents.
+                    Output.errGeneric("0 test files matching **{{.test,.spec,_test_,_spec_}}.{{js,ts,jsx,tsx}} in --cwd={}", .{bun.fmt.quote(bun.fs.FileSystem.instance.top_level_dir)});
+                } else {
+                    // Be friendlier to humans.
+                    Output.prettyErrorln(
+                        \\No tests found!
+                        \\
+                        \\Tests need ".test", "_test_", ".spec" or "_spec_" in the filename (ex: "MyApp.test.ts")
+                        \\
+                    , .{});
+                }
             } else {
                 Output.prettyErrorln("The following filters did not match any test files:", .{});
                 var has_file_like: ?usize = null;
-                Output.prettyError(" ", .{});
 
                 for (ctx.positionals[1..], 1..) |filter, i| {
                     Output.prettyError(" {s}", .{filter});
@@ -1611,10 +1628,12 @@
                     , .{ ctx.positionals[i], ctx.positionals[i] });
                 }
             }
-            Output.prettyError(
-                \\
-                \\Learn more about the test runner: https://bun.com/docs/cli/test
-            , .{});
+            if (!Output.isAIAgent()) {
+                Output.prettyError(
+                    \\
+                    \\Learn more about bun test: https://bun.com/docs/cli/test
+                , .{});
+            }
         } else {
             Output.prettyError("\n", .{});
@@ -1841,12 +1860,7 @@
             vm.onUnhandledRejection = jest.TestRunnerTask.onUnhandledRejection;
 
             while (repeat_index < repeat_count) : (repeat_index += 1) {
-                if (repeat_count > 1) {
-                    Output.prettyErrorln("\n{s}{s}: (run #{d})\n", .{ file_prefix, file_title, repeat_index + 1 });
-                } else {
-                    Output.prettyErrorln("\n{s}{s}:\n", .{ file_prefix, file_title });
-                }
-                Output.flush();
+                reporter.jest.current_file.set(file_title, file_prefix, repeat_count, repeat_index);
 
                 var promise = try vm.loadEntryPointForTestRunner(file_path);
                 reporter.summary().files += 1;
diff --git a/src/output.zig b/src/output.zig
index 8f82c7be1a..5f1b1c9378 100644
--- a/src/output.zig
+++ b/src/output.zig
@@ -457,11 +457,62 @@ pub inline fn isEmojiEnabled() bool {
 
 pub fn isGithubAction() bool {
     if (bun.getenvZ("GITHUB_ACTIONS")) |value| {
-        return strings.eqlComptime(value, "true");
+        return strings.eqlComptime(value, "true") and
+            // Do not print GitHub annotations for AI agents because that wastes the context window.
+            !isAIAgent();
     }
     return false;
 }
 
+pub fn isAIAgent() bool {
+    const get_is_agent = struct {
+        var value = false;
+        fn evaluate() bool {
+            if (bun.getenvZ("IS_CODE_AGENT")) |env| {
+                return strings.eqlComptime(env, "1");
+            }
+
+            if (isVerbose()) {
+                return false;
+            }
+
+            // Claude Code.
+            if (bun.getenvTruthy("CLAUDECODE")) {
+                return true;
+            }
+
+            // Replit.
+            if (bun.getenvTruthy("REPL_ID")) {
+                return true;
+            }
+
+            // TODO: add an environment variable for Gemini.
+            // Gemini does not appear to set any environment variable that identifies it.
+
+            // TODO: add an environment variable for Codex.
+            // Codex does not appear to set any environment variable that identifies it.
+
+            // TODO: add an environment variable for Cursor Background Agents.
+            // Cursor does not appear to set any environment variable that identifies it.
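+            // Detection for additional agents can be added here; setting
+            // IS_CODE_AGENT=1 (checked above) works as a generic opt-in.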
+
+            return false;
+        }
+
+        fn setValue() void {
+            value = evaluate();
+        }
+
+        var once = std.once(setValue);
+
+        pub fn isEnabled() bool {
+            once.call();
+            return value;
+        }
+    };
+
+    return get_is_agent.isEnabled();
+}
+
 pub fn isVerbose() bool {
     // Set by Github Actions when a workflow is run using debug mode.
     if (bun.getenvZ("RUNNER_DEBUG")) |value| {
diff --git a/test/cli/test/__snapshots__/claudecode-flag.test.ts.snap b/test/cli/test/__snapshots__/claudecode-flag.test.ts.snap
new file mode 100644
index 0000000000..ce36ec955e
--- /dev/null
+++ b/test/cli/test/__snapshots__/claudecode-flag.test.ts.snap
@@ -0,0 +1,67 @@
+// Bun Snapshot v1, https://bun.sh/docs/test/snapshots
+
+exports[`CLAUDECODE=1 shows quiet test output (only failures) 1`] = `
+"test2.test.js:
+4 | test("passing test", () => {
+5 | expect(1).toBe(1);
+6 | });
+7 |
+8 | test("failing test", () => {
+9 | expect(1).toBe(2);
+            ^
+error: expect(received).toBe(expected)
+
+Expected: 2
+Received: 1
+      at (file:NN:NN)
+(fail) failing test
+
+ 1 pass
+ 1 skip
+ 1 todo
+ 1 fail
+ 2 expect() calls
+Ran 4 tests across 1 file.
+bun test ()"
+`;
+
+exports[`CLAUDECODE=1 vs CLAUDECODE=0 comparison: normal 1`] = `
+"test3.test.js:
+(pass) passing test
+(pass) another passing test
+(skip) skipped test
+(todo) todo test
+
+ 2 pass
+ 1 skip
+ 1 todo
+ 0 fail
+ 2 expect() calls
+Ran 4 tests across 1 file.
+bun test ()"
+`;
+
+exports[`CLAUDECODE=1 vs CLAUDECODE=0 comparison: quiet 1`] = `
+"2 pass
+ 1 skip
+ 1 todo
+ 0 fail
+ 2 expect() calls
+Ran 4 tests across 1 file.
+bun test ()"
+`;
+
+exports[`CLAUDECODE flag handles no test files found: no-tests-normal 1`] = `
+"No tests found!
+
+Tests need ".test", "_test_", ".spec" or "_spec_" in the filename (ex: "MyApp.test.ts")
+
+Learn more about bun test: https://bun.com/docs/cli/test
+bun test ()"
+`;
+
+exports[`CLAUDECODE flag handles no test files found: no-tests-quiet 1`] = `
+"error: 0 test files matching **{.test,.spec,_test_,_spec_}.{js,ts,jsx,tsx} in --cwd=""
+
+bun test ()"
+`;
diff --git a/test/cli/test/claudecode-flag.test.ts b/test/cli/test/claudecode-flag.test.ts
new file mode 100644
index 0000000000..76be32a1c3
--- /dev/null
+++ b/test/cli/test/claudecode-flag.test.ts
@@ -0,0 +1,139 @@
+import { spawnSync } from "bun";
+import { expect, test } from "bun:test";
+import { bunEnv, bunExe, normalizeBunSnapshot, tempDirWithFiles } from "harness";
+
+test("CLAUDECODE=1 shows quiet test output (only failures)", async () => {
+  const dir = tempDirWithFiles("claudecode-test-quiet", {
+    "test2.test.js": `
+      import { test, expect } from "bun:test";
+
+      test("passing test", () => {
+        expect(1).toBe(1);
+      });
+
+      test("failing test", () => {
+        expect(1).toBe(2);
+      });
+
+      test.skip("skipped test", () => {
+        expect(1).toBe(1);
+      });
+
+      test.todo("todo test");
+    `,
+  });
+
+  await using proc = Bun.spawn({
+    cmd: [bunExe(), "test", "test2.test.js"],
+    env: { ...bunEnv, CLAUDECODE: "1" },
+    cwd: dir,
+    stderr: "pipe",
+    stdout: "pipe",
+  });
+
+  const [stdout, stderr] = await Promise.all([proc.stdout.text(), proc.stderr.text()]);
+
+  const output = stderr + stdout;
+  const normalized = normalizeBunSnapshot(output, dir);
+
+  expect(normalized).toMatchSnapshot();
+});
+
+test("CLAUDECODE=1 vs CLAUDECODE=0 comparison", async () => {
+  const dir = tempDirWithFiles("claudecode-test-compare", {
+    "test3.test.js": `
+      import { test, expect } from "bun:test";
+
+      test("passing test", () => {
+        expect(1).toBe(1);
+      });
+
+      test("another passing test", () => {
+        expect(2).toBe(2);
+      });
+
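+      // Skipped and todo tests should be hidden from quiet output but still
+      // counted in the summary.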
test.skip("skipped test", () => { + expect(1).toBe(1); + }); + + test.todo("todo test"); + `, + }); + + // Run with CLAUDECODE=0 (normal output) + const result1 = spawnSync({ + cmd: [bunExe(), "test", "test3.test.js"], + env: { ...bunEnv, CLAUDECODE: "0" }, + cwd: dir, + stderr: "pipe", + stdout: "pipe", + }); + + // Run with CLAUDECODE=1 (quiet output) + const result2 = spawnSync({ + cmd: [bunExe(), "test", "test3.test.js"], + env: { ...bunEnv, CLAUDECODE: "1" }, + cwd: dir, + stderr: "pipe", + stdout: "pipe", + }); + + const normalOutput = result1.stderr.toString() + result1.stdout.toString(); + const quietOutput = result2.stderr.toString() + result2.stdout.toString(); + + // Normal output should contain pass/skip/todo indicators + expect(normalOutput).toContain("(pass)"); // pass indicator + expect(normalOutput).toContain("(skip)"); // skip indicator + expect(normalOutput).toContain("(todo)"); // todo indicator + + // Quiet output should NOT contain pass/skip/todo indicators (only failures) + expect(quietOutput).not.toContain("(pass)"); // pass indicator + expect(quietOutput).not.toContain("(skip)"); // skip indicator + expect(quietOutput).not.toContain("(todo)"); // todo indicator + + // Both should contain the summary at the end + expect(normalOutput).toContain("2 pass"); + expect(normalOutput).toContain("1 skip"); + expect(normalOutput).toContain("1 todo"); + + expect(quietOutput).toContain("2 pass"); + expect(quietOutput).toContain("1 skip"); + expect(quietOutput).toContain("1 todo"); + + expect(normalizeBunSnapshot(normalOutput, dir)).toMatchSnapshot("normal"); + expect(normalizeBunSnapshot(quietOutput, dir)).toMatchSnapshot("quiet"); +}); + +test("CLAUDECODE flag handles no test files found", () => { + const dir = tempDirWithFiles("empty-project", { + "package.json": `{ + "name": "empty-project", + "version": "1.0.0" + }`, + "src/index.js": `console.log("hello world");`, + }); + + // Run with CLAUDECODE=0 (normal output) - no test files + const result1 = spawnSync({ + cmd: [bunExe(), "test"], + env: { ...bunEnv, CLAUDECODE: "0" }, + cwd: dir, + stderr: "pipe", + stdout: "pipe", + }); + + // Run with CLAUDECODE=1 (quiet output) - no test files + const result2 = spawnSync({ + cmd: [bunExe(), "test"], + env: { ...bunEnv, CLAUDECODE: "1" }, + cwd: dir, + stderr: "pipe", + stdout: "pipe", + }); + + const normalOutput = result1.stderr.toString() + result1.stdout.toString(); + const quietOutput = result2.stderr.toString() + result2.stdout.toString(); + + expect(normalizeBunSnapshot(normalOutput, dir)).toMatchSnapshot("no-tests-normal"); + expect(normalizeBunSnapshot(quietOutput, dir)).toMatchSnapshot("no-tests-quiet"); +});