diff --git a/src/bun.js/test/jest.zig b/src/bun.js/test/jest.zig
index db590730f1..bc63ddfa77 100644
--- a/src/bun.js/test/jest.zig
+++ b/src/bun.js/test/jest.zig
@@ -36,6 +36,7 @@ pub const Tag = enum(u3) {
     only,
     skip,
     todo,
+    skipped_because_label,
 };
 const debug = Output.scoped(.jest, false);
 var max_test_id_for_debugger: u32 = 0;
@@ -84,9 +85,24 @@ pub const TestRunner = struct {
     filter_buffer: MutableString,
     unhandled_errors_between_tests: u32 = 0,
+    summary: Summary = Summary{},
 
     pub const Drainer = JSC.AnyTask.New(TestRunner, drain);
 
+    pub const Summary = struct {
+        pass: u32 = 0,
+        expectations: u32 = 0,
+        skip: u32 = 0,
+        todo: u32 = 0,
+        fail: u32 = 0,
+        files: u32 = 0,
+        skipped_because_label: u32 = 0,
+
+        pub fn didLabelFilterOutAllTests(this: *const Summary) bool {
+            return this.skipped_because_label > 0 and (this.pass + this.skip + this.todo + this.fail + this.expectations) == 0;
+        }
+    };
+
     pub fn onTestTimeout(this: *TestRunner, now: *const bun.timespec, vm: *VirtualMachine) void {
         _ = vm; // autofix
         this.event_loop_timer.state = .FIRED;
@@ -178,6 +194,7 @@ pub const TestRunner = struct {
         onTestPass: OnTestUpdate,
         onTestFail: OnTestUpdate,
         onTestSkip: OnTestUpdate,
+        onTestFilteredOut: OnTestUpdate, // when a test is filtered out by a label
         onTestTodo: OnTestUpdate,
     };
 
@@ -201,6 +218,11 @@ pub const TestRunner = struct {
         this.callback.onTestTodo(this.callback, test_id, file, label, 0, 0, parent);
     }
 
+    pub fn reportFilteredOut(this: *TestRunner, test_id: Test.ID, file: string, label: string, parent: ?*DescribeScope) void {
+        this.tests.items(.status)[test_id] = .skip;
+        this.callback.onTestFilteredOut(this.callback, test_id, file, label, 0, 0, parent);
+    }
+
     pub fn addTestCount(this: *TestRunner, count: u32) u32 {
         this.tests.ensureUnusedCapacity(this.allocator, count) catch unreachable;
         const start = @as(Test.ID, @truncate(this.tests.len));
@@ -250,6 +272,7 @@ pub const TestRunner = struct {
         fail,
         skip,
         todo,
+        skipped_because_label,
         /// A test marked as `.failing()` actually passed
         fail_because_failing_test_passed,
         fail_because_todo_passed,
@@ -1612,6 +1635,7 @@ pub const TestRunnerTask = struct {
                 );
             },
             .skip => Jest.runner.?.reportSkip(test_id, this.source_file_path, test_.label, describe),
+            .skipped_because_label => Jest.runner.?.reportFilteredOut(test_id, this.source_file_path, test_.label, describe),
             .todo => Jest.runner.?.reportTodo(test_id, this.source_file_path, test_.label, describe),
             .fail_because_todo_passed => |count| {
                 Output.prettyErrorln(" ^ this test is marked as todo but passes. Remove `.todo` or check that test is correct.", .{});
             },
@@ -1673,6 +1697,7 @@ pub const Result = union(TestRunner.Test.Status) {
     fail: u32,
     skip: void,
     todo: void,
+    skipped_because_label: void,
     fail_because_failing_test_passed: u32,
     fail_because_todo_passed: u32,
     fail_because_expected_has_assertions: void,
@@ -1815,15 +1840,21 @@ inline fn createScope(
 
     if (is_test) {
         if (!is_skip) {
-            if (Jest.runner.?.filter_regex) |regex| {
-                var buffer: bun.MutableString = Jest.runner.?.filter_buffer;
-                buffer.reset();
-                appendParentLabel(&buffer, parent) catch @panic("Bun ran out of memory while filtering tests");
-                buffer.append(label) catch unreachable;
-                const str = bun.String.fromBytes(buffer.slice());
-                is_skip = !regex.matches(str);
-                if (is_skip) {
-                    tag_to_use = .skip;
+            if (Jest.runner) |runner| {
+                if (runner.filter_regex) |regex| {
+                    var buffer: bun.MutableString = runner.filter_buffer;
+                    buffer.reset();
+                    appendParentLabel(&buffer, parent) catch @panic("Bun ran out of memory while filtering tests");
+                    buffer.append(label) catch unreachable;
+                    const str = bun.String.fromBytes(buffer.slice());
+                    is_skip = !regex.matches(str);
+                    if (is_skip) {
+                        tag_to_use = .skipped_because_label;
+                        if (comptime is_test) {
+                            // These won't get counted for describe scopes, which means the process will not exit with 1.
+                            runner.summary.skipped_because_label += 1;
+                        }
+                    }
                 }
             }
         }
@@ -1905,7 +1936,7 @@ inline fn createIfScope(
         .pass => .{ Scope.skip, Scope.call },
         .fail => @compileError("unreachable"),
         .only => @compileError("unreachable"),
-        .skip => .{ Scope.call, Scope.skip },
+        .skipped_because_label, .skip => .{ Scope.call, Scope.skip },
         .todo => .{ Scope.call, Scope.todo },
     };
 
@@ -2130,6 +2161,11 @@ fn eachBind(globalThis: *JSGlobalObject, callframe: *CallFrame) bun.JSError!JSVa
             buffer.append(formattedLabel) catch unreachable;
             const str = bun.String.fromBytes(buffer.slice());
             is_skip = !regex.matches(str);
+            if (is_skip) {
+                if (each_data.is_test) {
+                    Jest.runner.?.summary.skipped_because_label += 1;
+                }
+            }
         }
 
         if (is_skip) {
diff --git a/src/cli.zig b/src/cli.zig
index f95921e6e8..d0a6d357c5 100644
--- a/src/cli.zig
+++ b/src/cli.zig
@@ -357,6 +357,7 @@ pub const Command = struct {
         only: bool = false,
         bail: u32 = 0,
         coverage: TestCommand.CodeCoverageOptions = .{},
+        test_filter_pattern: ?[]const u8 = null,
         test_filter_regex: ?*RegularExpression = null,
 
         file_reporter: ?TestCommand.FileReporter = null,
diff --git a/src/cli/Arguments.zig b/src/cli/Arguments.zig
index e6e6659a9f..55eb512c7f 100644
--- a/src/cli/Arguments.zig
+++ b/src/cli/Arguments.zig
@@ -463,6 +463,7 @@ pub fn parse(allocator: std.mem.Allocator, ctx: Command.Context, comptime cmd: C
         }
     }
     if (args.option("--test-name-pattern")) |namePattern| {
+        ctx.test_options.test_filter_pattern = namePattern;
         const regex = RegularExpression.init(bun.String.fromBytes(namePattern), RegularExpression.Flags.none) catch {
             Output.prettyErrorln(
                 "error: --test-name-pattern expects a valid regular expression but received {}",
diff --git a/src/cli/test_command.zig b/src/cli/test_command.zig
index cacdd6735e..c956f96119 100644
--- a/src/cli/test_command.zig
+++ b/src/cli/test_command.zig
@@ -398,7 +398,7 @@ pub const JunitReporter = struct {
                 \\
                 , .{});
             },
-            .skip => {
+            .skipped_because_label, .skip => {
                 this.testcases_metrics.skipped += 1;
                 try this.contents.appendSlice(bun.default_allocator, ">\n      <skipped/>\n    </testcase>\n");
             },
@@ -462,7 +462,6 @@ pub const CommandLineReporter = struct {
     jest: TestRunner,
     callback: TestRunner.Callback,
     last_dot: u32 = 0,
-    summary: Summary = Summary{},
     prev_file: u64 = 0,
     repeat_count: u32 = 1,
@@ -476,15 +475,6 @@ pub const CommandLineReporter = struct {
         junit: *JunitReporter,
     };
 
-    pub const Summary = struct {
-        pass: u32 = 0,
-        expectations: u32 = 0,
-        skip: u32 = 0,
-        todo: u32 = 0,
-        fail: u32 = 0,
-        files: u32 = 0,
-    };
-
     const DotColorMap = std.EnumMap(TestRunner.Test.Status, string);
     const dots: DotColorMap = brk: {
         var map: DotColorMap = DotColorMap.init(.{});
@@ -607,6 +597,10 @@ pub const CommandLineReporter = struct {
         }
     }
 
+    pub inline fn summary(this: *CommandLineReporter) *TestRunner.Summary {
+        return &this.jest.summary;
+    }
+
     pub fn handleTestPass(cb: *TestRunner.Callback, id: Test.ID, file: string, label: string, expectations: u32, elapsed_ns: u64, parent: ?*jest.DescribeScope) void {
         const writer_ = Output.errorWriter();
         var buffered_writer = std.io.bufferedWriter(writer_);
@@ -620,8 +614,8 @@ pub const CommandLineReporter = struct {
         printTestLine(.pass, label, elapsed_ns, parent, expectations, false, writer, file, this.file_reporter);
 
         this.jest.tests.items(.status)[id] = TestRunner.Test.Status.pass;
-        this.summary.pass += 1;
-        this.summary.expectations += expectations;
+        this.summary().pass += 1;
+        this.summary().expectations += expectations;
     }
 
     pub fn handleTestFail(cb: *TestRunner.Callback, id: Test.ID, file: string, label: string, expectations: u32, elapsed_ns: u64, parent: ?*jest.DescribeScope) void {
@@ -646,11 +640,11 @@ pub const CommandLineReporter = struct {
         Output.flush();
 
         // this.updateDots();
-        this.summary.fail += 1;
-        this.summary.expectations += expectations;
+        this.summary().fail += 1;
+        this.summary().expectations += expectations;
         this.jest.tests.items(.status)[id] = TestRunner.Test.Status.fail;
 
-        if (this.jest.bail == this.summary.fail) {
+        if (this.jest.bail == this.summary().fail) {
             this.printSummary();
             Output.prettyError("\nBailed out after {d} failure{s}\n", .{ this.jest.bail, if (this.jest.bail == 1) "" else "s" });
             Global.exit(1);
@@ -676,8 +670,18 @@ pub const CommandLineReporter = struct {
         }
 
         // this.updateDots();
-        this.summary.skip += 1;
-        this.summary.expectations += expectations;
+        this.summary().skip += 1;
+        this.summary().expectations += expectations;
+        this.jest.tests.items(.status)[id] = TestRunner.Test.Status.skip;
+    }
+
+    pub fn handleTestFilteredOut(cb: *TestRunner.Callback, id: Test.ID, _: string, _: string, expectations: u32, _: u64, _: ?*jest.DescribeScope) void {
+        var this: *CommandLineReporter = @fieldParentPtr("callback", cb);
+
+        // this.updateDots();
+        this.summary().skipped_because_label += 1;
+        this.summary().skip += 1;
+        this.summary().expectations += expectations;
         this.jest.tests.items(.status)[id] = TestRunner.Test.Status.skip;
     }
 
@@ -698,16 +702,23 @@ pub const CommandLineReporter = struct {
         Output.flush();
 
         // this.updateDots();
-        this.summary.todo += 1;
-        this.summary.expectations += expectations;
+        this.summary().todo += 1;
+        this.summary().expectations += expectations;
         this.jest.tests.items(.status)[id] = TestRunner.Test.Status.todo;
     }
 
     pub fn printSummary(this: *CommandLineReporter) void {
-        const tests = this.summary.fail + this.summary.pass + this.summary.skip + this.summary.todo;
-        const files = this.summary.files;
+        const summary_ = this.summary();
+        const tests = summary_.fail + summary_.pass + summary_.skip + summary_.todo;
+        const files = summary_.files;
+
+        Output.prettyError("Ran {d} test{s} across {d} file{s}. ", .{
+            tests,
+            if (tests == 1) "" else "s",
+            files,
+            if (files == 1) "" else "s",
+        });
 
-        Output.prettyError("Ran {d} tests across {d} files. ", .{ tests, files });
         Output.printStartEnd(bun.start_time, std.time.nanoTimestamp());
     }
@@ -1063,6 +1074,7 @@ pub const TestCommand = struct {
             .onTestFail = CommandLineReporter.handleTestFail,
             .onTestSkip = CommandLineReporter.handleTestSkip,
             .onTestTodo = CommandLineReporter.handleTestTodo,
+            .onTestFilteredOut = CommandLineReporter.handleTestFilteredOut,
         };
         reporter.repeat_count = @max(ctx.test_options.repeat_count, 1);
         reporter.jest.callback = &reporter.callback;
@@ -1225,33 +1237,33 @@ pub const TestCommand = struct {
         const write_snapshots_success = try jest.Jest.runner.?.snapshots.writeInlineSnapshots();
         try jest.Jest.runner.?.snapshots.writeSnapshotFile();
         var coverage_options = ctx.test_options.coverage;
-        if (reporter.summary.pass > 20) {
-            if (reporter.summary.skip > 0) {
-                Output.prettyError("\n{d} tests skipped:\n", .{reporter.summary.skip});
+        if (reporter.summary().pass > 20) {
+            if (reporter.summary().skip > 0) {
+                Output.prettyError("\n{d} tests skipped:\n", .{reporter.summary().skip});
                 Output.flush();
 
                 var error_writer = Output.errorWriter();
                 error_writer.writeAll(reporter.skips_to_repeat_buf.items) catch unreachable;
             }
 
-            if (reporter.summary.todo > 0) {
-                if (reporter.summary.skip > 0) {
+            if (reporter.summary().todo > 0) {
+                if (reporter.summary().skip > 0) {
                     Output.prettyError("\n", .{});
                 }
 
-                Output.prettyError("\n{d} tests todo:\n", .{reporter.summary.todo});
+                Output.prettyError("\n{d} tests todo:\n", .{reporter.summary().todo});
                 Output.flush();
 
                 var error_writer = Output.errorWriter();
                 error_writer.writeAll(reporter.todos_to_repeat_buf.items) catch unreachable;
             }
 
-            if (reporter.summary.fail > 0) {
-                if (reporter.summary.skip > 0 or reporter.summary.todo > 0) {
+            if (reporter.summary().fail > 0) {
+                if (reporter.summary().skip > 0 or reporter.summary().todo > 0) {
                     Output.prettyError("\n", .{});
                 }
 
-                Output.prettyError("\n{d} tests failed:\n", .{reporter.summary.fail});
+                Output.prettyError("\n{d} tests failed:\n", .{reporter.summary().fail});
                 Output.flush();
 
                 var error_writer = Output.errorWriter();
@@ -1261,7 +1273,11 @@ pub const TestCommand = struct {
 
         Output.flush();
 
+        var failed_to_find_any_tests = false;
+
         if (test_files.len == 0) {
+            failed_to_find_any_tests = true;
+
             if (ctx.positionals.len == 0) {
                 Output.prettyErrorln(
                     \\No tests found!
@@ -1321,76 +1337,92 @@ pub const TestCommand = struct {
             }
         }
 
-        if (reporter.summary.pass > 0) {
-            Output.prettyError("<r><green>", .{});
-        }
+        const summary = reporter.summary();
+        const did_label_filter_out_all_tests = summary.didLabelFilterOutAllTests() and reporter.jest.unhandled_errors_between_tests == 0;
 
-        Output.prettyError(" {d:5>} pass\n", .{reporter.summary.pass});
-
-        if (reporter.summary.skip > 0) {
-            Output.prettyError(" {d:5>} skip\n", .{reporter.summary.skip});
-        }
-
-        if (reporter.summary.todo > 0) {
-            Output.prettyError(" {d:5>} todo\n", .{reporter.summary.todo});
-        }
-
-        if (reporter.summary.fail > 0) {
-            Output.prettyError("<r><red>", .{});
-        } else {
-            Output.prettyError("<r><d>", .{});
-        }
-
-        Output.prettyError(" {d:5>} fail\n", .{reporter.summary.fail});
-        if (reporter.jest.unhandled_errors_between_tests > 0) {
-            Output.prettyError(" {d:5>} error{s}\n", .{ reporter.jest.unhandled_errors_between_tests, if (reporter.jest.unhandled_errors_between_tests > 1) "s" else "" });
-        }
-
-        var print_expect_calls = reporter.summary.expectations > 0;
-        if (reporter.jest.snapshots.total > 0) {
-            const passed = reporter.jest.snapshots.passed;
-            const failed = reporter.jest.snapshots.failed;
-            const added = reporter.jest.snapshots.added;
-
-            var first = true;
-            if (print_expect_calls and added == 0 and failed == 0) {
-                print_expect_calls = false;
-                Output.prettyError(" {d:5>} snapshots, {d:5>} expect() calls", .{ reporter.jest.snapshots.total, reporter.summary.expectations });
-            } else {
-                Output.prettyError(" snapshots: ", .{});
-
-                if (passed > 0) {
-                    Output.prettyError("{d} passed", .{passed});
-                    first = false;
-                }
-
-                if (added > 0) {
-                    if (first) {
-                        first = false;
-                        Output.prettyError("+{d} added", .{added});
-                    } else {
-                        Output.prettyError(", {d} added", .{added});
-                    }
-                }
-
-                if (failed > 0) {
-                    if (first) {
-                        first = false;
-                        Output.prettyError("{d} failed", .{failed});
-                    } else {
-                        Output.prettyError(", {d} failed", .{failed});
-                    }
-                }
+        if (!did_label_filter_out_all_tests) {
+            if (summary.pass > 0) {
+                Output.prettyError("<r><green>", .{});
             }
 
-            Output.prettyError("\n", .{});
-        }
+            Output.prettyError(" {d:5>} pass\n", .{summary.pass});
 
-        if (print_expect_calls) {
-            Output.prettyError(" {d:5>} expect() calls\n", .{reporter.summary.expectations});
-        }
+            if (summary.skip > 0) {
+                Output.prettyError(" {d:5>} skip\n", .{summary.skip});
+            } else if (summary.skipped_because_label > 0) {
+                Output.prettyError(" {d:5>} filtered out\n", .{summary.skipped_because_label});
+            }
 
-        reporter.printSummary();
+            if (summary.todo > 0) {
+                Output.prettyError(" {d:5>} todo\n", .{summary.todo});
+            }
+
+            if (summary.fail > 0) {
+                Output.prettyError("<r><red>", .{});
+            } else {
+                Output.prettyError("<r><d>", .{});
+            }
+
+            Output.prettyError(" {d:5>} fail\n", .{summary.fail});
+            if (reporter.jest.unhandled_errors_between_tests > 0) {
+                Output.prettyError(" {d:5>} error{s}\n", .{ reporter.jest.unhandled_errors_between_tests, if (reporter.jest.unhandled_errors_between_tests > 1) "s" else "" });
+            }
+
+            var print_expect_calls = reporter.summary().expectations > 0;
+            if (reporter.jest.snapshots.total > 0) {
+                const passed = reporter.jest.snapshots.passed;
+                const failed = reporter.jest.snapshots.failed;
+                const added = reporter.jest.snapshots.added;
+
+                var first = true;
+                if (print_expect_calls and added == 0 and failed == 0) {
+                    print_expect_calls = false;
+                    Output.prettyError(" {d:5>} snapshots, {d:5>} expect() calls", .{ reporter.jest.snapshots.total, reporter.summary().expectations });
+                } else {
+                    Output.prettyError(" snapshots: ", .{});
+
+                    if (passed > 0) {
+                        Output.prettyError("{d} passed", .{passed});
+                        first = false;
+                    }
+
+                    if (added > 0) {
+                        if (first) {
+                            first = false;
+                            Output.prettyError("+{d} added", .{added});
+                        } else {
+                            Output.prettyError(", {d} added", .{added});
+                        }
+                    }
+
+                    if (failed > 0) {
+                        if (first) {
+                            first = false;
+                            Output.prettyError("{d} failed", .{failed});
+                        } else {
+                            Output.prettyError(", {d} failed", .{failed});
+                        }
+                    }
+                }
+
+                Output.prettyError("\n", .{});
+            }
+
+            if (print_expect_calls) {
+                Output.prettyError(" {d:5>} expect() calls\n", .{reporter.summary().expectations});
+            }
+
+            reporter.printSummary();
+        } else {
+            Output.prettyError("error: regex {} matched 0 tests. Searched {d} file{s} (skipping {d} test{s}) ", .{
+                bun.fmt.quote(ctx.test_options.test_filter_pattern.?),
+                summary.files,
+                if (summary.files == 1) "" else "s",
+                summary.skipped_because_label,
+                if (summary.skipped_because_label == 1) "" else "s",
+            });
+            Output.printStartEnd(ctx.start_time, std.time.nanoTimestamp());
+        }
     }
 
     Output.prettyError("\n", .{});
@@ -1410,8 +1442,9 @@ pub const TestCommand = struct {
         if (vm.hot_reload == .watch) {
             vm.runWithAPILock(JSC.VirtualMachine, vm, runEventLoopForWatch);
         }
+        const summary = reporter.summary();
 
-        if (reporter.summary.fail > 0 or (coverage_options.enabled and coverage_options.fractions.failing and coverage_options.fail_on_low_coverage) or !write_snapshots_success) {
+        if (failed_to_find_any_tests or summary.didLabelFilterOutAllTests() or summary.fail > 0 or (coverage_options.enabled and coverage_options.fractions.failing and coverage_options.fail_on_low_coverage) or !write_snapshots_success) {
             Global.exit(1);
         } else if (reporter.jest.unhandled_errors_between_tests > 0) {
             Global.exit(reporter.jest.unhandled_errors_between_tests);
@@ -1523,14 +1556,14 @@ pub const TestCommand = struct {
         Output.flush();
 
         var promise = try vm.loadEntryPointForTestRunner(file_path);
-        reporter.summary.files += 1;
+        reporter.summary().files += 1;
 
         switch (promise.status(vm.global.vm())) {
             .rejected => {
                 vm.unhandledRejection(vm.global, promise.result(vm.global.vm()), promise.asValue());
-                reporter.summary.fail += 1;
+                reporter.summary().fail += 1;
 
-                if (reporter.jest.bail == reporter.summary.fail) {
+                if (reporter.jest.bail == reporter.summary().fail) {
                     reporter.printSummary();
                     Output.prettyError("\nBailed out after {d} failure{s}\n", .{ reporter.jest.bail, if (reporter.jest.bail == 1) "" else "s" });
diff --git a/test/cli/test/bun-test.test.ts b/test/cli/test/bun-test.test.ts
index 18f47e3388..b32c0fdfab 100644
--- a/test/cli/test/bun-test.test.ts
+++ b/test/cli/test/bun-test.test.ts
@@ -883,6 +883,61 @@ describe("bun test", () => {
       test.todo("check formatting for %p", () => {});
     });
 
+    test("Prints error when no test matches", () => {
+      const stderr = runTest({
+        args: ["-t", "not-a-test"],
+        input: `
+          import { test, expect } from "bun:test";
+          test("test", () => {});
+        `,
+        expectExitCode: 1,
+      });
+      expect(
+        stderr
+          .replace(/bun-test-(.*)\.test\.ts/, "bun-test-*.test.ts")
+          .trim()
+          .replace(/\[.*\ms\]/, "[xx ms]"),
+      ).toMatchInlineSnapshot(`
+        "bun-test-*.test.ts:
+
+        error: regex "not-a-test" matched 0 tests. Searched 1 file (skipping 1 test) [xx ms]"
+      `);
+    });
+
+    test("Does not print the regex error when a test fails", () => {
+      const stderr = runTest({
+        args: ["-t", "not-a-test"],
+        input: `
+          import { test, expect } from "bun:test";
+          test("not-a-test", () => {
+            expect(false).toBe(true);
+          });
+        `,
+        expectExitCode: 1,
+      });
+      expect(stderr).not.toContain("error: regex");
+      expect(stderr).toContain("1 fail");
+    });
+
+    test("Does not print the regex error when a test matches and a test passes", () => {
+      const stderr = runTest({
+        args: ["-t", "not-a-test"],
+        input: `
+          import { test, expect } from "bun:test";
+          test("not-a-test", () => {
+            expect(false).toBe(true);
+          });
+          test("not-a-test", () => {
+            expect(true).toBe(true);
+          });
+        `,
+        expectExitCode: 1,
+      });
+      expect(stderr).not.toContain("error: regex");
+      expect(stderr).toContain("1 fail");
+      expect(stderr).toContain("1 pass");
+    });
+
     test("path to a non-test.ts file will work", () => {
       const stderr = runTest({
         args: ["./index.ts"],
@@ -944,21 +999,26 @@ function runTest({
   cwd,
   args = [],
   env = {},
+  expectExitCode = undefined,
 }: {
   input?: string | (string | { filename: string; contents: string })[];
   cwd?: string;
   args?: string[];
   env?: Record<string, string>;
+  expectExitCode?: number;
 } = {}): string {
   cwd ??= createTest(input);
   try {
-    const { stderr } = spawnSync({
+    const { stderr, exitCode } = spawnSync({
       cwd,
       cmd: [bunExe(), "test", ...args],
       env: { ...bunEnv, ...env },
       stderr: "pipe",
       stdout: "ignore",
     });
+    if (expectExitCode !== undefined) {
+      expect(exitCode).toBe(expectExitCode);
+    }
     return stderr.toString();
   } finally {
     rmSync(cwd, { recursive: true });
diff --git a/test/js/bun/test/test-error-code-done-callback.test.ts b/test/js/bun/test/test-error-code-done-callback.test.ts
index bfdc62d945..0d4e9eab0e 100644
--- a/test/js/bun/test/test-error-code-done-callback.test.ts
+++ b/test/js/bun/test/test-error-code-done-callback.test.ts
@@ -134,7 +134,7 @@ test("verify we print error messages passed to done callbacks", () => {
 
      0 pass
      9 fail
-    Ran 9 tests across 1 files.
+    Ran 9 tests across 1 file.
     "
   `);
 });
diff --git a/test/js/bun/test/test-only.test.ts b/test/js/bun/test/test-only.test.ts
index 5a45cc484a..cae48a490b 100644
--- a/test/js/bun/test/test-only.test.ts
+++ b/test/js/bun/test/test-only.test.ts
@@ -9,7 +9,7 @@ test.each(["./only-fixture-1.ts", "./only-fixture-2.ts", "./only-fixture-3.ts"])
 
     expect(result.stderr.toString()).toContain(" 1 pass\n");
     expect(result.stderr.toString()).toContain(" 0 fail\n");
-    expect(result.stderr.toString()).toContain("Ran 1 tests across 1 files");
+    expect(result.stderr.toString()).toContain("Ran 1 test across 1 file");
   },
 );
diff --git a/test/js/bun/test/test-test.test.ts b/test/js/bun/test/test-test.test.ts
index a24e036ef5..76668873d2 100644
--- a/test/js/bun/test/test-test.test.ts
+++ b/test/js/bun/test/test-test.test.ts
@@ -297,11 +297,27 @@ it("should return non-zero exit code for invalid syntax", async () => {
     stderr: "pipe",
     env: bunEnv,
   });
-  const err = await new Response(stderr).text();
-  expect(err).toContain("error: Unexpected end of file");
-  expect(err).toContain(" 0 pass");
-  expect(err).toContain(" 1 fail");
-  expect(err).toContain("Ran 1 tests across 1 files");
+  const err = (await new Response(stderr).text()).replaceAll("\\", "/");
+  expect(err.replaceAll(test_dir.replaceAll("\\", "/"), "").replaceAll(/\[(.*)\ms\]/g, "[xx ms]"))
+    .toMatchInlineSnapshot(`
+    "
+    bad.test.js:
+
+    # Unhandled error between tests
+    -------------------------------
+    1 | !!!
+        ^
+    error: Unexpected end of file
+        at /bad.test.js:1:3
+    -------------------------------
+
+
+     0 pass
+     1 fail
+     1 error
+    Ran 1 test across 1 file. [xx ms]
+    "
+  `);
   expect(stdout).toBeDefined();
   expect(await new Response(stdout).text()).toBe(`bun test ${Bun.version_with_sha}\n`);
   expect(await exited).toBe(1);
@@ -732,7 +748,7 @@ test("my-test", () => {
       expect(output).toContain("1 error");
     }
 
-    expect(output).toContain("Ran 1 tests across 1 files");
+    expect(output).toContain("Ran 1 test across 1 file");
   });
 }
});
diff --git a/test/js/node/test/parallel/needs-test/README.md b/test/js/node/test/parallel/needs-test/README.md
deleted file mode 100644
index 821ae16ee3..0000000000
--- a/test/js/node/test/parallel/needs-test/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-A good deal of parallel test cases can be run directly via `bun <file>`.
-However, some newer cases use `node:test`.
-
-Files in this directory need to be run with `bun test <file>`. The
-`node:test` module is shimmed via a require cache hack in
-`test/js/node/harness.js` to use `bun:test`. Note that our test runner
-(`scripts/runner.node.mjs`) checks for `needs-test` in the names of test files,
-so don't rename this folder without updating that code.