Mirror of https://github.com/oven-sh/bun, synced 2026-02-18 23:01:58 +00:00

Compare commits: 1 commit (claude/fix..., nektro-pat)

Commit 95b5e7c55b
.github/workflows/update-hdrhistogram.yml (vendored, 99 lines)
@@ -1,99 +0,0 @@
name: Update hdrhistogram

on:
  schedule:
    - cron: "0 4 * * 0"
  workflow_dispatch:

jobs:
  check-update:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write

    steps:
      - uses: actions/checkout@v4

      - name: Check hdrhistogram version
        id: check-version
        run: |
          set -euo pipefail

          # Extract the commit hash from the line after COMMIT
          CURRENT_VERSION=$(awk '/[[:space:]]*COMMIT[[:space:]]*$/{getline; gsub(/^[[:space:]]+|[[:space:]]+$/,"",$0); print}' cmake/targets/BuildHdrHistogram.cmake)

          if [ -z "$CURRENT_VERSION" ]; then
            echo "Error: Could not find COMMIT line in BuildHdrHistogram.cmake"
            exit 1
          fi

          # Validate that it looks like a git hash
          if ! [[ $CURRENT_VERSION =~ ^[0-9a-f]{40}$ ]]; then
            echo "Error: Invalid git hash format in BuildHdrHistogram.cmake"
            echo "Found: $CURRENT_VERSION"
            echo "Expected: 40 character hexadecimal string"
            exit 1
          fi

          echo "current=$CURRENT_VERSION" >> $GITHUB_OUTPUT

          LATEST_RELEASE=$(curl -sL https://api.github.com/repos/HdrHistogram/HdrHistogram_c/releases/latest)
          if [ -z "$LATEST_RELEASE" ]; then
            echo "Error: Failed to fetch latest release from GitHub API"
            exit 1
          fi

          LATEST_TAG=$(echo "$LATEST_RELEASE" | jq -r '.tag_name')
          if [ -z "$LATEST_TAG" ] || [ "$LATEST_TAG" = "null" ]; then
            echo "Error: Could not extract tag name from GitHub API response"
            exit 1
          fi

          LATEST_TAG_SHA=$(curl -sL "https://api.github.com/repos/HdrHistogram/HdrHistogram_c/git/refs/tags/$LATEST_TAG" | jq -r '.object.sha')
          if [ -z "$LATEST_TAG_SHA" ] || [ "$LATEST_TAG_SHA" = "null" ]; then
            echo "Error: Could not fetch SHA for tag $LATEST_TAG"
            exit 1
          fi
          LATEST_SHA=$(curl -sL "https://api.github.com/repos/HdrHistogram/HdrHistogram_c/git/tags/$LATEST_TAG_SHA" | jq -r '.object.sha')
          if [ -z "$LATEST_SHA" ] || [ "$LATEST_SHA" = "null" ]; then
            echo "Error: Could not fetch SHA for tag $LATEST_TAG @ $LATEST_TAG_SHA"
            exit 1
          fi

          if ! [[ $LATEST_SHA =~ ^[0-9a-f]{40}$ ]]; then
            echo "Error: Invalid SHA format received from GitHub"
            echo "Found: $LATEST_SHA"
            echo "Expected: 40 character hexadecimal string"
            exit 1
          fi

          echo "latest=$LATEST_SHA" >> $GITHUB_OUTPUT
          echo "tag=$LATEST_TAG" >> $GITHUB_OUTPUT

      - name: Update version if needed
        if: success() && steps.check-version.outputs.current != steps.check-version.outputs.latest
        run: |
          set -euo pipefail
          # Handle multi-line format where COMMIT and its value are on separate lines
          sed -i -E '/[[:space:]]*COMMIT[[:space:]]*$/{n;s/[[:space:]]*([0-9a-f]+)[[:space:]]*$/ ${{ steps.check-version.outputs.latest }}/}' cmake/targets/BuildHdrHistogram.cmake

      - name: Create Pull Request
        if: success() && steps.check-version.outputs.current != steps.check-version.outputs.latest
        uses: peter-evans/create-pull-request@v4
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          add-paths: |
            cmake/targets/BuildHdrHistogram.cmake
          commit-message: "deps: update hdrhistogram to ${{ steps.check-version.outputs.tag }} (${{ steps.check-version.outputs.latest }})"
          title: "deps: update hdrhistogram to ${{ steps.check-version.outputs.tag }}"
          delete-branch: true
          branch: deps/update-cares-${{ github.run_number }}
          body: |
            ## What does this PR do?

            Updates hdrhistogram to version ${{ steps.check-version.outputs.tag }}

            Compare: https://github.com/HdrHistogram/HdrHistogram_c/compare/${{ steps.check-version.outputs.current }}...${{ steps.check-version.outputs.latest }}

            Auto-updated by [this workflow](https://github.com/oven-sh/bun/actions/workflows/update-cares.yml)
.github/workflows/update-highway.yml (vendored, 99 lines)
@@ -1,99 +0,0 @@
name: Update highway

on:
  schedule:
    - cron: "0 4 * * 0"
  workflow_dispatch:

jobs:
  check-update:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write

    steps:
      - uses: actions/checkout@v4

      - name: Check highway version
        id: check-version
        run: |
          set -euo pipefail

          # Extract the commit hash from the line after COMMIT
          CURRENT_VERSION=$(awk '/[[:space:]]*COMMIT[[:space:]]*$/{getline; gsub(/^[[:space:]]+|[[:space:]]+$/,"",$0); print}' cmake/targets/BuildHighway.cmake)

          if [ -z "$CURRENT_VERSION" ]; then
            echo "Error: Could not find COMMIT line in BuildHighway.cmake"
            exit 1
          fi

          # Validate that it looks like a git hash
          if ! [[ $CURRENT_VERSION =~ ^[0-9a-f]{40}$ ]]; then
            echo "Error: Invalid git hash format in BuildHighway.cmake"
            echo "Found: $CURRENT_VERSION"
            echo "Expected: 40 character hexadecimal string"
            exit 1
          fi

          echo "current=$CURRENT_VERSION" >> $GITHUB_OUTPUT

          LATEST_RELEASE=$(curl -sL https://api.github.com/repos/google/highway/releases/latest)
          if [ -z "$LATEST_RELEASE" ]; then
            echo "Error: Failed to fetch latest release from GitHub API"
            exit 1
          fi

          LATEST_TAG=$(echo "$LATEST_RELEASE" | jq -r '.tag_name')
          if [ -z "$LATEST_TAG" ] || [ "$LATEST_TAG" = "null" ]; then
            echo "Error: Could not extract tag name from GitHub API response"
            exit 1
          fi

          LATEST_TAG_SHA=$(curl -sL "https://api.github.com/repos/google/highway/git/refs/tags/$LATEST_TAG" | jq -r '.object.sha')
          if [ -z "$LATEST_TAG_SHA" ] || [ "$LATEST_TAG_SHA" = "null" ]; then
            echo "Error: Could not fetch SHA for tag $LATEST_TAG"
            exit 1
          fi
          LATEST_SHA=$(curl -sL "https://api.github.com/repos/google/highway/git/tags/$LATEST_TAG_SHA" | jq -r '.object.sha')
          if [ -z "$LATEST_SHA" ] || [ "$LATEST_SHA" = "null" ]; then
            echo "Error: Could not fetch SHA for tag $LATEST_TAG @ $LATEST_TAG_SHA"
            exit 1
          fi

          if ! [[ $LATEST_SHA =~ ^[0-9a-f]{40}$ ]]; then
            echo "Error: Invalid SHA format received from GitHub"
            echo "Found: $LATEST_SHA"
            echo "Expected: 40 character hexadecimal string"
            exit 1
          fi

          echo "latest=$LATEST_SHA" >> $GITHUB_OUTPUT
          echo "tag=$LATEST_TAG" >> $GITHUB_OUTPUT

      - name: Update version if needed
        if: success() && steps.check-version.outputs.current != steps.check-version.outputs.latest
        run: |
          set -euo pipefail
          # Handle multi-line format where COMMIT and its value are on separate lines
          sed -i -E '/[[:space:]]*COMMIT[[:space:]]*$/{n;s/[[:space:]]*([0-9a-f]+)[[:space:]]*$/ ${{ steps.check-version.outputs.latest }}/}' cmake/targets/BuildHighway.cmake

      - name: Create Pull Request
        if: success() && steps.check-version.outputs.current != steps.check-version.outputs.latest
        uses: peter-evans/create-pull-request@v4
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          add-paths: |
            cmake/targets/BuildHighway.cmake
          commit-message: "deps: update highway to ${{ steps.check-version.outputs.tag }} (${{ steps.check-version.outputs.latest }})"
          title: "deps: update highway to ${{ steps.check-version.outputs.tag }}"
          delete-branch: true
          branch: deps/update-cares-${{ github.run_number }}
          body: |
            ## What does this PR do?

            Updates highway to version ${{ steps.check-version.outputs.tag }}

            Compare: https://github.com/google/highway/compare/${{ steps.check-version.outputs.current }}...${{ steps.check-version.outputs.latest }}

            Auto-updated by [this workflow](https://github.com/oven-sh/bun/actions/workflows/update-cares.yml)
@@ -139,10 +139,10 @@ endif()
optionx(REVISION STRING "The git revision of the build" DEFAULT ${DEFAULT_REVISION})

# Used in process.version, process.versions.node, napi, and elsewhere
setx(NODEJS_VERSION "24.3.0")
optionx(NODEJS_VERSION STRING "The version of Node.js to report" DEFAULT "24.3.0")

# Used in process.versions.modules and compared while loading V8 modules
setx(NODEJS_ABI_VERSION "137")
optionx(NODEJS_ABI_VERSION STRING "The ABI version of Node.js to report" DEFAULT "137")

if(APPLE)
  set(DEFAULT_STATIC_SQLITE OFF)
@@ -248,34 +248,4 @@ $ bun test foo

Any test file in the directory with an _absolute path_ that contains one of the targets will run. Glob patterns are not yet supported. -->

## AI Agent Integration

When using Bun's test runner with AI coding assistants, you can enable quieter output to improve readability and reduce context noise. This feature minimizes test output verbosity while preserving essential failure information.

### Environment Variables

Set any of the following environment variables to enable AI-friendly output:

- `CLAUDECODE=1` - For Claude Code
- `REPL_ID=1` - For Replit
- `IS_CODE_AGENT=1` - Generic AI agent flag

### Behavior

When an AI agent environment is detected:

- Only test failures are displayed in detail
- Passing, skipped, and todo test indicators are hidden
- Summary statistics remain intact
- JUnit XML reporting is preserved

```bash
# Example: Enable quiet output for Claude Code
$ CLAUDECODE=1 bun test

# Still shows failures and summary, but hides verbose passing test output
```

This feature is particularly useful in AI-assisted development workflows where reduced output verbosity improves context efficiency while maintaining visibility into test failures.
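
If you drive `bun test` from a script rather than a terminal, the same switch can be set programmatically. The following is a minimal sketch, not part of the official docs; it assumes a `bun` binary on `PATH` and uses the generic `IS_CODE_AGENT` flag described above together with Bun's `Bun.spawn` API:

```ts
// Illustrative sketch: run `bun test` with the generic AI-agent flag and capture its output.
// Assumes `bun` is on PATH; the env variable mirrors the ones documented above.
const proc = Bun.spawn({
  cmd: ["bun", "test"],
  env: { ...process.env, IS_CODE_AGENT: "1" },
  stdout: "pipe",
  stderr: "pipe",
});

const [stdout, stderr] = await Promise.all([proc.stdout.text(), proc.stderr.text()]);

// Only failures and the summary survive; passing/skip/todo indicators are suppressed.
console.log(stderr + stdout);
```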

{% bunCLIUsage command="test" /%}
@@ -95,9 +95,6 @@ You can use the following configurations to debug JavaScript and TypeScript files
      // The URL of the WebSocket inspector to attach to.
      // This value can be retrieved by using `bun --inspect`.
      "url": "ws://localhost:6499/",
      // Optional path mapping for remote debugging
      "localRoot": "${workspaceFolder}",
      "remoteRoot": "/app",
    },
  ],
}
@@ -279,14 +279,6 @@
          "type": "boolean",
          "description": "If the debugger should stop on the first line of the program.",
          "default": false
        },
        "localRoot": {
          "type": "string",
          "description": "The local path that maps to \"remoteRoot\" when attaching to a remote Bun process."
        },
        "remoteRoot": {
          "type": "string",
          "description": "The remote path to the code when attaching. File paths reported by Bun that start with this path will be mapped back to 'localRoot'."
        }
      }
    }
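
As a rough illustration of the `remoteRoot`/`localRoot` semantics described above, a path reported by the remote Bun process is rewritten by swapping the matching prefix. This is a stand-alone sketch with hypothetical values, not the extension's actual API:

```ts
import * as path from "node:path";

// Stand-alone sketch of a remoteRoot -> localRoot rewrite; illustrative only.
function mapRemoteToLocal(p: string, remoteRoot: string, localRoot: string): string {
  const isWindowsRemote = remoteRoot.includes("\\");
  const remote = isWindowsRemote ? path.win32 : path.posix;
  let root = remote.normalize(remoteRoot);
  if (!root.endsWith(remote.sep)) root += remote.sep;
  const target = remote.normalize(p);
  const matches = isWindowsRemote
    ? target.toLowerCase().startsWith(root.toLowerCase())
    : target.startsWith(root);
  if (!matches) return p; // outside remoteRoot: leave the path untouched
  const rel = target.slice(root.length).split(remote.sep).join(path.sep);
  return path.join(localRoot, rel);
}

// e.g. mapRemoteToLocal("/app/src/index.ts", "/app", "/Users/me/project")
//   -> "/Users/me/project/src/index.ts" on a POSIX host
```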
@@ -1,6 +1,5 @@
|
||||
import { DebugSession, OutputEvent } from "@vscode/debugadapter";
|
||||
import { tmpdir } from "node:os";
|
||||
import * as path from "node:path";
|
||||
import { join } from "node:path";
|
||||
import * as vscode from "vscode";
|
||||
import {
|
||||
@@ -221,7 +220,7 @@ class InlineDebugAdapterFactory implements vscode.DebugAdapterDescriptorFactory
|
||||
session: vscode.DebugSession,
|
||||
): Promise<vscode.ProviderResult<vscode.DebugAdapterDescriptor>> {
|
||||
const { configuration } = session;
|
||||
const { request, url, __untitledName, localRoot, remoteRoot } = configuration;
|
||||
const { request, url, __untitledName } = configuration;
|
||||
|
||||
if (request === "attach") {
|
||||
for (const [adapterUrl, adapter] of adapters) {
|
||||
@@ -231,10 +230,7 @@ class InlineDebugAdapterFactory implements vscode.DebugAdapterDescriptorFactory
|
||||
}
|
||||
}
|
||||
|
||||
const adapter = new FileDebugSession(session.id, __untitledName, {
|
||||
localRoot,
|
||||
remoteRoot,
|
||||
});
|
||||
const adapter = new FileDebugSession(session.id, __untitledName);
|
||||
await adapter.initialize();
|
||||
return new vscode.DebugAdapterInlineImplementation(adapter);
|
||||
}
|
||||
@@ -279,11 +275,6 @@ interface RuntimeExceptionThrownEvent {
|
||||
};
|
||||
}
|
||||
|
||||
interface PathMapping {
|
||||
localRoot?: string;
|
||||
remoteRoot?: string;
|
||||
}
|
||||
|
||||
class FileDebugSession extends DebugSession {
|
||||
// If these classes are moved/published, we should make sure
|
||||
// we remove these non-null assertions so consumers of
|
||||
@@ -292,60 +283,18 @@ class FileDebugSession extends DebugSession {
|
||||
sessionId?: string;
|
||||
untitledDocPath?: string;
|
||||
bunEvalPath?: string;
|
||||
localRoot?: string;
|
||||
remoteRoot?: string;
|
||||
#isWindowsRemote = false;
|
||||
|
||||
constructor(sessionId?: string, untitledDocPath?: string, mapping?: PathMapping) {
|
||||
constructor(sessionId?: string, untitledDocPath?: string) {
|
||||
super();
|
||||
this.sessionId = sessionId;
|
||||
this.untitledDocPath = untitledDocPath;
|
||||
|
||||
if (mapping) {
|
||||
this.localRoot = mapping.localRoot;
|
||||
this.remoteRoot = mapping.remoteRoot;
|
||||
if (typeof mapping.remoteRoot === "string") {
|
||||
this.#isWindowsRemote = mapping.remoteRoot.includes("\\");
|
||||
}
|
||||
}
|
||||
|
||||
if (untitledDocPath) {
|
||||
const cwd = vscode.workspace.workspaceFolders?.[0]?.uri?.fsPath ?? process.cwd();
|
||||
this.bunEvalPath = join(cwd, "[eval]");
|
||||
}
|
||||
}
|
||||
|
||||
mapRemoteToLocal(p: string | undefined): string | undefined {
|
||||
if (!p || !this.remoteRoot || !this.localRoot) return p;
|
||||
const remoteModule = this.#isWindowsRemote ? path.win32 : path.posix;
|
||||
let remoteRoot = remoteModule.normalize(this.remoteRoot);
|
||||
if (!remoteRoot.endsWith(remoteModule.sep)) remoteRoot += remoteModule.sep;
|
||||
let target = remoteModule.normalize(p);
|
||||
const starts = this.#isWindowsRemote
|
||||
? target.toLowerCase().startsWith(remoteRoot.toLowerCase())
|
||||
: target.startsWith(remoteRoot);
|
||||
if (starts) {
|
||||
const rel = target.slice(remoteRoot.length);
|
||||
const localRel = rel.split(remoteModule.sep).join(path.sep);
|
||||
return path.join(this.localRoot, localRel);
|
||||
}
|
||||
return p;
|
||||
}
|
||||
|
||||
mapLocalToRemote(p: string | undefined): string | undefined {
|
||||
if (!p || !this.remoteRoot || !this.localRoot) return p;
|
||||
let localRoot = path.normalize(this.localRoot);
|
||||
if (!localRoot.endsWith(path.sep)) localRoot += path.sep;
|
||||
let localPath = path.normalize(p);
|
||||
if (localPath.startsWith(localRoot)) {
|
||||
const rel = localPath.slice(localRoot.length);
|
||||
const remoteModule = this.#isWindowsRemote ? path.win32 : path.posix;
|
||||
const remoteRel = rel.split(path.sep).join(remoteModule.sep);
|
||||
return remoteModule.join(this.remoteRoot, remoteRel);
|
||||
}
|
||||
return p;
|
||||
}
|
||||
|
||||
async initialize() {
|
||||
const uniqueId = this.sessionId ?? Math.random().toString(36).slice(2);
|
||||
const url =
|
||||
@@ -358,20 +307,14 @@ class FileDebugSession extends DebugSession {
|
||||
|
||||
if (untitledDocPath) {
|
||||
this.adapter.on("Adapter.response", (response: DebugProtocolResponse) => {
|
||||
if (response.body?.source?.path) {
|
||||
if (response.body.source.path === bunEvalPath) {
|
||||
response.body.source.path = untitledDocPath;
|
||||
} else {
|
||||
response.body.source.path = this.mapRemoteToLocal(response.body.source.path);
|
||||
}
|
||||
if (response.body?.source?.path === bunEvalPath) {
|
||||
response.body.source.path = untitledDocPath;
|
||||
}
|
||||
if (Array.isArray(response.body?.breakpoints)) {
|
||||
for (const bp of response.body.breakpoints) {
|
||||
if (bp.source?.path === bunEvalPath) {
|
||||
bp.source.path = untitledDocPath;
|
||||
bp.verified = true;
|
||||
} else if (bp.source?.path) {
|
||||
bp.source.path = this.mapRemoteToLocal(bp.source.path);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -379,35 +322,14 @@ class FileDebugSession extends DebugSession {
|
||||
});
|
||||
|
||||
this.adapter.on("Adapter.event", (event: DebugProtocolEvent) => {
|
||||
if (event.body?.source?.path) {
|
||||
if (event.body.source.path === bunEvalPath) {
|
||||
event.body.source.path = untitledDocPath;
|
||||
} else {
|
||||
event.body.source.path = this.mapRemoteToLocal(event.body.source.path);
|
||||
}
|
||||
if (event.body?.source?.path === bunEvalPath) {
|
||||
event.body.source.path = untitledDocPath;
|
||||
}
|
||||
this.sendEvent(event);
|
||||
});
|
||||
} else {
|
||||
this.adapter.on("Adapter.response", (response: DebugProtocolResponse) => {
|
||||
if (response.body?.source?.path) {
|
||||
response.body.source.path = this.mapRemoteToLocal(response.body.source.path);
|
||||
}
|
||||
if (Array.isArray(response.body?.breakpoints)) {
|
||||
for (const bp of response.body.breakpoints) {
|
||||
if (bp.source?.path) {
|
||||
bp.source.path = this.mapRemoteToLocal(bp.source.path);
|
||||
}
|
||||
}
|
||||
}
|
||||
this.sendResponse(response);
|
||||
});
|
||||
this.adapter.on("Adapter.event", (event: DebugProtocolEvent) => {
|
||||
if (event.body?.source?.path) {
|
||||
event.body.source.path = this.mapRemoteToLocal(event.body.source.path);
|
||||
}
|
||||
this.sendEvent(event);
|
||||
});
|
||||
this.adapter.on("Adapter.response", response => this.sendResponse(response));
|
||||
this.adapter.on("Adapter.event", event => this.sendEvent(event));
|
||||
}
|
||||
|
||||
this.adapter.on("Adapter.reverseRequest", ({ command, arguments: args }) =>
|
||||
@@ -423,15 +345,11 @@ class FileDebugSession extends DebugSession {
|
||||
if (type === "request") {
|
||||
const { untitledDocPath, bunEvalPath } = this;
|
||||
const { command } = message;
|
||||
if (command === "setBreakpoints" || command === "breakpointLocations") {
|
||||
if (untitledDocPath && (command === "setBreakpoints" || command === "breakpointLocations")) {
|
||||
const args = message.arguments as any;
|
||||
if (untitledDocPath && args.source?.path === untitledDocPath) {
|
||||
if (args.source?.path === untitledDocPath) {
|
||||
args.source.path = bunEvalPath;
|
||||
} else if (args.source?.path) {
|
||||
args.source.path = this.mapLocalToRemote(args.source.path);
|
||||
}
|
||||
} else if (command === "source" && message.arguments?.source?.path) {
|
||||
message.arguments.source.path = this.mapLocalToRemote(message.arguments.source.path);
|
||||
}
|
||||
|
||||
this.adapter.emit("Adapter.request", message);
|
||||
@@ -449,7 +367,7 @@ class TerminalDebugSession extends FileDebugSession {
|
||||
signal!: TCPSocketSignal | UnixSignal;
|
||||
|
||||
constructor() {
|
||||
super(undefined, undefined);
|
||||
super();
|
||||
}
|
||||
|
||||
async initialize() {
|
||||
|
||||
@@ -39,61 +39,8 @@ pub const Tag = enum(u3) {
    skipped_because_label,
};
const debug = Output.scoped(.jest, false);

var max_test_id_for_debugger: u32 = 0;

const CurrentFile = struct {
    title: string = "",
    prefix: string = "",
    repeat_info: struct {
        count: u32 = 0,
        index: u32 = 0,
    } = .{},
    has_printed_filename: bool = false,

    pub fn set(this: *CurrentFile, title: string, prefix: string, repeat_count: u32, repeat_index: u32) void {
        if (Output.isAIAgent()) {
            this.freeAndClear();
            this.title = bun.default_allocator.dupe(u8, title) catch bun.outOfMemory();
            this.prefix = bun.default_allocator.dupe(u8, prefix) catch bun.outOfMemory();
            this.repeat_info.count = repeat_count;
            this.repeat_info.index = repeat_index;
            this.has_printed_filename = false;
            return;
        }

        this.has_printed_filename = true;
        print(title, prefix, repeat_count, repeat_index);
    }

    fn freeAndClear(this: *CurrentFile) void {
        bun.default_allocator.free(this.title);
        bun.default_allocator.free(this.prefix);
    }

    fn print(title: string, prefix: string, repeat_count: u32, repeat_index: u32) void {
        if (repeat_count > 0) {
            if (repeat_count > 1) {
                Output.prettyErrorln("<r>\n{s}{s}: <d>(run #{d})<r>\n", .{ prefix, title, repeat_index + 1 });
            } else {
                Output.prettyErrorln("<r>\n{s}{s}:\n", .{ prefix, title });
            }
        } else {
            Output.prettyErrorln("<r>\n{s}{s}:\n", .{ prefix, title });
        }

        Output.flush();
    }

    pub fn printIfNeeded(this: *CurrentFile) void {
        if (this.has_printed_filename) return;
        this.has_printed_filename = true;
        print(this.title, this.prefix, this.repeat_info.count, this.repeat_info.index);
    }
};

pub const TestRunner = struct {
    current_file: CurrentFile = CurrentFile{},
    tests: TestRunner.Test.List = .{},
    log: *logger.Log,
    files: File.List = .{},

@@ -1380,10 +1327,6 @@ pub const TestRunnerTask = struct {
            deduped = true;
        } else {
            if (is_unhandled and Jest.runner != null) {
                if (Output.isAIAgent()) {
                    Jest.runner.?.current_file.printIfNeeded();
                }

                Output.prettyErrorln(
                    \\<r>
                    \\<b><d>#<r> <red><b>Unhandled error<r><d> between tests<r>
@@ -1392,12 +1335,7 @@
                , .{});

                Output.flush();
            } else if (!is_unhandled and Jest.runner != null) {
                if (Output.isAIAgent()) {
                    Jest.runner.?.current_file.printIfNeeded();
                }
            }

            jsc_vm.runErrorHandlerWithDedupe(rejection, jsc_vm.onUnhandledRejectionExceptionList);
            if (is_unhandled and Jest.runner != null) {
                Output.prettyError("<r><d>-------------------------------<r>\n\n", .{});
@@ -97,11 +97,6 @@ fn fmtStatusTextLine(comptime status: @Type(.enum_literal), comptime emoji_or_co
}

fn writeTestStatusLine(comptime status: @Type(.enum_literal), writer: anytype) void {
    // When using AI agents, only print failures
    if (Output.isAIAgent() and status != .fail) {
        return;
    }

    if (Output.enable_ansi_colors_stderr)
        writer.print(fmtStatusTextLine(status, true), .{}) catch unreachable
    else
@@ -658,54 +653,52 @@ pub const CommandLineReporter = struct {
|
||||
}
|
||||
|
||||
const scopes: []*jest.DescribeScope = scopes_stack.slice();
|
||||
|
||||
const display_label = if (label.len > 0) label else "test";
|
||||
|
||||
// Quieter output when claude code is in use.
|
||||
if (!Output.isAIAgent() or status == .fail) {
|
||||
const color_code = comptime if (skip) "<d>" else "";
|
||||
const color_code = comptime if (skip) "<d>" else "";
|
||||
|
||||
if (Output.enable_ansi_colors_stderr) {
|
||||
for (scopes, 0..) |_, i| {
|
||||
const index = (scopes.len - 1) - i;
|
||||
const scope = scopes[index];
|
||||
if (scope.label.len == 0) continue;
|
||||
writer.writeAll(" ") catch unreachable;
|
||||
if (Output.enable_ansi_colors_stderr) {
|
||||
for (scopes, 0..) |_, i| {
|
||||
const index = (scopes.len - 1) - i;
|
||||
const scope = scopes[index];
|
||||
if (scope.label.len == 0) continue;
|
||||
writer.writeAll(" ") catch unreachable;
|
||||
|
||||
writer.print(comptime Output.prettyFmt("<r>" ++ color_code, true), .{}) catch unreachable;
|
||||
writer.writeAll(scope.label) catch unreachable;
|
||||
writer.print(comptime Output.prettyFmt("<d>", true), .{}) catch unreachable;
|
||||
writer.writeAll(" >") catch unreachable;
|
||||
}
|
||||
} else {
|
||||
for (scopes, 0..) |_, i| {
|
||||
const index = (scopes.len - 1) - i;
|
||||
const scope = scopes[index];
|
||||
if (scope.label.len == 0) continue;
|
||||
writer.writeAll(" ") catch unreachable;
|
||||
writer.writeAll(scope.label) catch unreachable;
|
||||
writer.writeAll(" >") catch unreachable;
|
||||
}
|
||||
writer.print(comptime Output.prettyFmt("<r>" ++ color_code, true), .{}) catch unreachable;
|
||||
writer.writeAll(scope.label) catch unreachable;
|
||||
writer.print(comptime Output.prettyFmt("<d>", true), .{}) catch unreachable;
|
||||
writer.writeAll(" >") catch unreachable;
|
||||
}
|
||||
|
||||
const line_color_code = if (comptime skip) "<r><d>" else "<r><b>";
|
||||
|
||||
if (Output.enable_ansi_colors_stderr)
|
||||
writer.print(comptime Output.prettyFmt(line_color_code ++ " {s}<r>", true), .{display_label}) catch unreachable
|
||||
else
|
||||
writer.print(comptime Output.prettyFmt(" {s}", false), .{display_label}) catch unreachable;
|
||||
|
||||
if (elapsed_ns > (std.time.ns_per_us * 10)) {
|
||||
writer.print(" {any}", .{
|
||||
Output.ElapsedFormatter{
|
||||
.colors = Output.enable_ansi_colors_stderr,
|
||||
.duration_ns = elapsed_ns,
|
||||
},
|
||||
}) catch unreachable;
|
||||
} else {
|
||||
for (scopes, 0..) |_, i| {
|
||||
const index = (scopes.len - 1) - i;
|
||||
const scope = scopes[index];
|
||||
if (scope.label.len == 0) continue;
|
||||
writer.writeAll(" ") catch unreachable;
|
||||
writer.writeAll(scope.label) catch unreachable;
|
||||
writer.writeAll(" >") catch unreachable;
|
||||
}
|
||||
|
||||
writer.writeAll("\n") catch unreachable;
|
||||
}
|
||||
|
||||
const line_color_code = if (comptime skip) "<r><d>" else "<r><b>";
|
||||
|
||||
if (Output.enable_ansi_colors_stderr)
|
||||
writer.print(comptime Output.prettyFmt(line_color_code ++ " {s}<r>", true), .{display_label}) catch unreachable
|
||||
else
|
||||
writer.print(comptime Output.prettyFmt(" {s}", false), .{display_label}) catch unreachable;
|
||||
|
||||
if (elapsed_ns > (std.time.ns_per_us * 10)) {
|
||||
writer.print(" {any}", .{
|
||||
Output.ElapsedFormatter{
|
||||
.colors = Output.enable_ansi_colors_stderr,
|
||||
.duration_ns = elapsed_ns,
|
||||
},
|
||||
}) catch unreachable;
|
||||
}
|
||||
|
||||
writer.writeAll("\n") catch unreachable;
|
||||
|
||||
if (file_reporter) |reporter| {
|
||||
switch (reporter) {
|
||||
.junit => |junit| {
|
||||
@@ -851,8 +844,6 @@ pub const CommandLineReporter = struct {
|
||||
defer Output.flush();
|
||||
var this: *CommandLineReporter = @fieldParentPtr("callback", cb);
|
||||
|
||||
this.jest.current_file.printIfNeeded();
|
||||
|
||||
// when the tests fail, we want to repeat the failures at the end
|
||||
// so that you can see them better when there are lots of tests that ran
|
||||
const initial_length = this.failures_to_repeat_buf.items.len;
|
||||
@@ -1539,7 +1530,7 @@ pub const TestCommand = struct {
|
||||
const write_snapshots_success = try jest.Jest.runner.?.snapshots.writeInlineSnapshots();
|
||||
try jest.Jest.runner.?.snapshots.writeSnapshotFile();
|
||||
var coverage_options = ctx.test_options.coverage;
|
||||
if (reporter.summary().pass > 20 and !Output.isAIAgent()) {
|
||||
if (reporter.summary().pass > 20) {
|
||||
if (reporter.summary().skip > 0) {
|
||||
Output.prettyError("\n<r><d>{d} tests skipped:<r>\n", .{reporter.summary().skip});
|
||||
Output.flush();
|
||||
@@ -1580,24 +1571,16 @@ pub const TestCommand = struct {
|
||||
if (test_files.len == 0) {
|
||||
failed_to_find_any_tests = true;
|
||||
|
||||
// "bun test" - positionals[0] == "test"
|
||||
// Therefore positionals starts at [1].
|
||||
if (ctx.positionals.len < 2) {
|
||||
if (Output.isAIAgent()) {
|
||||
// Be very clear to ai.
|
||||
Output.errGeneric("0 test files matching **{{.test,.spec,_test_,_spec_}}.{{js,ts,jsx,tsx}} in --cwd={}", .{bun.fmt.quote(bun.fs.FileSystem.instance.top_level_dir)});
|
||||
} else {
|
||||
// Be friendlier to humans.
|
||||
Output.prettyErrorln(
|
||||
\\<yellow>No tests found!<r>
|
||||
\\
|
||||
\\Tests need ".test", "_test_", ".spec" or "_spec_" in the filename <d>(ex: "MyApp.test.ts")<r>
|
||||
\\
|
||||
, .{});
|
||||
}
|
||||
if (ctx.positionals.len == 0) {
|
||||
Output.prettyErrorln(
|
||||
\\<yellow>No tests found!<r>
|
||||
\\Tests need ".test", "_test_", ".spec" or "_spec_" in the filename <d>(ex: "MyApp.test.ts")<r>
|
||||
\\
|
||||
, .{});
|
||||
} else {
|
||||
Output.prettyErrorln("<yellow>The following filters did not match any test files:<r>", .{});
|
||||
var has_file_like: ?usize = null;
|
||||
Output.prettyError(" ", .{});
|
||||
for (ctx.positionals[1..], 1..) |filter, i| {
|
||||
Output.prettyError(" {s}", .{filter});
|
||||
|
||||
@@ -1628,12 +1611,10 @@ pub const TestCommand = struct {
|
||||
, .{ ctx.positionals[i], ctx.positionals[i] });
|
||||
}
|
||||
}
|
||||
if (!Output.isAIAgent()) {
|
||||
Output.prettyError(
|
||||
\\
|
||||
\\Learn more about bun test: <magenta>https://bun.com/docs/cli/test<r>
|
||||
, .{});
|
||||
}
|
||||
Output.prettyError(
|
||||
\\
|
||||
\\Learn more about the test runner: <magenta>https://bun.com/docs/cli/test<r>
|
||||
, .{});
|
||||
} else {
|
||||
Output.prettyError("\n", .{});
|
||||
|
||||
@@ -1860,7 +1841,12 @@ pub const TestCommand = struct {
|
||||
vm.onUnhandledRejection = jest.TestRunnerTask.onUnhandledRejection;
|
||||
|
||||
while (repeat_index < repeat_count) : (repeat_index += 1) {
|
||||
reporter.jest.current_file.set(file_title, file_prefix, repeat_count, repeat_index);
|
||||
if (repeat_count > 1) {
|
||||
Output.prettyErrorln("<r>\n{s}{s}: <d>(run #{d})<r>\n", .{ file_prefix, file_title, repeat_index + 1 });
|
||||
} else {
|
||||
Output.prettyErrorln("<r>\n{s}{s}:\n", .{ file_prefix, file_title });
|
||||
}
|
||||
Output.flush();
|
||||
|
||||
var promise = try vm.loadEntryPointForTestRunner(file_path);
|
||||
reporter.summary().files += 1;
|
||||
|
||||
@@ -37,11 +37,11 @@ pub fn PosixPipeWriter(
    return struct {
        fn tryWrite(this: *This, force_sync: bool, buf_: []const u8) WriteResult {
            return switch (if (!force_sync) getFileType(this) else .file) {
                inline else => |ft| return tryWriteWithWriteFn(this, buf_, force_sync, comptime writeToFileType(ft)),
                inline else => |ft| return tryWriteWithWriteFn(this, buf_, comptime writeToFileType(ft)),
            };
        }

        fn tryWriteWithWriteFn(this: *This, buf: []const u8, force_sync: bool, comptime write_fn: *const fn (bun.FileDescriptor, []const u8) JSC.Maybe(usize)) WriteResult {
        fn tryWriteWithWriteFn(this: *This, buf: []const u8, comptime write_fn: *const fn (bun.FileDescriptor, []const u8) JSC.Maybe(usize)) WriteResult {
            const fd = getFd(this);

            var offset: usize = 0;
@@ -54,13 +54,6 @@ pub fn PosixPipeWriter(
            }

            if (err.getErrno() == .PIPE) {
                // For process stdio (force_sync), emit EPIPE errors so they can be caught
                // For regular pipes, treat as completion to maintain compatibility
                if (force_sync) {
                    log("EPIPE error in force_sync mode - propagating error", .{});
                    return .{ .err = err };
                }
                log("EPIPE error in regular mode - converting to done", .{});
                return .{ .done = offset };
            }

@@ -658,7 +651,7 @@ pub fn PosixStreamingWriter(comptime Parent: type, comptime function_table: anyt
                onWrite(this.parent, amt, .end_of_file);
                return .{ .done = amt };
            },
            else => |r| return r,
            else => {},
        }

        return rc;
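
The EPIPE comments above describe behavior that is observable from JavaScript: when the reading end of a process's stdout goes away, further writes fail with EPIPE, which user code can catch on the stream's error event. A small illustrative sketch (file name and command are hypothetical, not taken from Bun's sources or tests):

```ts
// Illustrative sketch: observing EPIPE on process stdio from JavaScript.
// Run e.g. `bun script.ts | head -n 1`; once `head` exits, writes surface EPIPE.
process.stdout.on("error", (err: NodeJS.ErrnoException) => {
  if (err.code === "EPIPE") {
    console.error("stdout reader went away (EPIPE)");
    process.exit(0);
  }
});

// Keep writing; the consumer exiting is what breaks the pipe.
setInterval(() => process.stdout.write("tick\n"), 100);
```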
@@ -457,62 +457,11 @@ pub inline fn isEmojiEnabled() bool {

pub fn isGithubAction() bool {
    if (bun.getenvZ("GITHUB_ACTIONS")) |value| {
        return strings.eqlComptime(value, "true") and
            // Do not print github annotations for AI agents because that wastes the context window.
            !isAIAgent();
        return strings.eqlComptime(value, "true");
    }
    return false;
}

pub fn isAIAgent() bool {
    const get_is_agent = struct {
        var value = false;
        fn evaluate() bool {
            if (bun.getenvZ("IS_CODE_AGENT")) |env| {
                return strings.eqlComptime(env, "1");
            }

            if (isVerbose()) {
                return false;
            }

            // Claude Code.
            if (bun.getenvTruthy("CLAUDECODE")) {
                return true;
            }

            // Replit.
            if (bun.getenvTruthy("REPL_ID")) {
                return true;
            }

            // TODO: add environment variable for Gemini
            // Gemini does not appear to add any environment variables to identify it.

            // TODO: add environment variable for Codex
            // codex does not appear to add any environment variables to identify it.

            // TODO: add environment variable for Cursor Background Agents
            // cursor does not appear to add any environment variables to identify it.

            return false;
        }

        fn setValue() void {
            value = evaluate();
        }

        var once = std.once(setValue);

        pub fn isEnabled() bool {
            once.call();
            return value;
        }
    };

    return get_is_agent.isEnabled();
}

pub fn isVerbose() bool {
    // Set by Github Actions when a workflow is run using debug mode.
    if (bun.getenvZ("RUNNER_DEBUG")) |value| {
@@ -1,67 +0,0 @@
// Bun Snapshot v1, https://bun.sh/docs/test/snapshots

exports[`CLAUDECODE=1 shows quiet test output (only failures) 1`] = `
"test2.test.js:
4 | test("passing test", () => {
5 |   expect(1).toBe(1);
6 | });
7 |
8 | test("failing test", () => {
9 |   expect(1).toBe(2);
                ^
error: expect(received).toBe(expected)

Expected: 2
Received: 1
      at <anonymous> (file:NN:NN)
(fail) failing test

 1 pass
 1 skip
 1 todo
 1 fail
 2 expect() calls
Ran 4 tests across 1 file.
bun test <version> (<revision>)"
`;

exports[`CLAUDECODE=1 vs CLAUDECODE=0 comparison: normal 1`] = `
"test3.test.js:
(pass) passing test
(pass) another passing test
(skip) skipped test
(todo) todo test

 2 pass
 1 skip
 1 todo
 0 fail
 2 expect() calls
Ran 4 tests across 1 file.
bun test <version> (<revision>)"
`;

exports[`CLAUDECODE=1 vs CLAUDECODE=0 comparison: quiet 1`] = `
"2 pass
 1 skip
 1 todo
 0 fail
 2 expect() calls
Ran 4 tests across 1 file.
bun test <version> (<revision>)"
`;

exports[`CLAUDECODE flag handles no test files found: no-tests-normal 1`] = `
"No tests found!

Tests need ".test", "_test_", ".spec" or "_spec_" in the filename (ex: "MyApp.test.ts")

Learn more about bun test: https://bun.com/docs/cli/test
bun test <version> (<revision>)"
`;

exports[`CLAUDECODE flag handles no test files found: no-tests-quiet 1`] = `
"error: 0 test files matching **{.test,.spec,_test_,_spec_}.{js,ts,jsx,tsx} in --cwd="<dir>"

bun test <version> (<revision>)"
`;
@@ -1,139 +0,0 @@
|
||||
import { spawnSync } from "bun";
|
||||
import { expect, test } from "bun:test";
|
||||
import { bunEnv, bunExe, normalizeBunSnapshot, tempDirWithFiles } from "harness";
|
||||
|
||||
test("CLAUDECODE=1 shows quiet test output (only failures)", async () => {
|
||||
const dir = tempDirWithFiles("claudecode-test-quiet", {
|
||||
"test2.test.js": `
|
||||
import { test, expect } from "bun:test";
|
||||
|
||||
test("passing test", () => {
|
||||
expect(1).toBe(1);
|
||||
});
|
||||
|
||||
test("failing test", () => {
|
||||
expect(1).toBe(2);
|
||||
});
|
||||
|
||||
test.skip("skipped test", () => {
|
||||
expect(1).toBe(1);
|
||||
});
|
||||
|
||||
test.todo("todo test");
|
||||
`,
|
||||
});
|
||||
|
||||
await using proc = Bun.spawn({
|
||||
cmd: [bunExe(), "test", "test2.test.js"],
|
||||
env: { ...bunEnv, CLAUDECODE: "1" },
|
||||
cwd: dir,
|
||||
stderr: "pipe",
|
||||
stdout: "pipe",
|
||||
});
|
||||
|
||||
const [stdout, stderr] = await Promise.all([proc.stdout.text(), proc.stderr.text()]);
|
||||
|
||||
const output = stderr + stdout;
|
||||
const normalized = normalizeBunSnapshot(output, dir);
|
||||
|
||||
expect(normalized).toMatchSnapshot();
|
||||
});
|
||||
|
||||
test("CLAUDECODE=1 vs CLAUDECODE=0 comparison", async () => {
|
||||
const dir = tempDirWithFiles("claudecode-test-compare", {
|
||||
"test3.test.js": `
|
||||
import { test, expect } from "bun:test";
|
||||
|
||||
test("passing test", () => {
|
||||
expect(1).toBe(1);
|
||||
});
|
||||
|
||||
test("another passing test", () => {
|
||||
expect(2).toBe(2);
|
||||
});
|
||||
|
||||
test.skip("skipped test", () => {
|
||||
expect(1).toBe(1);
|
||||
});
|
||||
|
||||
test.todo("todo test");
|
||||
`,
|
||||
});
|
||||
|
||||
// Run with CLAUDECODE=0 (normal output)
|
||||
const result1 = spawnSync({
|
||||
cmd: [bunExe(), "test", "test3.test.js"],
|
||||
env: { ...bunEnv, CLAUDECODE: "0" },
|
||||
cwd: dir,
|
||||
stderr: "pipe",
|
||||
stdout: "pipe",
|
||||
});
|
||||
|
||||
// Run with CLAUDECODE=1 (quiet output)
|
||||
const result2 = spawnSync({
|
||||
cmd: [bunExe(), "test", "test3.test.js"],
|
||||
env: { ...bunEnv, CLAUDECODE: "1" },
|
||||
cwd: dir,
|
||||
stderr: "pipe",
|
||||
stdout: "pipe",
|
||||
});
|
||||
|
||||
const normalOutput = result1.stderr.toString() + result1.stdout.toString();
|
||||
const quietOutput = result2.stderr.toString() + result2.stdout.toString();
|
||||
|
||||
// Normal output should contain pass/skip/todo indicators
|
||||
expect(normalOutput).toContain("(pass)"); // pass indicator
|
||||
expect(normalOutput).toContain("(skip)"); // skip indicator
|
||||
expect(normalOutput).toContain("(todo)"); // todo indicator
|
||||
|
||||
// Quiet output should NOT contain pass/skip/todo indicators (only failures)
|
||||
expect(quietOutput).not.toContain("(pass)"); // pass indicator
|
||||
expect(quietOutput).not.toContain("(skip)"); // skip indicator
|
||||
expect(quietOutput).not.toContain("(todo)"); // todo indicator
|
||||
|
||||
// Both should contain the summary at the end
|
||||
expect(normalOutput).toContain("2 pass");
|
||||
expect(normalOutput).toContain("1 skip");
|
||||
expect(normalOutput).toContain("1 todo");
|
||||
|
||||
expect(quietOutput).toContain("2 pass");
|
||||
expect(quietOutput).toContain("1 skip");
|
||||
expect(quietOutput).toContain("1 todo");
|
||||
|
||||
expect(normalizeBunSnapshot(normalOutput, dir)).toMatchSnapshot("normal");
|
||||
expect(normalizeBunSnapshot(quietOutput, dir)).toMatchSnapshot("quiet");
|
||||
});
|
||||
|
||||
test("CLAUDECODE flag handles no test files found", () => {
|
||||
const dir = tempDirWithFiles("empty-project", {
|
||||
"package.json": `{
|
||||
"name": "empty-project",
|
||||
"version": "1.0.0"
|
||||
}`,
|
||||
"src/index.js": `console.log("hello world");`,
|
||||
});
|
||||
|
||||
// Run with CLAUDECODE=0 (normal output) - no test files
|
||||
const result1 = spawnSync({
|
||||
cmd: [bunExe(), "test"],
|
||||
env: { ...bunEnv, CLAUDECODE: "0" },
|
||||
cwd: dir,
|
||||
stderr: "pipe",
|
||||
stdout: "pipe",
|
||||
});
|
||||
|
||||
// Run with CLAUDECODE=1 (quiet output) - no test files
|
||||
const result2 = spawnSync({
|
||||
cmd: [bunExe(), "test"],
|
||||
env: { ...bunEnv, CLAUDECODE: "1" },
|
||||
cwd: dir,
|
||||
stderr: "pipe",
|
||||
stdout: "pipe",
|
||||
});
|
||||
|
||||
const normalOutput = result1.stderr.toString() + result1.stdout.toString();
|
||||
const quietOutput = result2.stderr.toString() + result2.stdout.toString();
|
||||
|
||||
expect(normalizeBunSnapshot(normalOutput, dir)).toMatchSnapshot("no-tests-normal");
|
||||
expect(normalizeBunSnapshot(quietOutput, dir)).toMatchSnapshot("no-tests-quiet");
|
||||
});
|
||||
@@ -1,27 +0,0 @@
'use strict';
const common = require('../common');
const assert = require('assert');
const child_process = require('child_process');
const fixtures = require('../common/fixtures');
const { getSystemErrorName } = require('util');

const testScript = fixtures.path('catch-stdout-error.js');

const child = child_process.exec(
  ...common.escapePOSIXShell`"${process.execPath}" "${testScript}" | "${process.execPath}" -pe "process.stdin.on('data' , () => process.exit(1))"`
);
let output = '';

child.stderr.on('data', function(c) {
  output += c;
});


child.on('close', common.mustCall(function(code) {
  output = JSON.parse(output);

  assert.strictEqual(output.code, 'EPIPE');
  assert.strictEqual(getSystemErrorName(output.errno), 'EPIPE');
  assert.strictEqual(output.syscall, 'write');
  console.log('ok');
}));
test/js/node/test/parallel/test-worker-terminate-unrefed.js (new file, 16 lines)
@@ -0,0 +1,16 @@
'use strict';
const common = require('../common');
const { once } = require('events');
const { Worker } = require('worker_threads');

// Test that calling worker.terminate() on an unref()’ed Worker instance
// still resolves the returned Promise.

async function test() {
  const worker = new Worker('setTimeout(() => {}, 1000000);', { eval: true });
  await once(worker, 'online');
  worker.unref();
  await worker.terminate();
}

test().then(common.mustCall());
@@ -101,9 +101,7 @@
  },
  "private": true,
  "scripts": {
    "typecheck": "tsc --noEmit",
    "bd:v": "(bun run --silent --cwd=../ build:debug &> /tmp/bun.debug.build.log || (cat /tmp/bun.debug.build.log && rm -rf /tmp/bun.debug.build.log && exit 1)) && rm -f /tmp/bun.debug.build.log && ../build/debug/bun-debug",
    "bd": "BUN_DEBUG_QUIET_LOGS=1 bun --silent bd:v"
    "typecheck": "tsc --noEmit"
  },
  "resolutions": {
    "react": "../node_modules/react"