Compare commits

..

3 Commits

Author SHA1 Message Date
Claude Bot
fddb931601 fix: await promise in "returns a promise" test, use array overload
- "returns a promise" test now awaits the promise and checks exitCode
- "array form works" test uses the array overload (cmd as first arg)

Co-Authored-By: Claude <noreply@anthropic.com>
2026-02-26 02:08:17 +00:00
Claude Bot
ead52a0285 fix: address review feedback for spawnAndWait
- buildBufferedResult now includes exitedDueToTimeout (true/false) when
  timeout was configured, and exitedDueToMaxBuffer (true/false) when
  maxBuffer was configured, matching spawnSync behavior
- Tests: replace POSIX-only commands (echo, true) with bunExe() + -e
- Tests: use tempDir() for cwd test instead of hardcoded /tmp
- Tests: use Buffer.alloc() pattern for large output test
- Tests: make timer test deterministic with Promise-based await
- Tests: add exitCode assertions to all tests

Co-Authored-By: Claude <noreply@anthropic.com>
2026-02-26 01:52:42 +00:00
Claude Bot
b913b1cd67 feat: add Bun.spawnAndWait() — async spawn with buffered result
Adds Bun.spawnAndWait(), which spawns a process asynchronously (like
Bun.spawn) but returns a Promise that resolves with the same result
shape as Bun.spawnSync() — buffered stdout/stderr as Buffers, exitCode,
success, signalCode, resourceUsage, and pid.

Unlike spawnSync, this does not block the event loop.
Unlike spawn, the result contains buffered output instead of streams.

Defaults stdout and stderr to "pipe" (same as spawnSync).

Co-Authored-By: Claude <noreply@anthropic.com>
2026-02-26 01:09:16 +00:00
61 changed files with 535 additions and 5072 deletions

View File

@@ -474,8 +474,7 @@ function getBuildCommand(target, options, label) {
if (target.os === "windows" && label === "build-bun") {
// Only sign release builds, not canary builds (DigiCert charges per signature)
// Skip signing on ARM64 for now — smctl (x64-only) silently fails under emulation
const enableSigning = !options.canary && target.arch !== "aarch64" ? " -DENABLE_WINDOWS_CODESIGNING=ON" : "";
const enableSigning = !options.canary ? " -DENABLE_WINDOWS_CODESIGNING=ON" : "";
return `bun run build:${buildProfile}${enableSigning}`;
}

View File

@@ -43,7 +43,7 @@ bunx cowsay 'Hello, world!' # execute a package
## Install
Bun supports Linux (x64 & arm64), macOS (x64 & Apple Silicon) and Windows (x64 & arm64).
Bun supports Linux (x64 & arm64), macOS (x64 & Apple Silicon) and Windows (x64).
> **Linux users** — Kernel version 5.6 or higher is strongly recommended, but the minimum is 5.1.

View File

@@ -148,9 +148,6 @@ _bun_completions() {
upgrade)
COMPREPLY=( $(compgen -W "--version --cwd --help -v -h") );
return;;
repl)
COMPREPLY=( $(compgen -W "--help -h --eval -e --print -p --preload -r --smol --config -c --cwd --env-file --no-env-file" -- "${cur_word}") );
return;;
run)
_file_arguments "!(*.@(js|ts|jsx|tsx|mjs|cjs)?($|))";
COMPREPLY+=( $(compgen -W "--version --cwd --help --silent -v -h" -- "${cur_word}" ) );

View File

@@ -35,7 +35,7 @@ end
set -l bun_install_boolean_flags yarn production optional development no-save dry-run force no-cache silent verbose global
set -l bun_install_boolean_flags_descriptions "Write a yarn.lock file (yarn v1)" "Don't install devDependencies" "Add dependency to optionalDependencies" "Add dependency to devDependencies" "Don't update package.json or save a lockfile" "Don't install anything" "Always request the latest versions from the registry & reinstall all dependencies" "Ignore manifest cache entirely" "Don't output anything" "Excessively verbose logging" "Use global folder"
set -l bun_builtin_cmds_without_run dev create help bun upgrade discord install remove add update init pm x repl
set -l bun_builtin_cmds_without_run dev create help bun upgrade discord install remove add update init pm x
set -l bun_builtin_cmds_accepting_flags create help bun upgrade discord run init link unlink pm x update
function __bun_complete_bins_scripts --inherit-variable bun_builtin_cmds_without_run -d "Emit bun completions for bins and scripts"
@@ -185,12 +185,3 @@ complete -c bun -n "__fish_use_subcommand" -a "x" -d "Execute a package binary,
complete -c bun -n "__fish_use_subcommand" -a "outdated" -d "Display the latest versions of outdated dependencies" -f
complete -c bun -n "__fish_use_subcommand" -a "update" -d "Update dependencies to their latest versions" -f
complete -c bun -n "__fish_use_subcommand" -a "publish" -d "Publish your package from local to npm" -f
complete -c bun -n "__fish_use_subcommand" -a "repl" -d "Start a REPL session with Bun" -f
complete -c bun -n "__fish_seen_subcommand_from repl" -s "e" -l "eval" -r -d "Evaluate argument as a script, then exit" -f
complete -c bun -n "__fish_seen_subcommand_from repl" -s "p" -l "print" -r -d "Evaluate argument as a script, print the result, then exit" -f
complete -c bun -n "__fish_seen_subcommand_from repl" -s "r" -l "preload" -r -d "Import a module before other modules are loaded"
complete -c bun -n "__fish_seen_subcommand_from repl" -l "smol" -d "Use less memory, but run garbage collection more often" -f
complete -c bun -n "__fish_seen_subcommand_from repl" -s "c" -l "config" -r -d "Specify path to Bun config file"
complete -c bun -n "__fish_seen_subcommand_from repl" -l "cwd" -r -d "Absolute path to resolve files & entry points from"
complete -c bun -n "__fish_seen_subcommand_from repl" -l "env-file" -r -d "Load environment variables from the specified file(s)"
complete -c bun -n "__fish_seen_subcommand_from repl" -l "no-env-file" -d "Disable automatic loading of .env files" -f

View File

@@ -524,33 +524,6 @@ _bun_upgrade_completion() {
}
_bun_repl_completion() {
_arguments -s -C \
'1: :->cmd' \
'--help[Print this help menu]' \
'-h[Print this help menu]' \
'(-p --print)--eval[Evaluate argument as a script, then exit]:script' \
'(-p --print)-e[Evaluate argument as a script, then exit]:script' \
'(-e --eval)--print[Evaluate argument as a script, print the result, then exit]:script' \
'(-e --eval)-p[Evaluate argument as a script, print the result, then exit]:script' \
'--preload[Import a module before other modules are loaded]:preload' \
'-r[Import a module before other modules are loaded]:preload' \
'--smol[Use less memory, but run garbage collection more often]' \
'--config[Specify path to Bun config file]: :->config' \
'-c[Specify path to Bun config file]: :->config' \
'--cwd[Absolute path to resolve files & entry points from]:cwd' \
'--env-file[Load environment variables from the specified file(s)]:env-file' \
'--no-env-file[Disable automatic loading of .env files]' &&
ret=0
case $state in
config)
_bun_list_bunfig_toml
;;
esac
}
_bun_build_completion() {
_arguments -s -C \
'1: :->cmd' \
@@ -814,10 +787,6 @@ _bun() {
upgrade)
_bun_upgrade_completion
;;
repl)
_bun_repl_completion
;;
build)
_bun_build_completion
@@ -901,10 +870,6 @@ _bun() {
upgrade)
_bun_upgrade_completion
;;
repl)
_bun_repl_completion
;;
build)
_bun_build_completion

View File

@@ -157,31 +157,6 @@ To build for Windows x64:
</Tab>
</Tabs>
To build for Windows arm64:
<Tabs>
<Tab title="CLI">
```bash icon="terminal" terminal
bun build --compile --target=bun-windows-arm64 ./path/to/my/app.ts --outfile myapp
# note: if no .exe extension is provided, Bun will automatically add it for Windows executables
```
</Tab>
<Tab title="JavaScript">
```ts build.ts icon="/icons/typescript.svg"
await Bun.build({
entrypoints: ["./path/to/my/app.ts"],
compile: {
target: "bun-windows-arm64",
outfile: "./myapp", // .exe added automatically
},
});
```
</Tab>
</Tabs>
To build for macOS arm64:
<Tabs>
@@ -228,16 +203,16 @@ To build for macOS x64:
The order of the `--target` segments does not matter, as long as they're delimited by a `-`.
| --target | Operating System | Architecture | Modern | Baseline | Libc |
| -------------------- | ---------------- | ------------ | ------ | -------- | ----- |
| bun-linux-x64 | Linux | x64 | ✅ | ✅ | glibc |
| bun-linux-arm64 | Linux | arm64 | ✅ | N/A | glibc |
| bun-windows-x64 | Windows | x64 | ✅ | ✅ | - |
| bun-windows-arm64 | Windows | arm64 | | N/A | - |
| bun-darwin-x64 | macOS | x64 | ✅ | ✅ | - |
| bun-darwin-arm64 | macOS | arm64 | ✅ | N/A | - |
| bun-linux-x64-musl | Linux | x64 | ✅ | ✅ | musl |
| bun-linux-arm64-musl | Linux | arm64 | ✅ | N/A | musl |
| --target | Operating System | Architecture | Modern | Baseline | Libc |
| --------------------- | ---------------- | ------------ | ------ | -------- | ----- |
| bun-linux-x64 | Linux | x64 | ✅ | ✅ | glibc |
| bun-linux-arm64 | Linux | arm64 | ✅ | N/A | glibc |
| bun-windows-x64 | Windows | x64 | ✅ | ✅ | - |
| ~~bun-windows-arm64~~ | ~~Windows~~ | ~~arm64~~ | | | - |
| bun-darwin-x64 | macOS | x64 | ✅ | ✅ | - |
| bun-darwin-arm64 | macOS | arm64 | ✅ | N/A | - |
| bun-linux-x64-musl | Linux | x64 | ✅ | ✅ | musl |
| bun-linux-arm64-musl | Linux | arm64 | ✅ | N/A | musl |
<Warning>
On x64 platforms, Bun uses SIMD optimizations which require a modern CPU supporting AVX2 instructions. The `-baseline`
@@ -1277,8 +1252,7 @@ type Target =
| "bun-linux-arm64-musl"
| "bun-windows-x64"
| "bun-windows-x64-baseline"
| "bun-windows-x64-modern"
| "bun-windows-arm64";
| "bun-windows-x64-modern";
```
### Complete example

View File

@@ -75,7 +75,7 @@
{
"group": "Core Runtime",
"icon": "cog",
"pages": ["/runtime/index", "/runtime/watch-mode", "/runtime/debugger", "/runtime/repl", "/runtime/bunfig"]
"pages": ["/runtime/index", "/runtime/watch-mode", "/runtime/debugger", "/runtime/bunfig"]
},
{
"group": "File & Module System",

View File

@@ -260,13 +260,6 @@ To download Bun binaries directly, visit the [releases page on GitHub](https://g
>
For older CPUs without AVX2
</Card>
<Card
icon="/icons/windows.svg"
title="Windows ARM64"
href="https://github.com/oven-sh/bun/releases/latest/download/bun-windows-aarch64.zip"
>
Windows on ARM (Snapdragon, etc.)
</Card>
<Card
icon="/icons/apple.svg"
title="macOS ARM64"

View File

@@ -437,6 +437,39 @@ As a rule of thumb, the asynchronous `Bun.spawn` API is better for HTTP servers
---
## Async buffered API (`Bun.spawnAndWait()`)
`Bun.spawnAndWait` combines the best of both APIs: it returns a `Promise` that resolves with the same `SyncSubprocess` result as `Bun.spawnSync` (buffered `stdout`/`stderr` as `Buffer`, `exitCode`, `success`, etc.), but **without blocking the event loop**.
```ts
const result = await Bun.spawnAndWait(["echo", "hello"]);
console.log(result.stdout.toString()); // => "hello\n"
console.log(result.exitCode); // => 0
console.log(result.success); // => true
```
Like `Bun.spawnSync`, `stdout` and `stderr` default to `"pipe"` and are returned as `Buffer` objects. Like `Bun.spawn`, the event loop continues running while the process executes — timers, network requests, and other async work proceed normally.
```ts
const result = await Bun.spawnAndWait({
cmd: ["ls", "-la"],
cwd: "/tmp",
env: { ...process.env, MY_VAR: "hello" },
});
if (result.success) {
console.log(result.stdout.toString());
} else {
console.error(`Failed with exit code ${result.exitCode}`);
console.error(result.stderr.toString());
}
```
This is useful when you need the simplicity of `spawnSync`'s buffered result but can't afford to block the event loop — for example, in HTTP servers or when running multiple subprocesses concurrently with `Promise.all`.
---
## Benchmarks
<Note>
@@ -482,9 +515,11 @@ A reference of the Spawn API and types are shown below. The real types have comp
interface Bun {
spawn(command: string[], options?: SpawnOptions.OptionsObject): Subprocess;
spawnSync(command: string[], options?: SpawnOptions.OptionsObject): SyncSubprocess;
spawnAndWait(command: string[], options?: SpawnOptions.OptionsObject): Promise<SyncSubprocess>;
spawn(options: { cmd: string[] } & SpawnOptions.OptionsObject): Subprocess;
spawnSync(options: { cmd: string[] } & SpawnOptions.OptionsObject): SyncSubprocess;
spawnAndWait(options: { cmd: string[] } & SpawnOptions.OptionsObject): Promise<SyncSubprocess>;
}
namespace SpawnOptions {

View File

@@ -1,176 +0,0 @@
---
title: "REPL"
description: "An interactive JavaScript and TypeScript REPL with syntax highlighting, history, and tab completion"
---
`bun repl` starts an interactive Read-Eval-Print Loop (REPL) for evaluating JavaScript and TypeScript expressions. It's useful for quickly testing code snippets, exploring APIs, and debugging.
```sh terminal icon="terminal"
bun repl
```
```txt
Welcome to Bun v1.3.3
Type .copy [code] to copy to clipboard. .help for more info.
> 1 + 1
2
> const greeting = "Hello, Bun!"
undefined
> greeting
'Hello, Bun!'
```
---
## Features
- **TypeScript & JSX** — Write TypeScript and JSX directly. Bun transpiles everything on the fly.
- **Top-level `await`** — Await promises directly at the prompt without wrapping in an async function.
- **Syntax highlighting** — Input is highlighted as you type.
- **Persistent history** — History is saved to `~/.bun_repl_history` and persists across sessions.
- **Tab completion** — Press `Tab` to complete property names and REPL commands.
- **Multi-line input** — Unclosed brackets, braces, and parentheses automatically continue on the next line.
- **Node.js globals** — `require`, `module`, `__dirname`, and `__filename` are available, resolved relative to your current working directory.
---
## Special variables
The REPL exposes two special variables that update after each evaluation.
| Variable | Description |
| -------- | --------------------------------- |
| `_` | The result of the last expression |
| `_error` | The last error that was thrown |
```txt
> 2 + 2
4
> _ * 10
40
> JSON.parse("oops")
SyntaxError: JSON Parse error: Unexpected identifier "oops"
> _error
SyntaxError: JSON Parse error: Unexpected identifier "oops"
```
---
## Top-level `await`
Promises are automatically awaited. You can `await` any expression directly at the prompt.
```txt
> await fetch("https://api.github.com/repos/oven-sh/bun").then(r => r.json()).then(r => r.stargazers_count)
81234
> const response = await fetch("https://example.com")
undefined
> response.status
200
```
---
## Importing modules
Just like Bun's runtime, you can use either `require` or `import` in the REPL and it Just Works — mix ESM and CommonJS freely at the prompt. Module resolution uses the same rules as `bun run`, so you can import from `node_modules`, relative paths, or `node:` builtins.
```txt
> import { z } from "zod"
undefined
> const path = require("path")
undefined
> z.string().parse(path.join("/tmp", "file.txt"))
'/tmp/file.txt'
```
Declarations persist for the rest of the session, and `const`/`let` can be redeclared across evaluations (unlike in regular scripts) so you can re-run `import` and `require` statements while iterating.
---
## Multi-line input
When you press `Enter` on a line with unclosed brackets, braces, or parentheses, the REPL automatically continues on the next line. The prompt changes to `...` to indicate continuation.
```txt
> function add(a, b) {
... return a + b;
... }
undefined
> add(2, 3)
5
```
For longer multi-line entries, use `.editor` to enter editor mode, which buffers all input until you press `Ctrl+D`.
---
## REPL commands
Type `.help` at the prompt to see all available REPL commands.
| Command | Description |
| ---------- | ------------------------------------------------------------------------------------------------ |
| `.help` | Print the help message listing commands and keybindings |
| `.exit` | Exit the REPL |
| `.clear` | Clear the screen |
| `.copy` | Copy the last result to the clipboard. Pass an expression to evaluate and copy it: `.copy 1 + 1` |
| `.load` | Load a file into the REPL session: `.load ./script.ts` |
| `.save` | Save the current REPL history to a file: `.save ./session.txt` |
| `.editor` | Enter multi-line editor mode (press `Ctrl+D` to evaluate, `Ctrl+C` to cancel) |
| `.break` | Cancel the current multi-line input |
| `.history` | Print the command history |
---
## Keybindings
The REPL supports Emacs-style line editing.
| Keybinding | Action |
| ------------------- | -------------------------------------------------------- |
| `Ctrl+A` | Move to start of line |
| `Ctrl+E` | Move to end of line |
| `Ctrl+B` / `Ctrl+F` | Move backward/forward one character |
| `Alt+B` / `Alt+F` | Move backward/forward one word |
| `Ctrl+U` | Delete to start of line |
| `Ctrl+K` | Delete to end of line |
| `Ctrl+W` | Delete word backward |
| `Ctrl+D` | Delete character (or exit if line is empty) |
| `Ctrl+L` | Clear screen |
| `Ctrl+T` | Swap the two characters before the cursor |
| `Up` / `Down` | Navigate history |
| `Tab` | Auto-complete |
| `Ctrl+C` | Cancel current input (press twice on empty line to exit) |
---
## History
REPL history is automatically saved to `~/.bun_repl_history` (up to 1000 entries) and loaded at the start of each session. Use `Up`/`Down` to navigate.
To export your history to a different file, use `.save`:
```txt
> .save ./my-session.txt
```
---
## Non-interactive mode
Use `-e` / `--eval` to evaluate a script with REPL semantics and exit. Use `-p` / `--print` to additionally print the result.
```sh terminal icon="terminal"
bun repl -e "const x: number = 42; console.log(x)"
# 42
bun repl -p "await fetch('https://example.com').then(r => r.status)"
# 200
bun repl -p "{ a: 1, b: 2 }"
# { a: 1, b: 2 }
```
This uses the same transforms as the interactive REPL, so a bare object literal like `{ a: 1 }` is treated as an object expression instead of a block statement. The process exits after the event loop drains (pending timers and I/O complete first). On error, the process exits with code `1`.

View File

@@ -7237,6 +7237,54 @@ declare module "bun" {
options?: SpawnOptions.SpawnSyncOptions<In, Out, Err>,
): SyncSubprocess<Out, Err>;
/**
* Spawn a new process, returning a promise that resolves with the buffered
* stdout, stderr, exit code, and other information — the same shape as
* {@link Bun.spawnSync()}.
*
* Unlike `Bun.spawn()`, the result contains buffered `stdout` and `stderr`
* as `Buffer` objects instead of `ReadableStream`.
* Unlike `Bun.spawnSync()`, this does not block the event loop.
*
* @category Process Management
*
* ```js
* const { stdout, exitCode } = await Bun.spawnAndWait(["echo", "hello"]);
* console.log(stdout.toString()); // "hello\n"
* ```
*/
function spawnAndWait<
const In extends SpawnOptions.Writable = "ignore",
const Out extends SpawnOptions.Readable = "pipe",
const Err extends SpawnOptions.Readable = "pipe",
>(
options: SpawnOptions.SpawnOptions<In, Out, Err> & {
cmd: string[];
},
): Promise<SyncSubprocess<Out, Err>>;
/**
* Spawn a new process, returning a promise that resolves with the buffered
* stdout, stderr, exit code, and other information — the same shape as
* {@link Bun.spawnSync()}.
*
* Unlike `Bun.spawn()`, the result contains buffered `stdout` and `stderr`
* as `Buffer` objects instead of `ReadableStream`.
* Unlike `Bun.spawnSync()`, this does not block the event loop.
*
* @category Process Management
*
* ```js
* const { stdout, exitCode } = await Bun.spawnAndWait(["echo", "hello"]);
* console.log(stdout.toString()); // "hello\n"
* ```
*/
function spawnAndWait<
const In extends SpawnOptions.Writable = "ignore",
const Out extends SpawnOptions.Readable = "pipe",
const Err extends SpawnOptions.Readable = "pipe",
>(cmds: string[], options?: SpawnOptions.SpawnOptions<In, Out, Err>): Promise<SyncSubprocess<Out, Err>>;
/** Utility type for any process from {@link Bun.spawn()} with both stdout and stderr set to `"pipe"` */
type ReadableSubprocess = Subprocess<any, "pipe", "pipe">;
/** Utility type for any process from {@link Bun.spawn()} with stdin set to `"pipe"` */

View File

@@ -402,8 +402,8 @@ function Install-Bun {
}
if ($script:IsARM64) {
# ARM64 bun binary from blob storage (faster than GitHub releases for CI)
Write-Output "Installing Bun (ARM64)..."
# No published ARM64 bun binary yet — download from our blob storage
Write-Output "Installing Bun (ARM64 from blob storage)..."
$zip = Download-File "https://buncistore.blob.core.windows.net/artifacts/bun-windows-aarch64.zip" -Name "bun-arm64.zip"
$extractDir = "$env:TEMP\bun-arm64"
Expand-Archive -Path $zip -DestinationPath $extractDir -Force

View File

@@ -210,13 +210,7 @@ if (instructionFailures > 0) {
console.error(" FAILED: Code uses unsupported CPU instructions.");
// Report to Buildkite annotations tab
const platform = isWindows
? isAarch64
? "Windows aarch64"
: "Windows x64"
: isAarch64
? "Linux aarch64"
: "Linux x64";
const platform = isWindows ? "Windows x64" : isAarch64 ? "Linux aarch64" : "Linux x64";
const annotation = [
`<details>`,
`<summary>CPU instruction violation on ${platform}: ${instructionFailures} failed</summary>`,

View File

@@ -28,6 +28,9 @@ pub fn ReplTransforms(comptime P: type) type {
return;
}
// Check if there's top-level await
const has_top_level_await = p.top_level_await_keyword.len > 0;
// Collect all statements into a single array
var all_stmts = bun.handleOom(allocator.alloc(Stmt, total_stmts_count));
var stmt_idx: usize = 0;
@@ -38,17 +41,6 @@ pub fn ReplTransforms(comptime P: type) type {
}
}
// Check if there's top-level await or imports (imports become dynamic awaited imports)
var has_top_level_await = p.top_level_await_keyword.len > 0;
if (!has_top_level_await) {
for (all_stmts) |stmt| {
if (stmt.data == .s_import) {
has_top_level_await = true;
break;
}
}
}
// Apply transform with is_async based on presence of top-level await
try transformWithHoisting(p, parts, all_stmts, allocator, has_top_level_await);
}
@@ -162,86 +154,6 @@ pub fn ReplTransforms(comptime P: type) type {
try inner_stmts.append(stmt);
}
},
.s_import => |import_data| {
// Convert static imports to dynamic imports for REPL evaluation:
// import X from 'mod' -> var X = (await import('mod')).default
// import { a, b } from 'mod' -> var {a, b} = await import('mod')
// import * as X from 'mod' -> var X = await import('mod')
// import 'mod' -> await import('mod')
const path_str = p.import_records.items[import_data.import_record_index].path.text;
const import_expr = p.newExpr(E.Import{
.expr = p.newExpr(E.String{ .data = path_str }, stmt.loc),
.import_record_index = std.math.maxInt(u32),
}, stmt.loc);
const await_expr = p.newExpr(E.Await{ .value = import_expr }, stmt.loc);
if (import_data.star_name_loc) |_| {
// import * as X from 'mod' -> var X = await import('mod')
try hoisted_stmts.append(p.s(S.Local{
.kind = .k_var,
.decls = Decl.List.fromOwnedSlice(bun.handleOom(allocator.dupe(G.Decl, &.{
G.Decl{
.binding = p.b(B.Identifier{ .ref = import_data.namespace_ref }, stmt.loc),
.value = null,
},
}))),
}, stmt.loc));
const assign = p.newExpr(E.Binary{
.op = .bin_assign,
.left = p.newExpr(E.Identifier{ .ref = import_data.namespace_ref }, stmt.loc),
.right = await_expr,
}, stmt.loc);
try inner_stmts.append(p.s(S.SExpr{ .value = assign }, stmt.loc));
} else if (import_data.default_name) |default_name| {
// import X from 'mod' -> var X = (await import('mod')).default
// import X, { a } from 'mod' -> var __ns = await import('mod'); var X = __ns.default; var a = __ns.a;
try hoisted_stmts.append(p.s(S.Local{
.kind = .k_var,
.decls = Decl.List.fromOwnedSlice(bun.handleOom(allocator.dupe(G.Decl, &.{
G.Decl{
.binding = p.b(B.Identifier{ .ref = default_name.ref.? }, default_name.loc),
.value = null,
},
}))),
}, stmt.loc));
if (import_data.items.len > 0) {
// Share a single await import() between default and named imports.
// namespace_ref is synthesized by processImportStatement for all non-star imports.
try convertNamedImports(p, import_data, await_expr, &hoisted_stmts, &inner_stmts, allocator, stmt.loc);
const ns_ref_expr = p.newExpr(E.Identifier{ .ref = import_data.namespace_ref }, stmt.loc);
const dot_default = p.newExpr(E.Dot{
.target = ns_ref_expr,
.name = "default",
.name_loc = stmt.loc,
}, stmt.loc);
const assign = p.newExpr(E.Binary{
.op = .bin_assign,
.left = p.newExpr(E.Identifier{ .ref = default_name.ref.? }, default_name.loc),
.right = dot_default,
}, stmt.loc);
try inner_stmts.append(p.s(S.SExpr{ .value = assign }, stmt.loc));
} else {
const dot_default = p.newExpr(E.Dot{
.target = await_expr,
.name = "default",
.name_loc = stmt.loc,
}, stmt.loc);
const assign = p.newExpr(E.Binary{
.op = .bin_assign,
.left = p.newExpr(E.Identifier{ .ref = default_name.ref.? }, default_name.loc),
.right = dot_default,
}, stmt.loc);
try inner_stmts.append(p.s(S.SExpr{ .value = assign }, stmt.loc));
}
} else if (import_data.items.len > 0) {
// import { a, b } from 'mod' -> destructure from await import('mod')
try convertNamedImports(p, import_data, await_expr, &hoisted_stmts, &inner_stmts, allocator, stmt.loc);
} else {
// import 'mod' (side-effect only) -> await import('mod')
try inner_stmts.append(p.s(S.SExpr{ .value = await_expr }, stmt.loc));
}
},
.s_directive => |directive| {
// In REPL mode, treat directives (string literals) as expressions
const str_expr = p.newExpr(E.String{ .data = directive.value }, stmt.loc);
@@ -283,63 +195,6 @@ pub fn ReplTransforms(comptime P: type) type {
}
}
/// Convert named imports to individual var assignments from the dynamic import
/// import { a, b as c } from 'mod' ->
/// var a; var c; (hoisted)
/// var __mod = await import('mod'); a = __mod.a; c = __mod.b; (inner)
fn convertNamedImports(
p: *P,
import_data: *const S.Import,
await_expr: Expr,
hoisted_stmts: *ListManaged(Stmt),
inner_stmts: *ListManaged(Stmt),
allocator: Allocator,
loc: logger.Loc,
) !void {
// Store the module in the namespace ref: var __ns = await import('mod')
try hoisted_stmts.append(p.s(S.Local{
.kind = .k_var,
.decls = Decl.List.fromOwnedSlice(bun.handleOom(allocator.dupe(G.Decl, &.{
G.Decl{
.binding = p.b(B.Identifier{ .ref = import_data.namespace_ref }, loc),
.value = null,
},
}))),
}, loc));
const ns_assign = p.newExpr(E.Binary{
.op = .bin_assign,
.left = p.newExpr(E.Identifier{ .ref = import_data.namespace_ref }, loc),
.right = await_expr,
}, loc);
try inner_stmts.append(p.s(S.SExpr{ .value = ns_assign }, loc));
// For each named import: var name; name = __ns.originalName;
for (import_data.items) |item| {
try hoisted_stmts.append(p.s(S.Local{
.kind = .k_var,
.decls = Decl.List.fromOwnedSlice(bun.handleOom(allocator.dupe(G.Decl, &.{
G.Decl{
.binding = p.b(B.Identifier{ .ref = item.name.ref.? }, item.name.loc),
.value = null,
},
}))),
}, loc));
const ns_ref_expr = p.newExpr(E.Identifier{ .ref = import_data.namespace_ref }, loc);
const prop_access = p.newExpr(E.Dot{
.target = ns_ref_expr,
.name = item.alias,
.name_loc = item.name.loc,
}, loc);
const item_assign = p.newExpr(E.Binary{
.op = .bin_assign,
.left = p.newExpr(E.Identifier{ .ref = item.name.ref.? }, item.name.loc),
.right = prop_access,
}, loc);
try inner_stmts.append(p.s(S.SExpr{ .value = item_assign }, loc));
}
}
/// Wrap the last expression in return { value: expr }
fn wrapLastExpressionWithReturn(p: *P, inner_stmts: *ListManaged(Stmt), allocator: Allocator) void {
if (inner_stmts.items.len > 0) {

View File

@@ -3,17 +3,6 @@ pub const webcore = @import("./bun.js/webcore.zig");
pub const api = @import("./bun.js/api.zig");
pub const bindgen = @import("./bun.js/bindgen.zig");
pub fn applyStandaloneRuntimeFlags(b: *bun.Transpiler, graph: *const bun.StandaloneModuleGraph) void {
b.options.env.disable_default_env_files = graph.flags.disable_default_env_files;
b.options.env.behavior = if (graph.flags.disable_default_env_files)
.disable
else
.load_all_without_inlining;
b.resolver.opts.load_tsconfig_json = !graph.flags.disable_autoload_tsconfig;
b.resolver.opts.load_package_json = !graph.flags.disable_autoload_package_json;
}
pub const Run = struct {
ctx: Command.Context,
vm: *VirtualMachine,
@@ -93,7 +82,18 @@ pub const Run = struct {
.unspecified => {},
}
applyStandaloneRuntimeFlags(b, graph_ptr);
// If .env loading is disabled, only load process env vars
// Otherwise, load all .env files
if (graph_ptr.flags.disable_default_env_files) {
b.options.env.behavior = .disable;
} else {
b.options.env.behavior = .load_all_without_inlining;
}
// Control loading of tsconfig.json and package.json at runtime
// By default, these are disabled for standalone executables
b.resolver.opts.load_tsconfig_json = !graph_ptr.flags.disable_autoload_tsconfig;
b.resolver.opts.load_package_json = !graph_ptr.flags.disable_autoload_package_json;
b.configureDefines() catch {
failWithBuildError(vm);

View File

@@ -2669,7 +2669,7 @@ pub fn remapZigException(
allow_source_code_preview: bool,
) void {
error_instance.toZigException(this.global, exception);
var enable_source_code_preview = allow_source_code_preview and
const enable_source_code_preview = allow_source_code_preview and
!(bun.feature_flag.BUN_DISABLE_SOURCE_CODE_PREVIEW.get() or
bun.feature_flag.BUN_DISABLE_TRANSPILED_SOURCE_CODE_PREVIEW.get());
@@ -2764,12 +2764,6 @@ pub fn remapZigException(
}
}
// Don't show source code preview for REPL frames - it would show the
// transformed IIFE wrapper code, not what the user typed.
if (top.source_url.eqlComptime("[repl]")) {
enable_source_code_preview = false;
}
var top_source_url = top.source_url.toUTF8(bun.default_allocator);
defer top_source_url.deinit();
@@ -2821,6 +2815,7 @@ pub fn remapZigException(
// Avoid printing "export default 'native'"
break :code ZigString.Slice.empty;
}
var log = logger.Log.init(bun.default_allocator);
defer log.deinit();

View File

@@ -37,6 +37,7 @@ pub const BunObject = struct {
pub const stringWidth = toJSCallback(Bun.stringWidth);
pub const sleepSync = toJSCallback(Bun.sleepSync);
pub const spawn = toJSCallback(host_fn.wrapStaticMethod(api.Subprocess, "spawn", false));
pub const spawnAndWait = toJSCallback(host_fn.wrapStaticMethod(api.Subprocess, "spawnAndWait", false));
pub const spawnSync = toJSCallback(host_fn.wrapStaticMethod(api.Subprocess, "spawnSync", false));
pub const udpSocket = toJSCallback(host_fn.wrapStaticMethod(api.UDPSocket, "udpSocket", false));
pub const which = toJSCallback(Bun.which);
@@ -183,6 +184,7 @@ pub const BunObject = struct {
@export(&BunObject.stringWidth, .{ .name = callbackName("stringWidth") });
@export(&BunObject.sleepSync, .{ .name = callbackName("sleepSync") });
@export(&BunObject.spawn, .{ .name = callbackName("spawn") });
@export(&BunObject.spawnAndWait, .{ .name = callbackName("spawnAndWait") });
@export(&BunObject.spawnSync, .{ .name = callbackName("spawnSync") });
@export(&BunObject.udpSocket, .{ .name = callbackName("udpSocket") });
@export(&BunObject.which, .{ .name = callbackName("which") });

View File

@@ -698,7 +698,8 @@ pub fn setRawMode(
if (comptime Environment.isPosix) {
// Use the existing TTY mode function
const tty_result = bun.tty.setMode(this.master_fd.cast(), if (enabled) .raw else .normal);
const mode: c_int = if (enabled) 1 else 0;
const tty_result = Bun__ttySetMode(this.master_fd.cast(), mode);
if (tty_result != 0) {
return globalObject.throw("Failed to set raw mode", .{});
}
@@ -707,6 +708,9 @@ pub fn setRawMode(
this.flags.raw_mode = enabled;
return .js_undefined;
}
extern fn Bun__ttySetMode(fd: c_int, mode: c_int) c_int;
/// POSIX termios struct for terminal flags manipulation
const Termios = if (Environment.isPosix) std.posix.termios else void;

View File

@@ -94,12 +94,39 @@ fn getArgv(globalThis: *jsc.JSGlobalObject, args: JSValue, PATH: []const u8, cwd
/// Bun.spawn() calls this.
pub fn spawn(globalThis: *jsc.JSGlobalObject, args: JSValue, secondaryArgsValue: ?JSValue) bun.JSError!JSValue {
return spawnMaybeSync(globalThis, args, secondaryArgsValue, false);
return spawnMaybeSync(globalThis, args, secondaryArgsValue, false, false);
}
/// Bun.spawnSync() calls this.
pub fn spawnSync(globalThis: *jsc.JSGlobalObject, args: JSValue, secondaryArgsValue: ?JSValue) bun.JSError!JSValue {
return spawnMaybeSync(globalThis, args, secondaryArgsValue, true);
return spawnMaybeSync(globalThis, args, secondaryArgsValue, true, true);
}
/// Bun.spawnAndWait() calls this.
/// Like Bun.spawn() but returns a Promise that resolves with the same result
/// shape as Bun.spawnSync() (buffered stdout/stderr, exitCode, etc).
pub fn spawnAndWait(globalThis: *jsc.JSGlobalObject, args: JSValue, secondaryArgsValue: ?JSValue) bun.JSError!JSValue {
// Use the async spawn path but with stderr defaulting to pipe (like spawnSync)
const subprocess_js = try spawnMaybeSync(globalThis, args, secondaryArgsValue, false, true);
const subprocess = Subprocess.fromJSDirect(subprocess_js) orelse
return globalThis.throwInvalidArguments("failed to create subprocess", .{});
// Mark as buffered async mode
subprocess.flags.is_buffered_async = true;
// Create the promise that will resolve with the buffered result
const promise = jsc.JSPromise.create(globalThis);
subprocess.spawn_and_wait_promise.set(globalThis, promise.toJS());
// Take an extra ref to prevent deallocation before the promise resolves.
// This is balanced by deref() in maybeResolveBufferedAsync.
subprocess.ref();
// If the process already exited and stdio is already closed, resolve immediately
subprocess.maybeResolveBufferedAsync();
return promise.toJS();
}
pub fn spawnMaybeSync(
@@ -107,6 +134,7 @@ pub fn spawnMaybeSync(
args_: JSValue,
secondaryArgsValue: ?JSValue,
comptime is_sync: bool,
comptime default_stderr_to_pipe: bool,
) bun.JSError!JSValue {
if (comptime is_sync) {
// We skip this on Windows due to test failures.
@@ -134,7 +162,7 @@ pub fn spawnMaybeSync(
.{ .inherit = {} },
};
if (comptime is_sync) {
if (comptime is_sync or default_stderr_to_pipe) {
stdio[1] = .{ .pipe = {} };
stdio[2] = .{ .pipe = {} };
}

View File

@@ -1379,22 +1379,36 @@ pub fn spawnProcessPosix(
break :brk .{ pair[if (i == 0) 1 else 0], pair[if (i == 0) 0 else 1] };
};
// Note: we intentionally do NOT call shutdown() on the
// socketpair fds. On SOCK_STREAM socketpairs, shutdown(fd, SHUT_WR)
// sends a FIN to the peer, which causes programs that poll the
// write end for readability (e.g. Python's asyncio connect_write_pipe)
// to interpret it as "connection closed" and tear down their transport.
// The socketpair is already used unidirectionally by convention.
if (comptime Environment.isMac) {
// macOS seems to default to around 8 KB for the buffer size
// this is comically small.
// TODO: investigate if this should be adjusted on Linux.
const so_recvbuf: c_int = 1024 * 512;
const so_sendbuf: c_int = 1024 * 512;
if (i == 0) {
if (i == 0) {
// their copy of stdin should be readable
_ = std.c.shutdown(@intCast(fds[1].cast()), std.posix.SHUT.WR);
// our copy of stdin should be writable
_ = std.c.shutdown(@intCast(fds[0].cast()), std.posix.SHUT.RD);
if (comptime Environment.isMac) {
// macOS seems to default to around 8 KB for the buffer size
// this is comically small.
// TODO: investigate if this should be adjusted on Linux.
const so_recvbuf: c_int = 1024 * 512;
const so_sendbuf: c_int = 1024 * 512;
_ = std.c.setsockopt(fds[1].cast(), std.posix.SOL.SOCKET, std.posix.SO.RCVBUF, &so_recvbuf, @sizeOf(c_int));
_ = std.c.setsockopt(fds[0].cast(), std.posix.SOL.SOCKET, std.posix.SO.SNDBUF, &so_sendbuf, @sizeOf(c_int));
} else {
}
} else {
// their copy of stdout or stderr should be writable
_ = std.c.shutdown(@intCast(fds[1].cast()), std.posix.SHUT.RD);
// our copy of stdout or stderr should be readable
_ = std.c.shutdown(@intCast(fds[0].cast()), std.posix.SHUT.WR);
if (comptime Environment.isMac) {
// macOS seems to default to around 8 KB for the buffer size
// this is comically small.
// TODO: investigate if this should be adjusted on Linux.
const so_recvbuf: c_int = 1024 * 512;
const so_sendbuf: c_int = 1024 * 512;
_ = std.c.setsockopt(fds[0].cast(), std.posix.SOL.SOCKET, std.posix.SO.RCVBUF, &so_recvbuf, @sizeOf(c_int));
_ = std.c.setsockopt(fds[1].cast(), std.posix.SOL.SOCKET, std.posix.SO.SNDBUF, &so_sendbuf, @sizeOf(c_int));
}

View File

@@ -51,6 +51,10 @@ stdout_maxbuf: ?*MaxBuf = null,
stderr_maxbuf: ?*MaxBuf = null,
exited_due_to_maxbuf: ?MaxBuf.Kind = null,
/// Promise for Bun.spawnAndWait() — resolves with SyncSubprocess-shaped result
/// when process exits AND all stdio pipes are closed.
spawn_and_wait_promise: jsc.Strong.Optional = .empty,
pub const Flags = packed struct(u8) {
is_sync: bool = false,
killed: bool = false,
@@ -58,7 +62,8 @@ pub const Flags = packed struct(u8) {
finalized: bool = false,
deref_on_stdin_destroyed: bool = false,
is_stdin_a_readable_stream: bool = false,
_: u2 = 0,
is_buffered_async: bool = false,
_: u1 = 0,
};
pub const SignalCode = bun.SignalCode;
@@ -147,6 +152,10 @@ pub fn hasExited(this: *const Subprocess) bool {
}
pub fn computeHasPendingActivity(this: *const Subprocess) bool {
if (this.spawn_and_wait_promise.has()) {
return true;
}
if (this.ipc_data != null) {
return true;
}
@@ -225,6 +234,88 @@ pub fn onCloseIO(this: *Subprocess, kind: StdioKind) void {
}
},
}
if (this.flags.is_buffered_async) {
this.maybeResolveBufferedAsync();
}
}
/// Called from onProcessExit and onCloseIO when is_buffered_async is set.
/// Resolves the spawnAndWait promise once ALL conditions are met:
/// 1. The process has exited
/// 2. stdout is not an active pipe (has been closed/buffered)
/// 3. stderr is not an active pipe (has been closed/buffered)
pub fn maybeResolveBufferedAsync(this: *Subprocess) void {
// Condition 1: process must have exited
if (!this.process.hasExited()) return;
// Condition 2: stdout must not be an active pipe
if (this.stdout == .pipe) return;
// Condition 3: stderr must not be an active pipe
if (this.stderr == .pipe) return;
// All conditions met — resolve the promise
const promise_js = this.spawn_and_wait_promise.trySwap() orelse return;
const globalThis = this.globalThis;
const loop = globalThis.bunVM().eventLoop();
loop.enter();
defer loop.exit();
if (this.buildBufferedResult(globalThis)) |result| {
if (promise_js.asAnyPromise()) |promise| {
promise.resolve(globalThis, result) catch {};
}
} else |_| {
if (promise_js.asAnyPromise()) |promise| {
const err = if (globalThis.hasException())
globalThis.takeException(error.JSError)
else
JSValue.zero;
if (err != .zero) {
promise.reject(globalThis, err) catch {};
}
}
}
this.updateHasPendingActivity();
// Balance the ref() taken in spawnAndWait
this.deref();
}
/// Build a result object with the same shape as spawnSync's return value.
fn buildBufferedResult(this: *Subprocess, globalThis: *jsc.JSGlobalObject) bun.JSError!JSValue {
const signalCode = this.getSignalCode(globalThis);
const exitCode = this.getExitCode(globalThis);
const stdout = try this.stdout.toBufferedValue(globalThis);
const stderr = try this.stderr.toBufferedValue(globalThis);
const resource_usage: JSValue = if (!globalThis.hasException()) try this.createResourceUsageObject(globalThis) else .zero;
const resultPid = jsc.JSValue.jsNumberFromInt32(this.pid());
const sync_value = jsc.JSValue.createEmptyObject(globalThis, 0);
sync_value.put(globalThis, jsc.ZigString.static("exitCode"), exitCode);
if (!signalCode.isEmptyOrUndefinedOrNull()) {
sync_value.put(globalThis, jsc.ZigString.static("signalCode"), signalCode);
}
sync_value.put(globalThis, jsc.ZigString.static("stdout"), stdout);
sync_value.put(globalThis, jsc.ZigString.static("stderr"), stderr);
sync_value.put(globalThis, jsc.ZigString.static("success"), JSValue.jsBoolean(exitCode.isInt32() and exitCode.asInt32() == 0));
sync_value.put(globalThis, jsc.ZigString.static("resourceUsage"), resource_usage);
// Match spawnSync: include exitedDueToTimeout when a timeout was configured
if (this.event_loop_timer.next.ns() != 0 or this.event_loop_timer.state == .FIRED) {
sync_value.put(globalThis, jsc.ZigString.static("exitedDueToTimeout"), if (this.event_loop_timer.state == .FIRED) .true else .false);
}
// Match spawnSync: include exitedDueToMaxBuffer when maxBuffer was configured
if (this.stdout_maxbuf != null or this.stderr_maxbuf != null or this.exited_due_to_maxbuf != null) {
sync_value.put(globalThis, jsc.ZigString.static("exitedDueToMaxBuffer"), if (this.exited_due_to_maxbuf != null) .true else .false);
}
sync_value.put(globalThis, jsc.ZigString.static("pid"), resultPid);
return sync_value;
}
pub fn jsRef(this: *Subprocess) void {
@@ -699,6 +790,10 @@ pub fn onProcessExit(this: *Subprocess, process: *Process, status: bun.spawn.Sta
);
}
}
if (this.flags.is_buffered_async) {
this.maybeResolveBufferedAsync();
}
}
}
@@ -772,6 +867,7 @@ pub fn finalize(this: *Subprocess) callconv(.c) void {
// access it after it's been freed We cannot call any methods which
// access GC'd values during the finalizer
this.this_value.finalize();
this.spawn_and_wait_promise.deinit();
this.clearAbortSignal();
@@ -917,6 +1013,7 @@ pub const Writable = @import("./subprocess/Writable.zig").Writable;
pub const MaxBuf = bun.io.MaxBuf;
pub const spawnSync = js_bun_spawn_bindings.spawnSync;
pub const spawn = js_bun_spawn_bindings.spawn;
pub const spawnAndWait = js_bun_spawn_bindings.spawnAndWait;
const IPC = @import("../../ipc.zig");
const Terminal = @import("./Terminal.zig");

View File

@@ -24,12 +24,6 @@ client_renegotiation_window: u32 = 0,
requires_custom_request_ctx: bool = false,
is_using_default_ciphers: bool = true,
low_memory_mode: bool = false,
ref_count: RC = .init(),
cached_hash: u64 = 0,
const RC = bun.ptr.ThreadSafeRefCount(@This(), "ref_count", destroy, .{});
pub const ref = RC.ref;
pub const deref = RC.deref;
const ReadFromBlobError = bun.JSError || error{
NullStore,
@@ -119,7 +113,6 @@ pub fn forClientVerification(this: SSLConfig) SSLConfig {
pub fn isSame(this: *const SSLConfig, other: *const SSLConfig) bool {
inline for (comptime std.meta.fields(SSLConfig)) |field| {
if (comptime std.mem.eql(u8, field.name, "ref_count") or std.mem.eql(u8, field.name, "cached_hash")) continue;
const first = @field(this, field.name);
const second = @field(other, field.name);
switch (field.type) {
@@ -192,8 +185,6 @@ pub fn deinit(this: *SSLConfig) void {
.requires_custom_request_ctx = {},
.is_using_default_ciphers = {},
.low_memory_mode = {},
.ref_count = {},
.cached_hash = {},
});
}
@@ -231,97 +222,9 @@ pub fn clone(this: *const SSLConfig) SSLConfig {
.requires_custom_request_ctx = this.requires_custom_request_ctx,
.is_using_default_ciphers = this.is_using_default_ciphers,
.low_memory_mode = this.low_memory_mode,
.ref_count = .init(),
.cached_hash = 0,
};
}
pub fn contentHash(this: *SSLConfig) u64 {
if (this.cached_hash != 0) return this.cached_hash;
var hasher = std.hash.Wyhash.init(0);
inline for (comptime std.meta.fields(SSLConfig)) |field| {
if (comptime std.mem.eql(u8, field.name, "ref_count") or std.mem.eql(u8, field.name, "cached_hash")) continue;
const value = @field(this, field.name);
switch (field.type) {
?[*:0]const u8 => {
if (value) |s| {
hasher.update(bun.asByteSlice(s));
}
hasher.update(&.{0});
},
?[][*:0]const u8 => {
if (value) |slice| {
for (slice) |s| {
hasher.update(bun.asByteSlice(s));
hasher.update(&.{0});
}
}
hasher.update(&.{0});
},
else => {
hasher.update(std.mem.asBytes(&value));
},
}
}
const hash = hasher.final();
// Avoid 0 since it's the sentinel for "not computed"
this.cached_hash = if (hash == 0) 1 else hash;
return this.cached_hash;
}
/// Called by the RC mixin when refcount reaches 0.
fn destroy(this: *SSLConfig) void {
GlobalRegistry.remove(this);
this.deinit();
bun.default_allocator.destroy(this);
}
pub const GlobalRegistry = struct {
const MapContext = struct {
pub fn hash(_: @This(), key: *SSLConfig) u32 {
return @truncate(key.contentHash());
}
pub fn eql(_: @This(), a: *SSLConfig, b: *SSLConfig, _: usize) bool {
return a.isSame(b);
}
};
var mutex: bun.Mutex = .{};
var configs: std.ArrayHashMapUnmanaged(*SSLConfig, void, MapContext, true) = .empty;
/// Takes ownership of a heap-allocated SSLConfig.
/// If an identical config already exists in the registry, the new one is freed
/// and the existing one is returned (with refcount incremented).
/// If no match, the new config is registered and returned.
pub fn intern(new_config: *SSLConfig) *SSLConfig {
mutex.lock();
defer mutex.unlock();
// Look up by content hash/equality
const gop = bun.handleOom(configs.getOrPutContext(bun.default_allocator, new_config, .{}));
if (gop.found_existing) {
// Identical config already exists - free the new one, return existing
const existing = gop.key_ptr.*;
new_config.ref_count.clearWithoutDestructor();
new_config.deinit();
bun.default_allocator.destroy(new_config);
existing.ref();
return existing;
}
// New config - it's already inserted by getOrPut
// refcount is already 1 from initialization
return new_config;
}
/// Remove a config from the registry. Called when refcount reaches 0.
fn remove(config: *SSLConfig) void {
mutex.lock();
defer mutex.unlock();
_ = configs.swapRemoveContext(config, .{});
}
};
pub const zero = SSLConfig{};
pub fn fromJS(
@@ -391,9 +294,9 @@ pub fn fromGenerated(
const protocols = switch (generated.alpn_protocols) {
.none => null,
.string => |*val| val.get().toOwnedSliceZ(bun.default_allocator),
.buffer => |*val| blk: {
const buffer: jsc.ArrayBuffer = val.get().asArrayBuffer();
.string => |*ref| ref.get().toOwnedSliceZ(bun.default_allocator),
.buffer => |*ref| blk: {
const buffer: jsc.ArrayBuffer = ref.get().asArrayBuffer();
break :blk try bun.default_allocator.dupeZ(u8, buffer.byteSlice());
},
};
@@ -463,9 +366,9 @@ fn handleFile(
) ReadFromBlobError!?[][*:0]const u8 {
const single = try handleSingleFile(global, switch (file.*) {
.none => return null,
.string => |*val| .{ .string = val.get() },
.buffer => |*val| .{ .buffer = val.get() },
.file => |*val| .{ .file = val.get() },
.string => |*ref| .{ .string = ref.get() },
.buffer => |*ref| .{ .buffer = ref.get() },
.file => |*ref| .{ .file = ref.get() },
.array => |*list| return try handleFileArray(global, list.items()),
});
errdefer bun.freeSensitive(bun.default_allocator, single);
@@ -488,9 +391,9 @@ fn handleFileArray(
}
for (elements) |*elem| {
result.appendAssumeCapacity(try handleSingleFile(global, switch (elem.*) {
.string => |*val| .{ .string = val.get() },
.buffer => |*val| .{ .buffer = val.get() },
.file => |*val| .{ .file = val.get() },
.string => |*ref| .{ .string = ref.get() },
.buffer => |*ref| .{ .buffer = ref.get() },
.file => |*ref| .{ .file = ref.get() },
}));
}
return try result.toOwnedSlice();

View File

@@ -71,6 +71,7 @@
macro(shrink) \
macro(sleepSync) \
macro(spawn) \
macro(spawnAndWait) \
macro(spawnSync) \
macro(stringWidth) \
macro(udpSocket) \

View File

@@ -1017,6 +1017,7 @@ JSC_DEFINE_HOST_FUNCTION(functionFileURLToPath, (JSC::JSGlobalObject * globalObj
sleep functionBunSleep DontDelete|Function 1
sleepSync BunObject_callback_sleepSync DontDelete|Function 1
spawn BunObject_callback_spawn DontDelete|Function 1
spawnAndWait BunObject_callback_spawnAndWait DontDelete|Function 1
spawnSync BunObject_callback_spawnSync DontDelete|Function 1
stderr BunObject_lazyPropCb_wrap_stderr DontDelete|PropertyCallback
stdin BunObject_lazyPropCb_wrap_stdin DontDelete|PropertyCallback

View File

@@ -1,15 +1,10 @@
// clang-format off
#include "root.h"
#include "ModuleLoader.h"
#include "headers-handwritten.h"
#include "PathInlines.h"
#include "JSCommonJSModule.h"
#include "root.h"
#include <JavaScriptCore/JSBoundFunction.h>
#include <JavaScriptCore/PropertySlot.h>
#include <JavaScriptCore/JSMap.h>
#include <JavaScriptCore/JSString.h>
#include <JavaScriptCore/SourceCode.h>
#include "ZigGlobalObject.h"
#include "InternalModuleRegistry.h"
@@ -90,44 +85,3 @@ extern "C" [[ZIG_EXPORT(nothrow)]] void Bun__ExposeNodeModuleGlobals(Zig::Global
FOREACH_EXPOSED_BUILTIN_IMR(PUT_CUSTOM_GETTER_SETTER)
#undef PUT_CUSTOM_GETTER_SETTER
}
// Set up require(), module, __filename, __dirname on globalThis for the REPL.
// Creates a CommonJS module object rooted at the given directory so require() resolves correctly.
extern "C" [[ZIG_EXPORT(check_slow)]] void Bun__REPL__setupGlobalRequire(
Zig::GlobalObject* globalObject,
const unsigned char* cwdPtr,
size_t cwdLen)
{
using namespace JSC;
auto& vm = getVM(globalObject);
auto scope = DECLARE_THROW_SCOPE(vm);
auto cwdStr = WTF::String::fromUTF8(std::span { cwdPtr, cwdLen });
auto* filename = jsString(vm, makeString(cwdStr, PLATFORM_SEP_s, "[repl]"_s));
auto* dirname = jsString(vm, WTF::String(cwdStr));
auto* moduleObject = Bun::JSCommonJSModule::create(vm,
globalObject->CommonJSModuleObjectStructure(),
filename, filename, dirname, SourceCode());
moduleObject->hasEvaluated = true;
auto* resolveFunction = JSBoundFunction::create(vm, globalObject,
globalObject->requireResolveFunctionUnbound(), filename,
ArgList(), 1, globalObject->commonStrings().resolveString(globalObject),
makeSource("resolve"_s, SourceOrigin(), SourceTaintedOrigin::Untainted));
RETURN_IF_EXCEPTION(scope, );
auto* requireFunction = JSBoundFunction::create(vm, globalObject,
globalObject->requireFunctionUnbound(), moduleObject,
ArgList(), 1, globalObject->commonStrings().requireString(globalObject),
makeSource("require"_s, SourceOrigin(), SourceTaintedOrigin::Untainted));
RETURN_IF_EXCEPTION(scope, );
requireFunction->putDirect(vm, vm.propertyNames->resolve, resolveFunction, 0);
moduleObject->putDirect(vm, WebCore::clientData(vm)->builtinNames().requirePublicName(), requireFunction, 0);
globalObject->putDirect(vm, WebCore::builtinNames(vm).requirePublicName(), requireFunction, 0);
globalObject->putDirect(vm, Identifier::fromString(vm, "module"_s), moduleObject, 0);
globalObject->putDirect(vm, Identifier::fromString(vm, "__filename"_s), filename, 0);
globalObject->putDirect(vm, Identifier::fromString(vm, "__dirname"_s), dirname, 0);
}

View File

@@ -2450,7 +2450,6 @@ JSC_DEFINE_CUSTOM_GETTER(getConsoleConstructor, (JSGlobalObject * globalObject,
if (returnedException) {
auto scope = DECLARE_THROW_SCOPE(vm);
throwException(globalObject, scope, returnedException.get());
return {};
}
console->putDirect(vm, property, result, 0);
return JSValue::encode(result);

View File

@@ -6151,166 +6151,6 @@ CPP_DECL [[ZIG_EXPORT(nothrow)]] unsigned int Bun__CallFrame__getLineNumber(JSC:
return lineColumn.line;
}
// REPL evaluation function - evaluates JavaScript code in the global scope
// Returns the result value, or undefined if an exception was thrown
// If an exception is thrown, the exception value is stored in *exception
extern "C" JSC::EncodedJSValue Bun__REPL__evaluate(
JSC::JSGlobalObject* globalObject,
const unsigned char* sourcePtr,
size_t sourceLen,
const unsigned char* filenamePtr,
size_t filenameLen,
JSC::EncodedJSValue* exception)
{
auto& vm = JSC::getVM(globalObject);
auto scope = DECLARE_TOP_EXCEPTION_SCOPE(vm);
WTF::String source = WTF::String::fromUTF8(std::span { sourcePtr, sourceLen });
WTF::String filename = filenameLen > 0
? WTF::String::fromUTF8(std::span { filenamePtr, filenameLen })
: "[repl]"_s;
JSC::SourceCode sourceCode = JSC::makeSource(
source,
JSC::SourceOrigin {},
JSC::SourceTaintedOrigin::Untainted,
filename,
WTF::TextPosition(),
JSC::SourceProviderSourceType::Program);
WTF::NakedPtr<JSC::Exception> evalException;
JSC::JSValue result = JSC::evaluate(globalObject, sourceCode, globalObject->globalThis(), evalException);
if (evalException) {
*exception = JSC::JSValue::encode(evalException->value());
// Set _error on the globalObject directly (not globalThis proxy)
globalObject->putDirect(vm, JSC::Identifier::fromString(vm, "_error"_s), evalException->value());
scope.clearException();
return JSC::JSValue::encode(JSC::jsUndefined());
}
if (scope.exception()) {
*exception = JSC::JSValue::encode(scope.exception()->value());
// Set _error on the globalObject directly (not globalThis proxy)
globalObject->putDirect(vm, JSC::Identifier::fromString(vm, "_error"_s), scope.exception()->value());
scope.clearException();
return JSC::JSValue::encode(JSC::jsUndefined());
}
// Note: _ is now set in Zig code (repl.zig) after extracting the value from
// the REPL transform wrapper. We don't set it here anymore.
return JSC::JSValue::encode(result);
}
// REPL completion function - gets completions for a partial property access
// Returns an array of completion strings, or undefined if no completions
extern "C" JSC::EncodedJSValue Bun__REPL__getCompletions(
JSC::JSGlobalObject* globalObject,
JSC::EncodedJSValue targetValue,
const unsigned char* prefixPtr,
size_t prefixLen)
{
auto& vm = JSC::getVM(globalObject);
auto scope = DECLARE_THROW_SCOPE(vm);
JSC::JSValue target = JSC::JSValue::decode(targetValue);
if (!target || target.isUndefined() || target.isNull()) {
target = globalObject->globalThis();
}
if (!target.isObject()) {
JSObject* boxed = target.toObject(globalObject);
RETURN_IF_EXCEPTION(scope, JSC::JSValue::encode(JSC::jsUndefined()));
target = boxed;
}
WTF::String prefix = prefixLen > 0
? WTF::String::fromUTF8(std::span { prefixPtr, prefixLen })
: WTF::String();
JSC::JSObject* object = target.getObject();
JSC::PropertyNameArrayBuilder propertyNames(vm, JSC::PropertyNameMode::Strings, JSC::PrivateSymbolMode::Exclude);
object->getPropertyNames(globalObject, propertyNames, DontEnumPropertiesMode::Include);
RETURN_IF_EXCEPTION(scope, JSC::JSValue::encode(JSC::jsUndefined()));
JSC::JSArray* completions = JSC::constructEmptyArray(globalObject, nullptr, 0);
RETURN_IF_EXCEPTION(scope, JSC::JSValue::encode(JSC::jsUndefined()));
unsigned completionIndex = 0;
for (const auto& propertyName : propertyNames) {
WTF::String name = propertyName.string();
if (prefix.isEmpty() || name.startsWith(prefix)) {
completions->putDirectIndex(globalObject, completionIndex++, JSC::jsString(vm, name));
RETURN_IF_EXCEPTION(scope, JSC::JSValue::encode(JSC::jsUndefined()));
}
}
// Also check the prototype chain
JSC::JSValue proto = object->getPrototype(globalObject);
RETURN_IF_EXCEPTION(scope, JSC::JSValue::encode(completions));
while (proto && proto.isObject()) {
JSC::JSObject* protoObj = proto.getObject();
JSC::PropertyNameArrayBuilder protoNames(vm, JSC::PropertyNameMode::Strings, JSC::PrivateSymbolMode::Exclude);
protoObj->getPropertyNames(globalObject, protoNames, DontEnumPropertiesMode::Include);
RETURN_IF_EXCEPTION(scope, JSC::JSValue::encode(completions));
for (const auto& propertyName : protoNames) {
WTF::String name = propertyName.string();
if (prefix.isEmpty() || name.startsWith(prefix)) {
completions->putDirectIndex(globalObject, completionIndex++, JSC::jsString(vm, name));
RETURN_IF_EXCEPTION(scope, JSC::JSValue::encode(completions));
}
}
proto = protoObj->getPrototype(globalObject);
RETURN_IF_EXCEPTION(scope, JSC::JSValue::encode(completions));
}
return JSC::JSValue::encode(completions);
}
// Format a value for REPL output using util.inspect style
extern "C" JSC::EncodedJSValue Bun__REPL__formatValue(
JSC::JSGlobalObject* globalObject,
JSC::EncodedJSValue valueEncoded,
int32_t depth,
bool colors)
{
auto& vm = JSC::getVM(globalObject);
auto scope = DECLARE_THROW_SCOPE(vm);
// Get the util.inspect function from the global object
auto* bunGlobal = jsCast<Zig::GlobalObject*>(globalObject);
JSC::JSValue inspectFn = bunGlobal->utilInspectFunction();
if (!inspectFn || !inspectFn.isCallable()) {
// Fallback to toString if util.inspect is not available
JSC::JSValue value = JSC::JSValue::decode(valueEncoded);
JSString* str = value.toString(globalObject);
RETURN_IF_EXCEPTION(scope, JSC::JSValue::encode(JSC::jsUndefined()));
return JSC::JSValue::encode(str);
}
// Create options object
JSC::JSObject* options = JSC::constructEmptyObject(globalObject);
options->putDirect(vm, JSC::Identifier::fromString(vm, "depth"_s), JSC::jsNumber(depth));
options->putDirect(vm, JSC::Identifier::fromString(vm, "colors"_s), JSC::jsBoolean(colors));
options->putDirect(vm, JSC::Identifier::fromString(vm, "maxArrayLength"_s), JSC::jsNumber(100));
options->putDirect(vm, JSC::Identifier::fromString(vm, "maxStringLength"_s), JSC::jsNumber(10000));
options->putDirect(vm, JSC::Identifier::fromString(vm, "breakLength"_s), JSC::jsNumber(80));
JSC::MarkedArgumentBuffer args;
args.append(JSC::JSValue::decode(valueEncoded));
args.append(options);
JSC::JSValue result = JSC::call(globalObject, inspectFn, JSC::ArgList(args), "util.inspect"_s);
RETURN_IF_EXCEPTION(scope, JSC::JSValue::encode(JSC::jsUndefined()));
return JSC::JSValue::encode(result);
}
extern "C" void JSC__ArrayBuffer__ref(JSC::ArrayBuffer* self) { self->ref(); }
extern "C" void JSC__ArrayBuffer__deref(JSC::ArrayBuffer* self) { self->deref(); }
extern "C" void JSC__ArrayBuffer__asBunArrayBuffer(JSC::ArrayBuffer* self, Bun__ArrayBuffer* out)

View File

@@ -168,12 +168,6 @@ CPP_DECL uint32_t JSC__JSInternalPromise__status(const JSC::JSInternalPromise* a
CPP_DECL void JSC__JSFunction__optimizeSoon(JSC::EncodedJSValue JSValue0);
#pragma mark - REPL Functions
CPP_DECL JSC::EncodedJSValue Bun__REPL__evaluate(JSC::JSGlobalObject* globalObject, const unsigned char* sourcePtr, size_t sourceLen, const unsigned char* filenamePtr, size_t filenameLen, JSC::EncodedJSValue* exception);
CPP_DECL JSC::EncodedJSValue Bun__REPL__getCompletions(JSC::JSGlobalObject* globalObject, JSC::EncodedJSValue targetValue, const unsigned char* prefixPtr, size_t prefixLen);
CPP_DECL JSC::EncodedJSValue Bun__REPL__formatValue(JSC::JSGlobalObject* globalObject, JSC::EncodedJSValue valueEncoded, int32_t depth, bool colors);
#pragma mark - JSC::JSGlobalObject
CPP_DECL VirtualMachine* JSC__JSGlobalObject__bunVM(JSC::JSGlobalObject* arg0);

View File

@@ -57,46 +57,6 @@ static std::optional<WTF::String> stripANSI(const std::span<const Char> input)
return result.toString();
}
struct BunANSIIterator {
const unsigned char* input;
size_t input_len;
size_t cursor;
const unsigned char* slice_ptr;
size_t slice_len;
};
extern "C" bool Bun__ANSI__next(BunANSIIterator* it)
{
auto start = it->input + it->cursor;
const auto end = it->input + it->input_len;
// Skip past any ANSI sequences at current position
while (start < end) {
const auto escPos = ANSI::findEscapeCharacter(start, end);
if (escPos != start) break;
const auto after = ANSI::consumeANSI(start, end);
if (after == start) {
start++;
break;
}
start = after;
}
if (start >= end) {
it->cursor = it->input_len;
it->slice_ptr = nullptr;
it->slice_len = 0;
return false;
}
const auto escPos = ANSI::findEscapeCharacter(start, end);
const auto slice_end = escPos ? escPos : end;
it->slice_ptr = start;
it->slice_len = slice_end - start;
it->cursor = slice_end - it->input;
return true;
}
JSC_DEFINE_HOST_FUNCTION(jsFunctionBunStripANSI, (JSC::JSGlobalObject * globalObject, JSC::CallFrame* callFrame))
{
auto& vm = globalObject->vm();

View File

@@ -325,30 +325,16 @@ pub fn start(
}
this.arena = bun.MimallocArena.init();
const allocator = this.arena.?.allocator();
const map = try allocator.create(bun.DotEnv.Map);
map.* = try this.parent.transpiler.env.map.cloneWithAllocator(allocator);
const loader = try allocator.create(bun.DotEnv.Loader);
loader.* = bun.DotEnv.Loader.init(map, allocator);
var vm = try jsc.VirtualMachine.initWorker(this, .{
.allocator = allocator,
.allocator = this.arena.?.allocator(),
.args = transform_options,
.env_loader = loader,
.store_fd = this.store_fd,
.graph = this.parent.standalone_module_graph,
});
vm.allocator = allocator;
vm.allocator = this.arena.?.allocator();
vm.arena = &this.arena.?;
var b = &vm.transpiler;
b.resolver.env_loader = b.env;
if (this.parent.standalone_module_graph) |graph| {
bun.bun_js.applyStandaloneRuntimeFlags(b, graph);
}
b.configureDefines() catch {
this.flushLogs();
@@ -356,6 +342,16 @@ pub fn start(
return;
};
// TODO: we may have to clone other parts of vm state. this will be more
// important when implementing vm.deinit()
const map = try vm.allocator.create(bun.DotEnv.Map);
map.* = try vm.transpiler.env.map.cloneWithAllocator(vm.allocator);
const loader = try vm.allocator.create(bun.DotEnv.Loader);
loader.* = bun.DotEnv.Loader.init(map, vm.allocator);
vm.transpiler.env = loader;
vm.loadExtraEnvAndSourceCodePrinter();
vm.is_main_thread = false;
jsc.VirtualMachine.is_main_thread_vm = false;

View File

@@ -275,7 +275,8 @@ fn fetchImpl(
if (ssl_config) |conf| {
ssl_config = null;
conf.deref();
conf.deinit();
bun.default_allocator.destroy(conf);
}
}
@@ -467,8 +468,7 @@ fn fetchImpl(
}) |config| {
const ssl_config_object = bun.handleOom(bun.default_allocator.create(SSLConfig));
ssl_config_object.* = config;
// Intern via GlobalRegistry for deduplication and pointer equality
break :extract_ssl_config SSLConfig.GlobalRegistry.intern(ssl_config_object);
break :extract_ssl_config ssl_config_object;
}
}
}

View File

@@ -78,10 +78,6 @@ pub const FetchTasklet = struct {
bun.debugAssert(count > 0);
if (count == 1) {
if (this.javascript_vm.isShuttingDown()) {
this.deinit() catch |err| switch (err) {};
return;
}
// this is really unlikely to happen, but can happen
// lets make sure that we always call deinit from main thread
@@ -1159,7 +1155,6 @@ pub const FetchTasklet = struct {
/// This is ALWAYS called from the http thread and we cannot touch the buffer here because is locked
pub fn onWriteRequestDataDrain(this: *FetchTasklet) void {
if (this.javascript_vm.isShuttingDown()) return;
// ref until the main thread callback is called
this.ref();
this.javascript_vm.eventLoop().enqueueTaskConcurrent(jsc.ConcurrentTask.fromCallback(this, FetchTasklet.resumeRequestDataStream));
@@ -1388,8 +1383,7 @@ pub const FetchTasklet = struct {
return;
}
}
// will deinit when done with the http client (when is_done = true)
if (task.javascript_vm.isShuttingDown()) return;
task.javascript_vm.eventLoop().enqueueTaskConcurrent(task.concurrent_task.from(task, .manual_deinit));
}
};

View File

@@ -191,7 +191,6 @@ pub const linux = @import("./linux.zig");
/// Translated from `c-headers-for-zig.h` for the current platform.
pub const c = @import("translated-c-headers");
pub const tty = @import("./tty.zig");
pub const sha = @import("./sha.zig");
pub const FeatureFlags = @import("./feature_flags.zig");

View File

@@ -92,7 +92,6 @@ pub const AuditCommand = @import("./cli/audit_command.zig").AuditCommand;
pub const InitCommand = @import("./cli/init_command.zig").InitCommand;
pub const WhyCommand = @import("./cli/why_command.zig").WhyCommand;
pub const FuzzilliCommand = @import("./cli/fuzzilli_command.zig").FuzzilliCommand;
pub const ReplCommand = @import("./cli/repl_command.zig").ReplCommand;
pub const Arguments = @import("./cli/Arguments.zig");
@@ -843,8 +842,12 @@ pub const Command = struct {
return;
},
.ReplCommand => {
const ctx = try Command.init(allocator, log, .RunCommand);
try ReplCommand.exec(ctx);
// TODO: Put this in native code.
var ctx = try Command.init(allocator, log, .BunxCommand);
ctx.debug.run_in_bun = true; // force the same version of bun used. fixes bun-debug for example
var args = bun.argv[0..];
args[1] = "bun-repl";
try BunxCommand.exec(ctx, args);
return;
},
.RemoveCommand => {

View File

@@ -38,6 +38,9 @@ pub const InitCommand = struct {
return input.items[0 .. input.items.len - 1 :0];
}
}
extern fn Bun__ttySetMode(fd: i32, mode: i32) i32;
fn processRadioButton(label: string, comptime Choices: type) !Choices {
const colors = Output.enable_ansi_colors_stdout;
const choices = switch (colors) {
@@ -187,7 +190,7 @@ pub const InitCommand = struct {
}) catch null;
if (Environment.isPosix)
_ = bun.tty.setMode(0, .raw);
_ = Bun__ttySetMode(0, 1);
defer {
if (comptime Environment.isWindows) {
@@ -199,7 +202,7 @@ pub const InitCommand = struct {
}
}
if (Environment.isPosix) {
_ = bun.tty.setMode(0, .normal);
_ = Bun__ttySetMode(0, 0);
}
}

View File

@@ -1,191 +0,0 @@
//! Bun REPL Command - Native Zig REPL with full TUI support
//!
//! This is the entry point for `bun repl` which provides an interactive
//! JavaScript REPL with:
//! - Syntax highlighting using QuickAndDirtySyntaxHighlighter
//! - Full line editing with Emacs-style keybindings
//! - Persistent history
//! - Tab completion
//! - Multi-line input support
//! - REPL commands (.help, .exit, .clear, .load, .save, .editor)
/// Entry point for `bun repl`: a native Zig REPL with syntax highlighting,
/// Emacs-style line editing, persistent history, tab completion, and
/// multi-line input, backed by a full JavaScript VM.
pub const ReplCommand = struct {
    /// CLI entry point. Creates the REPL state on the command allocator,
    /// then boots a JavaScript VM that drives either the interactive loop
    /// or one-shot -e/--eval / -p/--print evaluation.
    pub fn exec(ctx: Command.Context) !void {
        @branchHint(.cold);
        // Initialize the Zig REPL
        var repl = Repl.init(ctx.allocator);
        defer repl.deinit();
        // Boot the JavaScript VM for the REPL
        try bootReplVM(ctx, &repl);
    }

    /// Configures a VirtualMachine for REPL evaluation (eval mode, no dead
    /// code elimination) and hands control to ReplRunner inside the VM's
    /// API lock. Does not return control to the caller on the happy path:
    /// ReplRunner.start() exits the process via vm.globalExit().
    fn bootReplVM(ctx: Command.Context, repl: *Repl) !void {
        // Load bunfig if not already loaded
        if (!ctx.debug.loaded_bunfig) {
            try bun.cli.Arguments.loadConfigPath(ctx.allocator, true, "bunfig.toml", ctx, .RunCommand);
        }
        // Initialize JSC
        bun.jsc.initialize(true); // true for eval mode
        js_ast.Expr.Data.Store.create();
        js_ast.Stmt.Data.Store.create();
        const arena = Arena.init();
        // Create a virtual path for REPL evaluation
        const repl_path = "[repl]";
        // Initialize the VM
        const vm = try jsc.VirtualMachine.init(.{
            .allocator = arena.allocator(),
            .log = ctx.log,
            .args = ctx.args,
            .store_fd = false,
            .smol = ctx.runtime_options.smol,
            .eval = true,
            .debugger = ctx.runtime_options.debugger,
            .dns_result_order = DNSResolver.Order.fromStringOrDie(ctx.runtime_options.dns_result_order),
            .is_main_thread = true,
        });
        var b = &vm.transpiler;
        vm.preload = ctx.preloads;
        vm.argv = ctx.passthrough;
        vm.arena = @constCast(&arena);
        vm.allocator = vm.arena.allocator();
        // Configure bundler options (mirror the CLI's install/cache flags so
        // auto-install behaves the same as `bun run`)
        b.options.install = ctx.install;
        b.resolver.opts.install = ctx.install;
        b.resolver.opts.global_cache = ctx.debug.global_cache;
        b.resolver.opts.prefer_offline_install = (ctx.debug.offline_mode_setting orelse .online) == .offline;
        b.resolver.opts.prefer_latest_install = (ctx.debug.offline_mode_setting orelse .online) == .latest;
        b.options.global_cache = b.resolver.opts.global_cache;
        b.options.prefer_offline_install = b.resolver.opts.prefer_offline_install;
        b.options.prefer_latest_install = b.resolver.opts.prefer_latest_install;
        b.resolver.env_loader = b.env;
        b.options.env.behavior = .load_all_without_inlining;
        b.options.dead_code_elimination = false; // REPL needs all code
        b.configureDefines() catch {
            dumpBuildError(vm);
            Global.exit(1);
        };
        bun.http.AsyncHTTP.loadEnv(vm.allocator, vm.log, b.env);
        vm.loadExtraEnvAndSourceCodePrinter();
        vm.is_main_thread = true;
        jsc.VirtualMachine.is_main_thread_vm = true;
        // Store VM reference in REPL (safe - no JS allocation)
        repl.vm = vm;
        repl.global = vm.global;
        // Create the ReplRunner and execute within the API lock
        // NOTE: JS-allocating operations like ExposeNodeModuleGlobals must
        // be done inside the API lock callback, not before
        var runner = ReplRunner{
            .repl = repl,
            .vm = vm,
            .arena = arena,
            .entry_path = repl_path,
            .eval_script = ctx.runtime_options.eval.script,
            .eval_and_print = ctx.runtime_options.eval.eval_and_print,
        };
        const callback = jsc.OpaqueWrap(ReplRunner, ReplRunner.start);
        vm.global.vm().holdAPILock(&runner, callback);
    }

    /// Flushes any pending output and prints the transpiler's build log
    /// (best-effort; print errors are ignored).
    fn dumpBuildError(vm: *jsc.VirtualMachine) void {
        Output.flush();
        const writer = Output.errorWriterBuffered();
        defer Output.flush();
        vm.log.print(writer) catch {};
    }
};
/// Runs the REPL within the VM's API lock. Constructed by
/// ReplCommand.bootReplVM and invoked once via jsc.OpaqueWrap; start()
/// never returns — it terminates the process through vm.globalExit().
const ReplRunner = struct {
    repl: *Repl,
    vm: *jsc.VirtualMachine,
    arena: bun.allocators.MimallocArena,
    // Virtual module path used for REPL evaluation (e.g. "[repl]").
    entry_path: []const u8,
    // Script text from -e/--eval; empty for interactive sessions.
    eval_script: []const u8,
    // True when -p/--print was used (evaluate and print the result).
    eval_and_print: bool,

    /// Body executed while holding the VM API lock: sets up globals, then
    /// either evaluates a one-shot script or runs the interactive loop,
    /// and finally exits the process.
    pub fn start(this: *ReplRunner) void {
        const vm = this.vm;
        // Set up the REPL environment (now inside API lock)
        this.setupReplEnvironment() catch {
            // setupGlobalRequire threw a JS exception — surface it and exit
            if (vm.global.tryTakeException()) |exception| {
                vm.printErrorLikeObjectToConsole(exception);
            }
            vm.exit_handler.exit_code = 1;
            vm.onExit();
            vm.globalExit();
        };
        if (this.eval_script.len > 0 or this.eval_and_print) {
            // Non-interactive: evaluate the -e/--eval or -p/--print script,
            // drain the event loop, and exit
            const had_error = this.repl.evalScript(this.eval_script, this.eval_and_print);
            Output.flush();
            if (had_error) {
                // Only overwrite on error so `process.exitCode = N` in the
                // script is preserved on success.
                vm.exit_handler.exit_code = 1;
            } else {
                // Fire process.on("beforeExit") and re-drain as needed
                // (matches bun -e / Node.js semantics).
                vm.onBeforeExit();
            }
        } else {
            // Interactive: run the REPL loop
            this.repl.runWithVM(vm) catch |err| {
                Output.prettyErrorln("<r><red>REPL error: {s}<r>", .{@errorName(err)});
            };
        }
        // Clean up
        vm.onExit();
        vm.globalExit();
    }

    /// Installs Node-style globals and require() support into the REPL's
    /// global object. Must run inside the API lock (allocates JS objects).
    /// Returns a JSError when the C++ setup helper throws.
    fn setupReplEnvironment(this: *ReplRunner) bun.JSError!void {
        const vm = this.vm;
        // Expose Node.js module globals (__dirname, __filename, require, etc.)
        // This must be done inside the API lock as it allocates JS objects
        bun.cpp.Bun__ExposeNodeModuleGlobals(vm.global);
        // Set up require(), module, __filename, __dirname relative to cwd
        const cwd = vm.transpiler.fs.topLevelDirWithoutTrailingSlash();
        try bun.cpp.Bun__REPL__setupGlobalRequire(vm.global, cwd.ptr, cwd.len);
        // Set timezone if specified
        if (vm.transpiler.env.get("TZ")) |tz| {
            if (tz.len > 0) {
                _ = vm.global.setTimeZone(&jsc.ZigString.init(tz));
            }
        }
        vm.transpiler.env.loadTracy();
    }
};
const Repl = @import("../repl.zig");
const bun = @import("bun");
const Global = bun.Global;
const Output = bun.Output;
const js_ast = bun.ast;
const jsc = bun.jsc;
const Arena = bun.allocators.MimallocArena;
const Command = bun.cli.Command;
const DNSResolver = bun.api.dns.Resolver;

View File

@@ -1068,7 +1068,7 @@ pub const UpdateInteractiveCommand = struct {
}) catch null;
if (Environment.isPosix)
_ = bun.tty.setMode(0, .raw);
_ = Bun__ttySetMode(0, 1);
defer {
if (comptime Environment.isWindows) {
@@ -1080,7 +1080,7 @@ pub const UpdateInteractiveCommand = struct {
}
}
if (Environment.isPosix) {
_ = bun.tty.setMode(0, .normal);
_ = Bun__ttySetMode(0, 0);
}
}
@@ -1810,6 +1810,9 @@ pub const UpdateInteractiveCommand = struct {
}
}
};
extern fn Bun__ttySetMode(fd: c_int, mode: c_int) c_int;
const string = []const u8;
pub const CatalogUpdateRequest = struct {

View File

@@ -73,7 +73,7 @@ pub fn checkServerIdentity(
};
// we inform the user that the cert is invalid
client.progressUpdate(is_ssl, client.getSslCtx(is_ssl), socket);
client.progressUpdate(is_ssl, if (is_ssl) &http_thread.https_context else &http_thread.http_context, socket);
// continue until we are aborted or not
return true;
} else {
@@ -217,7 +217,7 @@ pub fn onClose(
if (client.state.flags.is_redirect_pending) {
// if the connection is closed and we are pending redirect just do the redirect
// in this case we will re-connect or go to a different socket if needed
client.doRedirect(is_ssl, client.getSslCtx(is_ssl), socket);
client.doRedirect(is_ssl, if (is_ssl) &http_thread.https_context else &http_thread.http_context, socket);
return;
}
if (in_progress) {
@@ -226,7 +226,7 @@ pub fn onClose(
.CHUNKED_IN_TRAILERS_LINE_HEAD, .CHUNKED_IN_TRAILERS_LINE_MIDDLE => {
// ignore failure if we are in the middle of trailer headers, since we processed all the chunks and trailers are ignored
client.state.flags.received_last_chunk = true;
client.progressUpdate(comptime is_ssl, client.getSslCtx(is_ssl), socket);
client.progressUpdate(comptime is_ssl, if (is_ssl) &http_thread.https_context else &http_thread.http_context, socket);
return;
},
// here we are in the middle of a chunk so ECONNRESET is expected
@@ -235,7 +235,7 @@ pub fn onClose(
} else if (client.state.content_length == null and client.state.response_stage == .body) {
// no content length informed so we are done here
client.state.flags.received_last_chunk = true;
client.progressUpdate(comptime is_ssl, client.getSslCtx(is_ssl), socket);
client.progressUpdate(comptime is_ssl, if (is_ssl) &http_thread.https_context else &http_thread.http_context, socket);
return;
}
}
@@ -481,9 +481,6 @@ flags: Flags = Flags{},
state: InternalState = .{},
tls_props: ?*SSLConfig = null,
/// The custom SSL context used for this request (null = default context).
/// Set by HTTPThread.connect() when using custom TLS configs.
custom_ssl_ctx: ?*NewHTTPContext(true) = null,
result_callback: HTTPClientResult.Callback = undefined,
/// Some HTTP servers (such as npm) report Last-Modified times but ignore If-Modified-Since.
@@ -517,11 +514,6 @@ pub fn deinit(this: *HTTPClient) void {
this.proxy_tunnel = null;
tunnel.detachAndDeref();
}
// Release our reference on the interned SSLConfig
if (this.tls_props) |config| {
config.deref();
this.tls_props = null;
}
this.unix_socket_path.deinit();
this.unix_socket_path = jsc.ZigString.Slice.empty;
}
@@ -543,16 +535,6 @@ pub fn isKeepAlivePossible(this: *HTTPClient) bool {
return false;
}
/// Returns the SSL context for this client - either the custom context
/// (for mTLS/custom TLS) or the default global context.
pub fn getSslCtx(this: *HTTPClient, comptime is_ssl: bool) *NewHTTPContext(is_ssl) {
if (comptime is_ssl) {
return this.custom_ssl_ctx orelse &http_thread.https_context;
} else {
return &http_thread.http_context;
}
}
// lowercase hash header names so that we can be sure
pub fn hashHeaderName(name: string) u64 {
var hasher = std.hash.Wyhash.init(0);
@@ -823,7 +805,6 @@ pub fn doRedirect(
this.flags.did_have_handshaking_error and !this.flags.reject_unauthorized,
this.connected_url.hostname,
this.connected_url.getPortAuto(),
this.tls_props,
);
} else {
NewHTTPContext(is_ssl).closeSocket(socket);
@@ -961,13 +942,12 @@ fn printResponse(response: picohttp.Response) void {
pub fn onPreconnect(this: *HTTPClient, comptime is_ssl: bool, socket: NewHTTPContext(is_ssl).HTTPSocket) void {
log("onPreconnect({})", .{this.url});
this.unregisterAbortTracker();
const ctx = this.getSslCtx(is_ssl);
const ctx = if (comptime is_ssl) &http_thread.https_context else &http_thread.http_context;
ctx.releaseSocket(
socket,
this.flags.did_have_handshaking_error and !this.flags.reject_unauthorized,
this.url.hostname,
this.url.getPortAuto(),
this.tls_props,
);
this.state.reset(this.allocator);
@@ -1240,7 +1220,7 @@ pub fn onWritable(this: *HTTPClient, comptime is_first_call: bool, comptime is_s
this.state.request_stage = .body;
if (this.flags.is_streaming_request_body) {
// lets signal to start streaming the body
this.progressUpdate(is_ssl, this.getSslCtx(is_ssl), socket);
this.progressUpdate(is_ssl, if (is_ssl) &http_thread.https_context else &http_thread.http_context, socket);
}
}
return;
@@ -1253,7 +1233,7 @@ pub fn onWritable(this: *HTTPClient, comptime is_first_call: bool, comptime is_s
this.state.request_stage = .body;
if (this.flags.is_streaming_request_body) {
// lets signal to start streaming the body
this.progressUpdate(is_ssl, this.getSslCtx(is_ssl), socket);
this.progressUpdate(is_ssl, if (is_ssl) &http_thread.https_context else &http_thread.http_context, socket);
}
}
assert(
@@ -1408,7 +1388,7 @@ pub fn onWritable(this: *HTTPClient, comptime is_first_call: bool, comptime is_s
this.state.request_stage = .proxy_body;
if (this.flags.is_streaming_request_body) {
// lets signal to start streaming the body
this.progressUpdate(is_ssl, this.getSslCtx(is_ssl), socket);
this.progressUpdate(is_ssl, if (is_ssl) &http_thread.https_context else &http_thread.http_context, socket);
}
assert(this.state.request_body.len > 0);
@@ -1799,7 +1779,7 @@ pub fn drainResponseBody(this: *HTTPClient, comptime is_ssl: bool, socket: NewHT
return;
}
this.sendProgressUpdateWithoutStageCheck(is_ssl, this.getSslCtx(is_ssl), socket);
this.sendProgressUpdateWithoutStageCheck(is_ssl, http_thread.context(is_ssl), socket);
}
fn sendProgressUpdateWithoutStageCheck(this: *HTTPClient, comptime is_ssl: bool, ctx: *NewHTTPContext(is_ssl), socket: NewHTTPContext(is_ssl).HTTPSocket) void {
@@ -1828,7 +1808,6 @@ fn sendProgressUpdateWithoutStageCheck(this: *HTTPClient, comptime is_ssl: bool,
this.flags.did_have_handshaking_error and !this.flags.reject_unauthorized,
this.connected_url.hostname,
this.connected_url.getPortAuto(),
this.tls_props,
);
} else {
NewHTTPContext(is_ssl).closeSocket(socket);

View File

@@ -8,11 +8,6 @@ pub fn NewHTTPContext(comptime ssl: bool) type {
port: u16 = 0,
/// If you set `rejectUnauthorized` to `false`, the connection fails to verify,
did_have_handshaking_error_while_reject_unauthorized_is_false: bool = false,
/// The interned SSLConfig this socket was created with (null = default context).
/// Holds a ref while the socket is in the keepalive pool.
ssl_config: ?*SSLConfig = null,
/// The context that owns this pooled socket's memory (for returning to correct pool).
owner: *Context,
};
pub fn markTaggedSocketAsDead(socket: HTTPSocket, tagged: ActiveSocket) void {
@@ -84,28 +79,6 @@ pub fn NewHTTPContext(comptime ssl: bool) type {
}
pub fn deinit(this: *@This()) void {
// Replace callbacks with no-ops first to avoid UAF when closing sockets.
this.us_socket_context.cleanCallbacks(ssl);
// Drain pooled keepalive sockets: deref their ssl_config and force-close.
// Must force-close (code != 0) because SSL clean shutdown (code=0) requires a
// shutdown handshake with the peer, which won't complete during eviction.
// Without force-close, the socket stays linked and the context refcount never
// reaches 0, leaking the SSL_CTX.
if (comptime ssl) {
var iter = this.pending_sockets.used.iterator(.{ .kind = .set });
while (iter.next()) |idx| {
const pooled = this.pending_sockets.at(@intCast(idx));
if (pooled.ssl_config) |config| {
config.deref();
pooled.ssl_config = null;
}
pooled.http_socket.close(.failure);
}
}
// Use deferred free pattern (via nextTick) to avoid freeing the uSockets
// context while close callbacks may still reference it.
this.us_socket_context.deinit(ssl);
bun.default_allocator.destroy(this);
}
@@ -188,7 +161,7 @@ pub fn NewHTTPContext(comptime ssl: bool) type {
/// If `did_have_handshaking_error_while_reject_unauthorized_is_false`
/// is set, then we can only reuse the socket for HTTP Keep Alive if
/// `reject_unauthorized` is set to `false`.
pub fn releaseSocket(this: *@This(), socket: HTTPSocket, did_have_handshaking_error_while_reject_unauthorized_is_false: bool, hostname: []const u8, port: u16, ssl_config: ?*SSLConfig) void {
pub fn releaseSocket(this: *@This(), socket: HTTPSocket, did_have_handshaking_error_while_reject_unauthorized_is_false: bool, hostname: []const u8, port: u16) void {
// log("releaseSocket(0x{f})", .{bun.fmt.hexIntUpper(@intFromPtr(socket.socket))});
if (comptime Environment.allow_assert) {
@@ -213,12 +186,6 @@ pub fn NewHTTPContext(comptime ssl: bool) type {
@memcpy(pending.hostname_buf[0..hostname.len], hostname);
pending.hostname_len = @as(u8, @truncate(hostname.len));
pending.port = port;
pending.owner = this;
// Hold a ref on ssl_config while it's in the keepalive pool
pending.ssl_config = ssl_config;
if (ssl_config) |config| {
config.ref();
}
log("Keep-Alive release {s}:{d}", .{
hostname,
@@ -332,12 +299,7 @@ pub fn NewHTTPContext(comptime ssl: bool) type {
}
fn addMemoryBackToPool(pooled: *PooledSocket) void {
// Release the ssl_config ref held by this pooled socket
if (pooled.ssl_config) |config| {
config.deref();
pooled.ssl_config = null;
}
assert(pooled.owner.pending_sockets.put(pooled));
assert(context().pending_sockets.put(pooled));
}
pub fn onData(
@@ -350,7 +312,7 @@ pub fn NewHTTPContext(comptime ssl: bool) type {
return client.onData(
comptime ssl,
buf,
client.getSslCtx(ssl),
if (comptime ssl) &bun.http.http_thread.https_context else &bun.http.http_thread.http_context,
socket,
);
} else if (tagged.is(PooledSocket)) {
@@ -430,7 +392,7 @@ pub fn NewHTTPContext(comptime ssl: bool) type {
}
};
fn existingSocket(this: *@This(), reject_unauthorized: bool, hostname: []const u8, port: u16, ssl_config: ?*SSLConfig) ?HTTPSocket {
fn existingSocket(this: *@This(), reject_unauthorized: bool, hostname: []const u8, port: u16) ?HTTPSocket {
if (hostname.len > MAX_KEEPALIVE_HOSTNAME)
return null;
@@ -442,11 +404,6 @@ pub fn NewHTTPContext(comptime ssl: bool) type {
continue;
}
// Match ssl_config by pointer equality (interned configs)
if (socket.ssl_config != ssl_config) {
continue;
}
if (socket.did_have_handshaking_error_while_reject_unauthorized_is_false and reject_unauthorized) {
continue;
}
@@ -464,12 +421,7 @@ pub fn NewHTTPContext(comptime ssl: bool) type {
continue;
}
// Release the pooled socket's ssl_config ref (caller has its own ref)
if (socket.ssl_config) |config| {
config.deref();
socket.ssl_config = null;
}
assert(this.pending_sockets.put(socket));
assert(context().pending_sockets.put(socket));
log("+ Keep-Alive reuse {s}:{d}", .{ hostname, port });
return http_socket;
}
@@ -500,7 +452,7 @@ pub fn NewHTTPContext(comptime ssl: bool) type {
client.connected_url.hostname = hostname;
if (client.isKeepAlivePossible()) {
if (this.existingSocket(client.flags.reject_unauthorized, hostname, port, client.tls_props)) |sock| {
if (this.existingSocket(client.flags.reject_unauthorized, hostname, port)) |sock| {
if (sock.ext(**anyopaque)) |ctx| {
ctx.* = bun.cast(**anyopaque, ActiveSocket.init(client).ptr());
}
@@ -547,7 +499,6 @@ const assert = bun.assert;
const strings = bun.strings;
const uws = bun.uws;
const BoringSSL = bun.BoringSSL.c;
const SSLConfig = bun.api.server.ServerConfig.SSLConfig;
const HTTPClient = bun.http;
const InitError = HTTPClient.InitError;

View File

@@ -1,15 +1,6 @@
const HTTPThread = @This();
/// SSL context cache keyed by interned SSLConfig pointer.
/// Since configs are interned via SSLConfig.GlobalRegistry, pointer equality
/// is sufficient for lookup. Each entry holds a ref on its SSLConfig.
const SslContextCacheEntry = struct {
ctx: *NewHTTPContext(true),
last_used_ns: u64,
};
const ssl_context_cache_max_size = 60;
const ssl_context_cache_ttl_ns = 30 * std.time.ns_per_min;
var custom_ssl_context_map = std.AutoArrayHashMap(*SSLConfig, SslContextCacheEntry).init(bun.default_allocator);
var custom_ssl_context_map = std.AutoArrayHashMap(*SSLConfig, *NewHTTPContext(true)).init(bun.default_allocator);
loop: *jsc.MiniEventLoop,
http_context: NewHTTPContext(false),
@@ -235,33 +226,32 @@ pub fn connect(this: *@This(), client: *HTTPClient, comptime is_ssl: bool) !NewH
if (comptime is_ssl) {
const needs_own_context = client.tls_props != null and client.tls_props.?.requires_custom_request_ctx;
if (needs_own_context) {
const requested_config = client.tls_props.?;
// Evict stale entries from the cache
evictStaleSslContexts(this);
// Look up by pointer equality (configs are interned)
if (custom_ssl_context_map.getPtr(requested_config)) |entry| {
// Cache hit - reuse existing SSL context
entry.last_used_ns = this.timer.read();
client.custom_ssl_ctx = entry.ctx;
// Keepalive is now supported for custom SSL contexts
if (client.http_proxy) |url| {
return try entry.ctx.connect(client, url.hostname, url.getPortAuto());
} else {
return try entry.ctx.connect(client, client.url.hostname, client.url.getPortAuto());
var requested_config = client.tls_props.?;
for (custom_ssl_context_map.keys()) |other_config| {
if (requested_config.isSame(other_config)) {
// we free the callers config since we have a existing one
if (requested_config != client.tls_props) {
requested_config.deinit();
bun.default_allocator.destroy(requested_config);
}
client.tls_props = other_config;
if (client.http_proxy) |url| {
return try custom_ssl_context_map.get(other_config).?.connect(client, url.hostname, url.getPortAuto());
} else {
return try custom_ssl_context_map.get(other_config).?.connect(client, client.url.hostname, client.url.getPortAuto());
}
}
}
// Cache miss - create new SSL context
// we need the config so dont free it
var custom_context = try bun.default_allocator.create(NewHTTPContext(is_ssl));
custom_context.* = .{
.pending_sockets = NewHTTPContext(is_ssl).PooledSocketHiveAllocator.empty,
.us_socket_context = undefined,
};
custom_context.initWithClientConfig(client) catch |err| {
client.tls_props = null;
requested_config.deinit();
bun.default_allocator.destroy(requested_config);
bun.default_allocator.destroy(custom_context);
// TODO: these error names reach js. figure out how they should be handled
return switch (err) {
error.FailedToOpenSocket => |e| e,
error.InvalidCA => error.FailedToOpenSocket,
@@ -269,25 +259,14 @@ pub fn connect(this: *@This(), client: *HTTPClient, comptime is_ssl: bool) !NewH
error.LoadCAFile => error.FailedToOpenSocket,
};
};
// Hold a ref on the config for the cache entry
requested_config.ref();
const now = this.timer.read();
bun.handleOom(custom_ssl_context_map.put(requested_config, .{
.ctx = custom_context,
.last_used_ns = now,
}));
// Enforce max cache size - evict oldest entry
if (custom_ssl_context_map.count() > ssl_context_cache_max_size) {
evictOldestSslContext();
}
client.custom_ssl_ctx = custom_context;
// Keepalive is now supported for custom SSL contexts
try custom_ssl_context_map.put(requested_config, custom_context);
// We might deinit the socket context, so we disable keepalive to make sure we don't
// free it while in use.
client.flags.disable_keepalive = true;
if (client.http_proxy) |url| {
// https://github.com/oven-sh/bun/issues/11343
if (url.protocol.len == 0 or strings.eqlComptime(url.protocol, "https") or strings.eqlComptime(url.protocol, "http")) {
return try custom_context.connect(client, url.hostname, url.getPortAuto());
return try this.context(is_ssl).connect(client, url.hostname, url.getPortAuto());
}
return error.UnsupportedProxyProtocol;
}
@@ -310,41 +289,6 @@ pub fn context(this: *@This(), comptime is_ssl: bool) *NewHTTPContext(is_ssl) {
return if (is_ssl) &this.https_context else &this.http_context;
}
/// Evict SSL context cache entries that haven't been used for ssl_context_cache_ttl_ns.
fn evictStaleSslContexts(this: *@This()) void {
const now = this.timer.read();
var i: usize = 0;
while (i < custom_ssl_context_map.count()) {
const entry = custom_ssl_context_map.values()[i];
if (now -| entry.last_used_ns > ssl_context_cache_ttl_ns) {
const config = custom_ssl_context_map.keys()[i];
custom_ssl_context_map.swapRemoveAt(i);
entry.ctx.deinit();
config.deref();
} else {
i += 1;
}
}
}
/// Evict the least-recently-used SSL context cache entry.
fn evictOldestSslContext() void {
if (custom_ssl_context_map.count() == 0) return;
var oldest_idx: usize = 0;
var oldest_time: u64 = std.math.maxInt(u64);
for (custom_ssl_context_map.values(), 0..) |entry, i| {
if (entry.last_used_ns < oldest_time) {
oldest_time = entry.last_used_ns;
oldest_idx = i;
}
}
const entry = custom_ssl_context_map.values()[oldest_idx];
const config = custom_ssl_context_map.keys()[oldest_idx];
custom_ssl_context_map.swapRemoveAt(oldest_idx);
entry.ctx.deinit();
config.deref();
}
fn drainQueuedShutdowns(this: *@This()) void {
while (true) {
// socket.close() can potentially be slow

View File

@@ -26,7 +26,6 @@ const SymbolDispose = Symbol.dispose;
const PromisePrototypeThen = $Promise.prototype.$then;
let addAbortListener;
let AsyncLocalStorage;
function isRequest(stream) {
return stream.setHeader && typeof stream.abort === "function";
@@ -46,8 +45,7 @@ function eos(stream, options, callback) {
validateFunction(callback, "callback");
validateAbortSignal(options.signal, "options.signal");
AsyncLocalStorage ??= require("node:async_hooks").AsyncLocalStorage;
callback = once(AsyncLocalStorage.bind(callback));
callback = once(callback);
if (isReadableStream(stream) || isWritableStream(stream)) {
return eosWeb(stream, options, callback);

File diff suppressed because it is too large Load Diff

View File

@@ -257,7 +257,6 @@ pub const Runtime = struct {
.emit_decorator_metadata,
.standard_decorators,
.lower_using,
.repl_mode,
// note that we do not include .inject_jest_globals, as we bail out of the cache entirely if this is true
};

View File

@@ -44,7 +44,7 @@ pub fn start(this: *@This()) Yield {
break;
}
const maybe1 = iter.next() orelse return this.fail(Builtin.Kind.usageString(.seq));
const maybe1 = iter.next().?;
const int1 = std.fmt.parseFloat(f32, bun.sliceTo(maybe1, 0)) catch return this.fail("seq: invalid argument\n");
if (!std.math.isFinite(int1)) return this.fail("seq: invalid argument\n");
this._end = int1;

View File

@@ -2387,36 +2387,6 @@ pub const CodePoint = i32;
const string = []const u8;
/// SIMD-accelerated iterator that yields slices of text between ANSI escape sequences.
/// The C++ side uses ANSI::findEscapeCharacter (SIMD) and ANSI::consumeANSI.
pub const ANSIIterator = extern struct {
input: [*]const u8,
input_len: usize,
cursor: usize,
slice_ptr: ?[*]const u8,
slice_len: usize,
pub fn init(input: []const u8) ANSIIterator {
return .{
.input = input.ptr,
.input_len = input.len,
.cursor = 0,
.slice_ptr = null,
.slice_len = 0,
};
}
/// Returns the next slice of non-ANSI text, or null when done.
pub fn next(self: *ANSIIterator) ?[]const u8 {
if (Bun__ANSI__next(self)) {
return (self.slice_ptr orelse return null)[0..self.slice_len];
}
return null;
}
extern fn Bun__ANSI__next(it: *ANSIIterator) bool;
};
const escapeHTML_ = @import("./immutable/escapeHTML.zig");
const escapeRegExp_ = @import("./escapeRegExp.zig");
const paths_ = @import("./immutable/paths.zig");

View File

@@ -1,11 +0,0 @@
pub const Mode = enum(c_int) {
normal = 0,
raw = 1,
io = 2,
};
pub fn setMode(fd: c_int, mode: Mode) c_int {
return Bun__ttySetMode(fd, @intFromEnum(mode));
}
extern fn Bun__ttySetMode(fd: c_int, mode: c_int) c_int;

View File

@@ -168,40 +168,6 @@ console.log("PRELOAD");
},
});
// Regression test: standalone workers must not load .env when autoloadDotenv is disabled
itBundled("compile/AutoloadDotenvDisabledWorkerCLI", {
compile: {
autoloadDotenv: false,
},
backend: "cli",
files: {
"/entry.ts": /* js */ `
import { rmSync } from "fs";
rmSync("./worker.ts", { force: true });
const worker = new Worker("./worker.ts");
console.log(await new Promise(resolve => {
worker.onmessage = event => resolve(event.data);
}));
worker.terminate();
`,
"/worker.ts": /* js */ `
postMessage(process.env.TEST_VAR || "not found");
`,
},
entryPointsRaw: ["./entry.ts", "./worker.ts"],
outfile: "dist/out",
runtimeFiles: {
"/.env": `TEST_VAR=from_dotenv`,
},
run: {
stdout: "not found",
file: "dist/out",
setCwd: true,
},
});
// Test CLI backend with autoloadDotenv: true
itBundled("compile/AutoloadDotenvEnabledCLI", {
compile: {

View File

@@ -0,0 +1,12 @@
import { expect, test } from "bun:test";
import "harness";
import { isArm64, isMusl } from "harness";

// Smoke test: `bun repl -e "process.exit(0)"` should run and exit cleanly.
// https://github.com/oven-sh/bun/issues/12070
test.skipIf(
  // swc, which bun-repl uses, published a glibc build for arm64 musl
  // and so it crashes on process.exit.
  isMusl && isArm64,
)("bun repl", () => {
  // `toRun` is a harness matcher that spawns bun with these argv entries
  // and asserts a zero exit code.
  expect(["repl", "-e", "process.exit(0)"]).toRun();
});

View File

@@ -129,12 +129,6 @@ describe.concurrent(() => {
"name": "bun-windows-x64-baseline.zip",
"browser_download_url": `https://pub-5e11e972747a44bf9aaf9394f185a982.r2.dev/releases/${tagName}/bun-windows-x64-baseline.zip`,
},
{
"url": "foo",
"content_type": "application/zip",
"name": "bun-windows-aarch64.zip",
"browser_download_url": `https://pub-5e11e972747a44bf9aaf9394f185a982.r2.dev/releases/${tagName}/bun-windows-aarch64.zip`,
},
{
"url": "foo",
"content_type": "application/zip",
@@ -147,12 +141,6 @@ describe.concurrent(() => {
"name": "bun-linux-x64-baseline.zip",
"browser_download_url": `https://pub-5e11e972747a44bf9aaf9394f185a982.r2.dev/releases/${tagName}/bun-linux-x64-baseline.zip`,
},
{
"url": "foo",
"content_type": "application/zip",
"name": "bun-linux-aarch64.zip",
"browser_download_url": `https://pub-5e11e972747a44bf9aaf9394f185a982.r2.dev/releases/${tagName}/bun-linux-aarch64.zip`,
},
{
"url": "foo",
"content_type": "application/zip",

View File

@@ -1,74 +0,0 @@
// Fixture for TLS keepalive memory leak detection.
// Spawned as a subprocess with --smol for clean memory measurement.
//
// Usage: bun --smol tls-keepalive-leak-fixture.js
// Env: TLS_CERT, TLS_KEY - PEM cert/key for the server
//      NUM_REQUESTS - number of requests to make (default 50000)
//      MODE - "same" (same TLS config) or "distinct" (unique configs)
const cert = process.env.TLS_CERT;
const key = process.env.TLS_KEY;
const numRequests = parseInt(process.env.NUM_REQUESTS || "50000", 10);
const mode = process.env.MODE || "same";
if (!cert || !key) {
  throw new Error("TLS_CERT and TLS_KEY env vars required");
}
// Local HTTPS server to fetch against; `using` closes it on scope exit.
using server = Bun.serve({
  port: 0,
  tls: { cert, key },
  hostname: "127.0.0.1",
  fetch() {
    return new Response("ok");
  },
});
const url = `https://127.0.0.1:${server.port}`;
// Warmup: let allocators/caches reach steady state before measuring RSS.
for (let i = 0; i < 20_000; i++) {
  await fetch(url, {
    tls: { ca: cert, rejectUnauthorized: false },
    keepalive: true,
  }).then(r => r.text());
}
Bun.gc(true);
// Baseline RSS captured after warmup + forced GC; growth is measured from here.
const baselineRss = process.memoryUsage.rss();
const requests = []; // NOTE(review): unused — candidate for removal
if (mode === "same") {
  // All requests use the same TLS config — tests SSLConfig dedup
  const tlsOpts = { ca: cert, rejectUnauthorized: false };
  for (let i = 0; i < numRequests; i++) {
    await fetch(url, { tls: tlsOpts, keepalive: true }).then(r => r.text());
  }
} else if (mode === "distinct") {
  // Each request uses a unique TLS config — tests cache eviction
  for (let i = 0; i < numRequests; i++) {
    await fetch(url, {
      tls: { ca: cert, rejectUnauthorized: false, serverName: `host-${i}.example.com` },
      keepalive: true,
    }).then(r => r.text());
  }
}
// Allow the HTTP thread to process deferred SSL context frees
await Bun.sleep(100);
Bun.gc(true);
await Bun.sleep(100);
Bun.gc(true);
const finalRss = process.memoryUsage.rss();
const growthMB = (finalRss - baselineRss) / (1024 * 1024);
// Output as JSON for the parent test to parse
console.log(
  JSON.stringify({
    baselineRss,
    finalRss,
    growthMB: Math.round(growthMB * 100) / 100,
    numRequests,
    mode,
  }),
);

View File

@@ -1,167 +0,0 @@
import { describe, expect, setDefaultTimeout, test } from "bun:test";
import { bunEnv, bunExe, isASAN, tls as validTls } from "harness";
import { join } from "node:path";
setDefaultTimeout(30_000);
// These tests infer connection reuse from the client's ephemeral port as
// reported by server.requestIP(): a reused keepalive connection keeps the
// same client port, while a fresh connection gets a new one.
describe("TLS keepalive for custom SSL configs", () => {
  test("keepalive reuses connections with same TLS config", async () => {
    using server = Bun.serve({
      port: 0,
      tls: validTls,
      hostname: "127.0.0.1",
      fetch(req, server) {
        // Echo back the client's source port so the test can detect reuse.
        const ip = server.requestIP(req);
        return new Response(String(ip?.port ?? 0));
      },
    });
    const url = `https://127.0.0.1:${server.port}`;
    const tlsOpts = { ca: validTls.cert, rejectUnauthorized: false };
    // Make sequential requests with keepalive enabled.
    // With our fix: connections reuse → same client port.
    // Without fix: disable_keepalive=true → new connection each time → different ports.
    const ports: number[] = [];
    for (let i = 0; i < 6; i++) {
      const res = await fetch(url, { tls: tlsOpts, keepalive: true });
      ports.push(parseInt(await res.text(), 10));
    }
    const uniquePorts = new Set(ports);
    // Keepalive working: at most 2 unique ports (allowing one reconnect)
    expect(uniquePorts.size).toBeLessThanOrEqual(2);
  });

  test("different TLS configs use separate connections", async () => {
    using server = Bun.serve({
      port: 0,
      tls: validTls,
      hostname: "127.0.0.1",
      fetch(req, server) {
        const ip = server.requestIP(req);
        return new Response(String(ip?.port ?? 0));
      },
    });
    const url = `https://127.0.0.1:${server.port}`;
    // Two configs that differ (serverName makes them different SSLConfigs)
    const tlsA = { ca: validTls.cert, rejectUnauthorized: false };
    const tlsB = { ca: validTls.cert, rejectUnauthorized: false, serverName: "127.0.0.1" };
    const resA = await fetch(url, { tls: tlsA, keepalive: true });
    const portA = parseInt(await resA.text(), 10);
    const resB = await fetch(url, { tls: tlsB, keepalive: true });
    const portB = parseInt(await resB.text(), 10);
    // Different SSL configs must not share keepalive connections
    expect(portA).not.toBe(portB);
  });

  test("stress test - many sequential requests reuse connections", async () => {
    using server = Bun.serve({
      port: 0,
      tls: validTls,
      hostname: "127.0.0.1",
      fetch(req, server) {
        const ip = server.requestIP(req);
        return new Response(String(ip?.port ?? 0));
      },
    });
    const url = `https://127.0.0.1:${server.port}`;
    const tlsOpts = { ca: validTls.cert, rejectUnauthorized: false };
    const ports: number[] = [];
    for (let i = 0; i < 50; i++) {
      const res = await fetch(url, { tls: tlsOpts, keepalive: true });
      ports.push(parseInt(await res.text(), 10));
    }
    const uniquePorts = new Set(ports);
    // 50 requests through keepalive should use very few connections
    expect(uniquePorts.size).toBeLessThanOrEqual(3);
  });

  test("keepalive disabled creates new connections each time", async () => {
    using server = Bun.serve({
      port: 0,
      tls: validTls,
      hostname: "127.0.0.1",
      fetch(req, server) {
        const ip = server.requestIP(req);
        return new Response(String(ip?.port ?? 0));
      },
    });
    const url = `https://127.0.0.1:${server.port}`;
    const tlsOpts = { ca: validTls.cert, rejectUnauthorized: false };
    // With keepalive explicitly disabled, each request should open a new connection
    const ports: number[] = [];
    for (let i = 0; i < 5; i++) {
      const res = await fetch(url, { tls: tlsOpts, keepalive: false });
      ports.push(parseInt(await res.text(), 10));
    }
    const uniquePorts = new Set(ports);
    // Every request should use a different connection → different port
    expect(uniquePorts.size).toBeGreaterThan(1);
  });
});
describe.skipIf(isASAN)("TLS custom config memory leak detection", () => {
  test("repeated fetches with same custom TLS config do not leak memory", async () => {
    await using proc = Bun.spawn({
      cmd: [bunExe(), "--smol", join(import.meta.dir, "tls-keepalive-leak-fixture.js")],
      env: {
        ...bunEnv,
        TLS_CERT: validTls.cert,
        TLS_KEY: validTls.key,
        NUM_REQUESTS: "100000",
        MODE: "same",
      },
      stdout: "pipe",
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    // Surface stderr and assert the exit code BEFORE parsing stdout: if the
    // fixture crashed, JSON.parse on empty output would throw first and mask
    // the real failure.
    if (exitCode !== 0) {
      console.error(stderr);
    }
    expect(exitCode).toBe(0);
    const result = JSON.parse(stdout.trim());
    console.log(`Same config: ${result.numRequests} requests, growth: ${result.growthMB} MB`);
    expect(result.growthMB).toBeLessThan(50);
  });
  test("many distinct TLS configs stay bounded by cache eviction", async () => {
    await using proc = Bun.spawn({
      cmd: [bunExe(), "--smol", join(import.meta.dir, "tls-keepalive-leak-fixture.js")],
      env: {
        ...bunEnv,
        TLS_CERT: validTls.cert,
        TLS_KEY: validTls.key,
        NUM_REQUESTS: "200",
        MODE: "distinct",
      },
      stdout: "pipe",
      stderr: "pipe",
    });
    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
    // Same ordering rationale as above: diagnose a failed fixture before
    // attempting to parse its (possibly empty) JSON output.
    if (exitCode !== 0) {
      console.error(stderr);
    }
    expect(exitCode).toBe(0);
    const result = JSON.parse(stdout.trim());
    console.log(`Distinct configs: ${result.numRequests} configs, growth: ${result.growthMB} MB`);
    expect(result.growthMB).toBeLessThan(75 * (isASAN ? 8 : 1));
  });
});

File diff suppressed because it is too large Load Diff

View File

@@ -9,36 +9,6 @@ describe("seq", async () => {
.stderr("usage: seq [-w] [-f format] [-s string] [-t string] [first [incr]] last\n")
.runAsTest("prints usage");
// Every flags-only invocation (no numeric operands) must fail with the same
// usage message; hoist it so the expected text is stated exactly once.
const seqUsage = "usage: seq [-w] [-f format] [-s string] [-t string] [first [incr]] last\n";
TestBuilder.command`seq -w`
  .exitCode(1)
  .stdout("")
  .stderr(seqUsage)
  .runAsTest("prints usage when only -w flag given");
TestBuilder.command`seq --fixed-width`
  .exitCode(1)
  .stdout("")
  .stderr(seqUsage)
  .runAsTest("prints usage when only --fixed-width flag given");
TestBuilder.command`seq -s ,`
  .exitCode(1)
  .stdout("")
  .stderr(seqUsage)
  .runAsTest("prints usage when only -s flag given");
TestBuilder.command`seq -t ,`
  .exitCode(1)
  .stdout("")
  .stderr(seqUsage)
  .runAsTest("prints usage when only -t flag given");
TestBuilder.command`seq -w -s , -t .`
  .exitCode(1)
  .stdout("")
  .stderr(seqUsage)
  .runAsTest("prints usage when only flags given");
TestBuilder.command`seq -s`
.exitCode(1)
.stdout("")

View File

@@ -1,126 +0,0 @@
import { expect, test } from "bun:test";
import { bunEnv, bunExe, isWindows } from "harness";
// Regression test: Bun used to call shutdown(SHUT_WR) on the parent's read end
// of a SOCK_STREAM socketpair used for subprocess stdout. This sent a FIN to
// the child's write end, causing programs that poll stdout for readability
// (like Python's asyncio connect_write_pipe) to interpret it as "peer closed"
// and tear down their write transport.
//
// This broke all Python MCP servers using the model_context_protocol SDK
// whenever they took more than a few seconds to initialize.
// Regression guard: a delayed stdout write from the child must still reach the
// parent. If Bun shut down its read end of the stdout pipe (the old
// shutdown(SHUT_WR) bug described at the top of this file), a write after the
// idle period would be lost or fail.
test("subprocess stdout pipe stays writable after idle delay", async () => {
// Spawn a child that delays before writing to stdout.
await using proc = Bun.spawn({
cmd: [
bunExe(),
"-e",
`
// Wait 2 seconds, then write to stdout
await Bun.sleep(2000);
process.stdout.write("hello after delay\\n");
`,
],
stdin: "pipe",
stdout: "pipe",
stderr: "pipe",
env: bunEnv,
});
// Drain stdout/stderr concurrently with waiting for exit so the child can
// never block on a full pipe buffer.
const [stdout, stderr, exitCode] = await Promise.all([
new Response(proc.stdout).text(),
new Response(proc.stderr).text(),
proc.exited,
]);
expect(stdout).toBe("hello after delay\n");
expect(exitCode).toBe(0);
});
// Skip on Windows: Python's asyncio connect_write_pipe uses
// CreateIoCompletionPort internally, which doesn't work with
// subprocess pipe handles on Windows (OSError: [WinError 6]).
test.skipIf(isWindows)("subprocess stdout pipe works with Python asyncio connect_write_pipe", async () => {
// This is the exact scenario from the bug report: Python's asyncio
// connect_write_pipe registers stdout with epoll for read-readiness
// monitoring. If shutdown(SHUT_WR) was called on the parent's end,
// the child sees an immediate EPOLLIN event and interprets it as
// "connection closed".
// NOTE(review): requires a `python3` on PATH — the test fails with a spawn
// error rather than skipping when Python is not installed; confirm CI images
// provide it.
const pythonScript = `
import sys, asyncio, os

async def main():
    loop = asyncio.get_event_loop()
    w_transport, w_protocol = await loop.connect_write_pipe(
        asyncio.streams.FlowControlMixin, sys.stdout
    )
    writer = asyncio.StreamWriter(w_transport, w_protocol, None, loop)
    # Idle period - this is where the bug would manifest
    await asyncio.sleep(2)
    writer.write(b"hello from asyncio\\n")
    await writer.drain()
    writer.close()

asyncio.run(main())
`;
await using proc = Bun.spawn({
cmd: ["python3", "-c", pythonScript],
stdin: "pipe",
stdout: "pipe",
stderr: "pipe",
env: bunEnv,
});
// Drain both streams while waiting for exit; log stderr only on failure so
// a passing run stays quiet.
const [stdout, stderr, exitCode] = await Promise.all([
new Response(proc.stdout).text(),
new Response(proc.stderr).text(),
proc.exited,
]);
if (exitCode !== 0) {
console.error("stderr:", stderr);
}
expect(stdout).toBe("hello from asyncio\n");
expect(exitCode).toBe(0);
});
// Mirror of the stdout test above, for the opposite direction: the child's
// stdin must remain readable after it idles before its first read.
test("subprocess stdin pipe stays readable for child after idle delay", async () => {
// Also verify stdin works correctly after idle delay
await using proc = Bun.spawn({
cmd: [
bunExe(),
"-e",
`
// Wait, then read from stdin
await Bun.sleep(2000);
const reader = Bun.stdin.stream().getReader();
const { value } = await reader.read();
process.stdout.write(new TextDecoder().decode(value));
`,
],
stdin: "pipe",
stdout: "pipe",
stderr: "pipe",
env: bunEnv,
});
// Write to stdin after child is waiting
proc.stdin.write("hello via stdin\n");
proc.stdin.flush();
proc.stdin.end();
// Collect the echoed line; stderr is drained alongside so the child cannot
// stall on a full pipe.
const [stdout, stderr, exitCode] = await Promise.all([
new Response(proc.stdout).text(),
new Response(proc.stderr).text(),
proc.exited,
]);
expect(stdout).toBe("hello via stdin\n");
expect(exitCode).toBe(0);
});

View File

@@ -0,0 +1,153 @@
import { expect, test } from "bun:test";
import { bunEnv, bunExe, tempDir } from "harness";
test("basic echo", async () => {
  // The resolved value carries the full buffered result, so destructure it.
  const { stdout, exitCode, success, pid } = await Bun.spawnAndWait({
    cmd: [bunExe(), "-e", "console.log('hello')"],
    env: bunEnv,
  });
  expect(stdout.toString()).toBe("hello\n");
  expect(exitCode).toBe(0);
  expect(success).toBe(true);
  expect(pid).toBeGreaterThan(0);
});
test("stderr is captured by default", async () => {
  // stderr defaults to "pipe", so error output ends up in the result buffer.
  const { stderr, exitCode } = await Bun.spawnAndWait({
    cmd: [bunExe(), "-e", "console.error('err output')"],
    env: bunEnv,
  });
  expect(stderr.toString()).toBe("err output\n");
  expect(exitCode).toBe(0);
});
test("non-zero exit code", async () => {
  const result = await Bun.spawnAndWait({
    cmd: [bunExe(), "-e", "process.exit(42)"],
    env: bunEnv,
  });
  // A non-zero exit must also flip `success` to false.
  expect(result.success).toBe(false);
  expect(result.exitCode).toBe(42);
});
test("returns a promise that resolves", async () => {
  const pending = Bun.spawnAndWait({
    cmd: [bunExe(), "-e", "process.exit(0)"],
    env: bunEnv,
  });
  // The call itself yields a Promise (unlike spawnSync's plain object).
  expect(pending).toBeInstanceOf(Promise);
  expect((await pending).exitCode).toBe(0);
});
test("does not block the event loop", async () => {
  let timerFired = false;
  const shortTimer = new Promise<void>(resolve => {
    setTimeout(() => {
      timerFired = true;
      resolve();
    }, 1);
  });
  // The child sleeps for 100ms; because spawnAndWait does not block the
  // event loop, the 1ms timer fires long before the child exits.
  const [, result] = await Promise.all([
    shortTimer,
    Bun.spawnAndWait({
      cmd: [bunExe(), "-e", "await Bun.sleep(100)"],
      env: bunEnv,
    }),
  ]);
  expect(result.exitCode).toBe(0);
  expect(timerFired).toBe(true);
});
test("stdout and stderr are Buffers", async () => {
  // Both output streams resolve as Buffers, matching spawnSync's shape.
  const { stdout, stderr, exitCode } = await Bun.spawnAndWait({
    cmd: [bunExe(), "-e", "console.log('out'); console.error('err')"],
    env: bunEnv,
  });
  expect(Buffer.isBuffer(stdout)).toBe(true);
  expect(Buffer.isBuffer(stderr)).toBe(true);
  expect(stdout.toString()).toBe("out\n");
  expect(stderr.toString()).toBe("err\n");
  expect(exitCode).toBe(0);
});
test("resourceUsage is present", async () => {
  const { resourceUsage, exitCode } = await Bun.spawnAndWait({
    cmd: [bunExe(), "-e", ""],
    env: bunEnv,
  });
  expect(exitCode).toBe(0);
  // Resource accounting is populated even for a trivial child.
  expect(resourceUsage).toBeDefined();
  expect(typeof resourceUsage.maxRSS).toBe("number");
});
test("large output is buffered correctly", async () => {
  const size = 1024 * 1024; // 1MB
  // The child writes exactly `size` bytes of 'x'; all of it must survive
  // the async buffering path.
  const childScript = `process.stdout.write(Buffer.alloc(${size}, 'x').toString())`;
  const result = await Bun.spawnAndWait({
    cmd: [bunExe(), "-e", childScript],
    env: bunEnv,
  });
  expect(result.exitCode).toBe(0);
  expect(result.stdout.length).toBe(size);
});
test("signal code when killed", async () => {
  // The child terminates itself with SIGTERM; the buffered result must
  // report the failure. (The original test never actually checked the
  // signal code its name promises.)
  const result = await Bun.spawnAndWait({
    cmd: [bunExe(), "-e", "process.kill(process.pid, 'SIGTERM')"],
    env: bunEnv,
  });
  expect(result.success).toBe(false);
  expect(result.exitCode).not.toBe(0);
  // Windows emulates signals with plain exit codes, so only assert the
  // signal name on POSIX platforms.
  if (process.platform !== "win32") {
    expect(result.signalCode).toBe("SIGTERM");
  }
});
test("env option is forwarded", async () => {
  // The child should observe the extra variable injected into its env.
  const childEnv = { ...bunEnv, MY_TEST_VAR: "hello_from_env" };
  const result = await Bun.spawnAndWait({
    cmd: [bunExe(), "-e", "console.log(process.env.MY_TEST_VAR)"],
    env: childEnv,
  });
  expect(result.exitCode).toBe(0);
  expect(result.stdout.toString().trim()).toBe("hello_from_env");
});
test("cwd option is forwarded", async () => {
  using dir = tempDir("spawnAndWait-cwd", {});
  const expectedCwd = String(dir);
  // The child reports its working directory, which must match the temp dir.
  const result = await Bun.spawnAndWait({
    cmd: [bunExe(), "-e", "console.log(process.cwd())"],
    env: bunEnv,
    cwd: expectedCwd,
  });
  expect(result.exitCode).toBe(0);
  expect(result.stdout.toString().trim()).toBe(expectedCwd);
});
test("invalid command throws", () => {
  // spawnAndWait throws synchronously when the command is not found
  const spawnMissing = () => Bun.spawnAndWait(["this-command-does-not-exist-12345"]);
  expect(spawnMissing).toThrow();
});
test("array form works", async () => {
  // Overload: command array first, options object second.
  const options = { env: bunEnv };
  const result = await Bun.spawnAndWait([bunExe(), "-e", "console.log('array form')"], options);
  expect(result.exitCode).toBe(0);
  expect(result.stdout.toString()).toBe("array form\n");
});
test("object form with cmd works", async () => {
  // Overload: single options object carrying `cmd`.
  const result = await Bun.spawnAndWait({
    env: bunEnv,
    cmd: [bunExe(), "-e", "console.log('object form')"],
  });
  expect(result.exitCode).toBe(0);
  expect(result.stdout.toString()).toBe("object form\n");
});
test("empty stdout", async () => {
  // A child that writes nothing must still resolve with a (zero-length)
  // stdout buffer rather than undefined.
  const result = await Bun.spawnAndWait({
    cmd: [bunExe(), "-e", "process.exit(0)"],
    stdout: "pipe",
    env: bunEnv,
  });
  expect(result.exitCode).toBe(0);
  expect(result.stdout.length).toBe(0);
});

View File

@@ -1,50 +0,0 @@
import { describe, expect, test } from "bun:test";
import { bunEnv, bunExe } from "harness";
// Regression test: the native console.Console getter must survive a thrown
// StackOverflow from its lazy initializer without corrupting state or
// crashing the process.
describe("console.Console getter", () => {
test("handles exception from internal call without crashing", async () => {
// When the stack is nearly exhausted, accessing console.Console triggers
// profiledCall to createConsoleConstructor, which throws StackOverflow.
// The C++ getter must return early after throwException, not continue
// to putDirect with an invalid result.
await using proc = Bun.spawn({
cmd: [
bunExe(),
"-e",
`
// Exhaust most of the stack, then try to access console.Console
// createConsoleConstructor needs significant stack space (require calls etc.)
function exhaust() {
try {
exhaust();
} catch (e) {
// Near the stack limit - this access should throw cleanly, not crash
try {
void console.Console;
} catch (e2) {
// Expected: stack overflow, not a crash
}
}
}
exhaust();
// After stack recovery, console.Console should still work
const C = console.Console;
if (typeof C !== "function") {
process.exit(1);
}
console.log("OK");
`,
],
env: bunEnv,
stderr: "pipe",
});
// The child must exit cleanly and print OK; any crash surfaces as a panic
// or segfault message on stderr.
const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
expect(stderr).not.toContain("panic");
expect(stderr).not.toContain("Segmentation fault");
expect(stdout.trim()).toBe("OK");
expect(exitCode).toBe(0);
});
});

View File

@@ -90,7 +90,6 @@ test/cli/install/bun-lockb.test.ts
test/cli/install/bun-patch.test.ts
test/cli/install/bun-pm.test.ts
test/cli/install/bun-repl.test.ts
test/js/bun/repl/repl.test.ts
test/cli/install/bun-update.test.ts
test/cli/install/bun-workspaces.test.ts
test/cli/install/bunx.test.ts

View File

@@ -1,38 +0,0 @@
// Test for GitHub issue #26058: bun repl is slow
// This test verifies that `bun repl` now uses a built-in REPL instead of bunx bun-repl
import { spawnSync } from "bun";
import { describe, expect, test } from "bun:test";
import { bunEnv, bunExe } from "harness";
describe("issue #26058 - bun repl startup time", () => {
  test("bun repl starts without downloading packages", () => {
    // If `bun repl` still shelled out to bunx, the package manager would
    // print "Resolving dependencies" while fetching bun-repl. TERM=dumb and
    // a timeout keep the TTY-oriented REPL from hanging without input.
    const result = spawnSync({
      cmd: [bunExe(), "repl"],
      env: {
        ...bunEnv,
        TERM: "dumb",
      },
      stderr: "pipe",
      stdout: "pipe",
      stdin: "ignore",
      timeout: 3000,
    });
    const stdout = result.stdout?.toString() || "";
    const stderr = result.stderr?.toString() || "";
    // Neither stream may contain bunx/package-manager chatter.
    for (const stream of [stderr, stdout]) {
      expect(stream).not.toContain("Resolving dependencies");
    }
    expect(stderr).not.toContain("bun add");
    // The built-in REPL greets with "Welcome to Bun" even without a TTY.
    expect(stdout).toContain("Welcome to Bun");
  });
});

View File

@@ -1,71 +0,0 @@
import { describe, expect, test } from "bun:test";
import { tls as validTls } from "harness";
describe("mTLS SSLConfig keepalive (#27358)", () => {
  test("fetch with custom TLS reuses keepalive connections", async () => {
    // The server reports the client's ephemeral port; a shared TCP
    // connection shows up as a repeated port across requests.
    const clientPorts: number[] = [];
    using server = Bun.serve({
      port: 0,
      tls: validTls,
      hostname: "127.0.0.1",
      fetch(req, server) {
        const ip = server.requestIP(req);
        return new Response(String(ip?.port ?? 0));
      },
    });
    const url = `https://127.0.0.1:${server.port}`;
    const tlsOpts = { ca: validTls.cert, rejectUnauthorized: false };
    // Sequential keepalive requests: with the fix, custom-TLS requests share
    // a pooled connection; with the old disable_keepalive behavior each
    // request dialed a fresh connection and every port differed.
    const numRequests = 6;
    for (let i = 0; i < numRequests; i++) {
      const res = await fetch(url, { tls: tlsOpts, keepalive: true });
      clientPorts.push(parseInt(await res.text(), 10));
    }
    // The first request establishes the connection and later ones reuse it;
    // tolerate at most one reconnect over the run.
    expect(new Set(clientPorts).size).toBeLessThanOrEqual(2);
  });
  test("different custom TLS configs do NOT share keepalive connections", async () => {
    using server = Bun.serve({
      port: 0,
      tls: validTls,
      hostname: "127.0.0.1",
      fetch(req, server) {
        const ip = server.requestIP(req);
        return new Response(String(ip?.port ?? 0));
      },
    });
    const url = `https://127.0.0.1:${server.port}`;
    // Config A carries only the CA; config B adds an explicit serverName,
    // making it a distinct SSLConfig that must not share A's pool entry.
    const tlsA = { ca: validTls.cert, rejectUnauthorized: false };
    const tlsB = { ca: validTls.cert, rejectUnauthorized: false, serverName: "127.0.0.1" };
    const resA = await fetch(url, { tls: tlsA, keepalive: true });
    const portA = parseInt(await resA.text(), 10);
    const resB = await fetch(url, { tls: tlsB, keepalive: true });
    const portB = parseInt(await resB.text(), 10);
    // Different SSL contexts → different connections → different ports.
    expect(portA).not.toBe(portB);
  });
});

View File

@@ -1,47 +0,0 @@
import { expect, test } from "bun:test";
import { bunEnv, bunExe } from "harness";
// Regression test: the callback passed to stream.finished() must run inside
// the AsyncLocalStorage context that was active when finished() was called.
test("stream.finished callback preserves AsyncLocalStorage context", async () => {
await using proc = Bun.spawn({
cmd: [
bunExe(),
"-e",
`
const asyncHooks = require('async_hooks');
const http = require('http');
const finished = require('stream').finished;
const asyncLocalStorage = new asyncHooks.AsyncLocalStorage();
const store = { foo: 'bar' };
const server = http.createServer(function (req, res) {
asyncLocalStorage.run(store, function () {
finished(res, function () {
const value = asyncLocalStorage.getStore()?.foo;
if (value !== 'bar') {
console.log('FAIL: expected "bar" but got ' + value);
process.exitCode = 1;
} else {
console.log('PASS');
}
});
});
setTimeout(res.end.bind(res), 0);
}).listen(0, function () {
const port = this.address().port;
http.get('http://127.0.0.1:' + port, function onResponse(res) {
res.resume();
res.on('end', server.close.bind(server));
});
});
`,
],
env: bunEnv,
stderr: "pipe",
});
// The child prints PASS when the store survives into the finished callback;
// a lost context prints FAIL and sets a non-zero exit code.
const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
expect(stdout).toContain("PASS");
expect(exitCode).toBe(0);
});

View File

@@ -1,50 +0,0 @@
import { expect, test } from "bun:test";
import { bunEnv, bunExe, isWindows, tempDir } from "harness";
import { join } from "path";
// Regression test (#27431): a compiled Windows standalone binary containing a
// Worker must not crash when autoloadDotenv is disabled and a .env file is
// present in the working directory.
test.if(isWindows)("standalone worker does not crash when autoloadDotenv is disabled and .env exists", async () => {
const target = process.arch === "arm64" ? "bun-windows-aarch64" : "bun-windows-x64";
// Fixture: an entry that reads TEST_VAR (which only .env defines) and spawns
// a worker, plus a build script that compiles both with autoloadDotenv off.
using dir = tempDir("issue-27431", {
".env": "TEST_VAR=from_dotenv\n",
"entry.ts": 'console.log(process.env.TEST_VAR || "not found")\nnew Worker("./worker.ts")\n',
"worker.ts": "",
"build.ts": `
await Bun.build({
entrypoints: ["./entry.ts", "./worker.ts"],
compile: {
autoloadDotenv: false,
target: "${target}",
outfile: "./app.exe",
},
});
`,
});
// Step 1: compile the standalone executable; the build itself must succeed.
await using build = Bun.spawn({
cmd: [bunExe(), join(String(dir), "build.ts")],
env: bunEnv,
cwd: String(dir),
stdout: "pipe",
stderr: "pipe",
});
const [, buildStderr, buildExitCode] = await Promise.all([build.stdout.text(), build.stderr.text(), build.exited]);
expect(buildExitCode).toBe(0);
expect(buildStderr).toBe("");
// Step 2: run the binary from the directory containing .env. With dotenv
// autoload disabled, TEST_VAR must be absent ("not found") and spawning the
// worker must not crash the process.
await using proc = Bun.spawn({
cmd: [join(String(dir), "app.exe")],
env: bunEnv,
cwd: String(dir),
stdout: "pipe",
stderr: "pipe",
});
const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
expect(stdout).toContain("not found");
expect(exitCode).toBe(0);
expect(stderr).toBe("");
});