Compare commits

...

105 Commits

Author SHA1 Message Date
robobun
ddefa11070 fix(fs): handle '.' path normalization on Windows (#26634)
## Summary
- Fix path normalization for "." on Windows where `normalizeStringBuf`
was incorrectly stripping it to an empty string
- This caused `existsSync('.')`, `statSync('.')`, and other fs
operations to fail on Windows
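
A minimal sketch of the previously-failing calls (standard Bun/Node fs APIs, run from any Windows cwd):

```ts
import { existsSync, statSync } from "node:fs";
import { stat } from "node:fs/promises";

// On Windows these failed because "." was normalized to an empty string
existsSync("."); // now true
statSync("..");  // now returns Stats for the parent directory
await stat("."); // async variant exercised by the regression test
```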

## Test plan
- Added regression test `test/regression/issue/26631.test.ts` that tests
`existsSync`, `exists`, `statSync`, and `stat` for both `.` and `..`
paths
- All tests pass locally with `bun bd test
test/regression/issue/26631.test.ts`
- Verified code compiles on all platforms with `bun run zig:check-all`

Fixes #26631

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-02-01 00:33:59 -08:00
Dylan Conway
35f8154319 bump versions 2026-01-31 17:38:23 -08:00
robobun
9d68ec882a require --compile for ESM bytecode (#26624)
## Summary
- Add validation to require `--compile` when using ESM bytecode
- Update documentation to clarify ESM bytecode requirements

## Why
ESM module resolution is two-phase: (1) analyze imports/exports, (2)
evaluate. Without `--compile`, there's no `module_info` embedded, so JSC
must still parse the file for module analysis even with bytecode,
causing a double-parse deopt.

## Changes
- **CLI**: Error when `--bytecode --format=esm` is used without
`--compile`
- **JS API**: Error when `bytecode: true, format: 'esm'` is used without
`compile: true`
- **Docs**: Update bytecode.mdx, executables.mdx, index.mdx to clarify
requirements
- **Types**: Update JSDoc for bytecode option in bun.d.ts
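
A sketch of the JS API validation, assuming the `Bun.build` option names described above:

```ts
// now rejects with an error instead of silently double-parsing at runtime
await Bun.build({
  entrypoints: ["./test.js"],
  format: "esm",
  bytecode: true,
  // compile: true, // required for ESM bytecode
});
```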

## Test plan
```bash
# Should error
bun build ./test.js --bytecode --format=esm --outdir=./out
# error: ESM bytecode requires --compile. Use --format=cjs for bytecode without --compile.

# Should work
bun build ./test.js --bytecode --format=esm --compile --outfile=./mytest
bun build ./test.js --bytecode --format=cjs --outdir=./out
```

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-01-31 17:35:03 -08:00
Dylan Conway
1337f5dba4 add --cpu-prof-interval flag (#26620)
Adds `--cpu-prof-interval` to configure the CPU profiler sampling
interval in microseconds (default: 1000), matching Node.js's
`--cpu-prof-interval` flag.

```sh
bun --cpu-prof --cpu-prof-interval 500 index.js
```

- Parsed as `u32`, truncated to `c_int` when passed to JSC's
`SamplingProfiler::setTimingInterval`
- Invalid values silently fall back to the default (1000μs)
- Warns if used without `--cpu-prof` or `--cpu-prof-md`

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-31 16:59:03 -08:00
robobun
56b5be4ba4 fix(shell): prevent double-free during GC finalization (#26626)
## Summary

Fixes #26625

This fixes a segmentation fault that occurred on Windows x64 when the GC
finalizer tried to free shell interpreter resources that were already
partially freed during normal shell completion.

- Added explicit `cleanup_state` enum to track resource ownership state:
  - `needs_full_cleanup`: nothing cleaned up yet; the finalizer must clean everything
  - `runtime_cleaned`: `finish()` already cleaned IO/shell; the finalizer skips those
- Early return in `#derefRootShellAndIOIfNeeded()` when already cleaned
- Explicit state-based cleanup in `deinitFromFinalizer()`

The vulnerability existed on all platforms but was most reliably
triggered on Windows with high GC pressure (many concurrent shell
commands).
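
A minimal stress sketch of that trigger shape (hypothetical counts):

```ts
import { $ } from "bun";

// many concurrent shell commands create GC pressure while interpreters finish
await Promise.all(Array.from({ length: 100 }, () => $`echo hi`.quiet()));
Bun.gc(true); // force a collection so finalizers run against completed shells
```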

## Test plan

- [x] Build passes (`bun bd`)
- [x] New regression test added (`test/regression/issue/26625.test.ts`)
- [x] Existing shell tests pass (same 4 pre-existing failures, no new
failures)


🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2026-01-31 16:57:59 -08:00
Dylan Conway
6c119d608e Simplify bun run build:local to auto-build JSC (#26645)
## Summary

- `bun run build:local` now handles everything: configuring JSC,
building JSC, and building Bun in a single command on all platforms
(macOS, Linux, Windows). Previously required manually running `bun run
jsc:build:debug`, deleting a duplicate `InspectorProtocolObjects.h`
header, and then running the Bun build separately.
- Incremental JSC rebuilds: JSC is built via `add_custom_target` that
delegates to JSC's inner Ninja, which tracks WebKit source file changes
and only rebuilds what changed. `ninja -Cbuild/debug-local` also works
after the first build.
- Cross-platform support:
  - macOS: uses system ICU automatically
  - Linux: uses system ICU via `find_package` instead of requiring bundled static libs
  - Windows: builds ICU from source automatically (only when libs don't already exist), sets up static CRT and ICU naming conventions

### Changes
- cmake/tools/SetupWebKit.cmake: Replace the old WEBKIT_LOCAL block
(which just set include paths and assumed JSC was pre-built) with full
JSC configure + build integration for all platforms
- cmake/targets/BuildBun.cmake: Add jsc as a build dependency, use
system ICU on Linux for local builds, handle bmalloc linking for local
builds
- CONTRIBUTING.md / docs/project/contributing.mdx: Simplify "Building
WebKit locally" docs from ~15 lines of manual steps to 3 lines

## Test plan

- [x] macOS arm64: clean build, incremental rebuild, WebKit source
change rebuild
- [x] Windows x64: clean build with ICU, incremental rebuild with ICU
skip
- [x] Linux x64: build with system ICU via find_package
- [x] No duplicate InspectorProtocolObjects.h errors
- [x] build/debug-local/bun-debug --version works

Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude <noreply@anthropic.com>
2026-01-31 16:52:51 -08:00
Ciro Spaciari
a14a89ca95 fix(proxy): respect NO_PROXY for explicit proxy options in fetch and ws (#26608)
### What does this PR do?

Extract the NO_PROXY checking logic from `getHttpProxyFor` into a reusable `isNoProxy` method on the env `Loader`. This allows both `fetch()` and `WebSocket` to check `NO_PROXY` even when a proxy is explicitly provided via the `proxy` option (not just via the `http_proxy` env var).

Changes:
- env_loader.zig: Extract isNoProxy() from getHttpProxyFor()
- FetchTasklet.zig: Check isNoProxy() before using explicit proxy
- WebSocket.cpp: Check Bun__isNoProxy() before using explicit proxy
- virtual_machine_exports.zig: Export Bun__isNoProxy for C++ access
- Add NO_PROXY tests for both fetch and WebSocket proxy paths
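
A sketch of the behavior change (hypothetical hosts; `proxy` is the explicit fetch option):

```ts
process.env.NO_PROXY = "internal.example";

// Before: the explicit proxy was always used.
// After: NO_PROXY is honored even when a proxy is passed directly.
await fetch("http://internal.example/health", {
  proxy: "http://corp-proxy:8080", // bypassed for NO_PROXY-matched hosts
});
```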

### How did you verify your code works?
Tests

---------

Co-authored-by: claude[bot] <209825114+claude[bot]@users.noreply.github.com>
2026-01-30 16:20:45 -08:00
robobun
a5246344fa fix(types): Socket.reload() now correctly expects { socket: handler } (#26291)
## Summary
- Fix type definition for `Socket.reload()` to match runtime behavior
- The runtime expects `{ socket: handler }` but types previously
accepted just `handler`
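
A sketch of the corrected call shape (hypothetical handlers):

```ts
const socket = await Bun.connect({
  hostname: "localhost",
  port: 8080,
  socket: { data(socket, data) {} },
});

// The types previously allowed `socket.reload(handler)`; the runtime wants:
socket.reload({ socket: { data(socket, data) {} } });
```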

## Test plan
- [x] Added regression test `test/regression/issue/26290.test.ts`
- [x] Verified test passes with `bun bd test`

Fixes #26290

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Alistair Smith <hi@alistair.sh>
2026-01-30 13:23:04 -08:00
robobun
f648483fe7 fix(types): add missing SIMD variants to Bun.Build.CompileTarget type (#26248)
## Summary

- Adds missing SIMD variants to the `Build.Target` TypeScript type
- The runtime accepts targets like `bun-linux-x64-modern` but TypeScript
was rejecting them
- Generalized the type to use `${Architecture}` template where possible

## Test plan

- [x] Added regression test in `test/regression/issue/26247.test.ts`
that validates all valid target combinations type-check correctly
- [x] Verified with `bun bd test test/regression/issue/26247.test.ts`

Fixes #26247

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Alistair Smith <hi@alistair.sh>
2026-01-30 13:13:28 -08:00
ddmoney420
01fa61045f fix(types): add missing bun-linux-x64-${SIMD} compile target type (#26607)
## Summary

- Adds missing `bun-linux-x64-baseline` and `bun-linux-x64-modern`
compile target types
- These targets are supported by the Bun CLI but were missing from the
TypeScript type definitions

## Changes

Added `bun-linux-x64-${SIMD}` to the `CompileTarget` type union, which
expands to:
- `bun-linux-x64-baseline`
- `bun-linux-x64-modern`
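
A sketch of usage that now type-checks, assuming `Bun.build`'s `compile.target` option shape:

```ts
await Bun.build({
  entrypoints: ["./cli.ts"],
  compile: { target: "bun-linux-x64-modern" }, // previously rejected by the types
});
```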

## Test plan

- [x] TypeScript should now accept `target: 'bun-linux-x64-modern'`
without type errors

Closes #26247

🤖 Generated with [Claude Code](https://claude.com/claude-code)
2026-01-30 12:21:11 -08:00
Alistair Smith
71ce550cfa esm bytecode (#26402)
### What does this PR do?

### How did you verify your code works?

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: claude[bot] <209825114+claude[bot]@users.noreply.github.com>
2026-01-30 01:38:45 -08:00
robobun
8f61adf494 Harden chunked encoding parser (#26594)
## Summary
- Improve handling of fragmented chunk data in the HTTP parser
- Add test coverage for edge cases

## Test plan
- [x] New tests pass
- [x] Existing tests pass

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-30 01:18:39 -08:00
Dylan Conway
b4b7cc6d78 fix multi-run.test.ts on windows (#26590)
### What does this PR do?

fixes https://github.com/oven-sh/bun/issues/26597

### How did you verify your code works?

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-01-29 23:35:53 -08:00
SUZUKI Sosuke
3feea91087 ci: add QEMU JIT stress tests when WebKit is updated (#26589)
## Summary

Add a CI step that runs JSC JIT stress tests under QEMU when
`SetupWebKit.cmake` is modified. This complements #26571 (basic baseline
CPU verification) by also testing JIT-generated code.

## Motivation

PR #26571 added QEMU-based verification that catches illegal
instructions in:
- Startup code
- Static initialization
- Basic interpreter execution

However, JIT compilers (DFG, FTL, Wasm BBQ/OMG) generate code at runtime
that could emit AVX or LSE instructions even if the compiled binary
doesn't. The JSC stress tests from #26380 exercise all JIT tiers through
hot loops that trigger tier-up.

## How it works

1. Detects if `cmake/tools/SetupWebKit.cmake` is modified in the PR
2. If WebKit changes are detected, runs `verify-jit-stress-qemu.sh`
after the build
3. Executes all 78 JIT stress test fixtures under QEMU with restricted
CPU features:
   - x64: `qemu-x86_64 -cpu Nehalem` (SSE4.2, no AVX)
   - aarch64: `qemu-aarch64 -cpu cortex-a53` (ARMv8.0-A, no LSE)
4. Any SIGILL from JIT-generated code fails the build

## Platforms tested

| Target | CPU Model | What it catches |
|---|---|---|
| `linux-x64-baseline` | Nehalem | JIT emitting AVX/AVX2/AVX512 |
| `linux-x64-musl-baseline` | Nehalem | JIT emitting AVX/AVX2/AVX512 |
| `linux-aarch64` | Cortex-A53 | JIT emitting LSE atomics, SVE |
| `linux-aarch64-musl` | Cortex-A53 | JIT emitting LSE atomics, SVE |

## Timeout

The step has a 30-minute timeout since QEMU emulation is ~10-50x slower
than native. This only runs on WebKit update PRs, so it won't affect
most CI runs.

## Refs

- #26380 - Added JSC JIT stress tests
- #26571 - Added basic QEMU baseline verification
2026-01-29 21:12:36 -08:00
Jarred Sumner
bb4d5b9af5 feat(cli/run): add --parallel and --sequential for running multiple scripts with workspace support (#26551)
## Summary

Adds `bun run --parallel` and `bun run --sequential` — new flags for
running multiple package.json scripts concurrently or sequentially with
Foreman-style prefixed output. Includes full `--filter`/`--workspaces`
integration for running scripts across workspace packages.

### Usage

```bash
# Run "build" and "test" concurrently from the current package.json
bun run --parallel build test

# Run "build" and "test" sequentially with prefixed output
bun run --sequential build test

# Glob-matched script names
bun run --parallel "build:*"

# Run "build" in all workspace packages concurrently
bun run --parallel --filter '*' build

# Run "build" in all workspace packages sequentially
bun run --sequential --workspaces build

# Glob-matched scripts across all packages
bun run --parallel --filter '*' "build:*"

# Multiple scripts across all packages
bun run --parallel --filter '*' build lint test

# Continue running even if one package fails
bun run --parallel --no-exit-on-error --filter '*' test

# Skip packages missing the script
bun run --parallel --workspaces --if-present build
```

## How it works

### Output format

Each script's stdout/stderr is prefixed with a colored, padded label:

```
build | compiling...
test  | running suite...
lint  | checking files...
```

### Label format

- **Without `--filter`/`--workspaces`**: labels are just the script name
→ `build | output`
- **With `--filter`/`--workspaces`**: labels are `package:script` →
`pkg-a:build | output`
- **Fallback**: if a package.json has no `name` field, the relative path
from the workspace root is used (e.g., `packages/my-pkg:build`)

### Execution model

- **`--parallel`**: all scripts start immediately, output is interleaved
with prefixes
- **`--sequential`**: scripts run one at a time in order, each waiting
for the previous to finish
- **Pre/post scripts** (`prebuild`/`postbuild`) are grouped with their
main script and run in dependency order within each group
- By default, a failure kills all remaining scripts.
`--no-exit-on-error` lets all scripts finish.

### Workspace integration

The workspace branch in `multi_run.zig` uses a two-pass approach for
deterministic ordering:

1. **Collect**: iterate workspace packages using
`FilterArg.PackageFilterIterator` (same infrastructure as
`filter_run.zig`), filtering with `FilterArg.FilterSet`, collecting
matched packages with their scripts, PATH, and cwd.
2. **Sort**: sort matched packages by name (tiebreak by directory path)
for deterministic ordering — filesystem iteration order from the glob
walker is nondeterministic.
3. **Build configs**: for each sorted package, expand script names
(including globs like `build:*`) against that package's scripts map,
creating `ScriptConfig` entries with `pkg:script` labels and per-package
cwd/PATH.

### Behavioral consistency with `filter_run.zig`

| Behavior | `filter_run.zig` | `multi_run.zig` (this PR) |
|----------|-------------------|---------------------------|
| `--workspaces` skips root package | Yes | Yes |
| `--workspaces` errors on missing script | Yes | Yes |
| `--if-present` silently skips missing | Yes | Yes |
| `--filter` without `--workspaces` includes root | Yes (if matches) | Yes (if matches) |
| Pre/post script chains | Per-package | Per-package |
| Per-package cwd | Yes | Yes |
| Per-package PATH (`node_modules/.bin`) | Yes | Yes |

### Key implementation details

- Each workspace package script runs in its own package directory with
its own `node_modules/.bin` PATH
- `dirpath` from the glob walker is duped to avoid use-after-free when
the iterator's arena is freed between patterns
- `addScriptConfigs` takes an optional `label_prefix` parameter — `null`
for single-package mode, package name for workspace mode
- `MultiRunProcessHandle` is registered in the `ProcessExitHandler`
tagged pointer union in `process.zig`

## Files changed

| File | Change |
|------|--------|
| `src/cli/multi_run.zig` | New file: process management, output routing, workspace integration, dependency ordering |
| `src/cli.zig` | Dispatch to `MultiRun.run()` for `--parallel`/`--sequential`, new context fields |
| `src/cli/Arguments.zig` | Parse `--parallel`, `--sequential`, `--no-exit-on-error` flags |
| `src/bun.js/api/bun/process.zig` | Register `MultiRunProcessHandle` in `ProcessExitHandler` tagged pointer union |
| `test/cli/run/multi-run.test.ts` | 118 tests (102 core + 16 workspace integration) |
| `docs/pm/filter.mdx` | Document `--parallel`/`--sequential` + `--filter`/`--workspaces` combination |
| `docs/snippets/cli/run.mdx` | Add `--parallel`, `--sequential`, `--no-exit-on-error` parameter docs |

## Test plan

All 118 tests pass with debug build (`bun bd test
test/cli/run/multi-run.test.ts`). The 16 new workspace tests all fail
with system bun (`USE_SYSTEM_BUN=1`), confirming they test new
functionality.

### Workspace integration tests (16 tests)

1. `--parallel --filter='*'` runs script in all packages
2. `--parallel --filter='pkg-a'` runs only in matching package
3. `--parallel --workspaces` matches all workspace packages
4. `--parallel --filter='*'` with glob expands per-package scripts
5. `--sequential --filter='*'` runs in sequence (deterministic order)
6. Workspace + failure aborts other scripts
7. Workspace + `--no-exit-on-error` lets all finish
8. `--workspaces` skips root package
9. Each workspace script runs in its own package directory (cwd
verification)
10. Multiple script names across workspaces (`build` + `test`)
11. Pre/post scripts work per workspace package
12. `--filter` skips packages without the script (no error)
13. `--workspaces` errors when a package is missing the script
14. `--workspaces --if-present` skips missing scripts silently
15. Labels are padded correctly across workspace packages
16. Package without `name` field uses relative path as label

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Dylan Conway <dylan.conway567@gmail.com>
2026-01-29 20:20:39 -08:00
Dylan Conway
adc1a6b05c Fix aarch64 SIGILL: disable mimalloc LSE atomics + update WebKit + QEMU verification (#26586)
Fixes illegal instruction (SIGILL) crashes on ARMv8.0 aarch64 CPUs
(Cortex-A53, Raspberry Pi 4, AWS a1 instances).

## Root cause

Upstream mimalloc force-enables `MI_OPT_ARCH` on arm64, which adds
`-march=armv8.1-a` and emits LSE atomic instructions (`casa`, `swpa`,
`ldaddl`). These are not available on ARMv8.0 CPUs.

## Fix

- Pass `MI_NO_OPT_ARCH=ON` to mimalloc on aarch64 (has priority over
`MI_OPT_ARCH` in mimalloc's CMake)
- Update WebKit to autobuild-596e48e22e3a1090e5b802744a7938088b1ea860
which explicitly passes `-march` flags to the WebKit build

## Verification

Includes QEMU-based baseline CPU verification CI steps (#26571) that
catch these regressions automatically.
2026-01-29 17:18:57 -08:00
Dylan Conway
8a11a03297 [publish images] 2026-01-29 16:04:44 -08:00
Dylan Conway
baea21f0c7 ci: add QEMU-based baseline CPU verification steps (#26571)
## Summary

Add CI steps that verify baseline builds don't use CPU instructions
beyond their target. Uses QEMU user-mode emulation with restricted CPU
features — any illegal instruction causes SIGILL and fails the build.

## Platforms verified

| Build Target | QEMU Command | What it catches |
|---|---|---|
| `linux-x64-baseline` (glibc) | `qemu-x86_64 -cpu Nehalem` | AVX, AVX2, AVX512 |
| `linux-x64-musl-baseline` | `qemu-x86_64 -cpu Nehalem` | AVX, AVX2, AVX512 |
| `linux-aarch64` (glibc) | `qemu-aarch64 -cpu cortex-a35` | LSE atomics, SVE, dotprod |
| `linux-aarch64-musl` | `qemu-aarch64 -cpu cortex-a35` | LSE atomics, SVE, dotprod |

## How it works

Each verify step:
1. Downloads the built binary artifact from the `build-bun` step
2. Installs `qemu-user-static` on-the-fly (dnf/apk/apt-get)
3. Runs two smoke tests under QEMU with restricted CPU features:
   - `bun --version` — validates startup, linker, static init code
   - `bun -e eval` — validates JSC initialization and basic execution
4. Hard fails on SIGILL (exit code 132)

The verify step runs in the build group after `build-bun`, with a
5-minute timeout.

## Known issue this will surface

**mimalloc on aarch64**: Built with `MI_OPT_ARCH=ON` which adds
`-march=armv8.1-a`, enabling LSE atomics. This will SIGILL on
Cortex-A35/A53 CPUs. The aarch64 verify steps are expected to fail
initially, confirming the test catches real issues. Fix can be done
separately in `cmake/targets/BuildMimalloc.cmake`.
2026-01-29 15:53:34 -08:00
Jarred Sumner
209923a65c Update markdown.mjs 2026-01-29 13:00:10 -08:00
Jarred Sumner
cd4d98338c some benchmarks 2026-01-29 12:38:05 -08:00
Dylan Conway
b64edcb490 Update WebKit (#26549)
### What does this PR do?

Includes
9a2cc42ae1

Fixes https://github.com/oven-sh/bun/issues/26525

### How did you verify your code works?

CI

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2026-01-29 01:22:53 -08:00
Jarred Sumner
4feede90f5 Add missing docs 2026-01-29 08:11:50 +01:00
Dylan Conway
fc4624c672 fix(node:vm): propagate async context tracking flag to NodeVMGlobalObject (#26542)
When a `SyntheticModule` callback was wrapped in an `AsyncContextFrame`
on the main globalObject (where async context tracking is enabled),
evaluating it on a `NodeVMGlobalObject` would crash because the tracking
flag wasn't propagated.

`AsyncContextFrame::call` checks `isAsyncContextTrackingEnabled()` to
decide whether to unwrap the frame — without the flag, it takes the fast
path and tries to call the `AsyncContextFrame` wrapper directly, which
is not callable.

The async context data (`m_asyncContextData`) was already shared between
parent and `NodeVMGlobalObject`, but the tracking flag was missing. This
adds propagation of `isAsyncContextTrackingEnabled` alongside the data.

**Repro:** `react-email` v5.2.5 preview server crashes when rendering a
template because it imports `node:async_hooks` (enabling async context
tracking) and uses `node:vm` `SyntheticModule` for module evaluation.
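
A minimal sketch of that shape (hypothetical module body):

```ts
import "node:async_hooks"; // per this PR, importing enables async context tracking
import vm from "node:vm";

const mod = new vm.SyntheticModule(["default"], function () {
  // this callback is wrapped in an AsyncContextFrame on the main globalObject
  this.setExport("default", 42);
});
await mod.link(() => {
  throw new Error("synthetic modules have no imports to link");
});
await mod.evaluate(); // previously crashed on the NodeVMGlobalObject
```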

Fixes #26540

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-01-28 21:40:55 -08:00
Jarred Sumner
1bfe5c6b37 feat(md): Zig markdown parser with Bun.markdown API (#26440)
## Summary

- Port md4c (CommonMark-compliant markdown parser) from C to Zig under
`src/md/`
- Three output modes:
  - `Bun.markdown.html(input, options?)` — render to HTML string
- `Bun.markdown.render(input, callbacks?)` — render with custom
callbacks for each element
- `Bun.markdown.react(input, options?)` — render to a React Fragment
element, directly usable as a component return value
- React element creation uses a cached JSC Structure with
`putDirectOffset` for fast allocation
- Component overrides in `react()`: pass tag names as options keys to
replace default HTML elements with custom components
- GFM extensions: tables, strikethrough, task lists, permissive
autolinks, disallowed raw HTML tag filter
- Wire up `.md` as a bundler loader (via explicit `{ type: "md" }`)

## JavaScript API

### `Bun.markdown.html(input, options?)`

Renders markdown to an HTML string:

```js
const html = Bun.markdown.html("# Hello **world**");
// "<h1>Hello <strong>world</strong></h1>\n"

Bun.markdown.html("## Hello", { headingIds: true });
// '<h2 id="hello">Hello</h2>\n'
```

### `Bun.markdown.render(input, callbacks?)`

Renders markdown with custom JavaScript callbacks for each element. Each
callback receives children as a string and optional metadata, and
returns a string:

```js
// Custom HTML with classes
const html = Bun.markdown.render("# Title\n\nHello **world**", {
  heading: (children, { level }) => `<h${level} class="title">${children}</h${level}>`,
  paragraph: (children) => `<p>${children}</p>`,
  strong: (children) => `<b>${children}</b>`,
});

// ANSI terminal output
const ansi = Bun.markdown.render("# Hello\n\n**bold**", {
  heading: (children) => `\x1b[1;4m${children}\x1b[0m\n`,
  paragraph: (children) => children + "\n",
  strong: (children) => `\x1b[1m${children}\x1b[22m`,
});

// Strip all formatting
const text = Bun.markdown.render("# Hello **world**", {
  heading: (children) => children,
  paragraph: (children) => children,
  strong: (children) => children,
});
// "Hello world"

// Return null to omit elements
const result = Bun.markdown.render("# Title\n\n![logo](img.png)\n\nHello", {
  image: () => null,
  heading: (children) => children,
  paragraph: (children) => children + "\n",
});
// "Title\nHello\n"
```

Parser options can be included alongside callbacks:

```js
Bun.markdown.render("Visit www.example.com", {
  link: (children, { href }) => `[${children}](${href})`,
  paragraph: (children) => children,
  permissiveAutolinks: true,
});
```

### `Bun.markdown.react(input, options?)`

Returns a React Fragment element — use it directly as a component return
value:

```tsx
// Use as a component
function Markdown({ text }: { text: string }) {
  return Bun.markdown.react(text);
}

// With custom components
function Heading({ children }: { children: React.ReactNode }) {
  return <h1 className="title">{children}</h1>;
}
const element = Bun.markdown.react("# Hello", { h1: Heading });

// Server-side rendering
import { renderToString } from "react-dom/server";
const html = renderToString(Bun.markdown.react("# Hello **world**"));
// "<h1>Hello <strong>world</strong></h1>"
```

#### React 18 and older

By default, `react()` uses `Symbol.for('react.transitional.element')` as
the `$$typeof` symbol, which is what React 19 expects. For React 18 and
older, pass `reactVersion: 18`:

```tsx
const el = Bun.markdown.react("# Hello", { reactVersion: 18 });
```

### Component Overrides

Tag names can be overridden in `react()`:

```tsx
Bun.markdown.react(input, {
  h1: MyHeading,      // block elements
  p: CustomParagraph,
  a: CustomLink,      // inline elements
  img: CustomImage,
  pre: CodeBlock,
  // ... h1-h6, p, blockquote, ul, ol, li, pre, hr, html,
  //     table, thead, tbody, tr, th, td,
  //     em, strong, a, img, code, del, math, u, br
});
```

Boolean values are ignored (not treated as overrides), so parser options
like `{ strikethrough: true }` don't conflict with component overrides.

### Options

```js
Bun.markdown.html(input, {
  tables: true,              // GFM tables (default: true)
  strikethrough: true,       // ~~deleted~~ (default: true)
  tasklists: true,           // - [x] items (default: true)
  headingIds: true,          // Generate id attributes on headings
  autolinkHeadings: true,    // Wrap heading content in <a> tags
  tagFilter: false,          // GFM disallowed HTML tags
  wikiLinks: false,          // [[wiki]] links
  latexMath: false,          // $inline$ and $$display$$
  underline: false,          // __underline__ (instead of <strong>)
  // ... and more
});
```

## Architecture

### Parser (`src/md/`)

The parser is split into focused modules using Zig's delegation pattern:

| Module | Purpose |
|--------|---------|
| `parser.zig` | Core `Parser` struct, state, and re-exported method delegation |
| `blocks.zig` | Block-level parsing: document processing, line analysis, block start/end |
| `containers.zig` | Container management: blockquotes, lists, list items |
| `inlines.zig` | Inline parsing: emphasis, code spans, HTML tags, entities |
| `links.zig` | Link/image resolution, reference links, autolink rendering |
| `autolinks.zig` | Permissive autolink detection (www, url, email) |
| `line_analysis.zig` | Line classification: headings, fences, HTML blocks, tables |
| `ref_defs.zig` | Reference definition parsing and lookup |
| `render_blocks.zig` | Block rendering dispatch (code, HTML, table blocks) |
| `html_renderer.zig` | HTML renderer implementing `Renderer` VTable |
| `types.zig` | Shared types: `Renderer` VTable, `BlockType`, `SpanType`, `TextType`, etc. |

### Renderer Abstraction

Parsing is decoupled from output via a `Renderer` VTable interface:

```zig
pub const Renderer = struct {
    ptr: *anyopaque,
    vtable: *const VTable,

    pub const VTable = struct {
        enterBlock: *const fn (...) void,
        leaveBlock: *const fn (...) void,
        enterSpan:  *const fn (...) void,
        leaveSpan:  *const fn (...) void,
        text:       *const fn (...) void,
    };
};
```

Four renderers are implemented:
- **`HtmlRenderer`** (`src/md/html_renderer.zig`) — produces HTML string
output
- **`JsCallbackRenderer`** (`src/bun.js/api/MarkdownObject.zig`) — calls
JS callbacks for each element, accumulates string output
- **`ParseRenderer`** (`src/bun.js/api/MarkdownObject.zig`) — builds
React element AST with `MarkedArgumentBuffer` for GC safety
- **`JSReactElement`** (`src/bun.js/bindings/JSReactElement.cpp`) — C++
fast path for React element creation using cached JSC Structure +
`putDirectOffset`

## Test plan

- [x] 792 spec tests pass (CommonMark, GFM tables, strikethrough,
tasklists, permissive autolinks, GFM tag filter, wiki links, coverage,
regressions)
- [x] 114 API tests pass (`html()`, `render()`, `react()`,
`renderToString` integration, component overrides)
- [x] 58 GFM compatibility tests pass

```
bun bd test test/js/bun/md/md-spec.test.ts       # 792 pass
bun bd test test/js/bun/md/md-render-api.test.ts  # 114 pass
bun bd test test/js/bun/md/gfm-compat.test.ts     # 58 pass
```

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Dylan Conway <dylan.conway567@gmail.com>
Co-authored-by: SUZUKI Sosuke <sosuke@bun.com>
Co-authored-by: robobun <robobun@oven.sh>
Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Kirill Markelov <kerusha.chubko@gmail.com>
Co-authored-by: Ciro Spaciari <ciro.spaciari@gmail.com>
Co-authored-by: Alistair Smith <hi@alistair.sh>
2026-01-28 20:24:02 -08:00
robobun
aded701d1d feat(build): add --metafile-md CLI option for LLM-friendly bundle analysis (#26441)
## Summary

- Adds `--metafile-md` CLI option to `bun build` that generates a
markdown visualization of the module graph
- Designed to help Claude and other LLMs analyze bundle composition,
identify bloat, and understand dependency chains
- Reuses existing metafile JSON generation code as a post-processing
step

## Features

The generated markdown includes:

1. **Quick Summary** - Module counts, sizes, ESM/CJS breakdown,
output/input ratio
2. **Largest Input Files** - Sorted by size to identify potential bloat
3. **Entry Point Analysis** - Shows bundle size, exports, CSS bundles,
and bundled modules
4. **Dependency Chains** - Most commonly imported modules and reverse
dependencies
5. **Full Module Graph** - Complete import/export info for each module
6. **Raw Data for Searching** - Grep-friendly markers in code blocks:
   - `[MODULE:]`, `[SIZE:]`, `[IMPORT:]`, `[IMPORTED_BY:]`
   - `[ENTRY:]`, `[EXTERNAL:]`, `[NODE_MODULES:]`

## Usage

```bash
# Default filename (meta.md)
bun build entry.js --metafile-md --outdir=dist

# Custom filename
bun build entry.js --metafile-md=analysis.md --outdir=dist

# Both JSON and markdown
bun build entry.js --metafile=meta.json --metafile-md=meta.md --outdir=dist
```

## Example Output

See sample output: https://gist.github.com/example (will add)

## Test plan

- [x] Test default filename (`meta.md`)
- [x] Test custom filename
- [x] Test both `--metafile` and `--metafile-md` together
- [x] Test summary metrics
- [x] Test module format info (ESM/CJS)
- [x] Test external imports
- [x] Test exports list
- [x] Test bundled modules table
- [x] Test CSS bundle reference
- [x] Test import kinds (static, dynamic, require)
- [x] Test commonly imported modules
- [x] Test largest files sorting (bloat analysis)
- [x] Test output/input ratio
- [x] Test grep-friendly raw data section
- [x] Test entry point markers
- [x] Test external import markers
- [x] Test node_modules markers

All 17 new tests pass.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Dylan Conway <dylan.conway567@gmail.com>
2026-01-28 18:01:39 -08:00
Dylan Conway
7ebfdf97a8 fix(npm): remove shebang from placeholder scripts to fix npm i -g bun on Windows (#26517)
## Summary
- Removes the `#!/bin/sh` shebang from placeholder `bin/bun.exe` and
`bin/bunx.exe` scripts in the npm package
- Fixes `npm i -g bun` being completely broken on Windows since v1.3.7

## Problem
PR #26259 added a `#!/bin/sh` shebang to the placeholder scripts to show
a helpful error when postinstall hasn't run. However, npm's `cmd-shim`
reads shebangs to generate `.ps1`/`.cmd` wrappers **before** postinstall
runs, and bakes the interpreter path into them. On Windows, the wrappers
referenced `/bin/sh` which doesn't exist, causing:

```
& "/bin/sh$exe"  "$basedir/node_modules/bun/bin/bun.exe" $args
   ~~~~~~~~~~~~~
The term '/bin/sh.exe' is not recognized...
```

Even after postinstall successfully replaced the placeholder with the
real binary, the stale wrappers still tried to invoke `/bin/sh`.

## Fix
Remove the shebang. Without it, `cmd-shim` generates a direct invocation
wrapper that works after postinstall replaces the placeholder. On Unix,
bash/zsh still execute shebang-less files as shell scripts via ENOEXEC
fallback, so the helpful error message is preserved.

## Test plan
- [x] `bun bd test test/regression/issue/24329.test.ts` passes (2/2
tests)
- Manually verify `npm i -g bun` works on Windows

Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude <noreply@anthropic.com>
2026-01-28 00:00:50 -08:00
SUZUKI Sosuke
4cd3b241bc Upgrade WebKit to cc5e0bddf7ea (#26526)
Upgrade WebKit from `0e6527f24783ea83` to `cc5e0bddf7eae1d8` (77
commits)

This brings in the latest changes from oven-sh/WebKit (2026-01-27).
2026-01-27 23:33:47 -08:00
robobun
cae67a17e2 chore: update mimalloc to latest dev3 (#26519)
## Summary
- Updates oven-sh/mimalloc bun-dev3 branch to latest upstream
microsoft/mimalloc dev3 (ffa38ab8)
- Merged 12 new commits from upstream

### Key upstream changes included:
- fix re-initialization of threads on macOS
- add lock for sub-pagemap allocations
- fix peak commit stat
- fix use of continue in bitmap find_and_clear (fixes rare case of not
finding space while it exists)

## Test plan
- [ ] CI passes
- [ ] Memory allocation tests pass

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-27 23:33:00 -08:00
robobun
a394063a7d refactor(test): use container-based postgres_tls for TLS SQL tests (#26518)
## Summary
- Refactors `tls-sql.test.ts` to use `describeWithContainer` with a
local Docker container instead of external Neon secrets
- Updates `postgres_tls` service to build from Dockerfile (fixes SSL key
permission issues)
- Fixes pg_hba.conf to allow local socket connections for init scripts

## Test plan
- [x] Verified tests pass locally with `bun bd test
test/js/sql/tls-sql.test.ts` (30 tests pass)
- [ ] CI passes on x64 Linux (arm64 Docker tests are currently disabled)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-27 23:32:39 -08:00
Jarred Sumner
c9ebb17921 Bump 2026-01-27 17:06:36 -08:00
Dylan Conway
2f510724a9 fix(napi): return napi_function for AsyncContextFrame in napi_typeof (#26511)
## Summary
- `napi_typeof` was returning `napi_object` for `AsyncContextFrame`
values, which are internally callable JSObjects
- Native addons that check callback types (e.g. encore.dev's runtime)
would fail with `expect Function, got: Object` and panic
- Added a `jsDynamicCast<AsyncContextFrame*>` check before the final
`napi_object` fallback to correctly report these values as
`napi_function`

Closes #25933

## Test plan
- [x] Verify encore.dev + supertokens reproduction from the issue no
longer panics
- [ ] Existing napi tests continue to pass

Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude <noreply@anthropic.com>
2026-01-27 13:35:15 -08:00
robobun
9a16f4c345 fix(http2): correct canReceiveData logic per RFC 7540 (#26491)
## Summary

- Fixed inverted logic in `canReceiveData` function in HTTP/2 stream
state handling
- Added gRPC streaming tests to verify correct behavior

## Problem

The `canReceiveData` function had completely inverted logic that
reported incorrect `remoteClose` status:

| Stream State | Before (Wrong) | After (Correct) |
|--------------|----------------|-----------------|
| OPEN | `false` (can't receive) | `true` (can receive) |
| HALF_CLOSED_LOCAL | `false` (can't receive) | `true` (can receive from remote) |
| HALF_CLOSED_REMOTE | `true` (can receive) | `false` (remote closed) |
| CLOSED | `true` (can receive) | `false` (stream done) |

Per RFC 7540 Section 5.1:
- In `HALF_CLOSED_LOCAL` state, the local endpoint has sent END_STREAM
but can still **receive** data from the remote peer
- In `HALF_CLOSED_REMOTE` state, the remote endpoint has sent END_STREAM
so no more data will be received

## Test plan

- [x] Added gRPC streaming tests covering unary, server streaming,
client streaming, and bidirectional streaming calls
- [x] Verified HTTP/2 test suite passes (same or fewer failures than
before)
- [x] Verified gRPC test suite improves (7 failures vs 9 failures before
+ 2 errors)

Closes #20875

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-27 10:29:34 -08:00
robobun
ba426210c2 fix(shell): handle ".", "", "./" in cwd() by using process.cwd() (#26461)
## Summary
- Fix `` $`...`.cwd(".") `` causing an ENOENT error with a path ending in "undefined"
- The same fix applies to `.cwd("")` and `.cwd("./")`
- Falls back to `process.cwd()` when `defaultCwd` is undefined
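
A sketch of the previously-failing calls:

```ts
import { $ } from "bun";

// each of these used to fail with ENOENT on a path ending in "undefined"
await $`pwd`.cwd(".");
await $`pwd`.cwd("");
await $`pwd`.cwd("./");
```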

Closes #26460

## Test plan
- [x] Added regression test in `test/regression/issue/26460.test.ts`
- [x] Verified test fails with `USE_SYSTEM_BUN=1` (reproduces the bug)
- [x] Verified test passes with `bun bd test` (fix works)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-26 16:04:45 -08:00
Dylan Conway
bd63fb9ef6 fix: BUN_OPTIONS bare flags getting trailing whitespace (#26464)
## Summary

Fix a bug in `appendOptionsEnv` where bare flags (no `=`) that aren't
the last option get a trailing space appended, causing the argument
parser to not recognize them.

For example, `BUN_OPTIONS="--cpu-prof --cpu-prof-dir=profiles"` would
parse `--cpu-prof` as `"--cpu-prof "` (trailing space), so CPU profiling
was never enabled.

## Root Cause

When `appendOptionsEnv` encounters a `--flag` followed by whitespace, it
advances past the whitespace looking for a possible quoted value (e.g.
`--flag "quoted"`). If no quote is found and there's no `=`, it falls
through without resetting `j`, so the emitted argument includes the
trailing whitespace.

## Fix

Save `end_of_flag = j` after scanning the flag name. Add an `else`
branch that resets `j = end_of_flag` when no value (quote or `=`) is
found after the whitespace. This is a 3-line change.

Also fixes a separate bug in `BunCPUProfiler.zig` where `--cpu-prof-dir`
with an absolute path would hit a debug assertion (`path.append` on an
already-rooted path with an absolute input). Changed to `path.join`
which handles both relative and absolute paths correctly.

## Tests

- `test/cli/env/bun-options.test.ts`: Two new tests verifying
`--cpu-prof --cpu-prof-dir=<abs-path>` produces a `.cpuprofile` file,
for both normal and standalone compiled executables.
2026-01-26 14:02:36 -08:00
robobun
9d6ef0af1d fix(fetch): preserve header case when sending HTTP requests (#26425)
## Summary
- Fixes #26422
- Preserve HTTP header case when sending requests (e.g., `Content-Type`
instead of `content-type`)
- HTTP headers are technically case-insensitive per RFC 7230, but many
APIs expect specific casing
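
A sketch of the observable change (hypothetical endpoint):

```ts
// header names are now sent on the wire with the caller's casing
await fetch("http://localhost:3000/upload", {
  method: "POST",
  headers: { "Content-Type": "application/json" }, // sent as-is, not "content-type"
  body: JSON.stringify({ ok: true }),
});
```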

## Test plan
- [x] Added tests that verify headers are sent with proper case on the
wire
- [x] Tests use raw TCP sockets to capture actual HTTP wire format
- [x] Tests fail with system Bun (lowercase headers), pass with fixed
build

🤖 Generated with [Claude Code](https://claude.ai/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-26 11:15:33 -08:00
robobun
d08e4bae09 fix(http2): prevent extra empty DATA frame on write()+end() pattern (#26410)
## Summary

- Fixes an issue where calling `req.write(data)` followed by `req.end()`
on an HTTP/2 stream would send **three** DATA frames instead of **two**
- This caused AWS ALB and other strict HTTP/2 servers to reject the
connection with `NGHTTP2_FRAME_SIZE_ERROR` (error code 6)

## Root Cause

The `Http2Stream.end()` method was creating an empty buffer
(`Buffer.alloc(0)`) when called without data:

```javascript
if (!chunk) {
  chunk = Buffer.alloc(0);
}
return super.end(chunk, encoding, callback);
```

This empty buffer was then passed to the Duplex stream's `end()`, which
triggered `_write()` with the empty buffer before calling `_final()`.
This resulted in:

1. DATA frame with actual data (from `_write`)
2. Empty DATA frame without END_STREAM (from the extra `_write` with
empty buffer)
3. Empty DATA frame with END_STREAM (from `_final`)

The second empty DATA frame was unnecessary and violated some strict
HTTP/2 implementations.
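
The triggering pattern, as a sketch (hypothetical endpoint):

```ts
import http2 from "node:http2";

const client = http2.connect("https://example.com");
const req = client.request({ ":method": "POST", ":path": "/" });
req.write(Buffer.from("payload")); // DATA frame carrying data
req.end(); // now a single empty DATA frame with END_STREAM
```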

## Fix

Remove the unnecessary empty buffer creation. The Duplex stream's
`end()` method already handles the no-data case correctly by calling
`_final()` directly without calling `_write()`.

## Test plan

- [x] Manually verified with ConnectRPC client and AWS ALB endpoint
- [x] Added regression test
`test/regression/issue/25589-write-end.test.ts`
- [x] Existing HTTP/2 tests pass
- [x] Existing gRPC tests pass

Fixes #25589

🤖 Generated with [Claude Code](https://claude.ai/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: Ciro Spaciari <ciro.spaciari@gmail.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-01-26 11:09:03 -08:00
Dylan Conway
b59c77a6e7 feat: add native JSON5 parser (Bun.JSON5) (#26439)
## Summary

- Adds `Bun.JSON5.parse()` and `Bun.JSON5.stringify()` as built-in APIs
- Adds `.json5` file support in the module resolver and bundler
- Parser uses a scanner/parser split architecture with a labeled switch
pattern (like the YAML parser) — the scanner produces typed tokens, the
parser never touches source bytes directly
- 430+ tests covering the official JSON5 test suite, escape sequences,
numbers, comments, whitespace (including all Unicode whitespace types),
unquoted/reserved-word keys, unicode identifiers, deeply nested
structures, garbage input, error messages, and stringify behavior
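
A sketch of the new API surface, assuming `JSON.parse`/`JSON.stringify`-style signatures:

```ts
const config = Bun.JSON5.parse(`{
  // comments, unquoted keys, and trailing commas are valid JSON5
  name: 'demo',
  ports: [3000, 3001,],
}`);

const text = Bun.JSON5.stringify(config, null, 2);
```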

<img width="659" height="610" alt="Screenshot 2026-01-25 at 12 19 57 AM"
src="https://github.com/user-attachments/assets/e300125a-f197-4cad-90ed-e867b6232a01"
/>

## Test plan

- [x] `bun bd test test/js/bun/json5/json5.test.ts` — 317 tests
- [x] `bun bd test test/js/bun/json5/json5-test-suite.test.ts` — 113
tests from the official JSON5 test suite
- [x] `bun bd test test/js/bun/resolve/json5/json5.test.js` — .json5
module resolution

closes #3175

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-01-26 10:52:35 -08:00
SUZUKI Sosuke
6130aa8168 Add benchmark for [...set] (#26452)
### What does this PR do?

Adds benchmark for `[...set]`

### How did you verify your code works?

### Results

**Bun 1.3.6** 

```
clk: ~3.77 GHz
cpu: Apple M4 Max
runtime: bun 1.3.6 (arm64-darwin)

benchmark                   avg (min … max) p75   p99    (min … top 1%)
------------------------------------------- -------------------------------
[...set] - integers (10)      51.70 ns/iter  51.16 ns   █▄
                     (45.17 ns … 121.75 ns)  87.65 ns ▁▁██▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
[...set] - integers (100)    394.07 ns/iter 394.41 ns   █▇▂
                    (378.73 ns … 443.39 ns) 437.51 ns ▂████▆▄▂▁▁▁▁▂▃▃▄▂▂▁▂▁
[...set] - strings (10)       53.86 ns/iter  53.66 ns  █
                     (50.72 ns … 115.85 ns)  89.37 ns ▃█▆▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
[...set] - strings (100)     422.12 ns/iter 422.80 ns    ▅██▄
                    (392.35 ns … 977.34 ns) 481.31 ns ▂▂▅████▇▃▂▁▂▃▄▄▃▃▁▁▁▁
[...set] - objects (10)       54.07 ns/iter  54.33 ns  █▇
                      (49.83 ns … 98.49 ns)  87.07 ns ▂██▇▃▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
[...set] - objects (100)     446.81 ns/iter 441.72 ns  █
                    (397.71 ns … 954.87 ns) 824.61 ns ▂█▃▃▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
```

**Bun 1.3.7 canary**

```
clk: ~3.82 GHz
cpu: Apple M4 Max
runtime: bun 1.3.7 (arm64-darwin)

benchmark                   avg (min … max) p75   p99    (min … top 1%)
------------------------------------------- -------------------------------
[...set] - integers (10)       4.71 ns/iter   4.62 ns  █▄
                       (3.88 ns … 48.91 ns)  11.42 ns ▂██▃▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
[...set] - integers (100)     42.52 ns/iter  42.70 ns    █
                      (35.70 ns … 88.03 ns)  74.86 ns ▃█▂██▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
[...set] - strings (10)        6.02 ns/iter   5.86 ns       ▇█
                       (3.99 ns … 49.43 ns)   8.89 ns ▁▁▁▁▁▂██▆▃▃▂▁▁▁▁▁▁▁▁▁
[...set] - strings (100)      56.81 ns/iter  55.91 ns    █
                      (48.70 ns … 96.89 ns)  88.48 ns ▁▁▃█▇▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
[...set] - objects (10)        6.15 ns/iter   6.10 ns       █▃
                       (4.14 ns … 66.32 ns)   9.31 ns ▁▁▁▁▂███▆▅▃▂▁▁▁▁▁▁▁▁▁
[...set] - objects (100)      57.47 ns/iter  56.67 ns    ▆█
                     (47.52 ns … 110.06 ns)  93.39 ns ▁▁▃██▃▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
```

**Node 24.12.0**

```
clk: ~3.30 GHz
cpu: Apple M4 Max
runtime: node 24.12.0 (arm64-darwin)

benchmark                   avg (min … max) p75   p99    (min … top 1%)
------------------------------------------- -------------------------------
[...set] - integers (10)      28.19 ns/iter  13.03 ns █
                       (10.87 ns … 9.37 µs) 129.01 ns █▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
[...set] - integers (100)    159.41 ns/iter  96.19 ns █
                       (80.87 ns … 8.42 µs)   1.98 µs █▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
[...set] - strings (10)       20.13 ns/iter  13.56 ns █
                       (10.92 ns … 3.82 µs) 105.83 ns █▅▂▁▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
[...set] - strings (100)     138.56 ns/iter  92.29 ns █
                      (80.63 ns … 10.17 µs)   1.09 µs █▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
[...set] - objects (10)       16.43 ns/iter  12.40 ns  █
                       (11.07 ns … 5.99 µs)  39.03 ns ▄█▂▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
[...set] - objects (100)     108.58 ns/iter  97.82 ns  █
                       (80.83 ns … 2.99 µs) 298.93 ns ▂█▅▂▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
```

**Summary**

```
  ┌────────────────┬────────────────┬───────────────┬─────────┐
  │   Benchmark    │ Before (1.3.6) │ After (1.3.7) │ Speedup │
  ├────────────────┼────────────────┼───────────────┼─────────┤
  │ integers (10)  │ 51.70 ns       │ 4.71 ns       │ 11.0x   │
  ├────────────────┼────────────────┼───────────────┼─────────┤
  │ integers (100) │ 394.07 ns      │ 42.52 ns      │ 9.3x    │
  ├────────────────┼────────────────┼───────────────┼─────────┤
  │ strings (10)   │ 53.86 ns       │ 6.02 ns       │ 8.9x    │
  ├────────────────┼────────────────┼───────────────┼─────────┤
  │ strings (100)  │ 422.12 ns      │ 56.81 ns      │ 7.4x    │
  ├────────────────┼────────────────┼───────────────┼─────────┤
  │ objects (10)   │ 54.07 ns       │ 6.15 ns       │ 8.8x    │
  ├────────────────┼────────────────┼───────────────┼─────────┤
  │ objects (100)  │ 446.81 ns      │ 57.47 ns      │ 7.8x    │
  └────────────────┴────────────────┴───────────────┴─────────┘
```
2026-01-26 00:25:51 -08:00
SUZUKI Sosuke
a595fe1cca test: add JSC JIT stress tests from WebKit (#26380)
## Summary

This PR adds 78 stress tests that exercise JSC's JIT compilation tiers.
The tests are ported from WebKit's `JSTests/stress/` and
`JSTests/wasm/stress/` directories, covering all five JIT tiers:

- **FTL** (41 tests): math intrinsics, string ops, regexp, arguments,
exceptions, try-catch, property access, OSR, tail calls
- **DFG** (14 tests): SSA, type conversion, strength reduction,
arguments, internal functions, try-catch, class constructors
- **Allocation sinking / OSR / LICM** (6 tests): varargs, loop
unrolling, LICM
- **Wasm BBQ** (11 tests): fused-if register alloc, OSR with exceptions,
ipint-bbq OSR, tail calls
- **Wasm OMG** (6 tests): recompile, OSR stack slots, tail call clobber

Each test exercises hot loops that trigger JIT tier-up, verifying
correctness of JIT-compiled code.

## Motivation

The goal is to improve stability on platforms that Bun supports but are
not covered by WebKit's EWS (Early Warning System). By running these JIT
stress tests across all CI platforms, we can catch JIT-related
regressions that would otherwise go unnoticed.

## Licensing

Since the test fixtures are derived from WebKit's `JSTests/`, a
`LICENSE` file is included in the test directory with the BSD 2-Clause
license per WebKit's contribution policy.

## Next Steps

As a follow-up, we are considering running these tests specifically
under CPU emulation (without AVX) on baseline builds, to verify that
JIT-generated code does not emit AVX instructions on platforms that do
not support them.

---------

Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2026-01-26 00:21:34 -08:00
Kirill Markelov
799907362f Fix hash map use-after-free in macro (#26451)
### What does this PR do?
Fixes a use-after-free of a pointer taken from a hash map inside the
`.Promise` branch.
Fixes https://github.com/oven-sh/bun/issues/24505

### How did you verify your code works?
Tested on [my minimal repro](https://github.com/oven-sh/bun/issues/24505#issuecomment-3797984659), and also on the repro in [this issue](https://github.com/oven-sh/bun/issues/24505#issue-3603294746).

No test cases are included because the repro is still flaky and involves heavy libraries to reproduce.
2026-01-26 00:19:58 -08:00
robobun
2c0721eabe docs(test): update static-initializers.test.ts comments to reflect mimalloc v3 (#26454)
## Summary
- Updates the docstring in static-initializers.test.ts which incorrectly
said "exactly one static initializer" - the test actually expects 2
initializers for both arm64 and x64 since the mimalloc v3 update
- Added a comment explaining that mimalloc v3 adds a static initializer
on arm64

## Background
The test file was updated in c63415c9c9 (Mimalloc v3 update) to expect 2
static initializers on arm64 (changed from 1 to 2), but the comments
were not updated to reflect this change. This PR updates the
documentation to accurately describe the expected behavior.

## Test plan
- [x] No functional changes - documentation only

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-26 00:19:45 -08:00
SUZUKI Sosuke
02680b69bf fix(lint): remove unused variables in readline and ConsoleObject (#26455)
### What does this PR do?

Just fixes lint errors

### How did you verify your code works?
2026-01-26 00:19:25 -08:00
SUZUKI Sosuke
7c50164987 fix(build-jsc): enable REMOTE_INSPECTOR for macOS release builds (#26453)
### What does this PR do?

Move `-DENABLE_REMOTE_INSPECTOR=ON` from the debug-only flags to the macOS common flags so it applies to all build configurations (debug, release, lto). This was already the case for Linux and Windows.

Without this, `build:release:local` fails because BunDebugger.cpp and InspectorLifecycleAgent.cpp unconditionally use JSC inspector APIs that are only available when REMOTE_INSPECTOR is enabled.

### How did you verify your code works?

Build locally
2026-01-26 00:19:11 -08:00
Dylan Conway
a553fda32b fix(napi): fix use-after-free in property names and external buffer lifetime (#26450)
## Summary

- **PROPERTY_NAME_FROM_UTF8 use-after-free:** The macro used
`StringImpl::createWithoutCopying` for ASCII strings, which left
dangling pointers in JSC's atom string table when the caller freed the
input buffer (e.g. napi-rs `CString`). Fixed by using
`Identifier::fromString` which copies only when inserting into the atom
table, but never retains a reference to the caller's buffer.

- **napi_create_external_buffer data lifetime:** `finalize_cb` was
attached via `addFinalizer` (tied to GC of the `JSUint8Array` view)
instead of the `ArrayBuffer` destructor. Extracting `.buffer` and
letting the Buffer get GC'd would free the backing data while the
`ArrayBuffer` still referenced it. Fixed by attaching the destructor to
the `ArrayBuffer` via `createFromBytes`, using an armed
`NapiExternalBufferDestructor` to safely handle the
`JSUint8Array::create` error path.

Closes #26446
Closes #26423

## Test plan

- [x] Added regression test `test_napi_get_named_property_copied_string`
-- strdup/free cycles with GC to reproduce the atom table dangling
pointer
- [x] Added regression test `test_external_buffer_data_lifetime` --
extracts ArrayBuffer, drops Buffer, GCs, verifies data is intact
- [x] Both tests pass with `bun bd test` and match Node.js output via
`checkSameOutput`
- [x] Verified `test_external_buffer_data_lifetime` fails without the
fix (data corrupted) and passes on Node.js
- [x] Verified impit reproducer from #26423 works correctly with the fix

Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude <noreply@anthropic.com>
2026-01-25 20:11:52 -08:00
Dylan Conway
f87fa27fac Update WebKit (#26449)
### What does this PR do?
Updates WebKit to
6e287faaed
### How did you verify your code works?
2026-01-25 18:43:55 -08:00
Dylan Conway
4071624edd Update WebKit (#26381)
### What does this PR do?
Updates WebKit to
5b6a0ac49b
### How did you verify your code works?

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-25 10:38:13 -08:00
robobun
bfe40e8760 fix(cmake): use BUILDKITE_BUILD_NUMBER to avoid 302 redirect (#26409)
## What does this PR do?

Fixes CMake "No jobs found" error during the build-bun step in CI by
using `BUILDKITE_BUILD_NUMBER` instead of `BUILDKITE_BUILD_ID` (UUID)
for the Buildkite API URL.

### Problem

When `BUN_LINK_ONLY=ON`, `SetupBuildkite.cmake` fetches build info from
the Buildkite API to download artifacts from earlier build steps
(build-cpp, build-zig).

The `BUILDKITE_BUILD_ID` environment variable contains a UUID (e.g.,
`019bee3e-da45-4e9f-b4d8-4bdb5aeac0ac`). When this UUID is used in the
URL, Buildkite returns a **302 redirect** to the numeric build number
URL (e.g., `/builds/35708`).

CMake's `file(DOWNLOAD)` command **does not follow HTTP redirects**, so
the downloaded file is empty. Parsing the empty JSON yields 0 jobs,
triggering the fatal error:

```
CMake Error at cmake/tools/SetupBuildkite.cmake:67 (message):
  No jobs found:
  https://buildkite.com/bun/bun/builds/019bee3e-da45-4e9f-b4d8-4bdb5aeac0ac
```

### Solution

Prefer `BUILDKITE_BUILD_NUMBER` (numeric, e.g., `35708`) when available,
which doesn't redirect. This environment variable is automatically set
by Buildkite.

## How did you verify your code works?

- Verified UUID URL returns 302: `curl -sS -w '%{http_code}'
"https://buildkite.com/bun/bun/builds/019bee3e-da45-4e9f-b4d8-4bdb5aeac0ac"`
→ `302`
- Verified numeric URL returns 200 with JSON: `curl -sS -H "Accept:
application/json" "https://buildkite.com/bun/bun/builds/35708"` → valid
JSON with jobs array

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-24 23:54:33 -08:00
github-actions[bot]
bcaae48a95 deps: update lolhtml to v2.7.1 (#26430)
## What does this PR do?

Updates lolhtml to version v2.7.1

Compare:
d64457d9ff...e9e16dca48

Auto-updated by [this
workflow](https://github.com/oven-sh/bun/actions/workflows/update-lolhtml.yml)

Co-authored-by: Jarred-Sumner <709451+Jarred-Sumner@users.noreply.github.com>
2026-01-24 23:54:09 -08:00
robobun
6b3403a2b4 ci: fix update workflows creating duplicate PRs (#26433)
## Summary
- Fixed all `update-*.yml` workflows that were creating duplicate PRs
every week

## Problem
The update workflows (libarchive, zstd, cares, etc.) were using `${{
github.run_number }}` in the branch name, e.g.:
```yaml
branch: deps/update-libarchive-${{ github.run_number }}
```

This caused a new unique branch to be created on every workflow run, so
the `peter-evans/create-pull-request` action couldn't detect existing
PRs and would create duplicates.

**Evidence:** There are currently 8+ open duplicate PRs for libarchive
alone:
- #26432 deps: update libarchive to v3.8.5 (deps/update-libarchive-56)
- #26209 deps: update libarchive to v3.8.5 (deps/update-libarchive-55)
- #25955 deps: update libarchive to v3.8.5 (deps/update-libarchive-54)
- etc.

## Solution
Changed all workflows to use static branch names, e.g.:
```yaml
branch: deps/update-libarchive
```

This allows the action to:
1. Detect if an existing branch/PR already exists
2. Update the existing PR with new changes instead of creating a new one
3. Properly use `delete-branch: true` when the PR is merged

## Files Changed
- `.github/workflows/update-cares.yml`
- `.github/workflows/update-hdrhistogram.yml`
- `.github/workflows/update-highway.yml`
- `.github/workflows/update-libarchive.yml`
- `.github/workflows/update-libdeflate.yml`
- `.github/workflows/update-lolhtml.yml`
- `.github/workflows/update-lshpack.yml`
- `.github/workflows/update-root-certs.yml`
- `.github/workflows/update-sqlite3.yml`
- `.github/workflows/update-vendor.yml`
- `.github/workflows/update-zstd.yml`

## Test plan
- [x] Verified the change is syntactically correct
- [ ] Wait for next scheduled run of any workflow to verify it updates
existing PR instead of creating a new one

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-24 23:53:50 -08:00
robobun
70fe76209b fix(readline): use symbol key for _refreshLine in tab completer (#26412) 2026-01-24 15:37:29 -08:00
Jarred Sumner
ab3df344b8 Delete slop test 2026-01-23 23:22:32 -08:00
robobun
4680e89a91 fix(bundler): add missing semicolons in minified bun module imports (#26372)
## Summary
- Fix missing semicolons in minified output when using both default and
named imports from `"bun"` module
- The issue occurred in `printInternalBunImport` when transitioning
between star_name, default_name, and items sections without flushing
pending semicolons
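
For reference, the failing input shape, sketched from the regression tests below:

```ts
// A default plus a named import from the "bun" module in one file:
import bun, { embeddedFiles } from "bun";

console.log(bun, embeddedFiles);
// When minified, the synthesized bindings for these imports were emitted
// back-to-back without a separating ";", producing invalid JavaScript.
```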

## Test plan
- Added regression tests in `test/regression/issue/26371.test.ts`
covering:
  - Default + named imports (`import bun, { embeddedFiles } from "bun"`)
  - Namespace + named imports (`import * as bun from "bun"; import { embeddedFiles } from "bun"`)
  - Namespace + default + named imports combination
- Verified test fails with `USE_SYSTEM_BUN=1` (reproduces bug)
- Verified test passes with `bun bd test` (fix works)

Fixes #26371

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-23 23:09:01 -08:00
robobun
f88f60af5a fix(bundler): throw error when Bun.build is called from macro during bundling (#26361)
## Summary
- Fixes #26360
- Detects when `Bun.build` is called from within macro mode during
bundling and throws a clear error instead of hanging indefinitely

## Problem
When `Bun.build` API is called to bundle a file that imports from a
macro which itself uses `Bun.build`, the process would hang indefinitely
due to a deadlock:

1. The bundler uses a singleton thread for processing `Bun.build` calls
2. During parsing, when a macro is encountered, it's evaluated on that
thread
3. If the macro calls `Bun.build`, it tries to enqueue to the same
singleton thread
4. The singleton is blocked waiting for macro completion → deadlock

## Solution
Added a check in `Bun.build` that detects when it's called from macro
mode (`vm.macro_mode`) and throws a clear error with guidance:

```
Bun.build cannot be called from within a macro during bundling.

This would cause a deadlock because the bundler is waiting for the macro to complete,
but the macro's Bun.build call is waiting for the bundler.

To bundle code at compile time in a macro, use Bun.spawnSync to invoke the CLI:
  const result = Bun.spawnSync(["bun", "build", entrypoint, "--format=esm"]);
```

## Test plan
- [x] Added regression test in `test/regression/issue/26360.test.ts`
- [x] Verified test hangs/fails with system Bun (the bug exists)
- [x] Verified test passes with the fix applied
- [x] Verified regular `Bun.build` (not in macro context) still works

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2026-01-23 20:24:12 -08:00
robobun
232e0df956 fix(http): respect port numbers in NO_PROXY environment variable (#26347)
## Summary
- Fix NO_PROXY environment variable to properly respect port numbers
like Node.js and curl do
- Previously `NO_PROXY=localhost:1234` would bypass proxy for all
requests to localhost regardless of port
- Now entries with ports (e.g., `localhost:8080`) do exact host:port
matching, while entries without ports continue to use suffix matching
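
A minimal sketch of the matching rules described above (proxy address and ports are illustrative):

```ts
// Assume HTTP_PROXY=http://127.0.0.1:8888 is set in the environment.
process.env.NO_PROXY = "localhost:1234";
await fetch("http://localhost:1234/"); // bypasses the proxy: exact host:port match
await fetch("http://localhost:9999/"); // uses the proxy: port differs

process.env.NO_PROXY = "localhost";
await fetch("http://localhost:9999/"); // bypasses the proxy: host-only suffix match
```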

## Test plan
- Added tests in `test/js/bun/http/proxy.test.js` covering:
  - [x] Bypass proxy when NO_PROXY matches host:port exactly
  - [x] Use proxy when NO_PROXY has different port  
  - [x] Bypass proxy when NO_PROXY has host only (no port) - existing behavior preserved
  - [x] Handle NO_PROXY with multiple entries including port
- Verified existing proxy tests still pass

🤖 Generated with [Claude Code](https://claude.ai/code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2026-01-23 20:21:57 -08:00
robobun
9f0e78fc42 fix(serve): add missing return after handling invalid stream in response body (#26396)
## Summary
- Fix missing return after handling `.Invalid` stream case in response
body rendering
- Add regression test for Bun.serve() concurrent instances (#26394)

## Details

When a response body contains a locked value with an invalid readable
stream (`stream.ptr == .Invalid`), the code would:
1. Call `this.response_body_readable_stream_ref.deinit()` 
2. Fall through without returning

This missing `return` caused the code to fall through to subsequent
logic that could set up invalid callbacks on an already-used body value,
potentially causing undefined behavior.

The fix adds `this.doRenderBlob()` to render an empty response body for
the invalid stream case, then returns properly.

## Test plan
- [x] Added `test/regression/issue/26394.test.ts` with tests for
concurrent Bun.serve instances
- [x] Verified test passes with `bun bd test
test/regression/issue/26394.test.ts`
- [x] Verified test passes with system Bun (`USE_SYSTEM_BUN=1 bun test`)


🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-23 12:27:36 -08:00
robobun
043fafeefa fix(http): set #js_ref in Request.toJS for server-created requests (#26390)
## Summary

- Fix Request.text() failure with "TypeError: undefined is not a
function" after many requests on certain platforms
- Set `#js_ref` in `Request.toJS()` for server-created requests,
matching the existing pattern in `Response.toJS()`

## Root Cause

When Request objects are created by the server (via `Request.init()`),
the `#js_ref` field was never initialized. This caused
`checkBodyStreamRef()` to fail silently when called in `toJS()` because
`#js_ref.tryGet()` returned null.

The bug manifested on macOS after ~4,500 requests when GC conditions
were triggered, causing the weak reference lookup to fail and resulting
in "TypeError: undefined is not a function" when calling `req.text()`.

## Fix

The fix mirrors the existing pattern in `Response.toJS()`:
1. Create the JS value first via `js.toJSUnchecked()`
2. Set `#js_ref` with the JS wrapper reference
3. Then call `checkBodyStreamRef()` which can now properly access the JS
value

## Test plan

- [x] Added regression test that exercises Request.text() with 6000
requests and periodic GC
- [x] Existing request tests pass
- [x] HTTP server tests pass

Fixes #26387

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-23 12:09:03 -08:00
Alistair Smith
ce173b1112 Revert 0c3b5e501b 2026-01-23 11:02:26 -08:00
Alistair Smith
0c3b5e501b Merge branch 'main' of github.com:oven-sh/bun into ali/esm-bytecode 2026-01-23 10:48:40 -08:00
Alistair Smith
5dc72bc1d8 use esm module info in source provider 2026-01-23 10:48:34 -08:00
Alistair Smith
dfc36a8255 start laying groundwork 2026-01-23 10:43:22 -08:00
robobun
827c7091d9 fix(pack): re-read package.json after prepack/prepare scripts (#26267)
## Summary

- Fixes `bun pm pack` not respecting changes to `package.json` made by
prepack/prepare scripts
- Tracks whether lifecycle scripts (prepublishOnly, prepack, prepare)
ran
- If scripts ran, invalidates the cached package.json and re-reads from
disk before creating the tarball

Closes #24314

## Test plan

- Added regression test `test/regression/issue/24314.test.ts` that
verifies package.json modifications from prepack/prepare scripts are
included in the tarball
- All existing pack tests pass (69 tests)


🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-23 00:44:12 -08:00
robobun
13f78a7044 fix(streams): return null from controller.desiredSize when stream is detached (#26378)
## Summary

- Fixed `controller.desiredSize` throwing `TypeError: null is not an
object` when the stream's internal `controlledReadableStream` has been
set to `null` during cleanup
- Added null check in `readableStreamDefaultControllerGetDesiredSize` to
return `null` when the stream reference is null, matching WHATWG Streams
spec behavior for detached/errored streams

## Root Cause

When piping streams (e.g., `fetch` with `ReadableStream` body), cleanup
code in `assignStreamIntoResumableSink` and `readStreamIntoSink` sets
`controlledReadableStream` to `null` for GC purposes. If the user's
`pull()` function is still running asynchronously when this happens,
accessing `controller.desiredSize` throws instead of returning `null`.
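
A rough sketch of the race (timing and URL are illustrative):

```ts
const body = new ReadableStream({
  async pull(controller) {
    controller.enqueue(new TextEncoder().encode("chunk"));
    await Bun.sleep(10); // pull() is still running while the pipe is torn down
    // Previously this access threw "TypeError: null is not an object" once
    // controlledReadableStream was nulled; now it reports null instead.
    if (controller.desiredSize === null) return;
    controller.close();
  },
});

await fetch("http://localhost:3000/upload", { method: "POST", body });
```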

## Test plan

- [x] Added regression test `test/regression/issue/26377.test.ts`
- [x] Verified tests pass with `bun bd test
test/regression/issue/26377.test.ts`

Fixes #26377

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-23 00:26:52 -08:00
Dylan Conway
d8d8182e8e fix(compile): apply BUN_OPTIONS env var to standalone executables (#26346)
## Summary
- Fixed `BUN_OPTIONS` environment variable not being applied as runtime
options for standalone executables (`bun build --compile`). Previously,
args from `BUN_OPTIONS` were incorrectly passed through to
`process.argv` instead of being parsed as Bun runtime options
(`process.execArgv`).
- Removed `BUN_CPU_PROFILE`, `BUN_CPU_PROFILE_DIR`, and
`BUN_CPU_PROFILE_NAME` env vars since `BUN_OPTIONS="--cpu-prof
--cpu-prof-dir=... --cpu-prof-name=..."` now works correctly with
standalone executables.
- Made `cpu_prof.name` and `cpu_prof.dir` non-optional with empty string
defaults.

fixes #21496

## Test plan
- [x] Added tests for `BUN_OPTIONS` with standalone executables (no
`compile-exec-argv`)
- [x] Added tests for `BUN_OPTIONS` combined with `--compile-exec-argv`
- [x] Added tests for `BUN_OPTIONS` with user passthrough args
- [x] Verified existing `compile-argv` tests still pass
- [x] Verified existing `bun-options` tests still pass

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Claude Bot <claude-bot@bun.sh>
2026-01-23 00:24:18 -08:00
Jarred Sumner
e8b2455f11 feat: add Bun.JSONL.parse() for streaming newline-delimited JSON parsing (#26356)
Adds a built-in JSONL parser implemented in C++ using JavaScriptCore's
optimized JSON parser.

## API

### `Bun.JSONL.parse(input)`
Parse a complete JSONL string or `Uint8Array` and return an array of all
parsed values. Throws on invalid input.

```ts
const results = Bun.JSONL.parse('{"a":1}\n{"b":2}\n');
// [{ a: 1 }, { b: 2 }]
```

### `Bun.JSONL.parseChunk(input, start?, end?)`
Parse as many complete values as possible, returning `{ values, read,
done, error }`. Designed for streaming use cases where input arrives
incrementally.

```ts
const result = Bun.JSONL.parseChunk('{"id":1}\n{"id":2}\n{"id":3');
result.values; // [{ id: 1 }, { id: 2 }]
result.read;   // 17
result.done;   // false
result.error;  // null
```
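
For incremental input, `read` reports how much of the buffer was consumed, so a consumer loop can carry the incomplete tail forward. A sketch, where `source` and `handle` are placeholders:

```ts
let buffered = "";
for await (const chunk of source) { // source: any async iterable of strings
  buffered += chunk;
  const { values, read, error } = Bun.JSONL.parseChunk(buffered);
  if (error) throw error;
  for (const value of values) handle(value);
  buffered = buffered.slice(read); // keep the unconsumed tail for the next chunk
}
```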

## Implementation Details

- C++ implementation in `BunObject.cpp` using JSC's `streamingJSONParse`
- ASCII fast path: zero-copy `StringView` for pure ASCII input
- Non-ASCII: uses `fromUTF8ReplacingInvalidSequences` with
`utf16_length_from_utf8` size check to prevent overflow
- UTF-8 BOM automatically skipped for `Uint8Array` input
- Pre-built `Structure` with fixed property offsets for fast result
object creation
- `Symbol.toStringTag = "JSONL"` on the namespace object
- `parseChunk` returns errors in `error` property instead of throwing,
preserving partial results
- Comprehensive boundary checks on start/end parameters

## Tests

234 tests covering:
- Complete and partial/streaming input scenarios
- Error handling and recovery
- UTF-8 multi-byte characters and BOM handling
- start/end boundary security (exhaustive combinations, clamping, OOB
prevention)
- 4 GB input rejection (both ASCII and non-ASCII paths)
- Edge cases (empty input, single values, whitespace, special numbers)

---------

Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-01-23 00:23:25 -08:00
robobun
c4f6874960 fix(ws): allow ws.once() to work multiple times (#26359)
## Summary
- Fixes `ws.once()` only working on the first call for each event type
- The bug was in the `#onOrOnce` method which tracked native listeners
via a bitset but didn't account for `once` listeners auto-removing after
firing
- Now only persistent `on()` listeners set the bitset; `once()`
listeners always add new native handlers unless a persistent listener
already exists
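
The previously-broken pattern, roughly (server URL is illustrative):

```ts
import { WebSocket } from "ws";

const ws = new WebSocket("ws://localhost:3000");
ws.once("message", () => {
  // This handler auto-removed itself, but the bitset still claimed a native
  // listener existed, so a second once() never fired before the fix:
  ws.once("message", (data) => console.log("second:", data.toString()));
});
```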

## Test plan
- [x] Added regression test `test/regression/issue/26358.test.ts`
- [x] Test verifies `once('message')` works multiple times
- [x] Test verifies `once('pong')` works multiple times  
- [x] Test verifies `on()` still works correctly
- [x] Test verifies mixing `on()` and `once()` works correctly
- [x] Verified test fails with `USE_SYSTEM_BUN=1` (bug exists)
- [x] Verified test passes with `bun bd test` (fix works)

Fixes #26358

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2026-01-23 00:22:36 -08:00
Dylan Conway
c63415c9c9 Mimalloc v3 update (#26379)
### What does this PR do?

### How did you verify your code works?

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2026-01-23 00:10:48 -08:00
Jarred Sumner
86d4d87beb feat(unicode): migrate grapheme breaking to uucode with GB9c support (#26376)
## Summary

Replace Bun's outdated grapheme breaking implementation with [Ghostty's
approach](https://github.com/ghostty-org/ghostty/tree/main/src/unicode)
using the [uucode](https://github.com/jacobsandlund/uucode) library.
This adds proper **GB9c (Indic Conjunct Break)** support — Devanagari
and other Indic script conjuncts now correctly form single grapheme
clusters.

## Motivation

The previous implementation used a `GraphemeBoundaryClass` enum with
only 12 values and a 2-bit `BreakState` (just `extended_pictographic`
and `regional_indicator` flags). It had no support for Unicode's GB9c
rule, meaning Indic conjunct sequences (consonant + virama + consonant)
were incorrectly split into multiple grapheme clusters.

## Architecture

### Runtime (zero uucode dependency, two table lookups)

```
codepoint → [3-level LUT] → GraphemeBreakNoControl enum (u5, 17 values)
(state, gb1, gb2) → [8KB precomputed array] → (break_result, new_state)
```

The full grapheme break algorithm (GB6-GB13, GB9c, GB11, GB999) runs
only at **comptime** to populate the 8KB decision array. At runtime it's
pure table lookups.

### File Layout

```
src/deps/uucode/              ← Vendored library (MIT, build-time only)
src/unicode/uucode/           ← Build-time integration
  ├── uucode_config.zig       ← What Unicode properties to generate
  ├── grapheme_gen.zig        ← Generator: queries uucode → writes tables
  ├── lut.zig                 ← 3-level lookup table generator
  └── CLAUDE.md               ← Maintenance docs
src/string/immutable/         ← Runtime (no uucode dependency)
  ├── grapheme.zig            ← Grapheme break API + comptime decisions
  ├── grapheme_tables.zig     ← Pre-generated tables (committed, ~91KB source)
  └── visible.zig             ← Width calculation (2 lines changed)
scripts/update-uucode.sh      ← Update vendored uucode + regenerate
```

### Key Types

| Type | Size | Values |
|------|------|--------|
| `GraphemeBreakNoControl` | u5 | 17 (adds `indic_conjunct_break_{consonant,linker,extend}`, `emoji_modifier_base`, `zwnj`, etc.) |
| `BreakState` | u3 | 5 (`default`, `regional_indicator`, `extended_pictographic`, `indic_conjunct_break_consonant`, `indic_conjunct_break_linker`) |

### Binary Size

The tables store only the `GraphemeBreakNoControl` enum per codepoint
(not width or emoji properties, which visible.zig handles separately):

- stage1: 8192 × u16 = **16KB** (maps high byte → stage2 offset)
- stage2: 27392 × u8 = **27KB** (maps to stage3 index; max value is 16)
- stage3: 17 × u5 = **~17 bytes** (one per enum value)
- Precomputed decisions: **8KB**
- **Total: ~51KB** (vs previous ~70KB+)

## How to Regenerate Tables

```bash
# After updating src/deps/uucode/:
./scripts/update-uucode.sh

# Or manually:
vendor/zig/zig build generate-grapheme-tables
```

Normal builds never run the generator — they use the committed
`grapheme_tables.zig`.

## Testing

```bash
bun bd test test/js/bun/util/stringWidth.test.ts
```

New test cases verify Devanagari conjuncts (GB9c):
- `क्ष` (Ka+Virama+Ssa) → single cluster, width 2
- `क्‍ष` (Ka+Virama+ZWJ+Ssa) → single cluster, width 2
- `क्क्क` (Ka+Virama+Ka+Virama+Ka) → single cluster, width 3
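
In user-facing terms, with expected values taken from the test cases above:

```ts
console.log(Bun.stringWidth("क्ष"));   // 2: Ka+Virama+Ssa is one grapheme cluster
console.log(Bun.stringWidth("क्क्क")); // 3: the conjunct run stays a single cluster
```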

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-01-23 00:07:06 -08:00
robobun
3b1c3bfe97 feat(cli): add --heap-prof and --heap-prof-text flags for heap profiling (#26326)
## Summary
- Add `--heap-prof` CLI flag for generating V8-compatible heap snapshots
(`.heapsnapshot`)
- Add `--heap-prof-md` CLI flag for generating markdown heap profiles
(`.md`) designed for CLI analysis
- Add `--heap-prof-name` and `--heap-prof-dir` options for customizing
output location

## Usage

```bash
# Generate V8-compatible heap snapshot (opens in Chrome DevTools)
bun --heap-prof script.js

# Generate markdown heap profile (for CLI analysis with grep/sed/awk)
bun --heap-prof-md script.js

# Specify output location
bun --heap-prof --heap-prof-dir ./profiles --heap-prof-name my-snapshot.heapsnapshot script.js
```

## Example Output (`--heap-prof-md`)

<details>
<summary>Click to expand example markdown profile</summary>

```markdown
# Bun Heap Profile

Generated by `bun --heap-prof-md`. This profile contains complete heap data in markdown format.

**Quick Search Commands:**
```bash
grep 'type=Function' file.md          # Find all Function objects
grep 'size=[0-9]\{5,\}' file.md       # Find objects >= 10KB
grep 'EDGE.*to=12345' file.md         # Find references to object #12345
grep 'gcroot=1' file.md               # Find all GC roots
```

---

## Summary

| Metric | Value |
|--------|------:|
| Total Heap Size | 208.2 KB (213265 bytes) |
| Total Objects | 2651 |
| Total Edges | 7337 |
| Unique Types | 73 |
| GC Roots | 426 |

## Top 50 Types by Retained Size

| Rank | Type | Count | Self Size | Retained Size | Largest Instance |
|-----:|------|------:|----------:|--------------:|-----------------:|
| 1 | `Function` | 568 | 18.7 KB | 5.4 MB | 10.4 KB |
| 2 | `Structure` | 247 | 27.0 KB | 2.0 MB | 10.4 KB |
| 3 | `FunctionExecutable` | 306 | 38.2 KB | 375.5 KB | 13.0 KB |
| 4 | `FunctionCodeBlock` | 25 | 21.5 KB | 294.1 KB | 14.0 KB |
| 5 | `string` | 591 | 11.3 KB | 75.9 KB | 177 B |
...

## Top 50 Largest Objects

Objects that retain the most memory (potential memory leak sources):

| Rank | ID | Type | Self Size | Retained Size | Out-Edges | In-Edges |
|-----:|---:|------|----------:|--------------:|----------:|---------:|
| 1 | 0 | `<root>` | 0 B | 58.1 KB | 852 | 0 |
| 2 | 774 | `GlobalObject` | 10.0 KB | 41.9 KB | 717 | 807 |
| 3 | 600 | `ModuleProgramCodeBlock` | 1.2 KB | 23.9 KB | 30 | 1 |
...

## Retainer Chains

How the top 20 largest objects are kept alive (path from GC root to object):

### 1. Object #0 - `<root>` (58.1 KB retained)

```
(no path to GC root found)
```

### 2. Object #774 - `GlobalObject` (41.9 KB retained)

```
GlobalObject#774 [ROOT] (this object is a GC root)
```
...

## GC Roots

| ID | Type | Size | Retained | Label |
|---:|------|-----:|---------:|-------|
| 0 | `<root>` | 0 B | 58.1 KB | |
| 774 | `GlobalObject` | 10.0 KB | 41.9 KB | |
...

<details>
<summary>Click to expand 2651 objects (searchable with grep)</summary>

| ID | Type | Size | Retained | Flags | Label |
|---:|------|-----:|---------:|-------|-------|
| 0 | `<root>` | 0 | 59467 | gcroot=1  |  |
| 1 | `Structure` | 112 | 10644 |  |  |
...

</details>
```

</details>

## Test plan
- [x] `bun bd test test/cli/heap-prof.test.ts` - All 7 tests pass
- [x] `USE_SYSTEM_BUN=1 bun test test/cli/heap-prof.test.ts` - Tests
fail (feature not in system bun)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-01-22 15:27:37 -08:00
Ciro Spaciari
2582e6f98e fix(http2): fix settings, window size handling, and dynamic header buffer allocation (#26119)
## Summary

This PR fixes multiple HTTP/2 protocol compliance issues that were
causing stream errors with various HTTP/2 clients (Fauna, gRPC/Connect,
etc.).
fixes https://github.com/oven-sh/bun/issues/12544
fixes https://github.com/oven-sh/bun/issues/25589
### Key Fixes

**Window Size and Settings Handling**
- Fix initial stream window size to use `DEFAULT_WINDOW_SIZE` until
`SETTINGS_ACK` is received
- Per RFC 7540 Section 6.5.1: The sender can only rely on settings being
applied AFTER receiving `SETTINGS_ACK`
- Properly adjust existing stream windows when `INITIAL_WINDOW_SIZE`
setting changes (RFC 7540 Section 6.9.2)

**Header List Size Enforcement**  
- Implement `maxHeaderListSize` checking per RFC 7540 Section 6.5.2
- Track cumulative header list size using HPACK entry overhead (32 bytes
per RFC 7541 Section 4.1)
- Reject streams with `ENHANCE_YOUR_CALM` when header list exceeds
configured limit

**Custom Settings Support**
- Add validation for `customSettings` option (up to 10 custom settings,
matching Node.js `MAX_ADDITIONAL_SETTINGS`)
- Validate setting IDs are in range `[0, 0xFFFF]` per RFC 7540
- Validate setting values are in range `[0, 2^32-1]`

**Settings Validation Improvements**
- Use float comparison for settings validation to handle large values
correctly (was using `toInt32()` which truncates)
- Use proper `HTTP2_INVALID_SETTING_VALUE_RangeError` error codes for
Node.js compatibility

**BufferFallbackAllocator** - New allocator that tries a provided buffer
first, falls back to heap:
- Similar to `std.heap.stackFallback` but accepts external buffer slice
- Used with `shared_request_buffer` (16KB threadlocal) for common cases
- Falls back to `bun.default_allocator` for large headers

## Test Plan

- [x] `bun bd` compiles successfully
- [x] Node.js HTTP/2 tests pass: `bun bd
test/js/node/test/parallel/test-http2-connect.js`
- [x] New regression tests for frame size issues: `bun bd test
test/regression/issue/25589.test.ts`
- [x] HTTP/2 continuation tests: `bun bd test
test/js/node/http2/node-http2-continuation.test.ts`

---------

Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2026-01-22 14:35:18 -08:00
mmitchellg5
85080f7949 fix: handle DT_UNKNOWN in dir_iterator for bind-mounted filesystems (#25838)
### What does this PR do?
Fixes #24007
Possibly fixes https://github.com/oven-sh/bun/issues/18902,
https://github.com/oven-sh/bun/issues/7412

Some filesystems (bind mounts, FUSE, NFS) don't provide `d_type` in
directory entries, returning `DT_UNKNOWN`. This caused glob and
recursive readdir to skip entries entirely.

## Problem
On Linux filesystems that don't populate `d_type` in directory entries
(bind mounts, FUSE, NFS, some ext4 configurations), `readdir()` returns
`DT_UNKNOWN` instead of the actual file type. This caused:
- `Bun.Glob` to skip files/directories entirely
- `fs.readdirSync(..., {recursive: true})` to not recurse into
subdirectories
- `fs.readdirSync(..., {withFileTypes: true})` to report incorrect types

## Solution
Implemented a **lazy `lstatat()` fallback** when `d_type == DT_UNKNOWN`:

- **`sys.zig`**: Added `lstatat()` function - same as `fstatat()` but
with `AT_SYMLINK_NOFOLLOW` flag to correctly identify symlinks
- **`GlobWalker.zig`**: When encountering `.unknown` entries, first
check if filename matches pattern, then call `lstatat()` only if needed
- **`node_fs.zig`**: Handle `.unknown` in both async and sync recursive
readdir paths; propagate resolved kind to Dirent objects
- **`dir_iterator.zig`**: Return `.unknown` for `DT_UNKNOWN` entries,
letting callers handle lazy stat

**Why `lstatat` instead of `fstatat`?** We use `AT_SYMLINK_NOFOLLOW` to
preserve consistent behavior with normal filesystems - symlinks should
be reported as symlinks, not as their target type. This matches [Node.js
behavior](https://github.com/nodejs/node/blob/main/lib/internal/fs/utils.js#L251-L269)
which uses `lstat()` for the DT_UNKNOWN fallback, and follows the lazy
stat pattern established in PR #18172.
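
The user-visible effect, sketched (the mount path is illustrative):

```ts
import { readdirSync } from "node:fs";

// On a bind mount or FUSE filesystem returning DT_UNKNOWN, both of these
// previously missed entries; the lazy lstat fallback resolves real types.
for (const file of new Bun.Glob("**/*.ts").scanSync("/mnt/bind-mount")) {
  console.log(file);
}
console.log(readdirSync("/mnt/bind-mount", { recursive: true }).length);
```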

### How did you verify your code works?

**Testing:**
- Regression test: `test/regression/issue/24007.test.ts`
- FUSE filesystem test: `test/cli/run/glob-on-fuse.test.ts` (reuses
`fuse-fs.py` from PR #18172, includes symlink verification)
- All existing glob/readdir tests pass
- **Verified in Docker bind-mount environment:**
  - Official Bun: `0 files`
  - Patched Bun: `3 files`

**Performance:** No impact on normal filesystems - the `.unknown` branch
is only hit when `d_type == DT_UNKNOWN`. The lazy stat pattern avoids
unnecessary syscalls by checking pattern match first.

---------

Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-01-22 13:44:49 -08:00
robobun
2a9980076d feat(windows): Add Windows ARM64 support (#26215) 2026-01-22 04:22:45 -08:00
Dylan Conway
1da41b7f91 update WebKit (#26324)
### What does this PR do?
Updates WebKit to
87c6cde57d
### How did you verify your code works?

---------

Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2026-01-21 20:17:46 -08:00
robobun
136d345752 fix(install): show dependency name when file: path resolution fails (#26340)
## Summary
- When `bun install` encounters a stale lockfile with a `file:`
dependency path that differs from the package.json, it now shows which
dependency caused the issue instead of the misleading "Bun could not
find a package.json file to install from" error.

## Test plan
- Added regression test in `test/regression/issue/26337.test.ts`
- Verified test fails with system bun (`USE_SYSTEM_BUN=1 bun test
test/regression/issue/26337.test.ts`)
- Verified test passes with debug build (`bun bd test
test/regression/issue/26337.test.ts`)

Fixes #26337

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-21 18:41:15 -08:00
Alistair Smith
93d5cc6e56 fix: rename compile "Target" ➜ "CompileTarget" 2026-01-21 13:58:32 -08:00
Jarred Sumner
37c41137f8 feat(transpiler): add replMode option for REPL transforms (#26246)
## Summary

Add a new `replMode` option to `Bun.Transpiler` that transforms code for
interactive REPL evaluation. This enables building a Node.js-compatible
REPL using `Bun.Transpiler` with `vm.runInContext` for persistent
variable scope.

## Features

- **Expression result capture**: Wraps the last expression in `{
__proto__: null, value: expr }` for result capture
- **IIFE wrappers**: Uses sync/async IIFE wrappers to avoid extra
parentheses around object literals in output
- **Variable hoisting**: Hoists `var`/`let`/`const` declarations outside
the IIFE for persistence across REPL lines
- **const → let conversion**: Converts `const` to `let` for REPL
mutability (allows re-declaration)
- **Function hoisting**: Hoists function declarations with
`this.funcName = funcName` assignment for vm context persistence
- **Class hoisting**: Hoists class declarations with `var` for vm
context persistence
- **Object literal detection**: Auto-detects object literals (code
starting with `{` without trailing `;`) and wraps them in parentheses

## Usage

```typescript
import vm from "node:vm";

const transpiler = new Bun.Transpiler({
  loader: "tsx",
  replMode: true,
});

const context = vm.createContext({ console, Promise });

async function repl(code: string) {
  const transformed = transpiler.transformSync(code);
  const result = await vm.runInContext(transformed, context);
  return result.value;
}

// Example REPL session
await repl("var x = 10");        // 10
await repl("x + 5");             // 15
await repl("class Counter {}"); // [class Counter]
await repl("new Counter()");    // Counter {}
await repl("{a: 1, b: 2}");     // {a: 1, b: 2} (auto-detected object literal)
await repl("await Promise.resolve(42)"); // 42
```

## Transform Examples

| Input | Output |
|-------|--------|
| `42` | `(() => { return { __proto__: null, value: 42 }; })()` |
| `var x = 10` | `var x; (() => { return { __proto__: null, value: x = 10 }; })()` |
| `await fetch()` | `(async () => { return { __proto__: null, value: await fetch() }; })()` |
| `{a: 1}` | `(() => { return { __proto__: null, value: ({a: 1}) }; })()` |
| `class Foo {}` | `var Foo; (() => { return { __proto__: null, value: Foo = class Foo {} }; })()` |

## Files Changed

- `src/ast/repl_transforms.zig`: New module containing REPL transform
logic
- `src/ast/P.zig`: Calls REPL transforms after parsing in REPL mode
- `src/bun.js/api/JSTranspiler.zig`: Adds `replMode` config option and
object literal detection
- `src/options.zig`, `src/runtime.zig`, `src/transpiler.zig`: Propagate
`repl_mode` flag
- `packages/bun-types/bun.d.ts`: TypeScript type definitions
- `test/js/bun/transpiler/repl-transform.test.ts`: Test cases

## Testing

```bash
bun bd test test/js/bun/transpiler/repl-transform.test.ts
```

34 tests covering:
- Basic transform output
- REPL session with node:vm
- Variable persistence across lines
- Object literal detection
- Edge cases (empty input, comments, TypeScript, etc.)
- No-transform cases (await inside async functions)

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-01-21 13:39:25 -08:00
Dylan Conway
dc203e853a fix patch-bounds-check.test.ts (#26344)
### What does this PR do?

### How did you verify your code works?
2026-01-21 13:22:21 -08:00
robobun
2febdb5b49 feat(cli): add --cpu-prof-md flag for markdown CPU profile output (#26327)
## Summary
- Adds `--cpu-prof-md` flag that outputs CPU profiling data in markdown
format optimized for GitHub rendering and LLM analysis
- Complements the existing `--cpu-prof` flag which outputs Chrome
DevTools JSON format
- `--cpu-prof-md` works standalone or combined with `--cpu-prof` to
generate both formats

## Usage
```bash
# Markdown only
bun --cpu-prof-md script.js

# Both formats
bun --cpu-prof --cpu-prof-md script.js
```

## Example Output

# CPU Profile

| Duration | Samples | Interval | Functions |
|----------|---------|----------|----------|
| 255.7ms | 178 | 1ms | 32 |

**Top 10:** \`fibonacci\` 23.6%, \`fibonacci\` 12.6%, \`parseModule\`
11.7%, \`(anonymous)\` 9.5%, \`loadAndEvaluateModule\` 5.5%,
\`requestSatisfyUtil\` 3.7%, \`main\` 2.7%,
\`moduleDeclarationInstantiation\` 2.6%, \`loadModule\` 2.5%,
\`cacheSatisfyAndReturn\` 2.5%

## Hot Functions (Self Time)

| Self% | Self | Total% | Total | Function | Location |
|------:|-----:|-------:|------:|----------|----------|
| 23.6% | 60.5ms | 23.6% | 60.5ms | \`fibonacci\` | /tmp/test-profile.js |
| 12.6% | 32.3ms | 100.0% | 1.29s | \`fibonacci\` | /tmp/test-profile.js:3 |
| 11.7% | 29.9ms | 11.7% | 29.9ms | \`parseModule\` | [native code] |
| 9.5% | 24.3ms | 43.4% | 111.0ms | \`(anonymous)\` | [native code] |
| 5.5% | 14.2ms | 99.9% | 255.5ms | \`loadAndEvaluateModule\` | [native code] |

## Call Tree (Total Time)

| Total% | Total | Self% | Self | Function | Location |
|-------:|------:|------:|-----:|----------|----------|
| 100.0% | 1.29s | 12.6% | 32.3ms | \`fibonacci\` | /tmp/test-profile.js:3 |
| 99.9% | 255.5ms | 5.5% | 14.2ms | \`loadAndEvaluateModule\` | [native code] |
| 86.0% | 219.9ms | 1.3% | 3.3ms | \`moduleEvaluation\` | [native code] |
| 43.4% | 111.0ms | 9.5% | 24.3ms | \`(anonymous)\` | [native code] |

## Function Details

### \`fibonacci\`

- **Location:** \`/tmp/test-profile.js:3\`
- **Self:** 12.6% (32.3ms) | **Total:** 100.0% (1.29s)
- **Called by:** \`fibonacci\` (864), \`main\` (68)
- **Calls:** \`fibonacci\` (864), \`fibonacci\` (44), \`fibonacci\` (2)

### \`main\`

- **Location:** \`/tmp/test-profile.js:9\`
- **Self:** 0.0% (0us) | **Total:** 38.4% (98.2ms)
- **Called by:** \`(module)\` (72)
- **Calls:** \`fibonacci\` (68), \`inspect\` (2), \`fibonacci\` (2)

## Files

| Self% | Self | File |
|------:|-----:|------|
| 58.8% | 150.6ms | \`[native code]\` |
| 40.1% | 102.6ms | \`/tmp/test-profile.js\` |
| 0.9% | 2.4ms | \`bun:main\` |

## Test plan
- [x] `--cpu-prof-md` generates `.md` file with markdown tables
- [x] `--cpu-prof-md` works standalone without `--cpu-prof`
- [x] Both flags together generate both `.cpuprofile` and `.md` files
- [x] Custom filename with `--cpu-prof-name` works
- [x] Custom directory with `--cpu-prof-dir` works
- [x] All 9 tests pass

🤖 Generated with [Claude Code](https://claude.ai/code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2026-01-21 13:21:01 -08:00
Jarred Sumner
b6b3626c14 fix(bindings): handle errors from String.toJS() for oversized strings (#26213)
## Summary

- When a string exceeds `WTF::String::MaxLength` (~4GB),
`bun.String.createUninitialized()` returns a `.Dead` tag
- The C++ layer now properly throws `ERR_STRING_TOO_LONG` when this
happens
- Updated `String.toJS()` in Zig to return `bun.JSError!jsc.JSValue`
instead of just `jsc.JSValue`
- Updated ~40 Zig caller files to handle the error with `try`
- C++ callers updated with `RETURN_IF_EXCEPTION` checks

## Test plan

- [x] `bun bd test test/js/node/buffer.test.js` - 449 tests pass
- [x] `bun bd
test/js/node/test/parallel/test-buffer-tostring-rangeerror.js` - passes

🤖 Generated with [Claude Code](https://claude.ai/code)

---------

Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Claude Bot <claude-bot@bun.sh>
2026-01-21 13:01:25 -08:00
Dylan Conway
7f70b01259 feat: support CPU profiling via environment variables (#26313)
Adds environment variable support for enabling CPU profiling without CLI
flags:

- `BUN_CPU_PROFILE=1` — enables the CPU profiler
- `BUN_CPU_PROFILE_DIR=<path>` — sets the output directory for the
profile
- `BUN_CPU_PROFILE_NAME=<name>` — sets the profile file name

These are used as fallbacks when the corresponding `--cpu-prof` CLI
options are not provided. This is useful for profiling in contexts where
modifying the command line isn't practical (e.g. scripts invoked by
other tools).
2026-01-20 22:43:36 -08:00
robobun
08103aa2ff fix(compile): ensure bytecode alignment accounts for section header (#26299)
## Summary

Fixes bytecode alignment in standalone executables to prevent crashes
when loading bytecode cache on Windows.

The bytecode offset needs to be aligned such that when loaded at
runtime, the bytecode pointer is 128-byte aligned. Previously, alignment
was based on arbitrary memory addresses during compilation, which didn't
account for the 8-byte section header prepended at runtime. This caused
the bytecode to be misaligned, leading to segfaults in
`JSC::CachedJSValue::decode` on Windows.

## Root Cause

At runtime, embedded data starts 8 bytes after the PE/Mach-O section
virtual address (which is page-aligned, hence 128-byte aligned). For
bytecode at offset `O` to be aligned:
```
(section_va + 8 + O) % 128 == 0
=> (8 + O) % 128 == 0
=> O % 128 == 120
```

The previous code used `std.mem.alignInSlice()` which found aligned
addresses based on the compilation buffer's arbitrary address, not
accounting for the 8-byte header offset at load time.

## Changes

- **`src/StandaloneModuleGraph.zig`**: Calculate bytecode offset to
satisfy `offset % 128 == 120` instead of using `alignInSlice`
- **`test/regression/issue/26298.test.ts`**: Added regression tests for
bytecode cache in standalone executables

## Test plan

- [x] Added regression test `test/regression/issue/26298.test.ts` with 3
test cases
- [x] Existing `HelloWorldBytecode` test passes
- [x] Build succeeds

Fixes #26298

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-20 22:42:38 -08:00
Jarred Sumner
bb4d150aed Try using internal string-width in node:readline (#26306)
### What does this PR do?

Remove NFKDC normalization and stripVTControlCharacters since
Bun.stringWidth does this now

### How did you verify your code works?

ci

---------

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-01-20 22:41:14 -08:00
Dylan Conway
45af3335e6 Missing changes from #26080 (#26308)
### What does this PR do?

### How did you verify your code works?
2026-01-20 14:51:36 -08:00
robobun
dbad2857ea fix(test): delete setTimeout.clock property when disabling fake timers (#26285)
## Summary

- Fixes the `setTimeout.clock` property not being properly deleted after
`jest.useRealTimers()` is called
- Previously, the property was set to `false` instead of deleted,
causing `hasOwnProperty` checks to return `true`
- This broke React Testing Library and other libraries that check for
fake timers using `Object.prototype.hasOwnProperty.call(setTimeout,
'clock')`
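
The detection pattern in question, as a sketch:

```ts
import { jest } from "bun:test";

jest.useFakeTimers();
Object.prototype.hasOwnProperty.call(setTimeout, "clock"); // true

jest.useRealTimers();
// Previously still true because the property was set to false rather than
// deleted; after the fix the property is gone entirely:
Object.prototype.hasOwnProperty.call(setTimeout, "clock"); // false
```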

## Changes

- Added `JSValue.deleteProperty()` binding in Zig to call JSC's
`deleteProperty()` method
- Updated `setFakeTimerMarker()` in `FakeTimers.zig` to delete the
`clock` property when disabling fake timers
- Updated existing test in `test/regression/issue/25869.test.ts` to
verify correct behavior
- Added new regression test in `test/regression/issue/26284.test.ts`

## Test plan

- [x] Verified new test fails with system bun (before fix)
- [x] Verified new test passes with debug build (after fix)
- [x] Verified existing fake timer tests still pass
- [x] Verified test for issue #25869 passes with fix

Fixes #26284

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-20 12:51:54 -08:00
robobun
6140eb5faf fix(windows): chunk large buffers in preadv/pwritev to avoid integer overflow (#26303)
## Summary
- Fix panic "integer does not fit in destination type" when
reading/writing large files on Windows
- Add chunking for iovec arrays that exceed `c_uint` max entries
- Add chunking for individual buffers that exceed 4GB (`u32` max)

The libuv functions `uv_fs_read` and `uv_fs_write` have two size
limitations:
1. `nbufs` parameter is `c_uint` (32-bit), limiting the number of iovec
entries
2. `uv_buf_t.len` is `ULONG` (u32 on Windows), limiting individual
buffer sizes to 4GB

This change processes large operations in chunks, accumulating results
and updating file positions between chunks.

## Test plan
- [x] Verified `bun run zig:check-all` passes on all platforms
- [x] Verified `bun bd` builds successfully
- [x] Verified basic file read/write operations work correctly

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
2026-01-20 12:47:07 -08:00
Alistair Smith
497a4d4818 Fix duplicate exports when two entrypoints share symbols (#26089)
### What does this PR do?

Fixes #5344
Fixes #6356

### How did you verify your code works?

Some test coverage

---------

Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: Claude Bot <claude-bot@bun.sh>
2026-01-20 12:40:33 -08:00
Dylan Conway
66d8397bd7 fix(compile): fix native module export corruption with multiple NAPI modules (#26080)
## Summary

Fixes native module export corruption when compiling multiple NAPI
modules with `bun build --compile` on Linux.

- When loading multiple `.node` files in a compiled binary, the second
module would incorrectly get the first module's exports
- Root cause: memfd file descriptors were closed after dlopen, allowing
fd reuse. Since dlopen caches by path (`/proc/self/fd/N`), it returned
the wrong cached handle
- This bug occurs when loading native modules in quick succession, as
the fd number is likely to be reused immediately after being closed
- Fix: Disable the memfd optimization and always use temp files with
unique paths

## Test plan

- [x] Added regression test in `test/regression/issue/26045/`
- [x] Test fails with production bun (v1.3.6)
- [x] Test passes with the fix

Fixes https://github.com/oven-sh/bun/issues/26045

🤖 Generated with [Claude Code](https://claude.com/claude-code)
2026-01-20 12:40:12 -08:00
robobun
170f8f7962 fix(tls): use correct variable in setVerifyMode (#26255)
## Summary
- Fix typo in `setVerifyMode` where `reject_unauthorized` was
incorrectly reading from `request_cert_js` instead of
`reject_unauthorized_js`

## Test plan
- Existing TLS renegotiation tests pass
- Code inspection shows the fix is correct (simple variable name typo)

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-19 22:47:18 -08:00
robobun
5f470278d1 fix(update): 'l' key now selects package in interactive update (#26265)
## Summary
- The 'l' key in `bun update --interactive` now correctly selects the
package when toggling between Target and Latest versions
- Previously, pressing 'l' would toggle `use_latest` but not mark the
package as selected, causing the underline indicator to disappear and
the package not being included when confirming

## Test plan
- [x] Added regression test `test/regression/issue/24131.test.ts` that
verifies 'l' selects the package
- [x] Test fails with system bun (before fix) and passes with debug
build (after fix)
- [x] `bun bd test test/regression/issue/24131.test.ts` passes

Fixes #24131

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-19 22:46:36 -08:00
robobun
3a4daa95ac fix(bundler): fix out-of-bounds access in DynamicBitSetUnmanaged.bytes() (#26283)
## Summary

Fixes an index out-of-bounds panic that occurs during bundler code
splitting on Windows.

- The `bytes()` function in `DynamicBitSetUnmanaged` was accessing
`masks[0..numMasks(bit_length) + 1]`, reading one element past the
allocated array
- When the bit set has exactly one mask (bit_length <= 64), this causes
a panic: "index out of bounds: index 1, len 1"
- The bug manifests when sorting chunk deduplication keys derived from
`AutoBitSet.bytes()`

The fix simply removes the erroneous `+ 1` from the slice bounds.

## Test plan

- [x] `bun bd test test/bundler/bundler_splitting.test.ts` - passes
- [x] `bun bd test test/bundler/esbuild/splitting.test.ts` - passes
- [x] `bun bd test test/bundler/bundler_regressions.test.ts` - passes
- [x] `bun bd test test/bundler/bundler_edgecase.test.ts` - passes

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-19 22:45:56 -08:00
robobun
d3f8bec565 fix(Terminal): callbacks not invoked inside AsyncLocalStorage.run() (#26288)
## Summary
- Fixed `Bun.Terminal` callbacks (data, exit, drain) not being invoked
when the terminal is created inside `AsyncLocalStorage.run()`

## Root Cause
The bug was a redundant `isCallable()` check when storing callbacks in
`initTerminal()`:

1. In `Options.parseFromJS()`, callbacks are validated with
`isCallable()`, then wrapped with `withAsyncContextIfNeeded()`
2. Inside `AsyncLocalStorage.run()`, `withAsyncContextIfNeeded()`
returns an `AsyncContextFrame` object that wraps the callback + async
context
3. An `AsyncContextFrame` is NOT callable - it's a wrapper object. So
the second `isCallable()` check fails
4. Because the check fails, the callback is never stored via
`js.gc.set()`
5. When `onReadChunk()` tries to get the callback, it returns `null` and
the callback is never invoked

## Fix
Removed the redundant `isCallable()` check in `initTerminal()`. The
check was already performed in `parseFromJS()` before wrapping. Other
similar patterns (socket Handlers, Timer) simply store the wrapped
callback without re-checking.

Fixes #26286

## Test plan
- [x] Added regression test in `test/regression/issue/26286.test.ts`
- [x] Verified test fails with system Bun (times out because callback
never invoked)
- [x] Verified test passes with debug build

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-19 22:45:22 -08:00
robobun
62834e1bfe fix(init): resolve TypeScript errors in react-tailwind template build.ts (#26258)
## Summary
- Fixes TypeScript errors in the react-tailwind template's `build.ts`
when used with the template's strict `tsconfig.json`

## Test plan
- Added regression test `test/regression/issue/24364.test.ts` that
verifies TypeScript compilation passes
- Verified test fails with old template code and passes with fix

Closes #24364

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-19 17:12:42 -08:00
robobun
704252e85f fix(npm): show helpful error when postinstall script hasn't run (#26259)
## Summary
- Replaces empty placeholder executables with shell scripts that print
helpful error messages
- The scripts exit with code 1 instead of silently succeeding with code
0
- Helps users diagnose issues when installing with `--ignore-scripts` or
using pnpm

## Problem
When installing the `bun` npm package with `--ignore-scripts` or using
pnpm (which skips postinstall by default), the placeholder `bun.exe` and
`bunx.exe` files were empty, causing them to silently exit with code 0
and produce no output. This made it very difficult for users to
understand why bun wasn't working.

## Solution
The placeholder files are now shell scripts that:
1. Print a clear error message explaining the issue
2. Provide instructions on how to fix it (manually running postinstall
or reinstalling without `--ignore-scripts`)
3. Exit with code 1 to indicate failure

Example output when running the placeholder:
```
Error: Bun's postinstall script was not run.

This occurs when using --ignore-scripts during installation, or when using a
package manager like pnpm that does not run postinstall scripts by default.

To fix this, run the postinstall script manually:
  cd node_modules/bun && node install.js

Or reinstall bun without the --ignore-scripts flag.
```

## Test plan
- [x] Added regression test that verifies the placeholder script
behavior
- [x] Test passes with `bun bd test test/regression/issue/24329.test.ts`

Fixes #24329

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-19 17:11:38 -08:00
robobun
04f441453d fix(assert): partialDeepStrictEqual now correctly handles Map subset checking (#26257)
## Summary

- Fixed `assert.partialDeepStrictEqual` to correctly handle Map subset
checking
- Previously, Map comparison used `Bun.deepEquals` which required exact
equality
- Now properly checks that all entries in the expected Map exist in the
actual Map with matching values
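
The subset semantics, illustrated with a sketch that mirrors the test cases below:

```ts
import assert from "node:assert";

// Passes after the fix: every expected entry exists in actual.
assert.partialDeepStrictEqual(
  new Map([["key1", 1], ["key2", 2]]), // actual
  new Map([["key2", 2]]),              // expected: a subset
);

// Still fails: expected contains a key the actual Map lacks.
assert.throws(() =>
  assert.partialDeepStrictEqual(new Map([["key1", 1]]), new Map([["key2", 2]])),
);
```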

Fixes #24338

## Test plan

- Added comprehensive test suite in
`test/regression/issue/24338.test.ts` covering:
  - Basic subset checking (key2 in Map with key1 and key2)
  - Exact match cases
  - Empty expected Map
  - Multiple matching entries
  - Nested objects as values
  - Failure cases when expected has more keys
  - Failure cases when key is missing in actual
  - Failure cases when values differ
  - Nested Map values
  - Non-string keys

🤖 Generated with [Claude Code](https://claude.ai/code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-19 17:10:47 -08:00
robobun
362839c987 fix(websocket): forward URL credentials as Authorization header (#26278)
## Summary

- Extracts credentials from WebSocket URL (`ws://user:pass@host`) and
sends them as Basic Authorization header
- User-provided `Authorization` header takes precedence over URL
credentials
- Credentials are properly URL-decoded before being Base64-encoded
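
In practice (host and token are illustrative; the `headers` constructor option is Bun-specific):

```ts
// URL credentials are percent-decoded, then sent as a Basic header:
const ws = new WebSocket("ws://user:p%40ss@localhost:3000");
// sends: Authorization: Basic <base64 of "user:p@ss">

// An explicit Authorization header wins over URL credentials:
const ws2 = new WebSocket("ws://user:pass@localhost:3000", {
  headers: { Authorization: "Bearer my-token" },
});
```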

Fixes #24388

## Test plan

- [x] Added regression test `test/regression/issue/24388.test.ts` with 5
test cases:
  - Basic credentials in URL
  - Empty password
  - No credentials (no header sent)
  - Custom Authorization header takes precedence
  - Special characters (URL-encoded) in credentials
- [x] Tests pass with `bun bd test test/regression/issue/24388.test.ts`
- [x] Tests fail with `USE_SYSTEM_BUN=1 bun test` (confirming the bug
existed)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-19 17:04:44 -08:00
robobun
0dda0f6310 fix(socket): free resources when socket is reused for reconnection (#26280)
## Summary
- Fix memory leak in socket reconnection path by freeing old resources
before reassignment
- Add regression test for socket connection/close operations

## Problem
When sockets are reused in `connectInner` (common with MongoDB driver
reconnection patterns), the old connection metadata was being
overwritten without being freed first. This caused memory leaks of:
- `connection` (hostname/path strings)
- `protos` (ALPN protocol strings)
- `server_name` (SNI hostname string)
- `socket_context` (SSL context)

## Solution
This fix ensures these resources are properly freed before reassignment
when a socket is reused for reconnection. This matches the cleanup
pattern already used in the socket's `deinit()` function.

## Test plan
- [x] Added regression test `test/regression/issue/24118.test.ts`
- [x] Verified build compiles successfully
- [x] Verified test passes with `bun bd test
test/regression/issue/24118.test.ts`

Fixes #24118

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2026-01-19 15:46:52 -08:00
robobun
5e8a84e5e7 fix(fs): Dirent.isFIFO() incorrectly returns true for unknown type (#26263)
## Summary
- Fix `fs.Dirent.isFIFO()` incorrectly returning `true` for unknown file
types (e.g., on sshfs/NFS mounts)
- Remove the `EventPort` check from `isFIFO()` since `EventPort = 0 =
Unknown`
- Add regression test for the fix

Fixes #24129

## Root Cause
In `NodeDirent.cpp`, the `isFIFO()` method was checking:
```cpp
type == static_cast<int32_t>(DirEntType::NamedPipe) || type == static_cast<int32_t>(DirEntType::EventPort)
```

Since `EventPort = 0` and `Unknown = 0` (they share the same enum
value), any file with unknown type (returned by filesystems like sshfs,
NFS, etc. that don't populate `d_type`) would incorrectly trigger
`isFIFO() === true`.
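
Observable behavior, sketched (the mount path is illustrative):

```ts
import { readdirSync } from "node:fs";

for (const entry of readdirSync("/mnt/sshfs", { withFileTypes: true })) {
  // On filesystems that report DT_UNKNOWN this was true for every entry;
  // an unknown type no longer masquerades as a named pipe.
  console.log(entry.name, entry.isFIFO());
}
```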

## Test plan
- [x] Regression test: `bun bd test test/regression/issue/24129.test.ts`
- [x] Existing Dirent tests: `bun bd test test/js/node/fs/fs.test.ts -t
"Dirent"`

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-19 15:44:27 -08:00
robobun
01fac4a63c fix(shell): prevent double-free of ShellArgs in error path (#26277)
## Summary
- Fixes a double-free bug in the shell interpreter error handling path

## What Changed
When `interpreter.init()` succeeds but `globalThis.hasException()` is
true, the code was calling `shargs.deinit()` before
`interpreter.finalize()`. However, `interpreter.args` points to `shargs`
after `init()` succeeds, so calling `interpreter.finalize()` ->
`deinitFromFinalizer()` -> `this.args.deinit()` would then try to deinit
an already-freed `ShellArgs`, causing a double-free.

This is a related issue to #24368, which reported crashes during GC
finalization of ShellInterpreter objects. While the main fix for #24368
was added in v1.3.6 (commit 367eeb308e), this fixes an additional
double-free bug in the error handling path.

## Test plan
- [x] Added regression tests in `test/regression/issue/24368.test.ts`
- [x] Tests pass with `bun bd test test/regression/issue/24368.test.ts`
- [x] Basic shell functionality verified

Fixes #24368

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2026-01-19 15:24:57 -08:00
robobun
f8adf01f51 fix(install): handle null metadata gracefully instead of panicking (#26238)
## Summary

- Fixed a panic in `bun add` when HTTP requests fail before receiving
response headers
- The panic "Assertion failure: Expected metadata to be set" now becomes
a graceful error message

## Root Cause

In `src/install/PackageManagerTask.zig`, the code assumed `metadata` is
always non-null and panicked when it wasn't. However, `metadata` can be
null when:
- HTTP request fails before receiving response headers
- Network connection is refused/lost
- Timeout occurs before response
- Firewall blocks/corrupts the response

## Fix

Replaced the panic with proper error handling, following the existing
pattern in `runTasks.zig`:

```zig
const metadata = manifest.network.response.metadata orelse {
    // Handle the error gracefully instead of panicking
    const err = manifest.network.response.fail orelse error.HTTPError;
    // ... show user-friendly error message
};
```

## Test plan

- [x] Added regression test `test/regression/issue/26236.test.ts`
- [x] Test verifies Bun shows graceful error instead of panicking
- [x] `bun bd test test/regression/issue/26236.test.ts` passes

Fixes #26236

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2026-01-19 11:26:25 -08:00
robobun
d85d40ea29 fix(ffi): respect C_INCLUDE_PATH and LIBRARY_PATH env vars (#26250)
## Summary
- Make `bun:ffi`'s TinyCC compiler check standard C compiler environment
variables
- Add support for `C_INCLUDE_PATH` (include paths) and `LIBRARY_PATH`
(library paths)
- Fixes compilation on NixOS and other systems that don't use standard
FHS paths (see the usage sketch below)
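
A hedged usage sketch — the include path and `add.c` are hypothetical; `cc` is `bun:ffi`'s TinyCC-based compiler:

```ts
import { cc } from "bun:ffi";

// Hypothetical non-FHS include directory, e.g. a Nix store path.
// Multiple paths are colon-separated, matching gcc/clang conventions.
// (Exporting it in the shell before launching bun also works.)
process.env.C_INCLUDE_PATH = "/nix/store/example/include:/opt/local/include";

const {
  symbols: { add },
} = cc({
  source: "./add.c", // assumed to #include a header found via C_INCLUDE_PATH
  symbols: {
    add: { args: ["int", "int"], returns: "int" },
  },
});

console.log(add(2, 3)); // 5
```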

## Test plan
- [x] Added regression test `test/regression/issue/26249.test.ts` that
verifies:
  - Single path in `C_INCLUDE_PATH` works
  - Multiple colon-separated paths in `C_INCLUDE_PATH` work
- [x] Verified test fails with system bun (without fix)
- [x] Verified test passes with debug build (with fix)
- [x] Verified existing `cc.test.ts` tests still pass

Closes #26249

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-19 11:24:02 -08:00
Jarred Sumner
c47f84348a Update CLAUDE.md 2026-01-18 14:07:30 -08:00
SUZUKI Sosuke
f8a049e9f2 perf(buffer): optimize swap16/swap64 with __builtin_bswap (#26190)
## Summary

Optimize `Buffer.swap16()` and `Buffer.swap64()` by replacing
byte-by-byte swapping loops with `__builtin_bswap16/64` compiler
intrinsics.

## Problem

`Buffer.swap16` and `Buffer.swap64` were significantly slower than
Node.js due to inefficient byte-level operations:

- **swap16**: Swapped bytes one at a time in a loop
- **swap64**: Used a nested loop with 4 byte swaps per 8-byte element

## Solution

Replace the manual byte swapping with `__builtin_bswap16/64` intrinsics,
which compile to single CPU instructions (`BSWAP` on x86, `REV` on ARM).

Use `memcpy` for loading/storing values to handle potentially unaligned
buffers safely.
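
For reference, the observable semantics being optimized — plain `Buffer` usage, independent of the intrinsics used internally:

```ts
// Each 2-byte (swap16) or 8-byte (swap64) element has its bytes reversed in place.
const buf = Buffer.from([0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88]);

buf.swap16();
console.log(buf); // <Buffer 22 11 44 33 66 55 88 77>

buf.swap16(); // swap back to the original order
buf.swap64();
console.log(buf); // <Buffer 88 77 66 55 44 33 22 11>
```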

## Benchmark Results (64KB buffer, Apple M4 Max)

| Operation | Bun 1.3.6 | Node.js 24 | This PR | Improvement |
|-----------|-----------|------------|---------|-------------|
| swap16    | 1.00 µs   | 0.57 µs    | 0.56 µs | **1.79x faster** |
| swap32    | 0.55 µs   | 0.77 µs    | 0.54 µs | (no change, already fast) |
| swap64    | 2.02 µs   | 0.58 µs    | 0.56 µs | **3.6x faster** |

Bun now matches or exceeds Node.js performance for all swap operations.

## Notes

- `swap32` was not modified as the compiler already optimizes the 4-byte
swap pattern
- All existing tests pass
2026-01-18 13:33:04 -08:00
SUZUKI Sosuke
12a45b7cbf Remove dead data URL check in fetch implementation (#26197)
## Summary
- Remove unreachable dead code that checked for data URLs in
`fetchImpl()`
- Data URLs are already handled earlier in the function via the
`dispatch_request` block which processes `.data` scheme URLs
- This redundant check at lines 375-387 could never be reached

## Test plan
- [ ] Verify existing fetch tests pass with `bun bd test
test/js/web/fetch/`
- [ ] Confirm data URL fetching still works correctly (handled by the
earlier code path; see the smoke check below)
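
For instance, a minimal smoke check (the payload is arbitrary; data URLs take the early dispatch path):

```ts
// "aGVsbG8=" is base64 for "hello".
const res = await fetch("data:text/plain;base64,aGVsbG8=");
console.log(await res.text()); // "hello"
```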

## Changelog
<!-- CHANGELOG:START -->
<!-- No user-facing changes - internal code cleanup only -->
<!-- CHANGELOG:END -->

🤖 Generated with [Claude Code](https://claude.com/claude-code) (100%
12-shotted by claude-opus-4-5)

Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-18 13:32:20 -08:00
robobun
039c89442f chore: bump TinyCC to latest upstream (Jan 2026) (#26210)
## What does this PR do?

Updates the oven-sh/tinycc fork to the latest upstream TinyCC,
incorporating 30+ upstream commits while preserving all Bun-specific
patches.

### Upstream changes incorporated
- Build system improvements (c2str.exe handling, cross-compilation)
- macOS 15 compatibility fixes
- libtcc debugging support
- pic/pie support for i386
- arm64 alignment and symbol offset fixes
- RISC-V 64 improvements (pointer difference, assembly, Zicsr extension)
- Relocation updates
- Preprocessor improvements (integer literal overflow handling)
- x86-64 cvts*2si fix
- Various bug fixes

### Bun-specific patches preserved
- Fix crash on macOS x64 (libxcselect.dylib memory handling)
- Implement `-framework FrameworkName` on macOS (for framework header
parsing)
- Add missing #ifdef guards for TCC_IS_NATIVE
- Make `__attribute__(deprecated)` a no-op
- Fix `__has_include` with framework paths
- Support attributes after identifiers in enums
- Fix dlsym behavior on macOS (RTLD_SELF first, then RTLD_DEFAULT)
- Various tccmacho.c improvements

### Related PRs
- TinyCC fork CI is passing:
https://github.com/oven-sh/tinycc/actions/runs/21105489093

## How did you verify your code works?

- [x] TinyCC fork CI passes on all platforms (Linux
x86_64/arm64/armv7/riscv64, macOS x86_64/arm64, Windows i386/x86_64)
- [ ] Bun CI passes

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-18 13:31:21 -08:00
robobun
c3b4e5568c fix(http): check socket state before operations in doRedirect (#26221)
## Summary
- Fix assertion failure when using HTTP proxy with redirects and socket
closes during redirect processing
- Add `isClosedOrHasError()` checks before `releaseSocket` and
`closeSocket` in `doRedirect`

Fixes #26220

## Root Cause
In `doRedirect` (`src/http.zig:786-797`), the code called
`releaseSocket` or `closeSocket` without checking if the socket was
already closed. When `onClose` is triggered while `is_redirect_pending`
is true, it calls `doRedirect`, but the socket is already closed at that
point, causing the assertion in `HTTPContext.zig:168` to fail:

```zig
assert(!socket.isClosed());  // FAILS - socket IS closed
```

## Fix
Added `!socket.isClosedOrHasError()` checks before socket operations in
`doRedirect`, matching the pattern already used at line 1790 in the same
file.

## Test plan
- [x] All existing proxy redirect tests pass (`bun bd test
test/js/bun/http/proxy.test.ts`)
- [x] Build completes successfully (`bun bd`)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-18 13:19:30 -08:00
robobun
3d46ae2fa4 fix(node-fetch): convert old-style Node.js streams to Web streams (#26226)
## Summary
- Fix truncated multipart uploads when combining form-data +
node-fetch@2 + fs.createReadStream()
- Convert old-style Node.js streams (those that don't implement
`Symbol.asyncIterator`) to Web ReadableStreams before passing them to
native fetch (see the sketch below)
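
A minimal sketch of that conversion (assumed shape, not Bun's internal code):

```ts
// Wrap an old-style Node.js readable (no Symbol.asyncIterator) in a Web
// ReadableStream by bridging its 'data'/'end'/'error' events.
function toWebStream(stream: NodeJS.ReadableStream): ReadableStream<Uint8Array> {
  return new ReadableStream<Uint8Array>({
    start(controller) {
      stream.on("data", (chunk: Buffer | string) =>
        controller.enqueue(typeof chunk === "string" ? Buffer.from(chunk) : chunk),
      );
      stream.on("end", () => controller.close());
      stream.on("error", err => controller.error(err));
    },
  });
}
```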

## Test plan
- [x] New tests in `test/regression/issue/26225.test.ts` verify:
  - Multipart uploads with form-data and createReadStream work correctly
  - Async iterable bodies still work (regression test)
  - Large file streams work correctly
- [x] Tests fail with `USE_SYSTEM_BUN=1` and pass with debug build

Fixes #26225

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Bot <claude-bot@bun.sh>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-18 13:19:02 -08:00
574 changed files with 176953 additions and 4360 deletions

View File

@@ -26,7 +26,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
wget curl git python3 python3-pip ninja-build \
software-properties-common apt-transport-https \
ca-certificates gnupg lsb-release unzip \
- libxml2-dev ruby ruby-dev bison gawk perl make golang ccache \
+ libxml2-dev ruby ruby-dev bison gawk perl make golang ccache qemu-user-static \
&& add-apt-repository ppa:ubuntu-toolchain-r/test \
&& apt-get update \
&& apt-get install -y gcc-13 g++-13 libgcc-13-dev libstdc++-13-dev \

View File

@@ -114,6 +114,8 @@ const buildPlatforms = [
{ os: "linux", arch: "x64", abi: "musl", baseline: true, distro: "alpine", release: "3.22" },
{ os: "windows", arch: "x64", release: "2019" },
{ os: "windows", arch: "x64", baseline: true, release: "2019" },
// TODO: Enable when Windows ARM64 CI runners are ready
// { os: "windows", arch: "aarch64", release: "2019" },
];
/**
@@ -136,6 +138,8 @@ const testPlatforms = [
{ os: "linux", arch: "x64", abi: "musl", baseline: true, distro: "alpine", release: "3.22", tier: "latest" },
{ os: "windows", arch: "x64", release: "2019", tier: "oldest" },
{ os: "windows", arch: "x64", release: "2019", baseline: true, tier: "oldest" },
// TODO: Enable when Windows ARM64 CI runners are ready
// { os: "windows", arch: "aarch64", release: "2019", tier: "oldest" },
];
/**
@@ -533,6 +537,109 @@ function getLinkBunStep(platform, options) {
};
}
/**
* Returns the artifact triplet for a platform, e.g. "bun-linux-aarch64" or "bun-linux-x64-musl-baseline".
* Matches the naming convention in cmake/targets/BuildBun.cmake.
* @param {Platform} platform
* @returns {string}
*/
function getTargetTriplet(platform) {
const { os, arch, abi, baseline } = platform;
let triplet = `bun-${os}-${arch}`;
if (abi === "musl") {
triplet += "-musl";
}
if (baseline) {
triplet += "-baseline";
}
return triplet;
}
/**
* Returns true if a platform needs QEMU-based baseline CPU verification.
* x64 baseline builds verify no AVX/AVX2 instructions snuck in.
* aarch64 builds verify no LSE/SVE instructions snuck in.
* @param {Platform} platform
* @returns {boolean}
*/
function needsBaselineVerification(platform) {
const { os, arch, baseline } = platform;
if (os !== "linux") return false;
return (arch === "x64" && baseline) || arch === "aarch64";
}
/**
* @param {Platform} platform
* @param {PipelineOptions} options
* @returns {Step}
*/
function getVerifyBaselineStep(platform, options) {
const { arch } = platform;
const targetKey = getTargetKey(platform);
const archArg = arch === "x64" ? "x64" : "aarch64";
return {
key: `${targetKey}-verify-baseline`,
label: `${getTargetLabel(platform)} - verify-baseline`,
depends_on: [`${targetKey}-build-bun`],
agents: getLinkBunAgent(platform, options),
retry: getRetry(),
cancel_on_build_failing: isMergeQueue(),
timeout_in_minutes: 5,
command: [
`buildkite-agent artifact download '*.zip' . --step ${targetKey}-build-bun`,
`unzip -o '${getTargetTriplet(platform)}.zip'`,
`unzip -o '${getTargetTriplet(platform)}-profile.zip'`,
`chmod +x ${getTargetTriplet(platform)}/bun ${getTargetTriplet(platform)}-profile/bun-profile`,
`./scripts/verify-baseline-cpu.sh --arch ${archArg} --binary ${getTargetTriplet(platform)}/bun`,
`./scripts/verify-baseline-cpu.sh --arch ${archArg} --binary ${getTargetTriplet(platform)}-profile/bun-profile`,
],
};
}
/**
* Returns true if the PR modifies SetupWebKit.cmake (WebKit version changes).
* JIT stress tests under QEMU should run when WebKit is updated to catch
* JIT-generated code that uses unsupported CPU instructions.
* @param {PipelineOptions} options
* @returns {boolean}
*/
function hasWebKitChanges(options) {
const { changedFiles = [] } = options;
return changedFiles.some(file => file.includes("SetupWebKit.cmake"));
}
/**
* Returns a step that runs JSC JIT stress tests under QEMU.
* This verifies that JIT-compiled code doesn't use CPU instructions
* beyond the baseline target (no AVX on x64, no LSE on aarch64).
* @param {Platform} platform
* @param {PipelineOptions} options
* @returns {Step}
*/
function getJitStressTestStep(platform, options) {
const { arch } = platform;
const targetKey = getTargetKey(platform);
const archArg = arch === "x64" ? "x64" : "aarch64";
return {
key: `${targetKey}-jit-stress-qemu`,
label: `${getTargetLabel(platform)} - jit-stress-qemu`,
depends_on: [`${targetKey}-build-bun`],
agents: getLinkBunAgent(platform, options),
retry: getRetry(),
cancel_on_build_failing: isMergeQueue(),
// JIT stress tests are slow under QEMU emulation
timeout_in_minutes: 30,
command: [
`buildkite-agent artifact download '*.zip' . --step ${targetKey}-build-bun`,
`unzip -o '${getTargetTriplet(platform)}.zip'`,
`chmod +x ${getTargetTriplet(platform)}/bun`,
`./scripts/verify-jit-stress-qemu.sh --arch ${archArg} --binary ${getTargetTriplet(platform)}/bun`,
],
};
}
/**
* @param {Platform} platform
* @param {PipelineOptions} options
@@ -770,6 +877,7 @@ function getBenchmarkStep() {
* @property {Platform[]} [buildPlatforms]
* @property {Platform[]} [testPlatforms]
* @property {string[]} [testFiles]
* @property {string[]} [changedFiles]
*/
/**
@@ -1122,6 +1230,14 @@ async function getPipeline(options = {}) {
steps.push(getBuildZigStep(target, options));
steps.push(getLinkBunStep(target, options));
if (needsBaselineVerification(target)) {
steps.push(getVerifyBaselineStep(target, options));
// Run JIT stress tests under QEMU when WebKit is updated
if (hasWebKitChanges(options)) {
steps.push(getJitStressTestStep(target, options));
}
}
return getStepWithDependsOn(
{
key: getTargetKey(target),
@@ -1219,6 +1335,7 @@ async function main() {
console.log(`- PR is only docs, skipping tests!`);
return;
}
options.changedFiles = allFiles;
}
startGroup("Generating pipeline...");

View File

@@ -36,16 +36,20 @@ function Log-Debug {
}
}
# Detect system architecture
$script:IsARM64 = [System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq [System.Runtime.InteropServices.Architecture]::Arm64
$script:VsArch = if ($script:IsARM64) { "arm64" } else { "amd64" }
# Load Visual Studio environment if not already loaded
function Ensure-VSEnvironment {
if ($null -eq $env:VSINSTALLDIR) {
Log-Info "Loading Visual Studio environment..."
Log-Info "Loading Visual Studio environment for $script:VsArch..."
$vswhere = "C:\Program Files (x86)\Microsoft Visual Studio\Installer\vswhere.exe"
if (!(Test-Path $vswhere)) {
throw "Command not found: vswhere (did you install Visual Studio?)"
}
$vsDir = & $vswhere -prerelease -latest -property installationPath
if ($null -eq $vsDir) {
$vsDir = Get-ChildItem -Path "C:\Program Files\Microsoft Visual Studio\2022" -Directory -ErrorAction SilentlyContinue
@@ -54,20 +58,20 @@ function Ensure-VSEnvironment {
}
$vsDir = $vsDir.FullName
}
Push-Location $vsDir
try {
$vsShell = Join-Path -Path $vsDir -ChildPath "Common7\Tools\Launch-VsDevShell.ps1"
- . $vsShell -Arch amd64 -HostArch amd64
+ . $vsShell -Arch $script:VsArch -HostArch $script:VsArch
} finally {
Pop-Location
}
Log-Success "Visual Studio environment loaded"
}
if ($env:VSCMD_ARG_TGT_ARCH -eq "x86") {
throw "Visual Studio environment is targeting 32 bit, but only 64 bit is supported."
throw "Visual Studio environment is targeting 32 bit x86, but only 64-bit architectures (x64/arm64) are supported."
}
}
@@ -186,8 +190,10 @@ function Install-KeyLocker {
}
# Download MSI installer
- $msiUrl = "https://bun-ci-assets.bun.sh/Keylockertools-windows-x64.msi"
- $msiPath = Join-Path $env:TEMP "Keylockertools-windows-x64.msi"
+ # Note: KeyLocker tools currently only available for x64, but works on ARM64 via emulation
+ $msiArch = "x64"
+ $msiUrl = "https://bun-ci-assets.bun.sh/Keylockertools-windows-${msiArch}.msi"
+ $msiPath = Join-Path $env:TEMP "Keylockertools-windows-${msiArch}.msi"
Log-Info "Downloading MSI from: $msiUrl"
Log-Info "Downloading to: $msiPath"

View File

@@ -219,6 +219,9 @@ function create_release() {
bun-windows-x64-profile.zip
bun-windows-x64-baseline.zip
bun-windows-x64-baseline-profile.zip
# TODO: Enable when Windows ARM64 CI runners are ready
# bun-windows-aarch64.zip
# bun-windows-aarch64-profile.zip
)
function upload_artifact() {

View File

@@ -88,7 +88,7 @@ jobs:
commit-message: "deps: update c-ares to ${{ steps.check-version.outputs.tag }} (${{ steps.check-version.outputs.latest }})"
title: "deps: update c-ares to ${{ steps.check-version.outputs.tag }}"
delete-branch: true
- branch: deps/update-cares-${{ github.run_number }}
+ branch: deps/update-cares
body: |
## What does this PR do?

View File

@@ -91,7 +91,7 @@ jobs:
commit-message: "deps: update hdrhistogram to ${{ steps.check-version.outputs.tag }} (${{ steps.check-version.outputs.latest }})"
title: "deps: update hdrhistogram to ${{ steps.check-version.outputs.tag }}"
delete-branch: true
- branch: deps/update-hdrhistogram-${{ github.run_number }}
+ branch: deps/update-hdrhistogram
body: |
## What does this PR do?

View File

@@ -107,7 +107,7 @@ jobs:
commit-message: "deps: update highway to ${{ steps.check-version.outputs.tag }} (${{ steps.check-version.outputs.latest }})"
title: "deps: update highway to ${{ steps.check-version.outputs.tag }}"
delete-branch: true
- branch: deps/update-highway-${{ github.run_number }}
+ branch: deps/update-highway
body: |
## What does this PR do?

View File

@@ -88,7 +88,7 @@ jobs:
commit-message: "deps: update libarchive to ${{ steps.check-version.outputs.tag }} (${{ steps.check-version.outputs.latest }})"
title: "deps: update libarchive to ${{ steps.check-version.outputs.tag }}"
delete-branch: true
- branch: deps/update-libarchive-${{ github.run_number }}
+ branch: deps/update-libarchive
body: |
## What does this PR do?

View File

@@ -88,7 +88,7 @@ jobs:
commit-message: "deps: update libdeflate to ${{ steps.check-version.outputs.tag }} (${{ steps.check-version.outputs.latest }})"
title: "deps: update libdeflate to ${{ steps.check-version.outputs.tag }}"
delete-branch: true
- branch: deps/update-libdeflate-${{ github.run_number }}
+ branch: deps/update-libdeflate
body: |
## What does this PR do?

View File

@@ -100,7 +100,7 @@ jobs:
commit-message: "deps: update lolhtml to ${{ steps.check-version.outputs.tag }} (${{ steps.check-version.outputs.latest }})"
title: "deps: update lolhtml to ${{ steps.check-version.outputs.tag }}"
delete-branch: true
- branch: deps/update-lolhtml-${{ github.run_number }}
+ branch: deps/update-lolhtml
body: |
## What does this PR do?

View File

@@ -105,7 +105,7 @@ jobs:
commit-message: "deps: update lshpack to ${{ steps.check-version.outputs.tag }} (${{ steps.check-version.outputs.latest }})"
title: "deps: update lshpack to ${{ steps.check-version.outputs.tag }}"
delete-branch: true
- branch: deps/update-lshpack-${{ github.run_number }}
+ branch: deps/update-lshpack
body: |
## What does this PR do?

View File

@@ -74,7 +74,7 @@ jobs:
```
${{ env.changed_files }}
```
- branch: certs/update-root-certs-${{ github.run_number }}
+ branch: certs/update-root-certs
base: main
delete-branch: true
labels:

View File

@@ -83,7 +83,7 @@ jobs:
commit-message: "deps: update sqlite to ${{ steps.check-version.outputs.latest }}"
title: "deps: update sqlite to ${{ steps.check-version.outputs.latest }}"
delete-branch: true
- branch: deps/update-sqlite-${{ steps.check-version.outputs.latest }}
+ branch: deps/update-sqlite
body: |
## What does this PR do?

View File

@@ -68,7 +68,7 @@ jobs:
commit-message: "deps: update ${{ matrix.package }} to ${{ steps.check-version.outputs.latest }} (${{ steps.check-version.outputs.latest }})"
title: "deps: update ${{ matrix.package }} to ${{ steps.check-version.outputs.latest }}"
delete-branch: true
- branch: deps/update-${{ matrix.package }}-${{ github.run_number }}
+ branch: deps/update-${{ matrix.package }}
body: |
## What does this PR do?

View File

@@ -88,7 +88,7 @@ jobs:
commit-message: "deps: update zstd to ${{ steps.check-version.outputs.tag }} (${{ steps.check-version.outputs.latest }})"
title: "deps: update zstd to ${{ steps.check-version.outputs.tag }}"
delete-branch: true
- branch: deps/update-zstd-${{ github.run_number }}
+ branch: deps/update-zstd
body: |
## What does this PR do?

View File

@@ -259,18 +259,13 @@ $ git clone https://github.com/oven-sh/WebKit vendor/WebKit
# Check out the commit hash specified in `set(WEBKIT_VERSION <commit_hash>)` in cmake/tools/SetupWebKit.cmake
$ git -C vendor/WebKit checkout <commit_hash>
# Make a debug build of JSC. This will output build artifacts in ./vendor/WebKit/WebKitBuild/Debug
# Optionally, you can use `bun run jsc:build` for a release build
$ bun run jsc:build:debug && rm vendor/WebKit/WebKitBuild/Debug/JavaScriptCore/DerivedSources/inspector/InspectorProtocolObjects.h
# After an initial run of `make jsc-debug`, you can rebuild JSC with:
$ cmake --build vendor/WebKit/WebKitBuild/Debug --target jsc && rm vendor/WebKit/WebKitBuild/Debug/JavaScriptCore/DerivedSources/inspector/InspectorProtocolObjects.h
- # Build bun with the local JSC build
+ # Build bun with the local JSC build — this automatically configures and builds JSC
$ bun run build:local
```
- Using `bun run build:local` will build Bun in the `./build/debug-local` directory (instead of `./build/debug`), you'll have to change a couple of places to use this new directory:
+ `bun run build:local` handles everything: configuring JSC, building JSC, and building Bun. On subsequent runs, JSC will incrementally rebuild if any WebKit sources changed. `ninja -Cbuild/debug-local` also works after the first build, and will build Bun+JSC.
+ The build output goes to `./build/debug-local` (instead of `./build/debug`), so you'll need to update a couple of places:
- The first line in [`src/js/builtins.d.ts`](/src/js/builtins.d.ts)
- The `CompilationDatabase` line in [`.clangd` config](/.clangd) should be `CompilationDatabase: build/debug-local`
@@ -281,7 +276,7 @@ Note that the WebKit folder, including build artifacts, is 8GB+ in size.
If you are using a JSC debug build and using VScode, make sure to run the `C/C++: Select a Configuration` command to configure intellisense to find the debug headers.
- Note that if you change make changes to our [WebKit fork](https://github.com/oven-sh/WebKit), you will also have to change [`SetupWebKit.cmake`](/cmake/tools/SetupWebKit.cmake) to point to the commit hash.
+ Note that if you make changes to our [WebKit fork](https://github.com/oven-sh/WebKit), you will also have to change [`SetupWebKit.cmake`](/cmake/tools/SetupWebKit.cmake) to point to the commit hash.
## Troubleshooting

View File

@@ -1 +1 @@
- 1.3.6
+ 1.3.8

View File

@@ -36,6 +36,7 @@ Bun statically links these libraries:
| [`libbase64`](https://github.com/aklomp/base64/blob/master/LICENSE) | BSD 2-Clause |
| [`libuv`](https://github.com/libuv/libuv) (on Windows) | MIT |
| [`libdeflate`](https://github.com/ebiggers/libdeflate) | MIT |
| [`uucode`](https://github.com/jacobsandlund/uucode) | MIT |
| A fork of [`uWebsockets`](https://github.com/jarred-sumner/uwebsockets) | Apache 2.0 licensed |
| Parts of [Tigerbeetle's IO code](https://github.com/tigerbeetle/tigerbeetle/blob/532c8b70b9142c17e07737ab6d3da68d7500cbca/src/io/windows.zig#L1) | Apache 2.0 licensed |

View File

@@ -18,9 +18,13 @@
"fast-glob": "3.3.1",
"fastify": "^5.0.0",
"fdir": "^6.1.0",
"mitata": "^1.0.25",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"marked": "^17.0.1",
"mitata": "1.0.20",
"react": "^19",
"react-dom": "^19",
"react-markdown": "^9.0.3",
"remark": "^15.0.1",
"remark-html": "^16.0.1",
"string-width": "7.1.0",
"strip-ansi": "^7.1.0",
"tar": "^7.4.3",
@@ -150,18 +154,36 @@
"@swc/core-win32-x64-msvc": ["@swc/core-win32-x64-msvc@1.3.35", "", { "os": "win32", "cpu": "x64" }, "sha512-/RvphT4WfuGfIK84Ha0dovdPrKB1bW/mc+dtdmhv2E3EGkNc5FoueNwYmXWRimxnU7X0X7IkcRhyKB4G5DeAmg=="],
"@types/debug": ["@types/debug@4.1.12", "", { "dependencies": { "@types/ms": "*" } }, "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ=="],
"@types/estree": ["@types/estree@1.0.8", "", {}, "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w=="],
"@types/estree-jsx": ["@types/estree-jsx@1.0.5", "", { "dependencies": { "@types/estree": "*" } }, "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg=="],
"@types/fs-extra": ["@types/fs-extra@11.0.4", "", { "dependencies": { "@types/jsonfile": "*", "@types/node": "*" } }, "sha512-yTbItCNreRooED33qjunPthRcSjERP1r4MqCZc7wv0u2sUkzTFp45tgUfS5+r7FrZPdmCCNflLhVSP/o+SemsQ=="],
"@types/hast": ["@types/hast@3.0.4", "", { "dependencies": { "@types/unist": "*" } }, "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ=="],
"@types/jsonfile": ["@types/jsonfile@6.1.4", "", { "dependencies": { "@types/node": "*" } }, "sha512-D5qGUYwjvnNNextdU59/+fI+spnwtTFmyQP0h+PfIOSkNfpU6AOICUOkm4i0OnSk+NyjdPJrxCDro0sJsWlRpQ=="],
"@types/mdast": ["@types/mdast@4.0.4", "", { "dependencies": { "@types/unist": "*" } }, "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA=="],
"@types/minimist": ["@types/minimist@1.2.5", "", {}, "sha512-hov8bUuiLiyFPGyFPE1lwWhmzYbirOXQNNo40+y3zow8aFVTeyn3VWL0VFFfdNddA8S4Vf0Tc062rzyNr7Paag=="],
"@types/ms": ["@types/ms@2.1.0", "", {}, "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA=="],
"@types/node": ["@types/node@18.19.8", "", { "dependencies": { "undici-types": "~5.26.4" } }, "sha512-g1pZtPhsvGVTwmeVoexWZLTQaOvXwoSq//pTL0DHeNzUDrFnir4fgETdhjhIxjVnN+hKOuh98+E1eMLnUXstFg=="],
"@types/ps-tree": ["@types/ps-tree@1.1.6", "", {}, "sha512-PtrlVaOaI44/3pl3cvnlK+GxOM3re2526TJvPvh7W+keHIXdV4TE0ylpPBAcvFQCbGitaTXwL9u+RF7qtVeazQ=="],
"@types/react": ["@types/react@19.2.10", "", { "dependencies": { "csstype": "^3.2.2" } }, "sha512-WPigyYuGhgZ/cTPRXB2EwUw+XvsRA3GqHlsP4qteqrnnjDrApbS7MxcGr/hke5iUoeB7E/gQtrs9I37zAJ0Vjw=="],
"@types/unist": ["@types/unist@3.0.3", "", {}, "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q=="],
"@types/which": ["@types/which@3.0.3", "", {}, "sha512-2C1+XoY0huExTbs8MQv1DuS5FS86+SEjdM9F/+GS61gg5Hqbtj8ZiDSx8MfWcyei907fIPbfPGCOrNUTnVHY1g=="],
"@ungap/structured-clone": ["@ungap/structured-clone@1.3.0", "", {}, "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g=="],
"abstract-logging": ["abstract-logging@2.0.1", "", {}, "sha512-2BjRTZxTPvheOvGbBslFSYOUkr+SjPtOnrLP33f+VIWLzezQpZcqVg7ja3L4dBXmzzgwT+a029jRx5PCi3JuiA=="],
"ajv": ["ajv@8.17.1", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g=="],
@@ -176,6 +198,8 @@
"avvio": ["avvio@9.1.0", "", { "dependencies": { "@fastify/error": "^4.0.0", "fastq": "^1.17.1" } }, "sha512-fYASnYi600CsH/j9EQov7lECAniYiBFiiAtBNuZYLA2leLe9qOvZzqYHFjtIj6gD2VMoMLP14834LFWvr4IfDw=="],
"bail": ["bail@2.0.2", "", {}, "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw=="],
"benchmark": ["benchmark@2.1.4", "", { "dependencies": { "lodash": "^4.17.4", "platform": "^1.3.3" } }, "sha512-l9MlfN4M1K/H2fbhfMy3B7vJd6AGKJVQn2h6Sg/Yx+KckoUA7ewS5Vv6TjSq18ooE1kS9hhAlQRH3AkXIh/aOQ=="],
"braces": ["braces@3.0.2", "", { "dependencies": { "fill-range": "^7.0.1" } }, "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A=="],
@@ -184,8 +208,18 @@
"caniuse-lite": ["caniuse-lite@1.0.30001456", "", {}, "sha512-XFHJY5dUgmpMV25UqaD4kVq2LsiaU5rS8fb0f17pCoXQiQslzmFgnfOxfvo1bTpTqf7dwG/N/05CnLCnOEKmzA=="],
"ccount": ["ccount@2.0.1", "", {}, "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg=="],
"chalk": ["chalk@5.3.0", "", {}, "sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w=="],
"character-entities": ["character-entities@2.0.2", "", {}, "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ=="],
"character-entities-html4": ["character-entities-html4@2.1.0", "", {}, "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA=="],
"character-entities-legacy": ["character-entities-legacy@3.0.0", "", {}, "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ=="],
"character-reference-invalid": ["character-reference-invalid@2.0.1", "", {}, "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw=="],
"chownr": ["chownr@3.0.0", "", {}, "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g=="],
"color": ["color@4.2.3", "", { "dependencies": { "color-convert": "^2.0.1", "color-string": "^1.9.0" } }, "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A=="],
@@ -196,18 +230,26 @@
"color-string": ["color-string@1.9.1", "", { "dependencies": { "color-name": "^1.0.0", "simple-swizzle": "^0.2.2" } }, "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg=="],
"comma-separated-tokens": ["comma-separated-tokens@2.0.3", "", {}, "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg=="],
"convert-source-map": ["convert-source-map@1.9.0", "", {}, "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A=="],
"cookie": ["cookie@1.0.2", "", {}, "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA=="],
"cross-spawn": ["cross-spawn@7.0.3", "", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w=="],
"csstype": ["csstype@3.2.3", "", {}, "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ=="],
"data-uri-to-buffer": ["data-uri-to-buffer@4.0.1", "", {}, "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A=="],
"debug": ["debug@4.3.4", "", { "dependencies": { "ms": "2.1.2" } }, "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ=="],
"decode-named-character-reference": ["decode-named-character-reference@1.3.0", "", { "dependencies": { "character-entities": "^2.0.0" } }, "sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q=="],
"dequal": ["dequal@2.0.3", "", {}, "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA=="],
"devlop": ["devlop@1.1.0", "", { "dependencies": { "dequal": "^2.0.0" } }, "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA=="],
"dir-glob": ["dir-glob@3.0.1", "", { "dependencies": { "path-type": "^4.0.0" } }, "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA=="],
"duplexer": ["duplexer@0.1.2", "", {}, "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg=="],
@@ -262,12 +304,16 @@
"escape-string-regexp": ["escape-string-regexp@1.0.5", "", {}, "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg=="],
"estree-util-is-identifier-name": ["estree-util-is-identifier-name@3.0.0", "", {}, "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg=="],
"event-stream": ["event-stream@3.3.4", "", { "dependencies": { "duplexer": "~0.1.1", "from": "~0", "map-stream": "~0.1.0", "pause-stream": "0.0.11", "split": "0.3", "stream-combiner": "~0.0.4", "through": "~2.3.1" } }, "sha512-QHpkERcGsR0T7Qm3HNJSyXKEEj8AHNxkY3PK8TS2KJvQ7NiSHe3DDpwVKKtoYprL/AreyzFBeIkBIWChAqn60g=="],
"eventemitter3": ["eventemitter3@5.0.0", "", {}, "sha512-riuVbElZZNXLeLEoprfNYoDSwTBRR44X3mnhdI1YcnENpWTCsTTVZ2zFuqQcpoyqPQIUXdiPEU0ECAq0KQRaHg=="],
"execa": ["execa@8.0.1", "", { "dependencies": { "cross-spawn": "^7.0.3", "get-stream": "^8.0.1", "human-signals": "^5.0.0", "is-stream": "^3.0.0", "merge-stream": "^2.0.0", "npm-run-path": "^5.1.0", "onetime": "^6.0.0", "signal-exit": "^4.1.0", "strip-final-newline": "^3.0.0" } }, "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg=="],
"extend": ["extend@3.0.2", "", {}, "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="],
"fast-decode-uri-component": ["fast-decode-uri-component@1.0.1", "", {}, "sha512-WKgKWg5eUxvRZGwW8FvfbaH7AXSh2cL+3j5fMGzUMCxWBJ3dV3a7Wz8y2f/uQ0e3B6WmodD3oS54jTQ9HVTIIg=="],
"fast-deep-equal": ["fast-deep-equal@3.1.3", "", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="],
@@ -318,20 +364,44 @@
"has-flag": ["has-flag@3.0.0", "", {}, "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw=="],
"hast-util-sanitize": ["hast-util-sanitize@5.0.2", "", { "dependencies": { "@types/hast": "^3.0.0", "@ungap/structured-clone": "^1.0.0", "unist-util-position": "^5.0.0" } }, "sha512-3yTWghByc50aGS7JlGhk61SPenfE/p1oaFeNwkOOyrscaOkMGrcW9+Cy/QAIOBpZxP1yqDIzFMR0+Np0i0+usg=="],
"hast-util-to-html": ["hast-util-to-html@9.0.5", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "ccount": "^2.0.0", "comma-separated-tokens": "^2.0.0", "hast-util-whitespace": "^3.0.0", "html-void-elements": "^3.0.0", "mdast-util-to-hast": "^13.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "stringify-entities": "^4.0.0", "zwitch": "^2.0.4" } }, "sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw=="],
"hast-util-to-jsx-runtime": ["hast-util-to-jsx-runtime@2.3.6", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "comma-separated-tokens": "^2.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", "hast-util-whitespace": "^3.0.0", "mdast-util-mdx-expression": "^2.0.0", "mdast-util-mdx-jsx": "^3.0.0", "mdast-util-mdxjs-esm": "^2.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "style-to-js": "^1.0.0", "unist-util-position": "^5.0.0", "vfile-message": "^4.0.0" } }, "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg=="],
"hast-util-whitespace": ["hast-util-whitespace@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw=="],
"html-url-attributes": ["html-url-attributes@3.0.1", "", {}, "sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ=="],
"html-void-elements": ["html-void-elements@3.0.0", "", {}, "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg=="],
"human-signals": ["human-signals@5.0.0", "", {}, "sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ=="],
"ignore": ["ignore@5.3.0", "", {}, "sha512-g7dmpshy+gD7mh88OC9NwSGTKoc3kyLAZQRU1mt53Aw/vnvfXnbC+F/7F7QoYVKbV+KNvJx8wArewKy1vXMtlg=="],
"inline-style-parser": ["inline-style-parser@0.2.7", "", {}, "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA=="],
"ipaddr.js": ["ipaddr.js@2.2.0", "", {}, "sha512-Ag3wB2o37wslZS19hZqorUnrnzSkpOVy+IiiDEiTqNubEYpYuHWIf6K4psgN2ZWKExS4xhVCrRVfb/wfW8fWJA=="],
"is-alphabetical": ["is-alphabetical@2.0.1", "", {}, "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ=="],
"is-alphanumerical": ["is-alphanumerical@2.0.1", "", { "dependencies": { "is-alphabetical": "^2.0.0", "is-decimal": "^2.0.0" } }, "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw=="],
"is-arrayish": ["is-arrayish@0.3.2", "", {}, "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ=="],
"is-decimal": ["is-decimal@2.0.1", "", {}, "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A=="],
"is-extglob": ["is-extglob@2.1.1", "", {}, "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ=="],
"is-glob": ["is-glob@4.0.3", "", { "dependencies": { "is-extglob": "^2.1.1" } }, "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg=="],
"is-hexadecimal": ["is-hexadecimal@2.0.1", "", {}, "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg=="],
"is-number": ["is-number@7.0.0", "", {}, "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="],
"is-plain-obj": ["is-plain-obj@4.1.0", "", {}, "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg=="],
"is-stream": ["is-stream@3.0.0", "", {}, "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA=="],
"isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="],
@@ -352,16 +422,76 @@
"lodash": ["lodash@4.17.21", "", {}, "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="],
"loose-envify": ["loose-envify@1.4.0", "", { "dependencies": { "js-tokens": "^3.0.0 || ^4.0.0" }, "bin": { "loose-envify": "cli.js" } }, "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q=="],
"longest-streak": ["longest-streak@3.1.0", "", {}, "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g=="],
"lru-cache": ["lru-cache@5.1.1", "", { "dependencies": { "yallist": "^3.0.2" } }, "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w=="],
"map-stream": ["map-stream@0.1.0", "", {}, "sha512-CkYQrPYZfWnu/DAmVCpTSX/xHpKZ80eKh2lAkyA6AJTef6bW+6JpbQZN5rofum7da+SyN1bi5ctTm+lTfcCW3g=="],
"marked": ["marked@17.0.1", "", { "bin": { "marked": "bin/marked.js" } }, "sha512-boeBdiS0ghpWcSwoNm/jJBwdpFaMnZWRzjA6SkUMYb40SVaN1x7mmfGKp0jvexGcx+7y2La5zRZsYFZI6Qpypg=="],
"mdast-util-from-markdown": ["mdast-util-from-markdown@2.0.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "mdast-util-to-string": "^4.0.0", "micromark": "^4.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-decode-string": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "unist-util-stringify-position": "^4.0.0" } }, "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA=="],
"mdast-util-mdx-expression": ["mdast-util-mdx-expression@2.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ=="],
"mdast-util-mdx-jsx": ["mdast-util-mdx-jsx@3.2.0", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "ccount": "^2.0.0", "devlop": "^1.1.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "parse-entities": "^4.0.0", "stringify-entities": "^4.0.0", "unist-util-stringify-position": "^4.0.0", "vfile-message": "^4.0.0" } }, "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q=="],
"mdast-util-mdxjs-esm": ["mdast-util-mdxjs-esm@2.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg=="],
"mdast-util-phrasing": ["mdast-util-phrasing@4.1.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "unist-util-is": "^6.0.0" } }, "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w=="],
"mdast-util-to-hast": ["mdast-util-to-hast@13.2.1", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "@ungap/structured-clone": "^1.0.0", "devlop": "^1.0.0", "micromark-util-sanitize-uri": "^2.0.0", "trim-lines": "^3.0.0", "unist-util-position": "^5.0.0", "unist-util-visit": "^5.0.0", "vfile": "^6.0.0" } }, "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA=="],
"mdast-util-to-markdown": ["mdast-util-to-markdown@2.1.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "longest-streak": "^3.0.0", "mdast-util-phrasing": "^4.0.0", "mdast-util-to-string": "^4.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-decode-string": "^2.0.0", "unist-util-visit": "^5.0.0", "zwitch": "^2.0.0" } }, "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA=="],
"mdast-util-to-string": ["mdast-util-to-string@4.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0" } }, "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg=="],
"merge-stream": ["merge-stream@2.0.0", "", {}, "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w=="],
"merge2": ["merge2@1.4.1", "", {}, "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="],
"micromark": ["micromark@4.0.2", "", { "dependencies": { "@types/debug": "^4.0.0", "debug": "^4.0.0", "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-encode": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-subtokenize": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA=="],
"micromark-core-commonmark": ["micromark-core-commonmark@2.0.3", "", { "dependencies": { "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "micromark-factory-destination": "^2.0.0", "micromark-factory-label": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-factory-title": "^2.0.0", "micromark-factory-whitespace": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-html-tag-name": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-subtokenize": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg=="],
"micromark-factory-destination": ["micromark-factory-destination@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA=="],
"micromark-factory-label": ["micromark-factory-label@2.0.1", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg=="],
"micromark-factory-space": ["micromark-factory-space@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg=="],
"micromark-factory-title": ["micromark-factory-title@2.0.1", "", { "dependencies": { "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw=="],
"micromark-factory-whitespace": ["micromark-factory-whitespace@2.0.1", "", { "dependencies": { "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ=="],
"micromark-util-character": ["micromark-util-character@2.1.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q=="],
"micromark-util-chunked": ["micromark-util-chunked@2.0.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA=="],
"micromark-util-classify-character": ["micromark-util-classify-character@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q=="],
"micromark-util-combine-extensions": ["micromark-util-combine-extensions@2.0.1", "", { "dependencies": { "micromark-util-chunked": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg=="],
"micromark-util-decode-numeric-character-reference": ["micromark-util-decode-numeric-character-reference@2.0.2", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw=="],
"micromark-util-decode-string": ["micromark-util-decode-string@2.0.1", "", { "dependencies": { "decode-named-character-reference": "^1.0.0", "micromark-util-character": "^2.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-symbol": "^2.0.0" } }, "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ=="],
"micromark-util-encode": ["micromark-util-encode@2.0.1", "", {}, "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw=="],
"micromark-util-html-tag-name": ["micromark-util-html-tag-name@2.0.1", "", {}, "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA=="],
"micromark-util-normalize-identifier": ["micromark-util-normalize-identifier@2.0.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q=="],
"micromark-util-resolve-all": ["micromark-util-resolve-all@2.0.1", "", { "dependencies": { "micromark-util-types": "^2.0.0" } }, "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg=="],
"micromark-util-sanitize-uri": ["micromark-util-sanitize-uri@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-encode": "^2.0.0", "micromark-util-symbol": "^2.0.0" } }, "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ=="],
"micromark-util-subtokenize": ["micromark-util-subtokenize@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA=="],
"micromark-util-symbol": ["micromark-util-symbol@2.0.1", "", {}, "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q=="],
"micromark-util-types": ["micromark-util-types@2.0.2", "", {}, "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA=="],
"micromatch": ["micromatch@4.0.5", "", { "dependencies": { "braces": "^3.0.2", "picomatch": "^2.3.1" } }, "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA=="],
"mimic-fn": ["mimic-fn@4.0.0", "", {}, "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw=="],
@@ -372,7 +502,7 @@
"minizlib": ["minizlib@3.1.0", "", { "dependencies": { "minipass": "^7.1.2" } }, "sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw=="],
"mitata": ["mitata@1.0.25", "", {}, "sha512-0v5qZtVW5vwj9FDvYfraR31BMDcRLkhSFWPTLaxx/Z3/EvScfVtAAWtMI2ArIbBcwh7P86dXh0lQWKiXQPlwYA=="],
"mitata": ["mitata@1.0.20", "", {}, "sha512-oHWYGX5bi4wGT/1zrhiZAEzqTV14Vq6/PUTW8WK0b3YHBBQcZz2QFm+InHhjnD0I7B6CMtwdGt2K0938r7YTdQ=="],
"ms": ["ms@2.1.2", "", {}, "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="],
@@ -388,6 +518,8 @@
"onetime": ["onetime@6.0.0", "", { "dependencies": { "mimic-fn": "^4.0.0" } }, "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ=="],
"parse-entities": ["parse-entities@4.0.2", "", { "dependencies": { "@types/unist": "^2.0.0", "character-entities-legacy": "^3.0.0", "character-reference-invalid": "^2.0.0", "decode-named-character-reference": "^1.0.0", "is-alphanumerical": "^2.0.0", "is-decimal": "^2.0.0", "is-hexadecimal": "^2.0.0" } }, "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw=="],
"path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="],
"path-type": ["path-type@4.0.0", "", {}, "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw=="],
@@ -408,18 +540,32 @@
"process-warning": ["process-warning@5.0.0", "", {}, "sha512-a39t9ApHNx2L4+HBnQKqxxHNs1r7KF+Intd8Q/g1bUh6q0WIp9voPXJ/x0j+ZL45KF1pJd9+q2jLIRMfvEshkA=="],
"property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="],
"ps-tree": ["ps-tree@1.2.0", "", { "dependencies": { "event-stream": "=3.3.4" }, "bin": { "ps-tree": "./bin/ps-tree.js" } }, "sha512-0VnamPPYHl4uaU/nSFeZZpR21QAWRz+sRv4iW9+v/GS/J5U5iZB5BNN6J0RMoOvdx2gWM2+ZFMIm58q24e4UYA=="],
"queue-microtask": ["queue-microtask@1.2.3", "", {}, "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A=="],
"quick-format-unescaped": ["quick-format-unescaped@4.0.4", "", {}, "sha512-tYC1Q1hgyRuHgloV/YXs2w15unPVh8qfu/qCTfhTYamaw7fyhumKa2yGpdSo87vY32rIclj+4fWYQXUMs9EHvg=="],
"react": ["react@18.3.1", "", { "dependencies": { "loose-envify": "^1.1.0" } }, "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ=="],
"react": ["react@19.2.4", "", {}, "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ=="],
"react-dom": ["react-dom@18.3.1", "", { "dependencies": { "loose-envify": "^1.1.0", "scheduler": "^0.23.2" }, "peerDependencies": { "react": "^18.3.1" } }, "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw=="],
"react-dom": ["react-dom@19.2.4", "", { "dependencies": { "scheduler": "^0.27.0" }, "peerDependencies": { "react": "^19.2.4" } }, "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ=="],
"react-markdown": ["react-markdown@9.1.0", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "hast-util-to-jsx-runtime": "^2.0.0", "html-url-attributes": "^3.0.0", "mdast-util-to-hast": "^13.0.0", "remark-parse": "^11.0.0", "remark-rehype": "^11.0.0", "unified": "^11.0.0", "unist-util-visit": "^5.0.0", "vfile": "^6.0.0" }, "peerDependencies": { "@types/react": ">=18", "react": ">=18" } }, "sha512-xaijuJB0kzGiUdG7nc2MOMDUDBWPyGAjZtUrow9XxUeua8IqeP+VlIfAZ3bphpcLTnSZXz6z9jcVC/TCwbfgdw=="],
"real-require": ["real-require@0.2.0", "", {}, "sha512-57frrGM/OCTLqLOAh0mhVA9VBMHd+9U7Zb2THMGdBUoZVOtGbJzjxsYGDJ3A9AYYCP4hn6y1TVbaOfzWtm5GFg=="],
"remark": ["remark@15.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "remark-parse": "^11.0.0", "remark-stringify": "^11.0.0", "unified": "^11.0.0" } }, "sha512-Eht5w30ruCXgFmxVUSlNWQ9iiimq07URKeFS3hNc8cUWy1llX4KDWfyEDZRycMc+znsN9Ux5/tJ/BFdgdOwA3A=="],
"remark-html": ["remark-html@16.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "hast-util-sanitize": "^5.0.0", "hast-util-to-html": "^9.0.0", "mdast-util-to-hast": "^13.0.0", "unified": "^11.0.0" } }, "sha512-B9JqA5i0qZe0Nsf49q3OXyGvyXuZFDzAP2iOFLEumymuYJITVpiH1IgsTEwTpdptDmZlMDMWeDmSawdaJIGCXQ=="],
"remark-parse": ["remark-parse@11.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-from-markdown": "^2.0.0", "micromark-util-types": "^2.0.0", "unified": "^11.0.0" } }, "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA=="],
"remark-rehype": ["remark-rehype@11.1.2", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "mdast-util-to-hast": "^13.0.0", "unified": "^11.0.0", "vfile": "^6.0.0" } }, "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw=="],
"remark-stringify": ["remark-stringify@11.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-to-markdown": "^2.0.0", "unified": "^11.0.0" } }, "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw=="],
"require-from-string": ["require-from-string@2.0.2", "", {}, "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw=="],
"ret": ["ret@0.5.0", "", {}, "sha512-I1XxrZSQ+oErkRR4jYbAyEEu2I0avBvvMM5JN+6EBprOGRCs63ENqZ3vjavq8fBw2+62G5LF5XelKwuJpcvcxw=="],
@@ -434,7 +580,7 @@
"safe-stable-stringify": ["safe-stable-stringify@2.5.0", "", {}, "sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA=="],
"scheduler": ["scheduler@0.23.2", "", { "dependencies": { "loose-envify": "^1.1.0" } }, "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ=="],
"scheduler": ["scheduler@0.27.0", "", {}, "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q=="],
"secure-json-parse": ["secure-json-parse@4.0.0", "", {}, "sha512-dxtLJO6sc35jWidmLxo7ij+Eg48PM/kleBsxpC8QJE0qJICe+KawkDQmvCMZUr9u7WKVHgMW6vy3fQ7zMiFZMA=="],
@@ -454,6 +600,8 @@
"sonic-boom": ["sonic-boom@4.2.0", "", { "dependencies": { "atomic-sleep": "^1.0.0" } }, "sha512-INb7TM37/mAcsGmc9hyyI6+QR3rR1zVRu36B0NeGXKnOOLiZOfER5SA+N7X7k3yUYRzLWafduTDvJAfDswwEww=="],
"space-separated-tokens": ["space-separated-tokens@2.0.2", "", {}, "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q=="],
"split": ["split@0.3.3", "", { "dependencies": { "through": "2" } }, "sha512-wD2AeVmxXRBoX44wAycgjVpMhvbwdI2aZjCkvfNcH1YqHQvJVa1duWc73OyVGJUc05fhFaTZeQ/PYsrmyH0JVA=="],
"split2": ["split2@4.2.0", "", {}, "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg=="],
@@ -462,10 +610,16 @@
"string-width": ["string-width@7.1.0", "", { "dependencies": { "emoji-regex": "^10.3.0", "get-east-asian-width": "^1.0.0", "strip-ansi": "^7.1.0" } }, "sha512-SEIJCWiX7Kg4c129n48aDRwLbFb2LJmXXFrWBG4NGaRtMQ3myKPKbwrD1BKqQn74oCoNMBVrfDEr5M9YxCsrkw=="],
"stringify-entities": ["stringify-entities@4.0.4", "", { "dependencies": { "character-entities-html4": "^2.0.0", "character-entities-legacy": "^3.0.0" } }, "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg=="],
"strip-ansi": ["strip-ansi@7.1.0", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ=="],
"strip-final-newline": ["strip-final-newline@3.0.0", "", {}, "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw=="],
"style-to-js": ["style-to-js@1.1.21", "", { "dependencies": { "style-to-object": "1.0.14" } }, "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ=="],
"style-to-object": ["style-to-object@1.0.14", "", { "dependencies": { "inline-style-parser": "0.2.7" } }, "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw=="],
"supports-color": ["supports-color@5.5.0", "", { "dependencies": { "has-flag": "^3.0.0" } }, "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow=="],
"tar": ["tar@7.5.2", "", { "dependencies": { "@isaacs/fs-minipass": "^4.0.0", "chownr": "^3.0.0", "minipass": "^7.1.2", "minizlib": "^3.1.0", "yallist": "^5.0.0" } }, "sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg=="],
@@ -482,12 +636,32 @@
"toad-cache": ["toad-cache@3.7.0", "", {}, "sha512-/m8M+2BJUpoJdgAHoG+baCwBT+tf2VraSfkBgl0Y00qIWt41DJ8R5B8nsEw0I58YwF5IZH6z24/2TobDKnqSWw=="],
"trim-lines": ["trim-lines@3.0.1", "", {}, "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg=="],
"trough": ["trough@2.2.0", "", {}, "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw=="],
"undici-types": ["undici-types@5.26.5", "", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="],
"unified": ["unified@11.0.5", "", { "dependencies": { "@types/unist": "^3.0.0", "bail": "^2.0.0", "devlop": "^1.0.0", "extend": "^3.0.0", "is-plain-obj": "^4.0.0", "trough": "^2.0.0", "vfile": "^6.0.0" } }, "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA=="],
"unist-util-is": ["unist-util-is@6.0.1", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g=="],
"unist-util-position": ["unist-util-position@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA=="],
"unist-util-stringify-position": ["unist-util-stringify-position@4.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ=="],
"unist-util-visit": ["unist-util-visit@5.1.0", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg=="],
"unist-util-visit-parents": ["unist-util-visit-parents@6.0.2", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0" } }, "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ=="],
"universalify": ["universalify@2.0.1", "", {}, "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw=="],
"update-browserslist-db": ["update-browserslist-db@1.0.10", "", { "dependencies": { "escalade": "^3.1.1", "picocolors": "^1.0.0" }, "peerDependencies": { "browserslist": ">= 4.21.0" }, "bin": { "browserslist-lint": "cli.js" } }, "sha512-OztqDenkfFkbSG+tRxBeAnCVPckDBcvibKd35yDONx6OU8N7sqgwc7rCbkJ/WcYtVRZ4ba68d6byhC21GFh7sQ=="],
"vfile": ["vfile@6.0.3", "", { "dependencies": { "@types/unist": "^3.0.0", "vfile-message": "^4.0.0" } }, "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q=="],
"vfile-message": ["vfile-message@4.0.3", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-stringify-position": "^4.0.0" } }, "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw=="],
"web-streams-polyfill": ["web-streams-polyfill@3.3.2", "", {}, "sha512-3pRGuxRF5gpuZc0W+EpwQRmCD7gRqcDOMt688KmdlDAgAyaB1XlN0zq2njfDNm44XVdIouE7pZ6GzbdyH47uIQ=="],
"webpod": ["webpod@0.0.2", "", { "bin": { "webpod": "dist/index.js" } }, "sha512-cSwwQIeg8v4i3p4ajHhwgR7N6VyxAf+KYSSsY6Pd3aETE+xEU4vbitz7qQkB0I321xnhDdgtxuiSfk5r/FVtjg=="],
@@ -500,6 +674,8 @@
"yaml": ["yaml@2.3.4", "", {}, "sha512-8aAvwVUSHpfEqTQ4w/KMlf3HcRdt50E5ODIQJBw1fQ5RL34xabzxtUlzTXVqc4rkZsPbvrXKWnABCD7kWSmocA=="],
"zwitch": ["zwitch@2.0.4", "", {}, "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A=="],
"zx": ["zx@7.2.3", "", { "dependencies": { "@types/fs-extra": "^11.0.1", "@types/minimist": "^1.2.2", "@types/node": "^18.16.3", "@types/ps-tree": "^1.1.2", "@types/which": "^3.0.0", "chalk": "^5.2.0", "fs-extra": "^11.1.1", "fx": "*", "globby": "^13.1.4", "minimist": "^1.2.8", "node-fetch": "3.3.1", "ps-tree": "^1.2.0", "webpod": "^0", "which": "^3.0.0", "yaml": "^2.2.2" }, "bin": { "zx": "build/cli.js" } }, "sha512-QODu38nLlYXg/B/Gw7ZKiZrvPkEsjPN3LQ5JFXM7h0JvwhEdPNNl+4Ao1y4+o3CLNiDUNcwzQYZ4/Ko7kKzCMA=="],
"@babel/generator/@jridgewell/gen-mapping": ["@jridgewell/gen-mapping@0.3.2", "", { "dependencies": { "@jridgewell/set-array": "^1.0.1", "@jridgewell/sourcemap-codec": "^1.4.10", "@jridgewell/trace-mapping": "^0.3.9" } }, "sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A=="],
@@ -518,6 +694,8 @@
"npm-run-path/path-key": ["path-key@4.0.0", "", {}, "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ=="],
"parse-entities/@types/unist": ["@types/unist@2.0.11", "", {}, "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="],
"@babel/highlight/chalk/ansi-styles": ["ansi-styles@3.2.1", "", { "dependencies": { "color-convert": "^1.9.0" } }, "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA=="],
"@babel/highlight/chalk/ansi-styles/color-convert": ["color-convert@1.9.3", "", { "dependencies": { "color-name": "1.1.3" } }, "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg=="],

15
bench/json5/bun.lock Normal file
View File

@@ -0,0 +1,15 @@
{
"lockfileVersion": 1,
"configVersion": 1,
"workspaces": {
"": {
"name": "json5-benchmark",
"dependencies": {
"json5": "^2.2.3",
},
},
},
"packages": {
"json5": ["json5@2.2.3", "", { "bin": { "json5": "lib/cli.js" } }, "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg=="],
}
}

88
bench/json5/json5.mjs Normal file
View File

@@ -0,0 +1,88 @@
import JSON5 from "json5";
import { bench, group, run } from "../runner.mjs";
const isBun = typeof Bun !== "undefined" && Bun.JSON5;
function sizeLabel(n) {
if (n >= 1024 * 1024) return `${(n / 1024 / 1024).toFixed(1)}MB`;
if (n >= 1024) return `${(n / 1024).toFixed(0)}KB`;
return `${n}B`;
}
// -- parse inputs --
const smallJson5 = `{
// User profile
name: "John Doe",
age: 30,
email: 'john@example.com',
active: true,
}`;
function generateLargeJson5(count) {
const lines = ["{\n // Auto-generated dataset\n items: [\n"];
for (let i = 0; i < count; i++) {
lines.push(` {
id: ${i},
name: 'item_${i}',
value: ${(Math.random() * 1000).toFixed(2)},
hex: 0x${i.toString(16).toUpperCase()},
active: ${i % 2 === 0},
tags: ['tag_${i % 10}', 'category_${i % 5}',],
// entry ${i}
},\n`);
}
lines.push(" ],\n total: " + count + ",\n status: 'complete',\n}\n");
return lines.join("");
}
const largeJson5 = generateLargeJson5(6500);
// -- stringify inputs --
const smallObject = {
name: "John Doe",
age: 30,
email: "john@example.com",
active: true,
};
const largeObject = {
items: Array.from({ length: 10000 }, (_, i) => ({
id: i,
name: `item_${i}`,
value: +(Math.random() * 1000).toFixed(2),
active: i % 2 === 0,
tags: [`tag_${i % 10}`, `category_${i % 5}`],
})),
total: 10000,
status: "complete",
};
const stringify = isBun ? Bun.JSON5.stringify : JSON5.stringify;
// -- parse benchmarks --
group(`parse small (${sizeLabel(smallJson5.length)})`, () => {
if (isBun) bench("Bun.JSON5.parse", () => Bun.JSON5.parse(smallJson5));
bench("json5.parse", () => JSON5.parse(smallJson5));
});
group(`parse large (${sizeLabel(largeJson5.length)})`, () => {
if (isBun) bench("Bun.JSON5.parse", () => Bun.JSON5.parse(largeJson5));
bench("json5.parse", () => JSON5.parse(largeJson5));
});
// -- stringify benchmarks --
group(`stringify small (${sizeLabel(stringify(smallObject).length)})`, () => {
if (isBun) bench("Bun.JSON5.stringify", () => Bun.JSON5.stringify(smallObject));
bench("json5.stringify", () => JSON5.stringify(smallObject));
});
group(`stringify large (${sizeLabel(stringify(largeObject).length)})`, () => {
if (isBun) bench("Bun.JSON5.stringify", () => Bun.JSON5.stringify(largeObject));
bench("json5.stringify", () => JSON5.stringify(largeObject));
});
await run();
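For orientation, here is a minimal sketch of the API the benchmark exercises. It assumes Bun.JSON5 mirrors the npm json5 package's parse shape, which is what the feature detection above relies on; the input string is illustrative.

```js
import JSON5 from "json5";

// Hypothetical usage sketch; `parse` falls back to the npm package
// when Bun.JSON5 is unavailable, exactly like the benchmark above.
const source = `{ name: 'bun', /* comment */ retries: 0x2a, }`;
const parse = typeof Bun !== "undefined" && Bun.JSON5 ? Bun.JSON5.parse : JSON5.parse;
console.log(parse(source).retries); // 42
```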

7
bench/json5/package.json Normal file
View File

@@ -0,0 +1,7 @@
{
"name": "json5-benchmark",
"version": "1.0.0",
"dependencies": {
"json5": "^2.2.3"
}
}

View File

@@ -14,14 +14,18 @@
"fast-glob": "3.3.1",
"fastify": "^5.0.0",
"fdir": "^6.1.0",
"mitata": "^1.0.25",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"marked": "^17.0.1",
"mitata": "1.0.20",
"react": "^19",
"react-dom": "^19",
"react-markdown": "^9.0.3",
"remark": "^15.0.1",
"remark-html": "^16.0.1",
"string-width": "7.1.0",
"wrap-ansi": "^9.0.0",
"strip-ansi": "^7.1.0",
"tar": "^7.4.3",
"tinycolor2": "^1.6.0",
"wrap-ansi": "^9.0.0",
"zx": "^7.2.3"
},
"scripts": {

View File

@@ -0,0 +1,92 @@
import React from "react";
import { renderToString } from "react-dom/server";
import ReactMarkdown from "react-markdown";
const markdown = `# Project README
## Introduction
This is a medium-sized markdown document that includes **bold text**, *italic text*,
and \`inline code\`. It also has [links](https://example.com) and various formatting.
## Features
- Feature one with **bold**
- Feature two with *emphasis*
- Feature three with \`code\`
- Feature four with [a link](https://example.com)
## Code Example
\`\`\`javascript
function hello() {
console.log("Hello, world!");
return 42;
}
const result = hello();
\`\`\`
## Table
| Name | Value | Description |
|------|-------|-------------|
| foo | 1 | First item |
| bar | 2 | Second item |
| baz | 3 | Third item |
## Blockquote
> This is a blockquote with **bold** and *italic* text.
> It spans multiple lines and contains a [link](https://example.com).
---
### Nested Lists
1. First ordered item
- Nested unordered
- Another nested
2. Second ordered item
1. Nested ordered
2. Another nested
3. Third ordered item
Some final paragraph with ~~strikethrough~~ text and more **formatting**.
`;
// Verify outputs are roughly the same
const bunHtml = renderToString(Bun.markdown.react(markdown));
const reactMarkdownHtml = renderToString(React.createElement(ReactMarkdown, { children: markdown }));
console.log("=== Bun.markdown.react output ===");
console.log(bunHtml.slice(0, 500));
console.log(`... (${bunHtml.length} chars total)\n`);
console.log("=== react-markdown output ===");
console.log(reactMarkdownHtml.slice(0, 500));
console.log(`... (${reactMarkdownHtml.length} chars total)\n`);
const server = Bun.serve({
port: 0,
routes: {
"/bun-markdown": () => {
return new Response(renderToString(Bun.markdown.react(markdown)), {
headers: { "Content-Type": "text/html" },
});
},
"/react-markdown": () => {
return new Response(renderToString(React.createElement(ReactMarkdown, { children: markdown })), {
headers: { "Content-Type": "text/html" },
});
},
},
});
console.log(`Server listening on ${server.url}`);
console.log(` ${server.url}bun-markdown`);
console.log(` ${server.url}react-markdown`);
console.log();
console.log("Run:");
console.log(` oha -c 20 -z 5s ${server.url}bun-markdown`);
console.log(` oha -c 20 -z 5s ${server.url}react-markdown`);

159
bench/snippets/markdown.mjs Normal file
View File

@@ -0,0 +1,159 @@
import { marked } from "marked";
import { remark } from "remark";
import remarkHtml from "remark-html";
import { bench, run, summary } from "../runner.mjs";
const remarkProcessor = remark().use(remarkHtml);
const small = `# Hello World
This is a **bold** and *italic* paragraph with a [link](https://example.com).
- Item 1
- Item 2
- Item 3
`;
const medium = `# Project README
## Introduction
This is a medium-sized markdown document that includes **bold text**, *italic text*,
and \`inline code\`. It also has [links](https://example.com) and various formatting.
## Features
- Feature one with **bold**
- Feature two with *emphasis*
- Feature three with \`code\`
- Feature four with [a link](https://example.com)
## Code Example
\`\`\`javascript
function hello() {
console.log("Hello, world!");
return 42;
}
const result = hello();
\`\`\`
## Table
| Name | Value | Description |
|------|-------|-------------|
| foo | 1 | First item |
| bar | 2 | Second item |
| baz | 3 | Third item |
## Blockquote
> This is a blockquote with **bold** and *italic* text.
> It spans multiple lines and contains a [link](https://example.com).
---
### Nested Lists
1. First ordered item
- Nested unordered
- Another nested
2. Second ordered item
1. Nested ordered
2. Another nested
3. Third ordered item
Some final paragraph with ~~strikethrough~~ text and more **formatting**.
`;
const large = medium.repeat(20);
const renderCallbacks = {
heading: (children, { level }) => `<h${level}>${children}</h${level}>`,
paragraph: children => `<p>${children}</p>`,
strong: children => `<strong>${children}</strong>`,
emphasis: children => `<em>${children}</em>`,
codespan: children => `<code>${children}</code>`,
code: (children, { language }) =>
language
? `<pre><code class="language-${language}">${children}</code></pre>`
: `<pre><code>${children}</code></pre>`,
link: (children, { href, title }) =>
title ? `<a href="${href}" title="${title}">${children}</a>` : `<a href="${href}">${children}</a>`,
image: (children, { src, title }) =>
title ? `<img src="${src}" alt="${children}" title="${title}" />` : `<img src="${src}" alt="${children}" />`,
list: (children, { ordered, start }) => (ordered ? `<ol start="${start}">${children}</ol>` : `<ul>${children}</ul>`),
listItem: children => `<li>${children}</li>`,
blockquote: children => `<blockquote>${children}</blockquote>`,
hr: () => `<hr />`,
strikethrough: children => `<del>${children}</del>`,
table: children => `<table>${children}</table>`,
thead: children => `<thead>${children}</thead>`,
tbody: children => `<tbody>${children}</tbody>`,
tr: children => `<tr>${children}</tr>`,
th: children => `<th>${children}</th>`,
td: children => `<td>${children}</td>`,
};
summary(() => {
if (typeof Bun !== "undefined" && Bun.markdown) {
bench(`small (${small.length} chars) - Bun.markdown.html`, () => {
return Bun.markdown.html(small);
});
bench(`small (${small.length} chars) - Bun.markdown.render`, () => {
return Bun.markdown.render(small, renderCallbacks);
});
}
bench(`small (${small.length} chars) - marked`, () => {
return marked(small);
});
bench(`small (${small.length} chars) - remark`, () => {
return remarkProcessor.processSync(small).toString();
});
});
summary(() => {
if (typeof Bun !== "undefined" && Bun.markdown) {
bench(`medium (${medium.length} chars) - Bun.markdown.html`, () => {
return Bun.markdown.html(medium);
});
bench(`medium (${medium.length} chars) - Bun.markdown.render`, () => {
return Bun.markdown.render(medium, renderCallbacks);
});
}
bench(`medium (${medium.length} chars) - marked`, () => {
return marked(medium);
});
bench(`medium (${medium.length} chars) - remark`, () => {
return remarkProcessor.processSync(medium).toString();
});
});
summary(() => {
if (typeof Bun !== "undefined" && Bun.markdown) {
bench(`large (${large.length} chars) - Bun.markdown.html`, () => {
return Bun.markdown.html(large);
});
bench(`large (${large.length} chars) - Bun.markdown.render`, () => {
return Bun.markdown.render(large, renderCallbacks);
});
}
bench(`large (${large.length} chars) - marked`, () => {
return marked(large);
});
bench(`large (${large.length} chars) - remark`, () => {
return remarkProcessor.processSync(large).toString();
});
});
await run();
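As a standalone illustration of the callback-based renderer benchmarked above, a minimal sketch follows. The callback names and signatures are taken from the renderCallbacks table in this file, not from separate documentation, and whether omitted callbacks fall back to sensible defaults is an assumption.

```js
// Hypothetical minimal invocation of Bun.markdown.render with a
// subset of the callbacks from the table above.
const html = Bun.markdown.render("# Hi\n\nSome *text*.", {
  heading: (children, { level }) => `<h${level}>${children}</h${level}>`,
  paragraph: children => `<p>${children}</p>`,
  emphasis: children => `<em>${children}</em>`,
});
console.log(html); // expected shape: <h1>Hi</h1><p>Some <em>text</em>.</p>
```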

View File

@@ -0,0 +1,20 @@
// Benchmark for [...set] optimization (WebKit#56539)
// https://github.com/WebKit/WebKit/pull/56539
import { bench, run } from "../runner.mjs";
const intSet10 = new Set([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
const intSet100 = new Set(Array.from({ length: 100 }, (_, i) => i));
const strSet10 = new Set(Array.from({ length: 10 }, (_, i) => `key-${i}`));
const strSet100 = new Set(Array.from({ length: 100 }, (_, i) => `key-${i}`));
const objSet10 = new Set(Array.from({ length: 10 }, (_, i) => ({ id: i })));
const objSet100 = new Set(Array.from({ length: 100 }, (_, i) => ({ id: i })));
bench("[...set] - integers (10)", () => [...intSet10]);
bench("[...set] - integers (100)", () => [...intSet100]);
bench("[...set] - strings (10)", () => [...strSet10]);
bench("[...set] - strings (100)", () => [...strSet100]);
bench("[...set] - objects (10)", () => [...objSet10]);
bench("[...set] - objects (100)", () => [...objSet100]);
await run();
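For reference, this is the pattern the benchmark stresses, next to an equivalent that takes a different code path. A sketch; the exact fast-path conditions are described in the linked WebKit PR.

```js
const s = new Set([1, 2, 3]);
const viaSpread = [...s];       // iteration path targeted by WebKit#56539
const viaFrom = Array.from(s);  // same result via a different path
console.log(viaSpread, viaFrom); // [ 1, 2, 3 ] [ 1, 2, 3 ]
```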

150
build.zig
View File

@@ -34,6 +34,7 @@ const BunBuildOptions = struct {
enable_asan: bool,
enable_fuzzilli: bool,
enable_valgrind: bool,
enable_tinycc: bool,
use_mimalloc: bool,
tracy_callstack_depth: u16,
reported_nodejs_version: Version,
@@ -84,6 +85,7 @@ const BunBuildOptions = struct {
opts.addOption(bool, "enable_asan", this.enable_asan);
opts.addOption(bool, "enable_fuzzilli", this.enable_fuzzilli);
opts.addOption(bool, "enable_valgrind", this.enable_valgrind);
opts.addOption(bool, "enable_tinycc", this.enable_tinycc);
opts.addOption(bool, "use_mimalloc", this.use_mimalloc);
opts.addOption([]const u8, "reported_nodejs_version", b.fmt("{f}", .{this.reported_nodejs_version}));
opts.addOption(bool, "zig_self_hosted_backend", this.no_llvm);
@@ -259,6 +261,7 @@ pub fn build(b: *Build) !void {
.enable_asan = b.option(bool, "enable_asan", "Enable asan") orelse false,
.enable_fuzzilli = b.option(bool, "enable_fuzzilli", "Enable fuzzilli instrumentation") orelse false,
.enable_valgrind = b.option(bool, "enable_valgrind", "Enable valgrind") orelse false,
.enable_tinycc = b.option(bool, "enable_tinycc", "Enable TinyCC for FFI JIT compilation") orelse true,
.use_mimalloc = b.option(bool, "use_mimalloc", "Use mimalloc as default allocator") orelse false,
.llvm_codegen_threads = b.option(u32, "llvm_codegen_threads", "Number of threads to use for LLVM codegen") orelse 1,
};
@@ -342,6 +345,7 @@ pub fn build(b: *Build) !void {
const step = b.step("check-debug", "Check for semantic analysis errors on some platforms");
addMultiCheck(b, step, build_options, &.{
.{ .os = .windows, .arch = .x86_64 },
.{ .os = .windows, .arch = .aarch64 },
.{ .os = .mac, .arch = .aarch64 },
.{ .os = .linux, .arch = .x86_64 },
}, &.{.Debug});
@@ -352,6 +356,7 @@ pub fn build(b: *Build) !void {
const step = b.step("check-all", "Check for semantic analysis errors on all supported platforms");
addMultiCheck(b, step, build_options, &.{
.{ .os = .windows, .arch = .x86_64 },
.{ .os = .windows, .arch = .aarch64 },
.{ .os = .mac, .arch = .x86_64 },
.{ .os = .mac, .arch = .aarch64 },
.{ .os = .linux, .arch = .x86_64 },
@@ -366,6 +371,7 @@ pub fn build(b: *Build) !void {
const step = b.step("check-all-debug", "Check for semantic analysis errors on all supported platforms in debug mode");
addMultiCheck(b, step, build_options, &.{
.{ .os = .windows, .arch = .x86_64 },
.{ .os = .windows, .arch = .aarch64 },
.{ .os = .mac, .arch = .x86_64 },
.{ .os = .mac, .arch = .aarch64 },
.{ .os = .linux, .arch = .x86_64 },
@@ -380,12 +386,14 @@ pub fn build(b: *Build) !void {
const step = b.step("check-windows", "Check for semantic analysis errors on Windows");
addMultiCheck(b, step, build_options, &.{
.{ .os = .windows, .arch = .x86_64 },
.{ .os = .windows, .arch = .aarch64 },
}, &.{ .Debug, .ReleaseFast });
}
{
const step = b.step("check-windows-debug", "Check for semantic analysis errors on Windows");
addMultiCheck(b, step, build_options, &.{
.{ .os = .windows, .arch = .x86_64 },
.{ .os = .windows, .arch = .aarch64 },
}, &.{.Debug});
}
{
@@ -422,6 +430,7 @@ pub fn build(b: *Build) !void {
const step = b.step("translate-c", "Copy generated translated-c-headers.zig to zig-out");
for ([_]TargetDescription{
.{ .os = .windows, .arch = .x86_64 },
.{ .os = .windows, .arch = .aarch64 },
.{ .os = .mac, .arch = .x86_64 },
.{ .os = .mac, .arch = .aarch64 },
.{ .os = .linux, .arch = .x86_64 },
@@ -450,6 +459,146 @@ pub fn build(b: *Build) !void {
// const run = b.addRunArtifact(exe);
// step.dependOn(&run.step);
}
// zig build generate-grapheme-tables
// Regenerates src/string/immutable/grapheme_tables.zig from the vendored uucode.
// Run this when updating src/deps/uucode. Normal builds use the committed file.
{
const step = b.step("generate-grapheme-tables", "Regenerate grapheme property tables from vendored uucode");
// --- Phase 1: Build uucode tables (separate module graph, no tables dependency) ---
const bt_config_mod = b.createModule(.{
.root_source_file = b.path("src/deps/uucode/src/config.zig"),
.target = b.graph.host,
});
const bt_types_mod = b.createModule(.{
.root_source_file = b.path("src/deps/uucode/src/types.zig"),
.target = b.graph.host,
});
bt_types_mod.addImport("config.zig", bt_config_mod);
bt_config_mod.addImport("types.zig", bt_types_mod);
const bt_config_x_mod = b.createModule(.{
.root_source_file = b.path("src/deps/uucode/src/x/config.x.zig"),
.target = b.graph.host,
});
const bt_types_x_mod = b.createModule(.{
.root_source_file = b.path("src/deps/uucode/src/x/types.x.zig"),
.target = b.graph.host,
});
bt_types_x_mod.addImport("config.x.zig", bt_config_x_mod);
bt_config_x_mod.addImport("types.x.zig", bt_types_x_mod);
bt_config_x_mod.addImport("types.zig", bt_types_mod);
bt_config_x_mod.addImport("config.zig", bt_config_mod);
const bt_build_config_mod = b.createModule(.{
.root_source_file = b.path("src/unicode/uucode/uucode_config.zig"),
.target = b.graph.host,
});
bt_build_config_mod.addImport("types.zig", bt_types_mod);
bt_build_config_mod.addImport("config.zig", bt_config_mod);
bt_build_config_mod.addImport("types.x.zig", bt_types_x_mod);
bt_build_config_mod.addImport("config.x.zig", bt_config_x_mod);
const build_tables_mod = b.createModule(.{
.root_source_file = b.path("src/deps/uucode/src/build/tables.zig"),
.target = b.graph.host,
.optimize = .Debug,
});
build_tables_mod.addImport("config.zig", bt_config_mod);
build_tables_mod.addImport("build_config", bt_build_config_mod);
build_tables_mod.addImport("types.zig", bt_types_mod);
const build_tables_exe = b.addExecutable(.{
.name = "uucode_build_tables",
.root_module = build_tables_mod,
.use_llvm = true,
});
const run_build_tables = b.addRunArtifact(build_tables_exe);
run_build_tables.setCwd(b.path("src/deps/uucode"));
const tables_path = run_build_tables.addOutputFileArg("tables.zig");
// --- Phase 2: Build grapheme-gen with full uucode (separate module graph) ---
const rt_config_mod = b.createModule(.{
.root_source_file = b.path("src/deps/uucode/src/config.zig"),
.target = b.graph.host,
});
const rt_types_mod = b.createModule(.{
.root_source_file = b.path("src/deps/uucode/src/types.zig"),
.target = b.graph.host,
});
rt_types_mod.addImport("config.zig", rt_config_mod);
rt_config_mod.addImport("types.zig", rt_types_mod);
const rt_config_x_mod = b.createModule(.{
.root_source_file = b.path("src/deps/uucode/src/x/config.x.zig"),
.target = b.graph.host,
});
const rt_types_x_mod = b.createModule(.{
.root_source_file = b.path("src/deps/uucode/src/x/types.x.zig"),
.target = b.graph.host,
});
rt_types_x_mod.addImport("config.x.zig", rt_config_x_mod);
rt_config_x_mod.addImport("types.x.zig", rt_types_x_mod);
rt_config_x_mod.addImport("types.zig", rt_types_mod);
rt_config_x_mod.addImport("config.zig", rt_config_mod);
const rt_build_config_mod = b.createModule(.{
.root_source_file = b.path("src/unicode/uucode/uucode_config.zig"),
.target = b.graph.host,
});
rt_build_config_mod.addImport("types.zig", rt_types_mod);
rt_build_config_mod.addImport("config.zig", rt_config_mod);
rt_build_config_mod.addImport("types.x.zig", rt_types_x_mod);
rt_build_config_mod.addImport("config.x.zig", rt_config_x_mod);
const rt_tables_mod = b.createModule(.{
.root_source_file = tables_path,
.target = b.graph.host,
});
rt_tables_mod.addImport("types.zig", rt_types_mod);
rt_tables_mod.addImport("types.x.zig", rt_types_x_mod);
rt_tables_mod.addImport("config.zig", rt_config_mod);
rt_tables_mod.addImport("build_config", rt_build_config_mod);
const rt_get_mod = b.createModule(.{
.root_source_file = b.path("src/deps/uucode/src/get.zig"),
.target = b.graph.host,
});
rt_get_mod.addImport("types.zig", rt_types_mod);
rt_get_mod.addImport("tables", rt_tables_mod);
rt_types_mod.addImport("get.zig", rt_get_mod);
const uucode_mod = b.createModule(.{
.root_source_file = b.path("src/deps/uucode/src/root.zig"),
.target = b.graph.host,
});
uucode_mod.addImport("types.zig", rt_types_mod);
uucode_mod.addImport("config.zig", rt_config_mod);
uucode_mod.addImport("types.x.zig", rt_types_x_mod);
uucode_mod.addImport("tables", rt_tables_mod);
uucode_mod.addImport("get.zig", rt_get_mod);
// grapheme_gen executable
const gen_exe = b.addExecutable(.{
.name = "grapheme-gen",
.root_module = b.createModule(.{
.root_source_file = b.path("src/unicode/uucode/grapheme_gen.zig"),
.target = b.graph.host,
.optimize = .Debug,
.imports = &.{
.{ .name = "uucode", .module = uucode_mod },
},
}),
.use_llvm = true,
});
const run_gen = b.addRunArtifact(gen_exe);
const gen_output = run_gen.captureStdOut();
const install = b.addInstallFile(gen_output, "../src/string/immutable/grapheme_tables.zig");
step.dependOn(&install.step);
}
}
const TargetDescription = struct {
@@ -493,6 +642,7 @@ fn addMultiCheck(
.no_llvm = root_build_options.no_llvm,
.enable_asan = root_build_options.enable_asan,
.enable_valgrind = root_build_options.enable_valgrind,
.enable_tinycc = root_build_options.enable_tinycc,
.enable_fuzzilli = root_build_options.enable_fuzzilli,
.use_mimalloc = root_build_options.use_mimalloc,
.override_no_export_cpp_apis = root_build_options.override_no_export_cpp_apis,

View File

@@ -21,6 +21,10 @@ endforeach()
if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm|ARM|arm64|ARM64|aarch64|AARCH64")
if(APPLE)
register_compiler_flags(-mcpu=apple-m1)
elseif(WIN32)
# Windows ARM64: use /clang: prefix for clang-cl, skip for MSVC cl.exe subprojects
# These flags are only understood by clang-cl, not MSVC cl.exe
register_compiler_flags(/clang:-march=armv8-a+crc /clang:-mtune=ampere1)
else()
register_compiler_flags(-march=armv8-a+crc -mtune=ampere1)
endif()
@@ -242,10 +246,17 @@ if(UNIX)
)
endif()
register_compiler_flags(
DESCRIPTION "Set C/C++ error limit"
-ferror-limit=${ERROR_LIMIT}
)
if(WIN32)
register_compiler_flags(
DESCRIPTION "Set C/C++ error limit"
/clang:-ferror-limit=${ERROR_LIMIT}
)
else()
register_compiler_flags(
DESCRIPTION "Set C/C++ error limit"
-ferror-limit=${ERROR_LIMIT}
)
endif()
# --- LTO ---
if(ENABLE_LTO)

View File

@@ -106,9 +106,9 @@ else()
endif()
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "arm64|ARM64|aarch64|AARCH64")
set(HOST_OS "aarch64")
set(HOST_ARCH "aarch64")
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64|X86_64|x64|X64|amd64|AMD64")
set(HOST_OS "x64")
set(HOST_ARCH "x64")
else()
unsupported(CMAKE_HOST_SYSTEM_PROCESSOR)
endif()
@@ -433,6 +433,33 @@ function(register_command)
list(APPEND CMD_EFFECTIVE_DEPENDS ${CMD_EXECUTABLE})
endif()
# SKIP_CODEGEN: Skip commands that use BUN_EXECUTABLE if all outputs exist
# This is used for Windows ARM64 builds where x64 bun crashes under emulation
if(SKIP_CODEGEN AND CMD_EXECUTABLE STREQUAL "${BUN_EXECUTABLE}")
set(ALL_OUTPUTS_EXIST TRUE)
foreach(output ${CMD_OUTPUTS})
if(NOT EXISTS ${output})
set(ALL_OUTPUTS_EXIST FALSE)
break()
endif()
endforeach()
if(ALL_OUTPUTS_EXIST AND CMD_OUTPUTS)
message(STATUS "SKIP_CODEGEN: Skipping ${CMD_TARGET} (outputs exist)")
if(CMD_TARGET)
add_custom_target(${CMD_TARGET})
endif()
return()
elseif(NOT CMD_OUTPUTS)
message(STATUS "SKIP_CODEGEN: Skipping ${CMD_TARGET} (no outputs)")
if(CMD_TARGET)
add_custom_target(${CMD_TARGET})
endif()
return()
else()
message(FATAL_ERROR "SKIP_CODEGEN: Cannot skip ${CMD_TARGET} - missing outputs. Run codegen on x64 first.")
endif()
endif()
foreach(target ${CMD_TARGETS})
if(target MATCHES "/|\\\\")
message(FATAL_ERROR "register_command: TARGETS contains \"${target}\", if it's a path add it to SOURCES instead")
@@ -650,6 +677,7 @@ function(register_bun_install)
${NPM_CWD}
COMMAND
${BUN_EXECUTABLE}
${BUN_FLAGS}
install
--frozen-lockfile
SOURCES
@@ -757,7 +785,7 @@ function(register_cmake_command)
set(MAKE_EFFECTIVE_ARGS -B${MAKE_BUILD_PATH} ${CMAKE_ARGS})
set(setFlags GENERATOR BUILD_TYPE)
set(appendFlags C_FLAGS CXX_FLAGS LINKER_FLAGS)
set(appendFlags C_FLAGS CXX_FLAGS LINKER_FLAGS STATIC_LINKER_FLAGS EXE_LINKER_FLAGS SHARED_LINKER_FLAGS MODULE_LINKER_FLAGS)
set(specialFlags POSITION_INDEPENDENT_CODE)
set(flags ${setFlags} ${appendFlags} ${specialFlags})
@@ -803,6 +831,14 @@ function(register_cmake_command)
list(APPEND MAKE_EFFECTIVE_ARGS "-DCMAKE_${flag}=${MAKE_${flag}}")
endforeach()
# Workaround for a CMake 4.1.0 bug: force the correct machine type for Windows ARM64
# Use toolchain file and set CMP0197 policy to prevent duplicate /machine: flags
if(WIN32 AND CMAKE_SYSTEM_PROCESSOR MATCHES "ARM64|aarch64|AARCH64")
list(APPEND MAKE_EFFECTIVE_ARGS "-DCMAKE_TOOLCHAIN_FILE=${CWD}/cmake/toolchains/windows-aarch64.cmake")
list(APPEND MAKE_EFFECTIVE_ARGS "-DCMAKE_POLICY_DEFAULT_CMP0197=NEW")
list(APPEND MAKE_EFFECTIVE_ARGS "-DCMAKE_PROJECT_INCLUDE=${CWD}/cmake/arm64-static-lib-fix.cmake")
endif()
if(DEFINED FRESH)
list(APPEND MAKE_EFFECTIVE_ARGS --fresh)
endif()
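Restated as a sketch, the SKIP_CODEGEN branch added to register_command above makes a three-way decision. JavaScript is used here purely for illustration; planCodegen is not part of the build.

```js
import { existsSync } from "node:fs";

// Mirrors the CMake logic: with SKIP_CODEGEN on, a bun-driven codegen
// command is skipped when it declares no outputs, or when every declared
// output already exists; otherwise configuration fails fast.
function planCodegen(skipCodegen, outputs) {
  if (!skipCodegen) return "run";
  if (outputs.length === 0) return "skip";
  if (outputs.every(out => existsSync(out))) return "skip";
  throw new Error("SKIP_CODEGEN: missing outputs, run codegen on x64 first");
}
```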

View File

@@ -4,6 +4,7 @@ endif()
optionx(BUN_LINK_ONLY BOOL "If only the linking step should be built" DEFAULT OFF)
optionx(BUN_CPP_ONLY BOOL "If only the C++ part of Bun should be built" DEFAULT OFF)
optionx(SKIP_CODEGEN BOOL "Skip JavaScript codegen (for Windows ARM64 debug)" DEFAULT OFF)
optionx(BUILDKITE BOOL "If Buildkite is enabled" DEFAULT OFF)
optionx(GITHUB_ACTIONS BOOL "If GitHub Actions is enabled" DEFAULT OFF)
@@ -49,7 +50,7 @@ else()
message(FATAL_ERROR "Unsupported operating system: ${CMAKE_SYSTEM_NAME}")
endif()
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|arm64|arm")
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|arm64|ARM64")
setx(ARCH "aarch64")
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64|x64|AMD64")
setx(ARCH "x64")
@@ -57,6 +58,18 @@ else()
message(FATAL_ERROR "Unsupported architecture: ${CMAKE_SYSTEM_PROCESSOR}")
endif()
# CMake 4.0+ policy CMP0197 controls how MSVC machine type flags are handled
# Setting to NEW prevents duplicate /machine: flags being added to linker commands
if(WIN32 AND ARCH STREQUAL "aarch64")
set(CMAKE_POLICY_DEFAULT_CMP0197 NEW)
set(CMAKE_MSVC_CMP0197 NEW)
# Set linker flags for exe/shared linking
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /machine:ARM64")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /machine:ARM64")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} /machine:ARM64")
set(CMAKE_STATIC_LINKER_FLAGS "${CMAKE_STATIC_LINKER_FLAGS} /machine:ARM64")
endif()
# Windows Code Signing Option
if(WIN32)
optionx(ENABLE_WINDOWS_CODESIGNING BOOL "Enable Windows code signing with DigiCert KeyLocker" DEFAULT OFF)
@@ -199,6 +212,16 @@ optionx(USE_WEBKIT_ICU BOOL "Use the ICU libraries from WebKit" DEFAULT ${DEFAUL
optionx(ERROR_LIMIT STRING "Maximum number of errors to show when compiling C++ code" DEFAULT "100")
# TinyCC is used for FFI JIT compilation
# Disable on Windows ARM64 where it's not yet supported
if(WIN32 AND ARCH STREQUAL "aarch64")
set(DEFAULT_ENABLE_TINYCC OFF)
else()
set(DEFAULT_ENABLE_TINYCC ON)
endif()
optionx(ENABLE_TINYCC BOOL "Enable TinyCC for FFI JIT compilation" DEFAULT ${DEFAULT_ENABLE_TINYCC})
# This is not an `option` because setting this variable to OFF is experimental
# and unsupported. This replaces the `use_mimalloc` variable previously in
# bun.zig, and enables C++ code to also be aware of the option.

View File

@@ -13,10 +13,7 @@
},
{
"output": "JavaScriptSources.txt",
"paths": [
"src/js/**/*.{js,ts}",
"src/install/PackageManager/scanner-entry.ts"
]
"paths": ["src/js/**/*.{js,ts}", "src/install/PackageManager/scanner-entry.ts"]
},
{
"output": "JavaScriptCodegenSources.txt",

View File

@@ -0,0 +1,8 @@
# This file is included after project() via CMAKE_PROJECT_INCLUDE
# It fixes the static library creation command to use ARM64 machine type
if(WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
# Override the static library creation commands to avoid spurious /machine:x64 flags
set(CMAKE_C_CREATE_STATIC_LIBRARY "<CMAKE_AR> /nologo /machine:ARM64 /out:<TARGET> <OBJECTS>" CACHE STRING "" FORCE)
set(CMAKE_CXX_CREATE_STATIC_LIBRARY "<CMAKE_AR> /nologo /machine:ARM64 /out:<TARGET> <OBJECTS>" CACHE STRING "" FORCE)
endif()

View File

@@ -21,7 +21,12 @@ if(NOT DEFINED CMAKE_HOST_SYSTEM_PROCESSOR)
endif()
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "arm64|ARM64|aarch64|AARCH64")
set(ZIG_ARCH "aarch64")
# Windows ARM64 can run x86_64 via emulation, and no native ARM64 Zig build exists yet
if(CMAKE_HOST_WIN32)
set(ZIG_ARCH "x86_64")
else()
set(ZIG_ARCH "aarch64")
endif()
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "amd64|AMD64|x86_64|X86_64|x64|X64")
set(ZIG_ARCH "x86_64")
else()

View File

@@ -0,0 +1,34 @@
@echo off
setlocal enabledelayedexpansion
REM Wrapper for llvm-lib that strips conflicting /machine:x64 flag for ARM64 builds
REM This is a workaround for a CMake 4.1.0 bug
REM Find llvm-lib.exe - check LLVM_LIB env var, then PATH, then known locations
if defined LLVM_LIB (
set "LLVM_LIB_EXE=!LLVM_LIB!"
) else (
where llvm-lib.exe >nul 2>&1
if !ERRORLEVEL! equ 0 (
for /f "delims=" %%i in ('where llvm-lib.exe') do set "LLVM_LIB_EXE=%%i"
) else if exist "C:\Program Files\LLVM\bin\llvm-lib.exe" (
set "LLVM_LIB_EXE=C:\Program Files\LLVM\bin\llvm-lib.exe"
) else (
echo Error: Cannot find llvm-lib.exe. Set LLVM_LIB environment variable or add LLVM to PATH.
exit /b 1
)
)
set "ARGS="
for %%a in (%*) do (
set "ARG=%%a"
if /i "!ARG!"=="/machine:x64" (
REM Skip this argument
) else (
set "ARGS=!ARGS! %%a"
)
)
"!LLVM_LIB_EXE!" %ARGS%
exit /b %ERRORLEVEL%

View File

@@ -0,0 +1,18 @@
# Wrapper for llvm-lib that strips conflicting /machine:x64 flag for ARM64 builds
# This is a workaround for a CMake 4.1.0 bug where both /machine:ARM64 and /machine:x64 are added
# Find llvm-lib.exe - check LLVM_LIB env var, then PATH, then known locations
if ($env:LLVM_LIB) {
$llvmLib = $env:LLVM_LIB
} elseif (Get-Command llvm-lib.exe -ErrorAction SilentlyContinue) {
$llvmLib = (Get-Command llvm-lib.exe).Source
} elseif (Test-Path "C:\Program Files\LLVM\bin\llvm-lib.exe") {
$llvmLib = "C:\Program Files\LLVM\bin\llvm-lib.exe"
} else {
Write-Error "Cannot find llvm-lib.exe. Set LLVM_LIB environment variable or add LLVM to PATH."
exit 1
}
$filteredArgs = $args | Where-Object { $_ -ne "/machine:x64" }
& $llvmLib @filteredArgs
exit $LASTEXITCODE

View File

@@ -0,0 +1,34 @@
@echo off
setlocal enabledelayedexpansion
REM Wrapper for llvm-lib that strips conflicting /machine:x64 flag for ARM64 builds
REM This is a workaround for a CMake 4.1.0 bug
REM Find llvm-lib.exe - check LLVM_LIB env var, then PATH, then known locations
if defined LLVM_LIB (
set "LLVM_LIB_EXE=!LLVM_LIB!"
) else (
where llvm-lib.exe >nul 2>&1
if !ERRORLEVEL! equ 0 (
for /f "delims=" %%i in ('where llvm-lib.exe') do set "LLVM_LIB_EXE=%%i"
) else if exist "C:\Program Files\LLVM\bin\llvm-lib.exe" (
set "LLVM_LIB_EXE=C:\Program Files\LLVM\bin\llvm-lib.exe"
) else (
echo Error: Cannot find llvm-lib.exe. Set LLVM_LIB environment variable or add LLVM to PATH.
exit /b 1
)
)
set NEWARGS=
for %%a in (%*) do (
set "ARG=%%a"
if /i "!ARG!"=="/machine:x64" (
REM Skip /machine:x64 argument
) else (
set "NEWARGS=!NEWARGS! %%a"
)
)
"!LLVM_LIB_EXE!" %NEWARGS%
exit /b %ERRORLEVEL%

View File

@@ -57,13 +57,17 @@ set(BUN_DEPENDENCIES
LolHtml
Lshpack
Mimalloc
TinyCC
Zlib
LibArchive # must be loaded after zlib
HdrHistogram # must be loaded after zlib
Zstd
)
# TinyCC is optional - disabled on Windows ARM64 where it's not supported
if(ENABLE_TINYCC)
list(APPEND BUN_DEPENDENCIES TinyCC)
endif()
include(CloneZstd)
# --- Codegen ---
@@ -185,7 +189,7 @@ register_command(
CWD
${BUN_NODE_FALLBACKS_SOURCE}
COMMAND
${BUN_EXECUTABLE} run build-fallbacks
${BUN_EXECUTABLE} ${BUN_FLAGS} run build-fallbacks
${BUN_NODE_FALLBACKS_OUTPUT}
${BUN_NODE_FALLBACKS_SOURCES}
SOURCES
@@ -206,7 +210,7 @@ register_command(
CWD
${BUN_NODE_FALLBACKS_SOURCE}
COMMAND
${BUN_EXECUTABLE} build
${BUN_EXECUTABLE} ${BUN_FLAGS} build
${BUN_NODE_FALLBACKS_SOURCE}/node_modules/react-refresh/cjs/react-refresh-runtime.development.js
--outfile=${BUN_REACT_REFRESH_OUTPUT}
--target=browser
@@ -243,6 +247,7 @@ register_command(
"Generating ErrorCode.{zig,h}"
COMMAND
${BUN_EXECUTABLE}
${BUN_FLAGS}
run
${BUN_ERROR_CODE_SCRIPT}
${CODEGEN_PATH}
@@ -278,6 +283,7 @@ register_command(
"Generating ZigGeneratedClasses.{zig,cpp,h}"
COMMAND
${BUN_EXECUTABLE}
${BUN_FLAGS}
run
${BUN_ZIG_GENERATED_CLASSES_SCRIPT}
${BUN_ZIG_GENERATED_CLASSES_SOURCES}
@@ -328,6 +334,7 @@ register_command(
"Generating C++ --> Zig bindings"
COMMAND
${BUN_EXECUTABLE}
${BUN_FLAGS}
${CWD}/src/codegen/cppbind.ts
${CWD}/src
${CODEGEN_PATH}
@@ -345,6 +352,7 @@ register_command(
"Generating CI info"
COMMAND
${BUN_EXECUTABLE}
${BUN_FLAGS}
${CWD}/src/codegen/ci_info.ts
${CODEGEN_PATH}/ci_info.zig
SOURCES
@@ -353,24 +361,35 @@ register_command(
${BUN_CI_INFO_OUTPUTS}
)
register_command(
TARGET
bun-js-modules
COMMENT
"Generating JavaScript modules"
COMMAND
${BUN_EXECUTABLE}
run
if(SKIP_CODEGEN)
# Skip JavaScript codegen - useful for Windows ARM64 debug builds where bun crashes
message(STATUS "SKIP_CODEGEN is ON - skipping bun-js-modules codegen")
foreach(output ${BUN_JAVASCRIPT_OUTPUTS})
if(NOT EXISTS ${output})
message(FATAL_ERROR "SKIP_CODEGEN is ON but ${output} does not exist. Run codegen manually first.")
endif()
endforeach()
else()
register_command(
TARGET
bun-js-modules
COMMENT
"Generating JavaScript modules"
COMMAND
${BUN_EXECUTABLE}
${BUN_FLAGS}
run
${BUN_JAVASCRIPT_CODEGEN_SCRIPT}
--debug=${DEBUG}
${BUILD_PATH}
SOURCES
${BUN_JAVASCRIPT_SOURCES}
${BUN_JAVASCRIPT_CODEGEN_SOURCES}
${BUN_JAVASCRIPT_CODEGEN_SCRIPT}
--debug=${DEBUG}
${BUILD_PATH}
SOURCES
${BUN_JAVASCRIPT_SOURCES}
${BUN_JAVASCRIPT_CODEGEN_SOURCES}
${BUN_JAVASCRIPT_CODEGEN_SCRIPT}
OUTPUTS
${BUN_JAVASCRIPT_OUTPUTS}
)
OUTPUTS
${BUN_JAVASCRIPT_OUTPUTS}
)
endif()
set(BUN_BAKE_RUNTIME_CODEGEN_SCRIPT ${CWD}/src/codegen/bake-codegen.ts)
@@ -392,6 +411,7 @@ register_command(
"Bundling Bake Runtime"
COMMAND
${BUN_EXECUTABLE}
${BUN_FLAGS}
run
${BUN_BAKE_RUNTIME_CODEGEN_SCRIPT}
--debug=${DEBUG}
@@ -415,7 +435,7 @@ string(REPLACE ";" "," BUN_BINDGENV2_SOURCES_COMMA_SEPARATED
"${BUN_BINDGENV2_SOURCES}")
execute_process(
COMMAND ${BUN_EXECUTABLE} run ${BUN_BINDGENV2_SCRIPT}
COMMAND ${BUN_EXECUTABLE} ${BUN_FLAGS} run ${BUN_BINDGENV2_SCRIPT}
--command=list-outputs
--sources=${BUN_BINDGENV2_SOURCES_COMMA_SEPARATED}
--codegen-path=${CODEGEN_PATH}
@@ -438,7 +458,7 @@ register_command(
COMMENT
"Generating bindings (v2)"
COMMAND
${BUN_EXECUTABLE} run ${BUN_BINDGENV2_SCRIPT}
${BUN_EXECUTABLE} ${BUN_FLAGS} run ${BUN_BINDGENV2_SCRIPT}
--command=generate
--codegen-path=${CODEGEN_PATH}
--sources=${BUN_BINDGENV2_SOURCES_COMMA_SEPARATED}
@@ -469,6 +489,7 @@ register_command(
"Processing \".bind.ts\" files"
COMMAND
${BUN_EXECUTABLE}
${BUN_FLAGS}
run
${BUN_BINDGEN_SCRIPT}
--debug=${DEBUG}
@@ -501,6 +522,7 @@ register_command(
"Generating JSSink.{cpp,h}"
COMMAND
${BUN_EXECUTABLE}
${BUN_FLAGS}
run
${BUN_JS_SINK_SCRIPT}
${CODEGEN_PATH}
@@ -573,6 +595,7 @@ foreach(i RANGE 0 ${BUN_OBJECT_LUT_SOURCES_MAX_INDEX})
${BUN_OBJECT_LUT_SOURCE}
COMMAND
${BUN_EXECUTABLE}
${BUN_FLAGS}
run
${BUN_OBJECT_LUT_SCRIPT}
${BUN_OBJECT_LUT_SOURCE}
@@ -656,6 +679,10 @@ endif()
if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm|ARM|arm64|ARM64|aarch64|AARCH64")
if(APPLE)
set(ZIG_CPU "apple_m1")
elseif(WIN32)
# Windows ARM64: use a specific CPU with NEON support
# Zig running under x64 emulation would detect the wrong CPU with "native"
set(ZIG_CPU "cortex_a76")
else()
set(ZIG_CPU "native")
endif()
@@ -694,6 +721,7 @@ register_command(
-Denable_asan=$<IF:$<BOOL:${ENABLE_ZIG_ASAN}>,true,false>
-Denable_fuzzilli=$<IF:$<BOOL:${ENABLE_FUZZILLI}>,true,false>
-Denable_valgrind=$<IF:$<BOOL:${ENABLE_VALGRIND}>,true,false>
-Denable_tinycc=$<IF:$<BOOL:${ENABLE_TINYCC}>,true,false>
-Duse_mimalloc=$<IF:$<BOOL:${USE_MIMALLOC_AS_DEFAULT_ALLOCATOR}>,true,false>
-Dllvm_codegen_threads=${LLVM_ZIG_CODEGEN_THREADS}
-Dversion=${VERSION}
@@ -911,7 +939,7 @@ if(WIN32)
endif()
if(USE_MIMALLOC_AS_DEFAULT_ALLOCATOR)
target_compile_definitions(${bun} PRIVATE USE_MIMALLOC=1)
target_compile_definitions(${bun} PRIVATE USE_BUN_MIMALLOC=1)
endif()
target_compile_definitions(${bun} PRIVATE
@@ -1211,7 +1239,7 @@ if(BUN_LINK_ONLY)
WEBKIT_DOWNLOAD_URL=${WEBKIT_DOWNLOAD_URL}
WEBKIT_VERSION=${WEBKIT_VERSION}
ZIG_COMMIT=${ZIG_COMMIT}
${BUN_EXECUTABLE} ${CWD}/scripts/create-link-metadata.mjs ${BUILD_PATH} ${bun}
${BUN_EXECUTABLE} ${BUN_FLAGS} ${CWD}/scripts/create-link-metadata.mjs ${BUILD_PATH} ${bun}
SOURCES
${BUN_ZIG_OUTPUT}
${BUN_CPP_OUTPUT}
@@ -1225,6 +1253,7 @@ if(WIN32)
target_link_libraries(${bun} PRIVATE
${WEBKIT_LIB_PATH}/WTF.lib
${WEBKIT_LIB_PATH}/JavaScriptCore.lib
${WEBKIT_LIB_PATH}/bmalloc.lib
${WEBKIT_LIB_PATH}/sicudtd.lib
${WEBKIT_LIB_PATH}/sicuind.lib
${WEBKIT_LIB_PATH}/sicuucd.lib
@@ -1233,6 +1262,7 @@ if(WIN32)
target_link_libraries(${bun} PRIVATE
${WEBKIT_LIB_PATH}/WTF.lib
${WEBKIT_LIB_PATH}/JavaScriptCore.lib
${WEBKIT_LIB_PATH}/bmalloc.lib
${WEBKIT_LIB_PATH}/sicudt.lib
${WEBKIT_LIB_PATH}/sicuin.lib
${WEBKIT_LIB_PATH}/sicuuc.lib
@@ -1243,13 +1273,18 @@ else()
${WEBKIT_LIB_PATH}/libWTF.a
${WEBKIT_LIB_PATH}/libJavaScriptCore.a
)
if(NOT APPLE OR EXISTS ${WEBKIT_LIB_PATH}/libbmalloc.a)
if(WEBKIT_LOCAL OR NOT APPLE OR EXISTS ${WEBKIT_LIB_PATH}/libbmalloc.a)
target_link_libraries(${bun} PRIVATE ${WEBKIT_LIB_PATH}/libbmalloc.a)
endif()
endif()
include_directories(${WEBKIT_INCLUDE_PATH})
# When building with a local WebKit, ensure JSC is built before compiling Bun's C++ sources.
if(WEBKIT_LOCAL AND TARGET jsc)
add_dependencies(${bun} jsc)
endif()
# Include the generated dependency versions header
include_directories(${CMAKE_BINARY_DIR})
@@ -1294,9 +1329,14 @@ if(LINUX)
target_link_libraries(${bun} PUBLIC libatomic.so)
endif()
target_link_libraries(${bun} PRIVATE ${WEBKIT_LIB_PATH}/libicudata.a)
target_link_libraries(${bun} PRIVATE ${WEBKIT_LIB_PATH}/libicui18n.a)
target_link_libraries(${bun} PRIVATE ${WEBKIT_LIB_PATH}/libicuuc.a)
if(WEBKIT_LOCAL)
find_package(ICU REQUIRED COMPONENTS data i18n uc)
target_link_libraries(${bun} PRIVATE ICU::data ICU::i18n ICU::uc)
else()
target_link_libraries(${bun} PRIVATE ${WEBKIT_LIB_PATH}/libicudata.a)
target_link_libraries(${bun} PRIVATE ${WEBKIT_LIB_PATH}/libicui18n.a)
target_link_libraries(${bun} PRIVATE ${WEBKIT_LIB_PATH}/libicuuc.a)
endif()
endif()
if(WIN32)

View File

@@ -20,6 +20,15 @@ set(HIGHWAY_CMAKE_ARGS
-DHWY_ENABLE_INSTALL=OFF
)
# On Windows ARM64 with clang-cl, the __ARM_NEON macro isn't defined by default
# but NEON intrinsics are supported. Define it so Highway can detect NEON support.
if(WIN32 AND CMAKE_SYSTEM_PROCESSOR MATCHES "ARM64|aarch64|AARCH64")
list(APPEND HIGHWAY_CMAKE_ARGS
-DCMAKE_C_FLAGS=-D__ARM_NEON=1
-DCMAKE_CXX_FLAGS=-D__ARM_NEON=1
)
endif()
register_cmake_command(
TARGET
highway

View File

@@ -4,7 +4,7 @@ register_repository(
REPOSITORY
cloudflare/lol-html
COMMIT
d64457d9ff0143deef025d5df7e8586092b9afb7
e9e16dca48dd4a8ffbc77642bc4be60407585f11
)
set(LOLHTML_CWD ${VENDOR_PATH}/lolhtml/c-api)
@@ -33,6 +33,37 @@ if (NOT WIN32)
set(RUSTFLAGS "-Cpanic=abort-Cdebuginfo=0-Cforce-unwind-tables=no-Copt-level=s")
endif()
# On Windows, ensure MSVC link.exe is used instead of Git's link.exe
set(LOLHTML_ENV
CARGO_TERM_COLOR=always
CARGO_TERM_VERBOSE=true
CARGO_TERM_DIAGNOSTIC=true
CARGO_ENCODED_RUSTFLAGS=${RUSTFLAGS}
CARGO_HOME=${CARGO_HOME}
RUSTUP_HOME=${RUSTUP_HOME}
)
if(WIN32)
# On Windows, tell Rust to use MSVC link.exe directly via the target-specific linker env var.
# This avoids Git's /usr/bin/link being found first in PATH.
# Find the MSVC link.exe from Visual Studio installation
file(GLOB MSVC_VERSIONS "C:/Program Files/Microsoft Visual Studio/2022/*/VC/Tools/MSVC/*")
if(MSVC_VERSIONS)
list(GET MSVC_VERSIONS -1 MSVC_LATEST) # Get the latest version
if(CMAKE_SYSTEM_PROCESSOR MATCHES "ARM64|aarch64")
set(MSVC_LINK_PATH "${MSVC_LATEST}/bin/HostARM64/arm64/link.exe")
set(CARGO_LINKER_VAR "CARGO_TARGET_AARCH64_PC_WINDOWS_MSVC_LINKER")
else()
set(MSVC_LINK_PATH "${MSVC_LATEST}/bin/Hostx64/x64/link.exe")
set(CARGO_LINKER_VAR "CARGO_TARGET_X86_64_PC_WINDOWS_MSVC_LINKER")
endif()
if(EXISTS "${MSVC_LINK_PATH}")
list(APPEND LOLHTML_ENV "${CARGO_LINKER_VAR}=${MSVC_LINK_PATH}")
message(STATUS "lolhtml: Using MSVC link.exe: ${MSVC_LINK_PATH}")
endif()
endif()
endif()
register_command(
TARGET
lolhtml
@@ -45,12 +76,7 @@ register_command(
ARTIFACTS
${LOLHTML_LIBRARY}
ENVIRONMENT
CARGO_TERM_COLOR=always
CARGO_TERM_VERBOSE=true
CARGO_TERM_DIAGNOSTIC=true
CARGO_ENCODED_RUSTFLAGS=${RUSTFLAGS}
CARGO_HOME=${CARGO_HOME}
RUSTUP_HOME=${RUSTUP_HOME}
${LOLHTML_ENV}
)
target_link_libraries(${bun} PRIVATE ${LOLHTML_LIBRARY})

View File

@@ -4,7 +4,7 @@ register_repository(
REPOSITORY
oven-sh/mimalloc
COMMIT
1beadf9651a7bfdec6b5367c380ecc3fe1c40d1a
ffa38ab8ac914f9eb7af75c1f8ad457643dc14f2
)
set(MIMALLOC_CMAKE_ARGS
@@ -14,7 +14,7 @@ set(MIMALLOC_CMAKE_ARGS
-DMI_BUILD_TESTS=OFF
-DMI_USE_CXX=ON
-DMI_SKIP_COLLECT_ON_EXIT=ON
# ```
# mimalloc_allow_large_os_pages=0 BUN_PORT=3004 mem bun http-hello.js
# Started development server: http://localhost:3004
@@ -51,7 +51,7 @@ if(ENABLE_ASAN)
list(APPEND MIMALLOC_CMAKE_ARGS -DMI_DEBUG_UBSAN=ON)
elseif(APPLE OR LINUX)
if(APPLE)
list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OVERRIDE=OFF)
list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OVERRIDE=OFF)
list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OSX_ZONE=OFF)
list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OSX_INTERPOSE=OFF)
else()
@@ -69,17 +69,27 @@ if(ENABLE_VALGRIND)
list(APPEND MIMALLOC_CMAKE_ARGS -DMI_VALGRIND=ON)
endif()
# Enable SIMD optimizations when not building for baseline (older CPUs)
if(NOT ENABLE_BASELINE)
# Enable architecture-specific optimizations when not building for baseline.
# On Linux aarch64, upstream mimalloc force-enables MI_OPT_ARCH which adds
# -march=armv8.1-a (LSE atomics). This crashes on ARMv8.0 CPUs
# (Cortex-A53, Raspberry Pi 4, AWS a1 instances). Use MI_NO_OPT_ARCH
# to prevent that, but keep SIMD enabled; -moutline-atomics provides runtime
# dispatch to LSE/LL-SC. macOS arm64 always has LSE (Apple Silicon) so
# MI_OPT_ARCH is safe there.
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|arm64|ARM64|AARCH64" AND NOT APPLE)
list(APPEND MIMALLOC_CMAKE_ARGS -DMI_NO_OPT_ARCH=ON)
list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OPT_SIMD=ON)
list(APPEND MIMALLOC_CMAKE_ARGS "-DCMAKE_C_FLAGS=-moutline-atomics")
elseif(NOT ENABLE_BASELINE)
list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OPT_ARCH=ON)
list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OPT_SIMD=ON)
endif()
if(WIN32)
if(DEBUG)
set(MIMALLOC_LIBRARY mimalloc-static-debug)
set(MIMALLOC_LIBRARY mimalloc-debug)
else()
set(MIMALLOC_LIBRARY mimalloc-static)
set(MIMALLOC_LIBRARY mimalloc)
endif()
elseif(DEBUG)
if (ENABLE_ASAN)

View File

@@ -4,7 +4,7 @@ register_repository(
REPOSITORY
oven-sh/tinycc
COMMIT
29985a3b59898861442fa3b43f663fc1af2591d7
12882eee073cfe5c7621bcfadf679e1372d4537b
)
register_cmake_command(

View File

@@ -0,0 +1,20 @@
set(CMAKE_SYSTEM_NAME Windows)
set(CMAKE_SYSTEM_PROCESSOR aarch64)
set(CMAKE_C_COMPILER_WORKS ON)
set(CMAKE_CXX_COMPILER_WORKS ON)
# Force ARM64 architecture ID - this is what CMake uses to determine /machine: flag
set(MSVC_C_ARCHITECTURE_ID ARM64 CACHE INTERNAL "")
set(MSVC_CXX_ARCHITECTURE_ID ARM64 CACHE INTERNAL "")
# CMake 4.0+ policy CMP0197 controls how MSVC machine type flags are handled
set(CMAKE_POLICY_DEFAULT_CMP0197 NEW CACHE INTERNAL "")
# Clear any inherited static linker flags that might have wrong machine types
set(CMAKE_STATIC_LINKER_FLAGS "" CACHE STRING "" FORCE)
# Use wrapper script for llvm-lib that strips /machine:x64 flags
# This works around a CMake 4.1.0 bug where both ARM64 and x64 machine flags are added
get_filename_component(_TOOLCHAIN_DIR "${CMAKE_CURRENT_LIST_DIR}" DIRECTORY)
set(CMAKE_AR "${_TOOLCHAIN_DIR}/scripts/llvm-lib-wrapper.bat" CACHE FILEPATH "" FORCE)

View File

@@ -6,7 +6,8 @@ endif()
optionx(BUILDKITE_ORGANIZATION_SLUG STRING "The organization slug to use on Buildkite" DEFAULT "bun")
optionx(BUILDKITE_PIPELINE_SLUG STRING "The pipeline slug to use on Buildkite" DEFAULT "bun")
optionx(BUILDKITE_BUILD_ID STRING "The build ID to use on Buildkite")
optionx(BUILDKITE_BUILD_ID STRING "The build ID (UUID) to use on Buildkite")
optionx(BUILDKITE_BUILD_NUMBER STRING "The build number to use on Buildkite")
optionx(BUILDKITE_GROUP_ID STRING "The group ID to use on Buildkite")
if(ENABLE_BASELINE)
@@ -32,7 +33,13 @@ if(NOT BUILDKITE_BUILD_ID)
return()
endif()
setx(BUILDKITE_BUILD_URL https://buildkite.com/${BUILDKITE_ORGANIZATION_SLUG}/${BUILDKITE_PIPELINE_SLUG}/builds/${BUILDKITE_BUILD_ID})
# Use BUILDKITE_BUILD_NUMBER for the URL if available, as the UUID format causes a 302 redirect
# that CMake's file(DOWNLOAD) doesn't follow, resulting in an empty response.
if(BUILDKITE_BUILD_NUMBER)
setx(BUILDKITE_BUILD_URL https://buildkite.com/${BUILDKITE_ORGANIZATION_SLUG}/${BUILDKITE_PIPELINE_SLUG}/builds/${BUILDKITE_BUILD_NUMBER})
else()
setx(BUILDKITE_BUILD_URL https://buildkite.com/${BUILDKITE_ORGANIZATION_SLUG}/${BUILDKITE_PIPELINE_SLUG}/builds/${BUILDKITE_BUILD_ID})
endif()
setx(BUILDKITE_BUILD_PATH ${BUILDKITE_BUILDS_PATH}/builds/${BUILDKITE_BUILD_ID})
file(
@@ -48,8 +55,16 @@ if(NOT BUILDKITE_BUILD_STATUS EQUAL 0)
endif()
file(READ ${BUILDKITE_BUILD_PATH}/build.json BUILDKITE_BUILD)
# Escape backslashes so CMake doesn't interpret JSON escape sequences (e.g., \n in commit messages)
string(REPLACE "\\" "\\\\" BUILDKITE_BUILD "${BUILDKITE_BUILD}")
# CMake's string(JSON ...) interprets escape sequences like \n, \r, \t.
# We need to escape these specific sequences while preserving valid JSON escapes like \" and \\.
# Strategy: Use a unique placeholder to protect \\ sequences, escape \n/\r/\t, then restore \\.
# This prevents \\n (literal backslash + n) from being corrupted to \\\n.
set(BKSLASH_PLACEHOLDER "___BKSLASH_PLACEHOLDER_7f3a9b2c___")
string(REPLACE "\\\\" "${BKSLASH_PLACEHOLDER}" BUILDKITE_BUILD "${BUILDKITE_BUILD}")
string(REPLACE "\\n" "\\\\n" BUILDKITE_BUILD "${BUILDKITE_BUILD}")
string(REPLACE "\\r" "\\\\r" BUILDKITE_BUILD "${BUILDKITE_BUILD}")
string(REPLACE "\\t" "\\\\t" BUILDKITE_BUILD "${BUILDKITE_BUILD}")
string(REPLACE "${BKSLASH_PLACEHOLDER}" "\\\\" BUILDKITE_BUILD "${BUILDKITE_BUILD}")
string(JSON BUILDKITE_BUILD_UUID GET ${BUILDKITE_BUILD} id)
string(JSON BUILDKITE_JOBS GET ${BUILDKITE_BUILD} jobs)
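The same placeholder strategy, restated as a sketch in JavaScript for readability; escapeForCMakeJson is illustrative, and the authoritative logic is the string(REPLACE ...) sequence above.

```js
// Protect literal "\\" first so that a real backslash followed by "n"
// is not mistaken for an escaped newline, then double the remaining
// escapes and restore the protected backslashes.
const PLACEHOLDER = "___BKSLASH_PLACEHOLDER_7f3a9b2c___";
function escapeForCMakeJson(json) {
  return json
    .replaceAll("\\\\", PLACEHOLDER)
    .replaceAll("\\n", "\\\\n")
    .replaceAll("\\r", "\\\\r")
    .replaceAll("\\t", "\\\\t")
    .replaceAll(PLACEHOLDER, "\\\\");
}
```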

View File

@@ -17,6 +17,14 @@ if (NOT CI)
set(BUN_EXECUTABLE ${BUN_EXECUTABLE} CACHE FILEPATH "Bun executable" FORCE)
endif()
# On Windows ARM64, we need to add the --smol flag to avoid crashes when running
# x64 bun under WoW64 emulation
if(WIN32 AND ARCH STREQUAL "aarch64")
set(BUN_FLAGS "--smol" CACHE STRING "Extra flags for bun executable")
else()
set(BUN_FLAGS "" CACHE STRING "Extra flags for bun executable")
endif()
# If this is not set, some advanced features are not checked.
# https://github.com/oven-sh/bun/blob/cd7f6a1589db7f1e39dc4e3f4a17234afbe7826c/src/bun.js/javascript.zig#L1069-L1072
setenv(BUN_GARBAGE_COLLECTOR_LEVEL 1)

View File

@@ -12,7 +12,13 @@ if(NOT ENABLE_LLVM)
return()
endif()
set(DEFAULT_LLVM_VERSION "19.1.7")
# LLVM 21 is required for Windows ARM64 (first version with ARM64 Windows builds)
# Other platforms use LLVM 19.1.7
if(WIN32 AND CMAKE_SYSTEM_PROCESSOR MATCHES "ARM64|aarch64|AARCH64")
set(DEFAULT_LLVM_VERSION "21.1.8")
else()
set(DEFAULT_LLVM_VERSION "19.1.7")
endif()
optionx(LLVM_VERSION STRING "The version of LLVM to use" DEFAULT ${DEFAULT_LLVM_VERSION})

View File

@@ -31,13 +31,6 @@ execute_process(
ERROR_QUIET
)
if(MACOS_VERSION VERSION_LESS ${CMAKE_OSX_DEPLOYMENT_TARGET})
message(FATAL_ERROR "Your computer is running macOS ${MACOS_VERSION}, which is older than the target macOS SDK ${CMAKE_OSX_DEPLOYMENT_TARGET}. To fix this, either:\n"
" - Upgrade your computer to macOS ${CMAKE_OSX_DEPLOYMENT_TARGET} or newer\n"
" - Download a newer version of the macOS SDK from Apple: https://developer.apple.com/download/all/?q=xcode\n"
" - Set -DCMAKE_OSX_DEPLOYMENT_TARGET=${MACOS_VERSION}\n")
endif()
execute_process(
COMMAND xcrun --sdk macosx --show-sdk-path
OUTPUT_VARIABLE DEFAULT_CMAKE_OSX_SYSROOT

View File

@@ -1,14 +1,25 @@
# NOTE: Changes to this file trigger QEMU JIT stress tests in CI.
# See scripts/verify-jit-stress-qemu.sh for details.
option(WEBKIT_VERSION "The version of WebKit to use")
option(WEBKIT_LOCAL "If a local version of WebKit should be used instead of downloading")
option(WEBKIT_BUILD_TYPE "The build type for local WebKit (defaults to CMAKE_BUILD_TYPE)")
if(NOT WEBKIT_VERSION)
set(WEBKIT_VERSION 5ae5f07f29059c183a8db2eef2c9aabd474ec73c)
set(WEBKIT_VERSION 515344bc5d65aa2d4f9ff277b5fb944f0e051dcd)
endif()
# Use preview build URL for Windows ARM64 until the fix is merged to main
set(WEBKIT_PREVIEW_PR 140)
string(SUBSTRING ${WEBKIT_VERSION} 0 16 WEBKIT_VERSION_PREFIX)
string(SUBSTRING ${WEBKIT_VERSION} 0 8 WEBKIT_VERSION_SHORT)
if(WEBKIT_LOCAL)
set(DEFAULT_WEBKIT_PATH ${VENDOR_PATH}/WebKit/WebKitBuild/${CMAKE_BUILD_TYPE})
if(NOT WEBKIT_BUILD_TYPE)
set(WEBKIT_BUILD_TYPE ${CMAKE_BUILD_TYPE})
endif()
set(DEFAULT_WEBKIT_PATH ${VENDOR_PATH}/WebKit/WebKitBuild/${WEBKIT_BUILD_TYPE})
else()
set(DEFAULT_WEBKIT_PATH ${CACHE_PATH}/webkit-${WEBKIT_VERSION_PREFIX})
endif()
@@ -23,19 +34,153 @@ set(WEBKIT_INCLUDE_PATH ${WEBKIT_PATH}/include)
set(WEBKIT_LIB_PATH ${WEBKIT_PATH}/lib)
if(WEBKIT_LOCAL)
if(EXISTS ${WEBKIT_PATH}/cmakeconfig.h)
# You may need to run:
# make jsc-compile-debug jsc-copy-headers
include_directories(
${WEBKIT_PATH}
${WEBKIT_PATH}/JavaScriptCore/Headers
${WEBKIT_PATH}/JavaScriptCore/Headers/JavaScriptCore
${WEBKIT_PATH}/JavaScriptCore/PrivateHeaders
${WEBKIT_PATH}/bmalloc/Headers
${WEBKIT_PATH}/WTF/Headers
${WEBKIT_PATH}/JavaScriptCore/PrivateHeaders/JavaScriptCore
${WEBKIT_PATH}/JavaScriptCore/DerivedSources/inspector
set(WEBKIT_SOURCE_DIR ${VENDOR_PATH}/WebKit)
if(WIN32)
# --- Build ICU from source (Windows only) ---
# On macOS, ICU is found automatically (Homebrew icu4c for headers, system for libs).
# On Linux, ICU is found automatically from system packages (e.g. libicu-dev).
# On Windows, there is no system ICU, so we build it from source.
set(ICU_LOCAL_ROOT ${VENDOR_PATH}/WebKit/WebKitBuild/icu)
if(NOT EXISTS ${ICU_LOCAL_ROOT}/lib/sicudt.lib)
message(STATUS "Building ICU from source...")
if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|ARM64|aarch64|AARCH64")
set(ICU_PLATFORM "ARM64")
else()
set(ICU_PLATFORM "x64")
endif()
execute_process(
COMMAND powershell -ExecutionPolicy Bypass -File
${WEBKIT_SOURCE_DIR}/build-icu.ps1
-Platform ${ICU_PLATFORM}
-BuildType ${WEBKIT_BUILD_TYPE}
-OutputDir ${ICU_LOCAL_ROOT}
RESULT_VARIABLE ICU_BUILD_RESULT
)
if(NOT ICU_BUILD_RESULT EQUAL 0)
message(FATAL_ERROR "Failed to build ICU (exit code: ${ICU_BUILD_RESULT}).")
endif()
endif()
# Copy ICU libs to WEBKIT_LIB_PATH with the names BuildBun.cmake expects.
# Prebuilt WebKit uses 's' prefix (static) and 'd' suffix (debug).
file(MAKE_DIRECTORY ${WEBKIT_LIB_PATH})
if(WEBKIT_BUILD_TYPE STREQUAL "Debug")
set(ICU_SUFFIX "d")
else()
set(ICU_SUFFIX "")
endif()
file(COPY_FILE ${ICU_LOCAL_ROOT}/lib/sicudt.lib ${WEBKIT_LIB_PATH}/sicudt${ICU_SUFFIX}.lib ONLY_IF_DIFFERENT)
file(COPY_FILE ${ICU_LOCAL_ROOT}/lib/icuin.lib ${WEBKIT_LIB_PATH}/sicuin${ICU_SUFFIX}.lib ONLY_IF_DIFFERENT)
file(COPY_FILE ${ICU_LOCAL_ROOT}/lib/icuuc.lib ${WEBKIT_LIB_PATH}/sicuuc${ICU_SUFFIX}.lib ONLY_IF_DIFFERENT)
endif()
# --- Configure JSC ---
message(STATUS "Configuring JSC from local WebKit source at ${WEBKIT_SOURCE_DIR}...")
set(JSC_CMAKE_ARGS
-S ${WEBKIT_SOURCE_DIR}
-B ${WEBKIT_PATH}
-G ${CMAKE_GENERATOR}
-DPORT=JSCOnly
-DENABLE_STATIC_JSC=ON
-DUSE_THIN_ARCHIVES=OFF
-DENABLE_FTL_JIT=ON
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON
-DUSE_BUN_JSC_ADDITIONS=ON
-DUSE_BUN_EVENT_LOOP=ON
-DENABLE_BUN_SKIP_FAILING_ASSERTIONS=ON
-DALLOW_LINE_AND_COLUMN_NUMBER_IN_BUILTINS=ON
-DCMAKE_BUILD_TYPE=${WEBKIT_BUILD_TYPE}
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DENABLE_REMOTE_INSPECTOR=ON
)
if(WIN32)
# ICU paths and Windows-specific compiler/linker settings
list(APPEND JSC_CMAKE_ARGS
-DICU_ROOT=${ICU_LOCAL_ROOT}
-DICU_LIBRARY=${ICU_LOCAL_ROOT}/lib
-DICU_INCLUDE_DIR=${ICU_LOCAL_ROOT}/include
-DCMAKE_LINKER=lld-link
)
# Static CRT and U_STATIC_IMPLEMENTATION
if(WEBKIT_BUILD_TYPE STREQUAL "Debug")
set(JSC_MSVC_RUNTIME "MultiThreadedDebug")
else()
set(JSC_MSVC_RUNTIME "MultiThreaded")
endif()
list(APPEND JSC_CMAKE_ARGS
-DCMAKE_MSVC_RUNTIME_LIBRARY=${JSC_MSVC_RUNTIME}
"-DCMAKE_C_FLAGS=/DU_STATIC_IMPLEMENTATION"
"-DCMAKE_CXX_FLAGS=/DU_STATIC_IMPLEMENTATION /clang:-fno-c++-static-destructors"
)
endif()
if(ENABLE_ASAN)
list(APPEND JSC_CMAKE_ARGS -DENABLE_SANITIZERS=address)
endif()
# Pass through ccache if available
if(CMAKE_C_COMPILER_LAUNCHER)
list(APPEND JSC_CMAKE_ARGS -DCMAKE_C_COMPILER_LAUNCHER=${CMAKE_C_COMPILER_LAUNCHER})
endif()
if(CMAKE_CXX_COMPILER_LAUNCHER)
list(APPEND JSC_CMAKE_ARGS -DCMAKE_CXX_COMPILER_LAUNCHER=${CMAKE_CXX_COMPILER_LAUNCHER})
endif()
execute_process(
COMMAND ${CMAKE_COMMAND} ${JSC_CMAKE_ARGS}
RESULT_VARIABLE JSC_CONFIGURE_RESULT
)
if(NOT JSC_CONFIGURE_RESULT EQUAL 0)
message(FATAL_ERROR "Failed to configure JSC (exit code: ${JSC_CONFIGURE_RESULT}). "
"Check the output above for errors.")
endif()
if(WIN32)
set(JSC_BYPRODUCTS
${WEBKIT_LIB_PATH}/JavaScriptCore.lib
${WEBKIT_LIB_PATH}/WTF.lib
${WEBKIT_LIB_PATH}/bmalloc.lib
)
else()
set(JSC_BYPRODUCTS
${WEBKIT_LIB_PATH}/libJavaScriptCore.a
${WEBKIT_LIB_PATH}/libWTF.a
${WEBKIT_LIB_PATH}/libbmalloc.a
)
endif()
if(WIN32)
add_custom_target(jsc ALL
COMMAND ${CMAKE_COMMAND} --build ${WEBKIT_PATH} --config ${WEBKIT_BUILD_TYPE} --target jsc
BYPRODUCTS ${JSC_BYPRODUCTS}
COMMENT "Building JSC (${WEBKIT_PATH})"
)
else()
add_custom_target(jsc ALL
COMMAND ${CMAKE_COMMAND} --build ${WEBKIT_PATH} --config ${WEBKIT_BUILD_TYPE} --target jsc
BYPRODUCTS ${JSC_BYPRODUCTS}
COMMENT "Building JSC (${WEBKIT_PATH})"
USES_TERMINAL
)
endif()
include_directories(
${WEBKIT_PATH}
${WEBKIT_PATH}/JavaScriptCore/Headers
${WEBKIT_PATH}/JavaScriptCore/Headers/JavaScriptCore
${WEBKIT_PATH}/JavaScriptCore/PrivateHeaders
${WEBKIT_PATH}/bmalloc/Headers
${WEBKIT_PATH}/WTF/Headers
${WEBKIT_PATH}/JavaScriptCore/PrivateHeaders/JavaScriptCore
)
# On Windows, add ICU headers from the local ICU build
if(WIN32)
include_directories(${ICU_LOCAL_ROOT}/include)
endif()
# After this point, only prebuilt WebKit is supported
@@ -52,7 +197,7 @@ else()
message(FATAL_ERROR "Unsupported operating system: ${CMAKE_SYSTEM_NAME}")
endif()
if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64")
if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|ARM64|aarch64|AARCH64")
set(WEBKIT_ARCH "arm64")
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64|x64|AMD64")
set(WEBKIT_ARCH "amd64")
@@ -81,7 +226,14 @@ endif()
setx(WEBKIT_NAME bun-webkit-${WEBKIT_OS}-${WEBKIT_ARCH}${WEBKIT_SUFFIX})
set(WEBKIT_FILENAME ${WEBKIT_NAME}.tar.gz)
setx(WEBKIT_DOWNLOAD_URL https://github.com/oven-sh/WebKit/releases/download/autobuild-${WEBKIT_VERSION}/${WEBKIT_FILENAME})
if(WEBKIT_VERSION MATCHES "^autobuild-")
set(WEBKIT_TAG ${WEBKIT_VERSION})
else()
set(WEBKIT_TAG autobuild-${WEBKIT_VERSION})
endif()
setx(WEBKIT_DOWNLOAD_URL https://github.com/oven-sh/WebKit/releases/download/${WEBKIT_TAG}/${WEBKIT_FILENAME})
if(EXISTS ${WEBKIT_PATH}/package.json)
file(READ ${WEBKIT_PATH}/package.json WEBKIT_PACKAGE_JSON)

View File

@@ -1,4 +1,4 @@
if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64")
if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|ARM64|aarch64|AARCH64")
set(DEFAULT_ZIG_ARCH "aarch64")
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64|x64|AMD64")
set(DEFAULT_ZIG_ARCH "x86_64")

View File

@@ -7,9 +7,9 @@ Bytecode caching is a build-time optimization that dramatically improves applica
## Usage
### Basic usage
### Basic usage (CommonJS)
Enable bytecode caching with the `--bytecode` flag:
Enable bytecode caching with the `--bytecode` flag. Without `--format`, this defaults to CommonJS:
```bash terminal icon="terminal"
bun build ./index.ts --target=bun --bytecode --outdir=./dist
@@ -17,7 +17,7 @@ bun build ./index.ts --target=bun --bytecode --outdir=./dist
This generates two files:
- `dist/index.js` - Your bundled JavaScript
- `dist/index.js` - Your bundled JavaScript (CommonJS)
- `dist/index.jsc` - The bytecode cache file
At runtime, Bun automatically detects and uses the `.jsc` file:
@@ -28,14 +28,24 @@ bun ./dist/index.js # Automatically uses index.jsc
### With standalone executables
When creating executables with `--compile`, bytecode is embedded into the binary:
When creating executables with `--compile`, bytecode is embedded into the binary. Both ESM and CommonJS formats are supported:
```bash terminal icon="terminal"
# ESM (requires --compile)
bun build ./cli.ts --compile --bytecode --format=esm --outfile=mycli
# CommonJS (works with or without --compile)
bun build ./cli.ts --compile --bytecode --outfile=mycli
```
The resulting executable contains both the code and bytecode, giving you maximum performance in a single file.
### ESM bytecode
ESM bytecode requires `--compile` because Bun embeds module metadata (import/export information) in the compiled binary. This metadata allows the JavaScript engine to skip parsing entirely at runtime.
Without `--compile`, ESM bytecode would still require parsing the source to analyze module dependencies—defeating the purpose of bytecode caching.
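The JavaScript API enforces the same rule. A minimal sketch (file names are illustrative):
```ts
// ESM bytecode is only accepted together with compile: true.
await Bun.build({
  entrypoints: ["./cli.ts"],
  outfile: "./mycli",
  format: "esm",
  bytecode: true,
  compile: true, // required: module metadata is embedded in the binary
});
```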
### Combining with other optimizations
Bytecode works great with minification and source maps:
@@ -90,35 +100,9 @@ Larger applications benefit more because they have more code to parse.
- ❌ **Code that runs once**
- ❌ **Development builds**
- ❌ **Size-constrained environments**
- ❌ **Code with top-level await** (not supported)
## Limitations
### CommonJS only
Bytecode caching currently works with CommonJS output format. Bun's bundler automatically converts most ESM code to CommonJS, but **top-level await** is the exception:
```js
// This prevents bytecode caching
const data = await fetch("https://api.example.com");
export default data;
```
**Why**: Top-level await requires async module evaluation, which can't be represented in CommonJS. The module graph becomes asynchronous, and the CommonJS wrapper function model breaks down.
**Workaround**: Move async initialization into a function:
```js
async function init() {
const data = await fetch("https://api.example.com");
return data;
}
export default init;
```
Now the module exports a function that the consumer can await when needed.
### Version compatibility
Bytecode is **not portable across Bun versions**. The bytecode format is tied to JavaScriptCore's internal representation, which changes between versions.
@@ -236,8 +220,6 @@ It's normal for it to log a cache miss multiple times since Bun doesn't curre
- Compressing `.jsc` files for network transfer (gzip/brotli)
- Evaluating if the startup performance gain is worth the size increase
**Top-level await**: Not supported. Refactor to use async initialization functions.
## What is bytecode?
When you run JavaScript, the JavaScript engine doesn't execute your source code directly. Instead, it goes through several steps:

View File

@@ -322,10 +322,7 @@ Using bytecode compilation, `tsc` starts 2x faster:
Bytecode compilation moves parsing overhead for large input files from runtime to bundle time. Your app starts faster, in exchange for making the `bun build` command a little slower. It doesn't obscure source code.
<Warning>
**Experimental:** Bytecode compilation is an experimental feature. Only `cjs` format is supported (which means no
top-level-await). Let us know if you run into any issues!
</Warning>
<Note>Bytecode compilation supports both `cjs` and `esm` formats when used with `--compile`.</Note>
### What do these flags do?
@@ -365,6 +362,23 @@ The `--bytecode` argument enables bytecode compilation. Every time you run JavaS
console.log(process.execArgv); // ["--smol", "--user-agent=MyBot"]
```
### Runtime arguments via `BUN_OPTIONS`
The `BUN_OPTIONS` environment variable is applied to standalone executables, allowing you to pass runtime flags without recompiling:
```bash terminal icon="terminal"
# Enable CPU profiling on a compiled executable
BUN_OPTIONS="--cpu-prof" ./myapp
# Enable heap profiling with markdown output
BUN_OPTIONS="--heap-prof-md" ./myapp
# Combine multiple flags
BUN_OPTIONS="--smol --cpu-prof-md" ./myapp
```
This is useful for debugging or profiling production executables without rebuilding them.
---
## Automatic config loading

View File

@@ -1333,6 +1333,50 @@ Generate metadata about the build in a structured format. The metafile contains
</Tab>
</Tabs>
#### Markdown metafile
Use `--metafile-md` to generate a markdown metafile, which is LLM-friendly and easy to read in the terminal:
```bash terminal icon="terminal"
bun build ./src/index.ts --outdir ./dist --metafile-md ./dist/meta.md
```
Both `--metafile` and `--metafile-md` can be used together:
```bash terminal icon="terminal"
bun build ./src/index.ts --outdir ./dist --metafile ./dist/meta.json --metafile-md ./dist/meta.md
```
#### `metafile` option formats
In the JavaScript API, `metafile` accepts several forms:
```ts title="build.ts" icon="/icons/typescript.svg"
// Boolean — include metafile in the result object
await Bun.build({
entrypoints: ["./src/index.ts"],
outdir: "./dist",
metafile: true,
});
// String — write JSON metafile to a specific path
await Bun.build({
entrypoints: ["./src/index.ts"],
outdir: "./dist",
metafile: "./dist/meta.json",
});
// Object — specify separate paths for JSON and markdown output
await Bun.build({
entrypoints: ["./src/index.ts"],
outdir: "./dist",
metafile: {
json: "./dist/meta.json",
markdown: "./dist/meta.md",
},
});
```
The metafile structure contains:
```ts
@@ -1464,22 +1508,43 @@ BuildArtifact (entry-point) {
## Bytecode
The `bytecode: boolean` option can be used to generate bytecode for any JavaScript/TypeScript entrypoints. This can greatly improve startup times for large applications. Only supported for `"cjs"` format, only supports `"target": "bun"` and dependent on a matching version of Bun. This adds a corresponding `.jsc` file for each entrypoint.
The `bytecode: boolean` option can be used to generate bytecode for any JavaScript/TypeScript entrypoints. This can greatly improve startup times for large applications. Requires `"target": "bun"` and is dependent on a matching version of Bun.
- **CommonJS**: Works with or without `compile: true`. Generates a `.jsc` file alongside each entrypoint.
- **ESM**: Requires `compile: true`. Bytecode and module metadata are embedded in the standalone executable.
Without an explicit `format`, bytecode defaults to CommonJS.
<Tabs>
<Tab title="JavaScript">
```ts title="build.ts" icon="/icons/typescript.svg"
// CommonJS bytecode (generates .jsc files)
await Bun.build({
entrypoints: ["./index.tsx"],
outdir: "./out",
bytecode: true,
})
// ESM bytecode (requires compile)
await Bun.build({
entrypoints: ["./index.tsx"],
outfile: "./mycli",
bytecode: true,
format: "esm",
compile: true,
})
```
</Tab>
<Tab title="CLI">
```bash terminal icon="terminal"
# CommonJS bytecode
bun build ./index.tsx --outdir ./out --bytecode
# ESM bytecode (requires --compile)
bun build ./index.tsx --outfile ./mycli --bytecode --format=esm --compile
```
</Tab>
</Tabs>
@@ -1646,7 +1711,10 @@ interface BuildConfig {
* start times, but will make the final output larger and slightly increase
* memory usage.
*
* Bytecode is currently only supported for CommonJS (`format: "cjs"`).
* - CommonJS: works with or without `compile: true`
* - ESM: requires `compile: true`
*
* Without an explicit `format`, defaults to CommonJS.
*
* Must be `target: "bun"`
* @default false

View File

@@ -150,6 +150,9 @@
"/runtime/secrets",
"/runtime/console",
"/runtime/yaml",
"/runtime/markdown",
"/runtime/json5",
"/runtime/jsonl",
"/runtime/html-rewriter",
"/runtime/hashing",
"/runtime/glob",
@@ -497,6 +500,7 @@
"/guides/runtime/import-json",
"/guides/runtime/import-toml",
"/guides/runtime/import-yaml",
"/guides/runtime/import-json5",
"/guides/runtime/import-html",
"/guides/util/import-meta-dir",
"/guides/util/import-meta-file",

View File

@@ -0,0 +1,74 @@
---
title: Import a JSON5 file
sidebarTitle: Import JSON5
mode: center
---
Bun natively supports `.json5` imports.
```json5 config.json5 icon="file-code"
{
// Comments are allowed
database: {
host: "localhost",
port: 5432,
name: "myapp",
},
server: {
port: 3000,
timeout: 30,
},
features: {
auth: true,
rateLimit: true,
},
}
```
---
Import the file like any other source file.
```ts config.ts icon="/icons/typescript.svg"
import config from "./config.json5";
config.database.host; // => "localhost"
config.server.port; // => 3000
config.features.auth; // => true
```
---
You can also use named imports to destructure top-level properties:
```ts config.ts icon="/icons/typescript.svg"
import { database, server, features } from "./config.json5";
console.log(database.name); // => "myapp"
console.log(server.timeout); // => 30
console.log(features.rateLimit); // => true
```
---
For parsing JSON5 strings at runtime, use `Bun.JSON5.parse()`:
```ts config.ts icon="/icons/typescript.svg"
const data = Bun.JSON5.parse(`{
name: 'John Doe',
age: 30,
hobbies: [
'reading',
'coding',
],
}`);
console.log(data.name); // => "John Doe"
console.log(data.hobbies); // => ["reading", "coding"]
```
---
See [Docs > API > JSON5](/runtime/json5) for complete documentation on JSON5 support in Bun.

View File

@@ -97,6 +97,31 @@ Filters respect your [workspace configuration](/pm/workspaces): If you have a `p
bun run --filter foo myscript
```
### Parallel and sequential mode
Combine `--filter` or `--workspaces` with `--parallel` or `--sequential` to run scripts across workspace packages with Foreman-style prefixed output:
```bash terminal icon="terminal"
# Run "build" in all matching packages concurrently
bun run --parallel --filter '*' build
# Run "build" in all workspace packages sequentially
bun run --sequential --workspaces build
# Run glob-matched scripts across all packages
bun run --parallel --filter '*' "build:*"
# Continue running even if one package's script fails
bun run --parallel --no-exit-on-error --filter '*' test
# Run multiple scripts across all packages
bun run --parallel --filter '*' build lint
```
Each line of output is prefixed with the package and script name (e.g. `pkg-a:build | ...`). Without `--filter`/`--workspaces`, the prefix is just the script name (e.g. `build | ...`). When a package's `package.json` has no `name` field, the relative path from the workspace root is used instead.
Use `--if-present` with `--workspaces` to skip packages that don't have the requested script instead of erroring.
### Dependency Order
Bun will respect package dependency order when running scripts. Say you have a package `foo` that depends on another package `bar` in your workspace, and both packages have a `build` script. When you run `bun --filter '*' build`, you will notice that `foo` will only start running once `bar` is done.

View File

@@ -227,6 +227,26 @@ bun --cpu-prof script.js
This generates a `.cpuprofile` file you can open in Chrome DevTools (Performance tab → Load profile) or VS Code's CPU profiler.
### Markdown output
Use `--cpu-prof-md` to generate a markdown CPU profile, which is grep-friendly and designed for LLM analysis:
```sh terminal icon="terminal"
bun --cpu-prof-md script.js
```
Both `--cpu-prof` and `--cpu-prof-md` can be used together to generate both formats at once:
```sh terminal icon="terminal"
bun --cpu-prof --cpu-prof-md script.js
```
You can also trigger profiling via the `BUN_OPTIONS` environment variable:
```sh terminal icon="terminal"
BUN_OPTIONS="--cpu-prof-md" bun script.js
```
### Options
```sh terminal icon="terminal"
@@ -234,8 +254,43 @@ bun --cpu-prof --cpu-prof-name my-profile.cpuprofile script.js
bun --cpu-prof --cpu-prof-dir ./profiles script.js
```
| Flag | Description |
| ---------------------------- | -------------------- |
| `--cpu-prof` | Enable profiling |
| `--cpu-prof-name <filename>` | Set output filename |
| `--cpu-prof-dir <dir>` | Set output directory |
| Flag | Description |
| ---------------------------- | ----------------------------------------------------------- |
| `--cpu-prof` | Generate a `.cpuprofile` JSON file (Chrome DevTools format) |
| `--cpu-prof-md` | Generate a markdown CPU profile (grep/LLM-friendly) |
| `--cpu-prof-name <filename>` | Set output filename |
| `--cpu-prof-dir <dir>` | Set output directory |
## Heap profiling
Generate heap snapshots on exit to analyze memory usage and find memory leaks.
```sh terminal icon="terminal"
bun --heap-prof script.js
```
This generates a V8 `.heapsnapshot` file that can be loaded in Chrome DevTools (Memory tab → Load).
### Markdown output
Use `--heap-prof-md` to generate a markdown heap profile for CLI analysis:
```sh terminal icon="terminal"
bun --heap-prof-md script.js
```
<Note>If both `--heap-prof` and `--heap-prof-md` are specified, the markdown format is used.</Note>
### Options
```sh terminal icon="terminal"
bun --heap-prof --heap-prof-name my-snapshot.heapsnapshot script.js
bun --heap-prof --heap-prof-dir ./profiles script.js
```
| Flag | Description |
| ----------------------------- | ------------------------------------------ |
| `--heap-prof` | Generate a V8 `.heapsnapshot` file on exit |
| `--heap-prof-md` | Generate a markdown heap profile on exit |
| `--heap-prof-name <filename>` | Set output filename |
| `--heap-prof-dir <dir>` | Set output directory |

View File

@@ -35,7 +35,7 @@ winget install "Visual Studio Community 2022" --override "--add Microsoft.Visual
After Visual Studio, you need the following:
- LLVM 19.1.7
- LLVM (19.1.7 for x64, 21.1.8 for ARM64)
- Go
- Rust
- NASM
@@ -47,25 +47,35 @@ After Visual Studio, you need the following:
[Scoop](https://scoop.sh) can be used to install these remaining tools easily.
```ps1 Scoop
```ps1 Scoop (x64)
irm https://get.scoop.sh | iex
scoop install nodejs-lts go rust nasm ruby perl ccache
# scoop seems to be buggy if you install llvm and the rest at the same time
scoop install llvm@19.1.7
```
For Windows ARM64, download LLVM 21.1.8 directly from GitHub releases (first version with ARM64 Windows builds):
```ps1 ARM64
# Download and install LLVM for ARM64
Invoke-WebRequest -Uri "https://github.com/llvm/llvm-project/releases/download/llvmorg-21.1.8/LLVM-21.1.8-woa64.exe" -OutFile "$env:TEMP\LLVM-21.1.8-woa64.exe"
Start-Process -FilePath "$env:TEMP\LLVM-21.1.8-woa64.exe" -ArgumentList "/S" -Wait
```
<Note>
Please do not use WinGet/other package manager for these, as you will likely install Strawberry Perl instead of a more
minimal installation of Perl. Strawberry Perl includes many other utilities that get installed into `$Env:PATH` that
will conflict with MSVC and break the build.
</Note>
If you intend on building WebKit locally (optional), you should install these packages:
If you intend on building WebKit locally (optional, x64 only), you should install these packages:
```ps1 Scoop
scoop install make cygwin python
```
<Note>Cygwin is not required for ARM64 builds as WebKit is provided as a pre-built binary.</Note>
From here on out, it is **expected you use a PowerShell Terminal with `.\scripts\vs-shell.ps1` sourced**. This script is available in the Bun repository and can be loaded by executing it:
```ps1
.\scripts\vs-shell.ps1
```

View File

@@ -266,18 +266,13 @@ git clone https://github.com/oven-sh/WebKit vendor/WebKit
# Check out the commit hash specified in `set(WEBKIT_VERSION <commit_hash>)` in cmake/tools/SetupWebKit.cmake
git -C vendor/WebKit checkout <commit_hash>
# Make a debug build of JSC. This will output build artifacts in ./vendor/WebKit/WebKitBuild/Debug
# Optionally, you can use `bun run jsc:build` for a release build
bun run jsc:build:debug && rm vendor/WebKit/WebKitBuild/Debug/JavaScriptCore/DerivedSources/inspector/InspectorProtocolObjects.h
# After an initial run of `make jsc-debug`, you can rebuild JSC with:
cmake --build vendor/WebKit/WebKitBuild/Debug --target jsc && rm vendor/WebKit/WebKitBuild/Debug/JavaScriptCore/DerivedSources/inspector/InspectorProtocolObjects.h
# Build bun with the local JSC build
# Build bun with the local JSC build — this automatically configures and builds JSC
bun run build:local
```
Using `bun run build:local` will build Bun in the `./build/debug-local` directory (instead of `./build/debug`), you'll have to change a couple of places to use this new directory:
`bun run build:local` handles everything: configuring JSC, building JSC, and building Bun. On subsequent runs, JSC will incrementally rebuild if any WebKit sources changed. `ninja -Cbuild/debug-local` also works after the first build, and will build Bun+JSC.
The build output goes to `./build/debug-local` (instead of `./build/debug`), so you'll need to update a couple of places:
- The first line in `src/js/builtins.d.ts`
- The `CompilationDatabase` line in `.clangd` config should be `CompilationDatabase: build/debug-local`
@@ -288,7 +283,7 @@ Note that the WebKit folder, including build artifacts, is 8GB+ in size.
If you are using a JSC debug build and using VScode, make sure to run the `C/C++: Select a Configuration` command to configure intellisense to find the debug headers.
Note that if you change make changes to our [WebKit fork](https://github.com/oven-sh/WebKit), you will also have to change `SetupWebKit.cmake` to point to the commit hash.
Note that if you make changes to our [WebKit fork](https://github.com/oven-sh/WebKit), you will also have to change `SetupWebKit.cmake` to point to the commit hash.
## Troubleshooting

View File

@@ -55,5 +55,5 @@ Click the link in the right column to jump to the associated documentation.
| Stream Processing | [`Bun.readableStreamTo*()`](/runtime/utils#bun-readablestreamto), `Bun.readableStreamToBytes()`, `Bun.readableStreamToBlob()`, `Bun.readableStreamToFormData()`, `Bun.readableStreamToJSON()`, `Bun.readableStreamToArray()` |
| Memory & Buffer Management | `Bun.ArrayBufferSink`, `Bun.allocUnsafe`, `Bun.concatArrayBuffers` |
| Module Resolution | [`Bun.resolveSync()`](/runtime/utils#bun-resolvesync) |
| Parsing & Formatting | [`Bun.semver`](/runtime/semver), `Bun.TOML.parse`, [`Bun.color`](/runtime/color) |
| Parsing & Formatting | [`Bun.semver`](/runtime/semver), `Bun.TOML.parse`, [`Bun.markdown`](/runtime/markdown), [`Bun.color`](/runtime/color) |
| Low-level / Internals | `Bun.mmap`, `Bun.gc`, `Bun.generateHeapSnapshot`, [`bun:jsc`](https://bun.com/reference/bun/jsc) |

View File

@@ -5,7 +5,7 @@ description: "File types and loaders supported by Bun's bundler and runtime"
The Bun bundler implements a set of default loaders out of the box. As a rule of thumb, the bundler and the runtime both support the same set of file types out of the box.
`.js` `.cjs` `.mjs` `.mts` `.cts` `.ts` `.tsx` `.jsx` `.css` `.json` `.jsonc` `.toml` `.yaml` `.yml` `.txt` `.wasm` `.node` `.html` `.sh`
`.js` `.cjs` `.mjs` `.mts` `.cts` `.ts` `.tsx` `.jsx` `.css` `.json` `.jsonc` `.json5` `.toml` `.yaml` `.yml` `.txt` `.wasm` `.node` `.html` `.sh`
Bun uses the file extension to determine which built-in _loader_ should be used to parse the file. Every loader has a name, such as `js`, `tsx`, or `json`. These names are used when building [plugins](/bundler/plugins) that extend Bun with custom loaders.
@@ -197,6 +197,53 @@ export default {
</CodeGroup>
### `json5`
**JSON5 loader**. Default for `.json5`.
JSON5 files can be directly imported. Bun will parse them with its fast native JSON5 parser. JSON5 is a superset of JSON that supports comments, trailing commas, unquoted keys, single-quoted strings, and more.
```ts
import config from "./config.json5";
console.log(config);
// via import attribute:
import data from "./data.txt" with { type: "json5" };
```
During bundling, the parsed JSON5 is inlined into the bundle as a JavaScript object.
```ts
var config = {
name: "my-app",
version: "1.0.0",
// ...other fields
};
```
If a `.json5` file is passed as an entrypoint, it will be converted to a `.js` module that `export default`s the parsed object.
<CodeGroup>
```json5 Input
{
// Configuration
name: "John Doe",
age: 35,
email: "johndoe@example.com",
}
```
```ts Output
export default {
name: "John Doe",
age: 35,
email: "johndoe@example.com",
};
```
</CodeGroup>
### `text`
**Text loader**. Default for `.txt`.

docs/runtime/json5.mdx Normal file
View File

@@ -0,0 +1,271 @@
---
title: JSON5
description: Use Bun's built-in support for JSON5 files through both runtime APIs and bundler integration
---
In Bun, JSON5 is a first-class citizen alongside JSON, TOML, and YAML. You can:
- Parse and stringify JSON5 with `Bun.JSON5.parse` and `Bun.JSON5.stringify`
- `import` & `require` JSON5 files as modules at runtime (including hot reloading & watch mode support)
- `import` & `require` JSON5 files in frontend apps via Bun's bundler
---
## Conformance
Bun's JSON5 parser passes 100% of the [official JSON5 test suite](https://github.com/json5/json5-tests). The parser is written in Zig for optimal performance. You can view our [translated test suite](https://github.com/oven-sh/bun/blob/main/test/js/bun/json5/json5-test-suite.test.ts) to see every test case.
---
## Runtime API
### `Bun.JSON5.parse()`
Parse a JSON5 string into a JavaScript value.
```ts
import { JSON5 } from "bun";
const data = JSON5.parse(`{
// JSON5 supports comments
name: 'my-app',
version: '1.0.0',
debug: true,
// trailing commas are allowed
tags: ['web', 'api',],
}`);
console.log(data);
// {
// name: "my-app",
// version: "1.0.0",
// debug: true,
// tags: ["web", "api"]
// }
```
#### Supported JSON5 Features
JSON5 is a superset of JSON based on ECMAScript 5.1 syntax. It supports:
- **Comments**: single-line (`//`) and multi-line (`/* */`)
- **Trailing commas**: in objects and arrays
- **Unquoted keys**: valid ECMAScript 5.1 identifiers can be used as keys
- **Single-quoted strings**: in addition to double-quoted strings
- **Multi-line strings**: using backslash line continuations
- **Hex numbers**: `0xFF`
- **Leading & trailing decimal points**: `.5` and `5.`
- **Infinity and NaN**: positive and negative
- **Explicit plus sign**: `+42`
```ts
const data = JSON5.parse(`{
// Unquoted keys
unquoted: 'keys work',
// Single and double quotes
single: 'single-quoted',
double: "double-quoted",
// Trailing commas
trailing: 'comma',
// Special numbers
hex: 0xDEADbeef,
half: .5,
to: Infinity,
nan: NaN,
// Multi-line strings
multiline: 'line 1 \
line 2',
}`);
```
#### Error Handling
`Bun.JSON5.parse()` throws a `SyntaxError` if the input is invalid JSON5:
```ts
try {
JSON5.parse("{invalid}");
} catch (error) {
console.error("Failed to parse JSON5:", error.message);
}
```
### `Bun.JSON5.stringify()`
Stringify a JavaScript value to a JSON5 string.
```ts
import { JSON5 } from "bun";
const str = JSON5.stringify({ name: "my-app", version: "1.0.0" });
console.log(str);
// {name:'my-app',version:'1.0.0'}
```
#### Pretty Printing
Pass a `space` argument to format the output with indentation:
```ts
const pretty = JSON5.stringify(
{
name: "my-app",
debug: true,
tags: ["web", "api"],
},
null,
2,
);
console.log(pretty);
// {
// name: 'my-app',
// debug: true,
// tags: [
// 'web',
// 'api',
// ],
// }
```
The `space` argument can be a number (number of spaces) or a string (used as the indent character):
```ts
// Tab indentation
JSON5.stringify(data, null, "\t");
```
#### Special Values
Unlike `JSON.stringify`, `JSON5.stringify` preserves special numeric values:
```ts
JSON5.stringify({ inf: Infinity, ninf: -Infinity, nan: NaN });
// {inf:Infinity,ninf:-Infinity,nan:NaN}
```
---
## Module Import
### ES Modules
You can import JSON5 files directly as ES modules:
```json5 config.json5
{
// Database configuration
database: {
host: "localhost",
port: 5432,
name: "myapp",
},
features: {
auth: true,
rateLimit: true,
analytics: false,
},
}
```
#### Default Import
```ts app.ts icon="/icons/typescript.svg"
import config from "./config.json5";
console.log(config.database.host); // "localhost"
console.log(config.features.auth); // true
```
#### Named Imports
You can destructure top-level properties as named imports:
```ts app.ts icon="/icons/typescript.svg"
import { database, features } from "./config.json5";
console.log(database.host); // "localhost"
console.log(features.rateLimit); // true
```
### CommonJS
JSON5 files can also be required in CommonJS:
```ts app.ts icon="/icons/typescript.svg"
const config = require("./config.json5");
console.log(config.database.name); // "myapp"
// Destructuring also works
const { database, features } = require("./config.json5");
```
---
## Hot Reloading with JSON5
When you run your application with `bun --hot`, changes to JSON5 files are automatically detected and reloaded:
```json5 config.json5
{
server: {
port: 3000,
host: "localhost",
},
features: {
debug: true,
verbose: false,
},
}
```
```ts server.ts icon="/icons/typescript.svg"
import { server, features } from "./config.json5";
Bun.serve({
port: server.port,
hostname: server.host,
fetch(req) {
if (features.verbose) {
console.log(`${req.method} ${req.url}`);
}
return new Response("Hello World");
},
});
```
Run with hot reloading:
```bash terminal icon="terminal"
bun --hot server.ts
```
---
## Bundler Integration
When you import JSON5 files and bundle with Bun, the JSON5 is parsed at build time and included as a JavaScript module:
```bash terminal icon="terminal"
bun build app.ts --outdir=dist
```
This means:
- Zero runtime JSON5 parsing overhead in production
- Smaller bundle sizes
- Tree-shaking support for unused properties (named imports; see the sketch below)
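For example, when only one named import is referenced, the bundler can drop the rest. A minimal sketch reusing the `config.json5` from above:
```ts
// Only `database` is referenced; unused top-level properties
// (such as `features`) can be dropped from the bundled output.
import { database } from "./config.json5";

console.log(database.host); // "localhost"
```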
### Dynamic Imports
JSON5 files can be dynamically imported:
```ts
const config = await import("./config.json5");
```

docs/runtime/jsonl.mdx Normal file
View File

@@ -0,0 +1,188 @@
---
title: JSONL
description: Parse newline-delimited JSON (JSONL) with Bun's built-in streaming parser
---
Bun has built-in support for parsing [JSONL](https://jsonlines.org/) (newline-delimited JSON), where each line is a separate JSON value. The parser is implemented in C++ using JavaScriptCore's optimized JSON parser and supports streaming use cases.
```ts
const results = Bun.JSONL.parse('{"name":"Alice"}\n{"name":"Bob"}\n');
// [{ name: "Alice" }, { name: "Bob" }]
```
---
## `Bun.JSONL.parse()`
Parse a complete JSONL input and return an array of all parsed values.
```ts
import { JSONL } from "bun";
const input = '{"id":1,"name":"Alice"}\n{"id":2,"name":"Bob"}\n{"id":3,"name":"Charlie"}\n';
const records = JSONL.parse(input);
console.log(records);
// [
// { id: 1, name: "Alice" },
// { id: 2, name: "Bob" },
// { id: 3, name: "Charlie" }
// ]
```
Input can be a string or a `Uint8Array`:
```ts
const buffer = new TextEncoder().encode('{"a":1}\n{"b":2}\n');
const results = Bun.JSONL.parse(buffer);
// [{ a: 1 }, { b: 2 }]
```
When passed a `Uint8Array`, a UTF-8 BOM at the start of the buffer is automatically skipped.
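For example, constructing the BOM by hand:
```ts
const bom = new Uint8Array([0xef, 0xbb, 0xbf]);
const body = new TextEncoder().encode('{"a":1}\n{"b":2}\n');
const buf = new Uint8Array([...bom, ...body]);

Bun.JSONL.parse(buf); // [{ a: 1 }, { b: 2 }] (the BOM is skipped)
```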
### Error handling
If the input contains invalid JSON, `Bun.JSONL.parse()` throws a `SyntaxError`:
```ts
try {
Bun.JSONL.parse('{"valid":true}\n{invalid}\n');
} catch (error) {
console.error(error); // SyntaxError: Failed to parse JSONL
}
```
---
## `Bun.JSONL.parseChunk()`
For streaming scenarios, `parseChunk` parses as many complete values as possible from the input and reports how far it got. This is useful when receiving data incrementally (e.g., from a network stream) and you need to know where to resume parsing.
```ts
const chunk = '{"id":1}\n{"id":2}\n{"id":3';
const result = Bun.JSONL.parseChunk(chunk);
console.log(result.values); // [{ id: 1 }, { id: 2 }]
console.log(result.read); // 17 — characters consumed
console.log(result.done); // false — incomplete value remains
console.log(result.error); // null — no parse error
```
### Return value
`parseChunk` returns an object with four properties:
| Property | Type | Description |
| -------- | --------------------- | ----------------------------------------------------------------------- |
| `values` | `any[]` | Array of successfully parsed JSON values |
| `read` | `number` | Number of bytes (for `Uint8Array`) or characters (for strings) consumed |
| `done` | `boolean` | `true` if the entire input was consumed with no remaining data |
| `error` | `SyntaxError \| null` | Parse error, or `null` if no error occurred |
### Streaming example
Use `read` to slice off consumed input and carry forward the remainder:
```ts
let buffer = "";
async function processStream(stream: ReadableStream<string>) {
for await (const chunk of stream) {
buffer += chunk;
const result = Bun.JSONL.parseChunk(buffer);
for (const value of result.values) {
handleRecord(value);
}
// Keep only the unconsumed portion
buffer = buffer.slice(result.read);
}
// Handle any remaining data
if (buffer.length > 0) {
const final = Bun.JSONL.parseChunk(buffer);
for (const value of final.values) {
handleRecord(value);
}
if (final.error) {
console.error("Parse error in final chunk:", final.error.message);
}
}
}
```
### Byte offsets with `Uint8Array`
When the input is a `Uint8Array`, you can pass optional `start` and `end` byte offsets:
```ts
const buf = new TextEncoder().encode('{"a":1}\n{"b":2}\n{"c":3}\n');
// Parse starting from byte 8
const result = Bun.JSONL.parseChunk(buf, 8);
console.log(result.values); // [{ b: 2 }, { c: 3 }]
console.log(result.read); // 24
// Parse a specific range
const partial = Bun.JSONL.parseChunk(buf, 0, 8);
console.log(partial.values); // [{ a: 1 }]
```
The `read` value is always a byte offset into the original buffer, making it easy to use with `TypedArray.subarray()` for zero-copy streaming:
```ts
let buf = new Uint8Array(0);
async function processBinaryStream(stream: ReadableStream<Uint8Array>) {
for await (const chunk of stream) {
// Append chunk to buffer
const newBuf = new Uint8Array(buf.length + chunk.length);
newBuf.set(buf);
newBuf.set(chunk, buf.length);
buf = newBuf;
const result = Bun.JSONL.parseChunk(buf);
for (const value of result.values) {
handleRecord(value);
}
// Keep unconsumed bytes
buf = buf.slice(result.read);
}
}
```
### Error recovery
Unlike `parse()`, `parseChunk()` does not throw on invalid JSON. Instead, it returns the error in the `error` property, along with any values that were successfully parsed before the error:
```ts
const input = '{"a":1}\n{invalid}\n{"b":2}\n';
const result = Bun.JSONL.parseChunk(input);
console.log(result.values); // [{ a: 1 }] — values parsed before the error
console.log(result.error); // SyntaxError
console.log(result.read); // 7 — position up to last successful parse
```
---
## Supported value types
Each line can be any valid JSON value, not just objects:
```ts
const input = '42\n"hello"\ntrue\nnull\n[1,2,3]\n{"key":"value"}\n';
const values = Bun.JSONL.parse(input);
// [42, "hello", true, null, [1, 2, 3], { key: "value" }]
```
---
## Performance notes
- **ASCII fast path**: Pure ASCII input is parsed directly without copying, using a zero-allocation `StringView`.
- **UTF-8 support**: Non-ASCII `Uint8Array` input is decoded to UTF-16 using SIMD-accelerated conversion.
- **BOM handling**: UTF-8 BOM (`0xEF 0xBB 0xBF`) at the start of a `Uint8Array` is automatically skipped.
- **Pre-built object shape**: The result object from `parseChunk` uses a cached structure for fast property access.

docs/runtime/markdown.mdx Normal file
View File

@@ -0,0 +1,344 @@
---
title: Markdown
description: Parse and render Markdown with Bun's built-in Markdown API, supporting GFM extensions and custom rendering callbacks
---
{% callout type="note" %}
**Unstable API** — This API is under active development and may change in future versions of Bun.
{% /callout %}
Bun includes a fast, built-in Markdown parser written in Zig. It supports GitHub Flavored Markdown (GFM) extensions and provides three APIs:
- `Bun.markdown.html()` — render Markdown to an HTML string
- `Bun.markdown.render()` — render Markdown with custom callbacks for each element
- `Bun.markdown.react()` — render Markdown to React JSX elements
---
## `Bun.markdown.html()`
Convert a Markdown string to HTML.
```ts
const html = Bun.markdown.html("# Hello **world**");
// "<h1>Hello <strong>world</strong></h1>\n"
```
GFM extensions like tables, strikethrough, and task lists are enabled by default:
```ts
const html = Bun.markdown.html(`
| Feature | Status |
|-------------|--------|
| Tables | ~~done~~ |
| Strikethrough| ~~done~~ |
| Task lists | done |
`);
```
### Options
Pass an options object as the second argument to configure the parser:
```ts
const html = Bun.markdown.html("some markdown", {
tables: true, // GFM tables (default: true)
strikethrough: true, // GFM strikethrough (default: true)
tasklists: true, // GFM task lists (default: true)
tagFilter: true, // GFM tag filter for disallowed HTML tags
autolinks: true, // Autolink URLs, emails, and www. links
});
```
All available options:
| Option | Default | Description |
| ---------------------- | ------- | ----------------------------------------------------------- |
| `tables`               | `true`  | GFM tables                                                   |
| `strikethrough`        | `true`  | GFM strikethrough (`~~text~~`)                               |
| `tasklists`            | `true`  | GFM task lists (`- [x] item`)                                |
| `autolinks` | `false` | Enable autolinks — see [Autolinks](#autolinks) |
| `headings` | `false` | Heading IDs and autolinks — see [Heading IDs](#heading-ids) |
| `hardSoftBreaks` | `false` | Treat soft line breaks as hard breaks |
| `wikiLinks` | `false` | Enable `[[wiki links]]` |
| `underline` | `false` | `__text__` renders as `<u>` instead of `<strong>` |
| `latexMath` | `false` | Enable `$inline$` and `$$display$$` math |
| `collapseWhitespace` | `false` | Collapse whitespace in text |
| `permissiveAtxHeaders` | `false` | ATX headers without space after `#` |
| `noIndentedCodeBlocks` | `false` | Disable indented code blocks |
| `noHtmlBlocks` | `false` | Disable HTML blocks |
| `noHtmlSpans` | `false` | Disable inline HTML |
| `tagFilter` | `false` | GFM tag filter for disallowed HTML tags |
#### Autolinks
Pass `true` to enable all autolink types, or an object for granular control:
```ts
// Enable all autolinks (URL, WWW, email)
Bun.markdown.html("Visit www.example.com", { autolinks: true });
// Enable only specific types
Bun.markdown.html("Visit www.example.com", {
autolinks: { url: true, www: true },
});
```
#### Heading IDs
Pass `true` to enable both heading IDs and autolink headings, or an object for granular control:
```ts
// Enable heading IDs and autolink headings
Bun.markdown.html("## Hello World", { headings: true });
// '<h2 id="hello-world"><a href="#hello-world">Hello World</a></h2>\n'
// Enable only heading IDs (no autolink)
Bun.markdown.html("## Hello World", { headings: { ids: true } });
// '<h2 id="hello-world">Hello World</h2>\n'
```
---
## `Bun.markdown.render()`
Parse Markdown and render it using custom JavaScript callbacks. This gives you full control over the output format — you can generate HTML with custom classes, React elements, ANSI terminal output, or any other string format.
```ts
const result = Bun.markdown.render("# Hello **world**", {
heading: (children, { level }) => `<h${level} class="title">${children}</h${level}>`,
strong: children => `<b>${children}</b>`,
paragraph: children => `<p>${children}</p>`,
});
// '<h1 class="title">Hello <b>world</b></h1>'
```
### Callback signature
Each callback receives:
1. **`children`** — the accumulated content of the element as a string
2. **`meta`** (optional) — an object with element-specific metadata
Return a string to replace the element's rendering. Return `null` or `undefined` to omit the element from the output entirely. If no callback is registered for an element, its children pass through unchanged.
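For example, with no `strong` callback registered, the bold text's content flows into the surrounding paragraph unchanged:
```ts
const out = Bun.markdown.render("Some **bold** text", {
  paragraph: children => children,
});
// "Some bold text"
```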
### Block callbacks
| Callback | Meta | Description |
| ------------ | ------------------------------------------- | ---------------------------------------------------------------------------------------- |
| `heading`    | `{ level: number, id?: string }`            | Heading level 1–6. `id` is set when `headings: { ids: true }` is enabled                 |
| `paragraph` | — | Paragraph block |
| `blockquote` | — | Blockquote block |
| `code` | `{ language?: string }` | Fenced or indented code block. `language` is the info-string when specified on the fence |
| `list` | `{ ordered: boolean, start?: number }` | Ordered or unordered list. `start` is the start number for ordered lists |
| `listItem` | `{ checked?: boolean }` | List item. `checked` is set for task list items (`- [x]` / `- [ ]`) |
| `hr` | — | Horizontal rule |
| `table` | — | Table block |
| `thead` | — | Table head |
| `tbody` | — | Table body |
| `tr` | — | Table row |
| `th` | `{ align?: "left" \| "center" \| "right" }` | Table header cell. `align` is set when alignment is specified |
| `td` | `{ align?: "left" \| "center" \| "right" }` | Table data cell. `align` is set when alignment is specified |
| `html` | — | Raw HTML content |
### Inline callbacks
| Callback | Meta | Description |
| --------------- | ---------------------------------- | ---------------------------- |
| `strong` | — | Strong emphasis (`**text**`) |
| `emphasis` | — | Emphasis (`*text*`) |
| `link` | `{ href: string, title?: string }` | Link |
| `image` | `{ src: string, title?: string }` | Image |
| `codespan` | — | Inline code (`` `code` ``) |
| `strikethrough` | — | Strikethrough (`~~text~~`) |
| `text` | — | Plain text content |
### Examples
#### Custom HTML with classes
```ts
const html = Bun.markdown.render("# Title\n\nHello **world**", {
heading: (children, { level }) => `<h${level} class="heading heading-${level}">${children}</h${level}>`,
paragraph: children => `<p class="body">${children}</p>`,
strong: children => `<strong class="bold">${children}</strong>`,
});
```
#### Stripping all formatting
```ts
const plaintext = Bun.markdown.render("# Hello **world**", {
heading: children => children,
paragraph: children => children,
strong: children => children,
emphasis: children => children,
link: children => children,
image: () => "",
code: children => children,
codespan: children => children,
});
// "Hello world"
```
#### Omitting elements
Return `null` or `undefined` to remove an element from the output:
```ts
const result = Bun.markdown.render("# Title\n\n![logo](img.png)\n\nHello", {
image: () => null, // Remove all images
heading: children => children,
paragraph: children => children + "\n",
});
// "Title\nHello\n"
```
#### ANSI terminal output
```ts
const ansi = Bun.markdown.render("# Hello\n\nThis is **bold** and *italic*", {
heading: (children, { level }) => `\x1b[1;4m${children}\x1b[0m\n`,
paragraph: children => children + "\n",
strong: children => `\x1b[1m${children}\x1b[22m`,
emphasis: children => `\x1b[3m${children}\x1b[23m`,
});
```
#### Code block syntax highlighting
````ts
const result = Bun.markdown.render("```js\nconsole.log('hi')\n```", {
code: (children, meta) => {
const lang = meta?.language ?? "";
return `<pre><code class="language-${lang}">${children}</code></pre>`;
},
});
````
### Parser options
Parser options are passed as a separate third argument:
```ts
const result = Bun.markdown.render(
"Visit www.example.com",
{
link: (children, { href }) => `[${children}](${href})`,
paragraph: children => children,
},
{ autolinks: true },
);
```
---
## `Bun.markdown.react()`
Render Markdown directly to React elements. Returns a `<Fragment>` that you can use as a component return value.
```tsx
function Markdown({ text }: { text: string }) {
return Bun.markdown.react(text);
}
```
### Server-side rendering
Works with `renderToString()` and React Server Components:
```tsx
import { renderToString } from "react-dom/server";
const html = renderToString(Bun.markdown.react("# Hello **world**"));
// "<h1>Hello <strong>world</strong></h1>"
```
### Component overrides
Replace any HTML element with a custom React component by passing it in the second argument, keyed by tag name:
```tsx
function Code({ language, children }) {
return (
<pre data-language={language}>
<code>{children}</code>
</pre>
);
}
function Link({ href, title, children }) {
return (
<a href={href} title={title} target="_blank" rel="noopener noreferrer">
{children}
</a>
);
}
function Heading({ id, children }) {
return (
<h2 id={id}>
<a href={`#${id}`}>{children}</a>
</h2>
);
}
const el = Bun.markdown.react(
content,
{
pre: Code,
a: Link,
h2: Heading,
},
{ headings: { ids: true } },
);
```
#### Available overrides
Every HTML tag produced by the parser can be overridden:
| Option | Props | Description |
| ------------ | ---------------------------- | --------------------------------------------------------------- |
| `h1`–`h6`    | `{ id?, children }`          | Headings. `id` is set when `headings: { ids: true }` is enabled |
| `p` | `{ children }` | Paragraph |
| `blockquote` | `{ children }` | Blockquote |
| `pre` | `{ language?, children }` | Code block. `language` is the info string (e.g. `"js"`) |
| `hr` | `{}` | Horizontal rule (no children) |
| `ul` | `{ children }` | Unordered list |
| `ol` | `{ start, children }` | Ordered list. `start` is the first item number |
| `li` | `{ checked?, children }` | List item. `checked` is set for task list items |
| `table` | `{ children }` | Table |
| `thead` | `{ children }` | Table head |
| `tbody` | `{ children }` | Table body |
| `tr` | `{ children }` | Table row |
| `th` | `{ align?, children }` | Table header cell |
| `td` | `{ align?, children }` | Table data cell |
| `em` | `{ children }` | Emphasis (`*text*`) |
| `strong` | `{ children }` | Strong (`**text**`) |
| `a` | `{ href, title?, children }` | Link |
| `img` | `{ src, alt?, title? }` | Image (no children) |
| `code` | `{ children }` | Inline code |
| `del` | `{ children }` | Strikethrough (`~~text~~`) |
| `br` | `{}` | Hard line break (no children) |
### React 18 and older
By default, elements use `Symbol.for('react.transitional.element')` as the `$$typeof` symbol. For React 18 and older, pass `reactVersion: 18` in the options (third argument):
```tsx
function Markdown({ text }: { text: string }) {
return Bun.markdown.react(text, undefined, { reactVersion: 18 });
}
```
### Parser options
All [parser options](#options) are passed as the third argument:
```tsx
const el = Bun.markdown.react("## Hello World", undefined, {
headings: { ids: true },
autolinks: true,
});
```

View File

@@ -165,7 +165,7 @@ This page is updated regularly to reflect compatibility status of the latest ver
### [`node:inspector`](https://nodejs.org/api/inspector.html)
🔴 Not implemented.
🟡 Partially implemented. `Profiler` API is supported (`Profiler.enable`, `Profiler.disable`, `Profiler.start`, `Profiler.stop`, `Profiler.setSamplingInterval`). Other inspector APIs are not yet implemented.
### [`node:repl`](https://nodejs.org/api/repl.html)

View File

@@ -135,6 +135,18 @@ await s3file.write(JSON.stringify({ name: "John", age: 30 }), {
type: "application/json",
});
// Write with content encoding (e.g. for pre-compressed data)
await s3file.write(compressedData, {
type: "application/json",
contentEncoding: "gzip",
});
// Write with content disposition
await s3file.write(pdfData, {
type: "application/pdf",
contentDisposition: 'attachment; filename="report.pdf"',
});
// Write using a writer (streaming)
const writer = s3file.writer({ type: "application/json" });
writer.write("Hello");
@@ -188,7 +200,13 @@ const download = s3.presign("my-file.txt"); // GET, text/plain, expires in 24 ho
const upload = s3.presign("my-file", {
expiresIn: 3600, // 1 hour
method: "PUT",
type: "application/json", // No extension for inferring, so we can specify the content type to be JSON
type: "application/json", // Sets response-content-type in the presigned URL
});
// Presign with content disposition (e.g. force download with a specific filename)
const downloadUrl = s3.presign("report.pdf", {
expiresIn: 3600,
contentDisposition: 'attachment; filename="quarterly-report.pdf"',
});
// You can call .presign() on a file reference, but avoid doing so

View File

@@ -460,7 +460,7 @@ console.log(result); // Blob(13) { size: 13, type: "text/plain" }
For cross-platform compatibility, Bun Shell implements a set of builtin commands, in addition to reading commands from the PATH environment variable.
- `cd`: change the working directory
- `ls`: list files in a directory
- `ls`: list files in a directory (supports `-l` for long listing format)
- `rm`: remove files and directories
- `echo`: print text
- `pwd`: print the working directory

View File

@@ -880,6 +880,94 @@ npm/strip-ansi 212,992 chars long-ansi 1.36 ms/iter 1.38 ms
---
## `Bun.wrapAnsi()`
<Note>Drop-in replacement for `wrap-ansi` npm package</Note>
`Bun.wrapAnsi(input: string, columns: number, options?: WrapAnsiOptions): string`
Wrap text to a specified column width while preserving ANSI escape codes, hyperlinks, and handling Unicode/emoji width correctly. This is a native, high-performance alternative to the popular [`wrap-ansi`](https://www.npmjs.com/package/wrap-ansi) npm package.
```ts
// Basic wrapping at 20 columns
Bun.wrapAnsi("The quick brown fox jumps over the lazy dog", 20);
// => "The quick brown fox\njumps over the lazy\ndog"
// Preserves ANSI escape codes
Bun.wrapAnsi("\u001b[31mThe quick brown fox jumps over the lazy dog\u001b[0m", 20);
// => "\u001b[31mThe quick brown fox\njumps over the lazy\ndog\u001b[0m"
```
### Options
```ts
Bun.wrapAnsi("Hello World", 5, {
hard: true, // Break words that exceed column width (default: false)
wordWrap: true, // Wrap at word boundaries (default: true)
trim: true, // Trim leading/trailing whitespace per line (default: true)
ambiguousIsNarrow: true, // Treat ambiguous-width characters as narrow (default: true)
});
```
| Option | Default | Description |
| ------------------- | ------- | --------------------------------------------------------------------------------------------------------------- |
| `hard` | `false` | If `true`, break words in the middle if they exceed the column width. |
| `wordWrap` | `true` | If `true`, wrap at word boundaries. If `false`, only break at explicit newlines. |
| `trim` | `true` | If `true`, trim leading and trailing whitespace from each line. |
| `ambiguousIsNarrow` | `true` | If `true`, treat ambiguous-width Unicode characters as 1 column wide. If `false`, treat them as 2 columns wide. |
TypeScript definition:
```ts expandable
namespace Bun {
export function wrapAnsi(
/**
* The string to wrap
*/
input: string,
/**
* The maximum column width
*/
columns: number,
/**
* Wrapping options
*/
options?: {
/**
* If `true`, break words in the middle if they don't fit on a line.
* If `false`, only break at word boundaries.
*
* @default false
*/
hard?: boolean;
/**
* If `true`, wrap at word boundaries when possible.
* If `false`, don't perform word wrapping (only wrap at explicit newlines).
*
* @default true
*/
wordWrap?: boolean;
/**
* If `true`, trim leading and trailing whitespace from each line.
* If `false`, preserve whitespace.
*
* @default true
*/
trim?: boolean;
/**
* When it's ambiguous and `true`, count ambiguous width characters as 1 character wide.
* If `false`, count them as 2 characters wide.
*
* @default true
*/
ambiguousIsNarrow?: boolean;
},
): string;
}
```
---
## `serialize` & `deserialize` in `bun:jsc`
To save a JavaScript value into an ArrayBuffer & back, use `serialize` and `deserialize` from the `"bun:jsc"` module.
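A minimal round-trip looks like this:
```ts
import { serialize, deserialize } from "bun:jsc";

const buffer = serialize({ hello: "world" });
const copy = deserialize(buffer);

console.log(copy.hello); // "world"
```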

View File

@@ -50,7 +50,8 @@ bun build <entry points>
</ParamField>
<ParamField path="--format" type="string" default="esm">
Module format of the output bundle. One of <code>esm</code>, <code>cjs</code>, or <code>iife</code>
Module format of the output bundle. One of <code>esm</code>, <code>cjs</code>, or <code>iife</code>. Defaults to{" "}
<code>cjs</code> when <code>--bytecode</code> is used.
</ParamField>
### File Naming

View File

@@ -40,6 +40,18 @@ bun run <file or script>
Run a script in all workspace packages (from the <code>workspaces</code> field in <code>package.json</code>)
</ParamField>
<ParamField path="--parallel" type="boolean">
Run multiple scripts or workspace scripts concurrently with prefixed output
</ParamField>
<ParamField path="--sequential" type="boolean">
Run multiple scripts or workspace scripts one after another with prefixed output
</ParamField>
<ParamField path="--no-exit-on-error" type="boolean">
When using <code>--parallel</code> or <code>--sequential</code>, continue running other scripts when one fails
</ParamField>
### Runtime &amp; Process Control
<ParamField path="--bun" type="boolean">

meta.json Normal file
View File

@@ -0,0 +1,24 @@
{
"inputs": {
"../../tmp/test-entry.js": {
"bytes": 21,
"imports": [
],
"format": "esm"
}
},
"outputs": {
"./test-entry.js": {
"bytes": 49,
"inputs": {
"../../tmp/test-entry.js": {
"bytesInOutput": 22
}
},
"imports": [
],
"exports": [],
"entryPoint": "../../tmp/test-entry.js"
}
}
}

View File

@@ -35,7 +35,7 @@ JSC_DEFINE_HOST_FUNCTION(jsFunctionWrite, (JSC::JSGlobalObject * globalObject,
JSValue arg1 = callframe->argument(0);
JSValue toWriteArg = callframe->argument(1);
auto &vm = globalObject->vm();
auto scope = DECLARE_CATCH_SCOPE(vm);
auto scope = DECLARE_TOP_EXCEPTION_SCOPE(vm);
int32_t fd = STDOUT_FILENO;
if (callframe->argumentCount() > 1) {

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "bun",
"version": "1.3.7",
"version": "1.3.9",
"workspaces": [
"./packages/bun-types",
"./packages/@types/bun"

View File

@@ -71,8 +71,25 @@ async function buildRootModule(dryRun?: boolean) {
js: "// Source code: https://github.com/oven-sh/bun/blob/main/packages/bun-release/scripts/npm-postinstall.ts",
},
});
write(join(cwd, "bin", "bun.exe"), "");
write(join(cwd, "bin", "bunx.exe"), "");
// Create placeholder scripts that print an error message if postinstall hasn't run.
// On Unix, these are executed as shell scripts despite the .exe extension.
// Do NOT add a shebang (#!/bin/sh) here — npm's cmd-shim reads shebangs to generate
// .ps1/.cmd wrappers BEFORE postinstall runs, and bakes the interpreter path in.
// A #!/bin/sh shebang breaks Windows because the wrappers reference /bin/sh which
// doesn't exist, even after postinstall replaces the placeholder with the real binary.
const placeholderScript = `echo "Error: Bun's postinstall script was not run." >&2
echo "" >&2
echo "This occurs when using --ignore-scripts during installation, or when using a" >&2
echo "package manager like pnpm that does not run postinstall scripts by default." >&2
echo "" >&2
echo "To fix this, run the postinstall script manually:" >&2
echo " cd node_modules/bun && node install.js" >&2
echo "" >&2
echo "Or reinstall bun without the --ignore-scripts flag." >&2
exit 1
`;
write(join(cwd, "bin", "bun.exe"), placeholderScript);
write(join(cwd, "bin", "bunx.exe"), placeholderScript);
write(
join(cwd, "bin", "README.txt"),
`The 'bun.exe' file is a placeholder for the binary file, which

View File

@@ -743,6 +743,101 @@ declare module "bun" {
export function parse(input: string): unknown;
}
/**
* JSONL (JSON Lines) related APIs.
*
* Each line in the input is expected to be a valid JSON value separated by newlines.
*/
namespace JSONL {
/**
* The result of `Bun.JSONL.parseChunk`.
*/
interface ParseChunkResult {
/** The successfully parsed JSON values. */
values: unknown[];
/** How far into the input was consumed. When the input is a string, this is a character offset. When the input is a `TypedArray`, this is a byte offset. Use `input.slice(read)` or `input.subarray(read)` to get the unconsumed remainder. */
read: number;
/** `true` if all input was consumed successfully. `false` if the input ends with an incomplete value or a parse error occurred. */
done: boolean;
/** A `SyntaxError` if a parse error occurred, otherwise `null`. Values parsed before the error are still available in `values`. */
error: SyntaxError | null;
}
/**
* Parse a JSONL (JSON Lines) string into an array of JavaScript values.
*
* If a parse error occurs and no values were successfully parsed, throws
* a `SyntaxError`. If values were parsed before the error, returns the
* successfully parsed values without throwing.
*
* Incomplete trailing values (e.g. from a partial chunk) are silently
* ignored and not included in the result.
*
* When a `TypedArray` is passed, the bytes are parsed directly without
* copying if the content is ASCII.
*
* @param input The JSONL string or typed array to parse
* @returns An array of parsed values
* @throws {SyntaxError} If the input starts with invalid JSON and no values could be parsed
*
* @example
* ```js
* const items = Bun.JSONL.parse('{"a":1}\n{"b":2}\n');
* // [{ a: 1 }, { b: 2 }]
*
* // From a Uint8Array (zero-copy for ASCII):
* const buf = new TextEncoder().encode('{"a":1}\n{"b":2}\n');
* const fromBytes = Bun.JSONL.parse(buf);
* // [{ a: 1 }, { b: 2 }]
*
* // Partial results on error after valid values:
* const partial = Bun.JSONL.parse('{"a":1}\n{bad}\n');
* // [{ a: 1 }]
*
* // Throws when no valid values precede the error:
* Bun.JSONL.parse('{bad}\n'); // throws SyntaxError
* ```
*/
export function parse(input: string | NodeJS.TypedArray | DataView<ArrayBuffer> | ArrayBufferLike): unknown[];
/**
* Parse a JSONL chunk, designed for streaming use.
*
* Never throws on parse errors. Instead, returns whatever values were
* successfully parsed along with an `error` property containing the
* `SyntaxError` (or `null` on success). Use `read` to determine how
* much input was consumed and `done` to check if all input was parsed.
*
* When a `TypedArray` is passed, the bytes are parsed directly without
* copying if the content is ASCII. Optional `start` and `end` parameters
* allow slicing without copying, and `read` will be a byte offset into
* the original typed array.
*
* @param input The JSONL string or typed array to parse
* @param start Byte offset to start parsing from (typed array only, default: 0)
* @param end Byte offset to stop parsing at (typed array only, default: input.byteLength)
* @returns An object with `values`, `read`, `done`, and `error` properties
*
* @example
* ```js
* let buffer = new Uint8Array(0);
* for await (const chunk of stream) {
* buffer = Buffer.concat([buffer, chunk]);
* const { values, read, error } = Bun.JSONL.parseChunk(buffer);
* if (error) throw error;
* for (const value of values) handle(value);
* buffer = buffer.subarray(read);
* }
* ```
*/
export function parseChunk(input: string): ParseChunkResult;
export function parseChunk(
input: NodeJS.TypedArray | DataView<ArrayBuffer> | ArrayBufferLike,
start?: number,
end?: number,
): ParseChunkResult;
}
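To make the streaming contract concrete, here is a minimal sketch of the `parseChunk` loop these declarations describe, assuming an `AsyncIterable<Uint8Array>` source (`stream` and `handle` are illustrative names):

```ts
// Illustrative sketch: accumulate bytes, parse what's complete, keep the rest.
async function consumeJSONL(
  stream: AsyncIterable<Uint8Array>,
  handle: (value: unknown) => void,
) {
  let buffer = new Uint8Array(0);
  for await (const chunk of stream) {
    // Append the new chunk to any unconsumed remainder.
    const merged = new Uint8Array(buffer.length + chunk.length);
    merged.set(buffer);
    merged.set(chunk, buffer.length);
    const { values, read, error } = Bun.JSONL.parseChunk(merged);
    // Values parsed before an error are still delivered in `values`.
    if (error) throw error;
    for (const value of values) handle(value);
    // For TypedArray input, `read` is a byte offset.
    buffer = merged.subarray(read);
  }
}
```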
/**
* YAML related APIs
*/
@@ -810,6 +905,480 @@ declare module "bun" {
export function stringify(input: unknown, replacer?: undefined | null, space?: string | number): string;
}
/**
* Markdown related APIs.
*
* Provides fast markdown parsing and rendering with three output modes:
* - `html()` — render to an HTML string
* - `render()` — render with custom callbacks for each element
* - `react()` — parse to React-compatible JSX elements
*
* Supports GFM extensions (tables, strikethrough, task lists, autolinks) and
* component overrides to replace default HTML tags with custom components.
*
* @example
* ```tsx
* // Render markdown to HTML
* const html = Bun.markdown.html("# Hello **world**");
* // "<h1>Hello <strong>world</strong></h1>\n"
*
* // Render with custom callbacks
* const ansi = Bun.markdown.render("# Hello **world**", {
* heading: (children, { level }) => `\x1b[1m${children}\x1b[0m\n`,
* strong: (children) => `\x1b[1m${children}\x1b[22m`,
* paragraph: (children) => children + "\n",
* });
*
* // Render as a React component
* function Markdown({ text }: { text: string }) {
* return Bun.markdown.react(text);
* }
*
* // With component overrides
* const element = Bun.markdown.react("# Hello", { h1: MyHeadingComponent });
* ```
*/
namespace markdown {
/**
* Options for configuring the markdown parser.
*
* By default, GFM extensions (tables, strikethrough, task lists) are enabled.
*/
interface Options {
/** Enable GFM tables. Default: `true`. */
tables?: boolean;
/** Enable GFM strikethrough (`~~text~~`). Default: `true`. */
strikethrough?: boolean;
/** Enable GFM task lists (`- [x] item`). Default: `true`. */
tasklists?: boolean;
/** Treat soft line breaks as hard line breaks. Default: `false`. */
hardSoftBreaks?: boolean;
/** Enable wiki-style links (`[[target]]` or `[[target|label]]`). Default: `false`. */
wikiLinks?: boolean;
/** Enable underline syntax (`__text__` renders as `<u>` instead of `<strong>`). Default: `false`. */
underline?: boolean;
/** Enable LaTeX math (`$inline$` and `$$display$$`). Default: `false`. */
latexMath?: boolean;
/** Collapse whitespace in text content. Default: `false`. */
collapseWhitespace?: boolean;
/** Allow ATX headers without a space after `#`. Default: `false`. */
permissiveAtxHeaders?: boolean;
/** Disable indented code blocks. Default: `false`. */
noIndentedCodeBlocks?: boolean;
/** Disable HTML blocks. Default: `false`. */
noHtmlBlocks?: boolean;
/** Disable inline HTML spans. Default: `false`. */
noHtmlSpans?: boolean;
/**
* Enable the GFM tag filter, which replaces `<` with `&lt;` for disallowed
* HTML tags (e.g. `<script>`, `<style>`, `<iframe>`). Default: `false`.
*/
tagFilter?: boolean;
/**
* Enable autolinks. Pass `true` to enable all autolink types (URL, WWW, email),
* or an object to enable individually.
*
* @example
* ```ts
* // Enable all autolinks
* { autolinks: true }
* // Enable only URL and email autolinks
* { autolinks: { url: true, email: true } }
* ```
*/
autolinks?: boolean | { url?: boolean; www?: boolean; email?: boolean };
/**
* Configure heading IDs and autolink headings. Pass `true` to enable both
* heading IDs and autolink headings, or an object to configure individually.
*
* @example
* ```ts
* // Enable both heading IDs and autolink headings
* { headings: true }
* // Enable only heading IDs
* { headings: { ids: true } }
* ```
*/
headings?: boolean | { ids?: boolean; autolink?: boolean };
}
/** A component that accepts props `P`: a function, class, or HTML tag name. */
type Component<P = {}> = string | ((props: P) => any) | (new (props: P) => any);
interface ChildrenProps {
children: import("./jsx.d.ts").JSX.Element[];
}
interface HeadingProps extends ChildrenProps {
/** Heading ID slug. Set when `headings: { ids: true }` is enabled. */
id?: string;
}
interface OrderedListProps extends ChildrenProps {
/** The start number. */
start: number;
}
interface ListItemProps extends ChildrenProps {
/** Task list checked state. Set for `- [x]` / `- [ ]` items. */
checked?: boolean;
}
interface CodeBlockProps extends ChildrenProps {
/** The info-string language (e.g. `"js"`). */
language?: string;
}
interface CellProps extends ChildrenProps {
/** Column alignment. */
align?: "left" | "center" | "right";
}
interface LinkProps extends ChildrenProps {
/** Link URL. */
href: string;
/** Link title attribute. */
title?: string;
}
interface ImageProps {
/** Image URL. */
src: string;
/** Alt text. */
alt?: string;
/** Image title attribute. */
title?: string;
}
/**
* Component overrides for `react()`.
*
* Replace default HTML tags with custom React components. Each override
* receives the same props the default element would get.
*
* @example
* ```tsx
* function Code({ language, children }: { language?: string; children: React.ReactNode }) {
* return <pre data-language={language}><code>{children}</code></pre>;
* }
* Bun.markdown.react(text, { pre: Code });
* ```
*/
interface ComponentOverrides {
h1?: Component<HeadingProps>;
h2?: Component<HeadingProps>;
h3?: Component<HeadingProps>;
h4?: Component<HeadingProps>;
h5?: Component<HeadingProps>;
h6?: Component<HeadingProps>;
p?: Component<ChildrenProps>;
blockquote?: Component<ChildrenProps>;
ul?: Component<ChildrenProps>;
ol?: Component<OrderedListProps>;
li?: Component<ListItemProps>;
pre?: Component<CodeBlockProps>;
hr?: Component<{}>;
html?: Component<ChildrenProps>;
table?: Component<ChildrenProps>;
thead?: Component<ChildrenProps>;
tbody?: Component<ChildrenProps>;
tr?: Component<ChildrenProps>;
th?: Component<CellProps>;
td?: Component<CellProps>;
em?: Component<ChildrenProps>;
strong?: Component<ChildrenProps>;
a?: Component<LinkProps>;
img?: Component<ImageProps>;
code?: Component<ChildrenProps>;
del?: Component<ChildrenProps>;
math?: Component<ChildrenProps>;
u?: Component<ChildrenProps>;
br?: Component<{}>;
}
/**
* Callbacks for `render()`. Each callback receives the accumulated children
* as a string and optional metadata, and returns a string.
*
* Return `null` or `undefined` to omit the element from the output.
* If no callback is registered for an element, its children pass through unchanged.
*/
/** Meta passed to the `heading` callback. */
interface HeadingMeta {
/** Heading level (1–6). */
level: number;
/** Heading ID slug. Set when `headings: { ids: true }` is enabled. */
id?: string;
}
/** Meta passed to the `code` callback. */
interface CodeBlockMeta {
/** The info-string language (e.g. `"js"`). */
language?: string;
}
/** Meta passed to the `list` callback. */
interface ListMeta {
/** Whether this is an ordered list. */
ordered: boolean;
/** The start number for ordered lists. */
start?: number;
}
/** Meta passed to the `listItem` callback. */
interface ListItemMeta {
/** Task list checked state. Set for `- [x]` / `- [ ]` items. */
checked?: boolean;
}
/** Meta passed to `th` and `td` callbacks. */
interface CellMeta {
/** Column alignment. */
align?: "left" | "center" | "right";
}
/** Meta passed to the `link` callback. */
interface LinkMeta {
/** Link URL. */
href: string;
/** Link title attribute. */
title?: string;
}
/** Meta passed to the `image` callback. */
interface ImageMeta {
/** Image URL. */
src: string;
/** Image title attribute. */
title?: string;
}
interface RenderCallbacks {
/** Heading (level 1–6). `id` is set when `headings: { ids: true }` is enabled. */
heading?: (children: string, meta: HeadingMeta) => string | null | undefined;
/** Paragraph. */
paragraph?: (children: string) => string | null | undefined;
/** Blockquote. */
blockquote?: (children: string) => string | null | undefined;
/** Code block. `meta.language` is the info-string (e.g. `"js"`). Only passed for fenced code blocks with a language. */
code?: (children: string, meta?: CodeBlockMeta) => string | null | undefined;
/** Ordered or unordered list. `start` is the first item number for ordered lists. */
list?: (children: string, meta: ListMeta) => string | null | undefined;
/** List item. `meta.checked` is set for task list items (`- [x]` / `- [ ]`). Only passed for task list items. */
listItem?: (children: string, meta?: ListItemMeta) => string | null | undefined;
/** Horizontal rule. */
hr?: (children: string) => string | null | undefined;
/** Table. */
table?: (children: string) => string | null | undefined;
/** Table head. */
thead?: (children: string) => string | null | undefined;
/** Table body. */
tbody?: (children: string) => string | null | undefined;
/** Table row. */
tr?: (children: string) => string | null | undefined;
/** Table header cell. `meta.align` is set when column alignment is specified. */
th?: (children: string, meta?: CellMeta) => string | null | undefined;
/** Table data cell. `meta.align` is set when column alignment is specified. */
td?: (children: string, meta?: CellMeta) => string | null | undefined;
/** Raw HTML content. */
html?: (children: string) => string | null | undefined;
/** Strong emphasis (`**text**`). */
strong?: (children: string) => string | null | undefined;
/** Emphasis (`*text*`). */
emphasis?: (children: string) => string | null | undefined;
/** Link. `href` is the URL, `title` is the optional title attribute. */
link?: (children: string, meta: LinkMeta) => string | null | undefined;
/** Image. `src` is the URL, `title` is the optional title attribute. */
image?: (children: string, meta: ImageMeta) => string | null | undefined;
/** Inline code (`` `code` ``). */
codespan?: (children: string) => string | null | undefined;
/** Strikethrough (`~~text~~`). */
strikethrough?: (children: string) => string | null | undefined;
/** Plain text content. */
text?: (text: string) => string | null | undefined;
}
/** Options for `react()` — parser options and element symbol configuration. */
interface ReactOptions extends Options {
/**
* Which `$$typeof` symbol to use on the generated elements.
* - `19` (default): `Symbol.for('react.transitional.element')`
* - `18`: `Symbol.for('react.element')` — use this for React 18 and older
*/
reactVersion?: 18 | 19;
}
/**
* Render markdown to an HTML string.
*
* @param input The markdown string or buffer to render
* @param options Parser options
* @returns An HTML string
*
* @example
* ```ts
* const html = Bun.markdown.html("# Hello **world**");
* // "<h1>Hello <strong>world</strong></h1>\n"
*
* // With options
* const html = Bun.markdown.html("## Hello", { headings: { ids: true } });
* // '<h2 id="hello">Hello</h2>\n'
* ```
*/
export function html(
input: string | NodeJS.TypedArray | DataView<ArrayBuffer> | ArrayBufferLike,
options?: Options,
): string;
/**
* Render markdown with custom JavaScript callbacks for each element.
*
* Each callback receives the accumulated children as a string and optional
* metadata, and returns a string. Return `null` or `undefined` to omit
* an element. If no callback is registered, children pass through unchanged.
*
* Parser options are passed as a separate third argument.
*
* @param input The markdown string to render
* @param callbacks Callbacks for each element type
* @param options Parser options
* @returns The accumulated string output
*
* @example
* ```ts
* // Custom HTML with classes
* const html = Bun.markdown.render("# Title\n\nHello **world**", {
* heading: (children, { level }) => `<h${level} class="title">${children}</h${level}>`,
* paragraph: (children) => `<p>${children}</p>`,
* strong: (children) => `<b>${children}</b>`,
* });
*
* // ANSI terminal output
* const ansi = Bun.markdown.render("# Hello\n\n**bold**", {
* heading: (children) => `\x1b[1;4m${children}\x1b[0m\n`,
* paragraph: (children) => children + "\n",
* strong: (children) => `\x1b[1m${children}\x1b[22m`,
* });
*
* // With parser options as third argument
* const text = Bun.markdown.render("Visit www.example.com", {
* link: (children, { href }) => `[${children}](${href})`,
* paragraph: (children) => children,
* }, { autolinks: true });
* ```
*/
export function render(
input: string | NodeJS.TypedArray | DataView<ArrayBuffer> | ArrayBufferLike,
callbacks?: RenderCallbacks,
options?: Options,
): string;
/**
* Render markdown to React JSX elements.
*
* Returns a React Fragment containing the parsed markdown as children.
* Can be returned directly from a component or passed to `renderToString()`.
*
* Override any HTML element with a custom component by passing it in the
* second argument, keyed by tag name. Custom components receive the same props
* the default elements would (e.g. `href` for links, `language` for code blocks).
*
* Parser options (including `reactVersion`) are passed as a separate third argument.
* Uses `Symbol.for('react.transitional.element')` by default (React 19).
* Pass `reactVersion: 18` for React 18 and older.
*
* @param input The markdown string or buffer to parse
* @param components Component overrides keyed by HTML tag name
* @param options Parser options and element symbol configuration
* @returns A React Fragment element containing the parsed markdown
*
* @example
* ```tsx
* // Use directly as a component return value
* function Markdown({ text }: { text: string }) {
* return Bun.markdown.react(text);
* }
*
* // Server-side rendering
* import { renderToString } from "react-dom/server";
* const html = renderToString(Bun.markdown.react("# Hello **world**"));
*
* // Custom components receive element props
* function Code({ language, children }: { language?: string; children: React.ReactNode }) {
* return <pre data-language={language}><code>{children}</code></pre>;
* }
* function Link({ href, children }: { href: string; children: React.ReactNode }) {
* return <a href={href} target="_blank">{children}</a>;
* }
* const el = Bun.markdown.react(text, { pre: Code, a: Link });
*
* // For React 18 and older
* const el18 = Bun.markdown.react(text, undefined, { reactVersion: 18 });
* ```
*/
export function react(
input: string | NodeJS.TypedArray | DataView<ArrayBuffer> | ArrayBufferLike,
components?: ComponentOverrides,
options?: ReactOptions,
): import("./jsx.d.ts").JSX.Element;
}
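A short sketch exercising the `render()` callback contract declared above; per the docs, returning `null` omits an element, and `id` is populated when `headings: { ids: true }` is set (the input string is illustrative):

```ts
const out = Bun.markdown.render(
  "# Title\n\n![logo](logo.png)\n\nSee the **docs**.",
  {
    // meta.id is set because headings.ids is enabled below.
    heading: (children, { level, id }) =>
      `<h${level}${id ? ` id="${id}"` : ""}>${children}</h${level}>\n`,
    // Returning null omits images from the output entirely.
    image: () => null,
    paragraph: children => `<p>${children}</p>\n`,
    strong: children => `<b>${children}</b>`,
  },
  { headings: { ids: true } },
);
```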
/**
* JSON5 related APIs
*/
namespace JSON5 {
/**
* Parse a JSON5 string into a JavaScript value.
*
* JSON5 is a superset of JSON based on ECMAScript 5.1 that supports
* comments, trailing commas, unquoted keys, single-quoted strings,
* hex numbers, Infinity, NaN, and more.
*
* @category Utilities
*
* @param input The JSON5 string to parse
* @returns A JavaScript value
*
* @example
* ```ts
* import { JSON5 } from "bun";
*
* const result = JSON5.parse(`{
* // This is a comment
* name: 'my-app',
* version: '1.0.0', // trailing comma is allowed
* hex: 0xDEADbeef,
* half: .5,
* infinity: Infinity,
* }`);
* ```
*/
export function parse(input: string): unknown;
/**
* Convert a JavaScript value into a JSON5 string. Object keys that are
* valid identifiers are unquoted, strings use double quotes, `Infinity`
* and `NaN` are represented as literals, and indented output includes
* trailing commas.
*
* @category Utilities
*
* @param input The JavaScript value to stringify.
* @param replacer Currently not supported.
* @param space A number for how many spaces each level of indentation gets, or a string used as indentation.
* The number is clamped between 0 and 10, and the first 10 characters of the string are used.
* @returns A JSON5 string, or `undefined` if the input is `undefined`, a function, or a symbol.
*
* @example
* ```ts
* import { JSON5 } from "bun";
*
* console.log(JSON5.stringify({ a: 1, b: "two" }));
* // {a:1,b:"two"}
*
* console.log(JSON5.stringify({ a: 1, b: 2 }, null, 2));
* // {
* // a: 1,
* // b: 2,
* // }
* ```
*/
export function stringify(input: unknown, replacer?: undefined | null, space?: string | number): string | undefined;
}
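A brief usage sketch of the behavior the JSDoc above spells out (values are illustrative):

```ts
import { JSON5 } from "bun";

JSON5.parse("{hex: 0xFF, half: .5, nan: NaN}");
// { hex: 255, half: 0.5, nan: NaN }

// Like JSON.stringify, returns undefined for unserializable inputs.
JSON5.stringify(() => {}); // undefined
JSON5.stringify({ a: 1 }, null, 2); // "{\n  a: 1,\n}" (trailing comma when indented)
```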
/**
* Synchronously resolve a `moduleId` as though it were imported from `parent`
*
@@ -1745,6 +2314,17 @@ declare module "bun" {
* @default "warn"
*/
logLevel?: "verbose" | "debug" | "info" | "warn" | "error";
/**
* Enable REPL mode transforms:
* - Wraps top-level inputs that appear to be object literals (inputs starting with '{' without trailing ';') in parentheses
* - Hoists all declarations as var for REPL persistence across vm.runInContext calls
* - Wraps last expression in { __proto__: null, value: expr } for result capture
* - Wraps code in sync/async IIFE to avoid parentheses around object literals
*
* @default false
*/
replMode?: boolean;
}
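A hedged sketch of what enabling `replMode` might look like, assuming the option lives on the same transpiler options object as `logLevel` above (the input string is illustrative, not from the source):

```ts
// Assumption: replMode is accepted by Bun.Transpiler options, as this hunk suggests.
const transpiler = new Bun.Transpiler({ loader: "js", replMode: true });

// "{ a: 1 }" would normally parse as a block statement; the REPL transform
// wraps it in parentheses so it evaluates as an object literal.
console.log(transpiler.transformSync("{ a: 1 }"));
```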
/**
@@ -1851,14 +2431,15 @@ declare module "bun" {
type Architecture = "x64" | "arm64";
type Libc = "glibc" | "musl";
type SIMD = "baseline" | "modern";
type Target =
type CompileTarget =
| `bun-darwin-${Architecture}`
| `bun-darwin-x64-${SIMD}`
| `bun-darwin-${Architecture}-${SIMD}`
| `bun-linux-${Architecture}`
| `bun-linux-${Architecture}-${Libc}`
| `bun-linux-${Architecture}-${SIMD}`
| `bun-linux-${Architecture}-${SIMD}-${Libc}`
| "bun-windows-x64"
| `bun-windows-x64-${SIMD}`
| `bun-linux-x64-${SIMD}-${Libc}`;
| `bun-windows-x64-${SIMD}`;
}
/**
@@ -2013,7 +2594,10 @@ declare module "bun" {
* start times, but will make the final output larger and slightly increase
* memory usage.
*
* Bytecode is currently only supported for CommonJS (`format: "cjs"`).
* - CommonJS: works with or without `compile: true`
* - ESM: requires `compile: true`
*
* Without an explicit `format`, defaults to CommonJS.
*
* Must be `target: "bun"`
* @default false
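For illustration, the two combinations this note distinguishes (entrypoints and output paths are hypothetical):

```ts
// CommonJS bytecode works without compile.
await Bun.build({
  entrypoints: ["./index.ts"],
  target: "bun",
  format: "cjs",
  bytecode: true,
  outdir: "./out",
});

// ESM bytecode requires compile, so module_info can be embedded.
await Bun.build({
  entrypoints: ["./index.ts"],
  target: "bun",
  format: "esm",
  bytecode: true,
  compile: { outfile: "./myapp" },
});
```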
@@ -2193,7 +2777,7 @@ declare module "bun" {
}
interface CompileBuildOptions {
target?: Bun.Build.Target;
target?: Bun.Build.CompileTarget;
execArgv?: string[];
executablePath?: string;
outfile?: string;
@@ -2275,7 +2859,7 @@ declare module "bun" {
* });
* ```
*/
compile: boolean | Bun.Build.Target | CompileBuildOptions;
compile: boolean | Bun.Build.CompileTarget | CompileBuildOptions;
/**
* Splitting is not currently supported with `.compile`
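A sketch of the three `compile` forms the updated union allows, with target strings built from the `CompileTarget` template above (entrypoints and outfile are illustrative):

```ts
// Boolean form: compile for the host platform.
await Bun.build({ entrypoints: ["./cli.ts"], compile: true });

// CompileTarget string form: cross-compile, e.g. baseline-SIMD musl Linux x64.
await Bun.build({ entrypoints: ["./cli.ts"], compile: "bun-linux-x64-baseline-musl" });

// Options form via CompileBuildOptions.
await Bun.build({
  entrypoints: ["./cli.ts"],
  compile: { target: "bun-darwin-arm64", outfile: "./cli" },
});
```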
@@ -5098,7 +5682,7 @@ declare module "bun" {
*
* This will apply to all sockets from the same {@link Listener}. It is per-socket only for {@link Bun.connect}.
*/
reload(handler: SocketHandler): void;
reload(options: Pick<SocketOptions<Data>, "socket">): void;
/**
* Get the server that created this socket
@@ -5441,7 +6025,7 @@ declare module "bun" {
stop(closeActiveConnections?: boolean): void;
ref(): void;
unref(): void;
reload(options: Pick<Partial<SocketOptions>, "socket">): void;
reload(options: Pick<SocketOptions<Data>, "socket">): void;
data: Data;
}
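A sketch of the corrected `reload` signature, which now takes the same `{ socket: ... }` shape as the listen options rather than a bare handler (handler bodies are illustrative):

```ts
const listener = Bun.listen<{ id: number }>({
  hostname: "localhost",
  port: 0,
  socket: {
    data(socket, chunk) {
      /* original handler */
    },
  },
});

// reload() now accepts Pick<SocketOptions<Data>, "socket">:
listener.reload({
  socket: {
    data(socket, chunk) {
      /* replacement handler */
    },
  },
});
```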
interface TCPSocketListener<Data = unknown> extends SocketListener<Data> {

View File

@@ -23,6 +23,11 @@ declare module "*.jsonc" {
export = contents;
}
declare module "*.json5" {
var contents: any;
export = contents;
}
declare module "*/bun.lock" {
var contents: import("bun").BunLockFile;
export = contents;

packages/bun-types/jsx.d.ts vendored Normal file
View File

@@ -0,0 +1,11 @@
export {};
type ReactElement = typeof globalThis extends { React: infer React }
? React extends { createElement(...args: any): infer R }
? R
: never
: unknown;
export namespace JSX {
export type Element = ReactElement;
}

View File

@@ -204,26 +204,38 @@ namespace uWS {
}
// do we have data to emit all?
if (data.length() >= chunkSize(state)) {
unsigned int remaining = chunkSize(state);
if (data.length() >= remaining) {
// emit all but 2 bytes then reset state to 0 and goto beginning
// not fin
std::string_view emitSoon;
bool shouldEmit = false;
if (chunkSize(state) > 2) {
emitSoon = std::string_view(data.data(), chunkSize(state) - 2);
shouldEmit = true;
// Validate the chunk terminator (\r\n) accounting for partial reads
switch (remaining) {
default:
// remaining > 2: emit data and validate full terminator
emitSoon = std::string_view(data.data(), remaining - 2);
shouldEmit = true;
[[fallthrough]];
case 2:
// remaining >= 2: validate both \r and \n
if (data[remaining - 2] != '\r' || data[remaining - 1] != '\n') {
state = STATE_IS_ERROR;
return std::nullopt;
}
break;
case 1:
// remaining == 1: only \n left to validate
if (data[0] != '\n') {
state = STATE_IS_ERROR;
return std::nullopt;
}
break;
case 0:
// remaining == 0: terminator already consumed
break;
}
// Validate that the chunk terminator is \r\n to prevent request smuggling
// The last 2 bytes of the chunk must be exactly \r\n
// Note: chunkSize always includes +2 for the terminator (added in consumeHexNumber),
// and chunks with size 0 (chunkSize == 2) are handled earlier at line 190.
// Therefore chunkSize >= 3 here, so no underflow is possible.
size_t terminatorOffset = chunkSize(state) - 2;
if (data[terminatorOffset] != '\r' || data[terminatorOffset + 1] != '\n') {
state = STATE_IS_ERROR;
return std::nullopt;
}
data.remove_prefix(chunkSize(state));
data.remove_prefix(remaining);
state = STATE_IS_CHUNKED;
if (shouldEmit) {
return emitSoon;
@@ -232,19 +244,45 @@ namespace uWS {
} else {
/* We will consume all our input data */
std::string_view emitSoon;
if (chunkSize(state) > 2) {
uint64_t maximalAppEmit = chunkSize(state) - 2;
if (data.length() > maximalAppEmit) {
unsigned int size = chunkSize(state);
size_t len = data.length();
if (size > 2) {
uint64_t maximalAppEmit = size - 2;
if (len > maximalAppEmit) {
emitSoon = data.substr(0, maximalAppEmit);
// Validate terminator bytes being consumed
size_t terminatorBytesConsumed = len - maximalAppEmit;
if (terminatorBytesConsumed >= 1 && data[maximalAppEmit] != '\r') {
state = STATE_IS_ERROR;
return std::nullopt;
}
if (terminatorBytesConsumed >= 2 && data[maximalAppEmit + 1] != '\n') {
state = STATE_IS_ERROR;
return std::nullopt;
}
} else {
//cb(data);
emitSoon = data;
}
} else if (size == 2) {
// Only terminator bytes remain, validate what we have
if (len >= 1 && data[0] != '\r') {
state = STATE_IS_ERROR;
return std::nullopt;
}
if (len >= 2 && data[1] != '\n') {
state = STATE_IS_ERROR;
return std::nullopt;
}
} else if (size == 1) {
// Only \n remains
if (data[0] != '\n') {
state = STATE_IS_ERROR;
return std::nullopt;
}
}
decChunkSize(state, (unsigned int) data.length());
decChunkSize(state, (unsigned int) len);
state |= STATE_IS_CHUNKED;
// new: decrease data by its size (bug)
data.remove_prefix(data.length()); // new bug fix for getNextChunk
data.remove_prefix(len);
if (emitSoon.length()) {
return emitSoon;
} else {
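To summarize what the patch validates, here is a TypeScript sketch of the terminator check (illustrative only, not the uWS code): the `\r\n` that ends each chunk may be split across reads, so the number of chunk bytes still owed determines which bytes must be checked.

```ts
const CR = 0x0d, LF = 0x0a;

// `remaining` counts chunk bytes still owed, including the 2-byte terminator.
function terminatorIsValid(data: Uint8Array, remaining: number): boolean {
  if (remaining >= 2) {
    // Both terminator bytes arrive in this read.
    return data[remaining - 2] === CR && data[remaining - 1] === LF;
  }
  if (remaining === 1) {
    // "\r" was consumed by a previous read; only "\n" is left.
    return data[0] === LF;
  }
  return true; // remaining === 0: terminator already fully consumed
}
```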

View File

@@ -39,6 +39,7 @@ add_compile_definitions(
CONFIG_TCC_PREDEFS
ONE_SOURCE=0
TCC_LIBTCC1="\\0"
CONFIG_TCC_BACKTRACE=0
)
if(APPLE)

View File

@@ -1,5 +1,5 @@
#!/bin/sh
# Version: 26
# Version: 27
# A script that installs the dependencies needed to build and test Bun.
# This should work on macOS and Linux with a POSIX shell.
@@ -1061,6 +1061,11 @@ install_build_essentials() {
go \
xz
install_packages apache2-utils
# QEMU user-mode for baseline CPU verification in CI
case "$arch" in
x64) install_packages qemu-x86_64 ;;
aarch64) install_packages qemu-aarch64 ;;
esac
;;
esac

View File

@@ -23,7 +23,10 @@ const OS_NAME = platform().toLowerCase();
const ARCH_NAME_RAW = arch();
const IS_MAC = OS_NAME === "darwin";
const IS_LINUX = OS_NAME === "linux";
const IS_ARM64 = ARCH_NAME_RAW === "arm64" || ARCH_NAME_RAW === "aarch64";
const IS_WINDOWS = OS_NAME === "win32";
// On Windows, use PROCESSOR_ARCHITECTURE env var to get native arch (Bun may run under x64 emulation)
const NATIVE_ARCH = IS_WINDOWS ? (process.env.PROCESSOR_ARCHITECTURE || ARCH_NAME_RAW).toUpperCase() : ARCH_NAME_RAW;
const IS_ARM64 = NATIVE_ARCH === "ARM64" || NATIVE_ARCH === "AARCH64" || ARCH_NAME_RAW === "arm64";
// Paths
const ROOT_DIR = resolve(import.meta.dir, "..");
@@ -33,22 +36,54 @@ const WEBKIT_RELEASE_DIR = join(WEBKIT_BUILD_DIR, "Release");
const WEBKIT_DEBUG_DIR = join(WEBKIT_BUILD_DIR, "Debug");
const WEBKIT_RELEASE_DIR_LTO = join(WEBKIT_BUILD_DIR, "ReleaseLTO");
// Windows ICU paths - use vcpkg static build
// Auto-detect triplet: prefer arm64 if it exists, otherwise x64
const VCPKG_ARM64_PATH = join(WEBKIT_DIR, "vcpkg_installed", "arm64-windows-static");
const VCPKG_X64_PATH = join(WEBKIT_DIR, "vcpkg_installed", "x64-windows-static");
const VCPKG_ROOT = existsSync(VCPKG_ARM64_PATH) ? VCPKG_ARM64_PATH : VCPKG_X64_PATH;
const ICU_INCLUDE_DIR = join(VCPKG_ROOT, "include");
// Get ICU library paths based on build config (debug uses 'd' suffix libraries)
function getICULibraryPaths(config: BuildConfig) {
const isDebug = config === "debug";
// vcpkg static ICU libraries: release in lib/, debug in debug/lib/ with 'd' suffix
const libDir = isDebug ? join(VCPKG_ROOT, "debug", "lib") : join(VCPKG_ROOT, "lib");
const suffix = isDebug ? "d" : "";
return {
ICU_LIBRARY: libDir,
ICU_DATA_LIBRARY: join(libDir, `sicudt${suffix}.lib`),
ICU_I18N_LIBRARY: join(libDir, `sicuin${suffix}.lib`),
ICU_UC_LIBRARY: join(libDir, `sicuuc${suffix}.lib`),
};
}
// Homebrew prefix detection
const HOMEBREW_PREFIX = IS_ARM64 ? "/opt/homebrew/" : "/usr/local/";
// Compiler detection
function findExecutable(names: string[]): string | null {
for (const name of names) {
const result = spawnSync("which", [name], { encoding: "utf8" });
if (result.status === 0) {
return result.stdout.trim();
}
const path = Bun.which(name);
if (path) return path;
}
return null;
}
const CC = findExecutable(["clang-19", "clang"]) || "clang";
const CXX = findExecutable(["clang++-19", "clang++"]) || "clang++";
// Detect ccache
const CCACHE = findExecutable(["ccache"]);
const HAS_CCACHE = CCACHE !== null;
// Configure compilers with ccache if available
// On Windows, use clang-cl for MSVC compatibility
const CC_BASE = IS_WINDOWS
? findExecutable(["clang-cl.exe", "clang-cl"]) || "clang-cl"
: findExecutable(["clang-19", "clang"]) || "clang";
const CXX_BASE = IS_WINDOWS
? findExecutable(["clang-cl.exe", "clang-cl"]) || "clang-cl"
: findExecutable(["clang++-19", "clang++"]) || "clang++";
const CC = HAS_CCACHE ? CCACHE : CC_BASE;
const CXX = HAS_CCACHE ? CCACHE : CXX_BASE;
// Build directory based on config
const getBuildDir = (config: BuildConfig) => {
@@ -63,7 +98,7 @@ const getBuildDir = (config: BuildConfig) => {
};
// Common CMake flags
const getCommonFlags = () => {
const getCommonFlags = (config: BuildConfig) => {
const flags = [
"-DPORT=JSCOnly",
"-DENABLE_STATIC_JSC=ON",
@@ -74,16 +109,27 @@ const getCommonFlags = () => {
"-DENABLE_FTL_JIT=ON",
"-G",
"Ninja",
`-DCMAKE_C_COMPILER=${CC}`,
`-DCMAKE_CXX_COMPILER=${CXX}`,
];
// Configure compiler with ccache if available
if (HAS_CCACHE) {
flags.push(
`-DCMAKE_C_COMPILER_LAUNCHER=${CCACHE}`,
`-DCMAKE_CXX_COMPILER_LAUNCHER=${CCACHE}`,
`-DCMAKE_C_COMPILER=${CC_BASE}`,
`-DCMAKE_CXX_COMPILER=${CXX_BASE}`,
);
} else {
flags.push(`-DCMAKE_C_COMPILER=${CC}`, `-DCMAKE_CXX_COMPILER=${CXX}`);
}
if (IS_MAC) {
flags.push(
"-DENABLE_SINGLE_THREADED_VM_ENTRY_SCOPE=ON",
"-DBUN_FAST_TLS=ON",
"-DPTHREAD_JIT_PERMISSIONS_API=1",
"-DUSE_PTHREAD_JIT_PERMISSIONS_API=ON",
"-DENABLE_REMOTE_INSPECTOR=ON",
);
} else if (IS_LINUX) {
flags.push(
@@ -91,6 +137,27 @@ const getCommonFlags = () => {
"-DUSE_VISIBILITY_ATTRIBUTE=1",
"-DENABLE_REMOTE_INSPECTOR=ON",
);
} else if (IS_WINDOWS) {
// Find lld-link for Windows builds
const lldLink = findExecutable(["lld-link.exe", "lld-link"]) || "lld-link";
// Get ICU library paths for this build config (debug uses 'd' suffix libraries)
const icuPaths = getICULibraryPaths(config);
flags.push(
"-DENABLE_REMOTE_INSPECTOR=ON",
"-DUSE_VISIBILITY_ATTRIBUTE=1",
"-DUSE_SYSTEM_MALLOC=ON",
`-DCMAKE_LINKER=${lldLink}`,
`-DICU_ROOT=${VCPKG_ROOT}`,
`-DICU_LIBRARY=${icuPaths.ICU_LIBRARY}`,
`-DICU_INCLUDE_DIR=${ICU_INCLUDE_DIR}`,
// Explicitly set ICU library paths to use vcpkg static libs (debug has 'd' suffix)
`-DICU_DATA_LIBRARY_RELEASE=${icuPaths.ICU_DATA_LIBRARY}`,
`-DICU_I18N_LIBRARY_RELEASE=${icuPaths.ICU_I18N_LIBRARY}`,
`-DICU_UC_LIBRARY_RELEASE=${icuPaths.ICU_UC_LIBRARY}`,
"-DCMAKE_C_FLAGS=/DU_STATIC_IMPLEMENTATION",
"-DCMAKE_CXX_FLAGS=/DU_STATIC_IMPLEMENTATION /clang:-fno-c++-static-destructors",
);
}
return flags;
@@ -98,7 +165,7 @@ const getCommonFlags = () => {
// Build-specific CMake flags
const getBuildFlags = (config: BuildConfig) => {
const flags = [...getCommonFlags()];
const flags = [...getCommonFlags(config)];
switch (config) {
case "debug":
@@ -106,24 +173,40 @@ const getBuildFlags = (config: BuildConfig) => {
"-DCMAKE_BUILD_TYPE=Debug",
"-DENABLE_BUN_SKIP_FAILING_ASSERTIONS=ON",
"-DCMAKE_EXPORT_COMPILE_COMMANDS=ON",
"-DENABLE_REMOTE_INSPECTOR=ON",
"-DUSE_VISIBILITY_ATTRIBUTE=1",
);
if (IS_MAC) {
// Enable address sanitizer by default on Mac debug builds
if (IS_MAC || IS_LINUX) {
// Enable address sanitizer by default on Mac/Linux debug builds
flags.push("-DENABLE_SANITIZERS=address");
// To disable asan, comment the line above and uncomment:
// flags.push("-DENABLE_MALLOC_HEAP_BREAKDOWN=ON");
}
if (IS_WINDOWS) {
flags.push("-DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreadedDebug");
}
break;
case "lto":
flags.push("-DCMAKE_BUILD_TYPE=Release", "-DCMAKE_C_FLAGS=-flto=full", "-DCMAKE_CXX_FLAGS=-flto=full");
flags.push("-DCMAKE_BUILD_TYPE=Release");
if (IS_WINDOWS) {
// On Windows, append LTO flags to existing Windows-specific flags
flags.push(
"-DCMAKE_C_FLAGS=/DU_STATIC_IMPLEMENTATION -flto=full",
"-DCMAKE_CXX_FLAGS=/DU_STATIC_IMPLEMENTATION /clang:-fno-c++-static-destructors -flto=full",
"-DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded",
);
} else {
flags.push("-DCMAKE_C_FLAGS=-flto=full", "-DCMAKE_CXX_FLAGS=-flto=full");
}
break;
default: // release
flags.push("-DCMAKE_BUILD_TYPE=RelWithDebInfo");
if (IS_WINDOWS) {
flags.push("-DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded");
}
break;
}
@@ -134,17 +217,6 @@ const getBuildFlags = (config: BuildConfig) => {
const getBuildEnv = () => {
const env = { ...process.env };
const cflags = ["-ffat-lto-objects"];
const cxxflags = ["-ffat-lto-objects"];
if (IS_LINUX && buildConfig !== "lto") {
cflags.push("-Wl,--whole-archive");
cxxflags.push("-Wl,--whole-archive", "-DUSE_BUN_JSC_ADDITIONS=ON", "-DUSE_BUN_EVENT_LOOP=ON");
}
env.CFLAGS = (env.CFLAGS || "") + " " + cflags.join(" ");
env.CXXFLAGS = (env.CXXFLAGS || "") + " " + cxxflags.join(" ");
if (IS_MAC) {
env.ICU_INCLUDE_DIRS = `${HOMEBREW_PREFIX}opt/icu4c/include`;
}
@@ -179,6 +251,9 @@ function buildJSC() {
console.log(`Building JSC with configuration: ${buildConfig}`);
console.log(`Build directory: ${buildDir}`);
if (HAS_CCACHE) {
console.log(`Using ccache for faster builds: ${CCACHE}`);
}
// Create build directories
if (!existsSync(buildDir)) {

View File

@@ -14,6 +14,15 @@ import {
startGroup,
} from "./utils.mjs";
// Detect Windows ARM64 - bun may run under x64 emulation (WoW64), so check multiple indicators
const isWindowsARM64 =
isWindows &&
(process.env.PROCESSOR_ARCHITECTURE === "ARM64" ||
process.env.VSCMD_ARG_HOST_ARCH === "arm64" ||
process.env.MSYSTEM_CARCH === "aarch64" ||
(process.env.PROCESSOR_IDENTIFIER || "").includes("ARMv8") ||
process.arch === "arm64");
if (globalThis.Bun) {
await import("./glob-sources.mjs");
}
@@ -83,6 +92,23 @@ async function build(args) {
generateOptions["--toolchain"] = toolchainPath;
}
// Windows ARM64: automatically set required options
if (isWindowsARM64) {
// Use clang-cl instead of MSVC cl.exe for proper ARM64 flag support
if (!generateOptions["-DCMAKE_C_COMPILER"]) {
generateOptions["-DCMAKE_C_COMPILER"] = "clang-cl";
}
if (!generateOptions["-DCMAKE_CXX_COMPILER"]) {
generateOptions["-DCMAKE_CXX_COMPILER"] = "clang-cl";
}
// Skip codegen by default since x64 bun crashes under WoW64 emulation
// Can be overridden with -DSKIP_CODEGEN=OFF once ARM64 bun is available
if (!generateOptions["-DSKIP_CODEGEN"]) {
generateOptions["-DSKIP_CODEGEN"] = "ON";
}
console.log("Windows ARM64 detected: using clang-cl and SKIP_CODEGEN=ON");
}
const generateArgs = Object.entries(generateOptions).flatMap(([flag, value]) =>
flag.startsWith("-D") ? [`${flag}=${value}`] : [flag, value],
);

scripts/update-uucode.sh Executable file
View File

@@ -0,0 +1,82 @@
#!/bin/bash
# Updates the vendored uucode library and regenerates grapheme tables.
#
# Usage:
# ./scripts/update-uucode.sh # update from default URL
# ./scripts/update-uucode.sh /path/to/uucode # update from local directory
# ./scripts/update-uucode.sh https://url.tar.gz # update from URL
#
# After running, verify with:
# bun bd test test/js/bun/util/stringWidth.test.ts
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
BUN_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
UUCODE_DIR="$BUN_ROOT/src/deps/uucode"
ZIG="$BUN_ROOT/vendor/zig/zig"
if [ ! -x "$ZIG" ]; then
echo "error: zig not found at $ZIG"
echo " run scripts/bootstrap.sh first"
exit 1
fi
update_from_dir() {
local src="$1"
echo "Updating uucode from: $src"
rm -rf "$UUCODE_DIR"
mkdir -p "$UUCODE_DIR"
cp -r "$src"/* "$UUCODE_DIR/"
}
update_from_url() {
local url="$1"
local tmp
tmp=$(mktemp -d)
trap "rm -rf $tmp" EXIT
echo "Downloading uucode from: $url"
curl -fsSL "$url" | tar -xz -C "$tmp" --strip-components=1
update_from_dir "$tmp"
}
# Handle source argument
if [ $# -ge 1 ]; then
SOURCE="$1"
if [ -d "$SOURCE" ]; then
update_from_dir "$SOURCE"
elif [[ "$SOURCE" == http* ]]; then
update_from_url "$SOURCE"
else
echo "error: argument must be a directory or URL"
exit 1
fi
else
# Default: use the zig global cache if available
CACHED=$(find "$HOME/.cache/zig/p" -maxdepth 1 -name "uucode-*" -type d 2>/dev/null | sort -V | tail -1)
if [ -n "$CACHED" ]; then
update_from_dir "$CACHED"
else
echo "error: no uucode source specified and none found in zig cache"
echo ""
echo "usage: $0 <path-to-uucode-dir-or-url>"
exit 1
fi
fi
echo ""
echo "Regenerating grapheme tables..."
cd "$BUN_ROOT"
"$ZIG" build generate-grapheme-tables
echo ""
echo "Done. Updated files:"
echo " src/deps/uucode/ (vendored library)"
echo " src/string/immutable/grapheme_tables.zig (regenerated)"
echo ""
echo "Next steps:"
echo " 1. bun bd test test/js/bun/util/stringWidth.test.ts"
echo " 2. git add src/deps/uucode src/string/immutable/grapheme_tables.zig"
echo " 3. git commit -m 'Update uucode to <version>'"

scripts/verify-baseline-cpu.sh Executable file
View File

@@ -0,0 +1,100 @@
#!/usr/bin/env bash
set -euo pipefail
# Verify that a Bun binary doesn't use CPU instructions beyond its baseline target.
# Uses QEMU user-mode emulation with restricted CPU features.
# Any illegal instruction (SIGILL) causes exit code 132 (128 + SIGILL's signal number 4) and fails the build.
#
# QEMU must be pre-installed in the CI image (see .buildkite/Dockerfile and
# scripts/bootstrap.sh).
ARCH=""
BINARY=""
while [[ $# -gt 0 ]]; do
case $1 in
--arch) ARCH="$2"; shift 2 ;;
--binary) BINARY="$2"; shift 2 ;;
*) echo "Unknown arg: $1"; exit 1 ;;
esac
done
if [ -z "$ARCH" ] || [ -z "$BINARY" ]; then
echo "Usage: $0 --arch <x64|aarch64> --binary <path>"
exit 1
fi
if [ ! -f "$BINARY" ]; then
echo "ERROR: Binary not found: $BINARY"
exit 1
fi
# Select QEMU binary and CPU model
HOST_ARCH=$(uname -m)
if [ "$ARCH" = "x64" ]; then
QEMU_BIN="qemu-x86_64"
if [ -f "/usr/bin/qemu-x86_64-static" ]; then
QEMU_BIN="qemu-x86_64-static"
fi
QEMU_CPU="Nehalem"
CPU_DESC="Nehalem (SSE4.2, no AVX/AVX2/AVX512)"
elif [ "$ARCH" = "aarch64" ]; then
QEMU_BIN="qemu-aarch64"
if [ -f "/usr/bin/qemu-aarch64-static" ]; then
QEMU_BIN="qemu-aarch64-static"
fi
# cortex-a53 is ARMv8.0-A (no LSE atomics, no SVE). It's the most widely
# supported ARMv8.0 model across QEMU versions.
QEMU_CPU="cortex-a53"
CPU_DESC="Cortex-A53 (ARMv8.0-A+CRC, no LSE/SVE)"
else
echo "ERROR: Unknown arch: $ARCH"
exit 1
fi
if ! command -v "$QEMU_BIN" &>/dev/null; then
echo "ERROR: $QEMU_BIN not found. It must be pre-installed in the CI image."
exit 1
fi
BINARY_NAME=$(basename "$BINARY")
echo "--- Verifying $BINARY_NAME on $CPU_DESC"
echo " Binary: $BINARY"
echo " QEMU: $QEMU_BIN -cpu $QEMU_CPU"
echo " Host: $HOST_ARCH"
echo ""
run_test() {
local label="$1"
shift
echo "+++ $BINARY_NAME: $label"
if "$QEMU_BIN" -cpu "$QEMU_CPU" "$@"; then
echo " PASS"
return 0
else
local exit_code=$?
echo ""
if [ $exit_code -eq 132 ]; then
echo " FAIL: Illegal instruction (SIGILL)"
echo ""
echo " The $BINARY_NAME binary uses CPU instructions not available on $QEMU_CPU."
if [ "$ARCH" = "x64" ]; then
echo " The baseline x64 build targets Nehalem (SSE4.2)."
echo " AVX, AVX2, and AVX512 instructions are not allowed."
else
echo " The aarch64 build targets Cortex-A53 (ARMv8.0-A+CRC)."
echo " LSE atomics, SVE, and dotprod instructions are not allowed."
fi
else
echo " FAIL: exit code $exit_code"
fi
exit $exit_code
fi
}
run_test "bun --version" "$BINARY" --version
run_test "bun -e eval" "$BINARY" -e "console.log(JSON.stringify({ok:1+1}))"
echo ""
echo " All checks passed for $BINARY_NAME on $QEMU_CPU."

scripts/verify-jit-stress-qemu.sh Executable file
View File

@@ -0,0 +1,148 @@
#!/usr/bin/env bash
set -euo pipefail
# Run JSC JIT stress tests under QEMU to verify that JIT-compiled code
# doesn't use CPU instructions beyond the baseline target.
#
# This script exercises all JIT tiers (DFG, FTL, Wasm BBQ/OMG) and catches
# cases where JIT-generated code emits AVX instructions on x64 or LSE
# atomics on aarch64.
#
# See: test/js/bun/jsc-stress/ for the test fixtures.
ARCH=""
BINARY=""
while [[ $# -gt 0 ]]; do
case $1 in
--arch) ARCH="$2"; shift 2 ;;
--binary) BINARY="$2"; shift 2 ;;
*) echo "Unknown arg: $1"; exit 1 ;;
esac
done
if [ -z "$ARCH" ] || [ -z "$BINARY" ]; then
echo "Usage: $0 --arch <x64|aarch64> --binary <path>"
exit 1
fi
if [ ! -f "$BINARY" ]; then
echo "ERROR: Binary not found: $BINARY"
exit 1
fi
# Convert to absolute path for use after pushd
BINARY="$(cd "$(dirname "$BINARY")" && pwd)/$(basename "$BINARY")"
# Select QEMU binary and CPU model
if [ "$ARCH" = "x64" ]; then
QEMU_BIN="qemu-x86_64"
if [ -f "/usr/bin/qemu-x86_64-static" ]; then
QEMU_BIN="qemu-x86_64-static"
fi
QEMU_CPU="Nehalem"
CPU_DESC="Nehalem (SSE4.2, no AVX/AVX2/AVX512)"
elif [ "$ARCH" = "aarch64" ]; then
QEMU_BIN="qemu-aarch64"
if [ -f "/usr/bin/qemu-aarch64-static" ]; then
QEMU_BIN="qemu-aarch64-static"
fi
QEMU_CPU="cortex-a53"
CPU_DESC="Cortex-A53 (ARMv8.0-A+CRC, no LSE/SVE)"
else
echo "ERROR: Unknown arch: $ARCH"
exit 1
fi
if ! command -v "$QEMU_BIN" &>/dev/null; then
echo "ERROR: $QEMU_BIN not found. It must be pre-installed in the CI image."
exit 1
fi
BINARY_NAME=$(basename "$BINARY")
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
FIXTURES_DIR="$REPO_ROOT/test/js/bun/jsc-stress/fixtures"
WASM_FIXTURES_DIR="$FIXTURES_DIR/wasm"
PRELOAD_PATH="$REPO_ROOT/test/js/bun/jsc-stress/preload.js"
echo "--- Running JSC JIT stress tests on $CPU_DESC"
echo " Binary: $BINARY"
echo " QEMU: $QEMU_BIN -cpu $QEMU_CPU"
echo ""
SIGILL_FAILURES=0
OTHER_FAILURES=0
PASSED=0
run_fixture() {
local fixture="$1"
local fixture_name
fixture_name=$(basename "$fixture")
echo "+++ $fixture_name"
if "$QEMU_BIN" -cpu "$QEMU_CPU" "$BINARY" --preload "$PRELOAD_PATH" "$fixture" 2>&1; then
echo " PASS"
((PASSED++))
return 0
else
local exit_code=$?
if [ $exit_code -eq 132 ]; then
echo " FAIL: Illegal instruction (SIGILL)"
echo ""
echo " JIT-compiled code in $fixture_name uses CPU instructions not available on $QEMU_CPU."
if [ "$ARCH" = "x64" ]; then
echo " The baseline x64 build targets Nehalem (SSE4.2)."
echo " JIT must not emit AVX, AVX2, or AVX512 instructions."
else
echo " The aarch64 build targets Cortex-A53 (ARMv8.0-A+CRC)."
echo " JIT must not emit LSE atomics, SVE, or dotprod instructions."
fi
((SIGILL_FAILURES++))
else
# Non-SIGILL failures are warnings (test issues, not CPU instruction issues)
echo " WARN: exit code $exit_code (not a CPU instruction issue)"
((OTHER_FAILURES++))
fi
return $exit_code
fi
}
# Run JS fixtures (DFG/FTL)
echo "--- JS fixtures (DFG/FTL)"
for fixture in "$FIXTURES_DIR"/*.js; do
if [ -f "$fixture" ]; then
run_fixture "$fixture" || true
fi
done
# Run Wasm fixtures (BBQ/OMG)
echo "--- Wasm fixtures (BBQ/OMG)"
for fixture in "$WASM_FIXTURES_DIR"/*.js; do
if [ -f "$fixture" ]; then
# Wasm tests need to run from the wasm fixtures directory
# because they reference .wasm files relative to the script
pushd "$WASM_FIXTURES_DIR" > /dev/null
run_fixture "$fixture" || true
popd > /dev/null
fi
done
echo ""
echo "--- Summary"
echo " Passed: $PASSED"
echo " SIGILL failures: $SIGILL_FAILURES"
echo " Other failures: $OTHER_FAILURES (warnings, not CPU instruction issues)"
echo ""
if [ $SIGILL_FAILURES -gt 0 ]; then
echo " FAILED: JIT-generated code uses unsupported CPU instructions."
exit 1
fi
if [ $OTHER_FAILURES -gt 0 ]; then
echo " Some tests failed for reasons unrelated to CPU instructions."
echo " These are warnings and do not indicate JIT instruction issues."
fi
echo " All JIT stress tests passed on $QEMU_CPU (no SIGILL)."

View File

@@ -3,6 +3,10 @@
$ErrorActionPreference = "Stop"
# Detect system architecture
$script:IsARM64 = [System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq [System.Runtime.InteropServices.Architecture]::Arm64
$script:VsArch = if ($script:IsARM64) { "arm64" } else { "amd64" }
if($env:VSINSTALLDIR -eq $null) {
Write-Host "Loading Visual Studio environment, this may take a second..."
@@ -23,14 +27,14 @@ if($env:VSINSTALLDIR -eq $null) {
Push-Location $vsDir
try {
$vsShell = (Join-Path -Path $vsDir -ChildPath "Common7\Tools\Launch-VsDevShell.ps1")
. $vsShell -Arch amd64 -HostArch amd64
. $vsShell -Arch $script:VsArch -HostArch $script:VsArch
} finally {
Pop-Location
}
}
if($env:VSCMD_ARG_TGT_ARCH -eq "x86") {
throw "Visual Studio environment is targeting 32 bit, but only 64 bit is supported."
throw "Visual Studio environment is targeting 32 bit x86, but only 64-bit architectures (x64/arm64) are supported."
}
if ($args.Count -gt 0) {

View File

@@ -15,6 +15,7 @@ hash: u64 = 0,
is_executable: bool = false,
source_map_index: u32 = std.math.maxInt(u32),
bytecode_index: u32 = std.math.maxInt(u32),
module_info_index: u32 = std.math.maxInt(u32),
output_kind: jsc.API.BuildArtifact.OutputKind,
/// Relative
dest_path: []const u8 = "",
@@ -210,6 +211,7 @@ pub const Options = struct {
hash: ?u64 = null,
source_map_index: ?u32 = null,
bytecode_index: ?u32 = null,
module_info_index: ?u32 = null,
output_path: string,
source_index: Index.Optional = .none,
size: ?usize = null,
@@ -251,6 +253,7 @@ pub fn init(options: Options) OutputFile {
.hash = options.hash orelse 0,
.output_kind = options.output_kind,
.bytecode_index = options.bytecode_index orelse std.math.maxInt(u32),
.module_info_index = options.module_info_index orelse std.math.maxInt(u32),
.source_map_index = options.source_map_index orelse std.math.maxInt(u32),
.is_executable = options.is_executable,
.value = switch (options.data) {

View File

@@ -92,6 +92,10 @@ pub const StandaloneModuleGraph = struct {
contents: Schema.StringPointer = .{},
sourcemap: Schema.StringPointer = .{},
bytecode: Schema.StringPointer = .{},
module_info: Schema.StringPointer = .{},
/// The file path used when generating bytecode (e.g., "B:/~BUN/root/app.js").
/// Must match exactly at runtime for bytecode cache hits.
bytecode_origin_path: Schema.StringPointer = .{},
encoding: Encoding = .latin1,
loader: bun.options.Loader = .file,
module_format: ModuleFormat = .none,
@@ -159,6 +163,10 @@ pub const StandaloneModuleGraph = struct {
encoding: Encoding = .binary,
wtf_string: bun.String = bun.String.empty,
bytecode: []u8 = "",
module_info: []u8 = "",
/// The file path used when generating bytecode (e.g., "B:/~BUN/root/app.js").
/// Must match exactly at runtime for bytecode cache hits.
bytecode_origin_path: []const u8 = "",
module_format: ModuleFormat = .none,
side: FileSide = .server,
@@ -333,6 +341,8 @@ pub const StandaloneModuleGraph = struct {
else
.none,
.bytecode = if (module.bytecode.length > 0) @constCast(sliceTo(raw_bytes, module.bytecode)) else &.{},
.module_info = if (module.module_info.length > 0) @constCast(sliceTo(raw_bytes, module.module_info)) else &.{},
.bytecode_origin_path = if (module.bytecode_origin_path.length > 0) sliceToZ(raw_bytes, module.bytecode_origin_path) else "",
.module_format = module.module_format,
.side = module.side,
},
@@ -382,6 +392,8 @@ pub const StandaloneModuleGraph = struct {
} else if (output_file.output_kind == .bytecode) {
// Allocate up to 256 byte alignment for bytecode
string_builder.cap += (output_file.value.buffer.bytes.len + 255) / 256 * 256 + 256;
} else if (output_file.output_kind == .module_info) {
string_builder.cap += output_file.value.buffer.bytes.len;
} else {
if (entry_point_id == null) {
if (output_file.side == null or output_file.side.? == .server) {
@@ -429,21 +441,67 @@ pub const StandaloneModuleGraph = struct {
const bytecode: StringPointer = brk: {
if (output_file.bytecode_index != std.math.maxInt(u32)) {
// Use up to 256 byte alignment for bytecode
// Not aligning it correctly will cause a runtime assertion error, or a segfault.
// Bytecode alignment for JSC bytecode cache deserialization.
// Not aligning correctly causes a runtime assertion error or segfault.
//
// PLATFORM-SPECIFIC ALIGNMENT:
// - PE (Windows) and Mach-O (macOS): The module graph data is embedded in
// a dedicated section with an 8-byte size header. At runtime, the section
// is memory-mapped at a page-aligned address (hence 128-byte aligned).
// The data buffer starts 8 bytes after the section start.
// For bytecode at offset O to be 128-byte aligned:
// (section_va + 8 + O) % 128 == 0
// => O % 128 == 120
//
// - ELF (Linux): The module graph data is appended to the executable and
// read into a heap-allocated buffer at runtime. The allocator provides
// natural alignment, and there's no 8-byte section header offset.
// However, using target_mod=120 is still safe because:
// - If the buffer is 128-aligned: bytecode at offset 120 is at (128n + 120),
// which when loaded at a 128-aligned address gives proper alignment.
// - The extra 120 bytes of padding is acceptable overhead.
//
// This alignment strategy (target_mod=120) works for all platforms because
// it's the worst-case offset needed for the 8-byte header scenario.
const bytecode = output_files[output_file.bytecode_index].value.buffer.bytes;
const aligned = std.mem.alignInSlice(string_builder.writable(), 128).?;
@memcpy(aligned[0..bytecode.len], bytecode[0..bytecode.len]);
const unaligned_space = aligned[bytecode.len..];
const offset = @intFromPtr(aligned.ptr) - @intFromPtr(string_builder.ptr.?);
const current_offset = string_builder.len;
// Calculate padding so that (current_offset + padding) % 128 == 120
// This accounts for the 8-byte section header on PE/Mach-O platforms.
const target_mod: usize = 128 - @sizeOf(u64); // 120 = accounts for 8-byte header
const current_mod = current_offset % 128;
const padding = if (current_mod <= target_mod)
target_mod - current_mod
else
128 - current_mod + target_mod;
// Zero the padding bytes to ensure deterministic output
const writable = string_builder.writable();
@memset(writable[0..padding], 0);
string_builder.len += padding;
const aligned_offset = string_builder.len;
const writable_after_padding = string_builder.writable();
@memcpy(writable_after_padding[0..bytecode.len], bytecode[0..bytecode.len]);
const unaligned_space = writable_after_padding[bytecode.len..];
const len = bytecode.len + @min(unaligned_space.len, 128);
string_builder.len += len;
break :brk StringPointer{ .offset = @truncate(offset), .length = @truncate(len) };
break :brk StringPointer{ .offset = @truncate(aligned_offset), .length = @truncate(len) };
} else {
break :brk .{};
}
};
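The padding arithmetic above, restated as a standalone sketch (TypeScript for illustration; the `8` is the section size header on PE/Mach-O):

```ts
// Choose padding so (currentOffset + padding) % 128 === 120, which makes
// section_va + 8 + offset a multiple of 128 once the 8-byte header is counted.
function bytecodePadding(currentOffset: number): number {
  const targetMod = 128 - 8; // 120
  const currentMod = currentOffset % 128;
  return currentMod <= targetMod
    ? targetMod - currentMod
    : 128 - currentMod + targetMod;
}

bytecodePadding(0);   // 120
bytecodePadding(120); // 0
bytecodePadding(125); // 123, since (125 + 123) % 128 === 120
```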
// Embed module_info for ESM bytecode
const module_info: StringPointer = brk: {
if (output_file.module_info_index != std.math.maxInt(u32)) {
const mi_bytes = output_files[output_file.module_info_index].value.buffer.bytes;
const offset = string_builder.len;
const writable = string_builder.writable();
@memcpy(writable[0..mi_bytes.len], mi_bytes[0..mi_bytes.len]);
string_builder.len += mi_bytes.len;
break :brk StringPointer{ .offset = @truncate(offset), .length = @truncate(mi_bytes.len) };
}
break :brk .{};
};
if (comptime bun.Environment.is_canary or bun.Environment.isDebug) {
if (bun.env_var.BUN_FEATURE_FLAG_DUMP_CODE.get()) |dump_code_dir| {
const buf = bun.path_buffer_pool.get();
@@ -465,6 +523,13 @@ pub const StandaloneModuleGraph = struct {
}
}
// When there's bytecode, store the bytecode output file's path as bytecode_origin_path.
// This path was used to generate the bytecode cache and must match at runtime.
const bytecode_origin_path: StringPointer = if (output_file.bytecode_index != std.math.maxInt(u32))
string_builder.appendCountZ(output_files[output_file.bytecode_index].dest_path)
else
.{};
var module = CompiledModuleGraphFile{
.name = string_builder.fmtAppendCountZ("{s}{s}", .{
prefix,
@@ -482,6 +547,8 @@ pub const StandaloneModuleGraph = struct {
else => .none,
} else .none,
.bytecode = bytecode,
.module_info = module_info,
.bytecode_origin_path = bytecode_origin_path,
.side = switch (output_file.side orelse .server) {
.server => .server,
.client => .client,

View File

@@ -11,6 +11,7 @@ pub const AllocationScopeIn = allocation_scope.AllocationScopeIn;
pub const NullableAllocator = @import("./allocators/NullableAllocator.zig");
pub const MaxHeapAllocator = @import("./allocators/MaxHeapAllocator.zig");
pub const LinuxMemFdAllocator = @import("./allocators/LinuxMemFdAllocator.zig");
pub const BufferFallbackAllocator = @import("./allocators/BufferFallbackAllocator.zig");
pub const MaybeOwned = @import("./allocators/maybe_owned.zig").MaybeOwned;
pub fn isSliceInBufferT(comptime T: type, slice: []const T, buffer: []const T) bool {

View File

@@ -0,0 +1,85 @@
/// An allocator that attempts to allocate from a provided buffer first,
/// falling back to another allocator when the buffer is exhausted.
/// Unlike `std.heap.StackFallbackAllocator`, this does not own the buffer.
const BufferFallbackAllocator = @This();
#fallback_allocator: Allocator,
#fixed_buffer_allocator: FixedBufferAllocator,
pub fn init(buffer: []u8, fallback_allocator: Allocator) BufferFallbackAllocator {
return .{
.#fallback_allocator = fallback_allocator,
.#fixed_buffer_allocator = FixedBufferAllocator.init(buffer),
};
}
pub fn allocator(self: *BufferFallbackAllocator) Allocator {
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.remap = remap,
.free = free,
},
};
}
fn alloc(ctx: *anyopaque, len: usize, alignment: std.mem.Alignment, ra: usize) ?[*]u8 {
const self: *BufferFallbackAllocator = @ptrCast(@alignCast(ctx));
return FixedBufferAllocator.alloc(
&self.#fixed_buffer_allocator,
len,
alignment,
ra,
) orelse self.#fallback_allocator.rawAlloc(len, alignment, ra);
}
fn resize(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, new_len: usize, ra: usize) bool {
const self: *BufferFallbackAllocator = @ptrCast(@alignCast(ctx));
if (self.#fixed_buffer_allocator.ownsPtr(buf.ptr)) {
return FixedBufferAllocator.resize(
&self.#fixed_buffer_allocator,
buf,
alignment,
new_len,
ra,
);
}
return self.#fallback_allocator.rawResize(buf, alignment, new_len, ra);
}
fn remap(ctx: *anyopaque, memory: []u8, alignment: std.mem.Alignment, new_len: usize, ra: usize) ?[*]u8 {
const self: *BufferFallbackAllocator = @ptrCast(@alignCast(ctx));
if (self.#fixed_buffer_allocator.ownsPtr(memory.ptr)) {
return FixedBufferAllocator.remap(
&self.#fixed_buffer_allocator,
memory,
alignment,
new_len,
ra,
);
}
return self.#fallback_allocator.rawRemap(memory, alignment, new_len, ra);
}
fn free(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, ra: usize) void {
const self: *BufferFallbackAllocator = @ptrCast(@alignCast(ctx));
if (self.#fixed_buffer_allocator.ownsPtr(buf.ptr)) {
return FixedBufferAllocator.free(
&self.#fixed_buffer_allocator,
buf,
alignment,
ra,
);
}
return self.#fallback_allocator.rawFree(buf, alignment, ra);
}
pub fn reset(self: *BufferFallbackAllocator) void {
self.#fixed_buffer_allocator.reset();
}
const std = @import("std");
const Allocator = std.mem.Allocator;
const FixedBufferAllocator = std.heap.FixedBufferAllocator;

View File

@@ -2,7 +2,10 @@
const Self = @This();
#heap: if (safety_checks) Owned(*DebugHeap) else *mimalloc.Heap,
const safety_checks = bun.Environment.isDebug or bun.Environment.enable_asan;
#heap: *mimalloc.Heap,
thread_id: if (safety_checks) std.Thread.Id else void,
/// Uses the default thread-local heap. This type is zero-sized.
///
@@ -20,18 +23,18 @@ pub const Default = struct {
///
/// This type is a `GenericAllocator`; see `src/allocators.zig`.
pub const Borrowed = struct {
#heap: BorrowedHeap,
#heap: *mimalloc.Heap,
pub fn allocator(self: Borrowed) std.mem.Allocator {
return .{ .ptr = self.#heap, .vtable = &c_allocator_vtable };
return .{ .ptr = self.#heap, .vtable = c_allocator_vtable };
}
pub fn getDefault() Borrowed {
return .{ .#heap = getThreadHeap() };
return .{ .#heap = mimalloc.mi_heap_main() };
}
pub fn gc(self: Borrowed) void {
mimalloc.mi_heap_collect(self.getMimallocHeap(), false);
mimalloc.mi_heap_collect(self.#heap, false);
}
pub fn helpCatchMemoryIssues(self: Borrowed) void {
@@ -41,30 +44,17 @@ pub const Borrowed = struct {
}
}
pub fn ownsPtr(self: Borrowed, ptr: *const anyopaque) bool {
return mimalloc.mi_heap_check_owned(self.getMimallocHeap(), ptr);
}
fn fromOpaque(ptr: *anyopaque) Borrowed {
return .{ .#heap = @ptrCast(@alignCast(ptr)) };
}
fn getMimallocHeap(self: Borrowed) *mimalloc.Heap {
return if (comptime safety_checks) self.#heap.inner else self.#heap;
}
fn assertThreadLock(self: Borrowed) void {
if (comptime safety_checks) self.#heap.thread_lock.assertLocked();
}
fn alignedAlloc(self: Borrowed, len: usize, alignment: Alignment) ?[*]u8 {
log("Malloc: {d}\n", .{len});
const heap = self.getMimallocHeap();
const ptr: ?*anyopaque = if (mimalloc.mustUseAlignedAlloc(alignment))
mimalloc.mi_heap_malloc_aligned(heap, len, alignment.toByteUnits())
mimalloc.mi_heap_malloc_aligned(self.#heap, len, alignment.toByteUnits())
else
mimalloc.mi_heap_malloc(heap, len);
mimalloc.mi_heap_malloc(self.#heap, len);
if (comptime bun.Environment.isDebug) {
const usable = mimalloc.mi_malloc_usable_size(ptr);
@@ -89,42 +79,17 @@ pub const Borrowed = struct {
}
};
const BorrowedHeap = if (safety_checks) *DebugHeap else *mimalloc.Heap;
const DebugHeap = struct {
inner: *mimalloc.Heap,
thread_lock: bun.safety.ThreadLock,
pub const deinit = void;
};
threadlocal var thread_heap: if (safety_checks) ?DebugHeap else void = if (safety_checks) null;
fn getThreadHeap() BorrowedHeap {
if (comptime !safety_checks) return mimalloc.mi_heap_get_default();
if (thread_heap == null) {
thread_heap = .{
.inner = mimalloc.mi_heap_get_default(),
.thread_lock = .initLocked(),
};
}
return &thread_heap.?;
}
const log = bun.Output.scoped(.mimalloc, .hidden);
pub fn allocator(self: Self) std.mem.Allocator {
self.assertThreadOwnership();
return self.borrow().allocator();
}
pub fn borrow(self: Self) Borrowed {
return .{ .#heap = if (comptime safety_checks) self.#heap.get() else self.#heap };
return .{ .#heap = self.#heap };
}
/// Internally, mimalloc calls mi_heap_get_default()
/// to get the default heap.
/// It uses pthread_getspecific to do that.
/// We can save those extra calls if we just do it once in here
pub fn getThreadLocalDefault() std.mem.Allocator {
if (bun.Environment.enable_asan) return bun.default_allocator;
return Borrowed.getDefault().allocator();
@@ -157,22 +122,15 @@ pub fn dumpStats(_: Self) void {
}
pub fn deinit(self: *Self) void {
const mimalloc_heap = self.borrow().getMimallocHeap();
if (comptime safety_checks) {
self.#heap.deinit();
}
mimalloc.mi_heap_destroy(mimalloc_heap);
mimalloc.mi_heap_destroy(self.#heap);
self.* = undefined;
}
pub fn init() Self {
const mimalloc_heap = mimalloc.mi_heap_new() orelse bun.outOfMemory();
if (comptime !safety_checks) return .{ .#heap = mimalloc_heap };
const heap: Owned(*DebugHeap) = .new(.{
.inner = mimalloc_heap,
.thread_lock = .initLocked(),
});
return .{ .#heap = heap };
return .{
.#heap = mimalloc.mi_heap_new() orelse bun.outOfMemory(),
.thread_id = if (safety_checks) std.Thread.getCurrentId() else {},
};
}
pub fn gc(self: Self) void {
@@ -183,8 +141,16 @@ pub fn helpCatchMemoryIssues(self: Self) void {
self.borrow().helpCatchMemoryIssues();
}
pub fn ownsPtr(self: Self, ptr: *const anyopaque) bool {
return self.borrow().ownsPtr(ptr);
fn assertThreadOwnership(self: Self) void {
if (comptime safety_checks) {
const current_thread = std.Thread.getCurrentId();
if (current_thread != self.thread_id) {
std.debug.panic(
"MimallocArena used from wrong thread: arena belongs to thread {d}, but current thread is {d}",
.{ self.thread_id, current_thread },
);
}
}
}
fn alignedAllocSize(ptr: [*]u8) usize {
@@ -193,13 +159,10 @@ fn alignedAllocSize(ptr: [*]u8) usize {
fn vtable_alloc(ptr: *anyopaque, len: usize, alignment: Alignment, _: usize) ?[*]u8 {
const self: Borrowed = .fromOpaque(ptr);
self.assertThreadLock();
return self.alignedAlloc(len, alignment);
}
fn vtable_resize(ptr: *anyopaque, buf: []u8, _: Alignment, new_len: usize, _: usize) bool {
const self: Borrowed = .fromOpaque(ptr);
self.assertThreadLock();
fn vtable_resize(_: *anyopaque, buf: []u8, _: Alignment, new_len: usize, _: usize) bool {
return mimalloc.mi_expand(buf.ptr, new_len) != null;
}
@@ -223,39 +186,17 @@ fn vtable_free(
}
}
- /// Attempt to expand or shrink memory, allowing relocation.
- ///
- /// `memory.len` must equal the length requested from the most recent
- /// successful call to `alloc`, `resize`, or `remap`. `alignment` must
- /// equal the same value that was passed as the `alignment` parameter to
- /// the original `alloc` call.
- ///
- /// A non-`null` return value indicates the resize was successful. The
- /// allocation may have same address, or may have been relocated. In either
- /// case, the allocation now has size of `new_len`. A `null` return value
- /// indicates that the resize would be equivalent to allocating new memory,
- /// copying the bytes from the old memory, and then freeing the old memory.
- /// In such case, it is more efficient for the caller to perform the copy.
- ///
- /// `new_len` must be greater than zero.
- ///
- /// `ret_addr` is optionally provided as the first return address of the
- /// allocation call stack. If the value is `0` it means no return address
- /// has been provided.
fn vtable_remap(ptr: *anyopaque, buf: []u8, alignment: Alignment, new_len: usize, _: usize) ?[*]u8 {
const self: Borrowed = .fromOpaque(ptr);
- self.assertThreadLock();
- const heap = self.getMimallocHeap();
- const aligned_size = alignment.toByteUnits();
- const value = mimalloc.mi_heap_realloc_aligned(heap, buf.ptr, new_len, aligned_size);
+ const value = mimalloc.mi_heap_realloc_aligned(self.#heap, buf.ptr, new_len, alignment.toByteUnits());
return @ptrCast(value);
}
pub fn isInstance(alloc: std.mem.Allocator) bool {
- return alloc.vtable == &c_allocator_vtable;
+ return alloc.vtable == c_allocator_vtable;
}
- const c_allocator_vtable = std.mem.Allocator.VTable{
+ const c_allocator_vtable = &std.mem.Allocator.VTable{
.alloc = vtable_alloc,
.resize = vtable_resize,
.remap = vtable_remap,
@@ -268,5 +209,3 @@ const Alignment = std.mem.Alignment;
const bun = @import("bun");
const assert = bun.assert;
const mimalloc = bun.mimalloc;
- const Owned = bun.ptr.Owned;
const safety_checks = bun.Environment.ci_assert;

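The new safety model above replaces the lock-guarded DebugHeap wrapper with a thread id captured at init and checked on every use. A minimal standalone sketch of that pattern (hypothetical `Arena` type, not Bun's actual code):

```zig
const std = @import("std");

// Hypothetical arena demonstrating the ownership check: remember the
// creating thread's id, then panic if any other thread touches the arena
// (mirrors assertThreadOwnership in the diff above).
const Arena = struct {
    thread_id: std.Thread.Id,

    fn init() Arena {
        return .{ .thread_id = std.Thread.getCurrentId() };
    }

    fn assertOwned(self: *const Arena) void {
        const current = std.Thread.getCurrentId();
        if (current != self.thread_id) {
            std.debug.panic("arena used from thread {d}, owned by {d}", .{ current, self.thread_id });
        }
    }
};
```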

@@ -60,17 +60,29 @@ pub const Heap = opaque {
return mi_heap_realloc(self, p, newsize);
}
- pub fn isOwned(self: *Heap, p: ?*anyopaque) bool {
-     return mi_heap_check_owned(self, p);
+ pub fn isOwned(self: *Heap, p: ?*const anyopaque) bool {
+     return mi_heap_contains(self, p);
}
};
pub extern fn mi_heap_new() ?*Heap;
pub extern fn mi_heap_delete(heap: *Heap) void;
pub extern fn mi_heap_destroy(heap: *Heap) void;
pub extern fn mi_heap_set_default(heap: *Heap) *Heap;
pub extern fn mi_heap_get_default() *Heap;
pub extern fn mi_heap_get_backing() *Heap;
pub extern fn mi_heap_collect(heap: *Heap, force: bool) void;
pub extern fn mi_heap_main() *Heap;
+ // Thread-local heap (theap) API - new in mimalloc v3
+ pub const THeap = opaque {};
+ pub extern fn mi_theap_get_default() *THeap;
+ pub extern fn mi_theap_set_default(theap: *THeap) *THeap;
+ pub extern fn mi_theap_collect(theap: *THeap, force: bool) void;
+ pub extern fn mi_theap_malloc(theap: *THeap, size: usize) ?*anyopaque;
+ pub extern fn mi_theap_zalloc(theap: *THeap, size: usize) ?*anyopaque;
+ pub extern fn mi_theap_calloc(theap: *THeap, count: usize, size: usize) ?*anyopaque;
+ pub extern fn mi_theap_malloc_small(theap: *THeap, size: usize) ?*anyopaque;
+ pub extern fn mi_theap_malloc_aligned(theap: *THeap, size: usize, alignment: usize) ?*anyopaque;
+ pub extern fn mi_theap_realloc(theap: *THeap, p: ?*anyopaque, newsize: usize) ?*anyopaque;
+ pub extern fn mi_theap_destroy(theap: *THeap) void;
+ pub extern fn mi_heap_theap(heap: *Heap) *THeap;
pub extern fn mi_heap_malloc(heap: *Heap, size: usize) ?*anyopaque;
pub extern fn mi_heap_zalloc(heap: *Heap, size: usize) ?*anyopaque;
pub extern fn mi_heap_calloc(heap: *Heap, count: usize, size: usize) ?*anyopaque;
@@ -102,8 +114,7 @@ pub extern fn mi_heap_rezalloc_aligned(heap: *Heap, p: ?*anyopaque, newsize: usi
pub extern fn mi_heap_rezalloc_aligned_at(heap: *Heap, p: ?*anyopaque, newsize: usize, alignment: usize, offset: usize) ?*anyopaque;
pub extern fn mi_heap_recalloc_aligned(heap: *Heap, p: ?*anyopaque, newcount: usize, size: usize, alignment: usize) ?*anyopaque;
pub extern fn mi_heap_recalloc_aligned_at(heap: *Heap, p: ?*anyopaque, newcount: usize, size: usize, alignment: usize, offset: usize) ?*anyopaque;
pub extern fn mi_heap_contains_block(heap: *Heap, p: *const anyopaque) bool;
- pub extern fn mi_heap_check_owned(heap: *Heap, p: *const anyopaque) bool;
+ pub extern fn mi_heap_contains(heap: *const Heap, p: ?*const anyopaque) bool;
pub extern fn mi_check_owned(p: ?*const anyopaque) bool;
pub const struct_mi_heap_area_s = extern struct {
blocks: ?*anyopaque,

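For orientation, a hedged usage sketch of the per-heap API declared above (assuming mimalloc is linked): a private heap lets every allocation it owns be released in one call.

```zig
// Sketch only: allocate from a private heap, then destroy the heap,
// which releases every block it still owns in a single operation.
fn heapExample() void {
    const heap = mi_heap_new() orelse return;
    // mi_heap_destroy frees the heap and all outstanding allocations;
    // mi_heap_delete would instead migrate live blocks to the default heap.
    defer mi_heap_destroy(heap);

    const block = mi_heap_malloc(heap, 64) orelse return;
    _ = block; // no individual free needed before mi_heap_destroy
}
```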

@@ -0,0 +1,513 @@
pub const RecordKind = enum(u8) {
/// var_name
declared_variable,
/// let_name
lexical_variable,
/// module_name, import_name, local_name
import_info_single,
/// module_name, import_name, local_name
import_info_single_type_script,
/// module_name, import_name = '*', local_name
import_info_namespace,
/// export_name, import_name, module_name
export_info_indirect,
/// export_name, local_name, padding (for local => indirect conversion)
export_info_local,
/// export_name, module_name
export_info_namespace,
/// module_name
export_info_star,
_,
pub fn len(record: RecordKind) !usize {
return switch (record) {
.declared_variable, .lexical_variable => 1,
.import_info_single => 3,
.import_info_single_type_script => 3,
.import_info_namespace => 3,
.export_info_indirect => 3,
.export_info_local => 3,
.export_info_namespace => 2,
.export_info_star => 1,
else => return error.InvalidRecordKind,
};
}
};
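To make the encoding concrete, a worked example (names illustrative) of how one import statement lands in the two parallel streams, `record_kinds` and the `StringID` buffer:

```zig
// `import { a as b } from "m"` appends one record:
//
//   record_kinds: [ .import_info_single ]        // kind.len() == 3
//   buffer:       [ id("m"), id("a"), id("b") ]  // module, import, local
//
// Readers advance through `buffer` by kind.len() entries per record,
// which is why an unknown kind must be a hard error (InvalidRecordKind):
// the stream cannot be re-synchronized after a bad length.
```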
pub const Flags = packed struct(u8) {
contains_import_meta: bool = false,
is_typescript: bool = false,
_padding: u6 = 0,
};
pub const ModuleInfoDeserialized = struct {
strings_buf: []const u8,
strings_lens: []align(1) const u32,
requested_modules_keys: []align(1) const StringID,
requested_modules_values: []align(1) const ModuleInfo.FetchParameters,
buffer: []align(1) const StringID,
record_kinds: []align(1) const RecordKind,
flags: Flags,
owner: union(enum) {
module_info,
allocated_slice: struct {
slice: []const u8,
allocator: std.mem.Allocator,
},
},
pub fn deinit(self: *ModuleInfoDeserialized) void {
switch (self.owner) {
.module_info => {
const mi: *ModuleInfo = @fieldParentPtr("_deserialized", self);
mi.destroy();
},
.allocated_slice => |as| {
as.allocator.free(as.slice);
as.allocator.destroy(self);
},
}
}
inline fn eat(rem: *[]const u8, len: usize) ![]const u8 {
if (rem.*.len < len) return error.BadModuleInfo;
const res = rem.*[0..len];
rem.* = rem.*[len..];
return res;
}
inline fn eatC(rem: *[]const u8, comptime len: usize) !*const [len]u8 {
if (rem.*.len < len) return error.BadModuleInfo;
const res = rem.*[0..len];
rem.* = rem.*[len..];
return res;
}
pub fn create(source: []const u8, gpa: std.mem.Allocator) !*ModuleInfoDeserialized {
const duped = try gpa.dupe(u8, source);
errdefer gpa.free(duped);
var rem: []const u8 = duped;
const res = try gpa.create(ModuleInfoDeserialized);
errdefer gpa.destroy(res);
const record_kinds_len = std.mem.readInt(u32, try eatC(&rem, 4), .little);
const record_kinds = std.mem.bytesAsSlice(RecordKind, try eat(&rem, record_kinds_len * @sizeOf(RecordKind)));
_ = try eat(&rem, (4 - (record_kinds_len % 4)) % 4); // alignment padding
const buffer_len = std.mem.readInt(u32, try eatC(&rem, 4), .little);
const buffer = std.mem.bytesAsSlice(StringID, try eat(&rem, buffer_len * @sizeOf(StringID)));
const requested_modules_len = std.mem.readInt(u32, try eatC(&rem, 4), .little);
const requested_modules_keys = std.mem.bytesAsSlice(StringID, try eat(&rem, requested_modules_len * @sizeOf(StringID)));
const requested_modules_values = std.mem.bytesAsSlice(ModuleInfo.FetchParameters, try eat(&rem, requested_modules_len * @sizeOf(ModuleInfo.FetchParameters)));
const flags: Flags = @bitCast((try eatC(&rem, 1))[0]);
_ = try eat(&rem, 3); // alignment padding
const strings_len = std.mem.readInt(u32, try eatC(&rem, 4), .little);
const strings_lens = std.mem.bytesAsSlice(u32, try eat(&rem, strings_len * @sizeOf(u32)));
const strings_buf = rem;
res.* = .{
.strings_buf = strings_buf,
.strings_lens = strings_lens,
.requested_modules_keys = requested_modules_keys,
.requested_modules_values = requested_modules_values,
.buffer = buffer,
.record_kinds = record_kinds,
.flags = flags,
.owner = .{ .allocated_slice = .{
.slice = duped,
.allocator = gpa,
} },
};
return res;
}
/// Wrapper around `create` for use when loading from a cache (transpiler cache or standalone module graph).
/// Returns `null` instead of panicking on corrupt/truncated data.
pub fn createFromCachedRecord(source: []const u8, gpa: std.mem.Allocator) ?*ModuleInfoDeserialized {
return create(source, gpa) catch |e| switch (e) {
error.OutOfMemory => bun.outOfMemory(),
error.BadModuleInfo => null,
};
}
pub fn serialize(self: *const ModuleInfoDeserialized, writer: anytype) !void {
try writer.writeInt(u32, @truncate(self.record_kinds.len), .little);
try writer.writeAll(std.mem.sliceAsBytes(self.record_kinds));
try writer.writeByteNTimes(0, (4 - (self.record_kinds.len % 4)) % 4); // alignment padding
try writer.writeInt(u32, @truncate(self.buffer.len), .little);
try writer.writeAll(std.mem.sliceAsBytes(self.buffer));
try writer.writeInt(u32, @truncate(self.requested_modules_keys.len), .little);
try writer.writeAll(std.mem.sliceAsBytes(self.requested_modules_keys));
try writer.writeAll(std.mem.sliceAsBytes(self.requested_modules_values));
try writer.writeByte(@bitCast(self.flags));
try writer.writeByteNTimes(0, 3); // alignment padding
try writer.writeInt(u32, @truncate(self.strings_lens.len), .little);
try writer.writeAll(std.mem.sliceAsBytes(self.strings_lens));
try writer.writeAll(self.strings_buf);
}
};
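Taken together, `serialize` and `create` define a little-endian, 4-byte-aligned layout. Reconstructed from the code above:

```zig
// [record_kinds_len: u32]
// [record_kinds:     record_kinds_len * 1 byte]   // RecordKind is enum(u8)
// [padding to a 4-byte boundary]
// [buffer_len: u32]
// [buffer:           buffer_len * 4 bytes]        // StringID is enum(u32)
// [requested_modules_len: u32]
// [keys:             requested_modules_len * 4 bytes]
// [values:           requested_modules_len * 4 bytes]
// [flags: u8] [padding: 3 bytes]
// [strings_len: u32]
// [strings_lens:     strings_len * 4 bytes]
// [strings_buf:      all remaining bytes]         // WTF-8 string data
```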
const StringMapKey = enum(u32) {
_,
};
pub const StringContext = struct {
strings_buf: []const u8,
strings_lens: []const u32,
pub fn hash(_: @This(), s: []const u8) u32 {
return @as(u32, @truncate(std.hash.Wyhash.hash(0, s)));
}
pub fn eql(self: @This(), fetch_key: []const u8, item_key: StringMapKey, item_i: usize) bool {
return bun.strings.eqlLong(fetch_key, self.strings_buf[@intFromEnum(item_key)..][0..self.strings_lens[item_i]], true);
}
};
pub const ModuleInfo = struct {
/// all strings in wtf-8. index in hashmap = StringID
gpa: std.mem.Allocator,
strings_map: std.ArrayHashMapUnmanaged(StringMapKey, void, void, true),
strings_buf: std.ArrayListUnmanaged(u8),
strings_lens: std.ArrayListUnmanaged(u32),
requested_modules: std.AutoArrayHashMap(StringID, FetchParameters),
buffer: std.ArrayListUnmanaged(StringID),
record_kinds: std.ArrayListUnmanaged(RecordKind),
flags: Flags,
exported_names: std.AutoArrayHashMapUnmanaged(StringID, void),
finalized: bool = false,
/// only initialized after .finalize() is called
_deserialized: ModuleInfoDeserialized,
pub fn asDeserialized(self: *ModuleInfo) *ModuleInfoDeserialized {
bun.assert(self.finalized);
return &self._deserialized;
}
pub const FetchParameters = enum(u32) {
none = std.math.maxInt(u32),
javascript = std.math.maxInt(u32) - 1,
webassembly = std.math.maxInt(u32) - 2,
json = std.math.maxInt(u32) - 3,
_, // host_defined: cast to StringID
pub fn hostDefined(value: StringID) FetchParameters {
return @enumFromInt(@intFromEnum(value));
}
};
pub const VarKind = enum { declared, lexical };
pub fn addVar(self: *ModuleInfo, name: StringID, kind: VarKind) !void {
switch (kind) {
.declared => try self.addDeclaredVariable(name),
.lexical => try self.addLexicalVariable(name),
}
}
fn _addRecord(self: *ModuleInfo, kind: RecordKind, data: []const StringID) !void {
bun.assert(!self.finalized);
bun.assert(data.len == kind.len() catch unreachable);
try self.record_kinds.append(self.gpa, kind);
try self.buffer.appendSlice(self.gpa, data);
}
pub fn addDeclaredVariable(self: *ModuleInfo, id: StringID) !void {
try self._addRecord(.declared_variable, &.{id});
}
pub fn addLexicalVariable(self: *ModuleInfo, id: StringID) !void {
try self._addRecord(.lexical_variable, &.{id});
}
pub fn addImportInfoSingle(self: *ModuleInfo, module_name: StringID, import_name: StringID, local_name: StringID, only_used_as_type: bool) !void {
try self._addRecord(if (only_used_as_type) .import_info_single_type_script else .import_info_single, &.{ module_name, import_name, local_name });
}
pub fn addImportInfoNamespace(self: *ModuleInfo, module_name: StringID, local_name: StringID) !void {
try self._addRecord(.import_info_namespace, &.{ module_name, try self.str("*"), local_name });
}
pub fn addExportInfoIndirect(self: *ModuleInfo, export_name: StringID, import_name: StringID, module_name: StringID) !void {
if (try self._hasOrAddExportedName(export_name)) return; // a syntax error will be emitted later in this case
try self._addRecord(.export_info_indirect, &.{ export_name, import_name, module_name });
}
pub fn addExportInfoLocal(self: *ModuleInfo, export_name: StringID, local_name: StringID) !void {
if (try self._hasOrAddExportedName(export_name)) return; // a syntax error will be emitted later in this case
try self._addRecord(.export_info_local, &.{ export_name, local_name, @enumFromInt(std.math.maxInt(u32)) });
}
pub fn addExportInfoNamespace(self: *ModuleInfo, export_name: StringID, module_name: StringID) !void {
if (try self._hasOrAddExportedName(export_name)) return; // a syntax error will be emitted later in this case
try self._addRecord(.export_info_namespace, &.{ export_name, module_name });
}
pub fn addExportInfoStar(self: *ModuleInfo, module_name: StringID) !void {
try self._addRecord(.export_info_star, &.{module_name});
}
pub fn _hasOrAddExportedName(self: *ModuleInfo, name: StringID) !bool {
if (try self.exported_names.fetchPut(self.gpa, name, {}) != null) return true;
return false;
}
pub fn create(gpa: std.mem.Allocator, is_typescript: bool) !*ModuleInfo {
const res = try gpa.create(ModuleInfo);
res.* = ModuleInfo.init(gpa, is_typescript);
return res;
}
fn init(allocator: std.mem.Allocator, is_typescript: bool) ModuleInfo {
return .{
.gpa = allocator,
.strings_map = .{},
.strings_buf = .{},
.strings_lens = .{},
.exported_names = .{},
.requested_modules = std.AutoArrayHashMap(StringID, FetchParameters).init(allocator),
.buffer = .empty,
.record_kinds = .empty,
.flags = .{ .contains_import_meta = false, .is_typescript = is_typescript },
._deserialized = undefined,
};
}
fn deinit(self: *ModuleInfo) void {
self.strings_map.deinit(self.gpa);
self.strings_buf.deinit(self.gpa);
self.strings_lens.deinit(self.gpa);
self.exported_names.deinit(self.gpa);
self.requested_modules.deinit();
self.buffer.deinit(self.gpa);
self.record_kinds.deinit(self.gpa);
}
pub fn destroy(self: *ModuleInfo) void {
const alloc = self.gpa;
self.deinit();
alloc.destroy(self);
}
pub fn str(self: *ModuleInfo, value: []const u8) !StringID {
try self.strings_buf.ensureUnusedCapacity(self.gpa, value.len);
try self.strings_lens.ensureUnusedCapacity(self.gpa, 1);
const gpres = try self.strings_map.getOrPutAdapted(self.gpa, value, StringContext{
.strings_buf = self.strings_buf.items,
.strings_lens = self.strings_lens.items,
});
if (gpres.found_existing) return @enumFromInt(@as(u32, @intCast(gpres.index)));
gpres.key_ptr.* = @enumFromInt(@as(u32, @truncate(self.strings_buf.items.len)));
gpres.value_ptr.* = {};
self.strings_buf.appendSliceAssumeCapacity(value);
self.strings_lens.appendAssumeCapacity(@as(u32, @truncate(value.len)));
return @enumFromInt(@as(u32, @intCast(gpres.index)));
}
pub fn requestModule(self: *ModuleInfo, import_record_path: StringID, fetch_parameters: FetchParameters) !void {
// jsc only records the attributes of the first import with the given import_record_path. so only put if not exists.
const gpres = try self.requested_modules.getOrPut(import_record_path);
if (!gpres.found_existing) gpres.value_ptr.* = fetch_parameters;
}
/// Replace all occurrences of old_id with new_id in records and requested_modules.
/// Used to fix up cross-chunk import specifiers after final paths are computed.
pub fn replaceStringID(self: *ModuleInfo, old_id: StringID, new_id: StringID) void {
bun.assert(!self.finalized);
// Replace in record buffer
for (self.buffer.items) |*item| {
if (item.* == old_id) item.* = new_id;
}
// Replace in requested_modules keys (preserving insertion order)
if (self.requested_modules.getIndex(old_id)) |idx| {
self.requested_modules.keys()[idx] = new_id;
self.requested_modules.reIndex() catch {};
}
}
/// find any exports marked as 'local' that are actually 'indirect' and fix them
pub fn finalize(self: *ModuleInfo) !void {
bun.assert(!self.finalized);
var local_name_to_module_name = std.AutoArrayHashMap(StringID, struct { module_name: StringID, import_name: StringID, record_kinds_idx: usize }).init(bun.default_allocator);
defer local_name_to_module_name.deinit();
{
var i: usize = 0;
for (self.record_kinds.items, 0..) |k, idx| {
if (k == .import_info_single or k == .import_info_single_type_script) {
try local_name_to_module_name.put(self.buffer.items[i + 2], .{ .module_name = self.buffer.items[i], .import_name = self.buffer.items[i + 1], .record_kinds_idx = idx });
}
i += k.len() catch unreachable;
}
}
{
var i: usize = 0;
for (self.record_kinds.items) |*k| {
if (k.* == .export_info_local) {
if (local_name_to_module_name.get(self.buffer.items[i + 1])) |ip| {
k.* = .export_info_indirect;
self.buffer.items[i + 1] = ip.import_name;
self.buffer.items[i + 2] = ip.module_name;
// In TypeScript, the re-exported import may target a type-only
// export that was elided. Convert the import to SingleTypeScript
// so JSC tolerates it being NotFound during linking.
if (self.flags.is_typescript) {
self.record_kinds.items[ip.record_kinds_idx] = .import_info_single_type_script;
}
}
}
i += k.len() catch unreachable;
}
}
self._deserialized = .{
.strings_buf = self.strings_buf.items,
.strings_lens = self.strings_lens.items,
.requested_modules_keys = self.requested_modules.keys(),
.requested_modules_values = self.requested_modules.values(),
.buffer = self.buffer.items,
.record_kinds = self.record_kinds.items,
.flags = self.flags,
.owner = .module_info,
};
self.finalized = true;
}
};
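A usage sketch of the builder API above, using only functions defined in this file: it registers `import { a } from "m"; export { a };` and finalizes, which rewrites the local export into an indirect export through "m".

```zig
fn buildExample(gpa: std.mem.Allocator) !void {
    const info = try ModuleInfo.create(gpa, false);
    defer info.destroy();

    const m = try info.str("m");
    const a = try info.str("a");

    // import { a } from "m";
    try info.addImportInfoSingle(m, a, a, false);
    try info.requestModule(m, .none);

    // export { a };  -- recorded as a local export for now
    try info.addExportInfoLocal(a, a);

    // finalize() notices `a` is import-backed and converts the record
    // to export_info_indirect(a, a, m).
    try info.finalize();
}
```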
pub const StringID = enum(u32) {
star_default = std.math.maxInt(u32),
star_namespace = std.math.maxInt(u32) - 1,
_,
};
export fn zig__renderDiff(expected_ptr: [*:0]const u8, expected_len: usize, received_ptr: [*:0]const u8, received_len: usize, globalThis: *bun.jsc.JSGlobalObject) void {
const formatter = DiffFormatter{
.received_string = received_ptr[0..received_len],
.expected_string = expected_ptr[0..expected_len],
.globalThis = globalThis,
};
bun.Output.errorWriter().print("DIFF:\n{any}\n", .{formatter}) catch {};
}
export fn zig__ModuleInfoDeserialized__toJSModuleRecord(
globalObject: *bun.jsc.JSGlobalObject,
vm: *bun.jsc.VM,
module_key: *const IdentifierArray,
source_code: *const SourceCode,
declared_variables: *VariableEnvironment,
lexical_variables: *VariableEnvironment,
res: *ModuleInfoDeserialized,
) ?*JSModuleRecord {
defer res.deinit();
var identifiers = IdentifierArray.create(res.strings_lens.len);
defer identifiers.destroy();
var offset: usize = 0;
for (0.., res.strings_lens) |index, len| {
if (res.strings_buf.len < offset + len) return null; // error!
const sub = res.strings_buf[offset..][0..len];
identifiers.setFromUtf8(index, vm, sub);
offset += len;
}
{
var i: usize = 0;
for (res.record_kinds) |k| {
if (i + (k.len() catch 0) > res.buffer.len) return null;
switch (k) {
.declared_variable => declared_variables.add(vm, identifiers, res.buffer[i]),
.lexical_variable => lexical_variables.add(vm, identifiers, res.buffer[i]),
.import_info_single, .import_info_single_type_script, .import_info_namespace, .export_info_indirect, .export_info_local, .export_info_namespace, .export_info_star => {},
else => return null,
}
i += k.len() catch unreachable; // handled above
}
}
const module_record = JSModuleRecord.create(globalObject, vm, module_key, source_code, declared_variables, lexical_variables, res.flags.contains_import_meta, res.flags.is_typescript);
for (res.requested_modules_keys, res.requested_modules_values) |reqk, reqv| {
switch (reqv) {
.none => module_record.addRequestedModuleNullAttributesPtr(identifiers, reqk),
.javascript => module_record.addRequestedModuleJavaScript(identifiers, reqk),
.webassembly => module_record.addRequestedModuleWebAssembly(identifiers, reqk),
.json => module_record.addRequestedModuleJSON(identifiers, reqk),
else => |uv| module_record.addRequestedModuleHostDefined(identifiers, reqk, @enumFromInt(@intFromEnum(uv))),
}
}
{
var i: usize = 0;
for (res.record_kinds) |k| {
if (i + (k.len() catch unreachable) > res.buffer.len) unreachable; // handled above
switch (k) {
.declared_variable, .lexical_variable => {},
.import_info_single => module_record.addImportEntrySingle(identifiers, res.buffer[i + 1], res.buffer[i + 2], res.buffer[i]),
.import_info_single_type_script => module_record.addImportEntrySingleTypeScript(identifiers, res.buffer[i + 1], res.buffer[i + 2], res.buffer[i]),
.import_info_namespace => module_record.addImportEntryNamespace(identifiers, res.buffer[i + 1], res.buffer[i + 2], res.buffer[i]),
.export_info_indirect => module_record.addIndirectExport(identifiers, res.buffer[i + 0], res.buffer[i + 1], res.buffer[i + 2]),
.export_info_local => module_record.addLocalExport(identifiers, res.buffer[i], res.buffer[i + 1]),
.export_info_namespace => module_record.addNamespaceExport(identifiers, res.buffer[i], res.buffer[i + 1]),
.export_info_star => module_record.addStarExport(identifiers, res.buffer[i]),
else => unreachable, // handled above
}
i += k.len() catch unreachable; // handled above
}
}
return module_record;
}
export fn zig__ModuleInfo__destroy(info: *ModuleInfo) void {
info.destroy();
}
const VariableEnvironment = opaque {
extern fn JSC__VariableEnvironment__add(environment: *VariableEnvironment, vm: *bun.jsc.VM, identifier_array: *IdentifierArray, identifier_index: StringID) void;
pub const add = JSC__VariableEnvironment__add;
};
const IdentifierArray = opaque {
extern fn JSC__IdentifierArray__create(len: usize) *IdentifierArray;
pub const create = JSC__IdentifierArray__create;
extern fn JSC__IdentifierArray__destroy(identifier_array: *IdentifierArray) void;
pub const destroy = JSC__IdentifierArray__destroy;
extern fn JSC__IdentifierArray__setFromUtf8(identifier_array: *IdentifierArray, n: usize, vm: *bun.jsc.VM, str: [*]const u8, len: usize) void;
pub fn setFromUtf8(self: *IdentifierArray, n: usize, vm: *bun.jsc.VM, str: []const u8) void {
JSC__IdentifierArray__setFromUtf8(self, n, vm, str.ptr, str.len);
}
};
const SourceCode = opaque {};
const JSModuleRecord = opaque {
extern fn JSC_JSModuleRecord__create(global_object: *bun.jsc.JSGlobalObject, vm: *bun.jsc.VM, module_key: *const IdentifierArray, source_code: *const SourceCode, declared_variables: *VariableEnvironment, lexical_variables: *VariableEnvironment, has_import_meta: bool, is_typescript: bool) *JSModuleRecord;
pub const create = JSC_JSModuleRecord__create;
extern fn JSC_JSModuleRecord__declaredVariables(module_record: *JSModuleRecord) *VariableEnvironment;
pub const declaredVariables = JSC_JSModuleRecord__declaredVariables;
extern fn JSC_JSModuleRecord__lexicalVariables(module_record: *JSModuleRecord) *VariableEnvironment;
pub const lexicalVariables = JSC_JSModuleRecord__lexicalVariables;
extern fn JSC_JSModuleRecord__addIndirectExport(module_record: *JSModuleRecord, identifier_array: *IdentifierArray, export_name: StringID, import_name: StringID, module_name: StringID) void;
pub const addIndirectExport = JSC_JSModuleRecord__addIndirectExport;
extern fn JSC_JSModuleRecord__addLocalExport(module_record: *JSModuleRecord, identifier_array: *IdentifierArray, export_name: StringID, local_name: StringID) void;
pub const addLocalExport = JSC_JSModuleRecord__addLocalExport;
extern fn JSC_JSModuleRecord__addNamespaceExport(module_record: *JSModuleRecord, identifier_array: *IdentifierArray, export_name: StringID, module_name: StringID) void;
pub const addNamespaceExport = JSC_JSModuleRecord__addNamespaceExport;
extern fn JSC_JSModuleRecord__addStarExport(module_record: *JSModuleRecord, identifier_array: *IdentifierArray, module_name: StringID) void;
pub const addStarExport = JSC_JSModuleRecord__addStarExport;
extern fn JSC_JSModuleRecord__addRequestedModuleNullAttributesPtr(module_record: *JSModuleRecord, identifier_array: *IdentifierArray, module_name: StringID) void;
pub const addRequestedModuleNullAttributesPtr = JSC_JSModuleRecord__addRequestedModuleNullAttributesPtr;
extern fn JSC_JSModuleRecord__addRequestedModuleJavaScript(module_record: *JSModuleRecord, identifier_array: *IdentifierArray, module_name: StringID) void;
pub const addRequestedModuleJavaScript = JSC_JSModuleRecord__addRequestedModuleJavaScript;
extern fn JSC_JSModuleRecord__addRequestedModuleWebAssembly(module_record: *JSModuleRecord, identifier_array: *IdentifierArray, module_name: StringID) void;
pub const addRequestedModuleWebAssembly = JSC_JSModuleRecord__addRequestedModuleWebAssembly;
extern fn JSC_JSModuleRecord__addRequestedModuleJSON(module_record: *JSModuleRecord, identifier_array: *IdentifierArray, module_name: StringID) void;
pub const addRequestedModuleJSON = JSC_JSModuleRecord__addRequestedModuleJSON;
extern fn JSC_JSModuleRecord__addRequestedModuleHostDefined(module_record: *JSModuleRecord, identifier_array: *IdentifierArray, module_name: StringID, host_defined_import_type: StringID) void;
pub const addRequestedModuleHostDefined = JSC_JSModuleRecord__addRequestedModuleHostDefined;
extern fn JSC_JSModuleRecord__addImportEntrySingle(module_record: *JSModuleRecord, identifier_array: *IdentifierArray, import_name: StringID, local_name: StringID, module_name: StringID) void;
pub const addImportEntrySingle = JSC_JSModuleRecord__addImportEntrySingle;
extern fn JSC_JSModuleRecord__addImportEntrySingleTypeScript(module_record: *JSModuleRecord, identifier_array: *IdentifierArray, import_name: StringID, local_name: StringID, module_name: StringID) void;
pub const addImportEntrySingleTypeScript = JSC_JSModuleRecord__addImportEntrySingleTypeScript;
extern fn JSC_JSModuleRecord__addImportEntryNamespace(module_record: *JSModuleRecord, identifier_array: *IdentifierArray, import_name: StringID, local_name: StringID, module_name: StringID) void;
pub const addImportEntryNamespace = JSC_JSModuleRecord__addImportEntryNamespace;
};
export fn zig_log(msg: [*:0]const u8) void {
bun.Output.errorWriter().print("{s}\n", .{std.mem.span(msg)}) catch {};
}
const bun = @import("bun");
const std = @import("std");
const DiffFormatter = @import("./bun.js/test/diff_format.zig").DiffFormatter;


@@ -343,6 +343,8 @@ pub const api = struct {
sqlite_embedded = 17,
html = 18,
yaml = 19,
+ json5 = 20,
+ md = 21,
_,
pub fn jsonStringify(self: @This(), writer: anytype) !void {


@@ -68,6 +68,7 @@ ts_enums: TsEnumsMap = .{},
/// This is a list of named exports that may exist in a CommonJS module
/// We use this with `commonjs_at_runtime` to re-export CommonJS
has_commonjs_export_names: bool = false,
+ has_import_meta: bool = false,
import_meta_ref: Ref = Ref.None,
pub const CommonJSNamedExport = struct {


@@ -52,7 +52,7 @@ ts_enums: Ast.TsEnumsMap = .{},
flags: BundledAst.Flags = .{},
- pub const Flags = packed struct(u8) {
+ pub const Flags = packed struct(u16) {
// This is a list of CommonJS features. When a file uses CommonJS features,
// it's not a candidate for "flat bundling" and must be wrapped in its own
// closure.
@@ -65,6 +65,8 @@ pub const Flags = packed struct(u8) {
has_lazy_export: bool = false,
commonjs_module_exports_assigned_deoptimized: bool = false,
has_explicit_use_strict_directive: bool = false,
+ has_import_meta: bool = false,
+ _padding: u7 = 0,
};
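The backing integer widens from `u8` to `u16` because a packed struct's fields, padding included, must fill the backing integer exactly, and `has_import_meta` pushes the struct to nine flag bits. A minimal illustration of the rule:

```zig
// Nine one-bit fields no longer fit in u8, so the struct is backed by
// u16 and topped up with 7 bits of explicit padding: 9 + 7 == 16.
const NineFlags = packed struct(u16) {
    f0: bool = false,
    f1: bool = false,
    f2: bool = false,
    f3: bool = false,
    f4: bool = false,
    f5: bool = false,
    f6: bool = false,
    f7: bool = false,
    f8: bool = false, // the ninth flag forces the wider backing int
    _padding: u7 = 0,
};
```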
pub const empty = BundledAst.init(Ast.empty);
@@ -116,6 +118,7 @@ pub fn toAST(this: *const BundledAst) Ast {
.has_lazy_export = this.flags.has_lazy_export,
.commonjs_module_exports_assigned_deoptimized = this.flags.commonjs_module_exports_assigned_deoptimized,
.directive = if (this.flags.has_explicit_use_strict_directive) "use strict" else null,
+ .has_import_meta = this.flags.has_import_meta,
};
}
@@ -168,6 +171,7 @@ pub fn init(ast: Ast) BundledAst {
.has_lazy_export = ast.has_lazy_export,
.commonjs_module_exports_assigned_deoptimized = ast.commonjs_module_exports_assigned_deoptimized,
.has_explicit_use_strict_directive = strings.eqlComptime(ast.directive orelse "", "use strict"),
+ .has_import_meta = ast.has_import_meta,
},
};
}

View File

@@ -320,9 +320,8 @@ pub const Runner = struct {
.Null => return Expr.init(E.Null, E.Null{}, this.caller.loc),
.Private => {
this.is_top_level = false;
- const _entry = this.visited.getOrPut(this.allocator, value) catch unreachable;
- if (_entry.found_existing) {
-     return _entry.value_ptr.*;
+ if (this.visited.get(value)) |cached| {
+     return cached;
}
var blob_: ?*const jsc.WebCore.Blob = null;
@@ -470,9 +469,8 @@ pub const Runner = struct {
return Expr.init(E.String, E.String.init(out_slice), this.caller.loc);
},
.Promise => {
- const _entry = this.visited.getOrPut(this.allocator, value) catch unreachable;
- if (_entry.found_existing) {
-     return _entry.value_ptr.*;
+ if (this.visited.get(value)) |cached| {
+     return cached;
}
const promise = value.asAnyPromise() orelse @panic("Unexpected promise type");
@@ -494,7 +492,7 @@ pub const Runner = struct {
this.is_top_level = false;
const result = try this.run(promise_result);
- _entry.value_ptr.* = result;
+ this.visited.put(this.allocator, value, result) catch unreachable;
return result;
},
else => {},

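The move from `getOrPut` to `get` plus a later `put` matters because the recursive `run` call can insert into `visited` in the meantime, and a growing hash map invalidates previously returned entry pointers. A standalone sketch of the hazard (hypothetical map, not Bun's):

```zig
const std = @import("std");

fn entryPointerHazard(gpa: std.mem.Allocator) !void {
    var visited: std.AutoArrayHashMapUnmanaged(u32, u32) = .{};
    defer visited.deinit(gpa);

    const entry = try visited.getOrPut(gpa, 1);
    _ = entry; // holding this across further inserts is the bug

    // Reentrant work inserts more keys; the map may reallocate here,
    // leaving `entry.value_ptr` dangling.
    try visited.put(gpa, 2, 20);

    // Safe pattern (what the diff above does): look up again, then put.
    if (visited.get(1) == null) try visited.put(gpa, 1, 10);
}
```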

@@ -6467,6 +6467,11 @@ pub fn NewParser_(
parts.items[0].stmts = top_level_stmts;
}
+ // REPL mode transforms
+ if (p.options.repl_mode) {
+     try repl_transforms.ReplTransforms(P).apply(p, parts, allocator);
+ }
var top_level_symbols_to_parts = js_ast.Ast.TopLevelSymbolToParts{};
var top_level = &top_level_symbols_to_parts;
@@ -6527,7 +6532,9 @@ pub fn NewParser_(
break :brk p.hmr_api_ref;
}
- if (p.options.bundle and p.needsWrapperRef(parts.items)) {
+ // When code splitting is enabled, always create wrapper_ref to match esbuild behavior.
+ // Otherwise, use needsWrapperRef() to optimize away unnecessary wrappers.
+ if (p.options.bundle and (p.options.code_splitting or p.needsWrapperRef(parts.items))) {
break :brk p.newSymbol(
.other,
std.fmt.allocPrint(
@@ -6584,6 +6591,7 @@ pub fn NewParser_(
.top_level_await_keyword = p.top_level_await_keyword,
.commonjs_named_exports = p.commonjs_named_exports,
.has_commonjs_export_names = p.has_commonjs_export_names,
+ .has_import_meta = p.has_import_meta,
.hashbang = hashbang,
// TODO: cross-module constant inlining
@@ -6760,6 +6768,8 @@ var falseValueExpr = Expr.Data{ .e_boolean = E.Boolean{ .value = false } };
const string = []const u8;
+ const repl_transforms = @import("./repl_transforms.zig");
const Define = @import("../defines.zig").Define;
const DefineData = @import("../defines.zig").DefineData;


@@ -19,6 +19,7 @@ pub const Parser = struct {
tree_shaking: bool = false,
bundle: bool = false,
code_splitting: bool = false,
package_version: string = "",
macro_context: *MacroContextType() = undefined,
@@ -38,6 +39,13 @@ pub const Parser = struct {
/// able to customize what import sources are used.
framework: ?*bun.bake.Framework = null,
+ /// REPL mode: transforms code for interactive evaluation
+ /// - Wraps lone object literals `{...}` in parentheses
+ /// - Hoists variable declarations for REPL persistence
+ /// - Wraps last expression in { value: expr } for result capture
+ /// - Wraps code with await in async IIFE
+ repl_mode: bool = false,
pub fn hashForRuntimeTranspiler(this: *const Options, hasher: *std.hash.Wyhash, did_use_jsx: bool) void {
bun.assert(!this.bundle);

src/ast/repl_transforms.zig (new file)

@@ -0,0 +1,365 @@
/// REPL Transform module - transforms code for interactive REPL evaluation
///
/// This module provides transformations for REPL mode:
/// - Wraps the last expression in { value: expr } for result capture
/// - Wraps code with await in async IIFE with variable hoisting
/// - Hoists declarations for variable persistence across REPL lines
pub fn ReplTransforms(comptime P: type) type {
return struct {
const Self = @This();
/// Apply REPL-mode transforms to the AST.
/// This transforms code for interactive evaluation:
/// - Wraps the last expression in { value: expr } for result capture
/// - Wraps code with await in async IIFE with variable hoisting
pub fn apply(p: *P, parts: *ListManaged(js_ast.Part), allocator: Allocator) !void {
// Skip transform if there's a top-level return (indicates module pattern)
if (p.has_top_level_return) {
return;
}
// Collect all statements
var total_stmts_count: usize = 0;
for (parts.items) |part| {
total_stmts_count += part.stmts.len;
}
if (total_stmts_count == 0) {
return;
}
// Check if there's top-level await
const has_top_level_await = p.top_level_await_keyword.len > 0;
// Collect all statements into a single array
var all_stmts = bun.handleOom(allocator.alloc(Stmt, total_stmts_count));
var stmt_idx: usize = 0;
for (parts.items) |part| {
for (part.stmts) |stmt| {
all_stmts[stmt_idx] = stmt;
stmt_idx += 1;
}
}
// Apply transform with is_async based on presence of top-level await
try transformWithHoisting(p, parts, all_stmts, allocator, has_top_level_await);
}
/// Transform code with hoisting and IIFE wrapper
/// @param is_async: true for async IIFE (when top-level await present), false for sync IIFE
fn transformWithHoisting(
p: *P,
parts: *ListManaged(js_ast.Part),
all_stmts: []Stmt,
allocator: Allocator,
is_async: bool,
) !void {
if (all_stmts.len == 0) return;
// Lists for hoisted declarations and inner statements
var hoisted_stmts = ListManaged(Stmt).init(allocator);
var inner_stmts = ListManaged(Stmt).init(allocator);
try hoisted_stmts.ensureTotalCapacity(all_stmts.len);
try inner_stmts.ensureTotalCapacity(all_stmts.len);
// Process each statement - hoist all declarations for REPL persistence
for (all_stmts) |stmt| {
switch (stmt.data) {
.s_local => |local| {
// Hoist all declarations as var so they become context properties
// In sloppy mode, var at top level becomes a property of the global/context object
// This is essential for REPL variable persistence across vm.runInContext calls
const kind: S.Local.Kind = .k_var;
// Extract individual identifiers from binding patterns for hoisting
var hoisted_decl_list = ListManaged(G.Decl).init(allocator);
for (local.decls.slice()) |decl| {
try extractIdentifiersFromBinding(p, decl.binding, &hoisted_decl_list);
}
if (hoisted_decl_list.items.len > 0) {
try hoisted_stmts.append(p.s(S.Local{
.kind = kind,
.decls = Decl.List.fromOwnedSlice(hoisted_decl_list.items),
}, stmt.loc));
}
// Create assignment expressions for the inner statements
for (local.decls.slice()) |decl| {
if (decl.value) |value| {
// Create assignment expression: binding = value
const assign_expr = createBindingAssignment(p, decl.binding, value, allocator);
try inner_stmts.append(p.s(S.SExpr{ .value = assign_expr }, stmt.loc));
}
}
},
.s_function => |func| {
// For function declarations:
// Hoist as: var funcName;
// Inner: this.funcName = funcName; function funcName() {}
if (func.func.name) |name_loc| {
try hoisted_stmts.append(p.s(S.Local{
.kind = .k_var,
.decls = Decl.List.fromOwnedSlice(bun.handleOom(allocator.dupe(G.Decl, &.{
G.Decl{
.binding = p.b(B.Identifier{ .ref = name_loc.ref.? }, name_loc.loc),
.value = null,
},
}))),
}, stmt.loc));
// Add this.funcName = funcName assignment
const this_expr = p.newExpr(E.This{}, stmt.loc);
const this_dot = p.newExpr(E.Dot{
.target = this_expr,
.name = p.symbols.items[name_loc.ref.?.innerIndex()].original_name,
.name_loc = name_loc.loc,
}, stmt.loc);
const func_id = p.newExpr(E.Identifier{ .ref = name_loc.ref.? }, name_loc.loc);
const assign = p.newExpr(E.Binary{
.op = .bin_assign,
.left = this_dot,
.right = func_id,
}, stmt.loc);
try inner_stmts.append(p.s(S.SExpr{ .value = assign }, stmt.loc));
}
// Add the function declaration itself
try inner_stmts.append(stmt);
},
.s_class => |class| {
// For class declarations:
// Hoist as: var ClassName; (use var so it persists to vm context)
// Inner: ClassName = class ClassName {}
if (class.class.class_name) |name_loc| {
try hoisted_stmts.append(p.s(S.Local{
.kind = .k_var,
.decls = Decl.List.fromOwnedSlice(bun.handleOom(allocator.dupe(G.Decl, &.{
G.Decl{
.binding = p.b(B.Identifier{ .ref = name_loc.ref.? }, name_loc.loc),
.value = null,
},
}))),
}, stmt.loc));
// Convert class declaration to assignment: ClassName = class ClassName {}
const class_expr = p.newExpr(class.class, stmt.loc);
const class_id = p.newExpr(E.Identifier{ .ref = name_loc.ref.? }, name_loc.loc);
const assign = p.newExpr(E.Binary{
.op = .bin_assign,
.left = class_id,
.right = class_expr,
}, stmt.loc);
try inner_stmts.append(p.s(S.SExpr{ .value = assign }, stmt.loc));
} else {
try inner_stmts.append(stmt);
}
},
.s_directive => |directive| {
// In REPL mode, treat directives (string literals) as expressions
const str_expr = p.newExpr(E.String{ .data = directive.value }, stmt.loc);
try inner_stmts.append(p.s(S.SExpr{ .value = str_expr }, stmt.loc));
},
else => {
try inner_stmts.append(stmt);
},
}
}
// Wrap the last expression in return { value: expr }
wrapLastExpressionWithReturn(p, &inner_stmts, allocator);
// Create the IIFE: (() => { ...inner_stmts... })() or (async () => { ... })()
const arrow = p.newExpr(E.Arrow{
.args = &.{},
.body = .{ .loc = logger.Loc.Empty, .stmts = inner_stmts.items },
.is_async = is_async,
}, logger.Loc.Empty);
const iife = p.newExpr(E.Call{
.target = arrow,
.args = ExprNodeList{},
}, logger.Loc.Empty);
// Final output: hoisted declarations + IIFE call
const final_stmts_count = hoisted_stmts.items.len + 1;
var final_stmts = bun.handleOom(allocator.alloc(Stmt, final_stmts_count));
for (hoisted_stmts.items, 0..) |stmt, j| {
final_stmts[j] = stmt;
}
final_stmts[hoisted_stmts.items.len] = p.s(S.SExpr{ .value = iife }, logger.Loc.Empty);
// Update parts
if (parts.items.len > 0) {
parts.items[0].stmts = final_stmts;
parts.items.len = 1;
}
}
/// Wrap the last expression in return { value: expr }
fn wrapLastExpressionWithReturn(p: *P, inner_stmts: *ListManaged(Stmt), allocator: Allocator) void {
if (inner_stmts.items.len > 0) {
var last_idx: usize = inner_stmts.items.len;
while (last_idx > 0) {
last_idx -= 1;
const last_stmt = inner_stmts.items[last_idx];
switch (last_stmt.data) {
.s_empty, .s_comment => continue,
.s_expr => |expr_data| {
// Wrap in return { value: expr }
const wrapped = wrapExprInValueObject(p, expr_data.value, allocator);
inner_stmts.items[last_idx] = p.s(S.Return{ .value = wrapped }, last_stmt.loc);
break;
},
else => break,
}
}
}
}
/// Extract individual identifiers from a binding pattern for hoisting
fn extractIdentifiersFromBinding(p: *P, binding: Binding, decls: *ListManaged(G.Decl)) !void {
switch (binding.data) {
.b_identifier => |ident| {
try decls.append(G.Decl{
.binding = p.b(B.Identifier{ .ref = ident.ref }, binding.loc),
.value = null,
});
},
.b_array => |arr| {
for (arr.items) |item| {
try extractIdentifiersFromBinding(p, item.binding, decls);
}
},
.b_object => |obj| {
for (obj.properties) |prop| {
try extractIdentifiersFromBinding(p, prop.value, decls);
}
},
.b_missing => {},
}
}
/// Create { __proto__: null, value: expr } wrapper object
/// Uses null prototype to create a clean data object
fn wrapExprInValueObject(p: *P, expr: Expr, allocator: Allocator) Expr {
var properties = bun.handleOom(allocator.alloc(G.Property, 2));
// __proto__: null - creates null-prototype object
properties[0] = G.Property{
.key = p.newExpr(E.String{ .data = "__proto__" }, expr.loc),
.value = p.newExpr(E.Null{}, expr.loc),
};
// value: expr - the actual result value
properties[1] = G.Property{
.key = p.newExpr(E.String{ .data = "value" }, expr.loc),
.value = expr,
};
return p.newExpr(E.Object{
.properties = G.Property.List.fromOwnedSlice(properties),
}, expr.loc);
}
/// Create assignment expression from binding pattern
fn createBindingAssignment(p: *P, binding: Binding, value: Expr, allocator: Allocator) Expr {
switch (binding.data) {
.b_identifier => |ident| {
return p.newExpr(E.Binary{
.op = .bin_assign,
.left = p.newExpr(E.Identifier{ .ref = ident.ref }, binding.loc),
.right = value,
}, binding.loc);
},
.b_array => {
// For array destructuring, create: [a, b] = value
return p.newExpr(E.Binary{
.op = .bin_assign,
.left = convertBindingToExpr(p, binding, allocator),
.right = value,
}, binding.loc);
},
.b_object => {
// For object destructuring, create: {a, b} = value
return p.newExpr(E.Binary{
.op = .bin_assign,
.left = convertBindingToExpr(p, binding, allocator),
.right = value,
}, binding.loc);
},
.b_missing => {
// Return Missing expression to match convertBindingToExpr
return p.newExpr(E.Missing{}, binding.loc);
},
}
}
/// Convert a binding pattern to an expression (for assignment targets)
/// Handles spread/rest patterns in arrays and objects to match Binding.toExpr behavior
fn convertBindingToExpr(p: *P, binding: Binding, allocator: Allocator) Expr {
switch (binding.data) {
.b_identifier => |ident| {
return p.newExpr(E.Identifier{ .ref = ident.ref }, binding.loc);
},
.b_array => |arr| {
var items = bun.handleOom(allocator.alloc(Expr, arr.items.len));
for (arr.items, 0..) |item, i| {
const expr = convertBindingToExpr(p, item.binding, allocator);
// Check for spread pattern: if has_spread and this is the last element
if (arr.has_spread and i == arr.items.len - 1) {
items[i] = p.newExpr(E.Spread{ .value = expr }, expr.loc);
} else if (item.default_value) |default_val| {
items[i] = p.newExpr(E.Binary{
.op = .bin_assign,
.left = expr,
.right = default_val,
}, item.binding.loc);
} else {
items[i] = expr;
}
}
return p.newExpr(E.Array{
.items = ExprNodeList.fromOwnedSlice(items),
.is_single_line = arr.is_single_line,
}, binding.loc);
},
.b_object => |obj| {
var properties = bun.handleOom(allocator.alloc(G.Property, obj.properties.len));
for (obj.properties, 0..) |prop, i| {
properties[i] = G.Property{
.flags = prop.flags,
.key = prop.key,
// Set kind to .spread if the property has spread flag
.kind = if (prop.flags.contains(.is_spread)) .spread else .normal,
.value = convertBindingToExpr(p, prop.value, allocator),
.initializer = prop.default_value,
};
}
return p.newExpr(E.Object{
.properties = G.Property.List.fromOwnedSlice(properties),
.is_single_line = obj.is_single_line,
}, binding.loc);
},
.b_missing => {
return p.newExpr(E.Missing{}, binding.loc);
},
}
}
};
}
const std = @import("std");
const Allocator = std.mem.Allocator;
const ListManaged = std.array_list.Managed;
const bun = @import("bun");
const logger = bun.logger;
const js_ast = bun.ast;
const B = js_ast.B;
const Binding = js_ast.Binding;
const E = js_ast.E;
const Expr = js_ast.Expr;
const ExprNodeList = js_ast.ExprNodeList;
const S = js_ast.S;
const Stmt = js_ast.Stmt;
const G = js_ast.G;
const Decl = G.Decl;

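A hypothetical before/after of the transform this file implements (input invented for illustration): declarations are hoisted as `var` so they persist on the REPL context, the body runs inside an IIFE (synchronous here, since there is no top-level await), and the final expression is returned as `{ __proto__: null, value: ... }`.

```zig
// REPL input:
const repl_input =
    \\let x = 1;
    \\x + 1
;

// Roughly what the transform described above would emit:
const transformed =
    \\var x;
    \\(() => {
    \\  x = 1;
    \\  return { __proto__: null, value: x + 1 };
    \\})();
;
```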

@@ -1355,7 +1355,7 @@ fn computeArgumentsForFrameworkRequest(
const relative_path_buf = bun.path_buffer_pool.get();
defer bun.path_buffer_pool.put(relative_path_buf);
var route_name = bun.String.cloneUTF8(dev.relativePath(relative_path_buf, keys[fromOpaqueFileId(.server, route.file_page.unwrap().?).get()]));
- try arr.putIndex(global, 0, route_name.transferToJS(global));
+ try arr.putIndex(global, 0, try route_name.transferToJS(global));
}
n = 1;
while (true) {
@@ -1366,7 +1366,7 @@ fn computeArgumentsForFrameworkRequest(
relative_path_buf,
keys[fromOpaqueFileId(.server, layout).get()],
));
- try arr.putIndex(global, @intCast(n), layout_name.transferToJS(global));
+ try arr.putIndex(global, @intCast(n), try layout_name.transferToJS(global));
n += 1;
}
route = dev.router.routePtr(route.parent.unwrap() orelse break);
@@ -1383,7 +1383,7 @@ fn computeArgumentsForFrameworkRequest(
std.mem.asBytes(&generation),
}) catch |err| bun.handleOom(err);
defer str.deref();
- const js = str.toJS(dev.vm.global);
+ const js = try str.toJS(dev.vm.global);
framework_bundle.cached_client_bundle_url = .create(js, dev.vm.global);
break :str js;
},
@@ -2091,7 +2091,7 @@ fn generateCssJSArray(dev: *DevServer, route_bundle: *RouteBundle) bun.JSError!j
}) catch unreachable;
const str = bun.String.cloneUTF8(path);
defer str.deref();
- try arr.putIndex(dev.vm.global, @intCast(i), str.toJS(dev.vm.global));
+ try arr.putIndex(dev.vm.global, @intCast(i), try str.toJS(dev.vm.global));
}
return arr;
}
@@ -2136,7 +2136,7 @@ fn makeArrayForServerComponentsPatch(dev: *DevServer, global: *jsc.JSGlobalObjec
defer bun.path_buffer_pool.put(relative_path_buf);
const str = bun.String.cloneUTF8(dev.relativePath(relative_path_buf, names[item.get()]));
defer str.deref();
- try arr.putIndex(global, @intCast(i), str.toJS(global));
+ try arr.putIndex(global, @intCast(i), try str.toJS(global));
}
return arr;
}

View File

@@ -48,6 +48,7 @@ pub fn trackResolutionFailure(store: *DirectoryWatchStore, import_source: []cons
.jsonc,
.toml,
.yaml,
+ .json5,
.wasm,
.napi,
.base64,
@@ -56,6 +57,7 @@ pub fn trackResolutionFailure(store: *DirectoryWatchStore, import_source: []cons
.bunsh,
.sqlite,
.sqlite_embedded,
+ .md,
=> bun.debugAssert(false),
}

Some files were not shown because too many files have changed in this diff.