Compare commits

..

1 Commit

Author SHA1 Message Date
Claude Bot
429c79ff09 fix: respect install.lockfile.save=false when migrating from package-lock.json
Previously, Bun would always save a lockfile when migrating from package-lock.json
or yarn.lock, even when install.lockfile.save was set to false in bunfig.toml.
This fix ensures that the save=false configuration is respected during migration.

The lockfile is still migrated and used in memory for dependency resolution,
but it is not written to disk when save=false.

Fixes #22963

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-25 15:29:22 +00:00
732 changed files with 8972 additions and 47389 deletions

View File

@@ -371,7 +371,7 @@ function getZigAgent(platform, options) {
* @returns {Agent}
*/
function getTestAgent(platform, options) {
const { os, arch, profile } = platform;
const { os, arch } = platform;
if (os === "darwin") {
return {
@@ -391,13 +391,6 @@ function getTestAgent(platform, options) {
}
if (arch === "aarch64") {
if (profile === "asan") {
return getEc2Agent(platform, options, {
instanceType: "c8g.2xlarge",
cpuCount: 2,
threadsPerCore: 1,
});
}
return getEc2Agent(platform, options, {
instanceType: "c8g.xlarge",
cpuCount: 2,
@@ -405,13 +398,6 @@ function getTestAgent(platform, options) {
});
}
if (profile === "asan") {
return getEc2Agent(platform, options, {
instanceType: "c7i.2xlarge",
cpuCount: 2,
threadsPerCore: 1,
});
}
return getEc2Agent(platform, options, {
instanceType: "c7i.xlarge",
cpuCount: 2,
@@ -552,7 +538,6 @@ function getLinkBunStep(platform, options) {
cancel_on_build_failing: isMergeQueue(),
env: {
BUN_LINK_ONLY: "ON",
ASAN_OPTIONS: "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=0",
...getBuildEnv(platform, options),
},
command: `${getBuildCommand(platform, options, "build-bun")} --target bun`,
@@ -616,9 +601,6 @@ function getTestBunStep(platform, options, testOptions = {}) {
cancel_on_build_failing: isMergeQueue(),
parallelism: unifiedTests ? undefined : os === "darwin" ? 2 : 10,
timeout_in_minutes: profile === "asan" || os === "windows" ? 45 : 30,
env: {
ASAN_OPTIONS: "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=0",
},
command:
os === "windows"
? `node .\\scripts\\runner.node.mjs ${args.join(" ")}`

View File

@@ -1,88 +0,0 @@
#!/usr/bin/env bun
// Claude Code PostToolUse hook: auto-formats files touched by the Write,
// Edit, and MultiEdit tools. Zig files are run through the vendored
// `zig fmt`; other known text formats are run through the repo's Prettier
// configuration. The hook always exits 0, so a formatter failure reports a
// message but never blocks the edit itself.
import { extname } from "path";
import { spawnSync } from "child_process";

const input = await Bun.stdin.json();
const toolName = input.tool_name;
const toolInput = input.tool_input || {};
const filePath = toolInput.file_path;

// Only process Write, Edit, and MultiEdit tools
if (!["Write", "Edit", "MultiEdit"].includes(toolName)) {
  process.exit(0);
}

// Bail out before touching the path: file_path can be missing from the tool
// input, and extname(undefined) throws a TypeError. (Previously the extension
// was computed before this guard.)
if (!filePath) {
  process.exit(0);
}
const ext = extname(filePath);

// Run `zig fmt` on the edited file. Failures are reported on stderr, but the
// hook still exits 0 — formatting is best-effort.
function formatZigFile() {
  try {
    // Format the Zig file
    const result = spawnSync("vendor/zig/zig.exe", ["fmt", filePath], {
      cwd: process.env.CLAUDE_PROJECT_DIR || process.cwd(),
      encoding: "utf-8",
    });
    if (result.error) {
      // Spawn itself failed (e.g. the binary is missing).
      console.error(`Failed to format ${filePath}: ${result.error.message}`);
      process.exit(0);
    }
    if (result.status !== 0) {
      // zig fmt ran but rejected the file; surface its diagnostics.
      console.error(`zig fmt failed for ${filePath}:`);
      if (result.stderr) {
        console.error(result.stderr);
      }
      process.exit(0);
    }
  } catch (error) {}
}

// Run the repo-local Prettier (with the organize-imports plugin) on the
// edited file. Errors are deliberately swallowed: formatting is best-effort.
function formatTypeScriptFile() {
  try {
    // Format the TypeScript file
    const result = spawnSync(
      "./node_modules/.bin/prettier",
      ["--plugin=prettier-plugin-organize-imports", "--config", ".prettierrc", "--write", filePath],
      {
        cwd: process.env.CLAUDE_PROJECT_DIR || process.cwd(),
        encoding: "utf-8",
      },
    );
  } catch (error) {}
}

// Dispatch on file extension; unknown extensions are left untouched.
if (ext === ".zig") {
  formatZigFile();
} else if (
  [
    ".cjs",
    ".css",
    ".html",
    ".js",
    ".json",
    ".jsonc",
    ".jsx",
    ".less",
    ".mjs",
    ".pcss",
    ".postcss",
    ".sass",
    ".scss",
    ".styl",
    ".stylus",
    ".toml",
    ".ts",
    ".tsx",
    ".yaml",
  ].includes(ext)
) {
  formatTypeScriptFile();
}
process.exit(0);

View File

@@ -1,175 +0,0 @@
#!/usr/bin/env bun
// Claude Code PreToolUse hook for the Bash tool. Inspects the shell command
// the agent is about to run and denies patterns that waste time in the Bun
// repo: raw `zig build obj`, `bun test` against the release bun, timeouts on
// `bun bd`, snapshot updates combined with a name filter, and running the
// whole test suite from the repo root. Exiting 0 without printing a decision
// allows the command to proceed.
import { basename, extname } from "path";

const input = await Bun.stdin.json();
const toolName = input.tool_name;
const toolInput = input.tool_input || {};
const command = toolInput.command || "";
const timeout = toolInput.timeout;
const cwd = input.cwd || "";
// Get environment variables from the hook context
// Note: We check process.env directly as env vars are inherited
let useSystemBun = process.env.USE_SYSTEM_BUN;

// This hook only guards Bash invocations; everything else passes through.
if (toolName !== "Bash" || !command) {
  process.exit(0);
}

// Print a structured "deny" decision for Claude Code and stop. The process
// still exits 0 — the JSON payload is what blocks the tool call.
function denyWithReason(reason) {
  const output = {
    hookSpecificOutput: {
      hookEventName: "PreToolUse",
      permissionDecision: "deny",
      permissionDecisionReason: reason,
    },
  };
  console.log(JSON.stringify(output));
  process.exit(0);
}

// Parse the command to extract argv0 and positional args
let tokens;
try {
  // Simple shell parsing - split on spaces but respect quotes (both single and double)
  // NOTE(review): this does not handle escapes or nested quoting; adequate
  // for the simple commands this hook is screening.
  tokens = command.match(/(?:[^\s"']+|"[^"]*"|'[^']*')+/g)?.map(t => t.replace(/^['"]|['"]$/g, "")) || [];
} catch {
  process.exit(0);
}
if (tokens.length === 0) {
  process.exit(0);
}

// Strip inline environment variable assignments (e.g., FOO=1 bun test).
// The "/" exclusion avoids misreading a path containing "=" as an assignment.
const inlineEnv = new Map();
let commandStart = 0;
while (
  commandStart < tokens.length &&
  /^[A-Za-z_][A-Za-z0-9_]*=/.test(tokens[commandStart]) &&
  !tokens[commandStart].includes("/")
) {
  const [name, value = ""] = tokens[commandStart].split("=", 2);
  inlineEnv.set(name, value);
  commandStart++;
}
if (commandStart >= tokens.length) {
  process.exit(0);
}
tokens = tokens.slice(commandStart);
// An inline USE_SYSTEM_BUN=... assignment overrides the inherited value.
useSystemBun = inlineEnv.get("USE_SYSTEM_BUN") ?? useSystemBun;

// Get the executable name (argv0), with any extension (e.g. ".exe") removed.
const argv0 = basename(tokens[0], extname(tokens[0]));

// Check if it's zig or zig.exe
if (argv0 === "zig") {
  // Filter out flags (starting with -) to get positional arguments
  const positionalArgs = tokens.slice(1).filter(arg => !arg.startsWith("-"));
  // Check if the positional args contain "build" followed by "obj"
  if (positionalArgs.length >= 2 && positionalArgs[0] === "build" && positionalArgs[1] === "obj") {
    denyWithReason("error: Use `bun bd` to build Bun and wait patiently");
  }
}

// Check if argv0 is timeout and the command is "bun bd"
if (argv0 === "timeout") {
  // Find the actual command after timeout and its arguments.
  // The predicate skips flags and the numeric duration argument; the index is
  // relative to tokens.slice(1), so +1 maps it back into `tokens`.
  const timeoutArgEndIndex = tokens.slice(1).findIndex(t => !t.startsWith("-") && !/^\d/.test(t));
  if (timeoutArgEndIndex === -1) {
    process.exit(0);
  }
  const actualCommandIndex = timeoutArgEndIndex + 1;
  if (actualCommandIndex >= tokens.length) {
    process.exit(0);
  }
  const actualCommand = basename(tokens[actualCommandIndex]);
  const restArgs = tokens.slice(actualCommandIndex + 1);
  // Check if it's "bun bd" or "bun-debug bd" without other positional args
  if (actualCommand === "bun" || actualCommand.includes("bun-debug")) {
    const positionalArgs = restArgs.filter(arg => !arg.startsWith("-"));
    if (positionalArgs.length === 1 && positionalArgs[0] === "bd") {
      denyWithReason("error: Run `bun bd` without a timeout");
    }
  }
}

// Check if command is "bun .* test" or "bun-debug test" with -u/--update-snapshots AND -t/--test-name-pattern
if (argv0 === "bun" || argv0.includes("bun-debug")) {
  const allArgs = tokens.slice(1);
  // Check if "test" is in positional args or "bd" followed by "test"
  const positionalArgs = allArgs.filter(arg => !arg.startsWith("-"));
  const hasTest = positionalArgs.includes("test") || (positionalArgs[0] === "bd" && positionalArgs[1] === "test");
  if (hasTest) {
    const hasUpdateSnapshots = allArgs.some(arg => arg === "-u" || arg === "--update-snapshots");
    const hasTestNamePattern = allArgs.some(arg => arg === "-t" || arg === "--test-name-pattern");
    if (hasUpdateSnapshots && hasTestNamePattern) {
      denyWithReason("error: Cannot use -u/--update-snapshots with -t/--test-name-pattern");
    }
  }
}

// Check if timeout option is set for "bun bd" command
// (this is the tool's `timeout` parameter, distinct from the `timeout`
// executable handled above).
if (timeout !== undefined && (argv0 === "bun" || argv0.includes("bun-debug"))) {
  const positionalArgs = tokens.slice(1).filter(arg => !arg.startsWith("-"));
  if (positionalArgs.length === 1 && positionalArgs[0] === "bd") {
    denyWithReason("error: Run `bun bd` without a timeout");
  }
}

// Check if running "bun test <file>" without USE_SYSTEM_BUN=1
if ((argv0 === "bun" || argv0.includes("bun-debug")) && useSystemBun !== "1") {
  const allArgs = tokens.slice(1);
  const positionalArgs = allArgs.filter(arg => !arg.startsWith("-"));
  // Check if it's "test" (not "bd test")
  // NOTE(review): the second comparison is redundant — it tests the same
  // element already known to equal "test". The "bd test" case is excluded by
  // positionalArgs[0] === "test" alone.
  if (positionalArgs.length >= 1 && positionalArgs[0] === "test" && positionalArgs[0] !== "bd") {
    denyWithReason(
      "error: In development, use `bun bd test <file>` to test your changes. If you meant to use a release version, set USE_SYSTEM_BUN=1",
    );
  }
}

// Check if running "bun bd test" from bun repo root or test folder without a file path
if (argv0 === "bun" || argv0.includes("bun-debug")) {
  const allArgs = tokens.slice(1);
  const positionalArgs = allArgs.filter(arg => !arg.startsWith("-"));
  // Check if it's "bd test"
  if (positionalArgs.length >= 2 && positionalArgs[0] === "bd" && positionalArgs[1] === "test") {
    // Check if cwd is the bun repo root or test folder
    // NOTE(review): the exact "/workspace/bun" comparison is subsumed by the
    // endsWith("/bun") check; kept as-is for clarity of intent.
    const isBunRepoRoot = cwd === "/workspace/bun" || cwd.endsWith("/bun");
    const isTestFolder = cwd.endsWith("/bun/test");
    if (isBunRepoRoot || isTestFolder) {
      // Check if there's a file path argument (looks like a path: contains / or has test extension)
      const hasFilePath = positionalArgs
        .slice(2)
        .some(
          arg =>
            arg.includes("/") ||
            arg.endsWith(".test.ts") ||
            arg.endsWith(".test.js") ||
            arg.endsWith(".test.tsx") ||
            arg.endsWith(".test.jsx"),
        );
      if (!hasFilePath) {
        denyWithReason(
          "error: `bun bd test` from repo root or test folder will run all tests. Use `bun bd test <path>` with a specific test file.",
        );
      }
    }
  }
}

// Allow the command to proceed
process.exit(0);

View File

@@ -1,26 +0,0 @@
{
"hooks": {
"PreToolUse": [
{
"matcher": "Bash",
"hooks": [
{
"type": "command",
"command": "\"$CLAUDE_PROJECT_DIR\"/.claude/hooks/pre-bash-zig-build.js"
}
]
}
],
"PostToolUse": [
{
"matcher": "Write|Edit|MultiEdit",
"hooks": [
{
"type": "command",
"command": "\"$CLAUDE_PROJECT_DIR\"/.claude/hooks/post-edit-zig-format.js"
}
]
}
]
}
}

View File

@@ -70,7 +70,24 @@ jobs:
- name: Update SQLite if needed
if: success() && steps.check-version.outputs.current_num < steps.check-version.outputs.latest_num
run: |
./scripts/update-sqlite-amalgamation.sh ${{ steps.check-version.outputs.latest_num }} ${{ steps.check-version.outputs.latest_year }}
set -euo pipefail
TEMP_DIR=$(mktemp -d)
cd $TEMP_DIR
echo "Downloading from: https://sqlite.org/${{ steps.check-version.outputs.latest_year }}/sqlite-amalgamation-${{ steps.check-version.outputs.latest_num }}.zip"
# Download and extract latest version
wget "https://sqlite.org/${{ steps.check-version.outputs.latest_year }}/sqlite-amalgamation-${{ steps.check-version.outputs.latest_num }}.zip"
unzip "sqlite-amalgamation-${{ steps.check-version.outputs.latest_num }}.zip"
cd "sqlite-amalgamation-${{ steps.check-version.outputs.latest_num }}"
# Add header comment and copy files
echo "// clang-format off" > $GITHUB_WORKSPACE/src/bun.js/bindings/sqlite/sqlite3.c
cat sqlite3.c >> $GITHUB_WORKSPACE/src/bun.js/bindings/sqlite/sqlite3.c
echo "// clang-format off" > $GITHUB_WORKSPACE/src/bun.js/bindings/sqlite/sqlite3_local.h
cat sqlite3.h >> $GITHUB_WORKSPACE/src/bun.js/bindings/sqlite/sqlite3_local.h
- name: Create Pull Request
if: success() && steps.check-version.outputs.current_num < steps.check-version.outputs.latest_num

View File

@@ -45,8 +45,3 @@ jobs:
env:
VSCE_PAT: ${{ secrets.VSCODE_EXTENSION }}
working-directory: packages/bun-vscode/extension
- uses: actions/upload-artifact@v4
with:
name: bun-vscode-${{ github.event.inputs.version }}.vsix
path: packages/bun-vscode/extension/bun-vscode-${{ github.event.inputs.version }}.vsix

4
.gitignore vendored
View File

@@ -1,9 +1,7 @@
.claude/settings.local.json
.DS_Store
.env
.envrc
.eslintcache
.gdb_history
.idea
.next
.ninja_deps
@@ -191,4 +189,4 @@ scratch*.{js,ts,tsx,cjs,mjs}
scripts/lldb-inline
# We regenerate these in all the build scripts
cmake/sources/*.txt
cmake/sources/*.txt

View File

@@ -19,12 +19,6 @@
"options": {
"printWidth": 80
}
},
{
"files": ["src/codegen/bindgenv2/**/*.ts", "*.bindv2.ts"],
"options": {
"printWidth": 100
}
}
]
}

4
.vscode/launch.json generated vendored
View File

@@ -26,7 +26,7 @@
// "BUN_JSC_dumpSimulatedThrows": "1",
// "BUN_JSC_unexpectedExceptionStackTraceLimit": "20",
// "BUN_DESTRUCT_VM_ON_EXIT": "1",
// "ASAN_OPTIONS": "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1:abort_on_error=1",
// "ASAN_OPTIONS": "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1",
// "LSAN_OPTIONS": "malloc_context_size=100:print_suppressions=1:suppressions=${workspaceFolder}/test/leaksan.supp",
},
"console": "internalConsole",
@@ -69,7 +69,7 @@
// "BUN_JSC_dumpSimulatedThrows": "1",
// "BUN_JSC_unexpectedExceptionStackTraceLimit": "20",
// "BUN_DESTRUCT_VM_ON_EXIT": "1",
// "ASAN_OPTIONS": "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1:abort_on_error=1",
// "ASAN_OPTIONS": "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1",
// "LSAN_OPTIONS": "malloc_context_size=100:print_suppressions=1:suppressions=${workspaceFolder}/test/leaksan.supp",
},
"console": "internalConsole",

View File

@@ -143,6 +143,19 @@ When implementing JavaScript classes in C++:
3. Add iso subspaces for classes with C++ fields
4. Cache structures in ZigGlobalObject
## Development Workflow
### Code Formatting
- `bun run prettier` - Format JS/TS files
- `bun run zig-format` - Format Zig files
- `bun run clang-format` - Format C++ files
### Watching for Changes
- `bun run watch` - Incremental Zig compilation with error checking
- `bun run watch-windows` - Windows-specific watch mode
### Code Generation
Code generation happens automatically as part of the build process. The main scripts are:
@@ -164,6 +177,47 @@ Built-in JavaScript modules use special syntax and are organized as:
- `internal/` - Internal modules not exposed to users
- `builtins/` - Core JavaScript builtins (streams, console, etc.)
### Special Syntax in Built-in Modules
1. **`$` prefix** - Access to private properties and JSC intrinsics:
```js
const arr = $Array.from(...); // Private global
map.$set(...); // Private method
const arr2 = $newArrayWithSize(5); // JSC intrinsic
```
2. **`require()`** - Must use string literals, resolved at compile time:
```js
const fs = require("fs"); // Directly loads by numeric ID
```
3. **Debug helpers**:
- `$debug()` - Like console.log but stripped in release builds
- `$assert()` - Assertions stripped in release builds
- `if($debug) {}` - Check if debug env var is set
4. **Platform detection**: `process.platform` and `process.arch` are inlined and dead-code eliminated
5. **Export syntax**: Use `export default` which gets converted to a return statement:
```js
export default {
readFile,
writeFile,
};
```
Note: These are NOT ES modules. The preprocessor converts `$` to `@` (JSC's actual syntax) and handles the special functions.
## CI
Bun uses BuildKite for CI. To get the status of a PR, you can use the following command:
```bash
bun ci
```
## Important Development Notes
1. **Never use `bun test` or `bun <file>` directly** - always use `bun bd test` or `bun bd <command>`. `bun bd` compiles & runs the debug build.
@@ -175,6 +229,19 @@ Built-in JavaScript modules use special syntax and are organized as:
7. **Avoid shell commands** - Don't use `find` or `grep` in tests; use Bun's Glob and built-in tools
8. **Memory management** - In Zig code, be careful with allocators and use defer for cleanup
9. **Cross-platform** - Run `bun run zig:check-all` to compile the Zig code on all platforms when making platform-specific changes
10. **Debug builds** - Use `BUN_DEBUG_QUIET_LOGS=1` to disable debug logging, or `BUN_DEBUG_<scopeName>=1` to enable specific `Output.scoped(.${scopeName}, .visible)`s
10. **Debug builds** - Use `BUN_DEBUG_QUIET_LOGS=1` to disable debug logging, or `BUN_DEBUG_<scope>=1` to enable specific scopes
11. **Be humble & honest** - NEVER overstate what you got done or what actually works in commits, PRs or in messages to the user.
12. **Branch names must start with `claude/`** - This is a requirement for the CI to work.
## Key APIs and Features
### Bun-Specific APIs
- **Bun.serve()** - High-performance HTTP server
- **Bun.spawn()** - Process spawning with better performance than Node.js
- **Bun.file()** - Fast file I/O operations
- **Bun.write()** - Unified API for writing to files, stdout, etc.
- **Bun.$ (Shell)** - Cross-platform shell scripting
- **Bun.SQLite** - Native SQLite integration
- **Bun.FFI** - Call native libraries from JavaScript
- **Bun.Glob** - Fast file pattern matching

View File

@@ -21,7 +21,7 @@ $ sudo pacman -S base-devel ccache cmake git go libiconv libtool make ninja pkg-
```
```bash#Fedora
$ sudo dnf install cargo clang19 llvm19 lld19 ccache cmake git golang libtool ninja-build pkg-config rustc ruby libatomic-static libstdc++-static sed unzip which libicu-devel 'perl(Math::BigInt)'
$ sudo dnf install cargo ccache cmake git golang libtool ninja-build pkg-config rustc ruby libatomic-static libstdc++-static sed unzip which libicu-devel 'perl(Math::BigInt)'
```
```bash#openSUSE Tumbleweed

2
LATEST
View File

@@ -1 +1 @@
1.2.23
1.2.22

View File

@@ -1,6 +1,6 @@
const isBun = typeof globalThis?.Bun?.sql !== "undefined";
import postgres from "postgres";
const sql = isBun ? Bun.sql : postgres();
const sql = isBun ? Bun.sql : postgres;
// Create the table if it doesn't exist
await sql`

View File

@@ -48,8 +48,6 @@ const BunBuildOptions = struct {
/// enable debug logs in release builds
enable_logs: bool = false,
enable_asan: bool,
enable_valgrind: bool,
use_mimalloc: bool,
tracy_callstack_depth: u16,
reported_nodejs_version: Version,
/// To make iterating on some '@embedFile's faster, we load them at runtime
@@ -69,7 +67,6 @@ const BunBuildOptions = struct {
cached_options_module: ?*Module = null,
windows_shim: ?WindowsShim = null,
llvm_codegen_threads: ?u32 = null,
pub fn isBaseline(this: *const BunBuildOptions) bool {
return this.arch.isX86() and
@@ -97,8 +94,6 @@ const BunBuildOptions = struct {
opts.addOption(bool, "baseline", this.isBaseline());
opts.addOption(bool, "enable_logs", this.enable_logs);
opts.addOption(bool, "enable_asan", this.enable_asan);
opts.addOption(bool, "enable_valgrind", this.enable_valgrind);
opts.addOption(bool, "use_mimalloc", this.use_mimalloc);
opts.addOption([]const u8, "reported_nodejs_version", b.fmt("{}", .{this.reported_nodejs_version}));
opts.addOption(bool, "zig_self_hosted_backend", this.no_llvm);
opts.addOption(bool, "override_no_export_cpp_apis", this.override_no_export_cpp_apis);
@@ -218,21 +213,26 @@ pub fn build(b: *Build) !void {
var build_options = BunBuildOptions{
.target = target,
.optimize = optimize,
.os = os,
.arch = arch,
.codegen_path = codegen_path,
.codegen_embed = codegen_embed,
.no_llvm = no_llvm,
.override_no_export_cpp_apis = override_no_export_cpp_apis,
.version = try Version.parse(bun_version),
.canary_revision = canary: {
const rev = b.option(u32, "canary", "Treat this as a canary build") orelse 0;
break :canary if (rev == 0) null else rev;
},
.reported_nodejs_version = try Version.parse(
b.option([]const u8, "reported_nodejs_version", "Reported Node.js version") orelse
"0.0.0-unset",
),
.sha = sha: {
const sha_buildoption = b.option([]const u8, "sha", "Force the git sha");
const sha_github = b.graph.env_map.get("GITHUB_SHA");
@@ -268,12 +268,10 @@ pub fn build(b: *Build) !void {
break :sha sha;
},
.tracy_callstack_depth = b.option(u16, "tracy_callstack_depth", "") orelse 10,
.enable_logs = b.option(bool, "enable_logs", "Enable logs in release") orelse false,
.enable_asan = b.option(bool, "enable_asan", "Enable asan") orelse false,
.enable_valgrind = b.option(bool, "enable_valgrind", "Enable valgrind") orelse false,
.use_mimalloc = b.option(bool, "use_mimalloc", "Use mimalloc as default allocator") orelse false,
.llvm_codegen_threads = b.option(u32, "llvm_codegen_threads", "Number of threads to use for LLVM codegen") orelse 1,
};
// zig build obj
@@ -502,8 +500,6 @@ fn addMultiCheck(
.codegen_path = root_build_options.codegen_path,
.no_llvm = root_build_options.no_llvm,
.enable_asan = root_build_options.enable_asan,
.enable_valgrind = root_build_options.enable_valgrind,
.use_mimalloc = root_build_options.use_mimalloc,
.override_no_export_cpp_apis = root_build_options.override_no_export_cpp_apis,
};
@@ -609,15 +605,7 @@ fn configureObj(b: *Build, opts: *BunBuildOptions, obj: *Compile) void {
// Object options
obj.use_llvm = !opts.no_llvm;
obj.use_lld = if (opts.os == .mac or opts.os == .linux) false else !opts.no_llvm;
if (opts.optimize == .Debug) {
if (@hasField(std.meta.Child(@TypeOf(obj)), "llvm_codegen_threads"))
obj.llvm_codegen_threads = opts.llvm_codegen_threads orelse 0;
}
obj.no_link_obj = true;
obj.use_lld = if (opts.os == .mac) false else !opts.no_llvm;
if (opts.enable_asan and !enableFastBuild(b)) {
if (@hasField(Build.Module, "sanitize_address")) {
obj.root_module.sanitize_address = true;
@@ -648,7 +636,7 @@ fn configureObj(b: *Build, opts: *BunBuildOptions, obj: *Compile) void {
obj.link_function_sections = true;
obj.link_data_sections = true;
if (opts.optimize == .Debug and opts.enable_valgrind) {
if (opts.optimize == .Debug) {
obj.root_module.valgrind = true;
}
}
@@ -724,7 +712,6 @@ fn addInternalImports(b: *Build, mod: *Module, opts: *BunBuildOptions) void {
// Generated code exposed as individual modules.
inline for (.{
.{ .file = "ZigGeneratedClasses.zig", .import = "ZigGeneratedClasses" },
.{ .file = "bindgen_generated.zig", .import = "bindgen_generated" },
.{ .file = "ResolvedSourceTag.zig", .import = "ResolvedSourceTag" },
.{ .file = "ErrorCode.zig", .import = "ErrorCode" },
.{ .file = "runtime.out.js", .enable = opts.shouldEmbedCode() },
@@ -758,7 +745,6 @@ fn addInternalImports(b: *Build, mod: *Module, opts: *BunBuildOptions) void {
.{ .file = "node-fallbacks/url.js", .enable = opts.shouldEmbedCode() },
.{ .file = "node-fallbacks/util.js", .enable = opts.shouldEmbedCode() },
.{ .file = "node-fallbacks/zlib.js", .enable = opts.shouldEmbedCode() },
.{ .file = "eval/feedback.ts", .enable = opts.shouldEmbedCode() },
}) |entry| {
if (!@hasField(@TypeOf(entry), "enable") or entry.enable) {
const path = b.pathJoin(&.{ opts.codegen_path, entry.file });

View File

@@ -8,14 +8,14 @@
"@lezer/cpp": "^1.1.3",
"@types/bun": "workspace:*",
"bun-tracestrings": "github:oven-sh/bun.report#912ca63e26c51429d3e6799aa2a6ab079b188fd8",
"esbuild": "^0.21.5",
"mitata": "^0.1.14",
"esbuild": "^0.21.4",
"mitata": "^0.1.11",
"peechy": "0.4.34",
"prettier": "^3.6.2",
"prettier-plugin-organize-imports": "^4.3.0",
"prettier": "^3.5.3",
"prettier-plugin-organize-imports": "^4.0.0",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"source-map-js": "^1.2.1",
"source-map-js": "^1.2.0",
"typescript": "5.9.2",
},
},
@@ -284,7 +284,7 @@
"prettier": ["prettier@3.6.2", "", { "bin": { "prettier": "bin/prettier.cjs" } }, "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ=="],
"prettier-plugin-organize-imports": ["prettier-plugin-organize-imports@4.3.0", "", { "peerDependencies": { "prettier": ">=2.0", "typescript": ">=2.9", "vue-tsc": "^2.1.0 || 3" }, "optionalPeers": ["vue-tsc"] }, "sha512-FxFz0qFhyBsGdIsb697f/EkvHzi5SZOhWAjxcx2dLt+Q532bAlhswcXGYB1yzjZ69kW8UoadFBw7TyNwlq96Iw=="],
"prettier-plugin-organize-imports": ["prettier-plugin-organize-imports@4.2.0", "", { "peerDependencies": { "prettier": ">=2.0", "typescript": ">=2.9", "vue-tsc": "^2.1.0 || 3" }, "optionalPeers": ["vue-tsc"] }, "sha512-Zdy27UhlmyvATZi67BTnLcKTo8fm6Oik59Sz6H64PgZJVs6NJpPD1mT240mmJn62c98/QaL+r3kx9Q3gRpDajg=="],
"react": ["react@18.3.1", "", { "dependencies": { "loose-envify": "^1.1.0" } }, "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ=="],

View File

@@ -60,10 +60,10 @@ endif()
# Windows Code Signing Option
if(WIN32)
optionx(ENABLE_WINDOWS_CODESIGNING BOOL "Enable Windows code signing with DigiCert KeyLocker" DEFAULT OFF)
if(ENABLE_WINDOWS_CODESIGNING)
message(STATUS "Windows code signing: ENABLED")
# Check for required environment variables
if(NOT DEFINED ENV{SM_API_KEY})
message(WARNING "SM_API_KEY not set - code signing may fail")
@@ -114,10 +114,8 @@ endif()
if(DEBUG AND ((APPLE AND ARCH STREQUAL "aarch64") OR LINUX))
set(DEFAULT_ASAN ON)
set(DEFAULT_VALGRIND OFF)
else()
set(DEFAULT_ASAN OFF)
set(DEFAULT_VALGRIND OFF)
endif()
optionx(ENABLE_ASAN BOOL "If ASAN support should be enabled" DEFAULT ${DEFAULT_ASAN})
@@ -202,9 +200,4 @@ optionx(USE_WEBKIT_ICU BOOL "Use the ICU libraries from WebKit" DEFAULT ${DEFAUL
optionx(ERROR_LIMIT STRING "Maximum number of errors to show when compiling C++ code" DEFAULT "100")
# This is not an `option` because setting this variable to OFF is experimental
# and unsupported. This replaces the `use_mimalloc` variable previously in
# bun.zig, and enables C++ code to also be aware of the option.
set(USE_MIMALLOC_AS_DEFAULT_ALLOCATOR ON)
list(APPEND CMAKE_ARGS -DCMAKE_EXPORT_COMPILE_COMMANDS=ON)

View File

@@ -31,14 +31,6 @@
"output": "BindgenSources.txt",
"paths": ["src/**/*.bind.ts"]
},
{
"output": "BindgenV2Sources.txt",
"paths": ["src/**/*.bindv2.ts"]
},
{
"output": "BindgenV2InternalSources.txt",
"paths": ["src/codegen/bindgenv2/**/*.ts"]
},
{
"output": "ZigSources.txt",
"paths": ["src/**/*.zig"]

View File

@@ -2,8 +2,6 @@ include(PathUtils)
if(DEBUG)
set(bun bun-debug)
elseif(ENABLE_ASAN AND ENABLE_VALGRIND)
set(bun bun-asan-valgrind)
elseif(ENABLE_ASAN)
set(bun bun-asan)
elseif(ENABLE_VALGRIND)
@@ -44,14 +42,6 @@ else()
set(CONFIGURE_DEPENDS "")
endif()
set(LLVM_ZIG_CODEGEN_THREADS 0)
# This makes the build slower, so we turn it off for now.
# if (DEBUG)
# include(ProcessorCount)
# ProcessorCount(CPU_COUNT)
# set(LLVM_ZIG_CODEGEN_THREADS ${CPU_COUNT})
# endif()
# --- Dependencies ---
set(BUN_DEPENDENCIES
@@ -395,54 +385,6 @@ register_command(
${BUN_BAKE_RUNTIME_OUTPUTS}
)
set(BUN_BINDGENV2_SCRIPT ${CWD}/src/codegen/bindgenv2/script.ts)
absolute_sources(BUN_BINDGENV2_SOURCES ${CWD}/cmake/sources/BindgenV2Sources.txt)
# These sources include the script itself.
absolute_sources(BUN_BINDGENV2_INTERNAL_SOURCES
${CWD}/cmake/sources/BindgenV2InternalSources.txt)
string(REPLACE ";" "," BUN_BINDGENV2_SOURCES_COMMA_SEPARATED
"${BUN_BINDGENV2_SOURCES}")
execute_process(
COMMAND ${BUN_EXECUTABLE} run ${BUN_BINDGENV2_SCRIPT}
--command=list-outputs
--sources=${BUN_BINDGENV2_SOURCES_COMMA_SEPARATED}
--codegen-path=${CODEGEN_PATH}
RESULT_VARIABLE bindgen_result
OUTPUT_VARIABLE bindgen_outputs
)
if(${bindgen_result})
message(FATAL_ERROR "bindgenv2/script.ts exited with non-zero status")
endif()
foreach(output IN LISTS bindgen_outputs)
if(output MATCHES "\.cpp$")
list(APPEND BUN_BINDGENV2_CPP_OUTPUTS ${output})
elseif(output MATCHES "\.zig$")
list(APPEND BUN_BINDGENV2_ZIG_OUTPUTS ${output})
else()
message(FATAL_ERROR "unexpected bindgen output: [${output}]")
endif()
endforeach()
register_command(
TARGET
bun-bindgen-v2
COMMENT
"Generating bindings (v2)"
COMMAND
${BUN_EXECUTABLE} run ${BUN_BINDGENV2_SCRIPT}
--command=generate
--codegen-path=${CODEGEN_PATH}
--sources=${BUN_BINDGENV2_SOURCES_COMMA_SEPARATED}
SOURCES
${BUN_BINDGENV2_SOURCES}
${BUN_BINDGENV2_INTERNAL_SOURCES}
OUTPUTS
${BUN_BINDGENV2_CPP_OUTPUTS}
${BUN_BINDGENV2_ZIG_OUTPUTS}
)
set(BUN_BINDGEN_SCRIPT ${CWD}/src/codegen/bindgen.ts)
absolute_sources(BUN_BINDGEN_SOURCES ${CWD}/cmake/sources/BindgenSources.txt)
@@ -621,7 +563,6 @@ set(BUN_ZIG_GENERATED_SOURCES
${BUN_ZIG_GENERATED_CLASSES_OUTPUTS}
${BUN_JAVASCRIPT_OUTPUTS}
${BUN_CPP_OUTPUTS}
${BUN_BINDGENV2_ZIG_OUTPUTS}
)
# In debug builds, these are not embedded, but rather referenced at runtime.
@@ -635,13 +576,7 @@ if (TEST)
set(BUN_ZIG_OUTPUT ${BUILD_PATH}/bun-test.o)
set(ZIG_STEPS test)
else()
if (LLVM_ZIG_CODEGEN_THREADS GREATER 1)
foreach(i RANGE ${LLVM_ZIG_CODEGEN_THREADS})
list(APPEND BUN_ZIG_OUTPUT ${BUILD_PATH}/bun-zig.${i}.o)
endforeach()
else()
set(BUN_ZIG_OUTPUT ${BUILD_PATH}/bun-zig.o)
endif()
set(BUN_ZIG_OUTPUT ${BUILD_PATH}/bun-zig.o)
set(ZIG_STEPS obj)
endif()
@@ -684,9 +619,6 @@ register_command(
-Dcpu=${ZIG_CPU}
-Denable_logs=$<IF:$<BOOL:${ENABLE_LOGS}>,true,false>
-Denable_asan=$<IF:$<BOOL:${ENABLE_ZIG_ASAN}>,true,false>
-Denable_valgrind=$<IF:$<BOOL:${ENABLE_VALGRIND}>,true,false>
-Duse_mimalloc=$<IF:$<BOOL:${USE_MIMALLOC_AS_DEFAULT_ALLOCATOR}>,true,false>
-Dllvm_codegen_threads=${LLVM_ZIG_CODEGEN_THREADS}
-Dversion=${VERSION}
-Dreported_nodejs_version=${NODEJS_VERSION}
-Dcanary=${CANARY_REVISION}
@@ -762,7 +694,6 @@ list(APPEND BUN_CPP_SOURCES
${BUN_JAVASCRIPT_OUTPUTS}
${BUN_OBJECT_LUT_OUTPUTS}
${BUN_BINDGEN_CPP_OUTPUTS}
${BUN_BINDGENV2_CPP_OUTPUTS}
)
if(WIN32)
@@ -900,10 +831,6 @@ if(WIN32)
)
endif()
if(USE_MIMALLOC_AS_DEFAULT_ALLOCATOR)
target_compile_definitions(${bun} PRIVATE USE_MIMALLOC=1)
endif()
target_compile_definitions(${bun} PRIVATE
_HAS_EXCEPTIONS=0
LIBUS_USE_OPENSSL=1
@@ -959,8 +886,12 @@ if(NOT WIN32)
endif()
if(ENABLE_ASAN)
target_compile_options(${bun} PUBLIC -fsanitize=address)
target_link_libraries(${bun} PUBLIC -fsanitize=address)
target_compile_options(${bun} PUBLIC
-fsanitize=address
)
target_link_libraries(${bun} PUBLIC
-fsanitize=address
)
endif()
target_compile_options(${bun} PUBLIC
@@ -999,8 +930,12 @@ if(NOT WIN32)
)
if(ENABLE_ASAN)
target_compile_options(${bun} PUBLIC -fsanitize=address)
target_link_libraries(${bun} PUBLIC -fsanitize=address)
target_compile_options(${bun} PUBLIC
-fsanitize=address
)
target_link_libraries(${bun} PUBLIC
-fsanitize=address
)
endif()
endif()
else()
@@ -1034,7 +969,6 @@ if(WIN32)
/delayload:WSOCK32.dll
/delayload:ADVAPI32.dll
/delayload:IPHLPAPI.dll
/delayload:CRYPT32.dll
)
endif()
endif()
@@ -1076,7 +1010,6 @@ if(LINUX)
-Wl,--wrap=exp2
-Wl,--wrap=expf
-Wl,--wrap=fcntl64
-Wl,--wrap=gettid
-Wl,--wrap=log
-Wl,--wrap=log2
-Wl,--wrap=log2f
@@ -1128,7 +1061,7 @@ if(LINUX)
)
endif()
if (NOT DEBUG AND NOT ENABLE_ASAN AND NOT ENABLE_VALGRIND)
if (NOT DEBUG AND NOT ENABLE_ASAN)
target_link_options(${bun} PUBLIC
-Wl,-icf=safe
)
@@ -1255,7 +1188,6 @@ if(WIN32)
ntdll
userenv
dbghelp
crypt32
wsock32 # ws2_32 required by TransmitFile aka sendfile on windows
delayimp.lib
)
@@ -1431,20 +1363,12 @@ if(NOT BUN_CPP_ONLY)
if(ENABLE_BASELINE)
set(bunTriplet ${bunTriplet}-baseline)
endif()
if (ENABLE_ASAN AND ENABLE_VALGRIND)
set(bunTriplet ${bunTriplet}-asan-valgrind)
set(bunPath ${bunTriplet})
elseif (ENABLE_VALGRIND)
set(bunTriplet ${bunTriplet}-valgrind)
set(bunPath ${bunTriplet})
elseif(ENABLE_ASAN)
if(ENABLE_ASAN)
set(bunTriplet ${bunTriplet}-asan)
set(bunPath ${bunTriplet})
else()
string(REPLACE bun ${bunTriplet} bunPath ${bun})
endif()
set(bunFiles ${bunExe} features.json)
if(WIN32)
list(APPEND bunFiles ${bun}.pdb)

View File

@@ -4,8 +4,7 @@ register_repository(
REPOSITORY
libuv/libuv
COMMIT
# Corresponds to v1.51.0
5152db2cbfeb5582e9c27c5ea1dba2cd9e10759b
da527d8d2a908b824def74382761566371439003
)
if(WIN32)

View File

@@ -181,23 +181,12 @@ function(generate_dependency_versions_header)
string(APPEND HEADER_CONTENT "}\n")
string(APPEND HEADER_CONTENT "#endif\n\n")
string(APPEND HEADER_CONTENT "#endif // BUN_DEPENDENCY_VERSIONS_H\n")
# Write the header file only if content has changed
# Write the header file
set(OUTPUT_FILE "${CMAKE_BINARY_DIR}/bun_dependency_versions.h")
# Read existing content if file exists
set(EXISTING_CONTENT "")
if(EXISTS "${OUTPUT_FILE}")
file(READ "${OUTPUT_FILE}" EXISTING_CONTENT)
endif()
# Only write if content is different
if(NOT "${EXISTING_CONTENT}" STREQUAL "${HEADER_CONTENT}")
file(WRITE "${OUTPUT_FILE}" "${HEADER_CONTENT}")
message(STATUS "Updated dependency versions header: ${OUTPUT_FILE}")
else()
message(STATUS "Dependency versions header unchanged: ${OUTPUT_FILE}")
endif()
file(WRITE "${OUTPUT_FILE}" "${HEADER_CONTENT}")
message(STATUS "Generated dependency versions header: ${OUTPUT_FILE}")
# Also create a more detailed version for debugging
set(DEBUG_OUTPUT_FILE "${CMAKE_BINARY_DIR}/bun_dependency_versions_debug.txt")

View File

@@ -131,9 +131,6 @@ else()
find_llvm_command(CMAKE_RANLIB llvm-ranlib)
if(LINUX)
find_llvm_command(LLD_PROGRAM ld.lld)
# Ensure vendor dependencies use lld instead of ld
list(APPEND CMAKE_ARGS -DCMAKE_EXE_LINKER_FLAGS=--ld-path=${LLD_PROGRAM})
list(APPEND CMAKE_ARGS -DCMAKE_SHARED_LINKER_FLAGS=--ld-path=${LLD_PROGRAM})
endif()
if(APPLE)
find_llvm_command(CMAKE_DSYMUTIL dsymutil)

View File

@@ -2,7 +2,7 @@ option(WEBKIT_VERSION "The version of WebKit to use")
option(WEBKIT_LOCAL "If a local version of WebKit should be used instead of downloading")
if(NOT WEBKIT_VERSION)
set(WEBKIT_VERSION 6d0f3aac0b817cc01a846b3754b21271adedac12)
set(WEBKIT_VERSION 495c25e24927ba03277ae225cd42811588d03ff8)
endif()
string(SUBSTRING ${WEBKIT_VERSION} 0 16 WEBKIT_VERSION_PREFIX)

View File

@@ -20,7 +20,7 @@ else()
unsupported(CMAKE_SYSTEM_NAME)
endif()
set(ZIG_COMMIT "55fdbfa0c86be86b68d43a4ba761e6909eb0d7b2")
set(ZIG_COMMIT "e0b7c318f318196c5f81fdf3423816a7b5bb3112")
optionx(ZIG_TARGET STRING "The zig target to use" DEFAULT ${DEFAULT_ZIG_TARGET})
if(CMAKE_BUILD_TYPE STREQUAL "Release")
@@ -90,7 +90,6 @@ register_command(
-DZIG_PATH=${ZIG_PATH}
-DZIG_COMMIT=${ZIG_COMMIT}
-DENABLE_ASAN=${ENABLE_ASAN}
-DENABLE_VALGRIND=${ENABLE_VALGRIND}
-DZIG_COMPILER_SAFE=${ZIG_COMPILER_SAFE}
-P ${CWD}/cmake/scripts/DownloadZig.cmake
SOURCES

View File

@@ -122,7 +122,7 @@
},
{
"name": "reporter",
"description": "Test output reporter format. Available: 'junit' (requires --reporter-outfile). Default: console output.",
"description": "Specify the test reporter. Currently --reporter=junit is the only supported format.",
"hasValue": true,
"valueType": "val",
"required": false,
@@ -130,7 +130,7 @@
},
{
"name": "reporter-outfile",
"description": "Output file path for the reporter format (required with --reporter).",
"description": "The output file used for the format from --reporter.",
"hasValue": true,
"valueType": "val",
"required": false,

View File

@@ -233,7 +233,6 @@ In addition to the standard fetch options, Bun provides several extensions:
```ts
const response = await fetch("http://example.com", {
// Control automatic response decompression (default: true)
// Supports gzip, deflate, brotli (br), and zstd
decompress: true,
// Disable connection reuse for this request
@@ -340,7 +339,7 @@ This will print the request and response headers to your terminal:
[fetch] > User-Agent: Bun/$BUN_LATEST_VERSION
[fetch] > Accept: */*
[fetch] > Host: example.com
[fetch] > Accept-Encoding: gzip, deflate, br, zstd
[fetch] > Accept-Encoding: gzip, deflate, br
[fetch] < 200 OK
[fetch] < Content-Encoding: gzip

View File

@@ -155,24 +155,3 @@ const glob = new Glob("\\!index.ts");
glob.match("!index.ts"); // => true
glob.match("index.ts"); // => false
```
## Node.js `fs.glob()` compatibility
Bun also implements Node.js's `fs.glob()` functions with additional features:
```ts
import { glob, globSync, promises } from "node:fs";
// Array of patterns
const files = await promises.glob(["**/*.ts", "**/*.js"]);
// Exclude patterns
const filtered = await promises.glob("**/*", {
exclude: ["node_modules/**", "*.test.*"],
});
```
All three functions (`fs.glob()`, `fs.globSync()`, `fs.promises.glob()`) support:
- Array of patterns as the first argument
- `exclude` option to filter results

View File

@@ -184,7 +184,6 @@ Bun.hash.rapidhash("data", 1234);
- `"blake2b256"`
- `"blake2b512"`
- `"blake2s256"`
- `"md4"`
- `"md5"`
- `"ripemd160"`

View File

@@ -88,9 +88,6 @@ await redis.set("user:1:name", "Alice");
// Get a key
const name = await redis.get("user:1:name");
// Get a key as Uint8Array
const buffer = await redis.getBuffer("user:1:name");
// Delete a key
await redis.del("user:1:name");
@@ -135,10 +132,6 @@ await redis.hmset("user:123", [
const userFields = await redis.hmget("user:123", ["name", "email"]);
console.log(userFields); // ["Alice", "alice@example.com"]
// Get single field from hash (returns value directly, null if missing)
const userName = await redis.hget("user:123", "name");
console.log(userName); // "Alice"
// Increment a numeric field in a hash
await redis.hincrby("user:123", "visits", 1);
@@ -168,102 +161,6 @@ const randomTag = await redis.srandmember("tags");
const poppedTag = await redis.spop("tags");
```
## Pub/Sub
Bun provides native bindings for the [Redis
Pub/Sub](https://redis.io/docs/latest/develop/pubsub/) protocol. **New in Bun
1.2.23**
{% callout %}
**🚧** — The Redis Pub/Sub feature is experimental. Although we expect it to be
stable, we're actively looking for feedback and areas for improvement.
{% /callout %}
### Basic Usage
To get started publishing messages, you can set up a publisher in
`publisher.ts`:
```typescript#publisher.ts
import { RedisClient } from "bun";
const writer = new RedisClient("redis://localhost:6379");
await writer.connect();
writer.publish("general", "Hello everyone!");
writer.close();
```
In another file, create the subscriber in `subscriber.ts`:
```typescript#subscriber.ts
import { RedisClient } from "bun";
const listener = new RedisClient("redis://localhost:6379");
await listener.connect();
await listener.subscribe("general", (message, channel) => {
console.log(`Received: ${message}`);
});
```
In one shell, run your subscriber:
```bash
bun run subscriber.ts
```
and, in another, run your publisher:
```bash
bun run publisher.ts
```
{% callout %}
**Note:** The subscription mode takes over the `RedisClient` connection. A
client with subscriptions can only call `RedisClient.prototype.subscribe()`. In
other words, applications which need to message Redis need a separate
connection, acquirable through `.duplicate()`:
```typescript
import { RedisClient } from "bun";
const redis = new RedisClient("redis://localhost:6379");
await redis.connect();
const subscriber = await redis.duplicate();
await subscriber.subscribe("foo", () => {});
await redis.set("bar", "baz");
```
{% /callout %}
### Publishing
Publishing messages is done through the `publish()` method:
```typescript
await client.publish(channelName, message);
```
### Subscriptions
The Bun `RedisClient` allows you to subscribe to channels through the
`.subscribe()` method:
```typescript
await client.subscribe(channel, (message, channel) => {});
```
You can unsubscribe through the `.unsubscribe()` method:
```typescript
await client.unsubscribe(); // Unsubscribe from all channels.
await client.unsubscribe(channel); // Unsubscribe a particular channel.
await client.unsubscribe(channel, listener); // Unsubscribe a particular listener.
```
## Advanced Usage
### Command Execution and Pipelining
@@ -585,10 +482,9 @@ When connecting to Redis servers using older versions that don't support RESP3,
Current limitations of the Redis client we are planning to address in future versions:
- [ ] No dedicated API for pub/sub functionality (though you can use the raw command API)
- [ ] Transactions (MULTI/EXEC) must be done through raw commands for now
- [ ] Streams are supported but without dedicated methods
- [ ] Pub/Sub does not currently support binary data, nor pattern-based
subscriptions.
Unsupported features:

View File

@@ -377,22 +377,6 @@ const users = [
await sql`SELECT * FROM users WHERE id IN ${sql(users, "id")}`;
```
### `sql.array` helper
The `sql.array` helper creates PostgreSQL array literals from JavaScript arrays:
```ts
// Create array literals for PostgreSQL
await sql`INSERT INTO tags (items) VALUES (${sql.array(["red", "blue", "green"])})`;
// Generates: INSERT INTO tags (items) VALUES (ARRAY['red', 'blue', 'green'])
// Works with numeric arrays too
await sql`SELECT * FROM products WHERE ids = ANY(${sql.array([1, 2, 3])})`;
// Generates: SELECT * FROM products WHERE ids = ANY(ARRAY[1, 2, 3])
```
**Note**: `sql.array` is PostgreSQL-only. Multi-dimensional arrays and NULL elements may not be supported yet.
## `sql``.simple()`
The PostgreSQL wire protocol supports two types of queries: "simple" and "extended". Simple queries can contain multiple statements but don't support parameters, while extended queries (the default) support parameters but only allow one statement.

View File

@@ -663,8 +663,6 @@ class Statement<Params, ReturnType> {
toString(): string; // serialize to SQL
columnNames: string[]; // the column names of the result set
columnTypes: string[]; // types based on actual values in first row (call .get()/.all() first)
declaredTypes: (string | null)[]; // types from CREATE TABLE schema (call .get()/.all() first)
paramsCount: number; // the number of parameters expected by the statement
native: any; // the native object representing the statement

View File

@@ -28,20 +28,6 @@ for await (const chunk of stream) {
}
```
`ReadableStream` also provides convenience methods for consuming the entire stream:
```ts
const stream = new ReadableStream({
start(controller) {
controller.enqueue("hello world");
controller.close();
},
});
const data = await stream.text(); // => "hello world"
// Also available: .json(), .bytes(), .blob()
```
## Direct `ReadableStream`
Bun implements an optimized version of `ReadableStream` that avoid unnecessary data copying & queue management logic. With a traditional `ReadableStream`, chunks of data are _enqueued_. Each chunk is copied into a queue, where it sits until the stream is ready to send more data.

View File

@@ -602,40 +602,6 @@ dec.decode(decompressed);
// => "hellohellohello..."
```
## `Bun.zstdCompress()` / `Bun.zstdCompressSync()`
Compresses a `Uint8Array` using the Zstandard algorithm.
```ts
const buf = Buffer.from("hello".repeat(100));
// Synchronous
const compressedSync = Bun.zstdCompressSync(buf);
// Asynchronous
const compressedAsync = await Bun.zstdCompress(buf);
// With compression level (1-22, default: 3)
const compressedLevel = Bun.zstdCompressSync(buf, { level: 6 });
```
## `Bun.zstdDecompress()` / `Bun.zstdDecompressSync()`
Decompresses a `Uint8Array` using the Zstandard algorithm.
```ts
const buf = Buffer.from("hello".repeat(100));
const compressed = Bun.zstdCompressSync(buf);
// Synchronous
const decompressedSync = Bun.zstdDecompressSync(compressed);
// Asynchronous
const decompressedAsync = await Bun.zstdDecompress(compressed);
const dec = new TextDecoder();
dec.decode(decompressedSync);
// => "hellohellohello..."
```
## `Bun.inspect()`
Serializes an object to a `string` exactly as it would be printed by `console.log`.

View File

@@ -279,9 +279,6 @@ Bun implements the `WebSocket` class. To create a WebSocket client that connects
```ts
const socket = new WebSocket("ws://localhost:3000");
// With subprotocol negotiation
const socket2 = new WebSocket("ws://localhost:3000", ["soap", "wamp"]);
```
In browsers, the cookies that are currently set on the page will be sent with the WebSocket upgrade request. This is a standard feature of the `WebSocket` API.
@@ -296,17 +293,6 @@ const socket = new WebSocket("ws://localhost:3000", {
});
```
### Client compression
WebSocket clients support permessage-deflate compression. The `extensions` property shows negotiated compression:
```ts
const socket = new WebSocket("wss://echo.websocket.org");
socket.addEventListener("open", () => {
console.log(socket.extensions); // => "permessage-deflate"
});
```
To add event listeners to the socket:
```ts

View File

@@ -282,31 +282,6 @@ const worker = new Worker("./i-am-smol.ts", {
Setting `smol: true` sets `JSC::HeapSize` to be `Small` instead of the default `Large`.
{% /details %}
## Environment Data
Share data between the main thread and workers using `setEnvironmentData()` and `getEnvironmentData()`.
```js
import { setEnvironmentData, getEnvironmentData } from "worker_threads";
// In main thread
setEnvironmentData("config", { apiUrl: "https://api.example.com" });
// In worker
const config = getEnvironmentData("config");
console.log(config); // => { apiUrl: "https://api.example.com" }
```
## Worker Events
Listen for worker creation events using `process.emit()`:
```js
process.on("worker", worker => {
console.log("New worker created:", worker.threadId);
});
```
## `Bun.isMainThread`
You can check if you're in the main thread by checking `Bun.isMainThread`.

View File

@@ -3,7 +3,6 @@ In Bun, YAML is a first-class citizen alongside JSON and TOML.
Bun provides built-in support for YAML files through both runtime APIs and bundler integration. You can
- Parse YAML strings with `Bun.YAML.parse`
- Stringify JavaScript objects to YAML with `Bun.YAML.stringify`
- import & require YAML files as modules at runtime (including hot reloading & watch mode support)
- import & require YAML files in frontend apps via bun's bundler
@@ -105,7 +104,7 @@ const data = Bun.YAML.parse(yaml);
#### Error Handling
`Bun.YAML.parse()` throws an error if the YAML is invalid:
`Bun.YAML.parse()` throws a `SyntaxError` if the YAML is invalid:
```ts
try {
@@ -115,175 +114,6 @@ try {
}
```
### `Bun.YAML.stringify()`
Convert a JavaScript value into a YAML string. The API signature matches `JSON.stringify`:
```ts
YAML.stringify(value, replacer?, space?)
```
- `value`: The value to convert to YAML
- `replacer`: Currently only `null` or `undefined` (function replacers not yet supported)
- `space`: Number of spaces for indentation (e.g., `2`) or a string to use for indentation. **Without this parameter, outputs flow-style (single-line) YAML**
#### Basic Usage
```ts
import { YAML } from "bun";
const data = {
name: "John Doe",
age: 30,
hobbies: ["reading", "coding"],
};
// Without space - outputs flow-style (single-line) YAML
console.log(YAML.stringify(data));
// {name: John Doe,age: 30,hobbies: [reading,coding]}
// With space=2 - outputs block-style (multi-line) YAML
console.log(YAML.stringify(data, null, 2));
// name: John Doe
// age: 30
// hobbies:
// - reading
// - coding
```
#### Output Styles
```ts
const arr = [1, 2, 3];
// Flow style (single-line) - default
console.log(YAML.stringify(arr));
// [1,2,3]
// Block style (multi-line) - with indentation
console.log(YAML.stringify(arr, null, 2));
// - 1
// - 2
// - 3
```
#### String Quoting
`YAML.stringify()` automatically quotes strings when necessary:
- Strings that would be parsed as YAML keywords (`true`, `false`, `null`, `yes`, `no`, etc.)
- Strings that would be parsed as numbers
- Strings containing special characters or escape sequences
```ts
const examples = {
keyword: "true", // Will be quoted: "true"
number: "123", // Will be quoted: "123"
text: "hello world", // Won't be quoted: hello world
empty: "", // Will be quoted: ""
};
console.log(YAML.stringify(examples, null, 2));
// keyword: "true"
// number: "123"
// text: hello world
// empty: ""
```
#### Cycles and References
`YAML.stringify()` automatically detects and handles circular references using YAML anchors and aliases:
```ts
const obj = { name: "root" };
obj.self = obj; // Circular reference
const yamlString = YAML.stringify(obj, null, 2);
console.log(yamlString);
// &root
// name: root
// self:
// *root
// Objects with shared references
const shared = { id: 1 };
const data = {
first: shared,
second: shared,
};
console.log(YAML.stringify(data, null, 2));
// first:
// &first
// id: 1
// second:
// *first
```
#### Special Values
```ts
// Special numeric values
console.log(YAML.stringify(Infinity)); // .inf
console.log(YAML.stringify(-Infinity)); // -.inf
console.log(YAML.stringify(NaN)); // .nan
console.log(YAML.stringify(0)); // 0
console.log(YAML.stringify(-0)); // -0
// null and undefined
console.log(YAML.stringify(null)); // null
console.log(YAML.stringify(undefined)); // undefined (returns undefined, not a string)
// Booleans
console.log(YAML.stringify(true)); // true
console.log(YAML.stringify(false)); // false
```
#### Complex Objects
```ts
const config = {
server: {
port: 3000,
host: "localhost",
ssl: {
enabled: true,
cert: "/path/to/cert.pem",
key: "/path/to/key.pem",
},
},
database: {
connections: [
{ name: "primary", host: "db1.example.com" },
{ name: "replica", host: "db2.example.com" },
],
},
features: {
auth: true,
"rate-limit": 100, // Keys with special characters are preserved
},
};
const yamlString = YAML.stringify(config, null, 2);
console.log(yamlString);
// server:
// port: 3000
// host: localhost
// ssl:
// enabled: true
// cert: /path/to/cert.pem
// key: /path/to/key.pem
// database:
// connections:
// - name: primary
// host: db1.example.com
// - name: replica
// host: db2.example.com
// features:
// auth: true
// rate-limit: 100
```
## Module Import
### ES Modules

View File

@@ -140,19 +140,6 @@ The `--sourcemap` argument embeds a sourcemap compressed with zstd, so that erro
The `--bytecode` argument enables bytecode compilation. Every time you run JavaScript code in Bun, JavaScriptCore (the engine) will compile your source code into bytecode. We can move this parsing work from runtime to bundle time, saving you startup time.
## Embedding runtime arguments
**`--compile-exec-argv="args"`** - Embed runtime arguments that are available via `process.execArgv`:
```bash
bun build --compile --compile-exec-argv="--smol --user-agent=MyBot" ./app.ts --outfile myapp
```
```js
// In the compiled app
console.log(process.execArgv); // ["--smol", "--user-agent=MyBot"]
```
## Act as the Bun CLI
{% note %}

View File

@@ -313,14 +313,6 @@ $ bun build --entrypoints ./index.ts --outdir ./out --target browser
Depending on the target, Bun will apply different module resolution rules and optimizations.
### Module resolution
Bun supports the `NODE_PATH` environment variable for additional module resolution paths:
```bash
NODE_PATH=./src bun build ./entry.js --outdir ./dist
```
<!-- - Module resolution. For example, when bundling for the browser, Bun will prioritize the `"browser"` export condition when resolving imports. An error will be thrown if any Node.js or Bun built-ins are imported or used, e.g. `node:fs` or `Bun.serve`. -->
{% table %}
@@ -400,55 +392,6 @@ $ bun build ./index.tsx --outdir ./out --format cjs
TODO: document IIFE once we support globalNames.
### `jsx`
Configure JSX transform behavior. Allows fine-grained control over how JSX is compiled.
**Classic runtime example** (uses `factory` and `fragment`):
{% codetabs %}
```ts#JavaScript
await Bun.build({
entrypoints: ['./app.tsx'],
outdir: './out',
jsx: {
factory: 'h',
fragment: 'Fragment',
runtime: 'classic',
},
})
```
```bash#CLI
# JSX configuration is handled via bunfig.toml or tsconfig.json
$ bun build ./app.tsx --outdir ./out
```
{% /codetabs %}
**Automatic runtime example** (uses `importSource`):
{% codetabs %}
```ts#JavaScript
await Bun.build({
entrypoints: ['./app.tsx'],
outdir: './out',
jsx: {
importSource: 'preact',
runtime: 'automatic',
},
})
```
```bash#CLI
# JSX configuration is handled via bunfig.toml or tsconfig.json
$ bun build ./app.tsx --outdir ./out
```
{% /codetabs %}
### `splitting`
Whether to enable code splitting.
@@ -1576,15 +1519,6 @@ interface BuildConfig {
* @default "esm"
*/
format?: "esm" | "cjs" | "iife";
/**
* JSX configuration object for controlling JSX transform behavior
*/
jsx?: {
factory?: string;
fragment?: string;
importSource?: string;
runtime?: "automatic" | "classic";
};
naming?:
| string
| {

View File

@@ -176,21 +176,7 @@ When a `bun.lock` exists and `package.json` hasn't changed, Bun downloads miss
## Platform-specific dependencies?
bun stores normalized `cpu` and `os` values from npm in the lockfile, along with the resolved packages. It skips downloading, extracting, and installing packages disabled for the current target at runtime. This means the lockfile won't change between platforms/architectures even if the packages ultimately installed do change.
### `--cpu` and `--os` flags
You can override the target platform for package selection:
```bash
bun install --cpu=x64 --os=linux
```
This installs packages for the specified platform instead of the current system. Useful for cross-platform builds or when preparing deployments for different environments.
**Accepted values for `--cpu`**: `arm64`, `x64`, `ia32`, `ppc64`, `s390x`
**Accepted values for `--os`**: `linux`, `darwin`, `win32`, `freebsd`, `openbsd`, `sunos`, `aix`
bun stores normalized `cpu` and `os` values from npm in the lockfile, along with the resolved packages. It skips downloading, extracting, and installing packages disabled for the current target at runtime. This means the lockfile won't change between platforms/architectures even if the packages ultimately installed do change.
## Peer dependencies?
@@ -259,91 +245,3 @@ bun uses a binary format for caching NPM registry responses. This loads much fas
You will see these files in `~/.bun/install/cache/*.npm`. The filename pattern is `${hash(packageName)}.npm`. It's a hash so that extra directories don't need to be created for scoped packages.
Bun's usage of `Cache-Control` ignores `Age`. This improves performance, but means bun may be about 5 minutes out of date to receive the latest package version metadata from npm.
## pnpm migration
Bun automatically migrates projects from pnpm to bun. When a `pnpm-lock.yaml` file is detected and no `bun.lock` file exists, Bun will automatically migrate the lockfile to `bun.lock` during installation. The original `pnpm-lock.yaml` file remains unmodified.
```bash
bun install
```
**Note**: Migration only runs when `bun.lock` is absent. There is currently no opt-out flag for pnpm migration.
The migration process handles:
### Lockfile Migration
- Converts `pnpm-lock.yaml` to `bun.lock` format
- Preserves package versions and resolution information
- Maintains dependency relationships and peer dependencies
- Handles patched dependencies with integrity hashes
### Workspace Configuration
When a `pnpm-workspace.yaml` file exists, Bun migrates workspace settings to your root `package.json`:
```yaml
# pnpm-workspace.yaml
packages:
- "apps/*"
- "packages/*"
catalog:
react: ^18.0.0
typescript: ^5.0.0
catalogs:
build:
webpack: ^5.0.0
babel: ^7.0.0
```
The workspace packages list and catalogs are moved to the `workspaces` field in `package.json`:
```json
{
"workspaces": {
"packages": ["apps/*", "packages/*"],
"catalog": {
"react": "^18.0.0",
"typescript": "^5.0.0"
},
"catalogs": {
"build": {
"webpack": "^5.0.0",
"babel": "^7.0.0"
}
}
}
}
```
### Catalog Dependencies
Dependencies using pnpm's `catalog:` protocol are preserved:
```json
{
"dependencies": {
"react": "catalog:",
"webpack": "catalog:build"
}
}
```
### Configuration Migration
The following pnpm configuration is migrated from both `pnpm-lock.yaml` and `pnpm-workspace.yaml`:
- **Overrides**: Moved from `pnpm.overrides` to root-level `overrides` in `package.json`
- **Patched Dependencies**: Moved from `pnpm.patchedDependencies` to root-level `patchedDependencies` in `package.json`
- **Workspace Overrides**: Applied from `pnpm-workspace.yaml` to root `package.json`
### Requirements
- Requires pnpm lockfile version 7 or higher
- Workspace packages must have a `name` field in their `package.json`
- All catalog entries referenced by dependencies must exist in the catalogs definition
After migration, you can safely remove `pnpm-lock.yaml` and `pnpm-workspace.yaml` files.

View File

@@ -63,15 +63,6 @@ $ bunx --bun my-cli # good
$ bunx my-cli --bun # bad
```
## Package flag
**`--package <pkg>` or `-p <pkg>`** - Run binary from specific package. Useful when binary name differs from package name:
```bash
bunx -p renovate renovate-config-validator
bunx --package @angular/cli ng
```
To force bun to always be used with a script, use a shebang.
```

View File

@@ -33,11 +33,6 @@ It creates:
- an entry point which defaults to `index.ts` unless any of `index.{tsx, jsx, js, mts, mjs}` exist or the `package.json` specifies a `module` or `main` field
- a `README.md` file
AI Agent rules (disable with `$BUN_AGENT_RULE_DISABLED=1`):
- a `CLAUDE.md` file when Claude CLI is detected (disable with `CLAUDE_CODE_AGENT_RULE_DISABLED` env var)
- a `.cursor/rules/*.mdc` file to guide [Cursor AI](https://cursor.sh) to use Bun instead of Node.js and npm when Cursor is detected
If you pass `-y` or `--yes`, it will assume you want to continue without asking questions.
At the end, it runs `bun install` to install `@types/bun`.

View File

@@ -44,47 +44,4 @@ You can also pass glob patterns to filter by workspace names:
{% bunOutdatedTerminal glob="{e,t}*" displayGlob="--filter='@monorepo/{types,cli}'" /%}
### Catalog Dependencies
`bun outdated` supports checking catalog dependencies defined in `package.json`:
```sh
$ bun outdated -r
┌────────────────────┬─────────┬─────────┬─────────┬────────────────────────────────┐
│ Package │ Current │ Update │ Latest │ Workspace │
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ body-parser │ 1.19.0 │ 1.19.0 │ 2.2.0 │ @test/shared │
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ cors │ 2.8.0 │ 2.8.0 │ 2.8.5 │ @test/shared │
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ chalk │ 4.0.0 │ 4.0.0 │ 5.6.2 │ @test/utils │
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ uuid │ 8.0.0 │ 8.0.0 │ 13.0.0 │ @test/utils │
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ axios │ 0.21.0 │ 0.21.0 │ 1.12.2 │ catalog (@test/app)
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ lodash │ 4.17.15 │ 4.17.15 │ 4.17.21 │ catalog (@test/app, @test/app)
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ react │ 17.0.0 │ 17.0.0 │ 19.1.1 │ catalog (@test/app)
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ react-dom │ 17.0.0 │ 17.0.0 │ 19.1.1 │ catalog (@test/app)
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ express │ 4.17.0 │ 4.17.0 │ 5.1.0 │ catalog (@test/shared)
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ moment │ 2.24.0 │ 2.24.0 │ 2.30.1 │ catalog (@test/utils)
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ @types/node (dev) │ 14.0.0 │ 14.0.0 │ 24.5.2 │ @test/shared │
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ @types/react (dev) │ 17.0.0 │ 17.0.0 │ 19.1.15 │ catalog:testing (@test/app)
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ eslint (dev) │ 7.0.0 │ 7.0.0 │ 9.36.0 │ catalog:testing (@test/app)
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ typescript (dev) │ 4.9.5 │ 4.9.5 │ 5.9.2 │ catalog:build (@test/app)
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ jest (dev) │ 26.0.0 │ 26.0.0 │ 30.2.0 │ catalog:testing (@test/shared)
├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
│ prettier (dev) │ 2.0.0 │ 2.0.0 │ 3.6.2 │ catalog:build (@test/utils)
└────────────────────┴─────────┴─────────┴─────────┴────────────────────────────────┘
```
{% bunCLIUsage command="outdated" /%}

View File

@@ -82,16 +82,6 @@ The `--dry-run` flag can be used to simulate the publish process without actuall
$ bun publish --dry-run
```
### `--tolerate-republish`
The `--tolerate-republish` flag makes `bun publish` exit with code 0 instead of code 1 when attempting to republish over an existing version number. This is useful in automated workflows where republishing the same version might occur and should not be treated as an error.
```sh
$ bun publish --tolerate-republish
```
Without this flag, attempting to publish a version that already exists will result in an error and exit code 1. With this flag, the command will exit successfully even when trying to republish an existing version.
### `--gzip-level`
Specify the level of gzip compression to use when packing the package. Only applies to `bun publish` without a tarball path argument. Values range from `0` to `9` (default is `9`).

View File

@@ -151,14 +151,6 @@ By default, Bun respects this shebang and executes the script with `node`. Howev
$ bun run --bun vite
```
### `--no-addons`
Disable native addons and use the `node-addons` export condition.
```bash
$ bun --no-addons run server.js
```
### Filtering
In monorepos containing multiple packages, you can use the `--filter` argument to execute scripts in many packages at once.
@@ -174,14 +166,6 @@ will execute `<script>` in both `bar` and `baz`, but not in `foo`.
Find more details in the docs page for [filter](https://bun.com/docs/cli/filter#running-scripts-with-filter).
### `--workspaces`
Run scripts across all workspaces in the monorepo:
```bash
bun run --workspaces test
```
## `bun run -` to pipe code from stdin
`bun run -` lets you read JavaScript, TypeScript, TSX, or JSX from stdin and execute it without writing to a temporary file first.
@@ -228,14 +212,6 @@ $ bun --smol run index.tsx
This causes the garbage collector to run more frequently, which can slow down execution. However, it can be useful in environments with limited memory. Bun automatically adjusts the garbage collector's heap size based on the available memory (accounting for cgroups and other memory limits) with and without the `--smol` flag, so this is mostly useful for cases where you want to make the heap size grow more slowly.
## `--user-agent`
**`--user-agent <string>`** - Set User-Agent header for all `fetch()` requests:
```bash
bun --user-agent "MyBot/1.0" run index.tsx
```
## Resolution order
Absolute paths and paths starting with `./` or `.\\` are always executed as source files. Unless using `bun run`, running a file with an allowed extension will prefer the file over a package.json script.
@@ -247,15 +223,4 @@ When there is a package.json script and a file with the same name, `bun run` pri
3. Binaries from project packages, eg `bun add eslint && bun run eslint`
4. (`bun run` only) System commands, eg `bun run ls`
### `--unhandled-rejections`
Configure how unhandled promise rejections are handled:
```bash
$ bun --unhandled-rejections=throw script.js # Throw exception (terminate immediately)
$ bun --unhandled-rejections=strict script.js # Throw exception (emit rejectionHandled if handled later)
$ bun --unhandled-rejections=warn script.js # Print warning to stderr (default in Node.js)
$ bun --unhandled-rejections=none script.js # Silently ignore
```
{% bunCLIUsage command="run" /%}

View File

@@ -47,8 +47,6 @@ To filter by _test name_, use the `-t`/`--test-name-pattern` flag.
$ bun test --test-name-pattern addition
```
When no tests match the filter, `bun test` exits with code 1.
To run a specific file in the test runner, make sure the path starts with `./` or `/` to distinguish it from a filter name.
```bash
@@ -111,90 +109,6 @@ Use the `--timeout` flag to specify a _per-test_ timeout in milliseconds. If a t
$ bun test --timeout 20
```
## Concurrent test execution
By default, Bun runs all tests sequentially within each test file. You can enable concurrent execution to run async tests in parallel, significantly speeding up test suites with independent tests.
### `--concurrent` flag
Use the `--concurrent` flag to run all tests concurrently within their respective files:
```sh
$ bun test --concurrent
```
When this flag is enabled, all tests will run in parallel unless explicitly marked with `test.serial`.
### `--max-concurrency` flag
Control the maximum number of tests running simultaneously with the `--max-concurrency` flag:
```sh
# Limit to 4 concurrent tests
$ bun test --concurrent --max-concurrency 4
# Default: 20
$ bun test --concurrent
```
This helps prevent resource exhaustion when running many concurrent tests. The default value is 20.
### `test.concurrent`
Mark individual tests to run concurrently, even when the `--concurrent` flag is not used:
```ts
import { test, expect } from "bun:test";
// These tests run in parallel with each other
test.concurrent("concurrent test 1", async () => {
await fetch("/api/endpoint1");
expect(true).toBe(true);
});
test.concurrent("concurrent test 2", async () => {
await fetch("/api/endpoint2");
expect(true).toBe(true);
});
// This test runs sequentially
test("sequential test", () => {
expect(1 + 1).toBe(2);
});
```
### `test.serial`
Force tests to run sequentially, even when the `--concurrent` flag is enabled:
```ts
import { test, expect } from "bun:test";
let sharedState = 0;
// These tests must run in order
test.serial("first serial test", () => {
sharedState = 1;
expect(sharedState).toBe(1);
});
test.serial("second serial test", () => {
// Depends on the previous test
expect(sharedState).toBe(1);
sharedState = 2;
});
// This test can run concurrently if --concurrent is enabled
test("independent test", () => {
expect(true).toBe(true);
});
// Chaining test qualifiers
test.failing.each([1, 2, 3])("chained qualifiers %d", input => {
expect(input).toBe(0); // This test is expected to fail for each input
});
```
## Rerun tests
Use the `--rerun-each` flag to run each test multiple times. This is useful for detecting flaky or non-deterministic test failures.
@@ -203,36 +117,6 @@ Use the `--rerun-each` flag to run each test multiple times. This is useful for
$ bun test --rerun-each 100
```
## Randomize test execution order
Use the `--randomize` flag to run tests in a random order. This helps detect tests that depend on shared state or execution order.
```sh
$ bun test --randomize
```
When using `--randomize`, the seed used for randomization will be displayed in the test summary:
```sh
$ bun test --randomize
# ... test output ...
--seed=12345
2 pass
8 fail
Ran 10 tests across 2 files. [50.00ms]
```
### Reproducible random order with `--seed`
Use the `--seed` flag to specify a seed for the randomization. This allows you to reproduce the same test order when debugging order-dependent failures.
```sh
# Reproduce a previous randomized run
$ bun test --seed 123456
```
The `--seed` flag implies `--randomize`, so you don't need to specify both. Using the same seed value will always produce the same test execution order, making it easier to debug intermittent failures caused by test interdependencies.
## Bail out with `--bail`
Use the `--bail` flag to abort the test run early after a pre-determined number of test failures. By default Bun will run all tests and report all failures, but sometimes in CI environments it's preferable to terminate earlier to reduce CPU usage.

View File

@@ -90,17 +90,6 @@ Packages are organized in sections by dependency type:
Within each section, individual packages may have additional suffixes (` dev`, ` peer`, ` optional`) for extra clarity.
## `--recursive`
Use the `--recursive` flag with `--interactive` to update dependencies across all workspaces in a monorepo:
```sh
$ bun update --interactive --recursive
$ bun update -i -r
```
This displays an additional "Workspace" column showing which workspace each dependency belongs to.
## `--latest`
By default, `bun update` will update to the latest version of a dependency that satisfies the version range specified in your `package.json`.

View File

@@ -24,26 +24,6 @@ To update all dependencies to the latest versions (including breaking changes):
bun update --latest
```
### Filtering options
**`--audit-level=<low|moderate|high|critical>`** - Only show vulnerabilities at this severity level or higher:
```bash
bun audit --audit-level=high
```
**`--prod`** - Audit only production dependencies (excludes devDependencies):
```bash
bun audit --prod
```
**`--ignore <CVE>`** - Ignore specific CVEs (can be used multiple times):
```bash
bun audit --ignore CVE-2022-25883 --ignore CVE-2023-26136
```
### `--json`
Use the `--json` flag to print the raw JSON response from the registry instead of the formatted report:

View File

@@ -46,13 +46,3 @@ print = "yarn"
Bun v1.2 changed the default lockfile format to the text-based `bun.lock`. Existing binary `bun.lockb` lockfiles can be migrated to the new format by running `bun install --save-text-lockfile --frozen-lockfile --lockfile-only` and deleting `bun.lockb`.
More information about the new lockfile format can be found on [our blogpost](https://bun.com/blog/bun-lock-text-lockfile).
#### Automatic lockfile migration
When running `bun install` in a project without a `bun.lock`, Bun automatically migrates existing lockfiles:
- `yarn.lock` (v1)
- `package-lock.json` (npm)
- `pnpm-lock.yaml` (pnpm)
The original lockfile is preserved and can be removed manually after verification.

View File

@@ -73,33 +73,3 @@ The equivalent `bunfig.toml` option is to add a key in [`install.scopes`](https:
[install.scopes]
myorg = { url = "http://localhost:4873/", username = "myusername", password = "$NPM_PASSWORD" }
```
### `link-workspace-packages`: Control workspace package installation
Controls how workspace packages are installed when available locally:
```ini
link-workspace-packages=true
```
The equivalent `bunfig.toml` option is [`install.linkWorkspacePackages`](https://bun.com/docs/runtime/bunfig#install-linkworkspacepackages):
```toml
[install]
linkWorkspacePackages = true
```
### `save-exact`: Save exact versions
Always saves exact versions without the `^` prefix:
```ini
save-exact=true
```
The equivalent `bunfig.toml` option is [`install.exact`](https://bun.com/docs/runtime/bunfig#install-exact):
```toml
[install]
exact = true
```

View File

@@ -81,7 +81,7 @@ Workspaces have a couple major benefits.
- **Code can be split into logical parts.** If one package relies on another, you can simply add it as a dependency in `package.json`. If package `b` depends on `a`, `bun install` will install your local `packages/a` directory into `node_modules` instead of downloading it from the npm registry.
- **Dependencies can be de-duplicated.** If `a` and `b` share a common dependency, it will be _hoisted_ to the root `node_modules` directory. This reduces redundant disk usage and minimizes "dependency hell" issues associated with having multiple versions of a package installed simultaneously.
- **Run scripts in multiple packages.** You can use the [`--filter` flag](https://bun.com/docs/cli/filter) to easily run `package.json` scripts in multiple packages in your workspace, or `--workspaces` to run scripts across all workspaces.
- **Run scripts in multiple packages.** You can use the [`--filter` flag](https://bun.com/docs/cli/filter) to easily run `package.json` scripts in multiple packages in your workspace.
## Share versions with Catalogs

View File

@@ -359,7 +359,7 @@ export default {
page("api/file-io", "File I/O", {
description: `Read and write files fast with Bun's heavily optimized file system API.`,
}), // "`Bun.write`"),
page("api/redis", "Redis Client", {
page("api/redis", "Redis client", {
description: `Bun provides a fast, native Redis client with automatic command pipelining for better performance.`,
}),
page("api/import-meta", "import.meta", {

View File

@@ -232,23 +232,6 @@ Set path where coverage reports will be saved. Please notice, that it works only
coverageDir = "path/to/somewhere" # default "coverage"
```
### `test.concurrentTestGlob`
Specify a glob pattern to automatically run matching test files with concurrent test execution enabled. Test files matching this pattern will behave as if the `--concurrent` flag was passed, running all tests within those files concurrently.
```toml
[test]
concurrentTestGlob = "**/concurrent-*.test.ts"
```
This is useful for:
- Gradually migrating test suites to concurrent execution
- Running integration tests concurrently while keeping unit tests sequential
- Separating fast concurrent tests from tests that require sequential execution
The `--concurrent` CLI flag will override this setting when specified.
## Package manager
Package management is a complex issue; to support a range of use cases, the behavior of `bun install` can be configured under the `[install]` section.

View File

@@ -220,11 +220,6 @@ These environment variables are read by Bun and configure aspects of its behavio
- `DO_NOT_TRACK`
- Disable uploading crash reports to `bun.report` on crash. On macOS & Windows, crash report uploads are enabled by default. Otherwise, telemetry is not sent yet as of May 21st, 2024, but we are planning to add telemetry in the coming weeks. If `DO_NOT_TRACK=1`, then auto-uploading crash reports and telemetry are both [disabled](https://do-not-track.dev/).
---
- `BUN_OPTIONS`
- Prepends command-line arguments to any Bun execution. For example, `BUN_OPTIONS="--hot"` makes `bun run dev` behave like `bun --hot run dev`.
{% /table %}
## Runtime transpiler caching

View File

@@ -124,7 +124,7 @@ This page is updated regularly to reflect compatibility status of the latest ver
### [`node:perf_hooks`](https://nodejs.org/api/perf_hooks.html)
🟡 APIs are implemented, but Node.js test suite does not pass yet for this module.
🟡 Missing `createHistogram` `monitorEventLoopDelay`. It's recommended to use `performance` global instead of `perf_hooks.performance`.
### [`node:process`](https://nodejs.org/api/process.html)
@@ -156,7 +156,7 @@ This page is updated regularly to reflect compatibility status of the latest ver
### [`node:worker_threads`](https://nodejs.org/api/worker_threads.html)
🟡 `Worker` doesn't support the following options: `stdin` `stdout` `stderr` `trackedUnmanagedFds` `resourceLimits`. Missing `markAsUntransferable` `moveMessagePortToContext`.
🟡 `Worker` doesn't support the following options: `stdin` `stdout` `stderr` `trackedUnmanagedFds` `resourceLimits`. Missing `markAsUntransferable` `moveMessagePortToContext` `getHeapSnapshot`.
### [`node:inspector`](https://nodejs.org/api/inspector.html)

View File

@@ -46,25 +46,6 @@ smol = true # Reduce memory usage during test runs
This is equivalent to using the `--smol` flag on the command line.
### Test execution
#### concurrentTestGlob
Automatically run test files matching a glob pattern with concurrent test execution enabled. This is useful for gradually migrating test suites to concurrent execution or for running specific test types concurrently.
```toml
[test]
concurrentTestGlob = "**/concurrent-*.test.ts" # Run files matching this pattern concurrently
```
Test files matching this pattern will behave as if the `--concurrent` flag was passed, running all tests within those files concurrently. This allows you to:
- Gradually migrate your test suite to concurrent execution
- Run integration tests concurrently while keeping unit tests sequential
- Separate fast concurrent tests from tests that require sequential execution
The `--concurrent` CLI flag will override this setting when specified, forcing all tests to run concurrently regardless of the glob pattern.
### Coverage options
In addition to the options documented in the [coverage documentation](./coverage.md), the following options are available:

View File

@@ -1,132 +0,0 @@
# Concurrent Test Glob Example
This example demonstrates how to use the `concurrentTestGlob` option to selectively run tests concurrently based on file naming patterns.
## Project Structure
```text
my-project/
├── bunfig.toml
├── tests/
│ ├── unit/
│ │ ├── math.test.ts # Sequential
│ │ └── utils.test.ts # Sequential
│ └── integration/
│ ├── concurrent-api.test.ts # Concurrent
│ └── concurrent-database.test.ts # Concurrent
```
## Configuration
### bunfig.toml
```toml
[test]
# Run all test files with "concurrent-" prefix concurrently
concurrentTestGlob = "**/concurrent-*.test.ts"
```
## Test Files
### Unit Test (Sequential)
`tests/unit/math.test.ts`
```typescript
import { test, expect } from "bun:test";
// These tests run sequentially by default
// Good for tests that share state or have specific ordering requirements
let sharedState = 0;
test("addition", () => {
sharedState = 5 + 3;
expect(sharedState).toBe(8);
});
test("uses previous state", () => {
// This test depends on the previous test's state
expect(sharedState).toBe(8);
});
```
### Integration Test (Concurrent)
`tests/integration/concurrent-api.test.ts`
```typescript
import { test, expect } from "bun:test";
// These tests automatically run concurrently due to filename matching the glob pattern.
// Using test() is equivalent to test.concurrent() when the file matches concurrentTestGlob.
// Each test is independent and can run in parallel.
test("fetch user data", async () => {
const response = await fetch("/api/user/1");
expect(response.ok).toBe(true);
});
test("fetch posts", async () => {
const response = await fetch("/api/posts");
expect(response.ok).toBe(true);
});
test("fetch comments", async () => {
const response = await fetch("/api/comments");
expect(response.ok).toBe(true);
});
```
## Running Tests
```bash
# Run all tests - concurrent-*.test.ts files will run concurrently
bun test
# Override: Force ALL tests to run concurrently
# Note: This overrides bunfig.toml and runs all tests concurrently, regardless of glob
bun test --concurrent
# Run only unit tests (sequential)
bun test tests/unit
# Run only integration tests (concurrent due to glob pattern)
bun test tests/integration
```
## Benefits
1. **Gradual Migration**: Migrate to concurrent tests file by file by renaming them
2. **Clear Organization**: File naming convention indicates execution mode
3. **Performance**: Integration tests run faster in parallel
4. **Safety**: Unit tests remain sequential where needed
5. **Flexibility**: Easy to change execution mode by renaming files
## Migration Strategy
To migrate existing tests to concurrent execution:
1. Start with independent integration tests
2. Rename files to match the glob pattern: `mv api.test.ts concurrent-api.test.ts`
3. Verify tests still pass
4. Monitor for race conditions or shared state issues
5. Continue migrating stable tests incrementally
## Tips
- Use descriptive prefixes: `concurrent-`, `parallel-`, `async-`
- Keep related sequential tests together
- Document why certain tests must remain sequential
- Use `test.concurrent()` for fine-grained control in sequential files
(In files matched by `concurrentTestGlob`, plain `test()` already runs concurrently)
- Consider separate globs for different test types:
```toml
[test]
# Multiple patterns for different test categories
concurrentTestGlob = [
"**/integration/*.test.ts",
"**/e2e/*.test.ts",
"**/concurrent-*.test.ts"
]
```

View File

@@ -8,8 +8,6 @@
# Thread::initializePlatformThreading() in ThreadingPOSIX.cpp) to the JS thread to suspend or resume
# it. So stopping the process would just create noise when debugging any long-running script.
process handle -p true -s false -n false SIGPWR
process handle -p true -s false -n false SIGUSR1
process handle -p true -s false -n false SIGUSR2
command script import -c lldb_pretty_printers.py
type category enable zig.lang

View File

@@ -78,12 +78,6 @@
"no-empty-file": "off",
"no-unnecessary-await": "off"
}
},
{
"files": ["src/js/builtins/**"],
"rules": {
"no-unused-expressions": "off"
}
}
]
}

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "bun",
"version": "1.2.24",
"version": "1.2.23",
"workspaces": [
"./packages/bun-types",
"./packages/@types/bun"
@@ -11,14 +11,14 @@
"@lezer/cpp": "^1.1.3",
"@types/bun": "workspace:*",
"bun-tracestrings": "github:oven-sh/bun.report#912ca63e26c51429d3e6799aa2a6ab079b188fd8",
"esbuild": "^0.21.5",
"mitata": "^0.1.14",
"esbuild": "^0.21.4",
"mitata": "^0.1.11",
"peechy": "0.4.34",
"prettier": "^3.6.2",
"prettier-plugin-organize-imports": "^4.3.0",
"prettier": "^3.5.3",
"prettier-plugin-organize-imports": "^4.0.0",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"source-map-js": "^1.2.1",
"source-map-js": "^1.2.0",
"typescript": "5.9.2"
},
"resolutions": {
@@ -33,7 +33,7 @@
"bd:v": "(bun run --silent build:debug &> /tmp/bun.debug.build.log || (cat /tmp/bun.debug.build.log && rm -rf /tmp/bun.debug.build.log && exit 1)) && rm -f /tmp/bun.debug.build.log && ./build/debug/bun-debug",
"bd": "BUN_DEBUG_QUIET_LOGS=1 bun --silent bd:v",
"build:debug": "export COMSPEC=\"C:\\Windows\\System32\\cmd.exe\" && bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Debug -B build/debug --log-level=NOTICE",
"build:debug:noasan": "export COMSPEC=\"C:\\Windows\\System32\\cmd.exe\" && bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Debug -DENABLE_ASAN=OFF -B build/debug --log-level=NOTICE",
"build:debug:asan": "bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Debug -DENABLE_ASAN=ON -B build/debug-asan --log-level=NOTICE",
"build:release": "bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Release -B build/release",
"build:ci": "bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Release -DCMAKE_VERBOSE_MAKEFILE=ON -DCI=true -B build/release-ci --verbose --fresh",
"build:assert": "bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_ASSERTIONS=ON -DENABLE_LOGS=ON -B build/release-assert",
@@ -84,11 +84,6 @@
"node:test": "node ./scripts/runner.node.mjs --quiet --exec-path=$npm_execpath --node-tests ",
"node:test:cp": "bun ./scripts/fetch-node-test.ts ",
"clean:zig": "rm -rf build/debug/cache/zig build/debug/CMakeCache.txt 'build/debug/*.o' .zig-cache zig-out || true",
"machine:linux:ubuntu": "./scripts/machine.mjs ssh --cloud=aws --arch=x64 --instance-type c7i.2xlarge --os=linux --distro=ubuntu --release=25.04",
"machine:linux:debian": "./scripts/machine.mjs ssh --cloud=aws --arch=x64 --instance-type c7i.2xlarge --os=linux --distro=debian --release=12",
"machine:linux:alpine": "./scripts/machine.mjs ssh --cloud=aws --arch=x64 --instance-type c7i.2xlarge --os=linux --distro=alpine --release=3.21",
"machine:linux:amazonlinux": "./scripts/machine.mjs ssh --cloud=aws --arch=x64 --instance-type c7i.2xlarge --os=linux --distro=amazonlinux --release=2023",
"machine:windows:2019": "./scripts/machine.mjs ssh --cloud=aws --arch=x64 --instance-type c7i.2xlarge --os=windows --release=2019",
"sync-webkit-source": "bun ./scripts/sync-webkit-source.ts"
}
}

View File

@@ -62,7 +62,7 @@ export const platforms: Platform[] = [
},
{
os: "linux",
arch: "arm64",
arch: "aarch64",
abi: "musl",
bin: "bun-linux-aarch64-musl",
exe: "bin/bun",

View File

@@ -636,7 +636,7 @@ declare module "bun" {
* import { YAML } from "bun";
*
* console.log(YAML.parse("123")) // 123
* console.log(YAML.parse("null")) // null
* console.log(YAML.parse("123")) // null
* console.log(YAML.parse("false")) // false
* console.log(YAML.parse("abc")) // "abc"
* console.log(YAML.parse("- abc")) // [ "abc" ]
@@ -653,10 +653,7 @@ declare module "bun" {
*
* @param input The JavaScript value to stringify.
* @param replacer Currently not supported.
* @param space A number for how many spaces each level of indentation gets, or a string used as indentation.
* Without this parameter, outputs flow-style (single-line) YAML.
* With this parameter, outputs block-style (multi-line) YAML.
* The number is clamped between 0 and 10, and the first 10 characters of the string are used.
* @param space A number for how many spaces each level of indentation gets, or a string used as indentation. The number is clamped between 0 and 10, and the first 10 characters of the string are used.
* @returns A string containing the YAML document.
*
* @example
@@ -664,24 +661,19 @@ declare module "bun" {
* import { YAML } from "bun";
*
* const input = {
* abc: "def",
* num: 123
* abc: "def"
* };
*
* // Without space - flow style (single-line)
* console.log(YAML.stringify(input));
* // {abc: def,num: 123}
*
* // With space - block style (multi-line)
* console.log(YAML.stringify(input, null, 2));
* // # output
* // abc: def
* // num: 123
*
* const cycle = {};
* cycle.obj = cycle;
* console.log(YAML.stringify(cycle, null, 2));
* // &1
* // obj: *1
* console.log(YAML.stringify(cycle));
* // # output
* // &root
* // obj:
* // *root
*/
export function stringify(input: unknown, replacer?: undefined | null, space?: string | number): string;
}
@@ -3707,6 +3699,10 @@ declare module "bun" {
*/
secureOptions?: number | undefined; // Value is a numeric bitmask of the `SSL_OP_*` options
keyFile?: string;
certFile?: string;
ALPNProtocols?: string | BufferSource;
ciphers?: string;
@@ -5043,7 +5039,6 @@ declare module "bun" {
type SupportedCryptoAlgorithms =
| "blake2b256"
| "blake2b512"
| "blake2s256"
| "md4"
| "md5"
| "ripemd160"

File diff suppressed because it is too large. [Load Diff]

View File

@@ -12,68 +12,6 @@ declare module "bun" {
release(): void;
}
type ArrayType =
| "BOOLEAN"
| "BYTEA"
| "CHAR"
| "NAME"
| "TEXT"
| "CHAR"
| "VARCHAR"
| "SMALLINT"
| "INT2VECTOR"
| "INTEGER"
| "INT"
| "BIGINT"
| "REAL"
| "DOUBLE PRECISION"
| "NUMERIC"
| "MONEY"
| "OID"
| "TID"
| "XID"
| "CID"
| "JSON"
| "JSONB"
| "JSONPATH"
| "XML"
| "POINT"
| "LSEG"
| "PATH"
| "BOX"
| "POLYGON"
| "LINE"
| "CIRCLE"
| "CIDR"
| "MACADDR"
| "INET"
| "MACADDR8"
| "DATE"
| "TIME"
| "TIMESTAMP"
| "TIMESTAMPTZ"
| "INTERVAL"
| "TIMETZ"
| "BIT"
| "VARBIT"
| "ACLITEM"
| "PG_DATABASE"
| (string & {});
/**
* Represents a SQL array parameter
*/
interface SQLArrayParameter {
/**
* The serialized values of the array parameter
*/
serializedValues: string;
/**
* The type of the array parameter
*/
arrayType: ArrayType;
}
/**
* Represents a client within a transaction context Extends SQL with savepoint
* functionality
@@ -692,21 +630,6 @@ declare module "bun" {
*/
reserve(): Promise<ReservedSQL>;
/**
* Creates a new SQL array parameter
* @param values - The values to create the array parameter from
* @param typeNameOrTypeID - The type name or type ID to create the array parameter from, if omitted it will default to JSON
* @returns A new SQL array parameter
*
* @example
* ```ts
* const array = sql.array([1, 2, 3], "INT");
* await sql`CREATE TABLE users_posts (user_id INT, posts_id INT[])`;
* await sql`INSERT INTO users_posts (user_id, posts_id) VALUES (${user.id}, ${array})`;
* ```
*/
array(values: any[], typeNameOrTypeID?: number | ArrayType): SQLArrayParameter;
/**
* Begins a new transaction.
*

View File

@@ -230,11 +230,6 @@ declare module "bun:test" {
* Marks this group of tests to be executed concurrently.
*/
concurrent: Describe<T>;
/**
* Marks this group of tests to be executed serially (one after another),
* even when the --concurrent flag is used.
*/
serial: Describe<T>;
/**
* Runs this group of tests, only if `condition` is true.
*
@@ -464,11 +459,6 @@ declare module "bun:test" {
* Runs the test concurrently with other concurrent tests.
*/
concurrent: Test<T>;
/**
* Forces the test to run serially (not in parallel),
* even when the --concurrent flag is used.
*/
serial: Test<T>;
/**
* Runs this test, if `condition` is true.
*
@@ -501,13 +491,6 @@ declare module "bun:test" {
* @param condition if the test should run concurrently
*/
concurrentIf(condition: boolean): Test<T>;
/**
* Forces the test to run serially (not in parallel), if `condition` is true.
* This applies even when the --concurrent flag is used.
*
* @param condition if the test should run serially
*/
serialIf(condition: boolean): Test<T>;
/**
* Returns a function that runs for each item in `table`.
*

View File

@@ -6,46 +6,10 @@
#include <atomic>
#include <string.h>
#include "./default_ciphers.h"
// System-specific includes for certificate loading
#include "./root_certs_platform.h"
#ifdef _WIN32
#include <windows.h>
#include <wincrypt.h>
#else
// Linux/Unix includes
#include <dirent.h>
#include <stdio.h>
#include <limits.h>
#endif
static const int root_certs_size = sizeof(root_certs) / sizeof(root_certs[0]);
extern "C" void BUN__warn__extra_ca_load_failed(const char* filename, const char* error_msg);
// Forward declarations for platform-specific functions
// (Actual implementations are in platform-specific files)
// External variable from Zig CLI arguments
extern "C" bool Bun__Node__UseSystemCA;
// Helper function to check if system CA should be used
// Checks both CLI flag (--use-system-ca) and environment variable (NODE_USE_SYSTEM_CA=1)
static bool us_should_use_system_ca() {
// Check CLI flag first
if (Bun__Node__UseSystemCA) {
return true;
}
// Check environment variable
const char *use_system_ca = getenv("NODE_USE_SYSTEM_CA");
return use_system_ca && strcmp(use_system_ca, "1") == 0;
}
// Platform-specific system certificate loading implementations are separated:
// - macOS: root_certs_darwin.cpp (Security framework with dynamic loading)
// - Windows: root_certs_windows.cpp (Windows CryptoAPI)
// - Linux/Unix: us_load_system_certificates_linux() below
// This callback is used to avoid the default passphrase callback in OpenSSL
// which will typically prompt for the passphrase. The prompting is designed
// for the OpenSSL CLI, but works poorly for this case because it involves
@@ -137,8 +101,7 @@ end:
static void us_internal_init_root_certs(
X509 *root_cert_instances[root_certs_size],
STACK_OF(X509) *&root_extra_cert_instances,
STACK_OF(X509) *&root_system_cert_instances) {
STACK_OF(X509) *&root_extra_cert_instances) {
static std::atomic_flag root_cert_instances_lock = ATOMIC_FLAG_INIT;
static std::atomic_bool root_cert_instances_initialized = 0;
@@ -160,17 +123,6 @@ static void us_internal_init_root_certs(
if (extra_certs && extra_certs[0]) {
root_extra_cert_instances = us_ssl_ctx_load_all_certs_from_file(extra_certs);
}
// load system certificates if NODE_USE_SYSTEM_CA=1
if (us_should_use_system_ca()) {
#ifdef __APPLE__
us_load_system_certificates_macos(&root_system_cert_instances);
#elif defined(_WIN32)
us_load_system_certificates_windows(&root_system_cert_instances);
#else
us_load_system_certificates_linux(&root_system_cert_instances);
#endif
}
}
atomic_flag_clear_explicit(&root_cert_instances_lock,
@@ -185,15 +137,12 @@ extern "C" int us_internal_raw_root_certs(struct us_cert_string_t **out) {
struct us_default_ca_certificates {
X509 *root_cert_instances[root_certs_size];
STACK_OF(X509) *root_extra_cert_instances;
STACK_OF(X509) *root_system_cert_instances;
};
us_default_ca_certificates* us_get_default_ca_certificates() {
static us_default_ca_certificates default_ca_certificates = {{NULL}, NULL, NULL};
static us_default_ca_certificates default_ca_certificates = {{NULL}, NULL};
us_internal_init_root_certs(default_ca_certificates.root_cert_instances,
default_ca_certificates.root_extra_cert_instances,
default_ca_certificates.root_system_cert_instances);
us_internal_init_root_certs(default_ca_certificates.root_cert_instances, default_ca_certificates.root_extra_cert_instances);
return &default_ca_certificates;
}
@@ -202,33 +151,20 @@ STACK_OF(X509) *us_get_root_extra_cert_instances() {
return us_get_default_ca_certificates()->root_extra_cert_instances;
}
STACK_OF(X509) *us_get_root_system_cert_instances() {
if (!us_should_use_system_ca())
return NULL;
// Ensure single-path initialization via us_internal_init_root_certs
auto certs = us_get_default_ca_certificates();
return certs->root_system_cert_instances;
}
extern "C" X509_STORE *us_get_default_ca_store() {
X509_STORE *store = X509_STORE_new();
if (store == NULL) {
return NULL;
}
// Only load system default paths when NODE_USE_SYSTEM_CA=1
// Otherwise, rely on bundled certificates only (like Node.js behavior)
if (us_should_use_system_ca()) {
if (!X509_STORE_set_default_paths(store)) {
X509_STORE_free(store);
return NULL;
}
if (!X509_STORE_set_default_paths(store)) {
X509_STORE_free(store);
return NULL;
}
us_default_ca_certificates *default_ca_certificates = us_get_default_ca_certificates();
X509** root_cert_instances = default_ca_certificates->root_cert_instances;
STACK_OF(X509) *root_extra_cert_instances = default_ca_certificates->root_extra_cert_instances;
STACK_OF(X509) *root_system_cert_instances = default_ca_certificates->root_system_cert_instances;
// load all root_cert_instances on the default ca store
for (size_t i = 0; i < root_certs_size; i++) {
@@ -247,59 +183,8 @@ extern "C" X509_STORE *us_get_default_ca_store() {
}
}
if (us_should_use_system_ca() && root_system_cert_instances) {
for (int i = 0; i < sk_X509_num(root_system_cert_instances); i++) {
X509 *cert = sk_X509_value(root_system_cert_instances, i);
X509_up_ref(cert);
X509_STORE_add_cert(store, cert);
}
}
return store;
}
extern "C" const char *us_get_default_ciphers() {
return DEFAULT_CIPHER_LIST;
}
// Platform-specific implementations for loading system certificates
#if defined(_WIN32)
// Windows implementation is split to avoid header conflicts:
// - root_certs_windows.cpp loads raw certificate data (uses Windows headers)
// - This file converts raw data to X509* (uses OpenSSL headers)
#include <vector>
struct RawCertificate {
std::vector<unsigned char> data;
};
// Defined in root_certs_windows.cpp - loads raw certificate data
extern void us_load_system_certificates_windows_raw(
std::vector<RawCertificate>& raw_certs);
// Convert raw Windows certificates to OpenSSL X509 format
void us_load_system_certificates_windows(STACK_OF(X509) **system_certs) {
*system_certs = sk_X509_new_null();
if (*system_certs == NULL) {
return;
}
// Load raw certificates from Windows stores
std::vector<RawCertificate> raw_certs;
us_load_system_certificates_windows_raw(raw_certs);
// Convert each raw certificate to X509
for (const auto& raw_cert : raw_certs) {
const unsigned char* data = raw_cert.data.data();
X509* x509_cert = d2i_X509(NULL, &data, raw_cert.data.size());
if (x509_cert != NULL) {
sk_X509_push(*system_certs, x509_cert);
}
}
}
#else
// Linux and other Unix-like systems - implementation is in root_certs_linux.cpp
extern "C" void us_load_system_certificates_linux(STACK_OF(X509) **system_certs);
#endif
}

View File

@@ -1,431 +0,0 @@
#ifdef __APPLE__
#include <dlfcn.h>
#include <CoreFoundation/CoreFoundation.h>
#include <atomic>
#include <openssl/x509.h>
#include <openssl/x509_vfy.h>
#include <stdio.h>
// Security framework types and constants - dynamically loaded
typedef struct OpaqueSecCertificateRef* SecCertificateRef;
typedef struct OpaqueSecTrustRef* SecTrustRef;
typedef struct OpaqueSecPolicyRef* SecPolicyRef;
typedef int32_t OSStatus;
typedef uint32_t SecTrustSettingsDomain;
// Security framework constants
enum {
errSecSuccess = 0,
errSecItemNotFound = -25300,
};
// Trust settings domains
enum {
kSecTrustSettingsDomainUser = 0,
kSecTrustSettingsDomainAdmin = 1,
kSecTrustSettingsDomainSystem = 2,
};
// Trust status enumeration
enum class TrustStatus {
TRUSTED,
DISTRUSTED,
UNSPECIFIED
};
// Dynamic Security framework loader.
// Opens CoreFoundation.framework and Security.framework with dlopen and
// resolves every CF/Security symbol this file needs into the constant and
// function-pointer members below. load() must return true before any other
// member is used. Handles are dlclose()d by the destructor.
class SecurityFramework {
public:
void* handle;     // dlopen handle for Security.framework (null until load()).
void* cf_handle;  // dlopen handle for CoreFoundation.framework.
// Core Foundation / Security constants (dereferenced from dlsym'd globals).
CFStringRef kSecClass;
CFStringRef kSecClassCertificate;
CFStringRef kSecMatchLimit;
CFStringRef kSecMatchLimitAll;
CFStringRef kSecReturnRef;
CFStringRef kSecMatchTrustedOnly;
CFBooleanRef kCFBooleanTrue;
CFAllocatorRef kCFAllocatorDefault;
// These three are kept as pointers to the framework's callback structs
// (not copies), so they can be passed straight to CFArray/CFDictionary APIs.
CFArrayCallBacks* kCFTypeArrayCallBacks;
CFDictionaryKeyCallBacks* kCFTypeDictionaryKeyCallBacks;
CFDictionaryValueCallBacks* kCFTypeDictionaryValueCallBacks;
// Core Foundation function pointers (signatures mirror the SDK prototypes).
CFMutableArrayRef (*CFArrayCreateMutable)(CFAllocatorRef allocator, CFIndex capacity, const CFArrayCallBacks *callBacks);
CFArrayRef (*CFArrayCreate)(CFAllocatorRef allocator, const void **values, CFIndex numValues, const CFArrayCallBacks *callBacks);
void (*CFArraySetValueAtIndex)(CFMutableArrayRef theArray, CFIndex idx, const void *value);
const void* (*CFArrayGetValueAtIndex)(CFArrayRef theArray, CFIndex idx);
CFIndex (*CFArrayGetCount)(CFArrayRef theArray);
void (*CFRelease)(CFTypeRef cf);
CFDictionaryRef (*CFDictionaryCreate)(CFAllocatorRef allocator, const void **keys, const void **values, CFIndex numValues, const CFDictionaryKeyCallBacks *keyCallBacks, const CFDictionaryValueCallBacks *valueCallBacks);
const UInt8* (*CFDataGetBytePtr)(CFDataRef theData);
CFIndex (*CFDataGetLength)(CFDataRef theData);
// Security framework function pointers.
OSStatus (*SecItemCopyMatching)(CFDictionaryRef query, CFTypeRef *result);
CFDataRef (*SecCertificateCopyData)(SecCertificateRef certificate);
OSStatus (*SecTrustCreateWithCertificates)(CFArrayRef certificates, CFArrayRef policies, SecTrustRef *trust);
SecPolicyRef (*SecPolicyCreateSSL)(Boolean server, CFStringRef hostname);
Boolean (*SecTrustEvaluateWithError)(SecTrustRef trust, CFErrorRef *error);
OSStatus (*SecTrustSettingsCopyTrustSettings)(SecCertificateRef certRef, SecTrustSettingsDomain domain, CFArrayRef *trustSettings);
// All members start null; load() populates them.
SecurityFramework() : handle(nullptr), cf_handle(nullptr),
kSecClass(nullptr), kSecClassCertificate(nullptr),
kSecMatchLimit(nullptr), kSecMatchLimitAll(nullptr),
kSecReturnRef(nullptr), kSecMatchTrustedOnly(nullptr), kCFBooleanTrue(nullptr),
kCFAllocatorDefault(nullptr), kCFTypeArrayCallBacks(nullptr),
kCFTypeDictionaryKeyCallBacks(nullptr), kCFTypeDictionaryValueCallBacks(nullptr),
CFArrayCreateMutable(nullptr), CFArrayCreate(nullptr),
CFArraySetValueAtIndex(nullptr), CFArrayGetValueAtIndex(nullptr),
CFArrayGetCount(nullptr), CFRelease(nullptr),
CFDictionaryCreate(nullptr), CFDataGetBytePtr(nullptr), CFDataGetLength(nullptr),
SecItemCopyMatching(nullptr), SecCertificateCopyData(nullptr),
SecTrustCreateWithCertificates(nullptr), SecPolicyCreateSSL(nullptr),
SecTrustEvaluateWithError(nullptr), SecTrustSettingsCopyTrustSettings(nullptr) {}
// Releases both framework handles, if open.
~SecurityFramework() {
if (handle) {
dlclose(handle);
}
if (cf_handle) {
dlclose(cf_handle);
}
}
// Opens both frameworks and resolves all symbols.
// Returns true on success. On any failure every handle is closed and reset
// to null, so load() may be retried. Idempotent once it has succeeded.
bool load() {
if (handle && cf_handle) return true; // Already loaded
// Load CoreFoundation framework
cf_handle = dlopen("/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation", RTLD_LAZY | RTLD_LOCAL);
if (!cf_handle) {
fprintf(stderr, "Failed to load CoreFoundation framework: %s\n", dlerror());
return false;
}
// Load Security framework
handle = dlopen("/System/Library/Frameworks/Security.framework/Security", RTLD_LAZY | RTLD_LOCAL);
if (!handle) {
fprintf(stderr, "Failed to load Security framework: %s\n", dlerror());
dlclose(cf_handle);
cf_handle = nullptr;
return false;
}
// Resolve constants, then functions; undo everything on partial failure.
if (!load_constants()) {
if (handle) {
dlclose(handle);
handle = nullptr;
}
if (cf_handle) {
dlclose(cf_handle);
cf_handle = nullptr;
}
return false;
}
if (!load_functions()) {
if (handle) {
dlclose(handle);
handle = nullptr;
}
if (cf_handle) {
dlclose(cf_handle);
cf_handle = nullptr;
}
return false;
}
return true;
}
private:
// Resolves the exported constant globals. kSec* constants are CFStringRef
// globals, so the dlsym'd address is dereferenced once to get the value;
// the kCFType*CallBacks structs are kept by address instead.
// Returns false (with a stderr note) on the first missing symbol.
bool load_constants() {
// Load Security framework constants
void* ptr = dlsym(handle, "kSecClass");
if (!ptr) { fprintf(stderr, "DEBUG: kSecClass not found\n"); return false; }
kSecClass = *(CFStringRef*)ptr;
ptr = dlsym(handle, "kSecClassCertificate");
if (!ptr) { fprintf(stderr, "DEBUG: kSecClassCertificate not found\n"); return false; }
kSecClassCertificate = *(CFStringRef*)ptr;
ptr = dlsym(handle, "kSecMatchLimit");
if (!ptr) { fprintf(stderr, "DEBUG: kSecMatchLimit not found\n"); return false; }
kSecMatchLimit = *(CFStringRef*)ptr;
ptr = dlsym(handle, "kSecMatchLimitAll");
if (!ptr) { fprintf(stderr, "DEBUG: kSecMatchLimitAll not found\n"); return false; }
kSecMatchLimitAll = *(CFStringRef*)ptr;
ptr = dlsym(handle, "kSecReturnRef");
if (!ptr) { fprintf(stderr, "DEBUG: kSecReturnRef not found\n"); return false; }
kSecReturnRef = *(CFStringRef*)ptr;
ptr = dlsym(handle, "kSecMatchTrustedOnly");
if (!ptr) { fprintf(stderr, "DEBUG: kSecMatchTrustedOnly not found\n"); return false; }
kSecMatchTrustedOnly = *(CFStringRef*)ptr;
// Load CoreFoundation constants
ptr = dlsym(cf_handle, "kCFBooleanTrue");
if (!ptr) { fprintf(stderr, "DEBUG: kCFBooleanTrue not found\n"); return false; }
kCFBooleanTrue = *(CFBooleanRef*)ptr;
ptr = dlsym(cf_handle, "kCFAllocatorDefault");
if (!ptr) { fprintf(stderr, "DEBUG: kCFAllocatorDefault not found\n"); return false; }
kCFAllocatorDefault = *(CFAllocatorRef*)ptr;
ptr = dlsym(cf_handle, "kCFTypeArrayCallBacks");
if (!ptr) { fprintf(stderr, "DEBUG: kCFTypeArrayCallBacks not found\n"); return false; }
kCFTypeArrayCallBacks = (CFArrayCallBacks*)ptr;
ptr = dlsym(cf_handle, "kCFTypeDictionaryKeyCallBacks");
if (!ptr) { fprintf(stderr, "DEBUG: kCFTypeDictionaryKeyCallBacks not found\n"); return false; }
kCFTypeDictionaryKeyCallBacks = (CFDictionaryKeyCallBacks*)ptr;
ptr = dlsym(cf_handle, "kCFTypeDictionaryValueCallBacks");
if (!ptr) { fprintf(stderr, "DEBUG: kCFTypeDictionaryValueCallBacks not found\n"); return false; }
kCFTypeDictionaryValueCallBacks = (CFDictionaryValueCallBacks*)ptr;
return true;
}
// Resolves all function pointers in one pass; returns true only if every
// symbol was found (checked by the final conjunction).
bool load_functions() {
// Load CoreFoundation functions
CFArrayCreateMutable = (CFMutableArrayRef (*)(CFAllocatorRef, CFIndex, const CFArrayCallBacks*))dlsym(cf_handle, "CFArrayCreateMutable");
CFArrayCreate = (CFArrayRef (*)(CFAllocatorRef, const void**, CFIndex, const CFArrayCallBacks*))dlsym(cf_handle, "CFArrayCreate");
CFArraySetValueAtIndex = (void (*)(CFMutableArrayRef, CFIndex, const void*))dlsym(cf_handle, "CFArraySetValueAtIndex");
CFArrayGetValueAtIndex = (const void* (*)(CFArrayRef, CFIndex))dlsym(cf_handle, "CFArrayGetValueAtIndex");
CFArrayGetCount = (CFIndex (*)(CFArrayRef))dlsym(cf_handle, "CFArrayGetCount");
CFRelease = (void (*)(CFTypeRef))dlsym(cf_handle, "CFRelease");
CFDictionaryCreate = (CFDictionaryRef (*)(CFAllocatorRef, const void**, const void**, CFIndex, const CFDictionaryKeyCallBacks*, const CFDictionaryValueCallBacks*))dlsym(cf_handle, "CFDictionaryCreate");
CFDataGetBytePtr = (const UInt8* (*)(CFDataRef))dlsym(cf_handle, "CFDataGetBytePtr");
CFDataGetLength = (CFIndex (*)(CFDataRef))dlsym(cf_handle, "CFDataGetLength");
// Load Security framework functions
SecItemCopyMatching = (OSStatus (*)(CFDictionaryRef, CFTypeRef*))dlsym(handle, "SecItemCopyMatching");
SecCertificateCopyData = (CFDataRef (*)(SecCertificateRef))dlsym(handle, "SecCertificateCopyData");
SecTrustCreateWithCertificates = (OSStatus (*)(CFArrayRef, CFArrayRef, SecTrustRef*))dlsym(handle, "SecTrustCreateWithCertificates");
SecPolicyCreateSSL = (SecPolicyRef (*)(Boolean, CFStringRef))dlsym(handle, "SecPolicyCreateSSL");
SecTrustEvaluateWithError = (Boolean (*)(SecTrustRef, CFErrorRef*))dlsym(handle, "SecTrustEvaluateWithError");
SecTrustSettingsCopyTrustSettings = (OSStatus (*)(SecCertificateRef, SecTrustSettingsDomain, CFArrayRef*))dlsym(handle, "SecTrustSettingsCopyTrustSettings");
return CFArrayCreateMutable && CFArrayCreate && CFArraySetValueAtIndex &&
CFArrayGetValueAtIndex && CFArrayGetCount && CFRelease &&
CFDictionaryCreate && CFDataGetBytePtr && CFDataGetLength &&
SecItemCopyMatching && SecCertificateCopyData &&
SecTrustCreateWithCertificates && SecPolicyCreateSSL &&
SecTrustEvaluateWithError && SecTrustSettingsCopyTrustSettings;
}
};
// Global instance for dynamic loading
static std::atomic<SecurityFramework*> g_security_framework{nullptr};
// Returns the process-wide SecurityFramework loader, creating it on first
// use, or nullptr when the frameworks cannot be loaded. Thread-safe: racing
// threads each build a candidate, one publishes it via CAS, and the losers
// delete their candidate and adopt the winner's instance.
static SecurityFramework* get_security_framework() {
  SecurityFramework* existing = g_security_framework.load();
  if (existing != nullptr) {
    return existing;
  }
  SecurityFramework* candidate = new SecurityFramework();
  if (!candidate->load()) {
    delete candidate;
    return nullptr;
  }
  SecurityFramework* expected = nullptr;
  if (g_security_framework.compare_exchange_strong(expected, candidate)) {
    return candidate;
  }
  // Lost the publication race; `expected` now holds the winner's pointer.
  delete candidate;
  return expected;
}
// A certificate is "self-issued" when its subject and issuer names compare
// equal; returns false if either name is missing.
static bool is_certificate_self_issued(X509* cert) {
  X509_NAME* subject_name = X509_get_subject_name(cert);
  if (subject_name == nullptr) {
    return false;
  }
  X509_NAME* issuer_name = X509_get_issuer_name(cert);
  if (issuer_name == nullptr) {
    return false;
  }
  return X509_NAME_cmp(subject_name, issuer_name) == 0;
}
// Validate a single certificate against the SSL-server policy via
// SecTrustEvaluateWithError. Returns true only when trust evaluation
// succeeds; every intermediate CF object is released before returning.
static bool is_certificate_trust_valid(SecurityFramework* security, SecCertificateRef cert_ref) {
  CFMutableArrayRef cert_array = security->CFArrayCreateMutable(nullptr, 1, security->kCFTypeArrayCallBacks);
  if (cert_array == nullptr) {
    return false;
  }
  security->CFArraySetValueAtIndex(cert_array, 0, cert_ref);

  SecPolicyRef ssl_policy = security->SecPolicyCreateSSL(true, nullptr);
  if (ssl_policy == nullptr) {
    security->CFRelease(cert_array);
    return false;
  }

  CFArrayRef policy_array = security->CFArrayCreate(nullptr, (const void**)&ssl_policy, 1, security->kCFTypeArrayCallBacks);
  if (policy_array == nullptr) {
    security->CFRelease(ssl_policy);
    security->CFRelease(cert_array);
    return false;
  }

  SecTrustRef trust = nullptr;
  OSStatus create_status = security->SecTrustCreateWithCertificates(cert_array, policy_array, &trust);
  bool trusted = false;
  if (create_status == errSecSuccess && trust != nullptr) {
    trusted = security->SecTrustEvaluateWithError(trust, nullptr);
  }

  // Release in the reverse order of creation.
  if (trust != nullptr) {
    security->CFRelease(trust);
  }
  security->CFRelease(policy_array);
  security->CFRelease(ssl_policy);
  security->CFRelease(cert_array);
  return trusted;
}
// Interpret the trust-settings array returned by
// SecTrustSettingsCopyTrustSettings (simplified).
// A null array yields UNSPECIFIED. An empty array means "always trust this
// certificate"; we honor that only for self-issued certificates. Non-empty
// arrays (per-policy trust dictionaries) are not parsed here and also yield
// UNSPECIFIED, deferring to the full trust evaluation in the caller.
static TrustStatus is_trust_settings_trusted_for_policy(SecurityFramework* security, CFArrayRef trust_settings, bool is_self_issued) {
  if (trust_settings == nullptr) {
    return TrustStatus::UNSPECIFIED;
  }
  CFIndex entry_count = security->CFArrayGetCount(trust_settings);
  if (entry_count == 0) {
    // Empty settings array == unconditional trust for this certificate.
    return is_self_issued ? TrustStatus::TRUSTED : TrustStatus::UNSPECIFIED;
  }
  // Parsing the individual trust dictionary entries is not implemented.
  return TrustStatus::UNSPECIFIED;
}
// Decide whether a certificate is trusted for the server-auth policy.
// Walks the trust-settings domains in precedence order (user, admin, system).
// An explicit TRUSTED/DISTRUSTED verdict from any domain is final. When a
// domain has no settings, a single full trust evaluation is performed (at
// most once across all domains) as a fallback.
static bool is_certificate_trusted_for_policy(SecurityFramework* security, X509* cert, SecCertificateRef cert_ref) {
  const bool self_issued = is_certificate_self_issued(cert);
  bool fallback_evaluated = false;
  const SecTrustSettingsDomain domains[] = {
    kSecTrustSettingsDomainUser,
    kSecTrustSettingsDomainAdmin,
    kSecTrustSettingsDomainSystem,
  };
  for (SecTrustSettingsDomain domain : domains) {
    CFArrayRef settings = nullptr;
    OSStatus status = security->SecTrustSettingsCopyTrustSettings(cert_ref, domain, &settings);
    // Any error other than "no settings in this domain" skips the domain.
    if (status != errSecSuccess && status != errSecItemNotFound) {
      continue;
    }
    if (status == errSecSuccess && settings != nullptr) {
      TrustStatus verdict = is_trust_settings_trusted_for_policy(security, settings, self_issued);
      security->CFRelease(settings);
      if (verdict == TrustStatus::TRUSTED) {
        return true;
      }
      if (verdict == TrustStatus::DISTRUSTED) {
        return false;
      }
    }
    // No settings in this domain: fall back to a full trust evaluation,
    // but only once no matter how many domains are empty.
    if (settings == nullptr && !fallback_evaluated) {
      if (is_certificate_trust_valid(security, cert_ref)) {
        return true;
      }
      fallback_evaluated = true;
    }
  }
  return false;
}
// Main function to load system certificates on macOS.
// Queries the keychain for all trusted certificates (SecItemCopyMatching with
// kSecMatchTrustedOnly), converts each to an OpenSSL X509, and keeps only CA
// certificates that pass is_certificate_trusted_for_policy. On any failure
// the function returns silently; *system_certs is NULL only if the initial
// stack allocation failed, otherwise it holds a (possibly empty) stack the
// caller owns.
extern "C" void us_load_system_certificates_macos(STACK_OF(X509) **system_certs) {
*system_certs = sk_X509_new_null();
if (!*system_certs) {
return;
}
SecurityFramework* security = get_security_framework();
if (!security) {
return; // Fail silently
}
// Create search dictionary for certificates; keys and values pair up by
// index: class=certificate, limit=all, return refs, trusted only.
CFTypeRef search_keys[] = {
security->kSecClass,
security->kSecMatchLimit,
security->kSecReturnRef,
security->kSecMatchTrustedOnly,
};
CFTypeRef search_values[] = {
security->kSecClassCertificate,
security->kSecMatchLimitAll,
security->kCFBooleanTrue,
security->kCFBooleanTrue,
};
CFDictionaryRef search = security->CFDictionaryCreate(
security->kCFAllocatorDefault,
search_keys,
search_values,
4,
security->kCFTypeDictionaryKeyCallBacks,
security->kCFTypeDictionaryValueCallBacks
);
if (!search) {
return;
}
CFArrayRef certificates = nullptr;
OSStatus status = security->SecItemCopyMatching(search, (CFTypeRef*)&certificates);
security->CFRelease(search);
if (status != errSecSuccess || !certificates) {
return;
}
CFIndex count = security->CFArrayGetCount(certificates);
for (CFIndex i = 0; i < count; ++i) {
// cert_ref is borrowed from the array (not retained here); the array
// itself is released after the loop.
SecCertificateRef cert_ref = (SecCertificateRef)security->CFArrayGetValueAtIndex(certificates, i);
if (!cert_ref) continue;
// Get the DER encoding; SecCertificateCopyData transfers ownership, so
// cert_data is released once converted.
CFDataRef cert_data = security->SecCertificateCopyData(cert_ref);
if (!cert_data) continue;
// Convert DER bytes to an OpenSSL X509. d2i_X509 advances data_ptr, but
// that is a local copy of the CFData pointer.
const unsigned char* data_ptr = security->CFDataGetBytePtr(cert_data);
long data_len = security->CFDataGetLength(cert_data);
X509* x509_cert = d2i_X509(nullptr, &data_ptr, data_len);
security->CFRelease(cert_data);
if (!x509_cert) continue;
// Only keep CA certificates that pass the trust-policy check; the stack
// takes ownership of pushed certs, everything else is freed here.
if (X509_check_ca(x509_cert) == 1 &&
is_certificate_trusted_for_policy(security, x509_cert, cert_ref)) {
sk_X509_push(*system_certs, x509_cert);
} else {
X509_free(x509_cert);
}
}
security->CFRelease(certificates);
}
// Cleanup function for the Security framework loader.
// Atomically detaches the global instance and destroys it; the destructor
// dlcloses both framework handles. Safe to call when nothing was loaded,
// since `delete nullptr` is a no-op.
extern "C" void us_cleanup_security_framework() {
  delete g_security_framework.exchange(nullptr);
}
#endif // __APPLE__

View File

@@ -5,7 +5,6 @@
#define CPPDECL extern "C"
STACK_OF(X509) *us_get_root_extra_cert_instances();
STACK_OF(X509) *us_get_root_system_cert_instances();
#else
#define CPPDECL extern

View File

@@ -1,170 +0,0 @@
#ifndef _WIN32
#ifndef __APPLE__
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <openssl/x509.h>
#include <openssl/x509_vfy.h>
#include <openssl/pem.h>
extern "C" void BUN__warn__extra_ca_load_failed(const char* filename, const char* error_msg);
// Helper function to load certificates from a directory
static void load_certs_from_directory(const char* dir_path, STACK_OF(X509)* cert_stack) {
DIR* dir = opendir(dir_path);
if (!dir) {
return;
}
struct dirent* entry;
while ((entry = readdir(dir)) != NULL) {
// Skip . and ..
if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
continue;
}
// Check if file has .crt, .pem, or .cer extension
const char* ext = strrchr(entry->d_name, '.');
if (!ext || (strcmp(ext, ".crt") != 0 && strcmp(ext, ".pem") != 0 && strcmp(ext, ".cer") != 0)) {
continue;
}
// Build full path
char filepath[PATH_MAX];
snprintf(filepath, sizeof(filepath), "%s/%s", dir_path, entry->d_name);
// Try to load certificate
FILE* file = fopen(filepath, "r");
if (file) {
X509* cert = PEM_read_X509(file, NULL, NULL, NULL);
fclose(file);
if (cert) {
if (!sk_X509_push(cert_stack, cert)) {
X509_free(cert);
}
}
}
}
closedir(dir);
}
// Helper function to load certificates from a bundle file.
// Reads PEM certificates from bundle_path one after another and appends each
// to cert_stack until parsing fails (normally at end of file). The error
// queue is cleared afterwards because PEM_read_X509 signals EOF by failing.
// Missing/unreadable files are ignored.
static void load_certs_from_bundle(const char* bundle_path, STACK_OF(X509)* cert_stack) {
    FILE* fp = fopen(bundle_path, "r");
    if (fp == NULL) {
        return;
    }
    for (;;) {
        X509* cert = PEM_read_X509(fp, NULL, NULL, NULL);
        if (cert == NULL) {
            break;  // EOF or malformed entry -- stop reading.
        }
        if (!sk_X509_push(cert_stack, cert)) {
            // The stack did not take ownership; free and give up.
            X509_free(cert);
            break;
        }
    }
    ERR_clear_error();
    fclose(fp);
}
// Main function to load system certificates on Linux and other Unix-like
// systems. Populates *system_certs with a newly allocated stack the caller
// owns (*system_certs is NULL only if allocation failed). Sources, in order:
//   1. SSL_CERT_FILE / SSL_CERT_DIR env vars. These are EXCLUSIVE: when
//      either is set, the standard paths below are not consulted, even if
//      the variables yield zero certificates.
//   2. Well-known distribution bundle files and certificate directories.
//   3. NODE_EXTRA_CA_CERTS, appended on top of the standard paths.
// NOTE(review): because of the early return in step 1, NODE_EXTRA_CA_CERTS
// is ignored whenever SSL_CERT_FILE/SSL_CERT_DIR is set -- confirm this is
// the intended Node.js-compatible behavior.
extern "C" void us_load_system_certificates_linux(STACK_OF(X509) **system_certs) {
    *system_certs = sk_X509_new_null();
    if (*system_certs == NULL) {
        return;
    }
    // First check environment variables (same as Node.js and OpenSSL)
    const char* ssl_cert_file = getenv("SSL_CERT_FILE");
    const char* ssl_cert_dir = getenv("SSL_CERT_DIR");
    // If SSL_CERT_FILE is set, load from it
    if (ssl_cert_file && strlen(ssl_cert_file) > 0) {
        load_certs_from_bundle(ssl_cert_file, *system_certs);
    }
    // If SSL_CERT_DIR is set, load from each directory (colon-separated)
    if (ssl_cert_dir && strlen(ssl_cert_dir) > 0) {
        char* dir_copy = strdup(ssl_cert_dir);
        if (dir_copy) {
            char* token = strtok(dir_copy, ":");
            while (token != NULL) {
                // Skip empty tokens
                if (strlen(token) > 0) {
                    load_certs_from_directory(token, *system_certs);
                }
                token = strtok(NULL, ":");
            }
            free(dir_copy);
        }
    }
    // If environment variables were set, use only those (even if they yield zero certs)
    if (ssl_cert_file || ssl_cert_dir) {
        return;
    }
    // Otherwise, load certificates from standard Linux/Unix paths.
    // Common certificate bundle locations (single file with multiple certs),
    // based on common Linux distributions and OpenSSL defaults.
    static const char* bundle_paths[] = {
        "/etc/ssl/certs/ca-certificates.crt",                    // Debian/Ubuntu/Gentoo
        "/etc/pki/tls/certs/ca-bundle.crt",                      // Fedora/RHEL 6
        "/etc/ssl/ca-bundle.pem",                                // OpenSUSE
        "/etc/pki/tls/cert.pem",                                 // Fedora/RHEL 7+
        "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",     // CentOS/RHEL 7+
        "/etc/ssl/cert.pem",                                     // Alpine Linux, macOS OpenSSL
        "/usr/local/etc/openssl/cert.pem",                       // Homebrew OpenSSL on macOS
        "/usr/local/share/ca-certificates/ca-certificates.crt",  // Custom CA installs
        NULL
    };
    // Common certificate directory locations (multiple files).
    // Note: OpenSSL expects hashed symlinks in directories (c_rehash format).
    static const char* dir_paths[] = {
        "/etc/ssl/certs",                    // Common location (Debian/Ubuntu with hashed links)
        "/etc/pki/tls/certs",                // RHEL/Fedora
        "/usr/share/ca-certificates",        // Debian/Ubuntu (original certs, not hashed)
        "/usr/local/share/certs",            // FreeBSD
        "/etc/openssl/certs",                // NetBSD
        "/var/ssl/certs",                    // AIX
        "/usr/local/etc/openssl/certs",      // Homebrew OpenSSL on macOS
        "/System/Library/OpenSSL/certs",     // macOS system OpenSSL (older versions)
        NULL
    };
    // Try loading from bundle files first
    for (const char** path = bundle_paths; *path != NULL; path++) {
        load_certs_from_bundle(*path, *system_certs);
    }
    // Then try loading from directories
    for (const char** path = dir_paths; *path != NULL; path++) {
        load_certs_from_directory(*path, *system_certs);
    }
    // Also check NODE_EXTRA_CA_CERTS environment variable
    const char* extra_ca_certs = getenv("NODE_EXTRA_CA_CERTS");
    if (extra_ca_certs && strlen(extra_ca_certs) > 0) {
        FILE* file = fopen(extra_ca_certs, "r");
        if (file) {
            X509* cert;
            while ((cert = PEM_read_X509(file, NULL, NULL, NULL)) != NULL) {
                // Fix: a failed push previously leaked `cert`; free and stop.
                if (!sk_X509_push(*system_certs, cert)) {
                    X509_free(cert);
                    break;
                }
            }
            // Fix: PEM_read_X509 signals EOF by failing with
            // PEM_R_NO_START_LINE queued; clear it so stale errors don't
            // surface in later TLS operations (matches load_certs_from_bundle).
            ERR_clear_error();
            fclose(file);
        } else {
            BUN__warn__extra_ca_load_failed(extra_ca_certs, "Failed to open file");
        }
    }
}
#endif // !__APPLE__
#endif // !_WIN32

View File

@@ -1,18 +0,0 @@
#pragma once
#include <openssl/x509.h>
// Platform-specific certificate loading functions.
// Each us_load_system_certificates_* allocates a new STACK_OF(X509) into
// *system_certs and fills it with the platform's trusted system CAs; the
// caller owns the resulting stack. Exactly one of these is implemented per
// target platform.
extern "C" {
// Load system certificates for the current platform
void us_load_system_certificates_linux(STACK_OF(X509) **system_certs);
void us_load_system_certificates_macos(STACK_OF(X509) **system_certs);
void us_load_system_certificates_windows(STACK_OF(X509) **system_certs);
// Platform-specific cleanup functions
#ifdef __APPLE__
// Tears down the dynamically loaded Security/CoreFoundation handles.
void us_cleanup_security_framework();
#endif
}

View File

@@ -1,53 +0,0 @@
#ifdef _WIN32
#include <windows.h>
#include <wincrypt.h>
#include <vector>
#include <cstring>
// Forward declaration to avoid including OpenSSL headers here
// This prevents conflicts with Windows macros like X509_NAME
// Note: We don't use STACK_OF macro here since we don't have OpenSSL headers
// Structure to hold raw certificate data.
// Carries one certificate's DER encoding as opaque bytes so the Windows half
// of the loader needs no OpenSSL headers (avoids X509_NAME macro clashes);
// conversion to X509 happens in a separate translation unit.
struct RawCertificate {
std::vector<unsigned char> data;
};
// Helper function to load raw certificates from a Windows certificate store
static void LoadRawCertsFromStore(std::vector<RawCertificate>& raw_certs,
DWORD store_flags,
const wchar_t* store_name) {
HCERTSTORE cert_store = CertOpenStore(
CERT_STORE_PROV_SYSTEM_W,
0,
0,
store_flags | CERT_STORE_READONLY_FLAG,
store_name
);
if (cert_store == NULL) {
return;
}
PCCERT_CONTEXT cert_context = NULL;
while ((cert_context = CertEnumCertificatesInStore(cert_store, cert_context)) != NULL) {
RawCertificate raw_cert;
raw_cert.data.assign(cert_context->pbCertEncoded,
cert_context->pbCertEncoded + cert_context->cbCertEncoded);
raw_certs.push_back(std::move(raw_cert));
}
CertCloseStore(cert_store, 0);
}
// Main function to load raw system certificates on Windows
// Returns certificates as raw DER data to avoid OpenSSL header conflicts
extern void us_load_system_certificates_windows_raw(
std::vector<RawCertificate>& raw_certs) {
// Load only from ROOT by default
LoadRawCertsFromStore(raw_certs, CERT_SYSTEM_STORE_CURRENT_USER, L"ROOT");
LoadRawCertsFromStore(raw_certs, CERT_SYSTEM_STORE_LOCAL_MACHINE, L"ROOT");
}
#endif // _WIN32

View File

@@ -226,11 +226,11 @@ struct us_bun_socket_context_options_t {
const char *ca_file_name;
const char *ssl_ciphers;
int ssl_prefer_low_memory_usage; /* Todo: rename to prefer_low_memory_usage and apply for TCP as well */
const char * const *key;
const char **key;
unsigned int key_count;
const char * const *cert;
const char **cert;
unsigned int cert_count;
const char * const *ca;
const char **ca;
unsigned int ca_count;
unsigned int secure_options;
int reject_unauthorized;

View File

@@ -627,24 +627,14 @@ public:
return std::move(*this);
}
void setOnSocketClosed(HttpContextData<SSL>::OnSocketClosedCallback onClose) {
void setOnClose(HttpContextData<SSL>::OnSocketClosedCallback onClose) {
httpContext->getSocketContextData()->onSocketClosed = onClose;
}
void setOnSocketDrain(HttpContextData<SSL>::OnSocketDrainCallback onDrain) {
httpContext->getSocketContextData()->onSocketDrain = onDrain;
}
void setOnSocketData(HttpContextData<SSL>::OnSocketDataCallback onData) {
httpContext->getSocketContextData()->onSocketData = onData;
}
void setOnClientError(HttpContextData<SSL>::OnClientErrorCallback onClientError) {
httpContext->getSocketContextData()->onClientError = std::move(onClientError);
}
void setOnSocketUpgraded(HttpContextData<SSL>::OnSocketUpgradedCallback onUpgraded) {
httpContext->getSocketContextData()->onSocketUpgraded = onUpgraded;
}
TemplatedApp &&run() {
uWS::run();
return std::move(*this);

View File

@@ -193,32 +193,23 @@ private:
auto *httpResponseData = reinterpret_cast<HttpResponseData<SSL> *>(us_socket_ext(SSL, s));
/* Call filter */
HttpContextData<SSL> *httpContextData = getSocketContextDataS(s);
if(httpResponseData && httpResponseData->isConnectRequest) {
if (httpResponseData->socketData && httpContextData->onSocketData) {
httpContextData->onSocketData(httpResponseData->socketData, SSL, s, "", 0, true);
}
if(httpResponseData->inStream) {
httpResponseData->inStream(reinterpret_cast<HttpResponse<SSL> *>(s), "", 0, true, httpResponseData->userData);
httpResponseData->inStream = nullptr;
}
}
for (auto &f : httpContextData->filterHandlers) {
f((HttpResponse<SSL> *) s, -1);
}
if (httpResponseData->socketData && httpContextData->onSocketClosed) {
httpContextData->onSocketClosed(httpResponseData->socketData, SSL, s);
}
/* Signal broken HTTP request only if we have a pending request */
if (httpResponseData->onAborted != nullptr && httpResponseData->userData != nullptr) {
httpResponseData->onAborted((HttpResponse<SSL> *)s, httpResponseData->userData);
}
if (httpResponseData->socketData && httpContextData->onSocketClosed) {
httpContextData->onSocketClosed(httpResponseData->socketData, SSL, s);
}
/* Destruct socket ext */
httpResponseData->~HttpResponseData<SSL>();
@@ -263,9 +254,7 @@ private:
/* The return value is entirely up to us to interpret. The HttpParser cares only for whether the returned value is DIFFERENT from passed user */
auto result = httpResponseData->consumePostPadded(httpContextData->maxHeaderSize, httpResponseData->isConnectRequest, httpContextData->flags.requireHostHeader,httpContextData->flags.useStrictMethodValidation, data, (unsigned int) length, s, proxyParser, [httpContextData](void *s, HttpRequest *httpRequest) -> void * {
auto result = httpResponseData->consumePostPadded(httpContextData->maxHeaderSize, httpContextData->flags.requireHostHeader,httpContextData->flags.useStrictMethodValidation, data, (unsigned int) length, s, proxyParser, [httpContextData](void *s, HttpRequest *httpRequest) -> void * {
/* For every request we reset the timeout and hang until user makes action */
/* Warning: if we are in shutdown state, resetting the timer is a security issue! */
us_socket_timeout(SSL, (us_socket_t *) s, 0);
@@ -341,12 +330,7 @@ private:
/* Continue parsing */
return s;
}, [httpResponseData, httpContextData](void *user, std::string_view data, bool fin) -> void * {
if (httpResponseData->isConnectRequest && httpResponseData->socketData && httpContextData->onSocketData) {
httpContextData->onSocketData(httpResponseData->socketData, SSL, (struct us_socket_t *) user, data.data(), data.length(), fin);
}
}, [httpResponseData](void *user, std::string_view data, bool fin) -> void * {
/* We always get an empty chunk even if there is no data */
if (httpResponseData->inStream) {
@@ -465,7 +449,7 @@ private:
us_socket_context_on_writable(SSL, getSocketContext(), [](us_socket_t *s) {
auto *asyncSocket = reinterpret_cast<AsyncSocket<SSL> *>(s);
auto *httpResponseData = reinterpret_cast<HttpResponseData<SSL> *>(asyncSocket->getAsyncSocketData());
/* Attempt to drain the socket buffer before triggering onWritable callback */
size_t bufferedAmount = asyncSocket->getBufferedAmount();
if (bufferedAmount > 0) {
@@ -486,12 +470,6 @@ private:
*/
}
auto *httpContextData = getSocketContextDataS(s);
if (httpResponseData->isConnectRequest && httpResponseData->socketData && httpContextData->onSocketDrain) {
httpContextData->onSocketDrain(httpResponseData->socketData, SSL, (struct us_socket_t *) s);
}
/* Ask the developer to write data and return success (true) or failure (false), OR skip sending anything and return success (true). */
if (httpResponseData->onWritable) {
/* We are now writable, so hang timeout again, the user does not have to do anything so we should hang until end or tryEnd rearms timeout */
@@ -536,7 +514,6 @@ private:
us_socket_context_on_end(SSL, getSocketContext(), [](us_socket_t *s) {
auto *asyncSocket = reinterpret_cast<AsyncSocket<SSL> *>(s);
asyncSocket->uncorkWithoutSending();
/* We do not care for half closed sockets */
return asyncSocket->close();
});

View File

@@ -43,11 +43,8 @@ struct alignas(16) HttpContextData {
template <bool> friend struct TemplatedApp;
private:
std::vector<MoveOnlyFunction<void(HttpResponse<SSL> *, int)>> filterHandlers;
using OnSocketDataCallback = void (*)(void* userData, int is_ssl, struct us_socket_t *rawSocket, const char *data, int length, bool last);
using OnSocketDrainCallback = void (*)(void* userData, int is_ssl, struct us_socket_t *rawSocket);
using OnSocketUpgradedCallback = void (*)(void* userData, int is_ssl, struct us_socket_t *rawSocket);
using OnClientErrorCallback = MoveOnlyFunction<void(int is_ssl, struct us_socket_t *rawSocket, uWS::HttpParserError errorCode, char *rawPacket, int rawPacketLength)>;
using OnSocketClosedCallback = void (*)(void* userData, int is_ssl, struct us_socket_t *rawSocket);
using OnClientErrorCallback = MoveOnlyFunction<void(int is_ssl, struct us_socket_t *rawSocket, uWS::HttpParserError errorCode, char *rawPacket, int rawPacketLength)>;
MoveOnlyFunction<void(const char *hostname)> missingServerNameHandler;
@@ -64,9 +61,6 @@ private:
void *upgradedWebSocket = nullptr;
/* Used to simulate Node.js socket events. */
OnSocketClosedCallback onSocketClosed = nullptr;
OnSocketDrainCallback onSocketDrain = nullptr;
OnSocketDataCallback onSocketData = nullptr;
OnSocketUpgradedCallback onSocketUpgraded = nullptr;
OnClientErrorCallback onClientError = nullptr;
uint64_t maxHeaderSize = 0; // 0 means no limit
@@ -79,7 +73,6 @@ private:
}
public:
HttpFlags flags;
};

View File

@@ -117,19 +117,18 @@ namespace uWS
struct ConsumeRequestLineResult {
char *position;
bool isAncientHTTP;
bool isConnect;
HTTPHeaderParserError headerParserError;
public:
static ConsumeRequestLineResult error(HTTPHeaderParserError error) {
return ConsumeRequestLineResult{nullptr, false, false, error};
return ConsumeRequestLineResult{nullptr, false, error};
}
static ConsumeRequestLineResult success(char *position, bool isAncientHTTP = false, bool isConnect = false) {
return ConsumeRequestLineResult{position, isAncientHTTP, isConnect, HTTP_HEADER_PARSER_ERROR_NONE};
static ConsumeRequestLineResult success(char *position, bool isAncientHTTP = false) {
return ConsumeRequestLineResult{position, isAncientHTTP, HTTP_HEADER_PARSER_ERROR_NONE};
}
static ConsumeRequestLineResult shortRead(bool isAncientHTTP = false, bool isConnect = false) {
return ConsumeRequestLineResult{nullptr, isAncientHTTP, isConnect, HTTP_HEADER_PARSER_ERROR_NONE};
static ConsumeRequestLineResult shortRead(bool isAncientHTTP = false) {
return ConsumeRequestLineResult{nullptr, isAncientHTTP, HTTP_HEADER_PARSER_ERROR_NONE};
}
bool isErrorOrShortRead() {
@@ -552,10 +551,7 @@ namespace uWS
return ConsumeRequestLineResult::shortRead();
}
bool isHTTPMethod = (__builtin_expect(data[1] == '/', 1));
bool isConnect = !isHTTPMethod && (isHTTPorHTTPSPrefixForProxies(data + 1, end) == 1 || ((data - start) == 7 && memcmp(start, "CONNECT", 7) == 0));
if (isHTTPMethod || isConnect) [[likely]] {
if (data[0] == 32 && (__builtin_expect(data[1] == '/', 1) || isHTTPorHTTPSPrefixForProxies(data + 1, end) == 1)) [[likely]] {
header.key = {start, (size_t) (data - start)};
data++;
if(!isValidMethod(header.key, useStrictMethodValidation)) {
@@ -581,22 +577,22 @@ namespace uWS
if (nextPosition >= end) {
/* Whatever we have must be part of the version string */
if (memcmp(" HTTP/1.1\r\n", data, std::min<unsigned int>(11, (unsigned int) (end - data))) == 0) {
return ConsumeRequestLineResult::shortRead(false, isConnect);
return ConsumeRequestLineResult::shortRead();
} else if (memcmp(" HTTP/1.0\r\n", data, std::min<unsigned int>(11, (unsigned int) (end - data))) == 0) {
/*Indicates that the request line is ancient HTTP*/
return ConsumeRequestLineResult::shortRead(true, isConnect);
return ConsumeRequestLineResult::shortRead(true);
}
return ConsumeRequestLineResult::error(HTTP_HEADER_PARSER_ERROR_INVALID_HTTP_VERSION);
}
if (memcmp(" HTTP/1.1\r\n", data, 11) == 0) {
return ConsumeRequestLineResult::success(nextPosition, false, isConnect);
return ConsumeRequestLineResult::success(nextPosition);
} else if (memcmp(" HTTP/1.0\r\n", data, 11) == 0) {
/*Indicates that the request line is ancient HTTP*/
return ConsumeRequestLineResult::success(nextPosition, true, isConnect);
return ConsumeRequestLineResult::success(nextPosition, true);
}
/* If we stand at the post padded CR, we have fragmented input so try again later */
if (data[0] == '\r') {
return ConsumeRequestLineResult::shortRead(false, isConnect);
return ConsumeRequestLineResult::shortRead();
}
/* This is an error */
return ConsumeRequestLineResult::error(HTTP_HEADER_PARSER_ERROR_INVALID_HTTP_VERSION);
@@ -606,14 +602,14 @@ namespace uWS
/* If we stand at the post padded CR, we have fragmented input so try again later */
if (data[0] == '\r') {
return ConsumeRequestLineResult::shortRead(false, isConnect);
return ConsumeRequestLineResult::shortRead();
}
if (data[0] == 32) {
switch (isHTTPorHTTPSPrefixForProxies(data + 1, end)) {
// If we haven't received enough data to check if it's http:// or https://, let's try again later
case -1:
return ConsumeRequestLineResult::shortRead(false, isConnect);
return ConsumeRequestLineResult::shortRead();
// Otherwise, if it's not http:// or https://, return 400
default:
return ConsumeRequestLineResult::error(HTTP_HEADER_PARSER_ERROR_INVALID_REQUEST);
@@ -639,7 +635,7 @@ namespace uWS
}
/* End is only used for the proxy parser. The HTTP parser recognizes "\ra" as invalid "\r\n" scan and breaks. */
static HttpParserResult getHeaders(char *postPaddedBuffer, char *end, struct HttpRequest::Header *headers, void *reserved, bool &isAncientHTTP, bool &isConnectRequest, bool useStrictMethodValidation, uint64_t maxHeaderSize) {
static HttpParserResult getHeaders(char *postPaddedBuffer, char *end, struct HttpRequest::Header *headers, void *reserved, bool &isAncientHTTP, bool useStrictMethodValidation, uint64_t maxHeaderSize) {
char *preliminaryKey, *preliminaryValue, *start = postPaddedBuffer;
#ifdef UWS_WITH_PROXY
/* ProxyParser is passed as reserved parameter */
@@ -693,9 +689,6 @@ namespace uWS
if(requestLineResult.isAncientHTTP) {
isAncientHTTP = true;
}
if(requestLineResult.isConnect) {
isConnectRequest = true;
}
/* No request headers found */
const char * headerStart = (headers[0].key.length() > 0) ? headers[0].key.data() : end;
@@ -805,7 +798,7 @@ namespace uWS
/* This is the only caller of getHeaders and is thus the deepest part of the parser. */
template <bool ConsumeMinimally>
HttpParserResult fenceAndConsumePostPadded(uint64_t maxHeaderSize, bool& isConnectRequest, bool requireHostHeader, bool useStrictMethodValidation, char *data, unsigned int length, void *user, void *reserved, HttpRequest *req, MoveOnlyFunction<void *(void *, HttpRequest *)> &requestHandler, MoveOnlyFunction<void *(void *, std::string_view, bool)> &dataHandler) {
HttpParserResult fenceAndConsumePostPadded(uint64_t maxHeaderSize, bool requireHostHeader, bool useStrictMethodValidation, char *data, unsigned int length, void *user, void *reserved, HttpRequest *req, MoveOnlyFunction<void *(void *, HttpRequest *)> &requestHandler, MoveOnlyFunction<void *(void *, std::string_view, bool)> &dataHandler) {
/* How much data we CONSUMED (to throw away) */
unsigned int consumedTotal = 0;
@@ -816,7 +809,7 @@ namespace uWS
data[length + 1] = 'a'; /* Anything that is not \n, to trigger "invalid request" */
req->ancientHttp = false;
for (;length;) {
auto result = getHeaders(data, data + length, req->headers, reserved, req->ancientHttp, isConnectRequest, useStrictMethodValidation, maxHeaderSize);
auto result = getHeaders(data, data + length, req->headers, reserved, req->ancientHttp, useStrictMethodValidation, maxHeaderSize);
if(result.isError()) {
return result;
}
@@ -923,10 +916,6 @@ namespace uWS
length -= emittable;
consumedTotal += emittable;
}
} else if(isConnectRequest) {
// This only server to mark that the connect request read all headers
// and can starting emitting data
remainingStreamingBytes = STATE_IS_CHUNKED;
} else {
/* If we came here without a body; emit an empty data chunk to signal no data */
dataHandler(user, {}, true);
@@ -942,16 +931,15 @@ namespace uWS
}
public:
HttpParserResult consumePostPadded(uint64_t maxHeaderSize, bool& isConnectRequest, bool requireHostHeader, bool useStrictMethodValidation, char *data, unsigned int length, void *user, void *reserved, MoveOnlyFunction<void *(void *, HttpRequest *)> &&requestHandler, MoveOnlyFunction<void *(void *, std::string_view, bool)> &&dataHandler) {
HttpParserResult consumePostPadded(uint64_t maxHeaderSize, bool requireHostHeader, bool useStrictMethodValidation, char *data, unsigned int length, void *user, void *reserved, MoveOnlyFunction<void *(void *, HttpRequest *)> &&requestHandler, MoveOnlyFunction<void *(void *, std::string_view, bool)> &&dataHandler) {
/* This resets BloomFilter by construction, but later we also reset it again.
* Optimize this to skip resetting twice (req could be made global) */
HttpRequest req;
if (remainingStreamingBytes) {
if (isConnectRequest) {
dataHandler(user, std::string_view(data, length), false);
return HttpParserResult::success(0, user);
} else if (isParsingChunkedEncoding(remainingStreamingBytes)) {
/* It's either chunked or with a content-length */
/* It's either chunked or with a content-length */
if (isParsingChunkedEncoding(remainingStreamingBytes)) {
std::string_view dataToConsume(data, length);
for (auto chunk : uWS::ChunkIterator(&dataToConsume, &remainingStreamingBytes)) {
dataHandler(user, chunk, chunk.length() == 0);
@@ -962,7 +950,6 @@ public:
data = (char *) dataToConsume.data();
length = (unsigned int) dataToConsume.length();
} else {
// this is exactly the same as below!
// todo: refactor this
if (remainingStreamingBytes >= length) {
@@ -993,7 +980,7 @@ public:
fallback.append(data, maxCopyDistance);
// break here on break
HttpParserResult consumed = fenceAndConsumePostPadded<true>(maxHeaderSize, isConnectRequest, requireHostHeader, useStrictMethodValidation, fallback.data(), (unsigned int) fallback.length(), user, reserved, &req, requestHandler, dataHandler);
HttpParserResult consumed = fenceAndConsumePostPadded<true>(maxHeaderSize, requireHostHeader, useStrictMethodValidation, fallback.data(), (unsigned int) fallback.length(), user, reserved, &req, requestHandler, dataHandler);
/* Return data will be different than user if we are upgraded to WebSocket or have an error */
if (consumed.returnedData != user) {
return consumed;
@@ -1010,11 +997,8 @@ public:
length -= consumedBytes - had;
if (remainingStreamingBytes) {
if(isConnectRequest) {
dataHandler(user, std::string_view(data, length), false);
return HttpParserResult::success(0, user);
} else if (isParsingChunkedEncoding(remainingStreamingBytes)) {
/* It's either chunked or with a content-length */
/* It's either chunked or with a content-length */
if (isParsingChunkedEncoding(remainingStreamingBytes)) {
std::string_view dataToConsume(data, length);
for (auto chunk : uWS::ChunkIterator(&dataToConsume, &remainingStreamingBytes)) {
dataHandler(user, chunk, chunk.length() == 0);
@@ -1053,7 +1037,7 @@ public:
}
}
HttpParserResult consumed = fenceAndConsumePostPadded<false>(maxHeaderSize, isConnectRequest, requireHostHeader, useStrictMethodValidation, data, length, user, reserved, &req, requestHandler, dataHandler);
HttpParserResult consumed = fenceAndConsumePostPadded<false>(maxHeaderSize, requireHostHeader, useStrictMethodValidation, data, length, user, reserved, &req, requestHandler, dataHandler);
/* Return data will be different than user if we are upgraded to WebSocket or have an error */
if (consumed.returnedData != user) {
return consumed;

View File

@@ -243,7 +243,7 @@ public:
/* Manually upgrade to WebSocket. Typically called in upgrade handler. Immediately calls open handler.
* NOTE: Will invalidate 'this' as socket might change location in memory. Throw away after use. */
template <typename UserData>
us_socket_t *upgrade(UserData&& userData, std::string_view secWebSocketKey, std::string_view secWebSocketProtocol,
us_socket_t *upgrade(UserData &&userData, std::string_view secWebSocketKey, std::string_view secWebSocketProtocol,
std::string_view secWebSocketExtensions,
struct us_socket_context_t *webSocketContext) {
@@ -316,20 +316,14 @@ public:
HttpContext<SSL> *httpContext = (HttpContext<SSL> *) us_socket_context(SSL, (struct us_socket_t *) this);
/* Move any backpressure out of HttpResponse */
auto* responseData = getHttpResponseData();
BackPressure backpressure(std::move(((AsyncSocketData<SSL> *) responseData)->buffer));
auto* socketData = responseData->socketData;
HttpContextData<SSL> *httpContextData = httpContext->getSocketContextData();
BackPressure backpressure(std::move(((AsyncSocketData<SSL> *) getHttpResponseData())->buffer));
/* Destroy HttpResponseData */
responseData->~HttpResponseData();
getHttpResponseData()->~HttpResponseData();
/* Before we adopt and potentially change socket, check if we are corked */
bool wasCorked = Super::isCorked();
/* Adopting a socket invalidates it, do not rely on it directly to carry any data */
us_socket_t *usSocket = us_socket_context_adopt_socket(SSL, (us_socket_context_t *) webSocketContext, (us_socket_t *) this, sizeof(WebSocketData) + sizeof(UserData));
WebSocket<SSL, true, UserData> *webSocket = (WebSocket<SSL, true, UserData> *) usSocket;
@@ -340,12 +334,10 @@ public:
}
/* Initialize websocket with any moved backpressure intact */
webSocket->init(perMessageDeflate, compressOptions, std::move(backpressure), socketData, httpContextData->onSocketClosed);
if (httpContextData->onSocketUpgraded) {
httpContextData->onSocketUpgraded(socketData, SSL, usSocket);
}
webSocket->init(perMessageDeflate, compressOptions, std::move(backpressure));
/* We should only mark this if inside the parser; if upgrading "async" we cannot set this */
HttpContextData<SSL> *httpContextData = httpContext->getSocketContextData();
if (httpContextData->flags.isParsingHttp) {
/* We need to tell the Http parser that we changed socket */
httpContextData->upgradedWebSocket = webSocket;
@@ -358,7 +350,7 @@ public:
us_socket_timeout(SSL, (us_socket_t *) webSocket, webSocketContextData->idleTimeoutComponents.first);
/* Move construct the UserData right before calling open handler */
new (webSocket->getUserData()) UserData(std::forward<UserData>(userData));
new (webSocket->getUserData()) UserData(std::move(userData));
/* Emit open event and start the timeout */
if (webSocketContextData->openHandler) {
@@ -749,10 +741,6 @@ public:
return httpResponseData->socketData;
}
bool isConnectRequest() {
HttpResponseData<SSL> *httpResponseData = getHttpResponseData();
return httpResponseData->isConnectRequest;
}
void setWriteOffset(uint64_t offset) {
HttpResponseData<SSL> *httpResponseData = getHttpResponseData();

View File

@@ -108,7 +108,6 @@ struct HttpResponseData : AsyncSocketData<SSL>, HttpParser {
uint8_t state = 0;
uint8_t idleTimeout = 10; // default HTTP_TIMEOUT 10 seconds
bool fromAncientRequest = false;
bool isConnectRequest = false;
bool isIdle = true;
bool shouldCloseOnceIdle = false;

View File

@@ -34,8 +34,8 @@ struct WebSocket : AsyncSocket<SSL> {
private:
typedef AsyncSocket<SSL> Super;
void *init(bool perMessageDeflate, CompressOptions compressOptions, BackPressure &&backpressure, void *socketData, WebSocketData::OnSocketClosedCallback onSocketClosed) {
new (us_socket_ext(SSL, (us_socket_t *) this)) WebSocketData(perMessageDeflate, compressOptions, std::move(backpressure), socketData, onSocketClosed);
void *init(bool perMessageDeflate, CompressOptions compressOptions, BackPressure &&backpressure) {
new (us_socket_ext(SSL, (us_socket_t *) this)) WebSocketData(perMessageDeflate, compressOptions, std::move(backpressure));
return this;
}
public:

View File

@@ -256,9 +256,6 @@ private:
/* For whatever reason, if we already have emitted close event, do not emit it again */
WebSocketData *webSocketData = (WebSocketData *) (us_socket_ext(SSL, s));
if (webSocketData->socketData && webSocketData->onSocketClosed) {
webSocketData->onSocketClosed(webSocketData->socketData, SSL, (us_socket_t *) s);
}
if (!webSocketData->isShuttingDown) {
/* Emit close event */
auto *webSocketContextData = (WebSocketContextData<SSL, USERDATA> *) us_socket_context_ext(SSL, us_socket_context(SSL, (us_socket_t *) s));

View File

@@ -52,6 +52,7 @@ struct WebSocketContextData {
private:
public:
/* This one points to the App's shared topicTree */
TopicTree<TopicTreeMessage, TopicTreeBigMessage> *topicTree;

View File

@@ -38,7 +38,6 @@ private:
unsigned int controlTipLength = 0;
bool isShuttingDown = 0;
bool hasTimedOut = false;
enum CompressionStatus : char {
DISABLED,
ENABLED,
@@ -53,12 +52,7 @@ private:
/* We could be a subscriber */
Subscriber *subscriber = nullptr;
public:
using OnSocketClosedCallback = void (*)(void* userData, int is_ssl, struct us_socket_t *rawSocket);
void *socketData = nullptr;
/* node http compatibility callbacks */
OnSocketClosedCallback onSocketClosed = nullptr;
WebSocketData(bool perMessageDeflate, CompressOptions compressOptions, BackPressure &&backpressure, void *socketData, OnSocketClosedCallback onSocketClosed) : AsyncSocketData<false>(std::move(backpressure)), WebSocketState<true>() {
WebSocketData(bool perMessageDeflate, CompressOptions compressOptions, BackPressure &&backpressure) : AsyncSocketData<false>(std::move(backpressure)), WebSocketState<true>() {
compressionStatus = perMessageDeflate ? ENABLED : DISABLED;
/* Initialize the dedicated sliding window(s) */
@@ -70,8 +64,6 @@ public:
inflationStream = new InflationStream(compressOptions);
}
}
this->socketData = socketData;
this->onSocketClosed = onSocketClosed;
}
~WebSocketData() {

View File

@@ -22,7 +22,7 @@ At its core is the _Bun runtime_, a fast JavaScript runtime designed as a drop-i
## Features:
- Live in-editor error messages (gif below)
- Vscode test runner support
- Test runner codelens
- Debugger support
- Run scripts from package.json
- Visual lockfile viewer for old binary lockfiles (`bun.lockb`)

View File

@@ -1,6 +1,6 @@
{
"name": "bun-vscode",
"version": "0.0.31",
"version": "0.0.29",
"author": "oven",
"repository": {
"type": "git",
@@ -116,6 +116,20 @@
"category": "Bun",
"enablement": "!inDebugMode && resourceLangId =~ /^(javascript|typescript|javascriptreact|typescriptreact)$/ && !isInDiffEditor && resourceScheme == 'untitled'",
"icon": "$(play-circle)"
},
{
"command": "extension.bun.runTest",
"title": "Run all tests",
"shortTitle": "Run Test",
"category": "Bun",
"icon": "$(play)"
},
{
"command": "extension.bun.watchTest",
"title": "Run all tests in watch mode",
"shortTitle": "Run Test Watch",
"category": "Bun",
"icon": "$(sync)"
}
],
"menus": {

View File

@@ -1,5 +1,5 @@
#!/bin/sh
# Version: 19
# Version: 18
# A script that installs the dependencies needed to build and test Bun.
# This should work on macOS and Linux with a POSIX shell.
@@ -685,8 +685,6 @@ install_common_software() {
apt-transport-https \
software-properties-common
fi
install_packages \
libc6-dbg
;;
dnf)
install_packages \
@@ -1195,7 +1193,7 @@ install_docker() {
execute_sudo amazon-linux-extras install docker
;;
amzn-* | alpine-*)
install_packages docker docker-cli-compose
install_packages docker
;;
*)
sh="$(require sh)"
@@ -1210,17 +1208,10 @@ install_docker() {
if [ -f "$systemctl" ]; then
execute_sudo "$systemctl" enable docker
fi
if [ "$os" = "linux" ] && [ "$distro" = "alpine" ]; then
execute doas rc-update add docker default
execute doas rc-service docker start
fi
getent="$(which getent)"
if [ -n "$("$getent" group docker)" ]; then
usermod="$(which usermod)"
if [ -z "$usermod" ]; then
usermod="$(sudo which usermod)"
fi
if [ -f "$usermod" ]; then
execute_sudo "$usermod" -aG docker "$user"
fi

View File

@@ -72,7 +72,6 @@ const cwd = import.meta.dirname ? dirname(import.meta.dirname) : process.cwd();
const testsPath = join(cwd, "test");
const spawnTimeout = 5_000;
const spawnBunTimeout = 20_000; // when running with ASAN/LSAN bun can take a bit longer to exit, not a bug.
const testTimeout = 3 * 60_000;
const integrationTimeout = 5 * 60_000;
@@ -80,7 +79,7 @@ function getNodeParallelTestTimeout(testPath) {
if (testPath.includes("test-dns")) {
return 90_000;
}
return 20_000;
return 10_000;
}
process.on("SIGTRAP", () => {
@@ -579,11 +578,8 @@ async function runTests() {
const title = relative(cwd, absoluteTestPath).replaceAll(sep, "/");
if (isNodeTest(testPath)) {
const testContent = readFileSync(absoluteTestPath, "utf-8");
let runWithBunTest = title.includes("needs-test") || testContent.includes("node:test");
// don't wanna have a filter for includes("bun:test") but these need our mocks
runWithBunTest ||= title === "test/js/node/test/parallel/test-fs-append-file-flush.js";
runWithBunTest ||= title === "test/js/node/test/parallel/test-fs-write-file-flush.js";
runWithBunTest ||= title === "test/js/node/test/parallel/test-fs-write-stream-flush.js";
const runWithBunTest =
title.includes("needs-test") || testContent.includes("bun:test") || testContent.includes("node:test");
const subcommand = runWithBunTest ? "test" : "run";
const env = {
FORCE_COLOR: "0",
@@ -596,7 +592,7 @@ async function runTests() {
}
if ((basename(execPath).includes("asan") || !isCI) && shouldValidateLeakSan(testPath)) {
env.BUN_DESTRUCT_VM_ON_EXIT = "1";
env.ASAN_OPTIONS = "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1:abort_on_error=1";
env.ASAN_OPTIONS = "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1";
// prettier-ignore
env.LSAN_OPTIONS = `malloc_context_size=100:print_suppressions=0:suppressions=${process.cwd()}/test/leaksan.supp`;
}
@@ -661,7 +657,6 @@ async function runTests() {
const buildResult = await spawnBun(execPath, {
cwd: vendorPath,
args: ["run", "build"],
timeout: 60_000,
});
if (!buildResult.ok) {
throw new Error(`Failed to build vendor: ${buildResult.error}`);
@@ -688,9 +683,6 @@ async function runTests() {
}
}
// tests are all over, close the group from the final test. any further output should print ungrouped.
startGroup("End");
if (isGithubAction) {
reportOutputToGitHubAction("failing_tests_count", failedResults.length);
const markdown = formatTestToMarkdown(failedResults, false, 0);
@@ -1140,6 +1132,10 @@ async function spawnBun(execPath, { args, cwd, timeout, env, stdout, stderr }) {
: { BUN_ENABLE_CRASH_REPORTING: "0" }),
};
if (basename(execPath).includes("asan") && bunEnv.ASAN_OPTIONS === undefined) {
bunEnv.ASAN_OPTIONS = "allow_user_segv_handler=1:disable_coredump=0";
}
if (isWindows && bunEnv.Path) {
delete bunEnv.Path;
}
@@ -1156,9 +1152,6 @@ async function spawnBun(execPath, { args, cwd, timeout, env, stdout, stderr }) {
}
bunEnv["TEMP"] = tmpdirPath;
}
if (timeout === undefined) {
timeout = spawnBunTimeout;
}
try {
const existingCores = options["coredump-upload"] ? readdirSync(coresDir) : [];
const result = await spawnSafe({
@@ -1338,7 +1331,7 @@ async function spawnBunTest(execPath, testPath, opts = { cwd }) {
}
if ((basename(execPath).includes("asan") || !isCI) && shouldValidateLeakSan(relative(cwd, absPath))) {
env.BUN_DESTRUCT_VM_ON_EXIT = "1";
env.ASAN_OPTIONS = "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1:abort_on_error=1";
env.ASAN_OPTIONS = "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1";
// prettier-ignore
env.LSAN_OPTIONS = `malloc_context_size=100:print_suppressions=0:suppressions=${process.cwd()}/test/leaksan.supp`;
}

View File

@@ -1,60 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# This script updates SQLite amalgamation files with the required compiler flags.
# It downloads the SQLite source, configures it with necessary flags, builds the
# amalgamation, and copies the generated files to the Bun source tree.
#
# Usage:
# ./scripts/update-sqlite-amalgamation.sh <version_number> <year>
#
# Example:
# ./scripts/update-sqlite-amalgamation.sh 3500400 2025
#
# The version number is a 7-digit SQLite version (e.g., 3500400 for 3.50.4)
# The year is the release year found in the download URL
if [ $# -ne 2 ]; then
echo "Usage: $0 <version_number> <year>"
echo "Example: $0 3500400 2025"
exit 1
fi
VERSION_NUM="$1"
YEAR="$2"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Create temporary directory
TEMP_DIR=$(mktemp -d)
trap 'rm -rf "$TEMP_DIR"' EXIT
cd "$TEMP_DIR"
echo "Downloading SQLite source version $VERSION_NUM from year $YEAR..."
DOWNLOAD_URL="https://sqlite.org/$YEAR/sqlite-src-$VERSION_NUM.zip"
echo "URL: $DOWNLOAD_URL"
wget -q "$DOWNLOAD_URL"
unzip -q "sqlite-src-$VERSION_NUM.zip"
cd "sqlite-src-$VERSION_NUM"
echo "Configuring SQLite with required flags..."
# These flags must be set during amalgamation generation for them to take effect
# in the parser and other compile-time generated code
CFLAGS="-DSQLITE_ENABLE_UPDATE_DELETE_LIMIT=1 -DSQLITE_ENABLE_COLUMN_METADATA=1"
./configure CFLAGS="$CFLAGS" > /dev/null 2>&1
echo "Building amalgamation..."
make sqlite3.c > /dev/null 2>&1
echo "Copying files to Bun source tree..."
# Add clang-format off directive and copy the amalgamation
echo "// clang-format off" > "$REPO_ROOT/src/bun.js/bindings/sqlite/sqlite3.c"
cat sqlite3.c >> "$REPO_ROOT/src/bun.js/bindings/sqlite/sqlite3.c"
echo "// clang-format off" > "$REPO_ROOT/src/bun.js/bindings/sqlite/sqlite3_local.h"
cat sqlite3.h >> "$REPO_ROOT/src/bun.js/bindings/sqlite/sqlite3_local.h"
echo "✓ Successfully updated SQLite amalgamation files"

View File

@@ -2808,7 +2808,6 @@ export function endGroup() {
} else {
console.groupEnd();
}
// when a file exits with an ASAN error, there is no trailing newline so we add one here to make sure `console.group()` detection doesn't get broken in CI.
console.log();
}
@@ -2866,12 +2865,6 @@ export function printEnvironment() {
spawnSync([shell, "-c", "free -m -w"], { stdio: "inherit" });
}
});
startGroup("Docker", () => {
const shell = which(["sh", "bash"]);
if (shell) {
spawnSync([shell, "-c", "docker ps"], { stdio: "inherit" });
}
});
}
if (isWindows) {
startGroup("Disk (win)", () => {

View File

@@ -1 +0,0 @@
CLAUDE.md

View File

@@ -1,5 +0,0 @@
## Zig
- Prefer `@import` at the **bottom** of the file.
- It's `@import("bun")` not `@import("root").bun`
- You must be patient with the build.

View File

@@ -121,10 +121,6 @@ pub fn exit(code: u32) noreturn {
std.os.windows.kernel32.ExitProcess(code);
},
else => {
if (Environment.enable_asan) {
std.c.exit(@bitCast(code));
std.c.abort(); // exit should be noreturn
}
bun.c.quick_exit(@bitCast(code));
std.c.abort(); // quick_exit should be noreturn
},

View File

@@ -7,11 +7,6 @@ pub const StandaloneModuleGraph = struct {
files: bun.StringArrayHashMap(File),
entry_point_id: u32 = 0,
compile_exec_argv: []const u8 = "",
env_config: api.LoadedEnvConfig = .{
.dotenv = .disable,
.prefix = "",
.defaults = .{ .keys = &.{}, .values = &.{} },
},
// We never want to hit the filesystem for these files
// We use the `/$bunfs/` prefix to indicate that it's a virtual path
@@ -204,6 +199,7 @@ pub const StandaloneModuleGraph = struct {
store.ref();
const b = bun.webcore.Blob.initWithStore(store, globalObject).new();
b.allocator = bun.default_allocator;
if (bun.http.MimeType.byExtensionNoDefault(bun.strings.trimLeadingChar(std.fs.path.extension(this.name), '.'))) |mime| {
store.mime_type = mime;
@@ -293,11 +289,7 @@ pub const StandaloneModuleGraph = struct {
byte_count: usize = 0,
modules_ptr: bun.StringPointer = .{},
entry_point_id: u32 = 0,
_padding1: u32 = 0, // Ensure compile_exec_argv_ptr is 8-byte aligned
compile_exec_argv_ptr: bun.StringPointer = .{},
dotenv_behavior: api.DotEnvBehavior = .disable,
_padding2: u32 = 0, // Ensure dotenv_prefix_ptr is 8-byte aligned
dotenv_prefix_ptr: bun.StringPointer = .{},
};
const trailer = "\n---- Bun! ----\n";
@@ -343,11 +335,6 @@ pub const StandaloneModuleGraph = struct {
.files = modules,
.entry_point_id = offsets.entry_point_id,
.compile_exec_argv = sliceToZ(raw_bytes, offsets.compile_exec_argv_ptr),
.env_config = .{
.dotenv = offsets.dotenv_behavior,
.prefix = sliceToZ(raw_bytes, offsets.dotenv_prefix_ptr),
.defaults = .{ .keys = &.{}, .values = &.{} },
},
};
}
@@ -363,7 +350,7 @@ pub const StandaloneModuleGraph = struct {
return bytes[ptr.offset..][0..ptr.length :0];
}
pub fn toBytes(allocator: std.mem.Allocator, prefix: []const u8, output_files: []const bun.options.OutputFile, output_format: bun.options.Format, compile_exec_argv: []const u8, dotenv_behavior: api.DotEnvBehavior, dotenv_prefix: []const u8) ![]u8 {
pub fn toBytes(allocator: std.mem.Allocator, prefix: []const u8, output_files: []const bun.options.OutputFile, output_format: bun.options.Format, compile_exec_argv: []const u8) ![]u8 {
var serialize_trace = bun.perf.trace("StandaloneModuleGraph.serialize");
defer serialize_trace.end();
@@ -405,7 +392,6 @@ pub const StandaloneModuleGraph = struct {
string_builder.cap += 16;
string_builder.cap += @sizeOf(Offsets);
string_builder.countZ(compile_exec_argv);
string_builder.countZ(dotenv_prefix);
try string_builder.allocate(allocator);
@@ -446,27 +432,6 @@ pub const StandaloneModuleGraph = struct {
}
};
if (comptime bun.Environment.is_canary or bun.Environment.isDebug) {
if (bun.getenvZ("BUN_FEATURE_FLAG_DUMP_CODE")) |dump_code_dir| {
const buf = bun.path_buffer_pool.get();
defer bun.path_buffer_pool.put(buf);
const dest_z = bun.path.joinAbsStringBufZ(dump_code_dir, buf, &.{dest_path}, .auto);
// Scoped block to handle dump failures without skipping module emission
dump: {
const file = bun.sys.File.makeOpen(dest_z, bun.O.WRONLY | bun.O.CREAT | bun.O.TRUNC, 0o664).unwrap() catch |err| {
Output.prettyErrorln("<r><red>error<r><d>:<r> failed to open {s}: {s}", .{ dest_path, @errorName(err) });
break :dump;
};
defer file.close();
file.writeAll(output_file.value.buffer.bytes).unwrap() catch |err| {
Output.prettyErrorln("<r><red>error<r><d>:<r> failed to write {s}: {s}", .{ dest_path, @errorName(err) });
break :dump;
};
}
}
}
var module = CompiledModuleGraphFile{
.name = string_builder.fmtAppendCountZ("{s}{s}", .{
prefix,
@@ -512,8 +477,6 @@ pub const StandaloneModuleGraph = struct {
.entry_point_id = @as(u32, @truncate(entry_point_id.?)),
.modules_ptr = string_builder.appendCount(std.mem.sliceAsBytes(modules.items)),
.compile_exec_argv_ptr = string_builder.appendCountZ(compile_exec_argv),
.dotenv_behavior = dotenv_behavior,
.dotenv_prefix_ptr = string_builder.appendCountZ(dotenv_prefix),
.byte_count = string_builder.len,
};
@@ -761,8 +724,7 @@ pub const StandaloneModuleGraph = struct {
return bun.invalid_fd;
};
defer pe_file.deinit();
// Always strip authenticode when adding .bun section for --compile
pe_file.addBunSection(bytes, .strip_always) catch |err| {
pe_file.addBunSection(bytes) catch |err| {
Output.prettyErrorln("Error adding Bun section to PE file: {}", .{err});
cleanup(zname, cloned_executable_fd);
return bun.invalid_fd;
@@ -966,10 +928,8 @@ pub const StandaloneModuleGraph = struct {
windows_options: bun.options.WindowsOptions,
compile_exec_argv: []const u8,
self_exe_path: ?[]const u8,
dotenv_behavior: api.DotEnvBehavior,
dotenv_prefix: []const u8,
) !CompileResult {
const bytes = toBytes(allocator, module_prefix, output_files, output_format, compile_exec_argv, dotenv_behavior, dotenv_prefix) catch |err| {
const bytes = toBytes(allocator, module_prefix, output_files, output_format, compile_exec_argv) catch |err| {
return CompileResult.fail(std.fmt.allocPrint(allocator, "failed to generate module graph bytes: {s}", .{@errorName(err)}) catch "failed to generate module graph bytes");
};
if (bytes.len == 0) return CompileResult.fail("no output files to bundle");
@@ -1546,4 +1506,3 @@ const macho = bun.macho;
const pe = bun.pe;
const strings = bun.strings;
const Schema = bun.schema.api;
const api = Schema;

View File

@@ -919,8 +919,6 @@ pub const Default = struct {
_ = self;
return c_allocator;
}
pub const deinit = void;
};
const basic = if (bun.use_mimalloc)

View File

@@ -94,8 +94,6 @@ const BorrowedHeap = if (safety_checks) *DebugHeap else *mimalloc.Heap;
const DebugHeap = struct {
inner: *mimalloc.Heap,
thread_lock: bun.safety.ThreadLock,
pub const deinit = void;
};
threadlocal var thread_heap: if (safety_checks) ?DebugHeap else void = if (safety_checks) null;

View File

@@ -506,12 +506,6 @@ pub fn AllocationScopeIn(comptime Allocator: type) type {
pub fn setPointerExtra(self: Self, ptr: *anyopaque, extra: Extra) void {
return self.borrow().setPointerExtra(ptr, extra);
}
pub fn leakSlice(self: Self, memory: anytype) void {
if (comptime !Self.enabled) return;
_ = @typeInfo(@TypeOf(memory)).pointer;
self.trackExternalFree(memory, null) catch @panic("tried to free memory that was not allocated by the allocation scope");
}
};
}

View File

@@ -216,7 +216,8 @@ pub extern fn mi_new_reallocn(p: ?*anyopaque, newcount: usize, size: usize) ?*an
pub const MI_SMALL_WSIZE_MAX = @as(c_int, 128);
pub const MI_SMALL_SIZE_MAX = MI_SMALL_WSIZE_MAX * @import("std").zig.c_translation.sizeof(?*anyopaque);
pub const MI_ALIGNMENT_MAX = (@as(c_int, 16) * @as(c_int, 1024)) * @as(c_ulong, 1024);
pub const MI_MAX_ALIGN_SIZE = 16;
const MI_MAX_ALIGN_SIZE = 16;
pub fn mustUseAlignedAlloc(alignment: std.mem.Alignment) bool {
return alignment.toByteUnits() > MI_MAX_ALIGN_SIZE;

View File

@@ -112,7 +112,6 @@ pub const Features = struct {
pub var unsupported_uv_function: usize = 0;
pub var exited: usize = 0;
pub var yarn_migration: usize = 0;
pub var pnpm_migration: usize = 0;
pub var yaml_parse: usize = 0;
comptime {

View File

@@ -752,7 +752,7 @@ pub const Object = struct {
pub fn hasProperty(obj: *const Object, name: string) bool {
for (obj.properties.slice()) |prop| {
const key = prop.key orelse continue;
if (key.data != .e_string) continue;
if (std.meta.activeTag(key.data) != .e_string) continue;
if (key.data.e_string.eql(string, name)) return true;
}
return false;
@@ -762,7 +762,7 @@ pub const Object = struct {
for (obj.properties.slice(), 0..) |prop, i| {
const value = prop.value orelse continue;
const key = prop.key orelse continue;
if (key.data != .e_string) continue;
if (std.meta.activeTag(key.data) != .e_string) continue;
const key_str = key.data.e_string;
if (key_str.eql(string, name)) {
return Expr.Query{

View File

@@ -132,14 +132,14 @@ pub fn isEmpty(expr: Expr) bool {
pub const Query = struct { expr: Expr, loc: logger.Loc, i: u32 = 0 };
pub fn hasAnyPropertyNamed(expr: *const Expr, comptime names: []const string) bool {
if (expr.data != .e_object) return false;
if (std.meta.activeTag(expr.data) != .e_object) return false;
const obj = expr.data.e_object;
if (obj.properties.len == 0) return false;
for (obj.properties.slice()) |prop| {
if (prop.value == null) continue;
const key = prop.key orelse continue;
if (key.data != .e_string) continue;
if (std.meta.activeTag(key.data) != .e_string) continue;
const key_str = key.data.e_string;
if (strings.eqlAnyComptime(key_str.data, names)) return true;
}
@@ -266,7 +266,7 @@ pub fn set(expr: *Expr, allocator: std.mem.Allocator, name: string, value: Expr)
for (0..expr.data.e_object.properties.len) |i| {
const prop = &expr.data.e_object.properties.ptr[i];
const key = prop.key orelse continue;
if (key.data != .e_string) continue;
if (std.meta.activeTag(key.data) != .e_string) continue;
if (key.data.e_string.eql(string, name)) {
prop.value = value;
return;
@@ -288,7 +288,7 @@ pub fn setString(expr: *Expr, allocator: std.mem.Allocator, name: string, value:
for (0..expr.data.e_object.properties.len) |i| {
const prop = &expr.data.e_object.properties.ptr[i];
const key = prop.key orelse continue;
if (key.data != .e_string) continue;
if (std.meta.activeTag(key.data) != .e_string) continue;
if (key.data.e_string.eql(string, name)) {
prop.value = Expr.init(E.String, .{ .data = value }, logger.Loc.Empty);
return;
@@ -310,15 +310,6 @@ pub fn getObject(expr: *const Expr, name: string) ?Expr {
return null;
}
pub fn getBoolean(expr: *const Expr, name: string) ?bool {
if (expr.asProperty(name)) |query| {
if (query.expr.data == .e_boolean) {
return query.expr.data.e_boolean.value;
}
}
return null;
}
pub fn getString(expr: *const Expr, allocator: std.mem.Allocator, name: string) OOM!?struct { string, logger.Loc } {
if (asProperty(expr, name)) |q| {
if (q.expr.asString(allocator)) |str| {
@@ -394,7 +385,7 @@ pub fn getRope(self: *const Expr, rope: *const E.Object.Rope) ?E.Object.RopeQuer
// Making this comptime bloats the binary and doesn't seem to impact runtime performance.
pub fn asProperty(expr: *const Expr, name: string) ?Query {
if (expr.data != .e_object) return null;
if (std.meta.activeTag(expr.data) != .e_object) return null;
const obj = expr.data.e_object;
if (obj.properties.len == 0) return null;
@@ -402,7 +393,7 @@ pub fn asProperty(expr: *const Expr, name: string) ?Query {
}
pub fn asPropertyStringMap(expr: *const Expr, name: string, allocator: std.mem.Allocator) ?*bun.StringArrayHashMap(string) {
if (expr.data != .e_object) return null;
if (std.meta.activeTag(expr.data) != .e_object) return null;
const obj_ = expr.data.e_object;
if (obj_.properties.len == 0) return null;
const query = obj_.asProperty(name) orelse return null;
@@ -448,7 +439,7 @@ pub const ArrayIterator = struct {
};
pub fn asArray(expr: *const Expr) ?ArrayIterator {
if (expr.data != .e_array) return null;
if (std.meta.activeTag(expr.data) != .e_array) return null;
const array = expr.data.e_array;
if (array.items.len == 0) return null;
@@ -464,7 +455,7 @@ pub inline fn asUtf8StringLiteral(expr: *const Expr) ?string {
}
pub inline fn asStringLiteral(expr: *const Expr, allocator: std.mem.Allocator) ?string {
if (expr.data != .e_string) return null;
if (std.meta.activeTag(expr.data) != .e_string) return null;
return expr.data.e_string.string(allocator) catch null;
}
@@ -510,7 +501,7 @@ pub inline fn asStringZ(expr: *const Expr, allocator: std.mem.Allocator) OOM!?st
pub fn asBool(
expr: *const Expr,
) ?bool {
if (expr.data != .e_boolean) return null;
if (std.meta.activeTag(expr.data) != .e_boolean) return null;
return expr.data.e_boolean.value;
}
@@ -531,7 +522,7 @@ const Serializable = struct {
};
pub fn isMissing(a: *const Expr) bool {
return a.data == Expr.Tag.e_missing;
return std.meta.activeTag(a.data) == Expr.Tag.e_missing;
}
// The goal of this function is to "rotate" the AST if it's possible to use the

View File

@@ -325,7 +325,7 @@ pub const Runner = struct {
return _entry.value_ptr.*;
}
var blob_: ?*const jsc.WebCore.Blob = null;
var blob_: ?jsc.WebCore.Blob = null;
const mime_type: ?MimeType = null;
if (value.jsType() == .DOMWrapper) {
@@ -334,23 +334,30 @@ pub const Runner = struct {
} else if (value.as(jsc.WebCore.Request)) |resp| {
return this.run(try resp.getBlobWithoutCallFrame(this.global));
} else if (value.as(jsc.WebCore.Blob)) |resp| {
blob_ = resp;
blob_ = resp.*;
blob_.?.allocator = null;
} else if (value.as(bun.api.ResolveMessage) != null or value.as(bun.api.BuildMessage) != null) {
_ = this.macro.vm.uncaughtException(this.global, value, false);
return error.MacroFailed;
}
}
if (blob_) |blob| {
return Expr.fromBlob(
if (blob_) |*blob| {
const out_expr = Expr.fromBlob(
blob,
this.allocator,
mime_type,
this.log,
this.caller.loc,
) catch {
blob.deinit();
return error.MacroFailed;
};
if (out_expr.data == .e_string) {
blob.deinit();
}
return out_expr;
}
return Expr.init(E.String, E.String.empty, this.caller.loc);
@@ -364,20 +371,41 @@ pub const Runner = struct {
const _entry = this.visited.getOrPut(this.allocator, value) catch unreachable;
if (_entry.found_existing) {
switch (_entry.value_ptr.*.data) {
.e_object, .e_array => {
this.log.addErrorFmt(this.source, this.caller.loc, this.allocator, "converting circular structure to Bun AST is not implemented yet", .{}) catch unreachable;
return error.MacroFailed;
},
else => {},
}
return _entry.value_ptr.*;
}
var iter = try jsc.JSArrayIterator.init(value, this.global);
// Process all array items
if (iter.len == 0) {
const result = Expr.init(
E.Array,
E.Array{
.items = ExprNodeList.empty,
.was_originally_macro = true,
},
this.caller.loc,
);
_entry.value_ptr.* = result;
return result;
}
var array = this.allocator.alloc(Expr, iter.len) catch unreachable;
errdefer this.allocator.free(array);
const expr = Expr.init(
var out = Expr.init(
E.Array,
E.Array{ .items = ExprNodeList.empty, .was_originally_macro = true },
E.Array{
.items = ExprNodeList.empty,
.was_originally_macro = true,
},
this.caller.loc,
);
_entry.value_ptr.* = expr;
_entry.value_ptr.* = out;
errdefer this.allocator.free(array);
var i: usize = 0;
while (try iter.next()) |item| {
array[i] = try this.run(item);
@@ -385,27 +413,24 @@ pub const Runner = struct {
continue;
i += 1;
}
expr.data.e_array.items = ExprNodeList.fromOwnedSlice(array);
expr.data.e_array.items.len = @truncate(i);
return expr;
out.data.e_array.items = ExprNodeList.fromOwnedSlice(array);
_entry.value_ptr.* = out;
return out;
},
// TODO: optimize this
jsc.ConsoleObject.Formatter.Tag.Object => {
this.is_top_level = false;
const _entry = this.visited.getOrPut(this.allocator, value) catch unreachable;
if (_entry.found_existing) {
switch (_entry.value_ptr.*.data) {
.e_object, .e_array => {
this.log.addErrorFmt(this.source, this.caller.loc, this.allocator, "converting circular structure to Bun AST is not implemented yet", .{}) catch unreachable;
return error.MacroFailed;
},
else => {},
}
return _entry.value_ptr.*;
}
// Reserve a placeholder to break cycles.
const expr = Expr.init(
E.Object,
E.Object{ .properties = G.Property.List{}, .was_originally_macro = true },
this.caller.loc,
);
_entry.value_ptr.* = expr;
// SAFETY: tag ensures `value` is an object.
const obj = value.getObject() orelse unreachable;
var object_iter = try jsc.JSPropertyIterator(.{
@@ -414,28 +439,36 @@ pub const Runner = struct {
}).init(this.global, obj);
defer object_iter.deinit();
// Build properties list
var properties = bun.handleOom(
G.Property.List.initCapacity(this.allocator, object_iter.len),
const out = _entry.value_ptr;
out.* = Expr.init(
E.Object,
E.Object{
.properties = bun.handleOom(
G.Property.List.initCapacity(this.allocator, object_iter.len),
),
.was_originally_macro = true,
},
this.caller.loc,
);
const properties = &out.data.e_object.properties;
errdefer properties.clearAndFree(this.allocator);
while (try object_iter.next()) |prop| {
const object_value = try this.run(object_iter.value);
properties.append(this.allocator, G.Property{
bun.assertf(
object_iter.i == properties.len,
"`properties` unexpectedly modified (length {d}, expected {d})",
.{ properties.len, object_iter.i },
);
properties.appendAssumeCapacity(G.Property{
.key = Expr.init(
E.String,
E.String.init(prop.toOwnedSlice(this.allocator) catch unreachable),
this.caller.loc,
),
.value = object_value,
}) catch |err| bun.handleOom(err);
.value = try this.run(object_iter.value),
});
}
expr.data.e_object.properties = properties;
return expr;
return out.*;
},
.JSON => {

View File

@@ -170,21 +170,6 @@ pub fn NewParser_(
dirname_ref: Ref = Ref.None,
import_meta_ref: Ref = Ref.None,
hmr_api_ref: Ref = Ref.None,
/// If bake is enabled and this is a server-side file, we want to use
/// special `Response` class inside the `bun:app` built-in module to
/// support syntax like `return Response(<jsx />, {...})` or `return Response.render("/my-page")`
/// or `return Response.redirect("/other")`.
///
/// So we'll need to add a `import { Response } from 'bun:app'` to the
/// top of the file
///
/// We need to declare this `response_ref` upfront
response_ref: Ref = Ref.None,
/// We also need to declare the namespace ref for `bun:app` and attach
/// it to the symbol so the code generated `e_import_identifier`'s
bun_app_namespace_ref: Ref = Ref.None,
scopes_in_order_visitor_index: usize = 0,
has_classic_runtime_warned: bool = false,
macro_call_count: MacroCallCountType = 0,
@@ -969,7 +954,7 @@ pub fn NewParser_(
switch (call.target.data) {
.e_identifier => |ident| {
// is this a require("something")
if (strings.eqlComptime(p.loadNameFromRef(ident.ref), "require") and call.args.len == 1 and call.args.ptr[0].data == .e_string) {
if (strings.eqlComptime(p.loadNameFromRef(ident.ref), "require") and call.args.len == 1 and std.meta.activeTag(call.args.ptr[0].data) == .e_string) {
_ = p.addImportRecord(.require, loc, call.args.at(0).data.e_string.string(p.allocator) catch unreachable);
}
},
@@ -985,7 +970,7 @@ pub fn NewParser_(
switch (call.target.data) {
.e_identifier => |ident| {
// is this a require("something")
if (strings.eqlComptime(p.loadNameFromRef(ident.ref), "require") and call.args.len == 1 and call.args.ptr[0].data == .e_string) {
if (strings.eqlComptime(p.loadNameFromRef(ident.ref), "require") and call.args.len == 1 and std.meta.activeTag(call.args.ptr[0].data) == .e_string) {
_ = p.addImportRecord(.require, loc, call.args.at(0).data.e_string.string(p.allocator) catch unreachable);
}
},
@@ -1235,81 +1220,6 @@ pub fn NewParser_(
};
}
pub fn generateImportStmtForBakeResponse(
noalias p: *P,
parts: *ListManaged(js_ast.Part),
) !void {
bun.assert(!p.response_ref.isNull());
bun.assert(!p.bun_app_namespace_ref.isNull());
const allocator = p.allocator;
const import_path = "bun:app";
const import_record_i = p.addImportRecordByRange(.stmt, logger.Range.None, import_path);
var declared_symbols = DeclaredSymbol.List{};
try declared_symbols.ensureTotalCapacity(allocator, 2);
var stmts = try allocator.alloc(Stmt, 1);
declared_symbols.appendAssumeCapacity(
DeclaredSymbol{ .ref = p.bun_app_namespace_ref, .is_top_level = true },
);
try p.module_scope.generated.append(allocator, p.bun_app_namespace_ref);
const clause_items = try allocator.dupe(js_ast.ClauseItem, &.{
js_ast.ClauseItem{
.alias = "Response",
.original_name = "Response",
.alias_loc = logger.Loc{},
.name = LocRef{ .ref = p.response_ref, .loc = logger.Loc{} },
},
});
declared_symbols.appendAssumeCapacity(DeclaredSymbol{
.ref = p.response_ref,
.is_top_level = true,
});
// ensure every e_import_identifier holds the namespace
if (p.options.features.hot_module_reloading) {
const symbol = &p.symbols.items[p.response_ref.inner_index];
bun.assert(symbol.namespace_alias != null);
symbol.namespace_alias.?.import_record_index = import_record_i;
}
try p.is_import_item.put(allocator, p.response_ref, {});
try p.named_imports.put(allocator, p.response_ref, js_ast.NamedImport{
.alias = "Response",
.alias_loc = logger.Loc{},
.namespace_ref = p.bun_app_namespace_ref,
.import_record_index = import_record_i,
});
stmts[0] = p.s(
S.Import{
.namespace_ref = p.bun_app_namespace_ref,
.items = clause_items,
.import_record_index = import_record_i,
.is_single_line = true,
},
logger.Loc{},
);
var import_records = try allocator.alloc(u32, 1);
import_records[0] = import_record_i;
// This import is placed in a part before the main code, however
// the bundler ends up re-ordering this to be after... The order
// does not matter as ESM imports are always hoisted.
parts.append(js_ast.Part{
.stmts = stmts,
.declared_symbols = declared_symbols,
.import_record_indices = bun.BabyList(u32).fromOwnedSlice(import_records),
.tag = .runtime,
}) catch unreachable;
}
pub fn generateImportStmt(
noalias p: *P,
import_path: string,
@@ -1317,7 +1227,7 @@ pub fn NewParser_(
parts: *ListManaged(js_ast.Part),
symbols: anytype,
additional_stmt: ?Stmt,
comptime prefix: string,
comptime suffix: string,
comptime is_internal: bool,
) anyerror!void {
const allocator = p.allocator;
@@ -1327,13 +1237,13 @@ pub fn NewParser_(
import_record.path.namespace = "runtime";
import_record.is_internal = is_internal;
const import_path_identifier = try import_record.path.name.nonUniqueNameString(allocator);
var namespace_identifier = try allocator.alloc(u8, import_path_identifier.len + prefix.len);
var namespace_identifier = try allocator.alloc(u8, import_path_identifier.len + suffix.len);
const clause_items = try allocator.alloc(js_ast.ClauseItem, imports.len);
var stmts = try allocator.alloc(Stmt, 1 + if (additional_stmt != null) @as(usize, 1) else @as(usize, 0));
var declared_symbols = DeclaredSymbol.List{};
try declared_symbols.ensureTotalCapacity(allocator, imports.len + 1);
bun.copy(u8, namespace_identifier, prefix);
bun.copy(u8, namespace_identifier[prefix.len..], import_path_identifier);
bun.copy(u8, namespace_identifier, suffix);
bun.copy(u8, namespace_identifier[suffix.len..], import_path_identifier);
const namespace_ref = try p.newSymbol(.other, namespace_identifier);
declared_symbols.appendAssumeCapacity(.{
@@ -2104,25 +2014,6 @@ pub fn NewParser_(
.wrap_exports_for_server_reference => {},
}
// Server-side components:
// Declare upfront the symbols for "Response" and "bun:app"
switch (p.options.features.server_components) {
.none, .client_side => {},
else => {
p.response_ref = try p.declareGeneratedSymbol(.import, "Response");
p.bun_app_namespace_ref = try p.newSymbol(
.other,
"import_bun_app",
);
const symbol = &p.symbols.items[p.response_ref.inner_index];
symbol.namespace_alias = .{
.namespace_ref = p.bun_app_namespace_ref,
.alias = "Response",
.import_record_index = std.math.maxInt(u32),
};
},
}
if (p.options.features.hot_module_reloading) {
p.hmr_api_ref = try p.declareCommonJSSymbol(.unbound, "hmr");
}
@@ -3180,7 +3071,7 @@ pub fn NewParser_(
return ref;
}
pub fn declareGeneratedSymbol(p: *P, kind: Symbol.Kind, comptime name: string) !Ref {
fn declareGeneratedSymbol(p: *P, kind: Symbol.Kind, comptime name: string) !Ref {
// The bundler runs the renamer, so it is ok to not append a hash
if (p.options.bundle) {
return try declareSymbolMaybeGenerated(p, kind, logger.Loc.Empty, name, true);

View File

@@ -1357,16 +1357,6 @@ pub const Parser = struct {
);
}
// Bake: transform global `Response` to use `import { Response } from 'bun:app'`
if (!p.response_ref.isNull() and is_used_and_has_no_links: {
// We only want to do this if the symbol is used and didn't get
// bound to some other value
const symbol: *const Symbol = &p.symbols.items[p.response_ref.innerIndex()];
break :is_used_and_has_no_links !symbol.hasLink() and symbol.use_count_estimate > 0;
}) {
try p.generateImportStmtForBakeResponse(&before);
}
if (before.items.len > 0 or after.items.len > 0) {
try parts.ensureUnusedCapacity(before.items.len + after.items.len);
const parts_len = parts.items.len;

Some files were not shown because too many files have changed in this diff Show More