Mirror of https://github.com/oven-sh/bun, synced 2026-02-05 16:38:55 +00:00

Compare commits: claude/add...dylan/test (114 commits)
Commits (SHA1):

302b1e2b85  a58e4c0c01  f19a1cc3a5  eac82e2184  dac1ee73c6  9aa3c7863d
b88cecfe66  b613790451  5fe3e3774c  c5005a37d7  a89e61fcaa  2b7fc18092
e3a1ae09f3  25e156c95b  badcfe8a14  8d7ca660ef  933c6fd260  f9a69773ab
e88d151241  debd9cc35d  e0b6183571  8d2953c097  057fa31a75  9c2590ca07
b5a56c183b  41be6aeb3c  8025fa4046  a37b00e477  621066d0c4  51a05ae2e3
52629145ca  f4218ed40b  9c75db45fa  f6e722b594  d9fdb67d70  a09dc2f450
39e48ed244  db2f768bd9  cf1367137d  5a12175cb0  ba20670da3  8c9c7894d6
73feb108d9  5179dad481  97f6adf767  8102e80f88  ed72eff2a9  665ea96076
da0babebd2  733e7f6165  d3ce459f0e  e14e42b402  eb04e4e640  250d30eb7d
0511fbf7b6  c58d2e3911  266fca2e5c  9d01a7b91a  a329da97f4  f45900d7e6
00490199f1  17b503b389  ea735c341f  064ecc37fd  90c7a4e886  c63fa996d1
5457d76bcb  c4519c7552  656747bcf1  2039ab182d  5a709a2dbf  51ce3bc269
14b62e6904  d3061de1bf  58782ceef2  0b9a2fce2d  749ad8a1ff  9746d03ccb
4dfd87a302  20854fb285  be15f6c80c  0ea4ce1bb4  6c381b0e03  7798e6638b
e3783c244f  fee28ca66f  f9a042f114  57b93f6ea3  fcd628424a  526686fdc9
95b18582ec  4252a6df31  13248bab57  80e8b9601d  0bd3f3757f  084eeb945e
92bc522e85  e58a4a7282  ebe2e9da14  1a23797e82  16435f3561  db22b7f402
a8ccdb02e9  144c45229e  85271f9dd9  99786797c7  b82c676ce5  e555702653
68bdffebe6  285143dc66  beae53e81b  0d6a27d394  0b549321e9  33fdc2112f
@@ -371,7 +371,7 @@ function getZigAgent(platform, options) {
  * @returns {Agent}
  */
 function getTestAgent(platform, options) {
-  const { os, arch } = platform;
+  const { os, arch, profile } = platform;
 
   if (os === "darwin") {
     return {
@@ -391,6 +391,13 @@ function getTestAgent(platform, options) {
   }
 
   if (arch === "aarch64") {
+    if (profile === "asan") {
+      return getEc2Agent(platform, options, {
+        instanceType: "c8g.2xlarge",
+        cpuCount: 2,
+        threadsPerCore: 1,
+      });
+    }
     return getEc2Agent(platform, options, {
       instanceType: "c8g.xlarge",
       cpuCount: 2,
@@ -398,6 +405,13 @@ function getTestAgent(platform, options) {
     });
   }
 
+  if (profile === "asan") {
+    return getEc2Agent(platform, options, {
+      instanceType: "c7i.2xlarge",
+      cpuCount: 2,
+      threadsPerCore: 1,
+    });
+  }
   return getEc2Agent(platform, options, {
     instanceType: "c7i.xlarge",
     cpuCount: 2,
@@ -538,6 +552,7 @@ function getLinkBunStep(platform, options) {
     cancel_on_build_failing: isMergeQueue(),
     env: {
       BUN_LINK_ONLY: "ON",
+      ASAN_OPTIONS: "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=0",
       ...getBuildEnv(platform, options),
     },
     command: `${getBuildCommand(platform, options, "build-bun")} --target bun`,
@@ -601,6 +616,9 @@ function getTestBunStep(platform, options, testOptions = {}) {
     cancel_on_build_failing: isMergeQueue(),
     parallelism: unifiedTests ? undefined : os === "darwin" ? 2 : 10,
     timeout_in_minutes: profile === "asan" || os === "windows" ? 45 : 30,
+    env: {
+      ASAN_OPTIONS: "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=0",
+    },
     command:
       os === "windows"
         ? `node .\\scripts\\runner.node.mjs ${args.join(" ")}`
.github/workflows/vscode-release.yml (vendored, 5 changes)

@@ -45,3 +45,8 @@ jobs:
         env:
           VSCE_PAT: ${{ secrets.VSCODE_EXTENSION }}
         working-directory: packages/bun-vscode/extension
+
+      - uses: actions/upload-artifact@v4
+        with:
+          name: bun-vscode-${{ github.event.inputs.version }}.vsix
+          path: packages/bun-vscode/extension/bun-vscode-${{ github.event.inputs.version }}.vsix
.vscode/launch.json (generated, vendored, 4 changes)

@@ -26,7 +26,7 @@
       // "BUN_JSC_dumpSimulatedThrows": "1",
       // "BUN_JSC_unexpectedExceptionStackTraceLimit": "20",
       // "BUN_DESTRUCT_VM_ON_EXIT": "1",
-      // "ASAN_OPTIONS": "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1",
+      // "ASAN_OPTIONS": "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1:abort_on_error=1",
      // "LSAN_OPTIONS": "malloc_context_size=100:print_suppressions=1:suppressions=${workspaceFolder}/test/leaksan.supp",
     },
     "console": "internalConsole",
@@ -69,7 +69,7 @@
       // "BUN_JSC_dumpSimulatedThrows": "1",
       // "BUN_JSC_unexpectedExceptionStackTraceLimit": "20",
       // "BUN_DESTRUCT_VM_ON_EXIT": "1",
-      // "ASAN_OPTIONS": "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1",
+      // "ASAN_OPTIONS": "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1:abort_on_error=1",
      // "LSAN_OPTIONS": "malloc_context_size=100:print_suppressions=1:suppressions=${workspaceFolder}/test/leaksan.supp",
     },
     "console": "internalConsole",
@@ -21,7 +21,7 @@ $ sudo pacman -S base-devel ccache cmake git go libiconv libtool make ninja pkg-
 ```
 
 ```bash#Fedora
-$ sudo dnf install cargo ccache cmake git golang libtool ninja-build pkg-config rustc ruby libatomic-static libstdc++-static sed unzip which libicu-devel 'perl(Math::BigInt)'
+$ sudo dnf install cargo clang19 llvm19 lld19 ccache cmake git golang libtool ninja-build pkg-config rustc ruby libatomic-static libstdc++-static sed unzip which libicu-devel 'perl(Math::BigInt)'
 ```
 
 ```bash#openSUSE Tumbleweed
@@ -1,6 +1,6 @@
 const isBun = typeof globalThis?.Bun?.sql !== "undefined";
 import postgres from "postgres";
-const sql = isBun ? Bun.sql : postgres;
+const sql = isBun ? Bun.sql : postgres();
 
 // Create the table if it doesn't exist
 await sql`
build.zig (25 changes)

@@ -48,6 +48,7 @@ const BunBuildOptions = struct {
     /// enable debug logs in release builds
     enable_logs: bool = false,
     enable_asan: bool,
+    enable_valgrind: bool,
     tracy_callstack_depth: u16,
     reported_nodejs_version: Version,
     /// To make iterating on some '@embedFile's faster, we load them at runtime
@@ -67,6 +68,7 @@ const BunBuildOptions = struct {
 
     cached_options_module: ?*Module = null,
     windows_shim: ?WindowsShim = null,
+    llvm_codegen_threads: ?u32 = null,
 
     pub fn isBaseline(this: *const BunBuildOptions) bool {
         return this.arch.isX86() and
@@ -94,6 +96,7 @@ const BunBuildOptions = struct {
     opts.addOption(bool, "baseline", this.isBaseline());
     opts.addOption(bool, "enable_logs", this.enable_logs);
     opts.addOption(bool, "enable_asan", this.enable_asan);
+    opts.addOption(bool, "enable_valgrind", this.enable_valgrind);
     opts.addOption([]const u8, "reported_nodejs_version", b.fmt("{}", .{this.reported_nodejs_version}));
     opts.addOption(bool, "zig_self_hosted_backend", this.no_llvm);
     opts.addOption(bool, "override_no_export_cpp_apis", this.override_no_export_cpp_apis);
@@ -213,26 +216,21 @@ pub fn build(b: *Build) !void {
     var build_options = BunBuildOptions{
         .target = target,
         .optimize = optimize,
-
         .os = os,
         .arch = arch,
-
         .codegen_path = codegen_path,
         .codegen_embed = codegen_embed,
         .no_llvm = no_llvm,
         .override_no_export_cpp_apis = override_no_export_cpp_apis,
-
         .version = try Version.parse(bun_version),
         .canary_revision = canary: {
             const rev = b.option(u32, "canary", "Treat this as a canary build") orelse 0;
             break :canary if (rev == 0) null else rev;
         },
-
         .reported_nodejs_version = try Version.parse(
             b.option([]const u8, "reported_nodejs_version", "Reported Node.js version") orelse
                 "0.0.0-unset",
         ),
-
         .sha = sha: {
             const sha_buildoption = b.option([]const u8, "sha", "Force the git sha");
             const sha_github = b.graph.env_map.get("GITHUB_SHA");
@@ -268,10 +266,11 @@ pub fn build(b: *Build) !void {
 
             break :sha sha;
         },
-
         .tracy_callstack_depth = b.option(u16, "tracy_callstack_depth", "") orelse 10,
         .enable_logs = b.option(bool, "enable_logs", "Enable logs in release") orelse false,
         .enable_asan = b.option(bool, "enable_asan", "Enable asan") orelse false,
+        .enable_valgrind = b.option(bool, "enable_valgrind", "Enable valgrind") orelse false,
+        .llvm_codegen_threads = b.option(u32, "llvm_codegen_threads", "Number of threads to use for LLVM codegen") orelse 1,
     };
 
     // zig build obj
@@ -500,6 +499,7 @@ fn addMultiCheck(
         .codegen_path = root_build_options.codegen_path,
         .no_llvm = root_build_options.no_llvm,
         .enable_asan = root_build_options.enable_asan,
+        .enable_valgrind = root_build_options.enable_valgrind,
         .override_no_export_cpp_apis = root_build_options.override_no_export_cpp_apis,
     };
 
@@ -605,7 +605,15 @@ fn configureObj(b: *Build, opts: *BunBuildOptions, obj: *Compile) void {
 
     // Object options
     obj.use_llvm = !opts.no_llvm;
-    obj.use_lld = if (opts.os == .mac) false else !opts.no_llvm;
+    obj.use_lld = if (opts.os == .mac or opts.os == .linux) false else !opts.no_llvm;
+
+    if (opts.optimize == .Debug) {
+        if (@hasField(std.meta.Child(@TypeOf(obj)), "llvm_codegen_threads"))
+            obj.llvm_codegen_threads = opts.llvm_codegen_threads orelse 0;
+    }
+
     obj.no_link_obj = true;
 
     if (opts.enable_asan and !enableFastBuild(b)) {
         if (@hasField(Build.Module, "sanitize_address")) {
             obj.root_module.sanitize_address = true;
@@ -636,7 +644,7 @@ fn configureObj(b: *Build, opts: *BunBuildOptions, obj: *Compile) void {
     obj.link_function_sections = true;
     obj.link_data_sections = true;
 
-    if (opts.optimize == .Debug) {
+    if (opts.optimize == .Debug and opts.enable_valgrind) {
         obj.root_module.valgrind = true;
     }
 }
@@ -745,6 +753,7 @@ fn addInternalImports(b: *Build, mod: *Module, opts: *BunBuildOptions) void {
         .{ .file = "node-fallbacks/url.js", .enable = opts.shouldEmbedCode() },
         .{ .file = "node-fallbacks/util.js", .enable = opts.shouldEmbedCode() },
         .{ .file = "node-fallbacks/zlib.js", .enable = opts.shouldEmbedCode() },
+        .{ .file = "eval/feedback.ts", .enable = opts.shouldEmbedCode() },
     }) |entry| {
         if (!@hasField(@TypeOf(entry), "enable") or entry.enable) {
             const path = b.pathJoin(&.{ opts.codegen_path, entry.file });
@@ -60,10 +60,10 @@ endif()
 # Windows Code Signing Option
 if(WIN32)
   optionx(ENABLE_WINDOWS_CODESIGNING BOOL "Enable Windows code signing with DigiCert KeyLocker" DEFAULT OFF)
-
+
   if(ENABLE_WINDOWS_CODESIGNING)
     message(STATUS "Windows code signing: ENABLED")
-
+
     # Check for required environment variables
     if(NOT DEFINED ENV{SM_API_KEY})
       message(WARNING "SM_API_KEY not set - code signing may fail")
@@ -114,8 +114,10 @@ endif()
 
 if(DEBUG AND ((APPLE AND ARCH STREQUAL "aarch64") OR LINUX))
   set(DEFAULT_ASAN ON)
+  set(DEFAULT_VALGRIND OFF)
 else()
   set(DEFAULT_ASAN OFF)
+  set(DEFAULT_VALGRIND OFF)
 endif()
 
 optionx(ENABLE_ASAN BOOL "If ASAN support should be enabled" DEFAULT ${DEFAULT_ASAN})
@@ -2,6 +2,8 @@ include(PathUtils)
 
 if(DEBUG)
   set(bun bun-debug)
+elseif(ENABLE_ASAN AND ENABLE_VALGRIND)
+  set(bun bun-asan-valgrind)
 elseif(ENABLE_ASAN)
   set(bun bun-asan)
 elseif(ENABLE_VALGRIND)
@@ -42,6 +44,14 @@ else()
   set(CONFIGURE_DEPENDS "")
 endif()
 
+set(LLVM_ZIG_CODEGEN_THREADS 0)
+# This makes the build slower, so we turn it off for now.
+# if (DEBUG)
+#   include(ProcessorCount)
+#   ProcessorCount(CPU_COUNT)
+#   set(LLVM_ZIG_CODEGEN_THREADS ${CPU_COUNT})
+# endif()
+
 # --- Dependencies ---
 
 set(BUN_DEPENDENCIES
@@ -576,7 +586,13 @@ if (TEST)
   set(BUN_ZIG_OUTPUT ${BUILD_PATH}/bun-test.o)
   set(ZIG_STEPS test)
 else()
-  set(BUN_ZIG_OUTPUT ${BUILD_PATH}/bun-zig.o)
+  if (LLVM_ZIG_CODEGEN_THREADS GREATER 1)
+    foreach(i RANGE ${LLVM_ZIG_CODEGEN_THREADS})
+      list(APPEND BUN_ZIG_OUTPUT ${BUILD_PATH}/bun-zig.${i}.o)
+    endforeach()
+  else()
+    set(BUN_ZIG_OUTPUT ${BUILD_PATH}/bun-zig.o)
+  endif()
   set(ZIG_STEPS obj)
 endif()
 
@@ -619,6 +635,8 @@ register_command(
     -Dcpu=${ZIG_CPU}
     -Denable_logs=$<IF:$<BOOL:${ENABLE_LOGS}>,true,false>
     -Denable_asan=$<IF:$<BOOL:${ENABLE_ZIG_ASAN}>,true,false>
+    -Denable_valgrind=$<IF:$<BOOL:${ENABLE_VALGRIND}>,true,false>
+    -Dllvm_codegen_threads=${LLVM_ZIG_CODEGEN_THREADS}
     -Dversion=${VERSION}
     -Dreported_nodejs_version=${NODEJS_VERSION}
     -Dcanary=${CANARY_REVISION}
@@ -886,12 +904,8 @@ if(NOT WIN32)
   endif()
 
   if(ENABLE_ASAN)
-    target_compile_options(${bun} PUBLIC
-      -fsanitize=address
-    )
-    target_link_libraries(${bun} PUBLIC
-      -fsanitize=address
-    )
+    target_compile_options(${bun} PUBLIC -fsanitize=address)
+    target_link_libraries(${bun} PUBLIC -fsanitize=address)
   endif()
 
   target_compile_options(${bun} PUBLIC
@@ -930,12 +944,8 @@ if(NOT WIN32)
   )
 
   if(ENABLE_ASAN)
-    target_compile_options(${bun} PUBLIC
-      -fsanitize=address
-    )
-    target_link_libraries(${bun} PUBLIC
-      -fsanitize=address
-    )
+    target_compile_options(${bun} PUBLIC -fsanitize=address)
+    target_link_libraries(${bun} PUBLIC -fsanitize=address)
   endif()
 endif()
 else()
@@ -969,6 +979,7 @@ if(WIN32)
     /delayload:WSOCK32.dll
     /delayload:ADVAPI32.dll
     /delayload:IPHLPAPI.dll
+    /delayload:CRYPT32.dll
   )
  endif()
 endif()
@@ -1010,6 +1021,7 @@ if(LINUX)
     -Wl,--wrap=exp2
     -Wl,--wrap=expf
     -Wl,--wrap=fcntl64
+    -Wl,--wrap=gettid
     -Wl,--wrap=log
     -Wl,--wrap=log2
     -Wl,--wrap=log2f
@@ -1061,7 +1073,7 @@ if(LINUX)
   )
 endif()
 
-if (NOT DEBUG AND NOT ENABLE_ASAN)
+if (NOT DEBUG AND NOT ENABLE_ASAN AND NOT ENABLE_VALGRIND)
   target_link_options(${bun} PUBLIC
     -Wl,-icf=safe
   )
@@ -1188,6 +1200,7 @@ if(WIN32)
     ntdll
     userenv
     dbghelp
+    crypt32
     wsock32 # ws2_32 required by TransmitFile aka sendfile on windows
     delayimp.lib
   )
@@ -1363,12 +1376,20 @@ if(NOT BUN_CPP_ONLY)
   if(ENABLE_BASELINE)
     set(bunTriplet ${bunTriplet}-baseline)
   endif()
-  if(ENABLE_ASAN)
+
+  if (ENABLE_ASAN AND ENABLE_VALGRIND)
+    set(bunTriplet ${bunTriplet}-asan-valgrind)
+    set(bunPath ${bunTriplet})
+  elseif (ENABLE_VALGRIND)
+    set(bunTriplet ${bunTriplet}-valgrind)
+    set(bunPath ${bunTriplet})
+  elseif(ENABLE_ASAN)
     set(bunTriplet ${bunTriplet}-asan)
     set(bunPath ${bunTriplet})
   else()
     string(REPLACE bun ${bunTriplet} bunPath ${bun})
   endif()
 
   set(bunFiles ${bunExe} features.json)
   if(WIN32)
     list(APPEND bunFiles ${bun}.pdb)
@@ -4,7 +4,8 @@ register_repository(
   REPOSITORY
     libuv/libuv
   COMMIT
-    da527d8d2a908b824def74382761566371439003
+    # Corresponds to v1.51.0
+    5152db2cbfeb5582e9c27c5ea1dba2cd9e10759b
 )
 
 if(WIN32)
@@ -181,12 +181,23 @@ function(generate_dependency_versions_header)
   string(APPEND HEADER_CONTENT "}\n")
   string(APPEND HEADER_CONTENT "#endif\n\n")
   string(APPEND HEADER_CONTENT "#endif // BUN_DEPENDENCY_VERSIONS_H\n")
 
-  # Write the header file
+  # Write the header file only if content has changed
   set(OUTPUT_FILE "${CMAKE_BINARY_DIR}/bun_dependency_versions.h")
-  file(WRITE "${OUTPUT_FILE}" "${HEADER_CONTENT}")
 
-  message(STATUS "Generated dependency versions header: ${OUTPUT_FILE}")
+  # Read existing content if file exists
+  set(EXISTING_CONTENT "")
+  if(EXISTS "${OUTPUT_FILE}")
+    file(READ "${OUTPUT_FILE}" EXISTING_CONTENT)
+  endif()
+
+  # Only write if content is different
+  if(NOT "${EXISTING_CONTENT}" STREQUAL "${HEADER_CONTENT}")
+    file(WRITE "${OUTPUT_FILE}" "${HEADER_CONTENT}")
+    message(STATUS "Updated dependency versions header: ${OUTPUT_FILE}")
+  else()
+    message(STATUS "Dependency versions header unchanged: ${OUTPUT_FILE}")
+  endif()
 
   # Also create a more detailed version for debugging
   set(DEBUG_OUTPUT_FILE "${CMAKE_BINARY_DIR}/bun_dependency_versions_debug.txt")
@@ -131,6 +131,9 @@ else()
   find_llvm_command(CMAKE_RANLIB llvm-ranlib)
   if(LINUX)
     find_llvm_command(LLD_PROGRAM ld.lld)
+    # Ensure vendor dependencies use lld instead of ld
+    list(APPEND CMAKE_ARGS -DCMAKE_EXE_LINKER_FLAGS=--ld-path=${LLD_PROGRAM})
+    list(APPEND CMAKE_ARGS -DCMAKE_SHARED_LINKER_FLAGS=--ld-path=${LLD_PROGRAM})
   endif()
   if(APPLE)
     find_llvm_command(CMAKE_DSYMUTIL dsymutil)
@@ -2,7 +2,7 @@ option(WEBKIT_VERSION "The version of WebKit to use")
 option(WEBKIT_LOCAL "If a local version of WebKit should be used instead of downloading")
 
 if(NOT WEBKIT_VERSION)
-  set(WEBKIT_VERSION 495c25e24927ba03277ae225cd42811588d03ff8)
+  set(WEBKIT_VERSION 69fa2714ab5f917c2d15501ff8cfdccfaea78882)
 endif()
 
 string(SUBSTRING ${WEBKIT_VERSION} 0 16 WEBKIT_VERSION_PREFIX)
@@ -20,7 +20,7 @@ else()
   unsupported(CMAKE_SYSTEM_NAME)
 endif()
 
-set(ZIG_COMMIT "e0b7c318f318196c5f81fdf3423816a7b5bb3112")
+set(ZIG_COMMIT "55fdbfa0c86be86b68d43a4ba761e6909eb0d7b2")
 optionx(ZIG_TARGET STRING "The zig target to use" DEFAULT ${DEFAULT_ZIG_TARGET})
 
 if(CMAKE_BUILD_TYPE STREQUAL "Release")
@@ -90,6 +90,7 @@ register_command(
     -DZIG_PATH=${ZIG_PATH}
     -DZIG_COMMIT=${ZIG_COMMIT}
     -DENABLE_ASAN=${ENABLE_ASAN}
+    -DENABLE_VALGRIND=${ENABLE_VALGRIND}
     -DZIG_COMPILER_SAFE=${ZIG_COMPILER_SAFE}
     -P ${CWD}/cmake/scripts/DownloadZig.cmake
   SOURCES
@@ -122,7 +122,7 @@
     },
     {
       "name": "reporter",
-      "description": "Specify the test reporter. Currently --reporter=junit is the only supported format.",
+      "description": "Test output reporter format. Available: 'junit' (requires --reporter-outfile). Default: console output.",
       "hasValue": true,
       "valueType": "val",
       "required": false,
@@ -130,7 +130,7 @@
     },
     {
       "name": "reporter-outfile",
-      "description": "The output file used for the format from --reporter.",
+      "description": "Output file path for the reporter format (required with --reporter).",
       "hasValue": true,
       "valueType": "val",
       "required": false,
@@ -665,7 +665,6 @@ _bun_test_completion() {
     '--timeout[Set the per-test timeout in milliseconds, default is 5000.]:timeout' \
     '--update-snapshots[Update snapshot files]' \
     '--rerun-each[Re-run each test file <NUMBER> times, helps catch certain bugs]:rerun' \
-    '--only[Only run tests that are marked with "test.only()"]' \
     '--todo[Include tests that are marked with "test.todo()"]' \
     '--coverage[Generate a coverage profile]' \
     '--bail[Exit the test suite after <NUMBER> failures. If you do not specify a number, it defaults to 1.]:bail' \
@@ -233,6 +233,7 @@ In addition to the standard fetch options, Bun provides several extensions:
 ```ts
 const response = await fetch("http://example.com", {
   // Control automatic response decompression (default: true)
+  // Supports gzip, deflate, brotli (br), and zstd
   decompress: true,
 
   // Disable connection reuse for this request
@@ -339,7 +340,7 @@ This will print the request and response headers to your terminal:
 [fetch] > User-Agent: Bun/$BUN_LATEST_VERSION
 [fetch] > Accept: */*
 [fetch] > Host: example.com
-[fetch] > Accept-Encoding: gzip, deflate, br
+[fetch] > Accept-Encoding: gzip, deflate, br, zstd
 
 [fetch] < 200 OK
 [fetch] < Content-Encoding: gzip
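The header dump shown in that hunk comes from Bun's verbose fetch mode; as a minimal sketch, it can be enabled per request with the `verbose` option (a Bun-specific `fetch()` extension; the exact log format can vary between versions):

```ts
// Prints [fetch] > / [fetch] < header lines to the terminal for this request.
const response = await fetch("http://example.com", { verbose: true });
console.log(response.status); // 200
```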
@@ -155,3 +155,24 @@ const glob = new Glob("\\!index.ts");
 glob.match("!index.ts"); // => true
 glob.match("index.ts"); // => false
 ```
+
+## Node.js `fs.glob()` compatibility
+
+Bun also implements Node.js's `fs.glob()` functions with additional features:
+
+```ts
+import { glob, globSync, promises } from "node:fs";
+
+// Array of patterns
+const files = await promises.glob(["**/*.ts", "**/*.js"]);
+
+// Exclude patterns
+const filtered = await promises.glob("**/*", {
+  exclude: ["node_modules/**", "*.test.*"],
+});
+```
+
+All three functions (`fs.glob()`, `fs.globSync()`, `fs.promises.glob()`) support:
+
+- Array of patterns as the first argument
+- `exclude` option to filter results
@@ -184,6 +184,7 @@ Bun.hash.rapidhash("data", 1234);
 
 - `"blake2b256"`
 - `"blake2b512"`
+- `"blake2s256"`
 - `"md4"`
 - `"md5"`
 - `"ripemd160"`
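Assuming this list enumerates the algorithm names accepted by `Bun.CryptoHasher` (as in Bun's hashing docs), a short sketch using the newly listed `blake2s256`; Bun builds without this commit may reject the name:

```ts
// blake2s256 produces a 32-byte digest (64 hex characters).
const hasher = new Bun.CryptoHasher("blake2s256");
hasher.update("hello world");
console.log(hasher.digest("hex"));
```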
@@ -88,6 +88,9 @@ await redis.set("user:1:name", "Alice");
 
 // Get a key
 const name = await redis.get("user:1:name");
 
+// Get a key as Uint8Array
+const buffer = await redis.getBuffer("user:1:name");
+
 // Delete a key
 await redis.del("user:1:name");
@@ -132,6 +135,10 @@ await redis.hmset("user:123", [
 const userFields = await redis.hmget("user:123", ["name", "email"]);
 console.log(userFields); // ["Alice", "alice@example.com"]
 
+// Get single field from hash (returns value directly, null if missing)
+const userName = await redis.hget("user:123", "name");
+console.log(userName); // "Alice"
+
 // Increment a numeric field in a hash
 await redis.hincrby("user:123", "visits", 1);
@@ -161,6 +168,102 @@ const randomTag = await redis.srandmember("tags");
 const poppedTag = await redis.spop("tags");
 ```
 
+## Pub/Sub
+
+Bun provides native bindings for the [Redis Pub/Sub](https://redis.io/docs/latest/develop/pubsub/) protocol. **New in Bun 1.2.23**
+
+{% callout %}
+**🚧** — The Redis Pub/Sub feature is experimental. Although we expect it to be stable, we're actively looking for feedback and areas for improvement.
+{% /callout %}
+
+### Basic Usage
+
+To get started publishing messages, you can set up a publisher in `publisher.ts`:
+
+```typescript#publisher.ts
+import { RedisClient } from "bun";
+
+const writer = new RedisClient("redis://localhost:6379");
+await writer.connect();
+
+writer.publish("general", "Hello everyone!");
+
+writer.close();
+```
+
+In another file, create the subscriber in `subscriber.ts`:
+
+```typescript#subscriber.ts
+import { RedisClient } from "bun";
+
+const listener = new RedisClient("redis://localhost:6379");
+await listener.connect();
+
+await listener.subscribe("general", (message, channel) => {
+  console.log(`Received: ${message}`);
+});
+```
+
+In one shell, run your subscriber:
+
+```bash
+bun run subscriber.ts
+```
+
+and, in another, run your publisher:
+
+```bash
+bun run publisher.ts
+```
+
+{% callout %}
+**Note:** Subscription mode takes over the `RedisClient` connection. A client with active subscriptions can only call `RedisClient.prototype.subscribe()`. In other words, applications that also need to send regular commands to Redis need a separate connection, acquirable through `.duplicate()`:
+
+```typescript
+import { RedisClient } from "bun";
+
+const redis = new RedisClient("redis://localhost:6379");
+await redis.connect();
+const subscriber = await redis.duplicate();
+
+await subscriber.subscribe("foo", () => {});
+await redis.set("bar", "baz");
+```
+
+{% /callout %}
+
+### Publishing
+
+Publishing messages is done through the `publish()` method:
+
+```typescript
+await client.publish(channelName, message);
+```
+
+### Subscriptions
+
+The Bun `RedisClient` allows you to subscribe to channels through the `.subscribe()` method:
+
+```typescript
+await client.subscribe(channel, (message, channel) => {});
+```
+
+You can unsubscribe through the `.unsubscribe()` method:
+
+```typescript
+await client.unsubscribe(); // Unsubscribe from all channels.
+await client.unsubscribe(channel); // Unsubscribe a particular channel.
+await client.unsubscribe(channel, listener); // Unsubscribe a particular listener.
+```
+
 ## Advanced Usage
 
 ### Command Execution and Pipelining
@@ -482,9 +585,10 @@ When connecting to Redis servers using older versions that don't support RESP3,
 
 Current limitations of the Redis client we are planning to address in future versions:
 
-- [ ] No dedicated API for pub/sub functionality (though you can use the raw command API)
 - [ ] Transactions (MULTI/EXEC) must be done through raw commands for now
 - [ ] Streams are supported but without dedicated methods
+- [ ] Pub/Sub does not currently support binary data, nor pattern-based subscriptions.
 
 Unsupported features:
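Since the limitations above say MULTI/EXEC must go through raw commands for now, here is a minimal sketch using the raw `send()` API; the `send(command, args)` signature is an assumption taken from the client's command docs, so verify it against your Bun version:

```ts
import { RedisClient } from "bun";

const redis = new RedisClient("redis://localhost:6379");
await redis.connect();

// Queue commands inside a transaction, then execute atomically.
await redis.send("MULTI", []);
await redis.send("SET", ["counter", "1"]);
await redis.send("INCR", ["counter"]);
const replies = await redis.send("EXEC", []); // replies for the queued commands
console.log(replies);
```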
@@ -377,6 +377,22 @@ const users = [
 await sql`SELECT * FROM users WHERE id IN ${sql(users, "id")}`;
 ```
 
+### `sql.array` helper
+
+The `sql.array` helper creates PostgreSQL array literals from JavaScript arrays:
+
+```ts
+// Create array literals for PostgreSQL
+await sql`INSERT INTO tags (items) VALUES (${sql.array(["red", "blue", "green"])})`;
+// Generates: INSERT INTO tags (items) VALUES (ARRAY['red', 'blue', 'green'])
+
+// Works with numeric arrays too
+await sql`SELECT * FROM products WHERE ids = ANY(${sql.array([1, 2, 3])})`;
+// Generates: SELECT * FROM products WHERE ids = ANY(ARRAY[1, 2, 3])
+```
+
+**Note**: `sql.array` is PostgreSQL-only. Multi-dimensional arrays and NULL elements may not be supported yet.
+
 ## `sql``.simple()`
 
 The PostgreSQL wire protocol supports two types of queries: "simple" and "extended". Simple queries can contain multiple statements but don't support parameters, while extended queries (the default) support parameters but only allow one statement.
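To make the simple-vs-extended distinction concrete, a minimal sketch of the `.simple()` mode named in the heading above: multiple statements in one query, but no `${...}` parameters:

```ts
import { sql } from "bun";

// Simple protocol: several statements per query, no parameters allowed.
await sql`
  CREATE TABLE IF NOT EXISTS counters (n INT);
  INSERT INTO counters VALUES (1);
`.simple();
```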
@@ -663,6 +663,8 @@ class Statement<Params, ReturnType> {
   toString(): string; // serialize to SQL
 
   columnNames: string[]; // the column names of the result set
+  columnTypes: string[]; // types based on actual values in first row (call .get()/.all() first)
+  declaredTypes: (string | null)[]; // types from CREATE TABLE schema (call .get()/.all() first)
   paramsCount: number; // the number of parameters expected by the statement
   native: any; // the native object representing the statement
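A short sketch reading the two new `Statement` fields from `bun:sqlite`; per the comments above, both require running the query first, and the exact type strings shown are illustrative:

```ts
import { Database } from "bun:sqlite";

const db = new Database(":memory:");
db.run("CREATE TABLE users (id INTEGER, name TEXT)");
db.run("INSERT INTO users VALUES (1, 'Alice')");

const stmt = db.query("SELECT id, name FROM users");
stmt.all(); // populate type info from the first row

console.log(stmt.columnNames); // ["id", "name"]
console.log(stmt.columnTypes); // e.g. ["INTEGER", "TEXT"], from actual values
console.log(stmt.declaredTypes); // e.g. ["INTEGER", "TEXT"], from the CREATE TABLE schema
```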
@@ -28,6 +28,20 @@ for await (const chunk of stream) {
 }
 ```
 
+`ReadableStream` also provides convenience methods for consuming the entire stream:
+
+```ts
+const stream = new ReadableStream({
+  start(controller) {
+    controller.enqueue("hello world");
+    controller.close();
+  },
+});
+
+const data = await stream.text(); // => "hello world"
+// Also available: .json(), .bytes(), .blob()
+```
+
 ## Direct `ReadableStream`
 
 Bun implements an optimized version of `ReadableStream` that avoids unnecessary data copying & queue management logic. With a traditional `ReadableStream`, chunks of data are _enqueued_. Each chunk is copied into a queue, where it sits until the stream is ready to send more data.
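For contrast with the queueing model just described, a minimal sketch of Bun's direct stream; `type: "direct"` and `controller.write()` follow Bun's streams docs, while the explicit `close()` call is an assumption for ending the stream:

```ts
const direct = new ReadableStream({
  type: "direct",
  pull(controller) {
    // Chunks are written straight to the sink, skipping the internal queue.
    controller.write("hello");
    controller.write(" world");
    controller.close(); // assumed close method; check your Bun version
  },
});

console.log(await direct.text()); // => "hello world"
```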
@@ -602,6 +602,40 @@ dec.decode(decompressed);
 // => "hellohellohello..."
 ```
 
+## `Bun.zstdCompress()` / `Bun.zstdCompressSync()`
+
+Compresses a `Uint8Array` using the Zstandard algorithm.
+
+```ts
+const buf = Buffer.from("hello".repeat(100));
+
+// Synchronous
+const compressedSync = Bun.zstdCompressSync(buf);
+// Asynchronous
+const compressedAsync = await Bun.zstdCompress(buf);
+
+// With compression level (1-22, default: 3)
+const compressedLevel = Bun.zstdCompressSync(buf, { level: 6 });
+```
+
+## `Bun.zstdDecompress()` / `Bun.zstdDecompressSync()`
+
+Decompresses a `Uint8Array` using the Zstandard algorithm.
+
+```ts
+const buf = Buffer.from("hello".repeat(100));
+const compressed = Bun.zstdCompressSync(buf);
+
+// Synchronous
+const decompressedSync = Bun.zstdDecompressSync(compressed);
+// Asynchronous
+const decompressedAsync = await Bun.zstdDecompress(compressed);
+
+const dec = new TextDecoder();
+dec.decode(decompressedSync);
+// => "hellohellohello..."
+```
+
 ## `Bun.inspect()`
 
 Serializes an object to a `string` exactly as it would be printed by `console.log`.
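A one-line illustration of the `Bun.inspect()` behavior described above; the formatting follows `console.log`, so the exact string may differ between versions:

```ts
const s = Bun.inspect({ id: 1, tags: ["a", "b"] });
console.log(s); // e.g. { id: 1, tags: [ "a", "b" ] }
```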
@@ -279,6 +279,9 @@ Bun implements the `WebSocket` class. To create a WebSocket client that connects
 ```ts
 const socket = new WebSocket("ws://localhost:3000");
+
+// With subprotocol negotiation
+const socket2 = new WebSocket("ws://localhost:3000", ["soap", "wamp"]);
 ```
 
 In browsers, the cookies that are currently set on the page will be sent with the WebSocket upgrade request. This is a standard feature of the `WebSocket` API.
@@ -293,6 +296,17 @@ const socket = new WebSocket("ws://localhost:3000", {
 });
 ```
 
+### Client compression
+
+WebSocket clients support permessage-deflate compression. The `extensions` property shows negotiated compression:
+
+```ts
+const socket = new WebSocket("wss://echo.websocket.org");
+socket.addEventListener("open", () => {
+  console.log(socket.extensions); // => "permessage-deflate"
+});
+```
+
 To add event listeners to the socket:
@@ -282,6 +282,31 @@ const worker = new Worker("./i-am-smol.ts", {
 Setting `smol: true` sets `JSC::HeapSize` to be `Small` instead of the default `Large`.
 {% /details %}
 
+## Environment Data
+
+Share data between the main thread and workers using `setEnvironmentData()` and `getEnvironmentData()`.
+
+```js
+import { setEnvironmentData, getEnvironmentData } from "worker_threads";
+
+// In main thread
+setEnvironmentData("config", { apiUrl: "https://api.example.com" });
+
+// In worker
+const config = getEnvironmentData("config");
+console.log(config); // => { apiUrl: "https://api.example.com" }
+```
+
+## Worker Events
+
+Listen for worker creation via the `"worker"` event on `process`:
+
+```js
+process.on("worker", worker => {
+  console.log("New worker created:", worker.threadId);
+});
+```
+
 ## `Bun.isMainThread`
 
 You can check if you're in the main thread by checking `Bun.isMainThread`.
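A sketch of the `Bun.isMainThread` check in a file that serves as both entrypoint and worker body (the file name is illustrative):

```ts
// worker-or-main.ts
if (Bun.isMainThread) {
  const worker = new Worker(new URL("./worker-or-main.ts", import.meta.url).href);
  worker.postMessage("ping");
} else {
  self.onmessage = (event: MessageEvent) => {
    console.log("worker received:", event.data); // "ping"
  };
}
```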
docs/api/yaml.md (172 changes)

@@ -3,6 +3,7 @@ In Bun, YAML is a first-class citizen alongside JSON and TOML.
 Bun provides built-in support for YAML files through both runtime APIs and bundler integration. You can
 
 - Parse YAML strings with `Bun.YAML.parse`
+- Stringify JavaScript objects to YAML with `Bun.YAML.stringify`
 - import & require YAML files as modules at runtime (including hot reloading & watch mode support)
 - import & require YAML files in frontend apps via bun's bundler
 
@@ -104,7 +105,7 @@ const data = Bun.YAML.parse(yaml);
 
 #### Error Handling
 
-`Bun.YAML.parse()` throws a `SyntaxError` if the YAML is invalid:
+`Bun.YAML.parse()` throws an error if the YAML is invalid:
 
 ```ts
 try {
@@ -114,6 +115,175 @@ try {
 }
 ```
 
+### `Bun.YAML.stringify()`
+
+Convert a JavaScript value into a YAML string. The API signature matches `JSON.stringify`:
+
+```ts
+YAML.stringify(value, replacer?, space?)
+```
+
+- `value`: The value to convert to YAML
+- `replacer`: Currently only `null` or `undefined` (function replacers not yet supported)
+- `space`: Number of spaces for indentation (e.g., `2`) or a string to use for indentation. **Without this parameter, outputs flow-style (single-line) YAML**
+
+#### Basic Usage
+
+```ts
+import { YAML } from "bun";
+
+const data = {
+  name: "John Doe",
+  age: 30,
+  hobbies: ["reading", "coding"],
+};
+
+// Without space - outputs flow-style (single-line) YAML
+console.log(YAML.stringify(data));
+// {name: John Doe,age: 30,hobbies: [reading,coding]}
+
+// With space=2 - outputs block-style (multi-line) YAML
+console.log(YAML.stringify(data, null, 2));
+// name: John Doe
+// age: 30
+// hobbies:
+//   - reading
+//   - coding
+```
+
+#### Output Styles
+
+```ts
+const arr = [1, 2, 3];
+
+// Flow style (single-line) - default
+console.log(YAML.stringify(arr));
+// [1,2,3]
+
+// Block style (multi-line) - with indentation
+console.log(YAML.stringify(arr, null, 2));
+// - 1
+// - 2
+// - 3
+```
+
+#### String Quoting
+
+`YAML.stringify()` automatically quotes strings when necessary:
+
+- Strings that would be parsed as YAML keywords (`true`, `false`, `null`, `yes`, `no`, etc.)
+- Strings that would be parsed as numbers
+- Strings containing special characters or escape sequences
+
+```ts
+const examples = {
+  keyword: "true", // Will be quoted: "true"
+  number: "123", // Will be quoted: "123"
+  text: "hello world", // Won't be quoted: hello world
+  empty: "", // Will be quoted: ""
+};
+
+console.log(YAML.stringify(examples, null, 2));
+// keyword: "true"
+// number: "123"
+// text: hello world
+// empty: ""
+```
+
+#### Cycles and References
+
+`YAML.stringify()` automatically detects and handles circular references using YAML anchors and aliases:
+
+```ts
+const obj = { name: "root" };
+obj.self = obj; // Circular reference
+
+const yamlString = YAML.stringify(obj, null, 2);
+console.log(yamlString);
+// &root
+// name: root
+// self:
+//   *root
+
+// Objects with shared references
+const shared = { id: 1 };
+const data = {
+  first: shared,
+  second: shared,
+};
+
+console.log(YAML.stringify(data, null, 2));
+// first:
+//   &first
+//   id: 1
+// second:
+//   *first
+```
+
+#### Special Values
+
+```ts
+// Special numeric values
+console.log(YAML.stringify(Infinity)); // .inf
+console.log(YAML.stringify(-Infinity)); // -.inf
+console.log(YAML.stringify(NaN)); // .nan
+console.log(YAML.stringify(0)); // 0
+console.log(YAML.stringify(-0)); // -0
+
+// null and undefined
+console.log(YAML.stringify(null)); // null
+console.log(YAML.stringify(undefined)); // undefined (returns undefined, not a string)
+
+// Booleans
+console.log(YAML.stringify(true)); // true
+console.log(YAML.stringify(false)); // false
+```
+
+#### Complex Objects
+
+```ts
+const config = {
+  server: {
+    port: 3000,
+    host: "localhost",
+    ssl: {
+      enabled: true,
+      cert: "/path/to/cert.pem",
+      key: "/path/to/key.pem",
+    },
+  },
+  database: {
+    connections: [
+      { name: "primary", host: "db1.example.com" },
+      { name: "replica", host: "db2.example.com" },
+    ],
+  },
+  features: {
+    auth: true,
+    "rate-limit": 100, // Keys with special characters are preserved
+  },
+};
+
+const yamlString = YAML.stringify(config, null, 2);
+console.log(yamlString);
+// server:
+//   port: 3000
+//   host: localhost
+//   ssl:
+//     enabled: true
+//     cert: /path/to/cert.pem
+//     key: /path/to/key.pem
+// database:
+//   connections:
+//     - name: primary
+//       host: db1.example.com
+//     - name: replica
+//       host: db2.example.com
+// features:
+//   auth: true
+//   rate-limit: 100
+```
+
 ## Module Import
 
 ### ES Modules
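The hunk above ends at the Module Import section; a minimal sketch of the runtime import it refers to (the file name is illustrative; Bun parses the YAML at load time):

```ts
// config.yaml contents (illustrative):
//   name: my-app
//   port: 3000
import config from "./config.yaml";

console.log(config.name); // "my-app"
console.log(config.port); // 3000
```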
@@ -140,6 +140,19 @@ The `--sourcemap` argument embeds a sourcemap compressed with zstd, so that erro
 
 The `--bytecode` argument enables bytecode compilation. Every time you run JavaScript code in Bun, JavaScriptCore (the engine) will compile your source code into bytecode. We can move this parsing work from runtime to bundle time, saving you startup time.
 
+## Embedding runtime arguments
+
+**`--compile-exec-argv="args"`** - Embed runtime arguments that are available via `process.execArgv`:
+
+```bash
+bun build --compile --compile-exec-argv="--smol --user-agent=MyBot" ./app.ts --outfile myapp
+```
+
+```js
+// In the compiled app
+console.log(process.execArgv); // ["--smol", "--user-agent=MyBot"]
+```
+
 ## Act as the Bun CLI
 
 {% note %}
@@ -313,6 +313,14 @@ $ bun build --entrypoints ./index.ts --outdir ./out --target browser
 
 Depending on the target, Bun will apply different module resolution rules and optimizations.
 
+### Module resolution
+
+Bun supports the `NODE_PATH` environment variable for additional module resolution paths:
+
+```bash
+NODE_PATH=./src bun build ./entry.js --outdir ./dist
+```
+
 <!-- - Module resolution. For example, when bundling for the browser, Bun will prioritize the `"browser"` export condition when resolving imports. An error will be thrown if any Node.js or Bun built-ins are imported or used, e.g. `node:fs` or `Bun.serve`. -->
 
 {% table %}
@@ -392,6 +400,55 @@ $ bun build ./index.tsx --outdir ./out --format cjs
 
 TODO: document IIFE once we support globalNames.
 
+### `jsx`
+
+Configure JSX transform behavior. Allows fine-grained control over how JSX is compiled.
+
+**Classic runtime example** (uses `factory` and `fragment`):
+
+{% codetabs %}
+
+```ts#JavaScript
+await Bun.build({
+  entrypoints: ['./app.tsx'],
+  outdir: './out',
+  jsx: {
+    factory: 'h',
+    fragment: 'Fragment',
+    runtime: 'classic',
+  },
+})
+```
+
+```bash#CLI
+# JSX configuration is handled via bunfig.toml or tsconfig.json
+$ bun build ./app.tsx --outdir ./out
+```
+
+{% /codetabs %}
+
+**Automatic runtime example** (uses `importSource`):
+
+{% codetabs %}
+
+```ts#JavaScript
+await Bun.build({
+  entrypoints: ['./app.tsx'],
+  outdir: './out',
+  jsx: {
+    importSource: 'preact',
+    runtime: 'automatic',
+  },
+})
+```
+
+```bash#CLI
+# JSX configuration is handled via bunfig.toml or tsconfig.json
+$ bun build ./app.tsx --outdir ./out
+```
+
+{% /codetabs %}
+
 ### `splitting`
 
 Whether to enable code splitting.
@@ -1519,6 +1576,15 @@ interface BuildConfig {
    * @default "esm"
    */
   format?: "esm" | "cjs" | "iife";
+  /**
+   * JSX configuration object for controlling JSX transform behavior
+   */
+  jsx?: {
+    factory?: string;
+    fragment?: string;
+    importSource?: string;
+    runtime?: "automatic" | "classic";
+  };
   naming?:
     | string
     | {
@@ -176,7 +176,21 @@ When a `bun.lock` exists and `package.json` hasn’t changed, Bun downloads miss
 
 ## Platform-specific dependencies?
 
-bun stores normalized `cpu` and `os` values from npm in the lockfile, along with the resolved packages. It skips downloading, extracting, and installing packages disabled for the current target at runtime. This means the lockfile won’t change between platforms/architectures even if the packages ultimately installed do change.
+bun stores normalized `cpu` and `os` values from npm in the lockfile, along with the resolved packages. It skips downloading, extracting, and installing packages disabled for the current target at runtime. This means the lockfile won't change between platforms/architectures even if the packages ultimately installed do change.
+
+### `--cpu` and `--os` flags
+
+You can override the target platform for package selection:
+
+```bash
+bun install --cpu=x64 --os=linux
+```
+
+This installs packages for the specified platform instead of the current system. Useful for cross-platform builds or when preparing deployments for different environments.
+
+**Accepted values for `--cpu`**: `arm64`, `x64`, `ia32`, `ppc64`, `s390x`
+
+**Accepted values for `--os`**: `linux`, `darwin`, `win32`, `freebsd`, `openbsd`, `sunos`, `aix`
 
 ## Peer dependencies?
 
@@ -245,3 +259,91 @@ bun uses a binary format for caching NPM registry responses. This loads much fas
 You will see these files in `~/.bun/install/cache/*.npm`. The filename pattern is `${hash(packageName)}.npm`. It’s a hash so that extra directories don’t need to be created for scoped packages.
 
 Bun's usage of `Cache-Control` ignores `Age`. This improves performance, but means bun may be about 5 minutes out of date to receive the latest package version metadata from npm.
 
+## pnpm migration
+
+Bun automatically migrates projects from pnpm to bun. When a `pnpm-lock.yaml` file is detected and no `bun.lock` file exists, Bun will automatically migrate the lockfile to `bun.lock` during installation. The original `pnpm-lock.yaml` file remains unmodified.
+
+```bash
+bun install
+```
+
+**Note**: Migration only runs when `bun.lock` is absent. There is currently no opt-out flag for pnpm migration.
+
+The migration process handles:
+
+### Lockfile Migration
+
+- Converts `pnpm-lock.yaml` to `bun.lock` format
+- Preserves package versions and resolution information
+- Maintains dependency relationships and peer dependencies
+- Handles patched dependencies with integrity hashes
+
+### Workspace Configuration
+
+When a `pnpm-workspace.yaml` file exists, Bun migrates workspace settings to your root `package.json`:
+
+```yaml
+# pnpm-workspace.yaml
+packages:
+  - "apps/*"
+  - "packages/*"
+
+catalog:
+  react: ^18.0.0
+  typescript: ^5.0.0
+
+catalogs:
+  build:
+    webpack: ^5.0.0
+    babel: ^7.0.0
+```
+
+The workspace packages list and catalogs are moved to the `workspaces` field in `package.json`:
+
+```json
+{
+  "workspaces": {
+    "packages": ["apps/*", "packages/*"],
+    "catalog": {
+      "react": "^18.0.0",
+      "typescript": "^5.0.0"
+    },
+    "catalogs": {
+      "build": {
+        "webpack": "^5.0.0",
+        "babel": "^7.0.0"
+      }
+    }
+  }
+}
+```
+
+### Catalog Dependencies
+
+Dependencies using pnpm's `catalog:` protocol are preserved:
+
+```json
+{
+  "dependencies": {
+    "react": "catalog:",
+    "webpack": "catalog:build"
+  }
+}
+```
+
+### Configuration Migration
+
+The following pnpm configuration is migrated from both `pnpm-lock.yaml` and `pnpm-workspace.yaml`:
+
+- **Overrides**: Moved from `pnpm.overrides` to root-level `overrides` in `package.json`
+- **Patched Dependencies**: Moved from `pnpm.patchedDependencies` to root-level `patchedDependencies` in `package.json`
+- **Workspace Overrides**: Applied from `pnpm-workspace.yaml` to root `package.json`
+
+### Requirements
+
+- Requires pnpm lockfile version 7 or higher
+- Workspace packages must have a `name` field in their `package.json`
+- All catalog entries referenced by dependencies must exist in the catalogs definition
+
+After migration, you can safely remove `pnpm-lock.yaml` and `pnpm-workspace.yaml` files.
@@ -63,6 +63,15 @@ $ bunx --bun my-cli # good
 $ bunx my-cli --bun # bad
 ```
 
+## Package flag
+
+**`--package <pkg>` or `-p <pkg>`** - Run binary from specific package. Useful when binary name differs from package name:
+
+```bash
+bunx -p renovate renovate-config-validator
+bunx --package @angular/cli ng
+```
+
 To force bun to always be used with a script, use a shebang.
@@ -33,6 +33,11 @@ It creates:
 - an entry point which defaults to `index.ts` unless any of `index.{tsx, jsx, js, mts, mjs}` exist or the `package.json` specifies a `module` or `main` field
 - a `README.md` file
 
+AI Agent rules (disable with `$BUN_AGENT_RULE_DISABLED=1`):
+
+- a `CLAUDE.md` file when Claude CLI is detected (disable with `CLAUDE_CODE_AGENT_RULE_DISABLED` env var)
+- a `.cursor/rules/*.mdc` file to guide [Cursor AI](https://cursor.sh) to use Bun instead of Node.js and npm when Cursor is detected
+
 If you pass `-y` or `--yes`, it will assume you want to continue without asking questions.
 
 At the end, it runs `bun install` to install `@types/bun`.
@@ -44,4 +44,47 @@ You can also pass glob patterns to filter by workspace names:
 
 {% bunOutdatedTerminal glob="{e,t}*" displayGlob="--filter='@monorepo/{types,cli}'" /%}
 
+### Catalog Dependencies
+
+`bun outdated` supports checking catalog dependencies defined in `package.json`:
+
+```sh
+$ bun outdated -r
+┌────────────────────┬─────────┬─────────┬─────────┬────────────────────────────────┐
+│ Package            │ Current │ Update  │ Latest  │ Workspace                      │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ body-parser        │ 1.19.0  │ 1.19.0  │ 2.2.0   │ @test/shared                   │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ cors               │ 2.8.0   │ 2.8.0   │ 2.8.5   │ @test/shared                   │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ chalk              │ 4.0.0   │ 4.0.0   │ 5.6.2   │ @test/utils                    │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ uuid               │ 8.0.0   │ 8.0.0   │ 13.0.0  │ @test/utils                    │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ axios              │ 0.21.0  │ 0.21.0  │ 1.12.2  │ catalog (@test/app)            │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ lodash             │ 4.17.15 │ 4.17.15 │ 4.17.21 │ catalog (@test/app, @test/app) │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ react              │ 17.0.0  │ 17.0.0  │ 19.1.1  │ catalog (@test/app)            │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ react-dom          │ 17.0.0  │ 17.0.0  │ 19.1.1  │ catalog (@test/app)            │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ express            │ 4.17.0  │ 4.17.0  │ 5.1.0   │ catalog (@test/shared)         │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ moment             │ 2.24.0  │ 2.24.0  │ 2.30.1  │ catalog (@test/utils)          │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ @types/node (dev)  │ 14.0.0  │ 14.0.0  │ 24.5.2  │ @test/shared                   │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ @types/react (dev) │ 17.0.0  │ 17.0.0  │ 19.1.15 │ catalog:testing (@test/app)    │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ eslint (dev)       │ 7.0.0   │ 7.0.0   │ 9.36.0  │ catalog:testing (@test/app)    │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ typescript (dev)   │ 4.9.5   │ 4.9.5   │ 5.9.2   │ catalog:build (@test/app)      │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ jest (dev)         │ 26.0.0  │ 26.0.0  │ 30.2.0  │ catalog:testing (@test/shared) │
+├────────────────────┼─────────┼─────────┼─────────┼────────────────────────────────┤
+│ prettier (dev)     │ 2.0.0   │ 2.0.0   │ 3.6.2   │ catalog:build (@test/utils)    │
+└────────────────────┴─────────┴─────────┴─────────┴────────────────────────────────┘
+```
+
 {% bunCLIUsage command="outdated" /%}
@@ -82,6 +82,16 @@ The `--dry-run` flag can be used to simulate the publish process without actuall
 $ bun publish --dry-run
 ```
 
+### `--tolerate-republish`
+
+The `--tolerate-republish` flag makes `bun publish` exit with code 0 instead of code 1 when attempting to republish over an existing version number. This is useful in automated workflows where republishing the same version might occur and should not be treated as an error.
+
+```sh
+$ bun publish --tolerate-republish
+```
+
+Without this flag, attempting to publish a version that already exists will result in an error and exit code 1. With this flag, the command will exit successfully even when trying to republish an existing version.
+
 ### `--gzip-level`
 
 Specify the level of gzip compression to use when packing the package. Only applies to `bun publish` without a tarball path argument. Values range from `0` to `9` (default is `9`).
@@ -151,6 +151,14 @@ By default, Bun respects this shebang and executes the script with `node`. Howev
 $ bun run --bun vite
 ```
 
+### `--no-addons`
+
+Disable native addons and use the `node-addons` export condition.
+
+```bash
+$ bun --no-addons run server.js
+```
+
 ### Filtering
 
 In monorepos containing multiple packages, you can use the `--filter` argument to execute scripts in many packages at once.
@@ -166,6 +174,14 @@ will execute `<script>` in both `bar` and `baz`, but not in `foo`.
 
 Find more details in the docs page for [filter](https://bun.com/docs/cli/filter#running-scripts-with-filter).
 
+### `--workspaces`
+
+Run scripts across all workspaces in the monorepo:
+
+```bash
+bun run --workspaces test
+```
+
 ## `bun run -` to pipe code from stdin
 
 `bun run -` lets you read JavaScript, TypeScript, TSX, or JSX from stdin and execute it without writing to a temporary file first.
@@ -212,6 +228,14 @@ $ bun --smol run index.tsx
 
 This causes the garbage collector to run more frequently, which can slow down execution. However, it can be useful in environments with limited memory. Bun automatically adjusts the garbage collector's heap size based on the available memory (accounting for cgroups and other memory limits) with and without the `--smol` flag, so this is mostly useful for cases where you want to make the heap size grow more slowly.
 
+## `--user-agent`
+
+**`--user-agent <string>`** - Set User-Agent header for all `fetch()` requests:
+
+```bash
+bun --user-agent "MyBot/1.0" run index.tsx
+```
+
 ## Resolution order
 
 Absolute paths and paths starting with `./` or `.\\` are always executed as source files. Unless using `bun run`, running a file with an allowed extension will prefer the file over a package.json script.
@@ -223,4 +247,15 @@ When there is a package.json script and a file with the same name, `bun run` pri
 3. Binaries from project packages, eg `bun add eslint && bun run eslint`
 4. (`bun run` only) System commands, eg `bun run ls`
 
+### `--unhandled-rejections`
+
+Configure how unhandled promise rejections are handled:
+
+```bash
+$ bun --unhandled-rejections=throw script.js # Throw exception (terminate immediately)
+$ bun --unhandled-rejections=strict script.js # Throw exception (emit rejectionHandled if handled later)
+$ bun --unhandled-rejections=warn script.js # Print warning to stderr (default in Node.js)
+$ bun --unhandled-rejections=none script.js # Silently ignore
+```
+
 {% bunCLIUsage command="run" /%}
116
docs/cli/test.md
116
docs/cli/test.md
@@ -47,6 +47,8 @@ To filter by _test name_, use the `-t`/`--test-name-pattern` flag.

$ bun test --test-name-pattern addition
```

When no tests match the filter, `bun test` exits with code 1.

To run a specific file in the test runner, make sure the path starts with `./` or `/` to distinguish it from a filter name.

```bash
@@ -109,6 +111,90 @@ Use the `--timeout` flag to specify a _per-test_ timeout in milliseconds. If a t

$ bun test --timeout 20
```

## Concurrent test execution

By default, Bun runs all tests sequentially within each test file. You can enable concurrent execution to run async tests in parallel, significantly speeding up test suites with independent tests.

### `--concurrent` flag

Use the `--concurrent` flag to run all tests concurrently within their respective files:

```sh
$ bun test --concurrent
```

When this flag is enabled, all tests will run in parallel unless explicitly marked with `test.serial`.

### `--max-concurrency` flag

Control the maximum number of tests running simultaneously with the `--max-concurrency` flag:

```sh
# Limit to 4 concurrent tests
$ bun test --concurrent --max-concurrency 4

# Default: 20
$ bun test --concurrent
```

This helps prevent resource exhaustion when running many concurrent tests. The default value is 20.

### `test.concurrent`

Mark individual tests to run concurrently, even when the `--concurrent` flag is not used:

```ts
import { test, expect } from "bun:test";

// These tests run in parallel with each other
test.concurrent("concurrent test 1", async () => {
  await fetch("/api/endpoint1");
  expect(true).toBe(true);
});

test.concurrent("concurrent test 2", async () => {
  await fetch("/api/endpoint2");
  expect(true).toBe(true);
});

// This test runs sequentially
test("sequential test", () => {
  expect(1 + 1).toBe(2);
});
```

### `test.serial`

Force tests to run sequentially, even when the `--concurrent` flag is enabled:

```ts
import { test, expect } from "bun:test";

let sharedState = 0;

// These tests must run in order
test.serial("first serial test", () => {
  sharedState = 1;
  expect(sharedState).toBe(1);
});

test.serial("second serial test", () => {
  // Depends on the previous test
  expect(sharedState).toBe(1);
  sharedState = 2;
});

// This test can run concurrently if --concurrent is enabled
test("independent test", () => {
  expect(true).toBe(true);
});

// Chaining test qualifiers
test.failing.each([1, 2, 3])("chained qualifiers %d", input => {
  expect(input).toBe(0); // This test is expected to fail for each input
});
```

## Rerun tests

Use the `--rerun-each` flag to run each test multiple times. This is useful for detecting flaky or non-deterministic test failures.

@@ -117,6 +203,36 @@ Use the `--rerun-each` flag to run each test multiple times. This is useful for

$ bun test --rerun-each 100
```

## Randomize test execution order

Use the `--randomize` flag to run tests in a random order. This helps detect tests that depend on shared state or execution order.

```sh
$ bun test --randomize
```

When using `--randomize`, the seed used for randomization will be displayed in the test summary:

```sh
$ bun test --randomize
# ... test output ...
--seed=12345
 2 pass
 8 fail
Ran 10 tests across 2 files. [50.00ms]
```

### Reproducible random order with `--seed`

Use the `--seed` flag to specify a seed for the randomization. This allows you to reproduce the same test order when debugging order-dependent failures.

```sh
# Reproduce a previous randomized run
$ bun test --seed 123456
```

The `--seed` flag implies `--randomize`, so you don't need to specify both. Using the same seed value will always produce the same test execution order, making it easier to debug intermittent failures caused by test interdependencies.

## Bail out with `--bail`

Use the `--bail` flag to abort the test run early after a pre-determined number of test failures. By default Bun will run all tests and report all failures, but sometimes in CI environments it's preferable to terminate earlier to reduce CPU usage.
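A quick sketch of both forms (assuming the `--bail=<n>` count syntax matches the "pre-determined number" described above):

```sh
# Abort after the first failure
$ bun test --bail

# Abort after 10 failures
$ bun test --bail=10
```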
@@ -90,6 +90,17 @@ Packages are organized in sections by dependency type:

Within each section, individual packages may have additional suffixes (` dev`, ` peer`, ` optional`) for extra clarity.

## `--recursive`

Use the `--recursive` flag with `--interactive` to update dependencies across all workspaces in a monorepo:

```sh
$ bun update --interactive --recursive
$ bun update -i -r
```

This displays an additional "Workspace" column showing which workspace each dependency belongs to.

## `--latest`

By default, `bun update` will update to the latest version of a dependency that satisfies the version range specified in your `package.json`.
@@ -24,6 +24,26 @@ To update all dependencies to the latest versions (including breaking changes):

bun update --latest
```

### Filtering options

**`--audit-level=<low|moderate|high|critical>`** - Only show vulnerabilities at this severity level or higher:

```bash
bun audit --audit-level=high
```

**`--prod`** - Audit only production dependencies (excludes devDependencies):

```bash
bun audit --prod
```

**`--ignore <CVE>`** - Ignore specific CVEs (can be used multiple times):

```bash
bun audit --ignore CVE-2022-25883 --ignore CVE-2023-26136
```

### `--json`

Use the `--json` flag to print the raw JSON response from the registry instead of the formatted report:
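The invocation is the one the flag name implies:

```bash
bun audit --json
```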
@@ -46,3 +46,13 @@ print = "yarn"

Bun v1.2 changed the default lockfile format to the text-based `bun.lock`. Existing binary `bun.lockb` lockfiles can be migrated to the new format by running `bun install --save-text-lockfile --frozen-lockfile --lockfile-only` and deleting `bun.lockb`.
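Concretely, that migration is just:

```bash
$ bun install --save-text-lockfile --frozen-lockfile --lockfile-only
$ rm bun.lockb
```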
More information about the new lockfile format can be found on [our blogpost](https://bun.com/blog/bun-lock-text-lockfile).

#### Automatic lockfile migration

When running `bun install` in a project without a `bun.lock`, Bun automatically migrates existing lockfiles:

- `yarn.lock` (v1)
- `package-lock.json` (npm)
- `pnpm-lock.yaml` (pnpm)

The original lockfile is preserved and can be removed manually after verification.
@@ -73,3 +73,33 @@ The equivalent `bunfig.toml` option is to add a key in [`install.scopes`](https:

[install.scopes]
myorg = { url = "http://localhost:4873/", username = "myusername", password = "$NPM_PASSWORD" }
```

### `link-workspace-packages`: Control workspace package installation

Controls how workspace packages are installed when available locally:

```ini
link-workspace-packages=true
```

The equivalent `bunfig.toml` option is [`install.linkWorkspacePackages`](https://bun.com/docs/runtime/bunfig#install-linkworkspacepackages):

```toml
[install]
linkWorkspacePackages = true
```

### `save-exact`: Save exact versions

Always saves exact versions without the `^` prefix:

```ini
save-exact=true
```

The equivalent `bunfig.toml` option is [`install.exact`](https://bun.com/docs/runtime/bunfig#install-exact):

```toml
[install]
exact = true
```
@@ -81,7 +81,7 @@ Workspaces have a couple major benefits.

- **Code can be split into logical parts.** If one package relies on another, you can simply add it as a dependency in `package.json`. If package `b` depends on `a`, `bun install` will install your local `packages/a` directory into `node_modules` instead of downloading it from the npm registry.
- **Dependencies can be de-duplicated.** If `a` and `b` share a common dependency, it will be _hoisted_ to the root `node_modules` directory. This reduces redundant disk usage and minimizes "dependency hell" issues associated with having multiple versions of a package installed simultaneously.
- - **Run scripts in multiple packages.** You can use the [`--filter` flag](https://bun.com/docs/cli/filter) to easily run `package.json` scripts in multiple packages in your workspace.
+ - **Run scripts in multiple packages.** You can use the [`--filter` flag](https://bun.com/docs/cli/filter) to easily run `package.json` scripts in multiple packages in your workspace, or `--workspaces` to run scripts across all workspaces.

## Share versions with Catalogs
@@ -359,7 +359,7 @@ export default {

  page("api/file-io", "File I/O", {
    description: `Read and write files fast with Bun's heavily optimized file system API.`,
  }), // "`Bun.write`"),
- page("api/redis", "Redis client", {
+ page("api/redis", "Redis Client", {
    description: `Bun provides a fast, native Redis client with automatic command pipelining for better performance.`,
  }),
  page("api/import-meta", "import.meta", {
@@ -232,6 +232,23 @@ Set path where coverage reports will be saved. Please notice, that it works only

coverageDir = "path/to/somewhere" # default "coverage"
```

### `test.concurrentTestGlob`

Specify a glob pattern to automatically run matching test files with concurrent test execution enabled. Test files matching this pattern will behave as if the `--concurrent` flag was passed, running all tests within those files concurrently.

```toml
[test]
concurrentTestGlob = "**/concurrent-*.test.ts"
```

This is useful for:

- Gradually migrating test suites to concurrent execution
- Running integration tests concurrently while keeping unit tests sequential
- Separating fast concurrent tests from tests that require sequential execution

The `--concurrent` CLI flag will override this setting when specified.

## Package manager

Package management is a complex issue; to support a range of use cases, the behavior of `bun install` can be configured under the `[install]` section.
@@ -220,6 +220,11 @@ These environment variables are read by Bun and configure aspects of its behavio

- `DO_NOT_TRACK`
  - Disable uploading crash reports to `bun.report` on crash. On macOS & Windows, crash report uploads are enabled by default. Otherwise, telemetry is not sent yet as of May 21st, 2024, but we are planning to add telemetry in the coming weeks. If `DO_NOT_TRACK=1`, then auto-uploading crash reports and telemetry are both [disabled](https://do-not-track.dev/).

---

- `BUN_OPTIONS`
  - Prepends command-line arguments to any Bun execution. For example, `BUN_OPTIONS="--hot"` makes `bun run dev` behave like `bun --hot run dev`.

{% /table %}

## Runtime transpiler caching
@@ -124,7 +124,7 @@ This page is updated regularly to reflect compatibility status of the latest ver

### [`node:perf_hooks`](https://nodejs.org/api/perf_hooks.html)

- 🟡 Missing `createHistogram` `monitorEventLoopDelay`. It's recommended to use `performance` global instead of `perf_hooks.performance`.
+ 🟡 APIs are implemented, but Node.js test suite does not pass yet for this module.

### [`node:process`](https://nodejs.org/api/process.html)

@@ -156,7 +156,7 @@ This page is updated regularly to reflect compatibility status of the latest ver

### [`node:worker_threads`](https://nodejs.org/api/worker_threads.html)

- 🟡 `Worker` doesn't support the following options: `stdin` `stdout` `stderr` `trackedUnmanagedFds` `resourceLimits`. Missing `markAsUntransferable` `moveMessagePortToContext` `getHeapSnapshot`.
+ 🟡 `Worker` doesn't support the following options: `stdin` `stdout` `stderr` `trackedUnmanagedFds` `resourceLimits`. Missing `markAsUntransferable` `moveMessagePortToContext`.

### [`node:inspector`](https://nodejs.org/api/inspector.html)
@@ -46,6 +46,25 @@ smol = true # Reduce memory usage during test runs

This is equivalent to using the `--smol` flag on the command line.

### Test execution

#### concurrentTestGlob

Automatically run test files matching a glob pattern with concurrent test execution enabled. This is useful for gradually migrating test suites to concurrent execution or for running specific test types concurrently.

```toml
[test]
concurrentTestGlob = "**/concurrent-*.test.ts" # Run files matching this pattern concurrently
```

Test files matching this pattern will behave as if the `--concurrent` flag was passed, running all tests within those files concurrently. This allows you to:

- Gradually migrate your test suite to concurrent execution
- Run integration tests concurrently while keeping unit tests sequential
- Separate fast concurrent tests from tests that require sequential execution

The `--concurrent` CLI flag will override this setting when specified, forcing all tests to run concurrently regardless of the glob pattern.

### Coverage options

In addition to the options documented in the [coverage documentation](./coverage.md), the following options are available:
docs/test/examples/concurrent-test-glob.md (new file, 132 lines)
@@ -0,0 +1,132 @@

# Concurrent Test Glob Example

This example demonstrates how to use the `concurrentTestGlob` option to selectively run tests concurrently based on file naming patterns.

## Project Structure

```text
my-project/
├── bunfig.toml
├── tests/
│   ├── unit/
│   │   ├── math.test.ts                 # Sequential
│   │   └── utils.test.ts                # Sequential
│   └── integration/
│       ├── concurrent-api.test.ts       # Concurrent
│       └── concurrent-database.test.ts  # Concurrent
```

## Configuration

### bunfig.toml

```toml
[test]
# Run all test files with "concurrent-" prefix concurrently
concurrentTestGlob = "**/concurrent-*.test.ts"
```

## Test Files

### Unit Test (Sequential)

`tests/unit/math.test.ts`

```typescript
import { test, expect } from "bun:test";

// These tests run sequentially by default
// Good for tests that share state or have specific ordering requirements
let sharedState = 0;

test("addition", () => {
  sharedState = 5 + 3;
  expect(sharedState).toBe(8);
});

test("uses previous state", () => {
  // This test depends on the previous test's state
  expect(sharedState).toBe(8);
});
```

### Integration Test (Concurrent)

`tests/integration/concurrent-api.test.ts`

```typescript
import { test, expect } from "bun:test";

// These tests automatically run concurrently due to filename matching the glob pattern.
// Using test() is equivalent to test.concurrent() when the file matches concurrentTestGlob.
// Each test is independent and can run in parallel.

test("fetch user data", async () => {
  const response = await fetch("/api/user/1");
  expect(response.ok).toBe(true);
});

test("fetch posts", async () => {
  const response = await fetch("/api/posts");
  expect(response.ok).toBe(true);
});

test("fetch comments", async () => {
  const response = await fetch("/api/comments");
  expect(response.ok).toBe(true);
});
```

## Running Tests

```bash
# Run all tests - concurrent-*.test.ts files will run concurrently
bun test

# Override: Force ALL tests to run concurrently
# Note: This overrides bunfig.toml and runs all tests concurrently, regardless of glob
bun test --concurrent

# Run only unit tests (sequential)
bun test tests/unit

# Run only integration tests (concurrent due to glob pattern)
bun test tests/integration
```

## Benefits

1. **Gradual Migration**: Migrate to concurrent tests file by file by renaming them
2. **Clear Organization**: File naming convention indicates execution mode
3. **Performance**: Integration tests run faster in parallel
4. **Safety**: Unit tests remain sequential where needed
5. **Flexibility**: Easy to change execution mode by renaming files

## Migration Strategy

To migrate existing tests to concurrent execution:

1. Start with independent integration tests
2. Rename files to match the glob pattern: `mv api.test.ts concurrent-api.test.ts`
3. Verify tests still pass
4. Monitor for race conditions or shared state issues
5. Continue migrating stable tests incrementally

## Tips

- Use descriptive prefixes: `concurrent-`, `parallel-`, `async-`
- Keep related sequential tests together
- Document why certain tests must remain sequential
- Use `test.concurrent()` for fine-grained control in sequential files (in files matched by `concurrentTestGlob`, plain `test()` already runs concurrently)
- Consider separate globs for different test types:

  ```toml
  [test]
  # Multiple patterns for different test categories
  concurrentTestGlob = [
    "**/integration/*.test.ts",
    "**/e2e/*.test.ts",
    "**/concurrent-*.test.ts"
  ]
  ```
@@ -149,12 +149,6 @@ describe.only("only", () => {

The following command will only execute tests #2 and #3.

```sh
$ bun test --only
```

The following command will only execute tests #1, #2 and #3.

```sh
$ bun test
```
@@ -8,6 +8,8 @@

# Thread::initializePlatformThreading() in ThreadingPOSIX.cpp) to the JS thread to suspend or resume
# it. So stopping the process would just create noise when debugging any long-running script.
process handle -p true -s false -n false SIGPWR
process handle -p true -s false -n false SIGUSR1
process handle -p true -s false -n false SIGUSR2

command script import -c lldb_pretty_printers.py
type category enable zig.lang
@@ -78,6 +78,12 @@

        "no-empty-file": "off",
        "no-unnecessary-await": "off"
      }
    },
    {
      "files": ["src/js/builtins/**"],
      "rules": {
        "no-unused-expressions": "off"
      }
    }
  ]
}
@@ -1,7 +1,7 @@

{
  "private": true,
  "name": "bun",
- "version": "1.2.23",
+ "version": "1.2.24",
  "workspaces": [
    "./packages/bun-types",
    "./packages/@types/bun"
@@ -33,7 +33,7 @@

    "bd:v": "(bun run --silent build:debug &> /tmp/bun.debug.build.log || (cat /tmp/bun.debug.build.log && rm -rf /tmp/bun.debug.build.log && exit 1)) && rm -f /tmp/bun.debug.build.log && ./build/debug/bun-debug",
    "bd": "BUN_DEBUG_QUIET_LOGS=1 bun --silent bd:v",
    "build:debug": "export COMSPEC=\"C:\\Windows\\System32\\cmd.exe\" && bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Debug -B build/debug --log-level=NOTICE",
    "build:debug:asan": "bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Debug -DENABLE_ASAN=ON -B build/debug-asan --log-level=NOTICE",
    "build:debug:noasan": "export COMSPEC=\"C:\\Windows\\System32\\cmd.exe\" && bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Debug -DENABLE_ASAN=OFF -B build/debug --log-level=NOTICE",
    "build:release": "bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Release -B build/release",
    "build:ci": "bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=Release -DCMAKE_VERBOSE_MAKEFILE=ON -DCI=true -B build/release-ci --verbose --fresh",
    "build:assert": "bun ./scripts/build.mjs -GNinja -DCMAKE_BUILD_TYPE=RelWithDebInfo -DENABLE_ASSERTIONS=ON -DENABLE_LOGS=ON -B build/release-assert",
@@ -84,6 +84,11 @@

    "node:test": "node ./scripts/runner.node.mjs --quiet --exec-path=$npm_execpath --node-tests ",
    "node:test:cp": "bun ./scripts/fetch-node-test.ts ",
    "clean:zig": "rm -rf build/debug/cache/zig build/debug/CMakeCache.txt 'build/debug/*.o' .zig-cache zig-out || true",
    "machine:linux:ubuntu": "./scripts/machine.mjs ssh --cloud=aws --arch=x64 --instance-type c7i.2xlarge --os=linux --distro=ubuntu --release=25.04",
    "machine:linux:debian": "./scripts/machine.mjs ssh --cloud=aws --arch=x64 --instance-type c7i.2xlarge --os=linux --distro=debian --release=12",
    "machine:linux:alpine": "./scripts/machine.mjs ssh --cloud=aws --arch=x64 --instance-type c7i.2xlarge --os=linux --distro=alpine --release=3.21",
    "machine:linux:amazonlinux": "./scripts/machine.mjs ssh --cloud=aws --arch=x64 --instance-type c7i.2xlarge --os=linux --distro=amazonlinux --release=2023",
    "machine:windows:2019": "./scripts/machine.mjs ssh --cloud=aws --arch=x64 --instance-type c7i.2xlarge --os=windows --release=2019",
    "sync-webkit-source": "bun ./scripts/sync-webkit-source.ts"
  }
}
@@ -62,7 +62,7 @@ export const platforms: Platform[] = [

  },
  {
    os: "linux",
-   arch: "aarch64",
+   arch: "arm64",
    abi: "musl",
    bin: "bun-linux-aarch64-musl",
    exe: "bin/bun",
packages/bun-types/bun.d.ts (vendored, 27 lines changed)
@@ -636,7 +636,7 @@ declare module "bun" {

 * import { YAML } from "bun";
 *
 * console.log(YAML.parse("123")) // 123
-* console.log(YAML.parse("123")) // null
+* console.log(YAML.parse("null")) // null
 * console.log(YAML.parse("false")) // false
 * console.log(YAML.parse("abc")) // "abc"
 * console.log(YAML.parse("- abc")) // [ "abc" ]
@@ -653,7 +653,10 @@

 *
 * @param input The JavaScript value to stringify.
 * @param replacer Currently not supported.
-* @param space A number for how many spaces each level of indentation gets, or a string used as indentation. The number is clamped between 0 and 10, and the first 10 characters of the string are used.
+* @param space A number for how many spaces each level of indentation gets, or a string used as indentation.
+* Without this parameter, outputs flow-style (single-line) YAML.
+* With this parameter, outputs block-style (multi-line) YAML.
+* The number is clamped between 0 and 10, and the first 10 characters of the string are used.
 * @returns A string containing the YAML document.
 *
 * @example
@@ -661,19 +664,24 @@ declare module "bun" {

 * import { YAML } from "bun";
 *
 * const input = {
-*   abc: "def"
+*   abc: "def",
+*   num: 123
 * };
 *
 * // Without space - flow style (single-line)
 * console.log(YAML.stringify(input));
 * // # output
 * // {abc: def,num: 123}
 *
 * // With space - block style (multi-line)
 * console.log(YAML.stringify(input, null, 2));
 * // abc: def
 * // num: 123
 *
 * const cycle = {};
 * cycle.obj = cycle;
 * console.log(YAML.stringify(cycle));
 * // # output
 * // &root
 * // obj:
 * //   *root
 * console.log(YAML.stringify(cycle, null, 2));
 * // &1
 * // obj: *1
 */
export function stringify(input: unknown, replacer?: undefined | null, space?: string | number): string;
}
@@ -5039,6 +5047,7 @@ declare module "bun" {

type SupportedCryptoAlgorithms =
  | "blake2b256"
  | "blake2b512"
  | "blake2s256"
  | "md4"
  | "md5"
  | "ripemd160"
packages/bun-types/redis.d.ts (vendored, 2831 lines changed): file diff suppressed because it is too large
packages/bun-types/sql.d.ts (vendored, 77 lines changed)
@@ -12,6 +12,68 @@ declare module "bun" {

    release(): void;
  }

  type ArrayType =
    | "BOOLEAN"
    | "BYTEA"
    | "CHAR"
    | "NAME"
    | "TEXT"
    | "CHAR"
    | "VARCHAR"
    | "SMALLINT"
    | "INT2VECTOR"
    | "INTEGER"
    | "INT"
    | "BIGINT"
    | "REAL"
    | "DOUBLE PRECISION"
    | "NUMERIC"
    | "MONEY"
    | "OID"
    | "TID"
    | "XID"
    | "CID"
    | "JSON"
    | "JSONB"
    | "JSONPATH"
    | "XML"
    | "POINT"
    | "LSEG"
    | "PATH"
    | "BOX"
    | "POLYGON"
    | "LINE"
    | "CIRCLE"
    | "CIDR"
    | "MACADDR"
    | "INET"
    | "MACADDR8"
    | "DATE"
    | "TIME"
    | "TIMESTAMP"
    | "TIMESTAMPTZ"
    | "INTERVAL"
    | "TIMETZ"
    | "BIT"
    | "VARBIT"
    | "ACLITEM"
    | "PG_DATABASE"
    | (string & {});

  /**
   * Represents a SQL array parameter
   */
  interface SQLArrayParameter {
    /**
     * The serialized values of the array parameter
     */
    serializedValues: string;
    /**
     * The type of the array parameter
     */
    arrayType: ArrayType;
  }

  /**
   * Represents a client within a transaction context. Extends SQL with savepoint
   * functionality.
@@ -630,6 +692,21 @@ declare module "bun" {

   */
  reserve(): Promise<ReservedSQL>;

  /**
   * Creates a new SQL array parameter
   * @param values - The values to create the array parameter from
   * @param typeNameOrTypeID - The type name or type ID to create the array parameter from; if omitted, it defaults to JSON
   * @returns A new SQL array parameter
   *
   * @example
   * ```ts
   * const array = sql.array([1, 2, 3], "INT");
   * await sql`CREATE TABLE users_posts (user_id INT, posts_id INT[])`;
   * await sql`INSERT INTO users_posts (user_id, posts_id) VALUES (${user.id}, ${array})`;
   * ```
   */
  array(values: any[], typeNameOrTypeID?: number | ArrayType): SQLArrayParameter;

  /**
   * Begins a new transaction.
   *
packages/bun-types/test.d.ts (vendored, 23 lines changed)
@@ -91,6 +91,7 @@ declare module "bun:test" {

export namespace jest {
  function restoreAllMocks(): void;
  function clearAllMocks(): void;
  function resetAllMocks(): void;
  function fn<T extends (...args: any[]) => any>(func?: T): Mock<T>;
  function setSystemTime(now?: number | Date): void;
  function setTimeout(milliseconds: number): void;
@@ -180,6 +181,9 @@ declare module "bun:test" {

  /**
   * Clear all mock state (calls, results, etc.) without restoring original implementation
   */
  clearAllMocks: typeof jest.clearAllMocks;
  resetAllMocks: typeof jest.resetAllMocks;
  useFakeTimers: typeof jest.useFakeTimers;
  useRealTimers: typeof jest.useRealTimers;
};

interface FunctionLike {
@@ -226,6 +230,11 @@ declare module "bun:test" {

  /**
   * Marks this group of tests to be executed concurrently.
   */
  concurrent: Describe<T>;
  /**
   * Marks this group of tests to be executed serially (one after another),
   * even when the --concurrent flag is used.
   */
  serial: Describe<T>;
  /**
   * Runs this group of tests, only if `condition` is true.
   *
@@ -423,7 +432,7 @@ declare module "bun:test" {

    options?: number | TestOptions,
  ): void;
  /**
-  * Skips all other tests, except this test when run with the `--only` option.
+  * Skips all other tests, except this test.
   */
  only: Test<T>;
  /**
@@ -455,6 +464,11 @@ declare module "bun:test" {

   * Runs the test concurrently with other concurrent tests.
   */
  concurrent: Test<T>;
  /**
   * Forces the test to run serially (not in parallel),
   * even when the --concurrent flag is used.
   */
  serial: Test<T>;
  /**
   * Runs this test, if `condition` is true.
   *
@@ -487,6 +501,13 @@ declare module "bun:test" {

   * @param condition if the test should run concurrently
   */
  concurrentIf(condition: boolean): Test<T>;
  /**
   * Forces the test to run serially (not in parallel), if `condition` is true.
   * This applies even when the --concurrent flag is used.
   *
   * @param condition if the test should run serially
   */
  serialIf(condition: boolean): Test<T>;
  /**
   * Returns a function that runs for each item in `table`.
   *
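A quick sketch of how the conditional qualifiers declared above read in practice (the CI environment check is just an illustration, not part of the API):

```ts
import { test, expect } from "bun:test";

const onCI = !!process.env.CI; // hypothetical condition

// Runs serially on CI, but may run concurrently elsewhere under --concurrent
test.serialIf(onCI)("touches shared fixtures", () => {
  expect(1 + 1).toBe(2);
});

// The mirror image: opt into concurrency only when the condition holds
test.concurrentIf(!onCI)("independent check", async () => {
  expect(await Promise.resolve(42)).toBe(42);
});
```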
@@ -6,10 +6,46 @@

#include <atomic>
#include <string.h>
#include "./default_ciphers.h"

// System-specific includes for certificate loading
#include "./root_certs_platform.h"
#ifdef _WIN32
#include <windows.h>
#include <wincrypt.h>
#else
// Linux/Unix includes
#include <dirent.h>
#include <stdio.h>
#include <limits.h>
#endif
static const int root_certs_size = sizeof(root_certs) / sizeof(root_certs[0]);

extern "C" void BUN__warn__extra_ca_load_failed(const char* filename, const char* error_msg);

// Forward declarations for platform-specific functions
// (Actual implementations are in platform-specific files)

// External variable from Zig CLI arguments
extern "C" bool Bun__Node__UseSystemCA;

// Helper function to check if system CA should be used
// Checks both CLI flag (--use-system-ca) and environment variable (NODE_USE_SYSTEM_CA=1)
static bool us_should_use_system_ca() {
  // Check CLI flag first
  if (Bun__Node__UseSystemCA) {
    return true;
  }

  // Check environment variable
  const char *use_system_ca = getenv("NODE_USE_SYSTEM_CA");
  return use_system_ca && strcmp(use_system_ca, "1") == 0;
}
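From the user's side, either switch named in the comment above enables this path (`app.ts` is a hypothetical entry point):

```bash
# CLI flag
$ bun --use-system-ca app.ts

# Environment variable
$ NODE_USE_SYSTEM_CA=1 bun app.ts
```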
// Platform-specific system certificate loading implementations are separated:
// - macOS: root_certs_darwin.cpp (Security framework with dynamic loading)
// - Windows: root_certs_windows.cpp (Windows CryptoAPI)
// - Linux/Unix: us_load_system_certificates_linux() below

// This callback is used to avoid the default passphrase callback in OpenSSL
// which will typically prompt for the passphrase. The prompting is designed
// for the OpenSSL CLI, but works poorly for this case because it involves
@@ -101,7 +137,8 @@ end:

static void us_internal_init_root_certs(
    X509 *root_cert_instances[root_certs_size],
-   STACK_OF(X509) *&root_extra_cert_instances) {
+   STACK_OF(X509) *&root_extra_cert_instances,
+   STACK_OF(X509) *&root_system_cert_instances) {
  static std::atomic_flag root_cert_instances_lock = ATOMIC_FLAG_INIT;
  static std::atomic_bool root_cert_instances_initialized = 0;

@@ -123,6 +160,17 @@ static void us_internal_init_root_certs(

    if (extra_certs && extra_certs[0]) {
      root_extra_cert_instances = us_ssl_ctx_load_all_certs_from_file(extra_certs);
    }

    // load system certificates if system CA usage is enabled
    // (--use-system-ca flag or NODE_USE_SYSTEM_CA=1)
    if (us_should_use_system_ca()) {
#ifdef __APPLE__
      us_load_system_certificates_macos(&root_system_cert_instances);
#elif defined(_WIN32)
      us_load_system_certificates_windows(&root_system_cert_instances);
#else
      us_load_system_certificates_linux(&root_system_cert_instances);
#endif
    }
  }

  atomic_flag_clear_explicit(&root_cert_instances_lock,
@@ -137,12 +185,15 @@ extern "C" int us_internal_raw_root_certs(struct us_cert_string_t **out) {

struct us_default_ca_certificates {
  X509 *root_cert_instances[root_certs_size];
  STACK_OF(X509) *root_extra_cert_instances;
  STACK_OF(X509) *root_system_cert_instances;
};

us_default_ca_certificates* us_get_default_ca_certificates() {
- static us_default_ca_certificates default_ca_certificates = {{NULL}, NULL};
+ static us_default_ca_certificates default_ca_certificates = {{NULL}, NULL, NULL};

- us_internal_init_root_certs(default_ca_certificates.root_cert_instances, default_ca_certificates.root_extra_cert_instances);
+ us_internal_init_root_certs(default_ca_certificates.root_cert_instances,
+                             default_ca_certificates.root_extra_cert_instances,
+                             default_ca_certificates.root_system_cert_instances);

  return &default_ca_certificates;
}
@@ -151,20 +202,33 @@ STACK_OF(X509) *us_get_root_extra_cert_instances() {

  return us_get_default_ca_certificates()->root_extra_cert_instances;
}

STACK_OF(X509) *us_get_root_system_cert_instances() {
  if (!us_should_use_system_ca())
    return NULL;
  // Ensure single-path initialization via us_internal_init_root_certs
  auto certs = us_get_default_ca_certificates();
  return certs->root_system_cert_instances;
}

extern "C" X509_STORE *us_get_default_ca_store() {
  X509_STORE *store = X509_STORE_new();
  if (store == NULL) {
    return NULL;
  }

- if (!X509_STORE_set_default_paths(store)) {
-   X509_STORE_free(store);
-   return NULL;
+ // Only load system default paths when system CA usage is enabled
+ // Otherwise, rely on bundled certificates only (like Node.js behavior)
+ if (us_should_use_system_ca()) {
+   if (!X509_STORE_set_default_paths(store)) {
+     X509_STORE_free(store);
+     return NULL;
+   }
  }

  us_default_ca_certificates *default_ca_certificates = us_get_default_ca_certificates();
  X509** root_cert_instances = default_ca_certificates->root_cert_instances;
  STACK_OF(X509) *root_extra_cert_instances = default_ca_certificates->root_extra_cert_instances;
  STACK_OF(X509) *root_system_cert_instances = default_ca_certificates->root_system_cert_instances;

  // load all root_cert_instances on the default ca store
  for (size_t i = 0; i < root_certs_size; i++) {
@@ -183,8 +247,59 @@ extern "C" X509_STORE *us_get_default_ca_store() {

    }
  }

  if (us_should_use_system_ca() && root_system_cert_instances) {
    for (int i = 0; i < sk_X509_num(root_system_cert_instances); i++) {
      X509 *cert = sk_X509_value(root_system_cert_instances, i);
      X509_up_ref(cert);
      X509_STORE_add_cert(store, cert);
    }
  }

  return store;
}
extern "C" const char *us_get_default_ciphers() {
  return DEFAULT_CIPHER_LIST;
}
}

// Platform-specific implementations for loading system certificates

#if defined(_WIN32)
// Windows implementation is split to avoid header conflicts:
// - root_certs_windows.cpp loads raw certificate data (uses Windows headers)
// - This file converts raw data to X509* (uses OpenSSL headers)

#include <vector>

struct RawCertificate {
  std::vector<unsigned char> data;
};

// Defined in root_certs_windows.cpp - loads raw certificate data
extern void us_load_system_certificates_windows_raw(
    std::vector<RawCertificate>& raw_certs);

// Convert raw Windows certificates to OpenSSL X509 format
void us_load_system_certificates_windows(STACK_OF(X509) **system_certs) {
  *system_certs = sk_X509_new_null();
  if (*system_certs == NULL) {
    return;
  }

  // Load raw certificates from Windows stores
  std::vector<RawCertificate> raw_certs;
  us_load_system_certificates_windows_raw(raw_certs);

  // Convert each raw certificate to X509
  for (const auto& raw_cert : raw_certs) {
    const unsigned char* data = raw_cert.data.data();
    X509* x509_cert = d2i_X509(NULL, &data, raw_cert.data.size());
    if (x509_cert != NULL) {
      sk_X509_push(*system_certs, x509_cert);
    }
  }
}

#else
// Linux and other Unix-like systems - implementation is in root_certs_linux.cpp
extern "C" void us_load_system_certificates_linux(STACK_OF(X509) **system_certs);
#endif
packages/bun-usockets/src/crypto/root_certs_darwin.cpp (new file, 431 lines)
@@ -0,0 +1,431 @@

#ifdef __APPLE__

#include <dlfcn.h>
#include <CoreFoundation/CoreFoundation.h>
#include <atomic>
#include <openssl/x509.h>
#include <openssl/x509_vfy.h>
#include <stdio.h>

// Security framework types and constants - dynamically loaded
typedef struct OpaqueSecCertificateRef* SecCertificateRef;
typedef struct OpaqueSecTrustRef* SecTrustRef;
typedef struct OpaqueSecPolicyRef* SecPolicyRef;
typedef int32_t OSStatus;
typedef uint32_t SecTrustSettingsDomain;

// Security framework constants
enum {
  errSecSuccess = 0,
  errSecItemNotFound = -25300,
};

// Trust settings domains
enum {
  kSecTrustSettingsDomainUser = 0,
  kSecTrustSettingsDomainAdmin = 1,
  kSecTrustSettingsDomainSystem = 2,
};

// Trust status enumeration
enum class TrustStatus {
  TRUSTED,
  DISTRUSTED,
  UNSPECIFIED
};

// Dynamic Security framework loader
class SecurityFramework {
public:
  void* handle;
  void* cf_handle;

  // Core Foundation constants
  CFStringRef kSecClass;
  CFStringRef kSecClassCertificate;
  CFStringRef kSecMatchLimit;
  CFStringRef kSecMatchLimitAll;
  CFStringRef kSecReturnRef;
  CFStringRef kSecMatchTrustedOnly;
  CFBooleanRef kCFBooleanTrue;
  CFAllocatorRef kCFAllocatorDefault;
  CFArrayCallBacks* kCFTypeArrayCallBacks;
  CFDictionaryKeyCallBacks* kCFTypeDictionaryKeyCallBacks;
  CFDictionaryValueCallBacks* kCFTypeDictionaryValueCallBacks;

  // Core Foundation function pointers
  CFMutableArrayRef (*CFArrayCreateMutable)(CFAllocatorRef allocator, CFIndex capacity, const CFArrayCallBacks *callBacks);
  CFArrayRef (*CFArrayCreate)(CFAllocatorRef allocator, const void **values, CFIndex numValues, const CFArrayCallBacks *callBacks);
  void (*CFArraySetValueAtIndex)(CFMutableArrayRef theArray, CFIndex idx, const void *value);
  const void* (*CFArrayGetValueAtIndex)(CFArrayRef theArray, CFIndex idx);
  CFIndex (*CFArrayGetCount)(CFArrayRef theArray);
  void (*CFRelease)(CFTypeRef cf);
  CFDictionaryRef (*CFDictionaryCreate)(CFAllocatorRef allocator, const void **keys, const void **values, CFIndex numValues, const CFDictionaryKeyCallBacks *keyCallBacks, const CFDictionaryValueCallBacks *valueCallBacks);
  const UInt8* (*CFDataGetBytePtr)(CFDataRef theData);
  CFIndex (*CFDataGetLength)(CFDataRef theData);

  // Security framework function pointers
  OSStatus (*SecItemCopyMatching)(CFDictionaryRef query, CFTypeRef *result);
  CFDataRef (*SecCertificateCopyData)(SecCertificateRef certificate);
  OSStatus (*SecTrustCreateWithCertificates)(CFArrayRef certificates, CFArrayRef policies, SecTrustRef *trust);
  SecPolicyRef (*SecPolicyCreateSSL)(Boolean server, CFStringRef hostname);
  Boolean (*SecTrustEvaluateWithError)(SecTrustRef trust, CFErrorRef *error);
  OSStatus (*SecTrustSettingsCopyTrustSettings)(SecCertificateRef certRef, SecTrustSettingsDomain domain, CFArrayRef *trustSettings);

  SecurityFramework() : handle(nullptr), cf_handle(nullptr),
      kSecClass(nullptr), kSecClassCertificate(nullptr),
      kSecMatchLimit(nullptr), kSecMatchLimitAll(nullptr),
      kSecReturnRef(nullptr), kSecMatchTrustedOnly(nullptr), kCFBooleanTrue(nullptr),
      kCFAllocatorDefault(nullptr), kCFTypeArrayCallBacks(nullptr),
      kCFTypeDictionaryKeyCallBacks(nullptr), kCFTypeDictionaryValueCallBacks(nullptr),
      CFArrayCreateMutable(nullptr), CFArrayCreate(nullptr),
      CFArraySetValueAtIndex(nullptr), CFArrayGetValueAtIndex(nullptr),
      CFArrayGetCount(nullptr), CFRelease(nullptr),
      CFDictionaryCreate(nullptr), CFDataGetBytePtr(nullptr), CFDataGetLength(nullptr),
      SecItemCopyMatching(nullptr), SecCertificateCopyData(nullptr),
      SecTrustCreateWithCertificates(nullptr), SecPolicyCreateSSL(nullptr),
      SecTrustEvaluateWithError(nullptr), SecTrustSettingsCopyTrustSettings(nullptr) {}

  ~SecurityFramework() {
    if (handle) {
      dlclose(handle);
    }
    if (cf_handle) {
      dlclose(cf_handle);
    }
  }

  bool load() {
    if (handle && cf_handle) return true; // Already loaded

    // Load CoreFoundation framework
    cf_handle = dlopen("/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation", RTLD_LAZY | RTLD_LOCAL);
    if (!cf_handle) {
      fprintf(stderr, "Failed to load CoreFoundation framework: %s\n", dlerror());
      return false;
    }

    // Load Security framework
    handle = dlopen("/System/Library/Frameworks/Security.framework/Security", RTLD_LAZY | RTLD_LOCAL);
    if (!handle) {
      fprintf(stderr, "Failed to load Security framework: %s\n", dlerror());
      dlclose(cf_handle);
      cf_handle = nullptr;
      return false;
    }

    // Load constants and functions
    if (!load_constants()) {
      if (handle) {
        dlclose(handle);
        handle = nullptr;
      }
      if (cf_handle) {
        dlclose(cf_handle);
        cf_handle = nullptr;
      }
      return false;
    }

    if (!load_functions()) {
      if (handle) {
        dlclose(handle);
        handle = nullptr;
      }
      if (cf_handle) {
        dlclose(cf_handle);
        cf_handle = nullptr;
      }
      return false;
    }

    return true;
  }

private:
  bool load_constants() {
    // Load Security framework constants
    void* ptr = dlsym(handle, "kSecClass");
    if (!ptr) { fprintf(stderr, "DEBUG: kSecClass not found\n"); return false; }
    kSecClass = *(CFStringRef*)ptr;

    ptr = dlsym(handle, "kSecClassCertificate");
    if (!ptr) { fprintf(stderr, "DEBUG: kSecClassCertificate not found\n"); return false; }
    kSecClassCertificate = *(CFStringRef*)ptr;

    ptr = dlsym(handle, "kSecMatchLimit");
    if (!ptr) { fprintf(stderr, "DEBUG: kSecMatchLimit not found\n"); return false; }
    kSecMatchLimit = *(CFStringRef*)ptr;

    ptr = dlsym(handle, "kSecMatchLimitAll");
    if (!ptr) { fprintf(stderr, "DEBUG: kSecMatchLimitAll not found\n"); return false; }
    kSecMatchLimitAll = *(CFStringRef*)ptr;

    ptr = dlsym(handle, "kSecReturnRef");
    if (!ptr) { fprintf(stderr, "DEBUG: kSecReturnRef not found\n"); return false; }
    kSecReturnRef = *(CFStringRef*)ptr;

    ptr = dlsym(handle, "kSecMatchTrustedOnly");
    if (!ptr) { fprintf(stderr, "DEBUG: kSecMatchTrustedOnly not found\n"); return false; }
    kSecMatchTrustedOnly = *(CFStringRef*)ptr;

    // Load CoreFoundation constants
    ptr = dlsym(cf_handle, "kCFBooleanTrue");
    if (!ptr) { fprintf(stderr, "DEBUG: kCFBooleanTrue not found\n"); return false; }
    kCFBooleanTrue = *(CFBooleanRef*)ptr;

    ptr = dlsym(cf_handle, "kCFAllocatorDefault");
    if (!ptr) { fprintf(stderr, "DEBUG: kCFAllocatorDefault not found\n"); return false; }
    kCFAllocatorDefault = *(CFAllocatorRef*)ptr;

    ptr = dlsym(cf_handle, "kCFTypeArrayCallBacks");
    if (!ptr) { fprintf(stderr, "DEBUG: kCFTypeArrayCallBacks not found\n"); return false; }
    kCFTypeArrayCallBacks = (CFArrayCallBacks*)ptr;

    ptr = dlsym(cf_handle, "kCFTypeDictionaryKeyCallBacks");
    if (!ptr) { fprintf(stderr, "DEBUG: kCFTypeDictionaryKeyCallBacks not found\n"); return false; }
    kCFTypeDictionaryKeyCallBacks = (CFDictionaryKeyCallBacks*)ptr;

    ptr = dlsym(cf_handle, "kCFTypeDictionaryValueCallBacks");
    if (!ptr) { fprintf(stderr, "DEBUG: kCFTypeDictionaryValueCallBacks not found\n"); return false; }
    kCFTypeDictionaryValueCallBacks = (CFDictionaryValueCallBacks*)ptr;

    return true;
  }

  bool load_functions() {
    // Load CoreFoundation functions
    CFArrayCreateMutable = (CFMutableArrayRef (*)(CFAllocatorRef, CFIndex, const CFArrayCallBacks*))dlsym(cf_handle, "CFArrayCreateMutable");
    CFArrayCreate = (CFArrayRef (*)(CFAllocatorRef, const void**, CFIndex, const CFArrayCallBacks*))dlsym(cf_handle, "CFArrayCreate");
    CFArraySetValueAtIndex = (void (*)(CFMutableArrayRef, CFIndex, const void*))dlsym(cf_handle, "CFArraySetValueAtIndex");
    CFArrayGetValueAtIndex = (const void* (*)(CFArrayRef, CFIndex))dlsym(cf_handle, "CFArrayGetValueAtIndex");
    CFArrayGetCount = (CFIndex (*)(CFArrayRef))dlsym(cf_handle, "CFArrayGetCount");
    CFRelease = (void (*)(CFTypeRef))dlsym(cf_handle, "CFRelease");
    CFDictionaryCreate = (CFDictionaryRef (*)(CFAllocatorRef, const void**, const void**, CFIndex, const CFDictionaryKeyCallBacks*, const CFDictionaryValueCallBacks*))dlsym(cf_handle, "CFDictionaryCreate");
    CFDataGetBytePtr = (const UInt8* (*)(CFDataRef))dlsym(cf_handle, "CFDataGetBytePtr");
    CFDataGetLength = (CFIndex (*)(CFDataRef))dlsym(cf_handle, "CFDataGetLength");

    // Load Security framework functions
    SecItemCopyMatching = (OSStatus (*)(CFDictionaryRef, CFTypeRef*))dlsym(handle, "SecItemCopyMatching");
    SecCertificateCopyData = (CFDataRef (*)(SecCertificateRef))dlsym(handle, "SecCertificateCopyData");
    SecTrustCreateWithCertificates = (OSStatus (*)(CFArrayRef, CFArrayRef, SecTrustRef*))dlsym(handle, "SecTrustCreateWithCertificates");
    SecPolicyCreateSSL = (SecPolicyRef (*)(Boolean, CFStringRef))dlsym(handle, "SecPolicyCreateSSL");
    SecTrustEvaluateWithError = (Boolean (*)(SecTrustRef, CFErrorRef*))dlsym(handle, "SecTrustEvaluateWithError");
    SecTrustSettingsCopyTrustSettings = (OSStatus (*)(SecCertificateRef, SecTrustSettingsDomain, CFArrayRef*))dlsym(handle, "SecTrustSettingsCopyTrustSettings");

    return CFArrayCreateMutable && CFArrayCreate && CFArraySetValueAtIndex &&
           CFArrayGetValueAtIndex && CFArrayGetCount && CFRelease &&
           CFDictionaryCreate && CFDataGetBytePtr && CFDataGetLength &&
           SecItemCopyMatching && SecCertificateCopyData &&
           SecTrustCreateWithCertificates && SecPolicyCreateSSL &&
           SecTrustEvaluateWithError && SecTrustSettingsCopyTrustSettings;
  }
};

// Global instance for dynamic loading
static std::atomic<SecurityFramework*> g_security_framework{nullptr};

static SecurityFramework* get_security_framework() {
  SecurityFramework* framework = g_security_framework.load();
  if (!framework) {
    SecurityFramework* new_framework = new SecurityFramework();
    if (new_framework->load()) {
      SecurityFramework* expected = nullptr;
      if (g_security_framework.compare_exchange_strong(expected, new_framework)) {
        framework = new_framework;
      } else {
        delete new_framework;
        framework = expected;
      }
    } else {
      delete new_framework;
      framework = nullptr;
    }
  }
  return framework;
}

// Helper function to determine if a certificate is self-issued
static bool is_certificate_self_issued(X509* cert) {
  X509_NAME* subject = X509_get_subject_name(cert);
  X509_NAME* issuer = X509_get_issuer_name(cert);

  return subject && issuer && X509_NAME_cmp(subject, issuer) == 0;
}

// Validate certificate trust using Security framework
static bool is_certificate_trust_valid(SecurityFramework* security, SecCertificateRef cert_ref) {
  CFMutableArrayRef subj_certs = security->CFArrayCreateMutable(nullptr, 1, security->kCFTypeArrayCallBacks);
  if (!subj_certs) return false;

  security->CFArraySetValueAtIndex(subj_certs, 0, cert_ref);

  SecPolicyRef policy = security->SecPolicyCreateSSL(true, nullptr);
  if (!policy) {
    security->CFRelease(subj_certs);
    return false;
  }

  CFArrayRef policies = security->CFArrayCreate(nullptr, (const void**)&policy, 1, security->kCFTypeArrayCallBacks);
  if (!policies) {
    security->CFRelease(policy);
    security->CFRelease(subj_certs);
    return false;
  }

  SecTrustRef sec_trust = nullptr;
  OSStatus ortn = security->SecTrustCreateWithCertificates(subj_certs, policies, &sec_trust);

  bool result = false;
  if (ortn == errSecSuccess && sec_trust) {
    result = security->SecTrustEvaluateWithError(sec_trust, nullptr);
  }

  // Cleanup
  if (sec_trust) security->CFRelease(sec_trust);
  security->CFRelease(policies);
  security->CFRelease(policy);
  security->CFRelease(subj_certs);

  return result;
}

// Check trust settings for policy (simplified version)
static TrustStatus is_trust_settings_trusted_for_policy(SecurityFramework* security, CFArrayRef trust_settings, bool is_self_issued) {
  if (!trust_settings) {
    return TrustStatus::UNSPECIFIED;
  }

  // Empty trust settings array means "always trust this certificate"
  if (security->CFArrayGetCount(trust_settings) == 0) {
    return is_self_issued ? TrustStatus::TRUSTED : TrustStatus::UNSPECIFIED;
  }

  // For simplicity, we'll do basic checking here
  // A full implementation would parse the trust dictionary entries
  return TrustStatus::UNSPECIFIED;
}

// Check if certificate is trusted for server auth policy
static bool is_certificate_trusted_for_policy(SecurityFramework* security, X509* cert, SecCertificateRef cert_ref) {
  bool is_self_issued = is_certificate_self_issued(cert);
  bool trust_evaluated = false;

  // Check user, admin, then system trust domains
  for (const auto& trust_domain : {kSecTrustSettingsDomainUser, kSecTrustSettingsDomainAdmin, kSecTrustSettingsDomainSystem}) {
    CFArrayRef trust_settings = nullptr;
    OSStatus err = security->SecTrustSettingsCopyTrustSettings(cert_ref, trust_domain, &trust_settings);

    if (err != errSecSuccess && err != errSecItemNotFound) {
      continue;
    }

    if (err == errSecSuccess && trust_settings) {
      TrustStatus result = is_trust_settings_trusted_for_policy(security, trust_settings, is_self_issued);
      security->CFRelease(trust_settings);

      if (result == TrustStatus::TRUSTED) {
        return true;
      } else if (result == TrustStatus::DISTRUSTED) {
        return false;
      }
    }

    // If no trust settings and we haven't evaluated trust yet, check trust validity
    if (!trust_settings && !trust_evaluated) {
      if (is_certificate_trust_valid(security, cert_ref)) {
        return true;
      }
      trust_evaluated = true;
    }
  }

  return false;
}

// Main function to load system certificates on macOS
extern "C" void us_load_system_certificates_macos(STACK_OF(X509) **system_certs) {
  *system_certs = sk_X509_new_null();
  if (!*system_certs) {
    return;
  }

  SecurityFramework* security = get_security_framework();
  if (!security) {
    return; // Fail silently
  }

  // Create search dictionary for certificates
  CFTypeRef search_keys[] = {
    security->kSecClass,
    security->kSecMatchLimit,
    security->kSecReturnRef,
    security->kSecMatchTrustedOnly,
  };
  CFTypeRef search_values[] = {
    security->kSecClassCertificate,
    security->kSecMatchLimitAll,
    security->kCFBooleanTrue,
    security->kCFBooleanTrue,
  };

  CFDictionaryRef search = security->CFDictionaryCreate(
    security->kCFAllocatorDefault,
    search_keys,
    search_values,
    4,
    security->kCFTypeDictionaryKeyCallBacks,
    security->kCFTypeDictionaryValueCallBacks
  );

  if (!search) {
    return;
  }

  CFArrayRef certificates = nullptr;
  OSStatus status = security->SecItemCopyMatching(search, (CFTypeRef*)&certificates);
  security->CFRelease(search);

  if (status != errSecSuccess || !certificates) {
    return;
  }

  CFIndex count = security->CFArrayGetCount(certificates);

  for (CFIndex i = 0; i < count; ++i) {
    SecCertificateRef cert_ref = (SecCertificateRef)security->CFArrayGetValueAtIndex(certificates, i);
    if (!cert_ref) continue;

    // Get certificate data
    CFDataRef cert_data = security->SecCertificateCopyData(cert_ref);
    if (!cert_data) continue;

    // Convert to X509
    const unsigned char* data_ptr = security->CFDataGetBytePtr(cert_data);
    long data_len = security->CFDataGetLength(cert_data);
    X509* x509_cert = d2i_X509(nullptr, &data_ptr, data_len);
    security->CFRelease(cert_data);

    if (!x509_cert) continue;

    // Only consider CA certificates
    if (X509_check_ca(x509_cert) == 1 &&
        is_certificate_trusted_for_policy(security, x509_cert, cert_ref)) {
      sk_X509_push(*system_certs, x509_cert);
    } else {
      X509_free(x509_cert);
    }
  }

  security->CFRelease(certificates);
}

// Cleanup function for Security framework
extern "C" void us_cleanup_security_framework() {
  SecurityFramework* framework = g_security_framework.exchange(nullptr);
  if (framework) {
    delete framework;
  }
}

#endif // __APPLE__
@@ -5,6 +5,7 @@

#define CPPDECL extern "C"

STACK_OF(X509) *us_get_root_extra_cert_instances();
STACK_OF(X509) *us_get_root_system_cert_instances();

#else
#define CPPDECL extern
packages/bun-usockets/src/crypto/root_certs_linux.cpp (new file, 170 lines)
@@ -0,0 +1,170 @@
|
||||
#ifndef _WIN32
#ifndef __APPLE__

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <openssl/x509.h>
#include <openssl/x509_vfy.h>
#include <openssl/pem.h>

extern "C" void BUN__warn__extra_ca_load_failed(const char* filename, const char* error_msg);

// Helper function to load certificates from a directory
static void load_certs_from_directory(const char* dir_path, STACK_OF(X509)* cert_stack) {
  DIR* dir = opendir(dir_path);
  if (!dir) {
    return;
  }

  struct dirent* entry;
  while ((entry = readdir(dir)) != NULL) {
    // Skip . and ..
    if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
      continue;
    }

    // Check if file has .crt, .pem, or .cer extension
    const char* ext = strrchr(entry->d_name, '.');
    if (!ext || (strcmp(ext, ".crt") != 0 && strcmp(ext, ".pem") != 0 && strcmp(ext, ".cer") != 0)) {
      continue;
    }

    // Build full path
    char filepath[PATH_MAX];
    snprintf(filepath, sizeof(filepath), "%s/%s", dir_path, entry->d_name);

    // Try to load certificate
    FILE* file = fopen(filepath, "r");
    if (file) {
      X509* cert = PEM_read_X509(file, NULL, NULL, NULL);
      fclose(file);

      if (cert) {
        if (!sk_X509_push(cert_stack, cert)) {
          X509_free(cert);
        }
      }
    }
  }

  closedir(dir);
}

// Helper function to load certificates from a bundle file
static void load_certs_from_bundle(const char* bundle_path, STACK_OF(X509)* cert_stack) {
  FILE* file = fopen(bundle_path, "r");
  if (!file) {
    return;
  }

  X509* cert;
  while ((cert = PEM_read_X509(file, NULL, NULL, NULL)) != NULL) {
    if (!sk_X509_push(cert_stack, cert)) {
      X509_free(cert);
      break;
    }
  }
  ERR_clear_error();

  fclose(file);
}

// Main function to load system certificates on Linux and other Unix-like systems
extern "C" void us_load_system_certificates_linux(STACK_OF(X509) **system_certs) {
  *system_certs = sk_X509_new_null();
  if (*system_certs == NULL) {
    return;
  }

  // First check environment variables (same as Node.js and OpenSSL)
  const char* ssl_cert_file = getenv("SSL_CERT_FILE");
  const char* ssl_cert_dir = getenv("SSL_CERT_DIR");

  // If SSL_CERT_FILE is set, load from it
  if (ssl_cert_file && strlen(ssl_cert_file) > 0) {
    load_certs_from_bundle(ssl_cert_file, *system_certs);
  }

  // If SSL_CERT_DIR is set, load from each directory (colon-separated)
  if (ssl_cert_dir && strlen(ssl_cert_dir) > 0) {
    char* dir_copy = strdup(ssl_cert_dir);
    if (dir_copy) {
      char* token = strtok(dir_copy, ":");
      while (token != NULL) {
        // Skip empty tokens
        if (strlen(token) > 0) {
          load_certs_from_directory(token, *system_certs);
        }
        token = strtok(NULL, ":");
      }
      free(dir_copy);
    }
  }

  // If environment variables were set, use only those (even if they yield zero certs)
  if (ssl_cert_file || ssl_cert_dir) {
    return;
  }

  // Otherwise, load certificates from standard Linux/Unix paths.
  // These are the common locations for system certificates.

  // Common certificate bundle locations (single file with multiple certs).
  // These paths are based on common Linux distributions and OpenSSL defaults.
  static const char* bundle_paths[] = {
    "/etc/ssl/certs/ca-certificates.crt",                    // Debian/Ubuntu/Gentoo
    "/etc/pki/tls/certs/ca-bundle.crt",                      // Fedora/RHEL 6
    "/etc/ssl/ca-bundle.pem",                                // OpenSUSE
    "/etc/pki/tls/cert.pem",                                 // Fedora/RHEL 7+
    "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",     // CentOS/RHEL 7+
    "/etc/ssl/cert.pem",                                     // Alpine Linux, macOS OpenSSL
    "/usr/local/etc/openssl/cert.pem",                       // Homebrew OpenSSL on macOS
    "/usr/local/share/ca-certificates/ca-certificates.crt", // Custom CA installs
    NULL
  };

  // Common certificate directory locations (multiple files).
  // Note: OpenSSL expects hashed symlinks in directories (c_rehash format).
  static const char* dir_paths[] = {
    "/etc/ssl/certs",                // Common location (Debian/Ubuntu with hashed links)
    "/etc/pki/tls/certs",            // RHEL/Fedora
    "/usr/share/ca-certificates",    // Debian/Ubuntu (original certs, not hashed)
    "/usr/local/share/certs",        // FreeBSD
    "/etc/openssl/certs",            // NetBSD
    "/var/ssl/certs",                // AIX
    "/usr/local/etc/openssl/certs",  // Homebrew OpenSSL on macOS
    "/System/Library/OpenSSL/certs", // macOS system OpenSSL (older versions)
    NULL
  };

  // Try loading from bundle files first
  for (const char** path = bundle_paths; *path != NULL; path++) {
    load_certs_from_bundle(*path, *system_certs);
  }

  // Then try loading from directories
  for (const char** path = dir_paths; *path != NULL; path++) {
    load_certs_from_directory(*path, *system_certs);
  }

  // Also check the NODE_EXTRA_CA_CERTS environment variable
  const char* extra_ca_certs = getenv("NODE_EXTRA_CA_CERTS");
  if (extra_ca_certs && strlen(extra_ca_certs) > 0) {
    FILE* file = fopen(extra_ca_certs, "r");
    if (file) {
      X509* cert;
      while ((cert = PEM_read_X509(file, NULL, NULL, NULL)) != NULL) {
        sk_X509_push(*system_certs, cert);
      }
      fclose(file);
    } else {
      BUN__warn__extra_ca_load_failed(extra_ca_certs, "Failed to open file");
    }
  }
}

#endif // !__APPLE__
#endif // !_WIN32
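The Linux loader hands back an OpenSSL `STACK_OF(X509)`. A minimal sketch of how a consumer might fold those roots into a verification store — the function name `build_trust_store_from_system_certs` is illustrative, not from the diff; only the OpenSSL calls are real:

    // Sketch only: fold the loaded roots into an X509_STORE for verification.
    // Error handling is elided for brevity.
    #include <openssl/x509.h>
    #include <openssl/x509_vfy.h>

    static X509_STORE* build_trust_store_from_system_certs() {
        STACK_OF(X509)* system_certs = nullptr;
        us_load_system_certificates_linux(&system_certs);
        if (!system_certs) return nullptr;

        X509_STORE* store = X509_STORE_new();
        for (int i = 0; i < sk_X509_num(system_certs); i++) {
            // X509_STORE_add_cert takes its own reference, so the stack
            // still owns the certificates and must be freed below.
            X509_STORE_add_cert(store, sk_X509_value(system_certs, i));
        }
        sk_X509_pop_free(system_certs, X509_free);
        return store;
    }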
packages/bun-usockets/src/crypto/root_certs_platform.h (new file, 18 lines)
@@ -0,0 +1,18 @@
#pragma once

#include <openssl/x509.h>

// Platform-specific certificate loading functions
extern "C" {

// Load system certificates for the current platform
void us_load_system_certificates_linux(STACK_OF(X509) **system_certs);
void us_load_system_certificates_macos(STACK_OF(X509) **system_certs);
void us_load_system_certificates_windows(STACK_OF(X509) **system_certs);

// Platform-specific cleanup functions
#ifdef __APPLE__
void us_cleanup_security_framework();
#endif

}
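A plausible call site for this header is a small compile-time dispatch. A sketch of that pattern — the wrapper name `us_load_system_certificates` is an assumption, not taken from the diff:

    #include "root_certs_platform.h"

    // Illustrative wrapper: pick the platform loader at compile time.
    static void us_load_system_certificates(STACK_OF(X509) **system_certs) {
    #if defined(_WIN32)
        us_load_system_certificates_windows(system_certs);
    #elif defined(__APPLE__)
        us_load_system_certificates_macos(system_certs);
    #else
        us_load_system_certificates_linux(system_certs);
    #endif
    }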
packages/bun-usockets/src/crypto/root_certs_windows.cpp (new file, 53 lines)
@@ -0,0 +1,53 @@
#ifdef _WIN32

#include <windows.h>
#include <wincrypt.h>
#include <vector>
#include <cstring>

// Forward declaration to avoid including OpenSSL headers here.
// This prevents conflicts with Windows macros like X509_NAME.
// Note: We don't use the STACK_OF macro here since we don't have OpenSSL headers.

// Structure to hold raw certificate data
struct RawCertificate {
  std::vector<unsigned char> data;
};

// Helper function to load raw certificates from a Windows certificate store
static void LoadRawCertsFromStore(std::vector<RawCertificate>& raw_certs,
                                  DWORD store_flags,
                                  const wchar_t* store_name) {
  HCERTSTORE cert_store = CertOpenStore(
      CERT_STORE_PROV_SYSTEM_W,
      0,
      0,
      store_flags | CERT_STORE_READONLY_FLAG,
      store_name
  );

  if (cert_store == NULL) {
    return;
  }

  PCCERT_CONTEXT cert_context = NULL;
  while ((cert_context = CertEnumCertificatesInStore(cert_store, cert_context)) != NULL) {
    RawCertificate raw_cert;
    raw_cert.data.assign(cert_context->pbCertEncoded,
                         cert_context->pbCertEncoded + cert_context->cbCertEncoded);
    raw_certs.push_back(std::move(raw_cert));
  }

  CertCloseStore(cert_store, 0);
}

// Main function to load raw system certificates on Windows.
// Returns certificates as raw DER data to avoid OpenSSL header conflicts.
extern void us_load_system_certificates_windows_raw(
    std::vector<RawCertificate>& raw_certs) {
  // Load only from ROOT by default
  LoadRawCertsFromStore(raw_certs, CERT_SYSTEM_STORE_CURRENT_USER, L"ROOT");
  LoadRawCertsFromStore(raw_certs, CERT_SYSTEM_STORE_LOCAL_MACHINE, L"ROOT");
}

#endif // _WIN32
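The Windows loader deliberately returns raw DER bytes; the conversion to `X509` must then happen in a translation unit that does include OpenSSL, mirroring what the macOS path does with `d2i_X509`. A sketch, assuming a `raw_certs` vector produced by the function above (the helper name is hypothetical):

    // Sketch: convert raw DER blobs from the Windows store into OpenSSL X509s.
    // d2i_X509 advances the input pointer, so a local copy is taken each time.
    #include <openssl/x509.h>
    #include <vector>

    static void append_raw_certs(const std::vector<RawCertificate>& raw_certs,
                                 STACK_OF(X509)* cert_stack) {
        for (const RawCertificate& raw : raw_certs) {
            const unsigned char* p = raw.data.data();
            X509* cert = d2i_X509(nullptr, &p, (long)raw.data.size());
            if (cert && !sk_X509_push(cert_stack, cert)) {
                X509_free(cert); // push failed; avoid leaking the parsed cert
            }
        }
    }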
@@ -627,9 +627,15 @@ public:
        return std::move(*this);
    }

-    void setOnClose(HttpContextData<SSL>::OnSocketClosedCallback onClose) {
+    void setOnSocketClosed(HttpContextData<SSL>::OnSocketClosedCallback onClose) {
        httpContext->getSocketContextData()->onSocketClosed = onClose;
    }
+    void setOnSocketDrain(HttpContextData<SSL>::OnSocketDrainCallback onDrain) {
+        httpContext->getSocketContextData()->onSocketDrain = onDrain;
+    }
+    void setOnSocketData(HttpContextData<SSL>::OnSocketDataCallback onData) {
+        httpContext->getSocketContextData()->onSocketData = onData;
+    }

    void setOnClientError(HttpContextData<SSL>::OnClientErrorCallback onClientError) {
        httpContext->getSocketContextData()->onClientError = std::move(onClientError);
@@ -193,23 +193,32 @@ private:
        auto *httpResponseData = reinterpret_cast<HttpResponseData<SSL> *>(us_socket_ext(SSL, s));

        /* Call filter */
        HttpContextData<SSL> *httpContextData = getSocketContextDataS(s);

+        if (httpResponseData && httpResponseData->isConnectRequest) {
+            if (httpResponseData->socketData && httpContextData->onSocketData) {
+                httpContextData->onSocketData(httpResponseData->socketData, SSL, s, "", 0, true);
+            }
+            if (httpResponseData->inStream) {
+                httpResponseData->inStream(reinterpret_cast<HttpResponse<SSL> *>(s), "", 0, true, httpResponseData->userData);
+                httpResponseData->inStream = nullptr;
+            }
+        }

        for (auto &f : httpContextData->filterHandlers) {
            f((HttpResponse<SSL> *) s, -1);
        }

+        if (httpResponseData->socketData && httpContextData->onSocketClosed) {
+            httpContextData->onSocketClosed(httpResponseData->socketData, SSL, s);
+        }
        /* Signal broken HTTP request only if we have a pending request */
        if (httpResponseData->onAborted != nullptr && httpResponseData->userData != nullptr) {
            httpResponseData->onAborted((HttpResponse<SSL> *)s, httpResponseData->userData);
        }

-        if (httpResponseData->socketData && httpContextData->onSocketClosed) {
-            httpContextData->onSocketClosed(httpResponseData->socketData, SSL, s);
-        }

        /* Destruct socket ext */
        httpResponseData->~HttpResponseData<SSL>();
@@ -254,7 +263,9 @@ private:

        /* The return value is entirely up to us to interpret. The HttpParser cares only for whether the returned value is DIFFERENT from passed user */
-        auto result = httpResponseData->consumePostPadded(httpContextData->maxHeaderSize, httpContextData->flags.requireHostHeader, httpContextData->flags.useStrictMethodValidation, data, (unsigned int) length, s, proxyParser, [httpContextData](void *s, HttpRequest *httpRequest) -> void * {
+        auto result = httpResponseData->consumePostPadded(httpContextData->maxHeaderSize, httpResponseData->isConnectRequest, httpContextData->flags.requireHostHeader, httpContextData->flags.useStrictMethodValidation, data, (unsigned int) length, s, proxyParser, [httpContextData](void *s, HttpRequest *httpRequest) -> void * {

            /* For every request we reset the timeout and hang until user makes action */
            /* Warning: if we are in shutdown state, resetting the timer is a security issue! */
            us_socket_timeout(SSL, (us_socket_t *) s, 0);
@@ -330,7 +341,12 @@ private:
            /* Continue parsing */
            return s;

-        }, [httpResponseData](void *user, std::string_view data, bool fin) -> void * {
+        }, [httpResponseData, httpContextData](void *user, std::string_view data, bool fin) -> void * {

+            if (httpResponseData->isConnectRequest && httpResponseData->socketData && httpContextData->onSocketData) {
+                httpContextData->onSocketData(httpResponseData->socketData, SSL, (struct us_socket_t *) user, data.data(), data.length(), fin);
+            }
            /* We always get an empty chunk even if there is no data */
            if (httpResponseData->inStream) {
@@ -449,7 +465,7 @@ private:
        us_socket_context_on_writable(SSL, getSocketContext(), [](us_socket_t *s) {
            auto *asyncSocket = reinterpret_cast<AsyncSocket<SSL> *>(s);
            auto *httpResponseData = reinterpret_cast<HttpResponseData<SSL> *>(asyncSocket->getAsyncSocketData());

            /* Attempt to drain the socket buffer before triggering onWritable callback */
            size_t bufferedAmount = asyncSocket->getBufferedAmount();
            if (bufferedAmount > 0) {
@@ -470,6 +486,12 @@ private:
            */
            }

+            auto *httpContextData = getSocketContextDataS(s);
+
+            if (httpResponseData->isConnectRequest && httpResponseData->socketData && httpContextData->onSocketDrain) {
+                httpContextData->onSocketDrain(httpResponseData->socketData, SSL, (struct us_socket_t *) s);
+            }
            /* Ask the developer to write data and return success (true) or failure (false), OR skip sending anything and return success (true). */
            if (httpResponseData->onWritable) {
                /* We are now writable, so hang the timeout again; the user does not have to do anything, so we should hang until end or tryEnd re-arms the timeout */
@@ -514,6 +536,7 @@ private:
        us_socket_context_on_end(SSL, getSocketContext(), [](us_socket_t *s) {
            auto *asyncSocket = reinterpret_cast<AsyncSocket<SSL> *>(s);
+            asyncSocket->uncorkWithoutSending();

            /* We do not care for half closed sockets */
            return asyncSocket->close();
        });
@@ -44,7 +44,10 @@ struct alignas(16) HttpContextData {
private:
    std::vector<MoveOnlyFunction<void(HttpResponse<SSL> *, int)>> filterHandlers;
+    using OnSocketClosedCallback = void (*)(void* userData, int is_ssl, struct us_socket_t *rawSocket);
+    using OnSocketDataCallback = void (*)(void* userData, int is_ssl, struct us_socket_t *rawSocket, const char *data, int length, bool last);
+    using OnSocketDrainCallback = void (*)(void* userData, int is_ssl, struct us_socket_t *rawSocket);
    using OnClientErrorCallback = MoveOnlyFunction<void(int is_ssl, struct us_socket_t *rawSocket, uWS::HttpParserError errorCode, char *rawPacket, int rawPacketLength)>;

    MoveOnlyFunction<void(const char *hostname)> missingServerNameHandler;

@@ -61,6 +64,8 @@ private:
    void *upgradedWebSocket = nullptr;
    /* Used to simulate Node.js socket events. */
    OnSocketClosedCallback onSocketClosed = nullptr;
+    OnSocketDrainCallback onSocketDrain = nullptr;
+    OnSocketDataCallback onSocketData = nullptr;
    OnClientErrorCallback onClientError = nullptr;

    uint64_t maxHeaderSize = 0; // 0 means no limit
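Taken together, the new `setOnSocketClosed`/`setOnSocketDrain`/`setOnSocketData` setters and the plain function-pointer callbacks above let an embedder mirror Node.js socket events. A hedged sketch of the wiring — the `app` parameter type and the comments' semantics are assumptions drawn only from the signatures in this diff:

    // Sketch: registering the Node.js-style socket event callbacks added here.
    // Each callback receives the opaque userData stored on the response
    // (`socketData`), the SSL flag, and the raw socket.
    void wireSocketEvents(uWS::TemplatedApp<false>& app) {
        app.setOnSocketData([](void* userData, int is_ssl, us_socket_t* s,
                               const char* data, int length, bool last) {
            // Raw bytes observed on a CONNECT tunnel (or the final empty chunk).
        });
        app.setOnSocketDrain([](void* userData, int is_ssl, us_socket_t* s) {
            // Write buffer drained; safe to resume writing.
        });
        app.setOnSocketClosed([](void* userData, int is_ssl, us_socket_t* s) {
            // Socket closed; release whatever userData points at.
        });
    }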
@@ -117,18 +117,19 @@ namespace uWS
    struct ConsumeRequestLineResult {
        char *position;
        bool isAncientHTTP;
+        bool isConnect;
        HTTPHeaderParserError headerParserError;
    public:
        static ConsumeRequestLineResult error(HTTPHeaderParserError error) {
-            return ConsumeRequestLineResult{nullptr, false, error};
+            return ConsumeRequestLineResult{nullptr, false, false, error};
        }

-        static ConsumeRequestLineResult success(char *position, bool isAncientHTTP = false) {
-            return ConsumeRequestLineResult{position, isAncientHTTP, HTTP_HEADER_PARSER_ERROR_NONE};
+        static ConsumeRequestLineResult success(char *position, bool isAncientHTTP = false, bool isConnect = false) {
+            return ConsumeRequestLineResult{position, isAncientHTTP, isConnect, HTTP_HEADER_PARSER_ERROR_NONE};
        }

-        static ConsumeRequestLineResult shortRead(bool isAncientHTTP = false) {
-            return ConsumeRequestLineResult{nullptr, isAncientHTTP, HTTP_HEADER_PARSER_ERROR_NONE};
+        static ConsumeRequestLineResult shortRead(bool isAncientHTTP = false, bool isConnect = false) {
+            return ConsumeRequestLineResult{nullptr, isAncientHTTP, isConnect, HTTP_HEADER_PARSER_ERROR_NONE};
        }

        bool isErrorOrShortRead() {
@@ -551,7 +552,10 @@ namespace uWS
            return ConsumeRequestLineResult::shortRead();
        }

-        if (data[0] == 32 && (__builtin_expect(data[1] == '/', 1) || isHTTPorHTTPSPrefixForProxies(data + 1, end) == 1)) [[likely]] {
+        bool isHTTPMethod = (__builtin_expect(data[1] == '/', 1));
+        bool isConnect = !isHTTPMethod && (isHTTPorHTTPSPrefixForProxies(data + 1, end) == 1 || ((data - start) == 7 && memcmp(start, "CONNECT", 7) == 0));
+        if (isHTTPMethod || isConnect) [[likely]] {
            header.key = {start, (size_t) (data - start)};
            data++;
            if (!isValidMethod(header.key, useStrictMethodValidation)) {
@@ -577,22 +581,22 @@ namespace uWS
            if (nextPosition >= end) {
                /* Whatever we have must be part of the version string */
                if (memcmp(" HTTP/1.1\r\n", data, std::min<unsigned int>(11, (unsigned int) (end - data))) == 0) {
-                    return ConsumeRequestLineResult::shortRead();
+                    return ConsumeRequestLineResult::shortRead(false, isConnect);
                } else if (memcmp(" HTTP/1.0\r\n", data, std::min<unsigned int>(11, (unsigned int) (end - data))) == 0) {
                    /* Indicates that the request line is ancient HTTP */
-                    return ConsumeRequestLineResult::shortRead(true);
+                    return ConsumeRequestLineResult::shortRead(true, isConnect);
                }
                return ConsumeRequestLineResult::error(HTTP_HEADER_PARSER_ERROR_INVALID_HTTP_VERSION);
            }
            if (memcmp(" HTTP/1.1\r\n", data, 11) == 0) {
-                return ConsumeRequestLineResult::success(nextPosition);
+                return ConsumeRequestLineResult::success(nextPosition, false, isConnect);
            } else if (memcmp(" HTTP/1.0\r\n", data, 11) == 0) {
                /* Indicates that the request line is ancient HTTP */
-                return ConsumeRequestLineResult::success(nextPosition, true);
+                return ConsumeRequestLineResult::success(nextPosition, true, isConnect);
            }
            /* If we stand at the post padded CR, we have fragmented input so try again later */
            if (data[0] == '\r') {
-                return ConsumeRequestLineResult::shortRead();
+                return ConsumeRequestLineResult::shortRead(false, isConnect);
            }
            /* This is an error */
            return ConsumeRequestLineResult::error(HTTP_HEADER_PARSER_ERROR_INVALID_HTTP_VERSION);
@@ -602,14 +606,14 @@ namespace uWS

            /* If we stand at the post padded CR, we have fragmented input so try again later */
            if (data[0] == '\r') {
-                return ConsumeRequestLineResult::shortRead();
+                return ConsumeRequestLineResult::shortRead(false, isConnect);
            }

            if (data[0] == 32) {
                switch (isHTTPorHTTPSPrefixForProxies(data + 1, end)) {
                    // If we haven't received enough data to check if it's http:// or https://, let's try again later
                    case -1:
-                        return ConsumeRequestLineResult::shortRead();
+                        return ConsumeRequestLineResult::shortRead(false, isConnect);
                    // Otherwise, if it's not http:// or https://, return 400
                    default:
                        return ConsumeRequestLineResult::error(HTTP_HEADER_PARSER_ERROR_INVALID_REQUEST);
@@ -635,7 +639,7 @@ namespace uWS
        }

        /* End is only used for the proxy parser. The HTTP parser recognizes "\ra" as invalid "\r\n" scan and breaks. */
-        static HttpParserResult getHeaders(char *postPaddedBuffer, char *end, struct HttpRequest::Header *headers, void *reserved, bool &isAncientHTTP, bool useStrictMethodValidation, uint64_t maxHeaderSize) {
+        static HttpParserResult getHeaders(char *postPaddedBuffer, char *end, struct HttpRequest::Header *headers, void *reserved, bool &isAncientHTTP, bool &isConnectRequest, bool useStrictMethodValidation, uint64_t maxHeaderSize) {
            char *preliminaryKey, *preliminaryValue, *start = postPaddedBuffer;
#ifdef UWS_WITH_PROXY
            /* ProxyParser is passed as reserved parameter */

@@ -689,6 +693,9 @@ namespace uWS
            if (requestLineResult.isAncientHTTP) {
                isAncientHTTP = true;
            }
+            if (requestLineResult.isConnect) {
+                isConnectRequest = true;
+            }
            /* No request headers found */
            const char * headerStart = (headers[0].key.length() > 0) ? headers[0].key.data() : end;
@@ -798,7 +805,7 @@ namespace uWS

    /* This is the only caller of getHeaders and is thus the deepest part of the parser. */
    template <bool ConsumeMinimally>
-    HttpParserResult fenceAndConsumePostPadded(uint64_t maxHeaderSize, bool requireHostHeader, bool useStrictMethodValidation, char *data, unsigned int length, void *user, void *reserved, HttpRequest *req, MoveOnlyFunction<void *(void *, HttpRequest *)> &requestHandler, MoveOnlyFunction<void *(void *, std::string_view, bool)> &dataHandler) {
+    HttpParserResult fenceAndConsumePostPadded(uint64_t maxHeaderSize, bool& isConnectRequest, bool requireHostHeader, bool useStrictMethodValidation, char *data, unsigned int length, void *user, void *reserved, HttpRequest *req, MoveOnlyFunction<void *(void *, HttpRequest *)> &requestHandler, MoveOnlyFunction<void *(void *, std::string_view, bool)> &dataHandler) {

        /* How much data we CONSUMED (to throw away) */
        unsigned int consumedTotal = 0;

@@ -809,7 +816,7 @@ namespace uWS
        data[length + 1] = 'a'; /* Anything that is not \n, to trigger "invalid request" */
        req->ancientHttp = false;
        for (;length;) {
-            auto result = getHeaders(data, data + length, req->headers, reserved, req->ancientHttp, useStrictMethodValidation, maxHeaderSize);
+            auto result = getHeaders(data, data + length, req->headers, reserved, req->ancientHttp, isConnectRequest, useStrictMethodValidation, maxHeaderSize);
            if (result.isError()) {
                return result;
            }
@@ -916,6 +923,10 @@ namespace uWS
                length -= emittable;
                consumedTotal += emittable;
            }
+        } else if (isConnectRequest) {
+            // This only serves to mark that the CONNECT request has read all headers
+            // and can start emitting data
+            remainingStreamingBytes = STATE_IS_CHUNKED;
        } else {
            /* If we came here without a body; emit an empty data chunk to signal no data */
            dataHandler(user, {}, true);
@@ -931,15 +942,16 @@ namespace uWS
    }

public:
-    HttpParserResult consumePostPadded(uint64_t maxHeaderSize, bool requireHostHeader, bool useStrictMethodValidation, char *data, unsigned int length, void *user, void *reserved, MoveOnlyFunction<void *(void *, HttpRequest *)> &&requestHandler, MoveOnlyFunction<void *(void *, std::string_view, bool)> &&dataHandler) {
+    HttpParserResult consumePostPadded(uint64_t maxHeaderSize, bool& isConnectRequest, bool requireHostHeader, bool useStrictMethodValidation, char *data, unsigned int length, void *user, void *reserved, MoveOnlyFunction<void *(void *, HttpRequest *)> &&requestHandler, MoveOnlyFunction<void *(void *, std::string_view, bool)> &&dataHandler) {
        /* This resets BloomFilter by construction, but later we also reset it again.
         * Optimize this to skip resetting twice (req could be made global) */
        HttpRequest req;
        if (remainingStreamingBytes) {
-            /* It's either chunked or with a content-length */
-            if (isParsingChunkedEncoding(remainingStreamingBytes)) {
+            if (isConnectRequest) {
+                dataHandler(user, std::string_view(data, length), false);
+                return HttpParserResult::success(0, user);
+            } else if (isParsingChunkedEncoding(remainingStreamingBytes)) {
+                /* It's either chunked or with a content-length */
                std::string_view dataToConsume(data, length);
                for (auto chunk : uWS::ChunkIterator(&dataToConsume, &remainingStreamingBytes)) {
                    dataHandler(user, chunk, chunk.length() == 0);

@@ -950,6 +962,7 @@ public:
                data = (char *) dataToConsume.data();
                length = (unsigned int) dataToConsume.length();
            } else {

                // this is exactly the same as below!
                // todo: refactor this
                if (remainingStreamingBytes >= length) {
@@ -980,7 +993,7 @@ public:
            fallback.append(data, maxCopyDistance);

            // break here on break
-            HttpParserResult consumed = fenceAndConsumePostPadded<true>(maxHeaderSize, requireHostHeader, useStrictMethodValidation, fallback.data(), (unsigned int) fallback.length(), user, reserved, &req, requestHandler, dataHandler);
+            HttpParserResult consumed = fenceAndConsumePostPadded<true>(maxHeaderSize, isConnectRequest, requireHostHeader, useStrictMethodValidation, fallback.data(), (unsigned int) fallback.length(), user, reserved, &req, requestHandler, dataHandler);
            /* Return data will be different than user if we are upgraded to WebSocket or have an error */
            if (consumed.returnedData != user) {
                return consumed;

@@ -997,8 +1010,11 @@ public:
            length -= consumedBytes - had;

            if (remainingStreamingBytes) {
-                /* It's either chunked or with a content-length */
-                if (isParsingChunkedEncoding(remainingStreamingBytes)) {
+                if (isConnectRequest) {
+                    dataHandler(user, std::string_view(data, length), false);
+                    return HttpParserResult::success(0, user);
+                } else if (isParsingChunkedEncoding(remainingStreamingBytes)) {
+                    /* It's either chunked or with a content-length */
                    std::string_view dataToConsume(data, length);
                    for (auto chunk : uWS::ChunkIterator(&dataToConsume, &remainingStreamingBytes)) {
                        dataHandler(user, chunk, chunk.length() == 0);

@@ -1037,7 +1053,7 @@ public:
            }
        }

-        HttpParserResult consumed = fenceAndConsumePostPadded<false>(maxHeaderSize, requireHostHeader, useStrictMethodValidation, data, length, user, reserved, &req, requestHandler, dataHandler);
+        HttpParserResult consumed = fenceAndConsumePostPadded<false>(maxHeaderSize, isConnectRequest, requireHostHeader, useStrictMethodValidation, data, length, user, reserved, &req, requestHandler, dataHandler);
        /* Return data will be different than user if we are upgraded to WebSocket or have an error */
        if (consumed.returnedData != user) {
            return consumed;
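The net effect of the parser changes: once a request line is recognized as CONNECT, header parsing completes normally, `remainingStreamingBytes` is set to `STATE_IS_CHUNKED` purely as a "headers done" marker, and every subsequent byte bypasses chunked/content-length decoding and goes straight to `dataHandler`. A worked example of the byte stream (the concrete bytes are illustrative, not from the diff):

    // Illustrative byte stream for a CONNECT tunnel:
    //
    //   "CONNECT example.com:443 HTTP/1.1\r\n"   // request line: isConnect = true
    //   "Host: example.com:443\r\n"
    //   "\r\n"                                   // headers done -> marker state set
    //   <opaque TLS bytes...>                    // delivered verbatim to dataHandler
    //
    // i.e. after the blank line, consumePostPadded short-circuits:
    //
    //   if (isConnectRequest) {
    //       dataHandler(user, std::string_view(data, length), false);
    //       return HttpParserResult::success(0, user);
    //   }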
@@ -243,7 +243,7 @@ public:
    /* Manually upgrade to WebSocket. Typically called in upgrade handler. Immediately calls open handler.
     * NOTE: Will invalidate 'this' as socket might change location in memory. Throw away after use. */
    template <typename UserData>
-    us_socket_t *upgrade(UserData &&userData, std::string_view secWebSocketKey, std::string_view secWebSocketProtocol,
+    us_socket_t *upgrade(UserData&& userData, std::string_view secWebSocketKey, std::string_view secWebSocketProtocol,
                         std::string_view secWebSocketExtensions,
                         struct us_socket_context_t *webSocketContext) {

@@ -350,7 +350,8 @@ public:
        us_socket_timeout(SSL, (us_socket_t *) webSocket, webSocketContextData->idleTimeoutComponents.first);

        /* Move construct the UserData right before calling open handler */
-        new (webSocket->getUserData()) UserData(std::move(userData));
+        new (webSocket->getUserData()) UserData(std::forward<UserData>(userData));

        /* Emit open event and start the timeout */
        if (webSocketContextData->openHandler) {
@@ -741,6 +742,10 @@ public:

        return httpResponseData->socketData;
    }
+    bool isConnectRequest() {
+        HttpResponseData<SSL> *httpResponseData = getHttpResponseData();
+        return httpResponseData->isConnectRequest;
+    }

    void setWriteOffset(uint64_t offset) {
        HttpResponseData<SSL> *httpResponseData = getHttpResponseData();

@@ -108,6 +108,7 @@ struct HttpResponseData : AsyncSocketData<SSL>, HttpParser {
    uint8_t state = 0;
    uint8_t idleTimeout = 10; // default HTTP_TIMEOUT 10 seconds
    bool fromAncientRequest = false;
+    bool isConnectRequest = false;
    bool isIdle = true;
    bool shouldCloseOnceIdle = false;
@@ -22,7 +22,7 @@ At its core is the _Bun runtime_, a fast JavaScript runtime designed as a drop-i
 ## Features:

 - Live in-editor error messages (gif below)
-- Test runner codelens
+- VS Code test runner support
 - Debugger support
 - Run scripts from package.json
 - Visual lockfile viewer for old binary lockfiles (`bun.lockb`)
@@ -1,6 +1,6 @@
 {
   "name": "bun-vscode",
-  "version": "0.0.29",
+  "version": "0.0.31",
   "author": "oven",
   "repository": {
     "type": "git",
@@ -116,20 +116,6 @@
       "category": "Bun",
       "enablement": "!inDebugMode && resourceLangId =~ /^(javascript|typescript|javascriptreact|typescriptreact)$/ && !isInDiffEditor && resourceScheme == 'untitled'",
       "icon": "$(play-circle)"
-    },
-    {
-      "command": "extension.bun.runTest",
-      "title": "Run all tests",
-      "shortTitle": "Run Test",
-      "category": "Bun",
-      "icon": "$(play)"
-    },
-    {
-      "command": "extension.bun.watchTest",
-      "title": "Run all tests in watch mode",
-      "shortTitle": "Run Test Watch",
-      "category": "Bun",
-      "icon": "$(sync)"
     }
   ],
   "menus": {
@@ -1,5 +1,5 @@
 #!/bin/sh
-# Version: 18
+# Version: 19

 # A script that installs the dependencies needed to build and test Bun.
 # This should work on macOS and Linux with a POSIX shell.
@@ -685,6 +685,8 @@ install_common_software() {
      apt-transport-https \
      software-properties-common
  fi
+  install_packages \
+    libc6-dbg
  ;;
dnf)
  install_packages \
@@ -1193,7 +1195,7 @@ install_docker() {
    execute_sudo amazon-linux-extras install docker
    ;;
amzn-* | alpine-*)
-    install_packages docker
+    install_packages docker docker-cli-compose
    ;;
*)
    sh="$(require sh)"
@@ -1208,10 +1210,17 @@ install_docker() {
  if [ -f "$systemctl" ]; then
    execute_sudo "$systemctl" enable docker
  fi
+  if [ "$os" = "linux" ] && [ "$distro" = "alpine" ]; then
+    execute doas rc-update add docker default
+    execute doas rc-service docker start
+  fi

  getent="$(which getent)"
  if [ -n "$("$getent" group docker)" ]; then
    usermod="$(which usermod)"
    if [ -z "$usermod" ]; then
      usermod="$(sudo which usermod)"
    fi
    if [ -f "$usermod" ]; then
      execute_sudo "$usermod" -aG docker "$user"
    fi
@@ -72,6 +72,7 @@ const cwd = import.meta.dirname ? dirname(import.meta.dirname) : process.cwd();
const testsPath = join(cwd, "test");

const spawnTimeout = 5_000;
+const spawnBunTimeout = 20_000; // when running with ASAN/LSAN, bun can take a bit longer to exit; not a bug.
const testTimeout = 3 * 60_000;
const integrationTimeout = 5 * 60_000;
@@ -79,7 +80,7 @@ function getNodeParallelTestTimeout(testPath) {
  if (testPath.includes("test-dns")) {
    return 90_000;
  }
-  return 10_000;
+  return 20_000;
}

process.on("SIGTRAP", () => {
@@ -578,8 +579,11 @@ async function runTests() {
    const title = relative(cwd, absoluteTestPath).replaceAll(sep, "/");
    if (isNodeTest(testPath)) {
      const testContent = readFileSync(absoluteTestPath, "utf-8");
-      const runWithBunTest =
-        title.includes("needs-test") || testContent.includes("bun:test") || testContent.includes("node:test");
+      let runWithBunTest = title.includes("needs-test") || testContent.includes("node:test");
+      // don't wanna have a filter for includes("bun:test") but these need our mocks
+      runWithBunTest ||= title === "test/js/node/test/parallel/test-fs-append-file-flush.js";
+      runWithBunTest ||= title === "test/js/node/test/parallel/test-fs-write-file-flush.js";
+      runWithBunTest ||= title === "test/js/node/test/parallel/test-fs-write-stream-flush.js";
      const subcommand = runWithBunTest ? "test" : "run";
      const env = {
        FORCE_COLOR: "0",

@@ -592,7 +596,7 @@ async function runTests() {
      }
      if ((basename(execPath).includes("asan") || !isCI) && shouldValidateLeakSan(testPath)) {
        env.BUN_DESTRUCT_VM_ON_EXIT = "1";
-        env.ASAN_OPTIONS = "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1";
+        env.ASAN_OPTIONS = "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1:abort_on_error=1";
        // prettier-ignore
        env.LSAN_OPTIONS = `malloc_context_size=100:print_suppressions=0:suppressions=${process.cwd()}/test/leaksan.supp`;
      }
@@ -657,6 +661,7 @@ async function runTests() {
    const buildResult = await spawnBun(execPath, {
      cwd: vendorPath,
      args: ["run", "build"],
+      timeout: 60_000,
    });
    if (!buildResult.ok) {
      throw new Error(`Failed to build vendor: ${buildResult.error}`);
@@ -683,6 +688,9 @@ async function runTests() {
    }
  }

+  // tests are all over; close the group from the final test. any further output should print ungrouped.
+  startGroup("End");

  if (isGithubAction) {
    reportOutputToGitHubAction("failing_tests_count", failedResults.length);
    const markdown = formatTestToMarkdown(failedResults, false, 0);
@@ -1132,10 +1140,6 @@ async function spawnBun(execPath, { args, cwd, timeout, env, stdout, stderr }) {
      : { BUN_ENABLE_CRASH_REPORTING: "0" }),
  };

-  if (basename(execPath).includes("asan") && bunEnv.ASAN_OPTIONS === undefined) {
-    bunEnv.ASAN_OPTIONS = "allow_user_segv_handler=1:disable_coredump=0";
-  }

  if (isWindows && bunEnv.Path) {
    delete bunEnv.Path;
  }
@@ -1152,6 +1156,9 @@ async function spawnBun(execPath, { args, cwd, timeout, env, stdout, stderr }) {
    }
    bunEnv["TEMP"] = tmpdirPath;
  }
+  if (timeout === undefined) {
+    timeout = spawnBunTimeout;
+  }
  try {
    const existingCores = options["coredump-upload"] ? readdirSync(coresDir) : [];
    const result = await spawnSafe({
@@ -1331,7 +1338,7 @@ async function spawnBunTest(execPath, testPath, opts = { cwd }) {
  }
  if ((basename(execPath).includes("asan") || !isCI) && shouldValidateLeakSan(relative(cwd, absPath))) {
    env.BUN_DESTRUCT_VM_ON_EXIT = "1";
-    env.ASAN_OPTIONS = "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1";
+    env.ASAN_OPTIONS = "allow_user_segv_handler=1:disable_coredump=0:detect_leaks=1:abort_on_error=1";
    // prettier-ignore
    env.LSAN_OPTIONS = `malloc_context_size=100:print_suppressions=0:suppressions=${process.cwd()}/test/leaksan.supp`;
  }
@@ -2808,6 +2808,7 @@ export function endGroup() {
  } else {
    console.groupEnd();
  }
+  // when a file exits with an ASAN error, there is no trailing newline, so we add one here to make sure `console.group()` detection doesn't get broken in CI.
+  console.log();
}
@@ -2865,6 +2866,12 @@ export function printEnvironment() {
        spawnSync([shell, "-c", "free -m -w"], { stdio: "inherit" });
      }
    });
+    startGroup("Docker", () => {
+      const shell = which(["sh", "bash"]);
+      if (shell) {
+        spawnSync([shell, "-c", "docker ps"], { stdio: "inherit" });
+      }
+    });
  }
  if (isWindows) {
    startGroup("Disk (win)", () => {
@@ -121,6 +121,10 @@ pub fn exit(code: u32) noreturn {
        std.os.windows.kernel32.ExitProcess(code);
    },
    else => {
+        if (Environment.enable_asan) {
+            std.c.exit(@bitCast(code));
+            std.c.abort(); // exit should be noreturn
+        }
        bun.c.quick_exit(@bitCast(code));
        std.c.abort(); // quick_exit should be noreturn
    },
@@ -199,7 +199,6 @@ pub const StandaloneModuleGraph = struct {
    store.ref();

    const b = bun.webcore.Blob.initWithStore(store, globalObject).new();
-    b.allocator = bun.default_allocator;

    if (bun.http.MimeType.byExtensionNoDefault(bun.strings.trimLeadingChar(std.fs.path.extension(this.name), '.'))) |mime| {
        store.mime_type = mime;
@@ -724,7 +723,8 @@ pub const StandaloneModuleGraph = struct {
        return bun.invalid_fd;
    };
    defer pe_file.deinit();
-    pe_file.addBunSection(bytes) catch |err| {
+    // Always strip authenticode when adding the .bun section for --compile
+    pe_file.addBunSection(bytes, .strip_always) catch |err| {
        Output.prettyErrorln("Error adding Bun section to PE file: {}", .{err});
        cleanup(zname, cloned_executable_fd);
        return bun.invalid_fd;
@@ -919,6 +919,8 @@ pub const Default = struct {
    _ = self;
    return c_allocator;
}
+
+pub const deinit = void;
};

const basic = if (bun.use_mimalloc)
@@ -926,6 +928,127 @@ const basic = if (bun.use_mimalloc)
else
    @import("./allocators/fallback.zig");

pub fn stackFallback(comptime size: usize, fallback_allocator: std.mem.Allocator) StackFallbackAllocator(size) {
    return StackFallbackAllocator(size){
        .buffer = undefined,
        .fallback_allocator = fallback_allocator,
        .fixed_buffer_allocator = undefined,
        .force_heap = if (comptime Environment.ci_assert)
            !bun.getRuntimeFeatureFlag(.BUN_DEBUG_FORCE_HEAP_FALLBACK_ALLOCATORS)
        else {},
    };
}

/// An allocator that attempts to allocate using a
/// `FixedBufferAllocator` using an array of size `size`. If the
/// allocation fails, it will fall back to using
/// `fallback_allocator`. Easily created with `stackFallback`.
pub fn StackFallbackAllocator(comptime size: usize) type {
    return struct {
        const Self = @This();

        buffer: [size]u8,
        fallback_allocator: std.mem.Allocator,
        fixed_buffer_allocator: std.heap.FixedBufferAllocator,
        get_called: if (Environment.ci_assert) bool else void = if (Environment.ci_assert) false,
        force_heap: if (Environment.ci_assert) bool else void,

        /// This function both fetches an `Allocator` interface to this
        /// allocator *and* resets the internal buffer allocator.
        pub fn get(self: *Self) std.mem.Allocator {
            if (comptime Environment.ci_assert) {
                bun.assert(!self.get_called); // `get` called multiple times; instead use `const allocator = stackFallback(N).get();`
                self.get_called = true;
            }
            self.fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(self.buffer[0..]);
            return .{
                .ptr = self,
                .vtable = &.{
                    .alloc = alloc,
                    .resize = resize,
                    .remap = remap,
                    .free = free,
                },
            };
        }

        fn alloc(
            ctx: *anyopaque,
            len: usize,
            alignment: std.mem.Alignment,
            ra: usize,
        ) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(ctx));
            if (comptime Environment.ci_assert) {
                if (self.force_heap) {
                    return self.fallback_allocator.rawAlloc(len, alignment, ra);
                }
            }
            return std.heap.FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, alignment, ra) orelse
                return self.fallback_allocator.rawAlloc(len, alignment, ra);
        }

        fn resize(
            ctx: *anyopaque,
            buf: []u8,
            alignment: std.mem.Alignment,
            new_len: usize,
            ra: usize,
        ) bool {
            const self: *Self = @ptrCast(@alignCast(ctx));
            if (comptime Environment.ci_assert) {
                if (self.force_heap) {
                    return self.fallback_allocator.rawResize(buf, alignment, new_len, ra);
                }
            }
            if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                return std.heap.FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, alignment, new_len, ra);
            } else {
                return self.fallback_allocator.rawResize(buf, alignment, new_len, ra);
            }
        }

        fn remap(
            context: *anyopaque,
            memory: []u8,
            alignment: std.mem.Alignment,
            new_len: usize,
            return_address: usize,
        ) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(context));
            if (comptime Environment.ci_assert) {
                if (self.force_heap) {
                    return self.fallback_allocator.rawRemap(memory, alignment, new_len, return_address);
                }
            }
            if (self.fixed_buffer_allocator.ownsPtr(memory.ptr)) {
                return std.heap.FixedBufferAllocator.remap(&self.fixed_buffer_allocator, memory, alignment, new_len, return_address);
            } else {
                return self.fallback_allocator.rawRemap(memory, alignment, new_len, return_address);
            }
        }

        fn free(
            ctx: *anyopaque,
            buf: []u8,
            alignment: std.mem.Alignment,
            ra: usize,
        ) void {
            const self: *Self = @ptrCast(@alignCast(ctx));
            if (comptime Environment.ci_assert) {
                if (self.force_heap) {
                    return self.fallback_allocator.rawFree(buf, alignment, ra);
                }
            }
            if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                return std.heap.FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, alignment, ra);
            } else {
                return self.fallback_allocator.rawFree(buf, alignment, ra);
            }
        }
    };
}

const Environment = @import("./env.zig");
const std = @import("std");
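The `StackFallbackAllocator` above re-implements Zig's standard stack-fallback pattern with a CI-only `force_heap` escape hatch for exercising the heap path. For comparison, C++17's polymorphic allocators express the same idea natively; a minimal sketch, not from the diff:

    #include <cstddef>
    #include <memory_resource>
    #include <vector>

    void example() {
        // 1 KiB on the stack; anything that doesn't fit falls back to the heap,
        // mirroring the FixedBufferAllocator -> fallback_allocator chain above.
        std::byte buffer[1024];
        std::pmr::monotonic_buffer_resource arena(
            buffer, sizeof(buffer), std::pmr::new_delete_resource());
        std::pmr::vector<int> values(&arena);
        values.resize(64); // served from the stack buffer
    }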
@@ -94,6 +94,8 @@ const BorrowedHeap = if (safety_checks) *DebugHeap else *mimalloc.Heap;
const DebugHeap = struct {
    inner: *mimalloc.Heap,
    thread_lock: bun.safety.ThreadLock,

+    pub const deinit = void;
};

threadlocal var thread_heap: if (safety_checks) ?DebugHeap else void = if (safety_checks) null;
@@ -506,6 +506,12 @@ pub fn AllocationScopeIn(comptime Allocator: type) type {
    pub fn setPointerExtra(self: Self, ptr: *anyopaque, extra: Extra) void {
        return self.borrow().setPointerExtra(ptr, extra);
    }

+    pub fn leakSlice(self: Self, memory: anytype) void {
+        if (comptime !Self.enabled) return;
+        _ = @typeInfo(@TypeOf(memory)).pointer;
+        self.trackExternalFree(memory, null) catch @panic("tried to free memory that was not allocated by the allocation scope");
+    }
};
}
@@ -112,6 +112,7 @@ pub const Features = struct {
    pub var unsupported_uv_function: usize = 0;
    pub var exited: usize = 0;
    pub var yarn_migration: usize = 0;
    pub var pnpm_migration: usize = 0;
+    pub var yaml_parse: usize = 0;

    comptime {
@@ -752,7 +752,7 @@ pub const Object = struct {
    pub fn hasProperty(obj: *const Object, name: string) bool {
        for (obj.properties.slice()) |prop| {
            const key = prop.key orelse continue;
-            if (std.meta.activeTag(key.data) != .e_string) continue;
+            if (key.data != .e_string) continue;
            if (key.data.e_string.eql(string, name)) return true;
        }
        return false;

@@ -762,7 +762,7 @@ pub const Object = struct {
    for (obj.properties.slice(), 0..) |prop, i| {
        const value = prop.value orelse continue;
        const key = prop.key orelse continue;
-        if (std.meta.activeTag(key.data) != .e_string) continue;
+        if (key.data != .e_string) continue;
        const key_str = key.data.e_string;
        if (key_str.eql(string, name)) {
            return Expr.Query{
@@ -132,14 +132,14 @@ pub fn isEmpty(expr: Expr) bool {
pub const Query = struct { expr: Expr, loc: logger.Loc, i: u32 = 0 };

pub fn hasAnyPropertyNamed(expr: *const Expr, comptime names: []const string) bool {
-    if (std.meta.activeTag(expr.data) != .e_object) return false;
+    if (expr.data != .e_object) return false;
    const obj = expr.data.e_object;
    if (obj.properties.len == 0) return false;

    for (obj.properties.slice()) |prop| {
        if (prop.value == null) continue;
        const key = prop.key orelse continue;
-        if (std.meta.activeTag(key.data) != .e_string) continue;
+        if (key.data != .e_string) continue;
        const key_str = key.data.e_string;
        if (strings.eqlAnyComptime(key_str.data, names)) return true;
    }

@@ -266,7 +266,7 @@ pub fn set(expr: *Expr, allocator: std.mem.Allocator, name: string, value: Expr)
    for (0..expr.data.e_object.properties.len) |i| {
        const prop = &expr.data.e_object.properties.ptr[i];
        const key = prop.key orelse continue;
-        if (std.meta.activeTag(key.data) != .e_string) continue;
+        if (key.data != .e_string) continue;
        if (key.data.e_string.eql(string, name)) {
            prop.value = value;
            return;

@@ -288,7 +288,7 @@ pub fn setString(expr: *Expr, allocator: std.mem.Allocator, name: string, value:
    for (0..expr.data.e_object.properties.len) |i| {
        const prop = &expr.data.e_object.properties.ptr[i];
        const key = prop.key orelse continue;
-        if (std.meta.activeTag(key.data) != .e_string) continue;
+        if (key.data != .e_string) continue;
        if (key.data.e_string.eql(string, name)) {
            prop.value = Expr.init(E.String, .{ .data = value }, logger.Loc.Empty);
            return;

@@ -310,6 +310,15 @@ pub fn getObject(expr: *const Expr, name: string) ?Expr {
    return null;
}

+pub fn getBoolean(expr: *const Expr, name: string) ?bool {
+    if (expr.asProperty(name)) |query| {
+        if (query.expr.data == .e_boolean) {
+            return query.expr.data.e_boolean.value;
+        }
+    }
+    return null;
+}

pub fn getString(expr: *const Expr, allocator: std.mem.Allocator, name: string) OOM!?struct { string, logger.Loc } {
    if (asProperty(expr, name)) |q| {
        if (q.expr.asString(allocator)) |str| {
@@ -385,7 +394,7 @@ pub fn getRope(self: *const Expr, rope: *const E.Object.Rope) ?E.Object.RopeQuer

// Making this comptime bloats the binary and doesn't seem to impact runtime performance.
pub fn asProperty(expr: *const Expr, name: string) ?Query {
-    if (std.meta.activeTag(expr.data) != .e_object) return null;
+    if (expr.data != .e_object) return null;
    const obj = expr.data.e_object;
    if (obj.properties.len == 0) return null;

@@ -393,7 +402,7 @@ pub fn asProperty(expr: *const Expr, name: string) ?Query {
}

pub fn asPropertyStringMap(expr: *const Expr, name: string, allocator: std.mem.Allocator) ?*bun.StringArrayHashMap(string) {
-    if (std.meta.activeTag(expr.data) != .e_object) return null;
+    if (expr.data != .e_object) return null;
    const obj_ = expr.data.e_object;
    if (obj_.properties.len == 0) return null;
    const query = obj_.asProperty(name) orelse return null;

@@ -439,7 +448,7 @@ pub const ArrayIterator = struct {
};

pub fn asArray(expr: *const Expr) ?ArrayIterator {
-    if (std.meta.activeTag(expr.data) != .e_array) return null;
+    if (expr.data != .e_array) return null;
    const array = expr.data.e_array;
    if (array.items.len == 0) return null;

@@ -455,7 +464,7 @@ pub inline fn asUtf8StringLiteral(expr: *const Expr) ?string {
}

pub inline fn asStringLiteral(expr: *const Expr, allocator: std.mem.Allocator) ?string {
-    if (std.meta.activeTag(expr.data) != .e_string) return null;
+    if (expr.data != .e_string) return null;
    return expr.data.e_string.string(allocator) catch null;
}

@@ -501,7 +510,7 @@ pub inline fn asStringZ(expr: *const Expr, allocator: std.mem.Allocator) OOM!?st
pub fn asBool(
    expr: *const Expr,
) ?bool {
-    if (std.meta.activeTag(expr.data) != .e_boolean) return null;
+    if (expr.data != .e_boolean) return null;

    return expr.data.e_boolean.value;
}

@@ -522,7 +531,7 @@ const Serializable = struct {
};

pub fn isMissing(a: *const Expr) bool {
-    return std.meta.activeTag(a.data) == Expr.Tag.e_missing;
+    return a.data == Expr.Tag.e_missing;
}

// The goal of this function is to "rotate" the AST if it's possible to use the
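The recurring `std.meta.activeTag(x.data) != .e_string` → `x.data != .e_string` rewrite relies on Zig comparing a tagged union against an enum literal by its active tag. The closest C++ analogue is checking which alternative a `std::variant` holds; a sketch for intuition only, with invented types:

    #include <string>
    #include <variant>

    using ExprData = std::variant<std::monostate, std::string, bool>;

    bool isString(const ExprData& data) {
        // Equivalent in spirit to `data == .e_string` in the Zig diff:
        // test the active alternative without extracting the payload.
        return std::holds_alternative<std::string>(data);
    }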
@@ -325,7 +325,7 @@ pub const Runner = struct {
        return _entry.value_ptr.*;
    }

-    var blob_: ?jsc.WebCore.Blob = null;
+    var blob_: ?*const jsc.WebCore.Blob = null;
    const mime_type: ?MimeType = null;

    if (value.jsType() == .DOMWrapper) {

@@ -334,30 +334,23 @@ pub const Runner = struct {
    } else if (value.as(jsc.WebCore.Request)) |resp| {
        return this.run(try resp.getBlobWithoutCallFrame(this.global));
    } else if (value.as(jsc.WebCore.Blob)) |resp| {
-        blob_ = resp.*;
-        blob_.?.allocator = null;
+        blob_ = resp;
    } else if (value.as(bun.api.ResolveMessage) != null or value.as(bun.api.BuildMessage) != null) {
        _ = this.macro.vm.uncaughtException(this.global, value, false);
        return error.MacroFailed;
    }
}

-if (blob_) |*blob| {
-    const out_expr = Expr.fromBlob(
+if (blob_) |blob| {
+    return Expr.fromBlob(
        blob,
        this.allocator,
        mime_type,
        this.log,
        this.caller.loc,
    ) catch {
-        blob.deinit();
        return error.MacroFailed;
    };
-    if (out_expr.data == .e_string) {
-        blob.deinit();
-    }
-
-    return out_expr;
}

return Expr.init(E.String, E.String.empty, this.caller.loc);

@@ -371,41 +364,20 @@ pub const Runner = struct {

    const _entry = this.visited.getOrPut(this.allocator, value) catch unreachable;
    if (_entry.found_existing) {
-        switch (_entry.value_ptr.*.data) {
-            .e_object, .e_array => {
-                this.log.addErrorFmt(this.source, this.caller.loc, this.allocator, "converting circular structure to Bun AST is not implemented yet", .{}) catch unreachable;
-                return error.MacroFailed;
-            },
-            else => {},
-        }
        return _entry.value_ptr.*;
    }

    var iter = try jsc.JSArrayIterator.init(value, this.global);
-    if (iter.len == 0) {
-        const result = Expr.init(
-            E.Array,
-            E.Array{
-                .items = ExprNodeList.empty,
-                .was_originally_macro = true,
-            },
-            this.caller.loc,
-        );
-        _entry.value_ptr.* = result;
-        return result;
-    }

    // Process all array items
    var array = this.allocator.alloc(Expr, iter.len) catch unreachable;
-    var out = Expr.init(
+    errdefer this.allocator.free(array);
+    const expr = Expr.init(
        E.Array,
-        E.Array{
-            .items = ExprNodeList.empty,
-            .was_originally_macro = true,
-        },
+        E.Array{ .items = ExprNodeList.empty, .was_originally_macro = true },
        this.caller.loc,
    );
-    _entry.value_ptr.* = out;
-
-    errdefer this.allocator.free(array);
+    _entry.value_ptr.* = expr;
    var i: usize = 0;
    while (try iter.next()) |item| {
        array[i] = try this.run(item);

@@ -413,24 +385,27 @@ pub const Runner = struct {
            continue;
        i += 1;
    }
-    out.data.e_array.items = ExprNodeList.fromOwnedSlice(array);
-    _entry.value_ptr.* = out;
-    return out;
+    expr.data.e_array.items = ExprNodeList.fromOwnedSlice(array);
+    expr.data.e_array.items.len = @truncate(i);
+    return expr;
},
// TODO: optimize this
jsc.ConsoleObject.Formatter.Tag.Object => {
    this.is_top_level = false;
    const _entry = this.visited.getOrPut(this.allocator, value) catch unreachable;
    if (_entry.found_existing) {
        switch (_entry.value_ptr.*.data) {
            .e_object, .e_array => {
                this.log.addErrorFmt(this.source, this.caller.loc, this.allocator, "converting circular structure to Bun AST is not implemented yet", .{}) catch unreachable;
                return error.MacroFailed;
            },
            else => {},
        }
        return _entry.value_ptr.*;
    }

+    // Reserve a placeholder to break cycles.
+    const expr = Expr.init(
+        E.Object,
+        E.Object{ .properties = G.Property.List{}, .was_originally_macro = true },
+        this.caller.loc,
+    );
+    _entry.value_ptr.* = expr;

    // SAFETY: tag ensures `value` is an object.
    const obj = value.getObject() orelse unreachable;
    var object_iter = try jsc.JSPropertyIterator(.{

@@ -439,36 +414,28 @@ pub const Runner = struct {
    }).init(this.global, obj);
    defer object_iter.deinit();

-    const out = _entry.value_ptr;
-    out.* = Expr.init(
-        E.Object,
-        E.Object{
-            .properties = bun.handleOom(
-                G.Property.List.initCapacity(this.allocator, object_iter.len),
-            ),
-            .was_originally_macro = true,
-        },
-        this.caller.loc,
+    // Build properties list
+    var properties = bun.handleOom(
+        G.Property.List.initCapacity(this.allocator, object_iter.len),
    );
-    const properties = &out.data.e_object.properties;
+    errdefer properties.clearAndFree(this.allocator);

    while (try object_iter.next()) |prop| {
-        bun.assertf(
-            object_iter.i == properties.len,
-            "`properties` unexpectedly modified (length {d}, expected {d})",
-            .{ properties.len, object_iter.i },
-        );
-        properties.appendAssumeCapacity(G.Property{
+        const object_value = try this.run(object_iter.value);
+
+        properties.append(this.allocator, G.Property{
            .key = Expr.init(
                E.String,
                E.String.init(prop.toOwnedSlice(this.allocator) catch unreachable),
                this.caller.loc,
            ),
-            .value = try this.run(object_iter.value),
-        });
+            .value = object_value,
+        }) catch |err| bun.handleOom(err);
    }
-    return out.*;
+
+    expr.data.e_object.properties = properties;
+
+    return expr;
},

.JSON => {
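The object branch now reserves a placeholder in `visited` before recursing, so a self-referencing object is caught as `found_existing` instead of recursing forever. The same pattern in C++ — a sketch under assumed types, not the macro runner's real API:

    #include <stdexcept>
    #include <unordered_map>
    #include <vector>

    struct Node { std::vector<Node*> children; };
    struct Ast { std::vector<Ast*> children; };

    // Reserve a placeholder before recursing, mirroring the diff's
    // "Reserve a placeholder to break cycles." A nullptr placeholder means
    // "conversion in progress", so revisiting it means the input is circular.
    Ast* convert(Node* node, std::unordered_map<Node*, Ast*>& visited) {
        if (auto it = visited.find(node); it != visited.end()) {
            if (it->second == nullptr)
                throw std::runtime_error("converting circular structure is not implemented");
            return it->second;
        }
        visited[node] = nullptr; // placeholder: conversion in progress
        Ast* out = new Ast();
        for (Node* child : node->children)
            out->children.push_back(convert(child, visited));
        visited[node] = out; // replace the placeholder with the finished node
        return out;
    }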
src/ast/P.zig (125 changed lines)
@@ -170,6 +170,21 @@ pub fn NewParser_(
|
||||
dirname_ref: Ref = Ref.None,
|
||||
import_meta_ref: Ref = Ref.None,
|
||||
hmr_api_ref: Ref = Ref.None,
|
||||
|
||||
/// If bake is enabled and this is a server-side file, we want to use
|
||||
/// special `Response` class inside the `bun:app` built-in module to
|
||||
/// support syntax like `return Response(<jsx />, {...})` or `return Response.render("/my-page")`
|
||||
/// or `return Response.redirect("/other")`.
|
||||
///
|
||||
/// So we'll need to add a `import { Response } from 'bun:app'` to the
|
||||
/// top of the file
|
||||
///
|
||||
/// We need to declare this `response_ref` upfront
|
||||
response_ref: Ref = Ref.None,
|
||||
/// We also need to declare the namespace ref for `bun:app` and attach
|
||||
/// it to the symbol so the code generated `e_import_identifier`'s
|
||||
bun_app_namespace_ref: Ref = Ref.None,
|
||||
|
||||
scopes_in_order_visitor_index: usize = 0,
|
||||
has_classic_runtime_warned: bool = false,
|
||||
macro_call_count: MacroCallCountType = 0,
|
||||
@@ -954,7 +969,7 @@ pub fn NewParser_(
|
||||
switch (call.target.data) {
|
||||
.e_identifier => |ident| {
|
||||
// is this a require("something")
|
||||
if (strings.eqlComptime(p.loadNameFromRef(ident.ref), "require") and call.args.len == 1 and std.meta.activeTag(call.args.ptr[0].data) == .e_string) {
|
||||
if (strings.eqlComptime(p.loadNameFromRef(ident.ref), "require") and call.args.len == 1 and call.args.ptr[0].data == .e_string) {
|
||||
_ = p.addImportRecord(.require, loc, call.args.at(0).data.e_string.string(p.allocator) catch unreachable);
|
||||
}
|
||||
},
|
||||
@@ -970,7 +985,7 @@ pub fn NewParser_(
|
||||
switch (call.target.data) {
|
||||
.e_identifier => |ident| {
|
||||
// is this a require("something")
|
||||
if (strings.eqlComptime(p.loadNameFromRef(ident.ref), "require") and call.args.len == 1 and std.meta.activeTag(call.args.ptr[0].data) == .e_string) {
|
||||
if (strings.eqlComptime(p.loadNameFromRef(ident.ref), "require") and call.args.len == 1 and call.args.ptr[0].data == .e_string) {
|
||||
_ = p.addImportRecord(.require, loc, call.args.at(0).data.e_string.string(p.allocator) catch unreachable);
|
||||
}
|
||||
},
|
||||
@@ -1220,6 +1235,81 @@ pub fn NewParser_(
};
}

+pub fn generateImportStmtForBakeResponse(
+noalias p: *P,
+parts: *ListManaged(js_ast.Part),
+) !void {
+bun.assert(!p.response_ref.isNull());
+bun.assert(!p.bun_app_namespace_ref.isNull());
+const allocator = p.allocator;
+
+const import_path = "bun:app";
+
+const import_record_i = p.addImportRecordByRange(.stmt, logger.Range.None, import_path);
+
+var declared_symbols = DeclaredSymbol.List{};
+try declared_symbols.ensureTotalCapacity(allocator, 2);
+
+var stmts = try allocator.alloc(Stmt, 1);
+
+declared_symbols.appendAssumeCapacity(
+DeclaredSymbol{ .ref = p.bun_app_namespace_ref, .is_top_level = true },
+);
+try p.module_scope.generated.append(allocator, p.bun_app_namespace_ref);
+
+const clause_items = try allocator.dupe(js_ast.ClauseItem, &.{
+js_ast.ClauseItem{
+.alias = "Response",
+.original_name = "Response",
+.alias_loc = logger.Loc{},
+.name = LocRef{ .ref = p.response_ref, .loc = logger.Loc{} },
+},
+});
+
+declared_symbols.appendAssumeCapacity(DeclaredSymbol{
+.ref = p.response_ref,
+.is_top_level = true,
+});
+
+// ensure every e_import_identifier holds the namespace
+if (p.options.features.hot_module_reloading) {
+const symbol = &p.symbols.items[p.response_ref.inner_index];
+bun.assert(symbol.namespace_alias != null);
+symbol.namespace_alias.?.import_record_index = import_record_i;
+}
+
+try p.is_import_item.put(allocator, p.response_ref, {});
+try p.named_imports.put(allocator, p.response_ref, js_ast.NamedImport{
+.alias = "Response",
+.alias_loc = logger.Loc{},
+.namespace_ref = p.bun_app_namespace_ref,
+.import_record_index = import_record_i,
+});
+
+stmts[0] = p.s(
+S.Import{
+.namespace_ref = p.bun_app_namespace_ref,
+.items = clause_items,
+.import_record_index = import_record_i,
+.is_single_line = true,
+},
+logger.Loc{},
+);
+
+var import_records = try allocator.alloc(u32, 1);
+import_records[0] = import_record_i;
+
+// This import is placed in a part before the main code, however
+// the bundler ends up re-ordering this to be after... The order
+// does not matter as ESM imports are always hoisted.
+parts.append(js_ast.Part{
+.stmts = stmts,
+.declared_symbols = declared_symbols,
+.import_record_indices = bun.BabyList(u32).fromOwnedSlice(import_records),
+.tag = .runtime,
+}) catch unreachable;
+}
+
pub fn generateImportStmt(
noalias p: *P,
import_path: string,
@@ -1227,7 +1317,7 @@ pub fn NewParser_(
parts: *ListManaged(js_ast.Part),
symbols: anytype,
additional_stmt: ?Stmt,
-comptime suffix: string,
+comptime prefix: string,
comptime is_internal: bool,
) anyerror!void {
const allocator = p.allocator;
@@ -1237,13 +1327,13 @@ pub fn NewParser_(
import_record.path.namespace = "runtime";
import_record.is_internal = is_internal;
const import_path_identifier = try import_record.path.name.nonUniqueNameString(allocator);
-var namespace_identifier = try allocator.alloc(u8, import_path_identifier.len + suffix.len);
+var namespace_identifier = try allocator.alloc(u8, import_path_identifier.len + prefix.len);
const clause_items = try allocator.alloc(js_ast.ClauseItem, imports.len);
var stmts = try allocator.alloc(Stmt, 1 + if (additional_stmt != null) @as(usize, 1) else @as(usize, 0));
var declared_symbols = DeclaredSymbol.List{};
try declared_symbols.ensureTotalCapacity(allocator, imports.len + 1);
-bun.copy(u8, namespace_identifier, suffix);
-bun.copy(u8, namespace_identifier[suffix.len..], import_path_identifier);
+bun.copy(u8, namespace_identifier, prefix);
+bun.copy(u8, namespace_identifier[prefix.len..], import_path_identifier);

const namespace_ref = try p.newSymbol(.other, namespace_identifier);
declared_symbols.appendAssumeCapacity(.{
@@ -2014,6 +2104,25 @@ pub fn NewParser_(
.wrap_exports_for_server_reference => {},
}

+// Server-side components:
+// Declare upfront the symbols for "Response" and "bun:app"
+switch (p.options.features.server_components) {
+.none, .client_side => {},
+else => {
+p.response_ref = try p.declareGeneratedSymbol(.import, "Response");
+p.bun_app_namespace_ref = try p.newSymbol(
+.other,
+"import_bun_app",
+);
+const symbol = &p.symbols.items[p.response_ref.inner_index];
+symbol.namespace_alias = .{
+.namespace_ref = p.bun_app_namespace_ref,
+.alias = "Response",
+.import_record_index = std.math.maxInt(u32),
+};
+},
+}
+
if (p.options.features.hot_module_reloading) {
p.hmr_api_ref = try p.declareCommonJSSymbol(.unbound, "hmr");
}
@@ -3071,7 +3180,7 @@ pub fn NewParser_(
return ref;
}

-fn declareGeneratedSymbol(p: *P, kind: Symbol.Kind, comptime name: string) !Ref {
+pub fn declareGeneratedSymbol(p: *P, kind: Symbol.Kind, comptime name: string) !Ref {
// The bundler runs the renamer, so it is ok to not append a hash
if (p.options.bundle) {
return try declareSymbolMaybeGenerated(p, kind, logger.Loc.Empty, name, true);
@@ -3428,7 +3537,7 @@ pub fn NewParser_(
// Insert any relocated variable statements now
if (p.relocated_top_level_vars.items.len > 0) {
var already_declared = RefMap{};
-var already_declared_allocator_stack = std.heap.stackFallback(1024, allocator);
+var already_declared_allocator_stack = bun.allocators.stackFallback(1024, allocator);
const already_declared_allocator = already_declared_allocator_stack.get();
defer if (already_declared_allocator_stack.fixed_buffer_allocator.end_index >= 1023) already_declared.deinit(already_declared_allocator);
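This is the first of many hunks in this compare that swap `std.heap.stackFallback` for a `bun.allocators.stackFallback` wrapper. For context, a minimal sketch of the stack-fallback allocator pattern using the std API the call sites previously used (the wrapper's internals are not shown in this diff):

```zig
const std = @import("std");

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();

    // Allocations that fit in the 1024-byte stack buffer are served from it;
    // larger or overflowing requests fall back to the wrapped allocator.
    var sfb = std.heap.stackFallback(1024, gpa_state.allocator());
    const alloc = sfb.get();

    const small = try alloc.alloc(u8, 64); // stack-backed
    defer alloc.free(small);
    const large = try alloc.alloc(u8, 4096); // falls back to the GPA
    defer alloc.free(large);
}
```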
@@ -1357,6 +1357,16 @@ pub const Parser = struct {
);
}

+// Bake: transform global `Response` to use `import { Response } from 'bun:app'`
+if (!p.response_ref.isNull() and is_used_and_has_no_links: {
+// We only want to do this if the symbol is used and didn't get
+// bound to some other value
+const symbol: *const Symbol = &p.symbols.items[p.response_ref.innerIndex()];
+break :is_used_and_has_no_links !symbol.hasLink() and symbol.use_count_estimate > 0;
+}) {
+try p.generateImportStmtForBakeResponse(&before);
+}
+
if (before.items.len > 0 or after.items.len > 0) {
try parts.ensureUnusedCapacity(before.items.len + after.items.len);
const parts_len = parts.items.len;
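The condition added above uses a Zig labeled block as an inline predicate: the block computes a `bool`, and `break :label value` yields it to the enclosing `if`. A minimal sketch of the pattern (names hypothetical):

```zig
fn shouldRewrite(use_count_estimate: u32, has_link: bool) bool {
    return is_used_and_unlinked: {
        // mirror the check above: used at least once, and never
        // re-bound (linked) to another symbol
        if (use_count_estimate == 0) break :is_used_and_unlinked false;
        break :is_used_and_unlinked !has_link;
    };
}
```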
@@ -194,11 +194,11 @@ pub fn isTSArrowFnJSX(p: anytype) !bool {
}
if (p.lexer.token == .t_identifier) {
try p.lexer.next();
-if (p.lexer.token == .t_comma) {
+if (p.lexer.token == .t_comma or p.lexer.token == .t_equals) {
is_ts_arrow_fn = true;
} else if (p.lexer.token == .t_extends) {
try p.lexer.next();
-is_ts_arrow_fn = p.lexer.token != .t_equals and p.lexer.token != .t_greater_than;
+is_ts_arrow_fn = p.lexer.token != .t_equals and p.lexer.token != .t_greater_than and p.lexer.token != .t_slash;
}
}
@@ -827,24 +827,25 @@ pub fn ParseSuffix(
const optional_chain = &optional_chain_;
while (true) {
if (p.lexer.loc().start == p.after_arrow_body_loc.start) {
-while (true) {
-switch (p.lexer.token) {
-.t_comma => {
-if (level.gte(.comma)) {
-break;
-}
+defer left_and_out.* = left_value;
+next_token: switch (p.lexer.token) {
+.t_comma => {
+if (level.gte(.comma)) {
+return;
+}

-try p.lexer.next();
-left.* = p.newExpr(E.Binary{
-.op = .bin_comma,
-.left = left.*,
-.right = try p.parseExpr(.comma),
-}, left.loc);
-},
-else => {
-break;
-},
-}
+try p.lexer.next();
+left.* = p.newExpr(E.Binary{
+.op = .bin_comma,
+.left = left.*,
+.right = try p.parseExpr(.comma),
+}, left.loc);
+
+continue :next_token p.lexer.token;
+},
+else => {
+return;
+},
+}
}
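The replacement relies on Zig's labeled `switch` with `continue :label value` (0.14+), which re-dispatches the switch on a fresh operand instead of wrapping it in a loop. A minimal sketch of that control flow (the token type here is hypothetical):

```zig
const Token = enum { t_comma, t_semicolon, t_eof };

fn countLeadingCommas(tokens: []const Token) usize {
    var i: usize = 0;
    next_token: switch (tokens[i]) {
        .t_comma => {
            i += 1;
            // jump back to the top of the switch with a new operand,
            // like the parser does after consuming a comma expression
            continue :next_token tokens[i];
        },
        else => {},
    }
    return i;
}
```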
@@ -64,7 +64,7 @@ pub fn VisitExpr(
}
pub fn e_import_meta(p: *P, expr: Expr, in: ExprIn) Expr {
// TODO: delete import.meta might not work
-const is_delete_target = std.meta.activeTag(p.delete_target) == .e_import_meta;
+const is_delete_target = p.delete_target == .e_import_meta;

if (p.define.dots.get("meta")) |meta| {
for (meta) |define| {
@@ -609,7 +609,8 @@ pub fn VisitExpr(
p.delete_target = dot.data;
}

-return p.visitExprInOut(dot, in);
+// don't call visitExprInOut on `dot` because we've already visited `target` above!
+return dot;
}

// Handle property rewrites to ensure things
@@ -1444,9 +1445,20 @@ pub fn VisitExpr(
// Why? Because we *don't* want to check for uses of
// `useState` _inside_ React, and we know React uses
// commonjs so it will never be `.e_import_identifier`.
-e_.target.data == .e_import_identifier) {
+check_for_usestate: {
+if (e_.target.data == .e_import_identifier) break :check_for_usestate true;
+// Also check for `React.useState(...)`
+if (e_.target.data == .e_dot and e_.target.data.e_dot.target.data == .e_import_identifier) {
+const id = e_.target.data.e_dot.target.data.e_import_identifier;
+const name = p.symbols.items[id.ref.innerIndex()].original_name;
+break :check_for_usestate bun.strings.eqlComptime(name, "React");
+}
+break :check_for_usestate false;
+}) {
bun.assert(p.options.features.server_components.isServerSide());
-if (bun.strings.eqlComptime(original_name, "useState")) {
+if (!bun.strings.startsWith(p.source.path.pretty, "node_modules") and
+bun.strings.eqlComptime(original_name, "useState"))
+{
p.log.addError(
p.source,
expr.loc,
src/bake.zig (35 changed lines)
@@ -27,9 +27,6 @@ pub const UserOptions = struct {

/// Currently, this function must run at the top of the event loop.
pub fn fromJS(config: JSValue, global: *jsc.JSGlobalObject) !UserOptions {
-if (!config.isObject()) {
-return global.throwInvalidArguments("'" ++ api_name ++ "' is not an object", .{});
-}
var arena = std.heap.ArenaAllocator.init(bun.default_allocator);
errdefer arena.deinit();
const alloc = arena.allocator();
@@ -38,6 +35,38 @@ pub const UserOptions = struct {
errdefer allocations.free();
var bundler_options = SplitBundlerOptions.empty;

+if (!config.isObject()) {
+// Allow users to do `export default { app: 'react' }` for convenience
+if (config.isString()) {
+const bunstr = try config.toBunString(global);
+defer bunstr.deref();
+const utf8_string = bunstr.toUTF8(bun.default_allocator);
+defer utf8_string.deinit();
+
+if (bun.strings.eql(utf8_string.byteSlice(), "react")) {
+const root = bun.getcwdAlloc(alloc) catch |err| switch (err) {
+error.OutOfMemory => {
+return global.throwOutOfMemory();
+},
+else => {
+return global.throwError(err, "while querying current working directory");
+},
+};
+
+const framework = try Framework.react(alloc);
+
+return UserOptions{
+.arena = arena,
+.allocations = allocations,
+.root = root,
+.framework = framework,
+.bundler_options = bundler_options,
+};
+}
+}
+return global.throwInvalidArguments("'" ++ api_name ++ "' is not an object", .{});
+}
+
if (try config.getOptional(global, "bundlerOptions", JSValue)) |js_options| {
if (try js_options.getOptional(global, "server", JSValue)) |server_options| {
bundler_options.server = try BuildConfigSubset.fromJS(global, server_options);
@@ -1,5 +1,6 @@
// clang-format off
#include "BakeSourceProvider.h"
+#include "DevServerSourceProvider.h"
#include "BakeGlobalObject.h"
#include "JavaScriptCore/CallData.h"
#include "JavaScriptCore/Completion.h"
@@ -78,6 +79,34 @@ extern "C" JSC::EncodedJSValue BakeLoadServerHmrPatch(GlobalObject* global, BunS
return JSC::JSValue::encode(result);
}

+extern "C" JSC::EncodedJSValue BakeLoadServerHmrPatchWithSourceMap(GlobalObject* global, BunString source, const char* sourceMapJSONPtr, size_t sourceMapJSONLength) {
+JSC::VM& vm = global->vm();
+auto scope = DECLARE_THROW_SCOPE(vm);
+
+String string = "bake://server.patch.js"_s;
+JSC::SourceOrigin origin = JSC::SourceOrigin(WTF::URL(string));
+
+// Use DevServerSourceProvider with the source map JSON
+auto provider = DevServerSourceProvider::create(
+global,
+source.toWTFString(),
+sourceMapJSONPtr,
+sourceMapJSONLength,
+origin,
+WTFMove(string),
+WTF::TextPosition(),
+JSC::SourceProviderSourceType::Program
+);
+
+JSC::SourceCode sourceCode = JSC::SourceCode(provider);
+
+JSC::JSValue result = vm.interpreter.executeProgram(sourceCode, global, global);
+RETURN_IF_EXCEPTION(scope, {});
+
+RELEASE_ASSERT(result);
+return JSC::JSValue::encode(result);
+}
+
extern "C" JSC::EncodedJSValue BakeGetModuleNamespace(
JSC::JSGlobalObject* global,
JSC::JSValue keyValue
File diff suppressed because it is too large
@@ -41,8 +41,8 @@ pub fn runWithBody(ctx: *ErrorReportRequest, body: []const u8, r: AnyResponse) !
var s = std.io.fixedBufferStream(body);
const reader = s.reader();

-var sfa_general = std.heap.stackFallback(65536, ctx.dev.allocator());
-var sfa_sourcemap = std.heap.stackFallback(65536, ctx.dev.allocator());
+var sfa_general = bun.allocators.stackFallback(65536, ctx.dev.allocator());
+var sfa_sourcemap = bun.allocators.stackFallback(65536, ctx.dev.allocator());
const temp_alloc = sfa_general.get();
var arena = std.heap.ArenaAllocator.init(temp_alloc);
defer arena.deinit();
@@ -187,7 +187,7 @@ pub fn run(first: *HotReloadEvent) void {
return;
}

-var sfb = std.heap.stackFallback(4096, dev.allocator());
+var sfb = bun.allocators.stackFallback(4096, dev.allocator());
const temp_alloc = sfb.get();
var entry_points: EntryPointList = .empty;
defer entry_points.deinit(temp_alloc);
@@ -269,9 +269,11 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
/// All part contents
current_chunk_parts: ArrayListUnmanaged(switch (side) {
.client => FileIndex,
-// These slices do not outlive the bundler, and must
-// be joined before its arena is deinitialized.
-.server => []const u8,
+// This memory is allocated by the dev server allocator
+.server => bun.ptr.OwnedIn(
+[]const u8,
+bun.bake.DevServer.DevAllocator,
+),
}),

/// Asset IDs, which can be printed as hex in '/_bun/asset/{hash}.css'
@@ -280,6 +282,10 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
.server => void,
},

+/// Source maps for server chunks and the file indices to track which
+/// file each chunk comes from
+current_chunk_source_maps: if (side == .server) ArrayListUnmanaged(CurrentChunkSourceMapData) else void = if (side == .server) .empty,
+
pub const empty: Self = .{
.bundled_files = .empty,
.stale_files = .empty,
@@ -293,6 +299,16 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
.current_chunk_parts = .empty,

.current_css_files = if (side == .client) .empty,
+.current_chunk_source_maps = if (side == .server) .empty else {},
};

+const CurrentChunkSourceMapData = struct {
+file_index: FileIndex,
+source_map: PackedMap.Shared,
+
+pub fn deinit(self: *CurrentChunkSourceMapData) void {
+self.source_map.deinit();
+}
+};
+
pub const File = switch (side) {
@@ -378,9 +394,19 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
.edges = g.edges.deinit(alloc),
.edges_free_list = g.edges_free_list.deinit(alloc),
.current_chunk_len = {},
-.current_chunk_parts = g.current_chunk_parts.deinit(alloc),
-.current_css_files = if (comptime side == .client)
-g.current_css_files.deinit(alloc),
+.current_chunk_parts = {
+if (comptime side == .server) {
+for (g.current_chunk_parts.items) |*part| part.deinit();
+}
+g.current_chunk_parts.deinit(alloc);
+},
+.current_css_files = if (comptime side == .client) g.current_css_files.deinit(alloc),
+.current_chunk_source_maps = if (side == .server) {
+for (g.current_chunk_source_maps.items) |*source_map| {
+source_map.deinit();
+}
+g.current_chunk_source_maps.deinit(alloc);
+},
});
}
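The `deinit` change above follows a common Zig pattern: when an `ArrayListUnmanaged` holds owning elements, each element's payload must be freed before the list's own storage is released. A minimal self-contained sketch (types hypothetical):

```zig
const std = @import("std");

const Part = struct {
    bytes: []u8,

    fn deinit(self: *Part, alloc: std.mem.Allocator) void {
        alloc.free(self.bytes);
    }
};

fn deinitParts(list: *std.ArrayListUnmanaged(Part), alloc: std.mem.Allocator) void {
    // free each element's owned payload first...
    for (list.items) |*part| part.deinit(alloc);
    // ...then release the list's backing storage
    list.deinit(alloc);
}
```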
@@ -412,6 +438,11 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
}
source_maps += file.source_map.memoryCost();
}
+} else if (side == .server) {
+graph += DevServer.memoryCostArrayList(g.current_chunk_source_maps);
+for (g.current_chunk_source_maps.items) |item| {
+source_maps += item.source_map.memoryCost();
+}
}
return .{
.graph = graph,
@@ -445,7 +476,7 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
g: *Self,
ctx: *HotUpdateContext,
index: bun.ast.Index,
-content_: union(enum) {
+_content: union(enum) {
js: struct {
code: JsCode,
source_map: ?struct {
@@ -457,13 +488,16 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
},
is_ssr_graph: bool,
) !void {
-var content = content_;
+var content = _content;
const dev = g.owner();
dev.graph_safety_lock.assertLocked();

const path = ctx.sources[index.get()].path;
const key = path.keyForIncrementalGraph();

+const log = bun.Output.scoped(.IncrementalGraphReceiveChunk, .visible);
+log("receiveChunk({s}, {s})", .{ @tagName(side), key });
+
if (Environment.allow_assert) {
switch (content) {
.css => {},
@@ -546,7 +580,7 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
bun.assert(html_route_bundle_index == null); // suspect behind #17956
if (source_map.chunk.buffer.len() > 0) {
break :blk .{ .some = PackedMap.newNonEmpty(
-source_map.chunk,
+&source_map.chunk,
source_map.escaped_source.take().?,
) };
}
@@ -632,11 +666,47 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
}
}
if (content == .js) {
-try g.current_chunk_parts.append(dev.allocator(), content.js.code);
+try g.current_chunk_parts.append(
+dev.allocator(),
+bun.ptr.OwnedIn([]const u8, bun.bake.DevServer.DevAllocator).fromRawIn(
+content.js.code,
+dev.dev_allocator(),
+),
+);
g.current_chunk_len += content.js.code.len;
-if (content.js.source_map) |*source_map| {
-source_map.chunk.buffer.deinit();
-source_map.escaped_source.deinit();

+// TODO: we probably want to store SSR chunks but not
+// server chunks, but not 100% sure
+const should_immediately_free_sourcemap = false;
+if (should_immediately_free_sourcemap) {
+@compileError("Not implemented the codepath to free the sourcemap");
+} else {
+if (content.js.source_map) |*source_map| append_empty: {
+defer source_map.chunk.deinit();
+defer source_map.escaped_source.deinit();
+if (source_map.chunk.buffer.len() > 0) {
+const escaped_source = source_map.escaped_source.take() orelse break :append_empty;
+const packed_map: PackedMap.Shared = .{ .some = PackedMap.newNonEmpty(
+&source_map.chunk,
+escaped_source,
+) };
+try g.current_chunk_source_maps.append(dev.allocator(), CurrentChunkSourceMapData{
+.source_map = packed_map,
+.file_index = file_index,
+});
+return;
+}
+}
+
+// Must precompute this. Otherwise, source maps won't have
+// the info needed to concatenate VLQ mappings.
+const count: u32 = @intCast(bun.strings.countChar(content.js.code, '\n'));
+try g.current_chunk_source_maps.append(dev.allocator(), .{
+.file_index = file_index,
+.source_map = PackedMap.Shared{
+.line_count = .init(count),
+},
+});
+}
}
},
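One detail worth noting in the hunk above: even when a chunk carries no source map of its own, its generated line count is recorded, because concatenating VLQ `mappings` later requires knowing how many lines each chunk spans. A hedged sketch of that counting step:

```zig
const std = @import("std");

// counts generated lines the way the hunk above does with
// bun.strings.countChar; std.mem.count is the stdlib equivalent
fn generatedLineCount(code: []const u8) u32 {
    return @intCast(std.mem.count(u8, code, "\n"));
}

test generatedLineCount {
    try std.testing.expectEqual(@as(u32, 2), generatedLineCount("a;\nb;\nc;"));
}
```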
@@ -808,7 +878,7 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
bun.assert(bundler_index.isValid());
bun.assert(ctx.loaders[bundler_index.get()].isCSS());

-var sfb = std.heap.stackFallback(@sizeOf(bun.ast.Index) * 64, temp_alloc);
+var sfb = bun.allocators.stackFallback(@sizeOf(bun.ast.Index) * 64, temp_alloc);
const queue_alloc = sfb.get();

// This queue avoids stack overflow.
@@ -1598,10 +1668,17 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
pub fn reset(g: *Self) void {
g.owner().graph_safety_lock.assertLocked();
g.current_chunk_len = 0;
-g.current_chunk_parts.clearRetainingCapacity();

if (comptime side == .client) {
g.current_css_files.clearRetainingCapacity();
+} else if (comptime side == .server) {
+for (g.current_chunk_parts.items) |*part| part.deinit();
+
+for (g.current_chunk_source_maps.items) |*sourcemap| sourcemap.deinit();
+g.current_chunk_source_maps.clearRetainingCapacity();
}

+g.current_chunk_parts.clearRetainingCapacity();
}

const TakeJSBundleOptions = switch (side) {
@@ -1614,6 +1691,7 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
},
.server => struct {
kind: ChunkKind,
+script_id: SourceMapStore.Key,
},
};
||||
@@ -1650,7 +1728,7 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
|
||||
// to inform the HMR runtime some crucial entry-point info. The
|
||||
// exact upper bound of this can be calculated, but is not to
|
||||
// avoid worrying about windows paths.
|
||||
var end_sfa = std.heap.stackFallback(65536, g.allocator());
|
||||
var end_sfa = bun.allocators.stackFallback(65536, g.allocator());
|
||||
var end_list = std.ArrayList(u8).initCapacity(end_sfa.get(), 65536) catch unreachable;
|
||||
defer end_list.deinit();
|
||||
const end = end: {
|
||||
@@ -1729,7 +1807,7 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
|
||||
// entry is an index into files
|
||||
.client => files[entry.get()].unpack().jsCode().?,
|
||||
// entry is the '[]const u8' itself
|
||||
.server => entry,
|
||||
.server => entry.get(),
|
||||
});
|
||||
}
|
||||
list.appendSliceAssumeCapacity(end);
|
||||
@@ -1756,46 +1834,71 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
};

/// Uses `arena` as a temporary allocator, fills in all fields of `out` except ref_count
-pub fn takeSourceMap(g: *Self, arena: std.mem.Allocator, gpa: Allocator, out: *SourceMapStore.Entry) bun.OOM!void {
-if (comptime side == .server) @compileError("not implemented");
-
+pub fn takeSourceMap(g: *@This(), _: std.mem.Allocator, gpa: Allocator, out: *SourceMapStore.Entry) bun.OOM!void {
const paths = g.bundled_files.keys();
-const files = g.bundled_files.values();
-
-// This buffer is temporary, holding the quoted source paths, joined with commas.
-var source_map_strings = std.ArrayList(u8).init(arena);
-defer source_map_strings.deinit();
+switch (side) {
+.client => {
+const files = g.bundled_files.values();
+
-const buf = bun.path_buffer_pool.get();
-defer bun.path_buffer_pool.put(buf);
+const buf = bun.path_buffer_pool.get();
+defer bun.path_buffer_pool.put(buf);

-var file_paths = try ArrayListUnmanaged([]const u8).initCapacity(gpa, g.current_chunk_parts.items.len);
-errdefer file_paths.deinit(gpa);
-var contained_maps: bun.MultiArrayList(PackedMap.Shared) = .empty;
-try contained_maps.ensureTotalCapacity(gpa, g.current_chunk_parts.items.len);
-errdefer contained_maps.deinit(gpa);
+var file_paths = try ArrayListUnmanaged([]const u8).initCapacity(gpa, g.current_chunk_parts.items.len);
+errdefer file_paths.deinit(gpa);
+var contained_maps: bun.MultiArrayList(PackedMap.Shared) = .empty;
+try contained_maps.ensureTotalCapacity(gpa, g.current_chunk_parts.items.len);
+errdefer contained_maps.deinit(gpa);

-var overlapping_memory_cost: usize = 0;
+var overlapping_memory_cost: usize = 0;

-for (g.current_chunk_parts.items) |file_index| {
-file_paths.appendAssumeCapacity(paths[file_index.get()]);
-const source_map = files[file_index.get()].unpack().source_map.clone();
-if (source_map.get()) |map| {
-overlapping_memory_cost += map.memoryCost();
-}
-contained_maps.appendAssumeCapacity(source_map);
+for (g.current_chunk_parts.items) |file_index| {
+file_paths.appendAssumeCapacity(paths[file_index.get()]);
+const source_map = files[file_index.get()].unpack().source_map.clone();
+if (source_map.get()) |map| {
+overlapping_memory_cost += map.memoryCost();
+}
+contained_maps.appendAssumeCapacity(source_map);
}

+overlapping_memory_cost += contained_maps.memoryCost() + DevServer.memoryCostSlice(file_paths.items);
+
+const ref_count = out.ref_count;
+out.* = .{
+.dev_allocator = g.dev_allocator(),
+.ref_count = ref_count,
+.paths = file_paths.items,
+.files = contained_maps,
+.overlapping_memory_cost = @intCast(overlapping_memory_cost),
+};
+},
+.server => {
+var file_paths = try ArrayListUnmanaged([]const u8).initCapacity(gpa, g.current_chunk_parts.items.len);
+errdefer file_paths.deinit(gpa);
+var contained_maps: bun.MultiArrayList(PackedMap.Shared) = .empty;
+try contained_maps.ensureTotalCapacity(gpa, g.current_chunk_parts.items.len);
+errdefer contained_maps.deinit(gpa);
+
+var overlapping_memory_cost: u32 = 0;
+
+// For server, we use the tracked file indices to get the correct paths
+for (g.current_chunk_source_maps.items) |item| {
+file_paths.appendAssumeCapacity(paths[item.file_index.get()]);
+contained_maps.appendAssumeCapacity(item.source_map.clone());
+overlapping_memory_cost += @intCast(item.source_map.memoryCost());
+}
+
+overlapping_memory_cost += @intCast(contained_maps.memoryCost() + DevServer.memoryCostSlice(file_paths.items));
+
+out.* = .{
+.dev_allocator = g.dev_allocator(),
+.ref_count = out.ref_count,
+.paths = file_paths.items,
+.files = contained_maps,
+.overlapping_memory_cost = overlapping_memory_cost,
+};
+},
+}

-overlapping_memory_cost += contained_maps.memoryCost() + DevServer.memoryCostSlice(file_paths.items);
-
-const ref_count = out.ref_count;
-out.* = .{
-.dev_allocator = g.dev_allocator(),
-.ref_count = ref_count,
-.paths = file_paths.items,
-.files = contained_maps,
-.overlapping_memory_cost = @intCast(overlapping_memory_cost),
-};
}

fn disconnectAndDeleteFile(g: *Self, file_index: FileIndex) void {
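Since `side` is a comptime parameter of `IncrementalGraph`, the `switch (side)` introduced above is resolved during compilation, so each instantiation contains only its own branch. A minimal sketch of the technique:

```zig
const std = @import("std");

const Side = enum { client, server };

fn Graph(comptime side: Side) type {
    return struct {
        // resolved at compile time; the other branch is never
        // code-generated for this instantiation
        pub fn describe() []const u8 {
            return switch (side) {
                .client => "client graph",
                .server => "server graph",
            };
        }
    };
}

test "comptime-specialized instantiations" {
    try std.testing.expectEqualStrings("client graph", Graph(.client).describe());
    try std.testing.expectEqualStrings("server graph", Graph(.server).describe());
}
```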
@@ -19,8 +19,8 @@ end_state: struct {
original_column: i32,
},

-pub fn newNonEmpty(chunk: SourceMap.Chunk, escaped_source: Owned([]u8)) bun.ptr.Shared(*Self) {
-var buffer = chunk.buffer;
+pub fn newNonEmpty(chunk: *SourceMap.Chunk, escaped_source: Owned([]u8)) bun.ptr.Shared(*Self) {
+var buffer = &chunk.buffer;
assert(!buffer.isEmpty());
const dev_allocator = DevAllocator.downcast(buffer.allocator);
return .new(.{
@@ -72,6 +72,8 @@ pub const State = enum {
unqueued,
/// A bundle associated with this route is happening
bundling,
+/// A bundle associated with this route *will happen in the next bundle*
+deferred_to_next_bundle,
/// This route was flagged for bundling failures. There are edge cases
/// where a route can be disconnected from its failures, so the route
/// imports have to be traced to discover if possible failures still
@@ -110,7 +110,7 @@ pub fn initFromJs(dev: *DevServer, owner: Owner, value: JSValue) !SerializedFail
@panic("TODO");
}
// Avoid small re-allocations without requesting so much from the heap
-var sfb = std.heap.stackFallback(65536, dev.allocator());
+var sfb = bun.allocators.stackFallback(65536, dev.allocator());
var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch
unreachable; // enough space
const w = payload.writer();
@@ -137,7 +137,7 @@ pub fn initFromLog(
assert(messages.len > 0);

// Avoid small re-allocations without requesting so much from the heap
-var sfb = std.heap.stackFallback(65536, dev.allocator());
+var sfb = bun.allocators.stackFallback(65536, dev.allocator());
var payload = std.ArrayList(u8).initCapacity(sfb.get(), 65536) catch
unreachable; // enough space
const w = payload.writer();
@@ -76,11 +76,11 @@ pub const Entry = struct {
pub fn renderMappings(map: Entry, kind: ChunkKind, arena: Allocator, gpa: Allocator) ![]u8 {
var j: StringJoiner = .{ .allocator = arena };
j.pushStatic("AAAA");
-try joinVLQ(&map, kind, &j, arena);
+try joinVLQ(&map, kind, &j, arena, .client);
return j.done(gpa);
}

-pub fn renderJSON(map: *const Entry, dev: *DevServer, arena: Allocator, kind: ChunkKind, gpa: Allocator) ![]u8 {
+pub fn renderJSON(map: *const Entry, dev: *DevServer, arena: Allocator, kind: ChunkKind, gpa: Allocator, side: bake.Side) ![]u8 {
const map_files = map.files.slice();
const paths = map.paths;

@@ -106,13 +106,22 @@ pub const Entry = struct {

if (std.fs.path.isAbsolute(path)) {
const is_windows_drive_path = Environment.isWindows and path[0] != '/';
-try source_map_strings.appendSlice(if (is_windows_drive_path)
-"\"file:///"
-else
-"\"file://");
-
+// On the client we prefix the sourcemap path with "file://" and
+// percent encode it
+if (side == .client) {
+try source_map_strings.appendSlice(if (is_windows_drive_path)
+"\"file:///"
+else
+"\"file://");
+} else {
+try source_map_strings.append('"');
+}

if (Environment.isWindows and !is_windows_drive_path) {
// UNC namespace -> file://server/share/path.ext
-bun.strings.percentEncodeWrite(
+encodeSourceMapPath(
+side,
if (path.len > 2 and path[0] == '/' and path[1] == '/')
path[2..]
else
@@ -127,7 +136,7 @@ pub const Entry = struct {
// -> file:///path/to/file.js
// windows drive letter paths have the extra slash added
// -> file:///C:/path/to/file.js
-bun.strings.percentEncodeWrite(path, &source_map_strings) catch |err| switch (err) {
+encodeSourceMapPath(side, path, &source_map_strings) catch |err| switch (err) {
error.IncompleteUTF8 => @panic("Unexpected: asset with incomplete UTF-8 as file path"),
error.OutOfMemory => |e| return e,
};
@@ -175,14 +184,14 @@ pub const Entry = struct {
j.pushStatic(
\\],"names":[],"mappings":"AAAA
);
-try joinVLQ(map, kind, &j, arena);
+try joinVLQ(map, kind, &j, arena, side);

const json_bytes = try j.doneWithEnd(gpa, "\"}");
errdefer @compileError("last try should be the final alloc");

if (bun.FeatureFlags.bake_debugging_features) if (dev.dump_dir) |dump_dir| {
-const rel_path_escaped = "latest_chunk.js.map";
-dumpBundle(dump_dir, .client, rel_path_escaped, json_bytes, false) catch |err| {
+const rel_path_escaped = if (side == .client) "latest_chunk.js.map" else "latest_hmr.js.map";
+dumpBundle(dump_dir, if (side == .client) .client else .server, rel_path_escaped, json_bytes, false) catch |err| {
bun.handleErrorReturnTrace(err, @errorReturnTrace());
Output.warn("Could not dump bundle: {}", .{err});
};
@@ -191,7 +200,22 @@ pub const Entry = struct {
return json_bytes;
}

-fn joinVLQ(map: *const Entry, kind: ChunkKind, j: *StringJoiner, arena: Allocator) !void {
+fn encodeSourceMapPath(
+side: bake.Side,
+utf8_input: []const u8,
+array_list: *std.ArrayList(u8),
+) error{ OutOfMemory, IncompleteUTF8 }!void {
+// On the client, percent encode everything so it works in the browser
+if (side == .client) {
+return bun.strings.percentEncodeWrite(utf8_input, array_list);
+}
+
+const writer = array_list.writer();
+try bun.js_printer.writePreQuotedString(utf8_input, @TypeOf(writer), writer, '"', false, true, .utf8);
+}
+
+fn joinVLQ(map: *const Entry, kind: ChunkKind, j: *StringJoiner, arena: Allocator, side: bake.Side) !void {
+_ = side;
const map_files = map.files.slice();

const runtime: bake.HmrRuntime = switch (kind) {
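`encodeSourceMapPath` above branches between browser-style percent encoding (client) and plain JSON string escaping (server). For the gist of the percent-encoding half, here is a hedged, self-contained sketch (a simplified stand-in, not bun's actual `percentEncodeWrite`):

```zig
const std = @import("std");

// percent-encode bytes outside the RFC 3986 unreserved set, keeping
// '/' and ':' so file URLs stay readable
fn percentEncode(writer: anytype, bytes: []const u8) !void {
    for (bytes) |b| {
        switch (b) {
            'A'...'Z', 'a'...'z', '0'...'9', '-', '_', '.', '~', '/', ':' => try writer.writeByte(b),
            else => try writer.print("%{X:0>2}", .{b}),
        }
    }
}

test percentEncode {
    var buf: [64]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    try percentEncode(fbs.writer(), "/tmp/my file.js");
    try std.testing.expectEqualStrings("/tmp/my%20file.js", fbs.getWritten());
}
```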
src/bake/DevServerSourceProvider.cpp (new file, 17 lines)
@@ -0,0 +1,17 @@
+#include "DevServerSourceProvider.h"
+#include "BunBuiltinNames.h"
+#include "BunString.h"
+
+// The Zig implementation will be provided to handle registration
+extern "C" void Bun__addDevServerSourceProvider(void* bun_vm, Bake::DevServerSourceProvider* opaque_source_provider, BunString* specifier);
+
+// Export functions for Zig to access DevServerSourceProvider
+extern "C" BunString DevServerSourceProvider__getSourceSlice(Bake::DevServerSourceProvider* provider)
+{
+return Bun::toStringView(provider->source());
+}
+
+extern "C" MiCString DevServerSourceProvider__getSourceMapJSON(Bake::DevServerSourceProvider* provider)
+{
+return provider->sourceMapJSON();
+}
src/bake/DevServerSourceProvider.h (new file, 74 lines)
@@ -0,0 +1,74 @@
+#pragma once
+#include "root.h"
+#include "headers-handwritten.h"
+#include "JavaScriptCore/SourceOrigin.h"
+#include "ZigGlobalObject.h"
+#include "MiString.h"
+
+namespace Bake {
+
+class DevServerSourceProvider;
+
+// Function to be implemented in Zig to register the source provider
+extern "C" void Bun__addDevServerSourceProvider(void* bun_vm, DevServerSourceProvider* opaque_source_provider, BunString* specifier);
+extern "C" void Bun__removeDevServerSourceProvider(void* bun_vm, DevServerSourceProvider* opaque_source_provider, BunString* specifier);
+
+class DevServerSourceProvider final : public JSC::StringSourceProvider {
+public:
+static Ref<DevServerSourceProvider> create(
+JSC::JSGlobalObject* globalObject,
+const String& source,
+const char* sourceMapJSONPtr,
+size_t sourceMapJSONLength,
+const JSC::SourceOrigin& sourceOrigin,
+String&& sourceURL,
+const TextPosition& startPosition,
+JSC::SourceProviderSourceType sourceType)
+{
+auto provider = adoptRef(*new DevServerSourceProvider(source, sourceMapJSONPtr, sourceMapJSONLength, sourceOrigin, WTFMove(sourceURL), startPosition, sourceType));
+auto* zigGlobalObject = jsCast<::Zig::GlobalObject*>(globalObject);
+auto specifier = Bun::toString(provider->sourceURL());
+provider->m_globalObject = zigGlobalObject;
+provider->m_specifier = specifier;
+Bun__addDevServerSourceProvider(zigGlobalObject->bunVM(), provider.ptr(), &specifier);
+return provider;
+}
+
+MiCString sourceMapJSON() const
+{
+return m_sourceMapJSON.asCString();
+}
+
+private:
+DevServerSourceProvider(
+const String& source,
+const char* sourceMapJSONPtr,
+size_t sourceMapJSONLength,
+const JSC::SourceOrigin& sourceOrigin,
+String&& sourceURL,
+const TextPosition& startPosition,
+JSC::SourceProviderSourceType sourceType)
+: StringSourceProvider(
+source,
+sourceOrigin,
+JSC::SourceTaintedOrigin::Untainted,
+WTFMove(sourceURL),
+startPosition,
+sourceType)
+, m_sourceMapJSON(sourceMapJSONPtr, sourceMapJSONLength)
+{
+}
+
+~DevServerSourceProvider()
+{
+if (m_globalObject) {
+Bun__removeDevServerSourceProvider(m_globalObject->bunVM(), this, &m_specifier);
+}
+}
+
+MiString m_sourceMapJSON;
+Zig::GlobalObject* m_globalObject;
+BunString m_specifier;
+};
+
+} // namespace Bake
@@ -473,7 +473,7 @@ pub const Style = union(enum) {
pub fn fromJS(value: JSValue, global: *jsc.JSGlobalObject) !Style {
if (value.isString()) {
const bun_string = try value.toBunString(global);
-var sfa = std.heap.stackFallback(4096, bun.default_allocator);
+var sfa = bun.allocators.stackFallback(4096, bun.default_allocator);
const utf8 = bun_string.toUTF8(sfa.get());
defer utf8.deinit();
if (map.get(utf8.slice())) |style| {
@@ -822,6 +822,28 @@ pub const MatchedParams = struct {
key: []const u8,
value: []const u8,
};

+/// Convert the matched params to a JavaScript object
+/// Returns null if there are no params
+pub fn toJS(self: *const MatchedParams, global: *jsc.JSGlobalObject) JSValue {
+const params_array = self.params.slice();
+
+if (params_array.len == 0) {
+return JSValue.null;
+}
+
+// Create a JavaScript object with params
+const obj = JSValue.createEmptyObject(global, params_array.len);
+for (params_array) |param| {
+const key_str = bun.String.cloneUTF8(param.key);
+defer key_str.deref();
+const value_str = bun.String.cloneUTF8(param.value);
+defer value_str.deref();
+
+_ = obj.putBunStringOneOrArray(global, &key_str, value_str.toJS(global)) catch unreachable;
+}
+return obj;
+}
};

/// Fast enough for development to be seamless, but avoids building a
@@ -1219,7 +1241,7 @@ pub const JSFrameworkRouter = struct {

var params_out: MatchedParams = undefined;
if (jsfr.router.matchSlow(path_slice.slice(), &params_out)) |index| {
-var sfb = std.heap.stackFallback(4096, bun.default_allocator);
+var sfb = bun.allocators.stackFallback(4096, bun.default_allocator);
const alloc = sfb.get();

return (try jsc.JSObject.create(.{
@@ -1242,7 +1264,7 @@ pub const JSFrameworkRouter = struct {
pub fn toJSON(jsfr: *JSFrameworkRouter, global: *JSGlobalObject, callframe: *jsc.CallFrame) bun.JSError!JSValue {
_ = callframe;

-var sfb = std.heap.stackFallback(4096, bun.default_allocator);
+var sfb = bun.allocators.stackFallback(4096, bun.default_allocator);
const alloc = sfb.get();

return jsfr.routeToJson(global, Route.Index.init(0), alloc);
Some files were not shown because too many files have changed in this diff.