Compare commits

..

105 Commits

Author SHA1 Message Date
dave caruso
0e665b7dae yaeh 2024-07-31 22:00:18 -07:00
dave caruso
9f63d55efc aa 2024-07-31 20:39:53 -07:00
dave caruso
c2d1389dbc wa 2024-07-31 19:56:56 -07:00
dave caruso
87067e528d Merge remote-tracking branch 'origin/main' into dave/node_fallbacks 2024-07-31 15:04:55 -07:00
dave caruso
cf9eff9d5d a 2024-07-31 15:04:53 -07:00
Ciro Spaciari
bec04c7341 change Body.Value.Error to not always allocate JSValues (#12955)
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2024-07-31 02:26:14 -07:00
Jarred Sumner
a44b7e41d2 Pass --force to git submodule update in CI 2024-07-30 23:03:35 -07:00
Jarred Sumner
de5e56336c Use example.com as the test domain in a test 2024-07-30 22:44:47 -07:00
Ciro Spaciari
1c648063fa fix(tls/socket/fetch) shutdown fix + ref counted context (#12925)
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2024-07-30 22:41:54 -07:00
Jarred Sumner
1c3354bc95 Deflake setInterval test 2024-07-30 22:33:04 -07:00
dave caruso
d5d4f53e82 fix(bundler): put unwrapped cjs imports at top level for minifyrenamer (#12951) 2024-07-30 21:30:09 -07:00
Pipythonmc
7ab4dc738f doc: fix incorrect documentation relating to the --define flag (#12952) 2024-07-30 19:17:32 -07:00
dave caruso
ebc7045ca4 fix crash handler test failures (#12932) 2024-07-30 16:52:59 -07:00
Ashcon Partovi
848ad19d9e Attempt to fix flaky Windows builds 2024-07-30 10:55:13 -07:00
Ashcon Partovi
1da3436266 Attempt to fix flaky build-deps on Windows 2024-07-30 10:48:24 -07:00
Ashcon Partovi
49e496399a Attempt to fix missing GitHub assets on release 2024-07-30 10:44:44 -07:00
Ashcon Partovi
9b8340a5b3 Attempt to fix 'spawn error' on Windows tests 2024-07-29 19:16:29 -07:00
Meghan Denny
8efcc61a7b windows: cleanup logging of NODE_CHANNEL_FD (#12930) 2024-07-29 19:16:02 -07:00
Meghan Denny
4d6480050c NodeError: add more and use them in child_process and dgram (#12929) 2024-07-29 19:15:23 -07:00
Meghan Denny
fc2c134bc6 test: rewrite "should call close" to use promise instead of done (#12931) 2024-07-29 19:05:44 -07:00
m1212e
4c4db1da37 docs: Add hint for memory timeline in debugger (#12923) 2024-07-29 19:05:13 -07:00
dave caruso
77e14c8482 fix template addition folding 12904 (#12928) 2024-07-29 19:04:59 -07:00
Ashcon Partovi
fba5d65003 Make the release script faster 2024-07-29 17:38:06 -07:00
Jarred Sumner
c181cf45a7 Fixes #12910 (#12911) 2024-07-29 17:19:47 -07:00
Ashcon Partovi
5aeb4d9f79 Fix missing assert in release script 2024-07-29 16:57:22 -07:00
Dariush Alipour
1d9a8b4134 fix: child_process test with specified shell for windows (#12926) 2024-07-29 16:36:46 -07:00
Ashcon Partovi
30881444df Fix flaky C++ build with missing submodules 2024-07-29 16:34:01 -07:00
Ashcon Partovi
a2b4e3d4c2 Fix syntax in env.ps1 2024-07-29 16:25:50 -07:00
Ashcon Partovi
e5662caa33 Fix release script, again 2024-07-29 16:21:55 -07:00
Ashcon Partovi
1f1ea7bf24 Fix release script 2024-07-29 15:29:46 -07:00
Ashcon Partovi
175746e569 Only upload canary artifacts when the build is canary 2024-07-29 14:54:29 -07:00
Ashcon Partovi
005dd776b6 Allow creating release builds with 'RELEASE=1' 2024-07-29 14:50:26 -07:00
Ashcon Partovi
81dec2657f Enable buildkite (#12653) 2024-07-29 14:39:50 -07:00
Jarred Sumner
dbd320ccfa Remove some dynamic memory allocations in uWebSockets (#12897) 2024-07-29 15:10:55 -03:00
Jarred Sumner
8f8d3968a3 Enable concurrent transpiler on Windows (#12915) 2024-07-29 06:25:38 -07:00
Jarred Sumner
0bbdd880e6 Fix various Windows build issues 2024-07-29 04:31:39 -07:00
Jarred Sumner
51257d5668 Add BUN_FEATURE_FLAG_DISABLE_ASYNC_TRANSPILER feature flag 2024-07-29 01:37:59 -07:00
Jarred Sumner
a2ae28d158 Add named allocator 2024-07-28 21:27:08 -07:00
Jarred Sumner
f04991f6bb Fix debug build issue 2024-07-28 19:38:03 -07:00
Andrew Johnston
80e651aca3 fix(build): use specific version of lld for link on unix (#12907) 2024-07-28 18:38:01 -07:00
Jarred Sumner
a5ba02804f Use typed allocators in more places (#12899) 2024-07-28 18:37:35 -07:00
Jarred Sumner
4199fd4515 Fix memory leak in RuntimeTranspilerStore (#12900) 2024-07-28 08:30:32 -07:00
Dylan Conway
848327d333 textencoder: remove DOMJIT (#12868) 2024-07-28 07:46:53 -07:00
Jarred Sumner
bfb72f84c4 In debug builds on macOS, add malloc_zone_check when GC runs 2024-07-28 05:13:50 -07:00
Jarred Sumner
e4022ec3c7 Bump versions of things 2024-07-27 02:02:48 -07:00
Jarred Sumner
a7f34c15fc Slightly better error.stack (#12861) 2024-07-27 01:02:46 -07:00
Meghan Denny
a0ebb051b0 implement node:util.getSystemErrorName() (#12837) 2024-07-27 00:20:50 -07:00
dave caruso
70ca2b76c3 fix: check if we are crashing before exiting gracefully (#12865) 2024-07-26 20:00:02 -07:00
Jarred Sumner
e5ac4f94fa Handle errors in node:http better (#12641)
Co-authored-by: Jarred-Sumner <Jarred-Sumner@users.noreply.github.com>
2024-07-26 19:53:36 -07:00
dave caruso
d547d8a30e fix a bundler crash (#12864)
Co-authored-by: paperdave <paperdave@users.noreply.github.com>
2024-07-26 19:39:37 -07:00
Meghan Denny
32d9bb3ced ci: format: switch to mlugg/setup-zig (#12863) 2024-07-26 18:47:02 -07:00
dave caruso
75df73ef90 fix: make raiseIgnoringPanicHandler ignore the panic handler (#12578)
Co-authored-by: paperdave <paperdave@users.noreply.github.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2024-07-26 18:36:53 -07:00
Dylan Conway
13907c4c29 fix(build): assertion failure when cross-compiling on windows (#12862)
Co-authored-by: dylan-conway <dylan-conway@users.noreply.github.com>
2024-07-26 17:29:01 -07:00
Jarred Sumner
87169b6bb3 Configure libcpp assert to avoid macOS 13.0 issue (#12860) 2024-07-26 16:03:16 -07:00
Jarred Sumner
244100c32f When crash reporter is disabled also disable resetSegfaultHanlder 2024-07-26 14:50:56 -07:00
Jarred Sumner
8a78b2241d Rename JSC.Node.StringOrBuffer -> StringOrBuffer 2024-07-26 14:50:30 -07:00
dave caruso
bf8b6922bb Fix memory leak when printing any error's source code. (#12831)
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2024-07-26 14:14:16 -07:00
Dylan Conway
7aa05ec542 bump webkit (#12858)
Co-authored-by: dylan-conway <dylan-conway@users.noreply.github.com>
2024-07-26 14:13:58 -07:00
Meghan Denny
f95ae9baee launch.json: remove BUN_DEBUG_ALL=1 from 'bun run' (#12845) 2024-07-26 04:14:45 -07:00
Meghan Denny
f7cb2da542 use .undefined literal instead of jsUndefined() call (#12834) 2024-07-26 04:03:55 -07:00
Meghan Denny
d966129992 bindings: better use of jsc api in Path_functionToNamespacedPath (#12836) 2024-07-26 04:02:31 -07:00
Meghan Denny
6cb5cd2a87 node:v8: expose DefaultDeserializer and DefaultSerializer exports (#12838) 2024-07-26 03:58:47 -07:00
Meghan Denny
080a2806af uws: tidy use of ssl intFromBool (#12839) 2024-07-26 03:58:01 -07:00
Meghan Denny
92c83fcd9e ipc: make IPCInstance.context void on windows instead of u0 (#12840) 2024-07-26 03:56:13 -07:00
Meghan Denny
277ed9d138 bindings: fix zig extern def of Bun__JSValue__deserialize (#12844) 2024-07-26 03:48:30 -07:00
Meghan Denny
879cb23163 cpp: missing uses of propertyNames (#12835) 2024-07-26 03:47:41 -07:00
Jarred Sumner
d321ee97c5 Move napi_new_instance to c++ (#12658)
Co-authored-by: Jarred-Sumner <Jarred-Sumner@users.noreply.github.com>
Co-authored-by: Dylan Conway <35280289+dylan-conway@users.noreply.github.com>
2024-07-25 18:51:01 -07:00
Jarred Sumner
3bfeb83e7e Fix [Symbol.dispose] on Bun.listen() & Bun.connect() + add types (#12739)
Co-authored-by: Dylan Conway <35280289+dylan-conway@users.noreply.github.com>
2024-07-25 18:49:35 -07:00
Meghan Denny
5a18b7d2fc fixes relationship between process.kill and process._kill (#12792)
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2024-07-25 18:46:30 -07:00
Gert Goet
e75ef69fb4 Fix spacing of patch-command help (#12769) 2024-07-25 18:44:52 -07:00
Dylan Conway
78021e34ae fix(bun:test): make sure test.each doesn't return .zero (#12828) 2024-07-25 18:24:16 -07:00
Dylan Conway
d7187592c0 fix(brotli): protect and unprotect buffer values (#12829) 2024-07-25 18:24:01 -07:00
Jarred Sumner
5f1b569c52 Fix crash when creating a new Worker with a numeric environment varia… (#12810) 2024-07-25 18:10:57 -07:00
dave caruso
e54fe5995b fix(bundler): dont tree-shake imported enum if inlined and used (#12826) 2024-07-25 17:29:20 -07:00
Jarred Sumner
a2f68989a0 Retry on 5xx errors from npm registry (#12825) 2024-07-25 17:28:59 -07:00
Jarred Sumner
4a1e01d076 Use bun.New more (#12811)
Co-authored-by: Dylan Conway <35280289+dylan-conway@users.noreply.github.com>
2024-07-25 15:35:02 -07:00
Jarred Sumner
dd8b0a5889 Clean up socket_async_http_abort_tracker after a lot of requests (#12816) 2024-07-25 15:34:48 -07:00
Jarred Sumner
8cadf66143 Add malloc zones to heapStats() on macOS in debug builds (#12815) 2024-07-25 15:33:43 -07:00
Jarred Sumner
77cd03dad1 Workaround for BUN-2WQ (#12806) 2024-07-25 15:33:17 -07:00
Meghan Denny
82b42ed851 node: more process.exitCode fixes (#12809) 2024-07-25 15:18:41 -07:00
Dylan Conway
2de82c0b3b fix regression test 2024-07-25 14:07:57 -07:00
Jarred Sumner
30df04cd35 Add a couple feature flags 2024-07-25 05:52:50 -07:00
Jarred Sumner
585c8299d8 Clean up some stack trace printing logic (#12791) 2024-07-25 04:04:02 -07:00
Dylan Conway
375d8da8e6 fix(brotli): protect buffer jsvalues (#12800)
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2024-07-25 04:01:53 -07:00
Jarred Sumner
0bd8db7162 Slightly reduce pointer lookups in hot code path (#12802) 2024-07-24 23:07:51 -07:00
Meghan Denny
d97260869d replace JSValue .callWithThis with always explicit .call (#12789) 2024-07-24 23:07:28 -07:00
Jarred Sumner
fdb58dc861 Fixes #9555 (#12801) 2024-07-24 23:07:04 -07:00
Meghan Denny
5f118704ec web-apis.md: make this list diff better; does not change presentation (#12795) 2024-07-24 22:27:32 -07:00
dave caruso
610c7f5e47 fix memory lifetime of define expressions (#12784) 2024-07-24 22:27:14 -07:00
Meghan Denny
1e0b20f514 node: fix observable value of process.exitCode (#12799) 2024-07-24 22:26:07 -07:00
Meghan Denny
f6c89f4c25 JSValue.toFmt doesn't need a globalThis param because ConsoleObject.Formatter already has one (#12790) 2024-07-24 22:03:51 -07:00
Meghan Denny
907cd8d45d fix crash in populateStackTrace() (#12793) 2024-07-24 21:08:25 -07:00
Dylan Conway
ac4523e903 fix(napi): unref threadsafe functions on finalize (#12788) 2024-07-24 18:57:01 -07:00
Jarred Sumner
24574dddb2 Ensure LLVM 18 with Homebrew on macOS 2024-07-24 15:56:05 -07:00
Dylan Conway
2da57f6d7b napi_threadsafe_function async tracker (#12780) 2024-07-24 15:27:51 -07:00
dave caruso
e2c3749965 fix(bundler): become smarter with __esm wrappers (#12729) 2024-07-24 02:00:20 -07:00
Jarred Sumner
57c6a7db35 libdeflate (#12741) 2024-07-24 01:30:31 -07:00
ippsav
c37891471a Fix alignment calculation in Zone.create function (#12748) 2024-07-24 01:30:11 -07:00
Jarred Sumner
8ba0791dc8 Use one JSC::SourceProvider instead of 322 (#12761) 2024-07-24 01:26:09 -07:00
dave caruso
f9371e59f2 fix(bundler): fix part liveness calculation (#12758) 2024-07-23 23:49:01 -07:00
Jarred Sumner
79ddf0e47a Fix assertion failure in bun build when entry point is a file loader (#12683)
Co-authored-by: dave caruso <me@paperdave.net>
2024-07-23 22:02:51 -07:00
David Stevens
177f3a8622 Fixes #12182 - update default port when server is created (#12201) 2024-07-23 11:07:38 -07:00
Ciro Spaciari
5a5f3d6b30 fix(http) timeout (#12728)
Co-authored-by: cirospaciari <cirospaciari@users.noreply.github.com>
Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
2024-07-23 01:13:43 -07:00
Ivan Baksheev
4e5d759c37 fix(bundler): ignore external rules for entrypoint (#12736) 2024-07-23 00:33:18 -07:00
Jarred Sumner
1a702dfdc7 Add canary to cache key 2024-07-22 22:16:27 -07:00
353 changed files with 13466 additions and 11732 deletions

View File

@@ -10,9 +10,10 @@ steps:
blocked_state: "running"
- label: ":pipeline:"
command: "buildkite-agent pipeline upload .buildkite/ci.yml"
agents:
queue: "build-linux"
queue: "build-darwin"
command:
- ".buildkite/scripts/prepare-build.sh"
- if: "build.branch == 'main' && !build.pull_request.repository.fork"
label: ":github:"

File diff suppressed because it is too large Load Diff

55
.buildkite/scripts/build-bun.sh Executable file
View File

@@ -0,0 +1,55 @@
#!/bin/bash
# Buildkite CI: final link step. Downloads the dependency, Zig, and C++
# artifacts produced by the sibling build steps, runs a link-only CMake
# build, then zips and uploads the `bun` and `bun-profile` binaries.
set -eo pipefail
source "$(dirname "$0")/env.sh"
# Trace a command (via `set -x`) before running it; the braced
# `{ set +x; } 2>/dev/null` hides the trace of turning tracing back off.
function run_command() {
set -x
"$@"
{ set +x; } 2>/dev/null
}
cwd="$(pwd)"
mkdir -p build
# Fetch artifacts from the other steps of this group. download-artifact.sh
# is sourced, so `set -e` aborts this script if a download fails.
source "$(dirname "$0")/download-artifact.sh" "build/bun-deps/**" --step "$BUILDKITE_GROUP_KEY-build-deps"
source "$(dirname "$0")/download-artifact.sh" "build/bun-zig.o" --step "$BUILDKITE_GROUP_KEY-build-zig"
# The C++ archive is uploaded in parts, hence --split to reassemble it.
source "$(dirname "$0")/download-artifact.sh" "build/bun-cpp-objects.a" --step "$BUILDKITE_GROUP_KEY-build-cpp" --split
cd build
# Configure a link-only build: all objects were compiled in earlier steps.
run_command cmake .. "${CMAKE_FLAGS[@]}" \
-GNinja \
-DBUN_LINK_ONLY="1" \
-DNO_CONFIGURE_DEPENDS="1" \
-DBUN_ZIG_OBJ_DIR="$cwd/build" \
-DBUN_CPP_ARCHIVE="$cwd/build/bun-cpp-objects.a" \
-DBUN_DEPS_OUT_DIR="$cwd/build/bun-deps" \
-DCMAKE_BUILD_TYPE="$CMAKE_BUILD_TYPE" \
-DCPU_TARGET="$CPU_TARGET" \
-DUSE_LTO="$USE_LTO" \
-DUSE_DEBUG_JSC="$USE_DEBUG_JSC" \
-DCANARY="$CANARY" \
-DGIT_SHA="$GIT_SHA"
run_command ninja -v -j "$CPUS"
run_command ls
# Artifact name is derived from the group key, e.g. "bun-linux-x64".
tag="bun-$BUILDKITE_GROUP_KEY"
if [ "$USE_LTO" == "OFF" ]; then
# Remove OS check when LTO is enabled on macOS again
if [[ "$tag" == *"darwin"* ]]; then
tag="$tag-nolto"
fi
fi
# Package each binary into its own "<tag>[-profile]/<name>" zip and upload.
for name in bun bun-profile; do
dir="$tag"
if [ "$name" == "bun-profile" ]; then
dir="$tag-profile"
fi
run_command chmod +x "$name"
# Smoke test: the freshly linked binary must at least print its revision.
run_command "./$name" --revision
run_command mkdir -p "$dir"
run_command mv "$name" "$dir/$name"
run_command zip -r "$dir.zip" "$dir"
source "$cwd/.buildkite/scripts/upload-artifact.sh" "$dir.zip"
done

35
.buildkite/scripts/build-cpp.sh Executable file
View File

@@ -0,0 +1,35 @@
#!/bin/bash
# Buildkite CI: compile Bun's C++ sources into build/bun-cpp-objects.a.
# The archive is uploaded (split into parts) for the link step to consume.
set -eo pipefail
source "$(dirname "$0")/env.sh"

# The C++ build compiles vendored submodules, so force-refresh them first.
export FORCE_UPDATE_SUBMODULES=1
# Fix: quote the inner $(dirname ...) as well — an unquoted command
# substitution inside $(realpath ...) word-splits (and globs) if the
# checkout path contains spaces.
source "$(realpath "$(dirname "$0")/../../scripts/update-submodules.sh")"
{ set +x; } 2>/dev/null

# Trace a command before running it; the braced `set +x` keeps the
# trace of disabling tracing out of the log.
function run_command() {
  set -x
  "$@"
  { set +x; } 2>/dev/null
}

mkdir -p build
cd build
# Scratch directories expected by the codegen steps of the CMake build.
mkdir -p tmp_modules tmp_functions js codegen
run_command cmake .. "${CMAKE_FLAGS[@]}" \
  -GNinja \
  -DBUN_CPP_ONLY="1" \
  -DNO_CONFIGURE_DEPENDS="1" \
  -DCMAKE_BUILD_TYPE="$CMAKE_BUILD_TYPE" \
  -DCPU_TARGET="$CPU_TARGET" \
  -DUSE_LTO="$USE_LTO" \
  -DUSE_DEBUG_JSC="$USE_DEBUG_JSC" \
  -DCANARY="$CANARY" \
  -DGIT_SHA="$GIT_SHA"
# The configure step emits this helper; it compiles only the C++ objects.
chmod +x compile-cpp-only.sh
source compile-cpp-only.sh -v -j "$CPUS"
{ set +x; } 2>/dev/null
cd ..
source "$(dirname "$0")/upload-artifact.sh" "build/bun-cpp-objects.a" --split

View File

View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Buildkite CI: build Bun's native dependencies and upload the resulting
# static libraries as artifacts for the link step.
set -eo pipefail
source "$(dirname "$0")/env.sh"
# Fix: quote the inner $(dirname ...) — unquoted, it word-splits inside
# $(realpath ...) when the checkout path contains spaces.
source "$(realpath "$(dirname "$0")/../../scripts/all-dependencies.sh")"
# Every artifact the all-dependencies build is expected to produce;
# upload-artifact.sh fails (and aborts us, since it is sourced under
# `set -e`) if any of them is missing.
artifacts=(
  libcrypto.a libssl.a libdecrepit.a
  libcares.a
  libarchive.a
  liblolhtml.a
  libmimalloc.a libmimalloc.o
  libtcc.a
  libz.a
  libzstd.a
  libdeflate.a
  liblshpack.a
)
for artifact in "${artifacts[@]}"; do
  source "$(dirname "$0")/upload-artifact.sh" "build/bun-deps/$artifact"
done

View File

@@ -0,0 +1,40 @@
#!/bin/bash
# Build the legacy JS assets (node fallbacks, runtime JS, error pages)
# that are not yet produced by the CMake build.
set -eo pipefail
source "$(dirname "$0")/env.sh"

# Trace each command as it runs; the braced `set +x` keeps the trace of
# disabling tracing out of the log.
run_command() {
  set -x
  "$@"
  { set +x; } 2>/dev/null
}

# Fail fast when a required tool ($1) is missing from PATH.
require_tool() {
  command -v "$1" &>/dev/null && return 0
  echo "error: $1 is not installed" 1>&2
  exit 1
}

# src/node-fallbacks has its own package.json and build script.
build_node_fallbacks() {
  local fallbacks_dir="src/node-fallbacks"
  run_command bun install --cwd "$fallbacks_dir" --frozen-lockfile
  run_command bun run --cwd "$fallbacks_dir" build
}

# Root-level JS targets still driven by the Makefile.
build_old_js() {
  run_command bun install --frozen-lockfile
  run_command make runtime_js fallback_decoder bun_error
}

require_tool bun
require_tool make
build_node_fallbacks
build_old_js

80
.buildkite/scripts/build-zig.sh Executable file
View File

@@ -0,0 +1,80 @@
#!/bin/bash
# Buildkite CI: compile Bun's Zig sources into build/bun-zig.o.
# The target triple is derived from the arguments (or the host), so this
# step can run anywhere — Zig cross-compiles.
set -eo pipefail
source "$(dirname "$0")/env.sh"
# Derive and export ZIG_ARCH, ZIG_CPU_TARGET, and ZIG_TARGET.
# $1 = target OS (default: host uname -s), $2 = target arch (default: uname -m).
function assert_target() {
local arch="${2-$(uname -m)}"
case "$(echo "$arch" | tr '[:upper:]' '[:lower:]')" in
x64 | x86_64 | amd64)
export ZIG_ARCH="x86_64"
# "baseline" steps target older x64 CPUs (nehalem); otherwise haswell.
if [[ "$BUILDKITE_STEP_KEY" == *"baseline"* ]]; then
export ZIG_CPU_TARGET="nehalem"
else
export ZIG_CPU_TARGET="haswell"
fi
;;
aarch64 | arm64)
export ZIG_ARCH="aarch64"
export ZIG_CPU_TARGET="native"
;;
*)
echo "error: Unsupported architecture: $arch" 1>&2
exit 1
;;
esac
local os="${1-$(uname -s)}"
case "$(echo "$os" | tr '[:upper:]' '[:lower:]')" in
linux)
export ZIG_TARGET="$ZIG_ARCH-linux-gnu" ;;
darwin)
export ZIG_TARGET="$ZIG_ARCH-macos-none" ;;
windows)
export ZIG_TARGET="$ZIG_ARCH-windows-msvc" ;;
*)
echo "error: Unsupported operating system: $os" 1>&2
exit 1
;;
esac
}
# Trace a command before running it; the braced `set +x` hides the
# trace of turning tracing back off.
function run_command() {
set -x
"$@"
{ set +x; } 2>/dev/null
}
assert_target "$@"
# Since the zig build depends on files from the zig submodule,
# make sure to update the submodule before building.
run_command git submodule update --init --recursive --progress --depth=1 --checkout src/deps/zig
# TODO: Move these to be part of the CMake build
source "$(dirname "$0")/build-old-js.sh"
cwd="$(pwd)"
mkdir -p build
cd build
# WEBKIT_DIR="omit": only the Zig object is built in this step, so the
# WebKit dependency is not needed.
run_command cmake .. "${CMAKE_FLAGS[@]}" \
-GNinja \
-DNO_CONFIGURE_DEPENDS="1" \
-DNO_CODEGEN="0" \
-DWEBKIT_DIR="omit" \
-DBUN_ZIG_OBJ_DIR="$cwd/build" \
-DZIG_LIB_DIR="$cwd/src/deps/zig/lib" \
-DCMAKE_BUILD_TYPE="$CMAKE_BUILD_TYPE" \
-DARCH="$ZIG_ARCH" \
-DCPU_TARGET="$ZIG_CPU_TARGET" \
-DZIG_TARGET="$ZIG_TARGET" \
-DUSE_LTO="$USE_LTO" \
-DUSE_DEBUG_JSC="$USE_DEBUG_JSC" \
-DCANARY="$CANARY" \
-DGIT_SHA="$GIT_SHA"
export ONLY_ZIG="1"
run_command ninja "$cwd/build/bun-zig.o" -v -j "$CPUS"
cd ..
source "$(dirname "$0")/upload-artifact.sh" "build/bun-zig.o"

View File

@@ -0,0 +1,47 @@
# Buildkite CI (Windows): download one or more build artifacts, optionally
# reassembling artifacts that were uploaded in split parts.
param (
    [Parameter(Mandatory=$true)]
    [string[]] $Paths,
    [switch] $Split
)
$ErrorActionPreference = "Stop"

function Assert-Buildkite-Agent() {
    if (-not (Get-Command "buildkite-agent" -ErrorAction SilentlyContinue)) {
        Write-Error "Cannot find buildkite-agent, please install it: https://buildkite.com/docs/agent/v3/install"
        exit 1
    }
}

# Join-File (FileSplitter module) reassembles files uploaded with -Split.
function Assert-Join-File() {
    if (-not (Get-Command "Join-File" -ErrorAction SilentlyContinue)) {
        Write-Error "Cannot find Join-File, please install it: https://www.powershellgallery.com/packages/FileSplitter/1.3"
        exit 1
    }
}

function Download-Buildkite-Artifact() {
    param (
        [Parameter(Mandatory=$true)]
        # Fix: removed the trailing comma after this parameter — a trailing
        # comma in a param() block is a PowerShell parse error.
        [string] $Path
    )
    if ($Split) {
        # Split artifacts are stored as "<name>.<part>" files.
        & buildkite-agent artifact download "$Path.*" --debug --debug-http
        Join-File -Path "$(Resolve-Path .)\$Path" -Verbose -DeletePartFiles
    } else {
        & buildkite-agent artifact download "$Path" --debug --debug-http
    }
    if (-not (Test-Path $Path)) {
        Write-Error "Could not find artifact: $Path"
        exit 1
    }
}

Assert-Buildkite-Agent
if ($Split) {
    Assert-Join-File
}
foreach ($Path in $Paths) {
    Download-Buildkite-Artifact $Path
}

View File

@@ -0,0 +1,46 @@
#!/bin/bash
# Buildkite CI: download an artifact by path, reassembling artifacts that
# were uploaded in numbered split parts ("<path>.00", "<path>.01", ...).
# Usage: download-artifact.sh <path> [--split] [extra buildkite-agent args...]
set -eo pipefail

function assert_buildkite_agent() {
  if ! command -v buildkite-agent &> /dev/null; then
    echo "error: Cannot find buildkite-agent, please install it:"
    echo "https://buildkite.com/docs/agent/v3/install"
    exit 1
  fi
}

function download_buildkite_artifact() {
  local path="$1"; shift
  local split="0"
  local args=()
  # Pull out our own --split flag; everything else is forwarded verbatim
  # to `buildkite-agent artifact download`.
  while true; do
    if [ -z "$1" ]; then
      break
    fi
    case "$1" in
    --split) split="1"; shift ;;
    *) args+=("$1"); shift ;;
    esac
  done
  if [ "$split" == "1" ]; then
    run_command buildkite-agent artifact download "$path.*" . "${args[@]}"
    # Fix: quote "$path" so paths containing spaces do not word-split;
    # the .?? glob itself must stay unquoted so the part files expand.
    run_command cat "$path".?? > "$path"
    run_command rm -f "$path".??
  else
    run_command buildkite-agent artifact download "$path" . "${args[@]}"
  fi
  # Glob downloads (paths containing '*') expand to many files, so the
  # single-file existence check only applies to literal paths.
  if [[ "$path" != *"*"* ]] && [ ! -f "$path" ]; then
    echo "error: Could not find artifact: $path"
    exit 1
  fi
}

# Trace a command before running it; the braced `set +x` hides the
# trace of turning tracing back off.
function run_command() {
  set -x
  "$@"
  { set +x; } 2>/dev/null
}

assert_buildkite_agent
download_buildkite_artifact "$@"

119
.buildkite/scripts/env.sh Executable file
View File

@@ -0,0 +1,119 @@
#!/bin/bash
# Buildkite CI: shared environment setup, sourced by every build-* script.
# Validates the build step's metadata, then exports the compiler, cache,
# and feature-flag variables the builds depend on.
set -eo pipefail

# Print the normalized OS name ("linux"/"darwin"), or fail.
function assert_os() {
  local os="$(uname -s)"
  case "$os" in
  Linux)
    echo "linux" ;;
  Darwin)
    echo "darwin" ;;
  *)
    echo "error: Unsupported operating system: $os" 1>&2
    exit 1
    ;;
  esac
}

# Print the normalized architecture name ("aarch64"/"x64"), or fail.
function assert_arch() {
  local arch="$(uname -m)"
  case "$arch" in
  aarch64 | arm64)
    echo "aarch64" ;;
  x86_64 | amd64)
    echo "x64" ;;
  *)
    echo "error: Unknown architecture: $arch" 1>&2
    exit 1
    ;;
  esac
}

# Sanity-check Buildkite metadata: the step key must embed its group key
# and (except for cross-compiled zig steps) the host OS and architecture.
function assert_build() {
  if [ -z "$BUILDKITE_REPO" ]; then
    echo "error: Cannot find repository for this build"
    exit 1
  fi
  if [ -z "$BUILDKITE_COMMIT" ]; then
    echo "error: Cannot find commit for this build"
    exit 1
  fi
  if [ -z "$BUILDKITE_STEP_KEY" ]; then
    echo "error: Cannot find step key for this build"
    exit 1
  fi
  if [ -n "$BUILDKITE_GROUP_KEY" ] && [[ "$BUILDKITE_STEP_KEY" != "$BUILDKITE_GROUP_KEY"* ]]; then
    echo "error: Build step '$BUILDKITE_STEP_KEY' does not start with group key '$BUILDKITE_GROUP_KEY'"
    exit 1
  fi
  # Skip os and arch checks for Zig, since it's cross-compiled on macOS
  if [[ "$BUILDKITE_STEP_KEY" != *"zig"* ]]; then
    local os="$(assert_os)"
    if [[ "$BUILDKITE_STEP_KEY" != *"$os"* ]]; then
      echo "error: Build step '$BUILDKITE_STEP_KEY' does not match operating system '$os'"
      exit 1
    fi
    local arch="$(assert_arch)"
    if [[ "$BUILDKITE_STEP_KEY" != *"$arch"* ]]; then
      echo "error: Build step '$BUILDKITE_STEP_KEY' does not match architecture '$arch'"
      exit 1
    fi
  fi
}

function assert_buildkite_agent() {
  if ! command -v buildkite-agent &> /dev/null; then
    echo "error: Cannot find buildkite-agent, please install it:"
    echo "https://buildkite.com/docs/agent/v3/install"
    exit 1
  fi
}

function export_environment() {
  # Fix: quote the inner $(dirname ...) too — unquoted, it word-splits
  # inside $(realpath ...) when the checkout path contains spaces.
  source "$(realpath "$(dirname "$0")/../../scripts/env.sh")"
  source "$(realpath "$(dirname "$0")/../../scripts/update-submodules.sh")"
  { set +x; } 2>/dev/null
  export GIT_SHA="$BUILDKITE_COMMIT"
  # Per-step cache directories so concurrent steps never share a cache.
  export CCACHE_DIR="$HOME/.cache/ccache/$BUILDKITE_STEP_KEY"
  export SCCACHE_DIR="$HOME/.cache/sccache/$BUILDKITE_STEP_KEY"
  export ZIG_LOCAL_CACHE_DIR="$HOME/.cache/zig-cache/$BUILDKITE_STEP_KEY"
  export BUN_DEPS_CACHE_DIR="$HOME/.cache/bun-deps/$BUILDKITE_STEP_KEY"
  if [ "$(assert_arch)" == "aarch64" ]; then
    export CPU_TARGET="native"
  elif [[ "$BUILDKITE_STEP_KEY" == *"baseline"* ]]; then
    export CPU_TARGET="nehalem"
  else
    export CPU_TARGET="haswell"
  fi
  if [[ "$BUILDKITE_STEP_KEY" == *"nolto"* ]]; then
    export USE_LTO="OFF"
  else
    export USE_LTO="ON"
  fi
  # Fix: branch on the command's exit status directly. The previous
  # `if $(cmd); then` form executed the command's (empty) stdout and only
  # worked by accident of bash's empty-command semantics.
  if buildkite-agent meta-data exists release &> /dev/null; then
    export CMAKE_BUILD_TYPE="$(buildkite-agent meta-data get release)"
  else
    export CMAKE_BUILD_TYPE="Release"
  fi
  if buildkite-agent meta-data exists canary &> /dev/null; then
    export CANARY="$(buildkite-agent meta-data get canary)"
  else
    export CANARY="1"
  fi
  if buildkite-agent meta-data exists assertions &> /dev/null; then
    export USE_DEBUG_JSC="$(buildkite-agent meta-data get assertions)"
  else
    export USE_DEBUG_JSC="OFF"
  fi
  # "Clean checkout" builds also start from cold caches.
  if [ "$BUILDKITE_CLEAN_CHECKOUT" == "true" ]; then
    rm -rf "$CCACHE_DIR"
    rm -rf "$SCCACHE_DIR"
    rm -rf "$ZIG_LOCAL_CACHE_DIR"
    rm -rf "$BUN_DEPS_CACHE_DIR"
  fi
}

assert_build
assert_buildkite_agent
export_environment

View File

@@ -0,0 +1,97 @@
#!/bin/bash
# Buildkite CI: pipeline bootstrap. Validates the environment, computes
# the canary revision number, and uploads the main CI pipeline definition.
set -eo pipefail

function assert_build() {
  if [ -z "$BUILDKITE_REPO" ]; then
    echo "error: Cannot find repository for this build"
    exit 1
  fi
  if [ -z "$BUILDKITE_COMMIT" ]; then
    echo "error: Cannot find commit for this build"
    exit 1
  fi
}

function assert_buildkite_agent() {
  if ! command -v buildkite-agent &> /dev/null; then
    echo "error: Cannot find buildkite-agent, please install it:"
    echo "https://buildkite.com/docs/agent/v3/install"
    exit 1
  fi
}

function assert_jq() {
  assert_command "jq" "jq" "https://stedolan.github.io/jq/"
}

function assert_curl() {
  assert_command "curl" "curl" "https://curl.se/download.html"
}

# Ensure a command exists, attempting a Homebrew install when possible.
#   $1 = command name, $2 = brew package, $3 = optional help URL
function assert_command() {
  local command="$1"
  local package="$2"
  local help_url="$3"
  if ! command -v "$command" &> /dev/null; then
    echo "warning: $command is not installed, installing..."
    if command -v brew &> /dev/null; then
      HOMEBREW_NO_AUTO_UPDATE=1 brew install "$package"
    else
      echo "error: Cannot install $command, please install it"
      if [ -n "$help_url" ]; then
        echo ""
        echo "hint: See $help_url for help"
      fi
      exit 1
    fi
  fi
}

# RELEASE=1 marks this build as a real release, which disables canary.
function assert_release() {
  if [ "$RELEASE" == "1" ]; then
    run_command buildkite-agent meta-data set canary "0"
  fi
}

# Compute the canary revision (commits ahead of the latest release tag)
# and store it in build meta-data, unless it was already set.
function assert_canary() {
  local canary="$(buildkite-agent meta-data get canary 2>/dev/null)"
  if [ -z "$canary" ]; then
    # Fix: quote the command substitutions — arguments to `local` are
    # word-split, unlike plain assignments.
    local repo="$(echo "$BUILDKITE_REPO" | sed -E 's#https://github.com/([^/]+)/([^/]+).git#\1/\2#g')"
    local tag="$(curl -sL "https://api.github.com/repos/$repo/releases/latest" | jq -r ".tag_name")"
    if [ "$tag" == "null" ]; then
      canary="1"
    else
      local revision="$(curl -sL "https://api.github.com/repos/$repo/compare/$tag...$BUILDKITE_COMMIT" | jq -r ".ahead_by")"
      if [ "$revision" == "null" ]; then
        canary="1"
      else
        canary="$revision"
      fi
    fi
    run_command buildkite-agent meta-data set canary "$canary"
  fi
}

function upload_buildkite_pipeline() {
  local path="$1"
  if [ ! -f "$path" ]; then
    echo "error: Cannot find pipeline: $path"
    exit 1
  fi
  run_command buildkite-agent pipeline upload "$path"
}

# Trace a command before running it; the braced `set +x` hides the
# trace of turning tracing back off.
function run_command() {
  set -x
  "$@"
  { set +x; } 2>/dev/null
}

assert_build
assert_buildkite_agent
assert_jq
assert_curl
assert_release
assert_canary
upload_buildkite_pipeline ".buildkite/ci.yml"

View File

@@ -0,0 +1,47 @@
# Buildkite CI (Windows): upload one or more build artifacts, optionally
# splitting each into 50MB parts first (for very large files).
param (
    [Parameter(Mandatory=$true)]
    [string[]] $Paths,
    [switch] $Split
)
$ErrorActionPreference = "Stop"

function Assert-Buildkite-Agent() {
    if (-not (Get-Command "buildkite-agent" -ErrorAction SilentlyContinue)) {
        Write-Error "Cannot find buildkite-agent, please install it: https://buildkite.com/docs/agent/v3/install"
        exit 1
    }
}

# Split-File (FileSplitter module) is needed for -Split uploads.
function Assert-Split-File() {
    if (-not (Get-Command "Split-File" -ErrorAction SilentlyContinue)) {
        Write-Error "Cannot find Split-File, please install it: https://www.powershellgallery.com/packages/FileSplitter/1.3"
        exit 1
    }
}

function Upload-Buildkite-Artifact() {
    param (
        [Parameter(Mandatory=$true)]
        # Fix: removed the trailing comma after this parameter — a trailing
        # comma in a param() block is a PowerShell parse error.
        [string] $Path
    )
    if (-not (Test-Path $Path)) {
        Write-Error "Could not find artifact: $Path"
        exit 1
    }
    if ($Split) {
        # Drop stale parts from a previous run, then split into 50MB chunks
        # and upload the parts instead of the original file.
        Remove-Item -Path "$Path.*" -Force
        Split-File -Path (Resolve-Path $Path) -PartSizeBytes "50MB" -Verbose
        $Path = "$Path.*"
    }
    & buildkite-agent artifact upload "$Path" --debug --debug-http
}

Assert-Buildkite-Agent
if ($Split) {
    Assert-Split-File
}
foreach ($Path in $Paths) {
    Upload-Buildkite-Artifact $Path
}

View File

@@ -0,0 +1,54 @@
#!/bin/bash
# Buildkite CI: upload an artifact, optionally splitting it into 50MB
# parts first (for files too large to upload in one piece).
# Usage: upload-artifact.sh <path> [--split] [extra buildkite-agent args...]
set -eo pipefail

function assert_buildkite_agent() {
  if ! command -v buildkite-agent &> /dev/null; then
    echo "error: Cannot find buildkite-agent, please install it:"
    echo "https://buildkite.com/docs/agent/v3/install"
    exit 1
  fi
}

# `split` is required for --split uploads; fail with a hint otherwise.
function assert_split() {
  if ! command -v split &> /dev/null; then
    echo "error: Cannot find split, please install it:"
    echo "https://www.gnu.org/software/coreutils/split"
    exit 1
  fi
}

function upload_buildkite_artifact() {
  local path="$1"; shift
  local split="0"
  local args=()
  # Pull out our own --split flag; everything else is forwarded verbatim
  # to `buildkite-agent artifact upload`.
  while true; do
    if [ -z "$1" ]; then
      break
    fi
    case "$1" in
    --split) split="1"; shift ;;
    *) args+=("$1"); shift ;;
    esac
  done
  if [ ! -f "$path" ]; then
    echo "error: Could not find artifact: $path"
    exit 1
  fi
  if [ "$split" == "1" ]; then
    # Fix: assert_split was defined but never invoked, so a missing
    # `split` binary surfaced as a confusing failure below instead of
    # the intended install hint.
    assert_split
    run_command rm -f "$path."*
    run_command split -b 50MB -d "$path" "$path."
    # "$path.*" is passed unexpanded: buildkite-agent globs it itself.
    run_command buildkite-agent artifact upload "$path.*" "${args[@]}"
  else
    run_command buildkite-agent artifact upload "$path" "${args[@]}"
  fi
}

# Trace a command before running it; the braced `set +x` hides the
# trace of turning tracing back off.
function run_command() {
  set -x
  "$@"
  { set +x; } 2>/dev/null
}

assert_buildkite_agent
upload_buildkite_artifact "$@"

View File

@@ -3,7 +3,15 @@
set -eo pipefail
function assert_main() {
if [[ "$BUILDKITE_PULL_REQUEST_REPO" && "$BUILDKITE_REPO" != "$BUILDKITE_PULL_REQUEST_REPO" ]]; then
if [ -z "$BUILDKITE_REPO" ]; then
echo "error: Cannot find repository for this build"
exit 1
fi
if [ -z "$BUILDKITE_COMMIT" ]; then
echo "error: Cannot find commit for this build"
exit 1
fi
if [ -n "$BUILDKITE_PULL_REQUEST_REPO" ] && [ "$BUILDKITE_REPO" != "$BUILDKITE_PULL_REQUEST_REPO" ]; then
echo "error: Cannot upload release from a fork"
exit 1
fi
@@ -18,77 +26,186 @@ function assert_main() {
}
function assert_buildkite_agent() {
if ! command -v buildkite-agent &> /dev/null; then
if ! command -v "buildkite-agent" &> /dev/null; then
echo "error: Cannot find buildkite-agent, please install it:"
echo "https://buildkite.com/docs/agent/v3/install"
exit 1
fi
}
function assert_gh() {
if ! command -v gh &> /dev/null; then
echo "warning: gh is not installed, installing..."
function assert_github() {
assert_command "gh" "gh" "https://github.com/cli/cli#installation"
assert_buildkite_secret "GITHUB_TOKEN"
# gh expects the token in $GH_TOKEN
export GH_TOKEN="$GITHUB_TOKEN"
}
function assert_aws() {
assert_command "aws" "awscli" "https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html"
for secret in "AWS_ACCESS_KEY_ID" "AWS_SECRET_ACCESS_KEY" "AWS_ENDPOINT"; do
assert_buildkite_secret "$secret"
done
assert_buildkite_secret "AWS_BUCKET" --skip-redaction
}
function assert_sentry() {
assert_command "sentry-cli" "getsentry/tools/sentry-cli" "https://docs.sentry.io/cli/installation/"
for secret in "SENTRY_AUTH_TOKEN" "SENTRY_ORG" "SENTRY_PROJECT"; do
assert_buildkite_secret "$secret"
done
}
function run_command() {
set -x
"$@"
{ set +x; } 2>/dev/null
}
function assert_command() {
local command="$1"
local package="$2"
local help_url="$3"
if ! command -v "$command" &> /dev/null; then
echo "warning: $command is not installed, installing..."
if command -v brew &> /dev/null; then
brew install gh
HOMEBREW_NO_AUTO_UPDATE=1 run_command brew install "$package"
else
echo "error: Cannot install gh, please install it:"
echo "https://github.com/cli/cli#installation"
echo "error: Cannot install $command, please install it"
if [ -n "$help_url" ]; then
echo ""
echo "hint: See $help_url for help"
fi
exit 1
fi
fi
}
function assert_gh_token() {
local token=$(buildkite-agent secret get GITHUB_TOKEN)
if [ -z "$token" ]; then
echo "error: Cannot find GITHUB_TOKEN secret"
function assert_buildkite_secret() {
local key="$1"
local value=$(buildkite-agent secret get "$key" ${@:2})
if [ -z "$value" ]; then
echo "error: Cannot find $key secret"
echo ""
echo "hint: Create a secret named GITHUB_TOKEN with a GitHub access token:"
echo "hint: Create a secret named $key with a value:"
echo "https://buildkite.com/docs/pipelines/buildkite-secrets"
exit 1
fi
export GH_TOKEN="$token"
export "$key"="$value"
}
function download_artifact() {
local name=$1
buildkite-agent artifact download "$name" .
if [ ! -f "$name" ]; then
function release_tag() {
local version="$1"
if [ "$version" == "canary" ]; then
echo "canary"
else
echo "bun-v$version"
fi
}
function create_sentry_release() {
local version="$1"
local release="$version"
if [ "$version" == "canary" ]; then
release="$BUILDKITE_COMMIT-canary"
fi
run_command sentry-cli releases new "$release" --finalize
run_command sentry-cli releases set-commits "$release" --auto --ignore-missing
if [ "$version" == "canary" ]; then
run_command sentry-cli deploys new --env="canary" --release="$release"
fi
}
function download_buildkite_artifact() {
local name="$1"
local dir="$2"
if [ -z "$dir" ]; then
dir="."
fi
run_command buildkite-agent artifact download "$name" "$dir"
if [ ! -f "$dir/$name" ]; then
echo "error: Cannot find Buildkite artifact: $name"
exit 1
fi
}
function upload_assets() {
local tag=$1
local files=${@:2}
gh release upload "$tag" $files --clobber --repo "$BUILDKITE_REPO"
function upload_github_asset() {
local version="$1"
local tag="$(release_tag "$version")"
local file="$2"
run_command gh release upload "$tag" "$file" --clobber --repo "$BUILDKITE_REPO"
# Sometimes the upload fails, maybe this is a race condition in the gh CLI?
while [ "$(gh release view "$tag" --repo "$BUILDKITE_REPO" | grep -c "$file")" -eq 0 ]; do
echo "warn: Uploading $file to $tag failed, retrying..."
sleep "$((RANDOM % 5 + 1))"
run_command gh release upload "$tag" "$file" --clobber --repo "$BUILDKITE_REPO"
done
}
assert_main
assert_buildkite_agent
assert_gh
assert_gh_token
# Refresh the release notes for the canary GitHub release so they point at
# the current commit. Versioned (non-canary) releases are left untouched.
# $1 = version ("canary" or "x.y.z").
function update_github_release() {
  local tag
  tag="$(release_tag "$1")"
  if [ "$tag" != "canary" ]; then
    return
  fi
  run_command gh release edit "$tag" --repo "$BUILDKITE_REPO" \
    --notes "This release of Bun corresponds to the commit: $BUILDKITE_COMMIT"
}
# Release artifacts produced by CI, one zip per platform/arch/variant.
# "-profile" variants keep debug symbols; "-baseline" targets older x64 CPUs.
declare artifacts=(
  bun-darwin-aarch64.zip
  bun-darwin-aarch64-profile.zip
  bun-darwin-x64.zip
  bun-darwin-x64-profile.zip
  bun-linux-aarch64.zip
  bun-linux-aarch64-profile.zip
  bun-linux-x64.zip
  bun-linux-x64-profile.zip
  bun-linux-x64-baseline.zip
  bun-linux-x64-baseline-profile.zip
  bun-windows-x64.zip
  bun-windows-x64-profile.zip
  bun-windows-x64-baseline.zip
  bun-windows-x64-baseline-profile.zip
)
# Copy a local file into the release bucket under the given folder.
# $1 = destination folder (key prefix), $2 = local file path.
# Requires AWS_ENDPOINT and AWS_BUCKET to be set in the environment.
function upload_s3_file() {
  local folder="$1"
  local file="$2"
  run_command aws --endpoint-url="$AWS_ENDPOINT" s3 cp "$file" "s3://$AWS_BUCKET/$folder/$file"
}
for artifact in "${artifacts[@]}"; do
download_artifact $artifact
done
# Publish a release: download every CI artifact, upload it to S3 (by commit
# and by tag) and to the GitHub release, then refresh the canary release
# notes and create a Sentry release. $1 = 'canary' or 'x.y.z'.
function create_release() {
  assert_main
  assert_buildkite_agent
  assert_github
  assert_aws
  assert_sentry
  local tag="$1" # 'canary' or 'x.y.z'
  local artifacts=(
    bun-darwin-aarch64.zip
    bun-darwin-aarch64-profile.zip
    bun-darwin-x64.zip
    bun-darwin-x64-profile.zip
    bun-linux-aarch64.zip
    bun-linux-aarch64-profile.zip
    bun-linux-x64.zip
    bun-linux-x64-profile.zip
    bun-linux-x64-baseline.zip
    bun-linux-x64-baseline-profile.zip
    bun-windows-x64.zip
    bun-windows-x64-profile.zip
    bun-windows-x64-baseline.zip
    bun-windows-x64-baseline-profile.zip
  )
  # Process one artifact end-to-end. The uploads run sequentially on purpose:
  # this helper is backgrounded per-artifact below, and `wait` only waits for
  # the shell's direct children — backgrounding the uploads here as well
  # would orphan them, letting the release metadata steps race ahead of
  # unfinished uploads.
  function upload_artifact() {
    local artifact="$1"
    download_buildkite_artifact "$artifact"
    upload_s3_file "releases/$BUILDKITE_COMMIT" "$artifact"
    upload_s3_file "releases/$tag" "$artifact"
    upload_github_asset "$tag" "$artifact"
  }
  # Artifacts are independent, so parallelize across them.
  for artifact in "${artifacts[@]}"; do
    upload_artifact "$artifact" &
  done
  wait
  update_github_release "$tag"
  create_sentry_release "$tag"
}
# Exit successfully (skipping the release) unless Buildkite meta-data marks
# this build as a canary build. Missing or "0" meta-data means "not canary".
function assert_canary() {
  local canary
  canary="$(buildkite-agent meta-data get canary 2>/dev/null)"
  case "$canary" in
    "" | "0")
      echo "warn: Skipping release because this is not a canary build"
      exit 0
      ;;
  esac
}
assert_canary
create_release "canary"

View File

@@ -1,286 +0,0 @@
name: Build Darwin
permissions:
contents: read
actions: write
on:
workflow_call:
inputs:
runs-on:
type: string
default: macos-13-large
tag:
type: string
required: true
arch:
type: string
required: true
cpu:
type: string
required: true
assertions:
type: boolean
canary:
type: boolean
no-cache:
type: boolean
env:
LLVM_VERSION: 18
BUN_VERSION: 1.1.8
LC_CTYPE: "en_US.UTF-8"
LC_ALL: "en_US.UTF-8"
# LTO is disabled because we cannot use lld on macOS currently
BUN_ENABLE_LTO: "0"
jobs:
build-submodules:
name: Build Submodules
runs-on: ${{ inputs.runs-on }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
sparse-checkout: |
.gitmodules
src/deps
scripts
- name: Hash Submodules
id: hash
run: |
print_versions() {
git submodule | grep -v WebKit
echo "LLVM_VERSION=${{ env.LLVM_VERSION }}"
cat $(echo scripts/build*.sh scripts/all-dependencies.sh | tr " " "\n" | sort)
}
echo "hash=$(print_versions | shasum)" >> $GITHUB_OUTPUT
- name: Install Dependencies
env:
HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK: 1
HOMEBREW_NO_AUTO_UPDATE: 1
HOMEBREW_NO_INSTALL_CLEANUP: 1
run: |
brew install \
llvm@${{ env.LLVM_VERSION }} \
ccache \
rust \
pkg-config \
coreutils \
libtool \
cmake \
libiconv \
automake \
openssl@1.1 \
ninja \
golang \
gnu-sed --force --overwrite
echo "$(brew --prefix ccache)/bin" >> $GITHUB_PATH
echo "$(brew --prefix coreutils)/libexec/gnubin" >> $GITHUB_PATH
echo "$(brew --prefix llvm@$LLVM_VERSION)/bin" >> $GITHUB_PATH
brew link --overwrite llvm@$LLVM_VERSION
- name: Clone Submodules
run: |
./scripts/update-submodules.sh
- name: Build Submodules
env:
CPU_TARGET: ${{ inputs.cpu }}
BUN_DEPS_OUT_DIR: ${{ runner.temp }}/bun-deps
run: |
mkdir -p $BUN_DEPS_OUT_DIR
./scripts/all-dependencies.sh
- name: Upload bun-${{ inputs.tag }}-deps
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-deps
path: ${{ runner.temp }}/bun-deps
if-no-files-found: error
build-cpp:
name: Build C++
runs-on: ${{ inputs.runs-on }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
# TODO: Figure out how to cache homebrew dependencies
- name: Install Dependencies
env:
HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK: 1
HOMEBREW_NO_AUTO_UPDATE: 1
HOMEBREW_NO_INSTALL_CLEANUP: 1
run: |
brew install \
llvm@${{ env.LLVM_VERSION }} \
ccache \
rust \
pkg-config \
coreutils \
libtool \
cmake \
libiconv \
automake \
openssl@1.1 \
ninja \
golang \
gnu-sed --force --overwrite
echo "$(brew --prefix ccache)/bin" >> $GITHUB_PATH
echo "$(brew --prefix coreutils)/libexec/gnubin" >> $GITHUB_PATH
echo "$(brew --prefix llvm@$LLVM_VERSION)/bin" >> $GITHUB_PATH
brew link --overwrite llvm@$LLVM_VERSION
- name: Setup Bun
uses: ./.github/actions/setup-bun
with:
bun-version: ${{ env.BUN_VERSION }}
- name: Compile
env:
CPU_TARGET: ${{ inputs.cpu }}
SOURCE_DIR: ${{ github.workspace }}
OBJ_DIR: ${{ runner.temp }}/bun-cpp-obj
BUN_DEPS_OUT_DIR: ${{ runner.temp }}/bun-deps
CCACHE_DIR: ${{ runner.temp }}/ccache
run: |
mkdir -p $OBJ_DIR
cd $OBJ_DIR
cmake -S $SOURCE_DIR -B $OBJ_DIR \
-G Ninja \
-DCMAKE_BUILD_TYPE=Release \
-DUSE_LTO=ON \
-DBUN_CPP_ONLY=1 \
-DNO_CONFIGURE_DEPENDS=1
chmod +x compile-cpp-only.sh
./compile-cpp-only.sh -v
- name: Upload bun-${{ inputs.tag }}-cpp
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-cpp
path: ${{ runner.temp }}/bun-cpp-obj/bun-cpp-objects.a
if-no-files-found: error
build-zig:
name: Build Zig
uses: ./.github/workflows/build-zig.yml
with:
os: darwin
only-zig: true
tag: ${{ inputs.tag }}
arch: ${{ inputs.arch }}
cpu: ${{ inputs.cpu }}
assertions: ${{ inputs.assertions }}
canary: ${{ inputs.canary }}
no-cache: ${{ inputs.no-cache }}
link:
name: Link
runs-on: ${{ inputs.runs-on }}
needs:
- build-submodules
- build-cpp
- build-zig
steps:
- uses: actions/checkout@v4
# TODO: Figure out how to cache homebrew dependencies
- name: Install Dependencies
env:
HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK: 1
HOMEBREW_NO_AUTO_UPDATE: 1
HOMEBREW_NO_INSTALL_CLEANUP: 1
run: |
brew install \
llvm@${{ env.LLVM_VERSION }} \
ccache \
rust \
pkg-config \
coreutils \
libtool \
cmake \
libiconv \
automake \
openssl@1.1 \
ninja \
golang \
gnu-sed --force --overwrite
echo "$(brew --prefix ccache)/bin" >> $GITHUB_PATH
echo "$(brew --prefix coreutils)/libexec/gnubin" >> $GITHUB_PATH
echo "$(brew --prefix llvm@$LLVM_VERSION)/bin" >> $GITHUB_PATH
brew link --overwrite llvm@$LLVM_VERSION
- name: Setup Bun
uses: ./.github/actions/setup-bun
with:
bun-version: ${{ env.BUN_VERSION }}
- name: Download bun-${{ inputs.tag }}-deps
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-deps
path: ${{ runner.temp }}/bun-deps
- name: Download bun-${{ inputs.tag }}-cpp
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-cpp
path: ${{ runner.temp }}/bun-cpp-obj
- name: Download bun-${{ inputs.tag }}-zig
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-zig
path: ${{ runner.temp }}/release
- name: Link
env:
CPU_TARGET: ${{ inputs.cpu }}
run: |
SRC_DIR=$PWD
mkdir ${{ runner.temp }}/link-build
cd ${{ runner.temp }}/link-build
cmake $SRC_DIR \
-G Ninja \
-DCMAKE_BUILD_TYPE=Release \
-DUSE_LTO=ON \
-DBUN_LINK_ONLY=1 \
-DBUN_ZIG_OBJ_DIR="${{ runner.temp }}/release" \
-DBUN_CPP_ARCHIVE="${{ runner.temp }}/bun-cpp-obj/bun-cpp-objects.a" \
-DBUN_DEPS_OUT_DIR="${{ runner.temp }}/bun-deps" \
-DNO_CONFIGURE_DEPENDS=1
ninja -v
- name: Prepare
run: |
cd ${{ runner.temp }}/link-build
chmod +x bun-profile bun
mkdir -p bun-${{ inputs.tag }}-profile/ bun-${{ inputs.tag }}/
mv bun-profile bun-${{ inputs.tag }}-profile/bun-profile
if [ -e bun-profile.dSYM ]; then
mv bun-profile.dSYM bun-${{ inputs.tag }}-profile/bun-profile.dSYM
fi
if [ -e bun.dSYM ]; then
mv bun.dSYM bun-${{ inputs.tag }}/bun.dSYM
fi
mv bun bun-${{ inputs.tag }}/bun
zip -r bun-${{ inputs.tag }}-profile.zip bun-${{ inputs.tag }}-profile
zip -r bun-${{ inputs.tag }}.zip bun-${{ inputs.tag }}
- name: Upload bun-${{ inputs.tag }}
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}
path: ${{ runner.temp }}/link-build/bun-${{ inputs.tag }}.zip
if-no-files-found: error
- name: Upload bun-${{ inputs.tag }}-profile
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-profile
path: ${{ runner.temp }}/link-build/bun-${{ inputs.tag }}-profile.zip
if-no-files-found: error
on-failure:
if: ${{ github.repository_owner == 'oven-sh' && failure() }}
name: On Failure
needs: link
runs-on: ubuntu-latest
steps:
- name: Send Message
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.DISCORD_WEBHOOK }}
nodetail: true
color: "#FF0000"
title: ""
description: |
### ❌ [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})
@${{ github.actor }}, the build for bun-${{ inputs.tag }} failed.
**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})**

View File

@@ -1,64 +0,0 @@
name: Build Linux
permissions:
contents: read
actions: write
on:
workflow_call:
inputs:
runs-on:
type: string
required: true
tag:
type: string
required: true
arch:
type: string
required: true
cpu:
type: string
required: true
assertions:
type: boolean
zig-optimize:
type: string
canary:
type: boolean
no-cache:
type: boolean
jobs:
build:
name: Build Linux
uses: ./.github/workflows/build-zig.yml
with:
os: linux
only-zig: false
runs-on: ${{ inputs.runs-on }}
tag: ${{ inputs.tag }}
arch: ${{ inputs.arch }}
cpu: ${{ inputs.cpu }}
assertions: ${{ inputs.assertions }}
zig-optimize: ${{ inputs.zig-optimize }}
canary: ${{ inputs.canary }}
no-cache: ${{ inputs.no-cache }}
on-failure:
if: ${{ github.repository_owner == 'oven-sh' && failure() }}
name: On Failure
needs: build
runs-on: ubuntu-latest
steps:
- name: Send Message
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.DISCORD_WEBHOOK }}
nodetail: true
color: "#FF0000"
title: ""
description: |
### ❌ [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})
@${{ github.actor }}, the build for bun-${{ inputs.tag }} failed.
**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})**

View File

@@ -1,348 +0,0 @@
name: Build Windows
permissions:
contents: read
actions: write
on:
workflow_call:
inputs:
runs-on:
type: string
default: windows
tag:
type: string
required: true
arch:
type: string
required: true
cpu:
type: string
required: true
assertions:
type: boolean
canary:
type: boolean
no-cache:
type: boolean
bun-version:
type: string
default: 1.1.7
env:
# Must specify exact version of LLVM for Windows
LLVM_VERSION: 18.1.8
BUN_VERSION: ${{ inputs.bun-version }}
BUN_GARBAGE_COLLECTOR_LEVEL: 1
BUN_FEATURE_FLAG_INTERNAL_FOR_TESTING: 1
CI: true
USE_LTO: 1
jobs:
build-submodules:
name: Build Submodules
runs-on: ${{ inputs.runs-on }}
steps:
- name: Install Scoop
run: |
Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
Invoke-RestMethod -Uri https://get.scoop.sh | Invoke-Expression
Join-Path (Resolve-Path ~).Path "scoop\shims" >> $Env:GITHUB_PATH
- name: Setup Git
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- name: Checkout
uses: actions/checkout@v4
with:
sparse-checkout: |
.gitmodules
src/deps
scripts
- name: Hash Submodules
id: hash
run: |
$data = "$(& {
git submodule | Where-Object { $_ -notmatch 'WebKit' }
echo "LLVM_VERSION=${{ env.LLVM_VERSION }}"
Get-Content -Path (Get-ChildItem -Path 'scripts/build*.ps1', 'scripts/all-dependencies.ps1', 'scripts/env.ps1' | Sort-Object -Property Name).FullName | Out-String
echo 1
})"
$hash = ( -join ((New-Object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider).ComputeHash([System.Text.Encoding]::UTF8.GetBytes($data)) | ForEach-Object { $_.ToString("x2") } )).Substring(0, 10)
echo "hash=${hash}" >> $env:GITHUB_OUTPUT
- if: ${{ !inputs.no-cache }}
name: Restore Cache
id: cache
uses: actions/cache/restore@v4
with:
path: bun-deps
key: bun-${{ inputs.tag }}-deps-${{ steps.hash.outputs.hash }}
- if: ${{ inputs.no-cache || !steps.cache.outputs.cache-hit }}
name: Install LLVM and Ninja
run: |
scoop install ninja
scoop install llvm@${{ env.LLVM_VERSION }}
scoop install nasm@2.16.01
- if: ${{ inputs.no-cache || !steps.cache.outputs.cache-hit }}
name: Clone Submodules
run: |
.\scripts\update-submodules.ps1
- if: ${{ inputs.no-cache || !steps.cache.outputs.cache-hit }}
name: Build Dependencies
env:
CPU_TARGET: ${{ inputs.cpu }}
CCACHE_DIR: ccache
USE_LTO: 1
run: |
.\scripts\env.ps1 ${{ contains(inputs.tag, '-baseline') && '-Baseline' || '' }}
$env:BUN_DEPS_OUT_DIR = (mkdir -Force "./bun-deps")
.\scripts\all-dependencies.ps1
- name: Save Cache
if: ${{ inputs.no-cache || !steps.cache.outputs.cache-hit }}
uses: actions/cache/save@v4
with:
path: bun-deps
key: ${{ steps.cache.outputs.cache-primary-key }}
- name: Upload bun-${{ inputs.tag }}-deps
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-deps
path: bun-deps
if-no-files-found: error
codegen:
name: Codegen
runs-on: ubuntu-latest
steps:
- name: Setup Git
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- name: Checkout
uses: actions/checkout@v4
- name: Setup Bun
uses: ./.github/actions/setup-bun
with:
bun-version: ${{ inputs.bun-version }}
- name: Codegen
run: |
./scripts/cross-compile-codegen.sh win32 x64
- if: ${{ inputs.canary }}
name: Calculate Revision
run: |
echo "canary_revision=$(GITHUB_TOKEN="${{ github.token }}" bash ./scripts/calculate-canary-revision.sh --raw)" > build-codegen-win32-x64/.canary_revision
- name: Upload bun-${{ inputs.tag }}-codegen
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-codegen
path: build-codegen-win32-x64
if-no-files-found: error
build-cpp:
name: Build C++
needs: codegen
runs-on: ${{ inputs.runs-on }}
steps:
- name: Install Scoop
run: |
Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
Invoke-RestMethod -Uri https://get.scoop.sh | Invoke-Expression
Join-Path (Resolve-Path ~).Path "scoop\shims" >> $Env:GITHUB_PATH
- name: Setup Git
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
- name: Install LLVM and Ninja
run: |
scoop install ninja
scoop install llvm@${{ env.LLVM_VERSION }}
- name: Setup Bun
uses: ./.github/actions/setup-bun
with:
bun-version: ${{ inputs.bun-version }}
- if: ${{ !inputs.no-cache }}
name: Restore Cache
uses: actions/cache@v4
with:
path: ccache
key: bun-${{ inputs.tag }}-cpp-${{ hashFiles('Dockerfile', 'Makefile', 'CMakeLists.txt', 'build.zig', 'scripts/**', 'src/**', 'packages/bun-usockets/src/**', 'packages/bun-uws/src/**') }}
restore-keys: |
bun-${{ inputs.tag }}-cpp-
- name: Download bun-${{ inputs.tag }}-codegen
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-codegen
path: build
- name: Compile
env:
CPU_TARGET: ${{ inputs.cpu }}
CCACHE_DIR: ccache
USE_LTO: 1
run: |
# $CANARY_REVISION = if (Test-Path build/.canary_revision) { Get-Content build/.canary_revision } else { "0" }
$CANARY_REVISION = 0
.\scripts\env.ps1 ${{ contains(inputs.tag, '-baseline') && '-Baseline' || '' }}
.\scripts\update-submodules.ps1
.\scripts\build-libuv.ps1 -CloneOnly $True
cd build
cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release `
-DNO_CODEGEN=1 `
-DUSE_LTO=1 `
-DNO_CONFIGURE_DEPENDS=1 `
"-DCANARY=${CANARY_REVISION}" `
-DBUN_CPP_ONLY=1 ${{ contains(inputs.tag, '-baseline') && '-DUSE_BASELINE_BUILD=1' || '' }}
if ($LASTEXITCODE -ne 0) { throw "CMake configuration failed" }
.\compile-cpp-only.ps1 -v
if ($LASTEXITCODE -ne 0) { throw "C++ compilation failed" }
- name: Upload bun-${{ inputs.tag }}-cpp
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-cpp
path: build/bun-cpp-objects.a
if-no-files-found: error
build-zig:
name: Build Zig
uses: ./.github/workflows/build-zig.yml
with:
os: windows
zig-optimize: ReleaseSafe
only-zig: true
tag: ${{ inputs.tag }}
arch: ${{ inputs.arch }}
cpu: ${{ inputs.cpu }}
assertions: ${{ inputs.assertions }}
canary: ${{ inputs.canary }}
no-cache: ${{ inputs.no-cache }}
link:
name: Link
runs-on: ${{ inputs.runs-on }}
needs:
- build-submodules
- build-cpp
- build-zig
- codegen
steps:
- name: Install Scoop
run: |
Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
Invoke-RestMethod -Uri https://get.scoop.sh | Invoke-Expression
Join-Path (Resolve-Path ~).Path "scoop\shims" >> $Env:GITHUB_PATH
- name: Setup Git
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
- name: Install Ninja
run: |
scoop install ninja
scoop install llvm@${{ env.LLVM_VERSION }}
- name: Setup Bun
uses: ./.github/actions/setup-bun
with:
bun-version: ${{ inputs.bun-version }}
- name: Download bun-${{ inputs.tag }}-deps
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-deps
path: bun-deps
- name: Download bun-${{ inputs.tag }}-cpp
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-cpp
path: bun-cpp
- name: Download bun-${{ inputs.tag }}-zig
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-zig
path: bun-zig
- name: Download bun-${{ inputs.tag }}-codegen
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-codegen
path: build
- if: ${{ !inputs.no-cache }}
name: Restore Cache
uses: actions/cache@v4
with:
path: ccache
key: bun-${{ inputs.tag }}-cpp-${{ hashFiles('Dockerfile', 'Makefile', 'CMakeLists.txt', 'build.zig', 'scripts/**', 'src/**', 'packages/bun-usockets/src/**', 'packages/bun-uws/src/**') }}
restore-keys: |
bun-${{ inputs.tag }}-cpp-
- name: Link
env:
CPU_TARGET: ${{ inputs.cpu }}
CCACHE_DIR: ccache
run: |
.\scripts\update-submodules.ps1
.\scripts\env.ps1 ${{ contains(inputs.tag, '-baseline') && '-Baseline' || '' }}
Set-Location build
# $CANARY_REVISION = if (Test-Path build/.canary_revision) { Get-Content build/.canary_revision } else { "0" }
$CANARY_REVISION = 0
cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release `
-DNO_CODEGEN=1 `
-DNO_CONFIGURE_DEPENDS=1 `
"-DCANARY=${CANARY_REVISION}" `
-DBUN_LINK_ONLY=1 `
-DUSE_LTO=1 `
"-DBUN_DEPS_OUT_DIR=$(Resolve-Path ../bun-deps)" `
"-DBUN_CPP_ARCHIVE=$(Resolve-Path ../bun-cpp/bun-cpp-objects.a)" `
"-DBUN_ZIG_OBJ_DIR=$(Resolve-Path ../bun-zig)" `
${{ contains(inputs.tag, '-baseline') && '-DUSE_BASELINE_BUILD=1' || '' }}
if ($LASTEXITCODE -ne 0) { throw "CMake configuration failed" }
ninja -v
if ($LASTEXITCODE -ne 0) { throw "Link failed!" }
- name: Prepare
run: |
$Dist = mkdir -Force "bun-${{ inputs.tag }}"
cp -r build\bun.exe "$Dist\bun.exe"
Compress-Archive -Force "$Dist" "${Dist}.zip"
$Dist = "$Dist-profile"
MkDir -Force "$Dist"
cp -r build\bun.exe "$Dist\bun.exe"
cp -r build\bun.pdb "$Dist\bun.pdb"
Compress-Archive -Force "$Dist" "$Dist.zip"
.\build\bun.exe --print "JSON.stringify(require('bun:internal-for-testing').crash_handler.getFeatureData())" > .\features.json
- name: Upload bun-${{ inputs.tag }}
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}
path: bun-${{ inputs.tag }}.zip
if-no-files-found: error
- name: Upload bun-${{ inputs.tag }}-profile
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-profile
path: bun-${{ inputs.tag }}-profile.zip
if-no-files-found: error
- name: Upload bun-feature-data
uses: actions/upload-artifact@v4
with:
name: bun-feature-data
path: features.json
if-no-files-found: error
overwrite: true
on-failure:
if: ${{ github.repository_owner == 'oven-sh' && failure() }}
name: On Failure
needs: link
runs-on: ubuntu-latest
steps:
- name: Send Message
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.DISCORD_WEBHOOK }}
nodetail: true
color: "#FF0000"
title: ""
description: |
### ❌ [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})
@${{ github.actor }}, the build for bun-${{ inputs.tag }} failed.
**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})**

View File

@@ -1,122 +0,0 @@
name: Build Zig
permissions:
contents: read
actions: write
on:
workflow_call:
inputs:
runs-on:
type: string
default: ${{ github.repository_owner != 'oven-sh' && 'ubuntu-latest' || inputs.only-zig && 'namespace-profile-bun-ci-linux-x64' || inputs.arch == 'x64' && 'namespace-profile-bun-ci-linux-x64' || 'namespace-profile-bun-ci-linux-aarch64' }}
tag:
type: string
required: true
os:
type: string
required: true
arch:
type: string
required: true
cpu:
type: string
required: true
assertions:
type: boolean
default: false
zig-optimize:
type: string # 'ReleaseSafe' or 'ReleaseFast'
default: ReleaseFast
canary:
type: boolean
default: ${{ github.ref == 'refs/heads/main' }}
only-zig:
type: boolean
default: true
no-cache:
type: boolean
default: false
jobs:
build-zig:
name: ${{ inputs.only-zig && 'Build Zig' || 'Build & Link' }}
runs-on: ${{ inputs.runs-on }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Calculate Cache Key
id: cache
run: |
echo "key=${{ hashFiles('Dockerfile', 'Makefile', 'CMakeLists.txt', 'build.zig', 'scripts/**', 'src/**', 'packages/bun-usockets/src/**', 'packages/bun-uws/src/**') }}" >> $GITHUB_OUTPUT
- if: ${{ !inputs.no-cache }}
name: Restore Cache
uses: actions/cache@v4
with:
key: bun-${{ inputs.tag }}-docker-${{ steps.cache.outputs.key }}
restore-keys: |
bun-${{ inputs.tag }}-docker-
path: |
${{ runner.temp }}/dockercache
- name: Setup Docker
uses: docker/setup-buildx-action@v3
with:
install: true
platforms: |
linux/${{ runner.arch == 'X64' && 'amd64' || 'arm64' }}
- name: Build
uses: docker/build-push-action@v5
with:
push: false
target: ${{ inputs.only-zig && 'build_release_obj' || 'artifact' }}
cache-from: |
type=local,src=${{ runner.temp }}/dockercache
cache-to: |
type=local,dest=${{ runner.temp }}/dockercache,mode=max
outputs: |
type=local,dest=${{ runner.temp }}/release
platforms: |
linux/${{ runner.arch == 'X64' && 'amd64' || 'arm64' }}
build-args: |
GIT_SHA=${{ github.event.workflow_run.head_sha || github.sha }}
TRIPLET=${{ inputs.os == 'darwin' && format('{0}-macos-none', inputs.arch == 'x64' && 'x86_64' || 'aarch64') || inputs.os == 'windows' && format('{0}-windows-msvc', inputs.arch == 'x64' && 'x86_64' || 'aarch64') || format('{0}-linux-gnu', inputs.arch == 'x64' && 'x86_64' || 'aarch64') }}
ARCH=${{ inputs.arch == 'x64' && 'x86_64' || 'aarch64' }}
BUILDARCH=${{ inputs.arch == 'x64' && 'amd64' || 'arm64' }}
BUILD_MACHINE_ARCH=${{ inputs.arch == 'x64' && 'x86_64' || 'aarch64' }}
CPU_TARGET=${{ inputs.arch == 'x64' && inputs.cpu || 'native' }}
ASSERTIONS=${{ inputs.assertions && 'ON' || 'OFF' }}
ZIG_OPTIMIZE=${{ inputs.zig-optimize }}
CANARY=${{ inputs.canary && '1' || '0' }}
- if: ${{ inputs.only-zig }}
name: Upload bun-${{ inputs.tag }}-zig
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-zig
path: ${{ runner.temp }}/release/bun-zig.o
if-no-files-found: error
- if: ${{ !inputs.only-zig }}
name: Prepare
run: |
cd ${{ runner.temp }}/release
chmod +x bun-profile bun
mkdir bun-${{ inputs.tag }}-profile
mkdir bun-${{ inputs.tag }}
strip bun
mv bun-profile bun-${{ inputs.tag }}-profile/bun-profile
mv bun bun-${{ inputs.tag }}/bun
zip -r bun-${{ inputs.tag }}-profile.zip bun-${{ inputs.tag }}-profile
zip -r bun-${{ inputs.tag }}.zip bun-${{ inputs.tag }}
- if: ${{ !inputs.only-zig }}
name: Upload bun-${{ inputs.tag }}
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}
path: ${{ runner.temp }}/release/bun-${{ inputs.tag }}.zip
if-no-files-found: error
- if: ${{ !inputs.only-zig }}
name: Upload bun-${{ inputs.tag }}-profile
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-profile
path: ${{ runner.temp }}/release/bun-${{ inputs.tag }}-profile.zip
if-no-files-found: error

View File

@@ -1,245 +0,0 @@
name: CI
permissions:
contents: read
actions: write
concurrency:
group: ${{ github.workflow }}-${{ github.event_name == 'workflow_dispatch' && inputs.run-id || github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
inputs:
run-id:
type: string
description: The workflow ID to download artifacts (skips the build step)
pull_request:
paths-ignore:
- .vscode/**/*
- docs/**/*
- examples/**/*
push:
branches:
- main
paths-ignore:
- .vscode/**/*
- docs/**/*
- examples/**/*
jobs:
format:
if: ${{ !inputs.run-id }}
name: Format
uses: ./.github/workflows/run-format.yml
secrets: inherit
with:
zig-version: 0.13.0
permissions:
contents: write
lint:
if: ${{ !inputs.run-id }}
name: Lint
uses: ./.github/workflows/run-lint.yml
secrets: inherit
linux-x64:
if: ${{ !inputs.run-id }}
name: Build linux-x64
uses: ./.github/workflows/build-linux.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
tag: linux-x64
arch: x64
cpu: haswell
canary: true
no-cache: true
linux-x64-baseline:
if: ${{ !inputs.run-id }}
name: Build linux-x64-baseline
uses: ./.github/workflows/build-linux.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
tag: linux-x64-baseline
arch: x64
cpu: nehalem
canary: true
no-cache: true
linux-aarch64:
if: ${{ !inputs.run-id && github.repository_owner == 'oven-sh' }}
name: Build linux-aarch64
uses: ./.github/workflows/build-linux.yml
secrets: inherit
with:
runs-on: namespace-profile-bun-ci-linux-aarch64
tag: linux-aarch64
arch: aarch64
cpu: native
canary: true
no-cache: true
darwin-x64:
if: ${{ !inputs.run-id }}
name: Build darwin-x64
uses: ./.github/workflows/build-darwin.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
tag: darwin-x64
arch: x64
cpu: haswell
canary: true
darwin-x64-baseline:
if: ${{ !inputs.run-id }}
name: Build darwin-x64-baseline
uses: ./.github/workflows/build-darwin.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
tag: darwin-x64-baseline
arch: x64
cpu: nehalem
canary: true
darwin-aarch64:
if: ${{ !inputs.run-id }}
name: Build darwin-aarch64
uses: ./.github/workflows/build-darwin.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-darwin-aarch64' || 'macos-13' }}
tag: darwin-aarch64
arch: aarch64
cpu: native
canary: true
windows-x64:
if: ${{ !inputs.run-id }}
name: Build windows-x64
uses: ./.github/workflows/build-windows.yml
secrets: inherit
with:
runs-on: windows
tag: windows-x64
arch: x64
cpu: haswell
canary: true
windows-x64-baseline:
if: ${{ !inputs.run-id }}
name: Build windows-x64-baseline
uses: ./.github/workflows/build-windows.yml
secrets: inherit
with:
runs-on: windows
tag: windows-x64-baseline
arch: x64
cpu: nehalem
canary: true
linux-x64-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
name: Test linux-x64
needs: linux-x64
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
tag: linux-x64
linux-x64-baseline-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
name: Test linux-x64-baseline
needs: linux-x64-baseline
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
tag: linux-x64-baseline
linux-aarch64-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' && github.repository_owner == 'oven-sh'}}
name: Test linux-aarch64
needs: linux-aarch64
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: namespace-profile-bun-ci-linux-aarch64
tag: linux-aarch64
darwin-x64-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
name: Test darwin-x64
needs: darwin-x64
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
tag: darwin-x64
darwin-x64-baseline-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
name: Test darwin-x64-baseline
needs: darwin-x64-baseline
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
tag: darwin-x64-baseline
darwin-aarch64-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
name: Test darwin-aarch64
needs: darwin-aarch64
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-darwin-aarch64' || 'macos-13' }}
tag: darwin-aarch64
windows-x64-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
name: Test windows-x64
needs: windows-x64
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: windows
tag: windows-x64
windows-x64-baseline-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
name: Test windows-x64-baseline
needs: windows-x64-baseline
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: windows
tag: windows-x64-baseline
cleanup:
if: ${{ always() }}
name: Cleanup
needs:
- linux-x64
- linux-x64-baseline
- linux-aarch64
- darwin-x64
- darwin-x64-baseline
- darwin-aarch64
- windows-x64
- windows-x64-baseline
runs-on: ubuntu-latest
steps:
- name: Cleanup Artifacts
uses: geekyeggo/delete-artifact@v5
with:
name: |
bun-*-cpp
bun-*-zig
bun-*-deps
bun-*-codegen

View File

@@ -1,55 +0,0 @@
name: Comment
permissions:
actions: read
pull-requests: write
on:
workflow_run:
workflows:
- CI
types:
- completed
jobs:
comment:
if: ${{ github.repository_owner == 'oven-sh' }}
name: Comment
runs-on: ubuntu-latest
steps:
- name: Download Tests
uses: actions/download-artifact@v4
with:
path: bun
pattern: bun-*-tests
github-token: ${{ github.token }}
run-id: ${{ github.event.workflow_run.id }}
- name: Setup Environment
id: env
shell: bash
run: |
echo "pr-number=$(<bun/bun-linux-x64-tests/pr-number.txt)" >> $GITHUB_OUTPUT
- name: Generate Comment
run: |
cat bun/bun-*-tests/comment.md > comment.md
if [ -s comment.md ]; then
echo -e "❌ @${{ github.actor }}, your commit has failing tests :(\n\n$(cat comment.md)" > comment.md
else
echo -e "✅ @${{ github.actor }}, all tests passed!" > comment.md
fi
echo -e "\n**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.event.workflow_run.id }})**" >> comment.md
echo -e "<!-- generated-comment workflow=${{ github.workflow }} -->" >> comment.md
- name: Find Comment
id: comment
uses: peter-evans/find-comment@v3
with:
issue-number: ${{ steps.env.outputs.pr-number }}
comment-author: github-actions[bot]
body-includes: <!-- generated-comment workflow=${{ github.workflow }} -->
- name: Write Comment
uses: peter-evans/create-or-update-comment@v4
with:
comment-id: ${{ steps.comment.outputs.comment-id }}
issue-number: ${{ steps.env.outputs.pr-number }}
body-path: comment.md
edit-mode: replace

View File

@@ -1,183 +0,0 @@
name: Create Release Build
run-name: Compile Bun v${{ inputs.version }} by ${{ github.actor }}
concurrency:
group: release
cancel-in-progress: true
permissions:
contents: write
actions: write
on:
workflow_dispatch:
inputs:
version:
type: string
required: true
description: "Release version. Example: 1.1.4. Exclude the 'v' prefix."
tag:
type: string
required: true
description: "GitHub tag to use"
clobber:
type: boolean
required: false
default: false
description: "Overwrite existing release artifacts?"
release:
types:
- created
jobs:
notify-start:
if: ${{ github.repository_owner == 'oven-sh' }}
name: Notify Start
runs-on: ubuntu-latest
steps:
- name: Send Message
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.DISCORD_WEBHOOK_PUBLIC }}
nodetail: true
color: "#1F6FEB"
title: "Bun v${{ inputs.version }} is compiling"
description: |
### @${{ github.actor }} started compiling Bun v${{inputs.version}}
- name: Send Message
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.BUN_DISCORD_GITHUB_CHANNEL_WEBHOOK }}
nodetail: true
color: "#1F6FEB"
title: "Bun v${{ inputs.version }} is compiling"
description: |
### @${{ github.actor }} started compiling Bun v${{inputs.version}}
**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})**
linux-x64:
name: Build linux-x64
uses: ./.github/workflows/build-linux.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
tag: linux-x64
arch: x64
cpu: haswell
canary: false
linux-x64-baseline:
name: Build linux-x64-baseline
uses: ./.github/workflows/build-linux.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
tag: linux-x64-baseline
arch: x64
cpu: nehalem
canary: false
linux-aarch64:
name: Build linux-aarch64
uses: ./.github/workflows/build-linux.yml
secrets: inherit
with:
runs-on: namespace-profile-bun-ci-linux-aarch64
tag: linux-aarch64
arch: aarch64
cpu: native
canary: false
darwin-x64:
name: Build darwin-x64
uses: ./.github/workflows/build-darwin.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
tag: darwin-x64
arch: x64
cpu: haswell
canary: false
darwin-x64-baseline:
name: Build darwin-x64-baseline
uses: ./.github/workflows/build-darwin.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
tag: darwin-x64-baseline
arch: x64
cpu: nehalem
canary: false
darwin-aarch64:
name: Build darwin-aarch64
uses: ./.github/workflows/build-darwin.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-darwin-aarch64' || 'macos-13' }}
tag: darwin-aarch64
arch: aarch64
cpu: native
canary: false
windows-x64:
name: Build windows-x64
uses: ./.github/workflows/build-windows.yml
secrets: inherit
with:
runs-on: windows
tag: windows-x64
arch: x64
cpu: haswell
canary: false
windows-x64-baseline:
name: Build windows-x64-baseline
uses: ./.github/workflows/build-windows.yml
secrets: inherit
with:
runs-on: windows
tag: windows-x64-baseline
arch: x64
cpu: nehalem
canary: false
upload-artifacts:
needs:
- linux-x64
- linux-x64-baseline
- linux-aarch64
- darwin-x64
- darwin-x64-baseline
- darwin-aarch64
- windows-x64
- windows-x64-baseline
runs-on: ubuntu-latest
steps:
- name: Download Artifacts
uses: actions/download-artifact@v4
with:
path: bun-releases
pattern: bun-*
merge-multiple: true
github-token: ${{ github.token }}
- name: Check for Artifacts
run: |
if [ ! -d "bun-releases" ] || [ -z "$(ls -A bun-releases)" ]; then
echo "Error: No artifacts were downloaded or 'bun-releases' directory does not exist."
exit 1 # Fail the job if the condition is met
else
echo "Artifacts downloaded successfully."
fi
- name: Send Message
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.DISCORD_WEBHOOK }}
nodetail: true
color: "#FF0000"
title: "Bun v${{ inputs.version }} release artifacts uploaded"
- name: "Upload Artifacts"
env:
GH_TOKEN: ${{ github.token }}
run: |
# Unzip one level deep each artifact
cd bun-releases
for f in *.zip; do
unzip -o $f
done
cd ..
gh release upload --repo=${{ github.repository }} ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.tag || github.event.release.id }} ${{ inputs.clobber && '--clobber' || '' }} bun-releases/*.zip

View File

@@ -1,3 +1,6 @@
# TODO: Move this to bash scripts instead of Github Actions
# so it can be run from Buildkite, see: .buildkite/scripts/release.sh
name: Release
concurrency: release

View File

@@ -31,7 +31,7 @@ jobs:
with:
bun-version: "1.1.20"
- name: Setup Zig
uses: goto-bus-stop/setup-zig@c7b6cdd3adba8f8b96984640ff172c37c93f73ee
uses: mlugg/setup-zig@v1
with:
version: ${{ inputs.zig-version }}
- name: Install Dependencies

View File

@@ -1,224 +0,0 @@
name: Test
permissions:
contents: read
actions: read
on:
workflow_call:
inputs:
runs-on:
type: string
required: true
tag:
type: string
required: true
pr-number:
type: string
required: true
run-id:
type: string
default: ${{ github.run_id }}
jobs:
test:
name: Tests
runs-on: ${{ inputs.runs-on }}
steps:
- if: ${{ runner.os == 'Windows' }}
name: Setup Git
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- name: Checkout
uses: actions/checkout@v4
with:
sparse-checkout: |
package.json
bun.lockb
test
packages/bun-internal-test
packages/bun-types
- name: Setup Environment
shell: bash
run: |
echo "${{ inputs.pr-number }}" > pr-number.txt
- name: Download Bun
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}
path: bun
github-token: ${{ github.token }}
run-id: ${{ inputs.run-id || github.run_id }}
- name: Download pnpm
uses: pnpm/action-setup@v4
with:
version: 8
- if: ${{ runner.os != 'Windows' }}
name: Setup Bun
shell: bash
run: |
unzip bun/bun-*.zip
cd bun-*
pwd >> $GITHUB_PATH
- if: ${{ runner.os == 'Windows' }}
name: Setup Cygwin
uses: secondlife/setup-cygwin@v3
with:
packages: bash
- if: ${{ runner.os == 'Windows' }}
name: Setup Bun (Windows)
run: |
unzip bun/bun-*.zip
cd bun-*
pwd >> $env:GITHUB_PATH
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20
- name: Install Dependencies
timeout-minutes: 5
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
bun install
- name: Install Dependencies (test)
timeout-minutes: 5
run: |
bun install --cwd test
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Install Dependencies (runner)
timeout-minutes: 5
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
bun install --cwd packages/bun-internal-test
- name: Run Tests
id: test
timeout-minutes: 90
shell: bash
env:
IS_BUN_CI: 1
TMPDIR: ${{ runner.temp }}
BUN_TAG: ${{ inputs.tag }}
BUN_FEATURE_FLAG_INTERNAL_FOR_TESTING: "true"
SMTP_SENDGRID_SENDER: ${{ secrets.SMTP_SENDGRID_SENDER }}
TLS_MONGODB_DATABASE_URL: ${{ secrets.TLS_MONGODB_DATABASE_URL }}
TLS_POSTGRES_DATABASE_URL: ${{ secrets.TLS_POSTGRES_DATABASE_URL }}
TEST_INFO_STRIPE: ${{ secrets.TEST_INFO_STRIPE }}
TEST_INFO_AZURE_SERVICE_BUS: ${{ secrets.TEST_INFO_AZURE_SERVICE_BUS }}
SHELLOPTS: igncr
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
node packages/bun-internal-test/src/runner.node.mjs $(which bun)
- if: ${{ always() }}
name: Upload Results
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-tests
path: |
test-report.*
comment.md
pr-number.txt
if-no-files-found: error
overwrite: true
- if: ${{ always() && steps.test.outputs.failing_tests != '' && github.event.pull_request && github.repository_owner == 'oven-sh' }}
name: Send Message
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.DISCORD_WEBHOOK }}
nodetail: true
color: "#FF0000"
title: ""
description: |
### ❌ [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})
@${{ github.actor }}, there are ${{ steps.test.outputs.failing_tests_count || 'some' }} failing tests on bun-${{ inputs.tag }}.
${{ steps.test.outputs.failing_tests }}
**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})**
- name: Fail
if: ${{ failure() || always() && steps.test.outputs.failing_tests != '' }}
run: |
echo "There are ${{ steps.test.outputs.failing_tests_count || 'some' }} failing tests on bun-${{ inputs.tag }}."
exit 1
test-node:
name: Node.js Tests
# TODO: enable when we start paying attention to the results. In the meantime, this causes CI to queue jobs wasting developer time.
if: 0
runs-on: ${{ inputs.runs-on }}
steps:
- if: ${{ runner.os == 'Windows' }}
name: Setup Git
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- name: Checkout
uses: actions/checkout@v4
with:
sparse-checkout: |
test/node.js
- name: Setup Environment
shell: bash
run: |
echo "${{ inputs.pr-number }}" > pr-number.txt
- name: Download Bun
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}
path: bun
github-token: ${{ github.token }}
run-id: ${{ inputs.run-id || github.run_id }}
- if: ${{ runner.os != 'Windows' }}
name: Setup Bun
shell: bash
run: |
unzip bun/bun-*.zip
cd bun-*
pwd >> $GITHUB_PATH
- if: ${{ runner.os == 'Windows' }}
name: Setup Cygwin
uses: secondlife/setup-cygwin@v3
with:
packages: bash
- if: ${{ runner.os == 'Windows' }}
name: Setup Bun (Windows)
run: |
unzip bun/bun-*.zip
cd bun-*
pwd >> $env:GITHUB_PATH
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20
- name: Checkout Tests
shell: bash
working-directory: test/node.js
run: |
node runner.mjs --pull
- name: Install Dependencies
timeout-minutes: 5
shell: bash
working-directory: test/node.js
run: |
bun install
- name: Run Tests
timeout-minutes: 10 # Increase when more tests are added
shell: bash
working-directory: test/node.js
env:
TMPDIR: ${{ runner.temp }}
BUN_GARBAGE_COLLECTOR_LEVEL: "0"
BUN_FEATURE_FLAG_INTERNAL_FOR_TESTING: "true"
run: |
node runner.mjs
- name: Upload Results
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-node-tests
path: |
test/node.js/summary/*.json
if-no-files-found: error
overwrite: true

View File

@@ -1,94 +0,0 @@
name: Upload Artifacts
run-name: Canary release ${{github.sha}} upload
permissions:
contents: write
on:
workflow_run:
workflows:
- CI
types:
- completed
branches:
- main
jobs:
upload:
if: ${{ github.repository_owner == 'oven-sh' }}
name: Upload Artifacts
runs-on: ubuntu-latest
steps:
- name: Download Artifacts
uses: actions/download-artifact@v4
with:
path: bun
pattern: bun-*
merge-multiple: true
github-token: ${{ github.token }}
run-id: ${{ github.event.workflow_run.id }}
- name: Check for Artifacts
run: |
if [ ! -d "bun" ] || [ -z "$(ls -A bun)" ]; then
echo "Error: No artifacts were downloaded or 'bun' directory does not exist."
exit 1 # Fail the job if the condition is met
else
echo "Artifacts downloaded successfully."
fi
- name: Upload to GitHub Releases
uses: ncipollo/release-action@v1
with:
tag: canary
name: Canary (${{ github.sha }})
prerelease: true
body: This canary release of Bun corresponds to the commit [${{ github.sha }}](https://github.com/${{ github.repository }}/commit/${{ github.sha }})
allowUpdates: true
replacesArtifacts: true
generateReleaseNotes: true
artifactErrorsFailBuild: true
artifacts: bun/**/bun-*.zip
token: ${{ github.token }}
- name: Upload to S3 (using SHA)
uses: shallwefootball/s3-upload-action@4350529f410221787ccf424e50133cbc1b52704e
with:
endpoint: ${{ secrets.AWS_ENDPOINT }}
aws_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY}}
aws_bucket: ${{ secrets.AWS_BUCKET }}
source_dir: bun
destination_dir: releases/${{ github.event.workflow_run.head_sha || github.sha }}-canary
- name: Upload to S3 (using tag)
uses: shallwefootball/s3-upload-action@4350529f410221787ccf424e50133cbc1b52704e
with:
endpoint: ${{ secrets.AWS_ENDPOINT }}
aws_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY}}
aws_bucket: ${{ secrets.AWS_BUCKET }}
source_dir: bun
destination_dir: releases/canary
- name: Announce on Discord
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.BUN_DISCORD_GITHUB_CHANNEL_WEBHOOK }}
nodetail: true
color: "#1F6FEB"
title: "New Bun Canary available"
url: https://github.com/oven-sh/bun/commit/${{ github.sha }}
description: |
A new canary build of Bun has been automatically uploaded. To upgrade, run:
```sh
bun upgrade --canary
# bun upgrade --stable <- to downgrade
```
# If notifying sentry fails, don't fail the rest of the build.
- name: Notify Sentry
uses: getsentry/action-release@v1.7.0
env:
SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
SENTRY_ORG: ${{ secrets.SENTRY_ORG }}
SENTRY_PROJECT: ${{ secrets.SENTRY_PROJECT }}
with:
ignore_missing: true
ignore_empty: true
version: ${{ github.event.workflow_run.head_sha || github.sha }}-canary
environment: canary

12
.gitmodules vendored
View File

@@ -76,17 +76,13 @@ ignore = dirty
depth = 1
shallow = true
fetchRecurseSubmodules = false
[submodule "src/deps/libuv"]
path = src/deps/libuv
url = https://github.com/libuv/libuv.git
ignore = dirty
depth = 1
shallow = true
fetchRecurseSubmodules = false
branch = v1.48.0
[submodule "zig"]
path = src/deps/zig
url = https://github.com/oven-sh/zig
depth = 1
shallow = true
fetchRecurseSubmodules = false
[submodule "src/deps/libdeflate"]
path = src/deps/libdeflate
url = https://github.com/ebiggers/libdeflate
ignore = "dirty"

4
.lldbinit Normal file
View File

@@ -0,0 +1,4 @@
command script import src/deps/zig/tools/lldb_pretty_printers.py
command script import src/bun.js/WebKit/Tools/lldb/lldb_webkit.py
# type summary add --summary-string "${var} | inner=${var[0-30]}, source=${var[33-64]}, tag=${var[31-32]}" "unsigned long"

4
.vscode/launch.json generated vendored
View File

@@ -17,6 +17,7 @@
"cwd": "${workspaceFolder}/test",
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "1",
},
"console": "internalConsole",
@@ -145,14 +146,13 @@
"request": "launch",
"name": "bun run [file]",
"program": "${workspaceFolder}/build/bun-debug",
"args": ["run", "${file}"],
"args": ["run", "${fileBasename}"],
"cwd": "${fileDirname}",
"env": {
"FORCE_COLOR": "0",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_DEBUG_EventLoop": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
"BUN_DEBUG_ALL": "1",
},
"console": "internalConsole",
},

View File

@@ -42,8 +42,11 @@
"editor.defaultFormatter": "ziglang.vscode-zig",
},
// C++
// lldb
"lldb.launch.initCommands": ["command source ${workspaceFolder}/.lldbinit"],
"lldb.verboseLogging": false,
// C++
"cmake.configureOnOpen": false,
"C_Cpp.errorSquiggles": "enabled",
"[cpp]": {

View File

@@ -3,8 +3,8 @@ cmake_policy(SET CMP0091 NEW)
cmake_policy(SET CMP0067 NEW)
set(CMAKE_POLICY_DEFAULT_CMP0069 NEW)
set(Bun_VERSION "1.1.21")
set(WEBKIT_TAG 49907bff8781719bc2ded068b0c934f6d0074d1e)
set(Bun_VERSION "1.1.22")
set(WEBKIT_TAG f9a0fda2d2b2fd001a00bfcf8e7917a56b382516)
set(BUN_WORKDIR "${CMAKE_CURRENT_BINARY_DIR}")
message(STATUS "Configuring Bun ${Bun_VERSION} in ${BUN_WORKDIR}")
@@ -310,6 +310,7 @@ endif()
# -- Build Flags --
option(USE_STATIC_SQLITE "Statically link SQLite?" ${DEFAULT_ON_UNLESS_APPLE})
option(USE_CUSTOM_ZLIB "Use Bun's recommended version of zlib" ON)
option(USE_CUSTOM_LIBDEFLATE "Use Bun's recommended version of libdeflate" ON)
option(USE_CUSTOM_BORINGSSL "Use Bun's recommended version of BoringSSL" ON)
option(USE_CUSTOM_LIBARCHIVE "Use Bun's recommended version of libarchive" ON)
option(USE_CUSTOM_MIMALLOC "Use Bun's recommended version of Mimalloc" ON)
@@ -999,8 +1000,20 @@ add_compile_definitions(
)
if(NOT ASSERT_ENABLED)
if(APPLE)
add_compile_definitions("_LIBCXX_ENABLE_ASSERTIONS=0")
add_compile_definitions("_LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_NONE")
endif()
add_compile_definitions("NDEBUG=1")
else()
if(APPLE)
add_compile_definitions("_LIBCXX_ENABLE_ASSERTIONS=1")
add_compile_definitions("_LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_DEBUG")
elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux")
add_compile_definitions("_GLIBCXX_ASSERTIONS=1")
endif()
add_compile_definitions("ASSERT_ENABLED=1")
endif()
@@ -1210,7 +1223,7 @@ endif()
if(UNIX AND NOT APPLE)
target_link_options(${bun} PUBLIC
-fuse-ld=lld
-fuse-ld=lld-${LLVM_VERSION}
-fno-pic
-static-libstdc++
-static-libgcc
@@ -1358,6 +1371,19 @@ else()
target_link_libraries(${bun} PRIVATE LibArchive::LibArchive)
endif()
if(USE_CUSTOM_LIBDEFLATE)
include_directories(${BUN_DEPS_DIR}/libdeflate)
if(WIN32)
target_link_libraries(${bun} PRIVATE "${BUN_DEPS_OUT_DIR}/deflate.lib")
else()
target_link_libraries(${bun} PRIVATE "${BUN_DEPS_OUT_DIR}/libdeflate.a")
endif()
else()
find_package(LibDeflate REQUIRED)
target_link_libraries(${bun} PRIVATE LibDeflate::LibDeflate)
endif()
if(USE_CUSTOM_MIMALLOC)
include_directories(${BUN_DEPS_DIR}/mimalloc/include)

View File

@@ -263,6 +263,27 @@ RUN --mount=type=cache,target=${CCACHE_DIR} \
&& bash ./scripts/build-zlib.sh && rm -rf src/deps/zlib scripts
FROM bun-base as libdeflate
ARG BUN_DIR
ARG CPU_TARGET
ENV CPU_TARGET=${CPU_TARGET}
ARG CCACHE_DIR=/ccache
ENV CCACHE_DIR=${CCACHE_DIR}
COPY Makefile ${BUN_DIR}/Makefile
COPY CMakeLists.txt ${BUN_DIR}/CMakeLists.txt
COPY scripts ${BUN_DIR}/scripts
COPY src/deps/libdeflate ${BUN_DIR}/src/deps/libdeflate
COPY package.json bun.lockb Makefile .gitmodules ${BUN_DIR}/
WORKDIR $BUN_DIR
RUN --mount=type=cache,target=${CCACHE_DIR} \
cd $BUN_DIR \
&& bash ./scripts/build-libdeflate.sh && rm -rf src/deps/libdeflate scripts
FROM bun-base as libarchive
ARG BUN_DIR
@@ -412,6 +433,9 @@ COPY src ${BUN_DIR}/src
COPY CMakeLists.txt ${BUN_DIR}/CMakeLists.txt
COPY src/deps/boringssl/include ${BUN_DIR}/src/deps/boringssl/include
# for uWebSockets
COPY src/deps/libdeflate ${BUN_DIR}/src/deps/libdeflate
ARG CCACHE_DIR=/ccache
ENV CCACHE_DIR=${CCACHE_DIR}
@@ -516,6 +540,7 @@ COPY src/symbols.dyn src/linker.lds ${BUN_DIR}/src/
COPY CMakeLists.txt ${BUN_DIR}/CMakeLists.txt
COPY --from=zlib ${BUN_DEPS_OUT_DIR}/* ${BUN_DEPS_OUT_DIR}/
COPY --from=libdeflate ${BUN_DEPS_OUT_DIR}/* ${BUN_DEPS_OUT_DIR}/
COPY --from=libarchive ${BUN_DEPS_OUT_DIR}/* ${BUN_DEPS_OUT_DIR}/
COPY --from=boringssl ${BUN_DEPS_OUT_DIR}/* ${BUN_DEPS_OUT_DIR}/
COPY --from=lolhtml ${BUN_DEPS_OUT_DIR}/* ${BUN_DEPS_OUT_DIR}/

2
LATEST
View File

@@ -1 +1 @@
1.1.20
1.1.21

View File

@@ -34,6 +34,8 @@ Bun statically links these libraries:
| [`c-ares`](https://github.com/c-ares/c-ares) | MIT licensed |
| [`libicu`](https://github.com/unicode-org/icu) 72 | [license here](https://github.com/unicode-org/icu/blob/main/icu4c/LICENSE) |
| [`libbase64`](https://github.com/aklomp/base64/blob/master/LICENSE) | BSD 2-Clause |
| [`libuv`](https://github.com/libuv/libuv) (on Windows) | MIT |
| [`libdeflate`](https://github.com/ebiggers/libdeflate) | MIT |
| A fork of [`uWebsockets`](https://github.com/jarred-sumner/uwebsockets) | Apache 2.0 licensed |
| Parts of [Tigerbeetle's IO code](https://github.com/tigerbeetle/tigerbeetle/blob/532c8b70b9142c17e07737ab6d3da68d7500cbca/src/io/windows.zig#L1) | Apache 2.0 licensed |

View File

@@ -1,20 +1,43 @@
import { run, bench } from "mitata";
import { run, bench, group } from "mitata";
import { gzipSync, gunzipSync } from "bun";
const data = new TextEncoder().encode("Hello World!".repeat(9999));
const data = await Bun.file(require.resolve("@babel/standalone/babel.min.js")).arrayBuffer();
const compressed = gzipSync(data);
bench(`roundtrip - "Hello World!".repeat(9999))`, () => {
gunzipSync(gzipSync(data));
const libraries = ["zlib"];
if (Bun.semver.satisfies(Bun.version.replaceAll("-debug", ""), ">=1.1.21")) {
libraries.push("libdeflate");
}
const options = { library: undefined };
const benchFn = (name, fn) => {
if (libraries.length > 1) {
group(name, () => {
for (const library of libraries) {
bench(library, () => {
options.library = library;
fn();
});
}
});
} else {
options.library = libraries[0];
bench(name, () => {
fn();
});
}
};
benchFn(`roundtrip - @babel/standalone/babel.min.js`, () => {
gunzipSync(gzipSync(data, options), options);
});
bench(`gzipSync("Hello World!".repeat(9999)))`, () => {
gzipSync(data);
benchFn(`gzipSync(@babel/standalone/babel.min.js`, () => {
gzipSync(data, options);
});
bench(`gunzipSync("Hello World!".repeat(9999)))`, () => {
gunzipSync(compressed);
benchFn(`gunzipSync(@babel/standalone/babel.min.js`, () => {
gunzipSync(compressed, options);
});
await run();

BIN
bench/gzip/bun.lockb Executable file

Binary file not shown.

View File

@@ -1,19 +1,22 @@
import { run, bench } from "mitata";
import { gzipSync, gunzipSync } from "zlib";
import { createRequire } from "module";
import { readFileSync } from "fs";
const data = new TextEncoder().encode("Hello World!".repeat(9999));
const require = createRequire(import.meta.url);
const data = readFileSync(require.resolve("@babel/standalone/babel.min.js"));
const compressed = gzipSync(data);
bench(`roundtrip - "Hello World!".repeat(9999))`, () => {
bench(`roundtrip - @babel/standalone/babel.min.js)`, () => {
gunzipSync(gzipSync(data));
});
bench(`gzipSync("Hello World!".repeat(9999)))`, () => {
bench(`gzipSync(@babel/standalone/babel.min.js))`, () => {
gzipSync(data);
});
bench(`gunzipSync("Hello World!".repeat(9999)))`, () => {
bench(`gunzipSync(@babel/standalone/babel.min.js))`, () => {
gunzipSync(compressed);
});

View File

@@ -7,5 +7,8 @@
"bench:node": "$NODE node.mjs",
"bench:deno": "$DENO run -A --unstable deno.js",
"bench": "bun run bench:bun && bun run bench:node && bun run bench:deno"
},
"dependencies": {
"@babel/standalone": "7.24.10"
}
}

View File

@@ -0,0 +1,31 @@
import { run, bench } from "mitata";
import { createRequire } from "module";
const require = createRequire(import.meta.url);
const db = require("better-sqlite3")("./src/northwind.sqlite");
{
const sql = db.prepare(`SELECT * FROM "Order"`);
bench('SELECT * FROM "Order"', () => {
sql.all();
});
}
{
const sql = db.prepare(`SELECT * FROM "Product"`);
bench('SELECT * FROM "Product"', () => {
sql.all();
});
}
{
const sql = db.prepare(`SELECT * FROM "OrderDetail"`);
bench('SELECT * FROM "OrderDetail"', () => {
sql.all();
});
}
await run();

View File

@@ -1,8 +1,9 @@
// Run `node --experimental-sqlite bench/sqlite/node.mjs` to run the script.
// You will need `--experimental-sqlite` flag to run this script and node v22.5.0 or higher.
import { run, bench } from "mitata";
import { createRequire } from "module";
import { DatabaseSync as Database } from "node:sqlite";
const require = createRequire(import.meta.url);
const db = require("better-sqlite3")("./src/northwind.sqlite");
const db = new Database("./src/northwind.sqlite");
{
const sql = db.prepare(`SELECT * FROM "Order"`);

View File

@@ -49,6 +49,7 @@ const BunBuildOptions = struct {
reported_nodejs_version: Version,
generated_code_dir: []const u8,
no_llvm: bool,
cached_options_module: ?*Module = null,
windows_shim: ?WindowsShim = null,
@@ -181,6 +182,8 @@ pub fn build(b: *Build) !void {
const obj_format = b.option(ObjectFormat, "obj_format", "Output file for object files") orelse .obj;
const no_llvm = b.option(bool, "no_llvm", "Experiment with Zig self hosted backends. No stability guaranteed") orelse false;
var build_options = BunBuildOptions{
.target = target,
.optimize = optimize,
@@ -189,6 +192,7 @@ pub fn build(b: *Build) !void {
.arch = arch,
.generated_code_dir = generated_code_dir,
.no_llvm = no_llvm,
.version = try Version.parse(bun_version),
.canary_revision = canary: {
@@ -320,6 +324,7 @@ pub inline fn addMultiCheck(
.version = root_build_options.version,
.reported_nodejs_version = root_build_options.reported_nodejs_version,
.generated_code_dir = root_build_options.generated_code_dir,
.no_llvm = root_build_options.no_llvm,
};
var obj = addBunObject(b, &options);
@@ -338,6 +343,8 @@ pub fn addBunObject(b: *Build, opts: *BunBuildOptions) *Compile {
},
.target = opts.target,
.optimize = opts.optimize,
.use_llvm = !opts.no_llvm,
.use_lld = if (opts.os == .mac) false else !opts.no_llvm,
// https://github.com/ziglang/zig/issues/17430
.pic = true,

View File

@@ -5,8 +5,8 @@ name: Define and replace static globals & constants
The `--define` flag lets you declare statically-analyzable constants and globals. It replaces all usages of an identifier or property in a JavaScript or TypeScript file with a constant value. This feature is supported at runtime and also in `bun build`. This is sort of similar to `#define` in C/C++, except for JavaScript.
```ts
bun --define:process.env.NODE_ENV="'production'" src/index.ts # Runtime
bun build --define:process.env.NODE_ENV="'production'" src/index.ts # Build
bun --define process.env.NODE_ENV="'production'" src/index.ts # Runtime
bun build --define process.env.NODE_ENV="'production'" src/index.ts # Build
```
---
@@ -95,7 +95,7 @@ To replace all usages of `AWS` with the JSON object `{"ACCESS_KEY":"abc","SECRET
```sh
# JSON
bun --define:AWS='{"ACCESS_KEY":"abc","SECRET_KEY":"def"}' src/index.ts
bun --define AWS='{"ACCESS_KEY":"abc","SECRET_KEY":"def"}' src/index.ts
```
Those will be transformed into the equivalent JavaScript code.
@@ -119,7 +119,7 @@ You can also pass properties to the `--define` flag.
For example, to replace all usages of `console.write` with `console.log`, you can use the following command (requires Bun v1.1.5 or later)
```sh
bun --define:console.write=console.log src/index.ts
bun --define console.write=console.log src/index.ts
```
That transforms the following input:

View File

@@ -171,6 +171,8 @@ Once imported, you should see something like this:
{% image alt="Viewing heap snapshot in Safari" src="https://user-images.githubusercontent.com/709451/204429337-b0d8935f-3509-4071-b991-217794d1fb27.png" caption="Viewing heap snapshot in Safari Dev Tools" /%}
> The [web debugger](https://bun.sh/docs/runtime/debugger#inspect) also offers the timeline feature which allows you to track and examine the memory usage of the running debug session.
### Native heap stats
Bun uses mimalloc for the non-JavaScript (native) heap. To report a summary of non-JavaScript memory usage, set the `MIMALLOC_SHOW_STATS=1` environment variable, and stats will print on exit.

View File

@@ -153,7 +153,7 @@ Some methods are not optimized yet.
### [`node:util`](https://nodejs.org/api/util.html)
🟡 Missing `MIMEParams` `MIMEType` `aborted` `debug` `getSystemErrorMap` `getSystemErrorName` `transferableAbortController` `transferableAbortSignal` `stripVTControlCharacters`
🟡 Missing `MIMEParams` `MIMEType` `aborted` `debug` `getSystemErrorMap` `transferableAbortController` `transferableAbortSignal` `stripVTControlCharacters`
### [`node:v8`](https://nodejs.org/api/v8.html)

View File

@@ -7,22 +7,36 @@ The following Web APIs are partially or completely supported.
---
- HTTP
- [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/fetch) [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) [`Request`](https://developer.mozilla.org/en-US/docs/Web/API/Request) [`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers) [`AbortController`](https://developer.mozilla.org/en-US/docs/Web/API/AbortController) [`AbortSignal`](https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal)
- [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/fetch)
[`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response)
[`Request`](https://developer.mozilla.org/en-US/docs/Web/API/Request)
[`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers)
[`AbortController`](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
[`AbortSignal`](https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal)
---
- URLs
- [`URL`](https://developer.mozilla.org/en-US/docs/Web/API/URL) [`URLSearchParams`](https://developer.mozilla.org/en-US/docs/Web/API/URLSearchParams)
- [`URL`](https://developer.mozilla.org/en-US/docs/Web/API/URL)
[`URLSearchParams`](https://developer.mozilla.org/en-US/docs/Web/API/URLSearchParams)
---
- Web Workers
- [`Worker`](https://developer.mozilla.org/en-US/docs/Web/API/Worker) [`self.postMessage`](https://developer.mozilla.org/en-US/docs/Web/API/DedicatedWorkerGlobalScope/postMessage) [`structuredClone`](https://developer.mozilla.org/en-US/docs/Web/API/structuredClone) [`MessagePort`](https://developer.mozilla.org/en-US/docs/Web/API/MessagePort) [`MessageChannel`](https://developer.mozilla.org/en-US/docs/Web/API/MessageChannel), [`BroadcastChannel`](https://developer.mozilla.org/en-US/docs/Web/API/BroadcastChannel).
- [`Worker`](https://developer.mozilla.org/en-US/docs/Web/API/Worker)
[`self.postMessage`](https://developer.mozilla.org/en-US/docs/Web/API/DedicatedWorkerGlobalScope/postMessage)
[`structuredClone`](https://developer.mozilla.org/en-US/docs/Web/API/structuredClone)
[`MessagePort`](https://developer.mozilla.org/en-US/docs/Web/API/MessagePort)
[`MessageChannel`](https://developer.mozilla.org/en-US/docs/Web/API/MessageChannel), [`BroadcastChannel`](https://developer.mozilla.org/en-US/docs/Web/API/BroadcastChannel).
---
- Streams
- [`ReadableStream`](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) [`WritableStream`](https://developer.mozilla.org/en-US/docs/Web/API/WritableStream) [`TransformStream`](https://developer.mozilla.org/en-US/docs/Web/API/TransformStream) [`ByteLengthQueuingStrategy`](https://developer.mozilla.org/en-US/docs/Web/API/ByteLengthQueuingStrategy) [`CountQueuingStrategy`](https://developer.mozilla.org/en-US/docs/Web/API/CountQueuingStrategy) and associated classes
- [`ReadableStream`](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream)
[`WritableStream`](https://developer.mozilla.org/en-US/docs/Web/API/WritableStream)
[`TransformStream`](https://developer.mozilla.org/en-US/docs/Web/API/TransformStream)
[`ByteLengthQueuingStrategy`](https://developer.mozilla.org/en-US/docs/Web/API/ByteLengthQueuingStrategy)
[`CountQueuingStrategy`](https://developer.mozilla.org/en-US/docs/Web/API/CountQueuingStrategy) and associated classes
---
@@ -37,7 +51,10 @@ The following Web APIs are partially or completely supported.
---
- Encoding and decoding
- [`atob`](https://developer.mozilla.org/en-US/docs/Web/API/atob) [`btoa`](https://developer.mozilla.org/en-US/docs/Web/API/btoa) [`TextEncoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder) [`TextDecoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextDecoder)
- [`atob`](https://developer.mozilla.org/en-US/docs/Web/API/atob)
[`btoa`](https://developer.mozilla.org/en-US/docs/Web/API/btoa)
[`TextEncoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder)
[`TextDecoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextDecoder)
---
@@ -47,7 +64,8 @@ The following Web APIs are partially or completely supported.
---
- Timeouts
- [`setTimeout`](https://developer.mozilla.org/en-US/docs/Web/API/setTimeout) [`clearTimeout`](https://developer.mozilla.org/en-US/docs/Web/API/clearTimeout)
- [`setTimeout`](https://developer.mozilla.org/en-US/docs/Web/API/setTimeout)
[`clearTimeout`](https://developer.mozilla.org/en-US/docs/Web/API/clearTimeout)
---
@@ -57,14 +75,16 @@ The following Web APIs are partially or completely supported.
---
- Crypto
- [`crypto`](https://developer.mozilla.org/en-US/docs/Web/API/Crypto) [`SubtleCrypto`](https://developer.mozilla.org/en-US/docs/Web/API/SubtleCrypto)
- [`crypto`](https://developer.mozilla.org/en-US/docs/Web/API/Crypto)
[`SubtleCrypto`](https://developer.mozilla.org/en-US/docs/Web/API/SubtleCrypto)
[`CryptoKey`](https://developer.mozilla.org/en-US/docs/Web/API/CryptoKey)
---
- Debugging
- [`console`](https://developer.mozilla.org/en-US/docs/Web/API/console) [`performance`](https://developer.mozilla.org/en-US/docs/Web/API/Performance)
- [`console`](https://developer.mozilla.org/en-US/docs/Web/API/console)
[`performance`](https://developer.mozilla.org/en-US/docs/Web/API/Performance)
---
@@ -79,7 +99,9 @@ The following Web APIs are partially or completely supported.
---
- User interaction
- [`alert`](https://developer.mozilla.org/en-US/docs/Web/API/Window/alert) [`confirm`](https://developer.mozilla.org/en-US/docs/Web/API/Window/confirm) [`prompt`](https://developer.mozilla.org/en-US/docs/Web/API/Window/prompt) (intended for interactive CLIs)
- [`alert`](https://developer.mozilla.org/en-US/docs/Web/API/Window/alert)
[`confirm`](https://developer.mozilla.org/en-US/docs/Web/API/Window/confirm)
[`prompt`](https://developer.mozilla.org/en-US/docs/Web/API/Window/prompt) (intended for interactive CLIs)
<!-- - Blocking. Prints the alert message to terminal and awaits `[ENTER]` before proceeding. -->
<!-- - Blocking. Prints confirmation message and awaits `[y/N]` input from user. Returns `true` if user entered `y` or `Y`, `false` otherwise.
@@ -94,7 +116,10 @@ The following Web APIs are partially or completely supported.
- Events
- [`EventTarget`](https://developer.mozilla.org/en-US/docs/Web/API/EventTarget)
[`Event`](https://developer.mozilla.org/en-US/docs/Web/API/Event) [`ErrorEvent`](https://developer.mozilla.org/en-US/docs/Web/API/ErrorEvent) [`CloseEvent`](https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent) [`MessageEvent`](https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent)
[`Event`](https://developer.mozilla.org/en-US/docs/Web/API/Event)
[`ErrorEvent`](https://developer.mozilla.org/en-US/docs/Web/API/ErrorEvent)
[`CloseEvent`](https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent)
[`MessageEvent`](https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent)
---

View File

@@ -195,7 +195,6 @@ pub fn main() anyerror!void {
args.headers_buf,
response_body_string,
args.body,
0,
HTTP.FetchRedirect.follow,
),
};

View File

@@ -31,7 +31,6 @@ const params = [_]clap.Param(clap.Help){
clap.parseParam("-b, --body <STR> HTTP request body as a string") catch unreachable,
clap.parseParam("-f, --file <STR> File path to load as body") catch unreachable,
clap.parseParam("-n, --count <INT> How many runs? Default 10") catch unreachable,
clap.parseParam("-t, --timeout <INT> Max duration per request") catch unreachable,
clap.parseParam("-r, --retry <INT> Max retry count") catch unreachable,
clap.parseParam("--no-gzip Disable gzip") catch unreachable,
clap.parseParam("--no-deflate Disable deflate") catch unreachable,
@@ -75,7 +74,6 @@ pub const Arguments = struct {
body: string = "",
turbo: bool = false,
count: usize = 10,
timeout: usize = 0,
repeat: usize = 0,
concurrency: u16 = 32,
@@ -165,10 +163,6 @@ pub const Arguments = struct {
// .keep_alive = !args.flag("--no-keep-alive"),
.concurrency = std.fmt.parseInt(u16, args.option("--max-concurrency") orelse "32", 10) catch 32,
.turbo = args.flag("--turbo"),
.timeout = std.fmt.parseInt(usize, args.option("--timeout") orelse "0", 10) catch |err| {
Output.prettyErrorln("<r><red>{s}<r> parsing timeout", .{@errorName(err)});
Global.exit(1);
},
.count = std.fmt.parseInt(usize, args.option("--count") orelse "10", 10) catch |err| {
Output.prettyErrorln("<r><red>{s}<r> parsing count", .{@errorName(err)});
Global.exit(1);
@@ -225,7 +219,6 @@ pub fn main() anyerror!void {
args.headers_buf,
response_body,
"",
args.timeout,
),
};
ctx.http.client.verbose = args.verbose;

View File

@@ -3481,6 +3481,13 @@ declare module "bun" {
* Filtered data consists mostly of small values with a somewhat random distribution.
*/
strategy?: number;
library?: "zlib";
}
interface LibdeflateCompressionOptions {
level?: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12;
library?: "libdeflate";
}
/**
@@ -3489,26 +3496,38 @@ declare module "bun" {
* @param options Compression options to use
* @returns The output buffer with the compressed data
*/
function deflateSync(data: Uint8Array | string | ArrayBuffer, options?: ZlibCompressionOptions): Uint8Array;
function deflateSync(
data: Uint8Array | string | ArrayBuffer,
options?: ZlibCompressionOptions | LibdeflateCompressionOptions,
): Uint8Array;
/**
* Compresses a chunk of data with `zlib` GZIP algorithm.
* @param data The buffer of data to compress
* @param options Compression options to use
* @returns The output buffer with the compressed data
*/
function gzipSync(data: Uint8Array | string | ArrayBuffer, options?: ZlibCompressionOptions): Uint8Array;
function gzipSync(
data: Uint8Array | string | ArrayBuffer,
options?: ZlibCompressionOptions | LibdeflateCompressionOptions,
): Uint8Array;
/**
* Decompresses a chunk of data with `zlib` INFLATE algorithm.
* @param data The buffer of data to decompress
* @returns The output buffer with the decompressed data
*/
function inflateSync(data: Uint8Array | string | ArrayBuffer): Uint8Array;
function inflateSync(
data: Uint8Array | string | ArrayBuffer,
options?: ZlibCompressionOptions | LibdeflateCompressionOptions,
): Uint8Array;
/**
* Decompresses a chunk of data with `zlib` GUNZIP algorithm.
* @param data The buffer of data to decompress
* @returns The output buffer with the decompressed data
*/
function gunzipSync(data: Uint8Array | string | ArrayBuffer): Uint8Array;
function gunzipSync(
data: Uint8Array | string | ArrayBuffer,
options?: ZlibCompressionOptions | LibdeflateCompressionOptions,
): Uint8Array;
type Target =
/**
@@ -3828,7 +3847,7 @@ declare module "bun" {
*/
const isMainThread: boolean;
interface Socket<Data = undefined> {
interface Socket<Data = undefined> extends Disposable {
/**
* Write `data` to the socket
*
@@ -4110,7 +4129,7 @@ declare module "bun" {
setMaxSendFragment(size: number): boolean;
}
interface SocketListener<Data = undefined> {
interface SocketListener<Data = undefined> extends Disposable {
stop(closeActiveConnections?: boolean): void;
ref(): void;
unref(): void;

View File

@@ -26,6 +26,10 @@
#include <stdlib.h>
#ifndef _WIN32
// Necessary for the stdint include
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>

View File

@@ -15,18 +15,18 @@
* limitations under the License.
*/
#include "libusockets.h"
#include "internal/internal.h"
#include "libusockets.h"
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#ifndef _WIN32
#include <arpa/inet.h>
#endif
#define CONCURRENT_CONNECTIONS 2
// clang-format off
int default_is_low_prio_handler(struct us_socket_t *s) {
return 0;
}
@@ -44,7 +44,7 @@ int us_raw_root_certs(struct us_cert_string_t**out){
void us_listen_socket_close(int ssl, struct us_listen_socket_t *ls) {
/* us_listen_socket_t extends us_socket_t so we close in similar ways */
if (!us_socket_is_closed(0, &ls->s)) {
us_internal_socket_context_unlink_listen_socket(ls->s.context, ls);
us_internal_socket_context_unlink_listen_socket(ssl, ls->s.context, ls);
us_poll_stop((struct us_poll_t *) &ls->s, ls->s.context->loop);
bsd_close_socket(us_poll_fd((struct us_poll_t *) &ls->s));
@@ -72,12 +72,12 @@ void us_socket_context_close(int ssl, struct us_socket_context_t *context) {
struct us_socket_t *s = context->head_sockets;
while (s) {
struct us_socket_t *nextS = s->next;
us_socket_close(ssl, s, 0, 0);
us_socket_close(ssl, s, LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN, 0);
s = nextS;
}
}
void us_internal_socket_context_unlink_listen_socket(struct us_socket_context_t *context, struct us_listen_socket_t *ls) {
void us_internal_socket_context_unlink_listen_socket(int ssl, struct us_socket_context_t *context, struct us_listen_socket_t *ls) {
/* We have to properly update the iterator used to sweep sockets for timeouts */
if (ls == (struct us_listen_socket_t *) context->iterator) {
context->iterator = ls->s.next;
@@ -95,9 +95,10 @@ void us_internal_socket_context_unlink_listen_socket(struct us_socket_context_t
ls->s.next->prev = ls->s.prev;
}
}
us_socket_context_unref(ssl, context);
}
void us_internal_socket_context_unlink_socket(struct us_socket_context_t *context, struct us_socket_t *s) {
void us_internal_socket_context_unlink_socket(int ssl, struct us_socket_context_t *context, struct us_socket_t *s) {
/* We have to properly update the iterator used to sweep sockets for timeouts */
if (s == context->iterator) {
context->iterator = s->next;
@@ -115,6 +116,7 @@ void us_internal_socket_context_unlink_socket(struct us_socket_context_t *contex
s->next->prev = s->prev;
}
}
us_socket_context_unref(ssl, context);
}
/* We always add in the top, so we don't modify any s.next */
@@ -126,6 +128,7 @@ void us_internal_socket_context_link_listen_socket(struct us_socket_context_t *c
context->head_listen_sockets->s.prev = &ls->s;
}
context->head_listen_sockets = ls;
context->ref_count++;
}
/* We always add in the top, so we don't modify any s.next */
@@ -137,6 +140,7 @@ void us_internal_socket_context_link_socket(struct us_socket_context_t *context,
context->head_sockets->prev = s;
}
context->head_sockets = s;
context->ref_count++;
}
struct us_loop_t *us_socket_context_loop(int ssl, struct us_socket_context_t *context) {
@@ -231,6 +235,7 @@ struct us_socket_context_t *us_create_socket_context(int ssl, struct us_loop_t *
struct us_socket_context_t *context = us_calloc(1, sizeof(struct us_socket_context_t) + context_ext_size);
context->loop = loop;
context->is_low_prio = default_is_low_prio_handler;
context->ref_count = 1;
us_internal_loop_link(loop, context);
@@ -252,6 +257,7 @@ struct us_socket_context_t *us_create_bun_socket_context(int ssl, struct us_loop
struct us_socket_context_t *context = us_calloc(1, sizeof(struct us_socket_context_t) + context_ext_size);
context->loop = loop;
context->is_low_prio = default_is_low_prio_handler;
context->ref_count = 1;
us_internal_loop_link(loop, context);
@@ -272,7 +278,8 @@ struct us_bun_verify_error_t us_socket_verify_error(int ssl, struct us_socket_t
}
void us_socket_context_free(int ssl, struct us_socket_context_t *context) {
void us_internal_socket_context_free(int ssl, struct us_socket_context_t *context) {
#ifndef LIBUS_NO_SSL
if (ssl) {
/* This function will call us again with SSL=false */
@@ -285,7 +292,23 @@ void us_socket_context_free(int ssl, struct us_socket_context_t *context) {
* This is the opposite order compared to when creating the context - SSL code is cleaning up before non-SSL */
us_internal_loop_unlink(context->loop, context);
us_free(context);
/* Link this context to the close-list and let it be deleted after this iteration */
context->next = context->loop->data.closed_context_head;
context->loop->data.closed_context_head = context;
}
void us_socket_context_ref(int ssl, struct us_socket_context_t *context) {
context->ref_count++;
}
void us_socket_context_unref(int ssl, struct us_socket_context_t *context) {
if (--context->ref_count == 0) {
us_internal_socket_context_free(ssl, context);
}
}
void us_socket_context_free(int ssl, struct us_socket_context_t *context) {
us_socket_context_unref(ssl, context);
}
struct us_listen_socket_t *us_socket_context_listen(int ssl, struct us_socket_context_t *context, const char *host, int port, int options, int socket_ext_size) {
@@ -709,7 +732,7 @@ struct us_socket_t *us_socket_context_adopt_socket(int ssl, struct us_socket_con
if (s->low_prio_state != 1) {
/* This properly updates the iterator if in on_timeout */
us_internal_socket_context_unlink_socket(s->context, s);
us_internal_socket_context_unlink_socket(ssl, s->context, s);
}

View File

@@ -14,8 +14,14 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// clang-format off
#if (defined(LIBUS_USE_OPENSSL) || defined(LIBUS_USE_WOLFSSL))
#include "internal/internal.h"
#include "libusockets.h"
#include <string.h>
/* These are in sni_tree.cpp */
void *sni_new();
void sni_free(void *sni, void (*cb)(void *));
@@ -23,10 +29,6 @@ int sni_add(void *sni, const char *hostname, void *user);
void *sni_remove(void *sni, const char *hostname);
void *sni_find(void *sni, const char *hostname);
#include "internal/internal.h"
#include "libusockets.h"
#include <string.h>
/* This module contains the entire OpenSSL implementation
* of the SSL socket and socket context interfaces. */
#ifdef LIBUS_USE_OPENSSL
@@ -71,10 +73,6 @@ struct us_internal_ssl_socket_context_t {
// socket context
SSL_CTX *ssl_context;
int is_parent;
#if ALLOW_SERVER_RENEGOTIATION
unsigned int client_renegotiation_limit;
unsigned int client_renegotiation_window;
#endif
/* These decorate the base implementation */
struct us_internal_ssl_socket_t *(*on_open)(struct us_internal_ssl_socket_t *,
int is_client, char *ip,
@@ -108,15 +106,9 @@ enum {
struct us_internal_ssl_socket_t {
struct us_socket_t s;
SSL *ssl; // this _must_ be the first member after s
#if ALLOW_SERVER_RENEGOTIATION
unsigned int client_pending_renegotiations;
uint64_t last_ssl_renegotiation;
unsigned int is_client : 1;
#endif
unsigned int ssl_write_wants_read : 1; // we use this for now
unsigned int ssl_read_wants_write : 1;
unsigned int handshake_state : 2;
unsigned int received_ssl_shutdown : 1;
};
int passphrase_cb(char *buf, int size, int rwflag, void *u) {
@@ -194,16 +186,9 @@ struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
(struct loop_ssl_data *)loop->data.ssl_data;
s->ssl = SSL_new(context->ssl_context);
#if ALLOW_SERVER_RENEGOTIATION
s->client_pending_renegotiations = context->client_renegotiation_limit;
s->last_ssl_renegotiation = 0;
s->is_client = is_client ? 1 : 0;
#endif
s->ssl_write_wants_read = 0;
s->ssl_read_wants_write = 0;
s->handshake_state = HANDSHAKE_PENDING;
s->received_ssl_shutdown = 0;
SSL_set_bio(s->ssl, loop_ssl_data->shared_rbio, loop_ssl_data->shared_wbio);
// if we allow renegotiation, we need to set the mode here
@@ -213,24 +198,18 @@ struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
// this can be a DoS vector for servers, so we enable it using a limit
// we do not use ssl_renegotiate_freely, since ssl_renegotiate_explicit is
// more performant when using BoringSSL
#if ALLOW_SERVER_RENEGOTIATION
if (context->client_renegotiation_limit) {
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_explicit);
} else {
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_never);
}
#endif
BIO_up_ref(loop_ssl_data->shared_rbio);
BIO_up_ref(loop_ssl_data->shared_wbio);
if (is_client) {
#if ALLOW_SERVER_RENEGOTIATION == 0
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_explicit);
#endif
SSL_set_connect_state(s->ssl);
} else {
SSL_set_accept_state(s->ssl);
// we do not allow renegotiation on the server side (should be the default for BoringSSL, but we set to make openssl compatible)
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_never);
}
struct us_internal_ssl_socket_t *result =
@@ -246,6 +225,36 @@ struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
return result;
}
/// @brief Complete the shutdown or do a fast shutdown when needed, this should only be called before closing the socket
/// @param s
void us_internal_handle_shutdown(struct us_internal_ssl_socket_t *s, int force_fast_shutdown) {
// if we are already shutdown or in the middle of a handshake we dont need to do anything
if(!s->ssl || us_socket_is_shut_down(0, &s->s)) return;
// we are closing the socket but did not sent a shutdown yet
int state = SSL_get_shutdown(s->ssl);
int sent_shutdown = state & SSL_SENT_SHUTDOWN;
int received_shutdown = state & SSL_RECEIVED_SHUTDOWN;
// if we are missing a shutdown call, we need to do a fast shutdown here
if(!sent_shutdown || !received_shutdown) {
// Zero means that we should wait for the peer to close the connection
// but we are already closing the connection so we do a fast shutdown here
int ret = SSL_shutdown(s->ssl);
if(ret == 0 && force_fast_shutdown) {
// do a fast shutdown (dont wait for peer)
ret = SSL_shutdown(s->ssl);
}
if(ret < 0) {
// we got some error here, but we dont care about it, we are closing the socket
int err = SSL_get_error(s->ssl, ret);
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
// clear
ERR_clear_error();
}
}
}
}
void us_internal_on_ssl_handshake(
struct us_internal_ssl_socket_context_t *context,
void (*on_handshake)(struct us_internal_ssl_socket_t *, int success,
@@ -259,6 +268,8 @@ void us_internal_on_ssl_handshake(
struct us_internal_ssl_socket_t *
us_internal_ssl_socket_close(struct us_internal_ssl_socket_t *s, int code,
void *reason) {
if (s->handshake_state != HANDSHAKE_COMPLETED) {
// if we have some pending handshake we cancel it and try to check the
// latest handshake error this way we will always call on_handshake with the
@@ -269,8 +280,14 @@ us_internal_ssl_socket_close(struct us_internal_ssl_socket_t *s, int code,
us_internal_trigger_handshake_callback(s, 0);
}
return (struct us_internal_ssl_socket_t *)us_socket_close(
0, (struct us_socket_t *)s, code, reason);
// if we are in the middle of a close_notify we need to finish it (code != 0 forces a fast shutdown)
us_internal_handle_shutdown(s, code != 0);
// only close the socket if we are not in the middle of a handshake
if(!s->ssl || SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN) {
return (struct us_internal_ssl_socket_t *)us_socket_close(0, (struct us_socket_t *)s, code, reason);
}
return s;
}
void us_internal_trigger_handshake_callback(struct us_internal_ssl_socket_t *s,
@@ -292,26 +309,7 @@ int us_internal_ssl_renegotiate(struct us_internal_ssl_socket_t *s) {
// if is a server and we have no pending renegotiation we can check
// the limits
s->handshake_state = HANDSHAKE_RENEGOTIATION_PENDING;
#if ALLOW_SERVER_RENEGOTIATION
if (!s->is_client && !SSL_renegotiate_pending(s->ssl)) {
uint64_t now = time(NULL);
struct us_internal_ssl_socket_context_t *context =
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
// if is not the first time we negotiate and we are outside the time
// window, reset the limits
if (s->last_ssl_renegotiation && (now - s->last_ssl_renegotiation) >=
context->client_renegotiation_window) {
// reset the limits
s->client_pending_renegotiations = context->client_renegotiation_limit;
}
// if we have no more renegotiations, we should close the connection
if (s->client_pending_renegotiations == 0) {
return 0;
}
s->last_ssl_renegotiation = now;
s->client_pending_renegotiations--;
}
#endif
if (!SSL_renegotiate(s->ssl)) {
// we failed to renegotiate
us_internal_trigger_handshake_callback(s, 0);
@@ -347,7 +345,6 @@ void us_internal_update_handshake(struct us_internal_ssl_socket_t *s) {
int result = SSL_do_handshake(s->ssl);
if (SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN) {
s->received_ssl_shutdown = 1;
us_internal_ssl_socket_close(s, 0, NULL);
return;
}
@@ -387,16 +384,15 @@ ssl_on_close(struct us_internal_ssl_socket_t *s, int code, void *reason) {
struct us_internal_ssl_socket_context_t *context =
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
SSL_free(s->ssl);
return context->on_close(s, code, reason);
struct us_internal_ssl_socket_t * ret = context->on_close(s, code, reason);
SSL_free(s->ssl); // free SSL after on_close
s->ssl = NULL; // set to NULL
return ret;
}
struct us_internal_ssl_socket_t *
ssl_on_end(struct us_internal_ssl_socket_t *s) {
// whatever state we are in, a TCP FIN is always an answered shutdown
/* Todo: this should report CLEANLY SHUTDOWN as reason */
return us_internal_ssl_socket_close(s, 0, NULL);
}
@@ -420,31 +416,13 @@ struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s,
loop_ssl_data->ssl_socket = &s->s;
loop_ssl_data->msg_more = 0;
if (us_socket_is_closed(0, &s->s) || s->received_ssl_shutdown) {
if (us_socket_is_closed(0, &s->s)) {
return NULL;
}
if (us_internal_ssl_socket_is_shut_down(s)) {
int ret = 0;
if ((ret = SSL_shutdown(s->ssl)) == 1) {
// two phase shutdown is complete here
/* Todo: this should also report some kind of clean shutdown */
return us_internal_ssl_socket_close(s, 0, NULL);
} else if (ret < 0) {
int err = SSL_get_error(s->ssl, ret);
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
// we need to clear the error queue in case these added to the thread
// local queue
ERR_clear_error();
}
}
// no further processing of data when in shutdown state
return s;
us_internal_ssl_socket_close(s, 0, NULL);
return NULL;
}
// bug checking: this loop needs a lot of attention and clean-ups and
@@ -452,17 +430,12 @@ struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s,
int read = 0;
restart:
// read until shutdown
while (!s->received_ssl_shutdown) {
while (1) {
int just_read = SSL_read(s->ssl,
loop_ssl_data->ssl_read_output +
LIBUS_RECV_BUFFER_PADDING + read,
LIBUS_RECV_BUFFER_LENGTH - read);
// we need to check if we received a shutdown here
if (SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN) {
s->received_ssl_shutdown = 1;
// we will only close after we handle the data and errors
}
if (just_read <= 0) {
int err = SSL_get_error(s->ssl, just_read);
// as far as I know these are the only errors we want to handle
@@ -477,8 +450,9 @@ restart:
// clean and close renegotiation failed
err = SSL_ERROR_SSL;
} else if (err == SSL_ERROR_ZERO_RETURN) {
// zero return can be EOF/FIN, if we have data just signal on_data and
// close
// Remotely-Initiated Shutdown
// See: https://www.openssl.org/docs/manmaster/man3/SSL_shutdown.html
if (read) {
context =
(struct us_internal_ssl_socket_context_t *)us_socket_context(
@@ -488,11 +462,12 @@ restart:
s, loop_ssl_data->ssl_read_output + LIBUS_RECV_BUFFER_PADDING,
read);
if (!s || us_socket_is_closed(0, &s->s)) {
return s;
return NULL; // stop processing data
}
}
// terminate connection here
return us_internal_ssl_socket_close(s, 0, NULL);
us_internal_ssl_socket_close(s, 0, NULL);
return NULL; // stop processing data
}
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
@@ -501,7 +476,8 @@ restart:
}
// terminate connection here
return us_internal_ssl_socket_close(s, 0, NULL);
us_internal_ssl_socket_close(s, 0, NULL);
return NULL; // stop processing data
} else {
// emit the data we have and exit
@@ -527,7 +503,7 @@ restart:
s, loop_ssl_data->ssl_read_output + LIBUS_RECV_BUFFER_PADDING,
read);
if (!s || us_socket_is_closed(0, &s->s)) {
return s;
return NULL; // stop processing data
}
break;
@@ -550,19 +526,13 @@ restart:
s = context->on_data(
s, loop_ssl_data->ssl_read_output + LIBUS_RECV_BUFFER_PADDING, read);
if (!s || us_socket_is_closed(0, &s->s)) {
return s;
return NULL;
}
read = 0;
goto restart;
}
}
// we received the shutdown after reading so we close
if (s->received_ssl_shutdown) {
us_internal_ssl_socket_close(s, 0, NULL);
return NULL;
}
// trigger writable if we failed last write with want read
if (s->ssl_write_wants_read) {
s->ssl_write_wants_read = 0;
@@ -576,7 +546,7 @@ restart:
&s->s); // cast here!
// if we are closed here, then exit
if (!s || us_socket_is_closed(0, &s->s)) {
return s;
return NULL;
}
}
@@ -1032,7 +1002,7 @@ long us_internal_verify_peer_certificate( // NOLINT(runtime/int)
struct us_bun_verify_error_t
us_internal_verify_error(struct us_internal_ssl_socket_t *s) {
if (us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s)) {
if (!s->ssl || us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s)) {
return (struct us_bun_verify_error_t){
.error = 0, .code = NULL, .reason = NULL};
}
@@ -1317,10 +1287,6 @@ void us_bun_internal_ssl_socket_context_add_server_name(
/* We do not want to hold any nullptr's in our SNI tree */
if (ssl_context) {
#if ALLOW_SERVER_RENEGOTIATION
context->client_renegotiation_limit = options.client_renegotiation_limit;
context->client_renegotiation_window = options.client_renegotiation_window;
#endif
if (sni_add(context->sni, hostname_pattern, ssl_context)) {
/* If we already had that name, ignore */
free_ssl_context(ssl_context);
@@ -1469,10 +1435,6 @@ us_internal_bun_create_ssl_socket_context(
context->on_handshake = NULL;
context->handshake_data = NULL;
#if ALLOW_SERVER_RENEGOTIATION
context->client_renegotiation_limit = options.client_renegotiation_limit;
context->client_renegotiation_window = options.client_renegotiation_window;
#endif
/* We, as parent context, may ignore data */
context->sc.is_low_prio = (int (*)(struct us_socket_t *))ssl_is_low_prio;
@@ -1503,7 +1465,7 @@ void us_internal_ssl_socket_context_free(
sni_free(context->sni, sni_hostname_destructor);
}
us_socket_context_free(0, &context->sc);
us_internal_socket_context_free(0, &context->sc);
}
struct us_listen_socket_t *us_internal_ssl_socket_context_listen(
@@ -1714,7 +1676,7 @@ void *us_internal_connecting_ssl_socket_ext(struct us_connecting_socket_t *s) {
}
int us_internal_ssl_socket_is_shut_down(struct us_internal_ssl_socket_t *s) {
return us_socket_is_shut_down(0, &s->s) ||
return !s->ssl || us_socket_is_shut_down(0, &s->s) ||
SSL_get_shutdown(s->ssl) & SSL_SENT_SHUTDOWN;
}
@@ -1740,11 +1702,8 @@ void us_internal_ssl_socket_shutdown(struct us_internal_ssl_socket_t *s) {
loop_ssl_data->ssl_socket = &s->s;
loop_ssl_data->msg_more = 0;
// sets SSL_SENT_SHUTDOWN no matter what (not actually true if error!)
// sets SSL_SENT_SHUTDOWN and waits for the other side to do the same
int ret = SSL_shutdown(s->ssl);
if (ret == 0) {
ret = SSL_shutdown(s->ssl);
}
if (SSL_in_init(s->ssl) || SSL_get_quiet_shutdown(s->ssl)) {
// when SSL_in_init or quiet shutdown in BoringSSL, we call shutdown
@@ -2049,7 +2008,6 @@ us_socket_context_on_socket_connect_error(
socket->ssl_write_wants_read = 0;
socket->ssl_read_wants_write = 0;
socket->handshake_state = HANDSHAKE_PENDING;
socket->received_ssl_shutdown = 0;
return socket;
}

View File

@@ -14,6 +14,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// clang-format off
#pragma once
#ifndef INTERNAL_H
#define INTERNAL_H
@@ -144,11 +145,15 @@ void us_internal_free_loop_ssl_data(struct us_loop_t *loop);
/* Socket context related */
void us_internal_socket_context_link_socket(struct us_socket_context_t *context,
struct us_socket_t *s);
void us_internal_socket_context_unlink_socket(
void us_internal_socket_context_unlink_socket(int ssl,
struct us_socket_context_t *context, struct us_socket_t *s);
void us_internal_socket_after_resolve(struct us_connecting_socket_t *s);
void us_internal_socket_after_open(struct us_socket_t *s, int error);
struct us_internal_ssl_socket_t *
us_internal_ssl_socket_close(struct us_internal_ssl_socket_t *s, int code,
void *reason);
int us_internal_handle_dns_results(struct us_loop_t *loop);
/* Sockets are polls */
@@ -244,12 +249,13 @@ struct us_listen_socket_t {
/* Listen sockets are keps in their own list */
void us_internal_socket_context_link_listen_socket(
struct us_socket_context_t *context, struct us_listen_socket_t *s);
void us_internal_socket_context_unlink_listen_socket(
void us_internal_socket_context_unlink_listen_socket(int ssl,
struct us_socket_context_t *context, struct us_listen_socket_t *s);
struct us_socket_context_t {
alignas(LIBUS_EXT_ALIGNMENT) struct us_loop_t *loop;
uint32_t global_tick;
uint32_t ref_count;
unsigned char timestamp;
unsigned char long_timestamp;
struct us_socket_t *head_sockets;
@@ -280,7 +286,8 @@ struct us_internal_ssl_socket_t;
typedef void (*us_internal_on_handshake_t)(
struct us_internal_ssl_socket_t *, int success,
struct us_bun_verify_error_t verify_error, void *custom_data);
void us_internal_socket_context_free(int ssl, struct us_socket_context_t *context);
/* SNI functions */
void us_internal_ssl_socket_context_add_server_name(
struct us_internal_ssl_socket_context_t *context,

View File

@@ -27,6 +27,7 @@ struct us_internal_loop_data_t {
int last_write_failed;
struct us_socket_context_t *head;
struct us_socket_context_t *iterator;
struct us_socket_context_t *closed_context_head;
char *recv_buf;
char *send_buf;
void *ssl_data;

View File

@@ -17,6 +17,7 @@
#ifndef BSD_H
#define BSD_H
#pragma once
// top-most wrapper of bsd-like syscalls
@@ -25,7 +26,7 @@
#include "libusockets.h"
#ifdef _WIN32
#ifdef _WIN32
#ifndef NOMINMAX
#define NOMINMAX
#endif
@@ -34,7 +35,7 @@
#pragma comment(lib, "ws2_32.lib")
#define SETSOCKOPT_PTR_TYPE const char *
#define LIBUS_SOCKET_ERROR INVALID_SOCKET
#else
#else /* POSIX */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

View File

@@ -15,7 +15,7 @@
* limitations under the License.
*/
// clang-format off
#pragma once
#ifndef us_calloc
#define us_calloc calloc
#endif
@@ -49,6 +49,7 @@
#define LIBUS_EXT_ALIGNMENT 16
#define ALLOW_SERVER_RENEGOTIATION 0
#define LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN 0
#define LIBUS_SOCKET_CLOSE_CODE_CONNECTION_RESET 1
/* Define what a socket descriptor is based on platform */
@@ -229,8 +230,11 @@ struct us_socket_context_t *us_create_socket_context(int ssl, struct us_loop_t *
struct us_socket_context_t *us_create_bun_socket_context(int ssl, struct us_loop_t *loop,
int ext_size, struct us_bun_socket_context_options_t options);
/* Delete resources allocated at creation time. */
/* Delete resources allocated at creation time (will call unref now and only free when ref count == 0). */
void us_socket_context_free(int ssl, struct us_socket_context_t *context);
void us_socket_context_ref(int ssl, struct us_socket_context_t *context);
void us_socket_context_unref(int ssl, struct us_socket_context_t *context);
struct us_bun_verify_error_t us_socket_verify_error(int ssl, struct us_socket_t *context);
/* Setters of various async callbacks */
void us_socket_context_on_open(int ssl, struct us_socket_context_t *context,

View File

@@ -47,6 +47,8 @@ void us_internal_loop_data_init(struct us_loop_t *loop, void (*wakeup_cb)(struct
loop->data.parent_ptr = 0;
loop->data.parent_tag = 0;
loop->data.closed_context_head = 0;
loop->data.wakeup_async = us_internal_create_async(loop, 1, 0);
us_internal_async_set(loop->data.wakeup_async, (void (*)(struct us_internal_async *)) wakeup_cb);
}
@@ -234,6 +236,15 @@ void us_internal_free_closed_sockets(struct us_loop_t *loop) {
loop->data.closed_connecting_head = 0;
}
void us_internal_free_closed_contexts(struct us_loop_t *loop) {
for (struct us_socket_context_t *ctx = loop->data.closed_context_head; ctx; ) {
struct us_socket_context_t *next = ctx->next;
us_free(ctx);
ctx = next;
}
loop->data.closed_context_head = 0;
}
void sweep_timer_cb(struct us_internal_callback_t *cb) {
us_internal_timer_sweep(cb->loop);
}
@@ -253,6 +264,7 @@ void us_internal_loop_pre(struct us_loop_t *loop) {
void us_internal_loop_post(struct us_loop_t *loop) {
us_internal_handle_dns_results(loop);
us_internal_free_closed_sockets(loop);
us_internal_free_closed_contexts(loop);
loop->data.post_cb(loop);
}
@@ -356,7 +368,7 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int events)
s->context->loop->data.low_prio_budget--; /* Still having budget for this iteration - do normal processing */
} else {
us_poll_change(&s->p, us_socket_context(0, s)->loop, us_poll_events(&s->p) & LIBUS_SOCKET_WRITABLE);
us_internal_socket_context_unlink_socket(s->context, s);
us_internal_socket_context_unlink_socket(0, s->context, s);
/* Link this socket to the low-priority queue - we use a LIFO queue, to prioritize newer clients that are
* maybe not already timeouted - sounds unfair, but works better in real-life with smaller client-timeouts
@@ -411,7 +423,7 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int events)
if (us_socket_is_shut_down(0, s)) {
/* We got FIN back after sending it */
/* Todo: We should give "CLEAN SHUTDOWN" as reason here */
s = us_socket_close(0, s, 0, NULL);
s = us_socket_close(0, s, LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN, NULL);
} else {
/* We got FIN, so stop polling for readable */
us_poll_change(&s->p, us_socket_context(0, s)->loop, us_poll_events(&s->p) & LIBUS_SOCKET_WRITABLE);
@@ -419,7 +431,7 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int events)
}
} else if (length == LIBUS_SOCKET_ERROR && !bsd_would_block()) {
/* Todo: decide also here what kind of reason we should give */
s = us_socket_close(0, s, 0, NULL);
s = us_socket_close(0, s, LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN, NULL);
return;
}

View File

@@ -137,7 +137,7 @@ void us_connecting_socket_close(int ssl, struct us_connecting_socket_t *c) {
c->closed = 1;
for (struct us_socket_t *s = c->connecting_head; s; s = s->connect_next) {
us_internal_socket_context_unlink_socket(s->context, s);
us_internal_socket_context_unlink_socket(ssl, s->context, s);
us_poll_stop((struct us_poll_t *) s, s->context->loop);
bsd_close_socket(us_poll_fd((struct us_poll_t *) s));
@@ -157,6 +157,9 @@ void us_connecting_socket_close(int ssl, struct us_connecting_socket_t *c) {
}
struct us_socket_t *us_socket_close(int ssl, struct us_socket_t *s, int code, void *reason) {
if(ssl) {
return (struct us_socket_t *)us_internal_ssl_socket_close((struct us_internal_ssl_socket_t *) s, code, reason);
}
if (!us_socket_is_closed(0, s)) {
if (s->low_prio_state == 1) {
/* Unlink this socket from the low-priority queue */
@@ -169,7 +172,7 @@ struct us_socket_t *us_socket_close(int ssl, struct us_socket_t *s, int code, vo
s->next = 0;
s->low_prio_state = 0;
} else {
us_internal_socket_context_unlink_socket(s->context, s);
us_internal_socket_context_unlink_socket(ssl, s->context, s);
}
#ifdef LIBUS_USE_KQUEUE
// kqueue automatically removes the fd from the set on close
@@ -219,7 +222,7 @@ struct us_socket_t *us_socket_detach(int ssl, struct us_socket_t *s) {
s->next = 0;
s->low_prio_state = 0;
} else {
us_internal_socket_context_unlink_socket(s->context, s);
us_internal_socket_context_unlink_socket(ssl, s->context, s);
}
us_poll_stop((struct us_poll_t *) s, s->context->loop);

View File

@@ -1,38 +0,0 @@
# Build rules for the uWebSockets C API examples and the Rust binding demo.
# NOTE(review): recipe lines below appear without leading tabs (likely lost in
# extraction); make requires tab-indented recipes — confirm before use.
CAPI_EXAMPLE_FILES := HelloWorld HelloWorldAsync ServerName UpgradeSync UpgradeAsync EchoServer Broadcast BroadcastEchoServer
RUST_EXAMPLE_FILES := RustHelloWorld
LIBRARY_NAME := libuwebsockets
# Default target: build the static C API library, then link HelloWorld against it.
default:
$(MAKE) capi
$(CXX) -O3 -flto -I ../src -I ../uSockets/src examples/HelloWorld.c *.o -lz -luv -lssl -lcrypto -lstdc++ ../uSockets/uSockets.a -o HelloWorld
# Build uSockets (C + C++ crypto parts) and archive it together with the C API
# translation unit into $(LIBRARY_NAME).a.
capi:
$(MAKE) clean
cd ../uSockets && $(CC) -pthread -DUWS_WITH_PROXY -DLIBUS_USE_OPENSSL -DLIBUS_USE_LIBUV -std=c11 -Isrc -flto -fPIC -O3 -c src/*.c src/eventing/*.c src/crypto/*.c
cd ../uSockets && $(CXX) -std=c++17 -flto -fPIC -O3 -c src/crypto/*.cpp
cd ../uSockets && $(AR) rvs uSockets.a *.o
$(CXX) -DUWS_WITH_PROXY -c -O3 -std=c++17 -lz -luv -flto -fPIC -I ../src -I ../uSockets/src $(LIBRARY_NAME).cpp
$(AR) rvs $(LIBRARY_NAME).a $(LIBRARY_NAME).o ../uSockets/uSockets.a
# Same as capi, but produce a shared object instead of a static archive.
shared:
$(MAKE) clean
cd ../uSockets && $(CC) -pthread -DUWS_WITH_PROXY -DLIBUS_USE_OPENSSL -DLIBUS_USE_LIBUV -std=c11 -Isrc -flto -fPIC -O3 -c src/*.c src/eventing/*.c src/crypto/*.c
cd ../uSockets && $(CXX) -std=c++17 -flto -fPIC -O3 -c src/crypto/*.cpp
cd ../uSockets && $(AR) rvs uSockets.a *.o
$(CXX) -DUWS_WITH_PROXY -c -O3 -std=c++17 -lz -luv -flto -fPIC -I ../src -I ../uSockets/src $(LIBRARY_NAME).cpp
$(CXX) -shared -o $(LIBRARY_NAME).so $(LIBRARY_NAME).o ../uSockets/uSockets.a -fPIC -lz -luv -lssl -lcrypto
# Generate a self-signed certificate/key pair used by the SSL examples.
misc:
mkdir -p ../misc && openssl req -newkey rsa:2048 -new -nodes -x509 -days 3650 -passout pass:1234 -keyout ../misc/key.pem -out ../misc/cert.pem
# Build and link the Rust example against the static C API library.
rust:
$(MAKE) capi
rustc -C link-arg=$(LIBRARY_NAME).a -C link-args="-lstdc++ -luv" -C opt-level=3 -C lto -L all=. examples/RustHelloWorld.rs -o RustHelloWorld
clean:
rm -f *.o $(CAPI_EXAMPLE_FILES) $(RUST_EXAMPLE_FILES) $(LIBRARY_NAME).a $(LIBRARY_NAME).so
# Build every C API example in parallel, then wait for all background jobs.
all:
for FILE in $(CAPI_EXAMPLE_FILES); do $(CXX) -O3 -flto -I ../src -I ../uSockets/src examples/$$FILE.c *.o -luv -lstdc++ ../uSockets/uSockets.a -o $$FILE & done; \
wait

View File

@@ -1,157 +0,0 @@
#include "../libuwebsockets.h"
#include <stdio.h>
#include <malloc.h>
#include <time.h>
#include <string.h>
#include <stdarg.h>
#define SSL 1
/* Timer close helper: frees the heap-allocated handler record stored (by
 * pointer) in the timer's extension area, then closes the timer. Must only be
 * used on timers created by uws_create_timer below. */
void uws_timer_close(struct us_timer_t *timer)
{
struct us_timer_t *t = (struct us_timer_t *)timer;
struct timer_handler_data *data;
/* The extension holds a POINTER to the record, not the record itself. */
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
free(data);
us_timer_close(t, 0);
}
/* Timer create helper: wraps us_create_timer with a C-callback + userdata
 * interface. `ms` is the initial delay; repeat_ms > 0 makes it periodic.
 * One-shot timers free themselves after firing; periodic timers must be
 * released with uws_timer_close above. */
struct us_timer_t *uws_create_timer(int ms, int repeat_ms, void (*handler)(void *data), void *data)
{
struct us_loop_t *loop = uws_get_loop();
/* Extension sized for a single pointer: we store timer_handler_data*. */
struct us_timer_t *delayTimer = us_create_timer(loop, 0, sizeof(void *));
/* Local struct type; uws_timer_close relies on the same layout. */
struct timer_handler_data
{
void *data;
void (*handler)(void *data);
bool repeat;
};
struct timer_handler_data *timer_data = (struct timer_handler_data *)malloc(sizeof(timer_handler_data));
timer_data->data = data;
timer_data->handler = handler;
timer_data->repeat = repeat_ms > 0;
memcpy(us_timer_ext(delayTimer), &timer_data, sizeof(struct timer_handler_data *));
/* C++ lambda (this file is compiled as C++): reads back the record pointer,
 * invokes the user handler, and self-destructs for one-shot timers. */
us_timer_set(
delayTimer, [](struct us_timer_t *t)
{
/* We wrote the pointer to the timer's extension */
struct timer_handler_data *data;
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
data->handler(data->data);
if (!data->repeat)
{
free(data);
us_timer_close(t, 0);
}
},
ms, repeat_ms);
return (struct us_timer_t *)delayTimer;
}
/* This is a simple WebSocket "sync" upgrade example.
 * You may compile it with "WITH_OPENSSL=1 make" or with "make" */
/* Per-connection user data; ws->getUserData returns one of these.
 * Empty in this example — nothing per-socket is needed for broadcasting. */
struct PerSocketData {
/* Fill with user data */
};
/* Return the number of bytes — including the trailing '\0' — needed to hold
 * the printf-style formatting of the given arguments. */
int buffer_size(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    const int formatted_len = vsnprintf(NULL, 0, format, ap);
    va_end(ap);
    /* vsnprintf(NULL, 0, ...) counts characters without writing; add one
     * byte for the terminator. */
    return formatted_len + 1;
}
/* Listen callback: a NULL listen socket means binding failed; otherwise
 * report the port we are serving on. */
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void* user_data)
{
    if (!listen_socket) {
        return;
    }
    printf("Listening on port wss://localhost:%d\n", config.port);
}
/* New connection: subscribe it to the shared "broadcast" topic (9 = strlen). */
void open_handler(uws_websocket_t* ws){
/* Open event here, you may access uws_ws_get_user_data(WS) which points to a PerSocketData struct */
uws_ws_subscribe(SSL, ws, "broadcast", 9);
}
/* Incoming client messages are ignored; this example only pushes server time. */
void message_handler(uws_websocket_t* ws, const char* message, size_t length, uws_opcode_t opcode){
}
void close_handler(uws_websocket_t* ws, int code, const char* message, size_t length){
/* You may access uws_ws_get_user_data(ws) here, but sending or
* doing any kind of I/O with the socket is not valid. */
}
void drain_handler(uws_websocket_t* ws){
/* Check uws_ws_get_buffered_amount(ws) here */
}
void ping_handler(uws_websocket_t* ws, const char* message, size_t length){
/* You don't need to handle this one, we automatically respond to pings as per standard */
}
void pong_handler(uws_websocket_t* ws, const char* message, size_t length){
/* You don't need to handle this one either */
}
/* Timer callback: publish the current unix time in milliseconds to every
 * subscriber of the "broadcast" topic. `data` is the uws_app_t* passed when
 * the timer was created in main(). */
void on_timer_interval(void* data){
// broadcast the unix time as millis
uws_app_t * app = (uws_app_t *)data;
struct timespec ts;
timespec_get(&ts, TIME_UTC);
int64_t millis = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
/* NOTE(review): "%ld" with an int64_t argument is only correct where long is
 * 64-bit (not Windows/32-bit targets); consider PRId64 — TODO confirm. */
char* message = (char*)malloc((size_t)buffer_size("%ld", millis));
size_t message_length = sprintf(message, "%ld", millis);
uws_publish(SSL, app, "broadcast", 9, message, message_length, uws_opcode_t::TEXT, false);
free(message);
}
/* Entry point: create an SSL app, register WebSocket handlers on all routes,
 * listen on 9001, and broadcast the time every 8 ms via a repeating timer. */
int main()
{
uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
/* There are example certificates in uWebSockets.js repo */
.key_file_name = "../misc/key.pem",
.cert_file_name = "../misc/cert.pem",
.passphrase = "1234"
});
uws_ws(SSL, app, "/*", (uws_socket_behavior_t){
.compression = uws_compress_options_t::SHARED_COMPRESSOR,
.maxPayloadLength = 16 * 1024,
.idleTimeout = 12,
.maxBackpressure = 1 * 1024 * 1024,
.upgrade = NULL,
.open = open_handler,
.message = message_handler,
.drain = drain_handler,
.ping = ping_handler,
.pong = pong_handler,
.close = close_handler,
});
uws_app_listen(SSL, app, 9001, listen_handler, NULL);
// broadcast the unix time as millis every 8 millis
uws_create_timer(8, 8, on_timer_interval, app);
/* Blocks running the event loop until the app is shut down. */
uws_app_run(SSL, app);
}

View File

@@ -1,175 +0,0 @@
#include "../libuwebsockets.h"
#include <stdio.h>
#include <malloc.h>
#include <time.h>
#include <string.h>
#include <stdarg.h>
#define SSL 1
/* This is a simple WebSocket "sync" upgrade example.
 * You may compile it with "WITH_OPENSSL=1 make" or with "make" */
/* A pub/sub topic name with explicit length (not NUL-terminated use). */
typedef struct
{
size_t length;
char *name;
} topic_t;
/* ws->getUserData returns one of these */
struct PerSocketData
{
/* Fill with user data */
/* Array of `topics_quantity` heap-allocated topics; `nr` is a running
 * counter used to round-robin over them in message_handler. */
topic_t **topics;
int topics_quantity;
int nr;
};
/* File-global app handle, used by message_handler via uws_publish. */
uws_app_t *app;
/* Compute how many bytes (terminator included) a printf-style formatting of
 * the arguments would occupy. */
int buffer_size(const char *format, ...)
{
    int needed;
    va_list args;
    va_start(args, format);
    needed = vsnprintf(NULL, 0, format, args);
    va_end(args);
    /* one extra byte reserved for the terminating '\0' */
    return needed + 1;
}
/* Listen callback; NULL socket means the bind failed, so stay silent. */
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void* user_data)
{
    if (listen_socket == NULL)
        return;
    printf("Listening on port wss://localhost:%d\n", config.port);
}
/* Synchronous upgrade: allocate per-socket state (32 empty topic slots),
 * copy the WebSocket handshake headers out of the request, and upgrade. */
void upgrade_handler(uws_res_t *response, uws_req_t *request, uws_socket_context_t *context)
{
/* You may read from req only here, and COPY whatever you need into your PerSocketData.
* PerSocketData is valid from .open to .close event, accessed with uws_ws_get_user_data(ws).
* HttpRequest (req) is ONLY valid in this very callback, so any data you will need later
* has to be COPIED into PerSocketData here. */
/* Immediately upgrading without doing anything "async" before, is simple */
struct PerSocketData *data = (struct PerSocketData *)malloc(sizeof(struct PerSocketData));
data->topics = (topic_t **)calloc(32, sizeof(topic_t *));
data->topics_quantity = 32;
data->nr = 0;
const char *ws_key = NULL;
const char *ws_protocol = NULL;
const char *ws_extensions = NULL;
size_t ws_key_length = uws_req_get_header(request, "sec-websocket-key", 17, &ws_key);
size_t ws_protocol_length = uws_req_get_header(request, "sec-websocket-protocol", 22, &ws_protocol);
size_t ws_extensions_length = uws_req_get_header(request, "sec-websocket-extensions", 24, &ws_extensions);
uws_res_upgrade(SSL,
response,
(void *)data,
ws_key,
ws_key_length,
ws_protocol,
ws_protocol_length,
ws_extensions,
ws_extensions_length,
context);
}
/* On open: create and subscribe to 32 topics unique to this socket, named
 * "<socket-address>-<i>". The topic records are owned by PerSocketData. */
void open_handler(uws_websocket_t *ws)
{
/* Open event here, you may access uws_ws_get_user_data(ws) which points to a PerSocketData struct */
struct PerSocketData *data = (struct PerSocketData *)uws_ws_get_user_data(SSL, ws);
for (int i = 0; i < data->topics_quantity; i++)
{
/* NOTE(review): "%ld" with uintptr_t is not portable everywhere — TODO confirm. */
char *topic = (char *)malloc((size_t)buffer_size("%ld-%d", (uintptr_t)ws, i));
size_t topic_length = sprintf(topic, "%ld-%d", (uintptr_t)ws, i);
topic_t *new_topic = (topic_t*) malloc(sizeof(topic_t));
new_topic->length = topic_length;
new_topic->name = topic;
data->topics[i] = new_topic;
uws_ws_subscribe(SSL, ws, topic, topic_length);
}
}
/* Echo each message to two successive topics: once app-wide via uws_publish
 * (uses the file-global `app`), once socket-scoped via uws_ws_publish.
 * `nr` advances twice per message, rotating through the topic list. */
void message_handler(uws_websocket_t *ws, const char *message, size_t length, uws_opcode_t opcode)
{
struct PerSocketData *data = (struct PerSocketData *)uws_ws_get_user_data(SSL, ws);
topic_t *topic = data->topics[(size_t)(++data->nr % data->topics_quantity)];
uws_publish(SSL, app, topic->name, topic->length, message, length, opcode, false);
topic = data->topics[(size_t)(++data->nr % data->topics_quantity)];
uws_ws_publish(SSL, ws, topic->name, topic->length, message, length);
}
/* On close: release every topic record and the per-socket state itself. */
void close_handler(uws_websocket_t *ws, int code, const char *message, size_t length)
{
/* You may access uws_ws_get_user_data(ws) here, but sending or
* doing any kind of I/O with the socket is not valid. */
struct PerSocketData *data = (struct PerSocketData *)uws_ws_get_user_data(SSL, ws);
if (data)
{
for (int i = 0; i < data->topics_quantity; i++)
{
topic_t* topic = data->topics[i];
free(topic->name);
free(topic);
}
free(data->topics);
free(data);
}
}
void drain_handler(uws_websocket_t *ws)
{
/* Check uws_ws_get_buffered_amount(ws) here */
}
void ping_handler(uws_websocket_t *ws, const char *message, size_t length)
{
/* You don't need to handle this one, we automatically respond to pings as per standard */
}
void pong_handler(uws_websocket_t *ws, const char *message, size_t length)
{
/* You don't need to handle this one either */
}
/* Entry point: create the SSL app, register WebSocket handlers, listen on
 * 9001 and run the loop.
 *
 * FIX: the original declared a LOCAL `uws_app_t *app` here, shadowing the
 * file-global `app` that message_handler passes to uws_publish — so the
 * global stayed NULL and publishing dereferenced a null app. Assign to the
 * global instead. */
int main()
{
app = uws_create_app(SSL, (struct us_socket_context_options_t){
/* There are example certificates in uWebSockets.js repo */
.key_file_name = "../misc/key.pem",
.cert_file_name = "../misc/cert.pem",
.passphrase = "1234"
});
uws_ws(SSL, app, "/*", (uws_socket_behavior_t){
.compression = uws_compress_options_t::SHARED_COMPRESSOR,
.maxPayloadLength = 16 * 1024,
.idleTimeout = 12,
.maxBackpressure = 1 * 1024 * 1024,
.upgrade = upgrade_handler,
.open = open_handler,
.message = message_handler,
.drain = drain_handler,
.ping = ping_handler,
.pong = pong_handler,
.close = close_handler,
});
uws_app_listen(SSL, app, 9001, listen_handler, NULL);
uws_app_run(SSL, app);
}

View File

@@ -1,81 +0,0 @@
#include "../libuwebsockets.h"
#include <stdio.h>
#include <malloc.h>
#define SSL 1
/* This is a simple WebSocket "sync" upgrade example.
 * You may compile it with "WITH_OPENSSL=1 make" or with "make" */
/* ws->getUserData returns one of these; empty — echoing needs no state. */
struct PerSocketData {
/* Fill with user data */
};
/* Listen callback: report success; NULL socket means the bind failed. */
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void* user_data)
{
if (listen_socket){
printf("Listening on port wss://localhost:%d\n", config.port);
}
}
void open_handler(uws_websocket_t* ws){
/* Open event here, you may access uws_ws_get_user_data(WS) which points to a PerSocketData struct */
}
/* Echo: send every message straight back with its original opcode. */
void message_handler(uws_websocket_t* ws, const char* message, size_t length, uws_opcode_t opcode){
uws_ws_send(SSL, ws, message, length, opcode);
}
void close_handler(uws_websocket_t* ws, int code, const char* message, size_t length){
/* You may access uws_ws_get_user_data(ws) here, but sending or
* doing any kind of I/O with the socket is not valid. */
}
void drain_handler(uws_websocket_t* ws){
/* Check uws_ws_get_buffered_amount(ws) here */
}
void ping_handler(uws_websocket_t* ws, const char* message, size_t length){
/* You don't need to handle this one, we automatically respond to pings as per standard */
}
void pong_handler(uws_websocket_t* ws, const char* message, size_t length){
/* You don't need to handle this one either */
}
/* Entry point: SSL echo server on port 9001 with synchronous upgrades
 * (upgrade = NULL lets the library upgrade automatically). */
int main()
{
uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
/* There are example certificates in uWebSockets.js repo */
.key_file_name = "../misc/key.pem",
.cert_file_name = "../misc/cert.pem",
.passphrase = "1234"
});
uws_ws(SSL, app, "/*", (uws_socket_behavior_t){
.compression = uws_compress_options_t::SHARED_COMPRESSOR,
.maxPayloadLength = 16 * 1024,
.idleTimeout = 12,
.maxBackpressure = 1 * 1024 * 1024,
.upgrade = NULL,
.open = open_handler,
.message = message_handler,
.drain = drain_handler,
.ping = ping_handler,
.pong = pong_handler,
.close = close_handler,
});
uws_app_listen(SSL,app, 9001, listen_handler, NULL);
/* Blocks running the event loop. */
uws_app_run(SSL, app);
}

View File

@@ -1,33 +0,0 @@
#include "../libuwebsockets.h"
#include "libusockets.h"
#include <stdio.h>
#define SSL 1
/* HTTP GET handler: reply with a fixed body (11 = strlen) and keep the
 * connection open (close_connection = false). */
void get_handler(uws_res_t *res, uws_req_t *req, void *user_data)
{
uws_res_end(SSL, res, "Hello CAPI!", 11, false);
}
/* Listen callback: a NULL socket means the bind failed. */
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void *user_data)
{
if (listen_socket)
{
printf("Listening on port https://localhost:%d now\n", config.port);
}
}
/* Entry point: minimal SSL "hello world" HTTP server on port 3000. */
int main()
{
/* Overly simple hello world app */
uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
/* There are example certificates in uWebSockets.js repo */
.key_file_name = "../misc/key.pem",
.cert_file_name = "../misc/cert.pem",
.passphrase = "1234"
});
uws_app_get(SSL, app, "/*", get_handler, NULL);
uws_app_listen(SSL, app, 3000, listen_handler, NULL);
/* Blocks running the event loop. */
uws_app_run(SSL, app);
}

View File

@@ -1,123 +0,0 @@
#include "../libuwebsockets.h"
#include "libusockets.h"
#include <stdio.h>
#include <malloc.h>
#include <string.h>
#define SSL 0
/* Tracks one in-flight async response; `aborted` is set by the abort handler
 * so the timer completion knows not to write to a dead response. */
typedef struct {
uws_res_t* res;
bool aborted;
} async_request_t;
/* Timer close helper: frees the handler record whose pointer is stored in the
 * timer's extension area, then closes the timer. */
void uws_timer_close(struct us_timer_t *timer)
{
struct us_timer_t *t = (struct us_timer_t *)timer;
struct timer_handler_data *data;
/* The extension holds a POINTER to the record, not the record itself. */
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
free(data);
us_timer_close(t, 0);
}
/* Timer create helper: callback + userdata wrapper around us_create_timer.
 * One-shot timers (repeat_ms <= 0) free themselves after firing. */
struct us_timer_t *uws_create_timer(int ms, int repeat_ms, void (*handler)(void *data), void *data)
{
struct us_loop_t *loop = uws_get_loop();
struct us_timer_t *delayTimer = us_create_timer(loop, 0, sizeof(void *));
/* Local struct type; uws_timer_close relies on the same layout. */
struct timer_handler_data
{
void *data;
void (*handler)(void *data);
bool repeat;
};
struct timer_handler_data *timer_data = (struct timer_handler_data *)malloc(sizeof(timer_handler_data));
timer_data->data = data;
timer_data->handler = handler;
timer_data->repeat = repeat_ms > 0;
memcpy(us_timer_ext(delayTimer), &timer_data, sizeof(struct timer_handler_data *));
/* C++ lambda (file compiled as C++) dispatching to the stored handler. */
us_timer_set(
delayTimer, [](struct us_timer_t *t)
{
/* We wrote the pointer to the timer's extension */
struct timer_handler_data *data;
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
data->handler(data->data);
if (!data->repeat)
{
free(data);
us_timer_close(t, 0);
}
},
ms, repeat_ms);
return (struct us_timer_t *)delayTimer;
}
/* Abort handler: the client went away while we were "working". */
void on_res_aborted(uws_res_t *response, void* data){
async_request_t* request_data = (async_request_t*)data;
/* We don't implement any kind of cancellation here,
* so simply flag us as aborted */
request_data->aborted = true;
}
/* Cork callback: actually write the response (11 = strlen of the body). */
void on_res_corked(uws_res_t *response, void* data){
uws_res_end(SSL, response, "Hello CAPI!", 11, false);
}
/* Fires when the simulated async task completes. */
void on_timer_done(void *data){
async_request_t* request_data = (async_request_t*)data;
/* Weren't we aborted before our async task finished? Okay, send a message! */
if(!request_data->aborted){
uws_res_cork(SSL, request_data->res,on_res_corked, request_data);
}
/* NOTE(review): request_data allocated in get_handler is never freed here —
 * looks like a leak per request; freeing is unsafe only if the abort handler
 * could still fire afterwards — TODO confirm lifetime. */
}
/* GET handler: defer the response to a timer to demonstrate async replies. */
void get_handler(uws_res_t *res, uws_req_t *req, void* user_data)
{
/* We have to attach an abort handler for us to be aware
* of disconnections while we perform async tasks */
async_request_t* request_data = (async_request_t*) malloc(sizeof(async_request_t));
request_data->res = res;
request_data->aborted = false;
uws_res_on_aborted(SSL, res, on_res_aborted, request_data);
/* Simulate a brief async task via a 1 ms one-shot timer. This looks like
 * crap, never write code that utilizes us_timer_t like this; they are
 * high-cost and should not be created and destroyed more than rarely!
 * Either way, here we go! */
uws_create_timer(1, 0, on_timer_done, request_data);
}
/* Listen callback: a NULL socket means the bind failed. */
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void* user_data)
{
if (listen_socket)
{
printf("Listening on port https://localhost:%d now\n", config.port);
}
}
/* Entry point: async hello-world server on port 3000.
 * NOTE(review): SSL is defined as 0 in this file yet certificate options are
 * still passed — presumably ignored for non-SSL apps; TODO confirm. */
int main()
{
/* Overly simple hello world app with async response */
uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
/* There are example certificates in uWebSockets.js repo */
.key_file_name = "../misc/key.pem",
.cert_file_name = "../misc/cert.pem",
.passphrase = "1234"
});
uws_app_get(SSL, app, "/*", get_handler, NULL);
uws_app_listen(SSL, app, 3000, listen_handler, NULL);
/* Blocks running the event loop. */
uws_app_run(SSL, app);
}

View File

@@ -1,309 +0,0 @@
/* automatically generated by rust-bindgen 0.59.2 */
use std::convert::TryInto;
use std::ffi::CString;
// C-compatible primitive aliases emitted by bindgen.
pub type SizeT = ::std::os::raw::c_ulong;
pub type WcharT = ::std::os::raw::c_uint;
/// Mirror of C's `max_align_t` (bindgen-generated); layout checked below.
#[repr(C)]
#[repr(align(16))]
#[derive(Debug, Copy, Clone)]
pub struct max_align_t {
pub __clang_max_align_nonce1: ::std::os::raw::c_longlong,
pub __bindgen_padding_0: u64,
pub __clang_max_align_nonce2: u128,
}
/// Bindgen-generated layout test: verifies size, alignment and field offsets
/// of `max_align_t` match the C definition on this target.
#[test]
fn bindgen_test_layout_max_align_t() {
assert_eq!(
::std::mem::size_of::<max_align_t>(),
32usize,
concat!("Size of: ", stringify!(max_align_t))
);
assert_eq!(
::std::mem::align_of::<max_align_t>(),
16usize,
concat!("Alignment of ", stringify!(max_align_t))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<max_align_t>())).__clang_max_align_nonce1 as *const _ as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(max_align_t),
"::",
stringify!(__clang_max_align_nonce1)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<max_align_t>())).__clang_max_align_nonce2 as *const _ as usize
},
16usize,
concat!(
"Offset of field: ",
stringify!(max_align_t),
"::",
stringify!(__clang_max_align_nonce2)
)
);
}
/// Opaque handle to a uws app (C side owns the data).
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct uws_app_s {
_unused: [u8; 0],
}
/// Opaque handle to an HTTP request.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct uws_req_s {
_unused: [u8; 0],
}
/// Opaque handle to an HTTP response.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct uws_res_s {
_unused: [u8; 0],
}
/// C mirror of uws_app_listen_config_t.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct uws_app_listen_config_s {
port: ::std::os::raw::c_int,
host: *const ::std::os::raw::c_char,
options: ::std::os::raw::c_int,
}
/// C mirror of us_socket_context_options_t (raw C-string pointers).
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct us_socket_context_options_s {
key_file_name: *const ::std::os::raw::c_char,
cert_file_name: *const ::std::os::raw::c_char,
passphrase: *const ::std::os::raw::c_char,
dh_params_file_name: *const ::std::os::raw::c_char,
ca_file_name: *const ::std::os::raw::c_char,
ssl_prefer_low_memory_usage: ::std::os::raw::c_int,
}
pub type UwsAppListenConfigT = uws_app_listen_config_s;
pub type UsSocketContextOptionsT = us_socket_context_options_s;
/// Safe-Rust configuration; converted to the raw struct in TemplateApp::new.
pub struct UsSocketContextOptions<'a> {
key_file_name: &'a str,
cert_file_name: &'a str,
passphrase: &'a str,
dh_params_file_name: &'a str,
ca_file_name: &'a str,
ssl_prefer_low_memory_usage: i32,
}
pub type UwsAppT = uws_app_s;
pub type UwsReqT = uws_req_s;
pub type UwsResT = uws_res_s;
// Raw FFI declarations for the libuwebsockets C API. All take `ssl` (0/1) as
// the first argument to select the non-SSL/SSL implementation.
extern "C" {
pub fn uws_create_app(
ssl: ::std::os::raw::c_int,
options: UsSocketContextOptionsT,
) -> *mut UwsAppT;
pub fn uws_app_get(
ssl: ::std::os::raw::c_int,
app: *mut UwsAppT,
pattern: *const ::std::os::raw::c_char,
handler: ::std::option::Option<
unsafe extern "C" fn(
res: *mut UwsResT,
req: *mut UwsReqT,
user_data: *mut ::std::os::raw::c_void,
),
>,
user_data: *mut ::std::os::raw::c_void,
);
pub fn uws_app_run(ssl: ::std::os::raw::c_int, app: *mut UwsAppT);
pub fn uws_app_listen(
ssl: ::std::os::raw::c_int,
app: *mut UwsAppT,
port: ::std::os::raw::c_int,
handler: ::std::option::Option<
unsafe extern "C" fn(
listen_socket: *mut ::std::os::raw::c_void,
config: UwsAppListenConfigT,
user_data: *mut ::std::os::raw::c_void,
),
>,
user_data: *mut ::std::os::raw::c_void,
);
pub fn uws_res_end(
ssl: ::std::os::raw::c_int,
res: *mut UwsResT,
data: *const ::std::os::raw::c_char,
length: SizeT,
close_connection: bool,
);
}
/// Thin wrapper over the raw response pointer; SSL selected at compile time.
pub struct AppResponse<const SSL: i32> {
native: *mut UwsResT,
}
/// Thin wrapper over the raw request pointer.
pub struct AppRequest {
native: *mut UwsReqT,
}
impl AppRequest {
pub fn new(native: *mut UwsReqT) -> AppRequest {
AppRequest { native: native }
}
}
impl<const SSL: i32> AppResponse<SSL> {
pub fn new(native: *mut UwsResT) -> AppResponse<SSL> {
AppResponse::<SSL> { native: native }
}
/// End the response with `message` (connection kept open). Copies the
/// string into a CString for the FFI call.
fn end(self, message: &str) -> AppResponse<SSL> {
unsafe {
let c_message =
::std::ffi::CString::new(message).expect("Failed to create message CString");
//This will now const fold :/ performance impact needs refactor
uws_res_end(
SSL,
self.native,
c_message.as_ptr(),
message.len().try_into().unwrap(),
false,
);
}
self
}
}
/// Rust-side handler signatures the C trampolines below dispatch to.
pub type UwsMethodHandler<const SSL: i32> = fn(res: AppResponse<SSL>, req: AppRequest);
pub type UwsListenHandler =
fn(listen_socket: *mut ::std::os::raw::c_void, config: UwsAppListenConfigT);
/// App wrapper parameterized over SSL (0 = plain, 1 = TLS).
pub struct TemplateApp<const SSL: i32> {
native: *mut UwsAppT,
}
/// C-ABI trampoline: recovers the Rust listen handler from user_data.
/// NOTE(review): user_data is a pointer to a stack-local in `listen()` —
/// presumably dangling by the time C calls back; TODO confirm lifetime.
extern "C" fn uws_generic_listen_handler(
listen_socket: *mut ::std::os::raw::c_void,
config: UwsAppListenConfigT,
user_data: *mut ::std::os::raw::c_void,
) {
unsafe {
let callback = &mut *(user_data as *mut UwsListenHandler);
callback(listen_socket, config);
}
}
/// C-ABI trampoline for non-SSL (const 0) HTTP method handlers.
extern "C" fn uws_generic_method_handler(
res: *mut UwsResT,
req: *mut UwsReqT,
user_data: *mut ::std::os::raw::c_void,
) {
unsafe {
let response = AppResponse::<0>::new(res);
let request = AppRequest::new(req);
let callback = &mut *(user_data as *mut UwsMethodHandler<0>);
callback(response, request);
}
}
/// C-ABI trampoline for SSL (const 1) HTTP method handlers.
extern "C" fn uws_ssl_generic_method_handler(
res: *mut UwsResT,
req: *mut UwsReqT,
user_data: *mut ::std::os::raw::c_void,
) {
unsafe {
let response = AppResponse::<1>::new(res);
let request = AppRequest::new(req);
let callback = &mut *(user_data as *mut UwsMethodHandler<1>);
callback(response, request);
}
}
impl<const SSL: i32> TemplateApp<SSL> {
/// Build the native app from the string-based config.
/// NOTE(review): the CStrings created here are dropped when `new` returns,
/// while the C side received their `as_ptr()` — likely use-after-free if
/// the C API keeps the pointers; TODO confirm whether uws copies them.
pub fn new(config: UsSocketContextOptions) -> TemplateApp<SSL> {
unsafe {
let key_file_name_s =
CString::new(config.key_file_name).expect("Failed to create key_file_name CString");
let cert_file_name_s = CString::new(config.cert_file_name)
.expect("Failed to create cert_file_name CString");
let passphrase_s =
CString::new(config.passphrase).expect("Failed to create passphrase CString");
let dh_params_file_name_s = CString::new(config.dh_params_file_name)
.expect("Failed to create dh_params_file_name CString");
let ca_file_name_s =
CString::new(config.ca_file_name).expect("Failed to create ca_file_name CString");
let native_options = UsSocketContextOptionsT {
key_file_name: key_file_name_s.as_ptr(),
cert_file_name: cert_file_name_s.as_ptr(),
passphrase: passphrase_s.as_ptr(),
dh_params_file_name: dh_params_file_name_s.as_ptr(),
ca_file_name: ca_file_name_s.as_ptr(),
ssl_prefer_low_memory_usage: config.ssl_prefer_low_memory_usage,
};
TemplateApp::<SSL> {
native: uws_create_app(SSL, native_options),
}
}
}
/// Register a GET route; dispatches through the SSL or non-SSL trampoline.
/// NOTE(review): `&mut handler` points at a parameter on this stack frame —
/// the stored user_data pointer dangles after `get` returns; TODO confirm.
pub fn get(self, route: &str, mut handler: UwsMethodHandler<SSL>) -> TemplateApp<SSL> {
unsafe {
let c_route = ::std::ffi::CString::new(route).expect("Failed to create route CString");
if SSL == 1 {
uws_app_get(
SSL,
self.native,
c_route.as_ptr(),
std::option::Option::Some(uws_ssl_generic_method_handler),
&mut handler as *mut _ as *mut ::std::os::raw::c_void,
);
} else {
uws_app_get(
SSL,
self.native,
c_route.as_ptr(),
std::option::Option::Some(uws_generic_method_handler),
&mut handler as *mut _ as *mut ::std::os::raw::c_void,
);
}
}
self
}
/// Start listening on `port`; same stack-pointer caveat as `get`.
pub fn listen(self, port: i32, mut handler: UwsListenHandler) -> TemplateApp<SSL> {
unsafe {
uws_app_listen(
SSL,
self.native,
port,
::std::option::Option::Some(uws_generic_listen_handler),
&mut handler as *mut _ as *mut ::std::os::raw::c_void,
);
}
self
}
/// Run the event loop (blocks until the app shuts down).
pub fn run(self) -> TemplateApp<SSL> {
unsafe {
uws_app_run(SSL, self.native);
}
self
}
}
/// Convenience aliases: plain-HTTP and TLS app types.
pub type App = TemplateApp<0>;
pub type SSLApp = TemplateApp<1>;
/// Demo: TLS hello-world server on port 3000 using the builder-style API.
fn main() {
let config = UsSocketContextOptions {
key_file_name: "../misc/key.pem",
cert_file_name: "../misc/cert.pem",
passphrase: "1234",
ca_file_name: "",
dh_params_file_name: "",
ssl_prefer_low_memory_usage: 0,
};
SSLApp::new(config)
.get("/", |res, _req| {
res.end("Hello Rust!");
})
.listen(3000, |_listen_socket, config| {
println!("Listening on port https://127.0.0.1:{}", config.port);
})
.run();
}

View File

@@ -1,59 +0,0 @@
#include "../libuwebsockets.h"
#include <stdio.h>
#include <string.h>
#define SSL 1
/* Kept global so exit_handler can close the listen socket gracefully. */
struct us_listen_socket_t *globalListenSocket;
/* Global app handle, needed by missing_server_name_handler below. */
uws_app_t *app;
/* GET handler: fixed response body (11 = strlen). */
void get_handler(uws_res_t *res, uws_req_t *req, void* user_data)
{
uws_res_end(SSL, res, "Hello CAPI!", 11, false);
}
/* GET /exit: reply, then close the listen socket to shut down gracefully. */
void exit_handler(uws_res_t *res, uws_req_t *req, void* user_data)
{
uws_res_end(SSL, res, "Shutting down!",14, false);
/* We use this to check graceful closedown */
us_listen_socket_close(false, globalListenSocket);
}
/* SNI miss callback: register the unknown hostname on the fly. */
void missing_server_name_handler(const char *hostname, void* user_data){
printf("We are missing server name: <%s>\n", hostname);
/* Assume it is localhost, so add it */
uws_add_server_name(SSL, app, "localhost");
}
/* Listen callback: on failure report and bail; on success remember the
 * socket so /exit can close it later. */
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void* user_data)
{
    if (!listen_socket) {
        printf("Failed to listen on port https://localhost:%d\n", config.port);
        return;
    }
    printf("Listening on port https://localhost:%d\n", config.port);
    globalListenSocket = listen_socket;
}
/* Entry point: SSL server demonstrating SNI — a wildcard name is added up
 * front and unknown names are added lazily via the missing-name callback. */
int main()
{
/* Overly simple hello world app (SNI)*/
app = uws_create_app(SSL, (struct us_socket_context_options_t){
/* There are example certificates in uWebSockets.js repo */
.key_file_name = "../misc/key.pem",
.cert_file_name = "../misc/cert.pem",
.passphrase = "1234"
});
uws_missing_server_name(SSL, app, missing_server_name_handler, NULL);
uws_app_get(SSL, app, "/*", get_handler, NULL);
uws_app_get(SSL, app, "/exit", exit_handler, NULL);
uws_app_listen(SSL, app, 3000, listen_handler, NULL);
/* Let's add a wildcard SNI to begin with */
uws_add_server_name(SSL, app, "*.google.*");
/* Blocks running the event loop until /exit closes the listen socket. */
uws_app_run(SSL, app);
}

View File

@@ -1,255 +0,0 @@
#include "../libuwebsockets.h"
#include "libusockets.h"
#include <stdio.h>
#include <malloc.h>
#include <string.h>
/* This is a simple WebSocket "sync" upgrade example.
 * You may compile it with "WITH_OPENSSL=1 make" or with "make" */
#define SSL 1
/* An owned byte buffer with explicit length (copied request header value). */
typedef struct
{
char *value;
size_t length;
} header_t;
/* Per-connection user data, created during the async upgrade. */
struct PerSocketData
{
/* Define your user data */
int something;
};
/* Everything the deferred upgrade needs after the request callback returns:
 * copied handshake headers, the context/response handles, and an abort flag. */
struct UpgradeData
{
header_t *secWebSocketKey;
header_t *secWebSocketProtocol;
header_t *secWebSocketExtensions;
uws_socket_context_t *context;
uws_res_t *response;
bool aborted;
};
/* Allocate a header_t owning a copy of `value` (`length` bytes). A zero
 * length produces a header with a NULL value and zero length. */
header_t *create_header(size_t length, const char* value)
{
    header_t *header = (header_t *)malloc(sizeof(header_t));
    if (length == 0) {
        header->value = NULL;
        header->length = 0;
        return header;
    }
    header->value = (char *)calloc(sizeof(char), length);
    header->length = length;
    memcpy(header->value, value, length);
    return header;
}
/* Release a header created by create_header (its buffer, then the struct).
 * Robustness: tolerate NULL, matching free()'s own contract, so callers on
 * error paths need no guard. */
void free_header(header_t *header)
{
    if (!header) {
        return;
    }
    free(header->value);
    free(header);
}
/* Listen callback: only report when the bind actually succeeded. */
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void *user_data)
{
    if (listen_socket == NULL)
        return;
    printf("Listening on port wss://localhost:%d\n", config.port);
}
/* Timer close helper: frees the handler record whose pointer lives in the
 * timer's extension area, then closes the timer. */
void uws_timer_close(struct us_timer_t *timer)
{
struct us_timer_t *t = (struct us_timer_t *)timer;
struct timer_handler_data *data;
/* The extension holds a POINTER to the record, not the record itself. */
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
free(data);
us_timer_close(t, 0);
}
/* Timer create helper: callback + userdata wrapper around us_create_timer.
 * One-shot timers (repeat_ms <= 0) free themselves after firing. */
struct us_timer_t *uws_create_timer(int ms, int repeat_ms, void (*handler)(void *data), void *data)
{
struct us_loop_t *loop = uws_get_loop();
struct us_timer_t *delayTimer = us_create_timer(loop, 0, sizeof(void *));
/* Local struct type; uws_timer_close relies on the same layout. */
struct timer_handler_data
{
void *data;
void (*handler)(void *data);
bool repeat;
};
struct timer_handler_data *timer_data = (struct timer_handler_data *)malloc(sizeof(timer_handler_data));
timer_data->data = data;
timer_data->handler = handler;
timer_data->repeat = repeat_ms > 0;
memcpy(us_timer_ext(delayTimer), &timer_data, sizeof(struct timer_handler_data *));
/* C++ lambda (file compiled as C++) dispatching to the stored handler. */
us_timer_set(
delayTimer, [](struct us_timer_t *t)
{
/* We wrote the pointer to the timer's extension */
struct timer_handler_data *data;
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
data->handler(data->data);
if (!data->repeat)
{
free(data);
us_timer_close(t, 0);
}
},
ms, repeat_ms);
return (struct us_timer_t *)delayTimer;
}
/* Fires when the simulated async auth check finishes: upgrade the connection
 * if the client is still there, then release all copied headers and state. */
void on_timer_done(void *data)
{
struct UpgradeData *upgrade_data = (struct UpgradeData *)data;
/* Weren't we aborted before our async task finished? Okay, upgrade then! */
if (!upgrade_data->aborted)
{
/* Ownership of socket_data passes to the WebSocket (freed in close_handler). */
struct PerSocketData *socket_data = (struct PerSocketData *)malloc(sizeof(struct PerSocketData));
socket_data->something = 15;
printf("Async task done, upgrading to WebSocket now!\n");
uws_res_upgrade(SSL,
upgrade_data->response,
(void *)socket_data,
upgrade_data->secWebSocketKey->value,
upgrade_data->secWebSocketKey->length,
upgrade_data->secWebSocketProtocol->value,
upgrade_data->secWebSocketProtocol->length,
upgrade_data->secWebSocketExtensions->value,
upgrade_data->secWebSocketExtensions->length,
upgrade_data->context);
}
else
{
printf("Async task done, but the HTTP socket was closed. Skipping upgrade to WebSocket!\n");
}
free_header(upgrade_data->secWebSocketKey);
free_header(upgrade_data->secWebSocketProtocol);
free_header(upgrade_data->secWebSocketExtensions);
free(upgrade_data);
}
/* Abort handler: the client disconnected while we were "working". */
void on_res_aborted(uws_res_t *response, void *data)
{
struct UpgradeData *upgrade_data = (struct UpgradeData *)data;
/* We don't implement any kind of cancellation here,
* so simply flag us as aborted */
upgrade_data->aborted = true;
}
/* WebSocket upgrade intent handler (async flavor).
 * The request object is ONLY valid inside this very callback, so every header
 * we need later is COPIED into a heap-allocated UpgradeData before we kick
 * off the (simulated) async work. */
void upgrade_handler(uws_res_t *response, uws_req_t *request, uws_socket_context_t *context)
{
    struct UpgradeData *upgrade = (struct UpgradeData *)malloc(sizeof(struct UpgradeData));
    const char *key = NULL;
    const char *protocol = NULL;
    const char *extensions = NULL;

    upgrade->aborted = false;
    upgrade->context = context;
    upgrade->response = response;

    /* Copy the three Sec-WebSocket-* headers while the request is still alive. */
    size_t key_len = uws_req_get_header(request, "sec-websocket-key", 17, &key);
    size_t protocol_len = uws_req_get_header(request, "sec-websocket-protocol", 22, &protocol);
    size_t extensions_len = uws_req_get_header(request, "sec-websocket-extensions", 24, &extensions);
    upgrade->secWebSocketKey = create_header(key_len, key);
    upgrade->secWebSocketProtocol = create_header(protocol_len, protocol);
    upgrade->secWebSocketExtensions = create_header(extensions_len, extensions);

    /* We have to attach an abort handler so we become aware of
     * disconnections that happen while our async task is in flight. */
    uws_res_on_aborted(SSL, response, on_res_aborted, upgrade);

    /* Simulate checking auth for 5 seconds. This looks like crap; never write
     * real code that creates and destroys us_timer_t like this — timers are
     * high-cost and should only be created rarely. Either way, here we go! */
    uws_create_timer(5000, 0, on_timer_done, upgrade);
}
/* Open event: the WebSocket is established. uws_ws_get_user_data(ws) returns
 * the PerSocketData pointer handed to uws_res_upgrade() in on_timer_done.
 * Here we simply print the value to validate that indeed, something == 15
 * as set in the upgrade path.
 * Fix: the original assigned data->something = 15 immediately before the
 * printf, which made the stated "validation" meaningless; we now only read
 * the value (the printed output is unchanged). */
void open_handler(uws_websocket_t *ws)
{
    struct PerSocketData *data = (struct PerSocketData *)uws_ws_get_user_data(SSL, ws);
    printf("Something is: %d\n", data->something);
}
/* Message event: echo the received frame back to the sender unchanged,
 * preserving its original opcode. */
void message_handler(uws_websocket_t *ws, const char *message, size_t length, uws_opcode_t opcode)
{
/* We simply echo whatever data we get */
uws_ws_send(SSL, ws, message, length, opcode);
}
/* Close event: release the per-socket state allocated during the upgrade.
 * You may still read uws_ws_get_user_data(ws) here, but sending or doing
 * any other kind of I/O with the socket is not valid. */
void close_handler(uws_websocket_t *ws, int code, const char *message, size_t length)
{
    /* free() is defined to be a no-op on NULL, so no guard is required. */
    free((struct PerSocketData *)uws_ws_get_user_data(SSL, ws));
}
/* Drain event: backpressure has eased. Intentionally empty; consult
 * uws_ws_get_buffered_amount(ws) here if you throttle your own sends. */
void drain_handler(uws_websocket_t *ws)
{
/* Check uws_ws_get_buffered_amount(ws) here */
}
/* Ping event: intentionally empty — pongs are sent automatically per the
 * WebSocket standard, so no action is required here. */
void ping_handler(uws_websocket_t *ws, const char *message, size_t length)
{
/* You don't need to handle this one, we automatically respond to pings as per standard */
}
/* Pong event: intentionally empty — nothing needs to be done on pong. */
void pong_handler(uws_websocket_t *ws, const char *message, size_t length)
{
/* You don't need to handle this one either */
}
/* Entry point: create an SSL-enabled app, register the WebSocket behavior on
 * every route ("/*"), listen on port 9001, and run the event loop.
 * Fix: `uws_compress_options_t::SHARED_COMPRESSOR` is C++ scope-resolution
 * syntax and does not compile as C; in C, enumeration constants live in the
 * ordinary identifier namespace, so plain SHARED_COMPRESSOR is correct.
 * Also adds an explicit `return 0` for clarity. */
int main()
{
    uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
        /* There are example certificates in uWebSockets.js repo */
        .key_file_name = "../misc/key.pem",
        .cert_file_name = "../misc/cert.pem",
        .passphrase = "1234"
    });
    uws_ws(SSL, app, "/*", (uws_socket_behavior_t){
        .compression = SHARED_COMPRESSOR,
        .maxPayloadLength = 16 * 1024,
        .idleTimeout = 12,
        .maxBackpressure = 1 * 1024 * 1024,
        .upgrade = upgrade_handler,
        .open = open_handler,
        .message = message_handler,
        .drain = drain_handler,
        .ping = ping_handler,
        .pong = pong_handler,
        .close = close_handler,
    });
    uws_app_listen(SSL, app, 9001, listen_handler, NULL);
    uws_app_run(SSL, app);
    return 0;
}

View File

@@ -1,117 +0,0 @@
#include "../libuwebsockets.h"
#include <stdio.h>
#include <malloc.h>
#define SSL 1
/* This is a simple WebSocket "sync" upgrade example.
* You may compile it with "WITH_OPENSSL=1 make" or with "make" */
/* uws_ws_get_user_data(ws) returns one of these */
/* Per-connection user data; uws_ws_get_user_data(ws) returns a pointer to
 * one of these. Allocated in upgrade_handler and freed in close_handler. */
struct PerSocketData
{
/* Define your user data */
int something;
};
/* Listen callback: report the bound port. listen_socket is NULL when the
 * app failed to bind, in which case we stay silent. */
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void *user_data)
{
    if (!listen_socket)
        return;
    printf("Listening on port wss://localhost:%d\n", config.port);
}
/* Synchronous upgrade handler: nothing "async" happens before upgrading.
 * You may read from the request ONLY inside this very callback, so anything
 * needed later must be COPIED into PerSocketData, which is valid from the
 * .open event to the .close event via uws_ws_get_user_data(ws). */
void upgrade_handler(uws_res_t *response, uws_req_t *request, uws_socket_context_t *context)
{
    const char *key = NULL;
    const char *protocol = NULL;
    const char *extensions = NULL;
    size_t key_len = uws_req_get_header(request, "sec-websocket-key", 17, &key);
    size_t protocol_len = uws_req_get_header(request, "sec-websocket-protocol", 22, &protocol);
    size_t extensions_len = uws_req_get_header(request, "sec-websocket-extensions", 24, &extensions);

    /* Heap-allocate the per-socket state; released in close_handler. */
    struct PerSocketData *per_socket = (struct PerSocketData *)malloc(sizeof(struct PerSocketData));
    per_socket->something = 15;

    /* Immediately upgrading without doing anything "async" first is simple. */
    uws_res_upgrade(SSL,
                    response,
                    (void *)per_socket,
                    key, key_len,
                    protocol, protocol_len,
                    extensions, extensions_len,
                    context);
}
/* Open event: the WebSocket is established. uws_ws_get_user_data(ws) returns
 * the PerSocketData pointer handed to uws_res_upgrade() in upgrade_handler.
 * Here we simply print the value to validate that indeed, something == 15
 * as set in the upgrade handler.
 * Fix: the original assigned data->something = 15 immediately before the
 * printf, which made the stated "validation" meaningless; we now only read
 * the value (the printed output is unchanged). */
void open_handler(uws_websocket_t *ws)
{
    struct PerSocketData *data = (struct PerSocketData *)uws_ws_get_user_data(SSL, ws);
    printf("Something is: %d\n", data->something);
}
/* Message event: echo the received frame back to the sender unchanged,
 * preserving its original opcode. */
void message_handler(uws_websocket_t *ws, const char *message, size_t length, uws_opcode_t opcode)
{
/* We simply echo whatever data we get */
uws_ws_send(SSL, ws, message, length, opcode);
}
/* Close event: release the per-socket state allocated during the upgrade.
 * You may still read uws_ws_get_user_data(ws) here, but sending or doing
 * any other kind of I/O with the socket is not valid. */
void close_handler(uws_websocket_t *ws, int code, const char *message, size_t length)
{
    /* free() is defined to be a no-op on NULL, so no guard is required. */
    free((struct PerSocketData *)uws_ws_get_user_data(SSL, ws));
}
/* Drain event: backpressure has eased. Intentionally empty; consult
 * uws_ws_get_buffered_amount(ws) here if you throttle your own sends. */
void drain_handler(uws_websocket_t *ws)
{
/* Check uws_ws_get_buffered_amount(ws) here */
}
/* Ping event: intentionally empty — pongs are sent automatically per the
 * WebSocket standard, so no action is required here. */
void ping_handler(uws_websocket_t *ws, const char *message, size_t length)
{
/* You don't need to handle this one, we automatically respond to pings as per standard */
}
/* Pong event: intentionally empty — nothing needs to be done on pong. */
void pong_handler(uws_websocket_t *ws, const char *message, size_t length)
{
/* You don't need to handle this one either */
}
/* Entry point: create an SSL-enabled app, register the WebSocket behavior on
 * every route ("/*"), listen on port 9001, and run the event loop.
 * Fix: `uws_compress_options_t::SHARED_COMPRESSOR` is C++ scope-resolution
 * syntax and does not compile as C; in C, enumeration constants live in the
 * ordinary identifier namespace, so plain SHARED_COMPRESSOR is correct.
 * The single-line behavior literal is also reflowed for readability, and an
 * explicit `return 0` is added. */
int main()
{
    uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
        /* There are example certificates in uWebSockets.js repo */
        .key_file_name = "../misc/key.pem",
        .cert_file_name = "../misc/cert.pem",
        .passphrase = "1234"
    });
    uws_ws(SSL, app, "/*", (uws_socket_behavior_t){
        .compression = SHARED_COMPRESSOR,
        .maxPayloadLength = 16 * 1024,
        .idleTimeout = 12,
        .maxBackpressure = 1 * 1024 * 1024,
        .upgrade = upgrade_handler,
        .open = open_handler,
        .message = message_handler,
        .drain = drain_handler,
        .ping = ping_handler,
        .pong = pong_handler,
        .close = close_handler
    });
    uws_app_listen(SSL, app, 9001, listen_handler, NULL);
    uws_app_run(SSL, app);
    return 0;
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,260 +0,0 @@
/*
* Copyright 2022 Ciro Spaciari
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// clang-format off
#ifndef LIBUWS_CAPI_HEADER
#define LIBUWS_CAPI_HEADER
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>
#include "libusockets.h"
#ifdef __cplusplus
extern "C"
{
#endif
#ifdef _WIN32
# define DLL_EXPORT __declspec( dllexport )
#else
# define DLL_EXPORT
#endif
/* permessage-deflate compression options. The low byte (_COMPRESSOR_MASK)
 * selects the compressor and bits 8..11 (_DECOMPRESSOR_MASK) select the
 * decompressor, so one value of each kind can be combined. */
DLL_EXPORT typedef enum
{
/* These are not actual compression options */
_COMPRESSOR_MASK = 0x00FF,
_DECOMPRESSOR_MASK = 0x0F00,
/* Disabled, shared, shared are "special" values */
DISABLED = 0,
SHARED_COMPRESSOR = 1,
SHARED_DECOMPRESSOR = 1 << 8,
/* Highest 4 bits describe decompressor */
DEDICATED_DECOMPRESSOR_32KB = 15 << 8,
DEDICATED_DECOMPRESSOR_16KB = 14 << 8,
DEDICATED_DECOMPRESSOR_8KB = 13 << 8,
DEDICATED_DECOMPRESSOR_4KB = 12 << 8,
DEDICATED_DECOMPRESSOR_2KB = 11 << 8,
DEDICATED_DECOMPRESSOR_1KB = 10 << 8,
DEDICATED_DECOMPRESSOR_512B = 9 << 8,
/* Same as 32kb */
DEDICATED_DECOMPRESSOR = 15 << 8,
/* Lowest 8 bit describe compressor */
DEDICATED_COMPRESSOR_3KB = 9 << 4 | 1,
DEDICATED_COMPRESSOR_4KB = 9 << 4 | 2,
DEDICATED_COMPRESSOR_8KB = 10 << 4 | 3,
DEDICATED_COMPRESSOR_16KB = 11 << 4 | 4,
DEDICATED_COMPRESSOR_32KB = 12 << 4 | 5,
DEDICATED_COMPRESSOR_64KB = 13 << 4 | 6,
DEDICATED_COMPRESSOR_128KB = 14 << 4 | 7,
DEDICATED_COMPRESSOR_256KB = 15 << 4 | 8,
/* Same as 256kb */
DEDICATED_COMPRESSOR = 15 << 4 | 8
} uws_compress_options_t;
/* WebSocket frame opcodes; the numeric values match the RFC 6455 opcodes. */
DLL_EXPORT typedef enum
{
CONTINUATION = 0,
TEXT = 1,
BINARY = 2,
CLOSE = 8,
PING = 9,
PONG = 10
} uws_opcode_t;
/* Outcome reported by the uws_ws_send* family of functions. */
DLL_EXPORT typedef enum
{
BACKPRESSURE,
SUCCESS,
DROPPED
} uws_sendstatus_t;
/* Listen configuration: passed to uws_app_listen_with_config and echoed
 * back to uws_listen_handler callbacks. */
DLL_EXPORT typedef struct
{
int port;
const char *host;
int options;
} uws_app_listen_config_t;
/* Result of uws_res_try_end: whether the call succeeded (ok) and whether
 * the response has now fully responded (has_responded). */
DLL_EXPORT typedef struct {
bool ok;
bool has_responded;
} uws_try_end_result_t;
DLL_EXPORT struct uws_app_s;
DLL_EXPORT struct uws_req_s;
DLL_EXPORT struct uws_res_s;
DLL_EXPORT struct uws_websocket_s;
DLL_EXPORT struct uws_header_iterator_s;
DLL_EXPORT typedef struct uws_app_s uws_app_t;
DLL_EXPORT typedef struct uws_req_s uws_req_t;
DLL_EXPORT typedef struct uws_res_s uws_res_t;
DLL_EXPORT typedef struct uws_socket_context_s uws_socket_context_t;
DLL_EXPORT typedef struct uws_websocket_s uws_websocket_t;
DLL_EXPORT typedef void (*uws_websocket_handler)(uws_websocket_t *ws, void* user_data);
DLL_EXPORT typedef void (*uws_websocket_message_handler)(uws_websocket_t *ws, const char *message, size_t length, uws_opcode_t opcode, void* user_data);
DLL_EXPORT typedef void (*uws_websocket_ping_pong_handler)(uws_websocket_t *ws, const char *message, size_t length, void* user_data);
DLL_EXPORT typedef void (*uws_websocket_close_handler)(uws_websocket_t *ws, int code, const char *message, size_t length, void* user_data);
DLL_EXPORT typedef void (*uws_websocket_upgrade_handler)(uws_res_t *response, uws_req_t *request, uws_socket_context_t *context, void* user_data);
DLL_EXPORT typedef void (*uws_websocket_subscription_handler)(uws_websocket_t *ws, const char *topic_name, size_t topic_name_length, int new_number_of_subscriber, int old_number_of_subscriber, void* user_data);
/* Per-route WebSocket behavior: compression and limit settings plus the
 * event handler callbacks registered via uws_ws(). Unset handlers are NULL. */
DLL_EXPORT typedef struct
{
uws_compress_options_t compression;
/* Maximum message size we can receive */
unsigned int maxPayloadLength;
/* 2 minutes timeout is good */
unsigned short idleTimeout;
/* 64kb backpressure is probably good */
unsigned int maxBackpressure;
bool closeOnBackpressureLimit;
/* This one depends on kernel timeouts and is a bad default */
bool resetIdleTimeoutOnSend;
/* A good default, esp. for newcomers */
bool sendPingsAutomatically;
/* Maximum socket lifetime in seconds before forced closure (defaults to disabled) */
unsigned short maxLifetime;
uws_websocket_upgrade_handler upgrade;
uws_websocket_handler open;
uws_websocket_message_handler message;
uws_websocket_handler drain;
uws_websocket_ping_pong_handler ping;
uws_websocket_ping_pong_handler pong;
uws_websocket_close_handler close;
uws_websocket_subscription_handler subscription;
} uws_socket_behavior_t;
DLL_EXPORT typedef void (*uws_listen_handler)(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void *user_data);
DLL_EXPORT typedef void (*uws_listen_domain_handler)(struct us_listen_socket_t *listen_socket, const char* domain, size_t domain_length, int options, void *user_data);
DLL_EXPORT typedef void (*uws_method_handler)(uws_res_t *response, uws_req_t *request, void *user_data);
DLL_EXPORT typedef void (*uws_filter_handler)(uws_res_t *response, int, void *user_data);
DLL_EXPORT typedef void (*uws_missing_server_handler)(const char *hostname, size_t hostname_length, void *user_data);
DLL_EXPORT typedef void (*uws_get_headers_server_handler)(const char *header_name, size_t header_name_size, const char *header_value, size_t header_value_size, void *user_data);
//Basic HTTP
DLL_EXPORT uws_app_t *uws_create_app(int ssl, struct us_bun_socket_context_options_t options);
DLL_EXPORT void uws_app_destroy(int ssl, uws_app_t *app);
DLL_EXPORT void uws_app_get(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
DLL_EXPORT void uws_app_post(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
DLL_EXPORT void uws_app_options(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
DLL_EXPORT void uws_app_delete(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
DLL_EXPORT void uws_app_patch(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
DLL_EXPORT void uws_app_put(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
DLL_EXPORT void uws_app_head(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
DLL_EXPORT void uws_app_connect(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
DLL_EXPORT void uws_app_trace(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
DLL_EXPORT void uws_app_any(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
DLL_EXPORT void uws_app_run(int ssl, uws_app_t *);
DLL_EXPORT void uws_app_listen(int ssl, uws_app_t *app, int port, uws_listen_handler handler, void *user_data);
DLL_EXPORT void uws_app_listen_with_config(int ssl, uws_app_t *app, uws_app_listen_config_t config, uws_listen_handler handler, void *user_data);
DLL_EXPORT void uws_app_listen_domain(int ssl, uws_app_t *app, const char *domain, size_t domain_length, uws_listen_domain_handler handler, void *user_data);
DLL_EXPORT void uws_app_listen_domain_with_options(int ssl, uws_app_t *app, const char *domain,size_t domain_length, int options, uws_listen_domain_handler handler, void *user_data);
DLL_EXPORT void uws_app_domain(int ssl, uws_app_t *app, const char* server_name, size_t server_name_length);
DLL_EXPORT bool uws_constructor_failed(int ssl, uws_app_t *app);
DLL_EXPORT unsigned int uws_num_subscribers(int ssl, uws_app_t *app, const char *topic, size_t topic_length);
DLL_EXPORT bool uws_publish(int ssl, uws_app_t *app, const char *topic, size_t topic_length, const char *message, size_t message_length, uws_opcode_t opcode, bool compress);
DLL_EXPORT void *uws_get_native_handle(int ssl, uws_app_t *app);
DLL_EXPORT void uws_remove_server_name(int ssl, uws_app_t *app, const char *hostname_pattern, size_t hostname_pattern_length);
DLL_EXPORT void uws_add_server_name(int ssl, uws_app_t *app, const char *hostname_pattern, size_t hostname_pattern_length);
DLL_EXPORT void uws_add_server_name_with_options(int ssl, uws_app_t *app, const char *hostname_pattern, size_t hostname_pattern_length, struct us_bun_socket_context_options_t options);
DLL_EXPORT void uws_missing_server_name(int ssl, uws_app_t *app, uws_missing_server_handler handler, void *user_data);
DLL_EXPORT void uws_filter(int ssl, uws_app_t *app, uws_filter_handler handler, void *user_data);
//WebSocket
DLL_EXPORT void uws_ws(int ssl, uws_app_t *app, const char *pattern, uws_socket_behavior_t behavior, void* user_data);
DLL_EXPORT void *uws_ws_get_user_data(int ssl, uws_websocket_t *ws);
DLL_EXPORT void uws_ws_close(int ssl, uws_websocket_t *ws);
DLL_EXPORT uws_sendstatus_t uws_ws_send(int ssl, uws_websocket_t *ws, const char *message, size_t length, uws_opcode_t opcode);
DLL_EXPORT uws_sendstatus_t uws_ws_send_with_options(int ssl, uws_websocket_t *ws, const char *message, size_t length, uws_opcode_t opcode, bool compress, bool fin);
DLL_EXPORT uws_sendstatus_t uws_ws_send_fragment(int ssl, uws_websocket_t *ws, const char *message, size_t length, bool compress);
DLL_EXPORT uws_sendstatus_t uws_ws_send_first_fragment(int ssl, uws_websocket_t *ws, const char *message, size_t length, bool compress);
DLL_EXPORT uws_sendstatus_t uws_ws_send_first_fragment_with_opcode(int ssl, uws_websocket_t *ws, const char *message, size_t length, uws_opcode_t opcode, bool compress);
DLL_EXPORT uws_sendstatus_t uws_ws_send_last_fragment(int ssl, uws_websocket_t *ws, const char *message, size_t length, bool compress);
DLL_EXPORT void uws_ws_end(int ssl, uws_websocket_t *ws, int code, const char *message, size_t length);
DLL_EXPORT void uws_ws_cork(int ssl, uws_websocket_t *ws, void (*handler)(void *user_data), void *user_data);
DLL_EXPORT bool uws_ws_subscribe(int ssl, uws_websocket_t *ws, const char *topic, size_t length);
DLL_EXPORT bool uws_ws_unsubscribe(int ssl, uws_websocket_t *ws, const char *topic, size_t length);
DLL_EXPORT bool uws_ws_is_subscribed(int ssl, uws_websocket_t *ws, const char *topic, size_t length);
DLL_EXPORT void uws_ws_iterate_topics(int ssl, uws_websocket_t *ws, void (*callback)(const char *topic, size_t length, void *user_data), void *user_data);
DLL_EXPORT bool uws_ws_publish(int ssl, uws_websocket_t *ws, const char *topic, size_t topic_length, const char *message, size_t message_length);
DLL_EXPORT bool uws_ws_publish_with_options(int ssl, uws_websocket_t *ws, const char *topic, size_t topic_length, const char *message, size_t message_length, uws_opcode_t opcode, bool compress);
DLL_EXPORT unsigned int uws_ws_get_buffered_amount(int ssl, uws_websocket_t *ws);
DLL_EXPORT size_t uws_ws_get_remote_address(int ssl, uws_websocket_t *ws, const char **dest);
DLL_EXPORT size_t uws_ws_get_remote_address_as_text(int ssl, uws_websocket_t *ws, const char **dest);
DLL_EXPORT void uws_res_get_remote_address_info(uws_res_t *res, const char **dest, size_t *length, unsigned int *port);
//Response
DLL_EXPORT void uws_res_end(int ssl, uws_res_t *res, const char *data, size_t length, bool close_connection);
DLL_EXPORT uws_try_end_result_t uws_res_try_end(int ssl, uws_res_t *res, const char *data, size_t length, uint64_t total_size, bool close_connection);
DLL_EXPORT void uws_res_cork(int ssl, uws_res_t *res, void(*callback)(uws_res_t *res, void* user_data) ,void* user_data);
DLL_EXPORT void uws_res_pause(int ssl, uws_res_t *res);
DLL_EXPORT void uws_res_resume(int ssl, uws_res_t *res);
DLL_EXPORT void uws_res_write_continue(int ssl, uws_res_t *res);
DLL_EXPORT void uws_res_write_status(int ssl, uws_res_t *res, const char *status, size_t length);
DLL_EXPORT void uws_res_write_header(int ssl, uws_res_t *res, const char *key, size_t key_length, const char *value, size_t value_length);
DLL_EXPORT void uws_res_write_header_int(int ssl, uws_res_t *res, const char *key, size_t key_length, uint64_t value);
DLL_EXPORT void uws_res_end_without_body(int ssl, uws_res_t *res, bool close_connection);
DLL_EXPORT bool uws_res_write(int ssl, uws_res_t *res, const char *data, size_t length);
DLL_EXPORT uint64_t uws_res_get_write_offset(int ssl, uws_res_t *res);
DLL_EXPORT void uws_res_override_write_offset(int ssl, uws_res_t *res, uint64_t offset);
DLL_EXPORT bool uws_res_has_responded(int ssl, uws_res_t *res);
DLL_EXPORT void uws_res_on_writable(int ssl, uws_res_t *res, bool (*handler)(uws_res_t *res, uint64_t, void *optional_data), void *user_data);
DLL_EXPORT void uws_res_on_aborted(int ssl, uws_res_t *res, void (*handler)(uws_res_t *res, void *optional_data), void *optional_data);
DLL_EXPORT void uws_res_on_data(int ssl, uws_res_t *res, void (*handler)(uws_res_t *res, const char *chunk, size_t chunk_length, bool is_end, void *optional_data), void *optional_data);
DLL_EXPORT void uws_res_upgrade(int ssl, uws_res_t *res, void *data, const char *sec_web_socket_key, size_t sec_web_socket_key_length, const char *sec_web_socket_protocol, size_t sec_web_socket_protocol_length, const char *sec_web_socket_extensions, size_t sec_web_socket_extensions_length, uws_socket_context_t *ws);
DLL_EXPORT size_t uws_res_get_remote_address(int ssl, uws_res_t *res, const char **dest);
DLL_EXPORT size_t uws_res_get_remote_address_as_text(int ssl, uws_res_t *res, const char **dest);
#ifdef UWS_WITH_PROXY
DLL_EXPORT size_t uws_res_get_proxied_remote_address(int ssl, uws_res_t *res, const char **dest);
DLL_EXPORT size_t uws_res_get_proxied_remote_address_as_text(int ssl, uws_res_t *res, const char **dest);
#endif
DLL_EXPORT void *uws_res_get_native_handle(int ssl, uws_res_t *res);
//Request
DLL_EXPORT bool uws_req_is_ancient(uws_req_t *res);
DLL_EXPORT bool uws_req_get_yield(uws_req_t *res);
DLL_EXPORT void uws_req_set_yield(uws_req_t *res, bool yield);
DLL_EXPORT size_t uws_req_get_url(uws_req_t *res, const char **dest);
DLL_EXPORT size_t uws_req_get_full_url(uws_req_t *res, const char **dest);
DLL_EXPORT size_t uws_req_get_method(uws_req_t *res, const char **dest);
DLL_EXPORT size_t uws_req_get_case_sensitive_method(uws_req_t *res, const char **dest);
DLL_EXPORT size_t uws_req_get_header(uws_req_t *res, const char *lower_case_header, size_t lower_case_header_length, const char **dest);
DLL_EXPORT void uws_req_for_each_header(uws_req_t *res, uws_get_headers_server_handler handler, void *user_data);
DLL_EXPORT size_t uws_req_get_query(uws_req_t *res, const char *key, size_t key_length, const char **dest);
DLL_EXPORT size_t uws_req_get_parameter(uws_req_t *res, unsigned short index, const char **dest);
DLL_EXPORT struct us_loop_t *uws_get_loop();
DLL_EXPORT struct us_loop_t *uws_get_loop_with_native(void* existing_native_loop);
DLL_EXPORT void uws_loop_defer(struct us_loop_t *loop, void( cb(void *user_data) ), void *user_data);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -124,7 +124,7 @@ private:
/* Signal broken HTTP request only if we have a pending request */
if (httpResponseData->onAborted) {
httpResponseData->onAborted();
httpResponseData->onAborted((HttpResponse<SSL> *)s, httpResponseData->userData);
}
/* Destruct socket ext */
@@ -258,7 +258,7 @@ private:
}
/* We might respond in the handler, so do not change timeout after this */
httpResponseData->inStream(data, fin);
httpResponseData->inStream(static_cast<HttpResponse<SSL>*>(user), data.data(), data.length(), fin, httpResponseData->userData);
/* Was the socket closed? */
if (us_socket_is_closed(SSL, (struct us_socket_t *) user)) {
@@ -366,7 +366,7 @@ private:
/* We expect the developer to return whether or not write was successful (true).
* If write was never called, the developer should still return true so that we may drain. */
bool success = httpResponseData->callOnWritable(httpResponseData->offset);
bool success = httpResponseData->callOnWritable((HttpResponse<SSL> *)asyncSocket, httpResponseData->offset);
/* The developer indicated that their onWritable failed. */
if (!success) {

View File

@@ -558,10 +558,11 @@ public:
}
/* Attach handler for writable HTTP response */
HttpResponse *onWritable(MoveOnlyFunction<bool(uint64_t)> &&handler) {
HttpResponse *onWritable(void* userData, HttpResponseData<SSL>::OnWritableCallback handler) {
HttpResponseData<SSL> *httpResponseData = getHttpResponseData();
httpResponseData->onWritable = std::move(handler);
httpResponseData->userData = userData;
httpResponseData->onWritable = handler;
return this;
}
@@ -574,10 +575,11 @@ public:
}
/* Attach handler for aborted HTTP request */
HttpResponse *onAborted(MoveOnlyFunction<void()> &&handler) {
HttpResponse *onAborted(void* userData, HttpResponseData<SSL>::OnAbortedCallback handler) {
HttpResponseData<SSL> *httpResponseData = getHttpResponseData();
httpResponseData->onAborted = std::move(handler);
httpResponseData->userData = userData;
httpResponseData->onAborted = handler;
return this;
}
HttpResponse* clearOnWritableAndAborted() {
@@ -594,9 +596,10 @@ public:
return this;
}
/* Attach a read handler for data sent. Will be called with FIN set true if last segment. */
void onData(MoveOnlyFunction<void(std::string_view, bool)> &&handler) {
void onData(void* userData, HttpResponseData<SSL>::OnDataCallback handler) {
HttpResponseData<SSL> *data = getHttpResponseData();
data->inStream = std::move(handler);
data->userData = userData;
data->inStream = handler;
/* Always reset this counter here */
data->received_bytes_per_timeout = 0;

View File

@@ -33,6 +33,10 @@ struct HttpResponseData : AsyncSocketData<SSL>, HttpParser {
template <bool> friend struct HttpResponse;
template <bool> friend struct HttpContext;
public:
using OnWritableCallback = bool (*)(uWS::HttpResponse<SSL>*, uint64_t, void*);
using OnAbortedCallback = void (*)(uWS::HttpResponse<SSL>*, void*);
using OnDataCallback = void (*)(uWS::HttpResponse<SSL>* response, const char* chunk, size_t chunk_length, bool, void*);
/* When we are done with a response we mark it like so */
void markDone() {
onAborted = nullptr;
@@ -46,15 +50,15 @@ struct HttpResponseData : AsyncSocketData<SSL>, HttpParser {
}
/* Caller of onWritable. It is possible onWritable calls markDone so we need to borrow it. */
bool callOnWritable(uint64_t offset) {
bool callOnWritable( uWS::HttpResponse<SSL>* response, uint64_t offset) {
/* Borrow real onWritable */
MoveOnlyFunction<bool(uint64_t)> borrowedOnWritable = std::move(onWritable);
auto* borrowedOnWritable = std::move(onWritable);
/* Set onWritable to placeholder */
onWritable = [](uint64_t) {return true;};
onWritable = [](uWS::HttpResponse<SSL>*, uint64_t, void*) {return true;};
/* Run borrowed onWritable */
bool ret = borrowedOnWritable(offset);
bool ret = borrowedOnWritable(response, offset, userData);
/* If we still have onWritable (the placeholder) then move back the real one */
if (onWritable) {
@@ -74,10 +78,13 @@ struct HttpResponseData : AsyncSocketData<SSL>, HttpParser {
HTTP_CONNECTION_CLOSE = 16 // used
};
/* Shared context pointer */
void* userData = nullptr;
/* Per socket event handlers */
MoveOnlyFunction<bool(uint64_t)> onWritable;
MoveOnlyFunction<void()> onAborted;
MoveOnlyFunction<void(std::string_view, bool)> inStream; // onData
OnWritableCallback onWritable = nullptr;
OnAbortedCallback onAborted = nullptr;
OnDataCallback inStream = nullptr;
/* Outgoing offset */
uint64_t offset = 0;

View File

@@ -20,6 +20,8 @@
#ifndef UWS_PERMESSAGEDEFLATE_H
#define UWS_PERMESSAGEDEFLATE_H
#define UWS_USE_LIBDEFLATE 1
#include <cstdint>
#include <cstring>
@@ -134,6 +136,9 @@ struct ZlibContext {
struct DeflationStream {
z_stream deflationStream = {};
#ifdef UWS_USE_LIBDEFLATE
unsigned char reset_buffer[4096 + 1];
#endif
DeflationStream(CompressOptions compressOptions) {
@@ -154,13 +159,11 @@ struct DeflationStream {
/* Run a fast path in case of shared_compressor */
if (reset) {
size_t written = 0;
static unsigned char buf[1024 + 1];
written = libdeflate_deflate_compress(zlibContext->compressor, raw.data(), raw.length(), buf, 1024);
written = libdeflate_deflate_compress(zlibContext->compressor, raw.data(), raw.length(), reset_buffer, 4096);
if (written) {
memcpy(&buf[written], "\x00", 1);
return std::string_view((char *) buf, written + 1);
memcpy(&reset_buffer[written], "\x00", 1);
return std::string_view((char *) reset_buffer, written + 1);
}
}
#endif
@@ -214,6 +217,9 @@ struct DeflationStream {
struct InflationStream {
z_stream inflationStream = {};
#ifdef UWS_USE_LIBDEFLATE
char buf[4096];
#endif
InflationStream(CompressOptions compressOptions) {
/* Inflation windowBits are the top 8 bits of the 16 bit compressOptions */
@@ -230,13 +236,12 @@ struct InflationStream {
#ifdef UWS_USE_LIBDEFLATE
/* Try fast path first */
size_t written = 0;
static char buf[1024];
/* We have to pad 9 bytes and restore those bytes when done since 9 is more than 6 of next WebSocket message */
char tmp[9];
memcpy(tmp, (char *) compressed.data() + compressed.length(), 9);
memcpy((char *) compressed.data() + compressed.length(), "\x00\x00\xff\xff\x01\x00\x00\xff\xff", 9);
libdeflate_result res = libdeflate_deflate_decompress(zlibContext->decompressor, compressed.data(), compressed.length() + 9, buf, 1024, &written);
libdeflate_result res = libdeflate_deflate_decompress(zlibContext->decompressor, compressed.data(), compressed.length() + 9, buf, 4096, &written);
memcpy((char *) compressed.data() + compressed.length(), tmp, 9);
if (res == 0) {

View File

@@ -3,9 +3,9 @@ param(
)
$ErrorActionPreference = 'Stop'
. (Join-Path $PSScriptRoot "env.ps1")
if ($env:CI) {
. (Join-Path $PSScriptRoot "env.ps1")
if ($env:CI -eq "true") {
& (Join-Path $PSScriptRoot "update-submodules.ps1")
}
@@ -79,6 +79,10 @@ Build-Dependency `
-Script "lshpack" `
-Outputs @("lshpack.lib")
Build-Dependency `
-Script "libdeflate" `
-Outputs @("deflate.lib")
if (!($Script:DidAnything)) {
Write-Host "(run with -Force to rebuild all)"
}

View File

@@ -3,7 +3,7 @@ set -eo pipefail
source "$(dirname -- "${BASH_SOURCE[0]}")/env.sh"
if [[ "$CI" ]]; then
$(dirname -- "${BASH_SOURCE[0]}")/update-submodules.sh
$(dirname -- "${BASH_SOURCE[0]}")/update-submodules.sh
fi
FORCE=
@@ -36,9 +36,11 @@ fi
dep() {
local submodule="$1"
local script="$2"
CACHE_KEY=
if [ "$CACHE" == "1" ]; then
CACHE_KEY="$submodule/$(echo "$SUBMODULES" | grep "$submodule" | git hash-object --stdin)"
local hash="$(echo "$SUBMODULES" | grep "$submodule" | awk '{print $1}')"
local os="$(uname -s | tr '[:upper:]' '[:lower:]')"
local arch="$(uname -m)"
CACHE_KEY="$submodule/$hash-$os-$arch-$CPU_TARGET"
fi
if [ -z "$FORCE" ]; then
HAS_ALL_DEPS=1
@@ -92,6 +94,7 @@ dep mimalloc mimalloc libmimalloc.a libmimalloc.o
dep tinycc tinycc libtcc.a
dep zlib zlib libz.a
dep zstd zstd libzstd.a
dep libdeflate libdeflate libdeflate.a
dep ls-hpack lshpack liblshpack.a
if [ "$BUILT_ANY" -eq 0 ]; then

View File

@@ -3,6 +3,7 @@ $ErrorActionPreference = 'Stop' # Setting strict mode, similar to 'set -euo pip
Push-Location (Join-Path $BUN_DEPS_DIR 'boringssl')
try {
Remove-Item -ErrorAction SilentlyContinue -Recurse -Force build
Set-Location (mkdir -Force build)
Run cmake @CMAKE_FLAGS ..

View File

@@ -1,29 +1,29 @@
param (
[switch] $Baseline = $False,
[switch] $Fast = $False
)
$ErrorActionPreference = 'Stop' # Setting strict mode, similar to 'set -euo pipefail' in bash
$Tag = If ($Baseline) { "-Baseline" } Else { "" }
$UseBaselineBuild = If ($Baseline) { "ON" } Else { "OFF" }
$UseLto = If ($Fast) { "OFF" } Else { "ON" }
. (Join-Path $PSScriptRoot "env.ps1")
if ($env:CI -eq "true") {
$env:FORCE_UPDATE_SUBMODULES = "1"
& (Join-Path $PSScriptRoot "update-submodules.ps1")
& (Join-Path $PSScriptRoot "build-libuv.ps1") -CloneOnly $True
}
# $CANARY_REVISION = if (Test-Path build/.canary_revision) { Get-Content build/.canary_revision } else { "0" }
$CANARY_REVISION = 0
.\scripts\env.ps1 $Tag
.\scripts\update-submodules.ps1
.\scripts\build-libuv.ps1 -CloneOnly $True
cd build
cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release `
cmake .. @CMAKE_FLAGS `
-G Ninja `
-DCMAKE_BUILD_TYPE=Release `
-DNO_CODEGEN=0 `
-DNO_CONFIGURE_DEPENDS=1 `
"-DUSE_BASELINE_BUILD=${UseBaselineBuild}" `
"-DUSE_LTO=${UseLto}" `
"-DCANARY=${CANARY_REVISION}" `
-DBUN_CPP_ONLY=1 $Flags
-DBUN_CPP_ONLY=1
if ($LASTEXITCODE -ne 0) { throw "CMake configuration failed" }
.\compile-cpp-only.ps1 -v
if ($LASTEXITCODE -ne 0) { throw "C++ compilation failed" }
.\compile-cpp-only.ps1 -v -j $env:CPUS
if ($LASTEXITCODE -ne 0) { throw "C++ compilation failed" }
# HACK: For some reason, the buildkite agent is hanging when uploading bun-cpp-objects.a
# Best guess is that there is an issue when uploading files larger than 500 MB
#
# For now, use FileSplitter to split the file into smaller chunks:
# https://www.powershellgallery.com/packages/FileSplitter/1.3
if ($env:BUILDKITE) {
Split-File -Path (Resolve-Path "bun-cpp-objects.a") -PartSizeBytes "50MB" -Verbose
}

View File

@@ -1,48 +0,0 @@
#!/usr/bin/env bash
set -exo pipefail
source $(dirname -- "${BASH_SOURCE[0]}")/env.sh
export USE_LTO="${USE_LTO:-ON}"
case "$(uname -m)" in
aarch64|arm64)
export CPU_TARGET="${CPU_TARGET:-native}"
;;
*)
export CPU_TARGET="${CPU_TARGET:-haswell}"
;;
esac
while [[ $# -gt 0 ]]; do
case "$1" in
--fast|--no-lto)
export USE_LTO="OFF"
shift
;;
--baseline)
export CPU_TARGET="nehalem"
shift
;;
--cpu)
export CPU_TARGET="$2"
shift
shift
;;
*|-*|--*)
echo "Unknown option $1"
exit 1
;;
esac
done
mkdir -p build
cd build
mkdir -p tmp_modules tmp_functions js codegen
cmake .. \
-GNinja \
-DCMAKE_BUILD_TYPE=Release \
-DUSE_LTO=${USE_LTO} \
-DCPU_TARGET=${CPU_TARGET} \
-DBUN_CPP_ONLY=1 \
-DNO_CONFIGURE_DEPENDS=1
chmod +x ./compile-cpp-only.sh
bash ./compile-cpp-only.sh -v

View File

@@ -1,95 +0,0 @@
#!/usr/bin/env bash
set -exo pipefail
source $(dirname -- "${BASH_SOURCE[0]}")/env.sh
cwd=$(pwd)
zig=
if [[ "$CI" ]]; then
# Since the zig build depends on files from the zig submodule,
# make sure to update the submodule before building.
git submodule update --init --recursive --progress --depth=1 --checkout src/deps/zig
# Also update the correct version of zig in the submodule.
$(dirname -- "${BASH_SOURCE[0]}")/download-zig.sh
fi
if [ -f "$cwd/.cache/zig/zig" ]; then
zig="$cwd/.cache/zig/zig"
else
zig=$(which zig)
fi
ZIG_OPTIMIZE="${ZIG_OPTIMIZE:-ReleaseFast}"
CANARY="${CANARY:-0}"
GIT_SHA="${GIT_SHA:-$(git rev-parse HEAD)}"
BUILD_MACHINE_ARCH="${BUILD_MACHINE_ARCH:-$(uname -m)}"
DOCKER_MACHINE_ARCH=""
if [[ "$BUILD_MACHINE_ARCH" == "x86_64" || "$BUILD_MACHINE_ARCH" == "amd64" ]]; then
BUILD_MACHINE_ARCH="x86_64"
DOCKER_MACHINE_ARCH="amd64"
elif [[ "$BUILD_MACHINE_ARCH" == "aarch64" || "$BUILD_MACHINE_ARCH" == "arm64" ]]; then
BUILD_MACHINE_ARCH="aarch64"
DOCKER_MACHINE_ARCH="arm64"
fi
TARGET_OS="${1:-linux}"
TARGET_ARCH="${2:-x64}"
TARGET_CPU="${3:-${CPU_TARGET:-native}}"
BUILDARCH=""
if [[ "$TARGET_ARCH" == "x64" || "$TARGET_ARCH" == "x86_64" || "$TARGET_ARCH" == "amd64" ]]; then
TARGET_ARCH="x86_64"
BUILDARCH="amd64"
elif [[ "$TARGET_ARCH" == "aarch64" || "$TARGET_ARCH" == "arm64" ]]; then
TARGET_ARCH="aarch64"
BUILDARCH="arm64"
fi
TRIPLET=""
if [[ "$TARGET_OS" == "linux" ]]; then
TRIPLET="$TARGET_ARCH-linux-gnu"
elif [[ "$TARGET_OS" == "darwin" ]]; then
TRIPLET="$TARGET_ARCH-macos-none"
elif [[ "$TARGET_OS" == "windows" ]]; then
TRIPLET="$TARGET_ARCH-windows-msvc"
fi
echo "--- Building identifier-cache"
$zig run src/js_lexer/identifier_data.zig
echo "--- Building node-fallbacks"
cd src/node-fallbacks
bun install --frozen-lockfile
bun run build
cd "$cwd"
echo "--- Building codegen"
bun install --frozen-lockfile
make runtime_js fallback_decoder bun_error
echo "--- Building modules"
mkdir -p build
bun run src/codegen/bundle-modules.ts --debug=OFF build
echo "--- Building zig"
cd build
cmake .. \
-GNinja \
-DCMAKE_BUILD_TYPE=Release \
-DUSE_LTO=ON \
-DZIG_OPTIMIZE="${ZIG_OPTIMIZE}" \
-DGIT_SHA="${GIT_SHA}" \
-DARCH="${TARGET_ARCH}" \
-DBUILDARCH="${BUILDARCH}" \
-DCPU_TARGET="${TARGET_CPU}" \
-DZIG_TARGET="${TRIPLET}" \
-DASSERTIONS="OFF" \
-DWEBKIT_DIR="omit" \
-DNO_CONFIGURE_DEPENDS=1 \
-DNO_CODEGEN=1 \
-DBUN_ZIG_OBJ_DIR="$cwd/build" \
-DCANARY="$CANARY" \
-DZIG_LIB_DIR=src/deps/zig/lib
ONLY_ZIG=1 ninja "$cwd/build/bun-zig.o" -v

View File

@@ -0,0 +1,16 @@
$ErrorActionPreference = 'Stop' # Setting strict mode, similar to 'set -euo pipefail' in bash
. (Join-Path $PSScriptRoot "env.ps1")
Push-Location (Join-Path $BUN_DEPS_DIR 'libdeflate')
try {
Remove-Item CMakeCache.txt, CMakeFiles, build -Recurse -ErrorAction SilentlyContinue
mkdir -Force build
Run cmake -S "." -B build @CMAKE_FLAGS -DLIBDEFLATE_BUILD_STATIC_LIB=ON -DLIBDEFLATE_BUILD_SHARED_LIB=OFF -DLIBDEFLATE_BUILD_GZIP=OFF
Run cmake --build build --clean-first --config Release
# In https://github.com/ebiggers/libdeflate/releases/tag/v1.20, it's outputting libdeflate.a even on Windows
Copy-Item build/deflatestatic.lib $BUN_DEPS_OUT_DIR/deflate.lib
Write-Host "-> deflate.lib"
} finally { Pop-Location }

10
scripts/build-libdeflate.sh Executable file
View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
set -exo pipefail
source $(dirname -- "${BASH_SOURCE[0]}")/env.sh
mkdir -p $BUN_DEPS_OUT_DIR
cd $BUN_DEPS_DIR/libdeflate
rm -rf build CMakeCache.txt CMakeFiles
cmake "${CMAKE_FLAGS[@]}" -DLIBDEFLATE_BUILD_STATIC_LIB=ON -DLIBDEFLATE_BUILD_SHARED_LIB=OFF -DLIBDEFLATE_BUILD_GZIP=OFF -B build -S . -G Ninja
ninja libdeflate.a -C build
cp build/libdeflate.a $BUN_DEPS_OUT_DIR/libdeflate.a

View File

@@ -3,6 +3,7 @@ $ErrorActionPreference = 'Stop' # Setting strict mode, similar to 'set -euo pip
Push-Location (Join-Path $BUN_DEPS_DIR 'mimalloc')
try {
Remove-Item -ErrorAction SilentlyContinue -Recurse -Force build
Set-Location (mkdir -Force build)
Run cmake .. @CMAKE_FLAGS `

View File

@@ -20,8 +20,6 @@ try {
Run clang-cl -DTCC_TARGET_PE -DTCC_TARGET_X86_64 config.h -DC2STR -o c2str.exe conftest.c
Run .\c2str.exe .\include\tccdefs.h tccdefs_.h
$Baseline = $env:BUN_DEV_ENV_SET -eq "Baseline=True"
Run clang-cl @($env:CFLAGS -split ' ') libtcc.c -o tcc.obj "-DTCC_TARGET_PE" "-DTCC_TARGET_X86_64" "-O2" "-W2" "-Zi" "-MD" "-GS-" "-c" "-MT"
Run llvm-lib "tcc.obj" "-OUT:tcc.lib"

File diff suppressed because it is too large Load Diff

View File

@@ -1,17 +1,13 @@
param (
[switch] $Baseline = $False,
[switch] $Fast = $False
param(
[switch]$Baseline = $false
)
$ErrorActionPreference = 'Stop' # Setting strict mode, similar to 'set -euo pipefail' in bash
$Target = If ($Baseline) { "windows-x64-baseline" } Else { "windows-x64" }
$Tag = "bun-$Target"
$TagSuffix = If ($Baseline) { "-Baseline" } Else { "" }
$UseBaselineBuild = If ($Baseline) { "ON" } Else { "OFF" }
$UseLto = If ($Fast) { "OFF" } Else { "ON" }
.\scripts\env.ps1 $TagSuffix
. (Join-Path $PSScriptRoot "env.ps1")
mkdir -Force build
buildkite-agent artifact download "**" build --step "${Target}-build-zig"
@@ -21,29 +17,24 @@ mv -Force -ErrorAction SilentlyContinue build\build\bun-deps\* build\bun-deps
mv -Force -ErrorAction SilentlyContinue build\build\* build
Set-Location build
$CANARY_REVISION = 0
cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release `
# HACK: See scripts/build-bun-cpp.ps1
Join-File -Path "$(Resolve-Path .)\bun-cpp-objects.a" -Verbose -DeletePartFiles
cmake .. @CMAKE_FLAGS `
-G Ninja `
-DCMAKE_BUILD_TYPE=Release `
-DNO_CODEGEN=1 `
-DNO_CONFIGURE_DEPENDS=1 `
"-DCPU_TARGET=${CPU_TARGET}" `
"-DCANARY=${CANARY_REVISION}" `
-DBUN_LINK_ONLY=1 `
"-DUSE_BASELINE_BUILD=${UseBaselineBuild}" `
"-DUSE_LTO=${UseLto}" `
"-DBUN_DEPS_OUT_DIR=$(Resolve-Path bun-deps)" `
"-DBUN_CPP_ARCHIVE=$(Resolve-Path bun-cpp-objects.a)" `
"-DBUN_ZIG_OBJ_DIR=$(Resolve-Path .)" `
"$Flags"
"-DBUN_ZIG_OBJ_DIR=$(Resolve-Path .)"
if ($LASTEXITCODE -ne 0) { throw "CMake configuration failed" }
ninja -v
ninja -v -j $env:CPUS
if ($LASTEXITCODE -ne 0) { throw "Link failed!" }
ls
if ($Fast) {
$Tag = "$Tag-nolto"
}
Set-Location ..
$Dist = mkdir -Force "${Tag}"
cp -r build\bun.exe "$Dist\bun.exe"

View File

@@ -1,80 +0,0 @@
#!/usr/bin/env bash
set -exo pipefail
source $(dirname -- "${BASH_SOURCE[0]}")/env.sh
export USE_LTO="${USE_LTO:-ON}"
case "$(uname -m)" in
aarch64|arm64)
export CPU_TARGET="${CPU_TARGET:-native}"
;;
*)
export CPU_TARGET="${CPU_TARGET:-haswell}"
;;
esac
export TAG=""
while [[ $# -gt 0 ]]; do
case "$1" in
--tag)
export TAG="$2"
shift
shift
;;
--fast|--no-lto)
export USE_LTO="OFF"
shift
;;
--baseline)
export CPU_TARGET="nehalem"
shift
;;
--cpu)
export CPU_TARGET="$2"
shift
shift
;;
*|-*|--*)
echo "Unknown option $1"
exit 1
;;
esac
done
if [[ -z "$TAG" ]]; then
echo "--tag <name> is required"
exit 1
fi
rm -rf release
mkdir -p release
buildkite-agent artifact download '**' release --step $TAG-build-deps
buildkite-agent artifact download '**' release --step $TAG-build-zig
buildkite-agent artifact download '**' release --step $TAG-build-cpp
cd release
cmake .. \
-GNinja \
-DCMAKE_BUILD_TYPE=Release \
-DCPU_TARGET=${CPU_TARGET} \
-DUSE_LTO=${USE_LTO} \
-DBUN_LINK_ONLY=1 \
-DBUN_ZIG_OBJ_DIR="$(pwd)/build" \
-DBUN_CPP_ARCHIVE="$(pwd)/build/bun-cpp-objects.a" \
-DBUN_DEPS_OUT_DIR="$(pwd)/build/bun-deps" \
-DNO_CONFIGURE_DEPENDS=1
ninja -v
if [[ "${USE_LTO}" == "OFF" ]]; then
TAG="${TAG}-nolto"
fi
chmod +x bun-profile bun
mkdir -p bun-$TAG-profile/ bun-$TAG/
mv bun-profile bun-$TAG-profile/bun-profile
mv bun bun-$TAG/bun
zip -r bun-$TAG-profile.zip bun-$TAG-profile
zip -r bun-$TAG.zip bun-$TAG
cd ..
mv release/bun-$TAG.zip bun-$TAG.zip
mv release/bun-$TAG-profile.zip bun-$TAG-profile.zip

View File

@@ -1,11 +1,3 @@
param(
[switch]$Baseline = $false
)
if ($ENV:BUN_DEV_ENV_SET -eq "Baseline=True") {
$Baseline = $true
}
$ErrorActionPreference = 'Stop' # Setting strict mode, similar to 'set -euo pipefail' in bash
# this is the environment script for building bun's dependencies
@@ -38,13 +30,19 @@ if($Env:VSCMD_ARG_TGT_ARCH -eq "x86") {
throw "Visual Studio environment is targetting 32 bit. This configuration is definetly a mistake."
}
$ENV:BUN_DEV_ENV_SET = "Baseline=$Baseline";
$BUN_BASE_DIR = if ($env:BUN_BASE_DIR) { $env:BUN_BASE_DIR } else { Join-Path $ScriptDir '..' }
$BUN_DEPS_DIR = if ($env:BUN_DEPS_DIR) { $env:BUN_DEPS_DIR } else { Join-Path $BUN_BASE_DIR 'src\deps' }
$BUN_DEPS_OUT_DIR = if ($env:BUN_DEPS_OUT_DIR) { $env:BUN_DEPS_OUT_DIR } else { Join-Path $BUN_BASE_DIR 'build\bun-deps' }
$CPUS = if ($env:CPUS) { $env:CPUS } else { (Get-CimInstance -Class Win32_Processor).NumberOfCores }
$Lto = if ($env:USE_LTO) { $env:USE_LTO -eq "1" } else { $True }
$Baseline = if ($env:USE_BASELINE_BUILD) {
$env:USE_BASELINE_BUILD -eq "1"
} elseif ($env:BUILDKITE_STEP_KEY -match "baseline") {
$True
} else {
$False
}
$CC = "clang-cl"
$CXX = "clang-cl"
@@ -52,7 +50,7 @@ $CXX = "clang-cl"
$CFLAGS = '/O2 /Z7 /MT /O2 /Ob2 /DNDEBUG /U_DLL'
$CXXFLAGS = '/O2 /Z7 /MT /O2 /Ob2 /DNDEBUG /U_DLL'
if ($env:USE_LTO -eq "1") {
if ($Lto) {
$CXXFLAGS += " -fuse-ld=lld -flto -Xclang -emit-llvm-bc"
$CFLAGS += " -fuse-ld=lld -flto -Xclang -emit-llvm-bc"
}
@@ -63,6 +61,14 @@ $env:CPU_TARGET = $CPU_NAME
$CFLAGS += " -march=${CPU_NAME}"
$CXXFLAGS += " -march=${CPU_NAME}"
$Canary = If ($env:CANARY) {
$env:CANARY
} ElseIf ($env:BUILDKITE -eq "true") {
(buildkite-agent meta-data get canary)
} Else {
"1"
}
$CMAKE_FLAGS = @(
"-GNinja",
"-DCMAKE_BUILD_TYPE=Release",
@@ -72,15 +78,15 @@ $CMAKE_FLAGS = @(
"-DCMAKE_CXX_FLAGS=$CXXFLAGS",
"-DCMAKE_C_FLAGS_RELEASE=$CFLAGS",
"-DCMAKE_CXX_FLAGS_RELEASE=$CXXFLAGS",
"-DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded"
"-DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded",
"-DCANARY=$Canary"
)
if ($env:USE_LTO -eq "1") {
if (Get-Command lld-lib -ErrorAction SilentlyContinue) {
$AR = Get-Command lld-lib -ErrorAction SilentlyContinue
$env:AR = $AR
$CMAKE_FLAGS += "-DCMAKE_AR=$AR"
}
if (Get-Command llvm-lib -ErrorAction SilentlyContinue) {
$AR_CMD = Get-Command llvm-lib -ErrorAction SilentlyContinue
$AR = $AR_CMD.Path
$env:AR = $AR
$CMAKE_FLAGS += "-DCMAKE_AR=$AR"
}
$env:CC = "clang-cl"
@@ -93,6 +99,10 @@ if ($Baseline) {
$CMAKE_FLAGS += "-DUSE_BASELINE_BUILD=ON"
}
if ($Lto) {
$CMAKE_FLAGS += "-DUSE_LTO=ON"
}
if (Get-Command sccache -ErrorAction SilentlyContinue) {
# Continue with local compiler if sccache has an error
$env:SCCACHE_IGNORE_SERVER_IO_ERROR = "1"

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Hack for Buildkite sometimes not having the right path
# Hack for buildkite sometimes not having the right path
if [[ "${CI:-}" == "1" || "${CI:-}" == "true" ]]; then
if [ -f ~/.bashrc ]; then
source ~/.bashrc
@@ -24,12 +24,7 @@ export BUN_DEPS_OUT_DIR=${BUN_DEPS_OUT_DIR:-$BUN_BASE_DIR/build/bun-deps}
export LC_CTYPE="en_US.UTF-8"
export LC_ALL="en_US.UTF-8"
if [[ "$CI" != "1" && "$CI" != "true" ]]; then
if [ -f $SCRIPT_DIR/env.local ]; then
echo "Sourcing $SCRIPT_DIR/env.local"
source $SCRIPT_DIR/env.local
fi
elif [[ $(uname -s) == 'Darwin' ]]; then
if [[ $(uname -s) == 'Darwin' ]]; then
export CXX="$(brew --prefix llvm)@$LLVM_VERSION/bin/clang++"
export CC="$(brew --prefix llvm)@$LLVM_VERSION/bin/clang"
export AR="$(brew --prefix llvm)@$LLVM_VERSION/bin/llvm-ar"
@@ -37,6 +32,11 @@ elif [[ $(uname -s) == 'Darwin' ]]; then
export LIBTOOL="$(brew --prefix llvm)@$LLVM_VERSION/bin/llvm-libtool-darwin"
export PATH="$(brew --prefix llvm)@$LLVM_VERSION/bin:$PATH"
ln -sf $LIBTOOL "$(brew --prefix llvm)@$LLVM_VERSION/bin/libtool" || true
elif [[ "$CI" != "1" && "$CI" != "true" ]]; then
if [[ -f $SCRIPT_DIR/env.local ]]; then
echo "Sourcing $SCRIPT_DIR/env.local"
source $SCRIPT_DIR/env.local
fi
fi
# this compiler detection could be better
@@ -76,7 +76,7 @@ fi
# https://gitlab.kitware.com/cmake/cmake/-/issues/25755
if [[ $(uname -s) == 'Darwin' && $LLVM_VERSION == '18' ]]; then
export CFLAGS="$CFLAGS -fno-define-target-os-macros "
export CXXFLAGS="$CXXFLAGS -fno-define-target-os-macros "
export CXXFLAGS="$CXXFLAGS -fno-define-target-os-macros -D_LIBCXX_ENABLE_ASSERTIONS=0 -D_LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_NONE "
fi
# libarchive needs position-independent executables to compile successfully
@@ -120,7 +120,6 @@ fi
if [[ $(uname -s) == 'Darwin' ]]; then
export CMAKE_OSX_DEPLOYMENT_TARGET=${CMAKE_OSX_DEPLOYMENT_TARGET:-13.0}
CMAKE_FLAGS+=(-DCMAKE_OSX_DEPLOYMENT_TARGET=${CMAKE_OSX_DEPLOYMENT_TARGET})
export CFLAGS="$CFLAGS -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET} -D__DARWIN_NON_CANCELABLE=1 "
export CXXFLAGS="$CXXFLAGS -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET} -D__DARWIN_NON_CANCELABLE=1 "

View File

@@ -6,16 +6,162 @@ import { copyFileSync, existsSync, mkdirSync, mkdtempSync, readFileSync, readdir
import { basename, dirname, join } from "node:path";
import { tmpdir } from "node:os";
const projectPath = dirname(import.meta.dirname);
const vendorPath = process.env.BUN_VENDOR_PATH || join(projectPath, "vendor");
const isWindows = process.platform === "win32";
const isMacOS = process.platform === "darwin";
const isLinux = process.platform === "linux";
const cwd = dirname(import.meta.dirname);
const spawnSyncTimeout = 1000 * 60;
const spawnTimeout = 1000 * 60 * 3;
/**
* @typedef {Object} S3UploadOptions
* @property {string} [bucket]
* @property {string} filename
* @property {string} content
* @property {Record<string, string>} [headers]
*/
/**
* @param {S3UploadOptions} options
*/
async function uploadFileToS3(options) {
const { AwsV4Signer } = await import("aws4fetch");
const { bucket, filename, content, ...extra } = options;
const baseUrl = getEnv(["S3_ENDPOINT", "S3_BASE_URL", "AWS_ENDPOINT"], "https://s3.amazonaws.com");
const bucketUrl = new URL(bucket || getEnv(["S3_BUCKET", "AWS_BUCKET"]), baseUrl);
const signer = new AwsV4Signer({
accessKeyId: getSecret(["S3_ACCESS_KEY_ID", "AWS_ACCESS_KEY_ID"]),
secretAccessKey: getSecret(["S3_SECRET_ACCESS_KEY", "AWS_SECRET_ACCESS_KEY"]),
url: new URL(filename, bucketUrl),
method: "PUT",
body: content,
...extra,
});
const { url, method, headers, body } = signer.sign();
await fetchSafe(url, {
method,
headers,
body,
});
console.log("Uploaded file to S3:", {
url: `${bucketUrl}`,
filename,
});
}
/**
* @typedef {Object} SentryRelease
* @property {string} organizationId
* @property {string} projectId
* @property {string} version
* @property {string} [url]
* @property {string} [ref]
* @property {string} [dateReleased]
*/
/**
* @param {SentryRelease} options
* @returns {Promise<void>}
*/
async function createSentryRelease(options) {
const { organizationId, projectId, ...body } = options;
const baseUrl = getEnv("SENTRY_BASE_URL", "https://sentry.io");
const url = new URL(`api/0/organizations/${organizationId}/releases`, baseUrl);
const accessToken = getSecret(["SENTRY_AUTH_TOKEN", "SENTRY_TOKEN"]);
const release = await fetchSafe(url, {
method: "POST",
headers: {
"Authorization": `Bearer ${accessToken}`,
"Content-Type": "application/json",
},
body: JSON.stringify(body),
format: "json",
});
console.log("Created Sentry release:", release);
}
/**
* @return {string}
*/
function getGithubToken() {
const token = getEnv("GITHUB_TOKEN", null);
if (token) {
return token;
}
const gh = which("gh");
if (gh) {
const { exitCode, stdout } = spawnSyncSafe(gh, ["auth", "token"]);
if (exitCode === 0) {
return stdout.trim();
}
}
throw new Error("Failed to get GitHub token (set GITHUB_TOKEN or run `gh auth login`)");
}
/**
* @param {string | string[]} name
* @return {string}
*/
function getSecret(name) {
return getEnv(name);
}
/**
* @param {string | string[]} name
* @param {string | null} [defaultValue]
* @returns {string | undefined}
*/
function getEnv(name, defaultValue) {
let result = defaultValue;
for (const key of typeof name === "string" ? [name] : name) {
const value = process.env[key];
if (value) {
result = value;
break;
}
}
if (result || result === null) {
return result;
}
throw new Error(`Environment variable is required: ${name}`);
}
/**
* @typedef {Object} SpawnOptions
* @property {boolean} [throwOnError]
* @property {string} [cwd]
* @property {string} [env]
* @property {string} [encoding]
* @property {number} [timeout]
*/
/**
* @typedef {Object} SpawnResult
* @property {number | null} exitCode
* @property {number | null} signalCode
* @property {string} stdout
* @property {string} stderr
*/
/**
* @param {string} command
* @param {string[]} [args]
* @param {SpawnOptions} [options]
* @returns {Promise<SpawnResult>}
*/
async function spawnSafe(command, args, options = {}) {
const result = new Promise((resolve, reject) => {
let stdout = "";
@@ -60,6 +206,12 @@ async function spawnSafe(command, args, options = {}) {
}
}
/**
* @param {string} command
* @param {string[]} [args]
* @param {SpawnOptions} [options]
* @returns {SpawnResult}
*/
function spawnSyncSafe(command, args, options = {}) {
try {
const { error, status, signal, stdout, stderr } = spawnSync(command, args, {
@@ -86,6 +238,20 @@ function spawnSyncSafe(command, args, options = {}) {
}
}
/**
* @typedef {Object} FetchOptions
* @property {string} [method]
* @property {Record<string, string>} [headers]
* @property {string | Uint8Array} [body]
* @property {"json" | "text" | "bytes"} [format]
* @property {boolean} [throwOnError]
*/
/**
* @param {string | URL} url
* @param {FetchOptions} [options]
* @returns {Promise<Response | string | Uint8Array>}
*/
async function fetchSafe(url, options = {}) {
let response;
try {
@@ -138,47 +304,6 @@ function which(command, path) {
return result.trimEnd();
}
function getZigTarget(os = process.platform, arch = process.arch) {
if (arch === "x64") {
if (os === "linux") return "linux-x86_64";
if (os === "darwin") return "macos-x86_64";
if (os === "win32") return "windows-x86_64";
}
if (arch === "arm64") {
if (os === "linux") return "linux-aarch64";
if (os === "darwin") return "macos-aarch64";
}
throw new Error(`Unsupported zig target: os=${os}, arch=${arch}`);
}
function getRecommendedZigVersion() {
const scriptPath = join(projectPath, "build.zig");
try {
const scriptContent = readFileSync(scriptPath, "utf-8");
const match = scriptContent.match(/recommended_zig_version = "([^"]+)"/);
if (!match) {
throw new Error("File does not contain string: 'recommended_zig_version'");
}
return match[1];
} catch (cause) {
throw new Error("Failed to find recommended Zig version", { cause });
}
}
/**
* @returns {Promise<string>}
*/
async function getLatestZigVersion() {
try {
const response = await fetchSafe("https://ziglang.org/download/index.json", { format: "json" });
const { master } = response;
const { version } = master;
return version;
} catch (cause) {
throw new Error("Failed to get latest Zig version", { cause });
}
}
/**
* @param {string} execPath
* @returns {string | undefined}
@@ -191,110 +316,3 @@ function getVersion(execPath) {
}
return result.trim();
}
/**
* @returns {string}
*/
function getTmpdir() {
if (isMacOS && existsSync("/tmp")) {
return "/tmp";
}
return tmpdir();
}
/**
* @returns {string}
*/
function mkTmpdir() {
return mkdtempSync(join(getTmpdir(), "bun-"));
}
/**
* @param {string} url
* @param {string} [path]
* @returns {Promise<string>}
*/
async function downloadFile(url, path) {
const outPath = path || join(mkTmpdir(), basename(url));
const bytes = await fetchSafe(url, { format: "bytes" });
mkdirSync(dirname(outPath), { recursive: true });
writeFileSync(outPath, bytes);
return outPath;
}
/**
* @param {string} tarPath
* @param {string} [path]
* @returns {Promise<string>}
*/
async function extractFile(tarPath, path) {
const outPath = path || join(mkTmpdir(), basename(tarPath));
mkdirSync(outPath, { recursive: true });
await spawnSafe("tar", ["-xf", tarPath, "-C", outPath, "--strip-components=1"]);
return outPath;
}
const dependencies = [
{
name: "zig",
version: getRecommendedZigVersion(),
download: downloadZig,
},
];
async function getDependencyPath(name) {
let dependency;
for (const entry of dependencies) {
if (name === entry.name) {
dependency = entry;
break;
}
}
if (!dependency) {
throw new Error(`Unknown dependency: ${name}`);
}
const { version, download } = dependency;
mkdirSync(vendorPath, { recursive: true });
for (const path of readdirSync(vendorPath)) {
if (!path.startsWith(name)) {
continue;
}
const dependencyPath = join(vendorPath, path);
const dependencyVersion = getVersion(dependencyPath);
if (dependencyVersion === version) {
return dependencyPath;
}
}
if (!download) {
throw new Error(`Dependency not found: ${name}`);
}
return await download(version);
}
/**
* @param {string} [version]
*/
async function downloadZig(version) {
const target = getZigTarget();
const expectedVersion = version || getRecommendedZigVersion();
const url = `https://ziglang.org/builds/zig-${target}-${expectedVersion}.tar.xz`;
const tarPath = await downloadFile(url);
const extractedPath = await extractFile(tarPath);
const zigPath = join(extractedPath, exePath("zig"));
const actualVersion = getVersion(zigPath);
const outPath = join(vendorPath, exePath(`zig-${actualVersion}`));
mkdirSync(dirname(outPath), { recursive: true });
copyFileSync(zigPath, outPath);
return outPath;
}
/**
* @param {string} path
* @returns {string}
*/
function exePath(path) {
return isWindows ? `${path}.exe` : path;
}
const execPath = await getDependencyPath("zig");
console.log(execPath);

View File

@@ -26,7 +26,7 @@ import { normalize as normalizeWindows } from "node:path/win32";
import { isIP } from "node:net";
import { parseArgs } from "node:util";
const spawnTimeout = 30_000;
const spawnTimeout = 5_000;
const testTimeout = 3 * 60_000;
const integrationTimeout = 5 * 60_000;
@@ -104,15 +104,19 @@ async function printInfo() {
console.log("Glibc:", getGlibcVersion());
}
console.log("Hostname:", getHostname());
if (isCloud) {
console.log("Public IP:", await getPublicIp());
console.log("Cloud:", getCloud());
}
if (isCI) {
console.log("CI:", getCI());
console.log("Shard:", options["shard"], "/", options["max-shards"]);
console.log("Build URL:", getBuildUrl());
console.log("Environment:", process.env);
if (isCloud) {
console.log("Public IP:", await getPublicIp());
console.log("Cloud:", getCloud());
}
const tailscaleIp = await getTailscaleIp();
if (tailscaleIp) {
console.log("Tailscale IP:", tailscaleIp);
}
}
console.log("Cwd:", cwd);
console.log("Tmpdir:", tmpPath);
@@ -136,6 +140,30 @@ async function runTests() {
}
console.log("Bun:", execPath);
for (let i = 0; i < 10; i++) {
try {
const { error } = spawnSync(execPath, ["--version"], {
encoding: "utf-8",
timeout: spawnTimeout,
env: {
PATH: process.env.PATH,
BUN_DEBUG_QUIET_LOGS: 1,
},
});
if (!error) {
break;
}
throw error;
} catch (error) {
const { code } = error;
if (code === "EBUSY" || code === "UNKNOWN") {
console.log(`Bun appears to be busy, retrying... [code: ${code}]`);
await new Promise(resolve => setTimeout(resolve, 1000 * (i + 1)));
continue;
}
}
}
const revision = getRevision(execPath);
console.log("Revision:", revision);
@@ -231,18 +259,20 @@ async function runTests() {
*/
/**
* @param {SpawnOptions} request
* @param {SpawnOptions} options
* @returns {Promise<SpawnResult>}
*/
async function spawnSafe({
command,
args,
cwd,
env,
timeout = spawnTimeout,
stdout = process.stdout.write.bind(process.stdout),
stderr = process.stderr.write.bind(process.stderr),
}) {
async function spawnSafe(options) {
const {
command,
args,
cwd,
env,
timeout = spawnTimeout,
stdout = process.stdout.write.bind(process.stdout),
stderr = process.stderr.write.bind(process.stderr),
retries = 0,
} = options;
let exitCode;
let signalCode;
let spawnError;
@@ -318,6 +348,16 @@ async function spawnSafe({
resolve();
}
});
if (spawnError && retries < 5) {
const { code } = spawnError;
if (code === "EBUSY" || code === "UNKNOWN") {
await new Promise(resolve => setTimeout(resolve, 1000 * (retries + 1)));
return spawnSafe({
...options,
retries: retries + 1,
});
}
}
let error;
if (exitCode === 0) {
// ...
@@ -961,7 +1001,7 @@ async function getExecPathFromBuildKite(target) {
if (isWindows) {
await spawnSafe({
command: "powershell",
args: ["-Command", `Expand-Archive -Path ${zipPath} -DestinationPath ${releasePath}`],
args: ["-Command", `Expand-Archive -Path ${zipPath} -DestinationPath ${releasePath} -Force`],
});
} else {
await spawnSafe({
@@ -1286,6 +1326,26 @@ async function getPublicIp() {
}
}
/**
* @returns {string | undefined}
*/
function getTailscaleIp() {
try {
const { status, stdout } = spawnSync("tailscale", ["ip", "--1"], {
encoding: "utf-8",
timeout: spawnTimeout,
env: {
PATH: process.env.PATH,
},
});
if (status === 0) {
return stdout.trim();
}
} catch {
// ...
}
}
/**
* @param {...string} paths
* @returns {string}
@@ -1332,7 +1392,7 @@ function formatTestToMarkdown(result, concise) {
let markdown = "";
for (const { testPath, ok, tests, error, stdoutPreview: stdout } of results) {
if (ok) {
if (ok || error === "SIGTERM") {
continue;
}

View File

@@ -12,8 +12,13 @@ try {
if (!($WebKit) -and (-not (Test-Path "src/bun.js/WebKit/.git"))) {
$Names = $Names | Where-Object { $_ -ne 'src/bun.js/WebKit' }
}
if ($env:FORCE_UPDATE_SUBMODULES -eq "1") {
# Set --force in CI.
git submodule update --init --recursive --progress --depth 1 --checkout --force @NAMES
} else {
git submodule update --init --recursive --progress --depth 1 --checkout @NAMES
}
git submodule update --init --recursive --progress --depth 1 --checkout @NAMES
if ($LASTEXITCODE -ne 0) {
throw "git submodule update failed"
}

View File

@@ -12,3 +12,9 @@ fi
set -exo pipefail
git submodule update --init --recursive --progress --depth=1 --checkout $NAMES
if [ "$FORCE_UPDATE_SUBMODULES" == "1" ]; then
# Set --force in CI.
git submodule update --init --recursive --progress --depth=1 --checkout --force $NAMES
else
git submodule update --init --recursive --progress --depth=1 --checkout $NAMES
fi

View File

@@ -12,6 +12,7 @@ TINYCC=$(git rev-parse HEAD:./src/deps/tinycc)
C_ARES=$(git rev-parse HEAD:./src/deps/c-ares)
ZSTD=$(git rev-parse HEAD:./src/deps/zstd)
LSHPACK=$(git rev-parse HEAD:./src/deps/ls-hpack)
LIBDEFLATE=$(git rev-parse HEAD:./src/deps/libdeflate)
rm -rf src/generated_versions_list.zig
echo "// AUTO-GENERATED FILE. Created via .scripts/write-versions.sh" >src/generated_versions_list.zig
@@ -26,6 +27,7 @@ echo "pub const zlib = \"$ZLIB_VERSION\";" >>src/generated_versions_list.zig
echo "pub const tinycc = \"$TINYCC\";" >>src/generated_versions_list.zig
echo "pub const lolhtml = \"$LOLHTML\";" >>src/generated_versions_list.zig
echo "pub const c_ares = \"$C_ARES\";" >>src/generated_versions_list.zig
echo "pub const libdeflate = \"$LIBDEFLATE\";" >>src/generated_versions_list.zig
echo "pub const zstd = \"$ZSTD\";" >>src/generated_versions_list.zig
echo "pub const lshpack = \"$LSHPACK\";" >>src/generated_versions_list.zig
echo "" >>src/generated_versions_list.zig

View File

@@ -101,11 +101,6 @@ pub fn runExitCallbacks() void {
on_exit_callbacks.items.len = 0;
}
/// Flushes stdout and stderr and exits with the given code.
pub fn exit(code: u8) noreturn {
exitWide(@as(u32, code));
}
var is_exiting = std.atomic.Value(bool).init(false);
export fn bun_is_exiting() c_int {
return @intFromBool(isExiting());
@@ -114,36 +109,25 @@ pub fn isExiting() bool {
return is_exiting.load(.monotonic);
}
pub fn exitWide(code: u32) noreturn {
/// Flushes stdout and stderr (in exit/quick_exit callback) and exits with the given code.
pub fn exit(code: u32) noreturn {
is_exiting.store(true, .monotonic);
if (comptime Environment.isMac) {
std.c.exit(@bitCast(code));
// If we are crashing, allow the crash handler to finish it's work.
bun.crash_handler.sleepForeverIfAnotherThreadIsCrashing();
switch (Environment.os) {
.mac => std.c.exit(@bitCast(code)),
else => bun.C.quick_exit(@bitCast(code)),
}
bun.C.quick_exit(@bitCast(code));
}
pub fn raiseIgnoringPanicHandler(sig: anytype) noreturn {
if (comptime @TypeOf(sig) == bun.SignalCode) {
return raiseIgnoringPanicHandler(@intFromEnum(sig));
}
pub fn raiseIgnoringPanicHandler(sig: bun.SignalCode) noreturn {
Output.flush();
if (!Environment.isWindows) {
if (sig >= 1 and sig != std.posix.SIG.STOP and sig != std.posix.SIG.KILL) {
const act = std.posix.Sigaction{
.handler = .{ .sigaction = @ptrCast(@alignCast(std.posix.SIG.DFL)) },
.mask = std.posix.empty_sigset,
.flags = 0,
};
std.posix.sigaction(@intCast(sig), &act, null) catch {};
}
}
Output.Source.Stdio.restore();
_ = std.c.raise(sig);
bun.crash_handler.resetSegfaultHandler();
_ = std.c.raise(@intFromEnum(sig));
std.c.abort();
}
@@ -171,11 +155,9 @@ pub inline fn configureAllocator(_: AllocatorConfiguration) void {
// if (!config.long_running) Mimalloc.mi_option_set(Mimalloc.mi_option_reset_delay, 0);
}
pub const panic = Output.panic; // deprecated
pub fn notimpl() noreturn {
@setCold(true);
Global.panic("Not implemented yet!!!!!", .{});
Output.panic("Not implemented yet!!!!!", .{});
}
// Make sure we always print any leftover

View File

@@ -2,6 +2,8 @@ const std = @import("std");
const bun = @import("root").bun;
const unicode = std.unicode;
const js_ast = bun.JSAst;
pub const NodeIndex = u32;
pub const NodeIndexNone = 4294967293;
@@ -100,6 +102,8 @@ pub const Index = packed struct(u32) {
/// The maps can be merged quickly by creating a single outer array containing
/// all inner arrays from all parsed files.
pub const Ref = packed struct(u64) {
pub const Int = u31;
inner_index: Int = 0,
tag: enum(u2) {
@@ -114,6 +118,10 @@ pub const Ref = packed struct(u64) {
/// Represents a null state without using an extra bit
pub const None = Ref{ .inner_index = 0, .source_index = 0, .tag = .invalid };
comptime {
bun.assert(None.isEmpty());
}
pub inline fn isEmpty(this: Ref) bool {
return this.asU64() == 0;
}
@@ -121,12 +129,6 @@ pub const Ref = packed struct(u64) {
pub const ArrayHashCtx = RefHashCtx;
pub const HashCtx = RefCtx;
pub const Int = std.meta.Int(.unsigned, (64 - 2) / 2);
pub fn toInt(value: anytype) Int {
return @as(Int, @intCast(value));
}
pub fn isSourceIndexNull(this: anytype) bool {
return this == std.math.maxInt(Int);
}
@@ -140,13 +142,41 @@ pub const Ref = packed struct(u64) {
writer,
"Ref[inner={d}, src={d}, .{s}]",
.{
ref.sourceIndex(),
ref.innerIndex(),
ref.sourceIndex(),
@tagName(ref.tag),
},
);
}
pub fn dump(ref: Ref, symbol_table: anytype) std.fmt.Formatter(dumpImpl) {
return .{ .data = .{
.ref = ref,
.symbol_table = switch (@TypeOf(symbol_table)) {
*const std.ArrayList(js_ast.Symbol) => symbol_table.items,
*std.ArrayList(js_ast.Symbol) => symbol_table.items,
[]const js_ast.Symbol => symbol_table,
[]js_ast.Symbol => symbol_table,
else => |T| @compileError("Unsupported type to Ref.dump: " ++ @typeName(T)),
},
} };
}
fn dumpImpl(data: struct { ref: Ref, symbol_table: []const js_ast.Symbol }, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
const symbol = data.symbol_table[data.ref.inner_index];
try std.fmt.format(
writer,
"Ref[inner={d}, src={d}, .{s}; original_name={s}, uses={d}]",
.{
data.ref.inner_index,
data.ref.source_index,
@tagName(data.ref.tag),
symbol.original_name,
symbol.use_count_estimate,
},
);
}
pub fn isValid(this: Ref) bool {
return this.tag != .invalid;
}
@@ -177,11 +207,11 @@ pub const Ref = packed struct(u64) {
}
pub fn hash(key: Ref) u32 {
return @as(u32, @truncate(key.hash64()));
return @truncate(key.hash64());
}
pub inline fn asU64(key: Ref) u64 {
return @as(u64, @bitCast(key));
return @bitCast(key);
}
pub inline fn hash64(key: Ref) u64 {
@@ -192,9 +222,7 @@ pub const Ref = packed struct(u64) {
return ref.asU64() == other.asU64();
}
pub inline fn isNull(self: Ref) bool {
return self.tag == .invalid;
}
pub const isNull = isEmpty; // deprecated
pub fn jsonStringify(self: *const Ref, writer: anytype) !void {
return try writer.write([2]u32{ self.sourceIndex(), self.innerIndex() });

View File

@@ -537,7 +537,7 @@ pub const FilePoll = struct {
}
};
const HiveArray = bun.HiveArray(FilePoll, 128).Fallback;
const HiveArray = bun.HiveArray(FilePoll, if (bun.heap_breakdown.enabled) 0 else 128).Fallback;
// We defer freeing FilePoll until the end of the next event loop iteration
// This ensures that we don't free a FilePoll before the next callback is called
@@ -548,9 +548,9 @@ pub const FilePoll = struct {
const log = Output.scoped(.FilePoll, false);
pub fn init(allocator: std.mem.Allocator) Store {
pub fn init() Store {
return .{
.hive = HiveArray.init(allocator),
.hive = HiveArray.init(bun.typedAllocator(FilePoll)),
};
}

View File

@@ -316,9 +316,9 @@ pub const FilePoll = struct {
const log = Output.scoped(.FilePoll, false);
pub fn init(allocator: std.mem.Allocator) Store {
pub fn init() Store {
return .{
.hive = HiveArray.init(allocator),
.hive = HiveArray.init(bun.typedAllocator(FilePoll)),
};
}

View File

@@ -12,7 +12,7 @@ const mimalloc = bun.Mimalloc;
const BrotliAllocator = struct {
pub fn alloc(_: ?*anyopaque, len: usize) callconv(.C) *anyopaque {
if (bun.heap_breakdown.enabled) {
const zone = bun.heap_breakdown.getZone(BrotliAllocator);
const zone = bun.heap_breakdown.getZone("brotli");
return zone.malloc_zone_malloc(len) orelse bun.outOfMemory();
}
@@ -21,7 +21,7 @@ const BrotliAllocator = struct {
pub fn free(_: ?*anyopaque, data: ?*anyopaque) callconv(.C) void {
if (bun.heap_breakdown.enabled) {
const zone = bun.heap_breakdown.getZone(BrotliAllocator);
const zone = bun.heap_breakdown.getZone("brotli");
zone.malloc_zone_free(data);
return;
}

View File

@@ -869,7 +869,6 @@ pub const Formatter = struct {
pub const ZigFormatter = struct {
formatter: *ConsoleObject.Formatter,
global: *JSGlobalObject,
value: JSValue,
pub const WriteError = error{UhOh};
@@ -878,9 +877,8 @@ pub const Formatter = struct {
defer {
self.formatter.remaining_values = &[_]JSValue{};
}
self.formatter.globalThis = self.global;
self.formatter.format(
Tag.get(self.value, self.global),
Tag.get(self.value, self.formatter.globalThis),
@TypeOf(writer),
writer,
self.value,
@@ -1283,6 +1281,7 @@ pub const Formatter = struct {
writer.writeAll(end);
// then skip the second % so we dont hit it again
slice = slice[@min(slice.len, i + 1)..];
len = @truncate(slice.len);
i = 0;
continue;
},
@@ -1295,7 +1294,7 @@ pub const Formatter = struct {
slice = slice[@min(slice.len, i + 1)..];
i = 0;
hit_percent = true;
len = @as(u32, @truncate(slice.len));
len = @truncate(slice.len);
const next_value = this.remaining_values[0];
this.remaining_values = this.remaining_values[1..];
@@ -2310,7 +2309,7 @@ pub const Formatter = struct {
.Object,
Writer,
writer_,
toJSONFunction.callWithThis(this.globalThis, value, &.{}),
toJSONFunction.call(this.globalThis, value, &.{}),
.Object,
enable_ansi_colors,
);
@@ -2325,7 +2324,7 @@ pub const Formatter = struct {
.Object,
Writer,
writer_,
toJSONFunction.callWithThis(this.globalThis, value, &.{}),
toJSONFunction.call(this.globalThis, value, &.{}),
.Object,
enable_ansi_colors,
);
@@ -2574,7 +2573,7 @@ pub const Formatter = struct {
},
.toJSON => {
if (value.get(this.globalThis, "toJSON")) |func| {
const result = func.callWithThis(this.globalThis, value, &.{});
const result = func.call(this.globalThis, value, &.{});
if (result.toError() == null) {
const prev_quote_keys = this.quote_keys;
this.quote_keys = true;

Some files were not shown because too many files have changed in this diff Show More