Mirror of https://github.com/oven-sh/bun (synced 2026-02-03 15:38:46 +00:00)

Compare commits: ci-testing...jarred/byt (286 commits)
Commit SHAs in this comparison:

bfe7e711eb, 5108e3e0d9, bd3e62df40, 1668fde0a9, 12174e0577, c50f8d82d5, d30767ea68, 6b30c1b30d, b64f1e15b5, 5f6015bb79,
f123814d87, ef4bcb314c, fd2ad27b6f, 03de99afcf, 9ba63eb522, bac38b8967, 76c4145f0e, adb54f1849, 0f4aa68575, 6555248a04,
2f19b71e0f, 9076b369f0, 9cb203f229, 5650ed470c, fc99dd27e3, 4d61637e8a, dc03f0283c, aed0f58dfc, a37694cec2, b52f9923e2,
1bed7a7fd1, 59eb5515c5, 682b3730a1, bd3c258af4, 9faaa9b982, bd2eb40a39, f3ed9eac4a, b55670ddb7, 39eecc7757, 6faf657e32,
e48369ddab, 743f40b473, b4e552dbeb, 952d44b675, 8cb0b5db21, 1976e5bc00, f520715622, a4264cef23, 09f002934c, 89dfe9beb6,
4ac415f58d, acd8567fa0, ba2ea6fbb2, 36c621b6b1, bab5fec95f, e6b30a90de, fea302ee1d, 2ffcccc5b4, 11d7a9d5e9, 55cdf69415,
ac8f9052a2, 5a525d3042, 6fd06dd023, df9d18659c, d8ac4c59ff, 3309a8479c, 3896b0e29f, c4f4d7c872, ebdd678da5, 8195aa4c96,
ad638a6bea, 22176cb9bb, cf3caba3dc, 7529cd76b5, 9eeef3f5df, 2b1a10629b, 0a37423baf, 1a9307da08, b005ef43d4, 078fdd3787,
dc58c42453, b53c25e5f8, e97c65fd1e, 5a108c5027, 0f1d5d5dab, 6415cc3e92, 8d34846d19, 781998cf00, 02a75070fb, ac8db43485,
94ee538dc6, 9cdda49485, 2c84840222, dafa9946e4, 74d5b93ffc, 886c31f0c5, 1bac09488d, 83a256013f, 384988f26c, fe62a61404,
ef8fd12e43, 999324a50c, 8ace981fbc, 02ff16d95c, 1d188dbc55, f16d802eb1, eb8ed27a4a, 5eb053fa3b, f9af7be5ae, 1367e5e85a,
d55b5cc169, fa2e00f109, 9993d72fee, fd75ca7585, a53db001db, 1a5c05adca, 58d02e467f, 63596c3f8c, 996847bcad, 33c91fe3fa,
7fd072f4af, 15a8e72790, 64d77e33f6, babc907bfe, 83c5d8a942, 5a8e98cec2, d4237b0757, 766a9cf4f2, 98a709fb1b, 715ff7f323,
df1744f0da, 5bd344281f, b70458c3e4, 3f686222d4, 36fc324523, a5bd94f582, 4fae1b4475, 2fa60f2d12, 6d79edaa15, dc2929d4e1,
5bc45e2721, fe7f5fa731, 30edb594a8, 1961a9acc8, 9482a0afdf, a1312066b3, 85a3299115, 3ea71a9672, bf945f6dbb, a366135bd2,
eec5abd0da, cede04b019, cf1863236a, bd3517197c, 2c93e917a9, 5e6b509100, c229da8d9a, 4304368fc0, 460d6edbda, 9628ee76fc,
9fd6a04460, a13a020d4c, 3a245dd248, b972ed6540, dfa3a9a369, 444766833c, f7d459eea5, 7d018fb323, 5f08478229, d861347dc5,
1eb5ecb563, 6661ab6022, 23aa4f2959, 9302b42919, b9ead441c1, 24dbef7713, 28c40babd2, 35465d3a29, 7aaf935711, bfca627dfa,
22e37a5c8d, 960514364e, 98078e7639, 62d973f19f, 5cbb6926f5, 077ee55211, adb31c0752, ab55477c2d, ef23b8e60c, e6528f81c9,
1481cc2730, e6c87bddee, f7b2e2a795, 3aaa240233, 9d74b5bdc8, d74a192345, 76a3dc268d, d2c821bbf6, ff334da585, d96629e053,
c527058f14, 3efd445084, 84c91bf7e1, 9f7c6e34cb, d44969769f, ff0f9d5f4d, c63c55cbb1, 6d09772a13, df33f2b2a2, 923303047f,
3876ecfde8, 2680deb5d3, e1aadd0d7a, 7a6efad44e, 4ed0c36063, b75c605a75, 7da9e7c45d, 30d06dec47, 3674493aa4, cacbaba524,
0d7d789ebd, 1aa35089d6, 1de1745085, 639e9a83d5, 9db3379cc5, c5c55c7ce4, 43326b0b2d, 680f842948, 363a4934d0, 98f9e276b0,
ce1286efef, fd84ace83b, 483af7c33c, 6fbe3d8214, c552cb40d1, 63cf732ab4, 6303af3ce0, 9104bd7210, b5c91a4b7e, 82239371ab,
26526cba38, 214b3ccca0, ada020b69f, deb6ff5e6c, f25599a6e8, de64683b22, c6d508972f, 2f30e19835, 0081ab4738, 6f6ea0d6f3,
622432e843, 80eb6d00e8, b6715d2c64, f371a78568, c2cf528953, 9911407f26, 59c5c0fe48, e585f900c9, dc620ea837, 49ab4c147a,
b2a4df68c3, 4c0a1f2983, bec04c7341, a44b7e41d2, de5e56336c, 1c648063fa, 1c3354bc95, d5d4f53e82, 7ab4dc738f, ebc7045ca4,
848ad19d9e, 1da3436266, 49e496399a, 9b8340a5b3, 8efcc61a7b, 4d6480050c, fc2c134bc6, 4c4db1da37, 77e14c8482, fba5d65003,
c181cf45a7, 5aeb4d9f79, 1d9a8b4134, 30881444df, a2b4e3d4c2, e5662caa33
@@ -621,9 +621,7 @@ steps:
artifact_paths:
- "build\\bun-deps\\*.lib"
env:
SCCACHE_DIR: "$$HOME\\.cache\\sccache"
ZIG_LOCAL_CACHE_DIR: "$$HOME\\.cache\\zig-cache"
SCCACHE_IGNORE_SERVER_IO_ERROR: "1"
CCACHE_DISABLE: "1"
command:
- ".\\scripts\\all-dependencies.ps1"

@@ -646,6 +644,8 @@ steps:
# HACK: See scripts/build-bun-cpp.ps1
# - "build\\bun-cpp-objects.a"
- "build\\bun-cpp-objects.a.*"
env:
CCACHE_DISABLE: "1"
command:
- ".\\scripts\\build-bun-cpp.ps1"

@@ -664,9 +664,7 @@ steps:
- "bun-windows-x64-profile.zip"
- "features.json"
env:
SCCACHE_DIR: "$$HOME\\.cache\\sccache"
ZIG_LOCAL_CACHE_DIR: "$$HOME\\.cache\\zig-cache"
SCCACHE_IGNORE_SERVER_IO_ERROR: "1"
CCACHE_DISABLE: "1"
command:
- ".\\scripts\\buildkite-link-bun.ps1"

@@ -708,9 +706,7 @@ steps:
artifact_paths:
- "build\\bun-deps\\*.lib"
env:
SCCACHE_DIR: "$$HOME\\.cache\\sccache"
ZIG_LOCAL_CACHE_DIR: "$$HOME\\.cache\\zig-cache"
SCCACHE_IGNORE_SERVER_IO_ERROR: "1"
CCACHE_DISABLE: "1"
USE_BASELINE_BUILD: "1"
command:
- ".\\scripts\\all-dependencies.ps1"

@@ -735,9 +731,7 @@ steps:
# - "build\\bun-cpp-objects.a"
- "build\\bun-cpp-objects.a.*"
env:
SCCACHE_DIR: "$$HOME\\.cache\\sccache"
ZIG_LOCAL_CACHE_DIR: "$$HOME\\.cache\\zig-cache"
SCCACHE_IGNORE_SERVER_IO_ERROR: "1"
CCACHE_DISABLE: "1"
USE_BASELINE_BUILD: "1"
command:
- ".\\scripts\\build-bun-cpp.ps1"

@@ -757,9 +751,7 @@ steps:
- "bun-windows-x64-baseline-profile.zip"
- "features.json"
env:
SCCACHE_DIR: "$$HOME\\.cache\\sccache"
ZIG_LOCAL_CACHE_DIR: "$$HOME\\.cache\\zig-cache"
SCCACHE_IGNORE_SERVER_IO_ERROR: "1"
CCACHE_DISABLE: "1"
USE_BASELINE_BUILD: "1"
command:
- ".\\scripts\\buildkite-link-bun.ps1 -Baseline $$True"
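The hunks above appear to drop the per-step sccache and Zig cache variables from the Windows steps. For reference, a minimal sketch (not taken from the pipeline; paths are illustrative) of exporting the same variables in a shell before a build and confirming sccache picks them up:

```bash
#!/bin/bash
# Sketch only: export the cache locations the steps previously set inline,
# then verify sccache is reachable. Paths are illustrative.
export SCCACHE_DIR="$HOME/.cache/sccache"
export ZIG_LOCAL_CACHE_DIR="$HOME/.cache/zig-cache"
export SCCACHE_IGNORE_SERVER_IO_ERROR="1"   # don't fail the build if the sccache server hits I/O errors
export CCACHE_DISABLE="1"                   # these steps rely on sccache, not ccache

sccache --show-stats   # prints hit/miss counters; useful to confirm the cache dir is in effect
```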
@@ -52,4 +52,11 @@ for name in bun bun-profile; do
run_command mv "$name" "$dir/$name"
run_command zip -r "$dir.zip" "$dir"
source "$cwd/.buildkite/scripts/upload-artifact.sh" "$dir.zip"
# temporary disable this so CI can run
# this is failing because $name is now in $dir/$name and if changed to $dir/$name we get ENOENT reading "bun:internal-for-testing"
# if [ "$name" == "bun-profile" ]; then
# export BUN_FEATURE_FLAG_INTERNAL_FOR_TESTING="1"
# run_command "./$name" -e "require('fs').writeFileSync('./features.json', JSON.stringify(require('bun:internal-for-testing').crash_handler.getFeatureData()))"
# source "$cwd/.buildkite/scripts/upload-artifact.sh" "features.json"
# fi
done
@@ -1,8 +1,11 @@
#!/bin/bash

set -eo pipefail
export FORCE_UPDATE_SUBMODULES=1

# env.sh calls update_submodules.sh
source "$(dirname "$0")/env.sh"
source "$(realpath $(dirname "$0")/../../scripts/update-submodules.sh)"

{ set +x; } 2>/dev/null

function run_command() {
@@ -1,40 +1,53 @@
#!/bin/bash

set -eo pipefail
set -euo pipefail

export CMAKE_FLAGS=""
source "$(dirname "$0")/env.sh"

if [[ -n "$CMAKE_FLAGS" ]]; then
echo "CMAKE_FLAGS should not be empty"
exit 1
fi

function assert_target() {
local arch="${2-$(uname -m)}"
case "$(echo "$arch" | tr '[:upper:]' '[:lower:]')" in
x64 | x86_64 | amd64)
export ZIG_ARCH="x86_64"
if [[ "$BUILDKITE_STEP_KEY" == *"baseline"* ]]; then
export ZIG_CPU_TARGET="nehalem"
else
export ZIG_CPU_TARGET="haswell"
fi
;;
aarch64 | arm64)
export ZIG_ARCH="aarch64"
export ZIG_CPU_TARGET="native"
;;
*)
echo "error: Unsupported architecture: $arch" 1>&2
exit 1
;;
x64 | x86_64 | amd64)
export ZIG_ARCH="x86_64"
if [[ "$BUILDKITE_STEP_KEY" == *"baseline"* ]]; then
export ZIG_CPU_TARGET="nehalem"
else
export ZIG_CPU_TARGET="haswell"
fi
;;
aarch64 | arm64)
export ZIG_ARCH="aarch64"
export ZIG_CPU_TARGET="native"
;;
*)
echo "error: Unsupported architecture: $arch" 1>&2
exit 1
;;
esac
local os="${1-$(uname -s)}"
case "$(echo "$os" | tr '[:upper:]' '[:lower:]')" in
linux)
export ZIG_TARGET="$ZIG_ARCH-linux-gnu" ;;
darwin)
export ZIG_TARGET="$ZIG_ARCH-macos-none" ;;
windows)
export ZIG_TARGET="$ZIG_ARCH-windows-msvc" ;;
*)
echo "error: Unsupported operating system: $os" 1>&2
exit 1
;;
linux)
export ZIG_OS="linux"
export ZIG_TARGET="$ZIG_ARCH-linux-gnu"
;;
darwin)
export ZIG_OS="macos"
export ZIG_TARGET="$ZIG_ARCH-macos-none"
;;
windows)
export ZIG_OS="windows"
export ZIG_TARGET="$ZIG_ARCH-windows-msvc"
;;
*)
echo "error: Unsupported operating system: $os" 1>&2
exit 1
;;
esac
}

@@ -57,6 +70,13 @@ cwd="$(pwd)"
mkdir -p build
cd build

# in buildkite this script to compile for windows is run on a macos machine
# so the cmake windows detection for this logic is not ran
ZIG_OPTIMIZE="ReleaseFast"
if [[ "$ZIG_OS" == "windows" ]]; then
ZIG_OPTIMIZE="ReleaseSafe"
fi

run_command cmake .. "${CMAKE_FLAGS[@]}" \
-GNinja \
-DNO_CONFIGURE_DEPENDS="1" \

@@ -71,6 +91,7 @@ run_command cmake .. "${CMAKE_FLAGS[@]}" \
-DUSE_LTO="$USE_LTO" \
-DUSE_DEBUG_JSC="$USE_DEBUG_JSC" \
-DCANARY="$CANARY" \
-DZIG_OPTIMIZE="$ZIG_OPTIMIZE" \
-DGIT_SHA="$GIT_SHA"

export ONLY_ZIG="1"
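A condensed sketch of the target-selection logic in the case statements above (same target triples and CPU baselines as in the diff; it assumes `BUILDKITE_STEP_KEY` contains "baseline" for baseline builds):

```bash
#!/bin/bash
# Sketch: derive ZIG_ARCH / ZIG_OS / ZIG_CPU_TARGET / ZIG_TARGET from uname,
# mirroring the case statements shown in the hunks above.
arch="$(uname -m | tr '[:upper:]' '[:lower:]')"
case "$arch" in
  x64 | x86_64 | amd64)
    ZIG_ARCH="x86_64"
    # baseline builds target Nehalem (no AVX2); everything else targets Haswell
    if [[ "${BUILDKITE_STEP_KEY:-}" == *"baseline"* ]]; then
      ZIG_CPU_TARGET="nehalem"
    else
      ZIG_CPU_TARGET="haswell"
    fi ;;
  aarch64 | arm64)
    ZIG_ARCH="aarch64"
    ZIG_CPU_TARGET="native" ;;
esac

os="$(uname -s | tr '[:upper:]' '[:lower:]')"
case "$os" in
  linux)   ZIG_OS="linux";   ZIG_TARGET="$ZIG_ARCH-linux-gnu" ;;
  darwin)  ZIG_OS="macos";   ZIG_TARGET="$ZIG_ARCH-macos-none" ;;
  windows) ZIG_OS="windows"; ZIG_TARGET="$ZIG_ARCH-windows-msvc" ;;
esac

echo "target=$ZIG_TARGET cpu=$ZIG_CPU_TARGET os=$ZIG_OS"
```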
@@ -1,9 +1,9 @@
#!/bin/bash

set -eo pipefail
set -euo pipefail

function assert_buildkite_agent() {
if ! command -v buildkite-agent &> /dev/null; then
if ! command -v buildkite-agent &>/dev/null; then
echo "error: Cannot find buildkite-agent, please install it:"
echo "https://buildkite.com/docs/agent/v3/install"
exit 1

@@ -11,25 +11,38 @@ function assert_buildkite_agent() {
}

function download_buildkite_artifact() {
local path="$1"; shift
# Check if at least one argument is provided
if [ $# -eq 0 ]; then
echo "error: No path provided for artifact download"
exit 1
fi

local path="$1"
shift
local split="0"
local args=()
while true; do
if [ -z "$1" ]; then
break
fi

while [ $# -gt 0 ]; do
case "$1" in
--split) split="1"; shift ;;
*) args+=("$1"); shift ;;
--split)
split="1"
shift
;;
*)
args+=("$1")
shift
;;
esac
done

if [ "$split" == "1" ]; then
run_command buildkite-agent artifact download "$path.*" . "${args[@]}"
run_command cat $path.?? > "$path"
run_command rm -f $path.??
run_command buildkite-agent artifact download "$path.*" . "${args[@]:-}"
run_command cat "$path".?? >"$path"
run_command rm -f "$path".??
else
run_command buildkite-agent artifact download "$path" . "${args[@]}"
run_command buildkite-agent artifact download "$path" . "${args[@]:-}"
fi

if [[ "$path" != *"*"* ]] && [ ! -f "$path" ]; then
echo "error: Could not find artifact: $path"
exit 1
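The `--split` path relies on the upload side producing two-digit numeric suffixes that the `"$path".??` glob reads back in order. A self-contained sketch of that round trip, assuming GNU `split` and an illustrative artifact name:

```bash
#!/bin/bash
set -euo pipefail
# Sketch: split a large artifact into 50 MB numbered chunks, then reassemble
# and verify it, the same way the download path above cats "$path".?? together.
path="bun-cpp-objects.a"            # illustrative artifact name
split -b 50MB -d "$path" "$path."   # produces bun-cpp-objects.a.00, .01, ...

sha_before="$(shasum "$path" | cut -d' ' -f1)"
cat "$path".?? > "$path.rejoined"
sha_after="$(shasum "$path.rejoined" | cut -d' ' -f1)"

[ "$sha_before" == "$sha_after" ] && echo "round trip OK"
rm -f "$path".?? "$path.rejoined"
```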
@@ -1,32 +1,55 @@
#!/bin/bash

set -eo pipefail
set -euo pipefail

BUILDKITE_REPO=${BUILDKITE_REPO:-}
BUILDKITE_CLEAN_CHECKOUT=${BUILDKITE_CLEAN_CHECKOUT:-}
BUILDKITE_BRANCH=${BUILDKITE_BRANCH:-}
CCACHE_DIR=${CCACHE_DIR:-}
SCCACHE_DIR=${SCCACHE_DIR:-}
ZIG_LOCAL_CACHE_DIR=${ZIG_LOCAL_CACHE_DIR:-}
ZIG_GLOBAL_CACHE_DIR=${ZIG_GLOBAL_CACHE_DIR:-}
BUN_DEPS_CACHE_DIR=${BUN_DEPS_CACHE_DIR:-}
BUN_DEPS_CACHE_DIR=${BUN_DEPS_CACHE_DIR:-}
BUILDKITE_STEP_KEY=${BUILDKITE_STEP_KEY:-}

ROOT_DIR="$(realpath "$(dirname "$0")/../../")"

# Fail if we cannot find the root directory
if [ ! -d "$ROOT_DIR" ]; then
echo "error: Cannot find root directory: '$ROOT_DIR'" 1>&2
exit 1
fi

function assert_os() {
local os="$(uname -s)"
case "$os" in
Linux)
echo "linux" ;;
Darwin)
echo "darwin" ;;
*)
echo "error: Unsupported operating system: $os" 1>&2
exit 1
;;
Linux)
echo "linux"
;;
Darwin)
echo "darwin"
;;
*)
echo "error: Unsupported operating system: $os" 1>&2
exit 1
;;
esac
}

function assert_arch() {
local arch="$(uname -m)"
case "$arch" in
aarch64 | arm64)
echo "aarch64" ;;
x86_64 | amd64)
echo "x64" ;;
*)
echo "error: Unknown architecture: $arch" 1>&2
exit 1
;;
aarch64 | arm64)
echo "aarch64"
;;
x86_64 | amd64)
echo "x64"
;;
*)
echo "error: Unknown architecture: $arch" 1>&2
exit 1
;;
esac
}

@@ -63,7 +86,7 @@ function assert_build() {
}

function assert_buildkite_agent() {
if ! command -v buildkite-agent &> /dev/null; then
if (! command -v buildkite-agent &>/dev/null); then
echo "error: Cannot find buildkite-agent, please install it:"
echo "https://buildkite.com/docs/agent/v3/install"
exit 1

@@ -71,13 +94,29 @@ function assert_buildkite_agent() {
}

function export_environment() {
source "$(realpath $(dirname "$0")/../../scripts/env.sh)"
source "${ROOT_DIR}/scripts/env.sh"
source "${ROOT_DIR}/scripts/update-submodules.sh"

{ set +x; } 2>/dev/null
export GIT_SHA="$BUILDKITE_COMMIT"
export CCACHE_DIR="$HOME/.cache/ccache/$BUILDKITE_STEP_KEY"
export SCCACHE_DIR="$HOME/.cache/sccache/$BUILDKITE_STEP_KEY"
export ZIG_LOCAL_CACHE_DIR="$HOME/.cache/zig-cache/$BUILDKITE_STEP_KEY"
export BUN_DEPS_CACHE_DIR="$HOME/.cache/bun-deps/$BUILDKITE_STEP_KEY"
if [ "$BUILDKITE_CLEAN_CHECKOUT" == "true" ] || [ "$BUILDKITE_BRANCH" == "main" ]; then
local tmpdir="$(mktemp -d 2>/dev/null || mktemp -d -t 'new')"
export CCACHE_DIR="$tmpdir/.cache/ccache"
export SCCACHE_DIR="$tmpdir/.cache/sccache"
export ZIG_LOCAL_CACHE_DIR="$tmpdir/.cache/zig-cache"
export ZIG_GLOBAL_CACHE_DIR="$tmpdir/.cache/zig-cache"
export BUN_DEPS_CACHE_DIR="$tmpdir/.cache/bun-deps"
export CCACHE_RECACHE="1"
else
export CCACHE_DIR="$HOME/.cache/ccache/$BUILDKITE_STEP_KEY"
export SCCACHE_DIR="$HOME/.cache/sccache/$BUILDKITE_STEP_KEY"
export ZIG_LOCAL_CACHE_DIR="$HOME/.cache/zig-cache/$BUILDKITE_STEP_KEY"
export ZIG_GLOBAL_CACHE_DIR="$HOME/.cache/zig-cache/$BUILDKITE_STEP_KEY"
export BUN_DEPS_CACHE_DIR="$HOME/.cache/bun-deps/$BUILDKITE_STEP_KEY"
fi
if [ "$(assert_os)" == "linux" ]; then
export USE_LTO="ON"
fi
if [ "$(assert_arch)" == "aarch64" ]; then
export CPU_TARGET="native"
elif [[ "$BUILDKITE_STEP_KEY" == *"baseline"* ]]; then

@@ -85,32 +124,21 @@ function export_environment() {
else
export CPU_TARGET="haswell"
fi
if [[ "$BUILDKITE_STEP_KEY" == *"nolto"* ]]; then
export USE_LTO="OFF"
else
export USE_LTO="ON"
fi
if $(buildkite-agent meta-data exists release &> /dev/null); then
if $(buildkite-agent meta-data exists release &>/dev/null); then
export CMAKE_BUILD_TYPE="$(buildkite-agent meta-data get release)"
else
export CMAKE_BUILD_TYPE="Release"
fi
if $(buildkite-agent meta-data exists canary &> /dev/null); then
if $(buildkite-agent meta-data exists canary &>/dev/null); then
export CANARY="$(buildkite-agent meta-data get canary)"
else
export CANARY="1"
fi
if $(buildkite-agent meta-data exists assertions &> /dev/null); then
if $(buildkite-agent meta-data exists assertions &>/dev/null); then
export USE_DEBUG_JSC="$(buildkite-agent meta-data get assertions)"
else
export USE_DEBUG_JSC="OFF"
fi
if [ "$BUILDKITE_CLEAN_CHECKOUT" == "true" ]; then
rm -rf "$CCACHE_DIR"
rm -rf "$SCCACHE_DIR"
rm -rf "$ZIG_LOCAL_CACHE_DIR"
rm -rf "$BUN_DEPS_CACHE_DIR"
fi
}

assert_build
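The three `buildkite-agent meta-data exists … get …` blocks above follow the same pattern; a small helper like the following (a refactoring sketch, not part of the diff) captures it in one place:

```bash
#!/bin/bash
set -euo pipefail
# Sketch: read a Buildkite meta-data key, falling back to a default when it is unset.
meta_data_or_default() {
  local key="$1" default="$2"
  if buildkite-agent meta-data exists "$key" &>/dev/null; then
    buildkite-agent meta-data get "$key"
  else
    echo "$default"
  fi
}

CMAKE_BUILD_TYPE="$(meta_data_or_default release Release)"
CANARY="$(meta_data_or_default canary 1)"
USE_DEBUG_JSC="$(meta_data_or_default assertions OFF)"
export CMAKE_BUILD_TYPE CANARY USE_DEBUG_JSC
```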
@@ -1,9 +1,9 @@
#!/bin/bash

set -eo pipefail
set -euo pipefail

function assert_buildkite_agent() {
if ! command -v buildkite-agent &> /dev/null; then
if ! command -v buildkite-agent &>/dev/null; then
echo "error: Cannot find buildkite-agent, please install it:"
echo "https://buildkite.com/docs/agent/v3/install"
exit 1

@@ -11,7 +11,7 @@ function assert_buildkite_agent() {
}

function assert_split() {
if ! command -v split &> /dev/null; then
if ! command -v split &>/dev/null; then
echo "error: Cannot find split, please install it:"
echo "https://www.gnu.org/software/coreutils/split"
exit 1

@@ -19,16 +19,27 @@ function assert_split() {
}

function upload_buildkite_artifact() {
local path="$1"; shift
if [ -z "${1:-}" ]; then
return
fi

local path="$1"
shift
local split="0"
local args=()
local args=() # Initialize args as an empty array
while true; do
if [ -z "$1" ]; then
if [ -z "${1:-}" ]; then
break
fi
case "$1" in
--split) split="1"; shift ;;
*) args+=("$1"); shift ;;
--split)
split="1"
shift
;;
*)
args+=("$1")
shift
;;
esac
done
if [ ! -f "$path" ]; then

@@ -38,9 +49,15 @@ function upload_buildkite_artifact() {
if [ "$split" == "1" ]; then
run_command rm -f "$path."*
run_command split -b 50MB -d "$path" "$path."
run_command buildkite-agent artifact upload "$path.*" "${args[@]}"
if [ "${args[@]:-}" != "" ]; then
run_command buildkite-agent artifact upload "$path.*" "${args[@]}"
else
run_command buildkite-agent artifact upload "$path.*"
fi
elif [ "${args[@]:-}" != "" ]; then
run_command buildkite-agent artifact upload "$path" "${args[@]:-}"
else
run_command buildkite-agent artifact upload "$path" "${args[@]}"
run_command buildkite-agent artifact upload "$path"
fi
}
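The extra branching around `"${args[@]}"` exists because, under `set -u`, expanding an empty array is an "unbound variable" error on bash versions before 4.4. A minimal sketch of one safe way to guard that expansion:

```bash
#!/bin/bash
set -euo pipefail
# Sketch: forwarding an optional argument list while `set -u` is in effect.
args=()   # may legitimately stay empty

# On bash < 4.4, this line aborts with "args[@]: unbound variable":
#   some-command "${args[@]}"

# Branching on emptiness (as the script above does) works on old and new bash:
if [ "${#args[@]}" -gt 0 ]; then
  echo "with extra args:" "${args[@]}"
else
  echo "with no extra args"
fi
```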
@@ -3,6 +3,10 @@
set -eo pipefail

function assert_main() {
if [ "$RELEASE" == "1" ]; then
echo "info: Skipping canary release because this is a release build"
exit 0
fi
if [ -z "$BUILDKITE_REPO" ]; then
echo "error: Cannot find repository for this build"
exit 1

@@ -26,7 +30,7 @@ function assert_main() {
}

function assert_buildkite_agent() {
if ! command -v buildkite-agent &> /dev/null; then
if ! command -v "buildkite-agent" &> /dev/null; then
echo "error: Cannot find buildkite-agent, please install it:"
echo "https://buildkite.com/docs/agent/v3/install"
exit 1

@@ -42,14 +46,15 @@ function assert_github() {

function assert_aws() {
assert_command "aws" "awscli" "https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html"
for secret in AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_ENDPOINT AWS_BUCKET; do
for secret in "AWS_ACCESS_KEY_ID" "AWS_SECRET_ACCESS_KEY" "AWS_ENDPOINT"; do
assert_buildkite_secret "$secret"
done
assert_buildkite_secret "AWS_BUCKET" --skip-redaction
}

function assert_sentry() {
assert_command "sentry-cli" "getsentry/tools/sentry-cli" "https://docs.sentry.io/cli/installation/"
for secret in SENTRY_AUTH_TOKEN SENTRY_ORG SENTRY_PROJECT; do
for secret in "SENTRY_AUTH_TOKEN" "SENTRY_ORG" "SENTRY_PROJECT"; do
assert_buildkite_secret "$secret"
done
}

@@ -81,7 +86,7 @@ function assert_command() {

function assert_buildkite_secret() {
local key="$1"
local value=$(buildkite-agent secret get "$key")
local value=$(buildkite-agent secret get "$key" ${@:2})
if [ -z "$value" ]; then
echo "error: Cannot find $key secret"
echo ""

@@ -114,43 +119,54 @@ function create_sentry_release() {
fi
}

function download_buildkite_artifacts() {
local dir="$1"
local names="${@:2}"
for name in "${names[@]}"; do
run_command buildkite-agent artifact download "$name" "$dir"
if [ ! -f "$dir/$name" ]; then
echo "error: Cannot find Buildkite artifact: $name"
exit 1
fi
done
}

function upload_github_assets() {
local version="$1"
local tag="$(release_tag "$version")"
local files="${@:2}"
for file in "${files[@]}"; do
run_command gh release upload "$tag" "$file" --clobber --repo "$BUILDKITE_REPO"
done
if [ "$version" == "canary" ]; then
run_command gh release edit "$tag" --repo "$BUILDKITE_REPO" \
--notes "This canary release of Bun corresponds to the commit: $BUILDKITE_COMMIT"
function download_buildkite_artifact() {
local name="$1"
local dir="$2"
if [ -z "$dir" ]; then
dir="."
fi
run_command buildkite-agent artifact download "$name" "$dir"
if [ ! -f "$dir/$name" ]; then
echo "error: Cannot find Buildkite artifact: $name"
exit 1
fi
}

function upload_s3_files() {
local folder="$1"
local files="${@:2}"
for file in "${files[@]}"; do
run_command aws --endpoint-url="$AWS_ENDPOINT" s3 cp "$file" "s3://$AWS_BUCKET/$folder/$file"
function upload_github_asset() {
local version="$1"
local tag="$(release_tag "$version")"
local file="$2"
run_command gh release upload "$tag" "$file" --clobber --repo "$BUILDKITE_REPO"

# Sometimes the upload fails, maybe this is a race condition in the gh CLI?
while [ "$(gh release view "$tag" --repo "$BUILDKITE_REPO" | grep -c "$file")" -eq 0 ]; do
echo "warn: Uploading $file to $tag failed, retrying..."
sleep "$((RANDOM % 5 + 1))"
run_command gh release upload "$tag" "$file" --clobber --repo "$BUILDKITE_REPO"
done
}

function update_github_release() {
local version="$1"
local tag="$(release_tag "$version")"
if [ "$tag" == "canary" ]; then
sleep 5 # There is possibly a race condition where this overwrites artifacts?
run_command gh release edit "$tag" --repo "$BUILDKITE_REPO" \
--notes "This release of Bun corresponds to the commit: $BUILDKITE_COMMIT"
fi
}

function upload_s3_file() {
local folder="$1"
local file="$2"
run_command aws --endpoint-url="$AWS_ENDPOINT" s3 cp "$file" "s3://$AWS_BUCKET/$folder/$file"
}

function create_release() {
assert_main
assert_buildkite_agent
assert_github
assert_aws
assert_sentry

local tag="$1" # 'canary' or 'x.y.z'

@@ -171,10 +187,24 @@ function create_release() {
bun-windows-x64-baseline-profile.zip
)

download_buildkite_artifacts "." "${artifacts[@]}"
upload_s3_files "releases/$BUILDKITE_COMMIT" "${artifacts[@]}"
upload_s3_files "releases/$tag" "${artifacts[@]}"
upload_github_assets "$tag" "${artifacts[@]}"
function upload_artifact() {
local artifact="$1"
download_buildkite_artifact "$artifact"
if [ "$tag" == "canary" ]; then
upload_s3_file "releases/$BUILDKITE_COMMIT-canary" "$artifact" &
else
upload_s3_file "releases/$BUILDKITE_COMMIT" "$artifact" &
fi
upload_s3_file "releases/$tag" "$artifact" &
upload_github_asset "$tag" "$artifact" &
wait
}

for artifact in "${artifacts[@]}"; do
upload_artifact "$artifact"
done

update_github_release "$tag"
create_sentry_release "$tag"
}
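A standalone sketch of the retry idiom used in `upload_github_asset` above: check that the asset is visible on the release and re-upload with a short random backoff until it is. The tag, file, and repo values below are illustrative:

```bash
#!/bin/bash
set -euo pipefail
# Sketch: retry `gh release upload` until the asset actually shows up on the release.
tag="canary"                  # illustrative
file="bun-linux-x64.zip"      # illustrative
repo="oven-sh/bun"

gh release upload "$tag" "$file" --clobber --repo "$repo"
while [ "$(gh release view "$tag" --repo "$repo" | grep -c "$file")" -eq 0 ]; do
  echo "warn: $file not visible on $tag yet, retrying..."
  sleep "$((RANDOM % 5 + 1))"   # 1-5 second jitter between attempts
  gh release upload "$tag" "$file" --clobber --repo "$repo"
done
```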
.gitattributes — 3 lines changed (vendored)

@@ -45,3 +45,6 @@ examples/**/* linguist-documentation

src/deps/*.c linguist-vendored
src/deps/brotli/** linguist-vendored

test/js/node/test/fixtures linguist-vendored
test/js/node/test/common linguist-vendored
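`git check-attr` is a quick way to confirm which paths an attribute line actually covers; note that, unlike a `dir/**` pattern, a bare directory pattern in `.gitattributes` does not apply to files inside that directory, so checking a concrete path (the one below is illustrative) is worthwhile:

```bash
# Sketch: query whether linguist-vendored applies to a given path.
git check-attr linguist-vendored -- src/deps/brotli/c/common/constants.c
# prints "<path>: linguist-vendored: set" when a pattern such as src/deps/brotli/** matches,
# and "unspecified" when no .gitattributes line covers the path.
```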
.github/workflows/labeled.yml — 36 lines changed (vendored)

@@ -7,6 +7,42 @@ on:
types: [labeled]

jobs:
# on-bug:
# runs-on: ubuntu-latest
# if: github.event.label.name == 'bug' || github.event.label.name == 'crash'
# permissions:
# issues: write
# steps:
# - name: Checkout
# uses: actions/checkout@v4
# with:
# sparse-checkout: |
# scripts
# .github
# CMakeLists.txt
# - name: Setup Bun
# uses: ./.github/actions/setup-bun
# with:
# bun-version: "1.1.24"
# - name: "categorize bug"
# id: add-labels
# env:
# GITHUB_ISSUE_BODY: ${{ github.event.issue.body }}
# GITHUB_ISSUE_TITLE: ${{ github.event.issue.title }}
# ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
# shell: bash
# run: |
# echo '{"dependencies": { "@anthropic-ai/sdk": "latest" }}' > scripts/package.json && bun install --cwd=./scripts
# LABELS=$(bun scripts/label-issue.ts)
# echo "labels=$LABELS" >> $GITHUB_OUTPUT
# - name: Add labels
# uses: actions-cool/issues-helper@v3
# if: steps.add-labels.outputs.labels != ''
# with:
# actions: "add-labels"
# token: ${{ secrets.GITHUB_TOKEN }}
# issue-number: ${{ github.event.issue.number }}
# labels: ${{ steps.add-labels.outputs.labels }}
on-labeled:
runs-on: ubuntu-latest
if: github.event.label.name == 'crash' || github.event.label.name == 'needs repro'
.github/workflows/release.yml — 3 lines changed (vendored)

@@ -88,6 +88,9 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
with:
# To workaround issue
ref: main
- name: Setup Bun
uses: ./.github/actions/setup-bun
with:
.github/workflows/run-lint-cpp.yml — 4 lines changed (vendored)

@@ -3,7 +3,7 @@ name: lint-cpp
permissions:
contents: read
env:
LLVM_VERSION: 16
LLVM_VERSION: 18
LC_CTYPE: "en_US.UTF-8"
LC_ALL: "en_US.UTF-8"

@@ -26,7 +26,7 @@ jobs:
- name: Setup Bun
uses: ./.github/actions/setup-bun
with:
bun-version: latest
bun-version: 1.1.23
- name: Install Dependencies
env:
HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK: 1
.github/workflows/stale.yaml — 30 lines changed (vendored, new file)

@@ -0,0 +1,30 @@
name: Close inactive issues
on:
# schedule:
# - cron: "15 * * * *"
workflow_dispatch:

jobs:
close-issues:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v5
with:
days-before-issue-close: 5
any-of-issue-labels: "needs repro,waiting-for-author"
exempt-issue-labels: "neverstale"
exempt-pr-labels: "neverstale"
remove-stale-when-updated: true
stale-issue-label: "stale"
stale-pr-label: "stale"
stale-issue-message: "This issue is stale and may be closed due to inactivity. If you're still running into this, please leave a comment."
close-issue-message: "This issue was closed because it has been inactive for 5 days since being marked as stale."
days-before-pr-stale: 30
days-before-pr-close: 14
stale-pr-message: "This pull request is stale and may be closed due to inactivity."
close-pr-message: "This pull request has been closed due to inactivity."
repo-token: ${{ github.token }}
operations-per-run: 1000
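Because the `schedule` trigger is commented out, this workflow only runs when dispatched manually. With the GitHub CLI that would look roughly like the following (repo name shown for illustration):

```bash
# Sketch: trigger the workflow_dispatch event for the stale workflow by hand.
gh workflow run stale.yaml --repo oven-sh/bun
# then inspect the run that was queued
gh run list --workflow=stale.yaml --repo oven-sh/bun --limit 1
```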
@@ -1,2 +1,4 @@
command script import src/deps/zig/tools/lldb_pretty_printers.py
command script import src/bun.js/WebKit/Tools/lldb/lldb_webkit.py

# type summary add --summary-string "${var} | inner=${var[0-30]}, source=${var[33-64]}, tag=${var[31-32]}" "unsigned long"
121
.vscode/launch.json
generated
vendored
121
.vscode/launch.json
generated
vendored
@@ -18,6 +18,7 @@
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "1",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
@@ -33,7 +34,6 @@
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "1",
|
||||
"BUN_DEBUG_FileReader": "1",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
@@ -54,6 +54,7 @@
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "0",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
@@ -68,6 +69,7 @@
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "0",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
@@ -82,6 +84,7 @@
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
@@ -96,6 +99,7 @@
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
@@ -110,6 +114,7 @@
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
|
||||
"BUN_INSPECT": "ws://localhost:0/?wait=1",
|
||||
},
|
||||
@@ -130,6 +135,7 @@
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
|
||||
"BUN_INSPECT": "ws://localhost:0/?break=1",
|
||||
},
|
||||
@@ -151,7 +157,6 @@
|
||||
"env": {
|
||||
"FORCE_COLOR": "0",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_EventLoop": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
@@ -263,6 +268,7 @@
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
@@ -277,6 +283,7 @@
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "0",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
@@ -290,7 +297,8 @@
|
||||
"cwd": "${workspaceFolder}/test",
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "0",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
@@ -305,6 +313,7 @@
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
@@ -319,6 +328,7 @@
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
@@ -333,6 +343,7 @@
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
|
||||
"BUN_INSPECT": "ws://localhost:0/?wait=1",
|
||||
},
|
||||
@@ -353,6 +364,7 @@
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
|
||||
"BUN_INSPECT": "ws://localhost:0/?break=1",
|
||||
},
|
||||
@@ -448,6 +460,11 @@
|
||||
"program": "node",
|
||||
"args": ["test/runner.node.mjs"],
|
||||
"cwd": "${workspaceFolder}",
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
},
|
||||
// Windows: bun test [file]
|
||||
@@ -474,7 +491,6 @@
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "1",
|
||||
@@ -501,19 +517,7 @@
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_EventLoop",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_uv",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_SYS",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_PipeWriter",
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
@@ -541,6 +545,10 @@
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "0",
|
||||
@@ -566,6 +574,10 @@
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "0",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "2",
|
||||
@@ -591,6 +603,10 @@
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "2",
|
||||
@@ -625,6 +641,10 @@
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "2",
|
||||
@@ -660,6 +680,10 @@
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "2",
|
||||
@@ -681,7 +705,10 @@
|
||||
"name": "FORCE_COLOR",
|
||||
"value": "1",
|
||||
},
|
||||
|
||||
{
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "0",
|
||||
@@ -705,7 +732,7 @@
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "0",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
@@ -801,6 +828,10 @@
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "2",
|
||||
@@ -826,6 +857,10 @@
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "0",
|
||||
@@ -851,6 +886,10 @@
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "0",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "2",
|
||||
@@ -876,6 +915,10 @@
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "2",
|
||||
@@ -901,6 +944,10 @@
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "2",
|
||||
@@ -926,6 +973,10 @@
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "2",
|
||||
@@ -960,6 +1011,10 @@
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "2",
|
||||
@@ -1046,6 +1101,10 @@
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "0",
|
||||
@@ -1069,7 +1128,11 @@
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "0",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
@@ -1096,6 +1159,24 @@
|
||||
"program": "node",
|
||||
"args": ["test/runner.node.mjs"],
|
||||
"cwd": "${workspaceFolder}",
|
||||
"environment": [
|
||||
{
|
||||
"name": "FORCE_COLOR",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_QUIET_LOGS",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_DEBUG_jest",
|
||||
"value": "1",
|
||||
},
|
||||
{
|
||||
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
|
||||
"value": "2",
|
||||
},
|
||||
],
|
||||
"console": "internalConsole",
|
||||
},
|
||||
],
|
||||
|
||||
4
.vscode/settings.json
vendored
4
.vscode/settings.json
vendored
@@ -15,6 +15,9 @@
|
||||
"src/bun.js/WebKit": true,
|
||||
"src/deps/*/**": true,
|
||||
"test/node.js/upstream": true,
|
||||
// This will fill up your whole search history.
|
||||
"test/js/node/test/fixtures": true,
|
||||
"test/js/node/test/common": true,
|
||||
},
|
||||
"search.followSymlinks": false,
|
||||
"search.useIgnoreFiles": true,
|
||||
@@ -135,6 +138,7 @@
|
||||
},
|
||||
"files.associations": {
|
||||
"*.idl": "cpp",
|
||||
"array": "cpp",
|
||||
},
|
||||
"C_Cpp.files.exclude": {
|
||||
"**/.vscode": true,
|
||||
|
||||
@@ -3,8 +3,8 @@ cmake_policy(SET CMP0091 NEW)
|
||||
cmake_policy(SET CMP0067 NEW)
|
||||
|
||||
set(CMAKE_POLICY_DEFAULT_CMP0069 NEW)
|
||||
set(Bun_VERSION "1.1.22")
|
||||
set(WEBKIT_TAG f9a0fda2d2b2fd001a00bfcf8e7917a56b382516)
|
||||
set(Bun_VERSION "1.1.27")
|
||||
set(WEBKIT_TAG 147ed53838e21525677492c27099567a6cd19c6b)
|
||||
|
||||
set(BUN_WORKDIR "${CMAKE_CURRENT_BINARY_DIR}")
|
||||
message(STATUS "Configuring Bun ${Bun_VERSION} in ${BUN_WORKDIR}")
|
||||
@@ -15,8 +15,24 @@ set(CMAKE_C_STANDARD 17)
|
||||
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
||||
set(CMAKE_C_STANDARD_REQUIRED ON)
|
||||
|
||||
option(ZIG_CACHE_DIR "Path to the Zig cache directory" "")
|
||||
|
||||
if(NOT ZIG_CACHE_DIR)
|
||||
SET(ZIG_CACHE_DIR "${BUN_WORKDIR}")
|
||||
cmake_path(APPEND ZIG_CACHE_DIR "zig-cache")
|
||||
endif()
|
||||
|
||||
set(LOCAL_ZIG_CACHE_DIR "${ZIG_CACHE_DIR}")
|
||||
set(GLOBAL_ZIG_CACHE_DIR "${ZIG_CACHE_DIR}")
|
||||
|
||||
cmake_path(APPEND LOCAL_ZIG_CACHE_DIR "local")
|
||||
cmake_path(APPEND GLOBAL_ZIG_CACHE_DIR "global")
|
||||
|
||||
# Used in process.version, process.versions.node, napi, and elsewhere
|
||||
set(REPORTED_NODEJS_VERSION "22.3.0")
|
||||
set(REPORTED_NODEJS_VERSION "22.6.0")
|
||||
|
||||
# Used in process.versions.modules and compared while loading V8 modules
|
||||
set(REPORTED_NODEJS_ABI_VERSION "127")
|
||||
|
||||
# WebKit uses -std=gnu++20 on non-macOS non-Windows
|
||||
# If we do not set this, it will crash at startup on the first memory allocation.
|
||||
@@ -334,7 +350,7 @@ option(USE_LTO "Enable Link-Time Optimization" ${DEFAULT_LTO})
|
||||
|
||||
if(APPLE AND USE_LTO)
|
||||
set(USE_LTO OFF)
|
||||
message(WARNING "Link-Time Optimization is not supported on macOS because it requires -fuse-ld=lld and lld causes many segfaults on macOS (likely related to stack size)")
|
||||
message(FATAL_ERROR "Link-Time Optimization is not supported on macOS because it requires -fuse-ld=lld and lld causes many segfaults on macOS (likely related to stack size)")
|
||||
endif()
|
||||
|
||||
if(WIN32 AND USE_LTO)
|
||||
@@ -463,6 +479,8 @@ elseif(NOT BUN_CPP_ONLY AND NOT BUN_LINK_ONLY AND NOT BUN_TIDY_ONLY AND NOT BUN_
|
||||
|
||||
message(STATUS "Installed Zig Compiler: ${ZIG_COMPILER}")
|
||||
set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS "build.zig")
|
||||
|
||||
message(STATUS "Using zig cache directory: ${ZIG_CACHE_DIR}")
|
||||
endif()
|
||||
|
||||
# Bun
|
||||
@@ -487,7 +505,7 @@ if(USE_UNIFIED_SOURCES)
|
||||
endif()
|
||||
|
||||
# CCache
|
||||
find_program(CCACHE_PROGRAM sccache)
|
||||
# find_program(CCACHE_PROGRAM sccache)
|
||||
find_program(CCACHE_PROGRAM ccache)
|
||||
|
||||
if(CCACHE_PROGRAM)
|
||||
@@ -649,6 +667,7 @@ file(GLOB BUN_CPP ${CONFIGURE_DEPENDS}
|
||||
"${BUN_SRC}/bun.js/bindings/sqlite/*.cpp"
|
||||
"${BUN_SRC}/bun.js/bindings/webcrypto/*.cpp"
|
||||
"${BUN_SRC}/bun.js/bindings/webcrypto/*/*.cpp"
|
||||
"${BUN_SRC}/bun.js/bindings/v8/*.cpp"
|
||||
"${BUN_SRC}/deps/picohttpparser/picohttpparser.c"
|
||||
)
|
||||
list(APPEND BUN_RAW_SOURCES ${BUN_CPP})
|
||||
@@ -689,6 +708,32 @@ add_custom_command(
|
||||
)
|
||||
list(APPEND BUN_RAW_SOURCES "${BUN_WORKDIR}/codegen/ZigGeneratedClasses.cpp")
|
||||
|
||||
if(NOT NO_CODEGEN)
|
||||
# --- ErrorCode Generator ---
|
||||
file(GLOB NODE_ERRORS_TS ${CONFIGURE_DEPENDS}
|
||||
"${BUN_SRC}/bun.js/bindings/ErrorCode.ts"
|
||||
)
|
||||
add_custom_command(
|
||||
OUTPUT "${BUN_WORKDIR}/codegen/ErrorCode+List.h" "${BUN_WORKDIR}/codegen/ErrorCode+Data.h" "${BUN_WORKDIR}/codegen/ErrorCode.zig"
|
||||
COMMAND ${BUN_EXECUTABLE} run "${BUN_CODEGEN_SRC}/generate-node-errors.ts" "${BUN_WORKDIR}/codegen"
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||
MAIN_DEPENDENCY "${BUN_CODEGEN_SRC}/generate-node-errors.ts"
|
||||
DEPENDS ${NODE_ERRORS_TS}
|
||||
VERBATIM
|
||||
COMMENT "Generating ErrorCode.zig"
|
||||
)
|
||||
|
||||
# This needs something to force it to be regenerated
|
||||
WEBKIT_ADD_SOURCE_DEPENDENCIES(
|
||||
"${BUN_SRC}/bun.js/bindings/ErrorCode.cpp"
|
||||
"${BUN_WORKDIR}/codegen/ErrorCode+List.h"
|
||||
)
|
||||
WEBKIT_ADD_SOURCE_DEPENDENCIES(
|
||||
"${BUN_SRC}/bun.js/bindings/ErrorCode.h"
|
||||
"${BUN_WORKDIR}/codegen/ErrorCode+Data.h"
|
||||
)
|
||||
endif()
|
||||
|
||||
# --- JSSink Generator ---
|
||||
add_custom_command(
|
||||
OUTPUT "${BUN_WORKDIR}/codegen/JSSink.cpp"
|
||||
@@ -762,7 +807,7 @@ if(NOT NO_CODEGEN)
|
||||
OUTPUT ${BUN_IDENTIFIER_CACHE_OUT}
|
||||
MAIN_DEPENDENCY "${BUN_SRC}/js_lexer/identifier_data.zig"
|
||||
DEPENDS "${BUN_SRC}/js_lexer/identifier_cache.zig"
|
||||
COMMAND ${ZIG_COMPILER} run "--zig-lib-dir" "${ZIG_LIB_DIR}" "${BUN_SRC}/js_lexer/identifier_data.zig"
|
||||
COMMAND ${ZIG_COMPILER} run "--zig-lib-dir" "${ZIG_LIB_DIR}" "--cache-dir" "${LOCAL_ZIG_CACHE_DIR}" "--global-cache-dir" "${GLOBAL_ZIG_CACHE_DIR}" "${BUN_SRC}/js_lexer/identifier_data.zig"
|
||||
VERBATIM
|
||||
COMMENT "Building Identifier Cache"
|
||||
)
|
||||
@@ -783,6 +828,7 @@ if(NOT NO_CODEGEN)
|
||||
"${BUN_SRC}/js/thirdparty/*.ts"
|
||||
"${BUN_SRC}/js/internal/*.js"
|
||||
"${BUN_SRC}/js/internal/*.ts"
|
||||
"${BUN_SRC}/js/internal/cluster/*.ts"
|
||||
"${BUN_SRC}/js/internal/util/*.js"
|
||||
"${BUN_SRC}/js/internal/fs/*.ts"
|
||||
"${BUN_SRC}/js/node/*.js"
|
||||
@@ -911,10 +957,13 @@ if(NOT BUN_LINK_ONLY AND NOT BUN_CPP_ONLY)
|
||||
"-Denable_logs=${ENABLE_LOGS}"
|
||||
"-Dreported_nodejs_version=${REPORTED_NODEJS_VERSION}"
|
||||
"-Dobj_format=${BUN_ZIG_OBJ_FORMAT}"
|
||||
"--cache-dir" "${LOCAL_ZIG_CACHE_DIR}"
|
||||
"--global-cache-dir" "${GLOBAL_ZIG_CACHE_DIR}"
|
||||
DEPENDS
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/build.zig"
|
||||
"${ZIG_FILES}"
|
||||
"${BUN_WORKDIR}/codegen/ZigGeneratedClasses.zig"
|
||||
"${BUN_WORKDIR}/codegen/ErrorCode.zig"
|
||||
"${BUN_WORKDIR}/codegen/ResolvedSourceTag.zig"
|
||||
"${BUN_IDENTIFIER_CACHE_OUT}"
|
||||
"${BUN_SRC}/api/schema.zig"
|
||||
@@ -986,7 +1035,6 @@ add_compile_definitions(
|
||||
"LIBUS_USE_BORINGSSL=1"
|
||||
"WITH_BORINGSSL=1"
|
||||
"STATICALLY_LINKED_WITH_JavaScriptCore=1"
|
||||
"STATICALLY_LINKED_WITH_WTF=1"
|
||||
"STATICALLY_LINKED_WITH_BMALLOC=1"
|
||||
"BUILDING_WITH_CMAKE=1"
|
||||
"JSC_OBJC_API_ENABLED=0"
|
||||
@@ -997,6 +1045,7 @@ add_compile_definitions(
|
||||
"BUILDING_JSCONLY__"
|
||||
"BUN_DYNAMIC_JS_LOAD_PATH=\"${BUN_WORKDIR}/js\""
|
||||
"REPORTED_NODEJS_VERSION=\"${REPORTED_NODEJS_VERSION}\""
|
||||
"REPORTED_NODEJS_ABI_VERSION=${REPORTED_NODEJS_ABI_VERSION}"
|
||||
)
|
||||
|
||||
if(NOT ASSERT_ENABLED)
|
||||
@@ -1083,12 +1132,25 @@ if(CMAKE_BUILD_TYPE STREQUAL "Debug")
|
||||
-Werror=uninitialized
|
||||
-Werror=conditional-uninitialized
|
||||
-Werror=suspicious-memaccess
|
||||
-Werror=int-conversion
|
||||
-Werror=nonnull
|
||||
-Werror=move
|
||||
-Werror=sometimes-uninitialized
|
||||
-Werror=unused
|
||||
-Wno-unused-function
|
||||
-Wno-nullability-completeness
|
||||
-Werror
|
||||
-fsanitize=null
|
||||
-fsanitize-recover=all
|
||||
-fsanitize=bounds
|
||||
-fsanitize=return
|
||||
-fsanitize=nullability-arg
|
||||
-fsanitize=nullability-assign
|
||||
-fsanitize=nullability-return
|
||||
-fsanitize=returns-nonnull-attribute
|
||||
-fsanitize=unreachable
|
||||
)
|
||||
target_link_libraries(${bun} PRIVATE -fsanitize=null)
|
||||
else()
|
||||
target_compile_options(${bun} PUBLIC /Od /Z7)
|
||||
endif()
|
||||
@@ -1110,8 +1172,11 @@ elseif(CMAKE_BUILD_TYPE STREQUAL "Release")
|
||||
-Werror=uninitialized
|
||||
-Werror=conditional-uninitialized
|
||||
-Werror=suspicious-memaccess
|
||||
-Werror=int-conversion
|
||||
-Werror=nonnull
|
||||
-Werror=move
|
||||
-Werror=sometimes-uninitialized
|
||||
-Wno-nullability-completeness
|
||||
-Werror
|
||||
)
|
||||
else()
|
||||
@@ -1208,6 +1273,9 @@ else()
|
||||
-fno-pic
|
||||
-fno-pie
|
||||
-faddrsig
|
||||
-ffile-prefix-map="${CMAKE_CURRENT_SOURCE_DIR}"=.
|
||||
-ffile-prefix-map="${BUN_DEPS_DIR}"=src/deps
|
||||
-ffile-prefix-map="${BUN_DEPS_OUT_DIR}"=src/deps
|
||||
)
|
||||
endif()
|
||||
|
||||
@@ -1530,7 +1598,10 @@ endif()
|
||||
if(NOT WIN32)
|
||||
target_link_libraries(${bun} PRIVATE "${WEBKIT_LIB_DIR}/libWTF.a")
|
||||
target_link_libraries(${bun} PRIVATE "${WEBKIT_LIB_DIR}/libJavaScriptCore.a")
|
||||
target_link_libraries(${bun} PRIVATE "${WEBKIT_LIB_DIR}/libbmalloc.a")
|
||||
|
||||
if(NOT APPLE OR EXISTS "${WEBKIT_LIB_DIR}/libbmalloc.a")
|
||||
target_link_libraries(${bun} PRIVATE "${WEBKIT_LIB_DIR}/libbmalloc.a")
|
||||
endif()
|
||||
else()
|
||||
target_link_libraries(${bun} PRIVATE
|
||||
"${WEBKIT_LIB_DIR}/WTF.lib"
|
||||
@@ -1562,12 +1633,14 @@ endif()
|
||||
|
||||
if(BUN_TIDY_ONLY)
|
||||
find_program(CLANG_TIDY_EXE NAMES "clang-tidy")
|
||||
set(CLANG_TIDY_COMMAND "${CLANG_TIDY_EXE}" "-checks=-*,clang-analyzer-*,-clang-analyzer-webkit.UncountedLambdaCapturesChecker" "--fix" "--fix-errors" "--format-style=webkit" "--warnings-as-errors=*")
|
||||
|
||||
# webkit ones are disabled disabled because it's noisy, e.g. for JavaScriptCore/Lookup.h
|
||||
set(CLANG_TIDY_COMMAND "${CLANG_TIDY_EXE}" "-checks=-*,clang-analyzer-*,-clang-analyzer-webkit.UncountedLambdaCapturesChecker,-clang-analyzer-optin.core.EnumCastOutOfRange,-clang-analyzer-webkit.RefCntblBaseVirtualDtor" "--fix" "--fix-errors" "--format-style=webkit" "--warnings-as-errors=*")
|
||||
set_target_properties(${bun} PROPERTIES CXX_CLANG_TIDY "${CLANG_TIDY_COMMAND}")
|
||||
endif()
|
||||
|
||||
if(BUN_TIDY_ONLY_EXTRA)
|
||||
find_program(CLANG_TIDY_EXE NAMES "clang-tidy")
|
||||
set(CLANG_TIDY_COMMAND "${CLANG_TIDY_EXE}" "-checks=-*,clang-analyzer-*,performance-*,-clang-analyzer-webkit.UncountedLambdaCapturesChecker" "--fix" "--fix-errors" "--format-style=webkit" "--warnings-as-errors=*")
|
||||
set(CLANG_TIDY_COMMAND "${CLANG_TIDY_EXE}" "-checks=-*,clang-analyzer-*,performance-*,-clang-analyzer-webkit.UncountedLambdaCapturesChecker,-clang-analyzer-optin.core.EnumCastOutOfRange,-clang-analyzer-webkit.RefCntblBaseVirtualDtor" "--fix" "--fix-errors" "--format-style=webkit" "--warnings-as-errors=*")
|
||||
set_target_properties(${bun} PROPERTIES CXX_CLANG_TIDY "${CLANG_TIDY_COMMAND}")
|
||||
endif()
|
||||
|
||||
@@ -63,7 +63,7 @@ Bun requires LLVM 16 (`clang` is part of LLVM). This version requirement is to m
|
||||
{% codetabs %}
|
||||
|
||||
```bash#macOS (Homebrew)
|
||||
$ brew install llvm@16
|
||||
$ brew install llvm@18
|
||||
```
|
||||
|
||||
```bash#Ubuntu/Debian
|
||||
|
||||
@@ -52,7 +52,7 @@ ENV CI 1
|
||||
ENV CPU_TARGET=${CPU_TARGET}
|
||||
ENV BUILDARCH=${BUILDARCH}
|
||||
ENV BUN_DEPS_OUT_DIR=${BUN_DEPS_OUT_DIR}
|
||||
ENV BUN_ENABLE_LTO 1
|
||||
ENV USE_LTO 1
|
||||
|
||||
ENV LC_CTYPE=en_US.UTF-8
|
||||
ENV LC_ALL=en_US.UTF-8
|
||||
|
||||
3
Makefile
3
Makefile
@@ -366,7 +366,7 @@ ifeq ($(OS_NAME),linux)
|
||||
endif
|
||||
|
||||
ifeq ($(OS_NAME),darwin)
|
||||
MACOS_MIN_FLAG=-mmacosx-version-min=$(MIN_MACOS_VERSION)
|
||||
MACOS_MIN_FLAG=-mmacos-version-min=$(MIN_MACOS_VERSION)
|
||||
POSIX_PKG_MANAGER=brew
|
||||
INCLUDE_DIRS += $(MAC_INCLUDE_DIRS)
|
||||
endif
|
||||
@@ -1309,6 +1309,7 @@ jsc-build-mac-compile-debug:
|
||||
-DCMAKE_BUILD_TYPE=Debug \
|
||||
-DUSE_THIN_ARCHIVES=OFF \
|
||||
-DENABLE_FTL_JIT=ON \
|
||||
-DENABLE_MALLOC_HEAP_BREAKDOWN=ON \
|
||||
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
|
||||
-DUSE_BUN_JSC_ADDITIONS=ON \
|
||||
-DENABLE_BUN_SKIP_FAILING_ASSERTIONS=ON \
|
||||
|
||||
47
README.md
47
README.md
@@ -24,8 +24,6 @@
|
||||
|
||||
## What is Bun?
|
||||
|
||||
> **Bun is under active development.** Use it to speed up your development workflows or run simpler production code in resource-constrained environments like serverless functions. We're working on more complete Node.js compatibility and integration with existing frameworks. Join the [Discord](https://bun.sh/discord) and watch the [GitHub repository](https://github.com/oven-sh/bun) to keep tabs on future releases.
|
||||
|
||||
Bun is an all-in-one toolkit for JavaScript and TypeScript apps. It ships as a single executable called `bun`.
|
||||
|
||||
At its core is the _Bun runtime_, a fast JavaScript runtime designed as a drop-in replacement for Node.js. It's written in Zig and powered by JavaScriptCore under the hood, dramatically reducing startup times and memory usage.
|
||||
@@ -87,16 +85,19 @@ bun upgrade --canary
|
||||
## Quick links
|
||||
|
||||
- Intro
|
||||
|
||||
- [What is Bun?](https://bun.sh/docs/index)
|
||||
- [Installation](https://bun.sh/docs/installation)
|
||||
- [Quickstart](https://bun.sh/docs/quickstart)
|
||||
- [TypeScript](https://bun.sh/docs/typescript)
|
||||
|
||||
- Templating
|
||||
|
||||
- [`bun init`](https://bun.sh/docs/cli/init)
|
||||
- [`bun create`](https://bun.sh/docs/cli/bun-create)
|
||||
|
||||
- Runtime
|
||||
|
||||
- [`bun run`](https://bun.sh/docs/cli/run)
|
||||
- [File types](https://bun.sh/docs/runtime/loaders)
|
||||
- [TypeScript](https://bun.sh/docs/runtime/typescript)
|
||||
@@ -115,6 +116,7 @@ bun upgrade --canary
|
||||
- [Framework API](https://bun.sh/docs/runtime/framework)
|
||||
|
||||
- Package manager
|
||||
|
||||
- [`bun install`](https://bun.sh/docs/cli/install)
|
||||
- [`bun add`](https://bun.sh/docs/cli/add)
|
||||
- [`bun remove`](https://bun.sh/docs/cli/remove)
|
||||
@@ -130,6 +132,7 @@ bun upgrade --canary
|
||||
- [Overrides and resolutions](https://bun.sh/docs/install/overrides)
|
||||
|
||||
- Bundler
|
||||
|
||||
- [`Bun.build`](https://bun.sh/docs/bundler)
|
||||
- [Loaders](https://bun.sh/docs/bundler/loaders)
|
||||
- [Plugins](https://bun.sh/docs/bundler/plugins)
|
||||
@@ -137,6 +140,7 @@ bun upgrade --canary
|
||||
- [vs esbuild](https://bun.sh/docs/bundler/vs-esbuild)
|
||||
|
||||
- Test runner
|
||||
|
||||
- [`bun test`](https://bun.sh/docs/cli/test)
|
||||
- [Writing tests](https://bun.sh/docs/test/writing)
|
||||
- [Watch mode](https://bun.sh/docs/test/hot)
|
||||
@@ -148,9 +152,11 @@ bun upgrade --canary
|
||||
- [Code coverage](https://bun.sh/docs/test/coverage)
|
||||
|
||||
- Package runner
|
||||
|
||||
- [`bunx`](https://bun.sh/docs/cli/bunx)
|
||||
|
||||
- API
|
||||
|
||||
- [HTTP server](https://bun.sh/docs/api/http)
|
||||
- [WebSockets](https://bun.sh/docs/api/websockets)
|
||||
- [Workers](https://bun.sh/docs/api/workers)
|
||||
@@ -183,9 +189,10 @@ bun upgrade --canary
|
||||
- [Building Windows](https://bun.sh/docs/project/building-windows)
|
||||
- [License](https://bun.sh/docs/project/licensing)
|
||||
|
||||
## Guides
|
||||
## Guides
|
||||
|
||||
- Binary
|
||||
|
||||
- Binary
|
||||
- [Convert a Blob to a DataView](https://bun.sh/guides/binary/blob-to-dataview)
|
||||
- [Convert a Blob to a ReadableStream](https://bun.sh/guides/binary/blob-to-stream)
|
||||
- [Convert a Blob to a string](https://bun.sh/guides/binary/blob-to-string)
|
||||
@@ -209,7 +216,8 @@ bun upgrade --canary
|
||||
- [Convert an ArrayBuffer to a Uint8Array](https://bun.sh/guides/binary/arraybuffer-to-typedarray)
|
||||
- [Convert an ArrayBuffer to an array of numbers](https://bun.sh/guides/binary/arraybuffer-to-array)
|
||||
|
||||
- Ecosystem
|
||||
- Ecosystem
|
||||
|
||||
- [Build a frontend using Vite and Bun](https://bun.sh/guides/ecosystem/vite)
|
||||
- [Build an app with Astro and Bun](https://bun.sh/guides/ecosystem/astro)
|
||||
- [Build an app with Next.js and Bun](https://bun.sh/guides/ecosystem/nextjs)
|
||||
@@ -236,7 +244,8 @@ bun upgrade --canary
|
||||
- [Use React and JSX](https://bun.sh/guides/ecosystem/react)
|
||||
- [Add Sentry to a Bun app](https://bun.sh/guides/ecosystem/sentry)
|
||||
|
||||
- HTTP
|
||||
- [Common HTTP server usage](https://bun.sh/guides/http/server)
|
||||
- [Configure TLS on an HTTP server](https://bun.sh/guides/http/tls)
|
||||
- [fetch with unix domain sockets in Bun](https://bun.sh/guides/http/fetch-unix)
|
||||
@@ -250,7 +259,8 @@ bun upgrade --canary
|
||||
- [Upload files via HTTP using FormData](https://bun.sh/guides/http/file-uploads)
|
||||
- [Write a simple HTTP server](https://bun.sh/guides/http/simple)
|
||||
|
||||
- Install
|
||||
- [Add a dependency](https://bun.sh/guides/install/add)
|
||||
- [Add a development dependency](https://bun.sh/guides/install/add-dev)
|
||||
- [Add a Git dependency](https://bun.sh/guides/install/add-git)
|
||||
@@ -268,7 +278,8 @@ bun upgrade --canary
|
||||
- [Using bun install with an Azure Artifacts npm registry](https://bun.sh/guides/install/azure-artifacts)
|
||||
- [Using bun install with Artifactory](https://bun.sh/guides/install/jfrog-artifactory)
|
||||
|
||||
- Process
|
||||
- [Get the process uptime in nanoseconds](https://bun.sh/guides/process/nanoseconds)
|
||||
- [Listen for CTRL+C](https://bun.sh/guides/process/ctrl-c)
|
||||
- [Listen to OS signals](https://bun.sh/guides/process/os-signals)
|
||||
@@ -279,7 +290,8 @@ bun upgrade --canary
|
||||
- [Spawn a child process](https://bun.sh/guides/process/spawn)
|
||||
- [Spawn a child process and communicate using IPC](https://bun.sh/guides/process/ipc)
|
||||
|
||||
- Read file
|
||||
- [Check if a file exists](https://bun.sh/guides/read-file/exists)
|
||||
- [Get the MIME type of a file](https://bun.sh/guides/read-file/mime)
|
||||
- [Read a file as a ReadableStream](https://bun.sh/guides/read-file/stream)
|
||||
@@ -290,7 +302,8 @@ bun upgrade --canary
|
||||
- [Read a JSON file](https://bun.sh/guides/read-file/json)
|
||||
- [Watch a directory for changes](https://bun.sh/guides/read-file/watch)
|
||||
|
||||
- Runtime
|
||||
- [Debugging Bun with the VS Code extension](https://bun.sh/guides/runtime/vscode-debugger)
|
||||
- [Debugging Bun with the web debugger](https://bun.sh/guides/runtime/web-debugger)
|
||||
- [Define and replace static globals & constants](https://bun.sh/guides/runtime/define-constant)
|
||||
@@ -305,7 +318,8 @@ bun upgrade --canary
|
||||
- [Set a time zone in Bun](https://bun.sh/guides/runtime/timezone)
|
||||
- [Set environment variables](https://bun.sh/guides/runtime/set-env)
|
||||
|
||||
- Streams
|
||||
- [Convert a Node.js Readable to a Blob](https://bun.sh/guides/streams/node-readable-to-blob)
|
||||
- [Convert a Node.js Readable to a string](https://bun.sh/guides/streams/node-readable-to-string)
|
||||
- [Convert a Node.js Readable to an ArrayBuffer](https://bun.sh/guides/streams/node-readable-to-arraybuffer)
|
||||
@@ -318,7 +332,8 @@ bun upgrade --canary
|
||||
- [Convert a ReadableStream to an ArrayBuffer](https://bun.sh/guides/streams/to-arraybuffer)
|
||||
- [Convert a ReadableStream to JSON](https://bun.sh/guides/streams/to-json)
|
||||
|
||||
- Test
|
||||
- [Bail early with the Bun test runner](https://bun.sh/guides/test/bail)
|
||||
- [Generate code coverage reports with the Bun test runner](https://bun.sh/guides/test/coverage)
|
||||
- [Mark a test as a "todo" with the Bun test runner](https://bun.sh/guides/test/todo-tests)
|
||||
@@ -336,7 +351,8 @@ bun upgrade --canary
|
||||
- [Use snapshot testing in `bun test`](https://bun.sh/guides/test/snapshot)
|
||||
- [Write browser DOM tests with Bun and happy-dom](https://bun.sh/guides/test/happy-dom)
|
||||
|
||||
- Util
|
||||
- [Check if the current file is the entrypoint](https://bun.sh/guides/util/entrypoint)
|
||||
- [Check if two objects are deeply equal](https://bun.sh/guides/util/deep-equals)
|
||||
- [Compress and decompress data with DEFLATE](https://bun.sh/guides/util/deflate)
|
||||
@@ -355,13 +371,14 @@ bun upgrade --canary
|
||||
- [Hash a password](https://bun.sh/guides/util/hash-a-password)
|
||||
- [Sleep for a fixed number of milliseconds](https://bun.sh/guides/util/sleep)
|
||||
|
||||
- WebSocket
|
||||
- [Build a publish-subscribe WebSocket server](https://bun.sh/guides/websocket/pubsub)
|
||||
- [Build a simple WebSocket server](https://bun.sh/guides/websocket/simple)
|
||||
- [Enable compression for WebSocket messages](https://bun.sh/guides/websocket/compression)
|
||||
- [Set per-socket contextual data on a WebSocket](https://bun.sh/guides/websocket/context)
|
||||
|
||||
- Write file
- [Append content to a file](https://bun.sh/guides/write-file/append)
|
||||
- [Copy a file to another location](https://bun.sh/guides/write-file/file-cp)
|
||||
- [Delete a file](https://bun.sh/guides/write-file/unlink)
|
||||
|
||||
27
bench/deepEqual/map.js
Normal file
@@ -0,0 +1,27 @@
|
||||
import { bench, run } from "mitata";
|
||||
import { expect } from "bun:test";
|
||||
|
||||
const MAP_SIZE = 10_000;
|
||||
|
||||
function* genPairs(count) {
|
||||
for (let i = 0; i < MAP_SIZE; i++) {
|
||||
yield ["k" + i, "v" + i];
|
||||
}
|
||||
}
|
||||
|
||||
class CustomMap extends Map {
|
||||
abc = 123;
|
||||
constructor(iterable) {
|
||||
super(iterable);
|
||||
}
|
||||
}
|
||||
|
||||
const a = new Map(genPairs());
|
||||
const b = new Map(genPairs());
|
||||
bench("deepEqual Map", () => expect(a).toEqual(b));
|
||||
|
||||
const x = new CustomMap(genPairs());
|
||||
const y = new CustomMap(genPairs());
|
||||
bench("deepEqual CustomMap", () => expect(x).toEqual(y));
|
||||
|
||||
await run();
|
||||
27
bench/deepEqual/set.js
Normal file
@@ -0,0 +1,27 @@
|
||||
import { bench, run } from "mitata";
|
||||
import { expect } from "bun:test";
|
||||
|
||||
const SET_SIZE = 10_000;
|
||||
|
||||
function* genValues(count) {
|
||||
for (let i = 0; i < SET_SIZE; i++) {
|
||||
yield "v" + i;
|
||||
}
|
||||
}
|
||||
|
||||
class CustomSet extends Set {
|
||||
abc = 123;
|
||||
constructor(iterable) {
|
||||
super(iterable);
|
||||
}
|
||||
}
|
||||
|
||||
const a = new Set(genValues());
|
||||
const b = new Set(genValues());
|
||||
bench("deepEqual Set", () => expect(a).toEqual(b));
|
||||
|
||||
const x = new CustomSet(genValues());
|
||||
const y = new CustomSet(genValues());
|
||||
bench("deepEqual CustomSet", () => expect(x).toEqual(y));
|
||||
|
||||
await run();
|
||||
55
bench/snippets/text-decoder-stream.mjs
Normal file
@@ -0,0 +1,55 @@
|
||||
import { bench, run } from "./runner.mjs";
|
||||
|
||||
const latin1 = `hello hello hello!!!! `.repeat(10240);
|
||||
|
||||
function create(src) {
|
||||
function split(str, chunkSize) {
|
||||
let chunkedHTML = [];
|
||||
let html = str;
|
||||
const encoder = new TextEncoder();
|
||||
while (html.length > 0) {
|
||||
chunkedHTML.push(encoder.encode(html.slice(0, chunkSize)));
|
||||
html = html.slice(chunkSize);
|
||||
}
|
||||
return chunkedHTML;
|
||||
}
|
||||
|
||||
async function runBench(chunks) {
|
||||
const decoder = new TextDecoderStream();
|
||||
const stream = new ReadableStream({
|
||||
pull(controller) {
|
||||
for (let chunk of chunks) {
|
||||
controller.enqueue(chunk);
|
||||
}
|
||||
controller.close();
|
||||
},
|
||||
}).pipeThrough(decoder);
|
||||
for (let reader = stream.getReader(); ; ) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if (new TextDecoder().decode(await runBench(oneKB)) !== src) {
|
||||
// throw new Error("Benchmark failed");
|
||||
// }
|
||||
const sizes = [16 * 1024, 64 * 1024, 256 * 1024];
|
||||
for (const chunkSize of sizes) {
|
||||
const text = split(src, chunkSize);
|
||||
bench(
|
||||
`${Math.round(src.length / 1024)} KB of text in ${Math.round(chunkSize / 1024) > 0 ? Math.round(chunkSize / 1024) : (chunkSize / 1024).toFixed(2)} KB chunks`,
|
||||
async () => {
|
||||
await runBench(text);
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
create(latin1);
|
||||
create(
|
||||
// bun's old readme was extremely long
|
||||
await fetch("https://web.archive.org/web/20230119110956/https://github.com/oven-sh/bun").then(res => res.text()),
|
||||
);
|
||||
|
||||
await run();
|
||||
49
bench/snippets/text-encoder-stream.mjs
Normal file
@@ -0,0 +1,49 @@
|
||||
import { bench, run } from "./runner.mjs";
|
||||
|
||||
const latin1 = `hello hello hello!!!! `.repeat(10240);
|
||||
|
||||
function create(src) {
|
||||
function split(str, chunkSize) {
|
||||
let chunkedHTML = [];
|
||||
let html = str;
|
||||
while (html.length > 0) {
|
||||
chunkedHTML.push(html.slice(0, chunkSize));
|
||||
html = html.slice(chunkSize);
|
||||
}
|
||||
return chunkedHTML;
|
||||
}
|
||||
|
||||
async function runBench(chunks) {
|
||||
const encoderStream = new TextEncoderStream();
|
||||
const stream = new ReadableStream({
|
||||
pull(controller) {
|
||||
for (let chunk of chunks) {
|
||||
controller.enqueue(chunk);
|
||||
}
|
||||
controller.close();
|
||||
},
|
||||
}).pipeThrough(encoderStream);
|
||||
return await new Response(stream).bytes();
|
||||
}
|
||||
|
||||
// if (new TextDecoder().decode(await runBench(oneKB)) !== src) {
|
||||
// throw new Error("Benchmark failed");
|
||||
// }
|
||||
const sizes = [1024, 16 * 1024, 64 * 1024, 256 * 1024];
|
||||
for (const chunkSize of sizes) {
|
||||
const text = split(src, chunkSize);
|
||||
bench(
|
||||
`${Math.round(src.length / 1024)} KB of text in ${Math.round(chunkSize / 1024) > 0 ? Math.round(chunkSize / 1024) : (chunkSize / 1024).toFixed(2)} KB chunks`,
|
||||
async () => {
|
||||
await runBench(text);
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
create(latin1);
|
||||
create(
|
||||
// bun's old readme was extremely long
|
||||
await fetch("https://web.archive.org/web/20230119110956/https://github.com/oven-sh/bun").then(res => res.text()),
|
||||
);
|
||||
|
||||
await run();
|
||||
10
build.zig
@@ -89,10 +89,8 @@ const BunBuildOptions = struct {
|
||||
|
||||
pub fn getOSVersionMin(os: OperatingSystem) ?Target.Query.OsVersion {
|
||||
return switch (os) {
|
||||
// bun needs macOS 12 to work properly due to icucore, but we have been
|
||||
// compiling everything with 11 as the minimum.
|
||||
.mac => .{
|
||||
.semver = .{ .major = 11, .minor = 0, .patch = 0 },
|
||||
.semver = .{ .major = 13, .minor = 0, .patch = 0 },
|
||||
},
|
||||
|
||||
// Windows 10 1809 is the minimum supported version
|
||||
@@ -453,6 +451,12 @@ fn addInternalPackages(b: *Build, obj: *Compile, opts: *BunBuildOptions) void {
|
||||
.root_source_file = .{ .cwd_relative = resolved_source_tag_path },
|
||||
});
|
||||
|
||||
const error_code_path = b.pathJoin(&.{ opts.generated_code_dir, "ErrorCode.zig" });
|
||||
validateGeneratedPath(error_code_path);
|
||||
obj.root_module.addAnonymousImport("ErrorCode", .{
|
||||
.root_source_file = .{ .cwd_relative = error_code_path },
|
||||
});
|
||||
|
||||
if (os == .windows) {
|
||||
obj.root_module.addAnonymousImport("bun_shim_impl.exe", .{
|
||||
.root_source_file = opts.windowsShim(b).exe.getEmittedBin(),
|
||||
|
||||
@@ -82,7 +82,7 @@ _bun_completions() {
|
||||
declare -A PACKAGE_OPTIONS;
|
||||
declare -A PM_OPTIONS;
|
||||
|
||||
local SUBCOMMANDS="dev bun create run install add remove upgrade completions discord help init pm x test repl update link unlink build";
|
||||
local SUBCOMMANDS="dev bun create run install add remove upgrade completions discord help init pm x test repl update outdated link unlink build";
|
||||
|
||||
GLOBAL_OPTIONS[LONG_OPTIONS]="--use --cwd --bunfile --server-bunfile --config --disable-react-fast-refresh --disable-hmr --env-file --extension-order --jsx-factory --jsx-fragment --extension-order --jsx-factory --jsx-fragment --jsx-import-source --jsx-production --jsx-runtime --main-fields --no-summary --version --platform --public-dir --tsconfig-override --define --external --help --inject --loader --origin --port --dump-environment-variables --dump-limits --disable-bun-js";
|
||||
GLOBAL_OPTIONS[SHORT_OPTIONS]="-c -v -d -e -h -i -l -u -p";
|
||||
|
||||
@@ -179,6 +179,7 @@ complete -c bun -n "__fish_use_subcommand" -a "remove" -d "Remove a dependency f
|
||||
complete -c bun -n "__fish_use_subcommand" -a "add" -d "Add a dependency to package.json" -f
|
||||
complete -c bun -n "__fish_use_subcommand" -a "init" -d "Initialize a Bun project in this directory" -f
|
||||
complete -c bun -n "__fish_use_subcommand" -a "link" -d "Register or link a local npm package" -f
|
||||
complete -c bun -n "__fish_use_subcommand" -a "link" -d "Unregister a local npm package" -f
|
||||
complete -c bun -n "__fish_use_subcommand" -a "unlink" -d "Unregister a local npm package" -f
|
||||
complete -c bun -n "__fish_use_subcommand" -a "pm" -d "Additional package management utilities" -f
|
||||
complete -c bun -n "__fish_use_subcommand" -a "x" -d "Execute a package binary, installing if needed" -f
|
||||
complete -c bun -n "__fish_use_subcommand" -a "outdated" -d "Display the latest versions of outdated dependencies" -f
|
||||
|
||||
@@ -563,6 +563,22 @@ _bun_update_completion() {
|
||||
esac
|
||||
}
|
||||
|
||||
_bun_outdated_completion() {
|
||||
_arguments -s -C \
|
||||
'--cwd[Set a specific cwd]:cwd' \
|
||||
'--verbose[Excessively verbose logging]' \
|
||||
'--no-progress[Disable the progress bar]' \
|
||||
'--help[Print this help menu]' &&
|
||||
ret=0
|
||||
|
||||
case $state in
|
||||
config)
|
||||
_bun_list_bunfig_toml
|
||||
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
_bun_test_completion() {
|
||||
_arguments -s -C \
|
||||
'1: :->cmd1' \
|
||||
@@ -669,6 +685,7 @@ _bun() {
|
||||
'add\:"Add a dependency to package.json (bun a)" '
|
||||
'remove\:"Remove a dependency from package.json (bun rm)" '
|
||||
'update\:"Update outdated dependencies & save to package.json" '
|
||||
'outdated\:"Display the latest versions of outdated dependencies" '
|
||||
'link\:"Link an npm package globally" '
|
||||
'unlink\:"Globally unlink an npm package" '
|
||||
'pm\:"More commands for managing packages" '
|
||||
@@ -740,6 +757,10 @@ _bun() {
|
||||
update)
|
||||
_bun_update_completion
|
||||
|
||||
;;
|
||||
outdated)
|
||||
_bun_outdated_completion
|
||||
|
||||
;;
|
||||
'test')
|
||||
_bun_test_completion
|
||||
@@ -819,6 +840,10 @@ _bun() {
|
||||
update)
|
||||
_bun_update_completion
|
||||
|
||||
;;
|
||||
outdated)
|
||||
_bun_outdated_completion
|
||||
|
||||
;;
|
||||
'test')
|
||||
_bun_test_completion
|
||||
|
||||
@@ -219,6 +219,11 @@ The following classes are typed arrays, along with a description of how they int
|
||||
|
||||
---
|
||||
|
||||
- [`Float16Array`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Float16Array)
|
||||
- Every two (2) bytes are interpreted as a 16-bit floating point number. Range -6.55e4 to 6.55e4.
|
||||
|
||||
---
|
||||
|
||||
- [`Float32Array`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Float32Array)
|
||||
- Every four (4) bytes are interpreted as a 32-bit floating point number. Range -3.4e38 to 3.4e38.
|
||||
|
||||
|
||||
@@ -42,7 +42,7 @@ const response = await fetch("http://example.com", {
|
||||
});
|
||||
```
|
||||
|
||||
`body` can be a string, a `FormData` object, an `ArrayBuffer`, a `Blob`, and more. See the [MDN documentation](https://developer.mozilla.org/en-US/docs/Web/API/Body/body) for more information.
|
||||
`body` can be a string, a `FormData` object, an `ArrayBuffer`, a `Blob`, and more. See the [MDN documentation](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API/Using_Fetch#setting_a_body) for more information.
|
||||
|
||||
### Proxying requests
|
||||
|
||||
|
||||
129
docs/api/http.md
@@ -70,6 +70,116 @@ const server = Bun.serve({
|
||||
});
|
||||
```
|
||||
|
||||
### Static routes
|
||||
|
||||
Use the `static` option to serve static `Response` objects by route.
|
||||
|
||||
```ts
|
||||
// Bun v1.1.27+ required
|
||||
Bun.serve({
|
||||
static: {
|
||||
// health-check endpoint
|
||||
"/api/health-check": new Response("All good!"),
|
||||
|
||||
// redirect from /old-link to /new-link
|
||||
"/old-link": Response.redirect("/new-link", 301),
|
||||
|
||||
// serve static text
|
||||
"/": new Response("Hello World"),
|
||||
|
||||
// serve a file by buffering it in memory
|
||||
"/index.html": new Response(await Bun.file("./index.html").bytes(), {
|
||||
headers: {
|
||||
"Content-Type": "text/html",
|
||||
},
|
||||
}),
|
||||
"/favicon.ico": new Response(await Bun.file("./favicon.ico").bytes(), {
|
||||
headers: {
|
||||
"Content-Type": "image/x-icon",
|
||||
},
|
||||
}),
|
||||
|
||||
// serve JSON
|
||||
"/api/version.json": Response.json({ version: "1.0.0" }),
|
||||
},
|
||||
|
||||
fetch(req) {
|
||||
return new Response("404!");
|
||||
},
|
||||
});
|
||||
```
|
||||
|
||||
Static routes support headers, status codes, and other `Response` options.
|
||||
|
||||
```ts
|
||||
Bun.serve({
|
||||
static: {
|
||||
"/api/time": new Response(new Date().toISOString(), {
|
||||
headers: {
|
||||
"X-Custom-Header": "Bun!",
|
||||
},
|
||||
}),
|
||||
},
|
||||
|
||||
fetch(req) {
|
||||
return new Response("404!");
|
||||
},
|
||||
});
|
||||
```
|
||||
|
||||
Static routes can serve `Response` bodies faster than `fetch` handlers because they don't create a `Request` object, an `AbortSignal`, or any additional `Response` objects per request. The only per-request memory allocation is the TCP/TLS socket data needed for each request.
|
||||
|
||||
{% note %}
|
||||
`static` is experimental
|
||||
{% /note %}
|
||||
|
||||
Static route responses are cached for the lifetime of the server object. To reload static routes, call `server.reload(options)`.
|
||||
|
||||
```ts
|
||||
const server = Bun.serve({
|
||||
static: {
|
||||
"/api/time": new Response(new Date().toISOString()),
|
||||
},
|
||||
|
||||
fetch(req) {
|
||||
return new Response("404!");
|
||||
},
|
||||
});
|
||||
|
||||
// Update the time every second.
|
||||
setInterval(() => {
|
||||
server.reload({
|
||||
static: {
|
||||
"/api/time": new Response(new Date().toISOString()),
|
||||
},
|
||||
|
||||
fetch(req) {
|
||||
return new Response("404!");
|
||||
},
|
||||
});
|
||||
}, 1000);
|
||||
```
|
||||
|
||||
Reloading static routes only impacts subsequent requests. In-flight requests continue to use the old static routes. Once all in-flight requests to the old static routes have finished, the old static routes are freed from memory.
|
||||
|
||||
To simplify error handling, static routes do not support streaming response bodies from `ReadableStream` or an `AsyncIterator`. Fortunately, you can still buffer the response in memory first:
|
||||
|
||||
```ts
|
||||
const time = await fetch("https://api.example.com/v1/data");
|
||||
// Buffer the response in memory first.
|
||||
const blob = await time.blob();
|
||||
|
||||
const server = Bun.serve({
|
||||
static: {
|
||||
"/api/data": new Response(blob),
|
||||
},
|
||||
|
||||
fetch(req) {
|
||||
return new Response("404!");
|
||||
},
|
||||
});
|
||||
```
|
||||
|
||||
### Changing the `port` and `hostname`
|
||||
|
||||
To configure which port and hostname the server will listen on, set `port` and `hostname` in the options object.
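For example, a minimal sketch (the `8080` port and `"0.0.0.0"` hostname below are arbitrary example values, not defaults):

```ts
Bun.serve({
  port: 8080, // listen on port 8080
  hostname: "0.0.0.0", // accept connections on all interfaces
  fetch(req) {
    return new Response("Hello from a configured host and port");
  },
});
```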
|
||||
@@ -326,7 +436,24 @@ Bun.serve({
|
||||
});
|
||||
```
|
||||
|
||||
## Object syntax
|
||||
## idleTimeout
|
||||
|
||||
To configure the idle timeout, set the `idleTimeout` field in Bun.serve.
|
||||
|
||||
```ts
|
||||
Bun.serve({
|
||||
// 10 seconds:
|
||||
idleTimeout: 10,
|
||||
|
||||
fetch(req) {
|
||||
return new Response("Bun!");
|
||||
},
|
||||
});
|
||||
```
|
||||
|
||||
This is the maximum amount of time a connection is allowed to be idle before the server closes it. A connection is idling if there is no data sent or received.
|
||||
|
||||
## export default syntax
|
||||
|
||||
Thus far, the examples on this page have used the explicit `Bun.serve` API. Bun also supports an alternate syntax.
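A brief sketch of that alternate form, assuming the file is executed directly with `bun run`: export a default object with a `fetch` handler and Bun starts the server without an explicit `Bun.serve` call.

```ts
// index.ts — run with `bun run index.ts`
export default {
  port: 3000, // optional; example value
  fetch(req: Request) {
    return new Response("Bun!");
  },
};
```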
|
||||
|
||||
|
||||
@@ -106,6 +106,31 @@ The `--minify` argument optimizes the size of the transpiled output code. If you
|
||||
|
||||
The `--sourcemap` argument embeds a sourcemap compressed with zstd, so that errors & stacktraces point to their original locations instead of the transpiled location. Bun will automatically decompress & resolve the sourcemap when an error occurs.
|
||||
|
||||
## Worker
|
||||
|
||||
To use workers in a standalone executable, add the worker's entrypoint to the CLI arguments:
|
||||
|
||||
```sh
|
||||
$ bun build --compile ./index.ts ./my-worker.ts --outfile myapp
|
||||
```
|
||||
|
||||
Then, reference the worker in your code:
|
||||
|
||||
```ts
|
||||
console.log("Hello from Bun!");
|
||||
|
||||
// Any of these will work:
|
||||
new Worker("./my-worker.ts");
|
||||
new Worker(new URL("./my-worker.ts", import.meta.url));
|
||||
new Worker(new URL("./my-worker.ts", import.meta.url).href);
|
||||
```
|
||||
|
||||
As of Bun v1.1.25, when you add multiple entrypoints to a standalone executable, they will be bundled separately into the executable.
|
||||
|
||||
In the future, we may automatically detect usages of statically-known paths in `new Worker(path)` and then bundle those into the executable, but for now, you'll need to add it to the shell command manually like the above example.
|
||||
|
||||
If you use a relative path to a file not included in the standalone executable, it will attempt to load that path from disk relative to the current working directory of the process (and then error if it doesn't exist).
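As a hedged illustration, the `./not-bundled.ts` path below is hypothetical and deliberately omitted from the `bun build --compile` arguments:

```ts
// Hypothetical: "./not-bundled.ts" was not listed as a CLI entrypoint, so the
// standalone executable looks for it on disk relative to process.cwd() at
// runtime and throws if the file does not exist there.
new Worker("./not-bundled.ts");
```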
|
||||
|
||||
## SQLite
|
||||
|
||||
You can use `bun:sqlite` imports with `bun build --compile`.
|
||||
@@ -179,6 +204,59 @@ console.log(addon.hello());
|
||||
|
||||
Unfortunately, if you're using `@mapbox/node-pre-gyp` or other similar tools, you'll need to make sure the `.node` file is directly required or it won't bundle correctly.
|
||||
|
||||
### Embed directories
|
||||
|
||||
To embed a directory with `bun build --compile`, use a shell glob in your `bun build` command:
|
||||
|
||||
```sh
|
||||
$ bun build --compile ./index.ts ./public/**/*.png
|
||||
```
|
||||
|
||||
Then, you can reference the files in your code:
|
||||
|
||||
```ts
|
||||
import icon from "./public/assets/icon.png" with { type: "file" };
|
||||
import { file } from "bun";
|
||||
|
||||
export default {
|
||||
fetch(req) {
|
||||
// Embedded files can be streamed from Response objects
|
||||
return new Response(file(icon));
|
||||
},
|
||||
};
|
||||
```
|
||||
|
||||
This is honestly a workaround, and we expect to improve this in the future with a more direct API.
|
||||
|
||||
### Listing embedded files
|
||||
|
||||
To get a list of all embedded files, use `Bun.embeddedFiles`:
|
||||
|
||||
```js
|
||||
import "./icon.png" with { type: "file" };
|
||||
import { embeddedFiles } from "bun";
|
||||
|
||||
console.log(embeddedFiles[0].name); // `icon-${hash}.png`
|
||||
```
|
||||
|
||||
`Bun.embeddedFiles` returns an array of `Blob` objects which you can use to get the size, contents, and other properties of the files.
|
||||
|
||||
```ts
|
||||
embeddedFiles: Blob[]
|
||||
```
|
||||
|
||||
The list of embedded files excludes bundled source code like `.ts` and `.js` files.
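For example, a small sketch that prints each embedded asset, assuming at least one asset (such as `icon.png`) is imported with `type: "file"`:

```js
import "./icon.png" with { type: "file" };
import { embeddedFiles } from "bun";

// Print the content-hashed name and size (in bytes) of every embedded asset.
for (const blob of embeddedFiles) {
  console.log(blob.name, blob.size);
}
```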
|
||||
|
||||
#### Content hash
|
||||
|
||||
By default, embedded files have a content hash appended to their name. This is useful for situations where you want to serve the file from a URL or CDN and have fewer cache invalidation issues. But sometimes this is unexpected, and you might want the original name instead.
|
||||
|
||||
To disable the content hash, pass `--asset-naming` to `bun build --compile` like this:
|
||||
|
||||
```sh
|
||||
$ bun build --compile --asset-naming="[name].[ext]" ./index.ts
|
||||
```
|
||||
|
||||
## Minification
|
||||
|
||||
To trim down the size of the executable a little, pass `--minify` to `bun build --compile`. This uses Bun's minifier to reduce the code size. Overall though, Bun's binary is still way too big and we need to make it smaller.
|
||||
|
||||
@@ -1276,7 +1276,7 @@ interface BuildOptions {
|
||||
loader?: { [k in string]: Loader }; // See https://bun.sh/docs/bundler/loaders
|
||||
manifest?: boolean; // false
|
||||
external?: string[]; // []
|
||||
sourcemap?: "none" | "inline" | "linked" | "external" | boolean; // "none"
|
||||
sourcemap?: "none" | "inline" | "linked" | "external" | "linked" | boolean; // "none"
|
||||
root?: string; // computed from entrypoints
|
||||
naming?:
|
||||
| string
|
||||
|
||||
@@ -208,8 +208,7 @@ In Bun's CLI, simple boolean flags like `--minify` do not accept an argument. Ot
|
||||
---
|
||||
|
||||
- `--ignore-annotations`
|
||||
- n/a
|
||||
- Not supported
|
||||
- `--ignore-dce-annotations`
|
||||
|
||||
---
|
||||
|
||||
|
||||
61
docs/cli/outdated.md
Normal file
@@ -0,0 +1,61 @@
|
||||
Use `bun outdated` to display a table of outdated dependencies with their latest versions for the current workspace:
|
||||
|
||||
```sh
|
||||
$ bun outdated
|
||||
|
||||
|--------------------------------------------------------------------|
|
||||
| Package | Current | Update | Latest |
|
||||
|----------------------------------------|---------|--------|--------|
|
||||
| @types/bun (dev) | 1.1.6 | 1.1.7 | 1.1.7 |
|
||||
|----------------------------------------|---------|--------|--------|
|
||||
| @types/react (dev) | 18.3.3 | 18.3.4 | 18.3.4 |
|
||||
|----------------------------------------|---------|--------|--------|
|
||||
| @typescript-eslint/eslint-plugin (dev) | 7.16.1 | 7.18.0 | 8.2.0 |
|
||||
|----------------------------------------|---------|--------|--------|
|
||||
| @typescript-eslint/parser (dev) | 7.16.1 | 7.18.0 | 8.2.0 |
|
||||
|----------------------------------------|---------|--------|--------|
|
||||
| esbuild (dev) | 0.21.5 | 0.21.5 | 0.23.1 |
|
||||
|----------------------------------------|---------|--------|--------|
|
||||
| eslint (dev) | 9.7.0 | 9.9.1 | 9.9.1 |
|
||||
|----------------------------------------|---------|--------|--------|
|
||||
| typescript (dev) | 5.5.3 | 5.5.4 | 5.5.4 |
|
||||
|--------------------------------------------------------------------|
|
||||
```
|
||||
|
||||
The `Update` column shows the version that would be installed if you ran `bun update [package]`. This version is the latest version that satisfies the version range specified in your `package.json`.
|
||||
|
||||
The `Latest` column shows the latest version available from the registry. `bun update --latest [package]` will update to this version.
|
||||
|
||||
Dependency names can be provided to filter the output (pattern matching is supported):
|
||||
|
||||
```sh
|
||||
$ bun outdated "@types/*"
|
||||
|
||||
|------------------------------------------------|
|
||||
| Package | Current | Update | Latest |
|
||||
|--------------------|---------|--------|--------|
|
||||
| @types/bun (dev) | 1.1.6 | 1.1.8 | 1.1.8 |
|
||||
|--------------------|---------|--------|--------|
|
||||
| @types/react (dev) | 18.3.3 | 18.3.4 | 18.3.4 |
|
||||
|------------------------------------------------|
|
||||
```
|
||||
|
||||
## `--filter`
|
||||
|
||||
The `--filter` flag can be used to select workspaces to include in the output. Workspace names or paths can be used as patterns.
|
||||
|
||||
```sh
|
||||
$ bun outdated --filter <pattern>
|
||||
```
|
||||
|
||||
For example, to only show outdated dependencies for workspaces in the `./apps` directory:
|
||||
|
||||
```sh
|
||||
$ bun outdated --filter './apps/*'
|
||||
```
|
||||
|
||||
If you want to do the same, but exclude the `./apps/api` workspace:
|
||||
|
||||
```sh
|
||||
$ bun outdated --filter './apps/*' --filter '!./apps/api'
|
||||
```
|
||||
@@ -10,7 +10,7 @@ This automatically load balances incoming requests across multiple instances of
|
||||
```ts#server.ts
|
||||
import { serve } from "bun";
|
||||
|
||||
const id = = Math.random().toString(36).slice(2);
|
||||
const id = Math.random().toString(36).slice(2);
|
||||
|
||||
serve({
|
||||
port: process.env.PORT || 8080,
|
||||
|
||||
@@ -5,8 +5,8 @@ name: Define and replace static globals & constants
|
||||
The `--define` flag lets you declare statically-analyzable constants and globals. It replaces all usages of an identifier or property in a JavaScript or TypeScript file with a constant value. This feature is supported at runtime and also in `bun build`. It is loosely similar to `#define` in C/C++, but for JavaScript.
|
||||
|
||||
```ts
|
||||
bun --define:process.env.NODE_ENV="'production'" src/index.ts # Runtime
|
||||
bun build --define:process.env.NODE_ENV="'production'" src/index.ts # Build
|
||||
bun --define process.env.NODE_ENV="'production'" src/index.ts # Runtime
|
||||
bun build --define process.env.NODE_ENV="'production'" src/index.ts # Build
|
||||
```
|
||||
|
||||
---
|
||||
@@ -95,7 +95,7 @@ To replace all usages of `AWS` with the JSON object `{"ACCESS_KEY":"abc","SECRET
|
||||
|
||||
```sh
|
||||
# JSON
|
||||
bun --define:AWS='{"ACCESS_KEY":"abc","SECRET_KEY":"def"}' src/index.ts
|
||||
bun --define AWS='{"ACCESS_KEY":"abc","SECRET_KEY":"def"}' src/index.ts
|
||||
```
|
||||
|
||||
Those will be transformed into the equivalent JavaScript code.
|
||||
@@ -119,7 +119,7 @@ You can also pass properties to the `--define` flag.
|
||||
For example, to replace all usages of `console.write` with `console.log`, you can use the following command (requires Bun v1.1.5 or later)
|
||||
|
||||
```sh
|
||||
bun --define:console.write=console.log src/index.ts
|
||||
bun --define console.write=console.log src/index.ts
|
||||
```
|
||||
|
||||
That transforms the following input:
|
||||
|
||||
@@ -27,6 +27,28 @@ data.version; // => "1.0.0"
|
||||
data.author.name; // => "John Dough"
|
||||
```
|
||||
|
||||
Bun also supports [Import Attributes](https://github.com/tc39/proposal-import-attributes/) and [JSON modules](https://github.com/tc39/proposal-json-modules) syntax.
|
||||
|
||||
```ts
|
||||
import data from "./package.json" with { type: "json" };
|
||||
|
||||
data.name; // => "bun"
|
||||
data.version; // => "1.0.0"
|
||||
data.author.name; // => "John Dough"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
See [Docs > Runtime > TypeScript](/docs/runtime/typescript) for more information on using TypeScript with Bun.
|
||||
|
||||
@@ -16,10 +16,6 @@ $ bun test # run tests
|
||||
$ bunx cowsay 'Hello, world!' # execute a package
|
||||
```
|
||||
|
||||
{% callout type="note" %}
|
||||
**Bun is still under development.** Use it to speed up your development workflows or run simpler production code in resource-constrained environments like serverless functions. We're working on more complete Node.js compatibility and integration with existing frameworks. Join the [Discord](https://bun.sh/discord) and watch the [GitHub repository](https://github.com/oven-sh/bun) to keep tabs on future releases.
|
||||
{% /callout %}
|
||||
|
||||
Get started with one of the quick links below, or read on to learn more about Bun.
|
||||
|
||||
{% block className="gap-2 grid grid-flow-row grid-cols-1 md:grid-cols-2" %}
|
||||
|
||||
@@ -164,6 +164,9 @@ export default {
|
||||
page("cli/update", "`bun update`", {
|
||||
description: "Update your project's dependencies.",
|
||||
}),
|
||||
page("cli/outdated", "`bun outdated`", {
|
||||
description: "Check for outdated dependencies.",
|
||||
}),
|
||||
page("cli/link", "`bun link`", {
|
||||
description: "Install local packages as dependencies in your project.",
|
||||
}),
|
||||
|
||||
@@ -171,6 +171,8 @@ Once imported, you should see something like this:
|
||||
|
||||
{% image alt="Viewing heap snapshot in Safari" src="https://user-images.githubusercontent.com/709451/204429337-b0d8935f-3509-4071-b991-217794d1fb27.png" caption="Viewing heap snapshot in Safari Dev Tools" /%}
|
||||
|
||||
> The [web debugger](https://bun.sh/docs/runtime/debugger#inspect) also offers the timeline feature which allows you to track and examine the memory usage of the running debug session.
|
||||
|
||||
### Native heap stats
|
||||
|
||||
Bun uses mimalloc for its non-JavaScript (native) heap. To report a summary of non-JavaScript memory usage, set the `MIMALLOC_SHOW_STATS=1` environment variable, and stats will print on exit.
|
||||
|
||||
@@ -22,7 +22,7 @@ This page is updated regularly to reflect compatibility status of the latest ver
|
||||
|
||||
### [`node:cluster`](https://nodejs.org/api/cluster.html)
|
||||
|
||||
🔴 Not implemented.
|
||||
🟡 Handles and file descriptors cannot be passed between workers, which means load-balancing HTTP requests across processes is only supported on Linux at this time (via `SO_REUSEPORT`). Otherwise, implemented but not battle-tested.
|
||||
|
||||
### [`node:console`](https://nodejs.org/api/console.html)
|
||||
|
||||
@@ -341,7 +341,7 @@ The table below lists all globals implemented by Node.js and Bun's current compa
|
||||
|
||||
### [`process`](https://nodejs.org/api/process.html)
|
||||
|
||||
🟡 Missing `domain` `initgroups` `setegid` `seteuid` `setgid` `setgroups` `setuid` `allowedNodeEnvironmentFlags` `getActiveResourcesInfo` `setActiveResourcesInfo` `moduleLoadList` `setSourceMapsEnabled` `channel`. `process.binding` is partially implemented.
|
||||
🟡 Missing `domain` `initgroups` `setegid` `seteuid` `setgid` `setgroups` `setuid` `allowedNodeEnvironmentFlags` `getActiveResourcesInfo` `setActiveResourcesInfo` `moduleLoadList` `setSourceMapsEnabled`. `process.binding` is partially implemented.
|
||||
|
||||
### [`queueMicrotask()`](https://developer.mozilla.org/en-US/docs/Web/API/queueMicrotask)
|
||||
|
||||
@@ -413,7 +413,7 @@ The table below lists all globals implemented by Node.js and Bun's current compa
|
||||
|
||||
### [`TextDecoderStream`](https://developer.mozilla.org/en-US/docs/Web/API/TextDecoderStream)
|
||||
|
||||
🔴 Not implemented.
|
||||
🟢 Fully implemented.
|
||||
|
||||
### [`TextEncoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder)
|
||||
|
||||
@@ -421,7 +421,7 @@ The table below lists all globals implemented by Node.js and Bun's current compa
|
||||
|
||||
### [`TextEncoderStream`](https://developer.mozilla.org/en-US/docs/Web/API/TextEncoderStream)
|
||||
|
||||
🔴 Not implemented.
|
||||
🟢 Fully implemented.
|
||||
|
||||
### [`TransformStream`](https://developer.mozilla.org/en-US/docs/Web/API/TransformStream)
|
||||
|
||||
|
||||
@@ -235,6 +235,55 @@ const result = await $`cat < ${response} | wc -w`.text();
|
||||
console.log(result); // 6\n
|
||||
```
|
||||
|
||||
## Command substitution (`$(...)`)
|
||||
|
||||
Command substitution allows you to substitute the output of another script into the current script:
|
||||
|
||||
```js
|
||||
import { $ } from "bun";
|
||||
|
||||
// Prints out the hash of the current commit
|
||||
await $`echo Hash of current commit: $(git rev-parse HEAD)`;
|
||||
```
|
||||
|
||||
This is a textual insertion of the command's output and can be used to, for example, declare a shell variable:
|
||||
|
||||
```js
|
||||
import { $ } from "bun";
|
||||
|
||||
await $`
|
||||
REV=$(git rev-parse HEAD)
|
||||
docker build -t myapp:$REV
|
||||
echo Done building docker image "myapp:$REV"
|
||||
`;
|
||||
```
|
||||
|
||||
{% callout %}
|
||||
|
||||
**NOTE**: Because Bun internally uses the special [`raw`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Template_literals#raw_strings) property on the input template literal, using the backtick syntax for command substitution won't work:
|
||||
|
||||
```js
|
||||
import { $ } from "bun";
|
||||
|
||||
await $`echo \`echo hi\``;
|
||||
```
|
||||
|
||||
Instead of printing:
|
||||
|
||||
```
|
||||
hi
|
||||
```
|
||||
|
||||
The above will print out:
|
||||
|
||||
```
|
||||
echo hi
|
||||
```
|
||||
|
||||
We instead recommend sticking to the `$(...)` syntax.
|
||||
|
||||
{% /callout %}
|
||||
|
||||
## Environment variables
|
||||
|
||||
Environment variables can be set like in bash:
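A minimal sketch, assuming the inline `FOO=bar` prefix behaves as it does in bash:

```js
import { $ } from "bun";

// FOO is set only for this command, like in bash.
await $`FOO=bar bun -e 'console.log(process.env.FOO)'`; // bar
```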
|
||||
|
||||
@@ -27,7 +27,8 @@ The following Web APIs are partially or completely supported.
|
||||
[`self.postMessage`](https://developer.mozilla.org/en-US/docs/Web/API/DedicatedWorkerGlobalScope/postMessage)
|
||||
[`structuredClone`](https://developer.mozilla.org/en-US/docs/Web/API/structuredClone)
|
||||
[`MessagePort`](https://developer.mozilla.org/en-US/docs/Web/API/MessagePort)
|
||||
[`MessageChannel`](https://developer.mozilla.org/en-US/docs/Web/API/MessageChannel), [`BroadcastChannel`](https://developer.mozilla.org/en-US/docs/Web/API/BroadcastChannel).
|
||||
[`MessageChannel`](https://developer.mozilla.org/en-US/docs/Web/API/MessageChannel)
|
||||
[`BroadcastChannel`](https://developer.mozilla.org/en-US/docs/Web/API/BroadcastChannel).
|
||||
|
||||
---
|
||||
|
||||
@@ -70,7 +71,8 @@ The following Web APIs are partially or completely supported.
|
||||
---
|
||||
|
||||
- Intervals
|
||||
- [`setInterval`](https://developer.mozilla.org/en-US/docs/Web/API/setInterval)[`clearInterval`](https://developer.mozilla.org/en-US/docs/Web/API/clearInterval)
|
||||
- [`setInterval`](https://developer.mozilla.org/en-US/docs/Web/API/setInterval)
|
||||
[`clearInterval`](https://developer.mozilla.org/en-US/docs/Web/API/clearInterval)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -29,7 +29,7 @@
|
||||
"bump": "bun ./scripts/bump.ts",
|
||||
"build": "if [ ! -e build ]; then bun setup; fi && ninja -C build",
|
||||
"build:valgrind": "cmake . -DZIG_OPTIMIZE=Debug -DUSE_DEBUG_JSC=ON -DCMAKE_BUILD_TYPE=Debug -GNinja -Bbuild-valgrind && ninja -Cbuild-valgrind",
|
||||
"build:tidy": "BUN_SILENT=1 cmake --log-level=WARNING . -DZIG_OPTIMIZE=Debug -DUSE_DEBUG_JSC=ON -DBUN_TIDY_ONLY=ON -DCMAKE_BUILD_TYPE=Debug -GNinja -Bbuild-tidy >> ${GITHUB_STEP_SUMMARY:-/dev/stdout} && BUN_SILENT=1 ninja -Cbuild-tidy >> ${GITHUB_STEP_SUMMARY:-/dev/stdout}",
|
||||
"build:tidy": "bash ./scripts/env.sh && BUN_SILENT=1 cmake --log-level=WARNING . ${CMAKE_FLAGS[@]} -DZIG_OPTIMIZE=Debug -DUSE_DEBUG_JSC=ON -DBUN_TIDY_ONLY=ON -DCMAKE_BUILD_TYPE=Debug -GNinja -Bbuild-tidy && BUN_SILENT=1 ninja -Cbuild-tidy",
|
||||
"build:tidy-extra": "cmake . -DZIG_OPTIMIZE=Debug -DUSE_DEBUG_JSC=ON -DBUN_TIDY_ONLY_EXTRA=ON -DCMAKE_BUILD_TYPE=Debug -GNinja -Bbuild-tidy && ninja -Cbuild-tidy",
|
||||
"build:release": "cmake . -DCMAKE_BUILD_TYPE=Release -GNinja -Bbuild-release && ninja -Cbuild-release",
|
||||
"build:release:local": "cmake . -DCMAKE_BUILD_TYPE=Release -DWEBKIT_DIR=$(pwd)/src/bun.js/WebKit/WebKitBuild/Release -GNinja -Bbuild-release-local && ninja -Cbuild-release-local",
|
||||
@@ -37,6 +37,7 @@
|
||||
"build:debug-zig-release": "cmake . -DCMAKE_BUILD_TYPE=Release -DZIG_OPTIMIZE=Debug -GNinja -Bbuild-debug-zig-release && ninja -Cbuild-debug-zig-release",
|
||||
"build:safe": "cmake . -DZIG_OPTIMIZE=ReleaseSafe -DUSE_DEBUG_JSC=ON -DCMAKE_BUILD_TYPE=Release -GNinja -Bbuild-safe && ninja -Cbuild-safe",
|
||||
"build:windows": "cmake -B build -S . -G Ninja -DCMAKE_BUILD_TYPE=Debug && ninja -Cbuild",
|
||||
"build:windows:release": "cmake -B build-release -S . -G Ninja -DCMAKE_BUILD_TYPE=Release && ninja -Cbuild-release",
|
||||
"typecheck": "tsc --noEmit && cd test && bun run typecheck",
|
||||
"fmt": "prettier --write --cache './{.vscode,src,test,bench,packages/{bun-types,bun-inspector-*,bun-vscode,bun-debug-adapter-protocol}}/**/*.{mjs,ts,tsx,js,jsx}'",
|
||||
"fmt:zig": "zig fmt src/*.zig src/*/*.zig src/*/*/*.zig src/*/*/*/*.zig",
|
||||
|
||||
@@ -13,5 +13,6 @@
|
||||
"std.StringArrayHashMap(": "bun.StringArrayHashMap has a faster `eql`",
|
||||
"std.StringHashMapUnmanaged(": "bun.StringHashMapUnmanaged has a faster `eql`",
|
||||
"std.StringHashMap(": "bun.StringHashMaphas a faster `eql`",
|
||||
"std.enums.tagName(": "Use bun.tagName instead",
|
||||
"": ""
|
||||
}
|
||||
|
||||
@@ -239,7 +239,7 @@ Starting "${testFileName}"
|
||||
GITHUB_ACTIONS: process.env.GITHUB_ACTIONS ?? "true",
|
||||
BUN_DEBUG_QUIET_LOGS: "1",
|
||||
BUN_INSTALL_CACHE_DIR: join(TMPDIR, ".bun-install-cache"),
|
||||
BUN_ENABLE_CRASH_REPORTING: "1",
|
||||
BUN_ENABLE_CRASH_REPORTING: "0",
|
||||
[windows ? "TEMP" : "TMPDIR"]: TMPDIR,
|
||||
},
|
||||
});
|
||||
|
||||
@@ -173,6 +173,14 @@ function publishModule(name: string, dryRun?: boolean): void {
|
||||
);
|
||||
error(stderr || stdout);
|
||||
if (exitCode !== 0) {
|
||||
if (
|
||||
stdout.includes("You cannot publish over the previously published version") ||
|
||||
stderr.includes("You cannot publish over the previously published version")
|
||||
) {
|
||||
console.warn("Ignoring npm publish error:", stdout, stderr);
|
||||
return;
|
||||
}
|
||||
|
||||
throw new Error("npm publish failed with code " + exitCode);
|
||||
}
|
||||
} else {
|
||||
|
||||
57
packages/bun-types/bun.d.ts
vendored
@@ -1521,7 +1521,7 @@ declare module "bun" {
|
||||
define?: Record<string, string>;
|
||||
// origin?: string; // e.g. http://mydomain.com
|
||||
loader?: { [k in string]: Loader };
|
||||
sourcemap?: "none" | "linked" | "inline" | "external"; // default: "none", true -> "inline"
|
||||
sourcemap?: "none" | "linked" | "inline" | "external" | "linked"; // default: "none", true -> "inline"
|
||||
/**
|
||||
* package.json `exports` conditions used when resolving imports
|
||||
*
|
||||
@@ -1537,6 +1537,16 @@ declare module "bun" {
|
||||
syntax?: boolean;
|
||||
identifiers?: boolean;
|
||||
};
|
||||
/**
|
||||
* Ignore dead code elimination/tree-shaking annotations such as @__PURE__ and package.json
|
||||
* "sideEffects" fields. This should only be used as a temporary workaround for incorrect
|
||||
* annotations in libraries.
|
||||
*/
|
||||
ignoreDCEAnnotations?: boolean;
|
||||
/**
|
||||
* Force emitting @__PURE__ annotations even if minify.whitespace is true.
|
||||
*/
|
||||
emitDCEAnnotations?: boolean;
|
||||
// treeshaking?: boolean;
|
||||
|
||||
// jsx?:
|
||||
@@ -2288,6 +2298,26 @@ declare module "bun" {
|
||||
* This string will currently do nothing. But in the future it could be useful for logs or metrics.
|
||||
*/
|
||||
id?: string | null;
|
||||
|
||||
/**
|
||||
* Serve static Response objects by route.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* Bun.serve({
|
||||
* static: {
|
||||
* "/": new Response("Hello World"),
|
||||
* "/about": new Response("About"),
|
||||
* },
|
||||
* fetch(req) {
|
||||
* return new Response("Fallback response");
|
||||
* },
|
||||
* });
|
||||
* ```
|
||||
*
|
||||
* @experimental
|
||||
*/
|
||||
static?: Record<`/${string}`, Response>;
|
||||
}
|
||||
|
||||
interface ServeOptions extends GenericServeOptions {
|
||||
@@ -2332,6 +2362,14 @@ declare module "bun" {
|
||||
*/
|
||||
unix?: never;
|
||||
|
||||
/**
|
||||
* Sets the number of seconds to wait before timing out a connection
|
||||
* due to inactivity.
|
||||
*
|
||||
* Default is `10` seconds.
|
||||
*/
|
||||
idleTimeout?: number;
|
||||
|
||||
/**
|
||||
* Handle HTTP requests
|
||||
*
|
||||
@@ -2749,6 +2787,16 @@ declare module "bun" {
|
||||
compress?: boolean,
|
||||
): ServerWebSocketSendStatus;
|
||||
|
||||
/**
|
||||
* A count of connections subscribed to a given topic
|
||||
*
|
||||
* This operation will loop through each topic internally to get the count.
|
||||
*
|
||||
* @param topic the websocket topic to check how many subscribers are connected to
|
||||
* @returns the number of subscribers
|
||||
*/
|
||||
subscriberCount(topic: string): number;
|
||||
|
||||
/**
|
||||
* Returns the client IP address and port of the given Request. If the request was closed or is a unix socket, returns null.
|
||||
*
|
||||
@@ -2850,6 +2898,13 @@ declare module "bun" {
|
||||
// tslint:disable-next-line:unified-signatures
|
||||
function file(path: string | URL, options?: BlobPropertyBag): BunFile;
|
||||
|
||||
/**
|
||||
* A list of files embedded into the standalone executable. Lexicographically sorted by name.
|
||||
*
|
||||
* If the process is not a standalone executable, this returns an empty array.
|
||||
*/
|
||||
const embeddedFiles: ReadonlyArray<Blob>;
|
||||
|
||||
/**
|
||||
* `Blob` that leverages the fastest system calls available to operate on files.
|
||||
*
|
||||
|
||||
5
packages/bun-types/globals.d.ts
vendored
@@ -955,6 +955,7 @@ declare global {
|
||||
ref(): Timer;
|
||||
unref(): Timer;
|
||||
hasRef(): boolean;
|
||||
refresh(): Timer
|
||||
|
||||
[Symbol.toPrimitive](): number;
|
||||
}
|
||||
@@ -1917,6 +1918,10 @@ declare global {
|
||||
* closely to the `BodyMixin` API.
|
||||
*/
|
||||
formData(): Promise<FormData>;
|
||||
/**
|
||||
* Returns a promise that resolves to the contents of the blob as a Uint8Array (array of bytes); it's the same as `new Uint8Array(await blob.arrayBuffer())`
|
||||
*/
|
||||
bytes(): Promise<Uint8Array>;
|
||||
}
|
||||
var Blob: typeof globalThis extends {
|
||||
onerror: any;
|
||||
|
||||
16
packages/bun-types/sqlite.d.ts
vendored
@@ -536,7 +536,7 @@ declare module "bun:sqlite" {
|
||||
* // => [{bar: "baz"}]
|
||||
*
|
||||
* stmt.all();
|
||||
* // => [{bar: "baz"}]
|
||||
* // => []
|
||||
*
|
||||
* stmt.all("foo");
|
||||
* // => [{bar: "foo"}]
|
||||
@@ -555,14 +555,14 @@ declare module "bun:sqlite" {
|
||||
* ```ts
|
||||
* const stmt = db.prepare("SELECT * FROM foo WHERE bar = ?");
|
||||
*
|
||||
* stmt.all("baz");
|
||||
* // => [{bar: "baz"}]
|
||||
* stmt.get("baz");
|
||||
* // => {bar: "baz"}
|
||||
*
|
||||
* stmt.all();
|
||||
* // => [{bar: "baz"}]
|
||||
* stmt.get();
|
||||
* // => null
|
||||
*
|
||||
* stmt.all("foo");
|
||||
* // => [{bar: "foo"}]
|
||||
* stmt.get("foo");
|
||||
* // => {bar: "foo"}
|
||||
* ```
|
||||
*
|
||||
* The following types can be used when binding parameters:
|
||||
@@ -747,7 +747,7 @@ declare module "bun:sqlite" {
|
||||
* query.as(User);
|
||||
* const user = query.get();
|
||||
* console.log(user.birthdate);
|
||||
* // => Date(1995, 11, 19)
|
||||
* // => Date(1995, 12, 19)
|
||||
* ```
|
||||
*/
|
||||
as<T = unknown>(Class: new (...args: any[]) => T): Statement<T, ParamsType>;
|
||||
|
||||
2
packages/bun-types/test.d.ts
vendored
@@ -1250,7 +1250,7 @@ declare module "bun:test" {
|
||||
* - If expected is a `string` or `RegExp`, it will check the `message` property.
|
||||
* - If expected is an `Error` object, it will check the `name` and `message` properties.
|
||||
* - If expected is an `Error` constructor, it will check the class of the `Error`.
|
||||
* - If expected is not provided, it will check if anything as thrown.
|
||||
* - If expected is not provided, it will check if anything has thrown.
|
||||
*
|
||||
* @example
|
||||
* function fail() {
|
||||
|
||||
@@ -25359,3 +25359,127 @@ CKA_TRUST_SERVER_AUTH CK_TRUST CKT_NSS_TRUSTED_DELEGATOR
|
||||
CKA_TRUST_EMAIL_PROTECTION CK_TRUST CKT_NSS_MUST_VERIFY_TRUST
|
||||
CKA_TRUST_CODE_SIGNING CK_TRUST CKT_NSS_MUST_VERIFY_TRUST
|
||||
CKA_TRUST_STEP_UP_APPROVED CK_BBOOL CK_FALSE
|
||||
|
||||
#
|
||||
# Certificate "FIRMAPROFESIONAL CA ROOT-A WEB"
|
||||
#
|
||||
# Issuer: CN=FIRMAPROFESIONAL CA ROOT-A WEB,OID.2.5.4.97=VATES-A62634068,O=Firmaprofesional SA,C=ES
|
||||
# Serial Number:31:97:21:ed:af:89:42:7f:35:41:87:a1:67:56:4c:6d
|
||||
# Subject: CN=FIRMAPROFESIONAL CA ROOT-A WEB,OID.2.5.4.97=VATES-A62634068,O=Firmaprofesional SA,C=ES
|
||||
# Not Valid Before: Wed Apr 06 09:01:36 2022
|
||||
# Not Valid After : Sun Mar 31 09:01:36 2047
|
||||
# Fingerprint (SHA-256): BE:F2:56:DA:F2:6E:9C:69:BD:EC:16:02:35:97:98:F3:CA:F7:18:21:A0:3E:01:82:57:C5:3C:65:61:7F:3D:4A
|
||||
# Fingerprint (SHA1): A8:31:11:74:A6:14:15:0D:CA:77:DD:0E:E4:0C:5D:58:FC:A0:72:A5
|
||||
CKA_CLASS CK_OBJECT_CLASS CKO_CERTIFICATE
|
||||
CKA_TOKEN CK_BBOOL CK_TRUE
|
||||
CKA_PRIVATE CK_BBOOL CK_FALSE
|
||||
CKA_MODIFIABLE CK_BBOOL CK_FALSE
|
||||
CKA_LABEL UTF8 "FIRMAPROFESIONAL CA ROOT-A WEB"
|
||||
CKA_CERTIFICATE_TYPE CK_CERTIFICATE_TYPE CKC_X_509
|
||||
CKA_SUBJECT MULTILINE_OCTAL
|
||||
\060\156\061\013\060\011\006\003\125\004\006\023\002\105\123\061
|
||||
\034\060\032\006\003\125\004\012\014\023\106\151\162\155\141\160
|
||||
\162\157\146\145\163\151\157\156\141\154\040\123\101\061\030\060
|
||||
\026\006\003\125\004\141\014\017\126\101\124\105\123\055\101\066
|
||||
\062\066\063\064\060\066\070\061\047\060\045\006\003\125\004\003
|
||||
\014\036\106\111\122\115\101\120\122\117\106\105\123\111\117\116
|
||||
\101\114\040\103\101\040\122\117\117\124\055\101\040\127\105\102
|
||||
END
|
||||
CKA_ID UTF8 "0"
|
||||
CKA_ISSUER MULTILINE_OCTAL
|
||||
\060\156\061\013\060\011\006\003\125\004\006\023\002\105\123\061
|
||||
\034\060\032\006\003\125\004\012\014\023\106\151\162\155\141\160
|
||||
\162\157\146\145\163\151\157\156\141\154\040\123\101\061\030\060
|
||||
\026\006\003\125\004\141\014\017\126\101\124\105\123\055\101\066
|
||||
\062\066\063\064\060\066\070\061\047\060\045\006\003\125\004\003
|
||||
\014\036\106\111\122\115\101\120\122\117\106\105\123\111\117\116
|
||||
\101\114\040\103\101\040\122\117\117\124\055\101\040\127\105\102
|
||||
END
|
||||
CKA_SERIAL_NUMBER MULTILINE_OCTAL
|
||||
\002\020\061\227\041\355\257\211\102\177\065\101\207\241\147\126
|
||||
\114\155
|
||||
END
|
||||
CKA_VALUE MULTILINE_OCTAL
|
||||
\060\202\002\172\060\202\002\000\240\003\002\001\002\002\020\061
|
||||
\227\041\355\257\211\102\177\065\101\207\241\147\126\114\155\060
|
||||
\012\006\010\052\206\110\316\075\004\003\003\060\156\061\013\060
|
||||
\011\006\003\125\004\006\023\002\105\123\061\034\060\032\006\003
|
||||
\125\004\012\014\023\106\151\162\155\141\160\162\157\146\145\163
|
||||
\151\157\156\141\154\040\123\101\061\030\060\026\006\003\125\004
|
||||
\141\014\017\126\101\124\105\123\055\101\066\062\066\063\064\060
|
||||
\066\070\061\047\060\045\006\003\125\004\003\014\036\106\111\122
|
||||
\115\101\120\122\117\106\105\123\111\117\116\101\114\040\103\101
|
||||
\040\122\117\117\124\055\101\040\127\105\102\060\036\027\015\062
|
||||
\062\060\064\060\066\060\071\060\061\063\066\132\027\015\064\067
|
||||
\060\063\063\061\060\071\060\061\063\066\132\060\156\061\013\060
|
||||
\011\006\003\125\004\006\023\002\105\123\061\034\060\032\006\003
|
||||
\125\004\012\014\023\106\151\162\155\141\160\162\157\146\145\163
|
||||
\151\157\156\141\154\040\123\101\061\030\060\026\006\003\125\004
|
||||
\141\014\017\126\101\124\105\123\055\101\066\062\066\063\064\060
|
||||
\066\070\061\047\060\045\006\003\125\004\003\014\036\106\111\122
|
||||
\115\101\120\122\117\106\105\123\111\117\116\101\114\040\103\101
|
||||
\040\122\117\117\124\055\101\040\127\105\102\060\166\060\020\006
|
||||
\007\052\206\110\316\075\002\001\006\005\053\201\004\000\042\003
|
||||
\142\000\004\107\123\352\054\021\244\167\307\052\352\363\326\137
|
||||
\173\323\004\221\134\372\210\306\042\271\203\020\142\167\204\063
|
||||
\055\351\003\210\324\340\063\367\355\167\054\112\140\352\344\157
|
||||
\255\155\264\370\114\212\244\344\037\312\352\117\070\112\056\202
|
||||
\163\053\307\146\233\012\214\100\234\174\212\366\362\071\140\262
|
||||
\336\313\354\270\344\157\352\233\135\267\123\220\030\062\125\305
|
||||
\040\267\224\243\143\060\141\060\017\006\003\125\035\023\001\001
|
||||
\377\004\005\060\003\001\001\377\060\037\006\003\125\035\043\004
|
||||
\030\060\026\200\024\223\341\103\143\134\074\235\326\047\363\122
|
||||
\354\027\262\251\257\054\367\166\370\060\035\006\003\125\035\016
|
||||
\004\026\004\024\223\341\103\143\134\074\235\326\047\363\122\354
|
||||
\027\262\251\257\054\367\166\370\060\016\006\003\125\035\017\001
|
||||
\001\377\004\004\003\002\001\006\060\012\006\010\052\206\110\316
|
||||
\075\004\003\003\003\150\000\060\145\002\060\035\174\244\173\303
|
||||
\211\165\063\341\073\251\105\277\106\351\351\241\335\311\042\026
|
||||
\267\107\021\013\330\232\272\361\310\013\160\120\123\002\221\160
|
||||
\205\131\251\036\244\346\352\043\061\240\000\002\061\000\375\342
|
||||
\370\263\257\026\271\036\163\304\226\343\301\060\031\330\176\346
|
||||
\303\227\336\034\117\270\211\057\063\353\110\017\031\367\207\106
|
||||
\135\046\220\245\205\305\271\172\224\076\207\250\275\000
|
||||
END
|
||||
CKA_NSS_MOZILLA_CA_POLICY CK_BBOOL CK_TRUE
|
||||
CKA_NSS_SERVER_DISTRUST_AFTER CK_BBOOL CK_FALSE
|
||||
CKA_NSS_EMAIL_DISTRUST_AFTER CK_BBOOL CK_FALSE
|
||||
|
||||
# Trust for "FIRMAPROFESIONAL CA ROOT-A WEB"
|
||||
# Issuer: CN=FIRMAPROFESIONAL CA ROOT-A WEB,OID.2.5.4.97=VATES-A62634068,O=Firmaprofesional SA,C=ES
|
||||
# Serial Number:31:97:21:ed:af:89:42:7f:35:41:87:a1:67:56:4c:6d
|
||||
# Subject: CN=FIRMAPROFESIONAL CA ROOT-A WEB,OID.2.5.4.97=VATES-A62634068,O=Firmaprofesional SA,C=ES
|
||||
# Not Valid Before: Wed Apr 06 09:01:36 2022
|
||||
# Not Valid After : Sun Mar 31 09:01:36 2047
|
||||
# Fingerprint (SHA-256): BE:F2:56:DA:F2:6E:9C:69:BD:EC:16:02:35:97:98:F3:CA:F7:18:21:A0:3E:01:82:57:C5:3C:65:61:7F:3D:4A
|
||||
# Fingerprint (SHA1): A8:31:11:74:A6:14:15:0D:CA:77:DD:0E:E4:0C:5D:58:FC:A0:72:A5
|
||||
CKA_CLASS CK_OBJECT_CLASS CKO_NSS_TRUST
|
||||
CKA_TOKEN CK_BBOOL CK_TRUE
|
||||
CKA_PRIVATE CK_BBOOL CK_FALSE
|
||||
CKA_MODIFIABLE CK_BBOOL CK_FALSE
|
||||
CKA_LABEL UTF8 "FIRMAPROFESIONAL CA ROOT-A WEB"
|
||||
CKA_CERT_SHA1_HASH MULTILINE_OCTAL
|
||||
\250\061\021\164\246\024\025\015\312\167\335\016\344\014\135\130
|
||||
\374\240\162\245
|
||||
END
|
||||
CKA_CERT_MD5_HASH MULTILINE_OCTAL
|
||||
\202\262\255\105\000\202\260\146\143\370\137\303\147\116\316\243
|
||||
END
|
||||
CKA_ISSUER MULTILINE_OCTAL
|
||||
\060\156\061\013\060\011\006\003\125\004\006\023\002\105\123\061
|
||||
\034\060\032\006\003\125\004\012\014\023\106\151\162\155\141\160
|
||||
\162\157\146\145\163\151\157\156\141\154\040\123\101\061\030\060
|
||||
\026\006\003\125\004\141\014\017\126\101\124\105\123\055\101\066
|
||||
\062\066\063\064\060\066\070\061\047\060\045\006\003\125\004\003
|
||||
\014\036\106\111\122\115\101\120\122\117\106\105\123\111\117\116
|
||||
\101\114\040\103\101\040\122\117\117\124\055\101\040\127\105\102
|
||||
END
|
||||
CKA_SERIAL_NUMBER MULTILINE_OCTAL
|
||||
\002\020\061\227\041\355\257\211\102\177\065\101\207\241\147\126
|
||||
\114\155
|
||||
END
|
||||
CKA_TRUST_SERVER_AUTH CK_TRUST CKT_NSS_TRUSTED_DELEGATOR
|
||||
CKA_TRUST_EMAIL_PROTECTION CK_TRUST CKT_NSS_MUST_VERIFY_TRUST
|
||||
CKA_TRUST_CODE_SIGNING CK_TRUST CKT_NSS_MUST_VERIFY_TRUST
|
||||
CKA_TRUST_STEP_UP_APPROVED CK_BBOOL CK_FALSE
|
||||
|
||||
|
||||
@@ -73,6 +73,9 @@ const getReleases = text => {
|
||||
release[kNSSDate] = new Date(normalizeTD(cells[columns[kNSSDate]]));
|
||||
release[kFirefoxVersion] = normalizeTD(cells[columns[kFirefoxVersion]]);
|
||||
release[kFirefoxDate] = new Date(normalizeTD(cells[columns[kFirefoxDate]]));
|
||||
if (!isNaN(release[kFirefoxDate]) && isNaN(release[kNSSDate])) {
|
||||
release[kNSSDate] = new Date(release[kFirefoxDate]);
|
||||
}
|
||||
releases.push(release);
|
||||
row = matches.next();
|
||||
}
|
||||
|
||||
@@ -26,6 +26,10 @@
|
||||
#include <stdlib.h>
|
||||
|
||||
#ifndef _WIN32
|
||||
// Necessary for the stdint include
|
||||
#ifndef _GNU_SOURCE
|
||||
#define _GNU_SOURCE
|
||||
#endif
|
||||
#include <sys/types.h>
|
||||
#include <sys/socket.h>
|
||||
#include <netinet/in.h>
|
||||
@@ -38,8 +42,8 @@
|
||||
#include <mstcpip.h>
|
||||
#endif
|
||||
|
||||
#if defined(__APPLE__) && defined(__aarch64__)
|
||||
#define HAS_MSGX
|
||||
#if defined(__APPLE__)
|
||||
extern int Bun__doesMacOSVersionSupportSendRecvMsgX();
|
||||
#endif
|
||||
|
||||
|
||||
@@ -73,32 +77,30 @@ int bsd_sendmmsg(LIBUS_SOCKET_DESCRIPTOR fd, struct udp_sendbuf* sendbuf, int fl
|
||||
}
|
||||
return sendbuf->num;
|
||||
#elif defined(__APPLE__)
|
||||
// TODO figure out why sendmsg_x fails when one of the messages is empty
|
||||
// so that we can get rid of this code.
|
||||
// One of the weird things is that once a non-empty message has been sent on the socket,
|
||||
// empty messages start working as well. Bizzare.
|
||||
#ifdef HAS_MSGX
|
||||
if (sendbuf->has_empty) {
|
||||
#endif
|
||||
for (int i = 0; i < sendbuf->num; i++) {
|
||||
while (1) {
|
||||
ssize_t ret = sendmsg(fd, &sendbuf->msgvec[i].msg_hdr, flags);
|
||||
if (ret < 0) {
|
||||
if (errno == EINTR) continue;
|
||||
if (errno == EAGAIN || errno == EWOULDBLOCK) return i;
|
||||
return ret;
|
||||
}
|
||||
break;
|
||||
}
|
||||
// sendmsg_x does not support addresses.
|
||||
if (!sendbuf->has_empty && !sendbuf->has_addresses && Bun__doesMacOSVersionSupportSendRecvMsgX()) {
|
||||
while (1) {
|
||||
int ret = sendmsg_x(fd, sendbuf->msgvec, sendbuf->num, flags);
|
||||
if (ret >= 0) return ret;
|
||||
// If we receive EMSGSIZE, we should use the fallback code.
|
||||
if (errno == EMSGSIZE) break;
|
||||
if (errno != EINTR) return ret;
|
||||
}
|
||||
return sendbuf->num;
|
||||
#ifdef HAS_MSGX
|
||||
}
|
||||
while (1) {
|
||||
int ret = sendmsg_x(fd, sendbuf->msgvec, sendbuf->num, flags);
|
||||
if (ret >= 0 || errno != EINTR) return ret;
|
||||
|
||||
for (size_t i = 0, count = sendbuf->num; i < count; i++) {
|
||||
while (1) {
|
||||
ssize_t ret = sendmsg(fd, &sendbuf->msgvec[i].msg_hdr, flags);
|
||||
if (ret < 0) {
|
||||
if (errno == EINTR) continue;
|
||||
if (errno == EAGAIN || errno == EWOULDBLOCK) return i;
|
||||
return ret;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return sendbuf->num;
|
||||
#else
|
||||
while (1) {
|
||||
int ret = sendmmsg(fd, sendbuf->msgvec, sendbuf->num, flags | MSG_NOSIGNAL);
|
||||
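The Apple fallback in the hunk above reduces to a retry-on-EINTR loop around sendmsg() that reports how many messages actually made it out before the socket filled up. A minimal standalone sketch of that pattern, assuming plain msghdr arrays (the helper name is invented for illustration):

#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>

/* Send `num` messages one by one; returns how many were handed to the kernel.
 * Mirrors the fallback path: retry on EINTR, stop (without failing) on
 * EAGAIN/EWOULDBLOCK, and bail out on any other error. */
static int send_one_by_one(int fd, struct msghdr *hdrs, int num, int flags) {
    for (int i = 0; i < num; i++) {
        for (;;) {
            ssize_t ret = sendmsg(fd, &hdrs[i], flags);
            if (ret >= 0) break;                   /* this message is done */
            if (errno == EINTR) continue;          /* interrupted: retry it */
            if (errno == EAGAIN || errno == EWOULDBLOCK) return i; /* report progress */
            return -1;                             /* hard error */
        }
    }
    return num;
}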
@@ -120,12 +122,13 @@ int bsd_recvmmsg(LIBUS_SOCKET_DESCRIPTOR fd, struct udp_recvbuf *recvbuf, int fl
|
||||
return 1;
|
||||
}
|
||||
#elif defined(__APPLE__)
|
||||
#ifdef HAS_MSGX
|
||||
while (1) {
|
||||
int ret = recvmsg_x(fd, recvbuf->msgvec, LIBUS_UDP_RECV_COUNT, flags);
|
||||
if (ret >= 0 || errno != EINTR) return ret;
|
||||
if (Bun__doesMacOSVersionSupportSendRecvMsgX()) {
|
||||
while (1) {
|
||||
int ret = recvmsg_x(fd, recvbuf->msgvec, LIBUS_UDP_RECV_COUNT, flags);
|
||||
if (ret >= 0 || errno != EINTR) return ret;
|
||||
}
|
||||
}
|
||||
#else
|
||||
|
||||
for (int i = 0; i < LIBUS_UDP_RECV_COUNT; ++i) {
|
||||
while (1) {
|
||||
ssize_t ret = recvmsg(fd, &recvbuf->msgvec[i].msg_hdr, flags);
|
||||
@@ -139,7 +142,6 @@ int bsd_recvmmsg(LIBUS_SOCKET_DESCRIPTOR fd, struct udp_recvbuf *recvbuf, int fl
|
||||
}
|
||||
}
|
||||
return LIBUS_UDP_RECV_COUNT;
|
||||
#endif
|
||||
#else
|
||||
while (1) {
|
||||
int ret = recvmmsg(fd, (struct mmsghdr *)&recvbuf->msgvec, LIBUS_UDP_RECV_COUNT, flags, 0);
|
||||
@@ -154,19 +156,20 @@ void bsd_udp_setup_recvbuf(struct udp_recvbuf *recvbuf, void *databuf, size_t da
|
||||
recvbuf->buflen = databuflen;
|
||||
#else
|
||||
// assert(databuflen > LIBUS_UDP_MAX_SIZE * LIBUS_UDP_RECV_COUNT);
|
||||
|
||||
for (int i = 0; i < LIBUS_UDP_RECV_COUNT; i++) {
|
||||
memset(recvbuf, 0, sizeof(struct udp_recvbuf));
|
||||
for (size_t i = 0; i < LIBUS_UDP_RECV_COUNT; i++) {
|
||||
recvbuf->iov[i].iov_base = (char*)databuf + i * LIBUS_UDP_MAX_SIZE;
|
||||
recvbuf->iov[i].iov_len = LIBUS_UDP_MAX_SIZE;
|
||||
|
||||
recvbuf->msgvec[i].msg_hdr.msg_name = &recvbuf->addr[i];
|
||||
recvbuf->msgvec[i].msg_hdr.msg_namelen = sizeof(struct sockaddr_storage);
|
||||
|
||||
recvbuf->msgvec[i].msg_hdr.msg_iov = &recvbuf->iov[i];
|
||||
recvbuf->msgvec[i].msg_hdr.msg_iovlen = 1;
|
||||
|
||||
recvbuf->msgvec[i].msg_hdr.msg_control = recvbuf->control[i];
|
||||
recvbuf->msgvec[i].msg_hdr.msg_controllen = 256;
|
||||
struct msghdr mh = {};
|
||||
memset(&mh, 0, sizeof(struct msghdr));
|
||||
mh.msg_name = &recvbuf->addr[i];
|
||||
mh.msg_namelen = sizeof(struct sockaddr_storage);
|
||||
mh.msg_iov = &recvbuf->iov[i];
|
||||
mh.msg_iovlen = 1;
|
||||
mh.msg_control = recvbuf->control[i];
|
||||
mh.msg_controllen = sizeof(recvbuf->control[i]);
|
||||
recvbuf->msgvec[i].msg_hdr = mh;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
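The rewritten receive-buffer setup zeroes the whole structure first and then builds every msghdr on the stack before assigning it, so no field is left stale between loop iterations. As a hedged, self-contained illustration of the same initialization pattern (names and the single-iovec layout are assumptions of this sketch, not part of the diff):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Prepare one msghdr that scatters into `iov` and records the sender in `addr`. */
static void setup_one_msghdr(struct msghdr *out, struct iovec *iov,
                             struct sockaddr_storage *addr,
                             void *control, size_t control_len) {
    struct msghdr mh;
    memset(&mh, 0, sizeof mh);                    /* no stale flags or lengths */
    mh.msg_name = addr;                           /* peer address is written here */
    mh.msg_namelen = sizeof(struct sockaddr_storage);
    mh.msg_iov = iov;                             /* single-element scatter list */
    mh.msg_iovlen = 1;
    mh.msg_control = control;                     /* ancillary data buffer */
    mh.msg_controllen = control_len;
    *out = mh;
}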
@@ -179,7 +182,12 @@ int bsd_udp_setup_sendbuf(struct udp_sendbuf *buf, size_t bufsize, void** payloa
|
||||
buf->num = num;
|
||||
return num;
|
||||
#else
|
||||
// TODO: can we skip empty messages altogether? Do we really need to send 0-length messages?
|
||||
buf->has_empty = 0;
|
||||
|
||||
// The sendmsg_x docs state that it does not support addresses.
|
||||
buf->has_addresses = 0;
|
||||
|
||||
struct mmsghdr *msgvec = buf->msgvec;
|
||||
// todo check this math
|
||||
size_t count = (bufsize - sizeof(struct udp_sendbuf)) / (sizeof(struct mmsghdr) + sizeof(struct iovec));
|
||||
@@ -194,6 +202,9 @@ int bsd_udp_setup_sendbuf(struct udp_sendbuf *buf, size_t bufsize, void** payloa
|
||||
addr_len = addr->sa_family == AF_INET ? sizeof(struct sockaddr_in)
|
||||
: addr->sa_family == AF_INET6 ? sizeof(struct sockaddr_in6)
|
||||
: 0;
|
||||
if (addr_len > 0) {
|
||||
buf->has_addresses = 1;
|
||||
}
|
||||
}
|
||||
iov[i].iov_base = payloads[i];
|
||||
iov[i].iov_len = lengths[i];
|
||||
@@ -206,6 +217,7 @@ int bsd_udp_setup_sendbuf(struct udp_sendbuf *buf, size_t bufsize, void** payloa
|
||||
msgvec[i].msg_hdr.msg_flags = 0;
|
||||
msgvec[i].msg_len = 0;
|
||||
|
||||
|
||||
if (lengths[i] == 0) {
|
||||
buf->has_empty = 1;
|
||||
}
|
||||
|
||||
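The new has_addresses flag depends on mapping a destination sockaddr family to its concrete length, treating unknown families as "no address". A small sketch of that mapping (the helper name is hypothetical):

#include <netinet/in.h>
#include <sys/socket.h>

/* Returns the value to use as msg_namelen, or 0 if the address should be omitted. */
static socklen_t addr_len_for(const struct sockaddr *addr) {
    if (!addr) return 0;
    switch (addr->sa_family) {
    case AF_INET:  return sizeof(struct sockaddr_in);
    case AF_INET6: return sizeof(struct sockaddr_in6);
    default:       return 0; /* unknown family: send without an explicit address */
    }
}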
@@ -15,18 +15,18 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "libusockets.h"
|
||||
#include "internal/internal.h"
|
||||
#include "libusockets.h"
|
||||
#include <errno.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <errno.h>
|
||||
|
||||
#ifndef _WIN32
|
||||
#include <arpa/inet.h>
|
||||
#endif
|
||||
|
||||
#define CONCURRENT_CONNECTIONS 2
|
||||
#define CONCURRENT_CONNECTIONS 4
|
||||
|
||||
// clang-format off
|
||||
int default_is_low_prio_handler(struct us_socket_t *s) {
|
||||
return 0;
|
||||
}
|
||||
@@ -44,7 +44,7 @@ int us_raw_root_certs(struct us_cert_string_t**out){
|
||||
void us_listen_socket_close(int ssl, struct us_listen_socket_t *ls) {
|
||||
/* us_listen_socket_t extends us_socket_t so we close in similar ways */
|
||||
if (!us_socket_is_closed(0, &ls->s)) {
|
||||
us_internal_socket_context_unlink_listen_socket(ls->s.context, ls);
|
||||
us_internal_socket_context_unlink_listen_socket(ssl, ls->s.context, ls);
|
||||
us_poll_stop((struct us_poll_t *) &ls->s, ls->s.context->loop);
|
||||
bsd_close_socket(us_poll_fd((struct us_poll_t *) &ls->s));
|
||||
|
||||
@@ -60,11 +60,19 @@ void us_listen_socket_close(int ssl, struct us_listen_socket_t *ls) {
|
||||
}
|
||||
|
||||
void us_socket_context_close(int ssl, struct us_socket_context_t *context) {
|
||||
/* Begin by closing all listen sockets */
|
||||
/* First, start closing any pending connecting sockets */
|
||||
struct us_connecting_socket_t *c = context->head_connecting_sockets;
|
||||
while (c) {
|
||||
struct us_connecting_socket_t *nextC = c->next_pending;
|
||||
us_connecting_socket_close(ssl, c);
|
||||
c = nextC;
|
||||
}
|
||||
/* After this, close all listen sockets */
|
||||
struct us_listen_socket_t *ls = context->head_listen_sockets;
|
||||
while (ls) {
|
||||
struct us_listen_socket_t *nextLS = (struct us_listen_socket_t *) ls->s.next;
|
||||
us_listen_socket_close(ssl, ls);
|
||||
|
||||
ls = nextLS;
|
||||
}
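Both loops in us_socket_context_close rely on the same idiom: capture the next pointer before closing the current node, because closing may unlink (and later free) it. A generic sketch of that idiom with a hypothetical node type:

/* `close_node` may unlink and eventually free `n`, so grab the successor first. */
struct node { struct node *next; };

static void close_all(struct node *head, void (*close_node)(struct node *)) {
    struct node *n = head;
    while (n) {
        struct node *next = n->next;
        close_node(n);
        n = next;
    }
}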
@@ -72,12 +80,12 @@ void us_socket_context_close(int ssl, struct us_socket_context_t *context) {
|
||||
struct us_socket_t *s = context->head_sockets;
|
||||
while (s) {
|
||||
struct us_socket_t *nextS = s->next;
|
||||
us_socket_close(ssl, s, 0, 0);
|
||||
us_socket_close(ssl, s, LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN, 0);
|
||||
s = nextS;
|
||||
}
|
||||
}
|
||||
|
||||
void us_internal_socket_context_unlink_listen_socket(struct us_socket_context_t *context, struct us_listen_socket_t *ls) {
|
||||
void us_internal_socket_context_unlink_listen_socket(int ssl, struct us_socket_context_t *context, struct us_listen_socket_t *ls) {
|
||||
/* We have to properly update the iterator used to sweep sockets for timeouts */
|
||||
if (ls == (struct us_listen_socket_t *) context->iterator) {
|
||||
context->iterator = ls->s.next;
|
||||
@@ -95,9 +103,10 @@ void us_internal_socket_context_unlink_listen_socket(struct us_socket_context_t
|
||||
ls->s.next->prev = ls->s.prev;
|
||||
}
|
||||
}
|
||||
us_socket_context_unref(ssl, context);
|
||||
}
|
||||
|
||||
void us_internal_socket_context_unlink_socket(struct us_socket_context_t *context, struct us_socket_t *s) {
|
||||
void us_internal_socket_context_unlink_socket(int ssl, struct us_socket_context_t *context, struct us_socket_t *s) {
|
||||
/* We have to properly update the iterator used to sweep sockets for timeouts */
|
||||
if (s == context->iterator) {
|
||||
context->iterator = s->next;
|
||||
@@ -115,6 +124,22 @@ void us_internal_socket_context_unlink_socket(struct us_socket_context_t *contex
|
||||
s->next->prev = s->prev;
|
||||
}
|
||||
}
|
||||
us_socket_context_unref(ssl, context);
|
||||
}
|
||||
void us_internal_socket_context_unlink_connecting_socket(int ssl, struct us_socket_context_t *context, struct us_connecting_socket_t *c) {
|
||||
if (c->prev_pending == c->next_pending) {
|
||||
context->head_connecting_sockets = 0;
|
||||
} else {
|
||||
if (c->prev_pending) {
|
||||
c->prev_pending->next_pending = c->next_pending;
|
||||
} else {
|
||||
context->head_connecting_sockets = c->next_pending;
|
||||
}
|
||||
if (c->next_pending) {
|
||||
c->next_pending->prev_pending = c->prev_pending;
|
||||
}
|
||||
}
|
||||
us_socket_context_unref(ssl, context);
|
||||
}
|
||||
|
||||
/* We always add at the top, so we don't modify any s.next */
|
||||
@@ -126,8 +151,21 @@ void us_internal_socket_context_link_listen_socket(struct us_socket_context_t *c
|
||||
context->head_listen_sockets->s.prev = &ls->s;
|
||||
}
|
||||
context->head_listen_sockets = ls;
|
||||
us_socket_context_ref(0, context);
|
||||
}
|
||||
|
||||
void us_internal_socket_context_link_connecting_socket(int ssl, struct us_socket_context_t *context, struct us_connecting_socket_t *c) {
|
||||
c->context = context;
|
||||
c->next_pending = context->head_connecting_sockets;
|
||||
c->prev_pending = 0;
|
||||
if (context->head_connecting_sockets) {
|
||||
context->head_connecting_sockets->prev_pending = c;
|
||||
}
|
||||
context->head_connecting_sockets = c;
|
||||
us_socket_context_ref(ssl, context);
|
||||
}
|
||||
|
||||
|
||||
/* We always add at the top, so we don't modify any s.next */
|
||||
void us_internal_socket_context_link_socket(struct us_socket_context_t *context, struct us_socket_t *s) {
|
||||
s->context = context;
|
||||
@@ -137,6 +175,7 @@ void us_internal_socket_context_link_socket(struct us_socket_context_t *context,
|
||||
context->head_sockets->prev = s;
|
||||
}
|
||||
context->head_sockets = s;
|
||||
us_socket_context_ref(0, context);
|
||||
}
|
||||
|
||||
struct us_loop_t *us_socket_context_loop(int ssl, struct us_socket_context_t *context) {
|
||||
@@ -231,6 +270,7 @@ struct us_socket_context_t *us_create_socket_context(int ssl, struct us_loop_t *
|
||||
struct us_socket_context_t *context = us_calloc(1, sizeof(struct us_socket_context_t) + context_ext_size);
|
||||
context->loop = loop;
|
||||
context->is_low_prio = default_is_low_prio_handler;
|
||||
context->ref_count = 1;
|
||||
|
||||
us_internal_loop_link(loop, context);
|
||||
|
||||
@@ -252,6 +292,7 @@ struct us_socket_context_t *us_create_bun_socket_context(int ssl, struct us_loop
|
||||
struct us_socket_context_t *context = us_calloc(1, sizeof(struct us_socket_context_t) + context_ext_size);
|
||||
context->loop = loop;
|
||||
context->is_low_prio = default_is_low_prio_handler;
|
||||
context->ref_count = 1;
|
||||
|
||||
us_internal_loop_link(loop, context);
|
||||
|
||||
@@ -271,8 +312,8 @@ struct us_bun_verify_error_t us_socket_verify_error(int ssl, struct us_socket_t
|
||||
return (struct us_bun_verify_error_t) { .error = 0, .code = NULL, .reason = NULL };
|
||||
}
|
||||
|
||||
void us_internal_socket_context_free(int ssl, struct us_socket_context_t *context) {
|
||||
|
||||
void us_socket_context_free(int ssl, struct us_socket_context_t *context) {
|
||||
#ifndef LIBUS_NO_SSL
|
||||
if (ssl) {
|
||||
/* This function will call us again with SSL=false */
|
||||
@@ -285,7 +326,24 @@ void us_socket_context_free(int ssl, struct us_socket_context_t *context) {
|
||||
* This is the opposite order compared to when creating the context - SSL code is cleaning up before non-SSL */
|
||||
|
||||
us_internal_loop_unlink(context->loop, context);
|
||||
us_free(context);
|
||||
/* Link this context to the close-list and let it be deleted after this iteration */
|
||||
context->next = context->loop->data.closed_context_head;
|
||||
context->loop->data.closed_context_head = context;
|
||||
}
|
||||
|
||||
void us_socket_context_ref(int ssl, struct us_socket_context_t *context) {
|
||||
context->ref_count++;
|
||||
}
|
||||
void us_socket_context_unref(int ssl, struct us_socket_context_t *context) {
|
||||
uint32_t ref_count = context->ref_count;
|
||||
context->ref_count--;
|
||||
if (ref_count == 1) {
|
||||
us_internal_socket_context_free(ssl, context);
|
||||
}
|
||||
}
|
||||
|
||||
void us_socket_context_free(int ssl, struct us_socket_context_t *context) {
|
||||
us_socket_context_unref(ssl, context);
|
||||
}
|
||||
|
||||
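The socket context now carries a reference count: every linked socket, listen socket, and connecting socket holds one reference, and us_socket_context_free merely drops the caller's reference so the real teardown runs only after the last unlink. A condensed sketch of that pattern (struct and function names here are illustrative; the real free is deferred to the loop's closed-context list):

#include <stdint.h>
#include <stdlib.h>

struct ctx { uint32_t ref_count; /* ... */ };

static void ctx_destroy(struct ctx *c) { free(c); } /* the diff defers this instead */

static void ctx_ref(struct ctx *c)   { c->ref_count++; }
static void ctx_unref(struct ctx *c) {
    uint32_t before = c->ref_count--;
    if (before == 1) {        /* we just dropped the last reference */
        ctx_destroy(c);
    }
}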
struct us_listen_socket_t *us_socket_context_listen(int ssl, struct us_socket_context_t *context, const char *host, int port, int options, int socket_ext_size) {
|
||||
@@ -456,14 +514,14 @@ void *us_socket_context_connect(int ssl, struct us_socket_context_t *context, co
|
||||
}
|
||||
|
||||
struct us_connecting_socket_t *c = us_calloc(1, sizeof(struct us_connecting_socket_t) + socket_ext_size);
|
||||
c->socket_ext_size = socket_ext_size;
|
||||
c->context = context;
|
||||
c->socket_ext_size = socket_ext_size;
|
||||
c->options = options;
|
||||
c->ssl = ssl > 0;
|
||||
c->timeout = 255;
|
||||
c->long_timeout = 255;
|
||||
c->pending_resolve_callback = 1;
|
||||
c->port = port;
|
||||
us_internal_socket_context_link_connecting_socket(ssl, context, c);
|
||||
|
||||
#ifdef _WIN32
|
||||
loop->uv_loop->active_handles++;
|
||||
@@ -525,15 +583,12 @@ void us_internal_socket_after_resolve(struct us_connecting_socket_t *c) {
|
||||
c->pending_resolve_callback = 0;
|
||||
// if the socket was closed while we were resolving the address, free it
|
||||
if (c->closed) {
|
||||
us_connecting_socket_free(c);
|
||||
us_connecting_socket_free(c->ssl, c);
|
||||
return;
|
||||
}
|
||||
struct addrinfo_result *result = Bun__addrinfo_getRequestResult(c->addrinfo_req);
|
||||
if (result->error) {
|
||||
c->error = result->error;
|
||||
c->context->on_connect_error(c, result->error);
|
||||
Bun__addrinfo_freeRequest(c->addrinfo_req, 0);
|
||||
us_connecting_socket_close(0, c);
|
||||
us_connecting_socket_close(c->ssl, c);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -541,10 +596,7 @@ void us_internal_socket_after_resolve(struct us_connecting_socket_t *c) {
|
||||
|
||||
int opened = start_connections(c, CONCURRENT_CONNECTIONS);
|
||||
if (opened == 0) {
|
||||
c->error = ECONNREFUSED;
|
||||
c->context->on_connect_error(c, ECONNREFUSED);
|
||||
Bun__addrinfo_freeRequest(c->addrinfo_req, 1);
|
||||
us_connecting_socket_close(0, c);
|
||||
us_connecting_socket_close(c->ssl, c);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -612,10 +664,7 @@ void us_internal_socket_after_open(struct us_socket_t *s, int error) {
|
||||
// we have run out of addresses to attempt, signal the connection error
|
||||
// but only if there are no other sockets in the list
|
||||
if (opened == 0 && c->connecting_head == NULL) {
|
||||
c->error = ECONNREFUSED;
|
||||
c->context->on_connect_error(c, error);
|
||||
Bun__addrinfo_freeRequest(c->addrinfo_req, ECONNREFUSED);
|
||||
us_connecting_socket_close(0, c);
|
||||
us_connecting_socket_close(c->ssl, c);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -644,7 +693,7 @@ void us_internal_socket_after_open(struct us_socket_t *s, int error) {
|
||||
}
|
||||
// now that the socket is open, we can release the associated us_connecting_socket_t if it exists
|
||||
Bun__addrinfo_freeRequest(c->addrinfo_req, 0);
|
||||
us_connecting_socket_free(c);
|
||||
us_connecting_socket_free(c->ssl, c);
|
||||
s->connect_state = NULL;
|
||||
}
|
||||
|
||||
@@ -703,13 +752,15 @@ struct us_socket_t *us_socket_context_adopt_socket(int ssl, struct us_socket_con
|
||||
#endif
|
||||
|
||||
/* Cannot adopt a closed socket */
|
||||
if (us_socket_is_closed(ssl, s)) {
|
||||
if (us_socket_is_closed(ssl, s) || us_socket_is_shut_down(ssl, s)) {
|
||||
return s;
|
||||
}
|
||||
|
||||
if (s->low_prio_state != 1) {
|
||||
/* We need to be sure that we are still holding a reference */
|
||||
us_socket_context_ref(ssl, context);
|
||||
/* This properly updates the iterator if in on_timeout */
|
||||
us_internal_socket_context_unlink_socket(s->context, s);
|
||||
us_internal_socket_context_unlink_socket(ssl, s->context, s);
|
||||
}
|
||||
|
||||
|
||||
@@ -720,7 +771,10 @@ struct us_socket_t *us_socket_context_adopt_socket(int ssl, struct us_socket_con
|
||||
new_s = (struct us_socket_t *) us_poll_resize(&s->p, s->context->loop, sizeof(struct us_socket_t) + ext_size);
|
||||
if (c) {
|
||||
c->connecting_head = new_s;
|
||||
struct us_socket_context_t *old_context = s->context;
|
||||
c->context = context;
|
||||
us_internal_socket_context_link_connecting_socket(ssl, context, c);
|
||||
us_internal_socket_context_unlink_connecting_socket(ssl, old_context, c);
|
||||
}
|
||||
}
|
||||
new_s->timeout = 255;
|
||||
@@ -734,6 +788,7 @@ struct us_socket_t *us_socket_context_adopt_socket(int ssl, struct us_socket_con
|
||||
if (new_s->next) new_s->next->prev = new_s;
|
||||
} else {
|
||||
us_internal_socket_context_link_socket(context, new_s);
|
||||
us_socket_context_unref(ssl, context);
|
||||
}
|
||||
|
||||
return new_s;
|
||||
|
||||
@@ -14,8 +14,14 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// clang-format off
|
||||
#if (defined(LIBUS_USE_OPENSSL) || defined(LIBUS_USE_WOLFSSL))
|
||||
|
||||
|
||||
#include "internal/internal.h"
|
||||
#include "libusockets.h"
|
||||
#include <string.h>
|
||||
|
||||
/* These are in sni_tree.cpp */
|
||||
void *sni_new();
|
||||
void sni_free(void *sni, void (*cb)(void *));
|
||||
@@ -23,10 +29,6 @@ int sni_add(void *sni, const char *hostname, void *user);
|
||||
void *sni_remove(void *sni, const char *hostname);
|
||||
void *sni_find(void *sni, const char *hostname);
|
||||
|
||||
#include "internal/internal.h"
|
||||
#include "libusockets.h"
|
||||
#include <string.h>
|
||||
|
||||
/* This module contains the entire OpenSSL implementation
|
||||
* of the SSL socket and socket context interfaces. */
|
||||
#ifdef LIBUS_USE_OPENSSL
|
||||
@@ -71,10 +73,6 @@ struct us_internal_ssl_socket_context_t {
|
||||
// socket context
|
||||
SSL_CTX *ssl_context;
|
||||
int is_parent;
|
||||
#if ALLOW_SERVER_RENEGOTIATION
|
||||
unsigned int client_renegotiation_limit;
|
||||
unsigned int client_renegotiation_window;
|
||||
#endif
|
||||
/* These decorate the base implementation */
|
||||
struct us_internal_ssl_socket_t *(*on_open)(struct us_internal_ssl_socket_t *,
|
||||
int is_client, char *ip,
|
||||
@@ -86,6 +84,10 @@ struct us_internal_ssl_socket_context_t {
|
||||
struct us_internal_ssl_socket_t *(*on_close)(
|
||||
struct us_internal_ssl_socket_t *, int code, void *reason);
|
||||
|
||||
struct us_internal_ssl_socket_t *(*on_timeout)(
|
||||
struct us_internal_ssl_socket_t *);
|
||||
struct us_internal_ssl_socket_t *(*on_long_timeout)(struct us_internal_ssl_socket_t *);
|
||||
|
||||
/* Called for missing SNI hostnames, if not NULL */
|
||||
void (*on_server_name)(struct us_internal_ssl_socket_context_t *,
|
||||
const char *hostname);
|
||||
@@ -108,15 +110,10 @@ enum {
|
||||
struct us_internal_ssl_socket_t {
|
||||
struct us_socket_t s;
|
||||
SSL *ssl; // this _must_ be the first member after s
|
||||
#if ALLOW_SERVER_RENEGOTIATION
|
||||
unsigned int client_pending_renegotiations;
|
||||
uint64_t last_ssl_renegotiation;
|
||||
unsigned int is_client : 1;
|
||||
#endif
|
||||
unsigned int ssl_write_wants_read : 1; // we use this for now
|
||||
unsigned int ssl_read_wants_write : 1;
|
||||
unsigned int handshake_state : 2;
|
||||
unsigned int received_ssl_shutdown : 1;
|
||||
unsigned int fatal_error : 1;
|
||||
};
|
||||
|
||||
int passphrase_cb(char *buf, int size, int rwflag, void *u) {
|
||||
@@ -182,10 +179,9 @@ int BIO_s_custom_read(BIO *bio, char *dst, int length) {
|
||||
return length;
|
||||
}
|
||||
|
||||
struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
|
||||
int is_client, char *ip,
|
||||
int ip_length) {
|
||||
|
||||
struct loop_ssl_data * us_internal_set_loop_ssl_data(struct us_internal_ssl_socket_t *s) {
|
||||
// note: this context can change when we adopt the socket!
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
|
||||
@@ -193,17 +189,31 @@ struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
|
||||
struct loop_ssl_data *loop_ssl_data =
|
||||
(struct loop_ssl_data *)loop->data.ssl_data;
|
||||
|
||||
s->ssl = SSL_new(context->ssl_context);
|
||||
#if ALLOW_SERVER_RENEGOTIATION
|
||||
s->client_pending_renegotiations = context->client_renegotiation_limit;
|
||||
s->last_ssl_renegotiation = 0;
|
||||
s->is_client = is_client ? 1 : 0;
|
||||
// note: if we put data here we should never really clear it (not in write
|
||||
// either, it still should be available for SSL_write to read from!)
|
||||
|
||||
#endif
|
||||
loop_ssl_data->ssl_read_input_length = 0;
|
||||
loop_ssl_data->ssl_read_input_offset = 0;
|
||||
loop_ssl_data->ssl_socket = &s->s;
|
||||
loop_ssl_data->msg_more = 0;
|
||||
return loop_ssl_data;
|
||||
}
|
||||
|
||||
struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
|
||||
int is_client, char *ip,
|
||||
int ip_length) {
|
||||
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
|
||||
struct loop_ssl_data *loop_ssl_data = us_internal_set_loop_ssl_data(s);
|
||||
|
||||
s->ssl = SSL_new(context->ssl_context);
|
||||
s->ssl_write_wants_read = 0;
|
||||
s->ssl_read_wants_write = 0;
|
||||
s->fatal_error = 0;
|
||||
s->handshake_state = HANDSHAKE_PENDING;
|
||||
s->received_ssl_shutdown = 0;
|
||||
|
||||
|
||||
SSL_set_bio(s->ssl, loop_ssl_data->shared_rbio, loop_ssl_data->shared_wbio);
|
||||
// if we allow renegotiation, we need to set the mode here
|
||||
@@ -213,24 +223,18 @@ struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
|
||||
// this can be a DoS vector for servers, so we enable it using a limit
|
||||
// we do not use ssl_renegotiate_freely, since ssl_renegotiate_explicit is
|
||||
// more performant when using BoringSSL
|
||||
#if ALLOW_SERVER_RENEGOTIATION
|
||||
if (context->client_renegotiation_limit) {
|
||||
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_explicit);
|
||||
} else {
|
||||
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_never);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
BIO_up_ref(loop_ssl_data->shared_rbio);
|
||||
BIO_up_ref(loop_ssl_data->shared_wbio);
|
||||
|
||||
if (is_client) {
|
||||
#if ALLOW_SERVER_RENEGOTIATION == 0
|
||||
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_explicit);
|
||||
#endif
|
||||
SSL_set_connect_state(s->ssl);
|
||||
} else {
|
||||
SSL_set_accept_state(s->ssl);
|
||||
// we do not allow renegotiation on the server side (should be the default for BoringSSL, but we set it explicitly to stay compatible with OpenSSL)
|
||||
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_never);
|
||||
}
|
||||
|
||||
struct us_internal_ssl_socket_t *result =
|
||||
@@ -246,6 +250,64 @@ struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
|
||||
return result;
|
||||
}
|
||||
|
||||
/// @brief Complete the shutdown, or do a fast shutdown when needed; this should only be called before closing the socket
|
||||
/// @param s
|
||||
int us_internal_handle_shutdown(struct us_internal_ssl_socket_t *s, int force_fast_shutdown) {
|
||||
// if we are already shut down or in the middle of a handshake, we don't need to do anything
|
||||
// Scenarios:
|
||||
// 1 - SSL is not initialized yet (null)
|
||||
// 2 - socket is already shut down
|
||||
// 3 - we already sent a shutdown
|
||||
// 4 - we are in the middle of a handshake
|
||||
// 5 - we received a fatal error
|
||||
if(us_internal_ssl_socket_is_shut_down(s) || s->fatal_error || !SSL_is_init_finished(s->ssl)) return 1;
|
||||
|
||||
// we are closing the socket but have not sent a shutdown yet
|
||||
int state = SSL_get_shutdown(s->ssl);
|
||||
int sent_shutdown = state & SSL_SENT_SHUTDOWN;
|
||||
int received_shutdown = state & SSL_RECEIVED_SHUTDOWN;
|
||||
// if we are missing a shutdown call, we need to do a fast shutdown here
|
||||
if(!sent_shutdown || !received_shutdown) {
|
||||
// make sure that the ssl loop data is set
|
||||
us_internal_set_loop_ssl_data(s);
|
||||
// Zero means that we should wait for the peer to close the connection
|
||||
// but we are already closing the connection so we do a fast shutdown here
|
||||
int ret = SSL_shutdown(s->ssl);
|
||||
if(ret == 0 && force_fast_shutdown) {
|
||||
// do a fast shutdown (don't wait for the peer)
|
||||
ret = SSL_shutdown(s->ssl);
|
||||
}
|
||||
if(ret < 0) {
|
||||
// we got an error here, but we don't care about it since we are closing the socket
|
||||
int err = SSL_get_error(s->ssl, ret);
|
||||
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
|
||||
// clear
|
||||
ERR_clear_error();
|
||||
s->fatal_error = 1;
|
||||
// A fatal error occurred, we should close the socket immediately
|
||||
return 1;
|
||||
}
|
||||
if(err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {
|
||||
// We are waiting to become readable or writable; the completion will arrive in SSL_read and finish the shutdown
// if we are forcing a fast shutdown we should return 1 here to immediately close the socket
// Scenarios:
// 1 - We called abort but the socket is not writable or readable anymore (force_fast_shutdown = 1)
// 2 - We called close but want to wait until close_notify is received (force_fast_shutdown = 0)
|
||||
return force_fast_shutdown ? 1 : 0;
|
||||
}
|
||||
// If we get any other error, we probably did not even start the first handshake or hit a critical error, so just close the socket
// Scenarios:
// 1 - We aborted the connection too fast and did not even start the first handshake
|
||||
// 2 - SSL is in a broken state
|
||||
// 3 - SSL is not broken but is in a state that we cannot recover from
|
||||
s->fatal_error = 1;
|
||||
return 1;
|
||||
}
|
||||
return ret == 1;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
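For context, us_internal_handle_shutdown leans on the two-step SSL_shutdown contract: the first call queues our close_notify and returns 0, and a later call (or SSL_read) returns 1 once the peer's close_notify has been seen. A minimal sketch of the forced "fast shutdown" variant, assuming an initialized SSL handle (this is a simplification of the helper above, not a drop-in replacement):

#include <openssl/ssl.h>
#include <openssl/err.h>

/* Returns 1 if the socket can be torn down right away, 0 if we should keep
 * waiting for the peer's close_notify. */
static int fast_shutdown(SSL *ssl) {
    int ret = SSL_shutdown(ssl);            /* sends our close_notify; 0 = peer's pending */
    if (ret == 0) ret = SSL_shutdown(ssl);  /* second call re-checks without blocking */
    if (ret >= 0) return 1;                 /* 1 = complete; 0 = stop waiting anyway */
    int err = SSL_get_error(ssl, ret);
    if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) return 0;
    ERR_clear_error();                      /* fatal/protocol error: nothing more to do */
    return 1;
}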
void us_internal_on_ssl_handshake(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
void (*on_handshake)(struct us_internal_ssl_socket_t *, int success,
|
||||
@@ -256,9 +318,17 @@ void us_internal_on_ssl_handshake(
|
||||
context->handshake_data = custom_data;
|
||||
}
|
||||
|
||||
int us_internal_ssl_socket_is_closed(struct us_internal_ssl_socket_t *s) {
|
||||
return us_socket_is_closed(0, &s->s);
|
||||
}
|
||||
|
||||
struct us_internal_ssl_socket_t *
|
||||
us_internal_ssl_socket_close(struct us_internal_ssl_socket_t *s, int code,
|
||||
void *reason) {
|
||||
|
||||
// check if we are already closed
|
||||
if (us_internal_ssl_socket_is_closed(s)) return s;
|
||||
|
||||
if (s->handshake_state != HANDSHAKE_COMPLETED) {
|
||||
// if we have a pending handshake we cancel it and try to check the
// latest handshake error; this way we will always call on_handshake with the
|
||||
@@ -269,8 +339,14 @@ us_internal_ssl_socket_close(struct us_internal_ssl_socket_t *s, int code,
|
||||
us_internal_trigger_handshake_callback(s, 0);
|
||||
}
|
||||
|
||||
return (struct us_internal_ssl_socket_t *)us_socket_close(
|
||||
0, (struct us_socket_t *)s, code, reason);
|
||||
// if we are in the middle of a close_notify we need to finish it (code != 0 forces a fast shutdown)
|
||||
int can_close = us_internal_handle_shutdown(s, code != 0);
|
||||
|
||||
// only close the socket if we are not in the middle of a handshake
|
||||
if(can_close) {
|
||||
return (struct us_internal_ssl_socket_t *)us_socket_close(0, (struct us_socket_t *)s, code, reason);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
void us_internal_trigger_handshake_callback(struct us_internal_ssl_socket_t *s,
|
||||
@@ -292,26 +368,7 @@ int us_internal_ssl_renegotiate(struct us_internal_ssl_socket_t *s) {
|
||||
// if it is a server and we have no pending renegotiation we can check
|
||||
// the limits
|
||||
s->handshake_state = HANDSHAKE_RENEGOTIATION_PENDING;
|
||||
#if ALLOW_SERVER_RENEGOTIATION
|
||||
if (!s->is_client && !SSL_renegotiate_pending(s->ssl)) {
|
||||
uint64_t now = time(NULL);
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
// if it is not the first time we negotiate and we are outside the time
|
||||
// window, reset the limits
|
||||
if (s->last_ssl_renegotiation && (now - s->last_ssl_renegotiation) >=
|
||||
context->client_renegotiation_window) {
|
||||
// reset the limits
|
||||
s->client_pending_renegotiations = context->client_renegotiation_limit;
|
||||
}
|
||||
// if we have no more renegotiations, we should close the connection
|
||||
if (s->client_pending_renegotiations == 0) {
|
||||
return 0;
|
||||
}
|
||||
s->last_ssl_renegotiation = now;
|
||||
s->client_pending_renegotiations--;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (!SSL_renegotiate(s->ssl)) {
|
||||
// we failed to renegotiate
|
||||
us_internal_trigger_handshake_callback(s, 0);
|
||||
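The limit that used to live inline here (and now sits behind ALLOW_SERVER_RENEGOTIATION) is a per-window budget: at most N client renegotiations per time window, with the counter refilled once a renegotiation falls outside the window. A hedged, standalone sketch of that policy (field and function names are invented):

#include <stdint.h>
#include <time.h>

struct renego_limiter {
    unsigned int limit;      /* max renegotiations per window */
    unsigned int window;     /* window length in seconds */
    unsigned int remaining;  /* budget left in the current window */
    uint64_t     last;       /* time of the last accepted renegotiation */
};

/* Returns 1 if the renegotiation is allowed, 0 if the peer exhausted the budget. */
static int renego_allow(struct renego_limiter *rl) {
    uint64_t now = (uint64_t)time(NULL);
    if (rl->last && now - rl->last >= rl->window) {
        rl->remaining = rl->limit;          /* window elapsed: refill */
    }
    if (rl->remaining == 0) return 0;       /* caller should close the connection */
    rl->remaining--;
    rl->last = now;
    return 1;
}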
@@ -321,24 +378,13 @@ int us_internal_ssl_renegotiate(struct us_internal_ssl_socket_t *s) {
|
||||
}
|
||||
|
||||
void us_internal_update_handshake(struct us_internal_ssl_socket_t *s) {
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
|
||||
// nothing to do here, renegotiation must be handled in SSL_read
|
||||
if (s->handshake_state != HANDSHAKE_PENDING)
|
||||
return;
|
||||
|
||||
struct us_loop_t *loop = us_socket_context_loop(0, &context->sc);
|
||||
struct loop_ssl_data *loop_ssl_data =
|
||||
(struct loop_ssl_data *)loop->data.ssl_data;
|
||||
|
||||
loop_ssl_data->ssl_read_input_length = 0;
|
||||
loop_ssl_data->ssl_read_input_offset = 0;
|
||||
loop_ssl_data->ssl_socket = &s->s;
|
||||
loop_ssl_data->msg_more = 0;
|
||||
|
||||
if (us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s) ||
|
||||
SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN) {
|
||||
|
||||
if (us_internal_ssl_socket_is_closed(s) || us_internal_ssl_socket_is_shut_down(s) ||
|
||||
(s->ssl && SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN)) {
|
||||
|
||||
us_internal_trigger_handshake_callback(s, 0);
|
||||
return;
|
||||
@@ -347,7 +393,6 @@ void us_internal_update_handshake(struct us_internal_ssl_socket_t *s) {
|
||||
int result = SSL_do_handshake(s->ssl);
|
||||
|
||||
if (SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN) {
|
||||
s->received_ssl_shutdown = 1;
|
||||
us_internal_ssl_socket_close(s, 0, NULL);
|
||||
return;
|
||||
}
|
||||
@@ -356,30 +401,23 @@ void us_internal_update_handshake(struct us_internal_ssl_socket_t *s) {
|
||||
int err = SSL_get_error(s->ssl, result);
|
||||
// as far as I know these are the only errors we want to handle
|
||||
if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_WANT_WRITE) {
|
||||
us_internal_trigger_handshake_callback(s, 1);
|
||||
|
||||
// clear per thread error queue if it may contain something
|
||||
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
|
||||
ERR_clear_error();
|
||||
s->fatal_error = 1;
|
||||
}
|
||||
us_internal_trigger_handshake_callback(s, 0);
|
||||
|
||||
return;
|
||||
}
|
||||
s->handshake_state = HANDSHAKE_PENDING;
|
||||
// Ensure that we'll cycle through OpenSSL's internal state
|
||||
if (!us_socket_is_closed(0, &s->s) &&
|
||||
!us_internal_ssl_socket_is_shut_down(s)) {
|
||||
us_socket_write(1, loop_ssl_data->ssl_socket, "\0", 0, 0);
|
||||
}
|
||||
s->ssl_write_wants_read = 1;
|
||||
|
||||
return;
|
||||
}
|
||||
// success
|
||||
us_internal_trigger_handshake_callback(s, 1);
|
||||
// Ensure that we'll cycle through OpenSSL's internal state
|
||||
if (!us_socket_is_closed(0, &s->s) &&
|
||||
!us_internal_ssl_socket_is_shut_down(s)) {
|
||||
us_socket_write(1, loop_ssl_data->ssl_socket, "\0", 0, 0);
|
||||
}
|
||||
s->ssl_write_wants_read = 1;
|
||||
}
|
||||
|
||||
struct us_internal_ssl_socket_t *
|
||||
@@ -387,16 +425,33 @@ ssl_on_close(struct us_internal_ssl_socket_t *s, int code, void *reason) {
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
|
||||
SSL_free(s->ssl);
|
||||
us_internal_set_loop_ssl_data(s);
|
||||
struct us_internal_ssl_socket_t * ret = context->on_close(s, code, reason);
|
||||
SSL_free(s->ssl); // free SSL after on_close
|
||||
s->ssl = NULL; // set to NULL
|
||||
return ret;
|
||||
}
|
||||
|
||||
return context->on_close(s, code, reason);
|
||||
struct us_internal_ssl_socket_t * ssl_on_timeout(struct us_internal_ssl_socket_t *s) {
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
|
||||
us_internal_set_loop_ssl_data(s);
|
||||
return context->on_timeout(s);
|
||||
}
|
||||
|
||||
struct us_internal_ssl_socket_t * ssl_on_long_timeout(struct us_internal_ssl_socket_t *s) {
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
|
||||
us_internal_set_loop_ssl_data(s);
|
||||
return context->on_long_timeout(s);
|
||||
}
|
||||
|
||||
struct us_internal_ssl_socket_t *
|
||||
ssl_on_end(struct us_internal_ssl_socket_t *s) {
|
||||
us_internal_set_loop_ssl_data(s);
|
||||
// whatever state we are in, a TCP FIN is always an answered shutdown
|
||||
|
||||
/* Todo: this should report CLEANLY SHUTDOWN as reason */
|
||||
return us_internal_ssl_socket_close(s, 0, NULL);
|
||||
}
|
||||
|
||||
@@ -408,43 +463,20 @@ struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s,
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
|
||||
struct us_loop_t *loop = us_socket_context_loop(0, &context->sc);
|
||||
struct loop_ssl_data *loop_ssl_data =
|
||||
(struct loop_ssl_data *)loop->data.ssl_data;
|
||||
struct loop_ssl_data *loop_ssl_data = us_internal_set_loop_ssl_data(s);
|
||||
|
||||
// note: if we put data here we should never really clear it (not in write
|
||||
// either, it still should be available for SSL_write to read from!)
|
||||
loop_ssl_data->ssl_read_input = data;
|
||||
loop_ssl_data->ssl_read_input_length = length;
|
||||
loop_ssl_data->ssl_read_input_offset = 0;
|
||||
loop_ssl_data->ssl_socket = &s->s;
|
||||
loop_ssl_data->msg_more = 0;
|
||||
|
||||
if (us_socket_is_closed(0, &s->s) || s->received_ssl_shutdown) {
|
||||
if (us_internal_ssl_socket_is_closed(s)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (us_internal_ssl_socket_is_shut_down(s)) {
|
||||
|
||||
int ret = 0;
|
||||
if ((ret = SSL_shutdown(s->ssl)) == 1) {
|
||||
// two phase shutdown is complete here
|
||||
|
||||
/* Todo: this should also report some kind of clean shutdown */
|
||||
return us_internal_ssl_socket_close(s, 0, NULL);
|
||||
} else if (ret < 0) {
|
||||
|
||||
int err = SSL_get_error(s->ssl, ret);
|
||||
|
||||
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
|
||||
// we need to clear the error queue in case these added to the thread
|
||||
// local queue
|
||||
ERR_clear_error();
|
||||
}
|
||||
}
|
||||
|
||||
// no further processing of data when in shutdown state
|
||||
return s;
|
||||
us_internal_ssl_socket_close(s, 0, NULL);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// bug checking: this loop needs a lot of attention and clean-ups and
|
||||
@@ -452,17 +484,12 @@ struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s,
|
||||
int read = 0;
|
||||
restart:
|
||||
// read until shutdown
|
||||
while (!s->received_ssl_shutdown) {
|
||||
while (1) {
|
||||
int just_read = SSL_read(s->ssl,
|
||||
loop_ssl_data->ssl_read_output +
|
||||
LIBUS_RECV_BUFFER_PADDING + read,
|
||||
LIBUS_RECV_BUFFER_LENGTH - read);
|
||||
// we need to check if we received a shutdown here
|
||||
if (SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN) {
|
||||
s->received_ssl_shutdown = 1;
|
||||
// we will only close after we handle the data and errors
|
||||
}
|
||||
|
||||
|
||||
if (just_read <= 0) {
|
||||
int err = SSL_get_error(s->ssl, just_read);
|
||||
// as far as I know these are the only errors we want to handle
|
||||
@@ -477,8 +504,9 @@ restart:
|
||||
// clean and close renegotiation failed
|
||||
err = SSL_ERROR_SSL;
|
||||
} else if (err == SSL_ERROR_ZERO_RETURN) {
|
||||
// zero return can be EOF/FIN, if we have data just signal on_data and
|
||||
// close
|
||||
// Remotely-Initiated Shutdown
|
||||
// See: https://www.openssl.org/docs/manmaster/man3/SSL_shutdown.html
|
||||
|
||||
if (read) {
|
||||
context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(
|
||||
@@ -487,21 +515,24 @@ restart:
|
||||
s = context->on_data(
|
||||
s, loop_ssl_data->ssl_read_output + LIBUS_RECV_BUFFER_PADDING,
|
||||
read);
|
||||
if (!s || us_socket_is_closed(0, &s->s)) {
|
||||
return s;
|
||||
if (!s || us_internal_ssl_socket_is_closed(s)) {
|
||||
return NULL; // stop processing data
|
||||
}
|
||||
}
|
||||
// terminate connection here
|
||||
return us_internal_ssl_socket_close(s, 0, NULL);
|
||||
us_internal_ssl_socket_close(s, 0, NULL);
|
||||
return NULL; // stop processing data
|
||||
}
|
||||
|
||||
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
|
||||
// clear per thread error queue if it may contain something
|
||||
ERR_clear_error();
|
||||
s->fatal_error = 1;
|
||||
}
|
||||
|
||||
// terminate connection here
|
||||
return us_internal_ssl_socket_close(s, 0, NULL);
|
||||
us_internal_ssl_socket_close(s, 0, NULL);
|
||||
return NULL; // stop processing data
|
||||
} else {
|
||||
// emit the data we have and exit
|
||||
|
||||
@@ -526,8 +557,8 @@ restart:
|
||||
s = context->on_data(
|
||||
s, loop_ssl_data->ssl_read_output + LIBUS_RECV_BUFFER_PADDING,
|
||||
read);
|
||||
if (!s || us_socket_is_closed(0, &s->s)) {
|
||||
return s;
|
||||
if (!s || us_internal_ssl_socket_is_closed(s)) {
|
||||
return NULL; // stop processing data
|
||||
}
|
||||
|
||||
break;
|
||||
@@ -549,22 +580,19 @@ restart:
|
||||
// emit data and restart
|
||||
s = context->on_data(
|
||||
s, loop_ssl_data->ssl_read_output + LIBUS_RECV_BUFFER_PADDING, read);
|
||||
if (!s || us_socket_is_closed(0, &s->s)) {
|
||||
return s;
|
||||
if (!s || us_internal_ssl_socket_is_closed(s)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
read = 0;
|
||||
goto restart;
|
||||
}
|
||||
}
|
||||
|
||||
// we received the shutdown after reading so we close
|
||||
if (s->received_ssl_shutdown) {
|
||||
us_internal_ssl_socket_close(s, 0, NULL);
|
||||
return NULL;
|
||||
}
|
||||
// trigger writable if we failed last write with want read
|
||||
if (s->ssl_write_wants_read) {
|
||||
// Trigger writable if we failed the last SSL_write with SSL_ERROR_WANT_READ
// If we failed SSL_read because we need to write more data (SSL_ERROR_WANT_WRITE), we do not trigger on_writable; we wait until the next on_data or on_writable event
// SSL_read will try to flush the write buffer, and if that fails with SSL_ERROR_WANT_WRITE the socket is no longer writable, so it only makes sense to trigger on_writable once we can write more data
// Otherwise we could trigger on_writable -> on_data events in a recursive loop
|
||||
if (s->ssl_write_wants_read && !s->ssl_read_wants_write) {
|
||||
s->ssl_write_wants_read = 0;
|
||||
|
||||
// make sure to update context before we call (context can change if the
|
||||
@@ -575,8 +603,8 @@ restart:
|
||||
s = (struct us_internal_ssl_socket_t *)context->sc.on_writable(
|
||||
&s->s); // cast here!
|
||||
// if we are closed here, then exit
|
||||
if (!s || us_socket_is_closed(0, &s->s)) {
|
||||
return s;
|
||||
if (!s || us_internal_ssl_socket_is_closed(s)) {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -585,6 +613,7 @@ restart:
|
||||
|
||||
struct us_internal_ssl_socket_t *
|
||||
ssl_on_writable(struct us_internal_ssl_socket_t *s) {
|
||||
us_internal_set_loop_ssl_data(s);
|
||||
us_internal_update_handshake(s);
|
||||
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
@@ -606,8 +635,8 @@ ssl_on_writable(struct us_internal_ssl_socket_t *s) {
|
||||
}
|
||||
// Do not call on_writable if the socket is closed.
|
||||
// on close means the socket data is no longer accessible
|
||||
if (!s || us_socket_is_closed(0, &s->s)) {
|
||||
return 0;
|
||||
if (!s || us_internal_ssl_socket_is_closed(s) || us_internal_ssl_socket_is_shut_down(s)) {
|
||||
return s;
|
||||
}
|
||||
|
||||
if (s->handshake_state == HANDSHAKE_COMPLETED) {
|
||||
@@ -1029,15 +1058,8 @@ long us_internal_verify_peer_certificate( // NOLINT(runtime/int)
|
||||
}
|
||||
return err;
|
||||
}
|
||||
struct us_bun_verify_error_t us_ssl_socket_verify_error_from_ssl(SSL *ssl) {
|
||||
|
||||
struct us_bun_verify_error_t
|
||||
us_internal_verify_error(struct us_internal_ssl_socket_t *s) {
|
||||
if (us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s)) {
|
||||
return (struct us_bun_verify_error_t){
|
||||
.error = 0, .code = NULL, .reason = NULL};
|
||||
}
|
||||
|
||||
SSL *ssl = s->ssl;
|
||||
long x509_verify_error = // NOLINT(runtime/int)
|
||||
us_internal_verify_peer_certificate(ssl,
|
||||
X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT);
|
||||
@@ -1053,6 +1075,17 @@ us_internal_verify_error(struct us_internal_ssl_socket_t *s) {
|
||||
.error = x509_verify_error, .code = code, .reason = reason};
|
||||
}
|
||||
|
||||
struct us_bun_verify_error_t
|
||||
us_internal_verify_error(struct us_internal_ssl_socket_t *s) {
|
||||
if (!s->ssl || us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s)) {
|
||||
return (struct us_bun_verify_error_t){
|
||||
.error = 0, .code = NULL, .reason = NULL};
|
||||
}
|
||||
|
||||
return us_ssl_socket_verify_error_from_ssl(s->ssl);
|
||||
}
|
||||
|
||||
|
||||
int us_verify_callback(int preverify_ok, X509_STORE_CTX *ctx) {
|
||||
// From https://www.openssl.org/docs/man1.1.1/man3/SSL_verify_cb:
|
||||
//
|
||||
@@ -1317,10 +1350,6 @@ void us_bun_internal_ssl_socket_context_add_server_name(
|
||||
|
||||
/* We do not want to hold any nullptr's in our SNI tree */
|
||||
if (ssl_context) {
|
||||
#if ALLOW_SERVER_RENEGOTIATION
|
||||
context->client_renegotiation_limit = options.client_renegotiation_limit;
|
||||
context->client_renegotiation_window = options.client_renegotiation_window;
|
||||
#endif
|
||||
if (sni_add(context->sni, hostname_pattern, ssl_context)) {
|
||||
/* If we already had that name, ignore */
|
||||
free_ssl_context(ssl_context);
|
||||
@@ -1469,10 +1498,6 @@ us_internal_bun_create_ssl_socket_context(
|
||||
|
||||
context->on_handshake = NULL;
|
||||
context->handshake_data = NULL;
|
||||
#if ALLOW_SERVER_RENEGOTIATION
|
||||
context->client_renegotiation_limit = options.client_renegotiation_limit;
|
||||
context->client_renegotiation_window = options.client_renegotiation_window;
|
||||
#endif
|
||||
/* We, as parent context, may ignore data */
|
||||
context->sc.is_low_prio = (int (*)(struct us_socket_t *))ssl_is_low_prio;
|
||||
|
||||
@@ -1503,7 +1528,7 @@ void us_internal_ssl_socket_context_free(
|
||||
sni_free(context->sni, sni_hostname_destructor);
|
||||
}
|
||||
|
||||
us_socket_context_free(0, &context->sc);
|
||||
us_internal_socket_context_free(0, &context->sc);
|
||||
}
|
||||
|
||||
struct us_listen_socket_t *us_internal_ssl_socket_context_listen(
|
||||
@@ -1592,7 +1617,8 @@ void us_internal_ssl_socket_context_on_timeout(
|
||||
struct us_internal_ssl_socket_t *s)) {
|
||||
us_socket_context_on_timeout(0, (struct us_socket_context_t *)context,
|
||||
(struct us_socket_t * (*)(struct us_socket_t *))
|
||||
on_timeout);
|
||||
ssl_on_timeout);
|
||||
context->on_timeout = on_timeout;
|
||||
}
|
||||
|
||||
void us_internal_ssl_socket_context_on_long_timeout(
|
||||
@@ -1601,7 +1627,8 @@ void us_internal_ssl_socket_context_on_long_timeout(
|
||||
struct us_internal_ssl_socket_t *s)) {
|
||||
us_socket_context_on_long_timeout(
|
||||
0, (struct us_socket_context_t *)context,
|
||||
(struct us_socket_t * (*)(struct us_socket_t *)) on_long_timeout);
|
||||
(struct us_socket_t * (*)(struct us_socket_t *)) ssl_on_long_timeout);
|
||||
context->on_long_timeout = on_long_timeout;
|
||||
}
|
||||
|
||||
/* We do not really listen to passed FIN-handler, we entirely override it with
|
||||
@@ -1656,8 +1683,8 @@ int us_internal_ssl_socket_raw_write(struct us_internal_ssl_socket_t *s,
|
||||
|
||||
int us_internal_ssl_socket_write(struct us_internal_ssl_socket_t *s,
|
||||
const char *data, int length, int msg_more) {
|
||||
|
||||
if (us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s)) {
|
||||
|
||||
if (us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s) || length == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1697,6 +1724,7 @@ int us_internal_ssl_socket_write(struct us_internal_ssl_socket_t *s,
|
||||
// these two errors may add to the error queue, which is per thread and
|
||||
// must be cleared
|
||||
ERR_clear_error();
|
||||
s->fatal_error = 1;
|
||||
|
||||
// all errors here except for want write are critical and should not
|
||||
// happen
|
||||
@@ -1714,12 +1742,12 @@ void *us_internal_connecting_ssl_socket_ext(struct us_connecting_socket_t *s) {
|
||||
}
|
||||
|
||||
int us_internal_ssl_socket_is_shut_down(struct us_internal_ssl_socket_t *s) {
|
||||
return us_socket_is_shut_down(0, &s->s) ||
|
||||
SSL_get_shutdown(s->ssl) & SSL_SENT_SHUTDOWN;
|
||||
return !s->ssl || us_socket_is_shut_down(0, &s->s) ||
|
||||
SSL_get_shutdown(s->ssl) & SSL_SENT_SHUTDOWN || s->fatal_error;
|
||||
}
|
||||
|
||||
void us_internal_ssl_socket_shutdown(struct us_internal_ssl_socket_t *s) {
|
||||
if (!us_socket_is_closed(0, &s->s) &&
|
||||
if (!us_internal_ssl_socket_is_closed(s) &&
|
||||
!us_internal_ssl_socket_is_shut_down(s)) {
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
@@ -1740,11 +1768,8 @@ void us_internal_ssl_socket_shutdown(struct us_internal_ssl_socket_t *s) {
|
||||
loop_ssl_data->ssl_socket = &s->s;
|
||||
|
||||
loop_ssl_data->msg_more = 0;
|
||||
// sets SSL_SENT_SHUTDOWN no matter what (not actually true if error!)
|
||||
// sets SSL_SENT_SHUTDOWN and waits for the other side to do the same
|
||||
int ret = SSL_shutdown(s->ssl);
|
||||
if (ret == 0) {
|
||||
ret = SSL_shutdown(s->ssl);
|
||||
}
|
||||
|
||||
if (SSL_in_init(s->ssl) || SSL_get_quiet_shutdown(s->ssl)) {
|
||||
// when SSL_in_init or quiet shutdown in BoringSSL, we call shutdown
|
||||
@@ -1758,6 +1783,7 @@ void us_internal_ssl_socket_shutdown(struct us_internal_ssl_socket_t *s) {
|
||||
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
|
||||
// clear
|
||||
ERR_clear_error();
|
||||
s->fatal_error = 1;
|
||||
}
|
||||
|
||||
// we get here if we are shutting down while still in init
|
||||
@@ -1798,6 +1824,7 @@ ssl_wrapped_context_on_close(struct us_internal_ssl_socket_t *s, int code,
|
||||
wrapped_context->old_events.on_close((struct us_socket_t *)s, code, reason);
|
||||
}
|
||||
|
||||
us_socket_context_unref(0, wrapped_context->tcp_context);
|
||||
return s;
|
||||
}
|
||||
|
||||
@@ -1954,6 +1981,7 @@ struct us_internal_ssl_socket_t *us_internal_ssl_socket_wrap_with_tls(
|
||||
}
|
||||
|
||||
struct us_socket_context_t *old_context = us_socket_context(0, s);
|
||||
us_socket_context_ref(0,old_context);
|
||||
|
||||
struct us_socket_context_t *context = us_create_bun_socket_context(
|
||||
1, old_context->loop, sizeof(struct us_wrapped_socket_context_t),
|
||||
@@ -1976,6 +2004,7 @@ struct us_internal_ssl_socket_t *us_internal_ssl_socket_wrap_with_tls(
|
||||
};
|
||||
wrapped_context->old_events = old_events;
|
||||
wrapped_context->events = events;
|
||||
wrapped_context->tcp_context = old_context;
|
||||
|
||||
// no need to wrap open because socket is already open (only new context will
|
||||
// be called so we can configure hostname and ssl stuff normally here before
|
||||
@@ -2048,8 +2077,8 @@ us_socket_context_on_socket_connect_error(
|
||||
socket->ssl = NULL;
|
||||
socket->ssl_write_wants_read = 0;
|
||||
socket->ssl_read_wants_write = 0;
|
||||
socket->fatal_error = 0;
|
||||
socket->handshake_state = HANDSHAKE_PENDING;
|
||||
socket->received_ssl_shutdown = 0;
|
||||
return socket;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,13 +1,16 @@
|
||||
// MSVC doesn't support C11 stdatomic.h properly yet.
|
||||
// so we use C++ std::atomic instead.
|
||||
#include "./internal/internal.h"
|
||||
#include "./root_certs.h"
|
||||
#include <openssl/x509.h>
|
||||
#include <openssl/pem.h>
|
||||
#include "./internal/internal.h"
|
||||
#include <atomic>
|
||||
|
||||
#include <openssl/pem.h>
|
||||
#include <openssl/x509.h>
|
||||
#include <string.h>
|
||||
static const int root_certs_size = sizeof(root_certs) / sizeof(root_certs[0]);
|
||||
static X509* root_cert_instances[sizeof(root_certs) / sizeof(root_certs[0])] = {NULL};
|
||||
static X509 *root_cert_instances[sizeof(root_certs) / sizeof(root_certs[0])] = {
|
||||
NULL};
|
||||
static X509 *root_extra_cert_instances = {NULL};
|
||||
|
||||
static std::atomic_flag root_cert_instances_lock = ATOMIC_FLAG_INIT;
|
||||
static std::atomic_bool root_cert_instances_initialized = 0;
|
||||
|
||||
@@ -16,15 +19,16 @@ static std::atomic_bool root_cert_instances_initialized = 0;
|
||||
// for the OpenSSL CLI, but works poorly for this case because it involves
|
||||
// synchronous interaction with the controlling terminal, something we never
|
||||
// want, and use this function to avoid it.
|
||||
int us_no_password_callback(char* buf, int size, int rwflag, void* u) {
|
||||
int us_no_password_callback(char *buf, int size, int rwflag, void *u) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
static X509 * us_ssl_ctx_get_X509_without_callback_from(struct us_cert_string_t content) {
|
||||
static X509 *
|
||||
us_ssl_ctx_get_X509_without_callback_from(struct us_cert_string_t content) {
|
||||
X509 *x = NULL;
|
||||
BIO *in;
|
||||
|
||||
ERR_clear_error(); // clear error stack for SSL_CTX_use_certificate()
|
||||
ERR_clear_error(); // clear error stack for SSL_CTX_use_certificate()
|
||||
|
||||
in = BIO_new_mem_buf(content.str, content.len);
|
||||
if (in == NULL) {
|
||||
@@ -37,9 +41,37 @@ static X509 * us_ssl_ctx_get_X509_without_callback_from(struct us_cert_string_t
|
||||
OPENSSL_PUT_ERROR(SSL, ERR_R_PEM_LIB);
|
||||
goto end;
|
||||
}
|
||||
|
||||
return x;
|
||||
end:
|
||||
X509_free(x);
|
||||
BIO_free(in);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static X509 *
|
||||
us_ssl_ctx_get_X509_without_callback_from_file(const char *filename) {
|
||||
X509 *x = NULL;
|
||||
BIO *in;
|
||||
|
||||
ERR_clear_error(); // clear error stack for SSL_CTX_use_certificate()
|
||||
|
||||
in = BIO_new(BIO_s_file());
|
||||
if (in == NULL) {
|
||||
OPENSSL_PUT_ERROR(SSL, ERR_R_BUF_LIB);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (BIO_read_filename(in, filename) <= 0) {
|
||||
OPENSSL_PUT_ERROR(SSL, ERR_R_SYS_LIB);
|
||||
goto end;
|
||||
}
|
||||
|
||||
x = PEM_read_bio_X509(in, NULL, us_no_password_callback, NULL);
|
||||
if (x == NULL) {
|
||||
OPENSSL_PUT_ERROR(SSL, ERR_R_PEM_LIB);
|
||||
goto end;
|
||||
}
|
||||
return x;
|
||||
end:
|
||||
X509_free(x);
|
||||
BIO_free(in);
|
||||
@@ -47,44 +79,65 @@ end:
|
||||
}
|
||||
|
||||
static void us_internal_init_root_certs() {
|
||||
if(std::atomic_load(&root_cert_instances_initialized) == 1) return;
|
||||
if (std::atomic_load(&root_cert_instances_initialized) == 1)
|
||||
return;
|
||||
|
||||
while(atomic_flag_test_and_set_explicit(&root_cert_instances_lock, std::memory_order_acquire));
|
||||
while (atomic_flag_test_and_set_explicit(&root_cert_instances_lock,
|
||||
std::memory_order_acquire))
|
||||
;
|
||||
|
||||
if(!atomic_exchange(&root_cert_instances_initialized, 1)) {
|
||||
for (size_t i = 0; i < root_certs_size; i++) {
|
||||
root_cert_instances[i] = us_ssl_ctx_get_X509_without_callback_from(root_certs[i]);
|
||||
}
|
||||
}
|
||||
|
||||
atomic_flag_clear_explicit(&root_cert_instances_lock, std::memory_order_release);
|
||||
}
|
||||
|
||||
extern "C" int us_internal_raw_root_certs(struct us_cert_string_t** out) {
|
||||
*out = root_certs;
|
||||
return root_certs_size;
|
||||
}
|
||||
|
||||
extern "C" X509_STORE* us_get_default_ca_store() {
|
||||
X509_STORE *store = X509_STORE_new();
|
||||
if (store == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!X509_STORE_set_default_paths(store)) {
|
||||
X509_STORE_free(store);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
us_internal_init_root_certs();
|
||||
|
||||
// load all root_cert_instances into the default CA store
|
||||
if (!atomic_exchange(&root_cert_instances_initialized, 1)) {
|
||||
for (size_t i = 0; i < root_certs_size; i++) {
|
||||
X509* cert = root_cert_instances[i];
|
||||
if(cert == NULL) continue;
|
||||
X509_up_ref(cert);
|
||||
X509_STORE_add_cert(store, cert);
|
||||
root_cert_instances[i] =
|
||||
us_ssl_ctx_get_X509_without_callback_from(root_certs[i]);
|
||||
}
|
||||
|
||||
return store;
|
||||
|
||||
// get extra cert option from environment variable
|
||||
const char *extra_cert = getenv("NODE_EXTRA_CA_CERTS");
|
||||
if (extra_cert) {
|
||||
size_t length = strlen(extra_cert);
|
||||
if (length > 0) {
|
||||
root_extra_cert_instances =
|
||||
us_ssl_ctx_get_X509_without_callback_from_file(extra_cert);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
atomic_flag_clear_explicit(&root_cert_instances_lock,
|
||||
std::memory_order_release);
|
||||
}
|
||||
|
||||
extern "C" int us_internal_raw_root_certs(struct us_cert_string_t **out) {
|
||||
*out = root_certs;
|
||||
return root_certs_size;
|
||||
}
|
||||
|
||||
extern "C" X509_STORE *us_get_default_ca_store() {
|
||||
X509_STORE *store = X509_STORE_new();
|
||||
if (store == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!X509_STORE_set_default_paths(store)) {
|
||||
X509_STORE_free(store);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
us_internal_init_root_certs();
|
||||
|
||||
// load all root_cert_instances into the default CA store
|
||||
for (size_t i = 0; i < root_certs_size; i++) {
|
||||
X509 *cert = root_cert_instances[i];
|
||||
if (cert == NULL)
|
||||
continue;
|
||||
X509_up_ref(cert);
|
||||
X509_STORE_add_cert(store, cert);
|
||||
}
|
||||
|
||||
if (root_extra_cert_instances) {
|
||||
X509_up_ref(root_extra_cert_instances);
|
||||
X509_STORE_add_cert(store, root_extra_cert_instances);
|
||||
}
|
||||
|
||||
return store;
|
||||
}
|
||||
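A caller that wants these roots would typically hand the returned store to OpenSSL via SSL_CTX_set_cert_store, which takes ownership of it. A short usage sketch (the wrapper function is hypothetical):

#include <openssl/ssl.h>

extern X509_STORE *us_get_default_ca_store(void);

static int use_default_roots(SSL_CTX *ctx) {
    /* system default paths + bundled roots + optional NODE_EXTRA_CA_CERTS */
    X509_STORE *store = us_get_default_ca_store();
    if (!store) return 0;
    SSL_CTX_set_cert_store(ctx, store); /* ctx now owns the store */
    return 1;
}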
@@ -3598,4 +3598,20 @@ static struct us_cert_string_t root_certs[] = {
|
||||
"4Sw5/7W0cwDk90imc6y/st53BIe0o82bNSQ3+pCTE4FCxpgmdTdmQRCsu/WU48IxK63nI1bM\n"
|
||||
"NSWSs1A=\n"
|
||||
"-----END CERTIFICATE-----",.len=2033},
|
||||
|
||||
/* FIRMAPROFESIONAL CA ROOT-A WEB */
|
||||
{.str="-----BEGIN CERTIFICATE-----\n"
|
||||
"MIICejCCAgCgAwIBAgIQMZch7a+JQn81QYehZ1ZMbTAKBggqhkjOPQQDAzBuMQswCQYDVQQG\n"
|
||||
"EwJFUzEcMBoGA1UECgwTRmlybWFwcm9mZXNpb25hbCBTQTEYMBYGA1UEYQwPVkFURVMtQTYy\n"
|
||||
"NjM0MDY4MScwJQYDVQQDDB5GSVJNQVBST0ZFU0lPTkFMIENBIFJPT1QtQSBXRUIwHhcNMjIw\n"
|
||||
"NDA2MDkwMTM2WhcNNDcwMzMxMDkwMTM2WjBuMQswCQYDVQQGEwJFUzEcMBoGA1UECgwTRmly\n"
|
||||
"bWFwcm9mZXNpb25hbCBTQTEYMBYGA1UEYQwPVkFURVMtQTYyNjM0MDY4MScwJQYDVQQDDB5G\n"
|
||||
"SVJNQVBST0ZFU0lPTkFMIENBIFJPT1QtQSBXRUIwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARH\n"
|
||||
"U+osEaR3xyrq89Zfe9MEkVz6iMYiuYMQYneEMy3pA4jU4DP37XcsSmDq5G+tbbT4TIqk5B/K\n"
|
||||
"6k84Si6CcyvHZpsKjECcfIr28jlgst7L7Ljkb+qbXbdTkBgyVcUgt5SjYzBhMA8GA1UdEwEB\n"
|
||||
"/wQFMAMBAf8wHwYDVR0jBBgwFoAUk+FDY1w8ndYn81LsF7Kpryz3dvgwHQYDVR0OBBYEFJPh\n"
|
||||
"Q2NcPJ3WJ/NS7Beyqa8s93b4MA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNoADBlAjAd\n"
|
||||
"fKR7w4l1M+E7qUW/Runpod3JIha3RxEL2Jq68cgLcFBTApFwhVmpHqTm6iMxoAACMQD94viz\n"
|
||||
"rxa5HnPEluPBMBnYfubDl94cT7iJLzPrSA8Z94dGXSaQpYXFuXqUPoeovQA=\n"
|
||||
"-----END CERTIFICATE-----",.len=917},
|
||||
};
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
// clang-format off
|
||||
#pragma once
|
||||
#ifndef INTERNAL_H
|
||||
#define INTERNAL_H
|
||||
@@ -100,6 +101,9 @@ struct addrinfo_result {
|
||||
int error;
|
||||
};
|
||||
|
||||
#define us_internal_ssl_socket_context_r struct us_internal_ssl_socket_context_t *nonnull_arg
|
||||
#define us_internal_ssl_socket_r struct us_internal_ssl_socket_t *nonnull_arg
|
||||
|
||||
extern int Bun__addrinfo_get(struct us_loop_t* loop, const char* host, struct addrinfo_request** ptr);
|
||||
extern int Bun__addrinfo_set(struct addrinfo_request* ptr, struct us_connecting_socket_t* socket);
|
||||
extern void Bun__addrinfo_freeRequest(struct addrinfo_request* addrinfo_req, int error);
|
||||
@@ -109,19 +113,19 @@ extern struct addrinfo_result *Bun__addrinfo_getRequestResult(struct addrinfo_re
|
||||
/* Loop related */
|
||||
void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error,
|
||||
int events);
|
||||
void us_internal_timer_sweep(struct us_loop_t *loop);
|
||||
void us_internal_free_closed_sockets(struct us_loop_t *loop);
|
||||
void us_internal_timer_sweep(us_loop_r loop);
|
||||
void us_internal_free_closed_sockets(us_loop_r loop);
|
||||
void us_internal_loop_link(struct us_loop_t *loop,
|
||||
struct us_socket_context_t *context);
|
||||
void us_internal_loop_unlink(struct us_loop_t *loop,
|
||||
struct us_socket_context_t *context);
|
||||
void us_internal_loop_data_init(struct us_loop_t *loop,
|
||||
void (*wakeup_cb)(struct us_loop_t *loop),
|
||||
void (*pre_cb)(struct us_loop_t *loop),
|
||||
void (*post_cb)(struct us_loop_t *loop));
|
||||
void us_internal_loop_data_free(struct us_loop_t *loop);
|
||||
void us_internal_loop_pre(struct us_loop_t *loop);
|
||||
void us_internal_loop_post(struct us_loop_t *loop);
|
||||
void (*wakeup_cb)(us_loop_r loop),
|
||||
void (*pre_cb)(us_loop_r loop),
|
||||
void (*post_cb)(us_loop_r loop));
|
||||
void us_internal_loop_data_free(us_loop_r loop);
|
||||
void us_internal_loop_pre(us_loop_r loop);
|
||||
void us_internal_loop_post(us_loop_r loop);
|
||||
|
||||
/* Asyncs (old) */
|
||||
struct us_internal_async *us_internal_create_async(struct us_loop_t *loop,
|
||||
@@ -138,18 +142,22 @@ int us_internal_poll_type(struct us_poll_t *p);
|
||||
void us_internal_poll_set_type(struct us_poll_t *p, int poll_type);
|
||||
|
||||
/* SSL loop data */
|
||||
void us_internal_init_loop_ssl_data(struct us_loop_t *loop);
|
||||
void us_internal_free_loop_ssl_data(struct us_loop_t *loop);
|
||||
void us_internal_init_loop_ssl_data(us_loop_r loop);
|
||||
void us_internal_free_loop_ssl_data(us_loop_r loop);
|
||||
|
||||
/* Socket context related */
|
||||
void us_internal_socket_context_link_socket(struct us_socket_context_t *context,
|
||||
struct us_socket_t *s);
|
||||
void us_internal_socket_context_unlink_socket(
|
||||
struct us_socket_context_t *context, struct us_socket_t *s);
|
||||
void us_internal_socket_context_link_socket(us_socket_context_r context,
|
||||
us_socket_r s);
|
||||
void us_internal_socket_context_unlink_socket(int ssl,
|
||||
us_socket_context_r context, us_socket_r s);
|
||||
|
||||
void us_internal_socket_after_resolve(struct us_connecting_socket_t *s);
|
||||
void us_internal_socket_after_open(struct us_socket_t *s, int error);
|
||||
int us_internal_handle_dns_results(struct us_loop_t *loop);
|
||||
void us_internal_socket_after_open(us_socket_r s, int error);
|
||||
struct us_internal_ssl_socket_t *
|
||||
us_internal_ssl_socket_close(us_internal_ssl_socket_r s, int code,
|
||||
void *reason);
|
||||
|
||||
int us_internal_handle_dns_results(us_loop_r loop);
|
||||
|
||||
/* Sockets are polls */
|
||||
struct us_socket_t {
|
||||
@@ -168,6 +176,7 @@ struct us_socket_t {
|
||||
struct us_connecting_socket_t {
|
||||
alignas(LIBUS_EXT_ALIGNMENT) struct addrinfo_request *addrinfo_req;
|
||||
struct us_socket_context_t *context;
|
||||
// this is used to track all dns resolutions in this connection
|
||||
struct us_connecting_socket_t *next;
|
||||
struct us_socket_t *connecting_head;
|
||||
int options;
|
||||
@@ -178,9 +187,13 @@ struct us_connecting_socket_t {
|
||||
uint16_t port;
|
||||
int error;
|
||||
struct addrinfo *addrinfo_head;
|
||||
// this is used to track pending connecting sockets in the context
|
||||
struct us_connecting_socket_t* next_pending;
|
||||
struct us_connecting_socket_t* prev_pending;
|
||||
};
|
||||
|
||||
struct us_wrapped_socket_context_t {
|
||||
struct us_socket_context_t* tcp_context;
|
||||
struct us_socket_events_t events;
|
||||
struct us_socket_events_t old_events;
|
||||
};
|
||||
@@ -243,17 +256,19 @@ struct us_listen_socket_t {
|
||||
|
||||
/* Listen sockets are kept in their own list */
|
||||
void us_internal_socket_context_link_listen_socket(
|
||||
struct us_socket_context_t *context, struct us_listen_socket_t *s);
|
||||
void us_internal_socket_context_unlink_listen_socket(
|
||||
struct us_socket_context_t *context, struct us_listen_socket_t *s);
|
||||
us_socket_context_r context, struct us_listen_socket_t *s);
|
||||
void us_internal_socket_context_unlink_listen_socket(int ssl,
|
||||
us_socket_context_r context, struct us_listen_socket_t *s);
|
||||
|
||||
struct us_socket_context_t {
|
||||
alignas(LIBUS_EXT_ALIGNMENT) struct us_loop_t *loop;
|
||||
uint32_t global_tick;
|
||||
uint32_t ref_count;
|
||||
unsigned char timestamp;
|
||||
unsigned char long_timestamp;
|
||||
struct us_socket_t *head_sockets;
|
||||
struct us_listen_socket_t *head_listen_sockets;
|
||||
struct us_connecting_socket_t *head_connecting_sockets;
|
||||
struct us_socket_t *iterator;
|
||||
struct us_socket_context_t *prev, *next;
|
||||
|
||||
@@ -280,34 +295,35 @@ struct us_internal_ssl_socket_t;
|
||||
typedef void (*us_internal_on_handshake_t)(
|
||||
struct us_internal_ssl_socket_t *, int success,
|
||||
struct us_bun_verify_error_t verify_error, void *custom_data);
|
||||
|
||||
|
||||
void us_internal_socket_context_free(int ssl, struct us_socket_context_t *context);
|
||||
/* SNI functions */
|
||||
void us_internal_ssl_socket_context_add_server_name(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_ssl_socket_context_r context,
|
||||
const char *hostname_pattern, struct us_socket_context_options_t options,
|
||||
void *user);
|
||||
void us_bun_internal_ssl_socket_context_add_server_name(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_ssl_socket_context_r context,
|
||||
const char *hostname_pattern,
|
||||
struct us_bun_socket_context_options_t options, void *user);
|
||||
void us_internal_ssl_socket_context_remove_server_name(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_ssl_socket_context_r context,
|
||||
const char *hostname_pattern);
|
||||
void us_internal_ssl_socket_context_on_server_name(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_ssl_socket_context_r context,
|
||||
void (*cb)(struct us_internal_ssl_socket_context_t *, const char *));
|
||||
void *
|
||||
us_internal_ssl_socket_get_sni_userdata(struct us_internal_ssl_socket_t *s);
|
||||
us_internal_ssl_socket_get_sni_userdata(us_internal_ssl_socket_r s);
|
||||
void *us_internal_ssl_socket_context_find_server_name_userdata(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_ssl_socket_context_r context,
|
||||
const char *hostname_pattern);
|
||||
|
||||
void *
|
||||
us_internal_ssl_socket_get_native_handle(struct us_internal_ssl_socket_t *s);
|
||||
us_internal_ssl_socket_get_native_handle(us_internal_ssl_socket_r s);
|
||||
void *us_internal_ssl_socket_context_get_native_handle(
|
||||
struct us_internal_ssl_socket_context_t *context);
|
||||
us_internal_ssl_socket_context_r context);
|
||||
struct us_bun_verify_error_t
|
||||
us_internal_verify_error(struct us_internal_ssl_socket_t *s);
|
||||
us_internal_verify_error(us_internal_ssl_socket_r s);
|
||||
struct us_internal_ssl_socket_context_t *us_internal_create_ssl_socket_context(
|
||||
struct us_loop_t *loop, int context_ext_size,
|
||||
struct us_socket_context_options_t options);
|
||||
@@ -317,111 +333,115 @@ us_internal_bun_create_ssl_socket_context(
|
||||
struct us_bun_socket_context_options_t options);
|
||||
|
||||
void us_internal_ssl_socket_context_free(
|
||||
struct us_internal_ssl_socket_context_t *context);
|
||||
us_internal_ssl_socket_context_r context);
|
||||
void us_internal_ssl_socket_context_on_open(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_t *(*on_open)(
|
||||
struct us_internal_ssl_socket_t *s, int is_client, char *ip,
|
||||
us_internal_ssl_socket_r s, int is_client, char *ip,
|
||||
int ip_length));
|
||||
|
||||
void us_internal_ssl_socket_context_on_close(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_t *(*on_close)(
|
||||
struct us_internal_ssl_socket_t *s, int code, void *reason));
|
||||
us_internal_ssl_socket_r s, int code, void *reason));
|
||||
|
||||
void us_internal_ssl_socket_context_on_data(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_t *(*on_data)(
|
||||
struct us_internal_ssl_socket_t *s, char *data, int length));
|
||||
us_internal_ssl_socket_r s, char *data, int length));
|
||||
|
||||
void us_internal_update_handshake(struct us_internal_ssl_socket_t *s);
|
||||
int us_internal_renegotiate(struct us_internal_ssl_socket_t *s);
|
||||
void us_internal_trigger_handshake_callback(struct us_internal_ssl_socket_t *s,
|
||||
void us_internal_update_handshake(us_internal_ssl_socket_r s);
|
||||
int us_internal_renegotiate(us_internal_ssl_socket_r s);
|
||||
void us_internal_trigger_handshake_callback(us_internal_ssl_socket_r s,
|
||||
int success);
|
||||
void us_internal_on_ssl_handshake(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_ssl_socket_context_r context,
|
||||
us_internal_on_handshake_t onhandshake, void *custom_data);
|
||||
|
||||
void us_internal_ssl_socket_context_on_writable(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_t *(*on_writable)(
|
||||
struct us_internal_ssl_socket_t *s));
|
||||
us_internal_ssl_socket_r s));
|
||||
|
||||
void us_internal_ssl_socket_context_on_timeout(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_t *(*on_timeout)(
|
||||
struct us_internal_ssl_socket_t *s));
|
||||
us_internal_ssl_socket_r s));
|
||||
|
||||
void us_internal_ssl_socket_context_on_long_timeout(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_t *(*on_timeout)(
|
||||
struct us_internal_ssl_socket_t *s));
|
||||
us_internal_ssl_socket_r s));
|
||||
|
||||
void us_internal_ssl_socket_context_on_end(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_t *(*on_end)(
|
||||
struct us_internal_ssl_socket_t *s));
|
||||
us_internal_ssl_socket_r s));
|
||||
|
||||
void us_internal_ssl_socket_context_on_connect_error(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_t *(*on_connect_error)(
|
||||
struct us_internal_ssl_socket_t *s, int code));
|
||||
us_internal_ssl_socket_r s, int code));
|
||||
|
||||
void us_internal_ssl_socket_context_on_socket_connect_error(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_t *(*on_socket_connect_error)(
|
||||
struct us_internal_ssl_socket_t *s, int code));
|
||||
us_internal_ssl_socket_r s, int code));
|
||||
|
||||
struct us_listen_socket_t *us_internal_ssl_socket_context_listen(
|
||||
struct us_internal_ssl_socket_context_t *context, const char *host,
|
||||
us_internal_ssl_socket_context_r context, const char *host,
|
||||
int port, int options, int socket_ext_size);
|
||||
|
||||
struct us_listen_socket_t *us_internal_ssl_socket_context_listen_unix(
|
||||
struct us_internal_ssl_socket_context_t *context, const char *path,
|
||||
us_internal_ssl_socket_context_r context, const char *path,
|
||||
size_t pathlen, int options, int socket_ext_size);
|
||||
|
||||
struct us_connecting_socket_t *us_internal_ssl_socket_context_connect(
|
||||
struct us_internal_ssl_socket_context_t *context, const char *host,
|
||||
us_internal_ssl_socket_context_r context, const char *host,
|
||||
int port, int options, int socket_ext_size, int* is_resolved);
|
||||
|
||||
struct us_internal_ssl_socket_t *us_internal_ssl_socket_context_connect_unix(
|
||||
struct us_internal_ssl_socket_context_t *context, const char *server_path,
|
||||
us_internal_ssl_socket_context_r context, const char *server_path,
|
||||
size_t pathlen, int options, int socket_ext_size);
|
||||
|
||||
int us_internal_ssl_socket_write(struct us_internal_ssl_socket_t *s,
|
||||
int us_internal_ssl_socket_write(us_internal_ssl_socket_r s,
|
||||
const char *data, int length, int msg_more);
|
||||
int us_internal_ssl_socket_raw_write(struct us_internal_ssl_socket_t *s,
|
||||
int us_internal_ssl_socket_raw_write(us_internal_ssl_socket_r s,
|
||||
const char *data, int length,
|
||||
int msg_more);
|
||||
|
||||
void us_internal_ssl_socket_timeout(struct us_internal_ssl_socket_t *s,
|
||||
void us_internal_ssl_socket_timeout(us_internal_ssl_socket_r s,
|
||||
unsigned int seconds);
|
||||
void *
|
||||
us_internal_ssl_socket_context_ext(struct us_internal_ssl_socket_context_t *s);
|
||||
struct us_internal_ssl_socket_context_t *
|
||||
us_internal_ssl_socket_get_context(struct us_internal_ssl_socket_t *s);
|
||||
void *us_internal_ssl_socket_ext(struct us_internal_ssl_socket_t *s);
|
||||
us_internal_ssl_socket_get_context(us_internal_ssl_socket_r s);
|
||||
void *us_internal_ssl_socket_ext(us_internal_ssl_socket_r s);
|
||||
void *us_internal_connecting_ssl_socket_ext(struct us_connecting_socket_t *c);
|
||||
int us_internal_ssl_socket_is_shut_down(struct us_internal_ssl_socket_t *s);
|
||||
void us_internal_ssl_socket_shutdown(struct us_internal_ssl_socket_t *s);
|
||||
int us_internal_ssl_socket_is_shut_down(us_internal_ssl_socket_r s);
|
||||
int us_internal_ssl_socket_is_closed(us_internal_ssl_socket_r s);
|
||||
void us_internal_ssl_socket_shutdown(us_internal_ssl_socket_r s);
|
||||
|
||||
struct us_internal_ssl_socket_t *us_internal_ssl_socket_context_adopt_socket(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
struct us_internal_ssl_socket_t *s, int ext_size);
|
||||
us_internal_ssl_socket_context_r context,
|
||||
us_internal_ssl_socket_r s, int ext_size);
|
||||
|
||||
struct us_internal_ssl_socket_t *us_internal_ssl_socket_wrap_with_tls(
|
||||
struct us_socket_t *s, struct us_bun_socket_context_options_t options,
|
||||
us_socket_r s, struct us_bun_socket_context_options_t options,
|
||||
struct us_socket_events_t events, int socket_ext_size);
|
||||
struct us_internal_ssl_socket_context_t *
|
||||
us_internal_create_child_ssl_socket_context(
|
||||
struct us_internal_ssl_socket_context_t *context, int context_ext_size);
|
||||
us_internal_ssl_socket_context_r context, int context_ext_size);
|
||||
struct us_loop_t *us_internal_ssl_socket_context_loop(
|
||||
struct us_internal_ssl_socket_context_t *context);
|
||||
us_internal_ssl_socket_context_r context);
|
||||
struct us_internal_ssl_socket_t *
|
||||
us_internal_ssl_socket_open(struct us_internal_ssl_socket_t *s, int is_client,
|
||||
us_internal_ssl_socket_open(us_internal_ssl_socket_r s, int is_client,
|
||||
char *ip, int ip_length);
|
||||
|
||||
int us_raw_root_certs(struct us_cert_string_t **out);
|
||||
|
||||
void us_internal_socket_context_unlink_connecting_socket(int ssl, struct us_socket_context_t *context, struct us_connecting_socket_t *c);
|
||||
void us_internal_socket_context_link_connecting_socket(int ssl, struct us_socket_context_t *context, struct us_connecting_socket_t *c);
|
||||
#endif
|
||||
|
||||
#endif // INTERNAL_H
|
||||
|
||||
@@ -27,6 +27,7 @@ struct us_internal_loop_data_t {
|
||||
int last_write_failed;
|
||||
struct us_socket_context_t *head;
|
||||
struct us_socket_context_t *iterator;
|
||||
struct us_socket_context_t *closed_context_head;
|
||||
char *recv_buf;
|
||||
char *send_buf;
|
||||
void *ssl_data;
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
|
||||
#ifndef BSD_H
|
||||
#define BSD_H
|
||||
#pragma once
|
||||
|
||||
// top-most wrapper of bsd-like syscalls
|
||||
|
||||
@@ -25,7 +26,7 @@
|
||||
|
||||
#include "libusockets.h"
|
||||
|
||||
#ifdef _WIN32
|
||||
#ifdef _WIN32
|
||||
#ifndef NOMINMAX
|
||||
#define NOMINMAX
|
||||
#endif
|
||||
@@ -34,7 +35,7 @@
|
||||
#pragma comment(lib, "ws2_32.lib")
|
||||
#define SETSOCKOPT_PTR_TYPE const char *
|
||||
#define LIBUS_SOCKET_ERROR INVALID_SOCKET
|
||||
#else
|
||||
#else /* POSIX */
|
||||
#ifndef _GNU_SOURCE
|
||||
#define _GNU_SOURCE
|
||||
#endif
|
||||
@@ -64,14 +65,76 @@ struct bsd_addr_t {
|
||||
#endif
|
||||
|
||||
#ifdef __APPLE__
|
||||
// a.k.a msghdr_x
|
||||
struct mmsghdr {
|
||||
/*
|
||||
* Extended version for sendmsg_x() and recvmsg_x() calls
|
||||
*/
|
||||
struct mmsghdr {
|
||||
struct msghdr msg_hdr;
|
||||
size_t msg_len; /* byte length of buffer in msg_iov */
|
||||
};
|
||||
/*
|
||||
* recvmsg_x() is a system call similar to recvmsg(2) to receive
|
||||
* several datagrams at once in the array of message headers "msgp".
|
||||
*
|
||||
* recvmsg_x() can be used only with protocols handlers that have been specially
|
||||
* modified to support sending and receiving several datagrams at once.
|
||||
*
|
||||
* The size of the array "msgp" is given by the argument "cnt".
|
||||
*
|
||||
* The "flags" arguments supports only the value MSG_DONTWAIT.
|
||||
*
|
||||
* Each member of "msgp" array is of type "struct msghdr_x".
|
||||
*
|
||||
* The "msg_iov" and "msg_iovlen" are input parameters that describe where to
|
||||
* store a datagram in a scatter gather locations of buffers -- see recvmsg(2).
|
||||
* On output the field "msg_datalen" gives the length of the received datagram.
|
||||
*
|
||||
* The field "msg_flags" must be set to zero on input. On output, "msg_flags"
|
||||
* may have MSG_TRUNC set to indicate the trailing portion of the datagram was
|
||||
* discarded because the datagram was larger than the buffer supplied.
|
||||
* recvmsg_x() returns as soon as a datagram is truncated.
|
||||
*
|
||||
* recvmsg_x() may return with less than "cnt" datagrams received based on
|
||||
* the low water mark and the amount of data pending in the socket buffer.
|
||||
*
|
||||
* recvmsg_x() returns the number of datagrams that have been received,
|
||||
* or -1 if an error occurred.
|
||||
*
|
||||
 * NOTE: This is a private system call, the API is subject to change.
|
||||
*/
|
||||
ssize_t recvmsg_x(int s, const struct mmsghdr *msgp, u_int cnt, int flags);
|
||||
|
||||
ssize_t sendmsg_x(int s, struct mmsghdr *msgp, u_int cnt, int flags);
|
||||
ssize_t recvmsg_x(int s, struct mmsghdr *msgp, u_int cnt, int flags);
|
||||
/*
|
||||
* sendmsg_x() is a system call similar to send(2) to send
|
||||
* several datagrams at once in the array of message headers "msgp".
|
||||
*
|
||||
* sendmsg_x() can be used only with protocols handlers that have been specially
|
||||
* modified to support sending and receiving several datagrams at once.
|
||||
*
|
||||
* The size of the array "msgp" is given by the argument "cnt".
|
||||
*
|
||||
* The "flags" arguments supports only the value MSG_DONTWAIT.
|
||||
*
|
||||
* Each member of "msgp" array is of type "struct msghdr_x".
|
||||
*
|
||||
* The "msg_iov" and "msg_iovlen" are input parameters that specify the
|
||||
* data to be sent in a scatter gather locations of buffers -- see sendmsg(2).
|
||||
*
|
||||
* sendmsg_x() fails with EMSGSIZE if the sum of the length of the datagrams
|
||||
* is greater than the high water mark.
|
||||
*
|
||||
* Address and ancillary data are not supported so the following fields
|
||||
* must be set to zero on input:
|
||||
* "msg_name", "msg_namelen", "msg_control" and "msg_controllen".
|
||||
*
|
||||
* The field "msg_flags" and "msg_datalen" must be set to zero on input.
|
||||
*
|
||||
* sendmsg_x() returns the number of datagrams that have been sent,
|
||||
* or -1 if an error occurred.
|
||||
*
|
||||
 * NOTE: This is a private system call, the API is subject to change.
|
||||
*/
|
||||
ssize_t sendmsg_x(int s, const struct mmsghdr *msgp, u_int cnt, int flags);
|
||||
#endif
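The comments above describe recvmsg_x's calling convention but no call site appears in this hunk. A minimal sketch of a batched receive follows; it assumes a bound non-blocking UDP fd plus the usual <sys/socket.h> and <string.h> headers, and since recvmsg_x is a private Apple syscall this is illustrative only:

    enum { BATCH = 8, DGRAM_MAX = 1500 };

    static ssize_t read_batch_sketch(int fd) {
      static char bufs[BATCH][DGRAM_MAX];
      static struct iovec iovs[BATCH];
      static struct mmsghdr msgs[BATCH];
      for (int i = 0; i < BATCH; i++) {
        iovs[i].iov_base = bufs[i];
        iovs[i].iov_len = DGRAM_MAX;
        memset(&msgs[i], 0, sizeof(msgs[i])); // msg_flags and msg_len must be zero on input
        msgs[i].msg_hdr.msg_iov = &iovs[i];
        msgs[i].msg_hdr.msg_iovlen = 1;
      }
      return recvmsg_x(fd, msgs, BATCH, MSG_DONTWAIT); // number of datagrams received, or -1
    }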
struct udp_recvbuf {
|
||||
@@ -95,8 +158,9 @@ struct udp_sendbuf {
|
||||
void **addresses;
|
||||
int num;
|
||||
#else
|
||||
int num;
|
||||
char has_empty;
|
||||
unsigned int has_empty : 1;
|
||||
unsigned int has_addresses : 1;
|
||||
unsigned int num;
|
||||
struct mmsghdr msgvec[];
|
||||
#endif
|
||||
};
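Because msgvec[] is a flexible array member, the header fields and the message vector live in one allocation. An illustrative sizing for the non-Windows layout (not code from this diff):

    static struct udp_sendbuf *udp_sendbuf_alloc_sketch(size_t n) {
      /* n = number of datagrams this buffer should be able to describe */
      return (struct udp_sendbuf *) malloc(sizeof(struct udp_sendbuf) + n * sizeof(struct mmsghdr));
    }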
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
// clang-format off
|
||||
|
||||
#pragma once
|
||||
#ifndef us_calloc
|
||||
#define us_calloc calloc
|
||||
#endif
|
||||
@@ -35,6 +35,25 @@
|
||||
#ifndef LIBUSOCKETS_H
|
||||
#define LIBUSOCKETS_H
|
||||
|
||||
#ifdef BUN_DEBUG
|
||||
#define nonnull_arg _Nonnull
|
||||
#else
|
||||
#define nonnull_arg
|
||||
#endif
|
||||
|
||||
#ifdef BUN_DEBUG
|
||||
#define nonnull_fn_decl
|
||||
#else
|
||||
#ifndef nonnull_fn_decl
|
||||
#define nonnull_fn_decl __attribute__((nonnull))
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define us_loop_r struct us_loop_t *nonnull_arg
|
||||
#define us_socket_r struct us_socket_t *nonnull_arg
|
||||
#define us_poll_r struct us_poll_t *nonnull_arg
|
||||
#define us_socket_context_r struct us_socket_context_t *nonnull_arg
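In effect these shorthands give debug builds clang nullability annotations while release builds keep the plain prototypes. Expanded by hand for illustration, using a declaration that appears later in this header:

    /* Written in the header: */
    struct us_timer_t *us_create_timer(us_loop_r loop, int fallthrough, unsigned int ext_size);

    /* With BUN_DEBUG defined this preprocesses to: */
    struct us_timer_t *us_create_timer(struct us_loop_t *_Nonnull loop, int fallthrough, unsigned int ext_size);

    /* and without BUN_DEBUG to the plain prototype: */
    struct us_timer_t *us_create_timer(struct us_loop_t *loop, int fallthrough, unsigned int ext_size);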
|
||||
|
||||
/* 512kb shared receive buffer */
|
||||
#define LIBUS_RECV_BUFFER_LENGTH 524288
|
||||
@@ -49,6 +68,7 @@
|
||||
#define LIBUS_EXT_ALIGNMENT 16
|
||||
#define ALLOW_SERVER_RENEGOTIATION 0
|
||||
|
||||
#define LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN 0
|
||||
#define LIBUS_SOCKET_CLOSE_CODE_CONNECTION_RESET 1
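These are the reason codes the read path passes to us_socket_close (the dispatch changes further down now pass LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN instead of a bare 0). A sketch of an on_close callback that distinguishes them, with placeholder bodies:

    static struct us_socket_t *on_close_sketch(struct us_socket_t *s, int code, void *reason) {
      (void) reason;
      if (code == LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN) {
        /* FIN seen after (or while) shutting down cleanly */
      } else if (code == LIBUS_SOCKET_CLOSE_CODE_CONNECTION_RESET) {
        /* peer reset or another hard error */
      }
      return s;
    }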
|
||||
|
||||
/* Define what a socket descriptor is based on platform */
|
||||
@@ -123,11 +143,11 @@ struct us_udp_packet_buffer_t *us_create_udp_packet_buffer();
|
||||
/* Creates a (heavy-weight) UDP socket with a user space ring buffer. Again, this one is heavy weight and
|
||||
 * should be reused. One entire QUIC server can be implemented using only one single UDP socket so weight
|
||||
* is not a concern as is the case for TCP sockets which are 1-to-1 with TCP connections. */
|
||||
//struct us_udp_socket_t *us_create_udp_socket(struct us_loop_t *loop, void (*read_cb)(struct us_udp_socket_t *), unsigned short port);
|
||||
//struct us_udp_socket_t *us_create_udp_socket(us_loop_r loop, void (*read_cb)(struct us_udp_socket_t *), unsigned short port);
|
||||
|
||||
//struct us_udp_socket_t *us_create_udp_socket(struct us_loop_t *loop, void (*data_cb)(struct us_udp_socket_t *, struct us_udp_packet_buffer_t *, int), void (*drain_cb)(struct us_udp_socket_t *), char *host, unsigned short port);
|
||||
//struct us_udp_socket_t *us_create_udp_socket(us_loop_r loop, void (*data_cb)(struct us_udp_socket_t *, struct us_udp_packet_buffer_t *, int), void (*drain_cb)(struct us_udp_socket_t *), char *host, unsigned short port);
|
||||
|
||||
struct us_udp_socket_t *us_create_udp_socket(struct us_loop_t *loop, void (*data_cb)(struct us_udp_socket_t *, void *, int), void (*drain_cb)(struct us_udp_socket_t *), void (*close_cb)(struct us_udp_socket_t *), const char *host, unsigned short port, void *user);
|
||||
struct us_udp_socket_t *us_create_udp_socket(us_loop_r loop, void (*data_cb)(struct us_udp_socket_t *, void *, int), void (*drain_cb)(struct us_udp_socket_t *), void (*close_cb)(struct us_udp_socket_t *), const char *host, unsigned short port, void *user);
|
||||
|
||||
void us_udp_socket_close(struct us_udp_socket_t *s);
|
||||
|
||||
@@ -140,7 +160,7 @@ int us_udp_socket_bind(struct us_udp_socket_t *s, const char *hostname, unsigned
|
||||
/* Public interfaces for timers */
|
||||
|
||||
/* Create a new high precision, low performance timer. May fail and return null */
|
||||
struct us_timer_t *us_create_timer(struct us_loop_t *loop, int fallthrough, unsigned int ext_size);
|
||||
struct us_timer_t *us_create_timer(us_loop_r loop, int fallthrough, unsigned int ext_size);
|
||||
|
||||
/* Returns user data extension for this timer */
|
||||
void *us_timer_ext(struct us_timer_t *timer);
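A small usage sketch for the two declarations above; carrying an 8-byte deadline in the timer extension is illustrative, and arming and firing the timer are handled by the rest of the timer API:

    static void timer_example_sketch(struct us_loop_t *loop) {
      struct us_timer_t *t = us_create_timer(loop, 0 /* keep the loop alive */, sizeof(uint64_t));
      if (t) {
        uint64_t *deadline = (uint64_t *) us_timer_ext(t);
        *deadline = 0; /* caller-defined user data carried by the timer */
      }
    }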
|
||||
@@ -174,17 +194,17 @@ struct us_bun_verify_error_t {
|
||||
};
|
||||
|
||||
struct us_socket_events_t {
|
||||
struct us_socket_t *(*on_open)(struct us_socket_t *, int is_client, char *ip, int ip_length);
|
||||
struct us_socket_t *(*on_data)(struct us_socket_t *, char *data, int length);
|
||||
struct us_socket_t *(*on_writable)(struct us_socket_t *);
|
||||
struct us_socket_t *(*on_close)(struct us_socket_t *, int code, void *reason);
|
||||
struct us_socket_t *(*on_open)(us_socket_r, int is_client, char *ip, int ip_length);
|
||||
struct us_socket_t *(*on_data)(us_socket_r, char *data, int length);
|
||||
struct us_socket_t *(*on_writable)(us_socket_r);
|
||||
struct us_socket_t *(*on_close)(us_socket_r, int code, void *reason);
|
||||
//void (*on_timeout)(struct us_socket_context *);
|
||||
struct us_socket_t *(*on_timeout)(struct us_socket_t *);
|
||||
struct us_socket_t *(*on_long_timeout)(struct us_socket_t *);
|
||||
struct us_socket_t *(*on_end)(struct us_socket_t *);
|
||||
struct us_socket_t *(*on_timeout)(us_socket_r);
|
||||
struct us_socket_t *(*on_long_timeout)(us_socket_r);
|
||||
struct us_socket_t *(*on_end)(us_socket_r);
|
||||
struct us_connecting_socket_t *(*on_connect_error)(struct us_connecting_socket_t *, int code);
|
||||
struct us_socket_t *(*on_connecting_socket_error)(struct us_socket_t *, int code);
|
||||
void (*on_handshake)(struct us_socket_t*, int success, struct us_bun_verify_error_t verify_error, void* custom_data);
|
||||
struct us_socket_t *(*on_connecting_socket_error)(us_socket_r, int code);
|
||||
void (*on_handshake)(us_socket_r, int success, struct us_bun_verify_error_t verify_error, void* custom_data);
|
||||
};
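A sketch of how a caller might fill this struct before us_socket_wrap_with_tls (declared near the end of this header). The callback bodies, and the assumption that unused entries may stay NULL, are illustrative rather than taken from this diff:

    static struct us_socket_t *on_wrapped_open(struct us_socket_t *s, int is_client, char *ip, int ip_length) {
      return s; /* TLS layer is up for this wrapped socket */
    }
    static struct us_socket_t *on_wrapped_data(struct us_socket_t *s, char *data, int length) {
      return s; /* decrypted application data */
    }

    static struct us_socket_t *wrap_example_sketch(struct us_socket_t *tcp,
                                                   struct us_bun_socket_context_options_t options) {
      struct us_socket_events_t events;
      memset(&events, 0, sizeof(events));
      events.on_open = on_wrapped_open;
      events.on_data = on_wrapped_data;
      return us_socket_wrap_with_tls(1 /* ssl */, tcp, options, events, 0 /* socket_ext_size */);
    }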
|
||||
|
||||
@@ -210,67 +230,70 @@ struct us_bun_socket_context_options_t {
|
||||
};
|
||||
|
||||
/* Return 15-bit timestamp for this context */
|
||||
unsigned short us_socket_context_timestamp(int ssl, struct us_socket_context_t *context);
|
||||
unsigned short us_socket_context_timestamp(int ssl, us_socket_context_r context) nonnull_fn_decl;
|
||||
|
||||
/* Adds SNI domain and cert in asn1 format */
|
||||
void us_socket_context_add_server_name(int ssl, struct us_socket_context_t *context, const char *hostname_pattern, struct us_socket_context_options_t options, void *user);
|
||||
void us_bun_socket_context_add_server_name(int ssl, struct us_socket_context_t *context, const char *hostname_pattern, struct us_bun_socket_context_options_t options, void *user);
|
||||
void us_socket_context_remove_server_name(int ssl, struct us_socket_context_t *context, const char *hostname_pattern);
|
||||
void us_socket_context_on_server_name(int ssl, struct us_socket_context_t *context, void (*cb)(struct us_socket_context_t *, const char *hostname));
|
||||
void *us_socket_server_name_userdata(int ssl, struct us_socket_t *s);
|
||||
void *us_socket_context_find_server_name_userdata(int ssl, struct us_socket_context_t *context, const char *hostname_pattern);
|
||||
void us_socket_context_add_server_name(int ssl, us_socket_context_r context, const char *hostname_pattern, struct us_socket_context_options_t options, void *user);
|
||||
void us_bun_socket_context_add_server_name(int ssl, us_socket_context_r context, const char *hostname_pattern, struct us_bun_socket_context_options_t options, void *user);
|
||||
void us_socket_context_remove_server_name(int ssl, us_socket_context_r context, const char *hostname_pattern);
|
||||
void us_socket_context_on_server_name(int ssl, us_socket_context_r context, void (*cb)(us_socket_context_r context, const char *hostname));
|
||||
void *us_socket_server_name_userdata(int ssl, us_socket_r s);
|
||||
void *us_socket_context_find_server_name_userdata(int ssl, us_socket_context_r context, const char *hostname_pattern);
|
||||
|
||||
/* Returns the underlying SSL native handle, such as SSL_CTX or nullptr */
|
||||
void *us_socket_context_get_native_handle(int ssl, struct us_socket_context_t *context);
|
||||
void *us_socket_context_get_native_handle(int ssl, us_socket_context_r context);
|
||||
|
||||
/* A socket context holds shared callbacks and user data extension for associated sockets */
|
||||
struct us_socket_context_t *us_create_socket_context(int ssl, struct us_loop_t *loop,
|
||||
int ext_size, struct us_socket_context_options_t options);
|
||||
struct us_socket_context_t *us_create_socket_context(int ssl, us_loop_r loop,
|
||||
int ext_size, struct us_socket_context_options_t options) nonnull_fn_decl;
|
||||
struct us_socket_context_t *us_create_bun_socket_context(int ssl, struct us_loop_t *loop,
|
||||
int ext_size, struct us_bun_socket_context_options_t options);
|
||||
int ext_size, struct us_bun_socket_context_options_t options) nonnull_fn_decl;
|
||||
|
||||
/* Delete resources allocated at creation time (will call unref now and only free when ref count == 0). */
|
||||
void us_socket_context_free(int ssl, us_socket_context_r context) nonnull_fn_decl;
|
||||
void us_socket_context_ref(int ssl, us_socket_context_r context) nonnull_fn_decl;
|
||||
void us_socket_context_unref(int ssl, us_socket_context_r context) nonnull_fn_decl;
|
||||
|
||||
/* Delete resources allocated at creation time. */
|
||||
void us_socket_context_free(int ssl, struct us_socket_context_t *context);
|
||||
struct us_bun_verify_error_t us_socket_verify_error(int ssl, struct us_socket_t *context);
|
||||
/* Setters of various async callbacks */
|
||||
void us_socket_context_on_open(int ssl, struct us_socket_context_t *context,
|
||||
struct us_socket_t *(*on_open)(struct us_socket_t *s, int is_client, char *ip, int ip_length));
|
||||
void us_socket_context_on_close(int ssl, struct us_socket_context_t *context,
|
||||
struct us_socket_t *(*on_close)(struct us_socket_t *s, int code, void *reason));
|
||||
void us_socket_context_on_data(int ssl, struct us_socket_context_t *context,
|
||||
struct us_socket_t *(*on_data)(struct us_socket_t *s, char *data, int length));
|
||||
void us_socket_context_on_writable(int ssl, struct us_socket_context_t *context,
|
||||
struct us_socket_t *(*on_writable)(struct us_socket_t *s));
|
||||
void us_socket_context_on_timeout(int ssl, struct us_socket_context_t *context,
|
||||
struct us_socket_t *(*on_timeout)(struct us_socket_t *s));
|
||||
void us_socket_context_on_long_timeout(int ssl, struct us_socket_context_t *context,
|
||||
struct us_socket_t *(*on_timeout)(struct us_socket_t *s));
|
||||
void us_socket_context_on_open(int ssl, us_socket_context_r context,
|
||||
struct us_socket_t *(*on_open)(us_socket_r s, int is_client, char *ip, int ip_length));
|
||||
void us_socket_context_on_close(int ssl, us_socket_context_r context,
|
||||
struct us_socket_t *(*on_close)(us_socket_r s, int code, void *reason));
|
||||
void us_socket_context_on_data(int ssl, us_socket_context_r context,
|
||||
struct us_socket_t *(*on_data)(us_socket_r s, char *data, int length));
|
||||
void us_socket_context_on_writable(int ssl, us_socket_context_r context,
|
||||
struct us_socket_t *(*on_writable)(us_socket_r s));
|
||||
void us_socket_context_on_timeout(int ssl, us_socket_context_r context,
|
||||
struct us_socket_t *(*on_timeout)(us_socket_r s));
|
||||
void us_socket_context_on_long_timeout(int ssl, us_socket_context_r context,
|
||||
struct us_socket_t *(*on_timeout)(us_socket_r s));
|
||||
/* This one is only used for when a connecting socket fails in a late stage. */
|
||||
void us_socket_context_on_connect_error(int ssl, struct us_socket_context_t *context,
|
||||
void us_socket_context_on_connect_error(int ssl, us_socket_context_r context,
|
||||
struct us_connecting_socket_t *(*on_connect_error)(struct us_connecting_socket_t *s, int code));
|
||||
void us_socket_context_on_socket_connect_error(int ssl, struct us_socket_context_t *context,
|
||||
struct us_socket_t *(*on_connect_error)(struct us_socket_t *s, int code));
|
||||
void us_socket_context_on_socket_connect_error(int ssl, us_socket_context_r context,
|
||||
struct us_socket_t *(*on_connect_error)(us_socket_r s, int code));
|
||||
|
||||
void us_socket_context_on_handshake(int ssl, struct us_socket_context_t *context, void (*on_handshake)(struct us_socket_t *, int success, struct us_bun_verify_error_t verify_error, void* custom_data), void* custom_data);
|
||||
void us_socket_context_on_handshake(int ssl, us_socket_context_r context, void (*on_handshake)(struct us_socket_t *, int success, struct us_bun_verify_error_t verify_error, void* custom_data), void* custom_data);
|
||||
|
||||
/* Emitted when a socket has been half-closed */
|
||||
void us_socket_context_on_end(int ssl, struct us_socket_context_t *context, struct us_socket_t *(*on_end)(struct us_socket_t *s));
|
||||
void us_socket_context_on_end(int ssl, us_socket_context_r context, struct us_socket_t *(*on_end)(us_socket_r s));
|
||||
|
||||
/* Returns user data extension for this socket context */
|
||||
void *us_socket_context_ext(int ssl, struct us_socket_context_t *context);
|
||||
void *us_socket_context_ext(int ssl, us_socket_context_r context);
|
||||
|
||||
/* Closes all open sockets, including listen sockets. Does not invalidate the socket context. */
|
||||
void us_socket_context_close(int ssl, struct us_socket_context_t *context);
|
||||
void us_socket_context_close(int ssl, us_socket_context_r context);
|
||||
|
||||
/* Listen for connections. Acts as the main driving cog in a server. Will call the configured async callbacks. */
|
||||
struct us_listen_socket_t *us_socket_context_listen(int ssl, struct us_socket_context_t *context,
|
||||
struct us_listen_socket_t *us_socket_context_listen(int ssl, us_socket_context_r context,
|
||||
const char *host, int port, int options, int socket_ext_size);
|
||||
|
||||
struct us_listen_socket_t *us_socket_context_listen_unix(int ssl, struct us_socket_context_t *context,
|
||||
struct us_listen_socket_t *us_socket_context_listen_unix(int ssl, us_socket_context_r context,
|
||||
const char *path, size_t pathlen, int options, int socket_ext_size);
|
||||
|
||||
/* listen_socket.c/.h */
|
||||
void us_listen_socket_close(int ssl, struct us_listen_socket_t *ls);
|
||||
void us_listen_socket_close(int ssl, struct us_listen_socket_t *ls) nonnull_fn_decl;
|
||||
|
||||
/*
|
||||
Returns one of
|
||||
@@ -281,156 +304,156 @@ void us_listen_socket_close(int ssl, struct us_listen_socket_t *ls);
|
||||
This is the slow path where we must either go through DNS resolution or create multiple sockets
|
||||
per the happy eyeballs algorithm
|
||||
*/
|
||||
void *us_socket_context_connect(int ssl, struct us_socket_context_t *context,
|
||||
const char *host, int port, int options, int socket_ext_size, int *is_connecting);
|
||||
void *us_socket_context_connect(int ssl, struct us_socket_context_t * nonnull_arg context,
|
||||
const char *host, int port, int options, int socket_ext_size, int *is_connecting) __attribute__((nonnull(2)));
|
||||
|
||||
struct us_socket_t *us_socket_context_connect_unix(int ssl, struct us_socket_context_t *context,
|
||||
const char *server_path, size_t pathlen, int options, int socket_ext_size);
|
||||
struct us_socket_t *us_socket_context_connect_unix(int ssl, us_socket_context_r context,
|
||||
const char *server_path, size_t pathlen, int options, int socket_ext_size) __attribute__((nonnull(2)));
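As I read the comment above, the void * return is either a us_socket_t (fast path, the host was already a literal address) or a us_connecting_socket_t (slow path, DNS or happy eyeballs still pending), with *is_connecting telling the caller which. A hedged caller-side sketch:

    static void connect_example_sketch(struct us_socket_context_t *context) {
      int is_connecting = 0;
      void *result = us_socket_context_connect(0 /* ssl */, context, "example.com", 443,
                                               0 /* options */, 0 /* ext size */, &is_connecting);
      if (!result) {
        /* immediate failure */
      } else if (is_connecting) {
        struct us_connecting_socket_t *c = (struct us_connecting_socket_t *) result;
        (void) c; /* resolution in flight; on_open or on_connect_error will follow */
      } else {
        struct us_socket_t *s = (struct us_socket_t *) result;
        (void) s; /* already a concrete socket */
      }
    }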
|
||||
/* Is this socket established? Can be used to check if a connecting socket has fired the on_open event yet.
|
||||
* Can also be used to determine if a socket is a listen_socket or not, but you probably know that already. */
|
||||
int us_socket_is_established(int ssl, struct us_socket_t *s);
|
||||
int us_socket_is_established(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
|
||||
void us_connecting_socket_free(struct us_connecting_socket_t *c);
|
||||
void us_connecting_socket_free(int ssl, struct us_connecting_socket_t *c) nonnull_fn_decl;
|
||||
|
||||
/* Cancel a connecting socket. Can be used together with us_socket_timeout to limit connection times.
|
||||
* Entirely destroys the socket - this function works like us_socket_close but does not trigger on_close event since
|
||||
* you never got the on_open event first. */
|
||||
void us_connecting_socket_close(int ssl, struct us_connecting_socket_t *c);
|
||||
void us_connecting_socket_close(int ssl, struct us_connecting_socket_t *c) nonnull_fn_decl;
|
||||
|
||||
/* Returns the loop for this socket context. */
|
||||
struct us_loop_t *us_socket_context_loop(int ssl, struct us_socket_context_t *context);
|
||||
struct us_loop_t *us_socket_context_loop(int ssl, us_socket_context_r context) nonnull_fn_decl __attribute((returns_nonnull));
|
||||
|
||||
/* Invalidates passed socket, returning a new resized socket which belongs to a different socket context.
|
||||
* Used mainly for "socket upgrades" such as when transitioning from HTTP to WebSocket. */
|
||||
struct us_socket_t *us_socket_context_adopt_socket(int ssl, struct us_socket_context_t *context, struct us_socket_t *s, int ext_size);
|
||||
struct us_socket_t *us_socket_context_adopt_socket(int ssl, us_socket_context_r context, us_socket_r s, int ext_size);
|
||||
|
||||
/* Create a child socket context which acts much like its own socket context with its own callbacks yet still relies on the
|
||||
* parent socket context for some shared resources. Child socket contexts should be used together with socket adoptions and nothing else. */
|
||||
struct us_socket_context_t *us_create_child_socket_context(int ssl, struct us_socket_context_t *context, int context_ext_size);
|
||||
struct us_socket_context_t *us_create_child_socket_context(int ssl, us_socket_context_r context, int context_ext_size);
|
||||
|
||||
/* Public interfaces for loops */
|
||||
|
||||
/* Returns a new event loop with user data extension */
|
||||
struct us_loop_t *us_create_loop(void *hint, void (*wakeup_cb)(struct us_loop_t *loop),
|
||||
void (*pre_cb)(struct us_loop_t *loop), void (*post_cb)(struct us_loop_t *loop), unsigned int ext_size);
|
||||
struct us_loop_t *us_create_loop(void *hint, void (*wakeup_cb)(us_loop_r loop),
|
||||
void (*pre_cb)(us_loop_r loop), void (*post_cb)(us_loop_r loop), unsigned int ext_size);
|
||||
|
||||
/* Frees the loop immediately */
|
||||
void us_loop_free(struct us_loop_t *loop);
|
||||
void us_loop_free(us_loop_r loop) nonnull_fn_decl;
|
||||
|
||||
/* Returns the loop user data extension */
|
||||
void *us_loop_ext(struct us_loop_t *loop);
|
||||
void *us_loop_ext(us_loop_r loop) nonnull_fn_decl;
|
||||
|
||||
/* Blocks the calling thread and drives the event loop until no more non-fallthrough polls are scheduled */
|
||||
void us_loop_run(struct us_loop_t *loop);
|
||||
void us_loop_run(us_loop_r loop) nonnull_fn_decl;
|
||||
|
||||
|
||||
/* Signals the loop from any thread to wake up and execute its wakeup handler from the loop's own running thread.
|
||||
* This is the only fully thread-safe function and serves as the basis for thread safety */
|
||||
void us_wakeup_loop(struct us_loop_t *loop);
|
||||
void us_wakeup_loop(us_loop_r loop) nonnull_fn_decl;
|
||||
|
||||
/* Hook up timers in existing loop */
|
||||
void us_loop_integrate(struct us_loop_t *loop);
|
||||
void us_loop_integrate(us_loop_r loop) nonnull_fn_decl;
|
||||
|
||||
/* Returns the loop iteration number */
|
||||
long long us_loop_iteration_number(struct us_loop_t *loop);
|
||||
long long us_loop_iteration_number(us_loop_r loop) nonnull_fn_decl;
|
||||
|
||||
/* Public interfaces for polls */
|
||||
|
||||
/* A fallthrough poll does not keep the loop running, it falls through */
|
||||
struct us_poll_t *us_create_poll(struct us_loop_t *loop, int fallthrough, unsigned int ext_size);
|
||||
struct us_poll_t *us_create_poll(us_loop_r loop, int fallthrough, unsigned int ext_size);
|
||||
|
||||
/* After stopping a poll you must manually free the memory */
|
||||
void us_poll_free(struct us_poll_t *p, struct us_loop_t *loop);
|
||||
void us_poll_free(us_poll_r p, struct us_loop_t *loop);
|
||||
|
||||
/* Associate this poll with a socket descriptor and poll type */
|
||||
void us_poll_init(struct us_poll_t *p, LIBUS_SOCKET_DESCRIPTOR fd, int poll_type);
|
||||
void us_poll_init(us_poll_r p, LIBUS_SOCKET_DESCRIPTOR fd, int poll_type);
|
||||
|
||||
/* Start, change and stop polling for events */
|
||||
void us_poll_start(struct us_poll_t *p, struct us_loop_t *loop, int events);
|
||||
void us_poll_change(struct us_poll_t *p, struct us_loop_t *loop, int events);
|
||||
void us_poll_stop(struct us_poll_t *p, struct us_loop_t *loop);
|
||||
void us_poll_start(us_poll_r p, us_loop_r loop, int events) nonnull_fn_decl;
|
||||
void us_poll_change(us_poll_r p, us_loop_r loop, int events) nonnull_fn_decl;
|
||||
void us_poll_stop(us_poll_r p, struct us_loop_t *loop) nonnull_fn_decl;
|
||||
|
||||
/* Return what events we are polling for */
|
||||
int us_poll_events(struct us_poll_t *p);
|
||||
int us_poll_events(us_poll_r p) nonnull_fn_decl;
|
||||
|
||||
/* Returns the user data extension of this poll */
|
||||
void *us_poll_ext(struct us_poll_t *p);
|
||||
void *us_poll_ext(us_poll_r p) nonnull_fn_decl;
|
||||
|
||||
/* Get associated socket descriptor from a poll */
|
||||
LIBUS_SOCKET_DESCRIPTOR us_poll_fd(struct us_poll_t *p);
|
||||
LIBUS_SOCKET_DESCRIPTOR us_poll_fd(us_poll_r p) nonnull_fn_decl;
|
||||
|
||||
/* Resize an active poll */
|
||||
struct us_poll_t *us_poll_resize(struct us_poll_t *p, struct us_loop_t *loop, unsigned int ext_size);
|
||||
struct us_poll_t *us_poll_resize(us_poll_r p, us_loop_r loop, unsigned int ext_size) nonnull_fn_decl;
|
||||
|
||||
/* Public interfaces for sockets */
|
||||
|
||||
/* Returns the underlying native handle for a socket, such as SSL or file descriptor.
|
||||
 * In the case of a file descriptor, the returned pointer value is the fd itself. */
|
||||
void *us_socket_get_native_handle(int ssl, struct us_socket_t *s);
|
||||
void *us_socket_get_native_handle(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
|
||||
/* Write up to length bytes of data. Returns actual bytes written.
|
||||
* Will call the on_writable callback of active socket context on failure to write everything off in one go.
|
||||
* Set hint msg_more if you have more immediate data to write. */
|
||||
int us_socket_write(int ssl, struct us_socket_t *s, const char *data, int length, int msg_more);
|
||||
int us_socket_write(int ssl, us_socket_r s, const char * nonnull_arg data, int length, int msg_more) nonnull_fn_decl;
|
||||
|
||||
/* Special path for non-SSL sockets. Used to send header and payload in one go. Works like us_socket_write. */
|
||||
int us_socket_write2(int ssl, struct us_socket_t *s, const char *header, int header_length, const char *payload, int payload_length);
|
||||
int us_socket_write2(int ssl, us_socket_r s, const char *header, int header_length, const char *payload, int payload_length) nonnull_fn_decl;
|
||||
|
||||
/* Set a low precision, high performance timer on a socket. A socket can only have one single active timer
|
||||
 * at any given point in time. Will remove any such pre-set timer */
|
||||
void us_socket_timeout(int ssl, struct us_socket_t *s, unsigned int seconds);
|
||||
void us_socket_timeout(int ssl, us_socket_r s, unsigned int seconds) nonnull_fn_decl;
|
||||
|
||||
/* Set a low precision, high performance timer on a socket. Suitable for per-minute precision. */
|
||||
void us_socket_long_timeout(int ssl, struct us_socket_t *s, unsigned int minutes);
|
||||
void us_socket_long_timeout(int ssl, us_socket_r s, unsigned int minutes) nonnull_fn_decl;
|
||||
|
||||
/* Return the user data extension of this socket */
|
||||
void *us_socket_ext(int ssl, struct us_socket_t *s);
|
||||
void *us_connecting_socket_ext(int ssl, struct us_connecting_socket_t *c);
|
||||
void *us_socket_ext(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
void *us_connecting_socket_ext(int ssl, struct us_connecting_socket_t *c) nonnull_fn_decl;
|
||||
|
||||
/* Return the socket context of this socket */
|
||||
struct us_socket_context_t *us_socket_context(int ssl, struct us_socket_t *s);
|
||||
struct us_socket_context_t *us_socket_context(int ssl, us_socket_r s) nonnull_fn_decl __attribute__((returns_nonnull));
|
||||
|
||||
/* Withdraw any msg_more status and flush any pending data */
|
||||
void us_socket_flush(int ssl, struct us_socket_t *s);
|
||||
void us_socket_flush(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
|
||||
/* Shuts down the connection by sending FIN and/or close_notify */
|
||||
void us_socket_shutdown(int ssl, struct us_socket_t *s);
|
||||
void us_socket_shutdown(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
|
||||
/* Shuts down the read side of the connection, meaning the next event loop
|
||||
* iteration will catch the socket being closed. Can be used to defer closing
|
||||
* to next event loop iteration. */
|
||||
void us_socket_shutdown_read(int ssl, struct us_socket_t *s);
|
||||
void us_socket_shutdown_read(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
|
||||
/* Returns whether the socket has been shut down or not */
|
||||
int us_socket_is_shut_down(int ssl, struct us_socket_t *s);
|
||||
int us_socket_is_shut_down(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
|
||||
/* Returns whether this socket has been closed. Only valid if memory has not yet been released. */
|
||||
int us_socket_is_closed(int ssl, struct us_socket_t *s);
|
||||
int us_socket_is_closed(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
|
||||
/* Immediately closes the socket */
|
||||
struct us_socket_t *us_socket_close(int ssl, struct us_socket_t *s, int code, void *reason);
|
||||
struct us_socket_t *us_socket_close(int ssl, us_socket_r s, int code, void *reason) __attribute__((nonnull(2)));
|
||||
|
||||
/* Returns local port or -1 on failure. */
|
||||
int us_socket_local_port(int ssl, struct us_socket_t *s);
|
||||
int us_socket_local_port(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
|
||||
/* Copy remote (IP) address of socket, or fail with zero length. */
|
||||
void us_socket_remote_address(int ssl, struct us_socket_t *s, char *buf, int *length);
|
||||
void us_socket_local_address(int ssl, struct us_socket_t *s, char *buf, int *length);
|
||||
void us_socket_remote_address(int ssl, us_socket_r s, char *nonnull_arg buf, int *nonnull_arg length) nonnull_fn_decl;
|
||||
void us_socket_local_address(int ssl, us_socket_r s, char *nonnull_arg buf, int *nonnull_arg length) nonnull_fn_decl;
|
||||
|
||||
/* Bun extras */
|
||||
struct us_socket_t *us_socket_pair(struct us_socket_context_t *ctx, int socket_ext_size, LIBUS_SOCKET_DESCRIPTOR* fds);
|
||||
struct us_socket_t *us_socket_from_fd(struct us_socket_context_t *ctx, int socket_ext_size, LIBUS_SOCKET_DESCRIPTOR fd);
|
||||
struct us_socket_t *us_socket_attach(int ssl, LIBUS_SOCKET_DESCRIPTOR client_fd, struct us_socket_context_t *ctx, int flags, int socket_ext_size);
|
||||
struct us_socket_t *us_socket_wrap_with_tls(int ssl, struct us_socket_t *s, struct us_bun_socket_context_options_t options, struct us_socket_events_t events, int socket_ext_size);
|
||||
int us_socket_raw_write(int ssl, struct us_socket_t *s, const char *data, int length, int msg_more);
|
||||
struct us_socket_t *us_socket_wrap_with_tls(int ssl, us_socket_r s, struct us_bun_socket_context_options_t options, struct us_socket_events_t events, int socket_ext_size);
|
||||
int us_socket_raw_write(int ssl, us_socket_r s, const char *data, int length, int msg_more);
|
||||
struct us_socket_t* us_socket_open(int ssl, struct us_socket_t * s, int is_client, char* ip, int ip_length);
|
||||
int us_raw_root_certs(struct us_cert_string_t**out);
|
||||
unsigned int us_get_remote_address_info(char *buf, struct us_socket_t *s, const char **dest, int *port, int *is_ipv6);
|
||||
int us_socket_get_error(int ssl, struct us_socket_t *s);
|
||||
unsigned int us_get_remote_address_info(char *buf, us_socket_r s, const char **dest, int *port, int *is_ipv6);
|
||||
int us_socket_get_error(int ssl, us_socket_r s);
|
||||
|
||||
void us_socket_ref(struct us_socket_t *s);
|
||||
void us_socket_unref(struct us_socket_t *s);
|
||||
void us_socket_ref(us_socket_r s);
|
||||
void us_socket_unref(us_socket_r s);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
||||
@@ -47,6 +47,8 @@ void us_internal_loop_data_init(struct us_loop_t *loop, void (*wakeup_cb)(struct
|
||||
loop->data.parent_ptr = 0;
|
||||
loop->data.parent_tag = 0;
|
||||
|
||||
loop->data.closed_context_head = 0;
|
||||
|
||||
loop->data.wakeup_async = us_internal_create_async(loop, 1, 0);
|
||||
us_internal_async_set(loop->data.wakeup_async, (void (*)(struct us_internal_async *)) wakeup_cb);
|
||||
}
|
||||
@@ -234,6 +236,15 @@ void us_internal_free_closed_sockets(struct us_loop_t *loop) {
|
||||
loop->data.closed_connecting_head = 0;
|
||||
}
|
||||
|
||||
void us_internal_free_closed_contexts(struct us_loop_t *loop) {
|
||||
for (struct us_socket_context_t *ctx = loop->data.closed_context_head; ctx; ) {
|
||||
struct us_socket_context_t *next = ctx->next;
|
||||
us_free(ctx);
|
||||
ctx = next;
|
||||
}
|
||||
loop->data.closed_context_head = 0;
|
||||
}
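Deferring us_free to this post-iteration sweep mirrors what us_internal_free_closed_sockets does for sockets: a context can drop its last reference from inside one of its own callbacks, so freeing it immediately would pull memory out from under the dispatch loop. The producer side is not part of this hunk; a hypothetical sketch of what parks a context on the list (name and exact fields are assumptions):

    static void context_unref_sketch(struct us_socket_context_t *context) {
      if (--context->ref_count == 0) {
        struct us_loop_t *loop = context->loop;
        /* park it; us_internal_free_closed_contexts() releases it after the iteration */
        context->next = loop->data.closed_context_head;
        loop->data.closed_context_head = context;
      }
    }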
|
||||
void sweep_timer_cb(struct us_internal_callback_t *cb) {
|
||||
us_internal_timer_sweep(cb->loop);
|
||||
}
|
||||
@@ -253,6 +264,7 @@ void us_internal_loop_pre(struct us_loop_t *loop) {
|
||||
void us_internal_loop_post(struct us_loop_t *loop) {
|
||||
us_internal_handle_dns_results(loop);
|
||||
us_internal_free_closed_sockets(loop);
|
||||
us_internal_free_closed_contexts(loop);
|
||||
loop->data.post_cb(loop);
|
||||
}
|
||||
|
||||
@@ -356,7 +368,8 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int events)
|
||||
s->context->loop->data.low_prio_budget--; /* Still having budget for this iteration - do normal processing */
|
||||
} else {
|
||||
us_poll_change(&s->p, us_socket_context(0, s)->loop, us_poll_events(&s->p) & LIBUS_SOCKET_WRITABLE);
|
||||
us_internal_socket_context_unlink_socket(s->context, s);
|
||||
us_socket_context_ref(0, s->context);
|
||||
us_internal_socket_context_unlink_socket(0, s->context, s);
|
||||
|
||||
/* Link this socket to the low-priority queue - we use a LIFO queue, to prioritize newer clients that are
|
||||
 * may not have timed out already - sounds unfair, but works better in real life with smaller client timeouts
|
||||
@@ -411,7 +424,8 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int events)
|
||||
if (us_socket_is_shut_down(0, s)) {
|
||||
/* We got FIN back after sending it */
|
||||
/* Todo: We should give "CLEAN SHUTDOWN" as reason here */
|
||||
s = us_socket_close(0, s, 0, NULL);
|
||||
s = us_socket_close(0, s, LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN, NULL);
|
||||
return;
|
||||
} else {
|
||||
/* We got FIN, so stop polling for readable */
|
||||
us_poll_change(&s->p, us_socket_context(0, s)->loop, us_poll_events(&s->p) & LIBUS_SOCKET_WRITABLE);
|
||||
@@ -419,7 +433,7 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int events)
|
||||
}
|
||||
} else if (length == LIBUS_SOCKET_ERROR && !bsd_would_block()) {
|
||||
/* Todo: decide also here what kind of reason we should give */
|
||||
s = us_socket_close(0, s, 0, NULL);
|
||||
s = us_socket_close(0, s, LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN, NULL);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
@@ -21,6 +21,8 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <errno.h>
|
||||
|
||||
#ifndef WIN32
|
||||
#include <fcntl.h>
|
||||
@@ -113,6 +115,9 @@ void us_socket_flush(int ssl, struct us_socket_t *s) {
|
||||
}
|
||||
|
||||
int us_socket_is_closed(int ssl, struct us_socket_t *s) {
|
||||
if(ssl) {
|
||||
return us_internal_ssl_socket_is_closed((struct us_internal_ssl_socket_t *) s);
|
||||
}
|
||||
return s->prev == (struct us_socket_t *) s->context;
|
||||
}
|
||||
|
||||
@@ -125,9 +130,11 @@ int us_socket_is_established(int ssl, struct us_socket_t *s) {
|
||||
return us_internal_poll_type((struct us_poll_t *) s) != POLL_TYPE_SEMI_SOCKET;
|
||||
}
|
||||
|
||||
void us_connecting_socket_free(struct us_connecting_socket_t *c) {
|
||||
void us_connecting_socket_free(int ssl, struct us_connecting_socket_t *c) {
|
||||
// we can't just free c immediately, as it may be enqueued in the dns_ready_head list
|
||||
// instead, we move it to a close list and free it after the iteration
|
||||
us_internal_socket_context_unlink_connecting_socket(ssl, c->context, c);
|
||||
|
||||
c->next = c->context->loop->data.closed_connecting_head;
|
||||
c->context->loop->data.closed_connecting_head = c;
|
||||
}
|
||||
@@ -135,9 +142,9 @@ void us_connecting_socket_free(struct us_connecting_socket_t *c) {
|
||||
void us_connecting_socket_close(int ssl, struct us_connecting_socket_t *c) {
|
||||
if (c->closed) return;
|
||||
c->closed = 1;
|
||||
|
||||
for (struct us_socket_t *s = c->connecting_head; s; s = s->connect_next) {
|
||||
us_internal_socket_context_unlink_socket(s->context, s);
|
||||
us_internal_socket_context_unlink_socket(ssl, s->context, s);
|
||||
|
||||
us_poll_stop((struct us_poll_t *) s, s->context->loop);
|
||||
bsd_close_socket(us_poll_fd((struct us_poll_t *) s));
|
||||
|
||||
@@ -148,15 +155,26 @@ void us_connecting_socket_close(int ssl, struct us_connecting_socket_t *c) {
|
||||
/* Any socket with prev = context is marked as closed */
|
||||
s->prev = (struct us_socket_t *) s->context;
|
||||
}
|
||||
|
||||
if(!c->error) {
|
||||
// if we have no error, record that we were aborted, i.e. that close was called
|
||||
c->error = ECONNABORTED;
|
||||
}
|
||||
c->context->on_connect_error(c, c->error);
|
||||
if(c->addrinfo_req) {
|
||||
Bun__addrinfo_freeRequest(c->addrinfo_req, c->error == ECONNREFUSED);
|
||||
c->addrinfo_req = 0;
|
||||
}
|
||||
// we can only schedule the socket to be freed if there is no pending callback
|
||||
// otherwise, the callback will see that the socket is closed and will free it
|
||||
if (!c->pending_resolve_callback) {
|
||||
us_connecting_socket_free(c);
|
||||
us_connecting_socket_free(ssl, c);
|
||||
}
|
||||
}
|
||||
|
||||
struct us_socket_t *us_socket_close(int ssl, struct us_socket_t *s, int code, void *reason) {
|
||||
if(ssl) {
|
||||
return (struct us_socket_t *)us_internal_ssl_socket_close((struct us_internal_ssl_socket_t *) s, code, reason);
|
||||
}
|
||||
if (!us_socket_is_closed(0, s)) {
|
||||
if (s->low_prio_state == 1) {
|
||||
/* Unlink this socket from the low-priority queue */
|
||||
@@ -168,8 +186,10 @@ struct us_socket_t *us_socket_close(int ssl, struct us_socket_t *s, int code, vo
|
||||
s->prev = 0;
|
||||
s->next = 0;
|
||||
s->low_prio_state = 0;
|
||||
us_socket_context_unref(ssl, s->context);
|
||||
|
||||
} else {
|
||||
us_internal_socket_context_unlink_socket(s->context, s);
|
||||
us_internal_socket_context_unlink_socket(ssl, s->context, s);
|
||||
}
|
||||
#ifdef LIBUS_USE_KQUEUE
|
||||
// kqueue automatically removes the fd from the set on close
|
||||
@@ -218,8 +238,10 @@ struct us_socket_t *us_socket_detach(int ssl, struct us_socket_t *s) {
|
||||
s->prev = 0;
|
||||
s->next = 0;
|
||||
s->low_prio_state = 0;
|
||||
us_socket_context_unref(ssl, s->context);
|
||||
|
||||
} else {
|
||||
us_internal_socket_context_unlink_socket(s->context, s);
|
||||
us_internal_socket_context_unlink_socket(ssl, s->context, s);
|
||||
}
|
||||
us_poll_stop((struct us_poll_t *) s, s->context->loop);
|
||||
|
||||
|
||||
@@ -514,6 +514,13 @@ public:
|
||||
return std::move(*this);
|
||||
}
|
||||
|
||||
void clearRoutes() {
|
||||
if (httpContext) {
|
||||
httpContext->getSocketContextData()->clearRoutes();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
TemplatedApp &&head(std::string pattern, MoveOnlyFunction<void(HttpResponse<SSL> *, HttpRequest *)> &&handler) {
|
||||
if (httpContext) {
|
||||
httpContext->onHttp("HEAD", pattern, std::move(handler));
|
||||
|
||||
@@ -117,47 +117,47 @@ public:
|
||||
|
||||
/* Immediately close socket */
|
||||
us_socket_t *close() {
|
||||
this->uncork();
|
||||
return us_socket_close(SSL, (us_socket_t *) this, 0, nullptr);
|
||||
}
|
||||
|
||||
void corkUnchecked() {
|
||||
/* What if another socket is corked? */
|
||||
getLoopData()->corkedSocket = this;
|
||||
getLoopData()->corkedSocketIsSSL = SSL;
|
||||
getLoopData()->setCorkedSocket(this, SSL);
|
||||
}
|
||||
|
||||
void uncorkWithoutSending() {
|
||||
if (isCorked()) {
|
||||
getLoopData()->corkedSocket = nullptr;
|
||||
getLoopData()->cleanCorkedSocket();
|
||||
}
|
||||
}
|
||||
|
||||
/* Cork this socket. Only one socket may ever be corked per-loop at any given time */
|
||||
void cork() {
|
||||
auto* corked = getLoopData()->getCorkedSocket();
|
||||
/* Extra check for invalid corking of others */
|
||||
if (getLoopData()->corkOffset && getLoopData()->corkedSocket != this) {
|
||||
if (getLoopData()->isCorked() && corked != this) {
|
||||
// We uncork the other socket early instead of terminating the program
|
||||
// this is unlikely to cause any issues and is better than crashing
|
||||
if(getLoopData()->corkedSocketIsSSL) {
|
||||
((AsyncSocket<true> *) getLoopData()->corkedSocket)->uncork();
|
||||
if(getLoopData()->isCorkedSSL()) {
|
||||
((AsyncSocket<true> *) corked)->uncork();
|
||||
} else {
|
||||
((AsyncSocket<false> *) getLoopData()->corkedSocket)->uncork();
|
||||
((AsyncSocket<false> *) corked)->uncork();
|
||||
}
|
||||
}
|
||||
|
||||
/* What if another socket is corked? */
|
||||
getLoopData()->corkedSocket = this;
|
||||
getLoopData()->corkedSocketIsSSL = SSL;
|
||||
getLoopData()->setCorkedSocket(this, SSL);
|
||||
}
|
||||
|
||||
/* Returns whether we are corked or not */
|
||||
bool isCorked() {
|
||||
return getLoopData()->corkedSocket == this;
|
||||
return getLoopData()->isCorkedWith(this);
|
||||
}
|
||||
|
||||
/* Returns whether we could cork (it is free) */
|
||||
bool canCork() {
|
||||
return getLoopData()->corkedSocket == nullptr;
|
||||
return getLoopData()->canCork();
|
||||
}
|
||||
|
||||
/* Returns a suitable buffer for temporary assembly of send data */
|
||||
@@ -166,16 +166,16 @@ public:
|
||||
LoopData *loopData = getLoopData();
|
||||
BackPressure &backPressure = getAsyncSocketData()->buffer;
|
||||
size_t existingBackpressure = backPressure.length();
|
||||
if ((!existingBackpressure) && (isCorked() || canCork()) && (loopData->corkOffset + size < LoopData::CORK_BUFFER_SIZE)) {
|
||||
if ((!existingBackpressure) && (isCorked() || canCork()) && (loopData->getCorkOffset() + size < LoopData::CORK_BUFFER_SIZE)) {
|
||||
/* Cork automatically if we can */
|
||||
if (isCorked()) {
|
||||
char *sendBuffer = loopData->corkBuffer + loopData->corkOffset;
|
||||
loopData->corkOffset += (unsigned int) size;
|
||||
char *sendBuffer = loopData->getCorkSendBuffer();
|
||||
loopData->incrementCorkedOffset((unsigned int) size);
|
||||
return {sendBuffer, SendBufferAttribute::NEEDS_NOTHING};
|
||||
} else {
|
||||
cork();
|
||||
char *sendBuffer = loopData->corkBuffer + loopData->corkOffset;
|
||||
loopData->corkOffset += (unsigned int) size;
|
||||
char *sendBuffer = loopData->getCorkSendBuffer();
|
||||
loopData->incrementCorkedOffset((unsigned int) size);
|
||||
return {sendBuffer, SendBufferAttribute::NEEDS_UNCORK};
|
||||
}
|
||||
} else {
|
||||
@@ -183,17 +183,19 @@ public:
|
||||
/* If we are corked and there is already data in the cork buffer,
|
||||
mark how much is ours and reset it */
|
||||
unsigned int ourCorkOffset = 0;
|
||||
if (isCorked() && loopData->corkOffset) {
|
||||
ourCorkOffset = loopData->corkOffset;
|
||||
loopData->corkOffset = 0;
|
||||
|
||||
if (isCorked()) {
|
||||
ourCorkOffset = loopData->getCorkOffset();
|
||||
loopData->setCorkOffset(0);
|
||||
}
|
||||
|
||||
/* Fallback is to use the backpressure as buffer */
|
||||
backPressure.resize(ourCorkOffset + existingBackpressure + size);
|
||||
|
||||
/* And copy corkbuffer in front */
|
||||
memcpy((char *) backPressure.data() + existingBackpressure, loopData->corkBuffer, ourCorkOffset);
|
||||
|
||||
if(ourCorkOffset > 0) {
|
||||
/* And copy corkbuffer in front */
|
||||
memcpy((char *) backPressure.data() + existingBackpressure, loopData->getCorkBuffer(), ourCorkOffset);
|
||||
}
|
||||
return {(char *) backPressure.data() + ourCorkOffset + existingBackpressure, SendBufferAttribute::NEEDS_DRAIN};
|
||||
}
|
||||
}
|
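To make the control flow of getSendBuffer easier to follow, here is a simplified, self-contained model of the decision it makes: hand out a slice of the shared cork buffer when there is no backpressure and the chunk fits, otherwise move any corked bytes into the per-socket backpressure buffer and hand out a slice of that. The names below are illustrative only, not the uWS API.

#include <cstring>
#include <string>

// Illustrative model; mirrors the logic of AsyncSocket::getSendBuffer above.
static const unsigned int CORK_BUFFER_SIZE = 16 * 1024;

struct CorkModel {
    char corkBuffer[CORK_BUFFER_SIZE];
    unsigned int corkOffset = 0;
    std::string backpressure; // per-socket buffered (unsent) data
};

char *getSendBufferModel(CorkModel &m, size_t size) {
    if (m.backpressure.empty() && m.corkOffset + size < CORK_BUFFER_SIZE) {
        /* Fast path: a slice of the cork buffer (NEEDS_NOTHING / NEEDS_UNCORK) */
        char *slice = m.corkBuffer + m.corkOffset;
        m.corkOffset += (unsigned int) size;
        return slice;
    }
    /* Fallback: copy any corked bytes in front, then extend the backpressure buffer */
    size_t existing = m.backpressure.size();
    unsigned int ourCorkOffset = m.corkOffset;
    m.corkOffset = 0;
    m.backpressure.resize(existing + ourCorkOffset + size);
    if (ourCorkOffset > 0) {
        memcpy(&m.backpressure[existing], m.corkBuffer, ourCorkOffset);
    }
    /* Caller writes its chunk here; this corresponds to NEEDS_DRAIN */
    return &m.backpressure[existing + ourCorkOffset];
}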
||||
@@ -253,10 +255,14 @@ public:
|
||||
|
||||
/* We are limited if we have a per-socket buffer */
|
||||
if (asyncSocketData->buffer.length()) {
|
||||
size_t buffer_len = asyncSocketData->buffer.length();
|
||||
// we cannot flush more than INT_MAX bytes at a time
|
||||
int max_flush_len = std::min(buffer_len, (size_t)INT_MAX);
|
||||
|
||||
/* Write off as much as we can */
|
||||
int written = us_socket_write(SSL, (us_socket_t *) this, asyncSocketData->buffer.data(), (int) asyncSocketData->buffer.length(), /*nextLength != 0 | */length);
|
||||
int written = us_socket_write(SSL, (us_socket_t *) this, asyncSocketData->buffer.data(), max_flush_len, /*nextLength != 0 | */length);
|
||||
/* On failure return, otherwise continue down the function */
|
||||
if ((unsigned int) written < asyncSocketData->buffer.length()) {
|
||||
if ((unsigned int) written < buffer_len) {
|
||||
/* Update buffering (todo: we can do better here if we keep track of what happens to this guy later on) */
|
||||
asyncSocketData->buffer.erase((unsigned int) written);
|
||||
|
||||
@@ -275,20 +281,20 @@ public:
|
||||
}
|
||||
|
||||
if (length) {
|
||||
if (loopData->corkedSocket == this) {
|
||||
if (loopData->isCorkedWith(this)) {
|
||||
/* We are corked */
|
||||
if (LoopData::CORK_BUFFER_SIZE - loopData->corkOffset >= (unsigned int) length) {
|
||||
if (LoopData::CORK_BUFFER_SIZE - loopData->getCorkOffset() >= (unsigned int) length) {
|
||||
/* If the entire chunk fits in cork buffer */
|
||||
memcpy(loopData->corkBuffer + loopData->corkOffset, src, (unsigned int) length);
|
||||
loopData->corkOffset += (unsigned int) length;
|
||||
memcpy(loopData->getCorkSendBuffer(), src, (unsigned int) length);
|
||||
loopData->incrementCorkedOffset((unsigned int) length);
|
||||
/* Fall through to default return */
|
||||
} else {
|
||||
/* Strategy differences between SSL and non-SSL regarding syscall minimizing */
|
||||
if constexpr (false) {
|
||||
/* Cork up as much as we can */
|
||||
unsigned int stripped = LoopData::CORK_BUFFER_SIZE - loopData->corkOffset;
|
||||
memcpy(loopData->corkBuffer + loopData->corkOffset, src, stripped);
|
||||
loopData->corkOffset = LoopData::CORK_BUFFER_SIZE;
|
||||
unsigned int stripped = LoopData::CORK_BUFFER_SIZE - loopData->getCorkOffset();
|
||||
memcpy(loopData->getCorkSendBuffer(), src, stripped);
|
||||
loopData->setCorkOffset(LoopData::CORK_BUFFER_SIZE);
|
||||
|
||||
auto [written, failed] = uncork(src + stripped, length - (int) stripped, optionally);
|
||||
return {written + (int) stripped, failed};
|
||||
@@ -331,14 +337,13 @@ public:
|
||||
/* It does NOT count bytes written from cork buffer (they are already accounted for in the write call responsible for its corking)! */
|
||||
std::pair<int, bool> uncork(const char *src = nullptr, int length = 0, bool optionally = false) {
|
||||
LoopData *loopData = getLoopData();
|
||||
if (loopData->isCorkedWith(this)) {
|
||||
auto offset = loopData->getCorkOffset();
|
||||
loopData->cleanCorkedSocket();
|
||||
|
||||
if (loopData->corkedSocket == this) {
|
||||
loopData->corkedSocket = nullptr;
|
||||
|
||||
if (loopData->corkOffset) {
|
||||
if (offset) {
|
||||
/* Corked data is already accounted for via its write call */
|
||||
auto [written, failed] = write(loopData->corkBuffer, (int) loopData->corkOffset, false, length);
|
||||
loopData->corkOffset = 0;
|
||||
auto [written, failed] = write(loopData->getCorkBuffer(), (int) offset, false, length);
|
||||
|
||||
if (failed && optionally) {
|
||||
/* We do not need to care for buffering here, write does that */
|
||||
@@ -357,4 +362,4 @@ public:
|
||||
|
||||
}
|
||||
|
||||
#endif // UWS_ASYNCSOCKET_H
|
||||
#endif // UWS_ASYNCSOCKET_H
|
||||
@@ -82,7 +82,7 @@ private:
|
||||
}
|
||||
|
||||
/* Any connected socket should timeout until it has a request */
|
||||
us_socket_timeout(SSL, s, HTTP_IDLE_TIMEOUT_S);
|
||||
((HttpResponse<SSL> *) s)->resetTimeout();
|
||||
|
||||
/* Call filter */
|
||||
for (auto &f : httpContextData->filterHandlers) {
|
||||
@@ -94,11 +94,10 @@ private:
|
||||
|
||||
/* Handle socket connections */
|
||||
us_socket_context_on_open(SSL, getSocketContext(), [](us_socket_t *s, int /*is_client*/, char */*ip*/, int /*ip_length*/) {
|
||||
/* Any connected socket should timeout until it has a request */
|
||||
us_socket_timeout(SSL, s, HTTP_IDLE_TIMEOUT_S);
|
||||
|
||||
/* Init socket ext */
|
||||
new (us_socket_ext(SSL, s)) HttpResponseData<SSL>;
|
||||
/* Any connected socket should timeout until it has a request */
|
||||
((HttpResponse<SSL> *) s)->resetTimeout();
|
||||
|
||||
if(!SSL) {
|
||||
/* Call filter */
|
||||
@@ -113,6 +112,9 @@ private:
|
||||
|
||||
/* Handle socket disconnections */
|
||||
us_socket_context_on_close(SSL, getSocketContext(), [](us_socket_t *s, int /*code*/, void */*reason*/) {
|
||||
((AsyncSocket<SSL> *)s)->uncorkWithoutSending();
|
||||
|
||||
|
||||
/* Get socket ext */
|
||||
HttpResponseData<SSL> *httpResponseData = (HttpResponseData<SSL> *) us_socket_ext(SSL, s);
|
||||
|
||||
@@ -126,6 +128,7 @@ private:
|
||||
if (httpResponseData->onAborted) {
|
||||
httpResponseData->onAborted((HttpResponse<SSL> *)s, httpResponseData->userData);
|
||||
}
|
||||
|
||||
|
||||
/* Destruct socket ext */
|
||||
httpResponseData->~HttpResponseData<SSL>();
|
||||
@@ -233,7 +236,7 @@ private:
|
||||
|
||||
/* If we have not responded and we have a data handler, we need a timeout to enforce that the client sends the data */
|
||||
if (!((HttpResponse<SSL> *) s)->hasResponded() && httpResponseData->inStream) {
|
||||
us_socket_timeout(SSL, (us_socket_t *) s, HTTP_IDLE_TIMEOUT_S);
|
||||
((HttpResponse<SSL> *) s)->resetTimeout();
|
||||
}
|
||||
|
||||
/* Continue parsing */
|
||||
@@ -251,8 +254,8 @@ private:
|
||||
/* We still have some more data coming in later, so reset timeout */
|
||||
/* Only reset timeout if we got enough bytes (16kb/sec) since last time we reset here */
|
||||
httpResponseData->received_bytes_per_timeout += (unsigned int) data.length();
|
||||
if (httpResponseData->received_bytes_per_timeout >= HTTP_RECEIVE_THROUGHPUT_BYTES * HTTP_IDLE_TIMEOUT_S) {
|
||||
us_socket_timeout(SSL, (struct us_socket_t *) user, HTTP_IDLE_TIMEOUT_S);
|
||||
if (httpResponseData->received_bytes_per_timeout >= HTTP_RECEIVE_THROUGHPUT_BYTES * httpResponseData->idleTimeout) {
|
||||
((HttpResponse<SSL> *) user)->resetTimeout();
|
||||
httpResponseData->received_bytes_per_timeout = 0;
|
||||
}
|
||||
}
|
||||
@@ -302,10 +305,9 @@ private:
|
||||
|
||||
/* Timeout on uncork failure */
|
||||
auto [written, failed] = ((AsyncSocket<SSL> *) returnedSocket)->uncork();
|
||||
if (failed) {
|
||||
if (written > 0 || failed) {
|
||||
/* All Http sockets time out via this, and this behavior matches the one in HttpResponse::cork */
|
||||
/* Warning: both HTTP_IDLE_TIMEOUT_S and HTTP_TIMEOUT_S are 10 seconds and both are used the same way */
|
||||
((AsyncSocket<SSL> *) s)->timeout(HTTP_IDLE_TIMEOUT_S);
|
||||
((HttpResponse<SSL> *) s)->resetTimeout();
|
||||
}
|
||||
|
||||
/* We need to check if we should close this socket here now */
|
||||
@@ -393,13 +395,14 @@ private:
|
||||
}
|
||||
|
||||
/* Expect another writable event, or another request within the timeout */
|
||||
asyncSocket->timeout(HTTP_IDLE_TIMEOUT_S);
|
||||
((HttpResponse<SSL> *) s)->resetTimeout();
|
||||
|
||||
return s;
|
||||
});
|
||||
|
||||
/* Handle FIN, HTTP does not support half-closed sockets, so simply close */
|
||||
us_socket_context_on_end(SSL, getSocketContext(), [](us_socket_t *s) {
|
||||
((AsyncSocket<SSL> *)s)->uncorkWithoutSending();
|
||||
|
||||
/* We do not care for half closed sockets */
|
||||
AsyncSocket<SSL> *asyncSocket = (AsyncSocket<SSL> *) s;
|
||||
|
||||
@@ -50,6 +50,13 @@ private:
|
||||
void *upgradedWebSocket = nullptr;
|
||||
bool isParsingHttp = false;
|
||||
bool rejectUnauthorized = false;
|
||||
|
||||
// TODO: SNI
|
||||
void clearRoutes() {
|
||||
this->router = HttpRouter<RouterData>{};
|
||||
this->currentRouter = &router;
|
||||
filterHandlers.clear();
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
@@ -39,6 +39,8 @@
|
||||
#include "ProxyParser.h"
|
||||
#include "QueryParser.h"
|
||||
|
||||
extern "C" size_t BUN_DEFAULT_MAX_HTTP_HEADER_SIZE;
|
||||
|
||||
namespace uWS
|
||||
{
|
||||
|
||||
@@ -207,7 +209,7 @@ namespace uWS
|
||||
/* This guy really has only 30 bits since we reserve two highest bits to chunked encoding parsing state */
|
||||
uint64_t remainingStreamingBytes = 0;
|
||||
|
||||
const size_t MAX_FALLBACK_SIZE = 1024 * 8;
|
||||
const size_t MAX_FALLBACK_SIZE = BUN_DEFAULT_MAX_HTTP_HEADER_SIZE;
|
||||
|
||||
/* Returns UINT_MAX on error. Maximum 999999999 is allowed. */
|
||||
static uint64_t toUnsignedInteger(std::string_view str) {
|
||||
|
||||
@@ -40,19 +40,27 @@ namespace uWS {
|
||||
/* Some pre-defined status constants to use with writeStatus */
|
||||
static const char *HTTP_200_OK = "200 OK";
|
||||
|
||||
/* The general timeout for HTTP sockets */
|
||||
static const int HTTP_TIMEOUT_S = 10;
|
||||
|
||||
template <bool SSL>
|
||||
struct HttpResponse : public AsyncSocket<SSL> {
|
||||
/* Solely used for getHttpResponseData() */
|
||||
template <bool> friend struct TemplatedApp;
|
||||
typedef AsyncSocket<SSL> Super;
|
||||
public:
|
||||
|
||||
HttpResponseData<SSL> *getHttpResponseData() {
|
||||
return (HttpResponseData<SSL> *) Super::getAsyncSocketData();
|
||||
}
|
||||
void setTimeout(uint8_t seconds) {
|
||||
auto* data = getHttpResponseData();
|
||||
data->idleTimeout = seconds;
|
||||
Super::timeout(data->idleTimeout);
|
||||
}
|
||||
|
||||
void resetTimeout() {
|
||||
auto* data = getHttpResponseData();
|
||||
|
||||
Super::timeout(data->idleTimeout);
|
||||
}
|
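The two methods above are what the rest of this diff keeps substituting for the old fixed Super::timeout(HTTP_TIMEOUT_S) calls: setTimeout() records a per-response idle timeout in HttpResponseData::idleTimeout, and resetTimeout() re-arms the socket with that stored value. A hedged usage sketch follows; the route and duration are hypothetical, the methods are the ones added in this diff.

#include "App.h" // uWS application header from this fork

int main() {
    uWS::App().get("/slow", [](auto *res, auto * /*req*/) {
        /* Allow 30 seconds of idle time for this particular response;
           later internal resetTimeout() calls re-arm the same 30 seconds. */
        res->setTimeout(30);
        res->onAborted([]() { /* client disconnected before we finished */ });
        res->end("done");
    }).listen(3000, [](auto * /*token*/) {}).run();
}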
||||
/* Write an unsigned 32-bit integer in hex */
|
||||
void writeUnsignedHex(unsigned int value) {
|
||||
char buf[10];
|
||||
@@ -140,7 +148,7 @@ public:
|
||||
}
|
||||
|
||||
/* tryEnd can never fail when in chunked mode, since we do not have tryWrite (yet), only write */
|
||||
Super::timeout(HTTP_TIMEOUT_S);
|
||||
this->resetTimeout();
|
||||
return true;
|
||||
} else {
|
||||
/* Write content-length on first call */
|
||||
@@ -180,11 +188,8 @@ public:
|
||||
|
||||
/* Success is when we wrote the entire thing without any failures */
|
||||
bool success = written == data.length() && !failed;
|
||||
|
||||
/* If we are now at the end, start a timeout. Also start a timeout if we failed. */
|
||||
if (!success || httpResponseData->offset == totalSize) {
|
||||
Super::timeout(HTTP_TIMEOUT_S);
|
||||
}
|
||||
/* Reset the timeout on each tryEnd */
|
||||
this->resetTimeout();
|
||||
|
||||
/* Remove onAborted function if we reach the end */
|
||||
if (httpResponseData->offset == totalSize) {
|
||||
@@ -358,7 +363,7 @@ public:
|
||||
|
||||
HttpResponse *resume() {
|
||||
Super::resume();
|
||||
Super::timeout(HTTP_TIMEOUT_S);
|
||||
this->resetTimeout();
|
||||
return this;
|
||||
}
|
||||
|
||||
@@ -474,9 +479,8 @@ public:
|
||||
Super::write("\r\n", 2);
|
||||
|
||||
auto [written, failed] = Super::write(data.data(), (int) data.length());
|
||||
if (failed) {
|
||||
Super::timeout(HTTP_TIMEOUT_S);
|
||||
}
|
||||
/* Reset timeout on each sent chunk */
|
||||
this->resetTimeout();
|
||||
|
||||
/* If we did not fail the write, accept more */
|
||||
return !failed;
|
||||
@@ -513,7 +517,7 @@ public:
|
||||
/* The only way we could possibly have changed the corked socket during handler call, would be if
|
||||
* the HTTP socket was upgraded to WebSocket and caused a realloc. Because of this we cannot use "this"
|
||||
* from here downwards. The corking is done with corkUnchecked() in upgrade. It steals cork. */
|
||||
auto *newCorkedSocket = loopData->corkedSocket;
|
||||
auto *newCorkedSocket = loopData->getCorkedSocket();
|
||||
|
||||
/* If nobody is corked, it means most probably that large amounts of data has
|
||||
* been written and the cork buffer has already been sent off and uncorked.
|
||||
@@ -531,10 +535,10 @@ public:
|
||||
return static_cast<HttpResponse *>(newCorkedSocket);
|
||||
}
|
||||
|
||||
if (failed) {
|
||||
if (written > 0 || failed) {
|
||||
/* For now we only have one single timeout so let's use it */
|
||||
/* This behavior should equal the behavior in HttpContext when uncorking fails */
|
||||
Super::timeout(HTTP_TIMEOUT_S);
|
||||
this->resetTimeout();
|
||||
}
|
||||
|
||||
/* If we have no backbuffer and we are connection close and we responded fully then close */
|
||||
|
||||
@@ -36,6 +36,7 @@ struct HttpResponseData : AsyncSocketData<SSL>, HttpParser {
|
||||
using OnWritableCallback = bool (*)(uWS::HttpResponse<SSL>*, uint64_t, void*);
|
||||
using OnAbortedCallback = void (*)(uWS::HttpResponse<SSL>*, void*);
|
||||
using OnDataCallback = void (*)(uWS::HttpResponse<SSL>* response, const char* chunk, size_t chunk_length, bool, void*);
|
||||
uint8_t idleTimeout = 10; // default HTTP_TIMEOUT 10 seconds
|
||||
|
||||
/* When we are done with a response we mark it like so */
|
||||
void markDone() {
|
||||
|
||||
@@ -18,17 +18,17 @@
|
||||
#ifndef UWS_LOOPDATA_H
|
||||
#define UWS_LOOPDATA_H
|
||||
|
||||
#include <thread>
|
||||
#include <functional>
|
||||
#include <vector>
|
||||
#include <mutex>
|
||||
#include <map>
|
||||
#include <ctime>
|
||||
#include <cstdint>
|
||||
#include <ctime>
|
||||
#include <functional>
|
||||
#include <map>
|
||||
#include <mutex>
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
|
||||
#include "PerMessageDeflate.h"
|
||||
#include "MoveOnlyFunction.h"
|
||||
|
||||
#include "PerMessageDeflate.h"
|
||||
// clang-format off
|
||||
struct us_timer_t;
|
||||
|
||||
namespace uWS {
|
||||
@@ -44,7 +44,11 @@ private:
|
||||
|
||||
/* Map from void ptr to handler */
|
||||
std::map<void *, MoveOnlyFunction<void(Loop *)>> postHandlers, preHandlers;
|
||||
|
||||
/* Cork data */
|
||||
char *corkBuffer = new char[CORK_BUFFER_SIZE];
|
||||
unsigned int corkOffset = 0;
|
||||
void *corkedSocket = nullptr;
|
||||
bool corkedSocketIsSSL = false;
|
||||
public:
|
||||
LoopData() {
|
||||
updateDate();
|
||||
@@ -59,6 +63,55 @@ public:
|
||||
}
|
||||
delete [] corkBuffer;
|
||||
}
|
||||
void* getCorkedSocket() {
|
||||
return this->corkedSocket;
|
||||
}
|
||||
|
||||
void setCorkedSocket(void *corkedSocket, bool ssl) {
|
||||
this->corkedSocket = corkedSocket;
|
||||
this->corkedSocketIsSSL = ssl;
|
||||
}
|
||||
|
||||
bool isCorkedSSL() {
|
||||
return this->corkedSocketIsSSL;
|
||||
}
|
||||
|
||||
bool isCorked() {
|
||||
return this->corkOffset && this->corkedSocket;
|
||||
}
|
||||
|
||||
bool canCork() {
|
||||
return this->corkedSocket == nullptr;
|
||||
}
|
||||
|
||||
bool isCorkedWith(void* socket) {
|
||||
return this->corkedSocket == socket;
|
||||
}
|
||||
|
||||
char* getCorkSendBuffer() {
|
||||
return this->corkBuffer + this->corkOffset;
|
||||
}
|
||||
|
||||
void cleanCorkedSocket() {
|
||||
this->corkedSocket = nullptr;
|
||||
this->corkOffset = 0;
|
||||
}
|
||||
|
||||
unsigned int getCorkOffset() {
|
||||
return this->corkOffset;
|
||||
}
|
||||
|
||||
void setCorkOffset(unsigned int offset) {
|
||||
this->corkOffset = offset;
|
||||
}
|
||||
|
||||
void incrementCorkedOffset(unsigned int offset) {
|
||||
this->corkOffset += offset;
|
||||
}
|
||||
|
||||
char* getCorkBuffer() {
|
||||
return this->corkBuffer;
|
||||
}
|
||||
|
||||
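One detail worth calling out in the new accessor surface above: cleanCorkedSocket() releases cork ownership and discards the staged offset in a single step, where the old call sites (e.g. uncorkWithoutSending) only nulled corkedSocket. A minimal, self-contained model of that invariant; names mirror the diff, the model itself is illustrative.

#include <cassert>

struct CorkOwnership {
    void *corkedSocket = nullptr;
    unsigned int corkOffset = 0;
    bool corkedSocketIsSSL = false;

    void setCorkedSocket(void *s, bool ssl) { corkedSocket = s; corkedSocketIsSSL = ssl; }
    bool isCorkedWith(void *s) { return corkedSocket == s; }
    bool canCork() { return corkedSocket == nullptr; }
    void cleanCorkedSocket() { corkedSocket = nullptr; corkOffset = 0; }
};

int main() {
    CorkOwnership state;
    int socketA = 0;
    state.setCorkedSocket(&socketA, /* ssl */ false);
    state.corkOffset = 128;        // some bytes staged in the cork buffer
    state.cleanCorkedSocket();     // e.g. uncorkWithoutSending() during close
    assert(state.canCork() && state.corkOffset == 0);
    return 0;
}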
void updateDate() {
|
||||
time_t now = time(0);
|
||||
@@ -94,12 +147,6 @@ public:
|
||||
/* Good 16k for SSL perf. */
|
||||
static const unsigned int CORK_BUFFER_SIZE = 16 * 1024;
|
||||
|
||||
/* Cork data */
|
||||
char *corkBuffer = new char[CORK_BUFFER_SIZE];
|
||||
unsigned int corkOffset = 0;
|
||||
void *corkedSocket = nullptr;
|
||||
bool corkedSocketIsSSL = false;
|
||||
|
||||
/* Per message deflate data */
|
||||
ZlibContext *zlibContext = nullptr;
|
||||
InflationStream *inflationStream = nullptr;
|
||||
|
||||
@@ -79,8 +79,14 @@ template <typename T, typename B>
|
||||
struct TopicTree {
|
||||
|
||||
enum IteratorFlags {
|
||||
// To appease clang-analyzer
|
||||
NONE = 0,
|
||||
|
||||
LAST = 1,
|
||||
FIRST = 2
|
||||
FIRST = 2,
|
||||
|
||||
// To appease clang-analyzer
|
||||
FIRST_AND_LAST = FIRST | LAST
|
||||
};
|
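The reason for adding NONE and FIRST_AND_LAST appears to be that the drain loop below casts a bitwise OR back to IteratorFlags; naming every value the cast can produce keeps static analysis quiet (my reading of the "appease clang-analyzer" comments). A small self-contained reproduction of that flag computation:

#include <cstdio>

enum IteratorFlags { NONE = 0, LAST = 1, FIRST = 2, FIRST_AND_LAST = FIRST | LAST };

int main() {
    int numMessageIndices = 3;
    for (int i = 0; i < numMessageIndices; i++) {
        int flags = (i == numMessageIndices - 1) ? LAST : NONE;
        /* With a single message this yields FIRST_AND_LAST, now a named enumerator */
        IteratorFlags f = (IteratorFlags)(flags | (i == 0 ? FIRST : NONE));
        std::printf("message %d -> flags %d\n", i, (int) f);
    }
    return 0;
}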
||||
|
||||
/* Whoever is iterating this topic is locked to not modify its own list */
|
||||
@@ -120,10 +126,10 @@ private:
|
||||
for (int i = 0; i < numMessageIndices; i++) {
|
||||
T &outgoingMessage = outgoingMessages[s->messageIndices[i]];
|
||||
|
||||
int flags = (i == numMessageIndices - 1) ? LAST : 0;
|
||||
int flags = (i == numMessageIndices - 1) ? LAST : NONE;
|
||||
|
||||
/* Returning true will stop drainage short (such as when backpressure is too high) */
|
||||
if (cb(s, outgoingMessage, (IteratorFlags)(flags | (i == 0 ? FIRST : 0)))) {
|
||||
if (cb(s, outgoingMessage, (IteratorFlags)(flags | (i == 0 ? FIRST : NONE)))) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,13 +18,13 @@
|
||||
#ifndef UWS_WEBSOCKET_H
|
||||
#define UWS_WEBSOCKET_H
|
||||
|
||||
#include "WebSocketData.h"
|
||||
#include "WebSocketProtocol.h"
|
||||
#include "AsyncSocket.h"
|
||||
#include "WebSocketContextData.h"
|
||||
#include "WebSocketData.h"
|
||||
#include "WebSocketProtocol.h"
|
||||
|
||||
#include <string_view>
|
||||
|
||||
// clang-format off
|
||||
namespace uWS {
|
||||
|
||||
template <bool SSL, bool isServer, typename USERDATA>
|
||||
@@ -107,7 +107,7 @@ public:
|
||||
WebSocketData *webSocketData = (WebSocketData *) Super::getAsyncSocketData();
|
||||
|
||||
/* Special path for long sends of non-compressed, non-SSL messages */
|
||||
if (message.length() >= 16 * 1024 && !compress && !SSL && !webSocketData->subscriber && getBufferedAmount() == 0 && Super::getLoopData()->corkOffset == 0) {
|
||||
if (message.length() >= 16 * 1024 && !compress && !SSL && !webSocketData->subscriber && getBufferedAmount() == 0 && Super::getLoopData()->getCorkOffset() == 0) {
|
||||
char header[10];
|
||||
int header_length = (int) protocol::formatMessage<isServer>(header, "", 0, opCode, message.length(), compress, fin);
|
||||
int written = us_socket_write2(0, (struct us_socket_t *)this, header, header_length, message.data(), (int) message.length());
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// clang-format off
|
||||
#ifndef UWS_WEBSOCKETCONTEXT_H
|
||||
#define UWS_WEBSOCKETCONTEXT_H
|
||||
|
||||
@@ -252,6 +252,8 @@ private:
|
||||
|
||||
/* Handle socket disconnections */
|
||||
us_socket_context_on_close(SSL, getSocketContext(), [](auto *s, int code, void *reason) {
|
||||
((AsyncSocket<SSL> *)s)->uncorkWithoutSending();
|
||||
|
||||
/* For whatever reason, if we already have emitted close event, do not emit it again */
|
||||
WebSocketData *webSocketData = (WebSocketData *) (us_socket_ext(SSL, s));
|
||||
if (!webSocketData->isShuttingDown) {
|
||||
@@ -270,7 +272,7 @@ private:
|
||||
webSocketData->subscriber = nullptr;
|
||||
|
||||
if (webSocketContextData->closeHandler) {
|
||||
webSocketContextData->closeHandler((WebSocket<SSL, isServer, USERDATA> *) s, 1006, {(char *) reason, (size_t) code});
|
||||
webSocketContextData->closeHandler((WebSocket<SSL, isServer, USERDATA> *) s, 1006, reason != NULL && code > 0 ? std::string_view{(char *) reason, (size_t) code} : std::string_view());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -371,6 +373,7 @@ private:
|
||||
|
||||
/* Handle FIN, HTTP does not support half-closed sockets, so simply close */
|
||||
us_socket_context_on_end(SSL, getSocketContext(), [](auto *s) {
|
||||
((AsyncSocket<SSL> *)s)->uncorkWithoutSending();
|
||||
|
||||
/* If we get a fin, we just close I guess */
|
||||
us_socket_close(SSL, (us_socket_t *) s, 0, nullptr);
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
#include <string_view>
|
||||
|
||||
// bun-specific
|
||||
#include "simdutf.h"
|
||||
#include "wtf/SIMDUTF.h"
|
||||
|
||||
namespace uWS {
|
||||
|
||||
|
||||
@@ -3,12 +3,16 @@ param(
|
||||
)
|
||||
|
||||
$ErrorActionPreference = 'Stop'
|
||||
. (Join-Path $PSScriptRoot "env.ps1")
|
||||
|
||||
if ($env:CI) {
|
||||
. (Join-Path $PSScriptRoot "env.ps1")
|
||||
if ($env:CI -eq "true") {
|
||||
& (Join-Path $PSScriptRoot "update-submodules.ps1")
|
||||
}
|
||||
|
||||
if ($env:RELEASE -eq "1") {
|
||||
$Force = $true
|
||||
}
|
||||
|
||||
$DidAnything = $false;
|
||||
|
||||
function Build-Dependency {
|
||||
|
||||
@@ -1,7 +1,15 @@
|
||||
#!/usr/bin/env bash
|
||||
set -eo pipefail
|
||||
set -euo pipefail
|
||||
source "$(dirname -- "${BASH_SOURCE[0]}")/env.sh"
|
||||
|
||||
RELEASE="${RELEASE:-0}"
|
||||
CI="${CI:-}"
|
||||
BUILT_ANY=0
|
||||
SUBMODULES=
|
||||
CACHE_DIR=
|
||||
CACHE=0
|
||||
BUN_DEPS_CACHE_DIR="${BUN_DEPS_CACHE_DIR:-}"
|
||||
|
||||
if [[ "$CI" ]]; then
|
||||
$(dirname -- "${BASH_SOURCE[0]}")/update-submodules.sh
|
||||
fi
|
||||
@@ -23,11 +31,9 @@ while getopts "f" opt; do
|
||||
esac
|
||||
done
|
||||
|
||||
BUILT_ANY=0
|
||||
SUBMODULES=
|
||||
CACHE_DIR=
|
||||
CACHE=0
|
||||
if [ -n "$BUN_DEPS_CACHE_DIR" ]; then
|
||||
if [ "$RELEASE" == "1" ]; then
|
||||
FORCE=1
|
||||
elif [ -n "$BUN_DEPS_CACHE_DIR" ]; then
|
||||
CACHE_DIR="$BUN_DEPS_CACHE_DIR"
|
||||
CACHE=1
|
||||
SUBMODULES="$(git submodule status)"
|
||||
@@ -41,6 +47,7 @@ dep() {
|
||||
local os="$(uname -s | tr '[:upper:]' '[:lower:]')"
|
||||
local arch="$(uname -m)"
|
||||
CACHE_KEY="$submodule/$hash-$os-$arch-$CPU_TARGET"
|
||||
mkdir -p "$CACHE_DIR/$CACHE_KEY"
|
||||
fi
|
||||
if [ -z "$FORCE" ]; then
|
||||
HAS_ALL_DEPS=1
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
$ErrorActionPreference = 'Stop' # Setting strict mode, similar to 'set -euo pipefail' in bash
|
||||
|
||||
.\scripts\env.ps1
|
||||
.\scripts\update-submodules.ps1
|
||||
.\scripts\build-libuv.ps1 -CloneOnly $True
|
||||
|
||||
# libdeflate.h is needed otherwise the build fails
|
||||
git submodule update --init --recursive --progress --depth=1 --checkout src/deps/libdeflate
|
||||
. (Join-Path $PSScriptRoot "env.ps1")
|
||||
if ($env:CI -eq "true") {
|
||||
$env:FORCE_UPDATE_SUBMODULES = "1"
|
||||
& (Join-Path $PSScriptRoot "update-submodules.ps1")
|
||||
& (Join-Path $PSScriptRoot "build-libuv.ps1") -CloneOnly $True
|
||||
}
|
||||
|
||||
cd build
|
||||
cmake .. @CMAKE_FLAGS `
|
||||
|
||||
@@ -7,22 +7,25 @@ MIMALLOC_VALGRIND_ENABLED_FLAG=${MIMALLOC_VALGRIND_ENABLED_FLAG:-}
|
||||
|
||||
cd $BUN_DEPS_DIR/mimalloc
|
||||
|
||||
rm -rf CMakeCache* CMakeFiles
|
||||
rm -rf CMakeCache* CMakeFiles build
|
||||
|
||||
cmake "${CMAKE_FLAGS[@]}" . \
|
||||
mkdir build
|
||||
|
||||
cd build
|
||||
|
||||
cmake "${CMAKE_FLAGS[@]}" .. \
|
||||
-DCMAKE_BUILD_TYPE=Debug \
|
||||
-DMI_DEBUG=1 \
|
||||
-DMI_DEBUG_FULL=1 \
|
||||
-DMI_SKIP_COLLECT_ON_EXIT=1 \
|
||||
-DMI_BUILD_SHARED=OFF \
|
||||
-DMI_BUILD_STATIC=ON \
|
||||
-DMI_BUILD_TESTS=OFF \
|
||||
-DMI_OSX_ZONE=OFF \
|
||||
-DMI_OSX_INTERPOSE=OFF \
|
||||
-DMI_TRACK_VALGRIND=ON \
|
||||
-DMI_BUILD_OBJECT=ON \
|
||||
-DMI_USE_CXX=ON \
|
||||
-DMI_OVERRIDE=OFF \
|
||||
-DMI_OSX_ZONE=OFF \
|
||||
-DMI_TRACK_VALGRIND=ON \
|
||||
-DMI_USE_CXX=ON \
|
||||
-GNinja
|
||||
|
||||
ninja
|
||||
|
||||
@@ -7,7 +7,7 @@ $ErrorActionPreference = 'Stop' # Setting strict mode, similar to 'set -euo pip
|
||||
$Target = If ($Baseline) { "windows-x64-baseline" } Else { "windows-x64" }
|
||||
$Tag = "bun-$Target"
|
||||
|
||||
.\scripts\env.ps1
|
||||
. (Join-Path $PSScriptRoot "env.ps1")
|
||||
|
||||
mkdir -Force build
|
||||
buildkite-agent artifact download "**" build --step "${Target}-build-zig"
|
||||
|
||||
@@ -24,7 +24,7 @@ try {
|
||||
Write-Host "-- Downloading WebKit"
|
||||
if (!(Test-Path $TarPath)) {
|
||||
try {
|
||||
Invoke-WebRequest $Url -OutFile $TarPath
|
||||
Invoke-WebRequest $Url -OutFile $TarPath -MaximumRetryCount 3 -RetryIntervalSec 1
|
||||
} catch {
|
||||
Write-Error "Failed to fetch WebKit from: $Url"
|
||||
throw $_
|
||||
|
||||
@@ -43,11 +43,34 @@ fi
|
||||
|
||||
rm -rf "$OUTDIR"
|
||||
|
||||
download () {
|
||||
local command="$1"
|
||||
local retries="$2"
|
||||
local options="$-"
|
||||
if [[ $options == *e* ]]; then
|
||||
set +e
|
||||
fi
|
||||
$command
|
||||
local exit_code=$?
|
||||
if [[ $options == *e* ]]; then
|
||||
set -e
|
||||
fi
|
||||
if [[ $exit_code -ne 0 && $retries -gt 0 ]]; then
|
||||
download "$command" $(($retries - 1))
|
||||
else
|
||||
return $exit_code
|
||||
fi
|
||||
}
|
||||
|
||||
# this is a big download so we will retry 5 times and ask curl to resume
|
||||
# download from where failure occurred if it fails and is rerun
|
||||
if [ ! -f "$tar" ]; then
|
||||
echo "-- Downloading WebKit"
|
||||
if ! curl -o "$tar" -L "$url"; then
|
||||
if ! download "curl -C - --http1.1 -o $tar.tmp -L $url" 5; then
|
||||
echo "Failed to download $url"
|
||||
exit 1
|
||||
else
|
||||
mv $tar.tmp $tar
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ try {
|
||||
if (!(Test-Path $TarPath)) {
|
||||
try {
|
||||
Write-Host "-- Downloading Zig"
|
||||
Invoke-RestMethod $Url -OutFile $TarPath
|
||||
Invoke-RestMethod $Url -OutFile $TarPath -MaximumRetryCount 3 -RetryIntervalSec 1
|
||||
} catch {
|
||||
Write-Error "Failed to fetch Zig from: $Url"
|
||||
throw $_
|
||||
|
||||
@@ -35,20 +35,20 @@ $BUN_DEPS_DIR = if ($env:BUN_DEPS_DIR) { $env:BUN_DEPS_DIR } else { Join-Path $B
|
||||
$BUN_DEPS_OUT_DIR = if ($env:BUN_DEPS_OUT_DIR) { $env:BUN_DEPS_OUT_DIR } else { Join-Path $BUN_BASE_DIR 'build\bun-deps' }
|
||||
|
||||
$CPUS = if ($env:CPUS) { $env:CPUS } else { (Get-CimInstance -Class Win32_Processor).NumberOfCores }
|
||||
$Lto = if ($env:USE_LTO) { $env:USE_LTO -eq "1" } else { True }
|
||||
$Lto = if ($env:USE_LTO) { $env:USE_LTO -eq "1" } else { $False }
|
||||
$Baseline = if ($env:USE_BASELINE_BUILD) {
|
||||
$env:USE_BASELINE_BUILD -eq "1"
|
||||
} elseif ($env:BUILDKITE_STEP_KEY -match "baseline") {
|
||||
True
|
||||
$True
|
||||
} else {
|
||||
False
|
||||
$False
|
||||
}
|
||||
|
||||
$CC = "clang-cl"
|
||||
$CXX = "clang-cl"
|
||||
|
||||
$CFLAGS = '/O2 /Z7 /MT /O2 /Ob2 /DNDEBUG /U_DLL'
|
||||
$CXXFLAGS = '/O2 /Z7 /MT /O2 /Ob2 /DNDEBUG /U_DLL'
|
||||
$CXXFLAGS = '/O2 /Z7 /MT /O2 /Ob2 /DNDEBUG /U_DLL -Xclang -fno-c++-static-destructors '
|
||||
|
||||
if ($Lto) {
|
||||
$CXXFLAGS += " -fuse-ld=lld -flto -Xclang -emit-llvm-bc"
|
||||
@@ -103,7 +103,10 @@ if ($Lto) {
|
||||
$CMAKE_FLAGS += "-DUSE_LTO=ON"
|
||||
}
|
||||
|
||||
if (Get-Command sccache -ErrorAction SilentlyContinue) {
|
||||
if (Get-Command ccache -ErrorAction SilentlyContinue) {
|
||||
$CMAKE_FLAGS += "-DCMAKE_C_COMPILER_LAUNCHER=ccache"
|
||||
$CMAKE_FLAGS += "-DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
|
||||
} elseif (Get-Command sccache -ErrorAction SilentlyContinue) {
|
||||
# Continue with local compiler if sccache has an error
|
||||
$env:SCCACHE_IGNORE_SERVER_IO_ERROR = "1"
|
||||
|
||||
|
||||
@@ -1,5 +1,15 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
export CI=${CI:-0}
|
||||
export USE_LTO=${USE_LTO:-0}
|
||||
export FORCE_PIC=${FORCE_PIC:-}
|
||||
UNAME_OS="$(uname -s)"
|
||||
UNAME_ARCH="$(uname -m)"
|
||||
|
||||
export CMAKE_FLAGS="${CMAKE_FLAGS:-}"
|
||||
|
||||
# Hack for buildkite sometimes not having the right path
|
||||
if [[ "${CI:-}" == "1" || "${CI:-}" == "true" ]]; then
|
||||
if [ -f ~/.bashrc ]; then
|
||||
@@ -7,7 +17,7 @@ if [[ "${CI:-}" == "1" || "${CI:-}" == "true" ]]; then
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ $(uname -s) == 'Darwin' ]]; then
|
||||
if [[ $UNAME_OS == 'Darwin' ]]; then
|
||||
export LLVM_VERSION=18
|
||||
else
|
||||
export LLVM_VERSION=16
|
||||
@@ -16,7 +26,7 @@ fi
|
||||
# this is the environment script for building bun's dependencies
|
||||
# it sets c compiler and flags
|
||||
export SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)
|
||||
export BUN_BASE_DIR=${BUN_BASE_DIR:-$(cd $SCRIPT_DIR && cd .. && pwd)}
|
||||
export BUN_BASE_DIR=${BUN_BASE_DIR:-$(cd "$SCRIPT_DIR" && cd .. && pwd)}
|
||||
export BUN_DEPS_DIR=${BUN_DEPS_DIR:-$BUN_BASE_DIR/src/deps}
|
||||
export BUN_DEPS_OUT_DIR=${BUN_DEPS_OUT_DIR:-$BUN_BASE_DIR/build/bun-deps}
|
||||
|
||||
@@ -24,7 +34,7 @@ export BUN_DEPS_OUT_DIR=${BUN_DEPS_OUT_DIR:-$BUN_BASE_DIR/build/bun-deps}
|
||||
export LC_CTYPE="en_US.UTF-8"
|
||||
export LC_ALL="en_US.UTF-8"
|
||||
|
||||
if [[ $(uname -s) == 'Darwin' ]]; then
|
||||
if [[ $UNAME_OS == 'Darwin' ]]; then
|
||||
export CXX="$(brew --prefix llvm)@$LLVM_VERSION/bin/clang++"
|
||||
export CC="$(brew --prefix llvm)@$LLVM_VERSION/bin/clang"
|
||||
export AR="$(brew --prefix llvm)@$LLVM_VERSION/bin/llvm-ar"
|
||||
@@ -47,26 +57,28 @@ export CPUS=${CPUS:-$(nproc || sysctl -n hw.ncpu || echo 1)}
|
||||
export RANLIB=${RANLIB:-$(which llvm-ranlib-$LLVM_VERSION || which llvm-ranlib || which ranlib)}
|
||||
|
||||
# on Linux, force using lld as the linker
|
||||
if [[ $(uname -s) == 'Linux' ]]; then
|
||||
if [[ $UNAME_OS == 'Linux' ]]; then
|
||||
export LD=${LD:-$(which ld.lld-$LLVM_VERSION || which ld.lld || which ld)}
|
||||
export LDFLAGS="${LDFLAGS} -fuse-ld=lld "
|
||||
export LDFLAGS="${LDFLAGS:-} -fuse-ld=lld "
|
||||
fi
|
||||
|
||||
export CMAKE_CXX_COMPILER=${CXX}
|
||||
export CMAKE_C_COMPILER=${CC}
|
||||
|
||||
export CFLAGS='-O3 -fno-exceptions -fvisibility=hidden -fvisibility-inlines-hidden -mno-omit-leaf-frame-pointer -fno-omit-frame-pointer -fno-asynchronous-unwind-tables -fno-unwind-tables '
|
||||
export CXXFLAGS='-O3 -fno-exceptions -fno-rtti -fvisibility=hidden -fvisibility-inlines-hidden -mno-omit-leaf-frame-pointer -fno-omit-frame-pointer -fno-asynchronous-unwind-tables -fno-unwind-tables -fno-c++-static-destructors '
|
||||
export FILE_PREFIX_MAP=" -ffile-prefix-map='${BUN_BASE_DIR}'=. -ffile-prefix-map='${BUN_DEPS_DIR}'=src/deps -ffile-prefix-map='${BUN_DEPS_OUT_DIR}'=src/deps "
|
||||
|
||||
export CFLAGS="-O3 -fno-exceptions -fvisibility=hidden -fvisibility-inlines-hidden -mno-omit-leaf-frame-pointer -fno-omit-frame-pointer -fno-asynchronous-unwind-tables -fno-unwind-tables $FILE_PREFIX_MAP "
|
||||
export CXXFLAGS="-O3 -fno-exceptions -fno-rtti -fvisibility=hidden -fvisibility-inlines-hidden -mno-omit-leaf-frame-pointer -fno-omit-frame-pointer -fno-asynchronous-unwind-tables -fno-unwind-tables -fno-c++-static-destructors $FILE_PREFIX_MAP "
|
||||
|
||||
# Add flags for LTO
|
||||
# We cannot enable LTO on macOS for dependencies because it requires -fuse-ld=lld and lld causes many segfaults on macOS (likely related to stack size)
|
||||
if [ "$BUN_ENABLE_LTO" == "1" ]; then
|
||||
if [ "$USE_LTO" == "1" ] || [ "$USE_LTO" == "ON" ]; then
|
||||
export CFLAGS="$CFLAGS -flto=full "
|
||||
export CXXFLAGS="$CXXFLAGS -flto=full -fwhole-program-vtables -fforce-emit-vtables "
|
||||
export LDFLAGS="$LDFLAGS -flto=full -fwhole-program-vtables -fforce-emit-vtables "
|
||||
export LDFLAGS="${LDFLAGS:-} -flto=full -fwhole-program-vtables -fforce-emit-vtables "
|
||||
fi
|
||||
|
||||
if [[ $(uname -s) == 'Linux' ]]; then
|
||||
if [[ $UNAME_OS == 'Linux' ]]; then
|
||||
export CFLAGS="$CFLAGS -ffunction-sections -fdata-sections -faddrsig "
|
||||
export CXXFLAGS="$CXXFLAGS -ffunction-sections -fdata-sections -faddrsig "
|
||||
export LDFLAGS="${LDFLAGS} -Wl,-z,norelro"
|
||||
@@ -74,7 +86,7 @@ fi
|
||||
|
||||
# Clang 18 on macOS needs to have -fno-define-target-os-macros to fix a zlib build issue
|
||||
# https://gitlab.kitware.com/cmake/cmake/-/issues/25755
|
||||
if [[ $(uname -s) == 'Darwin' && $LLVM_VERSION == '18' ]]; then
|
||||
if [[ $UNAME_OS == 'Darwin' && $LLVM_VERSION == '18' ]]; then
|
||||
export CFLAGS="$CFLAGS -fno-define-target-os-macros "
|
||||
export CXXFLAGS="$CXXFLAGS -fno-define-target-os-macros -D_LIBCXX_ENABLE_ASSERTIONS=0 -D_LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_NONE "
|
||||
fi
|
||||
@@ -83,12 +95,12 @@ fi
|
||||
if [ -n "$FORCE_PIC" ]; then
|
||||
export CFLAGS="$CFLAGS -fPIC "
|
||||
export CXXFLAGS="$CXXFLAGS -fPIC "
|
||||
elif [[ $(uname -s) == 'Linux' ]]; then
|
||||
elif [[ $UNAME_OS == 'Linux' ]]; then
|
||||
export CFLAGS="$CFLAGS -fno-pie -fno-pic "
|
||||
export CXXFLAGS="$CXXFLAGS -fno-pie -fno-pic "
|
||||
fi
|
||||
|
||||
if [[ $(uname -s) == 'Linux' && ($(uname -m) == 'aarch64' || $(uname -m) == 'arm64') ]]; then
|
||||
if [[ $UNAME_OS == 'Linux' && ($UNAME_ARCH == 'aarch64' || $UNAME_ARCH == 'arm64') ]]; then
|
||||
export CFLAGS="$CFLAGS -march=armv8-a+crc -mtune=ampere1 "
|
||||
export CXXFLAGS="$CXXFLAGS -march=armv8-a+crc -mtune=ampere1 "
|
||||
fi
|
||||
@@ -113,16 +125,16 @@ if [ -f "$CCACHE" ]; then
|
||||
)
|
||||
fi
|
||||
|
||||
if [[ $(uname -s) == 'Linux' ]]; then
|
||||
if [[ $UNAME_OS == 'Linux' ]]; then
|
||||
# Ensure we always use -std=gnu++20 on Linux
|
||||
CMAKE_FLAGS+=(-DCMAKE_CXX_EXTENSIONS=ON)
|
||||
fi
|
||||
|
||||
if [[ $(uname -s) == 'Darwin' ]]; then
|
||||
if [[ $UNAME_OS == 'Darwin' ]]; then
|
||||
export CMAKE_OSX_DEPLOYMENT_TARGET=${CMAKE_OSX_DEPLOYMENT_TARGET:-13.0}
|
||||
CMAKE_FLAGS+=(-DCMAKE_OSX_DEPLOYMENT_TARGET=${CMAKE_OSX_DEPLOYMENT_TARGET})
|
||||
export CFLAGS="$CFLAGS -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET} -D__DARWIN_NON_CANCELABLE=1 "
|
||||
export CXXFLAGS="$CXXFLAGS -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET} -D__DARWIN_NON_CANCELABLE=1 "
|
||||
export CFLAGS="$CFLAGS -mmacos-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET} -D__DARWIN_NON_CANCELABLE=1 "
|
||||
export CXXFLAGS="$CXXFLAGS -mmacos-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET} -D__DARWIN_NON_CANCELABLE=1 "
|
||||
fi
|
||||
|
||||
mkdir -p $BUN_DEPS_OUT_DIR
|
||||
@@ -133,7 +145,7 @@ if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
|
||||
if [ -n "$CCACHE" ]; then
|
||||
echo "Ccache: ${CCACHE}"
|
||||
fi
|
||||
if [[ $(uname -s) == 'Darwin' ]]; then
|
||||
if [[ $UNAME_OS == 'Darwin' ]]; then
|
||||
echo "OSX Deployment Target: ${CMAKE_OSX_DEPLOYMENT_TARGET}"
|
||||
fi
|
||||
fi
|
||||
|
||||
353
scripts/label-issue.ts
Normal file
@@ -0,0 +1,353 @@
|
||||
const labels = [
|
||||
{
|
||||
name: "build",
|
||||
description: "An issue related to building or compiling Bun (not bun build)",
|
||||
},
|
||||
{
|
||||
name: "bun:crypto",
|
||||
description: "",
|
||||
},
|
||||
{
|
||||
name: "bun:dns",
|
||||
description: "Bun's DNS resolver",
|
||||
},
|
||||
{
|
||||
name: "bun:ffi",
|
||||
description: "Something related with FFI in Bun",
|
||||
},
|
||||
{
|
||||
name: "bun:fs",
|
||||
description: "",
|
||||
},
|
||||
{
|
||||
name: "bun:glob",
|
||||
description: "Related to Bun.Glob",
|
||||
},
|
||||
{
|
||||
name: "bun:http",
|
||||
description: "Bun.serve",
|
||||
},
|
||||
{
|
||||
name: "bun:jsc",
|
||||
description: "",
|
||||
},
|
||||
{
|
||||
name: "bun:semver",
|
||||
description: "Bun.semver",
|
||||
},
|
||||
{
|
||||
name: "bun:serve",
|
||||
description: "Bun.serve and HTTP server",
|
||||
},
|
||||
{
|
||||
name: "bun:spawn",
|
||||
description: "Bun.spawn, Bun.spawnSync",
|
||||
},
|
||||
{
|
||||
name: "bun:sqlite",
|
||||
description: "Something to do with bun:sqlite",
|
||||
},
|
||||
{
|
||||
name: "bun:tcp",
|
||||
description: "TCP sockets in Bun's API (Bun.connect, Bun.listen)",
|
||||
},
|
||||
{
|
||||
name: "bun:udp",
|
||||
description: "UDP sockets in Bun's API (Bun.udpSocket())",
|
||||
},
|
||||
|
||||
{
|
||||
name: "bundler",
|
||||
description: "Something to do with the bundler",
|
||||
},
|
||||
{
|
||||
name: "bunx",
|
||||
description: "Something that has to do with `bunx`",
|
||||
},
|
||||
{
|
||||
name: "chore",
|
||||
description: "Task to improve the repository",
|
||||
},
|
||||
{
|
||||
name: "cjs",
|
||||
description: "CommonJS module",
|
||||
},
|
||||
{
|
||||
name: "cli",
|
||||
description: "Something to do with CLI arguments",
|
||||
},
|
||||
{
|
||||
name: "debugger",
|
||||
description: "Something to do with `bun --inspect` or the debugger",
|
||||
},
|
||||
{
|
||||
name: "docker",
|
||||
description: "An issue that occurs when running in Docker",
|
||||
},
|
||||
{
|
||||
name: "docs",
|
||||
description: "Improvements or additions to documentation",
|
||||
},
|
||||
{
|
||||
name: "ecosystem",
|
||||
description: "Something that relates to package or framework compatibility",
|
||||
},
|
||||
{
|
||||
name: "enhancement",
|
||||
description: "New feature or request",
|
||||
},
|
||||
{
|
||||
name: "idea",
|
||||
description: "",
|
||||
},
|
||||
{
|
||||
name: "infrastructure",
|
||||
description: "",
|
||||
},
|
||||
{
|
||||
name: "jest",
|
||||
description: "Something related to the `bun test` runner",
|
||||
},
|
||||
{
|
||||
name: "jsc",
|
||||
description: "Something related to JavaScriptCore, bun's JS engine",
|
||||
},
|
||||
{
|
||||
name: "lambda",
|
||||
description: "An issue related to the AWS Lambda layer",
|
||||
},
|
||||
{
|
||||
name: "linux",
|
||||
description: "An issue that only occurs on Linux",
|
||||
},
|
||||
{
|
||||
name: "macOS",
|
||||
description: "An issue that only occurs on macOS",
|
||||
},
|
||||
{
|
||||
name: "minifier",
|
||||
description: "bun's javascript minifier",
|
||||
},
|
||||
{
|
||||
name: "napi",
|
||||
description: "Compatibility with the native layer of Node.js",
|
||||
},
|
||||
{
|
||||
name: "node:crypto",
|
||||
description: "the node:crypto module",
|
||||
},
|
||||
{
|
||||
name: "node:dgram",
|
||||
description: "the node:dgram module",
|
||||
},
|
||||
{
|
||||
name: "node:dns",
|
||||
description: "the node:dns module",
|
||||
},
|
||||
{
|
||||
name: "node:fs",
|
||||
description: "the node:fs module",
|
||||
},
|
||||
{
|
||||
name: "node:http",
|
||||
description: "the node:http module",
|
||||
},
|
||||
{
|
||||
name: "node:http2",
|
||||
description: "the node:http2 module",
|
||||
},
|
||||
{
|
||||
name: "node:net",
|
||||
description: "the node:net module",
|
||||
},
|
||||
{
|
||||
name: "node:os",
|
||||
description: "the node:os module",
|
||||
},
|
||||
{
|
||||
name: "node:path",
|
||||
description: "the node:path module",
|
||||
},
|
||||
{
|
||||
name: "node:process",
|
||||
description: "the node:process module",
|
||||
},
|
||||
{
|
||||
name: "node:stream",
|
||||
description: "the node:stream module",
|
||||
},
|
||||
{
|
||||
name: "node:tty",
|
||||
description: "the node:tty module",
|
||||
},
|
||||
{
|
||||
name: "node:util",
|
||||
description: "the node:util module",
|
||||
},
|
||||
{
|
||||
name: "node:v8",
|
||||
description: "the node:v8 module",
|
||||
},
|
||||
{
|
||||
name: "node.js",
|
||||
description: "Compatibility with Node.js APIs",
|
||||
},
|
||||
{
|
||||
name: "npm",
|
||||
description: "Installing npm packages, npm registry, etc related to bun install",
|
||||
},
|
||||
{
|
||||
name: "npm:patch",
|
||||
description: "bun patch subcommand",
|
||||
},
|
||||
{
|
||||
name: "performance",
|
||||
description: "An issue with performance",
|
||||
},
|
||||
{
|
||||
name: "repl",
|
||||
description: "An issue with `bun repl`",
|
||||
},
|
||||
{
|
||||
name: "runtime",
|
||||
description: "Related to the JavaScript runtime",
|
||||
},
|
||||
{
|
||||
name: "shell",
|
||||
description: "Something to do with Bun as a shell",
|
||||
},
|
||||
{
|
||||
name: "sourcemaps",
|
||||
description: "Source maps",
|
||||
},
|
||||
{
|
||||
name: "transpiler",
|
||||
description: "parser || printer",
|
||||
},
|
||||
{
|
||||
name: "types",
|
||||
description: "An issue with TypeScript types",
|
||||
},
|
||||
{
|
||||
name: "typescript",
|
||||
description: "Something for TypeScript",
|
||||
},
|
||||
{
|
||||
name: "vscode",
|
||||
description: "Something to do with the VSCode extension",
|
||||
},
|
||||
{
|
||||
name: "wasm",
|
||||
description: "Something that related to WASM or WASI support",
|
||||
},
|
||||
{
|
||||
name: "web-api",
|
||||
description: "Something that relates to a standard Web API",
|
||||
},
|
||||
{
|
||||
name: "web:blob",
|
||||
description: "Blob",
|
||||
},
|
||||
{
|
||||
name: "web:crypto",
|
||||
description: "Related to crypto, SubtleCrypto",
|
||||
},
|
||||
{
|
||||
name: "web:encoding",
|
||||
description: "TextEncoder, TextDecoder, etc.",
|
||||
},
|
||||
{
|
||||
name: "web:fetch",
|
||||
description: "fetch api",
|
||||
},
|
||||
{
|
||||
name: "web:js",
|
||||
description: "",
|
||||
},
|
||||
{
|
||||
name: "web:performance",
|
||||
description: "Performance object",
|
||||
},
|
||||
{
|
||||
name: "web:stream",
|
||||
description: "Related to ReadableStream, WritableStream, etc.",
|
||||
},
|
||||
{
|
||||
name: "web:url",
|
||||
description: "Related to URL",
|
||||
},
|
||||
{
|
||||
name: "web:websocket",
|
||||
description: "Related to WebSocket client API",
|
||||
},
|
||||
{
|
||||
name: "windows",
|
||||
description: "An issue that only occurs on Windows",
|
||||
},
|
||||
{
|
||||
name: "wintercg",
|
||||
description: "Web-interoperable Runtimes Community Group compatiblity",
|
||||
},
|
||||
];
|
||||
|
||||
import { Anthropic } from "@anthropic-ai/sdk";
|
||||
|
||||
const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
|
||||
|
||||
async function categorizeLabelsByClaudeAI(
|
||||
issueDetails: { title: string; body: string },
|
||||
labels: Array<{ name: string; description: string }>,
|
||||
) {
|
||||
const response = await anthropic.messages.create({
|
||||
model: "claude-3-5-sonnet-20240620",
|
||||
max_tokens: 2048,
|
||||
system: `Given this list of labels:
|
||||
${labels.map(label => `- ${label.name}: ${label.description}`).join("\n")}
|
||||
|
||||
Please analyze the bug report and return a JSON array of label names that are most relevant to this issue. Only include labels that are highly relevant.
|
||||
|
||||
Only output VALID JSON. It's okay if there are no relevant labels.
|
||||
|
||||
The output should be a JSON array like so, with NO OTHER TEXT:
|
||||
|
||||
["label1", "label2", "label3"]
|
||||
`,
|
||||
messages: [
|
||||
{
|
||||
role: "user",
|
||||
content: JSON.stringify({ title: issueDetails.title, body: issueDetails.body }, null, 2),
|
||||
},
|
||||
],
|
||||
});
|
||||
let text = response.content[0].text;
|
||||
const start = text?.indexOf("[");
|
||||
if (start !== -1) {
|
||||
text = text.slice(start);
|
||||
}
|
||||
|
||||
return JSON.parse(text);
|
||||
}
|
||||
|
||||
const issue = {
|
||||
title: process.env.GITHUB_ISSUE_TITLE!,
|
||||
body: process.env.GITHUB_ISSUE_BODY!,
|
||||
};
|
||||
let relevantLabels = await categorizeLabelsByClaudeAI(issue, labels);
|
||||
if (!relevantLabels?.length) {
|
||||
console.error("No relevant labels found");
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
for (let i = 0; i < relevantLabels.length; i++) {
|
||||
if (!labels.find(label => label.name === relevantLabels[i])) {
|
||||
relevantLabels.splice(i, 1);
|
||||
i--;
|
||||
}
|
||||
}
|
||||
|
||||
if (relevantLabels.length === 0) {
|
||||
console.error("No relevant labels found");
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
console.write(relevantLabels.join(","));
|
||||
@@ -21,7 +21,7 @@ import {
|
||||
} from "node:fs";
|
||||
import { spawn, spawnSync } from "node:child_process";
|
||||
import { tmpdir, hostname, userInfo, homedir } from "node:os";
|
||||
import { join, basename, dirname, relative } from "node:path";
|
||||
import { join, basename, dirname, relative, sep } from "node:path";
|
||||
import { normalize as normalizeWindows } from "node:path/win32";
|
||||
import { isIP } from "node:net";
|
||||
import { parseArgs } from "node:util";
|
||||
@@ -104,15 +104,19 @@ async function printInfo() {
|
||||
console.log("Glibc:", getGlibcVersion());
|
||||
}
|
||||
console.log("Hostname:", getHostname());
|
||||
if (isCloud) {
|
||||
console.log("Public IP:", await getPublicIp());
|
||||
console.log("Cloud:", getCloud());
|
||||
}
|
||||
if (isCI) {
|
||||
console.log("CI:", getCI());
|
||||
console.log("Shard:", options["shard"], "/", options["max-shards"]);
|
||||
console.log("Build URL:", getBuildUrl());
|
||||
console.log("Environment:", process.env);
|
||||
if (isCloud) {
|
||||
console.log("Public IP:", await getPublicIp());
|
||||
console.log("Cloud:", getCloud());
|
||||
}
|
||||
const tailscaleIp = await getTailscaleIp();
|
||||
if (tailscaleIp) {
|
||||
console.log("Tailscale IP:", tailscaleIp);
|
||||
}
|
||||
}
|
||||
console.log("Cwd:", cwd);
|
||||
console.log("Tmpdir:", tmpPath);
|
||||
@@ -130,7 +134,32 @@ async function printInfo() {
|
||||
async function runTests() {
|
||||
let execPath;
|
||||
if (options["step"]) {
|
||||
execPath = await getExecPathFromBuildKite(options["step"]);
|
||||
downloadLoop: for (let i = 0; i < 10; i++) {
|
||||
execPath = await getExecPathFromBuildKite(options["step"]);
|
||||
for (let j = 0; j < 10; j++) {
|
||||
const { error } = spawnSync(execPath, ["--version"], {
|
||||
encoding: "utf-8",
|
||||
timeout: spawnTimeout,
|
||||
env: {
|
||||
PATH: process.env.PATH,
|
||||
BUN_DEBUG_QUIET_LOGS: 1,
|
||||
},
|
||||
});
|
||||
if (!error) {
|
||||
break;
|
||||
}
|
||||
const { code } = error;
|
||||
if (code === "EBUSY") {
|
||||
console.log("Bun appears to be busy, retrying...");
|
||||
continue;
|
||||
}
|
||||
if (code === "UNKNOWN") {
|
||||
console.log("Bun appears to be corrupted, downloading again...");
|
||||
rmSync(execPath, { force: true });
|
||||
continue downloadLoop;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
execPath = getExecPath(options["exec-path"]);
|
||||
}
|
||||
@@ -408,10 +437,12 @@ async function spawnBun(execPath, { args, cwd, timeout, env, stdout, stderr }) {
|
||||
BUN_FEATURE_FLAG_INTERNAL_FOR_TESTING: "1",
|
||||
BUN_DEBUG_QUIET_LOGS: "1",
|
||||
BUN_GARBAGE_COLLECTOR_LEVEL: "1",
|
||||
BUN_ENABLE_CRASH_REPORTING: "1",
|
||||
BUN_ENABLE_CRASH_REPORTING: "0", // change this to '1' if https://github.com/oven-sh/bun/issues/13012 is implemented
|
||||
BUN_RUNTIME_TRANSPILER_CACHE_PATH: "0",
|
||||
BUN_INSTALL_CACHE_DIR: tmpdirPath,
|
||||
SHELLOPTS: isWindows ? "igncr" : undefined, // ignore "\r" on Windows
|
||||
// Used in Node.js tests.
|
||||
TEST_TMPDIR: tmpdirPath,
|
||||
};
|
||||
if (env) {
|
||||
Object.assign(bunEnv, env);
|
||||
@@ -500,10 +531,11 @@ async function spawnBun(execPath, { args, cwd, timeout, env, stdout, stderr }) {
|
||||
async function spawnBunTest(execPath, testPath) {
|
||||
const timeout = getTestTimeout(testPath);
|
||||
const perTestTimeout = Math.ceil(timeout / 2);
|
||||
const isReallyTest = isTestStrict(testPath);
|
||||
const { ok, error, stdout } = await spawnBun(execPath, {
|
||||
args: ["test", `--timeout=${perTestTimeout}`, testPath],
|
||||
args: isReallyTest ? ["test", `--timeout=${perTestTimeout}`, testPath] : [testPath],
|
||||
cwd: cwd,
|
||||
timeout,
|
||||
timeout: isReallyTest ? timeout : 30_000,
|
||||
env: {
|
||||
GITHUB_ACTIONS: "true", // always true so annotations are parsed
|
||||
},
|
||||
@@ -782,6 +814,12 @@ function isJavaScript(path) {
|
||||
* @returns {boolean}
|
||||
*/
|
||||
function isTest(path) {
|
||||
if (path.replaceAll(sep, "/").includes("/test-cluster-") && path.endsWith(".js")) return true;
|
||||
if (path.replaceAll(sep, "/").startsWith("js/node/cluster/test-") && path.endsWith(".ts")) return true;
|
||||
return isTestStrict(path);
|
||||
}
|
||||
|
||||
function isTestStrict(path) {
|
||||
return isJavaScript(path) && /\.test|spec\./.test(basename(path));
|
||||
}
|
||||
|
||||
@@ -973,7 +1011,7 @@ async function getExecPathFromBuildKite(target) {
|
||||
if (isWindows) {
|
||||
await spawnSafe({
|
||||
command: "powershell",
|
||||
args: ["-Command", `Expand-Archive -Path ${zipPath} -DestinationPath ${releasePath}`],
|
||||
args: ["-Command", `Expand-Archive -Path ${zipPath} -DestinationPath ${releasePath} -Force`],
|
||||
});
|
||||
} else {
|
||||
await spawnSafe({
|
||||
@@ -1298,6 +1336,26 @@ async function getPublicIp() {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @returns {string | undefined}
|
||||
*/
|
||||
function getTailscaleIp() {
|
||||
try {
|
||||
const { status, stdout } = spawnSync("tailscale", ["ip", "--1"], {
|
||||
encoding: "utf-8",
|
||||
timeout: spawnTimeout,
|
||||
env: {
|
||||
PATH: process.env.PATH,
|
||||
},
|
||||
});
|
||||
if (status === 0) {
|
||||
return stdout.trim();
|
||||
}
|
||||
} catch {
|
||||
// ...
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {...string} paths
|
||||
* @returns {string}
|
||||
|
||||
@@ -12,8 +12,13 @@ try {
|
||||
if (!($WebKit) -and (-not (Test-Path "src/bun.js/WebKit/.git"))) {
|
||||
$Names = $Names | Where-Object { $_ -ne 'src/bun.js/WebKit' }
|
||||
}
|
||||
if ($env:FORCE_UPDATE_SUBMODULES -eq "1") {
|
||||
# Set --force in CI.
|
||||
git submodule update --init --recursive --progress --depth 1 --checkout --force @NAMES
|
||||
} else {
|
||||
git submodule update --init --recursive --progress --depth 1 --checkout @NAMES
|
||||
}
|
||||
|
||||
git submodule update --init --recursive --progress --depth 1 --checkout @NAMES
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
throw "git submodule update failed"
|
||||
}
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
FORCE_UPDATE_SUBMODULES=${FORCE_UPDATE_SUBMODULES:-0}
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
cd ..
|
||||
NAMES=$(cat .gitmodules | grep 'path = ' | awk '{print $3}')
|
||||
|
||||
if ! [ "$1" == '--webkit' ]; then
|
||||
if ! [ "${1:-}" == '--webkit' ]; then
|
||||
# we will exclude webkit unless you explicitly clone it yourself (a huge download)
|
||||
if [ ! -e "src/bun.js/WebKit/.git" ]; then
|
||||
NAMES=$(echo "$NAMES" | grep -v 'WebKit')
|
||||
@@ -12,3 +17,9 @@ fi
|
||||
|
||||
set -exo pipefail
|
||||
git submodule update --init --recursive --progress --depth=1 --checkout $NAMES
|
||||
if [ "$FORCE_UPDATE_SUBMODULES" == "1" ]; then
|
||||
# Set --force in CI.
|
||||
git submodule update --init --recursive --progress --depth=1 --checkout --force $NAMES
|
||||
else
|
||||
git submodule update --init --recursive --progress --depth=1 --checkout $NAMES
|
||||
fi
|
||||
|
||||
@@ -9,6 +9,8 @@ const Output = bun.Output;
const Global = bun.Global;
const Environment = bun.Environment;
const Syscall = bun.sys;
const SourceMap = bun.sourcemap;
const StringPointer = bun.StringPointer;

const w = std.os.windows;

@@ -33,6 +35,8 @@ pub const StandaloneModuleGraph = struct {

pub const base_public_path = targetBasePublicPath(Environment.os, "");

pub const base_public_path_with_default_suffix = targetBasePublicPath(Environment.os, "root/");

pub fn targetBasePublicPath(target: Environment.OperatingSystem, comptime suffix: [:0]const u8) [:0]const u8 {
return switch (target) {
.windows => "B:/~BUN/" ++ suffix,
@@ -54,6 +58,11 @@ pub const StandaloneModuleGraph = struct {
if (!isBunStandaloneFilePath(base_path)) {
return null;
}

return this.findAssumeStandalonePath(name);
}

pub fn findAssumeStandalonePath(this: *const StandaloneModuleGraph, name: []const u8) ?*File {
if (Environment.isWindows) {
var normalized_buf: bun.PathBuffer = undefined;
const normalized = bun.path.platformToPosixBuf(u8, name, &normalized_buf);
@@ -64,21 +73,55 @@ pub const StandaloneModuleGraph = struct {

pub const CompiledModuleGraphFile = struct {
name: Schema.StringPointer = .{},
loader: bun.options.Loader = .file,
contents: Schema.StringPointer = .{},
sourcemap: Schema.StringPointer = .{},
encoding: Encoding = .latin1,
loader: bun.options.Loader = .file,
};

pub const Encoding = enum(u8) {
binary = 0,

latin1 = 1,

// Not used yet.
utf8 = 2,
};

pub const File = struct {
name: []const u8 = "",
loader: bun.options.Loader,
contents: []const u8 = "",
contents: [:0]const u8 = "",
sourcemap: LazySourceMap,
cached_blob: ?*bun.JSC.WebCore.Blob = null,
encoding: Encoding = .binary,
wtf_string: bun.String = bun.String.empty,

pub fn lessThanByIndex(ctx: []const File, lhs_i: u32, rhs_i: u32) bool {
const lhs = ctx[lhs_i];
const rhs = ctx[rhs_i];
return bun.strings.cmpStringsAsc({}, lhs.name, rhs.name);
}

pub fn toWTFString(this: *File) bun.String {
if (this.wtf_string.isEmpty()) {
switch (this.encoding) {
.binary, .utf8 => {
this.wtf_string = bun.String.createUTF8(this.contents);
},
.latin1 => {
this.wtf_string = bun.String.createStaticExternal(this.contents, true);
},
}
}

// We don't want this to free.
return this.wtf_string.dupeRef();
}

pub fn blob(this: *File, globalObject: *bun.JSC.JSGlobalObject) *bun.JSC.WebCore.Blob {
if (this.cached_blob == null) {
var store = bun.JSC.WebCore.Blob.Store.init(@constCast(this.contents), bun.default_allocator);
const store = bun.JSC.WebCore.Blob.Store.init(@constCast(this.contents), bun.default_allocator);
// make it never free
store.ref();

@@ -92,8 +135,16 @@ pub const StandaloneModuleGraph = struct {
b.content_type_allocated = false;
}

// The real name goes here:
store.data.bytes.stored_name = bun.PathString.init(this.name);

// The pretty name goes here:
if (strings.hasPrefixComptime(this.name, base_public_path_with_default_suffix)) {
b.name = bun.String.createUTF8(this.name[base_public_path_with_default_suffix.len..]);
} else if (this.name.len > 0) {
b.name = bun.String.createUTF8(this.name);
}

this.cached_blob = b;
}

@@ -102,24 +153,61 @@ pub const StandaloneModuleGraph = struct {
};

pub const LazySourceMap = union(enum) {
compressed: []const u8,
decompressed: bun.sourcemap,
serialized: SerializedSourceMap,
parsed: *SourceMap.ParsedSourceMap,
none,

pub fn load(this: *LazySourceMap, log: *bun.logger.Log, allocator: std.mem.Allocator) !*bun.sourcemap {
if (this.* == .decompressed) return &this.decompressed;
/// It probably is not possible to run two decoding jobs on the same file
var init_lock: bun.Lock = .{};

var decompressed = try allocator.alloc(u8, bun.zstd.getDecompressedSize(this.compressed));
const result = bun.zstd.decompress(decompressed, this.compressed);
if (result == .err) {
allocator.free(decompressed);
log.addError(null, bun.logger.Loc.Empty, bun.span(result.err)) catch unreachable;
return error.@"Failed to decompress sourcemap";
}
errdefer allocator.free(decompressed);
const bytes = decompressed[0..result.success];
pub fn load(this: *LazySourceMap) ?*SourceMap.ParsedSourceMap {
init_lock.lock();
defer init_lock.unlock();

this.* = .{ .decompressed = try bun.sourcemap.parse(allocator, &bun.logger.Source.initPathString("sourcemap.json", bytes), log) };
return &this.decompressed;
return switch (this.*) {
.none => null,
.parsed => |map| map,
.serialized => |serialized| {
var stored = switch (SourceMap.Mapping.parse(
bun.default_allocator,
serialized.mappingVLQ(),
null,
std.math.maxInt(i32),
std.math.maxInt(i32),
)) {
.success => |x| x,
.fail => {
this.* = .none;
return null;
},
};

const source_files = serialized.sourceFileNames();
const slices = bun.default_allocator.alloc(?[]u8, source_files.len * 2) catch bun.outOfMemory();

const file_names: [][]const u8 = @ptrCast(slices[0..source_files.len]);
const decompressed_contents_slice = slices[source_files.len..][0..source_files.len];
for (file_names, source_files) |*dest, src| {
dest.* = src.slice(serialized.bytes);
}

@memset(decompressed_contents_slice, null);

const data = bun.new(SerializedSourceMap.Loaded, .{
.map = serialized,
.decompressed_files = decompressed_contents_slice,
});

stored.external_source_names = file_names;
stored.underlying_provider = .{ .data = @truncate(@intFromPtr(data)) };
stored.is_standalone_module_graph = true;

const parsed = stored.new(); // allocate this on the heap
parsed.ref(); // never free
this.* = .{ .parsed = parsed };
return parsed;
},
};
}
};

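The new LazySourceMap keeps the serialized map untouched until something actually asks for it: the union starts out as `.serialized` (or `.none`), and the first successful `load()` parses the mappings under `init_lock` and swaps the value to `.parsed`, so later calls return the cached pointer. A minimal self-contained sketch of that parse-once-and-cache shape (the types and the placeholder "parse" step here are illustrative, not bun's API):

const std = @import("std");

// Hypothetical stand-in for a parsed source map.
const Parsed = struct { mapping_count: usize };

const LazyMap = union(enum) {
    serialized: []const u8,
    parsed: Parsed,
    none,

    // One lock shared by all instances, mirroring `init_lock` above.
    var init_lock: std.Thread.Mutex = .{};

    fn load(this: *LazyMap) ?*Parsed {
        init_lock.lock();
        defer init_lock.unlock();

        switch (this.*) {
            .none => return null,
            .parsed => return &this.parsed,
            .serialized => |bytes| {
                // Placeholder "parse": count semicolons, like VLQ line separators.
                const count = std.mem.count(u8, bytes, ";");
                this.* = .{ .parsed = .{ .mapping_count = count } };
                return &this.parsed;
            },
        }
    }
};

pub fn main() void {
    var map: LazyMap = .{ .serialized = "AACA;AACA;" };
    std.debug.print("{d} mappings\n", .{map.load().?.mapping_count}); // parsed once
    _ = map.load(); // second call hits the cached `.parsed` branch
}
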
@@ -131,7 +219,7 @@ pub const StandaloneModuleGraph = struct {

const trailer = "\n---- Bun! ----\n";

pub fn fromBytes(allocator: std.mem.Allocator, raw_bytes: []const u8, offsets: Offsets) !StandaloneModuleGraph {
pub fn fromBytes(allocator: std.mem.Allocator, raw_bytes: []u8, offsets: Offsets) !StandaloneModuleGraph {
if (raw_bytes.len == 0) return StandaloneModuleGraph{
.files = bun.StringArrayHashMap(File).init(allocator),
};
@@ -147,18 +235,23 @@ pub const StandaloneModuleGraph = struct {
try modules.ensureTotalCapacity(modules_list.len);
for (modules_list) |module| {
modules.putAssumeCapacity(
sliceTo(raw_bytes, module.name),
sliceToZ(raw_bytes, module.name),
File{
.name = sliceTo(raw_bytes, module.name),
.name = sliceToZ(raw_bytes, module.name),
.loader = module.loader,
.contents = sliceTo(raw_bytes, module.contents),
.sourcemap = LazySourceMap{
.compressed = sliceTo(raw_bytes, module.sourcemap),
},
.contents = sliceToZ(raw_bytes, module.contents),
.sourcemap = if (module.sourcemap.length > 0)
.{ .serialized = .{
.bytes = @alignCast(sliceTo(raw_bytes, module.sourcemap)),
} }
else
.none,
},
);
}

modules.lockPointers(); // make the pointers stable forever

return StandaloneModuleGraph{
.bytes = raw_bytes[0..offsets.byte_count],
.files = modules,
@@ -172,18 +265,29 @@ pub const StandaloneModuleGraph = struct {
return bytes[ptr.offset..][0..ptr.length];
}

fn sliceToZ(bytes: []const u8, ptr: bun.StringPointer) [:0]const u8 {
if (ptr.length == 0) return "";

return bytes[ptr.offset..][0..ptr.length :0];
}

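`sliceToZ` can hand out `[:0]const u8` slices pointing straight into the embedded bytes because the writer now reserves a NUL after every name and contents blob (the `countZ`/`appendCountZ` calls in `toBytes` below). A small self-contained sketch of that sentinel re-slicing against a toy buffer, with `Ptr` standing in for `bun.StringPointer`:

const std = @import("std");

// Toy equivalent of bun.StringPointer: an (offset, length) pair into a flat buffer.
const Ptr = struct { offset: u32, length: u32 };

fn sliceToZ(bytes: []const u8, ptr: Ptr) [:0]const u8 {
    if (ptr.length == 0) return "";
    // `:0` asserts (with a safety check) that bytes[offset + length] == 0.
    return bytes[ptr.offset..][0..ptr.length :0];
}

pub fn main() void {
    // "hello" followed by its NUL terminator, then unrelated data.
    const buffer = "hello\x00world\x00";
    const name = sliceToZ(buffer, .{ .offset = 0, .length = 5 });
    std.debug.print("{s} (len={d})\n", .{ name, name.len }); // hello (len=5)
}
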
pub fn toBytes(allocator: std.mem.Allocator, prefix: []const u8, output_files: []const bun.options.OutputFile) ![]u8 {
var serialize_trace = bun.tracy.traceNamed(@src(), "StandaloneModuleGraph.serialize");
defer serialize_trace.end();

var entry_point_id: ?usize = null;
var string_builder = bun.StringBuilder{};
var module_count: usize = 0;
for (output_files, 0..) |output_file, i| {
string_builder.count(output_file.dest_path);
string_builder.count(prefix);
string_builder.countZ(output_file.dest_path);
string_builder.countZ(prefix);
if (output_file.value == .buffer) {
if (output_file.output_kind == .sourcemap) {
string_builder.cap += bun.zstd.compressBound(output_file.value.buffer.bytes.len);
// This is an over-estimation to ensure that we allocate
// enough memory for the source-map contents. Calculating
// the exact amount is not possible without allocating as it
// involves a JSON parser.
string_builder.cap += output_file.value.buffer.bytes.len * 2;
} else {
if (entry_point_id == null) {
if (output_file.output_kind == .@"entry-point") {
@@ -191,7 +295,7 @@ pub const StandaloneModuleGraph = struct {
}
}

string_builder.count(output_file.value.buffer.bytes);
string_builder.countZ(output_file.value.buffer.bytes);
module_count += 1;
}
}
@@ -202,16 +306,19 @@ pub const StandaloneModuleGraph = struct {
string_builder.cap += @sizeOf(CompiledModuleGraphFile) * output_files.len;
string_builder.cap += trailer.len;
string_builder.cap += 16;

{
var offsets_ = Offsets{};
string_builder.cap += std.mem.asBytes(&offsets_).len;
}
string_builder.cap += @sizeOf(Offsets);

try string_builder.allocate(allocator);

var modules = try std.ArrayList(CompiledModuleGraphFile).initCapacity(allocator, module_count);

var source_map_header_list = std.ArrayList(u8).init(allocator);
defer source_map_header_list.deinit();
var source_map_string_list = std.ArrayList(u8).init(allocator);
defer source_map_string_list.deinit();
var source_map_arena = bun.ArenaAllocator.init(allocator);
defer source_map_arena.deinit();

for (output_files) |output_file| {
if (output_file.output_kind == .sourcemap) {
continue;
@@ -224,25 +331,36 @@ pub const StandaloneModuleGraph = struct {
const dest_path = bun.strings.removeLeadingDotSlash(output_file.dest_path);

var module = CompiledModuleGraphFile{
.name = string_builder.fmtAppendCount("{s}{s}", .{
.name = string_builder.fmtAppendCountZ("{s}{s}", .{
prefix,
dest_path,
}),
.loader = output_file.loader,
.contents = string_builder.appendCount(output_file.value.buffer.bytes),
.contents = string_builder.appendCountZ(output_file.value.buffer.bytes),
.encoding = switch (output_file.loader) {
.js, .jsx, .ts, .tsx => .latin1,
else => .binary,
},
};
if (output_file.source_map_index != std.math.maxInt(u32)) {
const remaining_slice = string_builder.allocatedSlice()[string_builder.len..];
const compressed_result = bun.zstd.compress(remaining_slice, output_files[output_file.source_map_index].value.buffer.bytes, 1);
if (compressed_result == .err) {
bun.Output.panic("Unexpected error compressing sourcemap: {s}", .{bun.span(compressed_result.err)});
}
module.sourcemap = string_builder.add(compressed_result.success);
defer source_map_header_list.clearRetainingCapacity();
defer source_map_string_list.clearRetainingCapacity();
_ = source_map_arena.reset(.retain_capacity);
try serializeJsonSourceMapForStandalone(
&source_map_header_list,
&source_map_string_list,
source_map_arena.allocator(),
output_files[output_file.source_map_index].value.buffer.bytes,
);
module.sourcemap = string_builder.addConcat(&.{
source_map_header_list.items,
source_map_string_list.items,
});
}
modules.appendAssumeCapacity(module);
}

var offsets = Offsets{
const offsets = Offsets{
.entry_point_id = @as(u32, @truncate(entry_point_id.?)),
.modules_ptr = string_builder.appendCount(std.mem.sliceAsBytes(modules.items)),
.byte_count = string_builder.len,
@@ -251,7 +369,20 @@ pub const StandaloneModuleGraph = struct {
_ = string_builder.append(std.mem.asBytes(&offsets));
_ = string_builder.append(trailer);

return string_builder.ptr.?[0..string_builder.len];
const output_bytes = string_builder.ptr.?[0..string_builder.len];

if (comptime Environment.isDebug) {
// An expensive sanity check:
var graph = try fromBytes(allocator, @alignCast(output_bytes), offsets);
defer {
graph.files.unlockPointers();
graph.files.deinit();
}

bun.assert_eql(graph.files.count(), modules.items.len);
}

return output_bytes;
}

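`toBytes` drives bun's `StringBuilder` in two passes: first every string is counted (plus deliberate over-estimates such as `compressBound` and the `* 2` for source maps) so a single buffer can be allocated up front, then the same strings are appended and each append yields the offset/length record stored in `CompiledModuleGraphFile`. A rough self-contained sketch of the count-then-append pattern, not bun's actual `StringBuilder` API:

const std = @import("std");

// Minimal two-pass builder: pass 1 sums lengths, pass 2 copies into one buffer.
const Builder = struct {
    cap: usize = 0,
    len: usize = 0,
    buf: []u8 = undefined,

    fn count(b: *Builder, s: []const u8) void {
        b.cap += s.len;
    }

    fn allocate(b: *Builder, allocator: std.mem.Allocator) !void {
        b.buf = try allocator.alloc(u8, b.cap);
    }

    // Returns the (offset, length) of the appended bytes, like a StringPointer.
    fn append(b: *Builder, s: []const u8) struct { offset: usize, length: usize } {
        const offset = b.len;
        @memcpy(b.buf[offset..][0..s.len], s);
        b.len += s.len;
        return .{ .offset = offset, .length = s.len };
    }
};

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    var b = Builder{};
    const parts = [_][]const u8{ "entry.js", "console.log(1);" };
    for (parts) |p| b.count(p); // pass 1: measure
    try b.allocate(allocator);
    for (parts) |p| _ = b.append(p); // pass 2: write
    defer allocator.free(b.buf);
    std.debug.print("{s}\n", .{b.buf[0..b.len]});
}
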
const page_size = if (Environment.isLinux and Environment.isAarch64)
@@ -777,4 +908,172 @@ pub const StandaloneModuleGraph = struct {
else => @compileError("TODO"),
}
}

/// Source map serialization in the bundler is specially designed to be
/// loaded in memory as is. Source contents are compressed with ZSTD to
/// reduce the file size, and mappings are stored as uncompressed VLQ.
pub const SerializedSourceMap = struct {
bytes: []const u8,

/// Following the header bytes:
/// - source_files_count number of StringPointer, file names
/// - source_files_count number of StringPointer, zstd compressed contents
/// - the mapping data, `map_vlq_length` bytes
/// - all the StringPointer contents
pub const Header = extern struct {
source_files_count: u32,
map_bytes_length: u32,
};

pub fn header(map: SerializedSourceMap) *align(1) const Header {
return @ptrCast(map.bytes.ptr);
}

pub fn mappingVLQ(map: SerializedSourceMap) []const u8 {
const head = map.header();
const start = @sizeOf(Header) + head.source_files_count * @sizeOf(StringPointer) * 2;
return map.bytes[start..][0..head.map_bytes_length];
}

pub fn sourceFileNames(map: SerializedSourceMap) []align(1) const StringPointer {
const head = map.header();
return @as([*]align(1) const StringPointer, @ptrCast(map.bytes[@sizeOf(Header)..]))[0..head.source_files_count];
}

fn compressedSourceFiles(map: SerializedSourceMap) []align(1) const StringPointer {
const head = map.header();
return @as([*]align(1) const StringPointer, @ptrCast(map.bytes[@sizeOf(Header)..]))[head.source_files_count..][0..head.source_files_count];
}

/// Once loaded, this map stores additional data for keeping track of source code.
pub const Loaded = struct {
map: SerializedSourceMap,

/// Only decompress source code once! Once a file is decompressed,
/// it is stored here. Decompression failures are stored as an empty
/// string, which will be treated as "no contents".
decompressed_files: []?[]u8,

pub fn sourceFileContents(this: Loaded, index: usize) ?[]const u8 {
if (this.decompressed_files[index]) |decompressed| {
return if (decompressed.len == 0) null else decompressed;
}

const compressed_codes = this.map.compressedSourceFiles();
const compressed_file = compressed_codes[@intCast(index)].slice(this.map.bytes);
const size = bun.zstd.getDecompressedSize(compressed_file);

const bytes = bun.default_allocator.alloc(u8, size) catch bun.outOfMemory();
const result = bun.zstd.decompress(bytes, compressed_file);

if (result == .err) {
bun.Output.warn("Source map decompression error: {s}", .{result.err});
bun.default_allocator.free(bytes);
this.decompressed_files[index] = "";
return null;
}

const data = bytes[0..result.success];
this.decompressed_files[index] = data;
return data;
}
};
};

pub fn serializeJsonSourceMapForStandalone(
header_list: *std.ArrayList(u8),
string_payload: *std.ArrayList(u8),
arena: std.mem.Allocator,
json_source: []const u8,
) !void {
const out = header_list.writer();
const json_src = bun.logger.Source.initPathString("sourcemap.json", json_source);
var log = bun.logger.Log.init(arena);
defer log.deinit();

// the allocator given to the JS parser is not respected for all parts
// of the parse, so we need to remember to reset the ast store
bun.JSAst.Expr.Data.Store.reset();
bun.JSAst.Stmt.Data.Store.reset();
defer {
bun.JSAst.Expr.Data.Store.reset();
bun.JSAst.Stmt.Data.Store.reset();
}
var json = bun.JSON.ParseJSON(&json_src, &log, arena) catch
return error.InvalidSourceMap;

const mappings_str = json.get("mappings") orelse
return error.InvalidSourceMap;
if (mappings_str.data != .e_string)
return error.InvalidSourceMap;
const sources_content = switch ((json.get("sourcesContent") orelse return error.InvalidSourceMap).data) {
.e_array => |arr| arr,
else => return error.InvalidSourceMap,
};
const sources_paths = switch ((json.get("sources") orelse return error.InvalidSourceMap).data) {
.e_array => |arr| arr,
else => return error.InvalidSourceMap,
};
if (sources_content.items.len != sources_paths.items.len) {
return error.InvalidSourceMap;
}

const map_vlq: []const u8 = mappings_str.data.e_string.slice(arena);

try out.writeInt(u32, sources_paths.items.len, .little);
try out.writeInt(u32, @intCast(map_vlq.len), .little);

const string_payload_start_location = @sizeOf(u32) +
@sizeOf(u32) +
@sizeOf(bun.StringPointer) * sources_content.items.len * 2 + // path + source
map_vlq.len;

for (sources_paths.items.slice()) |item| {
if (item.data != .e_string)
return error.InvalidSourceMap;

const decoded = try item.data.e_string.stringDecodedUTF8(arena);

const offset = string_payload.items.len;
try string_payload.appendSlice(decoded);

const slice = bun.StringPointer{
.offset = @intCast(offset + string_payload_start_location),
.length = @intCast(string_payload.items.len - offset),
};
try out.writeInt(u32, slice.offset, .little);
try out.writeInt(u32, slice.length, .little);
}

for (sources_content.items.slice()) |item| {
if (item.data != .e_string)
return error.InvalidSourceMap;

const utf8 = try item.data.e_string.stringDecodedUTF8(arena);
defer arena.free(utf8);

const offset = string_payload.items.len;

const bound = bun.zstd.compressBound(utf8.len);
try string_payload.ensureUnusedCapacity(bound);

const unused = string_payload.unusedCapacitySlice();
const compressed_result = bun.zstd.compress(unused, utf8, 1);
if (compressed_result == .err) {
bun.Output.panic("Unexpected error compressing sourcemap: {s}", .{bun.span(compressed_result.err)});
}
string_payload.items.len += compressed_result.success;

const slice = bun.StringPointer{
.offset = @intCast(offset + string_payload_start_location),
.length = @intCast(string_payload.items.len - offset),
};
try out.writeInt(u32, slice.offset, .little);
try out.writeInt(u32, slice.length, .little);
}

try out.writeAll(map_vlq);

bun.assert(header_list.items.len == string_payload_start_location);
}
};

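Taken together, the blob produced by `serializeJsonSourceMapForStandalone` is laid out as: an 8-byte header (two little-endian u32 counts), `2 * source_files_count` StringPointers (file paths first, then zstd-compressed contents), the raw VLQ mapping bytes, and finally the string payload that every StringPointer offset indexes into. A worked example of the offset arithmetic for a hypothetical map with 2 sources and 100 bytes of VLQ, mirroring `string_payload_start_location` above:

const std = @import("std");

pub fn main() void {
    const source_files_count: usize = 2;
    const map_vlq_len: usize = 100;

    // Each StringPointer is two u32s (offset, length) = 8 bytes.
    const string_pointer_size: usize = 2 * @sizeOf(u32);

    const header_size = 2 * @sizeOf(u32); // source_files_count + map_bytes_length
    const pointers_size = string_pointer_size * source_files_count * 2; // paths + contents
    const string_payload_start = header_size + pointers_size + map_vlq_len;

    // 8 + 32 + 100 = 140: StringPointer offsets are relative to byte 0 of the
    // whole serialized map, so the first path's bytes begin at byte 140.
    std.debug.print("string payload starts at byte {d}\n", .{string_payload_start});
}
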
@@ -199,7 +199,7 @@ pub fn BSSList(comptime ValueType: type, comptime _count: anytype) type {
const Self = @This();

allocator: Allocator,
mutex: Mutex = Mutex.init(),
mutex: Mutex = .{},
head: *OverflowBlock = undefined,
tail: OverflowBlock = OverflowBlock{},
backing_buf: [count]ValueType = undefined,
@@ -288,7 +288,7 @@ pub fn BSSStringList(comptime _count: usize, comptime _item_length: usize) type
allocator: Allocator,
slice_buf: [count][]const u8 = undefined,
slice_buf_used: u16 = 0,
mutex: Mutex = Mutex.init(),
mutex: Mutex = .{},
pub var instance: Self = undefined;
var loaded: bool = false;
// only need the mutex on append
@@ -465,7 +465,7 @@ pub fn BSSMap(comptime ValueType: type, comptime count: anytype, comptime store_
index: IndexMap,
overflow_list: Overflow = Overflow{},
allocator: Allocator,
mutex: Mutex = Mutex.init(),
mutex: Mutex = .{},
backing_buf: [count]ValueType = undefined,
backing_buf_used: u16 = 0,

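The `Mutex.init()` to `.{}` changes in these hunks (and the `bun.Lock` ones further down) follow the newer Zig convention where a lock is an ordinary struct whose default field values are already usable, so `.{}` is the whole initialization. A tiny sketch of the idiom using `std.Thread.Mutex`; bun's `Mutex`/`Lock` wrappers are assumed to behave the same way:

const std = @import("std");

const Counter = struct {
    // Default-initialized: no init() call needed, `.{}` is a ready-to-use mutex.
    mutex: std.Thread.Mutex = .{},
    value: usize = 0,

    fn increment(self: *Counter) void {
        self.mutex.lock();
        defer self.mutex.unlock();
        self.value += 1;
    }
};

pub fn main() void {
    var counter = Counter{}; // both fields come from their defaults
    counter.increment();
    std.debug.print("value = {d}\n", .{counter.value});
}
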
@@ -113,6 +113,7 @@ pub const Features = struct {
pub var no_avx2: usize = 0;
pub var binlinks: usize = 0;
pub var builtin_modules = std.enums.EnumSet(bun.JSC.HardcodedModule).initEmpty();
pub var standalone_executable: usize = 0;

pub fn formatter() Formatter {
return Formatter{};
@@ -304,6 +305,23 @@ pub const GenerateHeader = struct {
return platform_;
}

// On macOS 13, tests that use sendmsg_x or recvmsg_x hang.
var use_msgx_on_macos_14_or_later: bool = undefined;
var detectUseMsgXOnMacOS14OrLater_once = std.once(detectUseMsgXOnMacOS14OrLater);
fn detectUseMsgXOnMacOS14OrLater() void {
const version = Semver.Version.parseUTF8(forOS().version);
use_msgx_on_macos_14_or_later = version.valid and version.version.max().major >= 14;
}
pub export fn Bun__doesMacOSVersionSupportSendRecvMsgX() i32 {
if (comptime !Environment.isMac) {
// this should not be used on non-mac platforms.
return 0;
}

detectUseMsgXOnMacOS14OrLater_once.call();
return @intFromBool(use_msgx_on_macos_14_or_later);
}

pub fn kernelVersion() Semver.Version {
if (comptime !Environment.isLinux) {
@compileError("This function is only implemented on Linux");

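The macOS 14 check above hides its work behind `std.once`, so the OS version string is parsed at most once per process no matter how many callers or threads ask. A minimal sketch of that detect-once pattern; the probed "feature" here is made up for illustration:

const std = @import("std");

var feature_supported: bool = undefined;
var detect_once = std.once(detect);

fn detect() void {
    // Stand-in for parsing an OS version; runs exactly once per process.
    feature_supported = @import("builtin").os.tag == .macos;
}

pub fn isFeatureSupported() bool {
    detect_once.call();
    return feature_supported;
}

pub fn main() void {
    std.debug.print("supported: {}\n", .{isFeatureSupported()});
    std.debug.print("supported: {}\n", .{isFeatureSupported()}); // detect() is not run again
}
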
@@ -824,13 +824,20 @@ pub const Api = struct {
}
};

pub const StringPointer = packed struct {
/// Represents a slice stored within an externally stored buffer. Safe to serialize.
/// Must be an extern struct to match with `headers-handwritten.h`.
pub const StringPointer = extern struct {
/// offset
offset: u32 = 0,

/// length
length: u32 = 0,

comptime {
bun.assert(@alignOf(StringPointer) == @alignOf(u32));
bun.assert(@sizeOf(StringPointer) == @sizeOf(u64));
}

pub fn decode(reader: anytype) anyerror!StringPointer {
var this = std.mem.zeroes(StringPointer);

@@ -843,6 +850,10 @@ pub const Api = struct {
try writer.writeInt(this.offset);
try writer.writeInt(this.length);
}

pub fn slice(this: @This(), bytes: []const u8) []const u8 {
return bytes[this.offset .. this.offset + this.length];
}
};

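Changing `StringPointer` from `packed` to `extern` pins the struct to C layout so the Zig definition can line up with the declaration in `headers-handwritten.h`, and the `comptime` asserts turn any future layout drift into a build failure instead of silent corruption. A compact sketch of the same guard on a stand-in type (not bun's schema module):

const std = @import("std");

// Extern struct: field order and padding follow the C ABI, so this can be shared
// with a C header that declares the same two uint32_t fields.
const StringPointer = extern struct {
    offset: u32 = 0,
    length: u32 = 0,
};

comptime {
    // Fails the build if the layout drifts from the 8-byte, u32-aligned shape C expects.
    std.debug.assert(@sizeOf(StringPointer) == @sizeOf(u64));
    std.debug.assert(@alignOf(StringPointer) == @alignOf(u32));
}

pub fn main() void {
    const ptr = StringPointer{ .offset = 16, .length = 5 };
    std.debug.print("offset={d} length={d}\n", .{ ptr.offset, ptr.length });
}
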
pub const JavascriptBundledModule = struct {
@@ -1687,6 +1698,9 @@ pub const Api = struct {
/// packages
packages: ?PackagesMode = null,

/// ignore_dce_annotations
ignore_dce_annotations: bool,

pub fn decode(reader: anytype) anyerror!TransformOptions {
var this = std.mem.zeroes(TransformOptions);


@@ -2,6 +2,8 @@ const std = @import("std");
const bun = @import("root").bun;
const unicode = std.unicode;

const js_ast = bun.JSAst;

pub const NodeIndex = u32;
pub const NodeIndexNone = 4294967293;

@@ -147,6 +149,34 @@ pub const Ref = packed struct(u64) {
);
}

pub fn dump(ref: Ref, symbol_table: anytype) std.fmt.Formatter(dumpImpl) {
return .{ .data = .{
.ref = ref,
.symbol_table = switch (@TypeOf(symbol_table)) {
*const std.ArrayList(js_ast.Symbol) => symbol_table.items,
*std.ArrayList(js_ast.Symbol) => symbol_table.items,
[]const js_ast.Symbol => symbol_table,
[]js_ast.Symbol => symbol_table,
else => |T| @compileError("Unsupported type to Ref.dump: " ++ @typeName(T)),
},
} };
}

fn dumpImpl(data: struct { ref: Ref, symbol_table: []const js_ast.Symbol }, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
const symbol = data.symbol_table[data.ref.inner_index];
try std.fmt.format(
writer,
"Ref[inner={d}, src={d}, .{s}; original_name={s}, uses={d}]",
.{
data.ref.inner_index,
data.ref.source_index,
@tagName(data.ref.tag),
symbol.original_name,
symbol.use_count_estimate,
},
);
}

pub fn isValid(this: Ref) bool {
return this.tag != .invalid;
}

@@ -71,8 +71,8 @@ pub const MessageType = enum(u32) {
_,
};

var stderr_mutex: bun.Lock = bun.Lock.init();
var stdout_mutex: bun.Lock = bun.Lock.init();
var stderr_mutex: bun.Lock = .{};
var stdout_mutex: bun.Lock = .{};

threadlocal var stderr_lock_count: u16 = 0;
threadlocal var stdout_lock_count: u16 = 0;
@@ -697,41 +697,37 @@ pub fn format2(
};
const tag = ConsoleObject.Formatter.Tag.get(vals[0], global);

var unbuffered_writer = if (comptime Writer != RawWriter)
if (@hasDecl(@TypeOf(writer.context.unbuffered_writer.context), "quietWriter"))
writer.context.unbuffered_writer.context.quietWriter()
else
writer.context.unbuffered_writer.context.writer()
else
writer;

if (tag.tag == .String) {
if (options.enable_colors) {
if (level == .Error) {
unbuffered_writer.writeAll(comptime Output.prettyFmt("<r><red>", true)) catch {};
writer.writeAll(comptime Output.prettyFmt("<r><red>", true)) catch {};
}
fmt.format(
tag,
@TypeOf(unbuffered_writer),
unbuffered_writer,
Writer,
writer,
vals[0],
global,
true,
);
if (level == .Error) {
unbuffered_writer.writeAll(comptime Output.prettyFmt("<r>", true)) catch {};
writer.writeAll(comptime Output.prettyFmt("<r>", true)) catch {};
}
} else {
fmt.format(
tag,
@TypeOf(unbuffered_writer),
unbuffered_writer,
Writer,
writer,
vals[0],
global,
false,
);
}
if (options.add_newline) _ = unbuffered_writer.write("\n") catch 0;
if (options.add_newline) {
_ = writer.write("\n") catch 0;
}

writer.context.flush() catch {};
} else {
defer {
if (comptime Writer != RawWriter) {
@@ -1077,6 +1073,7 @@ pub const Formatter = struct {
};
}
}
if (globalThis.hasException()) return .{ .tag = .RevokedProxy };
}

if (js_type == .DOMWrapper) {
@@ -1174,6 +1171,7 @@ pub const Formatter = struct {
.Uint16Array,
.Int32Array,
.Uint32Array,
.Float16Array,
.Float32Array,
.Float64Array,
.BigInt64Array,
@@ -1567,7 +1565,7 @@ pub const Formatter = struct {
formatter: *ConsoleObject.Formatter,
writer: Writer,
count: usize = 0,
pub fn forEach(_: [*c]JSC.VM, globalObject: [*c]JSGlobalObject, ctx: ?*anyopaque, nextValue: JSValue) callconv(.C) void {
pub fn forEach(_: [*c]JSC.VM, globalObject: *JSGlobalObject, ctx: ?*anyopaque, nextValue: JSValue) callconv(.C) void {
var this: *@This() = bun.cast(*@This(), ctx orelse return);
if (single_line and this.count > 0) {
this.formatter.printComma(Writer, this.writer, enable_ansi_colors) catch unreachable;
@@ -1631,7 +1629,7 @@ pub const Formatter = struct {
formatter: *ConsoleObject.Formatter,
writer: Writer,
is_first: bool = true,
pub fn forEach(_: [*c]JSC.VM, globalObject: [*c]JSGlobalObject, ctx: ?*anyopaque, nextValue: JSValue) callconv(.C) void {
pub fn forEach(_: [*c]JSC.VM, globalObject: *JSGlobalObject, ctx: ?*anyopaque, nextValue: JSValue) callconv(.C) void {
var this: *@This() = bun.cast(*@This(), ctx orelse return);
if (single_line) {
if (!this.is_first) {
@@ -2121,7 +2119,7 @@ pub const Formatter = struct {
}
},
.Array => {
const len = @as(u32, @truncate(value.getLength(this.globalThis)));
const len = value.getLength(this.globalThis);

// TODO: DerivedArray does not get passed along in JSType, and it's not clear why.
// if (jsType == .DerivedArray) {
@@ -2186,6 +2184,7 @@ pub const Formatter = struct {
}

var i: u32 = 1;
var nonempty_count: u32 = 1;

while (i < len) : (i += 1) {
const element = value.getDirectIndex(this.globalThis, i);
@@ -2195,6 +2194,15 @@ pub const Formatter = struct {
}
continue;
}
if (nonempty_count >= 100) {
this.printComma(Writer, writer_, enable_ansi_colors) catch unreachable;
writer.writeAll("\n"); // we want the line break to be unconditional here
this.estimated_line_length = 0;
this.writeIndent(Writer, writer_) catch unreachable;
writer.pretty("<r><d>... {d} more items<r>", enable_ansi_colors, .{len - i});
break;
}
nonempty_count += 1;

if (empty_start) |empty| {
if (empty > 0) {
@@ -3076,6 +3084,13 @@ pub const Formatter = struct {
@as([]align(std.meta.alignment([]u32)) u32, @alignCast(std.mem.bytesAsSlice(u32, slice))),
enable_ansi_colors,
),
.Float16Array => this.writeTypedArray(
*@TypeOf(writer),
&writer,
f16,
@as([]align(std.meta.alignment([]f16)) f16, @alignCast(std.mem.bytesAsSlice(f16, slice))),
enable_ansi_colors,
),
.Float32Array => this.writeTypedArray(
*@TypeOf(writer),
&writer,

@@ -1,7 +1,9 @@
/// ** Update the version number when any breaking changes are made to the cache format or to the JS parser **
/// Version 3: "Infinity" becomes "1/0".
/// Version 4: TypeScript enums are properly handled + more constant folding
const expected_version = 4;
/// Version 5: `require.main === module` no longer marks a module as CJS
/// Version 6: `use strict` is preserved in CommonJS modules when at the top of the file
const expected_version = 6;

const bun = @import("root").bun;
const std = @import("std");