Compare commits

8 Commits

Author           SHA1         Message                  Date
Ashcon Partovi   4ca0d96837   Fix tinycc               2024-07-23 16:36:23 -07:00
Ashcon Partovi   2091551c92   Latest changes           2024-07-23 15:39:27 -07:00
Ashcon Partovi   a2bc49a991   Changes:                 2024-07-23 10:05:47 -07:00
Ashcon Partovi   398e93249e   Continue                 2024-07-22 21:33:33 -07:00
Ashcon Partovi   1ec44688b7   Changes                  2024-07-22 21:33:33 -07:00
Ashcon Partovi   7aa2360542   Changes                  2024-07-22 21:33:33 -07:00
Ashcon Partovi   e87c599e6a   Build script continued   2024-07-22 21:33:33 -07:00
Ashcon Partovi   d60da3d186   Unified build script     2024-07-22 21:33:33 -07:00
2507 changed files with 62911 additions and 105836 deletions


@@ -10,10 +10,9 @@ steps:
blocked_state: "running"
- label: ":pipeline:"
command: "buildkite-agent pipeline upload .buildkite/ci.yml"
agents:
queue: "build-darwin"
command:
- ".buildkite/scripts/prepare-build.sh"
queue: "build-linux"
- if: "build.branch == 'main' && !build.pull_request.repository.fork"
label: ":github:"

File diff suppressed because it is too large.


@@ -1,62 +0,0 @@
#!/bin/bash
set -eo pipefail
source "$(dirname "$0")/env.sh"
function run_command() {
set -x
"$@"
{ set +x; } 2>/dev/null
}
cwd="$(pwd)"
mkdir -p build
source "$(dirname "$0")/download-artifact.sh" "build/bun-deps/**" --step "$BUILDKITE_GROUP_KEY-build-deps"
source "$(dirname "$0")/download-artifact.sh" "build/bun-zig.o" --step "$BUILDKITE_GROUP_KEY-build-zig"
source "$(dirname "$0")/download-artifact.sh" "build/bun-cpp-objects.a" --step "$BUILDKITE_GROUP_KEY-build-cpp" --split
cd build
run_command cmake .. "${CMAKE_FLAGS[@]}" \
-GNinja \
-DBUN_LINK_ONLY="1" \
-DNO_CONFIGURE_DEPENDS="1" \
-DBUN_ZIG_OBJ_DIR="$cwd/build" \
-DBUN_CPP_ARCHIVE="$cwd/build/bun-cpp-objects.a" \
-DBUN_DEPS_OUT_DIR="$cwd/build/bun-deps" \
-DCMAKE_BUILD_TYPE="$CMAKE_BUILD_TYPE" \
-DCPU_TARGET="$CPU_TARGET" \
-DUSE_LTO="$USE_LTO" \
-DUSE_DEBUG_JSC="$USE_DEBUG_JSC" \
-DCANARY="$CANARY" \
-DGIT_SHA="$GIT_SHA"
run_command ninja -v -j "$CPUS"
run_command ls
tag="bun-$BUILDKITE_GROUP_KEY"
if [ "$USE_LTO" == "OFF" ]; then
# Remove OS check when LTO is enabled on macOS again
if [[ "$tag" == *"darwin"* ]]; then
tag="$tag-nolto"
fi
fi
for name in bun bun-profile; do
dir="$tag"
if [ "$name" == "bun-profile" ]; then
dir="$tag-profile"
fi
run_command chmod +x "$name"
run_command "./$name" --revision
run_command mkdir -p "$dir"
run_command mv "$name" "$dir/$name"
run_command zip -r "$dir.zip" "$dir"
source "$cwd/.buildkite/scripts/upload-artifact.sh" "$dir.zip"
# Temporarily disabled so CI can run:
# this fails because $name has been moved to $dir/$name, and changing the path to $dir/$name gives ENOENT reading "bun:internal-for-testing"
# if [ "$name" == "bun-profile" ]; then
# export BUN_FEATURE_FLAG_INTERNAL_FOR_TESTING="1"
# run_command "./$name" -e "require('fs').writeFileSync('./features.json', JSON.stringify(require('bun:internal-for-testing').crash_handler.getFeatureData()))"
# source "$cwd/.buildkite/scripts/upload-artifact.sh" "features.json"
# fi
done
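
For orientation, here is a minimal sketch (not part of the diff) of how the tagging above maps a Buildkite group key to the zip names that the publish script near the end of this diff expects; the group key value is an assumption for illustration.

# Illustration only: assumed group key for a Linux x64 build step.
BUILDKITE_GROUP_KEY="linux-x64"
tag="bun-$BUILDKITE_GROUP_KEY"      # "bun-linux-x64"
for name in bun bun-profile; do
  dir="$tag"
  if [ "$name" == "bun-profile" ]; then
    dir="$tag-profile"
  fi
  echo "$dir.zip"                   # bun-linux-x64.zip, then bun-linux-x64-profile.zip
done

These match the file names listed in the artifacts array of the publish script further down.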


@@ -1,37 +0,0 @@
#!/bin/bash
set -eo pipefail
export FORCE_UPDATE_SUBMODULES=1
# env.sh calls update_submodules.sh
source "$(dirname "$0")/env.sh"
{ set +x; } 2>/dev/null
function run_command() {
set -x
"$@"
{ set +x; } 2>/dev/null
}
mkdir -p build
cd build
mkdir -p tmp_modules tmp_functions js codegen
run_command cmake .. "${CMAKE_FLAGS[@]}" \
-GNinja \
-DBUN_CPP_ONLY="1" \
-DNO_CONFIGURE_DEPENDS="1" \
-DCMAKE_BUILD_TYPE="$CMAKE_BUILD_TYPE" \
-DCPU_TARGET="$CPU_TARGET" \
-DUSE_LTO="$USE_LTO" \
-DUSE_DEBUG_JSC="$USE_DEBUG_JSC" \
-DCANARY="$CANARY" \
-DGIT_SHA="$GIT_SHA"
chmod +x compile-cpp-only.sh
source compile-cpp-only.sh -v -j "$CPUS"
{ set +x; } 2>/dev/null
cd ..
source "$(dirname "$0")/upload-artifact.sh" "build/bun-cpp-objects.a" --split


@@ -1,22 +0,0 @@
#!/bin/bash
set -eo pipefail
source "$(dirname "$0")/env.sh"
source "$(realpath $(dirname "$0")/../../scripts/all-dependencies.sh)"
artifacts=(
libcrypto.a libssl.a libdecrepit.a
libcares.a
libarchive.a
liblolhtml.a
libmimalloc.a libmimalloc.o
libtcc.a
libz.a
libzstd.a
libdeflate.a
liblshpack.a
)
for artifact in "${artifacts[@]}"; do
source "$(dirname "$0")/upload-artifact.sh" "build/bun-deps/$artifact"
done


@@ -1,40 +0,0 @@
#!/bin/bash
set -eo pipefail
source "$(dirname "$0")/env.sh"
function assert_bun() {
if ! command -v bun &>/dev/null; then
echo "error: bun is not installed" 1>&2
exit 1
fi
}
function assert_make() {
if ! command -v make &>/dev/null; then
echo "error: make is not installed" 1>&2
exit 1
fi
}
function run_command() {
set -x
"$@"
{ set +x; } 2>/dev/null
}
function build_node_fallbacks() {
local cwd="src/node-fallbacks"
run_command bun install --cwd "$cwd" --frozen-lockfile
run_command bun run --cwd "$cwd" build
}
function build_old_js() {
run_command bun install --frozen-lockfile
run_command make runtime_js fallback_decoder bun_error
}
assert_bun
assert_make
build_node_fallbacks
build_old_js


@@ -1,101 +0,0 @@
#!/bin/bash
set -euo pipefail
export CMAKE_FLAGS=""
source "$(dirname "$0")/env.sh"
if [[ -n "$CMAKE_FLAGS" ]]; then
echo "CMAKE_FLAGS should not be empty"
exit 1
fi
function assert_target() {
local arch="${2-$(uname -m)}"
case "$(echo "$arch" | tr '[:upper:]' '[:lower:]')" in
x64 | x86_64 | amd64)
export ZIG_ARCH="x86_64"
if [[ "$BUILDKITE_STEP_KEY" == *"baseline"* ]]; then
export ZIG_CPU_TARGET="nehalem"
else
export ZIG_CPU_TARGET="haswell"
fi
;;
aarch64 | arm64)
export ZIG_ARCH="aarch64"
export ZIG_CPU_TARGET="native"
;;
*)
echo "error: Unsupported architecture: $arch" 1>&2
exit 1
;;
esac
local os="${1-$(uname -s)}"
case "$(echo "$os" | tr '[:upper:]' '[:lower:]')" in
linux)
export ZIG_OS="linux"
export ZIG_TARGET="$ZIG_ARCH-linux-gnu"
;;
darwin)
export ZIG_OS="macos"
export ZIG_TARGET="$ZIG_ARCH-macos-none"
;;
windows)
export ZIG_OS="windows"
export ZIG_TARGET="$ZIG_ARCH-windows-msvc"
;;
*)
echo "error: Unsupported operating system: $os" 1>&2
exit 1
;;
esac
}
function run_command() {
set -x
"$@"
{ set +x; } 2>/dev/null
}
assert_target "$@"
# Since the zig build depends on files from the zig submodule,
# make sure to update the submodule before building.
run_command git submodule update --init --recursive --progress --depth=1 --checkout src/deps/zig
# TODO: Move these to be part of the CMake build
source "$(dirname "$0")/build-old-js.sh"
cwd="$(pwd)"
mkdir -p build
cd build
# In Buildkite, this script is run on a macOS machine to compile for Windows,
# so CMake's Windows detection does not run for this logic
ZIG_OPTIMIZE="ReleaseFast"
if [[ "$ZIG_OS" == "windows" ]]; then
ZIG_OPTIMIZE="ReleaseSafe"
fi
run_command cmake .. "${CMAKE_FLAGS[@]}" \
-GNinja \
-DNO_CONFIGURE_DEPENDS="1" \
-DNO_CODEGEN="0" \
-DWEBKIT_DIR="omit" \
-DBUN_ZIG_OBJ_DIR="$cwd/build" \
-DZIG_LIB_DIR="$cwd/src/deps/zig/lib" \
-DCMAKE_BUILD_TYPE="$CMAKE_BUILD_TYPE" \
-DARCH="$ZIG_ARCH" \
-DCPU_TARGET="$ZIG_CPU_TARGET" \
-DZIG_TARGET="$ZIG_TARGET" \
-DUSE_LTO="$USE_LTO" \
-DUSE_DEBUG_JSC="$USE_DEBUG_JSC" \
-DCANARY="$CANARY" \
-DZIG_OPTIMIZE="$ZIG_OPTIMIZE" \
-DGIT_SHA="$GIT_SHA"
export ONLY_ZIG="1"
run_command ninja "$cwd/build/bun-zig.o" -v -j "$CPUS"
cd ..
source "$(dirname "$0")/upload-artifact.sh" "build/bun-zig.o"


@@ -1,47 +0,0 @@
param (
[Parameter(Mandatory=$true)]
[string[]] $Paths,
[switch] $Split
)
$ErrorActionPreference = "Stop"
function Assert-Buildkite-Agent() {
if (-not (Get-Command "buildkite-agent" -ErrorAction SilentlyContinue)) {
Write-Error "Cannot find buildkite-agent, please install it: https://buildkite.com/docs/agent/v3/install"
exit 1
}
}
function Assert-Join-File() {
if (-not (Get-Command "Join-File" -ErrorAction SilentlyContinue)) {
Write-Error "Cannot find Join-File, please install it: https://www.powershellgallery.com/packages/FileSplitter/1.3"
exit 1
}
}
function Download-Buildkite-Artifact() {
param (
[Parameter(Mandatory=$true)]
[string] $Path,
)
if ($Split) {
& buildkite-agent artifact download "$Path.*" --debug --debug-http
Join-File -Path "$(Resolve-Path .)\$Path" -Verbose -DeletePartFiles
} else {
& buildkite-agent artifact download "$Path" --debug --debug-http
}
if (-not (Test-Path $Path)) {
Write-Error "Could not find artifact: $Path"
exit 1
}
}
Assert-Buildkite-Agent
if ($Split) {
Assert-Join-File
}
foreach ($Path in $Paths) {
Download-Buildkite-Artifact $Path
}


@@ -1,59 +0,0 @@
#!/bin/bash
set -euo pipefail
function assert_buildkite_agent() {
if ! command -v buildkite-agent &>/dev/null; then
echo "error: Cannot find buildkite-agent, please install it:"
echo "https://buildkite.com/docs/agent/v3/install"
exit 1
fi
}
function download_buildkite_artifact() {
# Check if at least one argument is provided
if [ $# -eq 0 ]; then
echo "error: No path provided for artifact download"
exit 1
fi
local path="$1"
shift
local split="0"
local args=()
while [ $# -gt 0 ]; do
case "$1" in
--split)
split="1"
shift
;;
*)
args+=("$1")
shift
;;
esac
done
if [ "$split" == "1" ]; then
run_command buildkite-agent artifact download "$path.*" . "${args[@]:-}"
run_command cat "$path".?? >"$path"
run_command rm -f "$path".??
else
run_command buildkite-agent artifact download "$path" . "${args[@]:-}"
fi
if [[ "$path" != *"*"* ]] && [ ! -f "$path" ]; then
echo "error: Could not find artifact: $path"
exit 1
fi
}
function run_command() {
set -x
"$@"
{ set +x; } 2>/dev/null
}
assert_buildkite_agent
download_buildkite_artifact "$@"


@@ -1,146 +0,0 @@
#!/bin/bash
set -euo pipefail
BUILDKITE_REPO=${BUILDKITE_REPO:-}
BUILDKITE_CLEAN_CHECKOUT=${BUILDKITE_CLEAN_CHECKOUT:-}
BUILDKITE_BRANCH=${BUILDKITE_BRANCH:-}
CCACHE_DIR=${CCACHE_DIR:-}
SCCACHE_DIR=${SCCACHE_DIR:-}
ZIG_LOCAL_CACHE_DIR=${ZIG_LOCAL_CACHE_DIR:-}
ZIG_GLOBAL_CACHE_DIR=${ZIG_GLOBAL_CACHE_DIR:-}
BUN_DEPS_CACHE_DIR=${BUN_DEPS_CACHE_DIR:-}
BUN_DEPS_CACHE_DIR=${BUN_DEPS_CACHE_DIR:-}
BUILDKITE_STEP_KEY=${BUILDKITE_STEP_KEY:-}
ROOT_DIR="$(realpath "$(dirname "$0")/../../")"
# Fail if we cannot find the root directory
if [ ! -d "$ROOT_DIR" ]; then
echo "error: Cannot find root directory: '$ROOT_DIR'" 1>&2
exit 1
fi
function assert_os() {
local os="$(uname -s)"
case "$os" in
Linux)
echo "linux"
;;
Darwin)
echo "darwin"
;;
*)
echo "error: Unsupported operating system: $os" 1>&2
exit 1
;;
esac
}
function assert_arch() {
local arch="$(uname -m)"
case "$arch" in
aarch64 | arm64)
echo "aarch64"
;;
x86_64 | amd64)
echo "x64"
;;
*)
echo "error: Unknown architecture: $arch" 1>&2
exit 1
;;
esac
}
function assert_build() {
if [ -z "$BUILDKITE_REPO" ]; then
echo "error: Cannot find repository for this build"
exit 1
fi
if [ -z "$BUILDKITE_COMMIT" ]; then
echo "error: Cannot find commit for this build"
exit 1
fi
if [ -z "$BUILDKITE_STEP_KEY" ]; then
echo "error: Cannot find step key for this build"
exit 1
fi
if [ -n "$BUILDKITE_GROUP_KEY" ] && [[ "$BUILDKITE_STEP_KEY" != "$BUILDKITE_GROUP_KEY"* ]]; then
echo "error: Build step '$BUILDKITE_STEP_KEY' does not start with group key '$BUILDKITE_GROUP_KEY'"
exit 1
fi
# Skip os and arch checks for Zig, since it's cross-compiled on macOS
if [[ "$BUILDKITE_STEP_KEY" != *"zig"* ]]; then
local os="$(assert_os)"
if [[ "$BUILDKITE_STEP_KEY" != *"$os"* ]]; then
echo "error: Build step '$BUILDKITE_STEP_KEY' does not match operating system '$os'"
exit 1
fi
local arch="$(assert_arch)"
if [[ "$BUILDKITE_STEP_KEY" != *"$arch"* ]]; then
echo "error: Build step '$BUILDKITE_STEP_KEY' does not match architecture '$arch'"
exit 1
fi
fi
}
function assert_buildkite_agent() {
if (! command -v buildkite-agent &>/dev/null); then
echo "error: Cannot find buildkite-agent, please install it:"
echo "https://buildkite.com/docs/agent/v3/install"
exit 1
fi
}
function export_environment() {
source "${ROOT_DIR}/scripts/env.sh"
source "${ROOT_DIR}/scripts/update-submodules.sh"
{ set +x; } 2>/dev/null
export GIT_SHA="$BUILDKITE_COMMIT"
if [ "$BUILDKITE_CLEAN_CHECKOUT" == "true" ] || [ "$BUILDKITE_BRANCH" == "main" ]; then
local tmpdir="$(mktemp -d 2>/dev/null || mktemp -d -t 'new')"
export CCACHE_DIR="$tmpdir/.cache/ccache"
export SCCACHE_DIR="$tmpdir/.cache/sccache"
export ZIG_LOCAL_CACHE_DIR="$tmpdir/.cache/zig-cache"
export ZIG_GLOBAL_CACHE_DIR="$tmpdir/.cache/zig-cache"
export BUN_DEPS_CACHE_DIR="$tmpdir/.cache/bun-deps"
export CCACHE_RECACHE="1"
else
export CCACHE_DIR="$HOME/.cache/ccache/$BUILDKITE_STEP_KEY"
export SCCACHE_DIR="$HOME/.cache/sccache/$BUILDKITE_STEP_KEY"
export ZIG_LOCAL_CACHE_DIR="$HOME/.cache/zig-cache/$BUILDKITE_STEP_KEY"
export ZIG_GLOBAL_CACHE_DIR="$HOME/.cache/zig-cache/$BUILDKITE_STEP_KEY"
export BUN_DEPS_CACHE_DIR="$HOME/.cache/bun-deps/$BUILDKITE_STEP_KEY"
fi
if [ "$(assert_os)" == "linux" ]; then
export USE_LTO="ON"
fi
if [ "$(assert_arch)" == "aarch64" ]; then
export CPU_TARGET="native"
elif [[ "$BUILDKITE_STEP_KEY" == *"baseline"* ]]; then
export CPU_TARGET="nehalem"
else
export CPU_TARGET="haswell"
fi
if $(buildkite-agent meta-data exists release &>/dev/null); then
export CMAKE_BUILD_TYPE="$(buildkite-agent meta-data get release)"
else
export CMAKE_BUILD_TYPE="Release"
fi
if $(buildkite-agent meta-data exists canary &>/dev/null); then
export CANARY="$(buildkite-agent meta-data get canary)"
else
export CANARY="1"
fi
if $(buildkite-agent meta-data exists assertions &>/dev/null); then
export USE_DEBUG_JSC="$(buildkite-agent meta-data get assertions)"
else
export USE_DEBUG_JSC="OFF"
fi
}
assert_build
assert_buildkite_agent
export_environment


@@ -1,97 +0,0 @@
#!/bin/bash
set -eo pipefail
function assert_build() {
if [ -z "$BUILDKITE_REPO" ]; then
echo "error: Cannot find repository for this build"
exit 1
fi
if [ -z "$BUILDKITE_COMMIT" ]; then
echo "error: Cannot find commit for this build"
exit 1
fi
}
function assert_buildkite_agent() {
if ! command -v buildkite-agent &> /dev/null; then
echo "error: Cannot find buildkite-agent, please install it:"
echo "https://buildkite.com/docs/agent/v3/install"
exit 1
fi
}
function assert_jq() {
assert_command "jq" "jq" "https://stedolan.github.io/jq/"
}
function assert_curl() {
assert_command "curl" "curl" "https://curl.se/download.html"
}
function assert_command() {
local command="$1"
local package="$2"
local help_url="$3"
if ! command -v "$command" &> /dev/null; then
echo "warning: $command is not installed, installing..."
if command -v brew &> /dev/null; then
HOMEBREW_NO_AUTO_UPDATE=1 brew install "$package"
else
echo "error: Cannot install $command, please install it"
if [ -n "$help_url" ]; then
echo ""
echo "hint: See $help_url for help"
fi
exit 1
fi
fi
}
function assert_release() {
if [ "$RELEASE" == "1" ]; then
run_command buildkite-agent meta-data set canary "0"
fi
}
function assert_canary() {
local canary="$(buildkite-agent meta-data get canary 2>/dev/null)"
if [ -z "$canary" ]; then
local repo=$(echo "$BUILDKITE_REPO" | sed -E 's#https://github.com/([^/]+)/([^/]+).git#\1/\2#g')
local tag="$(curl -sL "https://api.github.com/repos/$repo/releases/latest" | jq -r ".tag_name")"
if [ "$tag" == "null" ]; then
canary="1"
else
local revision=$(curl -sL "https://api.github.com/repos/$repo/compare/$tag...$BUILDKITE_COMMIT" | jq -r ".ahead_by")
if [ "$revision" == "null" ]; then
canary="1"
else
canary="$revision"
fi
fi
run_command buildkite-agent meta-data set canary "$canary"
fi
}
function upload_buildkite_pipeline() {
local path="$1"
if [ ! -f "$path" ]; then
echo "error: Cannot find pipeline: $path"
exit 1
fi
run_command buildkite-agent pipeline upload "$path"
}
function run_command() {
set -x
"$@"
{ set +x; } 2>/dev/null
}
assert_build
assert_buildkite_agent
assert_jq
assert_curl
assert_release
assert_canary
upload_buildkite_pipeline ".buildkite/ci.yml"


@@ -1,47 +0,0 @@
param (
[Parameter(Mandatory=$true)]
[string[]] $Paths,
[switch] $Split
)
$ErrorActionPreference = "Stop"
function Assert-Buildkite-Agent() {
if (-not (Get-Command "buildkite-agent" -ErrorAction SilentlyContinue)) {
Write-Error "Cannot find buildkite-agent, please install it: https://buildkite.com/docs/agent/v3/install"
exit 1
}
}
function Assert-Split-File() {
if (-not (Get-Command "Split-File" -ErrorAction SilentlyContinue)) {
Write-Error "Cannot find Split-File, please install it: https://www.powershellgallery.com/packages/FileSplitter/1.3"
exit 1
}
}
function Upload-Buildkite-Artifact() {
param (
[Parameter(Mandatory=$true)]
[string] $Path,
)
if (-not (Test-Path $Path)) {
Write-Error "Could not find artifact: $Path"
exit 1
}
if ($Split) {
Remove-Item -Path "$Path.*" -Force
Split-File -Path (Resolve-Path $Path) -PartSizeBytes "50MB" -Verbose
$Path = "$Path.*"
}
& buildkite-agent artifact upload "$Path" --debug --debug-http
}
Assert-Buildkite-Agent
if ($Split) {
Assert-Split-File
}
foreach ($Path in $Paths) {
Upload-Buildkite-Artifact $Path
}


@@ -1,71 +0,0 @@
#!/bin/bash
set -euo pipefail
function assert_buildkite_agent() {
if ! command -v buildkite-agent &>/dev/null; then
echo "error: Cannot find buildkite-agent, please install it:"
echo "https://buildkite.com/docs/agent/v3/install"
exit 1
fi
}
function assert_split() {
if ! command -v split &>/dev/null; then
echo "error: Cannot find split, please install it:"
echo "https://www.gnu.org/software/coreutils/split"
exit 1
fi
}
function upload_buildkite_artifact() {
if [ -z "${1:-}" ]; then
return
fi
local path="$1"
shift
local split="0"
local args=() # Initialize args as an empty array
while true; do
if [ -z "${1:-}" ]; then
break
fi
case "$1" in
--split)
split="1"
shift
;;
*)
args+=("$1")
shift
;;
esac
done
if [ ! -f "$path" ]; then
echo "error: Could not find artifact: $path"
exit 1
fi
if [ "$split" == "1" ]; then
run_command rm -f "$path."*
run_command split -b 50MB -d "$path" "$path."
if [ "${args[@]:-}" != "" ]; then
run_command buildkite-agent artifact upload "$path.*" "${args[@]}"
else
run_command buildkite-agent artifact upload "$path.*"
fi
elif [ "${args[@]:-}" != "" ]; then
run_command buildkite-agent artifact upload "$path" "${args[@]:-}"
else
run_command buildkite-agent artifact upload "$path"
fi
}
function run_command() {
set -x
"$@"
{ set +x; } 2>/dev/null
}
assert_buildkite_agent
upload_buildkite_artifact "$@"
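
To make the artifact-splitting convention concrete, here is a minimal, self-contained sketch (not part of the diff, with made-up file names) of the 50 MB split-and-reassemble round trip shared by this script and download-artifact.sh above:

# Illustration only: split a large artifact into numbered parts, then put it back together.
dd if=/dev/zero of=bun-example.zip bs=1M count=120 2>/dev/null  # dummy ~120 MB "artifact"
split -b 50MB -d bun-example.zip bun-example.zip.               # creates bun-example.zip.00, .01, .02
rm bun-example.zip                                              # pretend we are now on the download side
cat bun-example.zip.?? > bun-example.zip                        # fixed-width suffixes keep the parts in order
rm -f bun-example.zip.??                                        # clean up the parts

The .?? glob matches the two-digit numeric suffixes that split emits by default, so reassembly covers up to 100 parts (about 5 GB at 50 MB per part).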


@@ -3,19 +3,7 @@
set -eo pipefail
function assert_main() {
if [ "$RELEASE" == "1" ]; then
echo "info: Skipping canary release because this is a release build"
exit 0
fi
if [ -z "$BUILDKITE_REPO" ]; then
echo "error: Cannot find repository for this build"
exit 1
fi
if [ -z "$BUILDKITE_COMMIT" ]; then
echo "error: Cannot find commit for this build"
exit 1
fi
if [ -n "$BUILDKITE_PULL_REQUEST_REPO" ] && [ "$BUILDKITE_REPO" != "$BUILDKITE_PULL_REQUEST_REPO" ]; then
if [[ "$BUILDKITE_PULL_REQUEST_REPO" && "$BUILDKITE_REPO" != "$BUILDKITE_PULL_REQUEST_REPO" ]]; then
echo "error: Cannot upload release from a fork"
exit 1
fi
@@ -30,191 +18,77 @@ function assert_main() {
}
function assert_buildkite_agent() {
if ! command -v "buildkite-agent" &> /dev/null; then
if ! command -v buildkite-agent &> /dev/null; then
echo "error: Cannot find buildkite-agent, please install it:"
echo "https://buildkite.com/docs/agent/v3/install"
exit 1
fi
}
function assert_github() {
assert_command "gh" "gh" "https://github.com/cli/cli#installation"
assert_buildkite_secret "GITHUB_TOKEN"
# gh expects the token in $GH_TOKEN
export GH_TOKEN="$GITHUB_TOKEN"
}
function assert_aws() {
assert_command "aws" "awscli" "https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html"
for secret in "AWS_ACCESS_KEY_ID" "AWS_SECRET_ACCESS_KEY" "AWS_ENDPOINT"; do
assert_buildkite_secret "$secret"
done
assert_buildkite_secret "AWS_BUCKET" --skip-redaction
}
function assert_sentry() {
assert_command "sentry-cli" "getsentry/tools/sentry-cli" "https://docs.sentry.io/cli/installation/"
for secret in "SENTRY_AUTH_TOKEN" "SENTRY_ORG" "SENTRY_PROJECT"; do
assert_buildkite_secret "$secret"
done
}
function run_command() {
set -x
"$@"
{ set +x; } 2>/dev/null
}
function assert_command() {
local command="$1"
local package="$2"
local help_url="$3"
if ! command -v "$command" &> /dev/null; then
echo "warning: $command is not installed, installing..."
function assert_gh() {
if ! command -v gh &> /dev/null; then
echo "warning: gh is not installed, installing..."
if command -v brew &> /dev/null; then
HOMEBREW_NO_AUTO_UPDATE=1 run_command brew install "$package"
brew install gh
else
echo "error: Cannot install $command, please install it"
if [ -n "$help_url" ]; then
echo ""
echo "hint: See $help_url for help"
fi
echo "error: Cannot install gh, please install it:"
echo "https://github.com/cli/cli#installation"
exit 1
fi
fi
}
function assert_buildkite_secret() {
local key="$1"
local value=$(buildkite-agent secret get "$key" ${@:2})
if [ -z "$value" ]; then
echo "error: Cannot find $key secret"
function assert_gh_token() {
local token=$(buildkite-agent secret get GITHUB_TOKEN)
if [ -z "$token" ]; then
echo "error: Cannot find GITHUB_TOKEN secret"
echo ""
echo "hint: Create a secret named $key with a value:"
echo "hint: Create a secret named GITHUB_TOKEN with a GitHub access token:"
echo "https://buildkite.com/docs/pipelines/buildkite-secrets"
exit 1
fi
export "$key"="$value"
export GH_TOKEN="$token"
}
function release_tag() {
local version="$1"
if [ "$version" == "canary" ]; then
echo "canary"
else
echo "bun-v$version"
fi
}
function create_sentry_release() {
local version="$1"
local release="$version"
if [ "$version" == "canary" ]; then
release="$BUILDKITE_COMMIT-canary"
fi
run_command sentry-cli releases new "$release" --finalize
run_command sentry-cli releases set-commits "$release" --auto --ignore-missing
if [ "$version" == "canary" ]; then
run_command sentry-cli deploys new --env="canary" --release="$release"
fi
}
function download_buildkite_artifact() {
local name="$1"
local dir="$2"
if [ -z "$dir" ]; then
dir="."
fi
run_command buildkite-agent artifact download "$name" "$dir"
if [ ! -f "$dir/$name" ]; then
function download_artifact() {
local name=$1
buildkite-agent artifact download "$name" .
if [ ! -f "$name" ]; then
echo "error: Cannot find Buildkite artifact: $name"
exit 1
fi
}
function upload_github_asset() {
local version="$1"
local tag="$(release_tag "$version")"
local file="$2"
run_command gh release upload "$tag" "$file" --clobber --repo "$BUILDKITE_REPO"
# Sometimes the upload fails, maybe this is a race condition in the gh CLI?
while [ "$(gh release view "$tag" --repo "$BUILDKITE_REPO" | grep -c "$file")" -eq 0 ]; do
echo "warn: Uploading $file to $tag failed, retrying..."
sleep "$((RANDOM % 5 + 1))"
run_command gh release upload "$tag" "$file" --clobber --repo "$BUILDKITE_REPO"
done
function upload_assets() {
local tag=$1
local files=${@:2}
gh release upload "$tag" $files --clobber --repo "$BUILDKITE_REPO"
}
function update_github_release() {
local version="$1"
local tag="$(release_tag "$version")"
if [ "$tag" == "canary" ]; then
sleep 5 # There is possibly a race condition where this overwrites artifacts?
run_command gh release edit "$tag" --repo "$BUILDKITE_REPO" \
--notes "This release of Bun corresponds to the commit: $BUILDKITE_COMMIT"
fi
}
assert_main
assert_buildkite_agent
assert_gh
assert_gh_token
function upload_s3_file() {
local folder="$1"
local file="$2"
run_command aws --endpoint-url="$AWS_ENDPOINT" s3 cp "$file" "s3://$AWS_BUCKET/$folder/$file"
}
declare artifacts=(
bun-darwin-aarch64.zip
bun-darwin-aarch64-profile.zip
bun-darwin-x64.zip
bun-darwin-x64-profile.zip
bun-linux-aarch64.zip
bun-linux-aarch64-profile.zip
bun-linux-x64.zip
bun-linux-x64-profile.zip
bun-linux-x64-baseline.zip
bun-linux-x64-baseline-profile.zip
bun-windows-x64.zip
bun-windows-x64-profile.zip
bun-windows-x64-baseline.zip
bun-windows-x64-baseline-profile.zip
)
function create_release() {
assert_main
assert_buildkite_agent
assert_github
assert_aws
assert_sentry
for artifact in "${artifacts[@]}"; do
download_artifact $artifact
done
local tag="$1" # 'canary' or 'x.y.z'
local artifacts=(
bun-darwin-aarch64.zip
bun-darwin-aarch64-profile.zip
bun-darwin-x64.zip
bun-darwin-x64-profile.zip
bun-linux-aarch64.zip
bun-linux-aarch64-profile.zip
bun-linux-x64.zip
bun-linux-x64-profile.zip
bun-linux-x64-baseline.zip
bun-linux-x64-baseline-profile.zip
bun-windows-x64.zip
bun-windows-x64-profile.zip
bun-windows-x64-baseline.zip
bun-windows-x64-baseline-profile.zip
)
function upload_artifact() {
local artifact="$1"
download_buildkite_artifact "$artifact"
if [ "$tag" == "canary" ]; then
upload_s3_file "releases/$BUILDKITE_COMMIT-canary" "$artifact" &
else
upload_s3_file "releases/$BUILDKITE_COMMIT" "$artifact" &
fi
upload_s3_file "releases/$tag" "$artifact" &
upload_github_asset "$tag" "$artifact" &
wait
}
for artifact in "${artifacts[@]}"; do
upload_artifact "$artifact"
done
update_github_release "$tag"
create_sentry_release "$tag"
}
function assert_canary() {
local canary="$(buildkite-agent meta-data get canary 2>/dev/null)"
if [ -z "$canary" ] || [ "$canary" == "0" ]; then
echo "warn: Skipping release because this is not a canary build"
exit 0
fi
}
assert_canary
create_release "canary"
upload_assets "canary" "${artifacts[@]}"

.gitattributes (vendored): 3 changed lines

@@ -45,6 +45,3 @@ examples/**/* linguist-documentation
src/deps/*.c linguist-vendored
src/deps/brotli/** linguist-vendored
test/js/node/test/fixtures linguist-vendored
test/js/node/test/common linguist-vendored

.github/workflows/build-darwin.yml (vendored, new file): 286 added lines

@@ -0,0 +1,286 @@
name: Build Darwin
permissions:
contents: read
actions: write
on:
workflow_call:
inputs:
runs-on:
type: string
default: macos-13-large
tag:
type: string
required: true
arch:
type: string
required: true
cpu:
type: string
required: true
assertions:
type: boolean
canary:
type: boolean
no-cache:
type: boolean
env:
LLVM_VERSION: 18
BUN_VERSION: 1.1.8
LC_CTYPE: "en_US.UTF-8"
LC_ALL: "en_US.UTF-8"
# LTO is disabled because we cannot use lld on macOS currently
BUN_ENABLE_LTO: "0"
jobs:
build-submodules:
name: Build Submodules
runs-on: ${{ inputs.runs-on }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
sparse-checkout: |
.gitmodules
src/deps
scripts
- name: Hash Submodules
id: hash
run: |
print_versions() {
git submodule | grep -v WebKit
echo "LLVM_VERSION=${{ env.LLVM_VERSION }}"
cat $(echo scripts/build*.sh scripts/all-dependencies.sh | tr " " "\n" | sort)
}
echo "hash=$(print_versions | shasum)" >> $GITHUB_OUTPUT
- name: Install Dependencies
env:
HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK: 1
HOMEBREW_NO_AUTO_UPDATE: 1
HOMEBREW_NO_INSTALL_CLEANUP: 1
run: |
brew install \
llvm@${{ env.LLVM_VERSION }} \
ccache \
rust \
pkg-config \
coreutils \
libtool \
cmake \
libiconv \
automake \
openssl@1.1 \
ninja \
golang \
gnu-sed --force --overwrite
echo "$(brew --prefix ccache)/bin" >> $GITHUB_PATH
echo "$(brew --prefix coreutils)/libexec/gnubin" >> $GITHUB_PATH
echo "$(brew --prefix llvm@$LLVM_VERSION)/bin" >> $GITHUB_PATH
brew link --overwrite llvm@$LLVM_VERSION
- name: Clone Submodules
run: |
./scripts/update-submodules.sh
- name: Build Submodules
env:
CPU_TARGET: ${{ inputs.cpu }}
BUN_DEPS_OUT_DIR: ${{ runner.temp }}/bun-deps
run: |
mkdir -p $BUN_DEPS_OUT_DIR
./scripts/all-dependencies.sh
- name: Upload bun-${{ inputs.tag }}-deps
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-deps
path: ${{ runner.temp }}/bun-deps
if-no-files-found: error
build-cpp:
name: Build C++
runs-on: ${{ inputs.runs-on }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
# TODO: Figure out how to cache homebrew dependencies
- name: Install Dependencies
env:
HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK: 1
HOMEBREW_NO_AUTO_UPDATE: 1
HOMEBREW_NO_INSTALL_CLEANUP: 1
run: |
brew install \
llvm@${{ env.LLVM_VERSION }} \
ccache \
rust \
pkg-config \
coreutils \
libtool \
cmake \
libiconv \
automake \
openssl@1.1 \
ninja \
golang \
gnu-sed --force --overwrite
echo "$(brew --prefix ccache)/bin" >> $GITHUB_PATH
echo "$(brew --prefix coreutils)/libexec/gnubin" >> $GITHUB_PATH
echo "$(brew --prefix llvm@$LLVM_VERSION)/bin" >> $GITHUB_PATH
brew link --overwrite llvm@$LLVM_VERSION
- name: Setup Bun
uses: ./.github/actions/setup-bun
with:
bun-version: ${{ env.BUN_VERSION }}
- name: Compile
env:
CPU_TARGET: ${{ inputs.cpu }}
SOURCE_DIR: ${{ github.workspace }}
OBJ_DIR: ${{ runner.temp }}/bun-cpp-obj
BUN_DEPS_OUT_DIR: ${{ runner.temp }}/bun-deps
CCACHE_DIR: ${{ runner.temp }}/ccache
run: |
mkdir -p $OBJ_DIR
cd $OBJ_DIR
cmake -S $SOURCE_DIR -B $OBJ_DIR \
-G Ninja \
-DCMAKE_BUILD_TYPE=Release \
-DUSE_LTO=ON \
-DBUN_CPP_ONLY=1 \
-DNO_CONFIGURE_DEPENDS=1
chmod +x compile-cpp-only.sh
./compile-cpp-only.sh -v
- name: Upload bun-${{ inputs.tag }}-cpp
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-cpp
path: ${{ runner.temp }}/bun-cpp-obj/bun-cpp-objects.a
if-no-files-found: error
build-zig:
name: Build Zig
uses: ./.github/workflows/build-zig.yml
with:
os: darwin
only-zig: true
tag: ${{ inputs.tag }}
arch: ${{ inputs.arch }}
cpu: ${{ inputs.cpu }}
assertions: ${{ inputs.assertions }}
canary: ${{ inputs.canary }}
no-cache: ${{ inputs.no-cache }}
link:
name: Link
runs-on: ${{ inputs.runs-on }}
needs:
- build-submodules
- build-cpp
- build-zig
steps:
- uses: actions/checkout@v4
# TODO: Figure out how to cache homebrew dependencies
- name: Install Dependencies
env:
HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK: 1
HOMEBREW_NO_AUTO_UPDATE: 1
HOMEBREW_NO_INSTALL_CLEANUP: 1
run: |
brew install \
llvm@${{ env.LLVM_VERSION }} \
ccache \
rust \
pkg-config \
coreutils \
libtool \
cmake \
libiconv \
automake \
openssl@1.1 \
ninja \
golang \
gnu-sed --force --overwrite
echo "$(brew --prefix ccache)/bin" >> $GITHUB_PATH
echo "$(brew --prefix coreutils)/libexec/gnubin" >> $GITHUB_PATH
echo "$(brew --prefix llvm@$LLVM_VERSION)/bin" >> $GITHUB_PATH
brew link --overwrite llvm@$LLVM_VERSION
- name: Setup Bun
uses: ./.github/actions/setup-bun
with:
bun-version: ${{ env.BUN_VERSION }}
- name: Download bun-${{ inputs.tag }}-deps
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-deps
path: ${{ runner.temp }}/bun-deps
- name: Download bun-${{ inputs.tag }}-cpp
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-cpp
path: ${{ runner.temp }}/bun-cpp-obj
- name: Download bun-${{ inputs.tag }}-zig
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-zig
path: ${{ runner.temp }}/release
- name: Link
env:
CPU_TARGET: ${{ inputs.cpu }}
run: |
SRC_DIR=$PWD
mkdir ${{ runner.temp }}/link-build
cd ${{ runner.temp }}/link-build
cmake $SRC_DIR \
-G Ninja \
-DCMAKE_BUILD_TYPE=Release \
-DUSE_LTO=ON \
-DBUN_LINK_ONLY=1 \
-DBUN_ZIG_OBJ_DIR="${{ runner.temp }}/release" \
-DBUN_CPP_ARCHIVE="${{ runner.temp }}/bun-cpp-obj/bun-cpp-objects.a" \
-DBUN_DEPS_OUT_DIR="${{ runner.temp }}/bun-deps" \
-DNO_CONFIGURE_DEPENDS=1
ninja -v
- name: Prepare
run: |
cd ${{ runner.temp }}/link-build
chmod +x bun-profile bun
mkdir -p bun-${{ inputs.tag }}-profile/ bun-${{ inputs.tag }}/
mv bun-profile bun-${{ inputs.tag }}-profile/bun-profile
if [ -f bun-profile.dSYM ] || [ -d bun-profile.dSYM ]; then
mv bun-profile.dSYM bun-${{ inputs.tag }}-profile/bun-profile.dSYM
fi
if [ -f bun.dSYM ] || [ -d bun.dSYM ]; then
mv bun.dSYM bun-${{ inputs.tag }}-profile/bun.dSYM
fi
mv bun bun-${{ inputs.tag }}/bun
zip -r bun-${{ inputs.tag }}-profile.zip bun-${{ inputs.tag }}-profile
zip -r bun-${{ inputs.tag }}.zip bun-${{ inputs.tag }}
- name: Upload bun-${{ inputs.tag }}
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}
path: ${{ runner.temp }}/link-build/bun-${{ inputs.tag }}.zip
if-no-files-found: error
- name: Upload bun-${{ inputs.tag }}-profile
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-profile
path: ${{ runner.temp }}/link-build/bun-${{ inputs.tag }}-profile.zip
if-no-files-found: error
on-failure:
if: ${{ github.repository_owner == 'oven-sh' && failure() }}
name: On Failure
needs: link
runs-on: ubuntu-latest
steps:
- name: Send Message
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.DISCORD_WEBHOOK }}
nodetail: true
color: "#FF0000"
title: ""
description: |
### ❌ [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})
@${{ github.actor }}, the build for bun-${{ inputs.tag }} failed.
**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})**

.github/workflows/build-linux.yml (vendored, new file): 64 added lines

@@ -0,0 +1,64 @@
name: Build Linux
permissions:
contents: read
actions: write
on:
workflow_call:
inputs:
runs-on:
type: string
required: true
tag:
type: string
required: true
arch:
type: string
required: true
cpu:
type: string
required: true
assertions:
type: boolean
zig-optimize:
type: string
canary:
type: boolean
no-cache:
type: boolean
jobs:
build:
name: Build Linux
uses: ./.github/workflows/build-zig.yml
with:
os: linux
only-zig: false
runs-on: ${{ inputs.runs-on }}
tag: ${{ inputs.tag }}
arch: ${{ inputs.arch }}
cpu: ${{ inputs.cpu }}
assertions: ${{ inputs.assertions }}
zig-optimize: ${{ inputs.zig-optimize }}
canary: ${{ inputs.canary }}
no-cache: ${{ inputs.no-cache }}
on-failure:
if: ${{ github.repository_owner == 'oven-sh' && failure() }}
name: On Failure
needs: build
runs-on: ubuntu-latest
steps:
- name: Send Message
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.DISCORD_WEBHOOK }}
nodetail: true
color: "#FF0000"
title: ""
description: |
### ❌ [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})
@${{ github.actor }}, the build for bun-${{ inputs.tag }} failed.
**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})**

.github/workflows/build-windows.yml (vendored, new file): 348 added lines

@@ -0,0 +1,348 @@
name: Build Windows
permissions:
contents: read
actions: write
on:
workflow_call:
inputs:
runs-on:
type: string
default: windows
tag:
type: string
required: true
arch:
type: string
required: true
cpu:
type: string
required: true
assertions:
type: boolean
canary:
type: boolean
no-cache:
type: boolean
bun-version:
type: string
default: 1.1.7
env:
# Must specify exact version of LLVM for Windows
LLVM_VERSION: 18.1.8
BUN_VERSION: ${{ inputs.bun-version }}
BUN_GARBAGE_COLLECTOR_LEVEL: 1
BUN_FEATURE_FLAG_INTERNAL_FOR_TESTING: 1
CI: true
USE_LTO: 1
jobs:
build-submodules:
name: Build Submodules
runs-on: ${{ inputs.runs-on }}
steps:
- name: Install Scoop
run: |
Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
Invoke-RestMethod -Uri https://get.scoop.sh | Invoke-Expression
Join-Path (Resolve-Path ~).Path "scoop\shims" >> $Env:GITHUB_PATH
- name: Setup Git
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- name: Checkout
uses: actions/checkout@v4
with:
sparse-checkout: |
.gitmodules
src/deps
scripts
- name: Hash Submodules
id: hash
run: |
$data = "$(& {
git submodule | Where-Object { $_ -notmatch 'WebKit' }
echo "LLVM_VERSION=${{ env.LLVM_VERSION }}"
Get-Content -Path (Get-ChildItem -Path 'scripts/build*.ps1', 'scripts/all-dependencies.ps1', 'scripts/env.ps1' | Sort-Object -Property Name).FullName | Out-String
echo 1
})"
$hash = ( -join ((New-Object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider).ComputeHash([System.Text.Encoding]::UTF8.GetBytes($data)) | ForEach-Object { $_.ToString("x2") } )).Substring(0, 10)
echo "hash=${hash}" >> $env:GITHUB_OUTPUT
- if: ${{ !inputs.no-cache }}
name: Restore Cache
id: cache
uses: actions/cache/restore@v4
with:
path: bun-deps
key: bun-${{ inputs.tag }}-deps-${{ steps.hash.outputs.hash }}
- if: ${{ inputs.no-cache || !steps.cache.outputs.cache-hit }}
name: Install LLVM and Ninja
run: |
scoop install ninja
scoop install llvm@${{ env.LLVM_VERSION }}
scoop install nasm@2.16.01
- if: ${{ inputs.no-cache || !steps.cache.outputs.cache-hit }}
name: Clone Submodules
run: |
.\scripts\update-submodules.ps1
- if: ${{ inputs.no-cache || !steps.cache.outputs.cache-hit }}
name: Build Dependencies
env:
CPU_TARGET: ${{ inputs.cpu }}
CCACHE_DIR: ccache
USE_LTO: 1
run: |
.\scripts\env.ps1 ${{ contains(inputs.tag, '-baseline') && '-Baseline' || '' }}
$env:BUN_DEPS_OUT_DIR = (mkdir -Force "./bun-deps")
.\scripts\all-dependencies.ps1
- name: Save Cache
if: ${{ inputs.no-cache || !steps.cache.outputs.cache-hit }}
uses: actions/cache/save@v4
with:
path: bun-deps
key: ${{ steps.cache.outputs.cache-primary-key }}
- name: Upload bun-${{ inputs.tag }}-deps
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-deps
path: bun-deps
if-no-files-found: error
codegen:
name: Codegen
runs-on: ubuntu-latest
steps:
- name: Setup Git
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- name: Checkout
uses: actions/checkout@v4
- name: Setup Bun
uses: ./.github/actions/setup-bun
with:
bun-version: ${{ inputs.bun-version }}
- name: Codegen
run: |
./scripts/cross-compile-codegen.sh win32 x64
- if: ${{ inputs.canary }}
name: Calculate Revision
run: |
echo "canary_revision=$(GITHUB_TOKEN="${{ github.token }}"
bash ./scripts/calculate-canary-revision.sh --raw)" > build-codegen-win32-x64/.canary_revision
- name: Upload bun-${{ inputs.tag }}-codegen
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-codegen
path: build-codegen-win32-x64
if-no-files-found: error
build-cpp:
name: Build C++
needs: codegen
runs-on: ${{ inputs.runs-on }}
steps:
- name: Install Scoop
run: |
Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
Invoke-RestMethod -Uri https://get.scoop.sh | Invoke-Expression
Join-Path (Resolve-Path ~).Path "scoop\shims" >> $Env:GITHUB_PATH
- name: Setup Git
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
- name: Install LLVM and Ninja
run: |
scoop install ninja
scoop install llvm@${{ env.LLVM_VERSION }}
- name: Setup Bun
uses: ./.github/actions/setup-bun
with:
bun-version: ${{ inputs.bun-version }}
- if: ${{ !inputs.no-cache }}
name: Restore Cache
uses: actions/cache@v4
with:
path: ccache
key: bun-${{ inputs.tag }}-cpp-${{ hashFiles('Dockerfile', 'Makefile', 'CMakeLists.txt', 'build.zig', 'scripts/**', 'src/**', 'packages/bun-usockets/src/**', 'packages/bun-uws/src/**') }}
restore-keys: |
bun-${{ inputs.tag }}-cpp-
- name: Download bun-${{ inputs.tag }}-codegen
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-codegen
path: build
- name: Compile
env:
CPU_TARGET: ${{ inputs.cpu }}
CCACHE_DIR: ccache
USE_LTO: 1
run: |
# $CANARY_REVISION = if (Test-Path build/.canary_revision) { Get-Content build/.canary_revision } else { "0" }
$CANARY_REVISION = 0
.\scripts\env.ps1 ${{ contains(inputs.tag, '-baseline') && '-Baseline' || '' }}
.\scripts\update-submodules.ps1
.\scripts\build-libuv.ps1 -CloneOnly $True
cd build
cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release `
-DNO_CODEGEN=1 `
-DUSE_LTO=1 `
-DNO_CONFIGURE_DEPENDS=1 `
"-DCANARY=${CANARY_REVISION}" `
-DBUN_CPP_ONLY=1 ${{ contains(inputs.tag, '-baseline') && '-DUSE_BASELINE_BUILD=1' || '' }}
if ($LASTEXITCODE -ne 0) { throw "CMake configuration failed" }
.\compile-cpp-only.ps1 -v
if ($LASTEXITCODE -ne 0) { throw "C++ compilation failed" }
- name: Upload bun-${{ inputs.tag }}-cpp
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-cpp
path: build/bun-cpp-objects.a
if-no-files-found: error
build-zig:
name: Build Zig
uses: ./.github/workflows/build-zig.yml
with:
os: windows
zig-optimize: ReleaseSafe
only-zig: true
tag: ${{ inputs.tag }}
arch: ${{ inputs.arch }}
cpu: ${{ inputs.cpu }}
assertions: ${{ inputs.assertions }}
canary: ${{ inputs.canary }}
no-cache: ${{ inputs.no-cache }}
link:
name: Link
runs-on: ${{ inputs.runs-on }}
needs:
- build-submodules
- build-cpp
- build-zig
- codegen
steps:
- name: Install Scoop
run: |
Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
Invoke-RestMethod -Uri https://get.scoop.sh | Invoke-Expression
Join-Path (Resolve-Path ~).Path "scoop\shims" >> $Env:GITHUB_PATH
- name: Setup Git
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- name: Checkout
uses: actions/checkout@v4
with:
submodules: recursive
- name: Install Ninja
run: |
scoop install ninja
scoop install llvm@${{ env.LLVM_VERSION }}
- name: Setup Bun
uses: ./.github/actions/setup-bun
with:
bun-version: ${{ inputs.bun-version }}
- name: Download bun-${{ inputs.tag }}-deps
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-deps
path: bun-deps
- name: Download bun-${{ inputs.tag }}-cpp
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-cpp
path: bun-cpp
- name: Download bun-${{ inputs.tag }}-zig
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-zig
path: bun-zig
- name: Download bun-${{ inputs.tag }}-codegen
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}-codegen
path: build
- if: ${{ !inputs.no-cache }}
name: Restore Cache
uses: actions/cache@v4
with:
path: ccache
key: bun-${{ inputs.tag }}-cpp-${{ hashFiles('Dockerfile', 'Makefile', 'CMakeLists.txt', 'build.zig', 'scripts/**', 'src/**', 'packages/bun-usockets/src/**', 'packages/bun-uws/src/**') }}
restore-keys: |
bun-${{ inputs.tag }}-cpp-
- name: Link
env:
CPU_TARGET: ${{ inputs.cpu }}
CCACHE_DIR: ccache
run: |
.\scripts\update-submodules.ps1
.\scripts\env.ps1 ${{ contains(inputs.tag, '-baseline') && '-Baseline' || '' }}
Set-Location build
# $CANARY_REVISION = if (Test-Path build/.canary_revision) { Get-Content build/.canary_revision } else { "0" }
$CANARY_REVISION = 0
cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release `
-DNO_CODEGEN=1 `
-DNO_CONFIGURE_DEPENDS=1 `
"-DCANARY=${CANARY_REVISION}" `
-DBUN_LINK_ONLY=1 `
-DUSE_LTO=1 `
"-DBUN_DEPS_OUT_DIR=$(Resolve-Path ../bun-deps)" `
"-DBUN_CPP_ARCHIVE=$(Resolve-Path ../bun-cpp/bun-cpp-objects.a)" `
"-DBUN_ZIG_OBJ_DIR=$(Resolve-Path ../bun-zig)" `
${{ contains(inputs.tag, '-baseline') && '-DUSE_BASELINE_BUILD=1' || '' }}
if ($LASTEXITCODE -ne 0) { throw "CMake configuration failed" }
ninja -v
if ($LASTEXITCODE -ne 0) { throw "Link failed!" }
- name: Prepare
run: |
$Dist = mkdir -Force "bun-${{ inputs.tag }}"
cp -r build\bun.exe "$Dist\bun.exe"
Compress-Archive -Force "$Dist" "${Dist}.zip"
$Dist = "$Dist-profile"
MkDir -Force "$Dist"
cp -r build\bun.exe "$Dist\bun.exe"
cp -r build\bun.pdb "$Dist\bun.pdb"
Compress-Archive -Force "$Dist" "$Dist.zip"
.\build\bun.exe --print "JSON.stringify(require('bun:internal-for-testing').crash_handler.getFeatureData())" > .\features.json
- name: Upload bun-${{ inputs.tag }}
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}
path: bun-${{ inputs.tag }}.zip
if-no-files-found: error
- name: Upload bun-${{ inputs.tag }}-profile
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-profile
path: bun-${{ inputs.tag }}-profile.zip
if-no-files-found: error
- name: Upload bun-feature-data
uses: actions/upload-artifact@v4
with:
name: bun-feature-data
path: features.json
if-no-files-found: error
overwrite: true
on-failure:
if: ${{ github.repository_owner == 'oven-sh' && failure() }}
name: On Failure
needs: link
runs-on: ubuntu-latest
steps:
- name: Send Message
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.DISCORD_WEBHOOK }}
nodetail: true
color: "#FF0000"
title: ""
description: |
### ❌ [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})
@${{ github.actor }}, the build for bun-${{ inputs.tag }} failed.
**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})**

.github/workflows/build-zig.yml (vendored, new file): 122 added lines

@@ -0,0 +1,122 @@
name: Build Zig
permissions:
contents: read
actions: write
on:
workflow_call:
inputs:
runs-on:
type: string
default: ${{ github.repository_owner != 'oven-sh' && 'ubuntu-latest' || inputs.only-zig && 'namespace-profile-bun-ci-linux-x64' || inputs.arch == 'x64' && 'namespace-profile-bun-ci-linux-x64' || 'namespace-profile-bun-ci-linux-aarch64' }}
tag:
type: string
required: true
os:
type: string
required: true
arch:
type: string
required: true
cpu:
type: string
required: true
assertions:
type: boolean
default: false
zig-optimize:
type: string # 'ReleaseSafe' or 'ReleaseFast'
default: ReleaseFast
canary:
type: boolean
default: ${{ github.ref == 'refs/heads/main' }}
only-zig:
type: boolean
default: true
no-cache:
type: boolean
default: false
jobs:
build-zig:
name: ${{ inputs.only-zig && 'Build Zig' || 'Build & Link' }}
runs-on: ${{ inputs.runs-on }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Calculate Cache Key
id: cache
run: |
echo "key=${{ hashFiles('Dockerfile', 'Makefile', 'CMakeLists.txt', 'build.zig', 'scripts/**', 'src/**', 'packages/bun-usockets/src/**', 'packages/bun-uws/src/**') }}" >> $GITHUB_OUTPUT
- if: ${{ !inputs.no-cache }}
name: Restore Cache
uses: actions/cache@v4
with:
key: bun-${{ inputs.tag }}-docker-${{ steps.cache.outputs.key }}
restore-keys: |
bun-${{ inputs.tag }}-docker-
path: |
${{ runner.temp }}/dockercache
- name: Setup Docker
uses: docker/setup-buildx-action@v3
with:
install: true
platforms: |
linux/${{ runner.arch == 'X64' && 'amd64' || 'arm64' }}
- name: Build
uses: docker/build-push-action@v5
with:
push: false
target: ${{ inputs.only-zig && 'build_release_obj' || 'artifact' }}
cache-from: |
type=local,src=${{ runner.temp }}/dockercache
cache-to: |
type=local,dest=${{ runner.temp }}/dockercache,mode=max
outputs: |
type=local,dest=${{ runner.temp }}/release
platforms: |
linux/${{ runner.arch == 'X64' && 'amd64' || 'arm64' }}
build-args: |
GIT_SHA=${{ github.event.workflow_run.head_sha || github.sha }}
TRIPLET=${{ inputs.os == 'darwin' && format('{0}-macos-none', inputs.arch == 'x64' && 'x86_64' || 'aarch64') || inputs.os == 'windows' && format('{0}-windows-msvc', inputs.arch == 'x64' && 'x86_64' || 'aarch64') || format('{0}-linux-gnu', inputs.arch == 'x64' && 'x86_64' || 'aarch64') }}
ARCH=${{ inputs.arch == 'x64' && 'x86_64' || 'aarch64' }}
BUILDARCH=${{ inputs.arch == 'x64' && 'amd64' || 'arm64' }}
BUILD_MACHINE_ARCH=${{ inputs.arch == 'x64' && 'x86_64' || 'aarch64' }}
CPU_TARGET=${{ inputs.arch == 'x64' && inputs.cpu || 'native' }}
ASSERTIONS=${{ inputs.assertions && 'ON' || 'OFF' }}
ZIG_OPTIMIZE=${{ inputs.zig-optimize }}
CANARY=${{ inputs.canary && '1' || '0' }}
- if: ${{ inputs.only-zig }}
name: Upload bun-${{ inputs.tag }}-zig
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-zig
path: ${{ runner.temp }}/release/bun-zig.o
if-no-files-found: error
- if: ${{ !inputs.only-zig }}
name: Prepare
run: |
cd ${{ runner.temp }}/release
chmod +x bun-profile bun
mkdir bun-${{ inputs.tag }}-profile
mkdir bun-${{ inputs.tag }}
strip bun
mv bun-profile bun-${{ inputs.tag }}-profile/bun-profile
mv bun bun-${{ inputs.tag }}/bun
zip -r bun-${{ inputs.tag }}-profile.zip bun-${{ inputs.tag }}-profile
zip -r bun-${{ inputs.tag }}.zip bun-${{ inputs.tag }}
- if: ${{ !inputs.only-zig }}
name: Upload bun-${{ inputs.tag }}
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}
path: ${{ runner.temp }}/release/bun-${{ inputs.tag }}.zip
if-no-files-found: error
- if: ${{ !inputs.only-zig }}
name: Upload bun-${{ inputs.tag }}-profile
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-profile
path: ${{ runner.temp }}/release/bun-${{ inputs.tag }}-profile.zip
if-no-files-found: error

.github/workflows/ci.yml (vendored, new file): 245 added lines

@@ -0,0 +1,245 @@
name: CI
permissions:
contents: read
actions: write
concurrency:
group: ${{ github.workflow }}-${{ github.event_name == 'workflow_dispatch' && inputs.run-id || github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
inputs:
run-id:
type: string
description: The workflow ID to download artifacts (skips the build step)
pull_request:
paths-ignore:
- .vscode/**/*
- docs/**/*
- examples/**/*
push:
branches:
- main
paths-ignore:
- .vscode/**/*
- docs/**/*
- examples/**/*
jobs:
format:
if: ${{ !inputs.run-id }}
name: Format
uses: ./.github/workflows/run-format.yml
secrets: inherit
with:
zig-version: 0.13.0
permissions:
contents: write
lint:
if: ${{ !inputs.run-id }}
name: Lint
uses: ./.github/workflows/run-lint.yml
secrets: inherit
linux-x64:
if: ${{ !inputs.run-id }}
name: Build linux-x64
uses: ./.github/workflows/build-linux.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
tag: linux-x64
arch: x64
cpu: haswell
canary: true
no-cache: true
linux-x64-baseline:
if: ${{ !inputs.run-id }}
name: Build linux-x64-baseline
uses: ./.github/workflows/build-linux.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
tag: linux-x64-baseline
arch: x64
cpu: nehalem
canary: true
no-cache: true
linux-aarch64:
if: ${{ !inputs.run-id && github.repository_owner == 'oven-sh' }}
name: Build linux-aarch64
uses: ./.github/workflows/build-linux.yml
secrets: inherit
with:
runs-on: namespace-profile-bun-ci-linux-aarch64
tag: linux-aarch64
arch: aarch64
cpu: native
canary: true
no-cache: true
darwin-x64:
if: ${{ !inputs.run-id }}
name: Build darwin-x64
uses: ./.github/workflows/build-darwin.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
tag: darwin-x64
arch: x64
cpu: haswell
canary: true
darwin-x64-baseline:
if: ${{ !inputs.run-id }}
name: Build darwin-x64-baseline
uses: ./.github/workflows/build-darwin.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
tag: darwin-x64-baseline
arch: x64
cpu: nehalem
canary: true
darwin-aarch64:
if: ${{ !inputs.run-id }}
name: Build darwin-aarch64
uses: ./.github/workflows/build-darwin.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-darwin-aarch64' || 'macos-13' }}
tag: darwin-aarch64
arch: aarch64
cpu: native
canary: true
windows-x64:
if: ${{ !inputs.run-id }}
name: Build windows-x64
uses: ./.github/workflows/build-windows.yml
secrets: inherit
with:
runs-on: windows
tag: windows-x64
arch: x64
cpu: haswell
canary: true
windows-x64-baseline:
if: ${{ !inputs.run-id }}
name: Build windows-x64-baseline
uses: ./.github/workflows/build-windows.yml
secrets: inherit
with:
runs-on: windows
tag: windows-x64-baseline
arch: x64
cpu: nehalem
canary: true
linux-x64-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
name: Test linux-x64
needs: linux-x64
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
tag: linux-x64
linux-x64-baseline-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
name: Test linux-x64-baseline
needs: linux-x64-baseline
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
tag: linux-x64-baseline
linux-aarch64-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' && github.repository_owner == 'oven-sh'}}
name: Test linux-aarch64
needs: linux-aarch64
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: namespace-profile-bun-ci-linux-aarch64
tag: linux-aarch64
darwin-x64-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
name: Test darwin-x64
needs: darwin-x64
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
tag: darwin-x64
darwin-x64-baseline-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
name: Test darwin-x64-baseline
needs: darwin-x64-baseline
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
tag: darwin-x64-baseline
darwin-aarch64-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
name: Test darwin-aarch64
needs: darwin-aarch64
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-darwin-aarch64' || 'macos-13' }}
tag: darwin-aarch64
windows-x64-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
name: Test windows-x64
needs: windows-x64
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: windows
tag: windows-x64
windows-x64-baseline-test:
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
name: Test windows-x64-baseline
needs: windows-x64-baseline
uses: ./.github/workflows/run-test.yml
secrets: inherit
with:
run-id: ${{ inputs.run-id }}
pr-number: ${{ github.event.number }}
runs-on: windows
tag: windows-x64-baseline
cleanup:
if: ${{ always() }}
name: Cleanup
needs:
- linux-x64
- linux-x64-baseline
- linux-aarch64
- darwin-x64
- darwin-x64-baseline
- darwin-aarch64
- windows-x64
- windows-x64-baseline
runs-on: ubuntu-latest
steps:
- name: Cleanup Artifacts
uses: geekyeggo/delete-artifact@v5
with:
name: |
bun-*-cpp
bun-*-zig
bun-*-deps
bun-*-codegen

.github/workflows/comment.yml (vendored, new file): 55 added lines

@@ -0,0 +1,55 @@
name: Comment
permissions:
actions: read
pull-requests: write
on:
workflow_run:
workflows:
- CI
types:
- completed
jobs:
comment:
if: ${{ github.repository_owner == 'oven-sh' }}
name: Comment
runs-on: ubuntu-latest
steps:
- name: Download Tests
uses: actions/download-artifact@v4
with:
path: bun
pattern: bun-*-tests
github-token: ${{ github.token }}
run-id: ${{ github.event.workflow_run.id }}
- name: Setup Environment
id: env
shell: bash
run: |
echo "pr-number=$(<bun/bun-linux-x64-tests/pr-number.txt)" >> $GITHUB_OUTPUT
- name: Generate Comment
run: |
cat bun/bun-*-tests/comment.md > comment.md
if [ -s comment.md ]; then
echo -e "❌ @${{ github.actor }}, your commit has failing tests :(\n\n$(cat comment.md)" > comment.md
else
echo -e "✅ @${{ github.actor }}, all tests passed!" > comment.md
fi
echo -e "\n**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.event.workflow_run.id }})**" >> comment.md
echo -e "<!-- generated-comment workflow=${{ github.workflow }} -->" >> comment.md
- name: Find Comment
id: comment
uses: peter-evans/find-comment@v3
with:
issue-number: ${{ steps.env.outputs.pr-number }}
comment-author: github-actions[bot]
body-includes: <!-- generated-comment workflow=${{ github.workflow }} -->
- name: Write Comment
uses: peter-evans/create-or-update-comment@v4
with:
comment-id: ${{ steps.comment.outputs.comment-id }}
issue-number: ${{ steps.env.outputs.pr-number }}
body-path: comment.md
edit-mode: replace


@@ -0,0 +1,183 @@
name: Create Release Build
run-name: Compile Bun v${{ inputs.version }} by ${{ github.actor }}
concurrency:
group: release
cancel-in-progress: true
permissions:
contents: write
actions: write
on:
workflow_dispatch:
inputs:
version:
type: string
required: true
description: "Release version. Example: 1.1.4. Exclude the 'v' prefix."
tag:
type: string
required: true
description: "GitHub tag to use"
clobber:
type: boolean
required: false
default: false
description: "Overwrite existing release artifacts?"
release:
types:
- created
jobs:
notify-start:
if: ${{ github.repository_owner == 'oven-sh' }}
name: Notify Start
runs-on: ubuntu-latest
steps:
- name: Send Message
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.DISCORD_WEBHOOK_PUBLIC }}
nodetail: true
color: "#1F6FEB"
title: "Bun v${{ inputs.version }} is compiling"
description: |
### @${{ github.actor }} started compiling Bun v${{inputs.version}}
- name: Send Message
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.BUN_DISCORD_GITHUB_CHANNEL_WEBHOOK }}
nodetail: true
color: "#1F6FEB"
title: "Bun v${{ inputs.version }} is compiling"
description: |
### @${{ github.actor }} started compiling Bun v${{inputs.version}}
**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})**
linux-x64:
name: Build linux-x64
uses: ./.github/workflows/build-linux.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
tag: linux-x64
arch: x64
cpu: haswell
canary: false
linux-x64-baseline:
name: Build linux-x64-baseline
uses: ./.github/workflows/build-linux.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
tag: linux-x64-baseline
arch: x64
cpu: nehalem
canary: false
linux-aarch64:
name: Build linux-aarch64
uses: ./.github/workflows/build-linux.yml
secrets: inherit
with:
runs-on: namespace-profile-bun-ci-linux-aarch64
tag: linux-aarch64
arch: aarch64
cpu: native
canary: false
darwin-x64:
name: Build darwin-x64
uses: ./.github/workflows/build-darwin.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
tag: darwin-x64
arch: x64
cpu: haswell
canary: false
darwin-x64-baseline:
name: Build darwin-x64-baseline
uses: ./.github/workflows/build-darwin.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
tag: darwin-x64-baseline
arch: x64
cpu: nehalem
canary: false
darwin-aarch64:
name: Build darwin-aarch64
uses: ./.github/workflows/build-darwin.yml
secrets: inherit
with:
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-darwin-aarch64' || 'macos-13' }}
tag: darwin-aarch64
arch: aarch64
cpu: native
canary: false
windows-x64:
name: Build windows-x64
uses: ./.github/workflows/build-windows.yml
secrets: inherit
with:
runs-on: windows
tag: windows-x64
arch: x64
cpu: haswell
canary: false
windows-x64-baseline:
name: Build windows-x64-baseline
uses: ./.github/workflows/build-windows.yml
secrets: inherit
with:
runs-on: windows
tag: windows-x64-baseline
arch: x64
cpu: nehalem
canary: false
upload-artifacts:
needs:
- linux-x64
- linux-x64-baseline
- linux-aarch64
- darwin-x64
- darwin-x64-baseline
- darwin-aarch64
- windows-x64
- windows-x64-baseline
runs-on: ubuntu-latest
steps:
- name: Download Artifacts
uses: actions/download-artifact@v4
with:
path: bun-releases
pattern: bun-*
merge-multiple: true
github-token: ${{ github.token }}
- name: Check for Artifacts
run: |
if [ ! -d "bun-releases" ] || [ -z "$(ls -A bun-releases)" ]; then
echo "Error: No artifacts were downloaded or 'bun-releases' directory does not exist."
exit 1 # Fail the job if the condition is met
else
echo "Artifacts downloaded successfully."
fi
- name: Send Message
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.DISCORD_WEBHOOK }}
nodetail: true
color: "#FF0000"
title: "Bun v${{ inputs.version }} release artifacts uploaded"
- name: "Upload Artifacts"
env:
GH_TOKEN: ${{ github.token }}
run: |
# Unzip one level deep each artifact
cd bun-releases
for f in *.zip; do
unzip -o $f
done
cd ..
gh release upload --repo=${{ github.repository }} ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.tag || github.event.release.id }} ${{ inputs.clobber && '--clobber' || '' }} bun-releases/*.zip

View File

@@ -7,42 +7,6 @@ on:
types: [labeled]
jobs:
# on-bug:
# runs-on: ubuntu-latest
# if: github.event.label.name == 'bug' || github.event.label.name == 'crash'
# permissions:
# issues: write
# steps:
# - name: Checkout
# uses: actions/checkout@v4
# with:
# sparse-checkout: |
# scripts
# .github
# CMakeLists.txt
# - name: Setup Bun
# uses: ./.github/actions/setup-bun
# with:
# bun-version: "1.1.24"
# - name: "categorize bug"
# id: add-labels
# env:
# GITHUB_ISSUE_BODY: ${{ github.event.issue.body }}
# GITHUB_ISSUE_TITLE: ${{ github.event.issue.title }}
# ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
# shell: bash
# run: |
# echo '{"dependencies": { "@anthropic-ai/sdk": "latest" }}' > scripts/package.json && bun install --cwd=./scripts
# LABELS=$(bun scripts/label-issue.ts)
# echo "labels=$LABELS" >> $GITHUB_OUTPUT
# - name: Add labels
# uses: actions-cool/issues-helper@v3
# if: steps.add-labels.outputs.labels != ''
# with:
# actions: "add-labels"
# token: ${{ secrets.GITHUB_TOKEN }}
# issue-number: ${{ github.event.issue.number }}
# labels: ${{ steps.add-labels.outputs.labels }}
on-labeled:
runs-on: ubuntu-latest
if: github.event.label.name == 'crash' || github.event.label.name == 'needs repro'

View File

@@ -1,6 +1,3 @@
# TODO: Move this to bash scripts instead of GitHub Actions
# so it can be run from Buildkite, see: .buildkite/scripts/release.sh
name: Release
concurrency: release
@@ -88,9 +85,6 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
with:
# To workaround issue
ref: main
- name: Setup Bun
uses: ./.github/actions/setup-bun
with:

View File

@@ -31,7 +31,7 @@ jobs:
with:
bun-version: "1.1.20"
- name: Setup Zig
uses: mlugg/setup-zig@v1
uses: goto-bus-stop/setup-zig@c7b6cdd3adba8f8b96984640ff172c37c93f73ee
with:
version: ${{ inputs.zig-version }}
- name: Install Dependencies

View File

@@ -3,7 +3,7 @@ name: lint-cpp
permissions:
contents: read
env:
LLVM_VERSION: 18
LLVM_VERSION: 16
LC_CTYPE: "en_US.UTF-8"
LC_ALL: "en_US.UTF-8"
@@ -26,7 +26,7 @@ jobs:
- name: Setup Bun
uses: ./.github/actions/setup-bun
with:
bun-version: 1.1.23
bun-version: latest
- name: Install Dependencies
env:
HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK: 1

.github/workflows/run-test.yml (new file)
View File

@@ -0,0 +1,224 @@
name: Test
permissions:
contents: read
actions: read
on:
workflow_call:
inputs:
runs-on:
type: string
required: true
tag:
type: string
required: true
pr-number:
type: string
required: true
run-id:
type: string
default: ${{ github.run_id }}
jobs:
test:
name: Tests
runs-on: ${{ inputs.runs-on }}
steps:
- if: ${{ runner.os == 'Windows' }}
name: Setup Git
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- name: Checkout
uses: actions/checkout@v4
with:
sparse-checkout: |
package.json
bun.lockb
test
packages/bun-internal-test
packages/bun-types
- name: Setup Environment
shell: bash
run: |
echo "${{ inputs.pr-number }}" > pr-number.txt
- name: Download Bun
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}
path: bun
github-token: ${{ github.token }}
run-id: ${{ inputs.run-id || github.run_id }}
- name: Download pnpm
uses: pnpm/action-setup@v4
with:
version: 8
- if: ${{ runner.os != 'Windows' }}
name: Setup Bun
shell: bash
run: |
unzip bun/bun-*.zip
cd bun-*
pwd >> $GITHUB_PATH
- if: ${{ runner.os == 'Windows' }}
name: Setup Cygwin
uses: secondlife/setup-cygwin@v3
with:
packages: bash
- if: ${{ runner.os == 'Windows' }}
name: Setup Bun (Windows)
run: |
unzip bun/bun-*.zip
cd bun-*
pwd >> $env:GITHUB_PATH
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20
- name: Install Dependencies
timeout-minutes: 5
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
bun install
- name: Install Dependencies (test)
timeout-minutes: 5
run: |
bun install --cwd test
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Install Dependencies (runner)
timeout-minutes: 5
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
bun install --cwd packages/bun-internal-test
- name: Run Tests
id: test
timeout-minutes: 90
shell: bash
env:
IS_BUN_CI: 1
TMPDIR: ${{ runner.temp }}
BUN_TAG: ${{ inputs.tag }}
BUN_FEATURE_FLAG_INTERNAL_FOR_TESTING: "true"
SMTP_SENDGRID_SENDER: ${{ secrets.SMTP_SENDGRID_SENDER }}
TLS_MONGODB_DATABASE_URL: ${{ secrets.TLS_MONGODB_DATABASE_URL }}
TLS_POSTGRES_DATABASE_URL: ${{ secrets.TLS_POSTGRES_DATABASE_URL }}
TEST_INFO_STRIPE: ${{ secrets.TEST_INFO_STRIPE }}
TEST_INFO_AZURE_SERVICE_BUS: ${{ secrets.TEST_INFO_AZURE_SERVICE_BUS }}
SHELLOPTS: igncr
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
node packages/bun-internal-test/src/runner.node.mjs $(which bun)
- if: ${{ always() }}
name: Upload Results
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-tests
path: |
test-report.*
comment.md
pr-number.txt
if-no-files-found: error
overwrite: true
- if: ${{ always() && steps.test.outputs.failing_tests != '' && github.event.pull_request && github.repository_owner == 'oven-sh' }}
name: Send Message
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.DISCORD_WEBHOOK }}
nodetail: true
color: "#FF0000"
title: ""
description: |
### ❌ [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})
@${{ github.actor }}, there are ${{ steps.test.outputs.failing_tests_count || 'some' }} failing tests on bun-${{ inputs.tag }}.
${{ steps.test.outputs.failing_tests }}
**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})**
- name: Fail
if: ${{ failure() || always() && steps.test.outputs.failing_tests != '' }}
run: |
echo "There are ${{ steps.test.outputs.failing_tests_count || 'some' }} failing tests on bun-${{ inputs.tag }}."
exit 1
test-node:
name: Node.js Tests
# TODO: enable when we start paying attention to the results. In the meantime, this causes CI to queue jobs wasting developer time.
if: 0
runs-on: ${{ inputs.runs-on }}
steps:
- if: ${{ runner.os == 'Windows' }}
name: Setup Git
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- name: Checkout
uses: actions/checkout@v4
with:
sparse-checkout: |
test/node.js
- name: Setup Environment
shell: bash
run: |
echo "${{ inputs.pr-number }}" > pr-number.txt
- name: Download Bun
uses: actions/download-artifact@v4
with:
name: bun-${{ inputs.tag }}
path: bun
github-token: ${{ github.token }}
run-id: ${{ inputs.run-id || github.run_id }}
- if: ${{ runner.os != 'Windows' }}
name: Setup Bun
shell: bash
run: |
unzip bun/bun-*.zip
cd bun-*
pwd >> $GITHUB_PATH
- if: ${{ runner.os == 'Windows' }}
name: Setup Cygwin
uses: secondlife/setup-cygwin@v3
with:
packages: bash
- if: ${{ runner.os == 'Windows' }}
name: Setup Bun (Windows)
run: |
unzip bun/bun-*.zip
cd bun-*
pwd >> $env:GITHUB_PATH
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20
- name: Checkout Tests
shell: bash
working-directory: test/node.js
run: |
node runner.mjs --pull
- name: Install Dependencies
timeout-minutes: 5
shell: bash
working-directory: test/node.js
run: |
bun install
- name: Run Tests
timeout-minutes: 10 # Increase when more tests are added
shell: bash
working-directory: test/node.js
env:
TMPDIR: ${{ runner.temp }}
BUN_GARBAGE_COLLECTOR_LEVEL: "0"
BUN_FEATURE_FLAG_INTERNAL_FOR_TESTING: "true"
run: |
node runner.mjs
- name: Upload Results
uses: actions/upload-artifact@v4
with:
name: bun-${{ inputs.tag }}-node-tests
path: |
test/node.js/summary/*.json
if-no-files-found: error
overwrite: true

View File

@@ -1,30 +0,0 @@
name: Close inactive issues
on:
# schedule:
# - cron: "15 * * * *"
workflow_dispatch:
jobs:
close-issues:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v5
with:
days-before-issue-close: 5
any-of-issue-labels: "needs repro,waiting-for-author"
exempt-issue-labels: "neverstale"
exempt-pr-labels: "neverstale"
remove-stale-when-updated: true
stale-issue-label: "stale"
stale-pr-label: "stale"
stale-issue-message: "This issue is stale and may be closed due to inactivity. If you're still running into this, please leave a comment."
close-issue-message: "This issue was closed because it has been inactive for 5 days since being marked as stale."
days-before-pr-stale: 30
days-before-pr-close: 14
stale-pr-message: "This pull request is stale and may be closed due to inactivity."
close-pr-message: "This pull request has been closed due to inactivity."
repo-token: ${{ github.token }}
operations-per-run: 1000

.github/workflows/upload.yml (new file)
View File

@@ -0,0 +1,94 @@
name: Upload Artifacts
run-name: Canary release ${{github.sha}} upload
permissions:
contents: write
on:
workflow_run:
workflows:
- CI
types:
- completed
branches:
- main
jobs:
upload:
if: ${{ github.repository_owner == 'oven-sh' }}
name: Upload Artifacts
runs-on: ubuntu-latest
steps:
- name: Download Artifacts
uses: actions/download-artifact@v4
with:
path: bun
pattern: bun-*
merge-multiple: true
github-token: ${{ github.token }}
run-id: ${{ github.event.workflow_run.id }}
- name: Check for Artifacts
run: |
if [ ! -d "bun" ] || [ -z "$(ls -A bun)" ]; then
echo "Error: No artifacts were downloaded or 'bun' directory does not exist."
exit 1 # Fail the job if the condition is met
else
echo "Artifacts downloaded successfully."
fi
- name: Upload to GitHub Releases
uses: ncipollo/release-action@v1
with:
tag: canary
name: Canary (${{ github.sha }})
prerelease: true
body: This canary release of Bun corresponds to the commit [${{ github.sha }}]
allowUpdates: true
replacesArtifacts: true
generateReleaseNotes: true
artifactErrorsFailBuild: true
artifacts: bun/**/bun-*.zip
token: ${{ github.token }}
- name: Upload to S3 (using SHA)
uses: shallwefootball/s3-upload-action@4350529f410221787ccf424e50133cbc1b52704e
with:
endpoint: ${{ secrets.AWS_ENDPOINT }}
aws_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY}}
aws_bucket: ${{ secrets.AWS_BUCKET }}
source_dir: bun
destination_dir: releases/${{ github.event.workflow_run.head_sha || github.sha }}-canary
- name: Upload to S3 (using tag)
uses: shallwefootball/s3-upload-action@4350529f410221787ccf424e50133cbc1b52704e
with:
endpoint: ${{ secrets.AWS_ENDPOINT }}
aws_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY}}
aws_bucket: ${{ secrets.AWS_BUCKET }}
source_dir: bun
destination_dir: releases/canary
- name: Announce on Discord
uses: sarisia/actions-status-discord@v1
with:
webhook: ${{ secrets.BUN_DISCORD_GITHUB_CHANNEL_WEBHOOK }}
nodetail: true
color: "#1F6FEB"
title: "New Bun Canary available"
url: https://github.com/oven-sh/bun/commit/${{ github.sha }}
description: |
A new canary build of Bun has been automatically uploaded. To upgrade, run:
```sh
bun upgrade --canary
# bun upgrade --stable <- to downgrade
```
# If notifying sentry fails, don't fail the rest of the build.
- name: Notify Sentry
uses: getsentry/action-release@v1.7.0
env:
SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
SENTRY_ORG: ${{ secrets.SENTRY_ORG }}
SENTRY_PROJECT: ${{ secrets.SENTRY_PROJECT }}
with:
ignore_missing: true
ignore_empty: true
version: ${{ github.event.workflow_run.head_sha || github.sha }}-canary
environment: canary

.gitmodules
View File

@@ -76,13 +76,17 @@ ignore = dirty
depth = 1
shallow = true
fetchRecurseSubmodules = false
[submodule "src/deps/libuv"]
path = src/deps/libuv
url = https://github.com/libuv/libuv.git
ignore = dirty
depth = 1
shallow = true
fetchRecurseSubmodules = false
branch = v1.48.0
[submodule "zig"]
path = src/deps/zig
url = https://github.com/oven-sh/zig
depth = 1
shallow = true
fetchRecurseSubmodules = false
[submodule "src/deps/libdeflate"]
path = src/deps/libdeflate
url = https://github.com/ebiggers/libdeflate
ignore = "dirty"

View File

@@ -1,4 +0,0 @@
command script import src/deps/zig/tools/lldb_pretty_printers.py
command script import src/bun.js/WebKit/Tools/lldb/lldb_webkit.py
# type summary add --summary-string "${var} | inner=${var[0-30]}, source=${var[33-64]}, tag=${var[31-32]}" "unsigned long"

.vscode/launch.json
View File

@@ -17,8 +17,6 @@
"cwd": "${workspaceFolder}/test",
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_DEBUG_jest": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "1",
},
"console": "internalConsole",
@@ -34,6 +32,7 @@
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "1",
"BUN_DEBUG_FileReader": "1",
"BUN_DEBUG_jest": "1",
},
"console": "internalConsole",
@@ -54,7 +53,6 @@
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_DEBUG_jest": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "0",
},
"console": "internalConsole",
@@ -69,7 +67,6 @@
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "0",
"BUN_DEBUG_jest": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
},
"console": "internalConsole",
@@ -84,7 +81,6 @@
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_DEBUG_jest": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
},
"console": "internalConsole",
@@ -99,7 +95,6 @@
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_DEBUG_jest": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
},
"console": "internalConsole",
@@ -114,7 +109,6 @@
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_DEBUG_jest": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
"BUN_INSPECT": "ws://localhost:0/?wait=1",
},
@@ -135,7 +129,6 @@
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_DEBUG_jest": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
"BUN_INSPECT": "ws://localhost:0/?break=1",
},
@@ -152,12 +145,14 @@
"request": "launch",
"name": "bun run [file]",
"program": "${workspaceFolder}/build/bun-debug",
"args": ["run", "${fileBasename}"],
"args": ["run", "${file}"],
"cwd": "${fileDirname}",
"env": {
"FORCE_COLOR": "0",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_DEBUG_EventLoop": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
"BUN_DEBUG_ALL": "1",
},
"console": "internalConsole",
},
@@ -268,7 +263,6 @@
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_DEBUG_jest": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
},
"console": "internalConsole",
@@ -283,7 +277,6 @@
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_DEBUG_jest": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "0",
},
"console": "internalConsole",
@@ -297,8 +290,7 @@
"cwd": "${workspaceFolder}/test",
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_DEBUG_jest": "1",
"BUN_DEBUG_QUIET_LOGS": "0",
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
},
"console": "internalConsole",
@@ -313,7 +305,6 @@
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_DEBUG_jest": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
},
"console": "internalConsole",
@@ -328,7 +319,6 @@
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_DEBUG_jest": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
},
"console": "internalConsole",
@@ -343,7 +333,6 @@
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_DEBUG_jest": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
"BUN_INSPECT": "ws://localhost:0/?wait=1",
},
@@ -364,7 +353,6 @@
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_DEBUG_jest": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
"BUN_INSPECT": "ws://localhost:0/?break=1",
},
@@ -460,11 +448,6 @@
"program": "node",
"args": ["test/runner.node.mjs"],
"cwd": "${workspaceFolder}",
"env": {
"FORCE_COLOR": "1",
"BUN_DEBUG_QUIET_LOGS": "1",
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
},
"console": "internalConsole",
},
// Windows: bun test [file]
@@ -491,6 +474,7 @@
"name": "BUN_DEBUG_jest",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "1",
@@ -517,7 +501,19 @@
"value": "1",
},
{
"name": "BUN_DEBUG_jest",
"name": "BUN_DEBUG_EventLoop",
"value": "1",
},
{
"name": "BUN_DEBUG_uv",
"value": "1",
},
{
"name": "BUN_DEBUG_SYS",
"value": "1",
},
{
"name": "BUN_DEBUG_PipeWriter",
"value": "1",
},
{
@@ -545,10 +541,6 @@
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "1",
},
{
"name": "BUN_DEBUG_jest",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "0",
@@ -574,10 +566,6 @@
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "0",
},
{
"name": "BUN_DEBUG_jest",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "2",
@@ -603,10 +591,6 @@
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "1",
},
{
"name": "BUN_DEBUG_jest",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "2",
@@ -641,10 +625,6 @@
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "1",
},
{
"name": "BUN_DEBUG_jest",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "2",
@@ -680,10 +660,6 @@
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "1",
},
{
"name": "BUN_DEBUG_jest",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "2",
@@ -705,10 +681,7 @@
"name": "FORCE_COLOR",
"value": "1",
},
{
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "0",
@@ -732,7 +705,7 @@
},
{
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "1",
"value": "0",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
@@ -828,10 +801,6 @@
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "1",
},
{
"name": "BUN_DEBUG_jest",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "2",
@@ -857,10 +826,6 @@
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "1",
},
{
"name": "BUN_DEBUG_jest",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "0",
@@ -886,10 +851,6 @@
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "0",
},
{
"name": "BUN_DEBUG_jest",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "2",
@@ -915,10 +876,6 @@
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "1",
},
{
"name": "BUN_DEBUG_jest",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "2",
@@ -944,10 +901,6 @@
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "1",
},
{
"name": "BUN_DEBUG_jest",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "2",
@@ -973,10 +926,6 @@
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "1",
},
{
"name": "BUN_DEBUG_jest",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "2",
@@ -1011,10 +960,6 @@
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "1",
},
{
"name": "BUN_DEBUG_jest",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "2",
@@ -1101,10 +1046,6 @@
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "1",
},
{
"name": "BUN_DEBUG_jest",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "0",
@@ -1128,11 +1069,7 @@
},
{
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "1",
},
{
"name": "BUN_DEBUG_jest",
"value": "1",
"value": "0",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
@@ -1159,24 +1096,6 @@
"program": "node",
"args": ["test/runner.node.mjs"],
"cwd": "${workspaceFolder}",
"environment": [
{
"name": "FORCE_COLOR",
"value": "1",
},
{
"name": "BUN_DEBUG_QUIET_LOGS",
"value": "1",
},
{
"name": "BUN_DEBUG_jest",
"value": "1",
},
{
"name": "BUN_GARBAGE_COLLECTOR_LEVEL",
"value": "2",
},
],
"console": "internalConsole",
},
],

View File

@@ -15,9 +15,6 @@
"src/bun.js/WebKit": true,
"src/deps/*/**": true,
"test/node.js/upstream": true,
// This will fill up your whole search history.
"test/js/node/test/fixtures": true,
"test/js/node/test/common": true,
},
"search.followSymlinks": false,
"search.useIgnoreFiles": true,
@@ -45,11 +42,8 @@
"editor.defaultFormatter": "ziglang.vscode-zig",
},
// lldb
"lldb.launch.initCommands": ["command source ${workspaceFolder}/.lldbinit"],
"lldb.verboseLogging": false,
// C++
"lldb.verboseLogging": false,
"cmake.configureOnOpen": false,
"C_Cpp.errorSquiggles": "enabled",
"[cpp]": {
@@ -138,7 +132,6 @@
},
"files.associations": {
"*.idl": "cpp",
"array": "cpp",
},
"C_Cpp.files.exclude": {
"**/.vscode": true,

View File

@@ -3,8 +3,8 @@ cmake_policy(SET CMP0091 NEW)
cmake_policy(SET CMP0067 NEW)
set(CMAKE_POLICY_DEFAULT_CMP0069 NEW)
set(Bun_VERSION "1.1.27")
set(WEBKIT_TAG 147ed53838e21525677492c27099567a6cd19c6b)
set(Bun_VERSION "1.1.21")
set(WEBKIT_TAG 49907bff8781719bc2ded068b0c934f6d0074d1e)
set(BUN_WORKDIR "${CMAKE_CURRENT_BINARY_DIR}")
message(STATUS "Configuring Bun ${Bun_VERSION} in ${BUN_WORKDIR}")
@@ -15,24 +15,8 @@ set(CMAKE_C_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_C_STANDARD_REQUIRED ON)
option(ZIG_CACHE_DIR "Path to the Zig cache directory" "")
if(NOT ZIG_CACHE_DIR)
SET(ZIG_CACHE_DIR "${BUN_WORKDIR}")
cmake_path(APPEND ZIG_CACHE_DIR "zig-cache")
endif()
set(LOCAL_ZIG_CACHE_DIR "${ZIG_CACHE_DIR}")
set(GLOBAL_ZIG_CACHE_DIR "${ZIG_CACHE_DIR}")
cmake_path(APPEND LOCAL_ZIG_CACHE_DIR "local")
cmake_path(APPEND GLOBAL_ZIG_CACHE_DIR "global")
# Used in process.version, process.versions.node, napi, and elsewhere
set(REPORTED_NODEJS_VERSION "22.6.0")
# Used in process.versions.modules and compared while loading V8 modules
set(REPORTED_NODEJS_ABI_VERSION "127")
set(REPORTED_NODEJS_VERSION "22.3.0")
# WebKit uses -std=gnu++20 on non-macOS non-Windows
# If we do not set this, it will crash at startup on the first memory allocation.
@@ -326,7 +310,6 @@ endif()
# -- Build Flags --
option(USE_STATIC_SQLITE "Statically link SQLite?" ${DEFAULT_ON_UNLESS_APPLE})
option(USE_CUSTOM_ZLIB "Use Bun's recommended version of zlib" ON)
option(USE_CUSTOM_LIBDEFLATE "Use Bun's recommended version of libdeflate" ON)
option(USE_CUSTOM_BORINGSSL "Use Bun's recommended version of BoringSSL" ON)
option(USE_CUSTOM_LIBARCHIVE "Use Bun's recommended version of libarchive" ON)
option(USE_CUSTOM_MIMALLOC "Use Bun's recommended version of Mimalloc" ON)
@@ -350,7 +333,7 @@ option(USE_LTO "Enable Link-Time Optimization" ${DEFAULT_LTO})
if(APPLE AND USE_LTO)
set(USE_LTO OFF)
message(FATAL_ERROR "Link-Time Optimization is not supported on macOS because it requires -fuse-ld=lld and lld causes many segfaults on macOS (likely related to stack size)")
message(WARNING "Link-Time Optimization is not supported on macOS because it requires -fuse-ld=lld and lld causes many segfaults on macOS (likely related to stack size)")
endif()
if(WIN32 AND USE_LTO)
@@ -479,8 +462,6 @@ elseif(NOT BUN_CPP_ONLY AND NOT BUN_LINK_ONLY AND NOT BUN_TIDY_ONLY AND NOT BUN_
message(STATUS "Installed Zig Compiler: ${ZIG_COMPILER}")
set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS "build.zig")
message(STATUS "Using zig cache directory: ${ZIG_CACHE_DIR}")
endif()
# Bun
@@ -505,7 +486,7 @@ if(USE_UNIFIED_SOURCES)
endif()
# CCache
# find_program(CCACHE_PROGRAM sccache)
find_program(CCACHE_PROGRAM sccache)
find_program(CCACHE_PROGRAM ccache)
if(CCACHE_PROGRAM)
@@ -667,7 +648,6 @@ file(GLOB BUN_CPP ${CONFIGURE_DEPENDS}
"${BUN_SRC}/bun.js/bindings/sqlite/*.cpp"
"${BUN_SRC}/bun.js/bindings/webcrypto/*.cpp"
"${BUN_SRC}/bun.js/bindings/webcrypto/*/*.cpp"
"${BUN_SRC}/bun.js/bindings/v8/*.cpp"
"${BUN_SRC}/deps/picohttpparser/picohttpparser.c"
)
list(APPEND BUN_RAW_SOURCES ${BUN_CPP})
@@ -708,32 +688,6 @@ add_custom_command(
)
list(APPEND BUN_RAW_SOURCES "${BUN_WORKDIR}/codegen/ZigGeneratedClasses.cpp")
if(NOT NO_CODEGEN)
# --- ErrorCode Generator ---
file(GLOB NODE_ERRORS_TS ${CONFIGURE_DEPENDS}
"${BUN_SRC}/bun.js/bindings/ErrorCode.ts"
)
add_custom_command(
OUTPUT "${BUN_WORKDIR}/codegen/ErrorCode+List.h" "${BUN_WORKDIR}/codegen/ErrorCode+Data.h" "${BUN_WORKDIR}/codegen/ErrorCode.zig"
COMMAND ${BUN_EXECUTABLE} run "${BUN_CODEGEN_SRC}/generate-node-errors.ts" "${BUN_WORKDIR}/codegen"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
MAIN_DEPENDENCY "${BUN_CODEGEN_SRC}/generate-node-errors.ts"
DEPENDS ${NODE_ERRORS_TS}
VERBATIM
COMMENT "Generating ErrorCode.zig"
)
# This needs something to force it to be regenerated
WEBKIT_ADD_SOURCE_DEPENDENCIES(
"${BUN_SRC}/bun.js/bindings/ErrorCode.cpp"
"${BUN_WORKDIR}/codegen/ErrorCode+List.h"
)
WEBKIT_ADD_SOURCE_DEPENDENCIES(
"${BUN_SRC}/bun.js/bindings/ErrorCode.h"
"${BUN_WORKDIR}/codegen/ErrorCode+Data.h"
)
endif()
# --- JSSink Generator ---
add_custom_command(
OUTPUT "${BUN_WORKDIR}/codegen/JSSink.cpp"
@@ -807,7 +761,7 @@ if(NOT NO_CODEGEN)
OUTPUT ${BUN_IDENTIFIER_CACHE_OUT}
MAIN_DEPENDENCY "${BUN_SRC}/js_lexer/identifier_data.zig"
DEPENDS "${BUN_SRC}/js_lexer/identifier_cache.zig"
COMMAND ${ZIG_COMPILER} run "--zig-lib-dir" "${ZIG_LIB_DIR}" "--cache-dir" "${LOCAL_ZIG_CACHE_DIR}" "--global-cache-dir" "${GLOBAL_ZIG_CACHE_DIR}" "${BUN_SRC}/js_lexer/identifier_data.zig"
COMMAND ${ZIG_COMPILER} run "--zig-lib-dir" "${ZIG_LIB_DIR}" "${BUN_SRC}/js_lexer/identifier_data.zig"
VERBATIM
COMMENT "Building Identifier Cache"
)
@@ -828,7 +782,6 @@ if(NOT NO_CODEGEN)
"${BUN_SRC}/js/thirdparty/*.ts"
"${BUN_SRC}/js/internal/*.js"
"${BUN_SRC}/js/internal/*.ts"
"${BUN_SRC}/js/internal/cluster/*.ts"
"${BUN_SRC}/js/internal/util/*.js"
"${BUN_SRC}/js/internal/fs/*.ts"
"${BUN_SRC}/js/node/*.js"
@@ -957,13 +910,10 @@ if(NOT BUN_LINK_ONLY AND NOT BUN_CPP_ONLY)
"-Denable_logs=${ENABLE_LOGS}"
"-Dreported_nodejs_version=${REPORTED_NODEJS_VERSION}"
"-Dobj_format=${BUN_ZIG_OBJ_FORMAT}"
"--cache-dir" "${LOCAL_ZIG_CACHE_DIR}"
"--global-cache-dir" "${GLOBAL_ZIG_CACHE_DIR}"
DEPENDS
"${CMAKE_CURRENT_SOURCE_DIR}/build.zig"
"${ZIG_FILES}"
"${BUN_WORKDIR}/codegen/ZigGeneratedClasses.zig"
"${BUN_WORKDIR}/codegen/ErrorCode.zig"
"${BUN_WORKDIR}/codegen/ResolvedSourceTag.zig"
"${BUN_IDENTIFIER_CACHE_OUT}"
"${BUN_SRC}/api/schema.zig"
@@ -1035,6 +985,7 @@ add_compile_definitions(
"LIBUS_USE_BORINGSSL=1"
"WITH_BORINGSSL=1"
"STATICALLY_LINKED_WITH_JavaScriptCore=1"
"STATICALLY_LINKED_WITH_WTF=1"
"STATICALLY_LINKED_WITH_BMALLOC=1"
"BUILDING_WITH_CMAKE=1"
"JSC_OBJC_API_ENABLED=0"
@@ -1045,24 +996,11 @@ add_compile_definitions(
"BUILDING_JSCONLY__"
"BUN_DYNAMIC_JS_LOAD_PATH=\"${BUN_WORKDIR}/js\""
"REPORTED_NODEJS_VERSION=\"${REPORTED_NODEJS_VERSION}\""
"REPORTED_NODEJS_ABI_VERSION=${REPORTED_NODEJS_ABI_VERSION}"
)
if(NOT ASSERT_ENABLED)
if(APPLE)
add_compile_definitions("_LIBCXX_ENABLE_ASSERTIONS=0")
add_compile_definitions("_LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_NONE")
endif()
add_compile_definitions("NDEBUG=1")
else()
if(APPLE)
add_compile_definitions("_LIBCXX_ENABLE_ASSERTIONS=1")
add_compile_definitions("_LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_DEBUG")
elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux")
add_compile_definitions("_GLIBCXX_ASSERTIONS=1")
endif()
add_compile_definitions("ASSERT_ENABLED=1")
endif()
@@ -1132,25 +1070,12 @@ if(CMAKE_BUILD_TYPE STREQUAL "Debug")
-Werror=uninitialized
-Werror=conditional-uninitialized
-Werror=suspicious-memaccess
-Werror=int-conversion
-Werror=nonnull
-Werror=move
-Werror=sometimes-uninitialized
-Werror=unused
-Wno-unused-function
-Wno-nullability-completeness
-Werror
-fsanitize=null
-fsanitize-recover=all
-fsanitize=bounds
-fsanitize=return
-fsanitize=nullability-arg
-fsanitize=nullability-assign
-fsanitize=nullability-return
-fsanitize=returns-nonnull-attribute
-fsanitize=unreachable
)
target_link_libraries(${bun} PRIVATE -fsanitize=null)
else()
target_compile_options(${bun} PUBLIC /Od /Z7)
endif()
@@ -1172,11 +1097,8 @@ elseif(CMAKE_BUILD_TYPE STREQUAL "Release")
-Werror=uninitialized
-Werror=conditional-uninitialized
-Werror=suspicious-memaccess
-Werror=int-conversion
-Werror=nonnull
-Werror=move
-Werror=sometimes-uninitialized
-Wno-nullability-completeness
-Werror
)
else()
@@ -1273,9 +1195,6 @@ else()
-fno-pic
-fno-pie
-faddrsig
-ffile-prefix-map="${CMAKE_CURRENT_SOURCE_DIR}"=.
-ffile-prefix-map="${BUN_DEPS_DIR}"=src/deps
-ffile-prefix-map="${BUN_DEPS_OUT_DIR}"=src/deps
)
endif()
@@ -1291,7 +1210,7 @@ endif()
if(UNIX AND NOT APPLE)
target_link_options(${bun} PUBLIC
-fuse-ld=lld-${LLVM_VERSION}
-fuse-ld=lld
-fno-pic
-static-libstdc++
-static-libgcc
@@ -1439,19 +1358,6 @@ else()
target_link_libraries(${bun} PRIVATE LibArchive::LibArchive)
endif()
if(USE_CUSTOM_LIBDEFLATE)
include_directories(${BUN_DEPS_DIR}/libdeflate)
if(WIN32)
target_link_libraries(${bun} PRIVATE "${BUN_DEPS_OUT_DIR}/deflate.lib")
else()
target_link_libraries(${bun} PRIVATE "${BUN_DEPS_OUT_DIR}/libdeflate.a")
endif()
else()
find_package(LibDeflate REQUIRED)
target_link_libraries(${bun} PRIVATE LibDeflate::LibDeflate)
endif()
if(USE_CUSTOM_MIMALLOC)
include_directories(${BUN_DEPS_DIR}/mimalloc/include)
@@ -1598,10 +1504,7 @@ endif()
if(NOT WIN32)
target_link_libraries(${bun} PRIVATE "${WEBKIT_LIB_DIR}/libWTF.a")
target_link_libraries(${bun} PRIVATE "${WEBKIT_LIB_DIR}/libJavaScriptCore.a")
if(NOT APPLE OR EXISTS "${WEBKIT_LIB_DIR}/libbmalloc.a")
target_link_libraries(${bun} PRIVATE "${WEBKIT_LIB_DIR}/libbmalloc.a")
endif()
target_link_libraries(${bun} PRIVATE "${WEBKIT_LIB_DIR}/libbmalloc.a")
else()
target_link_libraries(${bun} PRIVATE
"${WEBKIT_LIB_DIR}/WTF.lib"
@@ -1633,14 +1536,12 @@ endif()
if(BUN_TIDY_ONLY)
find_program(CLANG_TIDY_EXE NAMES "clang-tidy")
# webkit ones are disabled because it's noisy, e.g. for JavaScriptCore/Lookup.h
set(CLANG_TIDY_COMMAND "${CLANG_TIDY_EXE}" "-checks=-*,clang-analyzer-*,-clang-analyzer-webkit.UncountedLambdaCapturesChecker,-clang-analyzer-optin.core.EnumCastOutOfRange,-clang-analyzer-webkit.RefCntblBaseVirtualDtor" "--fix" "--fix-errors" "--format-style=webkit" "--warnings-as-errors=*")
set(CLANG_TIDY_COMMAND "${CLANG_TIDY_EXE}" "-checks=-*,clang-analyzer-*,-clang-analyzer-webkit.UncountedLambdaCapturesChecker" "--fix" "--fix-errors" "--format-style=webkit" "--warnings-as-errors=*")
set_target_properties(${bun} PROPERTIES CXX_CLANG_TIDY "${CLANG_TIDY_COMMAND}")
endif()
if(BUN_TIDY_ONLY_EXTRA)
find_program(CLANG_TIDY_EXE NAMES "clang-tidy")
set(CLANG_TIDY_COMMAND "${CLANG_TIDY_EXE}" "-checks=-*,clang-analyzer-*,performance-*,-clang-analyzer-webkit.UncountedLambdaCapturesChecker,-clang-analyzer-optin.core.EnumCastOutOfRange,-clang-analyzer-webkit.RefCntblBaseVirtualDtor" "--fix" "--fix-errors" "--format-style=webkit" "--warnings-as-errors=*")
set(CLANG_TIDY_COMMAND "${CLANG_TIDY_EXE}" "-checks=-*,clang-analyzer-*,performance-*,-clang-analyzer-webkit.UncountedLambdaCapturesChecker" "--fix" "--fix-errors" "--format-style=webkit" "--warnings-as-errors=*")
set_target_properties(${bun} PROPERTIES CXX_CLANG_TIDY "${CLANG_TIDY_COMMAND}")
endif()

View File

@@ -63,7 +63,7 @@ Bun requires LLVM 16 (`clang` is part of LLVM). This version requirement is to m
{% codetabs %}
```bash#macOS (Homebrew)
$ brew install llvm@18
$ brew install llvm@16
```
```bash#Ubuntu/Debian

View File

@@ -52,7 +52,7 @@ ENV CI 1
ENV CPU_TARGET=${CPU_TARGET}
ENV BUILDARCH=${BUILDARCH}
ENV BUN_DEPS_OUT_DIR=${BUN_DEPS_OUT_DIR}
ENV USE_LTO 1
ENV BUN_ENABLE_LTO 1
ENV LC_CTYPE=en_US.UTF-8
ENV LC_ALL=en_US.UTF-8
@@ -263,27 +263,6 @@ RUN --mount=type=cache,target=${CCACHE_DIR} \
&& bash ./scripts/build-zlib.sh && rm -rf src/deps/zlib scripts
FROM bun-base as libdeflate
ARG BUN_DIR
ARG CPU_TARGET
ENV CPU_TARGET=${CPU_TARGET}
ARG CCACHE_DIR=/ccache
ENV CCACHE_DIR=${CCACHE_DIR}
COPY Makefile ${BUN_DIR}/Makefile
COPY CMakeLists.txt ${BUN_DIR}/CMakeLists.txt
COPY scripts ${BUN_DIR}/scripts
COPY src/deps/libdeflate ${BUN_DIR}/src/deps/libdeflate
COPY package.json bun.lockb Makefile .gitmodules ${BUN_DIR}/
WORKDIR $BUN_DIR
RUN --mount=type=cache,target=${CCACHE_DIR} \
cd $BUN_DIR \
&& bash ./scripts/build-libdeflate.sh && rm -rf src/deps/libdeflate scripts
FROM bun-base as libarchive
ARG BUN_DIR
@@ -433,9 +412,6 @@ COPY src ${BUN_DIR}/src
COPY CMakeLists.txt ${BUN_DIR}/CMakeLists.txt
COPY src/deps/boringssl/include ${BUN_DIR}/src/deps/boringssl/include
# for uWebSockets
COPY src/deps/libdeflate ${BUN_DIR}/src/deps/libdeflate
ARG CCACHE_DIR=/ccache
ENV CCACHE_DIR=${CCACHE_DIR}
@@ -540,7 +516,6 @@ COPY src/symbols.dyn src/linker.lds ${BUN_DIR}/src/
COPY CMakeLists.txt ${BUN_DIR}/CMakeLists.txt
COPY --from=zlib ${BUN_DEPS_OUT_DIR}/* ${BUN_DEPS_OUT_DIR}/
COPY --from=libdeflate ${BUN_DEPS_OUT_DIR}/* ${BUN_DEPS_OUT_DIR}/
COPY --from=libarchive ${BUN_DEPS_OUT_DIR}/* ${BUN_DEPS_OUT_DIR}/
COPY --from=boringssl ${BUN_DEPS_OUT_DIR}/* ${BUN_DEPS_OUT_DIR}/
COPY --from=lolhtml ${BUN_DEPS_OUT_DIR}/* ${BUN_DEPS_OUT_DIR}/

LATEST
View File

@@ -1 +1 @@
1.1.26
1.1.20

View File

@@ -34,8 +34,6 @@ Bun statically links these libraries:
| [`c-ares`](https://github.com/c-ares/c-ares) | MIT licensed |
| [`libicu`](https://github.com/unicode-org/icu) 72 | [license here](https://github.com/unicode-org/icu/blob/main/icu4c/LICENSE) |
| [`libbase64`](https://github.com/aklomp/base64/blob/master/LICENSE) | BSD 2-Clause |
| [`libuv`](https://github.com/libuv/libuv) (on Windows) | MIT |
| [`libdeflate`](https://github.com/ebiggers/libdeflate) | MIT |
| A fork of [`uWebsockets`](https://github.com/jarred-sumner/uwebsockets) | Apache 2.0 licensed |
| Parts of [Tigerbeetle's IO code](https://github.com/tigerbeetle/tigerbeetle/blob/532c8b70b9142c17e07737ab6d3da68d7500cbca/src/io/windows.zig#L1) | Apache 2.0 licensed |

View File

@@ -366,7 +366,7 @@ ifeq ($(OS_NAME),linux)
endif
ifeq ($(OS_NAME),darwin)
MACOS_MIN_FLAG=-mmacos-version-min=$(MIN_MACOS_VERSION)
MACOS_MIN_FLAG=-mmacosx-version-min=$(MIN_MACOS_VERSION)
POSIX_PKG_MANAGER=brew
INCLUDE_DIRS += $(MAC_INCLUDE_DIRS)
endif
@@ -1309,7 +1309,6 @@ jsc-build-mac-compile-debug:
-DCMAKE_BUILD_TYPE=Debug \
-DUSE_THIN_ARCHIVES=OFF \
-DENABLE_FTL_JIT=ON \
-DENABLE_MALLOC_HEAP_BREAKDOWN=ON \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
-DUSE_BUN_JSC_ADDITIONS=ON \
-DENABLE_BUN_SKIP_FAILING_ASSERTIONS=ON \

View File

@@ -24,6 +24,8 @@
## What is Bun?
> **Bun is under active development.** Use it to speed up your development workflows or run simpler production code in resource-constrained environments like serverless functions. We're working on more complete Node.js compatibility and integration with existing frameworks. Join the [Discord](https://bun.sh/discord) and watch the [GitHub repository](https://github.com/oven-sh/bun) to keep tabs on future releases.
Bun is an all-in-one toolkit for JavaScript and TypeScript apps. It ships as a single executable called `bun`.
At its core is the _Bun runtime_, a fast JavaScript runtime designed as a drop-in replacement for Node.js. It's written in Zig and powered by JavaScriptCore under the hood, dramatically reducing startup times and memory usage.
@@ -85,19 +87,16 @@ bun upgrade --canary
## Quick links
- Intro
- [What is Bun?](https://bun.sh/docs/index)
- [Installation](https://bun.sh/docs/installation)
- [Quickstart](https://bun.sh/docs/quickstart)
- [TypeScript](https://bun.sh/docs/typescript)
- Templating
- [`bun init`](https://bun.sh/docs/cli/init)
- [`bun create`](https://bun.sh/docs/cli/bun-create)
- Runtime
- [`bun run`](https://bun.sh/docs/cli/run)
- [File types](https://bun.sh/docs/runtime/loaders)
- [TypeScript](https://bun.sh/docs/runtime/typescript)
@@ -116,7 +115,6 @@ bun upgrade --canary
- [Framework API](https://bun.sh/docs/runtime/framework)
- Package manager
- [`bun install`](https://bun.sh/docs/cli/install)
- [`bun add`](https://bun.sh/docs/cli/add)
- [`bun remove`](https://bun.sh/docs/cli/remove)
@@ -132,7 +130,6 @@ bun upgrade --canary
- [Overrides and resolutions](https://bun.sh/docs/install/overrides)
- Bundler
- [`Bun.build`](https://bun.sh/docs/bundler)
- [Loaders](https://bun.sh/docs/bundler/loaders)
- [Plugins](https://bun.sh/docs/bundler/plugins)
@@ -140,7 +137,6 @@ bun upgrade --canary
- [vs esbuild](https://bun.sh/docs/bundler/vs-esbuild)
- Test runner
- [`bun test`](https://bun.sh/docs/cli/test)
- [Writing tests](https://bun.sh/docs/test/writing)
- [Watch mode](https://bun.sh/docs/test/hot)
@@ -152,11 +148,9 @@ bun upgrade --canary
- [Code coverage](https://bun.sh/docs/test/coverage)
- Package runner
- [`bunx`](https://bun.sh/docs/cli/bunx)
- API
- [HTTP server](https://bun.sh/docs/api/http)
- [WebSockets](https://bun.sh/docs/api/websockets)
- [Workers](https://bun.sh/docs/api/workers)
@@ -189,10 +183,9 @@ bun upgrade --canary
- [Building Windows](https://bun.sh/docs/project/building-windows)
- [License](https://bun.sh/docs/project/licensing)
## Guides
- Binary
## Guides
- Binary
- [Convert a Blob to a DataView](https://bun.sh/guides/binary/blob-to-dataview)
- [Convert a Blob to a ReadableStream](https://bun.sh/guides/binary/blob-to-stream)
- [Convert a Blob to a string](https://bun.sh/guides/binary/blob-to-string)
@@ -216,8 +209,7 @@ bun upgrade --canary
- [Convert an ArrayBuffer to a Uint8Array](https://bun.sh/guides/binary/arraybuffer-to-typedarray)
- [Convert an ArrayBuffer to an array of numbers](https://bun.sh/guides/binary/arraybuffer-to-array)
- Ecosystem
- Ecosystem
- [Build a frontend using Vite and Bun](https://bun.sh/guides/ecosystem/vite)
- [Build an app with Astro and Bun](https://bun.sh/guides/ecosystem/astro)
- [Build an app with Next.js and Bun](https://bun.sh/guides/ecosystem/nextjs)
@@ -244,8 +236,7 @@ bun upgrade --canary
- [Use React and JSX](https://bun.sh/guides/ecosystem/react)
- [Add Sentry to a Bun app](https://bun.sh/guides/ecosystem/sentry)
- HTTP
- HTTP
- [Common HTTP server usage](https://bun.sh/guides/http/server)
- [Configure TLS on an HTTP server](https://bun.sh/guides/http/tls)
- [fetch with unix domain sockets in Bun](https://bun.sh/guides/http/fetch-unix)
@@ -259,8 +250,7 @@ bun upgrade --canary
- [Upload files via HTTP using FormData](https://bun.sh/guides/http/file-uploads)
- [Write a simple HTTP server](https://bun.sh/guides/http/simple)
- Install
- Install
- [Add a dependency](https://bun.sh/guides/install/add)
- [Add a development dependency](https://bun.sh/guides/install/add-dev)
- [Add a Git dependency](https://bun.sh/guides/install/add-git)
@@ -278,8 +268,7 @@ bun upgrade --canary
- [Using bun install with an Azure Artifacts npm registry](https://bun.sh/guides/install/azure-artifacts)
- [Using bun install with Artifactory](https://bun.sh/guides/install/jfrog-artifactory)
- Process
- Process
- [Get the process uptime in nanoseconds](https://bun.sh/guides/process/nanoseconds)
- [Listen for CTRL+C](https://bun.sh/guides/process/ctrl-c)
- [Listen to OS signals](https://bun.sh/guides/process/os-signals)
@@ -290,8 +279,7 @@ bun upgrade --canary
- [Spawn a child process](https://bun.sh/guides/process/spawn)
- [Spawn a child process and communicate using IPC](https://bun.sh/guides/process/ipc)
- Read file
- Read file
- [Check if a file exists](https://bun.sh/guides/read-file/exists)
- [Get the MIME type of a file](https://bun.sh/guides/read-file/mime)
- [Read a file as a ReadableStream](https://bun.sh/guides/read-file/stream)
@@ -302,8 +290,7 @@ bun upgrade --canary
- [Read a JSON file](https://bun.sh/guides/read-file/json)
- [Watch a directory for changes](https://bun.sh/guides/read-file/watch)
- Runtime
- Runtime
- [Debugging Bun with the VS Code extension](https://bun.sh/guides/runtime/vscode-debugger)
- [Debugging Bun with the web debugger](https://bun.sh/guides/runtime/web-debugger)
- [Define and replace static globals & constants](https://bun.sh/guides/runtime/define-constant)
@@ -318,8 +305,7 @@ bun upgrade --canary
- [Set a time zone in Bun](https://bun.sh/guides/runtime/timezone)
- [Set environment variables](https://bun.sh/guides/runtime/set-env)
- Streams
- Streams
- [Convert a Node.js Readable to a Blob](https://bun.sh/guides/streams/node-readable-to-blob)
- [Convert a Node.js Readable to a string](https://bun.sh/guides/streams/node-readable-to-string)
- [Convert a Node.js Readable to an ArrayBuffer](https://bun.sh/guides/streams/node-readable-to-arraybuffer)
@@ -332,8 +318,7 @@ bun upgrade --canary
- [Convert a ReadableStream to an ArrayBuffer](https://bun.sh/guides/streams/to-arraybuffer)
- [Convert a ReadableStream to JSON](https://bun.sh/guides/streams/to-json)
- Test
- Test
- [Bail early with the Bun test runner](https://bun.sh/guides/test/bail)
- [Generate code coverage reports with the Bun test runner](https://bun.sh/guides/test/coverage)
- [Mark a test as a "todo" with the Bun test runner](https://bun.sh/guides/test/todo-tests)
@@ -351,8 +336,7 @@ bun upgrade --canary
- [Use snapshot testing in `bun test`](https://bun.sh/guides/test/snapshot)
- [Write browser DOM tests with Bun and happy-dom](https://bun.sh/guides/test/happy-dom)
- Util
- Util
- [Check if the current file is the entrypoint](https://bun.sh/guides/util/entrypoint)
- [Check if two objects are deeply equal](https://bun.sh/guides/util/deep-equals)
- [Compress and decompress data with DEFLATE](https://bun.sh/guides/util/deflate)
@@ -371,14 +355,13 @@ bun upgrade --canary
- [Hash a password](https://bun.sh/guides/util/hash-a-password)
- [Sleep for a fixed number of milliseconds](https://bun.sh/guides/util/sleep)
- WebSocket
- WebSocket
- [Build a publish-subscribe WebSocket server](https://bun.sh/guides/websocket/pubsub)
- [Build a simple WebSocket server](https://bun.sh/guides/websocket/simple)
- [Enable compression for WebSocket messages](https://bun.sh/guides/websocket/compression)
- [Set per-socket contextual data on a WebSocket](https://bun.sh/guides/websocket/context)
- Write file
- Write file
- [Append content to a file](https://bun.sh/guides/write-file/append)
- [Copy a file to another location](https://bun.sh/guides/write-file/file-cp)
- [Delete a file](https://bun.sh/guides/write-file/unlink)

View File

@@ -1,27 +0,0 @@
import { bench, run } from "mitata";
import { expect } from "bun:test";
const MAP_SIZE = 10_000;
function* genPairs(count) {
for (let i = 0; i < MAP_SIZE; i++) {
yield ["k" + i, "v" + i];
}
}
class CustomMap extends Map {
abc = 123;
constructor(iterable) {
super(iterable);
}
}
const a = new Map(genPairs());
const b = new Map(genPairs());
bench("deepEqual Map", () => expect(a).toEqual(b));
const x = new CustomMap(genPairs());
const y = new CustomMap(genPairs());
bench("deepEqual CustomMap", () => expect(x).toEqual(y));
await run();

View File

@@ -1,27 +0,0 @@
import { bench, run } from "mitata";
import { expect } from "bun:test";
const SET_SIZE = 10_000;
function* genValues(count) {
for (let i = 0; i < SET_SIZE; i++) {
yield "v" + i;
}
}
class CustomSet extends Set {
abc = 123;
constructor(iterable) {
super(iterable);
}
}
const a = new Set(genValues());
const b = new Set(genValues());
bench("deepEqual Set", () => expect(a).toEqual(b));
const x = new CustomSet(genValues());
const y = new CustomSet(genValues());
bench("deepEqual CustomSet", () => expect(x).toEqual(y));
await run();

View File

@@ -1,43 +1,20 @@
import { run, bench, group } from "mitata";
import { run, bench } from "mitata";
import { gzipSync, gunzipSync } from "bun";
const data = await Bun.file(require.resolve("@babel/standalone/babel.min.js")).arrayBuffer();
const data = new TextEncoder().encode("Hello World!".repeat(9999));
const compressed = gzipSync(data);
const libraries = ["zlib"];
if (Bun.semver.satisfies(Bun.version.replaceAll("-debug", ""), ">=1.1.21")) {
libraries.push("libdeflate");
}
const options = { library: undefined };
const benchFn = (name, fn) => {
if (libraries.length > 1) {
group(name, () => {
for (const library of libraries) {
bench(library, () => {
options.library = library;
fn();
});
}
});
} else {
options.library = libraries[0];
bench(name, () => {
fn();
});
}
};
benchFn(`roundtrip - @babel/standalone/babel.min.js`, () => {
gunzipSync(gzipSync(data, options), options);
bench(`roundtrip - "Hello World!".repeat(9999))`, () => {
gunzipSync(gzipSync(data));
});
benchFn(`gzipSync(@babel/standalone/babel.min.js`, () => {
gzipSync(data, options);
bench(`gzipSync("Hello World!".repeat(9999)))`, () => {
gzipSync(data);
});
benchFn(`gunzipSync(@babel/standalone/babel.min.js`, () => {
gunzipSync(compressed, options);
bench(`gunzipSync("Hello World!".repeat(9999)))`, () => {
gunzipSync(compressed);
});
await run();
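For reference, the longer benchmark variant above toggles a `library` option on `gzipSync`/`gunzipSync`. A minimal standalone sketch of that usage, assuming a Bun version new enough to accept `"libdeflate"` (per the `>=1.1.21` semver gate in that variant):

```ts
import { gzipSync, gunzipSync } from "bun";

// Sketch: select the compression backend via the `library` option,
// assuming a Bun version that accepts it (see the semver gate above).
const input = new TextEncoder().encode("hello world ".repeat(1024));

const compressed = gzipSync(input, { library: "libdeflate" });
const roundtrip = gunzipSync(compressed, { library: "libdeflate" });

console.log(roundtrip.length === input.length); // true
```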

Binary file not shown.

View File

@@ -1,22 +1,19 @@
import { run, bench } from "mitata";
import { gzipSync, gunzipSync } from "zlib";
import { createRequire } from "module";
import { readFileSync } from "fs";
const require = createRequire(import.meta.url);
const data = readFileSync(require.resolve("@babel/standalone/babel.min.js"));
const data = new TextEncoder().encode("Hello World!".repeat(9999));
const compressed = gzipSync(data);
bench(`roundtrip - @babel/standalone/babel.min.js)`, () => {
bench(`roundtrip - "Hello World!".repeat(9999))`, () => {
gunzipSync(gzipSync(data));
});
bench(`gzipSync(@babel/standalone/babel.min.js))`, () => {
bench(`gzipSync("Hello World!".repeat(9999)))`, () => {
gzipSync(data);
});
bench(`gunzipSync(@babel/standalone/babel.min.js))`, () => {
bench(`gunzipSync("Hello World!".repeat(9999)))`, () => {
gunzipSync(compressed);
});

View File

@@ -7,8 +7,5 @@
"bench:node": "$NODE node.mjs",
"bench:deno": "$DENO run -A --unstable deno.js",
"bench": "bun run bench:bun && bun run bench:node && bun run bench:deno"
},
"dependencies": {
"@babel/standalone": "7.24.10"
}
}

View File

@@ -1,55 +0,0 @@
import { bench, run } from "./runner.mjs";
const latin1 = `hello hello hello!!!! `.repeat(10240);
function create(src) {
function split(str, chunkSize) {
let chunkedHTML = [];
let html = str;
const encoder = new TextEncoder();
while (html.length > 0) {
chunkedHTML.push(encoder.encode(html.slice(0, chunkSize)));
html = html.slice(chunkSize);
}
return chunkedHTML;
}
async function runBench(chunks) {
const decoder = new TextDecoderStream();
const stream = new ReadableStream({
pull(controller) {
for (let chunk of chunks) {
controller.enqueue(chunk);
}
controller.close();
},
}).pipeThrough(decoder);
for (let reader = stream.getReader(); ; ) {
const { done, value } = await reader.read();
if (done) {
break;
}
}
}
// if (new TextDecoder().decode(await runBench(oneKB)) !== src) {
// throw new Error("Benchmark failed");
// }
const sizes = [16 * 1024, 64 * 1024, 256 * 1024];
for (const chunkSize of sizes) {
const text = split(src, chunkSize);
bench(
`${Math.round(src.length / 1024)} KB of text in ${Math.round(chunkSize / 1024) > 0 ? Math.round(chunkSize / 1024) : (chunkSize / 1024).toFixed(2)} KB chunks`,
async () => {
await runBench(text);
},
);
}
}
create(latin1);
create(
// bun's old readme was extremely long
await fetch("https://web.archive.org/web/20230119110956/https://github.com/oven-sh/bun").then(res => res.text()),
);
await run();

View File

@@ -1,49 +0,0 @@
import { bench, run } from "./runner.mjs";
const latin1 = `hello hello hello!!!! `.repeat(10240);
function create(src) {
function split(str, chunkSize) {
let chunkedHTML = [];
let html = str;
while (html.length > 0) {
chunkedHTML.push(html.slice(0, chunkSize));
html = html.slice(chunkSize);
}
return chunkedHTML;
}
async function runBench(chunks) {
const encoderStream = new TextEncoderStream();
const stream = new ReadableStream({
pull(controller) {
for (let chunk of chunks) {
controller.enqueue(chunk);
}
controller.close();
},
}).pipeThrough(encoderStream);
return await new Response(stream).bytes();
}
// if (new TextDecoder().decode(await runBench(oneKB)) !== src) {
// throw new Error("Benchmark failed");
// }
const sizes = [1024, 16 * 1024, 64 * 1024, 256 * 1024];
for (const chunkSize of sizes) {
const text = split(src, chunkSize);
bench(
`${Math.round(src.length / 1024)} KB of text in ${Math.round(chunkSize / 1024) > 0 ? Math.round(chunkSize / 1024) : (chunkSize / 1024).toFixed(2)} KB chunks`,
async () => {
await runBench(text);
},
);
}
}
create(latin1);
create(
// bun's old readme was extremely long
await fetch("https://web.archive.org/web/20230119110956/https://github.com/oven-sh/bun").then(res => res.text()),
);
await run();

View File

@@ -1,31 +0,0 @@
import { run, bench } from "mitata";
import { createRequire } from "module";
const require = createRequire(import.meta.url);
const db = require("better-sqlite3")("./src/northwind.sqlite");
{
const sql = db.prepare(`SELECT * FROM "Order"`);
bench('SELECT * FROM "Order"', () => {
sql.all();
});
}
{
const sql = db.prepare(`SELECT * FROM "Product"`);
bench('SELECT * FROM "Product"', () => {
sql.all();
});
}
{
const sql = db.prepare(`SELECT * FROM "OrderDetail"`);
bench('SELECT * FROM "OrderDetail"', () => {
sql.all();
});
}
await run();

View File

@@ -1,9 +1,8 @@
// Run `node --experimental-sqlite bench/sqlite/node.mjs` to run the script.
// You will need the `--experimental-sqlite` flag and Node.js v22.5.0 or higher to run this script.
import { run, bench } from "mitata";
import { DatabaseSync as Database } from "node:sqlite";
import { createRequire } from "module";
const db = new Database("./src/northwind.sqlite");
const require = createRequire(import.meta.url);
const db = require("better-sqlite3")("./src/northwind.sqlite");
{
const sql = db.prepare(`SELECT * FROM "Order"`);

View File

@@ -49,7 +49,6 @@ const BunBuildOptions = struct {
reported_nodejs_version: Version,
generated_code_dir: []const u8,
no_llvm: bool,
cached_options_module: ?*Module = null,
windows_shim: ?WindowsShim = null,
@@ -89,8 +88,10 @@ const BunBuildOptions = struct {
pub fn getOSVersionMin(os: OperatingSystem) ?Target.Query.OsVersion {
return switch (os) {
// bun needs macOS 12 to work properly due to icucore, but we have been
// compiling everything with 11 as the minimum.
.mac => .{
.semver = .{ .major = 13, .minor = 0, .patch = 0 },
.semver = .{ .major = 11, .minor = 0, .patch = 0 },
},
// Windows 10 1809 is the minimum supported version
@@ -180,8 +181,6 @@ pub fn build(b: *Build) !void {
const obj_format = b.option(ObjectFormat, "obj_format", "Output file for object files") orelse .obj;
const no_llvm = b.option(bool, "no_llvm", "Experiment with Zig self hosted backends. No stability guaranteed") orelse false;
var build_options = BunBuildOptions{
.target = target,
.optimize = optimize,
@@ -190,7 +189,6 @@ pub fn build(b: *Build) !void {
.arch = arch,
.generated_code_dir = generated_code_dir,
.no_llvm = no_llvm,
.version = try Version.parse(bun_version),
.canary_revision = canary: {
@@ -322,7 +320,6 @@ pub inline fn addMultiCheck(
.version = root_build_options.version,
.reported_nodejs_version = root_build_options.reported_nodejs_version,
.generated_code_dir = root_build_options.generated_code_dir,
.no_llvm = root_build_options.no_llvm,
};
var obj = addBunObject(b, &options);
@@ -341,8 +338,6 @@ pub fn addBunObject(b: *Build, opts: *BunBuildOptions) *Compile {
},
.target = opts.target,
.optimize = opts.optimize,
.use_llvm = !opts.no_llvm,
.use_lld = if (opts.os == .mac) false else !opts.no_llvm,
// https://github.com/ziglang/zig/issues/17430
.pic = true,
@@ -451,12 +446,6 @@ fn addInternalPackages(b: *Build, obj: *Compile, opts: *BunBuildOptions) void {
.root_source_file = .{ .cwd_relative = resolved_source_tag_path },
});
const error_code_path = b.pathJoin(&.{ opts.generated_code_dir, "ErrorCode.zig" });
validateGeneratedPath(error_code_path);
obj.root_module.addAnonymousImport("ErrorCode", .{
.root_source_file = .{ .cwd_relative = error_code_path },
});
if (os == .windows) {
obj.root_module.addAnonymousImport("bun_shim_impl.exe", .{
.root_source_file = opts.windowsShim(b).exe.getEmittedBin(),

View File

@@ -82,7 +82,7 @@ _bun_completions() {
declare -A PACKAGE_OPTIONS;
declare -A PM_OPTIONS;
local SUBCOMMANDS="dev bun create run install add remove upgrade completions discord help init pm x test repl update outdated link unlink build";
local SUBCOMMANDS="dev bun create run install add remove upgrade completions discord help init pm x test repl update link unlink build";
GLOBAL_OPTIONS[LONG_OPTIONS]="--use --cwd --bunfile --server-bunfile --config --disable-react-fast-refresh --disable-hmr --env-file --extension-order --jsx-factory --jsx-fragment --extension-order --jsx-factory --jsx-fragment --jsx-import-source --jsx-production --jsx-runtime --main-fields --no-summary --version --platform --public-dir --tsconfig-override --define --external --help --inject --loader --origin --port --dump-environment-variables --dump-limits --disable-bun-js";
GLOBAL_OPTIONS[SHORT_OPTIONS]="-c -v -d -e -h -i -l -u -p";

View File

@@ -179,7 +179,6 @@ complete -c bun -n "__fish_use_subcommand" -a "remove" -d "Remove a dependency f
complete -c bun -n "__fish_use_subcommand" -a "add" -d "Add a dependency to package.json" -f
complete -c bun -n "__fish_use_subcommand" -a "init" -d "Initialize a Bun project in this directory" -f
complete -c bun -n "__fish_use_subcommand" -a "link" -d "Register or link a local npm package" -f
complete -c bun -n "__fish_use_subcommand" -a "unlink" -d "Unregister a local npm package" -f
complete -c bun -n "__fish_use_subcommand" -a "link" -d "Unregister a local npm package" -f
complete -c bun -n "__fish_use_subcommand" -a "pm" -d "Additional package management utilities" -f
complete -c bun -n "__fish_use_subcommand" -a "x" -d "Execute a package binary, installing if needed" -f
complete -c bun -n "__fish_use_subcommand" -a "outdated" -d "Display the latest versions of outdated dependencies" -f

View File

@@ -563,22 +563,6 @@ _bun_update_completion() {
esac
}
_bun_outdated_completion() {
_arguments -s -C \
'--cwd[Set a specific cwd]:cwd' \
'--verbose[Excessively verbose logging]' \
'--no-progress[Disable the progress bar]' \
'--help[Print this help menu]' &&
ret=0
case $state in
config)
_bun_list_bunfig_toml
;;
esac
}
_bun_test_completion() {
_arguments -s -C \
'1: :->cmd1' \
@@ -685,7 +669,6 @@ _bun() {
'add\:"Add a dependency to package.json (bun a)" '
'remove\:"Remove a dependency from package.json (bun rm)" '
'update\:"Update outdated dependencies & save to package.json" '
'outdated\:"Display the latest versions of outdated dependencies" '
'link\:"Link an npm package globally" '
'unlink\:"Globally unlink an npm package" '
'pm\:"More commands for managing packages" '
@@ -757,10 +740,6 @@ _bun() {
update)
_bun_update_completion
;;
outdated)
_bun_outdated_completion
;;
'test')
_bun_test_completion
@@ -840,10 +819,6 @@ _bun() {
update)
_bun_update_completion
;;
outdated)
_bun_outdated_completion
;;
'test')
_bun_test_completion

View File

@@ -219,11 +219,6 @@ The following classes are typed arrays, along with a description of how they int
---
- [`Float16Array`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Float16Array)
- Every two (2) bytes are interpreted as a 16-bit floating point number. Range -6.55e4 to 6.55e4.
---
- [`Float32Array`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Float32Array)
- Every four (4) bytes are interpreted as a 32-bit floating point number. Range -3.4e38 to 3.4e38.
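To make the byte interpretation above concrete, here is a small sketch (plain standard typed arrays, nothing Bun-specific assumed) showing the same four bytes read as a `Float32Array`:

```ts
// The same 4 bytes, viewed two ways.
const buffer = new ArrayBuffer(4);
new Uint8Array(buffer).set([0, 0, 128, 63]); // little-endian bytes of the 32-bit float 1.0

console.log(new Uint8Array(buffer));   // Uint8Array(4) [ 0, 0, 128, 63 ]
console.log(new Float32Array(buffer)); // Float32Array(1) [ 1 ], four bytes read as one float
```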

View File

@@ -42,7 +42,7 @@ const response = await fetch("http://example.com", {
});
```
`body` can be a string, a `FormData` object, an `ArrayBuffer`, a `Blob`, and more. See the [MDN documentation](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API/Using_Fetch#setting_a_body) for more information.
`body` can be a string, a `FormData` object, an `ArrayBuffer`, a `Blob`, and more. See the [MDN documentation](https://developer.mozilla.org/en-US/docs/Web/API/Body/body) for more information.
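As a quick illustration, here is a minimal sketch of a few of those body types in practice (the endpoint URL is a placeholder):

```ts
// Minimal sketch of a few accepted body types; https://example.com/upload is a placeholder.
const form = new FormData();
form.append("name", "bun");

// FormData body: fetch sets the multipart boundary header for you.
await fetch("https://example.com/upload", { method: "POST", body: form });

// Binary body: a Blob (an ArrayBuffer or string works the same way).
await fetch("https://example.com/upload", {
  method: "POST",
  headers: { "Content-Type": "application/octet-stream" },
  body: new Blob([new Uint8Array([1, 2, 3])]),
});
```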
### Proxying requests

View File

@@ -70,116 +70,6 @@ const server = Bun.serve({
});
```
### Static routes
Use the `static` option to serve static `Response` objects by route.
```ts
// Bun v1.1.27+ required
Bun.serve({
static: {
// health-check endpoint
"/api/health-check": new Response("All good!"),
// redirect from /old-link to /new-link
"/old-link": Response.redirect("/new-link", 301),
// serve static text
"/": new Response("Hello World"),
// serve a file by buffering it in memory
"/index.html": new Response(await Bun.file("./index.html").bytes(), {
headers: {
"Content-Type": "text/html",
},
}),
"/favicon.ico": new Response(await Bun.file("./favicon.ico").bytes(), {
headers: {
"Content-Type": "image/x-icon",
},
}),
// serve JSON
"/api/version.json": Response.json({ version: "1.0.0" }),
},
fetch(req) {
return new Response("404!");
},
});
```
Static routes support headers, status codes, and other `Response` options.
```ts
Bun.serve({
static: {
"/api/time": new Response(new Date().toISOString(), {
headers: {
"X-Custom-Header": "Bun!",
},
}),
},
fetch(req) {
return new Response("404!");
},
});
```
Static routes can serve `Response` bodies faster than `fetch` handlers because they don't create `Request` objects, `AbortSignal` instances, or additional `Response` objects. The only per-request memory allocation is the TCP/TLS socket data needed for each request.
{% note %}
`static` is experimental
{% /note %}
Static route responses are cached for the lifetime of the server object. To reload static routes, call `server.reload(options)`.
```ts
const server = Bun.serve({
static: {
"/api/time": new Response(new Date().toISOString()),
},
fetch(req) {
return new Response("404!");
},
});
// Update the time every second.
setInterval(() => {
server.reload({
static: {
"/api/time": new Response(new Date().toISOString()),
},
fetch(req) {
return new Response("404!");
},
});
}, 1000);
```
Reloading static routes only impacts the next request. In-flight requests continue to use the old static routes; once those in-flight requests finish, the old static routes are freed from memory.
To simplify error handling, static routes do not support streaming response bodies from `ReadableStream` or an `AsyncIterator`. Fortunately, you can still buffer the response in memory first:
```ts
const time = await fetch("https://api.example.com/v1/data");
// Buffer the response in memory first.
const blob = await time.blob();
const server = Bun.serve({
static: {
"/api/data": new Response(blob),
},
fetch(req) {
return new Response("404!");
},
});
```
### Changing the `port` and `hostname`
To configure which port and hostname the server will listen on, set `port` and `hostname` in the options object.
@@ -436,24 +326,7 @@ Bun.serve({
});
```
## idleTimeout
To configure the idle timeout, set the `idleTimeout` field in Bun.serve.
```ts
Bun.serve({
// 10 seconds:
idleTimeout: 10,
fetch(req) {
return new Response("Bun!");
},
});
```
This is the maximum amount of time a connection is allowed to be idle before the server closes it. A connection is idle when no data is sent or received.
## export default syntax
## Object syntax
Thus far, the examples on this page have used the explicit `Bun.serve` API. Bun also supports an alternate syntax.

View File

@@ -106,31 +106,6 @@ The `--minify` argument optimizes the size of the transpiled output code. If you
The `--sourcemap` argument embeds a sourcemap compressed with zstd, so that errors & stacktraces point to their original locations instead of the transpiled location. Bun will automatically decompress & resolve the sourcemap when an error occurs.
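As a rough sketch of the corresponding bundler options via the JS API (the `--compile` step itself is applied on the CLI; paths here are hypothetical):
```ts
await Bun.build({
  entrypoints: ["./index.ts"],
  outdir: "./out",
  minify: true,        // mirrors --minify
  sourcemap: "inline", // mirrors --sourcemap ("none" | "inline" | "linked" | "external")
});
```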
## Worker
To use workers in a standalone executable, add the worker's entrypoint to the CLI arguments:
```sh
$ bun build --compile ./index.ts ./my-worker.ts --outfile myapp
```
Then, reference the worker in your code:
```ts
console.log("Hello from Bun!");
// Any of these will work:
new Worker("./my-worker.ts");
new Worker(new URL("./my-worker.ts", import.meta.url));
new Worker(new URL("./my-worker.ts", import.meta.url).href);
```
As of Bun v1.1.25, when you add multiple entrypoints to a standalone executable, they will be bundled separately into the executable.
In the future, we may automatically detect usages of statically-known paths in `new Worker(path)` and bundle those into the executable, but for now, you'll need to add them to the shell command manually, as in the example above.
If you use a relative path to a file not included in the standalone executable, it will attempt to load that path from disk relative to the current working directory of the process (and then error if it doesn't exist).
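As a quick sketch (the file name here is hypothetical), a path that was not passed to the build command is resolved from disk at runtime:
```ts
// "./plugins/extra-worker.ts" was NOT listed in `bun build --compile ...`,
// so the executable resolves it relative to process.cwd() at runtime and
// errors if the file does not exist.
new Worker("./plugins/extra-worker.ts");
```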
## SQLite
You can use `bun:sqlite` imports with `bun build --compile`.
@@ -204,59 +179,6 @@ console.log(addon.hello());
Unfortunately, if you're using `@mapbox/node-pre-gyp` or other similar tools, you'll need to make sure the `.node` file is directly required or it won't bundle correctly.
### Embed directories
To embed a directory with `bun build --compile`, use a shell glob in your `bun build` command:
```sh
$ bun build --compile ./index.ts ./public/**/*.png
```
Then, you can reference the files in your code:
```ts
import icon from "./public/assets/icon.png" with { type: "file" };
import { file } from "bun";
export default {
fetch(req) {
// Embedded files can be streamed from Response objects
return new Response(file(icon));
},
};
```
This is honestly a workaround, and we expect to improve this in the future with a more direct API.
### Listing embedded files
To get a list of all embedded files, use `Bun.embeddedFiles`:
```js
import "./icon.png" with { type: "file" };
import { embeddedFiles } from "bun";
console.log(embeddedFiles[0].name); // `icon-${hash}.png`
```
`Bun.embeddedFiles` returns an array of `Blob` objects which you can use to get the size, contents, and other properties of the files.
```ts
embeddedFiles: Blob[]
```
The list of embedded files excludes bundled source code like `.ts` and `.js` files.
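As a rough sketch (assuming a standalone build that embeds a few assets), the list can be inspected like any array of `Blob`s; per the snippet above, each entry also carries a `name`:
```ts
import { embeddedFiles } from "bun";

for (const blob of embeddedFiles) {
  // Bun attaches a `name` such as "icon-<hash>.png" to each embedded Blob;
  // the cast only satisfies TypeScript's built-in Blob type.
  const { name } = blob as Blob & { name: string };
  console.log(name, blob.size, blob.type);
}
```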
#### Content hash
By default, embedded files have a content hash appended to their name. This is useful for situations where you want to serve the file from a URL or CDN and have fewer cache invalidation issues. But sometimes this is unexpected, and you might want the original name instead.
To disable the content hash, pass `--asset-naming` to `bun build --compile` like this:
```sh
$ bun build --compile --asset-naming="[name].[ext]" ./index.ts
```
## Minification
To trim down the size of the executable a little, pass `--minify` to `bun build --compile`. This uses Bun's minifier to reduce the code size. Overall though, Bun's binary is still way too big and we need to make it smaller.

View File

@@ -1276,7 +1276,7 @@ interface BuildOptions {
loader?: { [k in string]: Loader }; // See https://bun.sh/docs/bundler/loaders
manifest?: boolean; // false
external?: string[]; // []
sourcemap?: "none" | "inline" | "linked" | "external" | "linked" | boolean; // "none"
sourcemap?: "none" | "inline" | "linked" | "external" | boolean; // "none"
root?: string; // computed from entrypoints
naming?:
| string

View File

@@ -208,7 +208,8 @@ In Bun's CLI, simple boolean flags like `--minify` do not accept an argument. Ot
---
- `--ignore-annotations`
- `--ignore-dce-annotations`
- n/a
- Not supported
---

View File

@@ -1,61 +0,0 @@
Use `bun outdated` to display a table of outdated dependencies with their latest versions for the current workspace:
```sh
$ bun outdated
|--------------------------------------------------------------------|
| Package | Current | Update | Latest |
|----------------------------------------|---------|--------|--------|
| @types/bun (dev) | 1.1.6 | 1.1.7 | 1.1.7 |
|----------------------------------------|---------|--------|--------|
| @types/react (dev) | 18.3.3 | 18.3.4 | 18.3.4 |
|----------------------------------------|---------|--------|--------|
| @typescript-eslint/eslint-plugin (dev) | 7.16.1 | 7.18.0 | 8.2.0 |
|----------------------------------------|---------|--------|--------|
| @typescript-eslint/parser (dev) | 7.16.1 | 7.18.0 | 8.2.0 |
|----------------------------------------|---------|--------|--------|
| esbuild (dev) | 0.21.5 | 0.21.5 | 0.23.1 |
|----------------------------------------|---------|--------|--------|
| eslint (dev) | 9.7.0 | 9.9.1 | 9.9.1 |
|----------------------------------------|---------|--------|--------|
| typescript (dev) | 5.5.3 | 5.5.4 | 5.5.4 |
|--------------------------------------------------------------------|
```
The `Update` column shows the version that would be installed if you ran `bun update [package]`. This version is the latest version that satisfies the version range specified in your `package.json`.
The `Latest` column shows the latest version available from the registry. `bun update --latest [package]` will update to this version.
Dependency names can be provided to filter the output (pattern matching is supported):
```sh
$ bun outdated "@types/*"
|------------------------------------------------|
| Package | Current | Update | Latest |
|--------------------|---------|--------|--------|
| @types/bun (dev) | 1.1.6 | 1.1.8 | 1.1.8 |
|--------------------|---------|--------|--------|
| @types/react (dev) | 18.3.3 | 18.3.4 | 18.3.4 |
|------------------------------------------------|
```
## `--filter`
The `--filter` flag can be used to select workspaces to include in the output. Workspace names or paths can be used as patterns.
```sh
$ bun outdated --filter <pattern>
```
For example, to only show outdated dependencies for workspaces in the `./apps` directory:
```sh
$ bun outdated --filter './apps/*'
```
If you want to do the same, but exclude the `./apps/api` workspace:
```sh
$ bun outdated --filter './apps/*' --filter '!./apps/api'
```

View File

@@ -10,7 +10,7 @@ This automatically load balances incoming requests across multiple instances of
```ts#server.ts
import { serve } from "bun";
const id = Math.random().toString(36).slice(2);
const id = = Math.random().toString(36).slice(2);
serve({
port: process.env.PORT || 8080,

View File

@@ -5,8 +5,8 @@ name: Define and replace static globals & constants
The `--define` flag lets you declare statically-analyzable constants and globals. It replaces all usages of an identifier or property in a JavaScript or TypeScript file with a constant value. This feature is supported at runtime and also in `bun build`. This is sort of similar to `#define` in C/C++, except for JavaScript.
```ts
bun --define process.env.NODE_ENV="'production'" src/index.ts # Runtime
bun build --define process.env.NODE_ENV="'production'" src/index.ts # Build
bun --define:process.env.NODE_ENV="'production'" src/index.ts # Runtime
bun build --define:process.env.NODE_ENV="'production'" src/index.ts # Build
```
---
@@ -95,7 +95,7 @@ To replace all usages of `AWS` with the JSON object `{"ACCESS_KEY":"abc","SECRET
```sh
# JSON
bun --define AWS='{"ACCESS_KEY":"abc","SECRET_KEY":"def"}' src/index.ts
bun --define:AWS='{"ACCESS_KEY":"abc","SECRET_KEY":"def"}' src/index.ts
```
Those will be transformed into the equivalent JavaScript code.
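As a rough illustration (the input below is hypothetical, and the output is only approximate), the identifier is swapped for the object literal during parsing:
```ts
// Before: `AWS` is a bare identifier, assumed to be supplied via the --define flag above.
declare const AWS: { ACCESS_KEY: string; SECRET_KEY: string };
console.log(AWS.ACCESS_KEY);

// After the define is applied, the code behaves roughly like:
console.log({ ACCESS_KEY: "abc", SECRET_KEY: "def" }.ACCESS_KEY); // => "abc"
```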
@@ -119,7 +119,7 @@ You can also pass properties to the `--define` flag.
For example, to replace all usages of `console.write` with `console.log`, you can use the following command (requires Bun v1.1.5 or later)
```sh
bun --define console.write=console.log src/index.ts
bun --define:console.write=console.log src/index.ts
```
That transforms the following input:

View File

@@ -27,28 +27,6 @@ data.version; // => "1.0.0"
data.author.name; // => "John Dough"
```
Bun also supports [Import Attributes](https://github.com/tc39/proposal-import-attributes/) and [JSON modules](https://github.com/tc39/proposal-json-modules) syntax.
```ts
import data from "./package.json" with { type: "json" };
data.name; // => "bun"
data.version; // => "1.0.0"
data.author.name; // => "John Dough"
```
---
Bun also supports [Import Attributes](https://github.com/tc39/proposal-import-attributes/) and [JSON modules](https://github.com/tc39/proposal-json-modules) syntax.
```ts
import data from "./package.json" with { type: "json" };
data.name; // => "bun"
data.version; // => "1.0.0"
data.author.name; // => "John Dough"
```
---
See [Docs > Runtime > TypeScript](/docs/runtime/typescript) for more information on using TypeScript with Bun.

View File

@@ -16,6 +16,10 @@ $ bun test # run tests
$ bunx cowsay 'Hello, world!' # execute a package
```
{% callout type="note" %}
**Bun is still under development.** Use it to speed up your development workflows or run simpler production code in resource-constrained environments like serverless functions. We're working on more complete Node.js compatibility and integration with existing frameworks. Join the [Discord](https://bun.sh/discord) and watch the [GitHub repository](https://github.com/oven-sh/bun) to keep tabs on future releases.
{% /callout %}
Get started with one of the quick links below, or read on to learn more about Bun.
{% block className="gap-2 grid grid-flow-row grid-cols-1 md:grid-cols-2" %}

View File

@@ -164,9 +164,6 @@ export default {
page("cli/update", "`bun update`", {
description: "Update your project's dependencies.",
}),
page("cli/outdated", "`bun outdated`", {
description: "Check for outdated dependencies.",
}),
page("cli/link", "`bun link`", {
description: "Install local packages as dependencies in your project.",
}),

View File

@@ -171,8 +171,6 @@ Once imported, you should see something like this:
{% image alt="Viewing heap snapshot in Safari" src="https://user-images.githubusercontent.com/709451/204429337-b0d8935f-3509-4071-b991-217794d1fb27.png" caption="Viewing heap snapshot in Safari Dev Tools" /%}
> The [web debugger](https://bun.sh/docs/runtime/debugger#inspect) also offers the timeline feature which allows you to track and examine the memory usage of the running debug session.
### Native heap stats
Bun uses mimalloc for the non-JavaScript heap. To report a summary of non-JavaScript memory usage, set the `MIMALLOC_SHOW_STATS=1` environment variable, and stats will print on exit.

View File

@@ -22,7 +22,7 @@ This page is updated regularly to reflect compatibility status of the latest ver
### [`node:cluster`](https://nodejs.org/api/cluster.html)
🟡 Handles and file descriptors cannot be passed between workers, which means load-balancing HTTP requests across processes is only supported on Linux at this time (via `SO_REUSEPORT`). Otherwise, implemented but not battle-tested.
🔴 Not implemented.
### [`node:console`](https://nodejs.org/api/console.html)
@@ -153,7 +153,7 @@ Some methods are not optimized yet.
### [`node:util`](https://nodejs.org/api/util.html)
🟡 Missing `MIMEParams` `MIMEType` `aborted` `debug` `getSystemErrorMap` `transferableAbortController` `transferableAbortSignal` `stripVTControlCharacters`
🟡 Missing `MIMEParams` `MIMEType` `aborted` `debug` `getSystemErrorMap` `getSystemErrorName` `transferableAbortController` `transferableAbortSignal` `stripVTControlCharacters`
### [`node:v8`](https://nodejs.org/api/v8.html)
@@ -341,7 +341,7 @@ The table below lists all globals implemented by Node.js and Bun's current compa
### [`process`](https://nodejs.org/api/process.html)
🟡 Missing `domain` `initgroups` `setegid` `seteuid` `setgid` `setgroups` `setuid` `allowedNodeEnvironmentFlags` `getActiveResourcesInfo` `setActiveResourcesInfo` `moduleLoadList` `setSourceMapsEnabled`. `process.binding` is partially implemented.
🟡 Missing `domain` `initgroups` `setegid` `seteuid` `setgid` `setgroups` `setuid` `allowedNodeEnvironmentFlags` `getActiveResourcesInfo` `setActiveResourcesInfo` `moduleLoadList` `setSourceMapsEnabled` `channel`. `process.binding` is partially implemented.
### [`queueMicrotask()`](https://developer.mozilla.org/en-US/docs/Web/API/queueMicrotask)
@@ -413,7 +413,7 @@ The table below lists all globals implemented by Node.js and Bun's current compa
### [`TextDecoderStream`](https://developer.mozilla.org/en-US/docs/Web/API/TextDecoderStream)
🟢 Fully implemented.
🔴 Not implemented.
### [`TextEncoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder)
@@ -421,7 +421,7 @@ The table below lists all globals implemented by Node.js and Bun's current compa
### [`TextEncoderStream`](https://developer.mozilla.org/en-US/docs/Web/API/TextEncoderStream)
🟢 Fully implemented.
🔴 Not implemented.
### [`TransformStream`](https://developer.mozilla.org/en-US/docs/Web/API/TransformStream)

View File

@@ -235,55 +235,6 @@ const result = await $`cat < ${response} | wc -w`.text();
console.log(result); // 6\n
```
## Command substitution (`$(...)`)
Command substitution allows you to substitute the output of another script into the current script:
```js
import { $ } from "bun";
// Prints out the hash of the current commit
await $`echo Hash of current commit: $(git rev-parse HEAD)`;
```
This is a textual insertion of the command's output and can be used to, for example, declare a shell variable:
```js
import { $ } from "bun";
await $`
REV=$(git rev-parse HEAD)
docker build -t myapp:$REV
echo Done building docker image "myapp:$REV"
`;
```
{% callout %}
**NOTE**: Because Bun internally uses the special [`raw`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Template_literals#raw_strings) property on the input template literal, using the backtick syntax for command substitution won't work:
```js
import { $ } from "bun";
await $`echo \`echo hi\``;
```
Instead of printing:
```
hi
```
The above will print out:
```
echo hi
```
We instead recommend sticking to the `$(...)` syntax.
{% /callout %}
## Environment variables
Environment variables can be set like in bash:

View File

@@ -7,37 +7,22 @@ The following Web APIs are partially or completely supported.
---
- HTTP
- [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/fetch)
[`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response)
[`Request`](https://developer.mozilla.org/en-US/docs/Web/API/Request)
[`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers)
[`AbortController`](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
[`AbortSignal`](https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal)
- [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/fetch) [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) [`Request`](https://developer.mozilla.org/en-US/docs/Web/API/Request) [`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers) [`AbortController`](https://developer.mozilla.org/en-US/docs/Web/API/AbortController) [`AbortSignal`](https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal)
---
- URLs
- [`URL`](https://developer.mozilla.org/en-US/docs/Web/API/URL)
[`URLSearchParams`](https://developer.mozilla.org/en-US/docs/Web/API/URLSearchParams)
- [`URL`](https://developer.mozilla.org/en-US/docs/Web/API/URL) [`URLSearchParams`](https://developer.mozilla.org/en-US/docs/Web/API/URLSearchParams)
---
- Web Workers
- [`Worker`](https://developer.mozilla.org/en-US/docs/Web/API/Worker)
[`self.postMessage`](https://developer.mozilla.org/en-US/docs/Web/API/DedicatedWorkerGlobalScope/postMessage)
[`structuredClone`](https://developer.mozilla.org/en-US/docs/Web/API/structuredClone)
[`MessagePort`](https://developer.mozilla.org/en-US/docs/Web/API/MessagePort)
[`MessageChannel`](https://developer.mozilla.org/en-US/docs/Web/API/MessageChannel)
[`BroadcastChannel`](https://developer.mozilla.org/en-US/docs/Web/API/BroadcastChannel).
- [`Worker`](https://developer.mozilla.org/en-US/docs/Web/API/Worker) [`self.postMessage`](https://developer.mozilla.org/en-US/docs/Web/API/DedicatedWorkerGlobalScope/postMessage) [`structuredClone`](https://developer.mozilla.org/en-US/docs/Web/API/structuredClone) [`MessagePort`](https://developer.mozilla.org/en-US/docs/Web/API/MessagePort) [`MessageChannel`](https://developer.mozilla.org/en-US/docs/Web/API/MessageChannel), [`BroadcastChannel`](https://developer.mozilla.org/en-US/docs/Web/API/BroadcastChannel).
---
- Streams
- [`ReadableStream`](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream)
[`WritableStream`](https://developer.mozilla.org/en-US/docs/Web/API/WritableStream)
[`TransformStream`](https://developer.mozilla.org/en-US/docs/Web/API/TransformStream)
[`ByteLengthQueuingStrategy`](https://developer.mozilla.org/en-US/docs/Web/API/ByteLengthQueuingStrategy)
[`CountQueuingStrategy`](https://developer.mozilla.org/en-US/docs/Web/API/CountQueuingStrategy) and associated classes
- [`ReadableStream`](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) [`WritableStream`](https://developer.mozilla.org/en-US/docs/Web/API/WritableStream) [`TransformStream`](https://developer.mozilla.org/en-US/docs/Web/API/TransformStream) [`ByteLengthQueuingStrategy`](https://developer.mozilla.org/en-US/docs/Web/API/ByteLengthQueuingStrategy) [`CountQueuingStrategy`](https://developer.mozilla.org/en-US/docs/Web/API/CountQueuingStrategy) and associated classes
---
@@ -52,10 +37,7 @@ The following Web APIs are partially or completely supported.
---
- Encoding and decoding
- [`atob`](https://developer.mozilla.org/en-US/docs/Web/API/atob)
[`btoa`](https://developer.mozilla.org/en-US/docs/Web/API/btoa)
[`TextEncoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder)
[`TextDecoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextDecoder)
- [`atob`](https://developer.mozilla.org/en-US/docs/Web/API/atob) [`btoa`](https://developer.mozilla.org/en-US/docs/Web/API/btoa) [`TextEncoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder) [`TextDecoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextDecoder)
---
@@ -65,28 +47,24 @@ The following Web APIs are partially or completely supported.
---
- Timeouts
- [`setTimeout`](https://developer.mozilla.org/en-US/docs/Web/API/setTimeout)
[`clearTimeout`](https://developer.mozilla.org/en-US/docs/Web/API/clearTimeout)
- [`setTimeout`](https://developer.mozilla.org/en-US/docs/Web/API/setTimeout) [`clearTimeout`](https://developer.mozilla.org/en-US/docs/Web/API/clearTimeout)
---
- Intervals
- [`setInterval`](https://developer.mozilla.org/en-US/docs/Web/API/setInterval)
[`clearInterval`](https://developer.mozilla.org/en-US/docs/Web/API/clearInterval)
- [`setInterval`](https://developer.mozilla.org/en-US/docs/Web/API/setInterval) [`clearInterval`](https://developer.mozilla.org/en-US/docs/Web/API/clearInterval)
---
- Crypto
- [`crypto`](https://developer.mozilla.org/en-US/docs/Web/API/Crypto)
[`SubtleCrypto`](https://developer.mozilla.org/en-US/docs/Web/API/SubtleCrypto)
- [`crypto`](https://developer.mozilla.org/en-US/docs/Web/API/Crypto) [`SubtleCrypto`](https://developer.mozilla.org/en-US/docs/Web/API/SubtleCrypto)
[`CryptoKey`](https://developer.mozilla.org/en-US/docs/Web/API/CryptoKey)
---
- Debugging
- [`console`](https://developer.mozilla.org/en-US/docs/Web/API/console)
[`performance`](https://developer.mozilla.org/en-US/docs/Web/API/Performance)
- [`console`](https://developer.mozilla.org/en-US/docs/Web/API/console) [`performance`](https://developer.mozilla.org/en-US/docs/Web/API/Performance)
---
@@ -101,9 +79,7 @@ The following Web APIs are partially or completely supported.
---
- User interaction
- [`alert`](https://developer.mozilla.org/en-US/docs/Web/API/Window/alert)
[`confirm`](https://developer.mozilla.org/en-US/docs/Web/API/Window/confirm)
[`prompt`](https://developer.mozilla.org/en-US/docs/Web/API/Window/prompt) (intended for interactive CLIs)
- [`alert`](https://developer.mozilla.org/en-US/docs/Web/API/Window/alert) [`confirm`](https://developer.mozilla.org/en-US/docs/Web/API/Window/confirm) [`prompt`](https://developer.mozilla.org/en-US/docs/Web/API/Window/prompt) (intended for interactive CLIs)
<!-- - Blocking. Prints the alert message to terminal and awaits `[ENTER]` before proceeding. -->
<!-- - Blocking. Prints confirmation message and awaits `[y/N]` input from user. Returns `true` if user entered `y` or `Y`, `false` otherwise.
@@ -118,10 +94,7 @@ The following Web APIs are partially or completely supported.
- Events
- [`EventTarget`](https://developer.mozilla.org/en-US/docs/Web/API/EventTarget)
[`Event`](https://developer.mozilla.org/en-US/docs/Web/API/Event)
[`ErrorEvent`](https://developer.mozilla.org/en-US/docs/Web/API/ErrorEvent)
[`CloseEvent`](https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent)
[`MessageEvent`](https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent)
[`Event`](https://developer.mozilla.org/en-US/docs/Web/API/Event) [`ErrorEvent`](https://developer.mozilla.org/en-US/docs/Web/API/ErrorEvent) [`CloseEvent`](https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent) [`MessageEvent`](https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent)
---

View File

@@ -195,6 +195,7 @@ pub fn main() anyerror!void {
args.headers_buf,
response_body_string,
args.body,
0,
HTTP.FetchRedirect.follow,
),
};

View File

@@ -31,6 +31,7 @@ const params = [_]clap.Param(clap.Help){
clap.parseParam("-b, --body <STR> HTTP request body as a string") catch unreachable,
clap.parseParam("-f, --file <STR> File path to load as body") catch unreachable,
clap.parseParam("-n, --count <INT> How many runs? Default 10") catch unreachable,
clap.parseParam("-t, --timeout <INT> Max duration per request") catch unreachable,
clap.parseParam("-r, --retry <INT> Max retry count") catch unreachable,
clap.parseParam("--no-gzip Disable gzip") catch unreachable,
clap.parseParam("--no-deflate Disable deflate") catch unreachable,
@@ -74,6 +75,7 @@ pub const Arguments = struct {
body: string = "",
turbo: bool = false,
count: usize = 10,
timeout: usize = 0,
repeat: usize = 0,
concurrency: u16 = 32,
@@ -163,6 +165,10 @@ pub const Arguments = struct {
// .keep_alive = !args.flag("--no-keep-alive"),
.concurrency = std.fmt.parseInt(u16, args.option("--max-concurrency") orelse "32", 10) catch 32,
.turbo = args.flag("--turbo"),
.timeout = std.fmt.parseInt(usize, args.option("--timeout") orelse "0", 10) catch |err| {
Output.prettyErrorln("<r><red>{s}<r> parsing timeout", .{@errorName(err)});
Global.exit(1);
},
.count = std.fmt.parseInt(usize, args.option("--count") orelse "10", 10) catch |err| {
Output.prettyErrorln("<r><red>{s}<r> parsing count", .{@errorName(err)});
Global.exit(1);
@@ -219,6 +225,7 @@ pub fn main() anyerror!void {
args.headers_buf,
response_body,
"",
args.timeout,
),
};
ctx.http.client.verbose = args.verbose;

View File

@@ -29,7 +29,7 @@
"bump": "bun ./scripts/bump.ts",
"build": "if [ ! -e build ]; then bun setup; fi && ninja -C build",
"build:valgrind": "cmake . -DZIG_OPTIMIZE=Debug -DUSE_DEBUG_JSC=ON -DCMAKE_BUILD_TYPE=Debug -GNinja -Bbuild-valgrind && ninja -Cbuild-valgrind",
"build:tidy": "bash ./scripts/env.sh && BUN_SILENT=1 cmake --log-level=WARNING . ${CMAKE_FLAGS[@]} -DZIG_OPTIMIZE=Debug -DUSE_DEBUG_JSC=ON -DBUN_TIDY_ONLY=ON -DCMAKE_BUILD_TYPE=Debug -GNinja -Bbuild-tidy && BUN_SILENT=1 ninja -Cbuild-tidy",
"build:tidy": "BUN_SILENT=1 cmake --log-level=WARNING . -DZIG_OPTIMIZE=Debug -DUSE_DEBUG_JSC=ON -DBUN_TIDY_ONLY=ON -DCMAKE_BUILD_TYPE=Debug -GNinja -Bbuild-tidy >> ${GITHUB_STEP_SUMMARY:-/dev/stdout} && BUN_SILENT=1 ninja -Cbuild-tidy >> ${GITHUB_STEP_SUMMARY:-/dev/stdout}",
"build:tidy-extra": "cmake . -DZIG_OPTIMIZE=Debug -DUSE_DEBUG_JSC=ON -DBUN_TIDY_ONLY_EXTRA=ON -DCMAKE_BUILD_TYPE=Debug -GNinja -Bbuild-tidy && ninja -Cbuild-tidy",
"build:release": "cmake . -DCMAKE_BUILD_TYPE=Release -GNinja -Bbuild-release && ninja -Cbuild-release",
"build:release:local": "cmake . -DCMAKE_BUILD_TYPE=Release -DWEBKIT_DIR=$(pwd)/src/bun.js/WebKit/WebKitBuild/Release -GNinja -Bbuild-release-local && ninja -Cbuild-release-local",
@@ -37,7 +37,6 @@
"build:debug-zig-release": "cmake . -DCMAKE_BUILD_TYPE=Release -DZIG_OPTIMIZE=Debug -GNinja -Bbuild-debug-zig-release && ninja -Cbuild-debug-zig-release",
"build:safe": "cmake . -DZIG_OPTIMIZE=ReleaseSafe -DUSE_DEBUG_JSC=ON -DCMAKE_BUILD_TYPE=Release -GNinja -Bbuild-safe && ninja -Cbuild-safe",
"build:windows": "cmake -B build -S . -G Ninja -DCMAKE_BUILD_TYPE=Debug && ninja -Cbuild",
"build:windows:release": "cmake -B build-release -S . -G Ninja -DCMAKE_BUILD_TYPE=Release && ninja -Cbuild-release",
"typecheck": "tsc --noEmit && cd test && bun run typecheck",
"fmt": "prettier --write --cache './{.vscode,src,test,bench,packages/{bun-types,bun-inspector-*,bun-vscode,bun-debug-adapter-protocol}}/**/*.{mjs,ts,tsx,js,jsx}'",
"fmt:zig": "zig fmt src/*.zig src/*/*.zig src/*/*/*.zig src/*/*/*/*.zig",

View File

@@ -13,6 +13,5 @@
"std.StringArrayHashMap(": "bun.StringArrayHashMap has a faster `eql`",
"std.StringHashMapUnmanaged(": "bun.StringHashMapUnmanaged has a faster `eql`",
"std.StringHashMap(": "bun.StringHashMaphas a faster `eql`",
"std.enums.tagName(": "Use bun.tagName instead",
"": ""
}

View File

@@ -239,7 +239,7 @@ Starting "${testFileName}"
GITHUB_ACTIONS: process.env.GITHUB_ACTIONS ?? "true",
BUN_DEBUG_QUIET_LOGS: "1",
BUN_INSTALL_CACHE_DIR: join(TMPDIR, ".bun-install-cache"),
BUN_ENABLE_CRASH_REPORTING: "0",
BUN_ENABLE_CRASH_REPORTING: "1",
[windows ? "TEMP" : "TMPDIR"]: TMPDIR,
},
});

View File

@@ -173,14 +173,6 @@ function publishModule(name: string, dryRun?: boolean): void {
);
error(stderr || stdout);
if (exitCode !== 0) {
if (
stdout.includes("You cannot publish over the previously published version") ||
stderr.includes("You cannot publish over the previously published version")
) {
console.warn("Ignoring npm publish error:", stdout, stderr);
return;
}
throw new Error("npm publish failed with code " + exitCode);
}
} else {

View File

@@ -1521,7 +1521,7 @@ declare module "bun" {
define?: Record<string, string>;
// origin?: string; // e.g. http://mydomain.com
loader?: { [k in string]: Loader };
sourcemap?: "none" | "linked" | "inline" | "external" | "linked"; // default: "none", true -> "inline"
sourcemap?: "none" | "linked" | "inline" | "external"; // default: "none", true -> "inline"
/**
* package.json `exports` conditions used when resolving imports
*
@@ -1537,16 +1537,6 @@ declare module "bun" {
syntax?: boolean;
identifiers?: boolean;
};
/**
* Ignore dead code elimination/tree-shaking annotations such as @__PURE__ and package.json
* "sideEffects" fields. This should only be used as a temporary workaround for incorrect
* annotations in libraries.
*/
ignoreDCEAnnotations?: boolean;
/**
* Force emitting @__PURE__ annotations even if minify.whitespace is true.
*/
emitDCEAnnotations?: boolean;
// treeshaking?: boolean;
// jsx?:
@@ -2298,26 +2288,6 @@ declare module "bun" {
* This string will currently do nothing. But in the future it could be useful for logs or metrics.
*/
id?: string | null;
/**
* Serve static Response objects by route.
*
* @example
* ```ts
* Bun.serve({
* static: {
* "/": new Response("Hello World"),
* "/about": new Response("About"),
* },
* fetch(req) {
* return new Response("Fallback response");
* },
* });
* ```
*
* @experimental
*/
static?: Record<`/${string}`, Response>;
}
interface ServeOptions extends GenericServeOptions {
@@ -2362,14 +2332,6 @@ declare module "bun" {
*/
unix?: never;
/**
* Sets the number of seconds to wait before timing out a connection
* due to inactivity.
*
* Default is `10` seconds.
*/
idleTimeout?: number;
/**
* Handle HTTP requests
*
@@ -2787,16 +2749,6 @@ declare module "bun" {
compress?: boolean,
): ServerWebSocketSendStatus;
/**
* A count of connections subscribed to a given topic
*
* This operation will loop through each topic internally to get the count.
*
* @param topic the websocket topic to check how many subscribers are connected to
* @returns the number of subscribers
*/
subscriberCount(topic: string): number;
/**
* Returns the client IP address and port of the given Request. If the request was closed or is a unix socket, returns null.
*
@@ -2898,13 +2850,6 @@ declare module "bun" {
// tslint:disable-next-line:unified-signatures
function file(path: string | URL, options?: BlobPropertyBag): BunFile;
/**
* A list of files embedded into the standalone executable. Lexicographically sorted by name.
*
* If the process is not a standalone executable, this returns an empty array.
*/
const embeddedFiles: ReadonlyArray<Blob>;
/**
* `Blob` that leverages the fastest system calls available to operate on files.
*
@@ -3536,13 +3481,6 @@ declare module "bun" {
* Filtered data consists mostly of small values with a somewhat random distribution.
*/
strategy?: number;
library?: "zlib";
}
interface LibdeflateCompressionOptions {
level?: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12;
library?: "libdeflate";
}
/**
@@ -3551,38 +3489,26 @@ declare module "bun" {
* @param options Compression options to use
* @returns The output buffer with the compressed data
*/
function deflateSync(
data: Uint8Array | string | ArrayBuffer,
options?: ZlibCompressionOptions | LibdeflateCompressionOptions,
): Uint8Array;
function deflateSync(data: Uint8Array | string | ArrayBuffer, options?: ZlibCompressionOptions): Uint8Array;
/**
* Compresses a chunk of data with `zlib` GZIP algorithm.
* @param data The buffer of data to compress
* @param options Compression options to use
* @returns The output buffer with the compressed data
*/
function gzipSync(
data: Uint8Array | string | ArrayBuffer,
options?: ZlibCompressionOptions | LibdeflateCompressionOptions,
): Uint8Array;
function gzipSync(data: Uint8Array | string | ArrayBuffer, options?: ZlibCompressionOptions): Uint8Array;
/**
* Decompresses a chunk of data with `zlib` INFLATE algorithm.
* @param data The buffer of data to decompress
* @returns The output buffer with the decompressed data
*/
function inflateSync(
data: Uint8Array | string | ArrayBuffer,
options?: ZlibCompressionOptions | LibdeflateCompressionOptions,
): Uint8Array;
function inflateSync(data: Uint8Array | string | ArrayBuffer): Uint8Array;
/**
* Decompresses a chunk of data with `zlib` GUNZIP algorithm.
* @param data The buffer of data to decompress
* @returns The output buffer with the decompressed data
*/
function gunzipSync(
data: Uint8Array | string | ArrayBuffer,
options?: ZlibCompressionOptions | LibdeflateCompressionOptions,
): Uint8Array;
function gunzipSync(data: Uint8Array | string | ArrayBuffer): Uint8Array;
type Target =
/**
@@ -3902,7 +3828,7 @@ declare module "bun" {
*/
const isMainThread: boolean;
interface Socket<Data = undefined> extends Disposable {
interface Socket<Data = undefined> {
/**
* Write `data` to the socket
*
@@ -4184,7 +4110,7 @@ declare module "bun" {
setMaxSendFragment(size: number): boolean;
}
interface SocketListener<Data = undefined> extends Disposable {
interface SocketListener<Data = undefined> {
stop(closeActiveConnections?: boolean): void;
ref(): void;
unref(): void;

View File

@@ -955,7 +955,6 @@ declare global {
ref(): Timer;
unref(): Timer;
hasRef(): boolean;
refresh(): Timer;
[Symbol.toPrimitive](): number;
}
@@ -1918,10 +1917,6 @@ declare global {
* closely to the `BodyMixin` API.
*/
formData(): Promise<FormData>;
/**
* Returns a promise that resolves to the contents of the blob as a Uint8Array (array of bytes); it's the same as `new Uint8Array(await blob.arrayBuffer())`
*/
bytes(): Promise<Uint8Array>;
}
var Blob: typeof globalThis extends {
onerror: any;

View File

@@ -536,7 +536,7 @@ declare module "bun:sqlite" {
* // => [{bar: "baz"}]
*
* stmt.all();
* // => []
* // => [{bar: "baz"}]
*
* stmt.all("foo");
* // => [{bar: "foo"}]
@@ -555,14 +555,14 @@ declare module "bun:sqlite" {
* ```ts
* const stmt = db.prepare("SELECT * FROM foo WHERE bar = ?");
*
* stmt.get("baz");
* // => {bar: "baz"}
* stmt.all("baz");
* // => [{bar: "baz"}]
*
* stmt.get();
* // => null
* stmt.all();
* // => [{bar: "baz"}]
*
* stmt.get("foo");
* // => {bar: "foo"}
* stmt.all("foo");
* // => [{bar: "foo"}]
* ```
*
* The following types can be used when binding parameters:
@@ -747,7 +747,7 @@ declare module "bun:sqlite" {
* query.as(User);
* const user = query.get();
* console.log(user.birthdate);
* // => Date(1995, 12, 19)
* // => Date(1995, 11, 19)
* ```
*/
as<T = unknown>(Class: new (...args: any[]) => T): Statement<T, ParamsType>;

View File

@@ -1250,7 +1250,7 @@ declare module "bun:test" {
* - If expected is a `string` or `RegExp`, it will check the `message` property.
* - If expected is an `Error` object, it will check the `name` and `message` properties.
* - If expected is an `Error` constructor, it will check the class of the `Error`.
* - If expected is not provided, it will check if anything has thrown.
* - If expected is not provided, it will check if anything as thrown.
*
* @example
* function fail() {

View File

@@ -25359,127 +25359,3 @@ CKA_TRUST_SERVER_AUTH CK_TRUST CKT_NSS_TRUSTED_DELEGATOR
CKA_TRUST_EMAIL_PROTECTION CK_TRUST CKT_NSS_MUST_VERIFY_TRUST
CKA_TRUST_CODE_SIGNING CK_TRUST CKT_NSS_MUST_VERIFY_TRUST
CKA_TRUST_STEP_UP_APPROVED CK_BBOOL CK_FALSE
#
# Certificate "FIRMAPROFESIONAL CA ROOT-A WEB"
#
# Issuer: CN=FIRMAPROFESIONAL CA ROOT-A WEB,OID.2.5.4.97=VATES-A62634068,O=Firmaprofesional SA,C=ES
# Serial Number:31:97:21:ed:af:89:42:7f:35:41:87:a1:67:56:4c:6d
# Subject: CN=FIRMAPROFESIONAL CA ROOT-A WEB,OID.2.5.4.97=VATES-A62634068,O=Firmaprofesional SA,C=ES
# Not Valid Before: Wed Apr 06 09:01:36 2022
# Not Valid After : Sun Mar 31 09:01:36 2047
# Fingerprint (SHA-256): BE:F2:56:DA:F2:6E:9C:69:BD:EC:16:02:35:97:98:F3:CA:F7:18:21:A0:3E:01:82:57:C5:3C:65:61:7F:3D:4A
# Fingerprint (SHA1): A8:31:11:74:A6:14:15:0D:CA:77:DD:0E:E4:0C:5D:58:FC:A0:72:A5
CKA_CLASS CK_OBJECT_CLASS CKO_CERTIFICATE
CKA_TOKEN CK_BBOOL CK_TRUE
CKA_PRIVATE CK_BBOOL CK_FALSE
CKA_MODIFIABLE CK_BBOOL CK_FALSE
CKA_LABEL UTF8 "FIRMAPROFESIONAL CA ROOT-A WEB"
CKA_CERTIFICATE_TYPE CK_CERTIFICATE_TYPE CKC_X_509
CKA_SUBJECT MULTILINE_OCTAL
\060\156\061\013\060\011\006\003\125\004\006\023\002\105\123\061
\034\060\032\006\003\125\004\012\014\023\106\151\162\155\141\160
\162\157\146\145\163\151\157\156\141\154\040\123\101\061\030\060
\026\006\003\125\004\141\014\017\126\101\124\105\123\055\101\066
\062\066\063\064\060\066\070\061\047\060\045\006\003\125\004\003
\014\036\106\111\122\115\101\120\122\117\106\105\123\111\117\116
\101\114\040\103\101\040\122\117\117\124\055\101\040\127\105\102
END
CKA_ID UTF8 "0"
CKA_ISSUER MULTILINE_OCTAL
\060\156\061\013\060\011\006\003\125\004\006\023\002\105\123\061
\034\060\032\006\003\125\004\012\014\023\106\151\162\155\141\160
\162\157\146\145\163\151\157\156\141\154\040\123\101\061\030\060
\026\006\003\125\004\141\014\017\126\101\124\105\123\055\101\066
\062\066\063\064\060\066\070\061\047\060\045\006\003\125\004\003
\014\036\106\111\122\115\101\120\122\117\106\105\123\111\117\116
\101\114\040\103\101\040\122\117\117\124\055\101\040\127\105\102
END
CKA_SERIAL_NUMBER MULTILINE_OCTAL
\002\020\061\227\041\355\257\211\102\177\065\101\207\241\147\126
\114\155
END
CKA_VALUE MULTILINE_OCTAL
\060\202\002\172\060\202\002\000\240\003\002\001\002\002\020\061
\227\041\355\257\211\102\177\065\101\207\241\147\126\114\155\060
\012\006\010\052\206\110\316\075\004\003\003\060\156\061\013\060
\011\006\003\125\004\006\023\002\105\123\061\034\060\032\006\003
\125\004\012\014\023\106\151\162\155\141\160\162\157\146\145\163
\151\157\156\141\154\040\123\101\061\030\060\026\006\003\125\004
\141\014\017\126\101\124\105\123\055\101\066\062\066\063\064\060
\066\070\061\047\060\045\006\003\125\004\003\014\036\106\111\122
\115\101\120\122\117\106\105\123\111\117\116\101\114\040\103\101
\040\122\117\117\124\055\101\040\127\105\102\060\036\027\015\062
\062\060\064\060\066\060\071\060\061\063\066\132\027\015\064\067
\060\063\063\061\060\071\060\061\063\066\132\060\156\061\013\060
\011\006\003\125\004\006\023\002\105\123\061\034\060\032\006\003
\125\004\012\014\023\106\151\162\155\141\160\162\157\146\145\163
\151\157\156\141\154\040\123\101\061\030\060\026\006\003\125\004
\141\014\017\126\101\124\105\123\055\101\066\062\066\063\064\060
\066\070\061\047\060\045\006\003\125\004\003\014\036\106\111\122
\115\101\120\122\117\106\105\123\111\117\116\101\114\040\103\101
\040\122\117\117\124\055\101\040\127\105\102\060\166\060\020\006
\007\052\206\110\316\075\002\001\006\005\053\201\004\000\042\003
\142\000\004\107\123\352\054\021\244\167\307\052\352\363\326\137
\173\323\004\221\134\372\210\306\042\271\203\020\142\167\204\063
\055\351\003\210\324\340\063\367\355\167\054\112\140\352\344\157
\255\155\264\370\114\212\244\344\037\312\352\117\070\112\056\202
\163\053\307\146\233\012\214\100\234\174\212\366\362\071\140\262
\336\313\354\270\344\157\352\233\135\267\123\220\030\062\125\305
\040\267\224\243\143\060\141\060\017\006\003\125\035\023\001\001
\377\004\005\060\003\001\001\377\060\037\006\003\125\035\043\004
\030\060\026\200\024\223\341\103\143\134\074\235\326\047\363\122
\354\027\262\251\257\054\367\166\370\060\035\006\003\125\035\016
\004\026\004\024\223\341\103\143\134\074\235\326\047\363\122\354
\027\262\251\257\054\367\166\370\060\016\006\003\125\035\017\001
\001\377\004\004\003\002\001\006\060\012\006\010\052\206\110\316
\075\004\003\003\003\150\000\060\145\002\060\035\174\244\173\303
\211\165\063\341\073\251\105\277\106\351\351\241\335\311\042\026
\267\107\021\013\330\232\272\361\310\013\160\120\123\002\221\160
\205\131\251\036\244\346\352\043\061\240\000\002\061\000\375\342
\370\263\257\026\271\036\163\304\226\343\301\060\031\330\176\346
\303\227\336\034\117\270\211\057\063\353\110\017\031\367\207\106
\135\046\220\245\205\305\271\172\224\076\207\250\275\000
END
CKA_NSS_MOZILLA_CA_POLICY CK_BBOOL CK_TRUE
CKA_NSS_SERVER_DISTRUST_AFTER CK_BBOOL CK_FALSE
CKA_NSS_EMAIL_DISTRUST_AFTER CK_BBOOL CK_FALSE
# Trust for "FIRMAPROFESIONAL CA ROOT-A WEB"
# Issuer: CN=FIRMAPROFESIONAL CA ROOT-A WEB,OID.2.5.4.97=VATES-A62634068,O=Firmaprofesional SA,C=ES
# Serial Number:31:97:21:ed:af:89:42:7f:35:41:87:a1:67:56:4c:6d
# Subject: CN=FIRMAPROFESIONAL CA ROOT-A WEB,OID.2.5.4.97=VATES-A62634068,O=Firmaprofesional SA,C=ES
# Not Valid Before: Wed Apr 06 09:01:36 2022
# Not Valid After : Sun Mar 31 09:01:36 2047
# Fingerprint (SHA-256): BE:F2:56:DA:F2:6E:9C:69:BD:EC:16:02:35:97:98:F3:CA:F7:18:21:A0:3E:01:82:57:C5:3C:65:61:7F:3D:4A
# Fingerprint (SHA1): A8:31:11:74:A6:14:15:0D:CA:77:DD:0E:E4:0C:5D:58:FC:A0:72:A5
CKA_CLASS CK_OBJECT_CLASS CKO_NSS_TRUST
CKA_TOKEN CK_BBOOL CK_TRUE
CKA_PRIVATE CK_BBOOL CK_FALSE
CKA_MODIFIABLE CK_BBOOL CK_FALSE
CKA_LABEL UTF8 "FIRMAPROFESIONAL CA ROOT-A WEB"
CKA_CERT_SHA1_HASH MULTILINE_OCTAL
\250\061\021\164\246\024\025\015\312\167\335\016\344\014\135\130
\374\240\162\245
END
CKA_CERT_MD5_HASH MULTILINE_OCTAL
\202\262\255\105\000\202\260\146\143\370\137\303\147\116\316\243
END
CKA_ISSUER MULTILINE_OCTAL
\060\156\061\013\060\011\006\003\125\004\006\023\002\105\123\061
\034\060\032\006\003\125\004\012\014\023\106\151\162\155\141\160
\162\157\146\145\163\151\157\156\141\154\040\123\101\061\030\060
\026\006\003\125\004\141\014\017\126\101\124\105\123\055\101\066
\062\066\063\064\060\066\070\061\047\060\045\006\003\125\004\003
\014\036\106\111\122\115\101\120\122\117\106\105\123\111\117\116
\101\114\040\103\101\040\122\117\117\124\055\101\040\127\105\102
END
CKA_SERIAL_NUMBER MULTILINE_OCTAL
\002\020\061\227\041\355\257\211\102\177\065\101\207\241\147\126
\114\155
END
CKA_TRUST_SERVER_AUTH CK_TRUST CKT_NSS_TRUSTED_DELEGATOR
CKA_TRUST_EMAIL_PROTECTION CK_TRUST CKT_NSS_MUST_VERIFY_TRUST
CKA_TRUST_CODE_SIGNING CK_TRUST CKT_NSS_MUST_VERIFY_TRUST
CKA_TRUST_STEP_UP_APPROVED CK_BBOOL CK_FALSE

View File

@@ -73,9 +73,6 @@ const getReleases = text => {
release[kNSSDate] = new Date(normalizeTD(cells[columns[kNSSDate]]));
release[kFirefoxVersion] = normalizeTD(cells[columns[kFirefoxVersion]]);
release[kFirefoxDate] = new Date(normalizeTD(cells[columns[kFirefoxDate]]));
if (!isNaN(release[kFirefoxDate]) && isNaN(release[kNSSDate])) {
release[kNSSDate] = new Date(release[kFirefoxDate]);
}
releases.push(release);
row = matches.next();
}

View File

@@ -26,10 +26,6 @@
#include <stdlib.h>
#ifndef _WIN32
// Necessary for the stdint include
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
@@ -42,8 +38,8 @@
#include <mstcpip.h>
#endif
#if defined(__APPLE__)
extern int Bun__doesMacOSVersionSupportSendRecvMsgX();
#if defined(__APPLE__) && defined(__aarch64__)
#define HAS_MSGX
#endif
@@ -77,30 +73,32 @@ int bsd_sendmmsg(LIBUS_SOCKET_DESCRIPTOR fd, struct udp_sendbuf* sendbuf, int fl
}
return sendbuf->num;
#elif defined(__APPLE__)
// sendmsg_x does not support addresses.
if (!sendbuf->has_empty && !sendbuf->has_addresses && Bun__doesMacOSVersionSupportSendRecvMsgX()) {
while (1) {
int ret = sendmsg_x(fd, sendbuf->msgvec, sendbuf->num, flags);
if (ret >= 0) return ret;
// If we receive EMSGSIZE, we should use the fallback code.
if (errno == EMSGSIZE) break;
if (errno != EINTR) return ret;
}
}
for (size_t i = 0, count = sendbuf->num; i < count; i++) {
while (1) {
ssize_t ret = sendmsg(fd, &sendbuf->msgvec[i].msg_hdr, flags);
if (ret < 0) {
if (errno == EINTR) continue;
if (errno == EAGAIN || errno == EWOULDBLOCK) return i;
return ret;
// TODO figure out why sendmsg_x fails when one of the messages is empty
// so that we can get rid of this code.
// One of the weird things is that once a non-empty message has been sent on the socket,
// empty messages start working as well. Bizarre.
#ifdef HAS_MSGX
if (sendbuf->has_empty) {
#endif
for (int i = 0; i < sendbuf->num; i++) {
while (1) {
ssize_t ret = sendmsg(fd, &sendbuf->msgvec[i].msg_hdr, flags);
if (ret < 0) {
if (errno == EINTR) continue;
if (errno == EAGAIN || errno == EWOULDBLOCK) return i;
return ret;
}
break;
}
break;
}
return sendbuf->num;
#ifdef HAS_MSGX
}
return sendbuf->num;
while (1) {
int ret = sendmsg_x(fd, sendbuf->msgvec, sendbuf->num, flags);
if (ret >= 0 || errno != EINTR) return ret;
}
#endif
#else
while (1) {
int ret = sendmmsg(fd, sendbuf->msgvec, sendbuf->num, flags | MSG_NOSIGNAL);
@@ -122,13 +120,12 @@ int bsd_recvmmsg(LIBUS_SOCKET_DESCRIPTOR fd, struct udp_recvbuf *recvbuf, int fl
return 1;
}
#elif defined(__APPLE__)
if (Bun__doesMacOSVersionSupportSendRecvMsgX()) {
while (1) {
int ret = recvmsg_x(fd, recvbuf->msgvec, LIBUS_UDP_RECV_COUNT, flags);
if (ret >= 0 || errno != EINTR) return ret;
}
#ifdef HAS_MSGX
while (1) {
int ret = recvmsg_x(fd, recvbuf->msgvec, LIBUS_UDP_RECV_COUNT, flags);
if (ret >= 0 || errno != EINTR) return ret;
}
#else
for (int i = 0; i < LIBUS_UDP_RECV_COUNT; ++i) {
while (1) {
ssize_t ret = recvmsg(fd, &recvbuf->msgvec[i].msg_hdr, flags);
@@ -142,6 +139,7 @@ int bsd_recvmmsg(LIBUS_SOCKET_DESCRIPTOR fd, struct udp_recvbuf *recvbuf, int fl
}
}
return LIBUS_UDP_RECV_COUNT;
#endif
#else
while (1) {
int ret = recvmmsg(fd, (struct mmsghdr *)&recvbuf->msgvec, LIBUS_UDP_RECV_COUNT, flags, 0);
@@ -156,20 +154,19 @@ void bsd_udp_setup_recvbuf(struct udp_recvbuf *recvbuf, void *databuf, size_t da
recvbuf->buflen = databuflen;
#else
// assert(databuflen > LIBUS_UDP_MAX_SIZE * LIBUS_UDP_RECV_COUNT);
memset(recvbuf, 0, sizeof(struct udp_recvbuf));
for (size_t i = 0; i < LIBUS_UDP_RECV_COUNT; i++) {
for (int i = 0; i < LIBUS_UDP_RECV_COUNT; i++) {
recvbuf->iov[i].iov_base = (char*)databuf + i * LIBUS_UDP_MAX_SIZE;
recvbuf->iov[i].iov_len = LIBUS_UDP_MAX_SIZE;
struct msghdr mh = {};
memset(&mh, 0, sizeof(struct msghdr));
mh.msg_name = &recvbuf->addr[i];
mh.msg_namelen = sizeof(struct sockaddr_storage);
mh.msg_iov = &recvbuf->iov[i];
mh.msg_iovlen = 1;
mh.msg_control = recvbuf->control[i];
mh.msg_controllen = sizeof(recvbuf->control[i]);
recvbuf->msgvec[i].msg_hdr = mh;
recvbuf->msgvec[i].msg_hdr.msg_name = &recvbuf->addr[i];
recvbuf->msgvec[i].msg_hdr.msg_namelen = sizeof(struct sockaddr_storage);
recvbuf->msgvec[i].msg_hdr.msg_iov = &recvbuf->iov[i];
recvbuf->msgvec[i].msg_hdr.msg_iovlen = 1;
recvbuf->msgvec[i].msg_hdr.msg_control = recvbuf->control[i];
recvbuf->msgvec[i].msg_hdr.msg_controllen = 256;
}
#endif
}
@@ -182,12 +179,7 @@ int bsd_udp_setup_sendbuf(struct udp_sendbuf *buf, size_t bufsize, void** payloa
buf->num = num;
return num;
#else
// TODO: can we skip empty messages altogether? Do we really need to send 0-length messages?
buf->has_empty = 0;
// sendmsg_x docs states it does not support addresses.
buf->has_addresses = 0;
struct mmsghdr *msgvec = buf->msgvec;
// todo check this math
size_t count = (bufsize - sizeof(struct udp_sendbuf)) / (sizeof(struct mmsghdr) + sizeof(struct iovec));
@@ -202,9 +194,6 @@ int bsd_udp_setup_sendbuf(struct udp_sendbuf *buf, size_t bufsize, void** payloa
addr_len = addr->sa_family == AF_INET ? sizeof(struct sockaddr_in)
: addr->sa_family == AF_INET6 ? sizeof(struct sockaddr_in6)
: 0;
if (addr_len > 0) {
buf->has_addresses = 1;
}
}
iov[i].iov_base = payloads[i];
iov[i].iov_len = lengths[i];
@@ -217,7 +206,6 @@ int bsd_udp_setup_sendbuf(struct udp_sendbuf *buf, size_t bufsize, void** payloa
msgvec[i].msg_hdr.msg_flags = 0;
msgvec[i].msg_len = 0;
if (lengths[i] == 0) {
buf->has_empty = 1;
}

View File

@@ -15,18 +15,18 @@
* limitations under the License.
*/
#include "internal/internal.h"
#include "libusockets.h"
#include <errno.h>
#include "internal/internal.h"
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#ifndef _WIN32
#include <arpa/inet.h>
#endif
#define CONCURRENT_CONNECTIONS 4
#define CONCURRENT_CONNECTIONS 2
// clang-format off
int default_is_low_prio_handler(struct us_socket_t *s) {
return 0;
}
@@ -44,7 +44,7 @@ int us_raw_root_certs(struct us_cert_string_t**out){
void us_listen_socket_close(int ssl, struct us_listen_socket_t *ls) {
/* us_listen_socket_t extends us_socket_t so we close in similar ways */
if (!us_socket_is_closed(0, &ls->s)) {
us_internal_socket_context_unlink_listen_socket(ssl, ls->s.context, ls);
us_internal_socket_context_unlink_listen_socket(ls->s.context, ls);
us_poll_stop((struct us_poll_t *) &ls->s, ls->s.context->loop);
bsd_close_socket(us_poll_fd((struct us_poll_t *) &ls->s));
@@ -60,19 +60,11 @@ void us_listen_socket_close(int ssl, struct us_listen_socket_t *ls) {
}
void us_socket_context_close(int ssl, struct us_socket_context_t *context) {
/* First start closing pending connecting sockets*/
struct us_connecting_socket_t *c = context->head_connecting_sockets;
while (c) {
struct us_connecting_socket_t *nextC = c->next_pending;
us_connecting_socket_close(ssl, c);
c = nextC;
}
/* After this by closing all listen sockets */
/* Begin by closing all listen sockets */
struct us_listen_socket_t *ls = context->head_listen_sockets;
while (ls) {
struct us_listen_socket_t *nextLS = (struct us_listen_socket_t *) ls->s.next;
us_listen_socket_close(ssl, ls);
ls = nextLS;
}
@@ -80,12 +72,12 @@ void us_socket_context_close(int ssl, struct us_socket_context_t *context) {
struct us_socket_t *s = context->head_sockets;
while (s) {
struct us_socket_t *nextS = s->next;
us_socket_close(ssl, s, LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN, 0);
us_socket_close(ssl, s, 0, 0);
s = nextS;
}
}
void us_internal_socket_context_unlink_listen_socket(int ssl, struct us_socket_context_t *context, struct us_listen_socket_t *ls) {
void us_internal_socket_context_unlink_listen_socket(struct us_socket_context_t *context, struct us_listen_socket_t *ls) {
/* We have to properly update the iterator used to sweep sockets for timeouts */
if (ls == (struct us_listen_socket_t *) context->iterator) {
context->iterator = ls->s.next;
@@ -103,10 +95,9 @@ void us_internal_socket_context_unlink_listen_socket(int ssl, struct us_socket_c
ls->s.next->prev = ls->s.prev;
}
}
us_socket_context_unref(ssl, context);
}
void us_internal_socket_context_unlink_socket(int ssl, struct us_socket_context_t *context, struct us_socket_t *s) {
void us_internal_socket_context_unlink_socket(struct us_socket_context_t *context, struct us_socket_t *s) {
/* We have to properly update the iterator used to sweep sockets for timeouts */
if (s == context->iterator) {
context->iterator = s->next;
@@ -124,22 +115,6 @@ void us_internal_socket_context_unlink_socket(int ssl, struct us_socket_context_
s->next->prev = s->prev;
}
}
us_socket_context_unref(ssl, context);
}
void us_internal_socket_context_unlink_connecting_socket(int ssl, struct us_socket_context_t *context, struct us_connecting_socket_t *c) {
if (c->prev_pending == c->next_pending) {
context->head_connecting_sockets = 0;
} else {
if (c->prev_pending) {
c->prev_pending->next_pending = c->next_pending;
} else {
context->head_connecting_sockets = c->next_pending;
}
if (c->next_pending) {
c->next_pending->prev_pending = c->prev_pending;
}
}
us_socket_context_unref(ssl, context);
}
/* We always add in the top, so we don't modify any s.next */
@@ -151,21 +126,8 @@ void us_internal_socket_context_link_listen_socket(struct us_socket_context_t *c
context->head_listen_sockets->s.prev = &ls->s;
}
context->head_listen_sockets = ls;
us_socket_context_ref(0, context);
}
void us_internal_socket_context_link_connecting_socket(int ssl, struct us_socket_context_t *context, struct us_connecting_socket_t *c) {
c->context = context;
c->next_pending = context->head_connecting_sockets;
c->prev_pending = 0;
if (context->head_connecting_sockets) {
context->head_connecting_sockets->prev_pending = c;
}
context->head_connecting_sockets = c;
us_socket_context_ref(ssl, context);
}
/* We always add in the top, so we don't modify any s.next */
void us_internal_socket_context_link_socket(struct us_socket_context_t *context, struct us_socket_t *s) {
s->context = context;
@@ -175,7 +137,6 @@ void us_internal_socket_context_link_socket(struct us_socket_context_t *context,
context->head_sockets->prev = s;
}
context->head_sockets = s;
us_socket_context_ref(0, context);
}
struct us_loop_t *us_socket_context_loop(int ssl, struct us_socket_context_t *context) {
@@ -270,7 +231,6 @@ struct us_socket_context_t *us_create_socket_context(int ssl, struct us_loop_t *
struct us_socket_context_t *context = us_calloc(1, sizeof(struct us_socket_context_t) + context_ext_size);
context->loop = loop;
context->is_low_prio = default_is_low_prio_handler;
context->ref_count = 1;
us_internal_loop_link(loop, context);
@@ -292,7 +252,6 @@ struct us_socket_context_t *us_create_bun_socket_context(int ssl, struct us_loop
struct us_socket_context_t *context = us_calloc(1, sizeof(struct us_socket_context_t) + context_ext_size);
context->loop = loop;
context->is_low_prio = default_is_low_prio_handler;
context->ref_count = 1;
us_internal_loop_link(loop, context);
@@ -312,8 +271,8 @@ struct us_bun_verify_error_t us_socket_verify_error(int ssl, struct us_socket_t
return (struct us_bun_verify_error_t) { .error = 0, .code = NULL, .reason = NULL };
}
void us_internal_socket_context_free(int ssl, struct us_socket_context_t *context) {
void us_socket_context_free(int ssl, struct us_socket_context_t *context) {
#ifndef LIBUS_NO_SSL
if (ssl) {
/* This function will call us again with SSL=false */
@@ -326,24 +285,7 @@ void us_internal_socket_context_free(int ssl, struct us_socket_context_t *contex
* This is the opposite order compared to when creating the context - SSL code is cleaning up before non-SSL */
us_internal_loop_unlink(context->loop, context);
/* Link this context to the close-list and let it be deleted after this iteration */
context->next = context->loop->data.closed_context_head;
context->loop->data.closed_context_head = context;
}
void us_socket_context_ref(int ssl, struct us_socket_context_t *context) {
context->ref_count++;
}
void us_socket_context_unref(int ssl, struct us_socket_context_t *context) {
uint32_t ref_count = context->ref_count;
context->ref_count--;
if (ref_count == 1) {
us_internal_socket_context_free(ssl, context);
}
}
void us_socket_context_free(int ssl, struct us_socket_context_t *context) {
us_socket_context_unref(ssl, context);
us_free(context);
}
struct us_listen_socket_t *us_socket_context_listen(int ssl, struct us_socket_context_t *context, const char *host, int port, int options, int socket_ext_size) {
@@ -514,14 +456,14 @@ void *us_socket_context_connect(int ssl, struct us_socket_context_t *context, co
}
struct us_connecting_socket_t *c = us_calloc(1, sizeof(struct us_connecting_socket_t) + socket_ext_size);
c->socket_ext_size = socket_ext_size;
c->socket_ext_size = socket_ext_size;
c->context = context;
c->options = options;
c->ssl = ssl > 0;
c->timeout = 255;
c->long_timeout = 255;
c->pending_resolve_callback = 1;
c->port = port;
us_internal_socket_context_link_connecting_socket(ssl, context, c);
#ifdef _WIN32
loop->uv_loop->active_handles++;
@@ -583,12 +525,15 @@ void us_internal_socket_after_resolve(struct us_connecting_socket_t *c) {
c->pending_resolve_callback = 0;
// if the socket was closed while we were resolving the address, free it
if (c->closed) {
us_connecting_socket_free(c->ssl, c);
us_connecting_socket_free(c);
return;
}
struct addrinfo_result *result = Bun__addrinfo_getRequestResult(c->addrinfo_req);
if (result->error) {
us_connecting_socket_close(c->ssl, c);
c->error = result->error;
c->context->on_connect_error(c, result->error);
Bun__addrinfo_freeRequest(c->addrinfo_req, 0);
us_connecting_socket_close(0, c);
return;
}
@@ -596,7 +541,10 @@ void us_internal_socket_after_resolve(struct us_connecting_socket_t *c) {
int opened = start_connections(c, CONCURRENT_CONNECTIONS);
if (opened == 0) {
us_connecting_socket_close(c->ssl, c);
c->error = ECONNREFUSED;
c->context->on_connect_error(c, ECONNREFUSED);
Bun__addrinfo_freeRequest(c->addrinfo_req, 1);
us_connecting_socket_close(0, c);
return;
}
}
@@ -664,7 +612,10 @@ void us_internal_socket_after_open(struct us_socket_t *s, int error) {
// we have run out of addresses to attempt, signal the connection error
// but only if there are no other sockets in the list
if (opened == 0 && c->connecting_head == NULL) {
us_connecting_socket_close(c->ssl, c);
c->error = ECONNREFUSED;
c->context->on_connect_error(c, error);
Bun__addrinfo_freeRequest(c->addrinfo_req, ECONNREFUSED);
us_connecting_socket_close(0, c);
}
}
} else {
@@ -693,7 +644,7 @@ void us_internal_socket_after_open(struct us_socket_t *s, int error) {
}
// now that the socket is open, we can release the associated us_connecting_socket_t if it exists
Bun__addrinfo_freeRequest(c->addrinfo_req, 0);
us_connecting_socket_free(c->ssl, c);
us_connecting_socket_free(c);
s->connect_state = NULL;
}
@@ -752,15 +703,13 @@ struct us_socket_t *us_socket_context_adopt_socket(int ssl, struct us_socket_con
#endif
/* Cannot adopt a closed socket */
if (us_socket_is_closed(ssl, s) || us_socket_is_shut_down(ssl, s)) {
if (us_socket_is_closed(ssl, s)) {
return s;
}
if (s->low_prio_state != 1) {
/* We need to be sure that we are still holding a reference */
us_socket_context_ref(ssl, context);
/* This properly updates the iterator if in on_timeout */
us_internal_socket_context_unlink_socket(ssl, s->context, s);
us_internal_socket_context_unlink_socket(s->context, s);
}
@@ -771,10 +720,7 @@ struct us_socket_t *us_socket_context_adopt_socket(int ssl, struct us_socket_con
new_s = (struct us_socket_t *) us_poll_resize(&s->p, s->context->loop, sizeof(struct us_socket_t) + ext_size);
if (c) {
c->connecting_head = new_s;
struct us_socket_context_t *old_context = s->context;
c->context = context;
us_internal_socket_context_link_connecting_socket(ssl, context, c);
us_internal_socket_context_unlink_connecting_socket(ssl, old_context, c);
}
}
new_s->timeout = 255;
@@ -788,7 +734,6 @@ struct us_socket_t *us_socket_context_adopt_socket(int ssl, struct us_socket_con
if (new_s->next) new_s->next->prev = new_s;
} else {
us_internal_socket_context_link_socket(context, new_s);
us_socket_context_unref(ssl, context);
}
return new_s;
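
The ref-counted variant of us_socket_context_t that appears in these hunks (ref_count plus us_socket_context_ref/us_socket_context_unref) is meant to keep a context alive while another owner still uses it, as the adopt path above does around unlinking. A minimal sketch of that usage, based only on the signatures shown above; the work in between is a placeholder:

#include "libusockets.h"

static void borrow_context_sketch(struct us_socket_context_t *context) {
    /* take a reference before the context is handed to another owner */
    us_socket_context_ref(0, context);

    /* ... use the context while the original owner may already be gone ... */

    /* drop the reference; in the ref-counted variant the context is only
     * freed once the count reaches zero */
    us_socket_context_unref(0, context);
}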

View File

@@ -14,14 +14,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// clang-format off
#if (defined(LIBUS_USE_OPENSSL) || defined(LIBUS_USE_WOLFSSL))
#include "internal/internal.h"
#include "libusockets.h"
#include <string.h>
/* These are in sni_tree.cpp */
void *sni_new();
void sni_free(void *sni, void (*cb)(void *));
@@ -29,6 +23,10 @@ int sni_add(void *sni, const char *hostname, void *user);
void *sni_remove(void *sni, const char *hostname);
void *sni_find(void *sni, const char *hostname);
#include "internal/internal.h"
#include "libusockets.h"
#include <string.h>
/* This module contains the entire OpenSSL implementation
* of the SSL socket and socket context interfaces. */
#ifdef LIBUS_USE_OPENSSL
@@ -73,6 +71,10 @@ struct us_internal_ssl_socket_context_t {
// socket context
SSL_CTX *ssl_context;
int is_parent;
#if ALLOW_SERVER_RENEGOTIATION
unsigned int client_renegotiation_limit;
unsigned int client_renegotiation_window;
#endif
/* These decorate the base implementation */
struct us_internal_ssl_socket_t *(*on_open)(struct us_internal_ssl_socket_t *,
int is_client, char *ip,
@@ -84,10 +86,6 @@ struct us_internal_ssl_socket_context_t {
struct us_internal_ssl_socket_t *(*on_close)(
struct us_internal_ssl_socket_t *, int code, void *reason);
struct us_internal_ssl_socket_t *(*on_timeout)(
struct us_internal_ssl_socket_t *);
struct us_internal_ssl_socket_t *(*on_long_timeout)(struct us_internal_ssl_socket_t *);
/* Called for missing SNI hostnames, if not NULL */
void (*on_server_name)(struct us_internal_ssl_socket_context_t *,
const char *hostname);
@@ -110,10 +108,15 @@ enum {
struct us_internal_ssl_socket_t {
struct us_socket_t s;
SSL *ssl; // this _must_ be the first member after s
#if ALLOW_SERVER_RENEGOTIATION
unsigned int client_pending_renegotiations;
uint64_t last_ssl_renegotiation;
unsigned int is_client : 1;
#endif
unsigned int ssl_write_wants_read : 1; // we use this for now
unsigned int ssl_read_wants_write : 1;
unsigned int handshake_state : 2;
unsigned int fatal_error : 1;
unsigned int received_ssl_shutdown : 1;
};
int passphrase_cb(char *buf, int size, int rwflag, void *u) {
@@ -179,26 +182,6 @@ int BIO_s_custom_read(BIO *bio, char *dst, int length) {
return length;
}
struct loop_ssl_data * us_internal_set_loop_ssl_data(struct us_internal_ssl_socket_t *s) {
// note: this context can change when we adopt the socket!
struct us_internal_ssl_socket_context_t *context =
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
struct us_loop_t *loop = us_socket_context_loop(0, &context->sc);
struct loop_ssl_data *loop_ssl_data =
(struct loop_ssl_data *)loop->data.ssl_data;
// note: if we put data here we should never really clear it (not in write
// either, it still should be available for SSL_write to read from!)
loop_ssl_data->ssl_read_input_length = 0;
loop_ssl_data->ssl_read_input_offset = 0;
loop_ssl_data->ssl_socket = &s->s;
loop_ssl_data->msg_more = 0;
return loop_ssl_data;
}
struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
int is_client, char *ip,
int ip_length) {
@@ -206,14 +189,21 @@ struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
struct us_internal_ssl_socket_context_t *context =
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
struct loop_ssl_data *loop_ssl_data = us_internal_set_loop_ssl_data(s);
struct us_loop_t *loop = us_socket_context_loop(0, &context->sc);
struct loop_ssl_data *loop_ssl_data =
(struct loop_ssl_data *)loop->data.ssl_data;
s->ssl = SSL_new(context->ssl_context);
#if ALLOW_SERVER_RENEGOTIATION
s->client_pending_renegotiations = context->client_renegotiation_limit;
s->last_ssl_renegotiation = 0;
s->is_client = is_client ? 1 : 0;
#endif
s->ssl_write_wants_read = 0;
s->ssl_read_wants_write = 0;
s->fatal_error = 0;
s->handshake_state = HANDSHAKE_PENDING;
s->received_ssl_shutdown = 0;
SSL_set_bio(s->ssl, loop_ssl_data->shared_rbio, loop_ssl_data->shared_wbio);
// if we allow renegotiation, we need to set the mode here
@@ -223,18 +213,24 @@ struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
// this can be a DoS vector for servers, so we enable it using a limit
// we do not use ssl_renegotiate_freely, since ssl_renegotiate_explicit is
// more performant when using BoringSSL
#if ALLOW_SERVER_RENEGOTIATION
if (context->client_renegotiation_limit) {
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_explicit);
} else {
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_never);
}
#endif
BIO_up_ref(loop_ssl_data->shared_rbio);
BIO_up_ref(loop_ssl_data->shared_wbio);
if (is_client) {
#if ALLOW_SERVER_RENEGOTIATION == 0
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_explicit);
#endif
SSL_set_connect_state(s->ssl);
} else {
SSL_set_accept_state(s->ssl);
// we do not allow renegotiation on the server side (it should be the default for BoringSSL, but we set it explicitly for OpenSSL compatibility)
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_never);
}
struct us_internal_ssl_socket_t *result =
@@ -250,64 +246,6 @@ struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
return result;
}
/// @brief Complete the shutdown, or do a fast shutdown when needed; this should only be called before closing the socket
/// @param s
int us_internal_handle_shutdown(struct us_internal_ssl_socket_t *s, int force_fast_shutdown) {
// if we are already shut down or in the middle of a handshake we don't need to do anything
// Scenarios:
// 1 - SSL is not initialized yet (null)
// 2 - socket is already shut down
// 3 - we already sent a shutdown
// 4 - we are in the middle of a handshake
// 5 - we received a fatal error
if(us_internal_ssl_socket_is_shut_down(s) || s->fatal_error || !SSL_is_init_finished(s->ssl)) return 1;
// we are closing the socket but have not sent a shutdown yet
int state = SSL_get_shutdown(s->ssl);
int sent_shutdown = state & SSL_SENT_SHUTDOWN;
int received_shutdown = state & SSL_RECEIVED_SHUTDOWN;
// if we are missing a shutdown call, we need to do a fast shutdown here
if(!sent_shutdown || !received_shutdown) {
// make sure that the ssl loop data is set
us_internal_set_loop_ssl_data(s);
// Zero means that we should wait for the peer to close the connection
// but we are already closing the connection so we do a fast shutdown here
int ret = SSL_shutdown(s->ssl);
if(ret == 0 && force_fast_shutdown) {
// do a fast shutdown (dont wait for peer)
ret = SSL_shutdown(s->ssl);
}
if(ret < 0) {
// we got an error here, but we don't care about it since we are closing the socket
int err = SSL_get_error(s->ssl, ret);
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
// clear
ERR_clear_error();
s->fatal_error = 1;
// Fatal error occurred, we should close the socket immediately
return 1;
}
if(err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {
// We are waiting to become readable or writable; the shutdown will complete in SSL_read
// if we are forcing a fast shutdown we should return 1 here to immediately close the socket
// Scenarios:
// 1 - We called abort but the socket is not writable or readable anymore (force_fast_shutdown = 1)
// 2 - We called close but want to wait until close_notify is received (force_fast_shutdown = 0)
return force_fast_shutdown ? 1 : 0;
}
// If we get an error here we probably did not even start the first handshake, or we hit a critical error, so just close the socket
// Scenarios:
// 1 - We aborted the connection too fast and did not even start the first handshake
// 2 - SSL is in a broken state
// 3 - SSL is not broken but is in a state that we cannot recover from
s->fatal_error = 1;
return 1;
}
return ret == 1;
}
return 1;
}
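
The shutdown handling above boils down to the standard two-step SSL_shutdown pattern. A self-contained sketch of that pattern with stock OpenSSL calls, assuming `ssl` is an established connection; the return convention (1 means "safe to close now") mirrors the function above, but the helper itself is illustrative:

#include <openssl/ssl.h>
#include <openssl/err.h>

static int fast_shutdown_sketch(SSL *ssl, int force_fast) {
    int ret = SSL_shutdown(ssl);            /* first call: send our close_notify */
    if (ret == 0 && force_fast) {
        ret = SSL_shutdown(ssl);            /* second call: do not wait for the peer */
    }
    if (ret < 0) {
        int err = SSL_get_error(ssl, ret);
        if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {
            return force_fast;              /* retryable: only force the close when asked to */
        }
        ERR_clear_error();                  /* anything else is treated as fatal */
        return 1;
    }
    return ret == 1;                        /* 1 only when both close_notify messages are done */
}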
void us_internal_on_ssl_handshake(
struct us_internal_ssl_socket_context_t *context,
void (*on_handshake)(struct us_internal_ssl_socket_t *, int success,
@@ -318,17 +256,9 @@ void us_internal_on_ssl_handshake(
context->handshake_data = custom_data;
}
int us_internal_ssl_socket_is_closed(struct us_internal_ssl_socket_t *s) {
return us_socket_is_closed(0, &s->s);
}
struct us_internal_ssl_socket_t *
us_internal_ssl_socket_close(struct us_internal_ssl_socket_t *s, int code,
void *reason) {
// check if we are already closed
if (us_internal_ssl_socket_is_closed(s)) return s;
if (s->handshake_state != HANDSHAKE_COMPLETED) {
// if we have some pending handshake we cancel it and try to check the
// latest handshake error this way we will always call on_handshake with the
@@ -339,14 +269,8 @@ us_internal_ssl_socket_close(struct us_internal_ssl_socket_t *s, int code,
us_internal_trigger_handshake_callback(s, 0);
}
// if we are in the middle of a close_notify we need to finish it (code != 0 forces a fast shutdown)
int can_close = us_internal_handle_shutdown(s, code != 0);
// only close the socket if we are not in the middle of a handshake
if(can_close) {
return (struct us_internal_ssl_socket_t *)us_socket_close(0, (struct us_socket_t *)s, code, reason);
}
return s;
return (struct us_internal_ssl_socket_t *)us_socket_close(
0, (struct us_socket_t *)s, code, reason);
}
void us_internal_trigger_handshake_callback(struct us_internal_ssl_socket_t *s,
@@ -368,7 +292,26 @@ int us_internal_ssl_renegotiate(struct us_internal_ssl_socket_t *s) {
// if is a server and we have no pending renegotiation we can check
// the limits
s->handshake_state = HANDSHAKE_RENEGOTIATION_PENDING;
#if ALLOW_SERVER_RENEGOTIATION
if (!s->is_client && !SSL_renegotiate_pending(s->ssl)) {
uint64_t now = time(NULL);
struct us_internal_ssl_socket_context_t *context =
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
// if is not the first time we negotiate and we are outside the time
// window, reset the limits
if (s->last_ssl_renegotiation && (now - s->last_ssl_renegotiation) >=
context->client_renegotiation_window) {
// reset the limits
s->client_pending_renegotiations = context->client_renegotiation_limit;
}
// if we have no more renegotiations, we should close the connection
if (s->client_pending_renegotiations == 0) {
return 0;
}
s->last_ssl_renegotiation = now;
s->client_pending_renegotiations--;
}
#endif
if (!SSL_renegotiate(s->ssl)) {
// we failed to renegotiate
us_internal_trigger_handshake_callback(s, 0);
@@ -378,13 +321,24 @@ int us_internal_ssl_renegotiate(struct us_internal_ssl_socket_t *s) {
}
void us_internal_update_handshake(struct us_internal_ssl_socket_t *s) {
struct us_internal_ssl_socket_context_t *context =
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
// nothing to do here, renegotiation must be handled in SSL_read
if (s->handshake_state != HANDSHAKE_PENDING)
return;
if (us_internal_ssl_socket_is_closed(s) || us_internal_ssl_socket_is_shut_down(s) ||
(s->ssl && SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN)) {
struct us_loop_t *loop = us_socket_context_loop(0, &context->sc);
struct loop_ssl_data *loop_ssl_data =
(struct loop_ssl_data *)loop->data.ssl_data;
loop_ssl_data->ssl_read_input_length = 0;
loop_ssl_data->ssl_read_input_offset = 0;
loop_ssl_data->ssl_socket = &s->s;
loop_ssl_data->msg_more = 0;
if (us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s) ||
SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN) {
us_internal_trigger_handshake_callback(s, 0);
return;
@@ -393,6 +347,7 @@ void us_internal_update_handshake(struct us_internal_ssl_socket_t *s) {
int result = SSL_do_handshake(s->ssl);
if (SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN) {
s->received_ssl_shutdown = 1;
us_internal_ssl_socket_close(s, 0, NULL);
return;
}
@@ -401,23 +356,30 @@ void us_internal_update_handshake(struct us_internal_ssl_socket_t *s) {
int err = SSL_get_error(s->ssl, result);
// as far as I know these are the only errors we want to handle
if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_WANT_WRITE) {
us_internal_trigger_handshake_callback(s, 1);
// clear per thread error queue if it may contain something
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
ERR_clear_error();
s->fatal_error = 1;
}
us_internal_trigger_handshake_callback(s, 0);
return;
}
s->handshake_state = HANDSHAKE_PENDING;
s->ssl_write_wants_read = 1;
// Ensure that we'll cycle through OpenSSL's internal state
if (!us_socket_is_closed(0, &s->s) &&
!us_internal_ssl_socket_is_shut_down(s)) {
us_socket_write(1, loop_ssl_data->ssl_socket, "\0", 0, 0);
}
return;
}
// success
us_internal_trigger_handshake_callback(s, 1);
s->ssl_write_wants_read = 1;
// Ensure that we'll cycle through OpenSSL's internal state
if (!us_socket_is_closed(0, &s->s) &&
!us_internal_ssl_socket_is_shut_down(s)) {
us_socket_write(1, loop_ssl_data->ssl_socket, "\0", 0, 0);
}
}
struct us_internal_ssl_socket_t *
@@ -425,33 +387,16 @@ ssl_on_close(struct us_internal_ssl_socket_t *s, int code, void *reason) {
struct us_internal_ssl_socket_context_t *context =
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
us_internal_set_loop_ssl_data(s);
struct us_internal_ssl_socket_t * ret = context->on_close(s, code, reason);
SSL_free(s->ssl); // free SSL after on_close
s->ssl = NULL; // set to NULL
return ret;
}
SSL_free(s->ssl);
struct us_internal_ssl_socket_t * ssl_on_timeout(struct us_internal_ssl_socket_t *s) {
struct us_internal_ssl_socket_context_t *context =
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
us_internal_set_loop_ssl_data(s);
return context->on_timeout(s);
}
struct us_internal_ssl_socket_t * ssl_on_long_timeout(struct us_internal_ssl_socket_t *s) {
struct us_internal_ssl_socket_context_t *context =
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
us_internal_set_loop_ssl_data(s);
return context->on_long_timeout(s);
return context->on_close(s, code, reason);
}
struct us_internal_ssl_socket_t *
ssl_on_end(struct us_internal_ssl_socket_t *s) {
us_internal_set_loop_ssl_data(s);
// whatever state we are in, a TCP FIN is always an answered shutdown
/* Todo: this should report CLEANLY SHUTDOWN as reason */
return us_internal_ssl_socket_close(s, 0, NULL);
}
@@ -463,20 +408,43 @@ struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s,
struct us_internal_ssl_socket_context_t *context =
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
struct loop_ssl_data *loop_ssl_data = us_internal_set_loop_ssl_data(s);
struct us_loop_t *loop = us_socket_context_loop(0, &context->sc);
struct loop_ssl_data *loop_ssl_data =
(struct loop_ssl_data *)loop->data.ssl_data;
// note: if we put data here we should never really clear it (not in write
// either, it still should be available for SSL_write to read from!)
loop_ssl_data->ssl_read_input = data;
loop_ssl_data->ssl_read_input_length = length;
loop_ssl_data->ssl_read_input_offset = 0;
loop_ssl_data->ssl_socket = &s->s;
loop_ssl_data->msg_more = 0;
if (us_internal_ssl_socket_is_closed(s)) {
if (us_socket_is_closed(0, &s->s) || s->received_ssl_shutdown) {
return NULL;
}
if (us_internal_ssl_socket_is_shut_down(s)) {
us_internal_ssl_socket_close(s, 0, NULL);
return NULL;
int ret = 0;
if ((ret = SSL_shutdown(s->ssl)) == 1) {
// two phase shutdown is complete here
/* Todo: this should also report some kind of clean shutdown */
return us_internal_ssl_socket_close(s, 0, NULL);
} else if (ret < 0) {
int err = SSL_get_error(s->ssl, ret);
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
// we need to clear the error queue in case these were added to the thread
// local queue
ERR_clear_error();
}
}
// no further processing of data when in shutdown state
return s;
}
// bug checking: this loop needs a lot of attention and clean-ups and
@@ -484,12 +452,17 @@ struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s,
int read = 0;
restart:
// read until shutdown
while (1) {
while (!s->received_ssl_shutdown) {
int just_read = SSL_read(s->ssl,
loop_ssl_data->ssl_read_output +
LIBUS_RECV_BUFFER_PADDING + read,
LIBUS_RECV_BUFFER_LENGTH - read);
// we need to check if we received a shutdown here
if (SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN) {
s->received_ssl_shutdown = 1;
// we will only close after we handle the data and errors
}
if (just_read <= 0) {
int err = SSL_get_error(s->ssl, just_read);
// as far as I know these are the only errors we want to handle
@@ -504,9 +477,8 @@ restart:
// clean and close renegotiation failed
err = SSL_ERROR_SSL;
} else if (err == SSL_ERROR_ZERO_RETURN) {
// Remotely-Initiated Shutdown
// See: https://www.openssl.org/docs/manmaster/man3/SSL_shutdown.html
// zero return can be EOF/FIN, if we have data just signal on_data and
// close
if (read) {
context =
(struct us_internal_ssl_socket_context_t *)us_socket_context(
@@ -515,24 +487,21 @@ restart:
s = context->on_data(
s, loop_ssl_data->ssl_read_output + LIBUS_RECV_BUFFER_PADDING,
read);
if (!s || us_internal_ssl_socket_is_closed(s)) {
return NULL; // stop processing data
if (!s || us_socket_is_closed(0, &s->s)) {
return s;
}
}
// terminate connection here
us_internal_ssl_socket_close(s, 0, NULL);
return NULL; // stop processing data
return us_internal_ssl_socket_close(s, 0, NULL);
}
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
// clear per thread error queue if it may contain something
ERR_clear_error();
s->fatal_error = 1;
}
// terminate connection here
us_internal_ssl_socket_close(s, 0, NULL);
return NULL; // stop processing data
return us_internal_ssl_socket_close(s, 0, NULL);
} else {
// emit the data we have and exit
@@ -557,8 +526,8 @@ restart:
s = context->on_data(
s, loop_ssl_data->ssl_read_output + LIBUS_RECV_BUFFER_PADDING,
read);
if (!s || us_internal_ssl_socket_is_closed(s)) {
return NULL; // stop processing data
if (!s || us_socket_is_closed(0, &s->s)) {
return s;
}
break;
@@ -580,19 +549,22 @@ restart:
// emit data and restart
s = context->on_data(
s, loop_ssl_data->ssl_read_output + LIBUS_RECV_BUFFER_PADDING, read);
if (!s || us_internal_ssl_socket_is_closed(s)) {
return NULL;
if (!s || us_socket_is_closed(0, &s->s)) {
return s;
}
read = 0;
goto restart;
}
}
// Trigger writable if we failed last SSL_write with SSL_ERROR_WANT_READ
// If we failed SSL_read because we need to write more data (SSL_ERROR_WANT_WRITE) we are not going to trigger on_writable, we will wait until the next on_data or on_writable event
// SSL_read will try to flush the write buffer, and if it fails with SSL_ERROR_WANT_WRITE the socket is not in a writable state anymore, so it only makes sense to trigger on_writable once we can write more data
// Otherwise we could trigger on_writable -> on_data events in a recursive loop
if (s->ssl_write_wants_read && !s->ssl_read_wants_write) {
// we received the shutdown after reading so we close
if (s->received_ssl_shutdown) {
us_internal_ssl_socket_close(s, 0, NULL);
return NULL;
}
// trigger writable if we failed last write with want read
if (s->ssl_write_wants_read) {
s->ssl_write_wants_read = 0;
// make sure to update context before we call (context can change if the
@@ -603,8 +575,8 @@ restart:
s = (struct us_internal_ssl_socket_t *)context->sc.on_writable(
&s->s); // cast here!
// if we are closed here, then exit
if (!s || us_internal_ssl_socket_is_closed(s)) {
return NULL;
if (!s || us_socket_is_closed(0, &s->s)) {
return s;
}
}
@@ -613,7 +585,6 @@ restart:
struct us_internal_ssl_socket_t *
ssl_on_writable(struct us_internal_ssl_socket_t *s) {
us_internal_set_loop_ssl_data(s);
us_internal_update_handshake(s);
struct us_internal_ssl_socket_context_t *context =
@@ -635,8 +606,8 @@ ssl_on_writable(struct us_internal_ssl_socket_t *s) {
}
// Do not call on_writable if the socket is closed.
// on close means the socket data is no longer accessible
if (!s || us_internal_ssl_socket_is_closed(s) || us_internal_ssl_socket_is_shut_down(s)) {
return s;
if (!s || us_socket_is_closed(0, &s->s)) {
return 0;
}
if (s->handshake_state == HANDSHAKE_COMPLETED) {
@@ -1058,8 +1029,15 @@ long us_internal_verify_peer_certificate( // NOLINT(runtime/int)
}
return err;
}
struct us_bun_verify_error_t us_ssl_socket_verify_error_from_ssl(SSL *ssl) {
struct us_bun_verify_error_t
us_internal_verify_error(struct us_internal_ssl_socket_t *s) {
if (us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s)) {
return (struct us_bun_verify_error_t){
.error = 0, .code = NULL, .reason = NULL};
}
SSL *ssl = s->ssl;
long x509_verify_error = // NOLINT(runtime/int)
us_internal_verify_peer_certificate(ssl,
X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT);
@@ -1075,17 +1053,6 @@ struct us_bun_verify_error_t us_ssl_socket_verify_error_from_ssl(SSL *ssl) {
.error = x509_verify_error, .code = code, .reason = reason};
}
struct us_bun_verify_error_t
us_internal_verify_error(struct us_internal_ssl_socket_t *s) {
if (!s->ssl || us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s)) {
return (struct us_bun_verify_error_t){
.error = 0, .code = NULL, .reason = NULL};
}
return us_ssl_socket_verify_error_from_ssl(s->ssl);
}
int us_verify_callback(int preverify_ok, X509_STORE_CTX *ctx) {
// From https://www.openssl.org/docs/man1.1.1/man3/SSL_verify_cb:
//
@@ -1350,6 +1317,10 @@ void us_bun_internal_ssl_socket_context_add_server_name(
/* We do not want to hold any nullptr's in our SNI tree */
if (ssl_context) {
#if ALLOW_SERVER_RENEGOTIATION
context->client_renegotiation_limit = options.client_renegotiation_limit;
context->client_renegotiation_window = options.client_renegotiation_window;
#endif
if (sni_add(context->sni, hostname_pattern, ssl_context)) {
/* If we already had that name, ignore */
free_ssl_context(ssl_context);
@@ -1498,6 +1469,10 @@ us_internal_bun_create_ssl_socket_context(
context->on_handshake = NULL;
context->handshake_data = NULL;
#if ALLOW_SERVER_RENEGOTIATION
context->client_renegotiation_limit = options.client_renegotiation_limit;
context->client_renegotiation_window = options.client_renegotiation_window;
#endif
/* We, as parent context, may ignore data */
context->sc.is_low_prio = (int (*)(struct us_socket_t *))ssl_is_low_prio;
@@ -1528,7 +1503,7 @@ void us_internal_ssl_socket_context_free(
sni_free(context->sni, sni_hostname_destructor);
}
us_internal_socket_context_free(0, &context->sc);
us_socket_context_free(0, &context->sc);
}
struct us_listen_socket_t *us_internal_ssl_socket_context_listen(
@@ -1617,8 +1592,7 @@ void us_internal_ssl_socket_context_on_timeout(
struct us_internal_ssl_socket_t *s)) {
us_socket_context_on_timeout(0, (struct us_socket_context_t *)context,
(struct us_socket_t * (*)(struct us_socket_t *))
ssl_on_timeout);
context->on_timeout = on_timeout;
on_timeout);
}
void us_internal_ssl_socket_context_on_long_timeout(
@@ -1627,8 +1601,7 @@ void us_internal_ssl_socket_context_on_long_timeout(
struct us_internal_ssl_socket_t *s)) {
us_socket_context_on_long_timeout(
0, (struct us_socket_context_t *)context,
(struct us_socket_t * (*)(struct us_socket_t *)) ssl_on_long_timeout);
context->on_long_timeout = on_long_timeout;
(struct us_socket_t * (*)(struct us_socket_t *)) on_long_timeout);
}
/* We do not really listen to passed FIN-handler, we entirely override it with
@@ -1683,8 +1656,8 @@ int us_internal_ssl_socket_raw_write(struct us_internal_ssl_socket_t *s,
int us_internal_ssl_socket_write(struct us_internal_ssl_socket_t *s,
const char *data, int length, int msg_more) {
if (us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s) || length == 0) {
if (us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s)) {
return 0;
}
@@ -1724,7 +1697,6 @@ int us_internal_ssl_socket_write(struct us_internal_ssl_socket_t *s,
// these two errors may add to the error queue, which is per thread and
// must be cleared
ERR_clear_error();
s->fatal_error = 1;
// all errors here except for want write are critical and should not
// happen
@@ -1742,12 +1714,12 @@ void *us_internal_connecting_ssl_socket_ext(struct us_connecting_socket_t *s) {
}
int us_internal_ssl_socket_is_shut_down(struct us_internal_ssl_socket_t *s) {
return !s->ssl || us_socket_is_shut_down(0, &s->s) ||
SSL_get_shutdown(s->ssl) & SSL_SENT_SHUTDOWN || s->fatal_error;
return us_socket_is_shut_down(0, &s->s) ||
SSL_get_shutdown(s->ssl) & SSL_SENT_SHUTDOWN;
}
void us_internal_ssl_socket_shutdown(struct us_internal_ssl_socket_t *s) {
if (!us_internal_ssl_socket_is_closed(s) &&
if (!us_socket_is_closed(0, &s->s) &&
!us_internal_ssl_socket_is_shut_down(s)) {
struct us_internal_ssl_socket_context_t *context =
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
@@ -1768,8 +1740,11 @@ void us_internal_ssl_socket_shutdown(struct us_internal_ssl_socket_t *s) {
loop_ssl_data->ssl_socket = &s->s;
loop_ssl_data->msg_more = 0;
// sets SSL_SENT_SHUTDOWN and waits for the other side to do the same
// sets SSL_SENT_SHUTDOWN no matter what (not actually true if error!)
int ret = SSL_shutdown(s->ssl);
if (ret == 0) {
ret = SSL_shutdown(s->ssl);
}
if (SSL_in_init(s->ssl) || SSL_get_quiet_shutdown(s->ssl)) {
// when SSL_in_init or quiet shutdown in BoringSSL, we call shutdown
@@ -1783,7 +1758,6 @@ void us_internal_ssl_socket_shutdown(struct us_internal_ssl_socket_t *s) {
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
// clear
ERR_clear_error();
s->fatal_error = 1;
}
// we get here if we are shutting down while still in init
@@ -1824,7 +1798,6 @@ ssl_wrapped_context_on_close(struct us_internal_ssl_socket_t *s, int code,
wrapped_context->old_events.on_close((struct us_socket_t *)s, code, reason);
}
us_socket_context_unref(0, wrapped_context->tcp_context);
return s;
}
@@ -1981,7 +1954,6 @@ struct us_internal_ssl_socket_t *us_internal_ssl_socket_wrap_with_tls(
}
struct us_socket_context_t *old_context = us_socket_context(0, s);
us_socket_context_ref(0,old_context);
struct us_socket_context_t *context = us_create_bun_socket_context(
1, old_context->loop, sizeof(struct us_wrapped_socket_context_t),
@@ -2004,7 +1976,6 @@ struct us_internal_ssl_socket_t *us_internal_ssl_socket_wrap_with_tls(
};
wrapped_context->old_events = old_events;
wrapped_context->events = events;
wrapped_context->tcp_context = old_context;
// no need to wrap open because socket is already open (only new context will
// be called so we can configure hostname and ssl stuff normally here before
@@ -2077,8 +2048,8 @@ us_socket_context_on_socket_connect_error(
socket->ssl = NULL;
socket->ssl_write_wants_read = 0;
socket->ssl_read_wants_write = 0;
socket->fatal_error = 0;
socket->handshake_state = HANDSHAKE_PENDING;
socket->received_ssl_shutdown = 0;
return socket;
}
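
The ALLOW_SERVER_RENEGOTIATION blocks above implement a simple budget: at most client_renegotiation_limit renegotiations per client_renegotiation_window seconds, tracked per socket. The same logic reduced to a self-contained sketch; the struct and function names here are illustrative, not the diff's:

#include <stdint.h>
#include <time.h>

struct renegotiation_budget {
    unsigned int limit;     /* allowed renegotiations per window */
    unsigned int window;    /* window length in seconds */
    unsigned int remaining; /* starts at `limit` */
    uint64_t last;          /* time of the last accepted renegotiation */
};

static int renegotiation_allowed(struct renegotiation_budget *b) {
    uint64_t now = (uint64_t)time(NULL);
    if (b->last && now - b->last >= b->window) {
        b->remaining = b->limit; /* window elapsed: reset the budget */
    }
    if (b->remaining == 0) {
        return 0;                /* over budget: the caller closes the connection */
    }
    b->last = now;
    b->remaining--;
    return 1;
}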

View File

@@ -1,16 +1,13 @@
// MSVC doesn't support C11 stdatomic.h properly yet.
// so we use C++ std::atomic instead.
#include "./root_certs.h"
#include "./internal/internal.h"
#include <atomic>
#include <openssl/pem.h>
#include "./root_certs.h"
#include <openssl/x509.h>
#include <string.h>
static const int root_certs_size = sizeof(root_certs) / sizeof(root_certs[0]);
static X509 *root_cert_instances[sizeof(root_certs) / sizeof(root_certs[0])] = {
NULL};
static X509 *root_extra_cert_instances = {NULL};
#include <openssl/pem.h>
#include <atomic>
static const int root_certs_size = sizeof(root_certs) / sizeof(root_certs[0]);
static X509* root_cert_instances[sizeof(root_certs) / sizeof(root_certs[0])] = {NULL};
static std::atomic_flag root_cert_instances_lock = ATOMIC_FLAG_INIT;
static std::atomic_bool root_cert_instances_initialized = 0;
@@ -19,16 +16,15 @@ static std::atomic_bool root_cert_instances_initialized = 0;
// for the OpenSSL CLI, but works poorly for this case because it involves
// synchronous interaction with the controlling terminal, something we never
// want, and use this function to avoid it.
int us_no_password_callback(char *buf, int size, int rwflag, void *u) {
int us_no_password_callback(char* buf, int size, int rwflag, void* u) {
return 0;
}
static X509 *
us_ssl_ctx_get_X509_without_callback_from(struct us_cert_string_t content) {
static X509 * us_ssl_ctx_get_X509_without_callback_from(struct us_cert_string_t content) {
X509 *x = NULL;
BIO *in;
ERR_clear_error(); // clear error stack for SSL_CTX_use_certificate()
ERR_clear_error(); // clear error stack for SSL_CTX_use_certificate()
in = BIO_new_mem_buf(content.str, content.len);
if (in == NULL) {
@@ -41,37 +37,9 @@ us_ssl_ctx_get_X509_without_callback_from(struct us_cert_string_t content) {
OPENSSL_PUT_ERROR(SSL, ERR_R_PEM_LIB);
goto end;
}
return x;
end:
X509_free(x);
BIO_free(in);
return NULL;
}
static X509 *
us_ssl_ctx_get_X509_without_callback_from_file(const char *filename) {
X509 *x = NULL;
BIO *in;
ERR_clear_error(); // clear error stack for SSL_CTX_use_certificate()
in = BIO_new(BIO_s_file());
if (in == NULL) {
OPENSSL_PUT_ERROR(SSL, ERR_R_BUF_LIB);
goto end;
}
if (BIO_read_filename(in, filename) <= 0) {
OPENSSL_PUT_ERROR(SSL, ERR_R_SYS_LIB);
goto end;
}
x = PEM_read_bio_X509(in, NULL, us_no_password_callback, NULL);
if (x == NULL) {
OPENSSL_PUT_ERROR(SSL, ERR_R_PEM_LIB);
goto end;
}
return x;
end:
X509_free(x);
BIO_free(in);
@@ -79,65 +47,44 @@ end:
}
static void us_internal_init_root_certs() {
if (std::atomic_load(&root_cert_instances_initialized) == 1)
return;
if(std::atomic_load(&root_cert_instances_initialized) == 1) return;
while (atomic_flag_test_and_set_explicit(&root_cert_instances_lock,
std::memory_order_acquire))
;
while(atomic_flag_test_and_set_explicit(&root_cert_instances_lock, std::memory_order_acquire));
if (!atomic_exchange(&root_cert_instances_initialized, 1)) {
if(!atomic_exchange(&root_cert_instances_initialized, 1)) {
for (size_t i = 0; i < root_certs_size; i++) {
root_cert_instances[i] = us_ssl_ctx_get_X509_without_callback_from(root_certs[i]);
}
}
atomic_flag_clear_explicit(&root_cert_instances_lock, std::memory_order_release);
}
extern "C" int us_internal_raw_root_certs(struct us_cert_string_t** out) {
*out = root_certs;
return root_certs_size;
}
extern "C" X509_STORE* us_get_default_ca_store() {
X509_STORE *store = X509_STORE_new();
if (store == NULL) {
return NULL;
}
if (!X509_STORE_set_default_paths(store)) {
X509_STORE_free(store);
return NULL;
}
us_internal_init_root_certs();
// load all root_cert_instances into the default CA store
for (size_t i = 0; i < root_certs_size; i++) {
root_cert_instances[i] =
us_ssl_ctx_get_X509_without_callback_from(root_certs[i]);
X509* cert = root_cert_instances[i];
if(cert == NULL) continue;
X509_up_ref(cert);
X509_STORE_add_cert(store, cert);
}
// get extra cert option from environment variable
const char *extra_cert = getenv("NODE_EXTRA_CA_CERTS");
if (extra_cert) {
size_t length = strlen(extra_cert);
if (length > 0) {
root_extra_cert_instances =
us_ssl_ctx_get_X509_without_callback_from_file(extra_cert);
}
}
}
atomic_flag_clear_explicit(&root_cert_instances_lock,
std::memory_order_release);
}
extern "C" int us_internal_raw_root_certs(struct us_cert_string_t **out) {
*out = root_certs;
return root_certs_size;
}
extern "C" X509_STORE *us_get_default_ca_store() {
X509_STORE *store = X509_STORE_new();
if (store == NULL) {
return NULL;
}
if (!X509_STORE_set_default_paths(store)) {
X509_STORE_free(store);
return NULL;
}
us_internal_init_root_certs();
// load all root_cert_instances into the default CA store
for (size_t i = 0; i < root_certs_size; i++) {
X509 *cert = root_cert_instances[i];
if (cert == NULL)
continue;
X509_up_ref(cert);
X509_STORE_add_cert(store, cert);
}
if (root_extra_cert_instances) {
X509_up_ref(root_extra_cert_instances);
X509_STORE_add_cert(store, root_extra_cert_instances);
}
return store;
return store;
}
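
The NODE_EXTRA_CA_CERTS handling above is plain OpenSSL underneath. A sketch of the same idea using only stock OpenSSL calls, with the uSockets wrappers and the initialization lock left out; the helper names are illustrative:

#include <openssl/bio.h>
#include <openssl/pem.h>
#include <openssl/x509.h>
#include <stdlib.h>

/* mirrors us_no_password_callback: never answer a passphrase prompt */
static int no_password_cb(char *buf, int size, int rwflag, void *u) {
    (void)buf; (void)size; (void)rwflag; (void)u;
    return 0;
}

static void add_extra_ca_cert_sketch(X509_STORE *store) {
    const char *path = getenv("NODE_EXTRA_CA_CERTS");
    if (path == NULL || path[0] == '\0') {
        return;
    }
    BIO *in = BIO_new_file(path, "r");
    if (in == NULL) {
        return;
    }
    X509 *extra = PEM_read_bio_X509(in, NULL, no_password_cb, NULL);
    if (extra != NULL) {
        X509_STORE_add_cert(store, extra); /* the store keeps its own reference */
        X509_free(extra);
    }
    BIO_free(in);
}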

View File

@@ -3598,20 +3598,4 @@ static struct us_cert_string_t root_certs[] = {
"4Sw5/7W0cwDk90imc6y/st53BIe0o82bNSQ3+pCTE4FCxpgmdTdmQRCsu/WU48IxK63nI1bM\n"
"NSWSs1A=\n"
"-----END CERTIFICATE-----",.len=2033},
/* FIRMAPROFESIONAL CA ROOT-A WEB */
{.str="-----BEGIN CERTIFICATE-----\n"
"MIICejCCAgCgAwIBAgIQMZch7a+JQn81QYehZ1ZMbTAKBggqhkjOPQQDAzBuMQswCQYDVQQG\n"
"EwJFUzEcMBoGA1UECgwTRmlybWFwcm9mZXNpb25hbCBTQTEYMBYGA1UEYQwPVkFURVMtQTYy\n"
"NjM0MDY4MScwJQYDVQQDDB5GSVJNQVBST0ZFU0lPTkFMIENBIFJPT1QtQSBXRUIwHhcNMjIw\n"
"NDA2MDkwMTM2WhcNNDcwMzMxMDkwMTM2WjBuMQswCQYDVQQGEwJFUzEcMBoGA1UECgwTRmly\n"
"bWFwcm9mZXNpb25hbCBTQTEYMBYGA1UEYQwPVkFURVMtQTYyNjM0MDY4MScwJQYDVQQDDB5G\n"
"SVJNQVBST0ZFU0lPTkFMIENBIFJPT1QtQSBXRUIwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARH\n"
"U+osEaR3xyrq89Zfe9MEkVz6iMYiuYMQYneEMy3pA4jU4DP37XcsSmDq5G+tbbT4TIqk5B/K\n"
"6k84Si6CcyvHZpsKjECcfIr28jlgst7L7Ljkb+qbXbdTkBgyVcUgt5SjYzBhMA8GA1UdEwEB\n"
"/wQFMAMBAf8wHwYDVR0jBBgwFoAUk+FDY1w8ndYn81LsF7Kpryz3dvgwHQYDVR0OBBYEFJPh\n"
"Q2NcPJ3WJ/NS7Beyqa8s93b4MA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNoADBlAjAd\n"
"fKR7w4l1M+E7qUW/Runpod3JIha3RxEL2Jq68cgLcFBTApFwhVmpHqTm6iMxoAACMQD94viz\n"
"rxa5HnPEluPBMBnYfubDl94cT7iJLzPrSA8Z94dGXSaQpYXFuXqUPoeovQA=\n"
"-----END CERTIFICATE-----",.len=917},
};

View File

@@ -14,7 +14,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// clang-format off
#pragma once
#ifndef INTERNAL_H
#define INTERNAL_H
@@ -101,9 +100,6 @@ struct addrinfo_result {
int error;
};
#define us_internal_ssl_socket_context_r struct us_internal_ssl_socket_context_t *nonnull_arg
#define us_internal_ssl_socket_r struct us_internal_ssl_socket_t *nonnull_arg
extern int Bun__addrinfo_get(struct us_loop_t* loop, const char* host, struct addrinfo_request** ptr);
extern int Bun__addrinfo_set(struct addrinfo_request* ptr, struct us_connecting_socket_t* socket);
extern void Bun__addrinfo_freeRequest(struct addrinfo_request* addrinfo_req, int error);
@@ -113,19 +109,19 @@ extern struct addrinfo_result *Bun__addrinfo_getRequestResult(struct addrinfo_re
/* Loop related */
void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error,
int events);
void us_internal_timer_sweep(us_loop_r loop);
void us_internal_free_closed_sockets(us_loop_r loop);
void us_internal_timer_sweep(struct us_loop_t *loop);
void us_internal_free_closed_sockets(struct us_loop_t *loop);
void us_internal_loop_link(struct us_loop_t *loop,
struct us_socket_context_t *context);
void us_internal_loop_unlink(struct us_loop_t *loop,
struct us_socket_context_t *context);
void us_internal_loop_data_init(struct us_loop_t *loop,
void (*wakeup_cb)(us_loop_r loop),
void (*pre_cb)(us_loop_r loop),
void (*post_cb)(us_loop_r loop));
void us_internal_loop_data_free(us_loop_r loop);
void us_internal_loop_pre(us_loop_r loop);
void us_internal_loop_post(us_loop_r loop);
void (*wakeup_cb)(struct us_loop_t *loop),
void (*pre_cb)(struct us_loop_t *loop),
void (*post_cb)(struct us_loop_t *loop));
void us_internal_loop_data_free(struct us_loop_t *loop);
void us_internal_loop_pre(struct us_loop_t *loop);
void us_internal_loop_post(struct us_loop_t *loop);
/* Asyncs (old) */
struct us_internal_async *us_internal_create_async(struct us_loop_t *loop,
@@ -142,22 +138,18 @@ int us_internal_poll_type(struct us_poll_t *p);
void us_internal_poll_set_type(struct us_poll_t *p, int poll_type);
/* SSL loop data */
void us_internal_init_loop_ssl_data(us_loop_r loop);
void us_internal_free_loop_ssl_data(us_loop_r loop);
void us_internal_init_loop_ssl_data(struct us_loop_t *loop);
void us_internal_free_loop_ssl_data(struct us_loop_t *loop);
/* Socket context related */
void us_internal_socket_context_link_socket(us_socket_context_r context,
us_socket_r s);
void us_internal_socket_context_unlink_socket(int ssl,
us_socket_context_r context, us_socket_r s);
void us_internal_socket_context_link_socket(struct us_socket_context_t *context,
struct us_socket_t *s);
void us_internal_socket_context_unlink_socket(
struct us_socket_context_t *context, struct us_socket_t *s);
void us_internal_socket_after_resolve(struct us_connecting_socket_t *s);
void us_internal_socket_after_open(us_socket_r s, int error);
struct us_internal_ssl_socket_t *
us_internal_ssl_socket_close(us_internal_ssl_socket_r s, int code,
void *reason);
int us_internal_handle_dns_results(us_loop_r loop);
void us_internal_socket_after_open(struct us_socket_t *s, int error);
int us_internal_handle_dns_results(struct us_loop_t *loop);
/* Sockets are polls */
struct us_socket_t {
@@ -176,7 +168,6 @@ struct us_socket_t {
struct us_connecting_socket_t {
alignas(LIBUS_EXT_ALIGNMENT) struct addrinfo_request *addrinfo_req;
struct us_socket_context_t *context;
// this is used to track all dns resolutions in this connection
struct us_connecting_socket_t *next;
struct us_socket_t *connecting_head;
int options;
@@ -187,13 +178,9 @@ struct us_connecting_socket_t {
uint16_t port;
int error;
struct addrinfo *addrinfo_head;
// this is used to track pending connecting sockets in the context
struct us_connecting_socket_t* next_pending;
struct us_connecting_socket_t* prev_pending;
};
struct us_wrapped_socket_context_t {
struct us_socket_context_t* tcp_context;
struct us_socket_events_t events;
struct us_socket_events_t old_events;
};
@@ -256,19 +243,17 @@ struct us_listen_socket_t {
/* Listen sockets are kept in their own list */
void us_internal_socket_context_link_listen_socket(
us_socket_context_r context, struct us_listen_socket_t *s);
void us_internal_socket_context_unlink_listen_socket(int ssl,
us_socket_context_r context, struct us_listen_socket_t *s);
struct us_socket_context_t *context, struct us_listen_socket_t *s);
void us_internal_socket_context_unlink_listen_socket(
struct us_socket_context_t *context, struct us_listen_socket_t *s);
struct us_socket_context_t {
alignas(LIBUS_EXT_ALIGNMENT) struct us_loop_t *loop;
uint32_t global_tick;
uint32_t ref_count;
unsigned char timestamp;
unsigned char long_timestamp;
struct us_socket_t *head_sockets;
struct us_listen_socket_t *head_listen_sockets;
struct us_connecting_socket_t *head_connecting_sockets;
struct us_socket_t *iterator;
struct us_socket_context_t *prev, *next;
@@ -295,35 +280,34 @@ struct us_internal_ssl_socket_t;
typedef void (*us_internal_on_handshake_t)(
struct us_internal_ssl_socket_t *, int success,
struct us_bun_verify_error_t verify_error, void *custom_data);
void us_internal_socket_context_free(int ssl, struct us_socket_context_t *context);
/* SNI functions */
void us_internal_ssl_socket_context_add_server_name(
us_internal_ssl_socket_context_r context,
struct us_internal_ssl_socket_context_t *context,
const char *hostname_pattern, struct us_socket_context_options_t options,
void *user);
void us_bun_internal_ssl_socket_context_add_server_name(
us_internal_ssl_socket_context_r context,
struct us_internal_ssl_socket_context_t *context,
const char *hostname_pattern,
struct us_bun_socket_context_options_t options, void *user);
void us_internal_ssl_socket_context_remove_server_name(
us_internal_ssl_socket_context_r context,
struct us_internal_ssl_socket_context_t *context,
const char *hostname_pattern);
void us_internal_ssl_socket_context_on_server_name(
us_internal_ssl_socket_context_r context,
struct us_internal_ssl_socket_context_t *context,
void (*cb)(struct us_internal_ssl_socket_context_t *, const char *));
void *
us_internal_ssl_socket_get_sni_userdata(us_internal_ssl_socket_r s);
us_internal_ssl_socket_get_sni_userdata(struct us_internal_ssl_socket_t *s);
void *us_internal_ssl_socket_context_find_server_name_userdata(
us_internal_ssl_socket_context_r context,
struct us_internal_ssl_socket_context_t *context,
const char *hostname_pattern);
void *
us_internal_ssl_socket_get_native_handle(us_internal_ssl_socket_r s);
us_internal_ssl_socket_get_native_handle(struct us_internal_ssl_socket_t *s);
void *us_internal_ssl_socket_context_get_native_handle(
us_internal_ssl_socket_context_r context);
struct us_internal_ssl_socket_context_t *context);
struct us_bun_verify_error_t
us_internal_verify_error(us_internal_ssl_socket_r s);
us_internal_verify_error(struct us_internal_ssl_socket_t *s);
struct us_internal_ssl_socket_context_t *us_internal_create_ssl_socket_context(
struct us_loop_t *loop, int context_ext_size,
struct us_socket_context_options_t options);
@@ -333,115 +317,111 @@ us_internal_bun_create_ssl_socket_context(
struct us_bun_socket_context_options_t options);
void us_internal_ssl_socket_context_free(
us_internal_ssl_socket_context_r context);
struct us_internal_ssl_socket_context_t *context);
void us_internal_ssl_socket_context_on_open(
us_internal_ssl_socket_context_r context,
struct us_internal_ssl_socket_context_t *context,
struct us_internal_ssl_socket_t *(*on_open)(
us_internal_ssl_socket_r s, int is_client, char *ip,
struct us_internal_ssl_socket_t *s, int is_client, char *ip,
int ip_length));
void us_internal_ssl_socket_context_on_close(
us_internal_ssl_socket_context_r context,
struct us_internal_ssl_socket_context_t *context,
struct us_internal_ssl_socket_t *(*on_close)(
us_internal_ssl_socket_r s, int code, void *reason));
struct us_internal_ssl_socket_t *s, int code, void *reason));
void us_internal_ssl_socket_context_on_data(
us_internal_ssl_socket_context_r context,
struct us_internal_ssl_socket_context_t *context,
struct us_internal_ssl_socket_t *(*on_data)(
us_internal_ssl_socket_r s, char *data, int length));
struct us_internal_ssl_socket_t *s, char *data, int length));
void us_internal_update_handshake(us_internal_ssl_socket_r s);
int us_internal_renegotiate(us_internal_ssl_socket_r s);
void us_internal_trigger_handshake_callback(us_internal_ssl_socket_r s,
void us_internal_update_handshake(struct us_internal_ssl_socket_t *s);
int us_internal_renegotiate(struct us_internal_ssl_socket_t *s);
void us_internal_trigger_handshake_callback(struct us_internal_ssl_socket_t *s,
int success);
void us_internal_on_ssl_handshake(
us_internal_ssl_socket_context_r context,
struct us_internal_ssl_socket_context_t *context,
us_internal_on_handshake_t onhandshake, void *custom_data);
void us_internal_ssl_socket_context_on_writable(
us_internal_ssl_socket_context_r context,
struct us_internal_ssl_socket_context_t *context,
struct us_internal_ssl_socket_t *(*on_writable)(
us_internal_ssl_socket_r s));
struct us_internal_ssl_socket_t *s));
void us_internal_ssl_socket_context_on_timeout(
us_internal_ssl_socket_context_r context,
struct us_internal_ssl_socket_context_t *context,
struct us_internal_ssl_socket_t *(*on_timeout)(
us_internal_ssl_socket_r s));
struct us_internal_ssl_socket_t *s));
void us_internal_ssl_socket_context_on_long_timeout(
us_internal_ssl_socket_context_r context,
struct us_internal_ssl_socket_context_t *context,
struct us_internal_ssl_socket_t *(*on_timeout)(
us_internal_ssl_socket_r s));
struct us_internal_ssl_socket_t *s));
void us_internal_ssl_socket_context_on_end(
us_internal_ssl_socket_context_r context,
struct us_internal_ssl_socket_context_t *context,
struct us_internal_ssl_socket_t *(*on_end)(
us_internal_ssl_socket_r s));
struct us_internal_ssl_socket_t *s));
void us_internal_ssl_socket_context_on_connect_error(
us_internal_ssl_socket_context_r context,
struct us_internal_ssl_socket_context_t *context,
struct us_internal_ssl_socket_t *(*on_connect_error)(
us_internal_ssl_socket_r s, int code));
struct us_internal_ssl_socket_t *s, int code));
void us_internal_ssl_socket_context_on_socket_connect_error(
us_internal_ssl_socket_context_r context,
struct us_internal_ssl_socket_context_t *context,
struct us_internal_ssl_socket_t *(*on_socket_connect_error)(
us_internal_ssl_socket_r s, int code));
struct us_internal_ssl_socket_t *s, int code));
struct us_listen_socket_t *us_internal_ssl_socket_context_listen(
us_internal_ssl_socket_context_r context, const char *host,
struct us_internal_ssl_socket_context_t *context, const char *host,
int port, int options, int socket_ext_size);
struct us_listen_socket_t *us_internal_ssl_socket_context_listen_unix(
us_internal_ssl_socket_context_r context, const char *path,
struct us_internal_ssl_socket_context_t *context, const char *path,
size_t pathlen, int options, int socket_ext_size);
struct us_connecting_socket_t *us_internal_ssl_socket_context_connect(
us_internal_ssl_socket_context_r context, const char *host,
struct us_internal_ssl_socket_context_t *context, const char *host,
int port, int options, int socket_ext_size, int* is_resolved);
struct us_internal_ssl_socket_t *us_internal_ssl_socket_context_connect_unix(
us_internal_ssl_socket_context_r context, const char *server_path,
struct us_internal_ssl_socket_context_t *context, const char *server_path,
size_t pathlen, int options, int socket_ext_size);
int us_internal_ssl_socket_write(us_internal_ssl_socket_r s,
int us_internal_ssl_socket_write(struct us_internal_ssl_socket_t *s,
const char *data, int length, int msg_more);
int us_internal_ssl_socket_raw_write(us_internal_ssl_socket_r s,
int us_internal_ssl_socket_raw_write(struct us_internal_ssl_socket_t *s,
const char *data, int length,
int msg_more);
void us_internal_ssl_socket_timeout(us_internal_ssl_socket_r s,
void us_internal_ssl_socket_timeout(struct us_internal_ssl_socket_t *s,
unsigned int seconds);
void *
us_internal_ssl_socket_context_ext(struct us_internal_ssl_socket_context_t *s);
struct us_internal_ssl_socket_context_t *
us_internal_ssl_socket_get_context(us_internal_ssl_socket_r s);
void *us_internal_ssl_socket_ext(us_internal_ssl_socket_r s);
us_internal_ssl_socket_get_context(struct us_internal_ssl_socket_t *s);
void *us_internal_ssl_socket_ext(struct us_internal_ssl_socket_t *s);
void *us_internal_connecting_ssl_socket_ext(struct us_connecting_socket_t *c);
int us_internal_ssl_socket_is_shut_down(us_internal_ssl_socket_r s);
int us_internal_ssl_socket_is_closed(us_internal_ssl_socket_r s);
void us_internal_ssl_socket_shutdown(us_internal_ssl_socket_r s);
int us_internal_ssl_socket_is_shut_down(struct us_internal_ssl_socket_t *s);
void us_internal_ssl_socket_shutdown(struct us_internal_ssl_socket_t *s);
struct us_internal_ssl_socket_t *us_internal_ssl_socket_context_adopt_socket(
us_internal_ssl_socket_context_r context,
us_internal_ssl_socket_r s, int ext_size);
struct us_internal_ssl_socket_context_t *context,
struct us_internal_ssl_socket_t *s, int ext_size);
struct us_internal_ssl_socket_t *us_internal_ssl_socket_wrap_with_tls(
us_socket_r s, struct us_bun_socket_context_options_t options,
struct us_socket_t *s, struct us_bun_socket_context_options_t options,
struct us_socket_events_t events, int socket_ext_size);
struct us_internal_ssl_socket_context_t *
us_internal_create_child_ssl_socket_context(
us_internal_ssl_socket_context_r context, int context_ext_size);
struct us_internal_ssl_socket_context_t *context, int context_ext_size);
struct us_loop_t *us_internal_ssl_socket_context_loop(
us_internal_ssl_socket_context_r context);
struct us_internal_ssl_socket_context_t *context);
struct us_internal_ssl_socket_t *
us_internal_ssl_socket_open(us_internal_ssl_socket_r s, int is_client,
us_internal_ssl_socket_open(struct us_internal_ssl_socket_t *s, int is_client,
char *ip, int ip_length);
int us_raw_root_certs(struct us_cert_string_t **out);
void us_internal_socket_context_unlink_connecting_socket(int ssl, struct us_socket_context_t *context, struct us_connecting_socket_t *c);
void us_internal_socket_context_link_connecting_socket(int ssl, struct us_socket_context_t *context, struct us_connecting_socket_t *c);
#endif
#endif // INTERNAL_H
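
For the variant of us_connecting_socket_t above that carries next_pending/prev_pending, the link/unlink helpers declared here amount to an ordinary intrusive doubly linked list headed by the context's head_connecting_sockets pointer. The bodies below are an illustrative sketch, not the definitions from socket_context.c:

static void link_connecting_sketch(struct us_socket_context_t *context,
                                   struct us_connecting_socket_t *c) {
    c->prev_pending = 0;
    c->next_pending = context->head_connecting_sockets;
    if (c->next_pending) {
        c->next_pending->prev_pending = c;
    }
    context->head_connecting_sockets = c;
}

static void unlink_connecting_sketch(struct us_socket_context_t *context,
                                     struct us_connecting_socket_t *c) {
    if (c->prev_pending) {
        c->prev_pending->next_pending = c->next_pending;
    } else {
        context->head_connecting_sockets = c->next_pending;
    }
    if (c->next_pending) {
        c->next_pending->prev_pending = c->prev_pending;
    }
}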

View File

@@ -27,7 +27,6 @@ struct us_internal_loop_data_t {
int last_write_failed;
struct us_socket_context_t *head;
struct us_socket_context_t *iterator;
struct us_socket_context_t *closed_context_head;
char *recv_buf;
char *send_buf;
void *ssl_data;
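
The closed_context_head field above pairs with the close-list linking shown earlier in socket_context.c: contexts are not freed mid-iteration but pushed onto a per-loop list and released after the current tick. A sketch of that pattern, with the sweep helper named here for illustration only:

static void defer_context_free_sketch(struct us_loop_t *loop,
                                      struct us_socket_context_t *context) {
    /* push onto the per-loop close list instead of freeing immediately */
    context->next = loop->data.closed_context_head;
    loop->data.closed_context_head = context;
}

static void sweep_closed_contexts_sketch(struct us_loop_t *loop) {
    struct us_socket_context_t *context = loop->data.closed_context_head;
    while (context) {
        struct us_socket_context_t *next = context->next;
        us_free(context);
        context = next;
    }
    loop->data.closed_context_head = 0;
}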

View File

@@ -17,7 +17,6 @@
#ifndef BSD_H
#define BSD_H
#pragma once
// top-most wrapper of bsd-like syscalls
@@ -26,7 +25,7 @@
#include "libusockets.h"
#ifdef _WIN32
#ifdef _WIN32
#ifndef NOMINMAX
#define NOMINMAX
#endif
@@ -35,7 +34,7 @@
#pragma comment(lib, "ws2_32.lib")
#define SETSOCKOPT_PTR_TYPE const char *
#define LIBUS_SOCKET_ERROR INVALID_SOCKET
#else /* POSIX */
#else
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
@@ -65,76 +64,14 @@ struct bsd_addr_t {
#endif
#ifdef __APPLE__
/*
* Extended version for sendmsg_x() and recvmsg_x() calls
*/
struct mmsghdr {
// a.k.a msghdr_x
struct mmsghdr {
struct msghdr msg_hdr;
size_t msg_len; /* byte length of buffer in msg_iov */
};
/*
* recvmsg_x() is a system call similar to recvmsg(2) to receive
* several datagrams at once in the array of message headers "msgp".
*
* recvmsg_x() can be used only with protocol handlers that have been specially
* modified to support sending and receiving several datagrams at once.
*
* The size of the array "msgp" is given by the argument "cnt".
*
* The "flags" arguments supports only the value MSG_DONTWAIT.
*
* Each member of "msgp" array is of type "struct msghdr_x".
*
* The "msg_iov" and "msg_iovlen" are input parameters that describe where to
* store a datagram in scatter-gather buffer locations -- see recvmsg(2).
* On output the field "msg_datalen" gives the length of the received datagram.
*
* The field "msg_flags" must be set to zero on input. On output, "msg_flags"
* may have MSG_TRUNC set to indicate the trailing portion of the datagram was
* discarded because the datagram was larger than the buffer supplied.
* recvmsg_x() returns as soon as a datagram is truncated.
*
* recvmsg_x() may return with less than "cnt" datagrams received based on
* the low water mark and the amount of data pending in the socket buffer.
*
* recvmsg_x() returns the number of datagrams that have been received,
* or -1 if an error occurred.
*
* NOTE: This is a private system call, the API is subject to change.
*/
ssize_t recvmsg_x(int s, const struct mmsghdr *msgp, u_int cnt, int flags);
/*
* sendmsg_x() is a system call similar to send(2) to send
* several datagrams at once in the array of message headers "msgp".
*
* sendmsg_x() can be used only with protocol handlers that have been specially
* modified to support sending and receiving several datagrams at once.
*
* The size of the array "msgp" is given by the argument "cnt".
*
* The "flags" arguments supports only the value MSG_DONTWAIT.
*
* Each member of "msgp" array is of type "struct msghdr_x".
*
* The "msg_iov" and "msg_iovlen" are input parameters that specify the
* data to be sent in scatter-gather buffer locations -- see sendmsg(2).
*
* sendmsg_x() fails with EMSGSIZE if the sum of the length of the datagrams
* is greater than the high water mark.
*
* Address and ancillary data are not supported so the following fields
* must be set to zero on input:
* "msg_name", "msg_namelen", "msg_control" and "msg_controllen".
*
* The field "msg_flags" and "msg_datalen" must be set to zero on input.
*
* sendmsg_x() returns the number of datagrams that have been sent,
* or -1 if an error occurred.
*
* NOTE: This is a private system call, the API is subject to change.
*/
ssize_t sendmsg_x(int s, const struct mmsghdr *msgp, u_int cnt, int flags);
ssize_t sendmsg_x(int s, struct mmsghdr *msgp, u_int cnt, int flags);
ssize_t recvmsg_x(int s, struct mmsghdr *msgp, u_int cnt, int flags);
#endif
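
A short sketch of what a batched send through the msghdr_x-style struct mmsghdr and the private sendmsg_x() declared above looks like (Apple only; the Linux analogue is sendmmsg(2)). It assumes the declarations from this header; the payload setup and batch size are illustrative:

#include <string.h>

static ssize_t send_batch_sketch(int fd, struct iovec *payloads, u_int count) {
    struct mmsghdr batch[8];
    if (count > 8) {
        count = 8;
    }
    memset(batch, 0, sizeof(batch));
    for (u_int i = 0; i < count; i++) {
        batch[i].msg_hdr.msg_iov = &payloads[i]; /* one datagram per entry */
        batch[i].msg_hdr.msg_iovlen = 1;
        /* msg_name, msg_control, msg_flags and msg_len stay zero on input,
         * as the documentation above requires */
    }
    return sendmsg_x(fd, batch, count, 0);       /* number of datagrams actually sent */
}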
struct udp_recvbuf {
@@ -158,9 +95,8 @@ struct udp_sendbuf {
void **addresses;
int num;
#else
unsigned int has_empty : 1;
unsigned int has_addresses : 1;
unsigned int num;
int num;
char has_empty;
struct mmsghdr msgvec[];
#endif
};

View File

@@ -15,7 +15,7 @@
* limitations under the License.
*/
// clang-format off
#pragma once
#ifndef us_calloc
#define us_calloc calloc
#endif
@@ -35,25 +35,6 @@
#ifndef LIBUSOCKETS_H
#define LIBUSOCKETS_H
#ifdef BUN_DEBUG
#define nonnull_arg _Nonnull
#else
#define nonnull_arg
#endif
#ifdef BUN_DEBUG
#define nonnull_fn_decl
#else
#ifndef nonnull_fn_decl
#define nonnull_fn_decl __attribute__((nonnull))
#endif
#endif
#define us_loop_r struct us_loop_t *nonnull_arg
#define us_socket_r struct us_socket_t *nonnull_arg
#define us_poll_r struct us_poll_t *nonnull_arg
#define us_socket_context_r struct us_socket_context_t *nonnull_arg
/* 512kb shared receive buffer */
#define LIBUS_RECV_BUFFER_LENGTH 524288
@@ -68,7 +49,6 @@
#define LIBUS_EXT_ALIGNMENT 16
#define ALLOW_SERVER_RENEGOTIATION 0
#define LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN 0
#define LIBUS_SOCKET_CLOSE_CODE_CONNECTION_RESET 1
/* Define what a socket descriptor is based on platform */
@@ -143,11 +123,11 @@ struct us_udp_packet_buffer_t *us_create_udp_packet_buffer();
/* Creates a (heavy-weight) UDP socket with a user space ring buffer. Again, this one is heavy weight and
* should be reused. One entire QUIC server can be implemented using a single UDP socket so weight
* is not a concern as is the case for TCP sockets which are 1-to-1 with TCP connections. */
//struct us_udp_socket_t *us_create_udp_socket(us_loop_r loop, void (*read_cb)(struct us_udp_socket_t *), unsigned short port);
//struct us_udp_socket_t *us_create_udp_socket(struct us_loop_t *loop, void (*read_cb)(struct us_udp_socket_t *), unsigned short port);
//struct us_udp_socket_t *us_create_udp_socket(us_loop_r loop, void (*data_cb)(struct us_udp_socket_t *, struct us_udp_packet_buffer_t *, int), void (*drain_cb)(struct us_udp_socket_t *), char *host, unsigned short port);
//struct us_udp_socket_t *us_create_udp_socket(struct us_loop_t *loop, void (*data_cb)(struct us_udp_socket_t *, struct us_udp_packet_buffer_t *, int), void (*drain_cb)(struct us_udp_socket_t *), char *host, unsigned short port);
struct us_udp_socket_t *us_create_udp_socket(us_loop_r loop, void (*data_cb)(struct us_udp_socket_t *, void *, int), void (*drain_cb)(struct us_udp_socket_t *), void (*close_cb)(struct us_udp_socket_t *), const char *host, unsigned short port, void *user);
struct us_udp_socket_t *us_create_udp_socket(struct us_loop_t *loop, void (*data_cb)(struct us_udp_socket_t *, void *, int), void (*drain_cb)(struct us_udp_socket_t *), void (*close_cb)(struct us_udp_socket_t *), const char *host, unsigned short port, void *user);
void us_udp_socket_close(struct us_udp_socket_t *s);
@@ -160,7 +140,7 @@ int us_udp_socket_bind(struct us_udp_socket_t *s, const char *hostname, unsigned
/* Public interfaces for timers */
/* Create a new high precision, low performance timer. May fail and return null */
struct us_timer_t *us_create_timer(us_loop_r loop, int fallthrough, unsigned int ext_size);
struct us_timer_t *us_create_timer(struct us_loop_t *loop, int fallthrough, unsigned int ext_size);
/* Returns user data extension for this timer */
void *us_timer_ext(struct us_timer_t *timer);
@@ -194,17 +174,17 @@ struct us_bun_verify_error_t {
};
struct us_socket_events_t {
struct us_socket_t *(*on_open)(us_socket_r, int is_client, char *ip, int ip_length);
struct us_socket_t *(*on_data)(us_socket_r, char *data, int length);
struct us_socket_t *(*on_writable)(us_socket_r);
struct us_socket_t *(*on_close)(us_socket_r, int code, void *reason);
struct us_socket_t *(*on_open)(struct us_socket_t *, int is_client, char *ip, int ip_length);
struct us_socket_t *(*on_data)(struct us_socket_t *, char *data, int length);
struct us_socket_t *(*on_writable)(struct us_socket_t *);
struct us_socket_t *(*on_close)(struct us_socket_t *, int code, void *reason);
//void (*on_timeout)(struct us_socket_context *);
struct us_socket_t *(*on_timeout)(us_socket_r);
struct us_socket_t *(*on_long_timeout)(us_socket_r);
struct us_socket_t *(*on_end)(us_socket_r);
struct us_socket_t *(*on_timeout)(struct us_socket_t *);
struct us_socket_t *(*on_long_timeout)(struct us_socket_t *);
struct us_socket_t *(*on_end)(struct us_socket_t *);
struct us_connecting_socket_t *(*on_connect_error)(struct us_connecting_socket_t *, int code);
struct us_socket_t *(*on_connecting_socket_error)(us_socket_r, int code);
void (*on_handshake)(us_socket_r, int success, struct us_bun_verify_error_t verify_error, void* custom_data);
struct us_socket_t *(*on_connecting_socket_error)(struct us_socket_t *, int code);
void (*on_handshake)(struct us_socket_t*, int success, struct us_bun_verify_error_t verify_error, void* custom_data);
};
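/* Illustrative sketch, not part of this diff: filling us_socket_events_t with designated
 * initializers, e.g. for us_socket_wrap_with_tls declared later in this header (the handlers
 * and the tls_options value are placeholders; unlisted callbacks stay NULL). */
static struct us_socket_t *example_on_open(struct us_socket_t *s, int is_client, char *ip, int ip_length) { return s; }
static struct us_socket_t *example_on_data(struct us_socket_t *s, char *data, int length) { return s; }
static struct us_socket_t *example_wrap_tls(struct us_socket_t *s, struct us_bun_socket_context_options_t tls_options) {
    struct us_socket_events_t events = { .on_open = example_on_open, .on_data = example_on_data };
    return us_socket_wrap_with_tls(1, s, tls_options, events, 0);
}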
@@ -230,70 +210,67 @@ struct us_bun_socket_context_options_t {
};
/* Return 15-bit timestamp for this context */
unsigned short us_socket_context_timestamp(int ssl, us_socket_context_r context) nonnull_fn_decl;
unsigned short us_socket_context_timestamp(int ssl, struct us_socket_context_t *context);
/* Adds SNI domain and cert in asn1 format */
void us_socket_context_add_server_name(int ssl, us_socket_context_r context, const char *hostname_pattern, struct us_socket_context_options_t options, void *user);
void us_bun_socket_context_add_server_name(int ssl, us_socket_context_r context, const char *hostname_pattern, struct us_bun_socket_context_options_t options, void *user);
void us_socket_context_remove_server_name(int ssl, us_socket_context_r context, const char *hostname_pattern);
void us_socket_context_on_server_name(int ssl, us_socket_context_r context, void (*cb)(us_socket_context_r context, const char *hostname));
void *us_socket_server_name_userdata(int ssl, us_socket_r s);
void *us_socket_context_find_server_name_userdata(int ssl, us_socket_context_r context, const char *hostname_pattern);
void us_socket_context_add_server_name(int ssl, struct us_socket_context_t *context, const char *hostname_pattern, struct us_socket_context_options_t options, void *user);
void us_bun_socket_context_add_server_name(int ssl, struct us_socket_context_t *context, const char *hostname_pattern, struct us_bun_socket_context_options_t options, void *user);
void us_socket_context_remove_server_name(int ssl, struct us_socket_context_t *context, const char *hostname_pattern);
void us_socket_context_on_server_name(int ssl, struct us_socket_context_t *context, void (*cb)(struct us_socket_context_t *, const char *hostname));
void *us_socket_server_name_userdata(int ssl, struct us_socket_t *s);
void *us_socket_context_find_server_name_userdata(int ssl, struct us_socket_context_t *context, const char *hostname_pattern);
/* Returns the underlying SSL native handle, such as SSL_CTX or nullptr */
void *us_socket_context_get_native_handle(int ssl, us_socket_context_r context);
void *us_socket_context_get_native_handle(int ssl, struct us_socket_context_t *context);
/* A socket context holds shared callbacks and user data extension for associated sockets */
struct us_socket_context_t *us_create_socket_context(int ssl, us_loop_r loop,
int ext_size, struct us_socket_context_options_t options) nonnull_fn_decl;
struct us_socket_context_t *us_create_socket_context(int ssl, struct us_loop_t *loop,
int ext_size, struct us_socket_context_options_t options);
struct us_socket_context_t *us_create_bun_socket_context(int ssl, struct us_loop_t *loop,
int ext_size, struct us_bun_socket_context_options_t options) nonnull_fn_decl;
/* Delete resources allocated at creation time (will call unref now and only free when ref count == 0). */
void us_socket_context_free(int ssl, us_socket_context_r context) nonnull_fn_decl;
void us_socket_context_ref(int ssl, us_socket_context_r context) nonnull_fn_decl;
void us_socket_context_unref(int ssl, us_socket_context_r context) nonnull_fn_decl;
int ext_size, struct us_bun_socket_context_options_t options);
/* Delete resources allocated at creation time. */
void us_socket_context_free(int ssl, struct us_socket_context_t *context);
struct us_bun_verify_error_t us_socket_verify_error(int ssl, struct us_socket_t *context);
/* Setters of various async callbacks */
void us_socket_context_on_open(int ssl, us_socket_context_r context,
struct us_socket_t *(*on_open)(us_socket_r s, int is_client, char *ip, int ip_length));
void us_socket_context_on_close(int ssl, us_socket_context_r context,
struct us_socket_t *(*on_close)(us_socket_r s, int code, void *reason));
void us_socket_context_on_data(int ssl, us_socket_context_r context,
struct us_socket_t *(*on_data)(us_socket_r s, char *data, int length));
void us_socket_context_on_writable(int ssl, us_socket_context_r context,
struct us_socket_t *(*on_writable)(us_socket_r s));
void us_socket_context_on_timeout(int ssl, us_socket_context_r context,
struct us_socket_t *(*on_timeout)(us_socket_r s));
void us_socket_context_on_long_timeout(int ssl, us_socket_context_r context,
struct us_socket_t *(*on_timeout)(us_socket_r s));
void us_socket_context_on_open(int ssl, struct us_socket_context_t *context,
struct us_socket_t *(*on_open)(struct us_socket_t *s, int is_client, char *ip, int ip_length));
void us_socket_context_on_close(int ssl, struct us_socket_context_t *context,
struct us_socket_t *(*on_close)(struct us_socket_t *s, int code, void *reason));
void us_socket_context_on_data(int ssl, struct us_socket_context_t *context,
struct us_socket_t *(*on_data)(struct us_socket_t *s, char *data, int length));
void us_socket_context_on_writable(int ssl, struct us_socket_context_t *context,
struct us_socket_t *(*on_writable)(struct us_socket_t *s));
void us_socket_context_on_timeout(int ssl, struct us_socket_context_t *context,
struct us_socket_t *(*on_timeout)(struct us_socket_t *s));
void us_socket_context_on_long_timeout(int ssl, struct us_socket_context_t *context,
struct us_socket_t *(*on_timeout)(struct us_socket_t *s));
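/* Illustrative sketch, not part of this diff: registering a data handler on a freshly created
 * context using the setters above (the echo body is a placeholder; us_socket_write is declared
 * later in this header). */
static struct us_socket_t *example_echo_on_data(struct us_socket_t *s, char *data, int length) {
    us_socket_write(0, s, data, length, 0); /* echo back what was received */
    return s;
}
static struct us_socket_context_t *example_make_context(struct us_loop_t *loop,
                                                        struct us_socket_context_options_t options) {
    struct us_socket_context_t *ctx = us_create_socket_context(0, loop, 0, options);
    us_socket_context_on_data(0, ctx, example_echo_on_data);
    return ctx;
}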
/* This one is only used when a connecting socket fails at a late stage. */
void us_socket_context_on_connect_error(int ssl, us_socket_context_r context,
void us_socket_context_on_connect_error(int ssl, struct us_socket_context_t *context,
struct us_connecting_socket_t *(*on_connect_error)(struct us_connecting_socket_t *s, int code));
void us_socket_context_on_socket_connect_error(int ssl, us_socket_context_r context,
struct us_socket_t *(*on_connect_error)(us_socket_r s, int code));
void us_socket_context_on_socket_connect_error(int ssl, struct us_socket_context_t *context,
struct us_socket_t *(*on_connect_error)(struct us_socket_t *s, int code));
void us_socket_context_on_handshake(int ssl, us_socket_context_r context, void (*on_handshake)(struct us_socket_t *, int success, struct us_bun_verify_error_t verify_error, void* custom_data), void* custom_data);
void us_socket_context_on_handshake(int ssl, struct us_socket_context_t *context, void (*on_handshake)(struct us_socket_t *, int success, struct us_bun_verify_error_t verify_error, void* custom_data), void* custom_data);
/* Emitted when a socket has been half-closed */
void us_socket_context_on_end(int ssl, us_socket_context_r context, struct us_socket_t *(*on_end)(us_socket_r s));
void us_socket_context_on_end(int ssl, struct us_socket_context_t *context, struct us_socket_t *(*on_end)(struct us_socket_t *s));
/* Returns user data extension for this socket context */
void *us_socket_context_ext(int ssl, us_socket_context_r context);
void *us_socket_context_ext(int ssl, struct us_socket_context_t *context);
/* Closes all open sockets, including listen sockets. Does not invalidate the socket context. */
void us_socket_context_close(int ssl, us_socket_context_r context);
void us_socket_context_close(int ssl, struct us_socket_context_t *context);
/* Listen for connections. Acts as the main driving cog in a server. Will invoke the async callbacks that have been set. */
struct us_listen_socket_t *us_socket_context_listen(int ssl, us_socket_context_r context,
struct us_listen_socket_t *us_socket_context_listen(int ssl, struct us_socket_context_t *context,
const char *host, int port, int options, int socket_ext_size);
struct us_listen_socket_t *us_socket_context_listen_unix(int ssl, us_socket_context_r context,
struct us_listen_socket_t *us_socket_context_listen_unix(int ssl, struct us_socket_context_t *context,
const char *path, size_t pathlen, int options, int socket_ext_size);
/* listen_socket.c/.h */
void us_listen_socket_close(int ssl, struct us_listen_socket_t *ls) nonnull_fn_decl;
void us_listen_socket_close(int ssl, struct us_listen_socket_t *ls);
/*
Returns one of
@@ -304,156 +281,156 @@ void us_listen_socket_close(int ssl, struct us_listen_socket_t *ls) nonnull_fn_d
This is the slow path where we must either go through DNS resolution or create multiple sockets
per the happy eyeballs algorithm
*/
void *us_socket_context_connect(int ssl, struct us_socket_context_t * nonnull_arg context,
const char *host, int port, int options, int socket_ext_size, int *is_connecting) __attribute__((nonnull(2)));
void *us_socket_context_connect(int ssl, struct us_socket_context_t *context,
const char *host, int port, int options, int socket_ext_size, int *is_connecting);
struct us_socket_t *us_socket_context_connect_unix(int ssl, us_socket_context_r context,
const char *server_path, size_t pathlen, int options, int socket_ext_size) __attribute__((nonnull(2)));
struct us_socket_t *us_socket_context_connect_unix(int ssl, struct us_socket_context_t *context,
const char *server_path, size_t pathlen, int options, int socket_ext_size);
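/* Illustrative sketch, not part of this diff, assuming the void pointer returned by
 * us_socket_context_connect is a us_socket_t when *is_connecting is 0 and a
 * us_connecting_socket_t otherwise (i.e. while DNS resolution / happy eyeballs is in flight). */
static void example_connect(struct us_socket_context_t *ctx) {
    int is_connecting = 0;
    void *result = us_socket_context_connect(0, ctx, "example.com", 443, 0, 0, &is_connecting);
    if (is_connecting) {
        struct us_connecting_socket_t *c = (struct us_connecting_socket_t *) result;
        (void) c; /* wait for on_open or on_connect_error */
    } else {
        struct us_socket_t *s = (struct us_socket_t *) result;
        (void) s; /* already a concrete socket */
    }
}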
/* Is this socket established? Can be used to check if a connecting socket has fired the on_open event yet.
* Can also be used to determine if a socket is a listen_socket or not, but you probably know that already. */
int us_socket_is_established(int ssl, us_socket_r s) nonnull_fn_decl;
int us_socket_is_established(int ssl, struct us_socket_t *s);
void us_connecting_socket_free(int ssl, struct us_connecting_socket_t *c) nonnull_fn_decl;
void us_connecting_socket_free(struct us_connecting_socket_t *c);
/* Cancel a connecting socket. Can be used together with us_socket_timeout to limit connection times.
 * Entirely destroys the socket - this function works like us_socket_close but does not trigger the on_close event, since
 * you never got the on_open event first. */
void us_connecting_socket_close(int ssl, struct us_connecting_socket_t *c) nonnull_fn_decl;
void us_connecting_socket_close(int ssl, struct us_connecting_socket_t *c);
/* Returns the loop for this socket context. */
struct us_loop_t *us_socket_context_loop(int ssl, us_socket_context_r context) nonnull_fn_decl __attribute((returns_nonnull));
struct us_loop_t *us_socket_context_loop(int ssl, struct us_socket_context_t *context);
/* Invalidates the passed socket, returning a new, resized socket that belongs to a different socket context.
 * Used mainly for "socket upgrades", such as when transitioning from HTTP to WebSocket. */
struct us_socket_t *us_socket_context_adopt_socket(int ssl, us_socket_context_r context, us_socket_r s, int ext_size);
struct us_socket_t *us_socket_context_adopt_socket(int ssl, struct us_socket_context_t *context, struct us_socket_t *s, int ext_size);
/* Create a child socket context which acts much like its own socket context with its own callbacks yet still relies on the
* parent socket context for some shared resources. Child socket contexts should be used together with socket adoptions and nothing else. */
struct us_socket_context_t *us_create_child_socket_context(int ssl, us_socket_context_r context, int context_ext_size);
struct us_socket_context_t *us_create_child_socket_context(int ssl, struct us_socket_context_t *context, int context_ext_size);
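/* Illustrative sketch, not part of this diff: the typical "upgrade" flow combining the two calls
 * above - create a child context with its own callbacks, then move the socket into it with a new
 * extension size (ws_on_data and the 64-byte extension are placeholders). */
static struct us_socket_t *example_upgrade(struct us_socket_context_t *http_ctx, struct us_socket_t *s,
                                           struct us_socket_t *(*ws_on_data)(struct us_socket_t *, char *, int)) {
    struct us_socket_context_t *ws_ctx = us_create_child_socket_context(0, http_ctx, 0);
    us_socket_context_on_data(0, ws_ctx, ws_on_data);
    return us_socket_context_adopt_socket(0, ws_ctx, s, 64 /* new socket ext size */);
}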
/* Public interfaces for loops */
/* Returns a new event loop with user data extension */
struct us_loop_t *us_create_loop(void *hint, void (*wakeup_cb)(us_loop_r loop),
void (*pre_cb)(us_loop_r loop), void (*post_cb)(us_loop_r loop), unsigned int ext_size);
struct us_loop_t *us_create_loop(void *hint, void (*wakeup_cb)(struct us_loop_t *loop),
void (*pre_cb)(struct us_loop_t *loop), void (*post_cb)(struct us_loop_t *loop), unsigned int ext_size);
/* Frees the loop immediately */
void us_loop_free(us_loop_r loop) nonnull_fn_decl;
void us_loop_free(struct us_loop_t *loop);
/* Returns the loop user data extension */
void *us_loop_ext(us_loop_r loop) nonnull_fn_decl;
void *us_loop_ext(struct us_loop_t *loop);
/* Blocks the calling thread and drives the event loop until no more non-fallthrough polls are scheduled */
void us_loop_run(us_loop_r loop) nonnull_fn_decl;
void us_loop_run(struct us_loop_t *loop);
/* Signals the loop from any thread to wake up and execute its wakeup handler from the loop's own running thread.
* This is the only fully thread-safe function and serves as the basis for thread safety */
void us_wakeup_loop(us_loop_r loop) nonnull_fn_decl;
void us_wakeup_loop(struct us_loop_t *loop);
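/* Illustrative sketch, not part of this diff: us_wakeup_loop is the one fully thread-safe call;
 * actual work is handed over inside the wakeup callback given to us_create_loop (the queue
 * draining mentioned here is a placeholder). */
static void example_on_wakeup(struct us_loop_t *loop) {
    /* drain a thread-safe queue stored in us_loop_ext(loop) and act on it here */
}
static void example_notify_from_other_thread(struct us_loop_t *loop) {
    us_wakeup_loop(loop); /* safe to call from any thread */
}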
/* Hook up timers in existing loop */
void us_loop_integrate(us_loop_r loop) nonnull_fn_decl;
void us_loop_integrate(struct us_loop_t *loop);
/* Returns the loop iteration number */
long long us_loop_iteration_number(us_loop_r loop) nonnull_fn_decl;
long long us_loop_iteration_number(struct us_loop_t *loop);
/* Public interfaces for polls */
/* A fallthrough poll does not keep the loop running; it falls through */
struct us_poll_t *us_create_poll(us_loop_r loop, int fallthrough, unsigned int ext_size);
struct us_poll_t *us_create_poll(struct us_loop_t *loop, int fallthrough, unsigned int ext_size);
/* After stopping a poll you must manually free the memory */
void us_poll_free(us_poll_r p, struct us_loop_t *loop);
void us_poll_free(struct us_poll_t *p, struct us_loop_t *loop);
/* Associate this poll with a socket descriptor and poll type */
void us_poll_init(us_poll_r p, LIBUS_SOCKET_DESCRIPTOR fd, int poll_type);
void us_poll_init(struct us_poll_t *p, LIBUS_SOCKET_DESCRIPTOR fd, int poll_type);
/* Start, change and stop polling for events */
void us_poll_start(us_poll_r p, us_loop_r loop, int events) nonnull_fn_decl;
void us_poll_change(us_poll_r p, us_loop_r loop, int events) nonnull_fn_decl;
void us_poll_stop(us_poll_r p, struct us_loop_t *loop) nonnull_fn_decl;
void us_poll_start(struct us_poll_t *p, struct us_loop_t *loop, int events);
void us_poll_change(struct us_poll_t *p, struct us_loop_t *loop, int events);
void us_poll_stop(struct us_poll_t *p, struct us_loop_t *loop);
/* Return what events we are polling for */
int us_poll_events(us_poll_r p) nonnull_fn_decl;
int us_poll_events(struct us_poll_t *p);
/* Returns the user data extension of this poll */
void *us_poll_ext(us_poll_r p) nonnull_fn_decl;
void *us_poll_ext(struct us_poll_t *p);
/* Get associated socket descriptor from a poll */
LIBUS_SOCKET_DESCRIPTOR us_poll_fd(us_poll_r p) nonnull_fn_decl;
LIBUS_SOCKET_DESCRIPTOR us_poll_fd(struct us_poll_t *p);
/* Resize an active poll */
struct us_poll_t *us_poll_resize(us_poll_r p, us_loop_r loop, unsigned int ext_size) nonnull_fn_decl;
struct us_poll_t *us_poll_resize(struct us_poll_t *p, struct us_loop_t *loop, unsigned int ext_size);
/* Public interfaces for sockets */
/* Returns the underlying native handle for a socket, such as the SSL object or file descriptor.
 * In the case of a file descriptor, the pointer value is the fd itself. */
void *us_socket_get_native_handle(int ssl, us_socket_r s) nonnull_fn_decl;
void *us_socket_get_native_handle(int ssl, struct us_socket_t *s);
/* Write up to length bytes of data. Returns actual bytes written.
* Will call the on_writable callback of active socket context on failure to write everything off in one go.
* Set hint msg_more if you have more immediate data to write. */
int us_socket_write(int ssl, us_socket_r s, const char * nonnull_arg data, int length, int msg_more) nonnull_fn_decl;
int us_socket_write(int ssl, struct us_socket_t *s, const char *data, int length, int msg_more);
/* Special path for non-SSL sockets. Used to send header and payload in one go. Works like us_socket_write. */
int us_socket_write2(int ssl, us_socket_r s, const char *header, int header_length, const char *payload, int payload_length) nonnull_fn_decl;
int us_socket_write2(int ssl, struct us_socket_t *s, const char *header, int header_length, const char *payload, int payload_length);
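/* Illustrative sketch, not part of this diff: batching writes with the msg_more hint and then
 * flushing; any bytes not accepted here would be retried from the on_writable callback. */
static void example_send(struct us_socket_t *s, const char *header, int header_len,
                         const char *body, int body_len) {
    us_socket_write(0, s, header, header_len, 1); /* more data follows immediately */
    us_socket_write(0, s, body, body_len, 0);
    us_socket_flush(0, s); /* withdraw msg_more and push out anything still pending */
}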
/* Set a low precision, high performance timer on a socket. A socket can only have a single active timer
 * at any given point in time. Will remove any previously set timer */
void us_socket_timeout(int ssl, us_socket_r s, unsigned int seconds) nonnull_fn_decl;
void us_socket_timeout(int ssl, struct us_socket_t *s, unsigned int seconds);
/* Set a low precision, high performance timer on a socket. Suitable for per-minute precision. */
void us_socket_long_timeout(int ssl, us_socket_r s, unsigned int minutes) nonnull_fn_decl;
void us_socket_long_timeout(int ssl, struct us_socket_t *s, unsigned int minutes);
/* Return the user data extension of this socket */
void *us_socket_ext(int ssl, us_socket_r s) nonnull_fn_decl;
void *us_connecting_socket_ext(int ssl, struct us_connecting_socket_t *c) nonnull_fn_decl;
void *us_socket_ext(int ssl, struct us_socket_t *s);
void *us_connecting_socket_ext(int ssl, struct us_connecting_socket_t *c);
/* Return the socket context of this socket */
struct us_socket_context_t *us_socket_context(int ssl, us_socket_r s) nonnull_fn_decl __attribute__((returns_nonnull));
struct us_socket_context_t *us_socket_context(int ssl, struct us_socket_t *s);
/* Withdraw any msg_more status and flush any pending data */
void us_socket_flush(int ssl, us_socket_r s) nonnull_fn_decl;
void us_socket_flush(int ssl, struct us_socket_t *s);
/* Shuts down the connection by sending FIN and/or close_notify */
void us_socket_shutdown(int ssl, us_socket_r s) nonnull_fn_decl;
void us_socket_shutdown(int ssl, struct us_socket_t *s);
/* Shuts down the read side of the connection, meaning the next event loop
 * iteration will observe the socket being closed. Can be used to defer closing
 * to the next event loop iteration. */
void us_socket_shutdown_read(int ssl, us_socket_r s) nonnull_fn_decl;
void us_socket_shutdown_read(int ssl, struct us_socket_t *s);
/* Returns whether the socket has been shut down or not */
int us_socket_is_shut_down(int ssl, us_socket_r s) nonnull_fn_decl;
int us_socket_is_shut_down(int ssl, struct us_socket_t *s);
/* Returns whether this socket has been closed. Only valid if memory has not yet been released. */
int us_socket_is_closed(int ssl, us_socket_r s) nonnull_fn_decl;
int us_socket_is_closed(int ssl, struct us_socket_t *s);
/* Immediately closes the socket */
struct us_socket_t *us_socket_close(int ssl, us_socket_r s, int code, void *reason) __attribute__((nonnull(2)));
struct us_socket_t *us_socket_close(int ssl, struct us_socket_t *s, int code, void *reason);
/* Returns local port or -1 on failure. */
int us_socket_local_port(int ssl, us_socket_r s) nonnull_fn_decl;
int us_socket_local_port(int ssl, struct us_socket_t *s);
/* Copy remote (IP) address of socket, or fail with zero length. */
void us_socket_remote_address(int ssl, us_socket_r s, char *nonnull_arg buf, int *nonnull_arg length) nonnull_fn_decl;
void us_socket_local_address(int ssl, us_socket_r s, char *nonnull_arg buf, int *nonnull_arg length) nonnull_fn_decl;
void us_socket_remote_address(int ssl, struct us_socket_t *s, char *buf, int *length);
void us_socket_local_address(int ssl, struct us_socket_t *s, char *buf, int *length);
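/* Illustrative sketch, not part of this diff, assuming *length is an in/out parameter: it carries
 * the buffer capacity in and the copied address length out, with 0 signalling failure. */
static int example_remote_address(struct us_socket_t *s, char *buf, int capacity) {
    int length = capacity;
    us_socket_remote_address(0, s, buf, &length);
    return length; /* 0 means the address could not be copied, per the comment above */
}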
/* Bun extras */
struct us_socket_t *us_socket_pair(struct us_socket_context_t *ctx, int socket_ext_size, LIBUS_SOCKET_DESCRIPTOR* fds);
struct us_socket_t *us_socket_from_fd(struct us_socket_context_t *ctx, int socket_ext_size, LIBUS_SOCKET_DESCRIPTOR fd);
struct us_socket_t *us_socket_attach(int ssl, LIBUS_SOCKET_DESCRIPTOR client_fd, struct us_socket_context_t *ctx, int flags, int socket_ext_size);
struct us_socket_t *us_socket_wrap_with_tls(int ssl, us_socket_r s, struct us_bun_socket_context_options_t options, struct us_socket_events_t events, int socket_ext_size);
int us_socket_raw_write(int ssl, us_socket_r s, const char *data, int length, int msg_more);
struct us_socket_t *us_socket_wrap_with_tls(int ssl, struct us_socket_t *s, struct us_bun_socket_context_options_t options, struct us_socket_events_t events, int socket_ext_size);
int us_socket_raw_write(int ssl, struct us_socket_t *s, const char *data, int length, int msg_more);
struct us_socket_t* us_socket_open(int ssl, struct us_socket_t * s, int is_client, char* ip, int ip_length);
int us_raw_root_certs(struct us_cert_string_t**out);
unsigned int us_get_remote_address_info(char *buf, us_socket_r s, const char **dest, int *port, int *is_ipv6);
int us_socket_get_error(int ssl, us_socket_r s);
unsigned int us_get_remote_address_info(char *buf, struct us_socket_t *s, const char **dest, int *port, int *is_ipv6);
int us_socket_get_error(int ssl, struct us_socket_t *s);
void us_socket_ref(us_socket_r s);
void us_socket_unref(us_socket_r s);
void us_socket_ref(struct us_socket_t *s);
void us_socket_unref(struct us_socket_t *s);
#ifdef __cplusplus
}

View File

@@ -47,8 +47,6 @@ void us_internal_loop_data_init(struct us_loop_t *loop, void (*wakeup_cb)(struct
loop->data.parent_ptr = 0;
loop->data.parent_tag = 0;
loop->data.closed_context_head = 0;
loop->data.wakeup_async = us_internal_create_async(loop, 1, 0);
us_internal_async_set(loop->data.wakeup_async, (void (*)(struct us_internal_async *)) wakeup_cb);
}
@@ -236,15 +234,6 @@ void us_internal_free_closed_sockets(struct us_loop_t *loop) {
loop->data.closed_connecting_head = 0;
}
void us_internal_free_closed_contexts(struct us_loop_t *loop) {
for (struct us_socket_context_t *ctx = loop->data.closed_context_head; ctx; ) {
struct us_socket_context_t *next = ctx->next;
us_free(ctx);
ctx = next;
}
loop->data.closed_context_head = 0;
}
void sweep_timer_cb(struct us_internal_callback_t *cb) {
us_internal_timer_sweep(cb->loop);
}
@@ -264,7 +253,6 @@ void us_internal_loop_pre(struct us_loop_t *loop) {
void us_internal_loop_post(struct us_loop_t *loop) {
us_internal_handle_dns_results(loop);
us_internal_free_closed_sockets(loop);
us_internal_free_closed_contexts(loop);
loop->data.post_cb(loop);
}
@@ -368,8 +356,7 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int events)
s->context->loop->data.low_prio_budget--; /* Still having budget for this iteration - do normal processing */
} else {
us_poll_change(&s->p, us_socket_context(0, s)->loop, us_poll_events(&s->p) & LIBUS_SOCKET_WRITABLE);
us_socket_context_ref(0, s->context);
us_internal_socket_context_unlink_socket(0, s->context, s);
us_internal_socket_context_unlink_socket(s->context, s);
/* Link this socket to the low-priority queue - we use a LIFO queue to prioritize newer clients that
* may not have timed out yet - sounds unfair, but works better in real life with short client timeouts
@@ -424,8 +411,7 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int events)
if (us_socket_is_shut_down(0, s)) {
/* We got FIN back after sending it */
/* Todo: We should give "CLEAN SHUTDOWN" as reason here */
s = us_socket_close(0, s, LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN, NULL);
return;
s = us_socket_close(0, s, 0, NULL);
} else {
/* We got FIN, so stop polling for readable */
us_poll_change(&s->p, us_socket_context(0, s)->loop, us_poll_events(&s->p) & LIBUS_SOCKET_WRITABLE);
@@ -433,7 +419,7 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int events)
}
} else if (length == LIBUS_SOCKET_ERROR && !bsd_would_block()) {
/* Todo: decide also here what kind of reason we should give */
s = us_socket_close(0, s, LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN, NULL);
s = us_socket_close(0, s, 0, NULL);
return;
}

View File

@@ -21,8 +21,6 @@
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>
#include <errno.h>
#ifndef WIN32
#include <fcntl.h>
@@ -115,9 +113,6 @@ void us_socket_flush(int ssl, struct us_socket_t *s) {
}
int us_socket_is_closed(int ssl, struct us_socket_t *s) {
if(ssl) {
return us_internal_ssl_socket_is_closed((struct us_internal_ssl_socket_t *) s);
}
return s->prev == (struct us_socket_t *) s->context;
}
@@ -130,11 +125,9 @@ int us_socket_is_established(int ssl, struct us_socket_t *s) {
return us_internal_poll_type((struct us_poll_t *) s) != POLL_TYPE_SEMI_SOCKET;
}
void us_connecting_socket_free(int ssl, struct us_connecting_socket_t *c) {
void us_connecting_socket_free(struct us_connecting_socket_t *c) {
// we can't just free c immediately, as it may be enqueued in the dns_ready_head list
// instead, we move it to a close list and free it after the iteration
us_internal_socket_context_unlink_connecting_socket(ssl, c->context, c);
c->next = c->context->loop->data.closed_connecting_head;
c->context->loop->data.closed_connecting_head = c;
}
@@ -142,9 +135,9 @@ void us_connecting_socket_free(int ssl, struct us_connecting_socket_t *c) {
void us_connecting_socket_close(int ssl, struct us_connecting_socket_t *c) {
if (c->closed) return;
c->closed = 1;
for (struct us_socket_t *s = c->connecting_head; s; s = s->connect_next) {
us_internal_socket_context_unlink_socket(ssl, s->context, s);
for (struct us_socket_t *s = c->connecting_head; s; s = s->connect_next) {
us_internal_socket_context_unlink_socket(s->context, s);
us_poll_stop((struct us_poll_t *) s, s->context->loop);
bsd_close_socket(us_poll_fd((struct us_poll_t *) s));
@@ -155,26 +148,15 @@ void us_connecting_socket_close(int ssl, struct us_connecting_socket_t *c) {
/* Any socket with prev = context is marked as closed */
s->prev = (struct us_socket_t *) s->context;
}
if(!c->error) {
// if there is no error yet, record that we were aborted (i.e. close was called)
c->error = ECONNABORTED;
}
c->context->on_connect_error(c, c->error);
if(c->addrinfo_req) {
Bun__addrinfo_freeRequest(c->addrinfo_req, c->error == ECONNREFUSED);
c->addrinfo_req = 0;
}
// we can only schedule the socket to be freed if there is no pending callback
// otherwise, the callback will see that the socket is closed and will free it
if (!c->pending_resolve_callback) {
us_connecting_socket_free(ssl, c);
us_connecting_socket_free(c);
}
}
struct us_socket_t *us_socket_close(int ssl, struct us_socket_t *s, int code, void *reason) {
if(ssl) {
return (struct us_socket_t *)us_internal_ssl_socket_close((struct us_internal_ssl_socket_t *) s, code, reason);
}
if (!us_socket_is_closed(0, s)) {
if (s->low_prio_state == 1) {
/* Unlink this socket from the low-priority queue */
@@ -186,10 +168,8 @@ struct us_socket_t *us_socket_close(int ssl, struct us_socket_t *s, int code, vo
s->prev = 0;
s->next = 0;
s->low_prio_state = 0;
us_socket_context_unref(ssl, s->context);
} else {
us_internal_socket_context_unlink_socket(ssl, s->context, s);
us_internal_socket_context_unlink_socket(s->context, s);
}
#ifdef LIBUS_USE_KQUEUE
// kqueue automatically removes the fd from the set on close
@@ -238,10 +218,8 @@ struct us_socket_t *us_socket_detach(int ssl, struct us_socket_t *s) {
s->prev = 0;
s->next = 0;
s->low_prio_state = 0;
us_socket_context_unref(ssl, s->context);
} else {
us_internal_socket_context_unlink_socket(ssl, s->context, s);
us_internal_socket_context_unlink_socket(s->context, s);
}
us_poll_stop((struct us_poll_t *) s, s->context->loop);

View File

@@ -0,0 +1,38 @@
CAPI_EXAMPLE_FILES := HelloWorld HelloWorldAsync ServerName UpgradeSync UpgradeAsync EchoServer Broadcast BroadcastEchoServer
RUST_EXAMPLE_FILES := RustHelloWorld
LIBRARY_NAME := libuwebsockets
default:
$(MAKE) capi
$(CXX) -O3 -flto -I ../src -I ../uSockets/src examples/HelloWorld.c *.o -lz -luv -lssl -lcrypto -lstdc++ ../uSockets/uSockets.a -o HelloWorld
capi:
$(MAKE) clean
cd ../uSockets && $(CC) -pthread -DUWS_WITH_PROXY -DLIBUS_USE_OPENSSL -DLIBUS_USE_LIBUV -std=c11 -Isrc -flto -fPIC -O3 -c src/*.c src/eventing/*.c src/crypto/*.c
cd ../uSockets && $(CXX) -std=c++17 -flto -fPIC -O3 -c src/crypto/*.cpp
cd ../uSockets && $(AR) rvs uSockets.a *.o
$(CXX) -DUWS_WITH_PROXY -c -O3 -std=c++17 -lz -luv -flto -fPIC -I ../src -I ../uSockets/src $(LIBRARY_NAME).cpp
$(AR) rvs $(LIBRARY_NAME).a $(LIBRARY_NAME).o ../uSockets/uSockets.a
shared:
$(MAKE) clean
cd ../uSockets && $(CC) -pthread -DUWS_WITH_PROXY -DLIBUS_USE_OPENSSL -DLIBUS_USE_LIBUV -std=c11 -Isrc -flto -fPIC -O3 -c src/*.c src/eventing/*.c src/crypto/*.c
cd ../uSockets && $(CXX) -std=c++17 -flto -fPIC -O3 -c src/crypto/*.cpp
cd ../uSockets && $(AR) rvs uSockets.a *.o
$(CXX) -DUWS_WITH_PROXY -c -O3 -std=c++17 -lz -luv -flto -fPIC -I ../src -I ../uSockets/src $(LIBRARY_NAME).cpp
$(CXX) -shared -o $(LIBRARY_NAME).so $(LIBRARY_NAME).o ../uSockets/uSockets.a -fPIC -lz -luv -lssl -lcrypto
misc:
mkdir -p ../misc && openssl req -newkey rsa:2048 -new -nodes -x509 -days 3650 -passout pass:1234 -keyout ../misc/key.pem -out ../misc/cert.pem
rust:
$(MAKE) capi
rustc -C link-arg=$(LIBRARY_NAME).a -C link-args="-lstdc++ -luv" -C opt-level=3 -C lto -L all=. examples/RustHelloWorld.rs -o RustHelloWorld
clean:
rm -f *.o $(CAPI_EXAMPLE_FILES) $(RUST_EXAMPLE_FILES) $(LIBRARY_NAME).a $(LIBRARY_NAME).so
all:
for FILE in $(CAPI_EXAMPLE_FILES); do $(CXX) -O3 -flto -I ../src -I ../uSockets/src examples/$$FILE.c *.o -luv -lstdc++ ../uSockets/uSockets.a -o $$FILE & done; \
wait

View File

@@ -0,0 +1,157 @@
#include "../libuwebsockets.h"
#include <stdio.h>
#include <malloc.h>
#include <time.h>
#include <string.h>
#include <stdarg.h>
#define SSL 1
//Timer close helper
void uws_timer_close(struct us_timer_t *timer)
{
struct us_timer_t *t = (struct us_timer_t *)timer;
struct timer_handler_data *data;
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
free(data);
us_timer_close(t, 0);
}
//Timer create helper
struct us_timer_t *uws_create_timer(int ms, int repeat_ms, void (*handler)(void *data), void *data)
{
struct us_loop_t *loop = uws_get_loop();
struct us_timer_t *delayTimer = us_create_timer(loop, 0, sizeof(void *));
struct timer_handler_data
{
void *data;
void (*handler)(void *data);
bool repeat;
};
struct timer_handler_data *timer_data = (struct timer_handler_data *)malloc(sizeof(timer_handler_data));
timer_data->data = data;
timer_data->handler = handler;
timer_data->repeat = repeat_ms > 0;
memcpy(us_timer_ext(delayTimer), &timer_data, sizeof(struct timer_handler_data *));
us_timer_set(
delayTimer, [](struct us_timer_t *t)
{
/* We wrote the pointer to the timer's extension */
struct timer_handler_data *data;
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
data->handler(data->data);
if (!data->repeat)
{
free(data);
us_timer_close(t, 0);
}
},
ms, repeat_ms);
return (struct us_timer_t *)delayTimer;
}
/* This is a simple WebSocket "sync" upgrade example.
* You may compile it with "WITH_OPENSSL=1 make" or with "make" */
/* ws->getUserData returns one of these */
struct PerSocketData {
/* Fill with user data */
};
int buffer_size(const char* format, ...) {
va_list args;
va_start(args, format);
int result = vsnprintf(NULL, 0, format, args);
va_end(args);
return result + 1; // +1 for the terminating '\0'
}
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void* user_data)
{
if (listen_socket){
printf("Listening on port wss://localhost:%d\n", config.port);
}
}
void open_handler(uws_websocket_t* ws){
/* Open event here, you may access uws_ws_get_user_data(ws) which points to a PerSocketData struct */
uws_ws_subscribe(SSL, ws, "broadcast", 9);
}
void message_handler(uws_websocket_t* ws, const char* message, size_t length, uws_opcode_t opcode){
}
void close_handler(uws_websocket_t* ws, int code, const char* message, size_t length){
/* You may access uws_ws_get_user_data(ws) here, but sending or
* doing any kind of I/O with the socket is not valid. */
}
void drain_handler(uws_websocket_t* ws){
/* Check uws_ws_get_buffered_amount(ws) here */
}
void ping_handler(uws_websocket_t* ws, const char* message, size_t length){
/* You don't need to handle this one, we automatically respond to pings as per standard */
}
void pong_handler(uws_websocket_t* ws, const char* message, size_t length){
/* You don't need to handle this one either */
}
void on_timer_interval(void* data){
// broadcast the unix time as millis
uws_app_t * app = (uws_app_t *)data;
struct timespec ts;
timespec_get(&ts, TIME_UTC);
int64_t millis = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
char* message = (char*)malloc((size_t)buffer_size("%ld", millis));
size_t message_length = sprintf(message, "%ld", millis);
uws_publish(SSL, app, "broadcast", 9, message, message_length, uws_opcode_t::TEXT, false);
free(message);
}
int main()
{
uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
/* There are example certificates in uWebSockets.js repo */
.key_file_name = "../misc/key.pem",
.cert_file_name = "../misc/cert.pem",
.passphrase = "1234"
});
uws_ws(SSL, app, "/*", (uws_socket_behavior_t){
.compression = uws_compress_options_t::SHARED_COMPRESSOR,
.maxPayloadLength = 16 * 1024,
.idleTimeout = 12,
.maxBackpressure = 1 * 1024 * 1024,
.upgrade = NULL,
.open = open_handler,
.message = message_handler,
.drain = drain_handler,
.ping = ping_handler,
.pong = pong_handler,
.close = close_handler,
});
uws_app_listen(SSL, app, 9001, listen_handler, NULL);
// broadcast the unix time as millis every 8 millis
uws_create_timer(8, 8, on_timer_interval, app);
uws_app_run(SSL, app);
}

View File

@@ -0,0 +1,175 @@
#include "../libuwebsockets.h"
#include <stdio.h>
#include <malloc.h>
#include <time.h>
#include <string.h>
#include <stdarg.h>
#define SSL 1
/* This is a simple WebSocket "sync" upgrade example.
* You may compile it with "WITH_OPENSSL=1 make" or with "make" */
typedef struct
{
size_t length;
char *name;
} topic_t;
/* ws->getUserData returns one of these */
struct PerSocketData
{
/* Fill with user data */
topic_t **topics;
int topics_quantity;
int nr;
};
uws_app_t *app;
int buffer_size(const char *format, ...)
{
va_list args;
va_start(args, format);
int result = vsnprintf(NULL, 0, format, args);
va_end(args);
return result + 1; // +1 for the terminating '\0'
}
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void* user_data)
{
if (listen_socket)
{
printf("Listening on port wss://localhost:%d\n", config.port);
}
}
void upgrade_handler(uws_res_t *response, uws_req_t *request, uws_socket_context_t *context)
{
/* You may read from req only here, and COPY whatever you need into your PerSocketData.
* PerSocketData is valid from .open to .close event, accessed with uws_ws_get_user_data(ws).
* HttpRequest (req) is ONLY valid in this very callback, so any data you will need later
* has to be COPIED into PerSocketData here. */
/* Immediately upgrading without doing anything "async" before, is simple */
struct PerSocketData *data = (struct PerSocketData *)malloc(sizeof(struct PerSocketData));
data->topics = (topic_t **)calloc(32, sizeof(topic_t *));
data->topics_quantity = 32;
data->nr = 0;
const char *ws_key = NULL;
const char *ws_protocol = NULL;
const char *ws_extensions = NULL;
size_t ws_key_length = uws_req_get_header(request, "sec-websocket-key", 17, &ws_key);
size_t ws_protocol_length = uws_req_get_header(request, "sec-websocket-protocol", 22, &ws_protocol);
size_t ws_extensions_length = uws_req_get_header(request, "sec-websocket-extensions", 24, &ws_extensions);
uws_res_upgrade(SSL,
response,
(void *)data,
ws_key,
ws_key_length,
ws_protocol,
ws_protocol_length,
ws_extensions,
ws_extensions_length,
context);
}
void open_handler(uws_websocket_t *ws)
{
/* Open event here, you may access uws_ws_get_user_data(ws) which points to a PerSocketData struct */
struct PerSocketData *data = (struct PerSocketData *)uws_ws_get_user_data(SSL, ws);
for (int i = 0; i < data->topics_quantity; i++)
{
char *topic = (char *)malloc((size_t)buffer_size("%ld-%d", (uintptr_t)ws, i));
size_t topic_length = sprintf(topic, "%ld-%d", (uintptr_t)ws, i);
topic_t *new_topic = (topic_t*) malloc(sizeof(topic_t));
new_topic->length = topic_length;
new_topic->name = topic;
data->topics[i] = new_topic;
uws_ws_subscribe(SSL, ws, topic, topic_length);
}
}
void message_handler(uws_websocket_t *ws, const char *message, size_t length, uws_opcode_t opcode)
{
struct PerSocketData *data = (struct PerSocketData *)uws_ws_get_user_data(SSL, ws);
topic_t *topic = data->topics[(size_t)(++data->nr % data->topics_quantity)];
uws_publish(SSL, app, topic->name, topic->length, message, length, opcode, false);
topic = data->topics[(size_t)(++data->nr % data->topics_quantity)];
uws_ws_publish(SSL, ws, topic->name, topic->length, message, length);
}
void close_handler(uws_websocket_t *ws, int code, const char *message, size_t length)
{
/* You may access uws_ws_get_user_data(ws) here, but sending or
* doing any kind of I/O with the socket is not valid. */
struct PerSocketData *data = (struct PerSocketData *)uws_ws_get_user_data(SSL, ws);
if (data)
{
for (int i = 0; i < data->topics_quantity; i++)
{
topic_t* topic = data->topics[i];
free(topic->name);
free(topic);
}
free(data->topics);
free(data);
}
}
void drain_handler(uws_websocket_t *ws)
{
/* Check uws_ws_get_buffered_amount(ws) here */
}
void ping_handler(uws_websocket_t *ws, const char *message, size_t length)
{
/* You don't need to handle this one, we automatically respond to pings as per standard */
}
void pong_handler(uws_websocket_t *ws, const char *message, size_t length)
{
/* You don't need to handle this one either */
}
int main()
{
uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
/* There are example certificates in uWebSockets.js repo */
.key_file_name = "../misc/key.pem",
.cert_file_name = "../misc/cert.pem",
.passphrase = "1234"
});
uws_ws(SSL, app, "/*", (uws_socket_behavior_t){
.compression = uws_compress_options_t::SHARED_COMPRESSOR,
.maxPayloadLength = 16 * 1024,
.idleTimeout = 12,
.maxBackpressure = 1 * 1024 * 1024,
.upgrade = upgrade_handler,
.open = open_handler,
.message = message_handler,
.drain = drain_handler,
.ping = ping_handler,
.pong = pong_handler,
.close = close_handler,
});
uws_app_listen(SSL, app, 9001, listen_handler, NULL);
uws_app_run(SSL, app);
}

View File

@@ -0,0 +1,81 @@
#include "../libuwebsockets.h"
#include <stdio.h>
#include <malloc.h>
#define SSL 1
/* This is a simple WebSocket "sync" upgrade example.
* You may compile it with "WITH_OPENSSL=1 make" or with "make" */
/* ws->getUserData returns one of these */
struct PerSocketData {
/* Fill with user data */
};
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void* user_data)
{
if (listen_socket){
printf("Listening on port wss://localhost:%d\n", config.port);
}
}
void open_handler(uws_websocket_t* ws){
/* Open event here, you may access uws_ws_get_user_data(ws) which points to a PerSocketData struct */
}
void message_handler(uws_websocket_t* ws, const char* message, size_t length, uws_opcode_t opcode){
uws_ws_send(SSL, ws, message, length, opcode);
}
void close_handler(uws_websocket_t* ws, int code, const char* message, size_t length){
/* You may access uws_ws_get_user_data(ws) here, but sending or
* doing any kind of I/O with the socket is not valid. */
}
void drain_handler(uws_websocket_t* ws){
/* Check uws_ws_get_buffered_amount(ws) here */
}
void ping_handler(uws_websocket_t* ws, const char* message, size_t length){
/* You don't need to handle this one, we automatically respond to pings as per standard */
}
void pong_handler(uws_websocket_t* ws, const char* message, size_t length){
/* You don't need to handle this one either */
}
int main()
{
uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
/* There are example certificates in uWebSockets.js repo */
.key_file_name = "../misc/key.pem",
.cert_file_name = "../misc/cert.pem",
.passphrase = "1234"
});
uws_ws(SSL, app, "/*", (uws_socket_behavior_t){
.compression = uws_compress_options_t::SHARED_COMPRESSOR,
.maxPayloadLength = 16 * 1024,
.idleTimeout = 12,
.maxBackpressure = 1 * 1024 * 1024,
.upgrade = NULL,
.open = open_handler,
.message = message_handler,
.drain = drain_handler,
.ping = ping_handler,
.pong = pong_handler,
.close = close_handler,
});
uws_app_listen(SSL,app, 9001, listen_handler, NULL);
uws_app_run(SSL, app);
}

View File

@@ -0,0 +1,33 @@
#include "../libuwebsockets.h"
#include "libusockets.h"
#include <stdio.h>
#define SSL 1
void get_handler(uws_res_t *res, uws_req_t *req, void *user_data)
{
uws_res_end(SSL, res, "Hello CAPI!", 11, false);
}
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void *user_data)
{
if (listen_socket)
{
printf("Listening on port https://localhost:%d now\n", config.port);
}
}
int main()
{
/* Overly simple hello world app */
uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
/* There are example certificates in uWebSockets.js repo */
.key_file_name = "../misc/key.pem",
.cert_file_name = "../misc/cert.pem",
.passphrase = "1234"
});
uws_app_get(SSL, app, "/*", get_handler, NULL);
uws_app_listen(SSL, app, 3000, listen_handler, NULL);
uws_app_run(SSL, app);
}

View File

@@ -0,0 +1,123 @@
#include "../libuwebsockets.h"
#include "libusockets.h"
#include <stdio.h>
#include <malloc.h>
#include <string.h>
#define SSL 0
typedef struct {
uws_res_t* res;
bool aborted;
} async_request_t;
//Timer close helper
void uws_timer_close(struct us_timer_t *timer)
{
struct us_timer_t *t = (struct us_timer_t *)timer;
struct timer_handler_data *data;
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
free(data);
us_timer_close(t, 0);
}
//Timer create helper
struct us_timer_t *uws_create_timer(int ms, int repeat_ms, void (*handler)(void *data), void *data)
{
struct us_loop_t *loop = uws_get_loop();
struct us_timer_t *delayTimer = us_create_timer(loop, 0, sizeof(void *));
struct timer_handler_data
{
void *data;
void (*handler)(void *data);
bool repeat;
};
struct timer_handler_data *timer_data = (struct timer_handler_data *)malloc(sizeof(timer_handler_data));
timer_data->data = data;
timer_data->handler = handler;
timer_data->repeat = repeat_ms > 0;
memcpy(us_timer_ext(delayTimer), &timer_data, sizeof(struct timer_handler_data *));
us_timer_set(
delayTimer, [](struct us_timer_t *t)
{
/* We wrote the pointer to the timer's extension */
struct timer_handler_data *data;
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
data->handler(data->data);
if (!data->repeat)
{
free(data);
us_timer_close(t, 0);
}
},
ms, repeat_ms);
return (struct us_timer_t *)delayTimer;
}
void on_res_aborted(uws_res_t *response, void* data){
async_request_t* request_data = (async_request_t*)data;
/* We don't implement any kind of cancellation here,
* so simply flag us as aborted */
request_data->aborted = true;
}
void on_res_corked(uws_res_t *response, void* data){
uws_res_end(SSL, response, "Hello CAPI!", 11, false);
}
void on_timer_done(void *data){
async_request_t* request_data = (async_request_t*)data;
/* Weren't we aborted before our async task finished? Okay, send a message! */
if(!request_data->aborted){
uws_res_cork(SSL, request_data->res,on_res_corked, request_data);
}
}
void get_handler(uws_res_t *res, uws_req_t *req, void* user_data)
{
/* We have to attach an abort handler for us to be aware
* of disconnections while we perform async tasks */
async_request_t* request_data = (async_request_t*) malloc(sizeof(async_request_t));
request_data->res = res;
request_data->aborted = false;
uws_res_on_aborted(SSL, res, on_res_aborted, request_data);
/* Simulate an async auth check (shortened to a 1 ms timer here). This looks like crap, never write
* code that utilizes us_timer_t like this; they are high-cost and should
* not be created and destroyed more than rarely!
* Either way, here we go! */
uws_create_timer(1, 0, on_timer_done, request_data);
}
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void* user_data)
{
if (listen_socket)
{
printf("Listening on port https://localhost:%d now\n", config.port);
}
}
int main()
{
/* Overly simple hello world app with async response */
uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
/* There are example certificates in uWebSockets.js repo */
.key_file_name = "../misc/key.pem",
.cert_file_name = "../misc/cert.pem",
.passphrase = "1234"
});
uws_app_get(SSL, app, "/*", get_handler, NULL);
uws_app_listen(SSL, app, 3000, listen_handler, NULL);
uws_app_run(SSL, app);
}

View File

@@ -0,0 +1,309 @@
/* automatically generated by rust-bindgen 0.59.2 */
use std::convert::TryInto;
use std::ffi::CString;
pub type SizeT = ::std::os::raw::c_ulong;
pub type WcharT = ::std::os::raw::c_uint;
#[repr(C)]
#[repr(align(16))]
#[derive(Debug, Copy, Clone)]
pub struct max_align_t {
pub __clang_max_align_nonce1: ::std::os::raw::c_longlong,
pub __bindgen_padding_0: u64,
pub __clang_max_align_nonce2: u128,
}
#[test]
fn bindgen_test_layout_max_align_t() {
assert_eq!(
::std::mem::size_of::<max_align_t>(),
32usize,
concat!("Size of: ", stringify!(max_align_t))
);
assert_eq!(
::std::mem::align_of::<max_align_t>(),
16usize,
concat!("Alignment of ", stringify!(max_align_t))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<max_align_t>())).__clang_max_align_nonce1 as *const _ as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(max_align_t),
"::",
stringify!(__clang_max_align_nonce1)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<max_align_t>())).__clang_max_align_nonce2 as *const _ as usize
},
16usize,
concat!(
"Offset of field: ",
stringify!(max_align_t),
"::",
stringify!(__clang_max_align_nonce2)
)
);
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct uws_app_s {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct uws_req_s {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct uws_res_s {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct uws_app_listen_config_s {
port: ::std::os::raw::c_int,
host: *const ::std::os::raw::c_char,
options: ::std::os::raw::c_int,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct us_socket_context_options_s {
key_file_name: *const ::std::os::raw::c_char,
cert_file_name: *const ::std::os::raw::c_char,
passphrase: *const ::std::os::raw::c_char,
dh_params_file_name: *const ::std::os::raw::c_char,
ca_file_name: *const ::std::os::raw::c_char,
ssl_prefer_low_memory_usage: ::std::os::raw::c_int,
}
pub type UwsAppListenConfigT = uws_app_listen_config_s;
pub type UsSocketContextOptionsT = us_socket_context_options_s;
pub struct UsSocketContextOptions<'a> {
key_file_name: &'a str,
cert_file_name: &'a str,
passphrase: &'a str,
dh_params_file_name: &'a str,
ca_file_name: &'a str,
ssl_prefer_low_memory_usage: i32,
}
pub type UwsAppT = uws_app_s;
pub type UwsReqT = uws_req_s;
pub type UwsResT = uws_res_s;
extern "C" {
pub fn uws_create_app(
ssl: ::std::os::raw::c_int,
options: UsSocketContextOptionsT,
) -> *mut UwsAppT;
pub fn uws_app_get(
ssl: ::std::os::raw::c_int,
app: *mut UwsAppT,
pattern: *const ::std::os::raw::c_char,
handler: ::std::option::Option<
unsafe extern "C" fn(
res: *mut UwsResT,
req: *mut UwsReqT,
user_data: *mut ::std::os::raw::c_void,
),
>,
user_data: *mut ::std::os::raw::c_void,
);
pub fn uws_app_run(ssl: ::std::os::raw::c_int, app: *mut UwsAppT);
pub fn uws_app_listen(
ssl: ::std::os::raw::c_int,
app: *mut UwsAppT,
port: ::std::os::raw::c_int,
handler: ::std::option::Option<
unsafe extern "C" fn(
listen_socket: *mut ::std::os::raw::c_void,
config: UwsAppListenConfigT,
user_data: *mut ::std::os::raw::c_void,
),
>,
user_data: *mut ::std::os::raw::c_void,
);
pub fn uws_res_end(
ssl: ::std::os::raw::c_int,
res: *mut UwsResT,
data: *const ::std::os::raw::c_char,
length: SizeT,
close_connection: bool,
);
}
pub struct AppResponse<const SSL: i32> {
native: *mut UwsResT,
}
pub struct AppRequest {
native: *mut UwsReqT,
}
impl AppRequest {
pub fn new(native: *mut UwsReqT) -> AppRequest {
AppRequest { native: native }
}
}
impl<const SSL: i32> AppResponse<SSL> {
pub fn new(native: *mut UwsResT) -> AppResponse<SSL> {
AppResponse::<SSL> { native: native }
}
fn end(self, message: &str) -> AppResponse<SSL> {
unsafe {
let c_message =
::std::ffi::CString::new(message).expect("Failed to create message CString");
// This will now const fold :/ - performance impact, needs a refactor
uws_res_end(
SSL,
self.native,
c_message.as_ptr(),
message.len().try_into().unwrap(),
false,
);
}
self
}
}
pub type UwsMethodHandler<const SSL: i32> = fn(res: AppResponse<SSL>, req: AppRequest);
pub type UwsListenHandler =
fn(listen_socket: *mut ::std::os::raw::c_void, config: UwsAppListenConfigT);
pub struct TemplateApp<const SSL: i32> {
native: *mut UwsAppT,
}
extern "C" fn uws_generic_listen_handler(
listen_socket: *mut ::std::os::raw::c_void,
config: UwsAppListenConfigT,
user_data: *mut ::std::os::raw::c_void,
) {
unsafe {
let callback = &mut *(user_data as *mut UwsListenHandler);
callback(listen_socket, config);
}
}
extern "C" fn uws_generic_method_handler(
res: *mut UwsResT,
req: *mut UwsReqT,
user_data: *mut ::std::os::raw::c_void,
) {
unsafe {
let response = AppResponse::<0>::new(res);
let request = AppRequest::new(req);
let callback = &mut *(user_data as *mut UwsMethodHandler<0>);
callback(response, request);
}
}
extern "C" fn uws_ssl_generic_method_handler(
res: *mut UwsResT,
req: *mut UwsReqT,
user_data: *mut ::std::os::raw::c_void,
) {
unsafe {
let response = AppResponse::<1>::new(res);
let request = AppRequest::new(req);
let callback = &mut *(user_data as *mut UwsMethodHandler<1>);
callback(response, request);
}
}
impl<const SSL: i32> TemplateApp<SSL> {
pub fn new(config: UsSocketContextOptions) -> TemplateApp<SSL> {
unsafe {
let key_file_name_s =
CString::new(config.key_file_name).expect("Failed to create key_file_name CString");
let cert_file_name_s = CString::new(config.cert_file_name)
.expect("Failed to create cert_file_name CString");
let passphrase_s =
CString::new(config.passphrase).expect("Failed to create passphrase CString");
let dh_params_file_name_s = CString::new(config.dh_params_file_name)
.expect("Failed to create dh_params_file_name CString");
let ca_file_name_s =
CString::new(config.ca_file_name).expect("Failed to create ca_file_name CString");
let native_options = UsSocketContextOptionsT {
key_file_name: key_file_name_s.as_ptr(),
cert_file_name: cert_file_name_s.as_ptr(),
passphrase: passphrase_s.as_ptr(),
dh_params_file_name: dh_params_file_name_s.as_ptr(),
ca_file_name: ca_file_name_s.as_ptr(),
ssl_prefer_low_memory_usage: config.ssl_prefer_low_memory_usage,
};
TemplateApp::<SSL> {
native: uws_create_app(SSL, native_options),
}
}
}
pub fn get(self, route: &str, mut handler: UwsMethodHandler<SSL>) -> TemplateApp<SSL> {
unsafe {
let c_route = ::std::ffi::CString::new(route).expect("Failed to create route CString");
if SSL == 1 {
uws_app_get(
SSL,
self.native,
c_route.as_ptr(),
std::option::Option::Some(uws_ssl_generic_method_handler),
&mut handler as *mut _ as *mut ::std::os::raw::c_void,
);
} else {
uws_app_get(
SSL,
self.native,
c_route.as_ptr(),
std::option::Option::Some(uws_generic_method_handler),
&mut handler as *mut _ as *mut ::std::os::raw::c_void,
);
}
}
self
}
pub fn listen(self, port: i32, mut handler: UwsListenHandler) -> TemplateApp<SSL> {
unsafe {
uws_app_listen(
SSL,
self.native,
port,
::std::option::Option::Some(uws_generic_listen_handler),
&mut handler as *mut _ as *mut ::std::os::raw::c_void,
);
}
self
}
pub fn run(self) -> TemplateApp<SSL> {
unsafe {
uws_app_run(SSL, self.native);
}
self
}
}
pub type App = TemplateApp<0>;
pub type SSLApp = TemplateApp<1>;
fn main() {
let config = UsSocketContextOptions {
key_file_name: "../misc/key.pem",
cert_file_name: "../misc/cert.pem",
passphrase: "1234",
ca_file_name: "",
dh_params_file_name: "",
ssl_prefer_low_memory_usage: 0,
};
SSLApp::new(config)
.get("/", |res, _req| {
res.end("Hello Rust!");
})
.listen(3000, |_listen_socket, config| {
println!("Listening on port https://127.0.0.1:{}", config.port);
})
.run();
}

Some files were not shown because too many files have changed in this diff.