Mirror of https://github.com/oven-sh/bun (synced 2026-02-03 07:28:53 +00:00)

Compare commits: 8 commits, jarred/pro ... build-scri

| Author | SHA1 | Date |
|---|---|---|
| | 4ca0d96837 | |
| | 2091551c92 | |
| | a2bc49a991 | |
| | 398e93249e | |
| | 1ec44688b7 | |
| | 7aa2360542 | |
| | e87c599e6a | |
| | d60da3d186 | |
@@ -10,10 +10,9 @@ steps:
blocked_state: "running"
- label: ":pipeline:"
command: "buildkite-agent pipeline upload .buildkite/ci.yml"
agents:
queue: "build-darwin"
command:
- ".buildkite/scripts/prepare-build.sh"
queue: "build-linux"
- if: "build.branch == 'main' && !build.pull_request.repository.fork"
label: ":github:"

File diff suppressed because it is too large
@@ -1,55 +0,0 @@
#!/bin/bash

set -eo pipefail
source "$(dirname "$0")/env.sh"

function run_command() {
  set -x
  "$@"
  { set +x; } 2>/dev/null
}

cwd="$(pwd)"

mkdir -p build
source "$(dirname "$0")/download-artifact.sh" "build/bun-deps/**" --step "$BUILDKITE_GROUP_KEY-build-deps"
source "$(dirname "$0")/download-artifact.sh" "build/bun-zig.o" --step "$BUILDKITE_GROUP_KEY-build-zig"
source "$(dirname "$0")/download-artifact.sh" "build/bun-cpp-objects.a" --step "$BUILDKITE_GROUP_KEY-build-cpp" --split
cd build

run_command cmake .. "${CMAKE_FLAGS[@]}" \
  -GNinja \
  -DBUN_LINK_ONLY="1" \
  -DNO_CONFIGURE_DEPENDS="1" \
  -DBUN_ZIG_OBJ_DIR="$cwd/build" \
  -DBUN_CPP_ARCHIVE="$cwd/build/bun-cpp-objects.a" \
  -DBUN_DEPS_OUT_DIR="$cwd/build/bun-deps" \
  -DCMAKE_BUILD_TYPE="$CMAKE_BUILD_TYPE" \
  -DCPU_TARGET="$CPU_TARGET" \
  -DUSE_LTO="$USE_LTO" \
  -DUSE_DEBUG_JSC="$USE_DEBUG_JSC" \
  -DCANARY="$CANARY" \
  -DGIT_SHA="$GIT_SHA"
run_command ninja -v -j "$CPUS"
run_command ls

tag="bun-$BUILDKITE_GROUP_KEY"
if [ "$USE_LTO" == "OFF" ]; then
  # Remove OS check when LTO is enabled on macOS again
  if [[ "$tag" == *"darwin"* ]]; then
    tag="$tag-nolto"
  fi
fi

for name in bun bun-profile; do
  dir="$tag"
  if [ "$name" == "bun-profile" ]; then
    dir="$tag-profile"
  fi
  run_command chmod +x "$name"
  run_command "./$name" --revision
  run_command mkdir -p "$dir"
  run_command mv "$name" "$dir/$name"
  run_command zip -r "$dir.zip" "$dir"
  source "$cwd/.buildkite/scripts/upload-artifact.sh" "$dir.zip"
done
@@ -1,35 +0,0 @@
#!/bin/bash

set -eo pipefail
source "$(dirname "$0")/env.sh"
export FORCE_UPDATE_SUBMODULES=1
source "$(realpath $(dirname "$0")/../../scripts/update-submodules.sh)"
{ set +x; } 2>/dev/null

function run_command() {
  set -x
  "$@"
  { set +x; } 2>/dev/null
}

mkdir -p build
cd build
mkdir -p tmp_modules tmp_functions js codegen

run_command cmake .. "${CMAKE_FLAGS[@]}" \
  -GNinja \
  -DBUN_CPP_ONLY="1" \
  -DNO_CONFIGURE_DEPENDS="1" \
  -DCMAKE_BUILD_TYPE="$CMAKE_BUILD_TYPE" \
  -DCPU_TARGET="$CPU_TARGET" \
  -DUSE_LTO="$USE_LTO" \
  -DUSE_DEBUG_JSC="$USE_DEBUG_JSC" \
  -DCANARY="$CANARY" \
  -DGIT_SHA="$GIT_SHA"

chmod +x compile-cpp-only.sh
source compile-cpp-only.sh -v -j "$CPUS"
{ set +x; } 2>/dev/null

cd ..
source "$(dirname "$0")/upload-artifact.sh" "build/bun-cpp-objects.a" --split
@@ -1,22 +0,0 @@
#!/bin/bash

set -eo pipefail
source "$(dirname "$0")/env.sh"
source "$(realpath $(dirname "$0")/../../scripts/all-dependencies.sh)"

artifacts=(
  libcrypto.a libssl.a libdecrepit.a
  libcares.a
  libarchive.a
  liblolhtml.a
  libmimalloc.a libmimalloc.o
  libtcc.a
  libz.a
  libzstd.a
  libdeflate.a
  liblshpack.a
)

for artifact in "${artifacts[@]}"; do
  source "$(dirname "$0")/upload-artifact.sh" "build/bun-deps/$artifact"
done
@@ -1,40 +0,0 @@
#!/bin/bash

set -eo pipefail
source "$(dirname "$0")/env.sh"

function assert_bun() {
  if ! command -v bun &>/dev/null; then
    echo "error: bun is not installed" 1>&2
    exit 1
  fi
}

function assert_make() {
  if ! command -v make &>/dev/null; then
    echo "error: make is not installed" 1>&2
    exit 1
  fi
}

function run_command() {
  set -x
  "$@"
  { set +x; } 2>/dev/null
}

function build_node_fallbacks() {
  local cwd="src/node-fallbacks"
  run_command bun install --cwd "$cwd" --frozen-lockfile
  run_command bun run --cwd "$cwd" build
}

function build_old_js() {
  run_command bun install --frozen-lockfile
  run_command make runtime_js fallback_decoder bun_error
}

assert_bun
assert_make
build_node_fallbacks
build_old_js
@@ -1,80 +0,0 @@
#!/bin/bash

set -eo pipefail
source "$(dirname "$0")/env.sh"

function assert_target() {
  local arch="${2-$(uname -m)}"
  case "$(echo "$arch" | tr '[:upper:]' '[:lower:]')" in
    x64 | x86_64 | amd64)
      export ZIG_ARCH="x86_64"
      if [[ "$BUILDKITE_STEP_KEY" == *"baseline"* ]]; then
        export ZIG_CPU_TARGET="nehalem"
      else
        export ZIG_CPU_TARGET="haswell"
      fi
      ;;
    aarch64 | arm64)
      export ZIG_ARCH="aarch64"
      export ZIG_CPU_TARGET="native"
      ;;
    *)
      echo "error: Unsupported architecture: $arch" 1>&2
      exit 1
      ;;
  esac
  local os="${1-$(uname -s)}"
  case "$(echo "$os" | tr '[:upper:]' '[:lower:]')" in
    linux)
      export ZIG_TARGET="$ZIG_ARCH-linux-gnu" ;;
    darwin)
      export ZIG_TARGET="$ZIG_ARCH-macos-none" ;;
    windows)
      export ZIG_TARGET="$ZIG_ARCH-windows-msvc" ;;
    *)
      echo "error: Unsupported operating system: $os" 1>&2
      exit 1
      ;;
  esac
}

function run_command() {
  set -x
  "$@"
  { set +x; } 2>/dev/null
}

assert_target "$@"

# Since the zig build depends on files from the zig submodule,
# make sure to update the submodule before building.
run_command git submodule update --init --recursive --progress --depth=1 --checkout src/deps/zig

# TODO: Move these to be part of the CMake build
source "$(dirname "$0")/build-old-js.sh"

cwd="$(pwd)"
mkdir -p build
cd build

run_command cmake .. "${CMAKE_FLAGS[@]}" \
  -GNinja \
  -DNO_CONFIGURE_DEPENDS="1" \
  -DNO_CODEGEN="0" \
  -DWEBKIT_DIR="omit" \
  -DBUN_ZIG_OBJ_DIR="$cwd/build" \
  -DZIG_LIB_DIR="$cwd/src/deps/zig/lib" \
  -DCMAKE_BUILD_TYPE="$CMAKE_BUILD_TYPE" \
  -DARCH="$ZIG_ARCH" \
  -DCPU_TARGET="$ZIG_CPU_TARGET" \
  -DZIG_TARGET="$ZIG_TARGET" \
  -DUSE_LTO="$USE_LTO" \
  -DUSE_DEBUG_JSC="$USE_DEBUG_JSC" \
  -DCANARY="$CANARY" \
  -DGIT_SHA="$GIT_SHA"

export ONLY_ZIG="1"
run_command ninja "$cwd/build/bun-zig.o" -v -j "$CPUS"

cd ..
source "$(dirname "$0")/upload-artifact.sh" "build/bun-zig.o"
@@ -1,47 +0,0 @@
param (
  [Parameter(Mandatory=$true)]
  [string[]] $Paths,
  [switch] $Split
)

$ErrorActionPreference = "Stop"

function Assert-Buildkite-Agent() {
  if (-not (Get-Command "buildkite-agent" -ErrorAction SilentlyContinue)) {
    Write-Error "Cannot find buildkite-agent, please install it: https://buildkite.com/docs/agent/v3/install"
    exit 1
  }
}

function Assert-Join-File() {
  if (-not (Get-Command "Join-File" -ErrorAction SilentlyContinue)) {
    Write-Error "Cannot find Join-File, please install it: https://www.powershellgallery.com/packages/FileSplitter/1.3"
    exit 1
  }
}

function Download-Buildkite-Artifact() {
  param (
    [Parameter(Mandatory=$true)]
    [string] $Path
  )
  if ($Split) {
    & buildkite-agent artifact download "$Path.*" --debug --debug-http
    Join-File -Path "$(Resolve-Path .)\$Path" -Verbose -DeletePartFiles
  } else {
    & buildkite-agent artifact download "$Path" --debug --debug-http
  }
  if (-not (Test-Path $Path)) {
    Write-Error "Could not find artifact: $Path"
    exit 1
  }
}

Assert-Buildkite-Agent
if ($Split) {
  Assert-Join-File
}

foreach ($Path in $Paths) {
  Download-Buildkite-Artifact $Path
}
@@ -1,46 +0,0 @@
#!/bin/bash

set -eo pipefail

function assert_buildkite_agent() {
  if ! command -v buildkite-agent &> /dev/null; then
    echo "error: Cannot find buildkite-agent, please install it:"
    echo "https://buildkite.com/docs/agent/v3/install"
    exit 1
  fi
}

function download_buildkite_artifact() {
  local path="$1"; shift
  local split="0"
  local args=()
  while true; do
    if [ -z "$1" ]; then
      break
    fi
    case "$1" in
      --split) split="1"; shift ;;
      *) args+=("$1"); shift ;;
    esac
  done
  if [ "$split" == "1" ]; then
    run_command buildkite-agent artifact download "$path.*" . "${args[@]}"
    run_command cat $path.?? > "$path"
    run_command rm -f $path.??
  else
    run_command buildkite-agent artifact download "$path" . "${args[@]}"
  fi
  if [[ "$path" != *"*"* ]] && [ ! -f "$path" ]; then
    echo "error: Could not find artifact: $path"
    exit 1
  fi
}

function run_command() {
  set -x
  "$@"
  { set +x; } 2>/dev/null
}

assert_buildkite_agent
download_buildkite_artifact "$@"
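For context, the build steps above source this helper instead of calling buildkite-agent directly; any extra flags such as --step are forwarded to the agent. A minimal sketch of how the link step pulls its inputs (the commands mirror the link script earlier in this diff; the group key value here is only a placeholder):

```bash
# Illustrative only: the step-key prefix is a placeholder.
export BUILDKITE_GROUP_KEY="bun-linux-x64"
source .buildkite/scripts/download-artifact.sh "build/bun-zig.o" --step "$BUILDKITE_GROUP_KEY-build-zig"
# The C++ archive is uploaded with --split, so it is also downloaded with --split
# so that the numbered parts are concatenated back into one file.
source .buildkite/scripts/download-artifact.sh "build/bun-cpp-objects.a" --step "$BUILDKITE_GROUP_KEY-build-cpp" --split
```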
@@ -1,120 +0,0 @@
#!/bin/bash

set -eo pipefail

function assert_os() {
  local os="$(uname -s)"
  case "$os" in
    Linux)
      echo "linux" ;;
    Darwin)
      echo "darwin" ;;
    *)
      echo "error: Unsupported operating system: $os" 1>&2
      exit 1
      ;;
  esac
}

function assert_arch() {
  local arch="$(uname -m)"
  case "$arch" in
    aarch64 | arm64)
      echo "aarch64" ;;
    x86_64 | amd64)
      echo "x64" ;;
    *)
      echo "error: Unknown architecture: $arch" 1>&2
      exit 1
      ;;
  esac
}

function assert_build() {
  if [ -z "$BUILDKITE_REPO" ]; then
    echo "error: Cannot find repository for this build"
    exit 1
  fi
  if [ -z "$BUILDKITE_COMMIT" ]; then
    echo "error: Cannot find commit for this build"
    exit 1
  fi
  if [ -z "$BUILDKITE_STEP_KEY" ]; then
    echo "error: Cannot find step key for this build"
    exit 1
  fi
  if [ -n "$BUILDKITE_GROUP_KEY" ] && [[ "$BUILDKITE_STEP_KEY" != "$BUILDKITE_GROUP_KEY"* ]]; then
    echo "error: Build step '$BUILDKITE_STEP_KEY' does not start with group key '$BUILDKITE_GROUP_KEY'"
    exit 1
  fi
  # Skip os and arch checks for Zig, since it's cross-compiled on macOS
  if [[ "$BUILDKITE_STEP_KEY" != *"zig"* ]]; then
    local os="$(assert_os)"
    if [[ "$BUILDKITE_STEP_KEY" != *"$os"* ]]; then
      echo "error: Build step '$BUILDKITE_STEP_KEY' does not match operating system '$os'"
      exit 1
    fi
    local arch="$(assert_arch)"
    if [[ "$BUILDKITE_STEP_KEY" != *"$arch"* ]]; then
      echo "error: Build step '$BUILDKITE_STEP_KEY' does not match architecture '$arch'"
      exit 1
    fi
  fi
}

function assert_buildkite_agent() {
  if ! command -v buildkite-agent &> /dev/null; then
    echo "error: Cannot find buildkite-agent, please install it:"
    echo "https://buildkite.com/docs/agent/v3/install"
    exit 1
  fi
}

function export_environment() {
  source "$(realpath $(dirname "$0")/../../scripts/env.sh)"
  source "$(realpath $(dirname "$0")/../../scripts/update-submodules.sh)"
  { set +x; } 2>/dev/null
  export GIT_SHA="$BUILDKITE_COMMIT"
  export CCACHE_DIR="$HOME/.cache/ccache/$BUILDKITE_STEP_KEY"
  export SCCACHE_DIR="$HOME/.cache/sccache/$BUILDKITE_STEP_KEY"
  export ZIG_LOCAL_CACHE_DIR="$HOME/.cache/zig-cache/$BUILDKITE_STEP_KEY"
  export ZIG_GLOBAL_CACHE_DIR="$HOME/.cache/zig-cache/$BUILDKITE_STEP_KEY"
  export BUN_DEPS_CACHE_DIR="$HOME/.cache/bun-deps/$BUILDKITE_STEP_KEY"
  if [ "$(assert_os)" == "linux" ]; then
    export USE_LTO="ON"
  fi
  if [ "$(assert_arch)" == "aarch64" ]; then
    export CPU_TARGET="native"
  elif [[ "$BUILDKITE_STEP_KEY" == *"baseline"* ]]; then
    export CPU_TARGET="nehalem"
  else
    export CPU_TARGET="haswell"
  fi
  if $(buildkite-agent meta-data exists release &> /dev/null); then
    export CMAKE_BUILD_TYPE="$(buildkite-agent meta-data get release)"
  else
    export CMAKE_BUILD_TYPE="Release"
  fi
  if $(buildkite-agent meta-data exists canary &> /dev/null); then
    export CANARY="$(buildkite-agent meta-data get canary)"
  else
    export CANARY="1"
  fi
  if $(buildkite-agent meta-data exists assertions &> /dev/null); then
    export USE_DEBUG_JSC="$(buildkite-agent meta-data get assertions)"
  else
    export USE_DEBUG_JSC="OFF"
  fi
  if [ "$BUILDKITE_CLEAN_CHECKOUT" == "true" ] || [ "$BUILDKITE_BRANCH" == "main" ]; then
    rm -rf "$CCACHE_DIR"
    rm -rf "$SCCACHE_DIR"
    rm -rf "$ZIG_LOCAL_CACHE_DIR"
    rm -rf "$ZIG_GLOBAL_CACHE_DIR"
    rm -rf "$BUN_DEPS_CACHE_DIR"
    export CCACHE_RECACHE="1"
  fi
}

assert_build
assert_buildkite_agent
export_environment
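The release, canary, and assertions branches above read Buildkite build meta-data. Only the canary key is set by a script in this diff (the pipeline-prepare script below); where the other keys come from is not shown here, so the following is just an illustrative sketch, assuming they are set with the agent CLI when a build is scheduled:

```bash
# Illustrative values; the key names match what the environment script reads above.
buildkite-agent meta-data set release "Release"   # consumed as CMAKE_BUILD_TYPE
buildkite-agent meta-data set canary "0"          # 0 disables the canary revision
buildkite-agent meta-data set assertions "ON"     # consumed as USE_DEBUG_JSC
```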
@@ -1,97 +0,0 @@
#!/bin/bash

set -eo pipefail

function assert_build() {
  if [ -z "$BUILDKITE_REPO" ]; then
    echo "error: Cannot find repository for this build"
    exit 1
  fi
  if [ -z "$BUILDKITE_COMMIT" ]; then
    echo "error: Cannot find commit for this build"
    exit 1
  fi
}

function assert_buildkite_agent() {
  if ! command -v buildkite-agent &> /dev/null; then
    echo "error: Cannot find buildkite-agent, please install it:"
    echo "https://buildkite.com/docs/agent/v3/install"
    exit 1
  fi
}

function assert_jq() {
  assert_command "jq" "jq" "https://stedolan.github.io/jq/"
}

function assert_curl() {
  assert_command "curl" "curl" "https://curl.se/download.html"
}

function assert_command() {
  local command="$1"
  local package="$2"
  local help_url="$3"
  if ! command -v "$command" &> /dev/null; then
    echo "warning: $command is not installed, installing..."
    if command -v brew &> /dev/null; then
      HOMEBREW_NO_AUTO_UPDATE=1 brew install "$package"
    else
      echo "error: Cannot install $command, please install it"
      if [ -n "$help_url" ]; then
        echo ""
        echo "hint: See $help_url for help"
      fi
      exit 1
    fi
  fi
}

function assert_release() {
  if [ "$RELEASE" == "1" ]; then
    run_command buildkite-agent meta-data set canary "0"
  fi
}

function assert_canary() {
  local canary="$(buildkite-agent meta-data get canary 2>/dev/null)"
  if [ -z "$canary" ]; then
    local repo=$(echo "$BUILDKITE_REPO" | sed -E 's#https://github.com/([^/]+)/([^/]+).git#\1/\2#g')
    local tag="$(curl -sL "https://api.github.com/repos/$repo/releases/latest" | jq -r ".tag_name")"
    if [ "$tag" == "null" ]; then
      canary="1"
    else
      local revision=$(curl -sL "https://api.github.com/repos/$repo/compare/$tag...$BUILDKITE_COMMIT" | jq -r ".ahead_by")
      if [ "$revision" == "null" ]; then
        canary="1"
      else
        canary="$revision"
      fi
    fi
    run_command buildkite-agent meta-data set canary "$canary"
  fi
}

function upload_buildkite_pipeline() {
  local path="$1"
  if [ ! -f "$path" ]; then
    echo "error: Cannot find pipeline: $path"
    exit 1
  fi
  run_command buildkite-agent pipeline upload "$path"
}

function run_command() {
  set -x
  "$@"
  { set +x; } 2>/dev/null
}

assert_build
assert_buildkite_agent
assert_jq
assert_curl
assert_release
assert_canary
upload_buildkite_pipeline ".buildkite/ci.yml"
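assert_canary derives the canary number from how far the build commit is ahead of the latest tagged release, via the GitHub compare API. A standalone sketch of that calculation, with the repository and commit filled in as placeholders rather than read from Buildkite:

```bash
repo="oven-sh/bun"       # the script derives this from $BUILDKITE_REPO
commit="<commit-sha>"    # placeholder for $BUILDKITE_COMMIT
tag="$(curl -sL "https://api.github.com/repos/$repo/releases/latest" | jq -r '.tag_name')"
# ".ahead_by" counts commits between the latest release tag and this commit;
# that count becomes the canary revision stored in build meta-data.
curl -sL "https://api.github.com/repos/$repo/compare/$tag...$commit" | jq -r '.ahead_by'
```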
@@ -1,47 +0,0 @@
param (
  [Parameter(Mandatory=$true)]
  [string[]] $Paths,
  [switch] $Split
)

$ErrorActionPreference = "Stop"

function Assert-Buildkite-Agent() {
  if (-not (Get-Command "buildkite-agent" -ErrorAction SilentlyContinue)) {
    Write-Error "Cannot find buildkite-agent, please install it: https://buildkite.com/docs/agent/v3/install"
    exit 1
  }
}

function Assert-Split-File() {
  if (-not (Get-Command "Split-File" -ErrorAction SilentlyContinue)) {
    Write-Error "Cannot find Split-File, please install it: https://www.powershellgallery.com/packages/FileSplitter/1.3"
    exit 1
  }
}

function Upload-Buildkite-Artifact() {
  param (
    [Parameter(Mandatory=$true)]
    [string] $Path
  )
  if (-not (Test-Path $Path)) {
    Write-Error "Could not find artifact: $Path"
    exit 1
  }
  if ($Split) {
    Remove-Item -Path "$Path.*" -Force
    Split-File -Path (Resolve-Path $Path) -PartSizeBytes "50MB" -Verbose
    $Path = "$Path.*"
  }
  & buildkite-agent artifact upload "$Path" --debug --debug-http
}

Assert-Buildkite-Agent
if ($Split) {
  Assert-Split-File
}

foreach ($Path in $Paths) {
  Upload-Buildkite-Artifact $Path
}
@@ -1,54 +0,0 @@
#!/bin/bash

set -eo pipefail

function assert_buildkite_agent() {
  if ! command -v buildkite-agent &> /dev/null; then
    echo "error: Cannot find buildkite-agent, please install it:"
    echo "https://buildkite.com/docs/agent/v3/install"
    exit 1
  fi
}

function assert_split() {
  if ! command -v split &> /dev/null; then
    echo "error: Cannot find split, please install it:"
    echo "https://www.gnu.org/software/coreutils/split"
    exit 1
  fi
}

function upload_buildkite_artifact() {
  local path="$1"; shift
  local split="0"
  local args=()
  while true; do
    if [ -z "$1" ]; then
      break
    fi
    case "$1" in
      --split) split="1"; shift ;;
      *) args+=("$1"); shift ;;
    esac
  done
  if [ ! -f "$path" ]; then
    echo "error: Could not find artifact: $path"
    exit 1
  fi
  if [ "$split" == "1" ]; then
    run_command rm -f "$path."*
    run_command split -b 50MB -d "$path" "$path."
    run_command buildkite-agent artifact upload "$path.*" "${args[@]}"
  else
    run_command buildkite-agent artifact upload "$path" "${args[@]}"
  fi
}

function run_command() {
  set -x
  "$@"
  { set +x; } 2>/dev/null
}

assert_buildkite_agent
upload_buildkite_artifact "$@"
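The --split path here pairs with the --split path in the download script above: the artifact is chunked into numbered 50MB parts on upload and concatenated back in order on download. A minimal local sketch of that round trip, without the Buildkite agent; the file path is only an example:

```bash
# Produce numbered parts (bun-cpp-objects.a.00, .01, ...), as the upload script does.
split -b 50MB -d build/bun-cpp-objects.a build/bun-cpp-objects.a.
# Reassemble them in order, as the download script does with "cat $path.??".
cat build/bun-cpp-objects.a.?? > build/bun-cpp-objects.a
rm -f build/bun-cpp-objects.a.??
```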
@@ -3,15 +3,7 @@

set -eo pipefail

function assert_main() {
if [ -z "$BUILDKITE_REPO" ]; then
echo "error: Cannot find repository for this build"
exit 1
fi
if [ -z "$BUILDKITE_COMMIT" ]; then
echo "error: Cannot find commit for this build"
exit 1
fi
if [ -n "$BUILDKITE_PULL_REQUEST_REPO" ] && [ "$BUILDKITE_REPO" != "$BUILDKITE_PULL_REQUEST_REPO" ]; then
if [[ "$BUILDKITE_PULL_REQUEST_REPO" && "$BUILDKITE_REPO" != "$BUILDKITE_PULL_REQUEST_REPO" ]]; then
echo "error: Cannot upload release from a fork"
exit 1
fi

@@ -26,187 +18,77 @@ function assert_main() {
}

function assert_buildkite_agent() {
if ! command -v "buildkite-agent" &> /dev/null; then
if ! command -v buildkite-agent &> /dev/null; then
echo "error: Cannot find buildkite-agent, please install it:"
echo "https://buildkite.com/docs/agent/v3/install"
exit 1
fi
}

function assert_github() {
assert_command "gh" "gh" "https://github.com/cli/cli#installation"
assert_buildkite_secret "GITHUB_TOKEN"
# gh expects the token in $GH_TOKEN
export GH_TOKEN="$GITHUB_TOKEN"
}

function assert_aws() {
assert_command "aws" "awscli" "https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html"
for secret in "AWS_ACCESS_KEY_ID" "AWS_SECRET_ACCESS_KEY" "AWS_ENDPOINT"; do
assert_buildkite_secret "$secret"
done
assert_buildkite_secret "AWS_BUCKET" --skip-redaction
}

function assert_sentry() {
assert_command "sentry-cli" "getsentry/tools/sentry-cli" "https://docs.sentry.io/cli/installation/"
for secret in "SENTRY_AUTH_TOKEN" "SENTRY_ORG" "SENTRY_PROJECT"; do
assert_buildkite_secret "$secret"
done
}

function run_command() {
set -x
"$@"
{ set +x; } 2>/dev/null
}

function assert_command() {
local command="$1"
local package="$2"
local help_url="$3"
if ! command -v "$command" &> /dev/null; then
echo "warning: $command is not installed, installing..."
function assert_gh() {
if ! command -v gh &> /dev/null; then
echo "warning: gh is not installed, installing..."
if command -v brew &> /dev/null; then
HOMEBREW_NO_AUTO_UPDATE=1 run_command brew install "$package"
brew install gh
else
echo "error: Cannot install $command, please install it"
if [ -n "$help_url" ]; then
echo ""
echo "hint: See $help_url for help"
fi
echo "error: Cannot install gh, please install it:"
echo "https://github.com/cli/cli#installation"
exit 1
fi
fi
}

function assert_buildkite_secret() {
local key="$1"
local value=$(buildkite-agent secret get "$key" ${@:2})
if [ -z "$value" ]; then
echo "error: Cannot find $key secret"
function assert_gh_token() {
local token=$(buildkite-agent secret get GITHUB_TOKEN)
if [ -z "$token" ]; then
echo "error: Cannot find GITHUB_TOKEN secret"
echo ""
echo "hint: Create a secret named $key with a value:"
echo "hint: Create a secret named GITHUB_TOKEN with a GitHub access token:"
echo "https://buildkite.com/docs/pipelines/buildkite-secrets"
exit 1
fi
export "$key"="$value"
export GH_TOKEN="$token"
}

function release_tag() {
local version="$1"
if [ "$version" == "canary" ]; then
echo "canary"
else
echo "bun-v$version"
fi
}

function create_sentry_release() {
local version="$1"
local release="$version"
if [ "$version" == "canary" ]; then
release="$BUILDKITE_COMMIT-canary"
fi
run_command sentry-cli releases new "$release" --finalize
run_command sentry-cli releases set-commits "$release" --auto --ignore-missing
if [ "$version" == "canary" ]; then
run_command sentry-cli deploys new --env="canary" --release="$release"
fi
}

function download_buildkite_artifact() {
local name="$1"
local dir="$2"
if [ -z "$dir" ]; then
dir="."
fi
run_command buildkite-agent artifact download "$name" "$dir"
if [ ! -f "$dir/$name" ]; then
function download_artifact() {
local name=$1
buildkite-agent artifact download "$name" .
if [ ! -f "$name" ]; then
echo "error: Cannot find Buildkite artifact: $name"
exit 1
fi
}

function upload_github_asset() {
local version="$1"
local tag="$(release_tag "$version")"
local file="$2"
run_command gh release upload "$tag" "$file" --clobber --repo "$BUILDKITE_REPO"

# Sometimes the upload fails, maybe this is a race condition in the gh CLI?
while [ "$(gh release view "$tag" --repo "$BUILDKITE_REPO" | grep -c "$file")" -eq 0 ]; do
echo "warn: Uploading $file to $tag failed, retrying..."
sleep "$((RANDOM % 5 + 1))"
run_command gh release upload "$tag" "$file" --clobber --repo "$BUILDKITE_REPO"
done
function upload_assets() {
local tag=$1
local files=${@:2}
gh release upload "$tag" $files --clobber --repo "$BUILDKITE_REPO"
}

function update_github_release() {
local version="$1"
local tag="$(release_tag "$version")"
if [ "$tag" == "canary" ]; then
sleep 5 # There is possibly a race condition where this overwrites artifacts?
run_command gh release edit "$tag" --repo "$BUILDKITE_REPO" \
--notes "This release of Bun corresponds to the commit: $BUILDKITE_COMMIT"
fi
}
assert_main
assert_buildkite_agent
assert_gh
assert_gh_token

function upload_s3_file() {
local folder="$1"
local file="$2"
run_command aws --endpoint-url="$AWS_ENDPOINT" s3 cp "$file" "s3://$AWS_BUCKET/$folder/$file"
}
declare artifacts=(
bun-darwin-aarch64.zip
bun-darwin-aarch64-profile.zip
bun-darwin-x64.zip
bun-darwin-x64-profile.zip
bun-linux-aarch64.zip
bun-linux-aarch64-profile.zip
bun-linux-x64.zip
bun-linux-x64-profile.zip
bun-linux-x64-baseline.zip
bun-linux-x64-baseline-profile.zip
bun-windows-x64.zip
bun-windows-x64-profile.zip
bun-windows-x64-baseline.zip
bun-windows-x64-baseline-profile.zip
)

function create_release() {
assert_main
assert_buildkite_agent
assert_github
assert_aws
assert_sentry
for artifact in "${artifacts[@]}"; do
download_artifact $artifact
done

local tag="$1" # 'canary' or 'x.y.z'
local artifacts=(
bun-darwin-aarch64.zip
bun-darwin-aarch64-profile.zip
bun-darwin-x64.zip
bun-darwin-x64-profile.zip
bun-linux-aarch64.zip
bun-linux-aarch64-profile.zip
bun-linux-x64.zip
bun-linux-x64-profile.zip
bun-linux-x64-baseline.zip
bun-linux-x64-baseline-profile.zip
bun-windows-x64.zip
bun-windows-x64-profile.zip
bun-windows-x64-baseline.zip
bun-windows-x64-baseline-profile.zip
)

function upload_artifact() {
local artifact="$1"
download_buildkite_artifact "$artifact"
upload_s3_file "releases/$BUILDKITE_COMMIT" "$artifact" &
upload_s3_file "releases/$tag" "$artifact" &
upload_github_asset "$tag" "$artifact" &
wait
}

for artifact in "${artifacts[@]}"; do
upload_artifact "$artifact"
done

update_github_release "$tag"
create_sentry_release "$tag"
}

function assert_canary() {
local canary="$(buildkite-agent meta-data get canary 2>/dev/null)"
if [ -z "$canary" ] || [ "$canary" == "0" ]; then
echo "warn: Skipping release because this is not a canary build"
exit 0
fi
}

assert_canary
create_release "canary"
upload_assets "canary" "${artifacts[@]}"
.github/workflows/build-darwin.yml (vendored, new file, 286 lines)
@@ -0,0 +1,286 @@
name: Build Darwin
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
actions: write
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runs-on:
|
||||
type: string
|
||||
default: macos-13-large
|
||||
tag:
|
||||
type: string
|
||||
required: true
|
||||
arch:
|
||||
type: string
|
||||
required: true
|
||||
cpu:
|
||||
type: string
|
||||
required: true
|
||||
assertions:
|
||||
type: boolean
|
||||
canary:
|
||||
type: boolean
|
||||
no-cache:
|
||||
type: boolean
|
||||
|
||||
env:
|
||||
LLVM_VERSION: 18
|
||||
BUN_VERSION: 1.1.8
|
||||
LC_CTYPE: "en_US.UTF-8"
|
||||
LC_ALL: "en_US.UTF-8"
|
||||
# LTO is disabled because we cannot use lld on macOS currently
|
||||
BUN_ENABLE_LTO: "0"
|
||||
|
||||
jobs:
|
||||
build-submodules:
|
||||
name: Build Submodules
|
||||
runs-on: ${{ inputs.runs-on }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: |
|
||||
.gitmodules
|
||||
src/deps
|
||||
scripts
|
||||
- name: Hash Submodules
|
||||
id: hash
|
||||
run: |
|
||||
print_versions() {
|
||||
git submodule | grep -v WebKit
|
||||
echo "LLVM_VERSION=${{ env.LLVM_VERSION }}"
|
||||
cat $(echo scripts/build*.sh scripts/all-dependencies.sh | tr " " "\n" | sort)
|
||||
}
|
||||
echo "hash=$(print_versions | shasum)" >> $GITHUB_OUTPUT
|
||||
- name: Install Dependencies
|
||||
env:
|
||||
HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK: 1
|
||||
HOMEBREW_NO_AUTO_UPDATE: 1
|
||||
HOMEBREW_NO_INSTALL_CLEANUP: 1
|
||||
run: |
|
||||
brew install \
|
||||
llvm@${{ env.LLVM_VERSION }} \
|
||||
ccache \
|
||||
rust \
|
||||
pkg-config \
|
||||
coreutils \
|
||||
libtool \
|
||||
cmake \
|
||||
libiconv \
|
||||
automake \
|
||||
openssl@1.1 \
|
||||
ninja \
|
||||
golang \
|
||||
gnu-sed --force --overwrite
|
||||
echo "$(brew --prefix ccache)/bin" >> $GITHUB_PATH
|
||||
echo "$(brew --prefix coreutils)/libexec/gnubin" >> $GITHUB_PATH
|
||||
echo "$(brew --prefix llvm@$LLVM_VERSION)/bin" >> $GITHUB_PATH
|
||||
brew link --overwrite llvm@$LLVM_VERSION
|
||||
- name: Clone Submodules
|
||||
run: |
|
||||
./scripts/update-submodules.sh
|
||||
- name: Build Submodules
|
||||
env:
|
||||
CPU_TARGET: ${{ inputs.cpu }}
|
||||
BUN_DEPS_OUT_DIR: ${{ runner.temp }}/bun-deps
|
||||
run: |
|
||||
mkdir -p $BUN_DEPS_OUT_DIR
|
||||
./scripts/all-dependencies.sh
|
||||
- name: Upload bun-${{ inputs.tag }}-deps
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-deps
|
||||
path: ${{ runner.temp }}/bun-deps
|
||||
if-no-files-found: error
|
||||
build-cpp:
|
||||
name: Build C++
|
||||
runs-on: ${{ inputs.runs-on }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
# TODO: Figure out how to cache homebrew dependencies
|
||||
- name: Install Dependencies
|
||||
env:
|
||||
HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK: 1
|
||||
HOMEBREW_NO_AUTO_UPDATE: 1
|
||||
HOMEBREW_NO_INSTALL_CLEANUP: 1
|
||||
run: |
|
||||
brew install \
|
||||
llvm@${{ env.LLVM_VERSION }} \
|
||||
ccache \
|
||||
rust \
|
||||
pkg-config \
|
||||
coreutils \
|
||||
libtool \
|
||||
cmake \
|
||||
libiconv \
|
||||
automake \
|
||||
openssl@1.1 \
|
||||
ninja \
|
||||
golang \
|
||||
gnu-sed --force --overwrite
|
||||
echo "$(brew --prefix ccache)/bin" >> $GITHUB_PATH
|
||||
echo "$(brew --prefix coreutils)/libexec/gnubin" >> $GITHUB_PATH
|
||||
echo "$(brew --prefix llvm@$LLVM_VERSION)/bin" >> $GITHUB_PATH
|
||||
brew link --overwrite llvm@$LLVM_VERSION
|
||||
- name: Setup Bun
|
||||
uses: ./.github/actions/setup-bun
|
||||
with:
|
||||
bun-version: ${{ env.BUN_VERSION }}
|
||||
- name: Compile
|
||||
env:
|
||||
CPU_TARGET: ${{ inputs.cpu }}
|
||||
SOURCE_DIR: ${{ github.workspace }}
|
||||
OBJ_DIR: ${{ runner.temp }}/bun-cpp-obj
|
||||
BUN_DEPS_OUT_DIR: ${{ runner.temp }}/bun-deps
|
||||
CCACHE_DIR: ${{ runner.temp }}/ccache
|
||||
run: |
|
||||
mkdir -p $OBJ_DIR
|
||||
cd $OBJ_DIR
|
||||
cmake -S $SOURCE_DIR -B $OBJ_DIR \
|
||||
-G Ninja \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DUSE_LTO=ON \
|
||||
-DBUN_CPP_ONLY=1 \
|
||||
-DNO_CONFIGURE_DEPENDS=1
|
||||
chmod +x compile-cpp-only.sh
|
||||
./compile-cpp-only.sh -v
|
||||
- name: Upload bun-${{ inputs.tag }}-cpp
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-cpp
|
||||
path: ${{ runner.temp }}/bun-cpp-obj/bun-cpp-objects.a
|
||||
if-no-files-found: error
|
||||
build-zig:
|
||||
name: Build Zig
|
||||
uses: ./.github/workflows/build-zig.yml
|
||||
with:
|
||||
os: darwin
|
||||
only-zig: true
|
||||
tag: ${{ inputs.tag }}
|
||||
arch: ${{ inputs.arch }}
|
||||
cpu: ${{ inputs.cpu }}
|
||||
assertions: ${{ inputs.assertions }}
|
||||
canary: ${{ inputs.canary }}
|
||||
no-cache: ${{ inputs.no-cache }}
|
||||
link:
|
||||
name: Link
|
||||
runs-on: ${{ inputs.runs-on }}
|
||||
needs:
|
||||
- build-submodules
|
||||
- build-cpp
|
||||
- build-zig
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
# TODO: Figure out how to cache homebrew dependencies
|
||||
- name: Install Dependencies
|
||||
env:
|
||||
HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK: 1
|
||||
HOMEBREW_NO_AUTO_UPDATE: 1
|
||||
HOMEBREW_NO_INSTALL_CLEANUP: 1
|
||||
run: |
|
||||
brew install \
|
||||
llvm@${{ env.LLVM_VERSION }} \
|
||||
ccache \
|
||||
rust \
|
||||
pkg-config \
|
||||
coreutils \
|
||||
libtool \
|
||||
cmake \
|
||||
libiconv \
|
||||
automake \
|
||||
openssl@1.1 \
|
||||
ninja \
|
||||
golang \
|
||||
gnu-sed --force --overwrite
|
||||
echo "$(brew --prefix ccache)/bin" >> $GITHUB_PATH
|
||||
echo "$(brew --prefix coreutils)/libexec/gnubin" >> $GITHUB_PATH
|
||||
echo "$(brew --prefix llvm@$LLVM_VERSION)/bin" >> $GITHUB_PATH
|
||||
brew link --overwrite llvm@$LLVM_VERSION
|
||||
- name: Setup Bun
|
||||
uses: ./.github/actions/setup-bun
|
||||
with:
|
||||
bun-version: ${{ env.BUN_VERSION }}
|
||||
- name: Download bun-${{ inputs.tag }}-deps
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-deps
|
||||
path: ${{ runner.temp }}/bun-deps
|
||||
- name: Download bun-${{ inputs.tag }}-cpp
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-cpp
|
||||
path: ${{ runner.temp }}/bun-cpp-obj
|
||||
- name: Download bun-${{ inputs.tag }}-zig
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-zig
|
||||
path: ${{ runner.temp }}/release
|
||||
- name: Link
|
||||
env:
|
||||
CPU_TARGET: ${{ inputs.cpu }}
|
||||
run: |
|
||||
SRC_DIR=$PWD
|
||||
mkdir ${{ runner.temp }}/link-build
|
||||
cd ${{ runner.temp }}/link-build
|
||||
cmake $SRC_DIR \
|
||||
-G Ninja \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DUSE_LTO=ON \
|
||||
-DBUN_LINK_ONLY=1 \
|
||||
-DBUN_ZIG_OBJ_DIR="${{ runner.temp }}/release" \
|
||||
-DBUN_CPP_ARCHIVE="${{ runner.temp }}/bun-cpp-obj/bun-cpp-objects.a" \
|
||||
-DBUN_DEPS_OUT_DIR="${{ runner.temp }}/bun-deps" \
|
||||
-DNO_CONFIGURE_DEPENDS=1
|
||||
ninja -v
|
||||
- name: Prepare
|
||||
run: |
|
||||
cd ${{ runner.temp }}/link-build
|
||||
chmod +x bun-profile bun
|
||||
mkdir -p bun-${{ inputs.tag }}-profile/ bun-${{ inputs.tag }}/
|
||||
mv bun-profile bun-${{ inputs.tag }}-profile/bun-profile
|
||||
if [[ -f bun-profile.dSYM || -d bun-profile.dSYM ]]; then
|
||||
mv bun-profile.dSYM bun-${{ inputs.tag }}-profile/bun-profile.dSYM
|
||||
fi
|
||||
if [[ -f bun.dSYM || -d bun.dSYM ]]; then
|
||||
mv bun.dSYM bun-${{ inputs.tag }}/bun.dSYM
|
||||
fi
|
||||
mv bun bun-${{ inputs.tag }}/bun
|
||||
zip -r bun-${{ inputs.tag }}-profile.zip bun-${{ inputs.tag }}-profile
|
||||
zip -r bun-${{ inputs.tag }}.zip bun-${{ inputs.tag }}
|
||||
- name: Upload bun-${{ inputs.tag }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}
|
||||
path: ${{ runner.temp }}/link-build/bun-${{ inputs.tag }}.zip
|
||||
if-no-files-found: error
|
||||
- name: Upload bun-${{ inputs.tag }}-profile
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-profile
|
||||
path: ${{ runner.temp }}/link-build/bun-${{ inputs.tag }}-profile.zip
|
||||
if-no-files-found: error
|
||||
on-failure:
|
||||
if: ${{ github.repository_owner == 'oven-sh' && failure() }}
|
||||
name: On Failure
|
||||
needs: link
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Send Message
|
||||
uses: sarisia/actions-status-discord@v1
|
||||
with:
|
||||
webhook: ${{ secrets.DISCORD_WEBHOOK }}
|
||||
nodetail: true
|
||||
color: "#FF0000"
|
||||
title: ""
|
||||
description: |
|
||||
### ❌ [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})
|
||||
|
||||
@${{ github.actor }}, the build for bun-${{ inputs.tag }} failed.
|
||||
|
||||
**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})**
|
||||
.github/workflows/build-linux.yml (vendored, new file, 64 lines)
@@ -0,0 +1,64 @@
name: Build Linux
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
actions: write
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runs-on:
|
||||
type: string
|
||||
required: true
|
||||
tag:
|
||||
type: string
|
||||
required: true
|
||||
arch:
|
||||
type: string
|
||||
required: true
|
||||
cpu:
|
||||
type: string
|
||||
required: true
|
||||
assertions:
|
||||
type: boolean
|
||||
zig-optimize:
|
||||
type: string
|
||||
canary:
|
||||
type: boolean
|
||||
no-cache:
|
||||
type: boolean
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build Linux
|
||||
uses: ./.github/workflows/build-zig.yml
|
||||
with:
|
||||
os: linux
|
||||
only-zig: false
|
||||
runs-on: ${{ inputs.runs-on }}
|
||||
tag: ${{ inputs.tag }}
|
||||
arch: ${{ inputs.arch }}
|
||||
cpu: ${{ inputs.cpu }}
|
||||
assertions: ${{ inputs.assertions }}
|
||||
zig-optimize: ${{ inputs.zig-optimize }}
|
||||
canary: ${{ inputs.canary }}
|
||||
no-cache: ${{ inputs.no-cache }}
|
||||
on-failure:
|
||||
if: ${{ github.repository_owner == 'oven-sh' && failure() }}
|
||||
name: On Failure
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Send Message
|
||||
uses: sarisia/actions-status-discord@v1
|
||||
with:
|
||||
webhook: ${{ secrets.DISCORD_WEBHOOK }}
|
||||
nodetail: true
|
||||
color: "#FF0000"
|
||||
title: ""
|
||||
description: |
|
||||
### ❌ [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})
|
||||
|
||||
@${{ github.actor }}, the build for bun-${{ inputs.tag }} failed.
|
||||
|
||||
**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})**
|
||||
.github/workflows/build-windows.yml (vendored, new file, 348 lines)
@@ -0,0 +1,348 @@
name: Build Windows
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
actions: write
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runs-on:
|
||||
type: string
|
||||
default: windows
|
||||
tag:
|
||||
type: string
|
||||
required: true
|
||||
arch:
|
||||
type: string
|
||||
required: true
|
||||
cpu:
|
||||
type: string
|
||||
required: true
|
||||
assertions:
|
||||
type: boolean
|
||||
canary:
|
||||
type: boolean
|
||||
no-cache:
|
||||
type: boolean
|
||||
bun-version:
|
||||
type: string
|
||||
default: 1.1.7
|
||||
|
||||
env:
|
||||
# Must specify exact version of LLVM for Windows
|
||||
LLVM_VERSION: 18.1.8
|
||||
BUN_VERSION: ${{ inputs.bun-version }}
|
||||
BUN_GARBAGE_COLLECTOR_LEVEL: 1
|
||||
BUN_FEATURE_FLAG_INTERNAL_FOR_TESTING: 1
|
||||
CI: true
|
||||
USE_LTO: 1
|
||||
|
||||
jobs:
|
||||
build-submodules:
|
||||
name: Build Submodules
|
||||
runs-on: ${{ inputs.runs-on }}
|
||||
steps:
|
||||
- name: Install Scoop
|
||||
run: |
|
||||
Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
|
||||
Invoke-RestMethod -Uri https://get.scoop.sh | Invoke-Expression
|
||||
Join-Path (Resolve-Path ~).Path "scoop\shims" >> $Env:GITHUB_PATH
|
||||
- name: Setup Git
|
||||
run: |
|
||||
git config --global core.autocrlf false
|
||||
git config --global core.eol lf
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: |
|
||||
.gitmodules
|
||||
src/deps
|
||||
scripts
|
||||
- name: Hash Submodules
|
||||
id: hash
|
||||
run: |
|
||||
$data = "$(& {
|
||||
git submodule | Where-Object { $_ -notmatch 'WebKit' }
|
||||
echo "LLVM_VERSION=${{ env.LLVM_VERSION }}"
|
||||
Get-Content -Path (Get-ChildItem -Path 'scripts/build*.ps1', 'scripts/all-dependencies.ps1', 'scripts/env.ps1' | Sort-Object -Property Name).FullName | Out-String
|
||||
echo 1
|
||||
})"
|
||||
$hash = ( -join ((New-Object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider).ComputeHash([System.Text.Encoding]::UTF8.GetBytes($data)) | ForEach-Object { $_.ToString("x2") } )).Substring(0, 10)
|
||||
echo "hash=${hash}" >> $env:GITHUB_OUTPUT
|
||||
- if: ${{ !inputs.no-cache }}
|
||||
name: Restore Cache
|
||||
id: cache
|
||||
uses: actions/cache/restore@v4
|
||||
with:
|
||||
path: bun-deps
|
||||
key: bun-${{ inputs.tag }}-deps-${{ steps.hash.outputs.hash }}
|
||||
- if: ${{ inputs.no-cache || !steps.cache.outputs.cache-hit }}
|
||||
name: Install LLVM and Ninja
|
||||
run: |
|
||||
scoop install ninja
|
||||
scoop install llvm@${{ env.LLVM_VERSION }}
|
||||
scoop install nasm@2.16.01
|
||||
- if: ${{ inputs.no-cache || !steps.cache.outputs.cache-hit }}
|
||||
name: Clone Submodules
|
||||
run: |
|
||||
.\scripts\update-submodules.ps1
|
||||
- if: ${{ inputs.no-cache || !steps.cache.outputs.cache-hit }}
|
||||
name: Build Dependencies
|
||||
env:
|
||||
CPU_TARGET: ${{ inputs.cpu }}
|
||||
CCACHE_DIR: ccache
|
||||
USE_LTO: 1
|
||||
run: |
|
||||
.\scripts\env.ps1 ${{ contains(inputs.tag, '-baseline') && '-Baseline' || '' }}
|
||||
$env:BUN_DEPS_OUT_DIR = (mkdir -Force "./bun-deps")
|
||||
.\scripts\all-dependencies.ps1
|
||||
- name: Save Cache
|
||||
if: ${{ inputs.no-cache || !steps.cache.outputs.cache-hit }}
|
||||
uses: actions/cache/save@v4
|
||||
with:
|
||||
path: bun-deps
|
||||
key: ${{ steps.cache.outputs.cache-primary-key }}
|
||||
- name: Upload bun-${{ inputs.tag }}-deps
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-deps
|
||||
path: bun-deps
|
||||
if-no-files-found: error
|
||||
codegen:
|
||||
name: Codegen
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Setup Git
|
||||
run: |
|
||||
git config --global core.autocrlf false
|
||||
git config --global core.eol lf
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: Setup Bun
|
||||
uses: ./.github/actions/setup-bun
|
||||
with:
|
||||
bun-version: ${{ inputs.bun-version }}
|
||||
- name: Codegen
|
||||
run: |
|
||||
./scripts/cross-compile-codegen.sh win32 x64
|
||||
- if: ${{ inputs.canary }}
|
||||
name: Calculate Revision
|
||||
run: |
|
||||
echo "canary_revision=$(GITHUB_TOKEN="${{ github.token }}" bash ./scripts/calculate-canary-revision.sh --raw)" > build-codegen-win32-x64/.canary_revision
|
||||
- name: Upload bun-${{ inputs.tag }}-codegen
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-codegen
|
||||
path: build-codegen-win32-x64
|
||||
if-no-files-found: error
|
||||
build-cpp:
|
||||
name: Build C++
|
||||
needs: codegen
|
||||
runs-on: ${{ inputs.runs-on }}
|
||||
steps:
|
||||
- name: Install Scoop
|
||||
run: |
|
||||
Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
|
||||
Invoke-RestMethod -Uri https://get.scoop.sh | Invoke-Expression
|
||||
Join-Path (Resolve-Path ~).Path "scoop\shims" >> $Env:GITHUB_PATH
|
||||
- name: Setup Git
|
||||
run: |
|
||||
git config --global core.autocrlf false
|
||||
git config --global core.eol lf
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
- name: Install LLVM and Ninja
|
||||
run: |
|
||||
scoop install ninja
|
||||
scoop install llvm@${{ env.LLVM_VERSION }}
|
||||
- name: Setup Bun
|
||||
uses: ./.github/actions/setup-bun
|
||||
with:
|
||||
bun-version: ${{ inputs.bun-version }}
|
||||
- if: ${{ !inputs.no-cache }}
|
||||
name: Restore Cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ccache
|
||||
key: bun-${{ inputs.tag }}-cpp-${{ hashFiles('Dockerfile', 'Makefile', 'CMakeLists.txt', 'build.zig', 'scripts/**', 'src/**', 'packages/bun-usockets/src/**', 'packages/bun-uws/src/**') }}
|
||||
restore-keys: |
|
||||
bun-${{ inputs.tag }}-cpp-
|
||||
- name: Download bun-${{ inputs.tag }}-codegen
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-codegen
|
||||
path: build
|
||||
- name: Compile
|
||||
env:
|
||||
CPU_TARGET: ${{ inputs.cpu }}
|
||||
CCACHE_DIR: ccache
|
||||
USE_LTO: 1
|
||||
run: |
|
||||
# $CANARY_REVISION = if (Test-Path build/.canary_revision) { Get-Content build/.canary_revision } else { "0" }
|
||||
$CANARY_REVISION = 0
|
||||
.\scripts\env.ps1 ${{ contains(inputs.tag, '-baseline') && '-Baseline' || '' }}
|
||||
.\scripts\update-submodules.ps1
|
||||
.\scripts\build-libuv.ps1 -CloneOnly $True
|
||||
cd build
|
||||
cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release `
|
||||
-DNO_CODEGEN=1 `
|
||||
-DUSE_LTO=1 `
|
||||
-DNO_CONFIGURE_DEPENDS=1 `
|
||||
"-DCANARY=${CANARY_REVISION}" `
|
||||
-DBUN_CPP_ONLY=1 ${{ contains(inputs.tag, '-baseline') && '-DUSE_BASELINE_BUILD=1' || '' }}
|
||||
if ($LASTEXITCODE -ne 0) { throw "CMake configuration failed" }
|
||||
.\compile-cpp-only.ps1 -v
|
||||
if ($LASTEXITCODE -ne 0) { throw "C++ compilation failed" }
|
||||
- name: Upload bun-${{ inputs.tag }}-cpp
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-cpp
|
||||
path: build/bun-cpp-objects.a
|
||||
if-no-files-found: error
|
||||
build-zig:
|
||||
name: Build Zig
|
||||
uses: ./.github/workflows/build-zig.yml
|
||||
with:
|
||||
os: windows
|
||||
zig-optimize: ReleaseSafe
|
||||
only-zig: true
|
||||
tag: ${{ inputs.tag }}
|
||||
arch: ${{ inputs.arch }}
|
||||
cpu: ${{ inputs.cpu }}
|
||||
assertions: ${{ inputs.assertions }}
|
||||
canary: ${{ inputs.canary }}
|
||||
no-cache: ${{ inputs.no-cache }}
|
||||
link:
|
||||
name: Link
|
||||
runs-on: ${{ inputs.runs-on }}
|
||||
needs:
|
||||
- build-submodules
|
||||
- build-cpp
|
||||
- build-zig
|
||||
- codegen
|
||||
steps:
|
||||
- name: Install Scoop
|
||||
run: |
|
||||
Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
|
||||
Invoke-RestMethod -Uri https://get.scoop.sh | Invoke-Expression
|
||||
Join-Path (Resolve-Path ~).Path "scoop\shims" >> $Env:GITHUB_PATH
|
||||
- name: Setup Git
|
||||
run: |
|
||||
git config --global core.autocrlf false
|
||||
git config --global core.eol lf
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
- name: Install Ninja
|
||||
run: |
|
||||
scoop install ninja
|
||||
scoop install llvm@${{ env.LLVM_VERSION }}
|
||||
- name: Setup Bun
|
||||
uses: ./.github/actions/setup-bun
|
||||
with:
|
||||
bun-version: ${{ inputs.bun-version }}
|
||||
- name: Download bun-${{ inputs.tag }}-deps
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-deps
|
||||
path: bun-deps
|
||||
- name: Download bun-${{ inputs.tag }}-cpp
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-cpp
|
||||
path: bun-cpp
|
||||
- name: Download bun-${{ inputs.tag }}-zig
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-zig
|
||||
path: bun-zig
|
||||
- name: Download bun-${{ inputs.tag }}-codegen
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-codegen
|
||||
path: build
|
||||
- if: ${{ !inputs.no-cache }}
|
||||
name: Restore Cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ccache
|
||||
key: bun-${{ inputs.tag }}-cpp-${{ hashFiles('Dockerfile', 'Makefile', 'CMakeLists.txt', 'build.zig', 'scripts/**', 'src/**', 'packages/bun-usockets/src/**', 'packages/bun-uws/src/**') }}
|
||||
restore-keys: |
|
||||
bun-${{ inputs.tag }}-cpp-
|
||||
- name: Link
|
||||
env:
|
||||
CPU_TARGET: ${{ inputs.cpu }}
|
||||
CCACHE_DIR: ccache
|
||||
run: |
|
||||
.\scripts\update-submodules.ps1
|
||||
.\scripts\env.ps1 ${{ contains(inputs.tag, '-baseline') && '-Baseline' || '' }}
|
||||
Set-Location build
|
||||
# $CANARY_REVISION = if (Test-Path build/.canary_revision) { Get-Content build/.canary_revision } else { "0" }
|
||||
$CANARY_REVISION = 0
|
||||
cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release `
|
||||
-DNO_CODEGEN=1 `
|
||||
-DNO_CONFIGURE_DEPENDS=1 `
|
||||
"-DCANARY=${CANARY_REVISION}" `
|
||||
-DBUN_LINK_ONLY=1 `
|
||||
-DUSE_LTO=1 `
|
||||
"-DBUN_DEPS_OUT_DIR=$(Resolve-Path ../bun-deps)" `
|
||||
"-DBUN_CPP_ARCHIVE=$(Resolve-Path ../bun-cpp/bun-cpp-objects.a)" `
|
||||
"-DBUN_ZIG_OBJ_DIR=$(Resolve-Path ../bun-zig)" `
|
||||
${{ contains(inputs.tag, '-baseline') && '-DUSE_BASELINE_BUILD=1' || '' }}
|
||||
if ($LASTEXITCODE -ne 0) { throw "CMake configuration failed" }
|
||||
ninja -v
|
||||
if ($LASTEXITCODE -ne 0) { throw "Link failed!" }
|
||||
- name: Prepare
|
||||
run: |
|
||||
$Dist = mkdir -Force "bun-${{ inputs.tag }}"
|
||||
cp -r build\bun.exe "$Dist\bun.exe"
|
||||
Compress-Archive -Force "$Dist" "${Dist}.zip"
|
||||
$Dist = "$Dist-profile"
|
||||
MkDir -Force "$Dist"
|
||||
cp -r build\bun.exe "$Dist\bun.exe"
|
||||
cp -r build\bun.pdb "$Dist\bun.pdb"
|
||||
Compress-Archive -Force "$Dist" "$Dist.zip"
|
||||
.\build\bun.exe --print "JSON.stringify(require('bun:internal-for-testing').crash_handler.getFeatureData())" > .\features.json
|
||||
- name: Upload bun-${{ inputs.tag }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}
|
||||
path: bun-${{ inputs.tag }}.zip
|
||||
if-no-files-found: error
|
||||
- name: Upload bun-${{ inputs.tag }}-profile
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-profile
|
||||
path: bun-${{ inputs.tag }}-profile.zip
|
||||
if-no-files-found: error
|
||||
- name: Upload bun-feature-data
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bun-feature-data
|
||||
path: features.json
|
||||
if-no-files-found: error
|
||||
overwrite: true
|
||||
on-failure:
|
||||
if: ${{ github.repository_owner == 'oven-sh' && failure() }}
|
||||
name: On Failure
|
||||
needs: link
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Send Message
|
||||
uses: sarisia/actions-status-discord@v1
|
||||
with:
|
||||
webhook: ${{ secrets.DISCORD_WEBHOOK }}
|
||||
nodetail: true
|
||||
color: "#FF0000"
|
||||
title: ""
|
||||
description: |
|
||||
### ❌ [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})
|
||||
|
||||
@${{ github.actor }}, the build for bun-${{ inputs.tag }} failed.
|
||||
|
||||
**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})**
|
||||
.github/workflows/build-zig.yml (vendored, new file, 122 lines)
@@ -0,0 +1,122 @@
name: Build Zig
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
actions: write
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runs-on:
|
||||
type: string
|
||||
default: ${{ github.repository_owner != 'oven-sh' && 'ubuntu-latest' || inputs.only-zig && 'namespace-profile-bun-ci-linux-x64' || inputs.arch == 'x64' && 'namespace-profile-bun-ci-linux-x64' || 'namespace-profile-bun-ci-linux-aarch64' }}
|
||||
tag:
|
||||
type: string
|
||||
required: true
|
||||
os:
|
||||
type: string
|
||||
required: true
|
||||
arch:
|
||||
type: string
|
||||
required: true
|
||||
cpu:
|
||||
type: string
|
||||
required: true
|
||||
assertions:
|
||||
type: boolean
|
||||
default: false
|
||||
zig-optimize:
|
||||
type: string # 'ReleaseSafe' or 'ReleaseFast'
|
||||
default: ReleaseFast
|
||||
canary:
|
||||
type: boolean
|
||||
default: ${{ github.ref == 'refs/heads/main' }}
|
||||
only-zig:
|
||||
type: boolean
|
||||
default: true
|
||||
no-cache:
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
jobs:
|
||||
build-zig:
|
||||
name: ${{ inputs.only-zig && 'Build Zig' || 'Build & Link' }}
|
||||
runs-on: ${{ inputs.runs-on }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: Calculate Cache Key
|
||||
id: cache
|
||||
run: |
|
||||
echo "key=${{ hashFiles('Dockerfile', 'Makefile', 'CMakeLists.txt', 'build.zig', 'scripts/**', 'src/**', 'packages/bun-usockets/src/**', 'packages/bun-uws/src/**') }}" >> $GITHUB_OUTPUT
|
||||
- if: ${{ !inputs.no-cache }}
|
||||
name: Restore Cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
key: bun-${{ inputs.tag }}-docker-${{ steps.cache.outputs.key }}
|
||||
restore-keys: |
|
||||
bun-${{ inputs.tag }}-docker-
|
||||
path: |
|
||||
${{ runner.temp }}/dockercache
|
||||
- name: Setup Docker
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
install: true
|
||||
platforms: |
|
||||
linux/${{ runner.arch == 'X64' && 'amd64' || 'arm64' }}
|
||||
- name: Build
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
push: false
|
||||
target: ${{ inputs.only-zig && 'build_release_obj' || 'artifact' }}
|
||||
cache-from: |
|
||||
type=local,src=${{ runner.temp }}/dockercache
|
||||
cache-to: |
|
||||
type=local,dest=${{ runner.temp }}/dockercache,mode=max
|
||||
outputs: |
|
||||
type=local,dest=${{ runner.temp }}/release
|
||||
platforms: |
|
||||
linux/${{ runner.arch == 'X64' && 'amd64' || 'arm64' }}
|
||||
build-args: |
|
||||
GIT_SHA=${{ github.event.workflow_run.head_sha || github.sha }}
|
||||
TRIPLET=${{ inputs.os == 'darwin' && format('{0}-macos-none', inputs.arch == 'x64' && 'x86_64' || 'aarch64') || inputs.os == 'windows' && format('{0}-windows-msvc', inputs.arch == 'x64' && 'x86_64' || 'aarch64') || format('{0}-linux-gnu', inputs.arch == 'x64' && 'x86_64' || 'aarch64') }}
|
||||
ARCH=${{ inputs.arch == 'x64' && 'x86_64' || 'aarch64' }}
|
||||
BUILDARCH=${{ inputs.arch == 'x64' && 'amd64' || 'arm64' }}
|
||||
BUILD_MACHINE_ARCH=${{ inputs.arch == 'x64' && 'x86_64' || 'aarch64' }}
|
||||
CPU_TARGET=${{ inputs.arch == 'x64' && inputs.cpu || 'native' }}
|
||||
ASSERTIONS=${{ inputs.assertions && 'ON' || 'OFF' }}
|
||||
ZIG_OPTIMIZE=${{ inputs.zig-optimize }}
|
||||
CANARY=${{ inputs.canary && '1' || '0' }}
|
||||
- if: ${{ inputs.only-zig }}
|
||||
name: Upload bun-${{ inputs.tag }}-zig
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-zig
|
||||
path: ${{ runner.temp }}/release/bun-zig.o
|
||||
if-no-files-found: error
|
||||
- if: ${{ !inputs.only-zig }}
|
||||
name: Prepare
|
||||
run: |
|
||||
cd ${{ runner.temp }}/release
|
||||
chmod +x bun-profile bun
|
||||
mkdir bun-${{ inputs.tag }}-profile
|
||||
mkdir bun-${{ inputs.tag }}
|
||||
strip bun
|
||||
mv bun-profile bun-${{ inputs.tag }}-profile/bun-profile
|
||||
mv bun bun-${{ inputs.tag }}/bun
|
||||
zip -r bun-${{ inputs.tag }}-profile.zip bun-${{ inputs.tag }}-profile
|
||||
zip -r bun-${{ inputs.tag }}.zip bun-${{ inputs.tag }}
|
||||
- if: ${{ !inputs.only-zig }}
|
||||
name: Upload bun-${{ inputs.tag }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}
|
||||
path: ${{ runner.temp }}/release/bun-${{ inputs.tag }}.zip
|
||||
if-no-files-found: error
|
||||
- if: ${{ !inputs.only-zig }}
|
||||
name: Upload bun-${{ inputs.tag }}-profile
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-profile
|
||||
path: ${{ runner.temp }}/release/bun-${{ inputs.tag }}-profile.zip
|
||||
if-no-files-found: error
|
||||
.github/workflows/ci.yml (vendored, new file, 245 lines)
@@ -0,0 +1,245 @@
name: CI
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
actions: write
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event_name == 'workflow_dispatch' && inputs.run-id || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
run-id:
|
||||
type: string
|
||||
description: The workflow ID to download artifacts (skips the build step)
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- .vscode/**/*
|
||||
- docs/**/*
|
||||
- examples/**/*
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths-ignore:
|
||||
- .vscode/**/*
|
||||
- docs/**/*
|
||||
- examples/**/*
|
||||
|
||||
jobs:
|
||||
format:
|
||||
if: ${{ !inputs.run-id }}
|
||||
name: Format
|
||||
uses: ./.github/workflows/run-format.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
zig-version: 0.13.0
|
||||
permissions:
|
||||
contents: write
|
||||
lint:
|
||||
if: ${{ !inputs.run-id }}
|
||||
name: Lint
|
||||
uses: ./.github/workflows/run-lint.yml
|
||||
secrets: inherit
|
||||
linux-x64:
|
||||
if: ${{ !inputs.run-id }}
|
||||
name: Build linux-x64
|
||||
uses: ./.github/workflows/build-linux.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
|
||||
tag: linux-x64
|
||||
arch: x64
|
||||
cpu: haswell
|
||||
canary: true
|
||||
no-cache: true
|
||||
linux-x64-baseline:
|
||||
if: ${{ !inputs.run-id }}
|
||||
name: Build linux-x64-baseline
|
||||
uses: ./.github/workflows/build-linux.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
|
||||
tag: linux-x64-baseline
|
||||
arch: x64
|
||||
cpu: nehalem
|
||||
canary: true
|
||||
no-cache: true
|
||||
linux-aarch64:
|
||||
if: ${{ !inputs.run-id && github.repository_owner == 'oven-sh' }}
|
||||
name: Build linux-aarch64
|
||||
uses: ./.github/workflows/build-linux.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: namespace-profile-bun-ci-linux-aarch64
|
||||
tag: linux-aarch64
|
||||
arch: aarch64
|
||||
cpu: native
|
||||
canary: true
|
||||
no-cache: true
|
||||
darwin-x64:
|
||||
if: ${{ !inputs.run-id }}
|
||||
name: Build darwin-x64
|
||||
uses: ./.github/workflows/build-darwin.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
|
||||
tag: darwin-x64
|
||||
arch: x64
|
||||
cpu: haswell
|
||||
canary: true
|
||||
darwin-x64-baseline:
|
||||
if: ${{ !inputs.run-id }}
|
||||
name: Build darwin-x64-baseline
|
||||
uses: ./.github/workflows/build-darwin.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
|
||||
tag: darwin-x64-baseline
|
||||
arch: x64
|
||||
cpu: nehalem
|
||||
canary: true
|
||||
darwin-aarch64:
|
||||
if: ${{ !inputs.run-id }}
|
||||
name: Build darwin-aarch64
|
||||
uses: ./.github/workflows/build-darwin.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-darwin-aarch64' || 'macos-13' }}
|
||||
tag: darwin-aarch64
|
||||
arch: aarch64
|
||||
cpu: native
|
||||
canary: true
|
||||
windows-x64:
|
||||
if: ${{ !inputs.run-id }}
|
||||
name: Build windows-x64
|
||||
uses: ./.github/workflows/build-windows.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: windows
|
||||
tag: windows-x64
|
||||
arch: x64
|
||||
cpu: haswell
|
||||
canary: true
|
||||
windows-x64-baseline:
|
||||
if: ${{ !inputs.run-id }}
|
||||
name: Build windows-x64-baseline
|
||||
uses: ./.github/workflows/build-windows.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: windows
|
||||
tag: windows-x64-baseline
|
||||
arch: x64
|
||||
cpu: nehalem
|
||||
canary: true
|
||||
linux-x64-test:
|
||||
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
|
||||
name: Test linux-x64
|
||||
needs: linux-x64
|
||||
uses: ./.github/workflows/run-test.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
run-id: ${{ inputs.run-id }}
|
||||
pr-number: ${{ github.event.number }}
|
||||
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
|
||||
tag: linux-x64
|
||||
linux-x64-baseline-test:
|
||||
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
|
||||
name: Test linux-x64-baseline
|
||||
needs: linux-x64-baseline
|
||||
uses: ./.github/workflows/run-test.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
run-id: ${{ inputs.run-id }}
|
||||
pr-number: ${{ github.event.number }}
|
||||
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
|
||||
tag: linux-x64-baseline
|
||||
linux-aarch64-test:
|
||||
if: ${{ inputs.run-id || github.event_name == 'pull_request' && github.repository_owner == 'oven-sh'}}
|
||||
name: Test linux-aarch64
|
||||
needs: linux-aarch64
|
||||
uses: ./.github/workflows/run-test.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
run-id: ${{ inputs.run-id }}
|
||||
pr-number: ${{ github.event.number }}
|
||||
runs-on: namespace-profile-bun-ci-linux-aarch64
|
||||
tag: linux-aarch64
|
||||
darwin-x64-test:
|
||||
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
|
||||
name: Test darwin-x64
|
||||
needs: darwin-x64
|
||||
uses: ./.github/workflows/run-test.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
run-id: ${{ inputs.run-id }}
|
||||
pr-number: ${{ github.event.number }}
|
||||
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
|
||||
tag: darwin-x64
|
||||
darwin-x64-baseline-test:
|
||||
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
|
||||
name: Test darwin-x64-baseline
|
||||
needs: darwin-x64-baseline
|
||||
uses: ./.github/workflows/run-test.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
run-id: ${{ inputs.run-id }}
|
||||
pr-number: ${{ github.event.number }}
|
||||
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
|
||||
tag: darwin-x64-baseline
|
||||
darwin-aarch64-test:
|
||||
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
|
||||
name: Test darwin-aarch64
|
||||
needs: darwin-aarch64
|
||||
uses: ./.github/workflows/run-test.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
run-id: ${{ inputs.run-id }}
|
||||
pr-number: ${{ github.event.number }}
|
||||
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-darwin-aarch64' || 'macos-13' }}
|
||||
tag: darwin-aarch64
|
||||
windows-x64-test:
|
||||
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
|
||||
name: Test windows-x64
|
||||
needs: windows-x64
|
||||
uses: ./.github/workflows/run-test.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
run-id: ${{ inputs.run-id }}
|
||||
pr-number: ${{ github.event.number }}
|
||||
runs-on: windows
|
||||
tag: windows-x64
|
||||
windows-x64-baseline-test:
|
||||
if: ${{ inputs.run-id || github.event_name == 'pull_request' }}
|
||||
name: Test windows-x64-baseline
|
||||
needs: windows-x64-baseline
|
||||
uses: ./.github/workflows/run-test.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
run-id: ${{ inputs.run-id }}
|
||||
pr-number: ${{ github.event.number }}
|
||||
runs-on: windows
|
||||
tag: windows-x64-baseline
|
||||
cleanup:
|
||||
if: ${{ always() }}
|
||||
name: Cleanup
|
||||
needs:
|
||||
- linux-x64
|
||||
- linux-x64-baseline
|
||||
- linux-aarch64
|
||||
- darwin-x64
|
||||
- darwin-x64-baseline
|
||||
- darwin-aarch64
|
||||
- windows-x64
|
||||
- windows-x64-baseline
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Cleanup Artifacts
|
||||
uses: geekyeggo/delete-artifact@v5
|
||||
with:
|
||||
name: |
|
||||
bun-*-cpp
|
||||
bun-*-zig
|
||||
bun-*-deps
|
||||
bun-*-codegen
|
||||
.github/workflows/comment.yml (vendored, new file, 55 lines)
@@ -0,0 +1,55 @@
|
||||
name: Comment
|
||||
|
||||
permissions:
|
||||
actions: read
|
||||
pull-requests: write
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows:
|
||||
- CI
|
||||
types:
|
||||
- completed
|
||||
|
||||
jobs:
|
||||
comment:
|
||||
if: ${{ github.repository_owner == 'oven-sh' }}
|
||||
name: Comment
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Download Tests
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: bun
|
||||
pattern: bun-*-tests
|
||||
github-token: ${{ github.token }}
|
||||
run-id: ${{ github.event.workflow_run.id }}
|
||||
- name: Setup Environment
|
||||
id: env
|
||||
shell: bash
|
||||
run: |
|
||||
echo "pr-number=$(<bun/bun-linux-x64-tests/pr-number.txt)" >> $GITHUB_OUTPUT
|
||||
- name: Generate Comment
|
||||
run: |
|
||||
cat bun/bun-*-tests/comment.md > comment.md
|
||||
if [ -s comment.md ]; then
|
||||
echo -e "❌ @${{ github.actor }}, your commit has failing tests :(\n\n$(cat comment.md)" > comment.md
|
||||
else
|
||||
echo -e "✅ @${{ github.actor }}, all tests passed!" > comment.md
|
||||
fi
|
||||
echo -e "\n**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.event.workflow_run.id }})**" >> comment.md
|
||||
echo -e "<!-- generated-comment workflow=${{ github.workflow }} -->" >> comment.md
|
||||
- name: Find Comment
|
||||
id: comment
|
||||
uses: peter-evans/find-comment@v3
|
||||
with:
|
||||
issue-number: ${{ steps.env.outputs.pr-number }}
|
||||
comment-author: github-actions[bot]
|
||||
body-includes: <!-- generated-comment workflow=${{ github.workflow }} -->
|
||||
- name: Write Comment
|
||||
uses: peter-evans/create-or-update-comment@v4
|
||||
with:
|
||||
comment-id: ${{ steps.comment.outputs.comment-id }}
|
||||
issue-number: ${{ steps.env.outputs.pr-number }}
|
||||
body-path: comment.md
|
||||
edit-mode: replace
|
||||
.github/workflows/create-release-build.yml (vendored, new file, 183 lines)
@@ -0,0 +1,183 @@
|
||||
name: Create Release Build
|
||||
run-name: Compile Bun v${{ inputs.version }} by ${{ github.actor }}
|
||||
|
||||
concurrency:
|
||||
group: release
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
actions: write
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version:
|
||||
type: string
|
||||
required: true
|
||||
description: "Release version. Example: 1.1.4. Exclude the 'v' prefix."
|
||||
tag:
|
||||
type: string
|
||||
required: true
|
||||
description: "GitHub tag to use"
|
||||
clobber:
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
description: "Overwrite existing release artifacts?"
|
||||
release:
|
||||
types:
|
||||
- created
|
||||
|
||||
jobs:
|
||||
notify-start:
|
||||
if: ${{ github.repository_owner == 'oven-sh' }}
|
||||
name: Notify Start
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Send Message
|
||||
uses: sarisia/actions-status-discord@v1
|
||||
with:
|
||||
webhook: ${{ secrets.DISCORD_WEBHOOK_PUBLIC }}
|
||||
nodetail: true
|
||||
color: "#1F6FEB"
|
||||
title: "Bun v${{ inputs.version }} is compiling"
|
||||
description: |
|
||||
### @${{ github.actor }} started compiling Bun v${{inputs.version}}
|
||||
- name: Send Message
|
||||
uses: sarisia/actions-status-discord@v1
|
||||
with:
|
||||
webhook: ${{ secrets.BUN_DISCORD_GITHUB_CHANNEL_WEBHOOK }}
|
||||
nodetail: true
|
||||
color: "#1F6FEB"
|
||||
title: "Bun v${{ inputs.version }} is compiling"
|
||||
description: |
|
||||
### @${{ github.actor }} started compiling Bun v${{inputs.version}}
|
||||
|
||||
**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})**
|
||||
linux-x64:
|
||||
name: Build linux-x64
|
||||
uses: ./.github/workflows/build-linux.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
|
||||
tag: linux-x64
|
||||
arch: x64
|
||||
cpu: haswell
|
||||
canary: false
|
||||
linux-x64-baseline:
|
||||
name: Build linux-x64-baseline
|
||||
uses: ./.github/workflows/build-linux.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-linux-x64' || 'ubuntu-latest' }}
|
||||
tag: linux-x64-baseline
|
||||
arch: x64
|
||||
cpu: nehalem
|
||||
canary: false
|
||||
linux-aarch64:
|
||||
name: Build linux-aarch64
|
||||
uses: ./.github/workflows/build-linux.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: namespace-profile-bun-ci-linux-aarch64
|
||||
tag: linux-aarch64
|
||||
arch: aarch64
|
||||
cpu: native
|
||||
canary: false
|
||||
darwin-x64:
|
||||
name: Build darwin-x64
|
||||
uses: ./.github/workflows/build-darwin.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
|
||||
tag: darwin-x64
|
||||
arch: x64
|
||||
cpu: haswell
|
||||
canary: false
|
||||
darwin-x64-baseline:
|
||||
name: Build darwin-x64-baseline
|
||||
uses: ./.github/workflows/build-darwin.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: ${{ github.repository_owner == 'oven-sh' && 'macos-13-large' || 'macos-13' }}
|
||||
tag: darwin-x64-baseline
|
||||
arch: x64
|
||||
cpu: nehalem
|
||||
canary: false
|
||||
darwin-aarch64:
|
||||
name: Build darwin-aarch64
|
||||
uses: ./.github/workflows/build-darwin.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: ${{ github.repository_owner == 'oven-sh' && 'namespace-profile-bun-ci-darwin-aarch64' || 'macos-13' }}
|
||||
tag: darwin-aarch64
|
||||
arch: aarch64
|
||||
cpu: native
|
||||
canary: false
|
||||
windows-x64:
|
||||
name: Build windows-x64
|
||||
uses: ./.github/workflows/build-windows.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: windows
|
||||
tag: windows-x64
|
||||
arch: x64
|
||||
cpu: haswell
|
||||
canary: false
|
||||
windows-x64-baseline:
|
||||
name: Build windows-x64-baseline
|
||||
uses: ./.github/workflows/build-windows.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
runs-on: windows
|
||||
tag: windows-x64-baseline
|
||||
arch: x64
|
||||
cpu: nehalem
|
||||
canary: false
|
||||
|
||||
upload-artifacts:
|
||||
needs:
|
||||
- linux-x64
|
||||
- linux-x64-baseline
|
||||
- linux-aarch64
|
||||
- darwin-x64
|
||||
- darwin-x64-baseline
|
||||
- darwin-aarch64
|
||||
- windows-x64
|
||||
- windows-x64-baseline
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Download Artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: bun-releases
|
||||
pattern: bun-*
|
||||
merge-multiple: true
|
||||
github-token: ${{ github.token }}
|
||||
- name: Check for Artifacts
|
||||
run: |
|
||||
if [ ! -d "bun-releases" ] || [ -z "$(ls -A bun-releases)" ]; then
|
||||
echo "Error: No artifacts were downloaded or 'bun-releases' directory does not exist."
|
||||
exit 1 # Fail the job if the condition is met
|
||||
else
|
||||
echo "Artifacts downloaded successfully."
|
||||
fi
|
||||
- name: Send Message
|
||||
uses: sarisia/actions-status-discord@v1
|
||||
with:
|
||||
webhook: ${{ secrets.DISCORD_WEBHOOK }}
|
||||
nodetail: true
|
||||
color: "#FF0000"
|
||||
title: "Bun v${{ inputs.version }} release artifacts uploaded"
|
||||
- name: "Upload Artifacts"
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
# Unzip one level deep each artifact
|
||||
cd bun-releases
|
||||
for f in *.zip; do
|
||||
unzip -o $f
|
||||
done
|
||||
cd ..
|
||||
gh release upload --repo=${{ github.repository }} ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.tag || github.event.release.id }} ${{ inputs.clobber && '--clobber' || '' }} bun-releases/*.zip
|
||||
.github/workflows/release.yml (vendored, 3 lines changed)
@@ -1,6 +1,3 @@
|
||||
# TODO: Move this to bash scripts instead of GitHub Actions
|
||||
# so it can be run from Buildkite, see: .buildkite/scripts/release.sh
|
||||
|
||||
name: Release
|
||||
concurrency: release
|
||||
|
||||
|
||||
.github/workflows/run-format.yml (vendored, 2 lines changed)
@@ -31,7 +31,7 @@ jobs:
|
||||
with:
|
||||
bun-version: "1.1.20"
|
||||
- name: Setup Zig
|
||||
uses: mlugg/setup-zig@v1
|
||||
uses: goto-bus-stop/setup-zig@c7b6cdd3adba8f8b96984640ff172c37c93f73ee
|
||||
with:
|
||||
version: ${{ inputs.zig-version }}
|
||||
- name: Install Dependencies
|
||||
|
||||
.github/workflows/run-lint-cpp.yml (vendored, 2 lines changed)
@@ -3,7 +3,7 @@ name: lint-cpp
|
||||
permissions:
|
||||
contents: read
|
||||
env:
|
||||
LLVM_VERSION: 18
|
||||
LLVM_VERSION: 16
|
||||
LC_CTYPE: "en_US.UTF-8"
|
||||
LC_ALL: "en_US.UTF-8"
|
||||
|
||||
|
||||
.github/workflows/run-test.yml (vendored, new file, 224 lines)
@@ -0,0 +1,224 @@
|
||||
name: Test
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
actions: read
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runs-on:
|
||||
type: string
|
||||
required: true
|
||||
tag:
|
||||
type: string
|
||||
required: true
|
||||
pr-number:
|
||||
type: string
|
||||
required: true
|
||||
run-id:
|
||||
type: string
|
||||
default: ${{ github.run_id }}
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: Tests
|
||||
runs-on: ${{ inputs.runs-on }}
|
||||
steps:
|
||||
- if: ${{ runner.os == 'Windows' }}
|
||||
name: Setup Git
|
||||
run: |
|
||||
git config --global core.autocrlf false
|
||||
git config --global core.eol lf
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: |
|
||||
package.json
|
||||
bun.lockb
|
||||
test
|
||||
packages/bun-internal-test
|
||||
packages/bun-types
|
||||
- name: Setup Environment
|
||||
shell: bash
|
||||
run: |
|
||||
echo "${{ inputs.pr-number }}" > pr-number.txt
|
||||
- name: Download Bun
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}
|
||||
path: bun
|
||||
github-token: ${{ github.token }}
|
||||
run-id: ${{ inputs.run-id || github.run_id }}
|
||||
- name: Download pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 8
|
||||
- if: ${{ runner.os != 'Windows' }}
|
||||
name: Setup Bun
|
||||
shell: bash
|
||||
run: |
|
||||
unzip bun/bun-*.zip
|
||||
cd bun-*
|
||||
pwd >> $GITHUB_PATH
|
||||
- if: ${{ runner.os == 'Windows' }}
|
||||
name: Setup Cygwin
|
||||
uses: secondlife/setup-cygwin@v3
|
||||
with:
|
||||
packages: bash
|
||||
- if: ${{ runner.os == 'Windows' }}
|
||||
name: Setup Bun (Windows)
|
||||
run: |
|
||||
unzip bun/bun-*.zip
|
||||
cd bun-*
|
||||
pwd >> $env:GITHUB_PATH
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
- name: Install Dependencies
|
||||
timeout-minutes: 5
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
bun install
|
||||
- name: Install Dependencies (test)
|
||||
timeout-minutes: 5
|
||||
run: |
|
||||
bun install --cwd test
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Install Dependencies (runner)
|
||||
timeout-minutes: 5
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
bun install --cwd packages/bun-internal-test
|
||||
- name: Run Tests
|
||||
id: test
|
||||
timeout-minutes: 90
|
||||
shell: bash
|
||||
env:
|
||||
IS_BUN_CI: 1
|
||||
TMPDIR: ${{ runner.temp }}
|
||||
BUN_TAG: ${{ inputs.tag }}
|
||||
BUN_FEATURE_FLAG_INTERNAL_FOR_TESTING: "true"
|
||||
SMTP_SENDGRID_SENDER: ${{ secrets.SMTP_SENDGRID_SENDER }}
|
||||
TLS_MONGODB_DATABASE_URL: ${{ secrets.TLS_MONGODB_DATABASE_URL }}
|
||||
TLS_POSTGRES_DATABASE_URL: ${{ secrets.TLS_POSTGRES_DATABASE_URL }}
|
||||
TEST_INFO_STRIPE: ${{ secrets.TEST_INFO_STRIPE }}
|
||||
TEST_INFO_AZURE_SERVICE_BUS: ${{ secrets.TEST_INFO_AZURE_SERVICE_BUS }}
|
||||
SHELLOPTS: igncr
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
node packages/bun-internal-test/src/runner.node.mjs $(which bun)
|
||||
- if: ${{ always() }}
|
||||
name: Upload Results
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-tests
|
||||
path: |
|
||||
test-report.*
|
||||
comment.md
|
||||
pr-number.txt
|
||||
if-no-files-found: error
|
||||
overwrite: true
|
||||
- if: ${{ always() && steps.test.outputs.failing_tests != '' && github.event.pull_request && github.repository_owner == 'oven-sh' }}
|
||||
name: Send Message
|
||||
uses: sarisia/actions-status-discord@v1
|
||||
with:
|
||||
webhook: ${{ secrets.DISCORD_WEBHOOK }}
|
||||
nodetail: true
|
||||
color: "#FF0000"
|
||||
title: ""
|
||||
description: |
|
||||
### ❌ [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})
|
||||
|
||||
@${{ github.actor }}, there are ${{ steps.test.outputs.failing_tests_count || 'some' }} failing tests on bun-${{ inputs.tag }}.
|
||||
|
||||
${{ steps.test.outputs.failing_tests }}
|
||||
|
||||
**[View logs](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})**
|
||||
- name: Fail
|
||||
if: ${{ failure() || always() && steps.test.outputs.failing_tests != '' }}
|
||||
run: |
|
||||
echo "There are ${{ steps.test.outputs.failing_tests_count || 'some' }} failing tests on bun-${{ inputs.tag }}."
|
||||
exit 1
|
||||
test-node:
|
||||
name: Node.js Tests
|
||||
# TODO: enable when we start paying attention to the results. In the meantime, this causes CI to queue jobs wasting developer time.
|
||||
if: 0
|
||||
runs-on: ${{ inputs.runs-on }}
|
||||
steps:
|
||||
- if: ${{ runner.os == 'Windows' }}
|
||||
name: Setup Git
|
||||
run: |
|
||||
git config --global core.autocrlf false
|
||||
git config --global core.eol lf
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: |
|
||||
test/node.js
|
||||
- name: Setup Environment
|
||||
shell: bash
|
||||
run: |
|
||||
echo "${{ inputs.pr-number }}" > pr-number.txt
|
||||
- name: Download Bun
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}
|
||||
path: bun
|
||||
github-token: ${{ github.token }}
|
||||
run-id: ${{ inputs.run-id || github.run_id }}
|
||||
- if: ${{ runner.os != 'Windows' }}
|
||||
name: Setup Bun
|
||||
shell: bash
|
||||
run: |
|
||||
unzip bun/bun-*.zip
|
||||
cd bun-*
|
||||
pwd >> $GITHUB_PATH
|
||||
- if: ${{ runner.os == 'Windows' }}
|
||||
name: Setup Cygwin
|
||||
uses: secondlife/setup-cygwin@v3
|
||||
with:
|
||||
packages: bash
|
||||
- if: ${{ runner.os == 'Windows' }}
|
||||
name: Setup Bun (Windows)
|
||||
run: |
|
||||
unzip bun/bun-*.zip
|
||||
cd bun-*
|
||||
pwd >> $env:GITHUB_PATH
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
- name: Checkout Tests
|
||||
shell: bash
|
||||
working-directory: test/node.js
|
||||
run: |
|
||||
node runner.mjs --pull
|
||||
- name: Install Dependencies
|
||||
timeout-minutes: 5
|
||||
shell: bash
|
||||
working-directory: test/node.js
|
||||
run: |
|
||||
bun install
|
||||
- name: Run Tests
|
||||
timeout-minutes: 10 # Increase when more tests are added
|
||||
shell: bash
|
||||
working-directory: test/node.js
|
||||
env:
|
||||
TMPDIR: ${{ runner.temp }}
|
||||
BUN_GARBAGE_COLLECTOR_LEVEL: "0"
|
||||
BUN_FEATURE_FLAG_INTERNAL_FOR_TESTING: "true"
|
||||
run: |
|
||||
node runner.mjs
|
||||
- name: Upload Results
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bun-${{ inputs.tag }}-node-tests
|
||||
path: |
|
||||
test/node.js/summary/*.json
|
||||
if-no-files-found: error
|
||||
overwrite: true
|
||||
.github/workflows/upload.yml (vendored, new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
name: Upload Artifacts
|
||||
run-name: Canary release ${{github.sha}} upload
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows:
|
||||
- CI
|
||||
types:
|
||||
- completed
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
upload:
|
||||
if: ${{ github.repository_owner == 'oven-sh' }}
|
||||
name: Upload Artifacts
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Download Artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: bun
|
||||
pattern: bun-*
|
||||
merge-multiple: true
|
||||
github-token: ${{ github.token }}
|
||||
run-id: ${{ github.event.workflow_run.id }}
|
||||
- name: Check for Artifacts
|
||||
run: |
|
||||
if [ ! -d "bun" ] || [ -z "$(ls -A bun)" ]; then
|
||||
echo "Error: No artifacts were downloaded or 'bun' directory does not exist."
|
||||
exit 1 # Fail the job if the condition is met
|
||||
else
|
||||
echo "Artifacts downloaded successfully."
|
||||
fi
|
||||
- name: Upload to GitHub Releases
|
||||
uses: ncipollo/release-action@v1
|
||||
with:
|
||||
tag: canary
|
||||
name: Canary (${{ github.sha }})
|
||||
prerelease: true
|
||||
body: This canary release of Bun corresponds to the commit [${{ github.sha }}]
|
||||
allowUpdates: true
|
||||
replacesArtifacts: true
|
||||
generateReleaseNotes: true
|
||||
artifactErrorsFailBuild: true
|
||||
artifacts: bun/**/bun-*.zip
|
||||
token: ${{ github.token }}
|
||||
- name: Upload to S3 (using SHA)
|
||||
uses: shallwefootball/s3-upload-action@4350529f410221787ccf424e50133cbc1b52704e
|
||||
with:
|
||||
endpoint: ${{ secrets.AWS_ENDPOINT }}
|
||||
aws_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY}}
|
||||
aws_bucket: ${{ secrets.AWS_BUCKET }}
|
||||
source_dir: bun
|
||||
destination_dir: releases/${{ github.event.workflow_run.head_sha || github.sha }}-canary
|
||||
- name: Upload to S3 (using tag)
|
||||
uses: shallwefootball/s3-upload-action@4350529f410221787ccf424e50133cbc1b52704e
|
||||
with:
|
||||
endpoint: ${{ secrets.AWS_ENDPOINT }}
|
||||
aws_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY}}
|
||||
aws_bucket: ${{ secrets.AWS_BUCKET }}
|
||||
source_dir: bun
|
||||
destination_dir: releases/canary
|
||||
- name: Announce on Discord
|
||||
uses: sarisia/actions-status-discord@v1
|
||||
with:
|
||||
webhook: ${{ secrets.BUN_DISCORD_GITHUB_CHANNEL_WEBHOOK }}
|
||||
nodetail: true
|
||||
color: "#1F6FEB"
|
||||
title: "New Bun Canary available"
|
||||
url: https://github.com/oven-sh/bun/commit/${{ github.sha }}
|
||||
description: |
|
||||
A new canary build of Bun has been automatically uploaded. To upgrade, run:
|
||||
```sh
|
||||
bun upgrade --canary
|
||||
# bun upgrade --stable <- to downgrade
|
||||
```
|
||||
# If notifying sentry fails, don't fail the rest of the build.
|
||||
- name: Notify Sentry
|
||||
uses: getsentry/action-release@v1.7.0
|
||||
env:
|
||||
SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
|
||||
SENTRY_ORG: ${{ secrets.SENTRY_ORG }}
|
||||
SENTRY_PROJECT: ${{ secrets.SENTRY_PROJECT }}
|
||||
with:
|
||||
ignore_missing: true
|
||||
ignore_empty: true
|
||||
version: ${{ github.event.workflow_run.head_sha || github.sha }}-canary
|
||||
environment: canary
|
||||
.gitmodules (vendored, 12 lines changed)
@@ -76,13 +76,17 @@ ignore = dirty
|
||||
depth = 1
|
||||
shallow = true
|
||||
fetchRecurseSubmodules = false
|
||||
[submodule "src/deps/libuv"]
|
||||
path = src/deps/libuv
|
||||
url = https://github.com/libuv/libuv.git
|
||||
ignore = dirty
|
||||
depth = 1
|
||||
shallow = true
|
||||
fetchRecurseSubmodules = false
|
||||
branch = v1.48.0
|
||||
[submodule "zig"]
|
||||
path = src/deps/zig
|
||||
url = https://github.com/oven-sh/zig
|
||||
depth = 1
|
||||
shallow = true
|
||||
fetchRecurseSubmodules = false
|
||||
[submodule "src/deps/libdeflate"]
|
||||
path = src/deps/libdeflate
|
||||
url = https://github.com/ebiggers/libdeflate
|
||||
ignore = "dirty"
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
command script import src/deps/zig/tools/lldb_pretty_printers.py
|
||||
command script import src/bun.js/WebKit/Tools/lldb/lldb_webkit.py
|
||||
|
||||
# type summary add --summary-string "${var} | inner=${var[0-30]}, source=${var[33-64]}, tag=${var[31-32]}" "unsigned long"
|
||||
.vscode/launch.json (generated, vendored, 4 lines changed)
@@ -17,7 +17,6 @@
|
||||
"cwd": "${workspaceFolder}/test",
|
||||
"env": {
|
||||
"FORCE_COLOR": "1",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "1",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
@@ -146,13 +145,14 @@
|
||||
"request": "launch",
|
||||
"name": "bun run [file]",
|
||||
"program": "${workspaceFolder}/build/bun-debug",
|
||||
"args": ["run", "${fileBasename}"],
|
||||
"args": ["run", "${file}"],
|
||||
"cwd": "${fileDirname}",
|
||||
"env": {
|
||||
"FORCE_COLOR": "0",
|
||||
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_EventLoop": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "2",
|
||||
"BUN_DEBUG_ALL": "1",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
},
|
||||
|
||||
.vscode/settings.json (vendored, 5 lines changed)
@@ -42,11 +42,8 @@
|
||||
"editor.defaultFormatter": "ziglang.vscode-zig",
|
||||
},
|
||||
|
||||
// lldb
|
||||
"lldb.launch.initCommands": ["command source ${workspaceFolder}/.lldbinit"],
|
||||
"lldb.verboseLogging": false,
|
||||
|
||||
// C++
|
||||
"lldb.verboseLogging": false,
|
||||
"cmake.configureOnOpen": false,
|
||||
"C_Cpp.errorSquiggles": "enabled",
|
||||
"[cpp]": {
|
||||
|
||||
CMakeLists.txt (103 lines changed)
@@ -3,8 +3,8 @@ cmake_policy(SET CMP0091 NEW)
|
||||
cmake_policy(SET CMP0067 NEW)
|
||||
|
||||
set(CMAKE_POLICY_DEFAULT_CMP0069 NEW)
|
||||
set(Bun_VERSION "1.1.22")
|
||||
set(WEBKIT_TAG a060f087c2232fb20d82c321d21e074e735d3261)
|
||||
set(Bun_VERSION "1.1.21")
|
||||
set(WEBKIT_TAG 49907bff8781719bc2ded068b0c934f6d0074d1e)
|
||||
|
||||
set(BUN_WORKDIR "${CMAKE_CURRENT_BINARY_DIR}")
|
||||
message(STATUS "Configuring Bun ${Bun_VERSION} in ${BUN_WORKDIR}")
|
||||
@@ -15,19 +15,6 @@ set(CMAKE_C_STANDARD 17)
|
||||
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
||||
set(CMAKE_C_STANDARD_REQUIRED ON)
|
||||
|
||||
option(ZIG_CACHE_DIR "Path to the Zig cache directory" "")
|
||||
|
||||
if(NOT ZIG_CACHE_DIR)
|
||||
SET(ZIG_CACHE_DIR "${BUN_WORKDIR}")
|
||||
cmake_path(APPEND ZIG_CACHE_DIR "zig-cache")
|
||||
endif()
|
||||
|
||||
set(LOCAL_ZIG_CACHE_DIR "${ZIG_CACHE_DIR}")
|
||||
set(GLOBAL_ZIG_CACHE_DIR "${ZIG_CACHE_DIR}")
|
||||
|
||||
cmake_path(APPEND LOCAL_ZIG_CACHE_DIR "local")
|
||||
cmake_path(APPEND GLOBAL_ZIG_CACHE_DIR "global")
|
||||
|
||||
# Used in process.version, process.versions.node, napi, and elsewhere
|
||||
set(REPORTED_NODEJS_VERSION "22.3.0")
|
||||
|
||||
@@ -323,7 +310,6 @@ endif()
|
||||
# -- Build Flags --
|
||||
option(USE_STATIC_SQLITE "Statically link SQLite?" ${DEFAULT_ON_UNLESS_APPLE})
|
||||
option(USE_CUSTOM_ZLIB "Use Bun's recommended version of zlib" ON)
|
||||
option(USE_CUSTOM_LIBDEFLATE "Use Bun's recommended version of libdeflate" ON)
|
||||
option(USE_CUSTOM_BORINGSSL "Use Bun's recommended version of BoringSSL" ON)
|
||||
option(USE_CUSTOM_LIBARCHIVE "Use Bun's recommended version of libarchive" ON)
|
||||
option(USE_CUSTOM_MIMALLOC "Use Bun's recommended version of Mimalloc" ON)
|
||||
@@ -347,7 +333,7 @@ option(USE_LTO "Enable Link-Time Optimization" ${DEFAULT_LTO})
|
||||
|
||||
if(APPLE AND USE_LTO)
|
||||
set(USE_LTO OFF)
|
||||
message(FATAL_ERROR "Link-Time Optimization is not supported on macOS because it requires -fuse-ld=lld and lld causes many segfaults on macOS (likely related to stack size)")
|
||||
message(WARNING "Link-Time Optimization is not supported on macOS because it requires -fuse-ld=lld and lld causes many segfaults on macOS (likely related to stack size)")
|
||||
endif()
|
||||
|
||||
if(WIN32 AND USE_LTO)
|
||||
@@ -476,8 +462,6 @@ elseif(NOT BUN_CPP_ONLY AND NOT BUN_LINK_ONLY AND NOT BUN_TIDY_ONLY AND NOT BUN_
|
||||
|
||||
message(STATUS "Installed Zig Compiler: ${ZIG_COMPILER}")
|
||||
set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS "build.zig")
|
||||
|
||||
message(STATUS "Using zig cache directory: ${ZIG_CACHE_DIR}")
|
||||
endif()
|
||||
|
||||
# Bun
|
||||
@@ -502,7 +486,7 @@ if(USE_UNIFIED_SOURCES)
|
||||
endif()
|
||||
|
||||
# CCache
|
||||
# find_program(CCACHE_PROGRAM sccache)
|
||||
find_program(CCACHE_PROGRAM sccache)
|
||||
find_program(CCACHE_PROGRAM ccache)
|
||||
|
||||
if(CCACHE_PROGRAM)
|
||||
@@ -704,32 +688,6 @@ add_custom_command(
|
||||
)
|
||||
list(APPEND BUN_RAW_SOURCES "${BUN_WORKDIR}/codegen/ZigGeneratedClasses.cpp")
|
||||
|
||||
if(NOT NO_CODEGEN)
|
||||
# --- ErrorCode Generator ---
|
||||
file(GLOB NODE_ERRORS_TS ${CONFIGURE_DEPENDS}
|
||||
"${BUN_SRC}/bun.js/bindings/ErrorCode.ts"
|
||||
)
|
||||
add_custom_command(
|
||||
OUTPUT "${BUN_WORKDIR}/codegen/ErrorCode+List.h" "${BUN_WORKDIR}/codegen/ErrorCode+Data.h" "${BUN_WORKDIR}/codegen/ErrorCode.zig"
|
||||
COMMAND ${BUN_EXECUTABLE} run "${BUN_CODEGEN_SRC}/generate-node-errors.ts" "${BUN_WORKDIR}/codegen"
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||
MAIN_DEPENDENCY "${BUN_CODEGEN_SRC}/generate-node-errors.ts"
|
||||
DEPENDS ${NODE_ERRORS_TS}
|
||||
VERBATIM
|
||||
COMMENT "Generating ErrorCode.zig"
|
||||
)
|
||||
|
||||
# This needs something to force it to be regenerated
|
||||
WEBKIT_ADD_SOURCE_DEPENDENCIES(
|
||||
"${BUN_SRC}/bun.js/bindings/ErrorCode.cpp"
|
||||
"${BUN_WORKDIR}/codegen/ErrorCode+List.h"
|
||||
)
|
||||
WEBKIT_ADD_SOURCE_DEPENDENCIES(
|
||||
"${BUN_SRC}/bun.js/bindings/ErrorCode.h"
|
||||
"${BUN_WORKDIR}/codegen/ErrorCode+Data.h"
|
||||
)
|
||||
endif()
|
||||
|
||||
# --- JSSink Generator ---
|
||||
add_custom_command(
|
||||
OUTPUT "${BUN_WORKDIR}/codegen/JSSink.cpp"
|
||||
@@ -803,7 +761,7 @@ if(NOT NO_CODEGEN)
|
||||
OUTPUT ${BUN_IDENTIFIER_CACHE_OUT}
|
||||
MAIN_DEPENDENCY "${BUN_SRC}/js_lexer/identifier_data.zig"
|
||||
DEPENDS "${BUN_SRC}/js_lexer/identifier_cache.zig"
|
||||
COMMAND ${ZIG_COMPILER} run "--zig-lib-dir" "${ZIG_LIB_DIR}" "--cache-dir" "${LOCAL_ZIG_CACHE_DIR}" "--global-cache-dir" "${GLOBAL_ZIG_CACHE_DIR}" "${BUN_SRC}/js_lexer/identifier_data.zig"
|
||||
COMMAND ${ZIG_COMPILER} run "--zig-lib-dir" "${ZIG_LIB_DIR}" "${BUN_SRC}/js_lexer/identifier_data.zig"
|
||||
VERBATIM
|
||||
COMMENT "Building Identifier Cache"
|
||||
)
|
||||
@@ -952,13 +910,10 @@ if(NOT BUN_LINK_ONLY AND NOT BUN_CPP_ONLY)
|
||||
"-Denable_logs=${ENABLE_LOGS}"
|
||||
"-Dreported_nodejs_version=${REPORTED_NODEJS_VERSION}"
|
||||
"-Dobj_format=${BUN_ZIG_OBJ_FORMAT}"
|
||||
"--cache-dir" "${LOCAL_ZIG_CACHE_DIR}"
|
||||
"--global-cache-dir" "${GLOBAL_ZIG_CACHE_DIR}"
|
||||
DEPENDS
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/build.zig"
|
||||
"${ZIG_FILES}"
|
||||
"${BUN_WORKDIR}/codegen/ZigGeneratedClasses.zig"
|
||||
"${BUN_WORKDIR}/codegen/ErrorCode.zig"
|
||||
"${BUN_WORKDIR}/codegen/ResolvedSourceTag.zig"
|
||||
"${BUN_IDENTIFIER_CACHE_OUT}"
|
||||
"${BUN_SRC}/api/schema.zig"
|
||||
@@ -1044,20 +999,8 @@ add_compile_definitions(
|
||||
)
|
||||
|
||||
if(NOT ASSERT_ENABLED)
|
||||
if(APPLE)
|
||||
add_compile_definitions("_LIBCXX_ENABLE_ASSERTIONS=0")
|
||||
add_compile_definitions("_LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_NONE")
|
||||
endif()
|
||||
|
||||
add_compile_definitions("NDEBUG=1")
|
||||
else()
|
||||
if(APPLE)
|
||||
add_compile_definitions("_LIBCXX_ENABLE_ASSERTIONS=1")
|
||||
add_compile_definitions("_LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_DEBUG")
|
||||
elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux")
|
||||
add_compile_definitions("_GLIBCXX_ASSERTIONS=1")
|
||||
endif()
|
||||
|
||||
add_compile_definitions("ASSERT_ENABLED=1")
|
||||
endif()
|
||||
|
||||
@@ -1127,25 +1070,12 @@ if(CMAKE_BUILD_TYPE STREQUAL "Debug")
|
||||
-Werror=uninitialized
|
||||
-Werror=conditional-uninitialized
|
||||
-Werror=suspicious-memaccess
|
||||
-Werror=int-conversion
|
||||
-Werror=nonnull
|
||||
-Werror=move
|
||||
-Werror=sometimes-uninitialized
|
||||
-Werror=unused
|
||||
-Wno-unused-function
|
||||
-Wno-nullability-completeness
|
||||
-Werror
|
||||
-fsanitize=null
|
||||
-fsanitize-recover=all
|
||||
-fsanitize=bounds
|
||||
-fsanitize=return
|
||||
-fsanitize=nullability-arg
|
||||
-fsanitize=nullability-assign
|
||||
-fsanitize=nullability-return
|
||||
-fsanitize=returns-nonnull-attribute
|
||||
-fsanitize=unreachable
|
||||
)
|
||||
target_link_libraries(${bun} PRIVATE -fsanitize=null)
|
||||
else()
|
||||
target_compile_options(${bun} PUBLIC /Od /Z7)
|
||||
endif()
|
||||
@@ -1167,11 +1097,8 @@ elseif(CMAKE_BUILD_TYPE STREQUAL "Release")
|
||||
-Werror=uninitialized
|
||||
-Werror=conditional-uninitialized
|
||||
-Werror=suspicious-memaccess
|
||||
-Werror=int-conversion
|
||||
-Werror=nonnull
|
||||
-Werror=move
|
||||
-Werror=sometimes-uninitialized
|
||||
-Wno-nullability-completeness
|
||||
-Werror
|
||||
)
|
||||
else()
|
||||
@@ -1283,7 +1210,7 @@ endif()
|
||||
|
||||
if(UNIX AND NOT APPLE)
|
||||
target_link_options(${bun} PUBLIC
|
||||
-fuse-ld=lld-${LLVM_VERSION}
|
||||
-fuse-ld=lld
|
||||
-fno-pic
|
||||
-static-libstdc++
|
||||
-static-libgcc
|
||||
@@ -1431,19 +1358,6 @@ else()
|
||||
target_link_libraries(${bun} PRIVATE LibArchive::LibArchive)
|
||||
endif()
|
||||
|
||||
if(USE_CUSTOM_LIBDEFLATE)
|
||||
include_directories(${BUN_DEPS_DIR}/libdeflate)
|
||||
|
||||
if(WIN32)
|
||||
target_link_libraries(${bun} PRIVATE "${BUN_DEPS_OUT_DIR}/deflate.lib")
|
||||
else()
|
||||
target_link_libraries(${bun} PRIVATE "${BUN_DEPS_OUT_DIR}/libdeflate.a")
|
||||
endif()
|
||||
else()
|
||||
find_package(LibDeflate REQUIRED)
|
||||
target_link_libraries(${bun} PRIVATE LibDeflate::LibDeflate)
|
||||
endif()
|
||||
|
||||
if(USE_CUSTOM_MIMALLOC)
|
||||
include_directories(${BUN_DEPS_DIR}/mimalloc/include)
|
||||
|
||||
@@ -1590,10 +1504,7 @@ endif()
|
||||
if(NOT WIN32)
|
||||
target_link_libraries(${bun} PRIVATE "${WEBKIT_LIB_DIR}/libWTF.a")
|
||||
target_link_libraries(${bun} PRIVATE "${WEBKIT_LIB_DIR}/libJavaScriptCore.a")
|
||||
|
||||
if(NOT APPLE OR EXISTS "${WEBKIT_LIB_DIR}/libbmalloc.a")
|
||||
target_link_libraries(${bun} PRIVATE "${WEBKIT_LIB_DIR}/libbmalloc.a")
|
||||
endif()
|
||||
target_link_libraries(${bun} PRIVATE "${WEBKIT_LIB_DIR}/libbmalloc.a")
|
||||
else()
|
||||
target_link_libraries(${bun} PRIVATE
|
||||
"${WEBKIT_LIB_DIR}/WTF.lib"
|
||||
|
||||
Dockerfile (27 lines changed)
@@ -52,7 +52,7 @@ ENV CI 1
|
||||
ENV CPU_TARGET=${CPU_TARGET}
|
||||
ENV BUILDARCH=${BUILDARCH}
|
||||
ENV BUN_DEPS_OUT_DIR=${BUN_DEPS_OUT_DIR}
|
||||
ENV USE_LTO 1
|
||||
ENV BUN_ENABLE_LTO 1
|
||||
|
||||
ENV LC_CTYPE=en_US.UTF-8
|
||||
ENV LC_ALL=en_US.UTF-8
|
||||
@@ -263,27 +263,6 @@ RUN --mount=type=cache,target=${CCACHE_DIR} \
|
||||
&& bash ./scripts/build-zlib.sh && rm -rf src/deps/zlib scripts
|
||||
|
||||
|
||||
FROM bun-base as libdeflate
|
||||
|
||||
ARG BUN_DIR
|
||||
ARG CPU_TARGET
|
||||
ENV CPU_TARGET=${CPU_TARGET}
|
||||
ARG CCACHE_DIR=/ccache
|
||||
ENV CCACHE_DIR=${CCACHE_DIR}
|
||||
|
||||
COPY Makefile ${BUN_DIR}/Makefile
|
||||
COPY CMakeLists.txt ${BUN_DIR}/CMakeLists.txt
|
||||
COPY scripts ${BUN_DIR}/scripts
|
||||
COPY src/deps/libdeflate ${BUN_DIR}/src/deps/libdeflate
|
||||
COPY package.json bun.lockb Makefile .gitmodules ${BUN_DIR}/
|
||||
|
||||
WORKDIR $BUN_DIR
|
||||
|
||||
RUN --mount=type=cache,target=${CCACHE_DIR} \
|
||||
cd $BUN_DIR \
|
||||
&& bash ./scripts/build-libdeflate.sh && rm -rf src/deps/libdeflate scripts
|
||||
|
||||
|
||||
FROM bun-base as libarchive
|
||||
|
||||
ARG BUN_DIR
|
||||
@@ -433,9 +412,6 @@ COPY src ${BUN_DIR}/src
|
||||
COPY CMakeLists.txt ${BUN_DIR}/CMakeLists.txt
|
||||
COPY src/deps/boringssl/include ${BUN_DIR}/src/deps/boringssl/include
|
||||
|
||||
# for uWebSockets
|
||||
COPY src/deps/libdeflate ${BUN_DIR}/src/deps/libdeflate
|
||||
|
||||
ARG CCACHE_DIR=/ccache
|
||||
ENV CCACHE_DIR=${CCACHE_DIR}
|
||||
|
||||
@@ -540,7 +516,6 @@ COPY src/symbols.dyn src/linker.lds ${BUN_DIR}/src/
|
||||
|
||||
COPY CMakeLists.txt ${BUN_DIR}/CMakeLists.txt
|
||||
COPY --from=zlib ${BUN_DEPS_OUT_DIR}/* ${BUN_DEPS_OUT_DIR}/
|
||||
COPY --from=libdeflate ${BUN_DEPS_OUT_DIR}/* ${BUN_DEPS_OUT_DIR}/
|
||||
COPY --from=libarchive ${BUN_DEPS_OUT_DIR}/* ${BUN_DEPS_OUT_DIR}/
|
||||
COPY --from=boringssl ${BUN_DEPS_OUT_DIR}/* ${BUN_DEPS_OUT_DIR}/
|
||||
COPY --from=lolhtml ${BUN_DEPS_OUT_DIR}/* ${BUN_DEPS_OUT_DIR}/
|
||||
|
||||
@@ -34,8 +34,6 @@ Bun statically links these libraries:
|
||||
| [`c-ares`](https://github.com/c-ares/c-ares) | MIT licensed |
|
||||
| [`libicu`](https://github.com/unicode-org/icu) 72 | [license here](https://github.com/unicode-org/icu/blob/main/icu4c/LICENSE) |
|
||||
| [`libbase64`](https://github.com/aklomp/base64/blob/master/LICENSE) | BSD 2-Clause |
|
||||
| [`libuv`](https://github.com/libuv/libuv) (on Windows) | MIT |
|
||||
| [`libdeflate`](https://github.com/ebiggers/libdeflate) | MIT |
|
||||
| A fork of [`uWebsockets`](https://github.com/jarred-sumner/uwebsockets) | Apache 2.0 licensed |
|
||||
| Parts of [Tigerbeetle's IO code](https://github.com/tigerbeetle/tigerbeetle/blob/532c8b70b9142c17e07737ab6d3da68d7500cbca/src/io/windows.zig#L1) | Apache 2.0 licensed |
|
||||
|
||||
|
||||
Makefile (1 line changed)
@@ -1309,7 +1309,6 @@ jsc-build-mac-compile-debug:
|
||||
-DCMAKE_BUILD_TYPE=Debug \
|
||||
-DUSE_THIN_ARCHIVES=OFF \
|
||||
-DENABLE_FTL_JIT=ON \
|
||||
-DENABLE_MALLOC_HEAP_BREAKDOWN=ON \
|
||||
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
|
||||
-DUSE_BUN_JSC_ADDITIONS=ON \
|
||||
-DENABLE_BUN_SKIP_FAILING_ASSERTIONS=ON \
|
||||
|
||||
@@ -1,43 +1,20 @@
|
||||
import { run, bench, group } from "mitata";
|
||||
import { run, bench } from "mitata";
|
||||
import { gzipSync, gunzipSync } from "bun";
|
||||
|
||||
const data = await Bun.file(require.resolve("@babel/standalone/babel.min.js")).arrayBuffer();
|
||||
const data = new TextEncoder().encode("Hello World!".repeat(9999));
|
||||
|
||||
const compressed = gzipSync(data);
|
||||
|
||||
const libraries = ["zlib"];
|
||||
if (Bun.semver.satisfies(Bun.version.replaceAll("-debug", ""), ">=1.1.21")) {
|
||||
libraries.push("libdeflate");
|
||||
}
|
||||
const options = { library: undefined };
|
||||
const benchFn = (name, fn) => {
|
||||
if (libraries.length > 1) {
|
||||
group(name, () => {
|
||||
for (const library of libraries) {
|
||||
bench(library, () => {
|
||||
options.library = library;
|
||||
fn();
|
||||
});
|
||||
}
|
||||
});
|
||||
} else {
|
||||
options.library = libraries[0];
|
||||
bench(name, () => {
|
||||
fn();
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
benchFn(`roundtrip - @babel/standalone/babel.min.js`, () => {
|
||||
gunzipSync(gzipSync(data, options), options);
|
||||
bench(`roundtrip - "Hello World!".repeat(9999))`, () => {
|
||||
gunzipSync(gzipSync(data));
|
||||
});
|
||||
|
||||
benchFn(`gzipSync(@babel/standalone/babel.min.js`, () => {
|
||||
gzipSync(data, options);
|
||||
bench(`gzipSync("Hello World!".repeat(9999)))`, () => {
|
||||
gzipSync(data);
|
||||
});
|
||||
|
||||
benchFn(`gunzipSync(@babel/standalone/babel.min.js`, () => {
|
||||
gunzipSync(compressed, options);
|
||||
bench(`gunzipSync("Hello World!".repeat(9999)))`, () => {
|
||||
gunzipSync(compressed);
|
||||
});
|
||||
|
||||
await run();
|
||||
|
||||
Binary file not shown.
@@ -1,22 +1,19 @@
|
||||
import { run, bench } from "mitata";
|
||||
import { gzipSync, gunzipSync } from "zlib";
|
||||
import { createRequire } from "module";
|
||||
import { readFileSync } from "fs";
|
||||
|
||||
const require = createRequire(import.meta.url);
|
||||
const data = readFileSync(require.resolve("@babel/standalone/babel.min.js"));
|
||||
const data = new TextEncoder().encode("Hello World!".repeat(9999));
|
||||
|
||||
const compressed = gzipSync(data);
|
||||
|
||||
bench(`roundtrip - @babel/standalone/babel.min.js)`, () => {
|
||||
bench(`roundtrip - "Hello World!".repeat(9999))`, () => {
|
||||
gunzipSync(gzipSync(data));
|
||||
});
|
||||
|
||||
bench(`gzipSync(@babel/standalone/babel.min.js))`, () => {
|
||||
bench(`gzipSync("Hello World!".repeat(9999)))`, () => {
|
||||
gzipSync(data);
|
||||
});
|
||||
|
||||
bench(`gunzipSync(@babel/standalone/babel.min.js))`, () => {
|
||||
bench(`gunzipSync("Hello World!".repeat(9999)))`, () => {
|
||||
gunzipSync(compressed);
|
||||
});
|
||||
|
||||
|
||||
@@ -7,8 +7,5 @@
|
||||
"bench:node": "$NODE node.mjs",
|
||||
"bench:deno": "$DENO run -A --unstable deno.js",
|
||||
"bench": "bun run bench:bun && bun run bench:node && bun run bench:deno"
|
||||
},
|
||||
"dependencies": {
|
||||
"@babel/standalone": "7.24.10"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,31 +0,0 @@
|
||||
import { run, bench } from "mitata";
|
||||
import { createRequire } from "module";
|
||||
|
||||
const require = createRequire(import.meta.url);
|
||||
const db = require("better-sqlite3")("./src/northwind.sqlite");
|
||||
|
||||
{
|
||||
const sql = db.prepare(`SELECT * FROM "Order"`);
|
||||
|
||||
bench('SELECT * FROM "Order"', () => {
|
||||
sql.all();
|
||||
});
|
||||
}
|
||||
|
||||
{
|
||||
const sql = db.prepare(`SELECT * FROM "Product"`);
|
||||
|
||||
bench('SELECT * FROM "Product"', () => {
|
||||
sql.all();
|
||||
});
|
||||
}
|
||||
|
||||
{
|
||||
const sql = db.prepare(`SELECT * FROM "OrderDetail"`);
|
||||
|
||||
bench('SELECT * FROM "OrderDetail"', () => {
|
||||
sql.all();
|
||||
});
|
||||
}
|
||||
|
||||
await run();
|
||||
@@ -1,9 +1,8 @@
|
||||
// Run `node --experimental-sqlite bench/sqlite/node.mjs` to run the script.
|
||||
// You will need the `--experimental-sqlite` flag to run this script and Node.js v22.5.0 or higher.
|
||||
import { run, bench } from "mitata";
|
||||
import { DatabaseSync as Database } from "node:sqlite";
|
||||
import { createRequire } from "module";
|
||||
|
||||
const db = new Database("./src/northwind.sqlite");
|
||||
const require = createRequire(import.meta.url);
|
||||
const db = require("better-sqlite3")("./src/northwind.sqlite");
|
||||
|
||||
{
|
||||
const sql = db.prepare(`SELECT * FROM "Order"`);
|
||||
|
||||
build.zig (13 lines changed)
@@ -49,7 +49,6 @@ const BunBuildOptions = struct {
|
||||
reported_nodejs_version: Version,
|
||||
|
||||
generated_code_dir: []const u8,
|
||||
no_llvm: bool,
|
||||
|
||||
cached_options_module: ?*Module = null,
|
||||
windows_shim: ?WindowsShim = null,
|
||||
@@ -182,8 +181,6 @@ pub fn build(b: *Build) !void {
|
||||
|
||||
const obj_format = b.option(ObjectFormat, "obj_format", "Output file for object files") orelse .obj;
|
||||
|
||||
const no_llvm = b.option(bool, "no_llvm", "Experiment with Zig self hosted backends. No stability guaranteed") orelse false;
|
||||
|
||||
var build_options = BunBuildOptions{
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
@@ -192,7 +189,6 @@ pub fn build(b: *Build) !void {
|
||||
.arch = arch,
|
||||
|
||||
.generated_code_dir = generated_code_dir,
|
||||
.no_llvm = no_llvm,
|
||||
|
||||
.version = try Version.parse(bun_version),
|
||||
.canary_revision = canary: {
|
||||
@@ -324,7 +320,6 @@ pub inline fn addMultiCheck(
|
||||
.version = root_build_options.version,
|
||||
.reported_nodejs_version = root_build_options.reported_nodejs_version,
|
||||
.generated_code_dir = root_build_options.generated_code_dir,
|
||||
.no_llvm = root_build_options.no_llvm,
|
||||
};
|
||||
|
||||
var obj = addBunObject(b, &options);
|
||||
@@ -343,8 +338,6 @@ pub fn addBunObject(b: *Build, opts: *BunBuildOptions) *Compile {
|
||||
},
|
||||
.target = opts.target,
|
||||
.optimize = opts.optimize,
|
||||
.use_llvm = !opts.no_llvm,
|
||||
.use_lld = if (opts.os == .mac) false else !opts.no_llvm,
|
||||
|
||||
// https://github.com/ziglang/zig/issues/17430
|
||||
.pic = true,
|
||||
@@ -453,12 +446,6 @@ fn addInternalPackages(b: *Build, obj: *Compile, opts: *BunBuildOptions) void {
|
||||
.root_source_file = .{ .cwd_relative = resolved_source_tag_path },
|
||||
});
|
||||
|
||||
const error_code_path = b.pathJoin(&.{ opts.generated_code_dir, "ErrorCode.zig" });
|
||||
validateGeneratedPath(error_code_path);
|
||||
obj.root_module.addAnonymousImport("ErrorCode", .{
|
||||
.root_source_file = .{ .cwd_relative = error_code_path },
|
||||
});
|
||||
|
||||
if (os == .windows) {
|
||||
obj.root_module.addAnonymousImport("bun_shim_impl.exe", .{
|
||||
.root_source_file = opts.windowsShim(b).exe.getEmittedBin(),
|
||||
|
||||
@@ -208,7 +208,8 @@ In Bun's CLI, simple boolean flags like `--minify` do not accept an argument. Ot
|
||||
---
|
||||
|
||||
- `--ignore-annotations`
|
||||
- `--ignore-dce-annotations`
|
||||
- n/a
|
||||
- Not supported
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -5,8 +5,8 @@ name: Define and replace static globals & constants
|
||||
The `--define` flag lets you declare statically-analyzable constants and globals. It replaces all usages of an identifier or property in a JavaScript or TypeScript file with a constant value. This feature is supported at runtime and also in `bun build`. It is loosely similar to `#define` in C/C++, but for JavaScript.
|
||||
|
||||
```ts
|
||||
bun --define process.env.NODE_ENV="'production'" src/index.ts # Runtime
|
||||
bun build --define process.env.NODE_ENV="'production'" src/index.ts # Build
|
||||
bun --define:process.env.NODE_ENV="'production'" src/index.ts # Runtime
|
||||
bun build --define:process.env.NODE_ENV="'production'" src/index.ts # Build
|
||||
```
|
||||
|
||||
---
|
||||
@@ -95,7 +95,7 @@ To replace all usages of `AWS` with the JSON object `{"ACCESS_KEY":"abc","SECRET
|
||||
|
||||
```sh
|
||||
# JSON
|
||||
bun --define AWS='{"ACCESS_KEY":"abc","SECRET_KEY":"def"}' src/index.ts
|
||||
bun --define:AWS='{"ACCESS_KEY":"abc","SECRET_KEY":"def"}' src/index.ts
|
||||
```
|
||||
|
||||
Those will be transformed into the equivalent JavaScript code.
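As a rough sketch of the transformation (hypothetical source file and output directory; the bundler may fold the property access further):

```sh
# suppose src/index.ts contains: console.log(AWS.ACCESS_KEY);
bun build --define:AWS='{"ACCESS_KEY":"abc","SECRET_KEY":"def"}' src/index.ts --outdir out
# the bundled output replaces the identifier, roughly:
#   console.log({ ACCESS_KEY: "abc", SECRET_KEY: "def" }.ACCESS_KEY);
```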
|
||||
@@ -119,7 +119,7 @@ You can also pass properties to the `--define` flag.
|
||||
For example, to replace all usages of `console.write` with `console.log`, you can use the following command (requires Bun v1.1.5 or later)
|
||||
|
||||
```sh
|
||||
bun --define console.write=console.log src/index.ts
|
||||
bun --define:console.write=console.log src/index.ts
|
||||
```
|
||||
|
||||
That transforms the following input:
|
||||
|
||||
@@ -27,28 +27,6 @@ data.version; // => "1.0.0"
|
||||
data.author.name; // => "John Dough"
|
||||
```
|
||||
|
||||
Bun also supports [Import Attributes](https://github.com/tc39/proposal-import-attributes/) and [JSON modules](https://github.com/tc39/proposal-json-modules) syntax.
|
||||
|
||||
```ts
|
||||
import data from "./package.json" with { type: "json" };
|
||||
|
||||
data.name; // => "bun"
|
||||
data.version; // => "1.0.0"
|
||||
data.author.name; // => "John Dough"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
Bun also supports [Import Attributes](https://github.com/tc39/proposal-import-attributes/) and [JSON modules](https://github.com/tc39/proposal-json-modules) syntax.
|
||||
|
||||
```ts
|
||||
import data from "./package.json" with { type: "json" };
|
||||
|
||||
data.name; // => "bun"
|
||||
data.version; // => "1.0.0"
|
||||
data.author.name; // => "John Dough"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
See [Docs > Runtime > TypeScript](/docs/runtime/typescript) for more information on using TypeScript with Bun.
|
||||
|
||||
@@ -171,8 +171,6 @@ Once imported, you should see something like this:
|
||||
|
||||
{% image alt="Viewing heap snapshot in Safari" src="https://user-images.githubusercontent.com/709451/204429337-b0d8935f-3509-4071-b991-217794d1fb27.png" caption="Viewing heap snapshot in Safari Dev Tools" /%}
|
||||
|
||||
> The [web debugger](https://bun.sh/docs/runtime/debugger#inspect) also offers the timeline feature which allows you to track and examine the memory usage of the running debug session.
|
||||
|
||||
### Native heap stats
|
||||
|
||||
Bun uses mimalloc for its native (non-JavaScript) heap. To report a summary of non-JavaScript memory usage, set the `MIMALLOC_SHOW_STATS=1` environment variable, and stats will print on exit.
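For example, a run might look like this (a minimal sketch; `app.ts` is a placeholder script name):

```sh
# mimalloc prints its allocation summary when the process exits
MIMALLOC_SHOW_STATS=1 bun run app.ts
```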
|
||||
|
||||
@@ -153,7 +153,7 @@ Some methods are not optimized yet.
|
||||
|
||||
### [`node:util`](https://nodejs.org/api/util.html)
|
||||
|
||||
🟡 Missing `MIMEParams` `MIMEType` `aborted` `debug` `getSystemErrorMap` `transferableAbortController` `transferableAbortSignal` `stripVTControlCharacters`
|
||||
🟡 Missing `MIMEParams` `MIMEType` `aborted` `debug` `getSystemErrorMap` `getSystemErrorName` `transferableAbortController` `transferableAbortSignal` `stripVTControlCharacters`
|
||||
|
||||
### [`node:v8`](https://nodejs.org/api/v8.html)
|
||||
|
||||
|
||||
@@ -7,36 +7,22 @@ The following Web APIs are partially or completely supported.
|
||||
---
|
||||
|
||||
- HTTP
|
||||
- [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/fetch)
|
||||
[`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response)
|
||||
[`Request`](https://developer.mozilla.org/en-US/docs/Web/API/Request)
|
||||
[`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers)
|
||||
[`AbortController`](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
|
||||
[`AbortSignal`](https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal)
|
||||
- [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/fetch) [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) [`Request`](https://developer.mozilla.org/en-US/docs/Web/API/Request) [`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers) [`AbortController`](https://developer.mozilla.org/en-US/docs/Web/API/AbortController) [`AbortSignal`](https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal)
|
||||
|
||||
---
|
||||
|
||||
- URLs
|
||||
- [`URL`](https://developer.mozilla.org/en-US/docs/Web/API/URL)
|
||||
[`URLSearchParams`](https://developer.mozilla.org/en-US/docs/Web/API/URLSearchParams)
|
||||
- [`URL`](https://developer.mozilla.org/en-US/docs/Web/API/URL) [`URLSearchParams`](https://developer.mozilla.org/en-US/docs/Web/API/URLSearchParams)
|
||||
|
||||
---
|
||||
|
||||
- Web Workers
|
||||
- [`Worker`](https://developer.mozilla.org/en-US/docs/Web/API/Worker)
|
||||
[`self.postMessage`](https://developer.mozilla.org/en-US/docs/Web/API/DedicatedWorkerGlobalScope/postMessage)
|
||||
[`structuredClone`](https://developer.mozilla.org/en-US/docs/Web/API/structuredClone)
|
||||
[`MessagePort`](https://developer.mozilla.org/en-US/docs/Web/API/MessagePort)
|
||||
[`MessageChannel`](https://developer.mozilla.org/en-US/docs/Web/API/MessageChannel), [`BroadcastChannel`](https://developer.mozilla.org/en-US/docs/Web/API/BroadcastChannel).
|
||||
- [`Worker`](https://developer.mozilla.org/en-US/docs/Web/API/Worker) [`self.postMessage`](https://developer.mozilla.org/en-US/docs/Web/API/DedicatedWorkerGlobalScope/postMessage) [`structuredClone`](https://developer.mozilla.org/en-US/docs/Web/API/structuredClone) [`MessagePort`](https://developer.mozilla.org/en-US/docs/Web/API/MessagePort) [`MessageChannel`](https://developer.mozilla.org/en-US/docs/Web/API/MessageChannel), [`BroadcastChannel`](https://developer.mozilla.org/en-US/docs/Web/API/BroadcastChannel).
|
||||
|
||||
---
|
||||
|
||||
- Streams
|
||||
- [`ReadableStream`](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream)
|
||||
[`WritableStream`](https://developer.mozilla.org/en-US/docs/Web/API/WritableStream)
|
||||
[`TransformStream`](https://developer.mozilla.org/en-US/docs/Web/API/TransformStream)
|
||||
[`ByteLengthQueuingStrategy`](https://developer.mozilla.org/en-US/docs/Web/API/ByteLengthQueuingStrategy)
|
||||
[`CountQueuingStrategy`](https://developer.mozilla.org/en-US/docs/Web/API/CountQueuingStrategy) and associated classes
|
||||
- [`ReadableStream`](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) [`WritableStream`](https://developer.mozilla.org/en-US/docs/Web/API/WritableStream) [`TransformStream`](https://developer.mozilla.org/en-US/docs/Web/API/TransformStream) [`ByteLengthQueuingStrategy`](https://developer.mozilla.org/en-US/docs/Web/API/ByteLengthQueuingStrategy) [`CountQueuingStrategy`](https://developer.mozilla.org/en-US/docs/Web/API/CountQueuingStrategy) and associated classes
|
||||
|
||||
---
|
||||
|
||||
@@ -51,10 +37,7 @@ The following Web APIs are partially or completely supported.
|
||||
---
|
||||
|
||||
- Encoding and decoding
|
||||
- [`atob`](https://developer.mozilla.org/en-US/docs/Web/API/atob)
|
||||
[`btoa`](https://developer.mozilla.org/en-US/docs/Web/API/btoa)
|
||||
[`TextEncoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder)
|
||||
[`TextDecoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextDecoder)
|
||||
- [`atob`](https://developer.mozilla.org/en-US/docs/Web/API/atob) [`btoa`](https://developer.mozilla.org/en-US/docs/Web/API/btoa) [`TextEncoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder) [`TextDecoder`](https://developer.mozilla.org/en-US/docs/Web/API/TextDecoder)
|
||||
|
||||
---
@@ -64,8 +47,7 @@ The following Web APIs are partially or completely supported.

---

- Timeouts
- [`setTimeout`](https://developer.mozilla.org/en-US/docs/Web/API/setTimeout)
  [`clearTimeout`](https://developer.mozilla.org/en-US/docs/Web/API/clearTimeout)
- [`setTimeout`](https://developer.mozilla.org/en-US/docs/Web/API/setTimeout) [`clearTimeout`](https://developer.mozilla.org/en-US/docs/Web/API/clearTimeout)

---

@@ -75,16 +57,14 @@ The following Web APIs are partially or completely supported.

---

- Crypto
- [`crypto`](https://developer.mozilla.org/en-US/docs/Web/API/Crypto)
  [`SubtleCrypto`](https://developer.mozilla.org/en-US/docs/Web/API/SubtleCrypto)
- [`crypto`](https://developer.mozilla.org/en-US/docs/Web/API/Crypto) [`SubtleCrypto`](https://developer.mozilla.org/en-US/docs/Web/API/SubtleCrypto)
  [`CryptoKey`](https://developer.mozilla.org/en-US/docs/Web/API/CryptoKey)
---
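A short `SubtleCrypto` sketch: hashing some bytes and generating a `CryptoKey` (the AES-GCM parameters are illustrative; `Buffer` is the Node global that Bun provides):

```ts
const data = new TextEncoder().encode("hello");
const digest = await crypto.subtle.digest("SHA-256", data);
console.log(Buffer.from(digest).toString("hex"));

const key: CryptoKey = await crypto.subtle.generateKey(
  { name: "AES-GCM", length: 256 },
  true,
  ["encrypt", "decrypt"],
);
console.log(key.type); // "secret"
```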
- Debugging
- [`console`](https://developer.mozilla.org/en-US/docs/Web/API/console)
  [`performance`](https://developer.mozilla.org/en-US/docs/Web/API/Performance)
- [`console`](https://developer.mozilla.org/en-US/docs/Web/API/console) [`performance`](https://developer.mozilla.org/en-US/docs/Web/API/Performance)

---

@@ -99,9 +79,7 @@ The following Web APIs are partially or completely supported.
---
- User interaction
- [`alert`](https://developer.mozilla.org/en-US/docs/Web/API/Window/alert)
  [`confirm`](https://developer.mozilla.org/en-US/docs/Web/API/Window/confirm)
  [`prompt`](https://developer.mozilla.org/en-US/docs/Web/API/Window/prompt) (intended for interactive CLIs)
- [`alert`](https://developer.mozilla.org/en-US/docs/Web/API/Window/alert) [`confirm`](https://developer.mozilla.org/en-US/docs/Web/API/Window/confirm) [`prompt`](https://developer.mozilla.org/en-US/docs/Web/API/Window/prompt) (intended for interactive CLIs)

<!-- - Blocking. Prints the alert message to terminal and awaits `[ENTER]` before proceeding. -->
<!-- - Blocking. Prints confirmation message and awaits `[y/N]` input from user. Returns `true` if user entered `y` or `Y`, `false` otherwise.
@@ -116,10 +94,7 @@ The following Web APIs are partially or completely supported.

- Events
- [`EventTarget`](https://developer.mozilla.org/en-US/docs/Web/API/EventTarget)
  [`Event`](https://developer.mozilla.org/en-US/docs/Web/API/Event)
  [`ErrorEvent`](https://developer.mozilla.org/en-US/docs/Web/API/ErrorEvent)
  [`CloseEvent`](https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent)
  [`MessageEvent`](https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent)
  [`Event`](https://developer.mozilla.org/en-US/docs/Web/API/Event) [`ErrorEvent`](https://developer.mozilla.org/en-US/docs/Web/API/ErrorEvent) [`CloseEvent`](https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent) [`MessageEvent`](https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent)
---
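A minimal `EventTarget` sketch dispatching a `MessageEvent`:

```ts
const bus = new EventTarget();
bus.addEventListener("message", event => {
  console.log("got:", (event as MessageEvent).data);
});
bus.dispatchEvent(new MessageEvent("message", { data: { ok: true } }));
```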
@@ -195,6 +195,7 @@ pub fn main() anyerror!void {
args.headers_buf,
response_body_string,
args.body,
0,
HTTP.FetchRedirect.follow,
),
};

@@ -31,6 +31,7 @@ const params = [_]clap.Param(clap.Help){
clap.parseParam("-b, --body <STR> HTTP request body as a string") catch unreachable,
clap.parseParam("-f, --file <STR> File path to load as body") catch unreachable,
clap.parseParam("-n, --count <INT> How many runs? Default 10") catch unreachable,
clap.parseParam("-t, --timeout <INT> Max duration per request") catch unreachable,
clap.parseParam("-r, --retry <INT> Max retry count") catch unreachable,
clap.parseParam("--no-gzip Disable gzip") catch unreachable,
clap.parseParam("--no-deflate Disable deflate") catch unreachable,

@@ -74,6 +75,7 @@ pub const Arguments = struct {
body: string = "",
turbo: bool = false,
count: usize = 10,
timeout: usize = 0,
repeat: usize = 0,
concurrency: u16 = 32,

@@ -163,6 +165,10 @@ pub const Arguments = struct {
// .keep_alive = !args.flag("--no-keep-alive"),
.concurrency = std.fmt.parseInt(u16, args.option("--max-concurrency") orelse "32", 10) catch 32,
.turbo = args.flag("--turbo"),
.timeout = std.fmt.parseInt(usize, args.option("--timeout") orelse "0", 10) catch |err| {
    Output.prettyErrorln("<r><red>{s}<r> parsing timeout", .{@errorName(err)});
    Global.exit(1);
},
.count = std.fmt.parseInt(usize, args.option("--count") orelse "10", 10) catch |err| {
    Output.prettyErrorln("<r><red>{s}<r> parsing count", .{@errorName(err)});
    Global.exit(1);

@@ -219,6 +225,7 @@ pub fn main() anyerror!void {
args.headers_buf,
response_body,
"",
args.timeout,
),
};
ctx.http.client.verbose = args.verbose;
@@ -13,6 +13,5 @@
"std.StringArrayHashMap(": "bun.StringArrayHashMap has a faster `eql`",
"std.StringHashMapUnmanaged(": "bun.StringHashMapUnmanaged has a faster `eql`",
"std.StringHashMap(": "bun.StringHashMap has a faster `eql`",
"std.enums.tagName(": "Use bun.tagName instead",
"": ""
}
@@ -239,7 +239,7 @@ Starting "${testFileName}"
GITHUB_ACTIONS: process.env.GITHUB_ACTIONS ?? "true",
BUN_DEBUG_QUIET_LOGS: "1",
BUN_INSTALL_CACHE_DIR: join(TMPDIR, ".bun-install-cache"),
BUN_ENABLE_CRASH_REPORTING: "0",
BUN_ENABLE_CRASH_REPORTING: "1",
[windows ? "TEMP" : "TMPDIR"]: TMPDIR,
},
});
packages/bun-types/bun.d.ts
@@ -1537,16 +1537,6 @@ declare module "bun" {
syntax?: boolean;
identifiers?: boolean;
};
/**
 * Ignore dead code elimination/tree-shaking annotations such as @__PURE__ and package.json
 * "sideEffects" fields. This should only be used as a temporary workaround for incorrect
 * annotations in libraries.
 */
ignoreDCEAnnotations?: boolean;
/**
 * Force emitting @__PURE__ annotations even if minify.whitespace is true.
 */
emitDCEAnnotations?: boolean;
// treeshaking?: boolean;

// jsx?:

@@ -3491,13 +3481,6 @@ declare module "bun" {
 * Filtered data consists mostly of small values with a somewhat random distribution.
 */
strategy?: number;

library?: "zlib";
}

interface LibdeflateCompressionOptions {
  level?: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12;
  library?: "libdeflate";
}

/**

@@ -3506,38 +3489,26 @@ declare module "bun" {
 * @param options Compression options to use
 * @returns The output buffer with the compressed data
 */
function deflateSync(
  data: Uint8Array | string | ArrayBuffer,
  options?: ZlibCompressionOptions | LibdeflateCompressionOptions,
): Uint8Array;
function deflateSync(data: Uint8Array | string | ArrayBuffer, options?: ZlibCompressionOptions): Uint8Array;
/**
 * Compresses a chunk of data with `zlib` GZIP algorithm.
 * @param data The buffer of data to compress
 * @param options Compression options to use
 * @returns The output buffer with the compressed data
 */
function gzipSync(
  data: Uint8Array | string | ArrayBuffer,
  options?: ZlibCompressionOptions | LibdeflateCompressionOptions,
): Uint8Array;
function gzipSync(data: Uint8Array | string | ArrayBuffer, options?: ZlibCompressionOptions): Uint8Array;
/**
 * Decompresses a chunk of data with `zlib` INFLATE algorithm.
 * @param data The buffer of data to decompress
 * @returns The output buffer with the decompressed data
 */
function inflateSync(
  data: Uint8Array | string | ArrayBuffer,
  options?: ZlibCompressionOptions | LibdeflateCompressionOptions,
): Uint8Array;
function inflateSync(data: Uint8Array | string | ArrayBuffer): Uint8Array;
/**
 * Decompresses a chunk of data with `zlib` GUNZIP algorithm.
 * @param data The buffer of data to decompress
 * @returns The output buffer with the decompressed data
 */
function gunzipSync(
  data: Uint8Array | string | ArrayBuffer,
  options?: ZlibCompressionOptions | LibdeflateCompressionOptions,
): Uint8Array;
function gunzipSync(data: Uint8Array | string | ArrayBuffer): Uint8Array;
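To illustrate the overloads above, a hedged usage sketch; the `library: "libdeflate"` option is the variant these typings add, so treat it as an assumption until the change lands:

```ts
import { deflateSync, gunzipSync, gzipSync, inflateSync } from "bun";

const input = new TextEncoder().encode("hello hello hello");

const gz = gzipSync(input, { level: 6 });                          // zlib backend
const fast = gzipSync(input, { level: 4, library: "libdeflate" }); // libdeflate backend
console.log(gunzipSync(gz).length, gunzipSync(fast).length);       // 17 17

const raw = deflateSync(input);
console.log(inflateSync(raw).length); // 17
```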
type Target =
/**

@@ -3857,7 +3828,7 @@ declare module "bun" {
 */
const isMainThread: boolean;

interface Socket<Data = undefined> extends Disposable {
interface Socket<Data = undefined> {
  /**
   * Write `data` to the socket
   *

@@ -4139,7 +4110,7 @@ declare module "bun" {
  setMaxSendFragment(size: number): boolean;
}

interface SocketListener<Data = undefined> extends Disposable {
interface SocketListener<Data = undefined> {
  stop(closeActiveConnections?: boolean): void;
  ref(): void;
  unref(): void;
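A small sketch of the `Socket`/`SocketListener` surface whose typings change above; the port and the echo behaviour are illustrative:

```ts
const listener = Bun.listen<{ openedAt: number }>({
  hostname: "127.0.0.1",
  port: 4000,
  socket: {
    open(socket) {
      socket.data = { openedAt: Date.now() };
    },
    data(socket, chunk) {
      socket.write(chunk); // echo the bytes back
    },
  },
});

// stop(true) also closes any active connections.
listener.stop(true);
```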
@@ -26,10 +26,6 @@
|
||||
#include <stdlib.h>
|
||||
|
||||
#ifndef _WIN32
|
||||
// Necessary for the stdint include
|
||||
#ifndef _GNU_SOURCE
|
||||
#define _GNU_SOURCE
|
||||
#endif
|
||||
#include <sys/types.h>
|
||||
#include <sys/socket.h>
|
||||
#include <netinet/in.h>
|
||||
@@ -42,8 +38,8 @@
|
||||
#include <mstcpip.h>
|
||||
#endif
|
||||
|
||||
#if defined(__APPLE__)
|
||||
extern int Bun__doesMacOSVersionSupportSendRecvMsgX();
|
||||
#if defined(__APPLE__) && defined(__aarch64__)
|
||||
#define HAS_MSGX
|
||||
#endif
|
||||
|
||||
|
||||
@@ -77,30 +73,32 @@ int bsd_sendmmsg(LIBUS_SOCKET_DESCRIPTOR fd, struct udp_sendbuf* sendbuf, int fl
|
||||
}
|
||||
return sendbuf->num;
|
||||
#elif defined(__APPLE__)
|
||||
// sendmsg_x does not support addresses.
|
||||
if (!sendbuf->has_empty && !sendbuf->has_addresses && Bun__doesMacOSVersionSupportSendRecvMsgX()) {
|
||||
while (1) {
|
||||
int ret = sendmsg_x(fd, sendbuf->msgvec, sendbuf->num, flags);
|
||||
if (ret >= 0) return ret;
|
||||
// If we receive EMMSGSIZE, we should use the fallback code.
|
||||
if (errno == EMSGSIZE) break;
|
||||
if (errno != EINTR) return ret;
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t i = 0, count = sendbuf->num; i < count; i++) {
|
||||
while (1) {
|
||||
ssize_t ret = sendmsg(fd, &sendbuf->msgvec[i].msg_hdr, flags);
|
||||
if (ret < 0) {
|
||||
if (errno == EINTR) continue;
|
||||
if (errno == EAGAIN || errno == EWOULDBLOCK) return i;
|
||||
return ret;
|
||||
// TODO figure out why sendmsg_x fails when one of the messages is empty
|
||||
// so that we can get rid of this code.
|
||||
// One of the weird things is that once a non-empty message has been sent on the socket,
|
||||
// empty messages start working as well. Bizarre.
|
||||
#ifdef HAS_MSGX
|
||||
if (sendbuf->has_empty) {
|
||||
#endif
|
||||
for (int i = 0; i < sendbuf->num; i++) {
|
||||
while (1) {
|
||||
ssize_t ret = sendmsg(fd, &sendbuf->msgvec[i].msg_hdr, flags);
|
||||
if (ret < 0) {
|
||||
if (errno == EINTR) continue;
|
||||
if (errno == EAGAIN || errno == EWOULDBLOCK) return i;
|
||||
return ret;
|
||||
}
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
return sendbuf->num;
|
||||
#ifdef HAS_MSGX
|
||||
}
|
||||
|
||||
return sendbuf->num;
|
||||
while (1) {
|
||||
int ret = sendmsg_x(fd, sendbuf->msgvec, sendbuf->num, flags);
|
||||
if (ret >= 0 || errno != EINTR) return ret;
|
||||
}
|
||||
#endif
|
||||
#else
|
||||
while (1) {
|
||||
int ret = sendmmsg(fd, sendbuf->msgvec, sendbuf->num, flags | MSG_NOSIGNAL);
|
||||
@@ -122,13 +120,12 @@ int bsd_recvmmsg(LIBUS_SOCKET_DESCRIPTOR fd, struct udp_recvbuf *recvbuf, int fl
|
||||
return 1;
|
||||
}
|
||||
#elif defined(__APPLE__)
|
||||
if (Bun__doesMacOSVersionSupportSendRecvMsgX()) {
|
||||
while (1) {
|
||||
int ret = recvmsg_x(fd, recvbuf->msgvec, LIBUS_UDP_RECV_COUNT, flags);
|
||||
if (ret >= 0 || errno != EINTR) return ret;
|
||||
}
|
||||
#ifdef HAS_MSGX
|
||||
while (1) {
|
||||
int ret = recvmsg_x(fd, recvbuf->msgvec, LIBUS_UDP_RECV_COUNT, flags);
|
||||
if (ret >= 0 || errno != EINTR) return ret;
|
||||
}
|
||||
|
||||
#else
|
||||
for (int i = 0; i < LIBUS_UDP_RECV_COUNT; ++i) {
|
||||
while (1) {
|
||||
ssize_t ret = recvmsg(fd, &recvbuf->msgvec[i].msg_hdr, flags);
|
||||
@@ -142,6 +139,7 @@ int bsd_recvmmsg(LIBUS_SOCKET_DESCRIPTOR fd, struct udp_recvbuf *recvbuf, int fl
|
||||
}
|
||||
}
|
||||
return LIBUS_UDP_RECV_COUNT;
|
||||
#endif
|
||||
#else
|
||||
while (1) {
|
||||
int ret = recvmmsg(fd, (struct mmsghdr *)&recvbuf->msgvec, LIBUS_UDP_RECV_COUNT, flags, 0);
|
||||
@@ -156,20 +154,19 @@ void bsd_udp_setup_recvbuf(struct udp_recvbuf *recvbuf, void *databuf, size_t da
|
||||
recvbuf->buflen = databuflen;
|
||||
#else
|
||||
// assert(databuflen > LIBUS_UDP_MAX_SIZE * LIBUS_UDP_RECV_COUNT);
|
||||
memset(recvbuf, 0, sizeof(struct udp_recvbuf));
|
||||
for (size_t i = 0; i < LIBUS_UDP_RECV_COUNT; i++) {
|
||||
|
||||
for (int i = 0; i < LIBUS_UDP_RECV_COUNT; i++) {
|
||||
recvbuf->iov[i].iov_base = (char*)databuf + i * LIBUS_UDP_MAX_SIZE;
|
||||
recvbuf->iov[i].iov_len = LIBUS_UDP_MAX_SIZE;
|
||||
|
||||
struct msghdr mh = {};
|
||||
memset(&mh, 0, sizeof(struct msghdr));
|
||||
mh.msg_name = &recvbuf->addr[i];
|
||||
mh.msg_namelen = sizeof(struct sockaddr_storage);
|
||||
mh.msg_iov = &recvbuf->iov[i];
|
||||
mh.msg_iovlen = 1;
|
||||
mh.msg_control = recvbuf->control[i];
|
||||
mh.msg_controllen = sizeof(recvbuf->control[i]);
|
||||
recvbuf->msgvec[i].msg_hdr = mh;
|
||||
recvbuf->msgvec[i].msg_hdr.msg_name = &recvbuf->addr[i];
|
||||
recvbuf->msgvec[i].msg_hdr.msg_namelen = sizeof(struct sockaddr_storage);
|
||||
|
||||
recvbuf->msgvec[i].msg_hdr.msg_iov = &recvbuf->iov[i];
|
||||
recvbuf->msgvec[i].msg_hdr.msg_iovlen = 1;
|
||||
|
||||
recvbuf->msgvec[i].msg_hdr.msg_control = recvbuf->control[i];
|
||||
recvbuf->msgvec[i].msg_hdr.msg_controllen = 256;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
@@ -182,12 +179,7 @@ int bsd_udp_setup_sendbuf(struct udp_sendbuf *buf, size_t bufsize, void** payloa
|
||||
buf->num = num;
|
||||
return num;
|
||||
#else
|
||||
// TODO: can we skip empty messages altogether? Do we really need to send 0-length messages?
|
||||
buf->has_empty = 0;
|
||||
|
||||
// sendmsg_x docs states it does not support addresses.
|
||||
buf->has_addresses = 0;
|
||||
|
||||
struct mmsghdr *msgvec = buf->msgvec;
|
||||
// todo check this math
|
||||
size_t count = (bufsize - sizeof(struct udp_sendbuf)) / (sizeof(struct mmsghdr) + sizeof(struct iovec));
|
||||
@@ -202,9 +194,6 @@ int bsd_udp_setup_sendbuf(struct udp_sendbuf *buf, size_t bufsize, void** payloa
|
||||
addr_len = addr->sa_family == AF_INET ? sizeof(struct sockaddr_in)
|
||||
: addr->sa_family == AF_INET6 ? sizeof(struct sockaddr_in6)
|
||||
: 0;
|
||||
if (addr_len > 0) {
|
||||
buf->has_addresses = 1;
|
||||
}
|
||||
}
|
||||
iov[i].iov_base = payloads[i];
|
||||
iov[i].iov_len = lengths[i];
|
||||
@@ -217,7 +206,6 @@ int bsd_udp_setup_sendbuf(struct udp_sendbuf *buf, size_t bufsize, void** payloa
|
||||
msgvec[i].msg_hdr.msg_flags = 0;
|
||||
msgvec[i].msg_len = 0;
|
||||
|
||||
|
||||
if (lengths[i] == 0) {
|
||||
buf->has_empty = 1;
|
||||
}
|
||||
|
||||
@@ -15,17 +15,18 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "internal/internal.h"
|
||||
#include "libusockets.h"
|
||||
#include <errno.h>
|
||||
#include "internal/internal.h"
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <errno.h>
|
||||
|
||||
#ifndef _WIN32
|
||||
#include <arpa/inet.h>
|
||||
#endif
|
||||
|
||||
#define CONCURRENT_CONNECTIONS 2
|
||||
// clang-format off
|
||||
|
||||
int default_is_low_prio_handler(struct us_socket_t *s) {
|
||||
return 0;
|
||||
}
|
||||
@@ -43,7 +44,7 @@ int us_raw_root_certs(struct us_cert_string_t**out){
|
||||
void us_listen_socket_close(int ssl, struct us_listen_socket_t *ls) {
|
||||
/* us_listen_socket_t extends us_socket_t so we close in similar ways */
|
||||
if (!us_socket_is_closed(0, &ls->s)) {
|
||||
us_internal_socket_context_unlink_listen_socket(ssl, ls->s.context, ls);
|
||||
us_internal_socket_context_unlink_listen_socket(ls->s.context, ls);
|
||||
us_poll_stop((struct us_poll_t *) &ls->s, ls->s.context->loop);
|
||||
bsd_close_socket(us_poll_fd((struct us_poll_t *) &ls->s));
|
||||
|
||||
@@ -64,7 +65,6 @@ void us_socket_context_close(int ssl, struct us_socket_context_t *context) {
|
||||
while (ls) {
|
||||
struct us_listen_socket_t *nextLS = (struct us_listen_socket_t *) ls->s.next;
|
||||
us_listen_socket_close(ssl, ls);
|
||||
|
||||
ls = nextLS;
|
||||
}
|
||||
|
||||
@@ -72,12 +72,12 @@ void us_socket_context_close(int ssl, struct us_socket_context_t *context) {
|
||||
struct us_socket_t *s = context->head_sockets;
|
||||
while (s) {
|
||||
struct us_socket_t *nextS = s->next;
|
||||
us_socket_close(ssl, s, LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN, 0);
|
||||
us_socket_close(ssl, s, 0, 0);
|
||||
s = nextS;
|
||||
}
|
||||
}
|
||||
|
||||
void us_internal_socket_context_unlink_listen_socket(int ssl, struct us_socket_context_t *context, struct us_listen_socket_t *ls) {
|
||||
void us_internal_socket_context_unlink_listen_socket(struct us_socket_context_t *context, struct us_listen_socket_t *ls) {
|
||||
/* We have to properly update the iterator used to sweep sockets for timeouts */
|
||||
if (ls == (struct us_listen_socket_t *) context->iterator) {
|
||||
context->iterator = ls->s.next;
|
||||
@@ -95,10 +95,9 @@ void us_internal_socket_context_unlink_listen_socket(int ssl, struct us_socket_c
|
||||
ls->s.next->prev = ls->s.prev;
|
||||
}
|
||||
}
|
||||
us_socket_context_unref(ssl, context);
|
||||
}
|
||||
|
||||
void us_internal_socket_context_unlink_socket(int ssl, struct us_socket_context_t *context, struct us_socket_t *s) {
|
||||
void us_internal_socket_context_unlink_socket(struct us_socket_context_t *context, struct us_socket_t *s) {
|
||||
/* We have to properly update the iterator used to sweep sockets for timeouts */
|
||||
if (s == context->iterator) {
|
||||
context->iterator = s->next;
|
||||
@@ -116,7 +115,6 @@ void us_internal_socket_context_unlink_socket(int ssl, struct us_socket_context_
|
||||
s->next->prev = s->prev;
|
||||
}
|
||||
}
|
||||
us_socket_context_unref(ssl, context);
|
||||
}
|
||||
|
||||
/* We always add in the top, so we don't modify any s.next */
|
||||
@@ -128,7 +126,6 @@ void us_internal_socket_context_link_listen_socket(struct us_socket_context_t *c
|
||||
context->head_listen_sockets->s.prev = &ls->s;
|
||||
}
|
||||
context->head_listen_sockets = ls;
|
||||
context->ref_count++;
|
||||
}
|
||||
|
||||
/* We always add in the top, so we don't modify any s.next */
|
||||
@@ -140,7 +137,6 @@ void us_internal_socket_context_link_socket(struct us_socket_context_t *context,
|
||||
context->head_sockets->prev = s;
|
||||
}
|
||||
context->head_sockets = s;
|
||||
context->ref_count++;
|
||||
}
|
||||
|
||||
struct us_loop_t *us_socket_context_loop(int ssl, struct us_socket_context_t *context) {
|
||||
@@ -235,7 +231,6 @@ struct us_socket_context_t *us_create_socket_context(int ssl, struct us_loop_t *
|
||||
struct us_socket_context_t *context = us_calloc(1, sizeof(struct us_socket_context_t) + context_ext_size);
|
||||
context->loop = loop;
|
||||
context->is_low_prio = default_is_low_prio_handler;
|
||||
context->ref_count = 1;
|
||||
|
||||
us_internal_loop_link(loop, context);
|
||||
|
||||
@@ -257,7 +252,6 @@ struct us_socket_context_t *us_create_bun_socket_context(int ssl, struct us_loop
|
||||
struct us_socket_context_t *context = us_calloc(1, sizeof(struct us_socket_context_t) + context_ext_size);
|
||||
context->loop = loop;
|
||||
context->is_low_prio = default_is_low_prio_handler;
|
||||
context->ref_count = 1;
|
||||
|
||||
us_internal_loop_link(loop, context);
|
||||
|
||||
@@ -278,8 +272,7 @@ struct us_bun_verify_error_t us_socket_verify_error(int ssl, struct us_socket_t
|
||||
}
|
||||
|
||||
|
||||
|
||||
void us_internal_socket_context_free(int ssl, struct us_socket_context_t *context) {
|
||||
void us_socket_context_free(int ssl, struct us_socket_context_t *context) {
|
||||
#ifndef LIBUS_NO_SSL
|
||||
if (ssl) {
|
||||
/* This function will call us again with SSL=false */
|
||||
@@ -292,23 +285,7 @@ void us_internal_socket_context_free(int ssl, struct us_socket_context_t *contex
|
||||
* This is the opposite order compared to when creating the context - SSL code is cleaning up before non-SSL */
|
||||
|
||||
us_internal_loop_unlink(context->loop, context);
|
||||
/* Link this context to the close-list and let it be deleted after this iteration */
|
||||
context->next = context->loop->data.closed_context_head;
|
||||
context->loop->data.closed_context_head = context;
|
||||
}
|
||||
|
||||
void us_socket_context_ref(int ssl, struct us_socket_context_t *context) {
|
||||
context->ref_count++;
|
||||
}
|
||||
|
||||
void us_socket_context_unref(int ssl, struct us_socket_context_t *context) {
|
||||
if (--context->ref_count == 0) {
|
||||
us_internal_socket_context_free(ssl, context);
|
||||
}
|
||||
}
|
||||
|
||||
void us_socket_context_free(int ssl, struct us_socket_context_t *context) {
|
||||
us_socket_context_unref(ssl, context);
|
||||
us_free(context);
|
||||
}
|
||||
|
||||
struct us_listen_socket_t *us_socket_context_listen(int ssl, struct us_socket_context_t *context, const char *host, int port, int options, int socket_ext_size) {
|
||||
@@ -726,15 +703,13 @@ struct us_socket_t *us_socket_context_adopt_socket(int ssl, struct us_socket_con
|
||||
#endif
|
||||
|
||||
/* Cannot adopt a closed socket */
|
||||
if (us_socket_is_closed(ssl, s) || us_socket_is_shut_down(ssl, s)) {
|
||||
if (us_socket_is_closed(ssl, s)) {
|
||||
return s;
|
||||
}
|
||||
|
||||
if (s->low_prio_state != 1) {
|
||||
/* We need to be sure that we still holding a reference*/
|
||||
us_socket_context_ref(ssl, context);
|
||||
/* This properly updates the iterator if in on_timeout */
|
||||
us_internal_socket_context_unlink_socket(ssl, s->context, s);
|
||||
us_internal_socket_context_unlink_socket(s->context, s);
|
||||
}
|
||||
|
||||
|
||||
@@ -759,7 +734,6 @@ struct us_socket_t *us_socket_context_adopt_socket(int ssl, struct us_socket_con
|
||||
if (new_s->next) new_s->next->prev = new_s;
|
||||
} else {
|
||||
us_internal_socket_context_link_socket(context, new_s);
|
||||
us_socket_context_unref(ssl, context);
|
||||
}
|
||||
|
||||
return new_s;
|
||||
|
||||
@@ -14,14 +14,8 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
// clang-format off
|
||||
|
||||
#if (defined(LIBUS_USE_OPENSSL) || defined(LIBUS_USE_WOLFSSL))
|
||||
|
||||
|
||||
#include "internal/internal.h"
|
||||
#include "libusockets.h"
|
||||
#include <string.h>
|
||||
|
||||
/* These are in sni_tree.cpp */
|
||||
void *sni_new();
|
||||
void sni_free(void *sni, void (*cb)(void *));
|
||||
@@ -29,6 +23,10 @@ int sni_add(void *sni, const char *hostname, void *user);
|
||||
void *sni_remove(void *sni, const char *hostname);
|
||||
void *sni_find(void *sni, const char *hostname);
|
||||
|
||||
#include "internal/internal.h"
|
||||
#include "libusockets.h"
|
||||
#include <string.h>
|
||||
|
||||
/* This module contains the entire OpenSSL implementation
|
||||
* of the SSL socket and socket context interfaces. */
|
||||
#ifdef LIBUS_USE_OPENSSL
|
||||
@@ -73,6 +71,10 @@ struct us_internal_ssl_socket_context_t {
|
||||
// socket context
|
||||
SSL_CTX *ssl_context;
|
||||
int is_parent;
|
||||
#if ALLOW_SERVER_RENEGOTIATION
|
||||
unsigned int client_renegotiation_limit;
|
||||
unsigned int client_renegotiation_window;
|
||||
#endif
|
||||
/* These decorate the base implementation */
|
||||
struct us_internal_ssl_socket_t *(*on_open)(struct us_internal_ssl_socket_t *,
|
||||
int is_client, char *ip,
|
||||
@@ -84,10 +86,6 @@ struct us_internal_ssl_socket_context_t {
|
||||
struct us_internal_ssl_socket_t *(*on_close)(
|
||||
struct us_internal_ssl_socket_t *, int code, void *reason);
|
||||
|
||||
struct us_internal_ssl_socket_t *(*on_timeout)(
|
||||
struct us_internal_ssl_socket_t *);
|
||||
struct us_internal_ssl_socket_t *(*on_long_timeout)(struct us_internal_ssl_socket_t *);
|
||||
|
||||
/* Called for missing SNI hostnames, if not NULL */
|
||||
void (*on_server_name)(struct us_internal_ssl_socket_context_t *,
|
||||
const char *hostname);
|
||||
@@ -110,10 +108,15 @@ enum {
|
||||
struct us_internal_ssl_socket_t {
|
||||
struct us_socket_t s;
|
||||
SSL *ssl; // this _must_ be the first member after s
|
||||
#if ALLOW_SERVER_RENEGOTIATION
|
||||
unsigned int client_pending_renegotiations;
|
||||
uint64_t last_ssl_renegotiation;
|
||||
unsigned int is_client : 1;
|
||||
#endif
|
||||
unsigned int ssl_write_wants_read : 1; // we use this for now
|
||||
unsigned int ssl_read_wants_write : 1;
|
||||
unsigned int handshake_state : 2;
|
||||
unsigned int fatal_error : 1;
|
||||
unsigned int received_ssl_shutdown : 1;
|
||||
};
|
||||
|
||||
int passphrase_cb(char *buf, int size, int rwflag, void *u) {
|
||||
@@ -179,26 +182,6 @@ int BIO_s_custom_read(BIO *bio, char *dst, int length) {
|
||||
return length;
|
||||
}
|
||||
|
||||
|
||||
struct loop_ssl_data * us_internal_set_loop_ssl_data(struct us_internal_ssl_socket_t *s) {
|
||||
// note: this context can change when we adopt the socket!
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
|
||||
struct us_loop_t *loop = us_socket_context_loop(0, &context->sc);
|
||||
struct loop_ssl_data *loop_ssl_data =
|
||||
(struct loop_ssl_data *)loop->data.ssl_data;
|
||||
|
||||
// note: if we put data here we should never really clear it (not in write
|
||||
// either, it still should be available for SSL_write to read from!)
|
||||
|
||||
loop_ssl_data->ssl_read_input_length = 0;
|
||||
loop_ssl_data->ssl_read_input_offset = 0;
|
||||
loop_ssl_data->ssl_socket = &s->s;
|
||||
loop_ssl_data->msg_more = 0;
|
||||
return loop_ssl_data;
|
||||
}
|
||||
|
||||
struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
|
||||
int is_client, char *ip,
|
||||
int ip_length) {
|
||||
@@ -206,14 +189,21 @@ struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
|
||||
struct loop_ssl_data *loop_ssl_data = us_internal_set_loop_ssl_data(s);
|
||||
struct us_loop_t *loop = us_socket_context_loop(0, &context->sc);
|
||||
struct loop_ssl_data *loop_ssl_data =
|
||||
(struct loop_ssl_data *)loop->data.ssl_data;
|
||||
|
||||
s->ssl = SSL_new(context->ssl_context);
|
||||
#if ALLOW_SERVER_RENEGOTIATION
|
||||
s->client_pending_renegotiations = context->client_renegotiation_limit;
|
||||
s->last_ssl_renegotiation = 0;
|
||||
s->is_client = is_client ? 1 : 0;
|
||||
|
||||
#endif
|
||||
s->ssl_write_wants_read = 0;
|
||||
s->ssl_read_wants_write = 0;
|
||||
s->fatal_error = 0;
|
||||
s->handshake_state = HANDSHAKE_PENDING;
|
||||
|
||||
s->received_ssl_shutdown = 0;
|
||||
|
||||
SSL_set_bio(s->ssl, loop_ssl_data->shared_rbio, loop_ssl_data->shared_wbio);
|
||||
// if we allow renegotiation, we need to set the mode here
|
||||
@@ -223,18 +213,24 @@ struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
|
||||
// this can be a DoS vector for servers, so we enable it using a limit
|
||||
// we do not use ssl_renegotiate_freely, since ssl_renegotiate_explicit is
|
||||
// more performant when using BoringSSL
|
||||
|
||||
#if ALLOW_SERVER_RENEGOTIATION
|
||||
if (context->client_renegotiation_limit) {
|
||||
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_explicit);
|
||||
} else {
|
||||
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_never);
|
||||
}
|
||||
#endif
|
||||
|
||||
BIO_up_ref(loop_ssl_data->shared_rbio);
|
||||
BIO_up_ref(loop_ssl_data->shared_wbio);
|
||||
|
||||
if (is_client) {
|
||||
#if ALLOW_SERVER_RENEGOTIATION == 0
|
||||
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_explicit);
|
||||
#endif
|
||||
SSL_set_connect_state(s->ssl);
|
||||
} else {
|
||||
SSL_set_accept_state(s->ssl);
|
||||
// we do not allow renegotiation on the server side (should be the default for BoringSSL, but we set to make openssl compatible)
|
||||
SSL_set_renegotiate_mode(s->ssl, ssl_renegotiate_never);
|
||||
}
|
||||
|
||||
struct us_internal_ssl_socket_t *result =
|
||||
@@ -250,43 +246,6 @@ struct us_internal_ssl_socket_t *ssl_on_open(struct us_internal_ssl_socket_t *s,
|
||||
return result;
|
||||
}
|
||||
|
||||
/// @brief Complete the shutdown or do a fast shutdown when needed, this should only be called before closing the socket
|
||||
/// @param s
|
||||
int us_internal_handle_shutdown(struct us_internal_ssl_socket_t *s, int force_fast_shutdown) {
|
||||
// if we are already shut down or in the middle of a handshake we don't need to do anything
|
||||
if(us_internal_ssl_socket_is_shut_down(s) || s->fatal_error) return 1;
|
||||
|
||||
|
||||
// we are closing the socket but did not send a shutdown yet
|
||||
int state = SSL_get_shutdown(s->ssl);
|
||||
int sent_shutdown = state & SSL_SENT_SHUTDOWN;
|
||||
int received_shutdown = state & SSL_RECEIVED_SHUTDOWN;
|
||||
// if we are missing a shutdown call, we need to do a fast shutdown here
|
||||
if(!sent_shutdown || !received_shutdown) {
|
||||
// make sure that the ssl loop data is set
|
||||
us_internal_set_loop_ssl_data(s);
|
||||
// Zero means that we should wait for the peer to close the connection
|
||||
// but we are already closing the connection so we do a fast shutdown here
|
||||
int ret = SSL_shutdown(s->ssl);
|
||||
if(ret == 0 && force_fast_shutdown) {
|
||||
// do a fast shutdown (dont wait for peer)
|
||||
ret = SSL_shutdown(s->ssl);
|
||||
}
|
||||
if(ret < 0) {
|
||||
// we got some error here, but we dont care about it, we are closing the socket
|
||||
int err = SSL_get_error(s->ssl, ret);
|
||||
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
|
||||
// clear
|
||||
ERR_clear_error();
|
||||
s->fatal_error = 1;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return ret == 1;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
void us_internal_on_ssl_handshake(
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
void (*on_handshake)(struct us_internal_ssl_socket_t *, int success,
|
||||
@@ -297,17 +256,9 @@ void us_internal_on_ssl_handshake(
|
||||
context->handshake_data = custom_data;
|
||||
}
|
||||
|
||||
int us_internal_ssl_socket_is_closed(struct us_internal_ssl_socket_t *s) {
|
||||
return us_socket_is_closed(0, &s->s);
|
||||
}
|
||||
|
||||
struct us_internal_ssl_socket_t *
|
||||
us_internal_ssl_socket_close(struct us_internal_ssl_socket_t *s, int code,
|
||||
void *reason) {
|
||||
|
||||
// check if we are already closed
|
||||
if (us_internal_ssl_socket_is_closed(s)) return s;
|
||||
|
||||
if (s->handshake_state != HANDSHAKE_COMPLETED) {
|
||||
// if we have some pending handshake we cancel it and try to check the
|
||||
// latest handshake error this way we will always call on_handshake with the
|
||||
@@ -318,14 +269,8 @@ us_internal_ssl_socket_close(struct us_internal_ssl_socket_t *s, int code,
|
||||
us_internal_trigger_handshake_callback(s, 0);
|
||||
}
|
||||
|
||||
// if we are in the middle of a close_notify we need to finish it (code != 0 forces a fast shutdown)
|
||||
int can_close = s->ssl && us_internal_handle_shutdown(s, code != 0);
|
||||
|
||||
// only close the socket if we are not in the middle of a handshake
|
||||
if(can_close) {
|
||||
return (struct us_internal_ssl_socket_t *)us_socket_close(0, (struct us_socket_t *)s, code, reason);
|
||||
}
|
||||
return s;
|
||||
return (struct us_internal_ssl_socket_t *)us_socket_close(
|
||||
0, (struct us_socket_t *)s, code, reason);
|
||||
}
|
||||
|
||||
void us_internal_trigger_handshake_callback(struct us_internal_ssl_socket_t *s,
|
||||
@@ -347,7 +292,26 @@ int us_internal_ssl_renegotiate(struct us_internal_ssl_socket_t *s) {
|
||||
// if is a server and we have no pending renegotiation we can check
|
||||
// the limits
|
||||
s->handshake_state = HANDSHAKE_RENEGOTIATION_PENDING;
|
||||
|
||||
#if ALLOW_SERVER_RENEGOTIATION
|
||||
if (!s->is_client && !SSL_renegotiate_pending(s->ssl)) {
|
||||
uint64_t now = time(NULL);
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
// if is not the first time we negotiate and we are outside the time
|
||||
// window, reset the limits
|
||||
if (s->last_ssl_renegotiation && (now - s->last_ssl_renegotiation) >=
|
||||
context->client_renegotiation_window) {
|
||||
// reset the limits
|
||||
s->client_pending_renegotiations = context->client_renegotiation_limit;
|
||||
}
|
||||
// if we have no more renegotiations, we should close the connection
|
||||
if (s->client_pending_renegotiations == 0) {
|
||||
return 0;
|
||||
}
|
||||
s->last_ssl_renegotiation = now;
|
||||
s->client_pending_renegotiations--;
|
||||
}
|
||||
#endif
|
||||
if (!SSL_renegotiate(s->ssl)) {
|
||||
// we failed to renegotiate
|
||||
us_internal_trigger_handshake_callback(s, 0);
|
||||
@@ -357,13 +321,24 @@ int us_internal_ssl_renegotiate(struct us_internal_ssl_socket_t *s) {
|
||||
}
|
||||
|
||||
void us_internal_update_handshake(struct us_internal_ssl_socket_t *s) {
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
|
||||
// nothing to do here, renegotiation must be handled in SSL_read
|
||||
if (s->handshake_state != HANDSHAKE_PENDING)
|
||||
return;
|
||||
|
||||
if (us_internal_ssl_socket_is_closed(s) || us_internal_ssl_socket_is_shut_down(s) ||
|
||||
(s->ssl && SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN)) {
|
||||
|
||||
struct us_loop_t *loop = us_socket_context_loop(0, &context->sc);
|
||||
struct loop_ssl_data *loop_ssl_data =
|
||||
(struct loop_ssl_data *)loop->data.ssl_data;
|
||||
|
||||
loop_ssl_data->ssl_read_input_length = 0;
|
||||
loop_ssl_data->ssl_read_input_offset = 0;
|
||||
loop_ssl_data->ssl_socket = &s->s;
|
||||
loop_ssl_data->msg_more = 0;
|
||||
|
||||
if (us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s) ||
|
||||
SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN) {
|
||||
|
||||
us_internal_trigger_handshake_callback(s, 0);
|
||||
return;
|
||||
@@ -372,6 +347,7 @@ void us_internal_update_handshake(struct us_internal_ssl_socket_t *s) {
|
||||
int result = SSL_do_handshake(s->ssl);
|
||||
|
||||
if (SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN) {
|
||||
s->received_ssl_shutdown = 1;
|
||||
us_internal_ssl_socket_close(s, 0, NULL);
|
||||
return;
|
||||
}
|
||||
@@ -380,23 +356,30 @@ void us_internal_update_handshake(struct us_internal_ssl_socket_t *s) {
|
||||
int err = SSL_get_error(s->ssl, result);
|
||||
// as far as I know these are the only errors we want to handle
|
||||
if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_WANT_WRITE) {
|
||||
us_internal_trigger_handshake_callback(s, 1);
|
||||
|
||||
// clear per thread error queue if it may contain something
|
||||
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
|
||||
ERR_clear_error();
|
||||
s->fatal_error = 1;
|
||||
}
|
||||
us_internal_trigger_handshake_callback(s, 0);
|
||||
|
||||
return;
|
||||
}
|
||||
s->handshake_state = HANDSHAKE_PENDING;
|
||||
s->ssl_write_wants_read = 1;
|
||||
// Ensure that we'll cycle through internal openssl's state
|
||||
if (!us_socket_is_closed(0, &s->s) &&
|
||||
!us_internal_ssl_socket_is_shut_down(s)) {
|
||||
us_socket_write(1, loop_ssl_data->ssl_socket, "\0", 0, 0);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
// success
|
||||
us_internal_trigger_handshake_callback(s, 1);
|
||||
s->ssl_write_wants_read = 1;
|
||||
// Ensure that we'll cycle through internal openssl's state
|
||||
if (!us_socket_is_closed(0, &s->s) &&
|
||||
!us_internal_ssl_socket_is_shut_down(s)) {
|
||||
us_socket_write(1, loop_ssl_data->ssl_socket, "\0", 0, 0);
|
||||
}
|
||||
}
|
||||
|
||||
struct us_internal_ssl_socket_t *
|
||||
@@ -404,33 +387,16 @@ ssl_on_close(struct us_internal_ssl_socket_t *s, int code, void *reason) {
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
|
||||
us_internal_set_loop_ssl_data(s);
|
||||
struct us_internal_ssl_socket_t * ret = context->on_close(s, code, reason);
|
||||
SSL_free(s->ssl); // free SSL after on_close
|
||||
s->ssl = NULL; // set to NULL
|
||||
return ret;
|
||||
}
|
||||
SSL_free(s->ssl);
|
||||
|
||||
struct us_internal_ssl_socket_t * ssl_on_timeout(struct us_internal_ssl_socket_t *s) {
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
|
||||
us_internal_set_loop_ssl_data(s);
|
||||
return context->on_timeout(s);
|
||||
}
|
||||
|
||||
struct us_internal_ssl_socket_t * ssl_on_long_timeout(struct us_internal_ssl_socket_t *s) {
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
|
||||
us_internal_set_loop_ssl_data(s);
|
||||
return context->on_long_timeout(s);
|
||||
return context->on_close(s, code, reason);
|
||||
}
|
||||
|
||||
struct us_internal_ssl_socket_t *
|
||||
ssl_on_end(struct us_internal_ssl_socket_t *s) {
|
||||
us_internal_set_loop_ssl_data(s);
|
||||
// whatever state we are in, a TCP FIN is always an answered shutdown
|
||||
|
||||
/* Todo: this should report CLEANLY SHUTDOWN as reason */
|
||||
return us_internal_ssl_socket_close(s, 0, NULL);
|
||||
}
|
||||
|
||||
@@ -442,20 +408,43 @@ struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s,
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
|
||||
struct loop_ssl_data *loop_ssl_data = us_internal_set_loop_ssl_data(s);
|
||||
struct us_loop_t *loop = us_socket_context_loop(0, &context->sc);
|
||||
struct loop_ssl_data *loop_ssl_data =
|
||||
(struct loop_ssl_data *)loop->data.ssl_data;
|
||||
|
||||
// note: if we put data here we should never really clear it (not in write
|
||||
// either, it still should be available for SSL_write to read from!)
|
||||
loop_ssl_data->ssl_read_input = data;
|
||||
loop_ssl_data->ssl_read_input_length = length;
|
||||
loop_ssl_data->ssl_read_input_offset = 0;
|
||||
loop_ssl_data->ssl_socket = &s->s;
|
||||
loop_ssl_data->msg_more = 0;
|
||||
|
||||
if (us_internal_ssl_socket_is_closed(s)) {
|
||||
if (us_socket_is_closed(0, &s->s) || s->received_ssl_shutdown) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (us_internal_ssl_socket_is_shut_down(s)) {
|
||||
us_internal_ssl_socket_close(s, 0, NULL);
|
||||
return NULL;
|
||||
|
||||
int ret = 0;
|
||||
if ((ret = SSL_shutdown(s->ssl)) == 1) {
|
||||
// two phase shutdown is complete here
|
||||
|
||||
/* Todo: this should also report some kind of clean shutdown */
|
||||
return us_internal_ssl_socket_close(s, 0, NULL);
|
||||
} else if (ret < 0) {
|
||||
|
||||
int err = SSL_get_error(s->ssl, ret);
|
||||
|
||||
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
|
||||
// we need to clear the error queue in case these added to the thread
|
||||
// local queue
|
||||
ERR_clear_error();
|
||||
}
|
||||
}
|
||||
|
||||
// no further processing of data when in shutdown state
|
||||
return s;
|
||||
}
|
||||
|
||||
// bug checking: this loop needs a lot of attention and clean-ups and
|
||||
@@ -463,12 +452,17 @@ struct us_internal_ssl_socket_t *ssl_on_data(struct us_internal_ssl_socket_t *s,
|
||||
int read = 0;
|
||||
restart:
|
||||
// read until shutdown
|
||||
while (1) {
|
||||
while (!s->received_ssl_shutdown) {
|
||||
int just_read = SSL_read(s->ssl,
|
||||
loop_ssl_data->ssl_read_output +
|
||||
LIBUS_RECV_BUFFER_PADDING + read,
|
||||
LIBUS_RECV_BUFFER_LENGTH - read);
|
||||
|
||||
// we need to check if we received a shutdown here
|
||||
if (SSL_get_shutdown(s->ssl) & SSL_RECEIVED_SHUTDOWN) {
|
||||
s->received_ssl_shutdown = 1;
|
||||
// we will only close after we handle the data and errors
|
||||
}
|
||||
|
||||
if (just_read <= 0) {
|
||||
int err = SSL_get_error(s->ssl, just_read);
|
||||
// as far as I know these are the only errors we want to handle
|
||||
@@ -483,9 +477,8 @@ restart:
|
||||
// clean and close renegotiation failed
|
||||
err = SSL_ERROR_SSL;
|
||||
} else if (err == SSL_ERROR_ZERO_RETURN) {
|
||||
// Remotely-Initiated Shutdown
|
||||
// See: https://www.openssl.org/docs/manmaster/man3/SSL_shutdown.html
|
||||
|
||||
// zero return can be EOF/FIN, if we have data just signal on_data and
|
||||
// close
|
||||
if (read) {
|
||||
context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(
|
||||
@@ -494,24 +487,21 @@ restart:
|
||||
s = context->on_data(
|
||||
s, loop_ssl_data->ssl_read_output + LIBUS_RECV_BUFFER_PADDING,
|
||||
read);
|
||||
if (!s || us_internal_ssl_socket_is_closed(s)) {
|
||||
return NULL; // stop processing data
|
||||
if (!s || us_socket_is_closed(0, &s->s)) {
|
||||
return s;
|
||||
}
|
||||
}
|
||||
// terminate connection here
|
||||
us_internal_ssl_socket_close(s, 0, NULL);
|
||||
return NULL; // stop processing data
|
||||
return us_internal_ssl_socket_close(s, 0, NULL);
|
||||
}
|
||||
|
||||
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
|
||||
// clear per thread error queue if it may contain something
|
||||
ERR_clear_error();
|
||||
s->fatal_error = 1;
|
||||
}
|
||||
|
||||
// terminate connection here
|
||||
us_internal_ssl_socket_close(s, 0, NULL);
|
||||
return NULL; // stop processing data
|
||||
return us_internal_ssl_socket_close(s, 0, NULL);
|
||||
} else {
|
||||
// emit the data we have and exit
|
||||
|
||||
@@ -536,8 +526,8 @@ restart:
|
||||
s = context->on_data(
|
||||
s, loop_ssl_data->ssl_read_output + LIBUS_RECV_BUFFER_PADDING,
|
||||
read);
|
||||
if (!s || us_internal_ssl_socket_is_closed(s)) {
|
||||
return NULL; // stop processing data
|
||||
if (!s || us_socket_is_closed(0, &s->s)) {
|
||||
return s;
|
||||
}
|
||||
|
||||
break;
|
||||
@@ -559,14 +549,20 @@ restart:
|
||||
// emit data and restart
|
||||
s = context->on_data(
|
||||
s, loop_ssl_data->ssl_read_output + LIBUS_RECV_BUFFER_PADDING, read);
|
||||
if (!s || us_internal_ssl_socket_is_closed(s)) {
|
||||
return NULL;
|
||||
if (!s || us_socket_is_closed(0, &s->s)) {
|
||||
return s;
|
||||
}
|
||||
|
||||
read = 0;
|
||||
goto restart;
|
||||
}
|
||||
}
|
||||
|
||||
// we received the shutdown after reading so we close
|
||||
if (s->received_ssl_shutdown) {
|
||||
us_internal_ssl_socket_close(s, 0, NULL);
|
||||
return NULL;
|
||||
}
|
||||
// trigger writable if we failed last write with want read
|
||||
if (s->ssl_write_wants_read) {
|
||||
s->ssl_write_wants_read = 0;
|
||||
@@ -579,8 +575,8 @@ restart:
|
||||
s = (struct us_internal_ssl_socket_t *)context->sc.on_writable(
|
||||
&s->s); // cast here!
|
||||
// if we are closed here, then exit
|
||||
if (!s || us_internal_ssl_socket_is_closed(s)) {
|
||||
return NULL;
|
||||
if (!s || us_socket_is_closed(0, &s->s)) {
|
||||
return s;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -589,7 +585,6 @@ restart:
|
||||
|
||||
struct us_internal_ssl_socket_t *
|
||||
ssl_on_writable(struct us_internal_ssl_socket_t *s) {
|
||||
us_internal_set_loop_ssl_data(s);
|
||||
us_internal_update_handshake(s);
|
||||
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
@@ -611,8 +606,8 @@ ssl_on_writable(struct us_internal_ssl_socket_t *s) {
|
||||
}
|
||||
// Do not call on_writable if the socket is closed.
|
||||
// on close means the socket data is no longer accessible
|
||||
if (!s || us_internal_ssl_socket_is_closed(s) || us_internal_ssl_socket_is_shut_down(s)) {
|
||||
return s;
|
||||
if (!s || us_socket_is_closed(0, &s->s)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (s->handshake_state == HANDSHAKE_COMPLETED) {
|
||||
@@ -1037,7 +1032,7 @@ long us_internal_verify_peer_certificate( // NOLINT(runtime/int)
|
||||
|
||||
struct us_bun_verify_error_t
|
||||
us_internal_verify_error(struct us_internal_ssl_socket_t *s) {
|
||||
if (us_internal_ssl_socket_is_closed(s) || us_internal_ssl_socket_is_shut_down(s)) {
|
||||
if (us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s)) {
|
||||
return (struct us_bun_verify_error_t){
|
||||
.error = 0, .code = NULL, .reason = NULL};
|
||||
}
|
||||
@@ -1322,6 +1317,10 @@ void us_bun_internal_ssl_socket_context_add_server_name(
|
||||
|
||||
/* We do not want to hold any nullptr's in our SNI tree */
|
||||
if (ssl_context) {
|
||||
#if ALLOW_SERVER_RENEGOTIATION
|
||||
context->client_renegotiation_limit = options.client_renegotiation_limit;
|
||||
context->client_renegotiation_window = options.client_renegotiation_window;
|
||||
#endif
|
||||
if (sni_add(context->sni, hostname_pattern, ssl_context)) {
|
||||
/* If we already had that name, ignore */
|
||||
free_ssl_context(ssl_context);
|
||||
@@ -1470,6 +1469,10 @@ us_internal_bun_create_ssl_socket_context(
|
||||
|
||||
context->on_handshake = NULL;
|
||||
context->handshake_data = NULL;
|
||||
#if ALLOW_SERVER_RENEGOTIATION
|
||||
context->client_renegotiation_limit = options.client_renegotiation_limit;
|
||||
context->client_renegotiation_window = options.client_renegotiation_window;
|
||||
#endif
|
||||
/* We, as parent context, may ignore data */
|
||||
context->sc.is_low_prio = (int (*)(struct us_socket_t *))ssl_is_low_prio;
|
||||
|
||||
@@ -1500,7 +1503,7 @@ void us_internal_ssl_socket_context_free(
|
||||
sni_free(context->sni, sni_hostname_destructor);
|
||||
}
|
||||
|
||||
us_internal_socket_context_free(0, &context->sc);
|
||||
us_socket_context_free(0, &context->sc);
|
||||
}
|
||||
|
||||
struct us_listen_socket_t *us_internal_ssl_socket_context_listen(
|
||||
@@ -1589,8 +1592,7 @@ void us_internal_ssl_socket_context_on_timeout(
|
||||
struct us_internal_ssl_socket_t *s)) {
|
||||
us_socket_context_on_timeout(0, (struct us_socket_context_t *)context,
|
||||
(struct us_socket_t * (*)(struct us_socket_t *))
|
||||
ssl_on_timeout);
|
||||
context->on_timeout = on_timeout;
|
||||
on_timeout);
|
||||
}
|
||||
|
||||
void us_internal_ssl_socket_context_on_long_timeout(
|
||||
@@ -1599,8 +1601,7 @@ void us_internal_ssl_socket_context_on_long_timeout(
|
||||
struct us_internal_ssl_socket_t *s)) {
|
||||
us_socket_context_on_long_timeout(
|
||||
0, (struct us_socket_context_t *)context,
|
||||
(struct us_socket_t * (*)(struct us_socket_t *)) ssl_on_long_timeout);
|
||||
context->on_long_timeout = on_long_timeout;
|
||||
(struct us_socket_t * (*)(struct us_socket_t *)) on_long_timeout);
|
||||
}
|
||||
|
||||
/* We do not really listen to passed FIN-handler, we entirely override it with
|
||||
@@ -1655,8 +1656,8 @@ int us_internal_ssl_socket_raw_write(struct us_internal_ssl_socket_t *s,
|
||||
|
||||
int us_internal_ssl_socket_write(struct us_internal_ssl_socket_t *s,
|
||||
const char *data, int length, int msg_more) {
|
||||
|
||||
if (us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s) || length == 0) {
|
||||
|
||||
if (us_socket_is_closed(0, &s->s) || us_internal_ssl_socket_is_shut_down(s)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1696,7 +1697,6 @@ int us_internal_ssl_socket_write(struct us_internal_ssl_socket_t *s,
|
||||
// these two errors may add to the error queue, which is per thread and
|
||||
// must be cleared
|
||||
ERR_clear_error();
|
||||
s->fatal_error = 1;
|
||||
|
||||
// all errors here except for want write are critical and should not
|
||||
// happen
|
||||
@@ -1714,12 +1714,12 @@ void *us_internal_connecting_ssl_socket_ext(struct us_connecting_socket_t *s) {
|
||||
}
|
||||
|
||||
int us_internal_ssl_socket_is_shut_down(struct us_internal_ssl_socket_t *s) {
|
||||
return !s->ssl || us_socket_is_shut_down(0, &s->s) ||
|
||||
SSL_get_shutdown(s->ssl) & SSL_SENT_SHUTDOWN || s->fatal_error;
|
||||
return us_socket_is_shut_down(0, &s->s) ||
|
||||
SSL_get_shutdown(s->ssl) & SSL_SENT_SHUTDOWN;
|
||||
}
|
||||
|
||||
void us_internal_ssl_socket_shutdown(struct us_internal_ssl_socket_t *s) {
|
||||
if (!us_internal_ssl_socket_is_closed(s) &&
|
||||
if (!us_socket_is_closed(0, &s->s) &&
|
||||
!us_internal_ssl_socket_is_shut_down(s)) {
|
||||
struct us_internal_ssl_socket_context_t *context =
|
||||
(struct us_internal_ssl_socket_context_t *)us_socket_context(0, &s->s);
|
||||
@@ -1740,8 +1740,11 @@ void us_internal_ssl_socket_shutdown(struct us_internal_ssl_socket_t *s) {
|
||||
loop_ssl_data->ssl_socket = &s->s;
|
||||
|
||||
loop_ssl_data->msg_more = 0;
|
||||
// sets SSL_SENT_SHUTDOWN and waits for the other side to do the same
|
||||
// sets SSL_SENT_SHUTDOWN no matter what (not actually true if error!)
|
||||
int ret = SSL_shutdown(s->ssl);
|
||||
if (ret == 0) {
|
||||
ret = SSL_shutdown(s->ssl);
|
||||
}
|
||||
|
||||
if (SSL_in_init(s->ssl) || SSL_get_quiet_shutdown(s->ssl)) {
|
||||
// when SSL_in_init or quiet shutdown in BoringSSL, we call shutdown
|
||||
@@ -1755,7 +1758,6 @@ void us_internal_ssl_socket_shutdown(struct us_internal_ssl_socket_t *s) {
|
||||
if (err == SSL_ERROR_SSL || err == SSL_ERROR_SYSCALL) {
|
||||
// clear
|
||||
ERR_clear_error();
|
||||
s->fatal_error = 1;
|
||||
}
|
||||
|
||||
// we get here if we are shutting down while still in init
|
||||
@@ -2046,8 +2048,8 @@ us_socket_context_on_socket_connect_error(
|
||||
socket->ssl = NULL;
|
||||
socket->ssl_write_wants_read = 0;
|
||||
socket->ssl_read_wants_write = 0;
|
||||
socket->fatal_error = 0;
|
||||
socket->handshake_state = HANDSHAKE_PENDING;
|
||||
socket->received_ssl_shutdown = 0;
|
||||
return socket;
|
||||
}
|
||||
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
// clang-format off
|
||||
#pragma once
|
||||
#ifndef INTERNAL_H
|
||||
#define INTERNAL_H
|
||||
@@ -101,9 +100,6 @@ struct addrinfo_result {
|
||||
int error;
|
||||
};
|
||||
|
||||
#define us_internal_ssl_socket_context_r struct us_internal_ssl_socket_context_t *nonnull_arg
|
||||
#define us_internal_ssl_socket_r struct us_internal_ssl_socket_t *nonnull_arg
|
||||
|
||||
extern int Bun__addrinfo_get(struct us_loop_t* loop, const char* host, struct addrinfo_request** ptr);
|
||||
extern int Bun__addrinfo_set(struct addrinfo_request* ptr, struct us_connecting_socket_t* socket);
|
||||
extern void Bun__addrinfo_freeRequest(struct addrinfo_request* addrinfo_req, int error);
|
||||
@@ -113,19 +109,19 @@ extern struct addrinfo_result *Bun__addrinfo_getRequestResult(struct addrinfo_re
|
||||
/* Loop related */
|
||||
void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error,
|
||||
int events);
|
||||
void us_internal_timer_sweep(us_loop_r loop);
|
||||
void us_internal_free_closed_sockets(us_loop_r loop);
|
||||
void us_internal_timer_sweep(struct us_loop_t *loop);
|
||||
void us_internal_free_closed_sockets(struct us_loop_t *loop);
|
||||
void us_internal_loop_link(struct us_loop_t *loop,
|
||||
struct us_socket_context_t *context);
|
||||
void us_internal_loop_unlink(struct us_loop_t *loop,
|
||||
struct us_socket_context_t *context);
|
||||
void us_internal_loop_data_init(struct us_loop_t *loop,
|
||||
void (*wakeup_cb)(us_loop_r loop),
|
||||
void (*pre_cb)(us_loop_r loop),
|
||||
void (*post_cb)(us_loop_r loop));
|
||||
void us_internal_loop_data_free(us_loop_r loop);
|
||||
void us_internal_loop_pre(us_loop_r loop);
|
||||
void us_internal_loop_post(us_loop_r loop);
|
||||
void (*wakeup_cb)(struct us_loop_t *loop),
|
||||
void (*pre_cb)(struct us_loop_t *loop),
|
||||
void (*post_cb)(struct us_loop_t *loop));
|
||||
void us_internal_loop_data_free(struct us_loop_t *loop);
|
||||
void us_internal_loop_pre(struct us_loop_t *loop);
|
||||
void us_internal_loop_post(struct us_loop_t *loop);
|
||||
|
||||
/* Asyncs (old) */
|
||||
struct us_internal_async *us_internal_create_async(struct us_loop_t *loop,
|
||||
@@ -142,22 +138,18 @@ int us_internal_poll_type(struct us_poll_t *p);
|
||||
void us_internal_poll_set_type(struct us_poll_t *p, int poll_type);
|
||||
|
||||
/* SSL loop data */
|
||||
void us_internal_init_loop_ssl_data(us_loop_r loop);
|
||||
void us_internal_free_loop_ssl_data(us_loop_r loop);
|
||||
void us_internal_init_loop_ssl_data(struct us_loop_t *loop);
|
||||
void us_internal_free_loop_ssl_data(struct us_loop_t *loop);
|
||||
|
||||
/* Socket context related */
|
||||
void us_internal_socket_context_link_socket(us_socket_context_r context,
|
||||
us_socket_r s);
|
||||
void us_internal_socket_context_unlink_socket(int ssl,
|
||||
us_socket_context_r context, us_socket_r s);
|
||||
void us_internal_socket_context_link_socket(struct us_socket_context_t *context,
|
||||
struct us_socket_t *s);
|
||||
void us_internal_socket_context_unlink_socket(
|
||||
struct us_socket_context_t *context, struct us_socket_t *s);
|
||||
|
||||
void us_internal_socket_after_resolve(struct us_connecting_socket_t *s);
|
||||
void us_internal_socket_after_open(us_socket_r s, int error);
|
||||
struct us_internal_ssl_socket_t *
|
||||
us_internal_ssl_socket_close(us_internal_ssl_socket_r s, int code,
|
||||
void *reason);
|
||||
|
||||
int us_internal_handle_dns_results(us_loop_r loop);
|
||||
void us_internal_socket_after_open(struct us_socket_t *s, int error);
|
||||
int us_internal_handle_dns_results(struct us_loop_t *loop);
|
||||
|
||||
/* Sockets are polls */
|
||||
struct us_socket_t {
|
||||
@@ -251,14 +243,13 @@ struct us_listen_socket_t {
|
||||
|
||||
/* Listen sockets are kept in their own list */
|
||||
void us_internal_socket_context_link_listen_socket(
|
||||
us_socket_context_r context, struct us_listen_socket_t *s);
|
||||
void us_internal_socket_context_unlink_listen_socket(int ssl,
|
||||
us_socket_context_r context, struct us_listen_socket_t *s);
|
||||
struct us_socket_context_t *context, struct us_listen_socket_t *s);
|
||||
void us_internal_socket_context_unlink_listen_socket(
|
||||
struct us_socket_context_t *context, struct us_listen_socket_t *s);
|
||||
|
||||
struct us_socket_context_t {
|
||||
alignas(LIBUS_EXT_ALIGNMENT) struct us_loop_t *loop;
|
||||
uint32_t global_tick;
|
||||
uint32_t ref_count;
|
||||
unsigned char timestamp;
|
||||
unsigned char long_timestamp;
|
||||
struct us_socket_t *head_sockets;
|
||||
@@ -289,35 +280,34 @@ struct us_internal_ssl_socket_t;
|
||||
typedef void (*us_internal_on_handshake_t)(
|
||||
struct us_internal_ssl_socket_t *, int success,
|
||||
struct us_bun_verify_error_t verify_error, void *custom_data);
|
||||
|
||||
void us_internal_socket_context_free(int ssl, struct us_socket_context_t *context);
|
||||
|
||||
/* SNI functions */
|
||||
void us_internal_ssl_socket_context_add_server_name(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
const char *hostname_pattern, struct us_socket_context_options_t options,
|
||||
void *user);
|
||||
void us_bun_internal_ssl_socket_context_add_server_name(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
const char *hostname_pattern,
|
||||
struct us_bun_socket_context_options_t options, void *user);
|
||||
void us_internal_ssl_socket_context_remove_server_name(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
const char *hostname_pattern);
|
||||
void us_internal_ssl_socket_context_on_server_name(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
void (*cb)(struct us_internal_ssl_socket_context_t *, const char *));
|
||||
void *
|
||||
us_internal_ssl_socket_get_sni_userdata(us_internal_ssl_socket_r s);
|
||||
us_internal_ssl_socket_get_sni_userdata(struct us_internal_ssl_socket_t *s);
|
||||
void *us_internal_ssl_socket_context_find_server_name_userdata(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
const char *hostname_pattern);
|
||||
|
||||
void *
|
||||
us_internal_ssl_socket_get_native_handle(us_internal_ssl_socket_r s);
|
||||
us_internal_ssl_socket_get_native_handle(struct us_internal_ssl_socket_t *s);
|
||||
void *us_internal_ssl_socket_context_get_native_handle(
|
||||
us_internal_ssl_socket_context_r context);
|
||||
struct us_internal_ssl_socket_context_t *context);
|
||||
struct us_bun_verify_error_t
|
||||
us_internal_verify_error(us_internal_ssl_socket_r s);
|
||||
us_internal_verify_error(struct us_internal_ssl_socket_t *s);
|
||||
struct us_internal_ssl_socket_context_t *us_internal_create_ssl_socket_context(
|
||||
struct us_loop_t *loop, int context_ext_size,
|
||||
struct us_socket_context_options_t options);
|
||||
@@ -327,109 +317,108 @@ us_internal_bun_create_ssl_socket_context(
|
||||
struct us_bun_socket_context_options_t options);
|
||||
|
||||
void us_internal_ssl_socket_context_free(
|
||||
us_internal_ssl_socket_context_r context);
|
||||
struct us_internal_ssl_socket_context_t *context);
|
||||
void us_internal_ssl_socket_context_on_open(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
struct us_internal_ssl_socket_t *(*on_open)(
|
||||
us_internal_ssl_socket_r s, int is_client, char *ip,
|
||||
struct us_internal_ssl_socket_t *s, int is_client, char *ip,
|
||||
int ip_length));
|
||||
|
||||
void us_internal_ssl_socket_context_on_close(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
struct us_internal_ssl_socket_t *(*on_close)(
|
||||
us_internal_ssl_socket_r s, int code, void *reason));
|
||||
struct us_internal_ssl_socket_t *s, int code, void *reason));
|
||||
|
||||
void us_internal_ssl_socket_context_on_data(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
struct us_internal_ssl_socket_t *(*on_data)(
|
||||
us_internal_ssl_socket_r s, char *data, int length));
|
||||
struct us_internal_ssl_socket_t *s, char *data, int length));
|
||||
|
||||
void us_internal_update_handshake(us_internal_ssl_socket_r s);
|
||||
int us_internal_renegotiate(us_internal_ssl_socket_r s);
|
||||
void us_internal_trigger_handshake_callback(us_internal_ssl_socket_r s,
|
||||
void us_internal_update_handshake(struct us_internal_ssl_socket_t *s);
|
||||
int us_internal_renegotiate(struct us_internal_ssl_socket_t *s);
|
||||
void us_internal_trigger_handshake_callback(struct us_internal_ssl_socket_t *s,
|
||||
int success);
|
||||
void us_internal_on_ssl_handshake(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
us_internal_on_handshake_t onhandshake, void *custom_data);
|
||||
|
||||
void us_internal_ssl_socket_context_on_writable(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
struct us_internal_ssl_socket_t *(*on_writable)(
|
||||
us_internal_ssl_socket_r s));
|
||||
struct us_internal_ssl_socket_t *s));
|
||||
|
||||
void us_internal_ssl_socket_context_on_timeout(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
struct us_internal_ssl_socket_t *(*on_timeout)(
|
||||
us_internal_ssl_socket_r s));
|
||||
struct us_internal_ssl_socket_t *s));
|
||||
|
||||
void us_internal_ssl_socket_context_on_long_timeout(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
struct us_internal_ssl_socket_t *(*on_timeout)(
|
||||
us_internal_ssl_socket_r s));
|
||||
struct us_internal_ssl_socket_t *s));
|
||||
|
||||
void us_internal_ssl_socket_context_on_end(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
struct us_internal_ssl_socket_t *(*on_end)(
|
||||
us_internal_ssl_socket_r s));
|
||||
struct us_internal_ssl_socket_t *s));
|
||||
|
||||
void us_internal_ssl_socket_context_on_connect_error(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
struct us_internal_ssl_socket_t *(*on_connect_error)(
|
||||
us_internal_ssl_socket_r s, int code));
|
||||
struct us_internal_ssl_socket_t *s, int code));
|
||||
|
||||
void us_internal_ssl_socket_context_on_socket_connect_error(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
struct us_internal_ssl_socket_t *(*on_socket_connect_error)(
|
||||
us_internal_ssl_socket_r s, int code));
|
||||
struct us_internal_ssl_socket_t *s, int code));
|
||||
|
||||
struct us_listen_socket_t *us_internal_ssl_socket_context_listen(
|
||||
us_internal_ssl_socket_context_r context, const char *host,
|
||||
struct us_internal_ssl_socket_context_t *context, const char *host,
|
||||
int port, int options, int socket_ext_size);
|
||||
|
||||
struct us_listen_socket_t *us_internal_ssl_socket_context_listen_unix(
|
||||
us_internal_ssl_socket_context_r context, const char *path,
|
||||
struct us_internal_ssl_socket_context_t *context, const char *path,
|
||||
size_t pathlen, int options, int socket_ext_size);
|
||||
|
||||
struct us_connecting_socket_t *us_internal_ssl_socket_context_connect(
|
||||
us_internal_ssl_socket_context_r context, const char *host,
|
||||
struct us_internal_ssl_socket_context_t *context, const char *host,
|
||||
int port, int options, int socket_ext_size, int* is_resolved);
|
||||
|
||||
struct us_internal_ssl_socket_t *us_internal_ssl_socket_context_connect_unix(
|
||||
us_internal_ssl_socket_context_r context, const char *server_path,
|
||||
struct us_internal_ssl_socket_context_t *context, const char *server_path,
|
||||
size_t pathlen, int options, int socket_ext_size);
|
||||
|
||||
int us_internal_ssl_socket_write(us_internal_ssl_socket_r s,
|
||||
int us_internal_ssl_socket_write(struct us_internal_ssl_socket_t *s,
|
||||
const char *data, int length, int msg_more);
|
||||
int us_internal_ssl_socket_raw_write(us_internal_ssl_socket_r s,
|
||||
int us_internal_ssl_socket_raw_write(struct us_internal_ssl_socket_t *s,
|
||||
const char *data, int length,
|
||||
int msg_more);
|
||||
|
||||
void us_internal_ssl_socket_timeout(us_internal_ssl_socket_r s,
|
||||
void us_internal_ssl_socket_timeout(struct us_internal_ssl_socket_t *s,
|
||||
unsigned int seconds);
|
||||
void *
|
||||
us_internal_ssl_socket_context_ext(struct us_internal_ssl_socket_context_t *s);
|
||||
struct us_internal_ssl_socket_context_t *
|
||||
us_internal_ssl_socket_get_context(us_internal_ssl_socket_r s);
|
||||
void *us_internal_ssl_socket_ext(us_internal_ssl_socket_r s);
|
||||
us_internal_ssl_socket_get_context(struct us_internal_ssl_socket_t *s);
|
||||
void *us_internal_ssl_socket_ext(struct us_internal_ssl_socket_t *s);
|
||||
void *us_internal_connecting_ssl_socket_ext(struct us_connecting_socket_t *c);
|
||||
int us_internal_ssl_socket_is_shut_down(us_internal_ssl_socket_r s);
|
||||
int us_internal_ssl_socket_is_closed(us_internal_ssl_socket_r s);
|
||||
void us_internal_ssl_socket_shutdown(us_internal_ssl_socket_r s);
|
||||
int us_internal_ssl_socket_is_shut_down(struct us_internal_ssl_socket_t *s);
|
||||
void us_internal_ssl_socket_shutdown(struct us_internal_ssl_socket_t *s);
|
||||
|
||||
struct us_internal_ssl_socket_t *us_internal_ssl_socket_context_adopt_socket(
|
||||
us_internal_ssl_socket_context_r context,
|
||||
us_internal_ssl_socket_r s, int ext_size);
|
||||
struct us_internal_ssl_socket_context_t *context,
|
||||
struct us_internal_ssl_socket_t *s, int ext_size);
|
||||
|
||||
struct us_internal_ssl_socket_t *us_internal_ssl_socket_wrap_with_tls(
|
||||
us_socket_r s, struct us_bun_socket_context_options_t options,
|
||||
struct us_socket_t *s, struct us_bun_socket_context_options_t options,
|
||||
struct us_socket_events_t events, int socket_ext_size);
|
||||
struct us_internal_ssl_socket_context_t *
|
||||
us_internal_create_child_ssl_socket_context(
|
||||
us_internal_ssl_socket_context_r context, int context_ext_size);
|
||||
struct us_internal_ssl_socket_context_t *context, int context_ext_size);
|
||||
struct us_loop_t *us_internal_ssl_socket_context_loop(
|
||||
us_internal_ssl_socket_context_r context);
|
||||
struct us_internal_ssl_socket_context_t *context);
|
||||
struct us_internal_ssl_socket_t *
|
||||
us_internal_ssl_socket_open(us_internal_ssl_socket_r s, int is_client,
|
||||
us_internal_ssl_socket_open(struct us_internal_ssl_socket_t *s, int is_client,
|
||||
char *ip, int ip_length);
|
||||
|
||||
int us_raw_root_certs(struct us_cert_string_t **out);
|
||||
|
||||
@@ -27,7 +27,6 @@ struct us_internal_loop_data_t {
|
||||
int last_write_failed;
|
||||
struct us_socket_context_t *head;
|
||||
struct us_socket_context_t *iterator;
|
||||
struct us_socket_context_t *closed_context_head;
|
||||
char *recv_buf;
|
||||
char *send_buf;
|
||||
void *ssl_data;
|
||||
|
||||
@@ -17,7 +17,6 @@
|
||||
|
||||
#ifndef BSD_H
|
||||
#define BSD_H
|
||||
#pragma once
|
||||
|
||||
// top-most wrapper of bsd-like syscalls
|
||||
|
||||
@@ -26,7 +25,7 @@
|
||||
|
||||
#include "libusockets.h"
|
||||
|
||||
#ifdef _WIN32
|
||||
#ifndef NOMINMAX
|
||||
#define NOMINMAX
|
||||
#endif
|
||||
@@ -35,7 +34,7 @@
|
||||
#pragma comment(lib, "ws2_32.lib")
|
||||
#define SETSOCKOPT_PTR_TYPE const char *
|
||||
#define LIBUS_SOCKET_ERROR INVALID_SOCKET
|
||||
#else /* POSIX */
|
||||
#else
|
||||
#ifndef _GNU_SOURCE
|
||||
#define _GNU_SOURCE
|
||||
#endif
|
||||
@@ -65,76 +64,14 @@ struct bsd_addr_t {
|
||||
#endif
|
||||
|
||||
#ifdef __APPLE__
|
||||
/*
|
||||
* Extended version for sendmsg_x() and recvmsg_x() calls
|
||||
*/
|
||||
struct mmsghdr {
|
||||
// a.k.a msghdr_x
|
||||
struct mmsghdr {
|
||||
struct msghdr msg_hdr;
|
||||
size_t msg_len; /* byte length of buffer in msg_iov */
|
||||
};
|
||||
/*
|
||||
* recvmsg_x() is a system call similar to recvmsg(2) to receive
|
||||
* several datagrams at once in the array of message headers "msgp".
|
||||
*
|
||||
* recvmsg_x() can be used only with protocols handlers that have been specially
|
||||
* modified to support sending and receiving several datagrams at once.
|
||||
*
|
||||
* The size of the array "msgp" is given by the argument "cnt".
|
||||
*
|
||||
* The "flags" arguments supports only the value MSG_DONTWAIT.
|
||||
*
|
||||
* Each member of "msgp" array is of type "struct msghdr_x".
|
||||
*
|
||||
* The "msg_iov" and "msg_iovlen" are input parameters that describe where to
|
||||
* store a datagram in a scatter gather locations of buffers -- see recvmsg(2).
|
||||
* On output the field "msg_datalen" gives the length of the received datagram.
|
||||
*
|
||||
* The field "msg_flags" must be set to zero on input. On output, "msg_flags"
|
||||
* may have MSG_TRUNC set to indicate the trailing portion of the datagram was
|
||||
* discarded because the datagram was larger than the buffer supplied.
|
||||
* recvmsg_x() returns as soon as a datagram is truncated.
|
||||
*
|
||||
* recvmsg_x() may return with less than "cnt" datagrams received based on
|
||||
* the low water mark and the amount of data pending in the socket buffer.
|
||||
*
|
||||
* recvmsg_x() returns the number of datagrams that have been received,
|
||||
* or -1 if an error occurred.
|
||||
*
|
||||
 * NOTE: This is a private system call, the API is subject to change.
|
||||
*/
|
||||
ssize_t recvmsg_x(int s, const struct mmsghdr *msgp, u_int cnt, int flags);
|
||||
|
||||
/*
|
||||
* sendmsg_x() is a system call similar to send(2) to send
|
||||
* several datagrams at once in the array of message headers "msgp".
|
||||
*
|
||||
* sendmsg_x() can be used only with protocols handlers that have been specially
|
||||
* modified to support sending and receiving several datagrams at once.
|
||||
*
|
||||
* The size of the array "msgp" is given by the argument "cnt".
|
||||
*
|
||||
* The "flags" arguments supports only the value MSG_DONTWAIT.
|
||||
*
|
||||
* Each member of "msgp" array is of type "struct msghdr_x".
|
||||
*
|
||||
* The "msg_iov" and "msg_iovlen" are input parameters that specify the
|
||||
* data to be sent in a scatter gather locations of buffers -- see sendmsg(2).
|
||||
*
|
||||
* sendmsg_x() fails with EMSGSIZE if the sum of the length of the datagrams
|
||||
* is greater than the high water mark.
|
||||
*
|
||||
* Address and ancillary data are not supported so the following fields
|
||||
* must be set to zero on input:
|
||||
* "msg_name", "msg_namelen", "msg_control" and "msg_controllen".
|
||||
*
|
||||
* The field "msg_flags" and "msg_datalen" must be set to zero on input.
|
||||
*
|
||||
* sendmsg_x() returns the number of datagrams that have been sent,
|
||||
* or -1 if an error occurred.
|
||||
*
|
||||
 * NOTE: This is a private system call, the API is subject to change.
|
||||
*/
|
||||
ssize_t sendmsg_x(int s, const struct mmsghdr *msgp, u_int cnt, int flags);
|
||||
ssize_t sendmsg_x(int s, struct mmsghdr *msgp, u_int cnt, int flags);
|
||||
ssize_t recvmsg_x(int s, struct mmsghdr *msgp, u_int cnt, int flags);
|
||||
#endif
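For reference, a minimal sketch (not part of this diff) of driving the batched recvmsg_x() declared above; the helper name, the batch size of 8, and the 2 KB buffers are illustrative assumptions, and <string.h> and <sys/socket.h> are assumed to be included:

#ifdef __APPLE__
/* Sketch only: receive up to 8 datagrams in one private recvmsg_x() call. */
static int example_recv_batch(int fd) {
  struct mmsghdr msgs[8];
  struct iovec iovs[8];
  char bufs[8][2048];
  memset(msgs, 0, sizeof(msgs)); /* msg_flags (and msg_len) must be zero on input */
  for (int i = 0; i < 8; i++) {
    iovs[i].iov_base = bufs[i];
    iovs[i].iov_len = sizeof(bufs[i]);
    msgs[i].msg_hdr.msg_iov = &iovs[i];
    msgs[i].msg_hdr.msg_iovlen = 1;
  }
  /* Returns the number of datagrams received, or -1 on error. */
  return (int) recvmsg_x(fd, msgs, 8, MSG_DONTWAIT);
}
#endif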
|
||||
|
||||
struct udp_recvbuf {
|
||||
@@ -158,9 +95,8 @@ struct udp_sendbuf {
|
||||
void **addresses;
|
||||
int num;
|
||||
#else
|
||||
unsigned int has_empty : 1;
|
||||
unsigned int has_addresses : 1;
|
||||
unsigned int num;
|
||||
int num;
|
||||
char has_empty;
|
||||
struct mmsghdr msgvec[];
|
||||
#endif
|
||||
};
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
// clang-format off
|
||||
#pragma once
|
||||
|
||||
#ifndef us_calloc
|
||||
#define us_calloc calloc
|
||||
#endif
|
||||
@@ -35,25 +35,6 @@
|
||||
#ifndef LIBUSOCKETS_H
|
||||
#define LIBUSOCKETS_H
|
||||
|
||||
#ifdef BUN_DEBUG
|
||||
#define nonnull_arg _Nonnull
|
||||
#else
|
||||
#define nonnull_arg
|
||||
#endif
|
||||
|
||||
#ifdef BUN_DEBUG
|
||||
#define nonnull_fn_decl
|
||||
#else
|
||||
#ifndef nonnull_fn_decl
|
||||
#define nonnull_fn_decl __attribute__((nonnull))
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define us_loop_r struct us_loop_t *nonnull_arg
|
||||
#define us_socket_r struct us_socket_t *nonnull_arg
|
||||
#define us_poll_r struct us_poll_t *nonnull_arg
|
||||
#define us_socket_context_r struct us_socket_context_t *nonnull_arg
|
||||
|
||||
|
||||
/* 512kb shared receive buffer */
|
||||
#define LIBUS_RECV_BUFFER_LENGTH 524288
|
||||
@@ -68,7 +49,6 @@
|
||||
#define LIBUS_EXT_ALIGNMENT 16
|
||||
#define ALLOW_SERVER_RENEGOTIATION 0
|
||||
|
||||
#define LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN 0
|
||||
#define LIBUS_SOCKET_CLOSE_CODE_CONNECTION_RESET 1
|
||||
|
||||
/* Define what a socket descriptor is based on platform */
|
||||
@@ -143,11 +123,11 @@ struct us_udp_packet_buffer_t *us_create_udp_packet_buffer();
|
||||
/* Creates a (heavy-weight) UDP socket with a user space ring buffer. Again, this one is heavy weight and
|
||||
 * should be reused. One entire QUIC server can be implemented using only a single UDP socket, so weight
|
||||
* is not a concern as is the case for TCP sockets which are 1-to-1 with TCP connections. */
|
||||
//struct us_udp_socket_t *us_create_udp_socket(us_loop_r loop, void (*read_cb)(struct us_udp_socket_t *), unsigned short port);
|
||||
//struct us_udp_socket_t *us_create_udp_socket(struct us_loop_t *loop, void (*read_cb)(struct us_udp_socket_t *), unsigned short port);
|
||||
|
||||
//struct us_udp_socket_t *us_create_udp_socket(us_loop_r loop, void (*data_cb)(struct us_udp_socket_t *, struct us_udp_packet_buffer_t *, int), void (*drain_cb)(struct us_udp_socket_t *), char *host, unsigned short port);
|
||||
//struct us_udp_socket_t *us_create_udp_socket(struct us_loop_t *loop, void (*data_cb)(struct us_udp_socket_t *, struct us_udp_packet_buffer_t *, int), void (*drain_cb)(struct us_udp_socket_t *), char *host, unsigned short port);
|
||||
|
||||
struct us_udp_socket_t *us_create_udp_socket(us_loop_r loop, void (*data_cb)(struct us_udp_socket_t *, void *, int), void (*drain_cb)(struct us_udp_socket_t *), void (*close_cb)(struct us_udp_socket_t *), const char *host, unsigned short port, void *user);
|
||||
struct us_udp_socket_t *us_create_udp_socket(struct us_loop_t *loop, void (*data_cb)(struct us_udp_socket_t *, void *, int), void (*drain_cb)(struct us_udp_socket_t *), void (*close_cb)(struct us_udp_socket_t *), const char *host, unsigned short port, void *user);
|
||||
|
||||
void us_udp_socket_close(struct us_udp_socket_t *s);
|
||||
|
||||
@@ -160,7 +140,7 @@ int us_udp_socket_bind(struct us_udp_socket_t *s, const char *hostname, unsigned
|
||||
/* Public interfaces for timers */
|
||||
|
||||
/* Create a new high precision, low performance timer. May fail and return null */
|
||||
struct us_timer_t *us_create_timer(us_loop_r loop, int fallthrough, unsigned int ext_size);
|
||||
struct us_timer_t *us_create_timer(struct us_loop_t *loop, int fallthrough, unsigned int ext_size);
|
||||
|
||||
/* Returns user data extension for this timer */
|
||||
void *us_timer_ext(struct us_timer_t *timer);
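A minimal usage sketch (not in the diff; us_timer_set() is declared elsewhere in this header, and the callback name and 1000 ms delay are illustrative):

static void example_on_timer(struct us_timer_t *t) {
  /* fires once; us_timer_ext(t) returns the user data extension */
}
static void example_start_timer(struct us_loop_t *loop) {
  struct us_timer_t *t = us_create_timer(loop, 0 /* keeps the loop alive */, 0 /* ext_size */);
  if (t) { /* creation may fail and return null */
    us_timer_set(t, example_on_timer, 1000 /* ms */, 0 /* no repeat */);
  }
}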
|
||||
@@ -194,17 +174,17 @@ struct us_bun_verify_error_t {
|
||||
};
|
||||
|
||||
struct us_socket_events_t {
|
||||
struct us_socket_t *(*on_open)(us_socket_r, int is_client, char *ip, int ip_length);
|
||||
struct us_socket_t *(*on_data)(us_socket_r, char *data, int length);
|
||||
struct us_socket_t *(*on_writable)(us_socket_r);
|
||||
struct us_socket_t *(*on_close)(us_socket_r, int code, void *reason);
|
||||
struct us_socket_t *(*on_open)(struct us_socket_t *, int is_client, char *ip, int ip_length);
|
||||
struct us_socket_t *(*on_data)(struct us_socket_t *, char *data, int length);
|
||||
struct us_socket_t *(*on_writable)(struct us_socket_t *);
|
||||
struct us_socket_t *(*on_close)(struct us_socket_t *, int code, void *reason);
|
||||
//void (*on_timeout)(struct us_socket_context *);
|
||||
struct us_socket_t *(*on_timeout)(us_socket_r);
|
||||
struct us_socket_t *(*on_long_timeout)(us_socket_r);
|
||||
struct us_socket_t *(*on_end)(us_socket_r);
|
||||
struct us_socket_t *(*on_timeout)(struct us_socket_t *);
|
||||
struct us_socket_t *(*on_long_timeout)(struct us_socket_t *);
|
||||
struct us_socket_t *(*on_end)(struct us_socket_t *);
|
||||
struct us_connecting_socket_t *(*on_connect_error)(struct us_connecting_socket_t *, int code);
|
||||
struct us_socket_t *(*on_connecting_socket_error)(us_socket_r, int code);
|
||||
void (*on_handshake)(us_socket_r, int success, struct us_bun_verify_error_t verify_error, void* custom_data);
|
||||
struct us_socket_t *(*on_connecting_socket_error)(struct us_socket_t *, int code);
|
||||
void (*on_handshake)(struct us_socket_t*, int success, struct us_bun_verify_error_t verify_error, void* custom_data);
|
||||
};
|
||||
|
||||
|
||||
@@ -230,70 +210,67 @@ struct us_bun_socket_context_options_t {
|
||||
};
|
||||
|
||||
/* Return 15-bit timestamp for this context */
|
||||
unsigned short us_socket_context_timestamp(int ssl, us_socket_context_r context) nonnull_fn_decl;
|
||||
unsigned short us_socket_context_timestamp(int ssl, struct us_socket_context_t *context);
|
||||
|
||||
/* Adds SNI domain and cert in asn1 format */
|
||||
void us_socket_context_add_server_name(int ssl, us_socket_context_r context, const char *hostname_pattern, struct us_socket_context_options_t options, void *user);
|
||||
void us_bun_socket_context_add_server_name(int ssl, us_socket_context_r context, const char *hostname_pattern, struct us_bun_socket_context_options_t options, void *user);
|
||||
void us_socket_context_remove_server_name(int ssl, us_socket_context_r context, const char *hostname_pattern);
|
||||
void us_socket_context_on_server_name(int ssl, us_socket_context_r context, void (*cb)(us_socket_context_r context, const char *hostname));
|
||||
void *us_socket_server_name_userdata(int ssl, us_socket_r s);
|
||||
void *us_socket_context_find_server_name_userdata(int ssl, us_socket_context_r context, const char *hostname_pattern);
|
||||
void us_socket_context_add_server_name(int ssl, struct us_socket_context_t *context, const char *hostname_pattern, struct us_socket_context_options_t options, void *user);
|
||||
void us_bun_socket_context_add_server_name(int ssl, struct us_socket_context_t *context, const char *hostname_pattern, struct us_bun_socket_context_options_t options, void *user);
|
||||
void us_socket_context_remove_server_name(int ssl, struct us_socket_context_t *context, const char *hostname_pattern);
|
||||
void us_socket_context_on_server_name(int ssl, struct us_socket_context_t *context, void (*cb)(struct us_socket_context_t *, const char *hostname));
|
||||
void *us_socket_server_name_userdata(int ssl, struct us_socket_t *s);
|
||||
void *us_socket_context_find_server_name_userdata(int ssl, struct us_socket_context_t *context, const char *hostname_pattern);
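A usage sketch (not in the diff; the context argument, hostname pattern, and file paths are illustrative assumptions):

static void example_add_sni(struct us_socket_context_t *ctx) {
  struct us_socket_context_options_t sni_options = {0};
  sni_options.key_file_name = "sni-key.pem";
  sni_options.cert_file_name = "sni-cert.pem";
  us_socket_context_add_server_name(1 /* ssl */, ctx, "*.example.com", sni_options, NULL /* user */);
}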
|
||||
|
||||
/* Returns the underlying SSL native handle, such as SSL_CTX or nullptr */
|
||||
void *us_socket_context_get_native_handle(int ssl, us_socket_context_r context);
|
||||
void *us_socket_context_get_native_handle(int ssl, struct us_socket_context_t *context);
|
||||
|
||||
/* A socket context holds shared callbacks and user data extension for associated sockets */
|
||||
struct us_socket_context_t *us_create_socket_context(int ssl, us_loop_r loop,
|
||||
int ext_size, struct us_socket_context_options_t options) nonnull_fn_decl;
|
||||
struct us_socket_context_t *us_create_socket_context(int ssl, struct us_loop_t *loop,
|
||||
int ext_size, struct us_socket_context_options_t options);
|
||||
struct us_socket_context_t *us_create_bun_socket_context(int ssl, struct us_loop_t *loop,
|
||||
int ext_size, struct us_bun_socket_context_options_t options) nonnull_fn_decl;
|
||||
|
||||
/* Delete resources allocated at creation time (will call unref now and only free when ref count == 0). */
|
||||
void us_socket_context_free(int ssl, us_socket_context_r context) nonnull_fn_decl;
|
||||
void us_socket_context_ref(int ssl, us_socket_context_r context) nonnull_fn_decl;
|
||||
void us_socket_context_unref(int ssl, us_socket_context_r context) nonnull_fn_decl;
|
||||
int ext_size, struct us_bun_socket_context_options_t options);
|
||||
|
||||
/* Delete resources allocated at creation time. */
|
||||
void us_socket_context_free(int ssl, struct us_socket_context_t *context);
|
||||
struct us_bun_verify_error_t us_socket_verify_error(int ssl, struct us_socket_t *context);
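A minimal sketch (not in the diff) of creating a plain TCP context on an existing loop; the zero extension size and default options are illustrative:

static struct us_socket_context_t *example_make_context(struct us_loop_t *loop) {
  struct us_socket_context_options_t options = {0};
  return us_create_socket_context(0 /* ssl */, loop, 0 /* ext_size */, options);
}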
|
||||
/* Setters of various async callbacks */
|
||||
void us_socket_context_on_open(int ssl, us_socket_context_r context,
|
||||
struct us_socket_t *(*on_open)(us_socket_r s, int is_client, char *ip, int ip_length));
|
||||
void us_socket_context_on_close(int ssl, us_socket_context_r context,
|
||||
struct us_socket_t *(*on_close)(us_socket_r s, int code, void *reason));
|
||||
void us_socket_context_on_data(int ssl, us_socket_context_r context,
|
||||
struct us_socket_t *(*on_data)(us_socket_r s, char *data, int length));
|
||||
void us_socket_context_on_writable(int ssl, us_socket_context_r context,
|
||||
struct us_socket_t *(*on_writable)(us_socket_r s));
|
||||
void us_socket_context_on_timeout(int ssl, us_socket_context_r context,
|
||||
struct us_socket_t *(*on_timeout)(us_socket_r s));
|
||||
void us_socket_context_on_long_timeout(int ssl, us_socket_context_r context,
|
||||
struct us_socket_t *(*on_timeout)(us_socket_r s));
|
||||
void us_socket_context_on_open(int ssl, struct us_socket_context_t *context,
|
||||
struct us_socket_t *(*on_open)(struct us_socket_t *s, int is_client, char *ip, int ip_length));
|
||||
void us_socket_context_on_close(int ssl, struct us_socket_context_t *context,
|
||||
struct us_socket_t *(*on_close)(struct us_socket_t *s, int code, void *reason));
|
||||
void us_socket_context_on_data(int ssl, struct us_socket_context_t *context,
|
||||
struct us_socket_t *(*on_data)(struct us_socket_t *s, char *data, int length));
|
||||
void us_socket_context_on_writable(int ssl, struct us_socket_context_t *context,
|
||||
struct us_socket_t *(*on_writable)(struct us_socket_t *s));
|
||||
void us_socket_context_on_timeout(int ssl, struct us_socket_context_t *context,
|
||||
struct us_socket_t *(*on_timeout)(struct us_socket_t *s));
|
||||
void us_socket_context_on_long_timeout(int ssl, struct us_socket_context_t *context,
|
||||
struct us_socket_t *(*on_timeout)(struct us_socket_t *s));
|
||||
/* This one is only used for when a connecting socket fails in a late stage. */
|
||||
void us_socket_context_on_connect_error(int ssl, us_socket_context_r context,
|
||||
void us_socket_context_on_connect_error(int ssl, struct us_socket_context_t *context,
|
||||
struct us_connecting_socket_t *(*on_connect_error)(struct us_connecting_socket_t *s, int code));
|
||||
void us_socket_context_on_socket_connect_error(int ssl, us_socket_context_r context,
|
||||
struct us_socket_t *(*on_connect_error)(us_socket_r s, int code));
|
||||
void us_socket_context_on_socket_connect_error(int ssl, struct us_socket_context_t *context,
|
||||
struct us_socket_t *(*on_connect_error)(struct us_socket_t *s, int code));
|
||||
|
||||
void us_socket_context_on_handshake(int ssl, us_socket_context_r context, void (*on_handshake)(struct us_socket_t *, int success, struct us_bun_verify_error_t verify_error, void* custom_data), void* custom_data);
|
||||
void us_socket_context_on_handshake(int ssl, struct us_socket_context_t *context, void (*on_handshake)(struct us_socket_t *, int success, struct us_bun_verify_error_t verify_error, void* custom_data), void* custom_data);
|
||||
|
||||
/* Emitted when a socket has been half-closed */
|
||||
void us_socket_context_on_end(int ssl, us_socket_context_r context, struct us_socket_t *(*on_end)(us_socket_r s));
|
||||
void us_socket_context_on_end(int ssl, struct us_socket_context_t *context, struct us_socket_t *(*on_end)(struct us_socket_t *s));
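A sketch (not in the diff) of wiring the setters above into a minimal echo behaviour on a non-SSL context; the handler names are illustrative:

static struct us_socket_t *example_on_open(struct us_socket_t *s, int is_client, char *ip, int ip_length) {
  return s;
}
static struct us_socket_t *example_on_data(struct us_socket_t *s, char *data, int length) {
  us_socket_write(0, s, data, length, 0); /* echo the payload back */
  return s;
}
static struct us_socket_t *example_on_close(struct us_socket_t *s, int code, void *reason) {
  return s;
}
static void example_wire_callbacks(struct us_socket_context_t *ctx) {
  us_socket_context_on_open(0, ctx, example_on_open);
  us_socket_context_on_data(0, ctx, example_on_data);
  us_socket_context_on_close(0, ctx, example_on_close);
}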
|
||||
|
||||
/* Returns user data extension for this socket context */
|
||||
void *us_socket_context_ext(int ssl, us_socket_context_r context);
|
||||
void *us_socket_context_ext(int ssl, struct us_socket_context_t *context);
|
||||
|
||||
/* Closes all open sockets, including listen sockets. Does not invalidate the socket context. */
|
||||
void us_socket_context_close(int ssl, us_socket_context_r context);
|
||||
void us_socket_context_close(int ssl, struct us_socket_context_t *context);
|
||||
|
||||
/* Listen for connections. Acts as the main driving cog in a server. Will call set async callbacks. */
|
||||
struct us_listen_socket_t *us_socket_context_listen(int ssl, us_socket_context_r context,
|
||||
struct us_listen_socket_t *us_socket_context_listen(int ssl, struct us_socket_context_t *context,
|
||||
const char *host, int port, int options, int socket_ext_size);
|
||||
|
||||
struct us_listen_socket_t *us_socket_context_listen_unix(int ssl, us_socket_context_r context,
|
||||
struct us_listen_socket_t *us_socket_context_listen_unix(int ssl, struct us_socket_context_t *context,
|
||||
const char *path, size_t pathlen, int options, int socket_ext_size);
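A sketch (not in the diff) of starting to accept plain TCP connections; the port and the NULL host (assumed here to mean all interfaces) are illustrative:

static struct us_listen_socket_t *example_listen(struct us_socket_context_t *ctx) {
  return us_socket_context_listen(0 /* ssl */, ctx, NULL /* host */, 3000, 0 /* options */, 0 /* socket_ext_size */);
}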
|
||||
|
||||
/* listen_socket.c/.h */
|
||||
void us_listen_socket_close(int ssl, struct us_listen_socket_t *ls) nonnull_fn_decl;
|
||||
void us_listen_socket_close(int ssl, struct us_listen_socket_t *ls);
|
||||
|
||||
/*
|
||||
Returns one of
|
||||
@@ -304,156 +281,156 @@ void us_listen_socket_close(int ssl, struct us_listen_socket_t *ls) nonnull_fn_d
|
||||
This is the slow path where we must either go through DNS resolution or create multiple sockets
|
||||
per the happy eyeballs algorithm
|
||||
*/
|
||||
void *us_socket_context_connect(int ssl, struct us_socket_context_t * nonnull_arg context,
|
||||
const char *host, int port, int options, int socket_ext_size, int *is_connecting) __attribute__((nonnull(2)));
|
||||
void *us_socket_context_connect(int ssl, struct us_socket_context_t *context,
|
||||
const char *host, int port, int options, int socket_ext_size, int *is_connecting);
|
||||
|
||||
struct us_socket_t *us_socket_context_connect_unix(int ssl, us_socket_context_r context,
|
||||
const char *server_path, size_t pathlen, int options, int socket_ext_size) __attribute__((nonnull(2)));
|
||||
struct us_socket_t *us_socket_context_connect_unix(int ssl, struct us_socket_context_t *context,
|
||||
const char *server_path, size_t pathlen, int options, int socket_ext_size);
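A sketch (not in the diff) of the connect call; the host and port are illustrative, and reading is_connecting as "a us_connecting_socket_t was returned because resolution is still pending" is an assumption based on the comment above:

static void example_connect(struct us_socket_context_t *ctx) {
  int is_connecting = 0;
  void *result = us_socket_context_connect(0 /* ssl */, ctx, "example.com", 443, 0, 0, &is_connecting);
  if (result && is_connecting) {
    /* assumed: a struct us_connecting_socket_t *, DNS / happy-eyeballs still in flight */
  } else if (result) {
    /* assumed: a struct us_socket_t * already connecting on a concrete descriptor */
  }
}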
|
||||
|
||||
/* Is this socket established? Can be used to check if a connecting socket has fired the on_open event yet.
|
||||
* Can also be used to determine if a socket is a listen_socket or not, but you probably know that already. */
|
||||
int us_socket_is_established(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
int us_socket_is_established(int ssl, struct us_socket_t *s);
|
||||
|
||||
void us_connecting_socket_free(struct us_connecting_socket_t *c) nonnull_fn_decl;
|
||||
void us_connecting_socket_free(struct us_connecting_socket_t *c);
|
||||
|
||||
/* Cancel a connecting socket. Can be used together with us_socket_timeout to limit connection times.
|
||||
* Entirely destroys the socket - this function works like us_socket_close but does not trigger on_close event since
|
||||
* you never got the on_open event first. */
|
||||
void us_connecting_socket_close(int ssl, struct us_connecting_socket_t *c) nonnull_fn_decl;
|
||||
void us_connecting_socket_close(int ssl, struct us_connecting_socket_t *c);
|
||||
|
||||
/* Returns the loop for this socket context. */
|
||||
struct us_loop_t *us_socket_context_loop(int ssl, us_socket_context_r context) nonnull_fn_decl __attribute((returns_nonnull));
|
||||
struct us_loop_t *us_socket_context_loop(int ssl, struct us_socket_context_t *context);
|
||||
|
||||
/* Invalidates passed socket, returning a new resized socket which belongs to a different socket context.
|
||||
* Used mainly for "socket upgrades" such as when transitioning from HTTP to WebSocket. */
|
||||
struct us_socket_t *us_socket_context_adopt_socket(int ssl, us_socket_context_r context, us_socket_r s, int ext_size);
|
||||
struct us_socket_t *us_socket_context_adopt_socket(int ssl, struct us_socket_context_t *context, struct us_socket_t *s, int ext_size);
|
||||
|
||||
/* Create a child socket context which acts much like its own socket context with its own callbacks yet still relies on the
|
||||
* parent socket context for some shared resources. Child socket contexts should be used together with socket adoptions and nothing else. */
|
||||
struct us_socket_context_t *us_create_child_socket_context(int ssl, us_socket_context_r context, int context_ext_size);
|
||||
struct us_socket_context_t *us_create_child_socket_context(int ssl, struct us_socket_context_t *context, int context_ext_size);
|
||||
|
||||
/* Public interfaces for loops */
|
||||
|
||||
/* Returns a new event loop with user data extension */
|
||||
struct us_loop_t *us_create_loop(void *hint, void (*wakeup_cb)(us_loop_r loop),
|
||||
void (*pre_cb)(us_loop_r loop), void (*post_cb)(us_loop_r loop), unsigned int ext_size);
|
||||
struct us_loop_t *us_create_loop(void *hint, void (*wakeup_cb)(struct us_loop_t *loop),
|
||||
void (*pre_cb)(struct us_loop_t *loop), void (*post_cb)(struct us_loop_t *loop), unsigned int ext_size);
|
||||
|
||||
/* Frees the loop immediately */
|
||||
void us_loop_free(us_loop_r loop) nonnull_fn_decl;
|
||||
void us_loop_free(struct us_loop_t *loop);
|
||||
|
||||
/* Returns the loop user data extension */
|
||||
void *us_loop_ext(us_loop_r loop) nonnull_fn_decl;
|
||||
void *us_loop_ext(struct us_loop_t *loop);
|
||||
|
||||
/* Blocks the calling thread and drives the event loop until no more non-fallthrough polls are scheduled */
|
||||
void us_loop_run(us_loop_r loop) nonnull_fn_decl;
|
||||
void us_loop_run(struct us_loop_t *loop);
|
||||
|
||||
|
||||
/* Signals the loop from any thread to wake up and execute its wakeup handler from the loop's own running thread.
|
||||
* This is the only fully thread-safe function and serves as the basis for thread safety */
|
||||
void us_wakeup_loop(us_loop_r loop) nonnull_fn_decl;
|
||||
void us_wakeup_loop(struct us_loop_t *loop);
|
||||
|
||||
/* Hook up timers in existing loop */
|
||||
void us_loop_integrate(us_loop_r loop) nonnull_fn_decl;
|
||||
void us_loop_integrate(struct us_loop_t *loop);
|
||||
|
||||
/* Returns the loop iteration number */
|
||||
long long us_loop_iteration_number(us_loop_r loop) nonnull_fn_decl;
|
||||
long long us_loop_iteration_number(struct us_loop_t *loop);
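A minimal sketch (not in the diff) of the loop lifecycle; the no-op callbacks are illustrative:

static void example_noop(struct us_loop_t *loop) { (void) loop; }
static void example_run(void) {
  struct us_loop_t *loop = us_create_loop(NULL /* hint */, example_noop, example_noop, example_noop, 0 /* ext_size */);
  /* create contexts and listen sockets here, then: */
  us_loop_run(loop);  /* blocks until nothing non-fallthrough remains */
  us_loop_free(loop);
}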
|
||||
|
||||
/* Public interfaces for polls */
|
||||
|
||||
/* A fallthrough poll does not keep the loop running, it falls through */
|
||||
struct us_poll_t *us_create_poll(us_loop_r loop, int fallthrough, unsigned int ext_size);
|
||||
struct us_poll_t *us_create_poll(struct us_loop_t *loop, int fallthrough, unsigned int ext_size);
|
||||
|
||||
/* After stopping a poll you must manually free the memory */
|
||||
void us_poll_free(us_poll_r p, struct us_loop_t *loop);
|
||||
void us_poll_free(struct us_poll_t *p, struct us_loop_t *loop);
|
||||
|
||||
/* Associate this poll with a socket descriptor and poll type */
|
||||
void us_poll_init(us_poll_r p, LIBUS_SOCKET_DESCRIPTOR fd, int poll_type);
|
||||
void us_poll_init(struct us_poll_t *p, LIBUS_SOCKET_DESCRIPTOR fd, int poll_type);
|
||||
|
||||
/* Start, change and stop polling for events */
|
||||
void us_poll_start(us_poll_r p, us_loop_r loop, int events) nonnull_fn_decl;
|
||||
void us_poll_change(us_poll_r p, us_loop_r loop, int events) nonnull_fn_decl;
|
||||
void us_poll_stop(us_poll_r p, struct us_loop_t *loop) nonnull_fn_decl;
|
||||
void us_poll_start(struct us_poll_t *p, struct us_loop_t *loop, int events);
|
||||
void us_poll_change(struct us_poll_t *p, struct us_loop_t *loop, int events);
|
||||
void us_poll_stop(struct us_poll_t *p, struct us_loop_t *loop);
|
||||
|
||||
/* Return what events we are polling for */
|
||||
int us_poll_events(us_poll_r p) nonnull_fn_decl;
|
||||
int us_poll_events(struct us_poll_t *p);
|
||||
|
||||
/* Returns the user data extension of this poll */
|
||||
void *us_poll_ext(us_poll_r p) nonnull_fn_decl;
|
||||
void *us_poll_ext(struct us_poll_t *p);
|
||||
|
||||
/* Get associated socket descriptor from a poll */
|
||||
LIBUS_SOCKET_DESCRIPTOR us_poll_fd(us_poll_r p) nonnull_fn_decl;
|
||||
LIBUS_SOCKET_DESCRIPTOR us_poll_fd(struct us_poll_t *p);
|
||||
|
||||
/* Resize an active poll */
|
||||
struct us_poll_t *us_poll_resize(us_poll_r p, us_loop_r loop, unsigned int ext_size) nonnull_fn_decl;
|
||||
struct us_poll_t *us_poll_resize(struct us_poll_t *p, struct us_loop_t *loop, unsigned int ext_size);
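A sketch (not in the diff) of the poll lifecycle around an existing descriptor; the poll type of 0 and LIBUS_SOCKET_READABLE are assumptions about constants defined elsewhere in libusockets.h:

static struct us_poll_t *example_watch_fd(struct us_loop_t *loop, LIBUS_SOCKET_DESCRIPTOR fd) {
  struct us_poll_t *p = us_create_poll(loop, 0 /* not fallthrough */, 0 /* ext_size */);
  us_poll_init(p, fd, 0 /* poll type */);
  us_poll_start(p, loop, LIBUS_SOCKET_READABLE);
  return p; /* later: us_poll_stop(p, loop); us_poll_free(p, loop); */
}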
|
||||
|
||||
/* Public interfaces for sockets */
|
||||
|
||||
/* Returns the underlying native handle for a socket, such as SSL or file descriptor.
|
||||
 * In the case of a file descriptor, the returned pointer value is the fd itself. */
|
||||
void *us_socket_get_native_handle(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
void *us_socket_get_native_handle(int ssl, struct us_socket_t *s);
|
||||
|
||||
/* Write up to length bytes of data. Returns actual bytes written.
|
||||
* Will call the on_writable callback of active socket context on failure to write everything off in one go.
|
||||
* Set hint msg_more if you have more immediate data to write. */
|
||||
int us_socket_write(int ssl, us_socket_r s, const char * nonnull_arg data, int length, int msg_more) nonnull_fn_decl;
|
||||
int us_socket_write(int ssl, struct us_socket_t *s, const char *data, int length, int msg_more);
|
||||
|
||||
/* Special path for non-SSL sockets. Used to send header and payload in one go. Works like us_socket_write. */
|
||||
int us_socket_write2(int ssl, us_socket_r s, const char *header, int header_length, const char *payload, int payload_length) nonnull_fn_decl;
|
||||
int us_socket_write2(int ssl, struct us_socket_t *s, const char *header, int header_length, const char *payload, int payload_length);
|
||||
|
||||
/* Set a low precision, high performance timer on a socket. A socket can only have one single active timer
|
||||
 * at any given point in time. Will remove any previously set timer. */
|
||||
void us_socket_timeout(int ssl, us_socket_r s, unsigned int seconds) nonnull_fn_decl;
|
||||
void us_socket_timeout(int ssl, struct us_socket_t *s, unsigned int seconds);
|
||||
|
||||
/* Set a low precision, high performance timer on a socket. Suitable for per-minute precision. */
|
||||
void us_socket_long_timeout(int ssl, us_socket_r s, unsigned int minutes) nonnull_fn_decl;
|
||||
void us_socket_long_timeout(int ssl, struct us_socket_t *s, unsigned int minutes);
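A sketch (not in the diff): write a small payload and arm an idle timeout; the greeting text and the 30 second value are illustrative:

static void example_greet(struct us_socket_t *s) {
  const char greeting[] = "hello\n";
  int written = us_socket_write(0 /* ssl */, s, greeting, (int) sizeof(greeting) - 1, 0 /* msg_more */);
  (void) written; /* if the write was short, on_writable fires once the socket drains */
  us_socket_timeout(0, s, 30 /* seconds */);
}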
|
||||
|
||||
/* Return the user data extension of this socket */
|
||||
void *us_socket_ext(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
void *us_connecting_socket_ext(int ssl, struct us_connecting_socket_t *c) nonnull_fn_decl;
|
||||
void *us_socket_ext(int ssl, struct us_socket_t *s);
|
||||
void *us_connecting_socket_ext(int ssl, struct us_connecting_socket_t *c);
|
||||
|
||||
/* Return the socket context of this socket */
|
||||
struct us_socket_context_t *us_socket_context(int ssl, us_socket_r s) nonnull_fn_decl __attribute__((returns_nonnull));
|
||||
struct us_socket_context_t *us_socket_context(int ssl, struct us_socket_t *s);
|
||||
|
||||
/* Withdraw any msg_more status and flush any pending data */
|
||||
void us_socket_flush(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
void us_socket_flush(int ssl, struct us_socket_t *s);
|
||||
|
||||
/* Shuts down the connection by sending FIN and/or close_notify */
|
||||
void us_socket_shutdown(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
void us_socket_shutdown(int ssl, struct us_socket_t *s);
|
||||
|
||||
/* Shuts down the connection in terms of read, meaning next event loop
|
||||
* iteration will catch the socket being closed. Can be used to defer closing
|
||||
* to next event loop iteration. */
|
||||
void us_socket_shutdown_read(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
void us_socket_shutdown_read(int ssl, struct us_socket_t *s);
|
||||
|
||||
/* Returns whether the socket has been shut down or not */
|
||||
int us_socket_is_shut_down(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
int us_socket_is_shut_down(int ssl, struct us_socket_t *s);
|
||||
|
||||
/* Returns whether this socket has been closed. Only valid if memory has not yet been released. */
|
||||
int us_socket_is_closed(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
int us_socket_is_closed(int ssl, struct us_socket_t *s);
|
||||
|
||||
/* Immediately closes the socket */
|
||||
struct us_socket_t *us_socket_close(int ssl, us_socket_r s, int code, void *reason) __attribute__((nonnull(2)));
|
||||
struct us_socket_t *us_socket_close(int ssl, struct us_socket_t *s, int code, void *reason);
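A sketch (not in the diff) contrasting graceful shutdown with an immediate close; the close code is the constant defined earlier in this header:

static struct us_socket_t *example_finish(struct us_socket_t *s, int graceful) {
  if (graceful) {
    us_socket_shutdown(0, s); /* sends FIN / close_notify; the peer's FIN triggers the actual close */
    return s;
  }
  return us_socket_close(0, s, LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN, NULL /* reason */);
}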
|
||||
|
||||
/* Returns local port or -1 on failure. */
|
||||
int us_socket_local_port(int ssl, us_socket_r s) nonnull_fn_decl;
|
||||
int us_socket_local_port(int ssl, struct us_socket_t *s);
|
||||
|
||||
/* Copy remote (IP) address of socket, or fail with zero length. */
|
||||
void us_socket_remote_address(int ssl, us_socket_r s, char *nonnull_arg buf, int *nonnull_arg length) nonnull_fn_decl;
|
||||
void us_socket_local_address(int ssl, us_socket_r s, char *nonnull_arg buf, int *nonnull_arg length) nonnull_fn_decl;
|
||||
void us_socket_remote_address(int ssl, struct us_socket_t *s, char *buf, int *length);
|
||||
void us_socket_local_address(int ssl, struct us_socket_t *s, char *buf, int *length);
|
||||
|
||||
/* Bun extras */
|
||||
struct us_socket_t *us_socket_pair(struct us_socket_context_t *ctx, int socket_ext_size, LIBUS_SOCKET_DESCRIPTOR* fds);
|
||||
struct us_socket_t *us_socket_from_fd(struct us_socket_context_t *ctx, int socket_ext_size, LIBUS_SOCKET_DESCRIPTOR fd);
|
||||
struct us_socket_t *us_socket_attach(int ssl, LIBUS_SOCKET_DESCRIPTOR client_fd, struct us_socket_context_t *ctx, int flags, int socket_ext_size);
|
||||
struct us_socket_t *us_socket_wrap_with_tls(int ssl, us_socket_r s, struct us_bun_socket_context_options_t options, struct us_socket_events_t events, int socket_ext_size);
|
||||
int us_socket_raw_write(int ssl, us_socket_r s, const char *data, int length, int msg_more);
|
||||
struct us_socket_t *us_socket_wrap_with_tls(int ssl, struct us_socket_t *s, struct us_bun_socket_context_options_t options, struct us_socket_events_t events, int socket_ext_size);
|
||||
int us_socket_raw_write(int ssl, struct us_socket_t *s, const char *data, int length, int msg_more);
|
||||
struct us_socket_t* us_socket_open(int ssl, struct us_socket_t * s, int is_client, char* ip, int ip_length);
|
||||
int us_raw_root_certs(struct us_cert_string_t**out);
|
||||
unsigned int us_get_remote_address_info(char *buf, us_socket_r s, const char **dest, int *port, int *is_ipv6);
|
||||
int us_socket_get_error(int ssl, us_socket_r s);
|
||||
unsigned int us_get_remote_address_info(char *buf, struct us_socket_t *s, const char **dest, int *port, int *is_ipv6);
|
||||
int us_socket_get_error(int ssl, struct us_socket_t *s);
|
||||
|
||||
void us_socket_ref(us_socket_r s);
|
||||
void us_socket_unref(us_socket_r s);
|
||||
void us_socket_ref(struct us_socket_t *s);
|
||||
void us_socket_unref(struct us_socket_t *s);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
||||
@@ -47,8 +47,6 @@ void us_internal_loop_data_init(struct us_loop_t *loop, void (*wakeup_cb)(struct
|
||||
loop->data.parent_ptr = 0;
|
||||
loop->data.parent_tag = 0;
|
||||
|
||||
loop->data.closed_context_head = 0;
|
||||
|
||||
loop->data.wakeup_async = us_internal_create_async(loop, 1, 0);
|
||||
us_internal_async_set(loop->data.wakeup_async, (void (*)(struct us_internal_async *)) wakeup_cb);
|
||||
}
|
||||
@@ -236,15 +234,6 @@ void us_internal_free_closed_sockets(struct us_loop_t *loop) {
|
||||
loop->data.closed_connecting_head = 0;
|
||||
}
|
||||
|
||||
void us_internal_free_closed_contexts(struct us_loop_t *loop) {
|
||||
for (struct us_socket_context_t *ctx = loop->data.closed_context_head; ctx; ) {
|
||||
struct us_socket_context_t *next = ctx->next;
|
||||
us_free(ctx);
|
||||
ctx = next;
|
||||
}
|
||||
loop->data.closed_context_head = 0;
|
||||
}
|
||||
|
||||
void sweep_timer_cb(struct us_internal_callback_t *cb) {
|
||||
us_internal_timer_sweep(cb->loop);
|
||||
}
|
||||
@@ -264,7 +253,6 @@ void us_internal_loop_pre(struct us_loop_t *loop) {
|
||||
void us_internal_loop_post(struct us_loop_t *loop) {
|
||||
us_internal_handle_dns_results(loop);
|
||||
us_internal_free_closed_sockets(loop);
|
||||
us_internal_free_closed_contexts(loop);
|
||||
loop->data.post_cb(loop);
|
||||
}
|
||||
|
||||
@@ -368,8 +356,7 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int events)
|
||||
s->context->loop->data.low_prio_budget--; /* Still having budget for this iteration - do normal processing */
|
||||
} else {
|
||||
us_poll_change(&s->p, us_socket_context(0, s)->loop, us_poll_events(&s->p) & LIBUS_SOCKET_WRITABLE);
|
||||
us_socket_context_ref(0, s->context);
|
||||
us_internal_socket_context_unlink_socket(0, s->context, s);
|
||||
us_internal_socket_context_unlink_socket(s->context, s);
|
||||
|
||||
/* Link this socket to the low-priority queue - we use a LIFO queue, to prioritize newer clients that are
|
||||
 * maybe not already timed out - sounds unfair, but works better in real life with smaller client timeouts
|
||||
@@ -424,8 +411,7 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int events)
|
||||
if (us_socket_is_shut_down(0, s)) {
|
||||
/* We got FIN back after sending it */
|
||||
/* Todo: We should give "CLEAN SHUTDOWN" as reason here */
|
||||
s = us_socket_close(0, s, LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN, NULL);
|
||||
return;
|
||||
s = us_socket_close(0, s, 0, NULL);
|
||||
} else {
|
||||
/* We got FIN, so stop polling for readable */
|
||||
us_poll_change(&s->p, us_socket_context(0, s)->loop, us_poll_events(&s->p) & LIBUS_SOCKET_WRITABLE);
|
||||
@@ -433,7 +419,7 @@ void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int events)
|
||||
}
|
||||
} else if (length == LIBUS_SOCKET_ERROR && !bsd_would_block()) {
|
||||
/* Todo: decide also here what kind of reason we should give */
|
||||
s = us_socket_close(0, s, LIBUS_SOCKET_CLOSE_CODE_CLEAN_SHUTDOWN, NULL);
|
||||
s = us_socket_close(0, s, 0, NULL);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
@@ -21,7 +21,6 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#ifndef WIN32
|
||||
#include <fcntl.h>
|
||||
@@ -114,9 +113,6 @@ void us_socket_flush(int ssl, struct us_socket_t *s) {
|
||||
}
|
||||
|
||||
int us_socket_is_closed(int ssl, struct us_socket_t *s) {
|
||||
if(ssl) {
|
||||
return us_internal_ssl_socket_is_closed((struct us_internal_ssl_socket_t *) s);
|
||||
}
|
||||
return s->prev == (struct us_socket_t *) s->context;
|
||||
}
|
||||
|
||||
@@ -141,7 +137,7 @@ void us_connecting_socket_close(int ssl, struct us_connecting_socket_t *c) {
|
||||
c->closed = 1;
|
||||
|
||||
for (struct us_socket_t *s = c->connecting_head; s; s = s->connect_next) {
|
||||
us_internal_socket_context_unlink_socket(ssl, s->context, s);
|
||||
us_internal_socket_context_unlink_socket(s->context, s);
|
||||
us_poll_stop((struct us_poll_t *) s, s->context->loop);
|
||||
bsd_close_socket(us_poll_fd((struct us_poll_t *) s));
|
||||
|
||||
@@ -161,9 +157,6 @@ void us_connecting_socket_close(int ssl, struct us_connecting_socket_t *c) {
|
||||
}
|
||||
|
||||
struct us_socket_t *us_socket_close(int ssl, struct us_socket_t *s, int code, void *reason) {
|
||||
if(ssl) {
|
||||
return (struct us_socket_t *)us_internal_ssl_socket_close((struct us_internal_ssl_socket_t *) s, code, reason);
|
||||
}
|
||||
if (!us_socket_is_closed(0, s)) {
|
||||
if (s->low_prio_state == 1) {
|
||||
/* Unlink this socket from the low-priority queue */
|
||||
@@ -175,10 +168,8 @@ struct us_socket_t *us_socket_close(int ssl, struct us_socket_t *s, int code, vo
|
||||
s->prev = 0;
|
||||
s->next = 0;
|
||||
s->low_prio_state = 0;
|
||||
us_socket_context_unref(ssl, s->context);
|
||||
|
||||
} else {
|
||||
us_internal_socket_context_unlink_socket(ssl, s->context, s);
|
||||
us_internal_socket_context_unlink_socket(s->context, s);
|
||||
}
|
||||
#ifdef LIBUS_USE_KQUEUE
|
||||
// kqueue automatically removes the fd from the set on close
|
||||
@@ -227,10 +218,8 @@ struct us_socket_t *us_socket_detach(int ssl, struct us_socket_t *s) {
|
||||
s->prev = 0;
|
||||
s->next = 0;
|
||||
s->low_prio_state = 0;
|
||||
us_socket_context_unref(ssl, s->context);
|
||||
|
||||
} else {
|
||||
us_internal_socket_context_unlink_socket(ssl, s->context, s);
|
||||
us_internal_socket_context_unlink_socket(s->context, s);
|
||||
}
|
||||
us_poll_stop((struct us_poll_t *) s, s->context->loop);
|
||||
|
||||
|
||||
packages/bun-uws/capi/Makefile (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
CAPI_EXAMPLE_FILES := HelloWorld HelloWorldAsync ServerName UpgradeSync UpgradeAsync EchoServer Broadcast BroadcastEchoServer
|
||||
RUST_EXAMPLE_FILES := RustHelloWorld
|
||||
LIBRARY_NAME := libuwebsockets
|
||||
|
||||
default:
|
||||
$(MAKE) capi
|
||||
$(CXX) -O3 -flto -I ../src -I ../uSockets/src examples/HelloWorld.c *.o -lz -luv -lssl -lcrypto -lstdc++ ../uSockets/uSockets.a -o HelloWorld
|
||||
|
||||
capi:
|
||||
$(MAKE) clean
|
||||
cd ../uSockets && $(CC) -pthread -DUWS_WITH_PROXY -DLIBUS_USE_OPENSSL -DLIBUS_USE_LIBUV -std=c11 -Isrc -flto -fPIC -O3 -c src/*.c src/eventing/*.c src/crypto/*.c
|
||||
cd ../uSockets && $(CXX) -std=c++17 -flto -fPIC -O3 -c src/crypto/*.cpp
|
||||
cd ../uSockets && $(AR) rvs uSockets.a *.o
|
||||
|
||||
$(CXX) -DUWS_WITH_PROXY -c -O3 -std=c++17 -lz -luv -flto -fPIC -I ../src -I ../uSockets/src $(LIBRARY_NAME).cpp
|
||||
$(AR) rvs $(LIBRARY_NAME).a $(LIBRARY_NAME).o ../uSockets/uSockets.a
|
||||
shared:
|
||||
$(MAKE) clean
|
||||
|
||||
cd ../uSockets && $(CC) -pthread -DUWS_WITH_PROXY -DLIBUS_USE_OPENSSL -DLIBUS_USE_LIBUV -std=c11 -Isrc -flto -fPIC -O3 -c src/*.c src/eventing/*.c src/crypto/*.c
|
||||
cd ../uSockets && $(CXX) -std=c++17 -flto -fPIC -O3 -c src/crypto/*.cpp
|
||||
cd ../uSockets && $(AR) rvs uSockets.a *.o
|
||||
|
||||
$(CXX) -DUWS_WITH_PROXY -c -O3 -std=c++17 -lz -luv -flto -fPIC -I ../src -I ../uSockets/src $(LIBRARY_NAME).cpp
|
||||
$(CXX) -shared -o $(LIBRARY_NAME).so $(LIBRARY_NAME).o ../uSockets/uSockets.a -fPIC -lz -luv -lssl -lcrypto
|
||||
misc:
|
||||
mkdir -p ../misc && openssl req -newkey rsa:2048 -new -nodes -x509 -days 3650 -passout pass:1234 -keyout ../misc/key.pem -out ../misc/cert.pem
|
||||
rust:
|
||||
$(MAKE) capi
|
||||
rustc -C link-arg=$(LIBRARY_NAME).a -C link-args="-lstdc++ -luv" -C opt-level=3 -C lto -L all=. examples/RustHelloWorld.rs -o RustHelloWorld
|
||||
|
||||
clean:
|
||||
rm -f *.o $(CAPI_EXAMPLE_FILES) $(RUST_EXAMPLE_FILES) $(LIBRARY_NAME).a $(LIBRARY_NAME).so
|
||||
|
||||
all:
|
||||
for FILE in $(CAPI_EXAMPLE_FILES); do $(CXX) -O3 -flto -I ../src -I ../uSockets/src examples/$$FILE.c *.o -luv -lstdc++ ../uSockets/uSockets.a -o $$FILE & done; \
|
||||
wait
|
||||
|
||||
packages/bun-uws/capi/examples/Broadcast.c (new file, 157 lines)
@@ -0,0 +1,157 @@
|
||||
#include "../libuwebsockets.h"
|
||||
#include <stdio.h>
|
||||
#include <malloc.h>
|
||||
#include <time.h>
|
||||
#include <string.h>
|
||||
#include <stdarg.h>
|
||||
#define SSL 1
|
||||
|
||||
|
||||
//Timer close helper
|
||||
void uws_timer_close(struct us_timer_t *timer)
|
||||
{
|
||||
struct us_timer_t *t = (struct us_timer_t *)timer;
|
||||
struct timer_handler_data *data;
|
||||
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
|
||||
free(data);
|
||||
us_timer_close(t, 0);
|
||||
}
|
||||
//Timer create helper
|
||||
struct us_timer_t *uws_create_timer(int ms, int repeat_ms, void (*handler)(void *data), void *data)
|
||||
{
|
||||
struct us_loop_t *loop = uws_get_loop();
|
||||
struct us_timer_t *delayTimer = us_create_timer(loop, 0, sizeof(void *));
|
||||
|
||||
struct timer_handler_data
|
||||
{
|
||||
void *data;
|
||||
void (*handler)(void *data);
|
||||
bool repeat;
|
||||
};
|
||||
|
||||
struct timer_handler_data *timer_data = (struct timer_handler_data *)malloc(sizeof(timer_handler_data));
|
||||
timer_data->data = data;
|
||||
timer_data->handler = handler;
|
||||
timer_data->repeat = repeat_ms > 0;
|
||||
memcpy(us_timer_ext(delayTimer), &timer_data, sizeof(struct timer_handler_data *));
|
||||
|
||||
us_timer_set(
|
||||
delayTimer, [](struct us_timer_t *t)
|
||||
{
|
||||
/* We wrote the pointer to the timer's extension */
|
||||
struct timer_handler_data *data;
|
||||
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
|
||||
|
||||
data->handler(data->data);
|
||||
|
||||
if (!data->repeat)
|
||||
{
|
||||
free(data);
|
||||
us_timer_close(t, 0);
|
||||
}
|
||||
},
|
||||
ms, repeat_ms);
|
||||
|
||||
return (struct us_timer_t *)delayTimer;
|
||||
}
|
||||
|
||||
/* This is a simple WebSocket "sync" upgrade example.
|
||||
* You may compile it with "WITH_OPENSSL=1 make" or with "make" */
|
||||
|
||||
/* ws->getUserData returns one of these */
|
||||
struct PerSocketData {
|
||||
/* Fill with user data */
|
||||
};
|
||||
|
||||
int buffer_size(const char* format, ...) {
|
||||
va_list args;
|
||||
va_start(args, format);
|
||||
int result = vsnprintf(NULL, 0, format, args);
|
||||
va_end(args);
|
||||
return result + 1; // safe byte for \0
|
||||
}
|
||||
|
||||
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void* user_data)
|
||||
{
|
||||
if (listen_socket){
|
||||
printf("Listening on port wss://localhost:%d\n", config.port);
|
||||
}
|
||||
}
|
||||
|
||||
void open_handler(uws_websocket_t* ws){
|
||||
|
||||
/* Open event here, you may access uws_ws_get_user_data(WS) which points to a PerSocketData struct */
|
||||
uws_ws_subscribe(SSL, ws, "broadcast", 9);
|
||||
}
|
||||
|
||||
void message_handler(uws_websocket_t* ws, const char* message, size_t length, uws_opcode_t opcode){
|
||||
}
|
||||
|
||||
void close_handler(uws_websocket_t* ws, int code, const char* message, size_t length){
|
||||
/* You may access uws_ws_get_user_data(ws) here, but sending or
|
||||
* doing any kind of I/O with the socket is not valid. */
|
||||
}
|
||||
|
||||
void drain_handler(uws_websocket_t* ws){
|
||||
/* Check uws_ws_get_buffered_amount(ws) here */
|
||||
}
|
||||
|
||||
void ping_handler(uws_websocket_t* ws, const char* message, size_t length){
|
||||
/* You don't need to handle this one, we automatically respond to pings as per standard */
|
||||
}
|
||||
|
||||
void pong_handler(uws_websocket_t* ws, const char* message, size_t length){
|
||||
|
||||
/* You don't need to handle this one either */
|
||||
}
|
||||
|
||||
void on_timer_interval(void* data){
|
||||
|
||||
// broadcast the unix time as millis
|
||||
|
||||
uws_app_t * app = (uws_app_t *)data;
|
||||
struct timespec ts;
|
||||
timespec_get(&ts, TIME_UTC);
|
||||
|
||||
int64_t millis = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
|
||||
|
||||
|
||||
char* message = (char*)malloc((size_t)buffer_size("%ld", millis));
|
||||
size_t message_length = sprintf(message, "%ld", millis);
|
||||
|
||||
uws_publish(SSL, app, "broadcast", 9, message, message_length, uws_opcode_t::TEXT, false);
|
||||
free(message);
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
|
||||
|
||||
uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
|
||||
/* There are example certificates in uWebSockets.js repo */
|
||||
.key_file_name = "../misc/key.pem",
|
||||
.cert_file_name = "../misc/cert.pem",
|
||||
.passphrase = "1234"
|
||||
});
|
||||
|
||||
uws_ws(SSL, app, "/*", (uws_socket_behavior_t){
|
||||
.compression = uws_compress_options_t::SHARED_COMPRESSOR,
|
||||
.maxPayloadLength = 16 * 1024,
|
||||
.idleTimeout = 12,
|
||||
.maxBackpressure = 1 * 1024 * 1024,
|
||||
.upgrade = NULL,
|
||||
.open = open_handler,
|
||||
.message = message_handler,
|
||||
.drain = drain_handler,
|
||||
.ping = ping_handler,
|
||||
.pong = pong_handler,
|
||||
.close = close_handler,
|
||||
});
|
||||
|
||||
uws_app_listen(SSL, app, 9001, listen_handler, NULL);
|
||||
|
||||
// broadcast the unix time as millis every 8 millis
|
||||
uws_create_timer(8, 8, on_timer_interval, app);
|
||||
|
||||
uws_app_run(SSL, app);
|
||||
}
|
||||
packages/bun-uws/capi/examples/BroadcastEchoServer.c (new file, 175 lines)
@@ -0,0 +1,175 @@
|
||||
#include "../libuwebsockets.h"
|
||||
#include <stdio.h>
|
||||
#include <malloc.h>
|
||||
#include <time.h>
|
||||
#include <string.h>
|
||||
#include <stdarg.h>
|
||||
|
||||
#define SSL 1
|
||||
|
||||
|
||||
/* This is a simple WebSocket "sync" upgrade example.
|
||||
* You may compile it with "WITH_OPENSSL=1 make" or with "make" */
|
||||
|
||||
typedef struct
|
||||
{
|
||||
size_t length;
|
||||
char *name;
|
||||
} topic_t;
|
||||
|
||||
/* ws->getUserData returns one of these */
|
||||
struct PerSocketData
|
||||
{
|
||||
/* Fill with user data */
|
||||
topic_t **topics;
|
||||
int topics_quantity;
|
||||
int nr;
|
||||
};
|
||||
|
||||
uws_app_t *app;
|
||||
|
||||
int buffer_size(const char *format, ...)
|
||||
{
|
||||
va_list args;
|
||||
va_start(args, format);
|
||||
int result = vsnprintf(NULL, 0, format, args);
|
||||
va_end(args);
|
||||
return result + 1; // safe byte for \0
|
||||
}
|
||||
|
||||
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void* user_data)
|
||||
{
|
||||
if (listen_socket)
|
||||
{
|
||||
printf("Listening on port wss://localhost:%d\n", config.port);
|
||||
}
|
||||
}
|
||||
|
||||
void upgrade_handler(uws_res_t *response, uws_req_t *request, uws_socket_context_t *context)
|
||||
{
|
||||
|
||||
/* You may read from req only here, and COPY whatever you need into your PerSocketData.
|
||||
* PerSocketData is valid from .open to .close event, accessed with uws_ws_get_user_data(ws).
|
||||
* HttpRequest (req) is ONLY valid in this very callback, so any data you will need later
|
||||
* has to be COPIED into PerSocketData here. */
|
||||
|
||||
/* Immediately upgrading without doing anything "async" before, is simple */
|
||||
|
||||
struct PerSocketData *data = (struct PerSocketData *)malloc(sizeof(struct PerSocketData));
|
||||
data->topics = (topic_t **)calloc(32, sizeof(topic_t *));
|
||||
data->topics_quantity = 32;
|
||||
data->nr = 0;
|
||||
|
||||
const char *ws_key = NULL;
|
||||
const char *ws_protocol = NULL;
|
||||
const char *ws_extensions = NULL;
|
||||
|
||||
size_t ws_key_length = uws_req_get_header(request, "sec-websocket-key", 17, &ws_key);
|
||||
size_t ws_protocol_length = uws_req_get_header(request, "sec-websocket-protocol", 22, &ws_protocol);
|
||||
size_t ws_extensions_length = uws_req_get_header(request, "sec-websocket-extensions", 24, &ws_extensions);
|
||||
|
||||
uws_res_upgrade(SSL,
|
||||
response,
|
||||
(void *)data,
|
||||
ws_key,
|
||||
ws_key_length,
|
||||
ws_protocol,
|
||||
ws_protocol_length,
|
||||
ws_extensions,
|
||||
ws_extensions_length,
|
||||
context);
|
||||
}
|
||||
|
||||
void open_handler(uws_websocket_t *ws)
|
||||
{
|
||||
|
||||
/* Open event here, you may access uws_ws_get_user_data(ws) which points to a PerSocketData struct */
|
||||
struct PerSocketData *data = (struct PerSocketData *)uws_ws_get_user_data(SSL, ws);
|
||||
for (int i = 0; i < data->topics_quantity; i++)
|
||||
{
|
||||
|
||||
char *topic = (char *)malloc((size_t)buffer_size("%ld-%d", (uintptr_t)ws, i));
|
||||
size_t topic_length = sprintf(topic, "%ld-%d", (uintptr_t)ws, i);
|
||||
|
||||
topic_t *new_topic = (topic_t*) malloc(sizeof(topic_t));
|
||||
new_topic->length = topic_length;
|
||||
new_topic->name = topic;
|
||||
data->topics[i] = new_topic;
|
||||
uws_ws_subscribe(SSL, ws, topic, topic_length);
|
||||
}
|
||||
}
|
||||
|
||||
void message_handler(uws_websocket_t *ws, const char *message, size_t length, uws_opcode_t opcode)
|
||||
{
|
||||
struct PerSocketData *data = (struct PerSocketData *)uws_ws_get_user_data(SSL, ws);
|
||||
topic_t *topic = data->topics[(size_t)(++data->nr % data->topics_quantity)];
|
||||
uws_publish(SSL, app, topic->name, topic->length, message, length, opcode, false);
|
||||
|
||||
topic = data->topics[(size_t)(++data->nr % data->topics_quantity)];
|
||||
uws_ws_publish(SSL, ws, topic->name, topic->length, message, length);
|
||||
}
|
||||
|
||||
void close_handler(uws_websocket_t *ws, int code, const char *message, size_t length)
|
||||
{
|
||||
/* You may access uws_ws_get_user_data(ws) here, but sending or
|
||||
* doing any kind of I/O with the socket is not valid. */
|
||||
struct PerSocketData *data = (struct PerSocketData *)uws_ws_get_user_data(SSL, ws);
|
||||
if (data)
|
||||
{
|
||||
for (int i = 0; i < data->topics_quantity; i++)
|
||||
{
|
||||
|
||||
topic_t* topic = data->topics[i];
|
||||
free(topic->name);
|
||||
free(topic);
|
||||
}
|
||||
free(data->topics);
|
||||
free(data);
|
||||
}
|
||||
}
|
||||
|
||||
void drain_handler(uws_websocket_t *ws)
|
||||
{
|
||||
/* Check uws_ws_get_buffered_amount(ws) here */
|
||||
}
|
||||
|
||||
void ping_handler(uws_websocket_t *ws, const char *message, size_t length)
|
||||
{
|
||||
/* You don't need to handle this one, we automatically respond to pings as per standard */
|
||||
}
|
||||
|
||||
void pong_handler(uws_websocket_t *ws, const char *message, size_t length)
|
||||
{
|
||||
|
||||
/* You don't need to handle this one either */
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
|
||||
|
||||
uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
|
||||
/* There are example certificates in uWebSockets.js repo */
|
||||
.key_file_name = "../misc/key.pem",
|
||||
.cert_file_name = "../misc/cert.pem",
|
||||
.passphrase = "1234"
|
||||
});
|
||||
|
||||
uws_ws(SSL, app, "/*", (uws_socket_behavior_t){
|
||||
.compression = uws_compress_options_t::SHARED_COMPRESSOR,
|
||||
.maxPayloadLength = 16 * 1024,
|
||||
.idleTimeout = 12,
|
||||
.maxBackpressure = 1 * 1024 * 1024,
|
||||
.upgrade = upgrade_handler,
|
||||
.open = open_handler,
|
||||
.message = message_handler,
|
||||
.drain = drain_handler,
|
||||
.ping = ping_handler,
|
||||
.pong = pong_handler,
|
||||
.close = close_handler,
|
||||
});
|
||||
|
||||
uws_app_listen(SSL, app, 9001, listen_handler, NULL);
|
||||
|
||||
uws_app_run(SSL, app);
|
||||
}
|
||||
81
packages/bun-uws/capi/examples/EchoServer.c
Normal file
@@ -0,0 +1,81 @@
|
||||
#include "../libuwebsockets.h"
|
||||
#include <stdio.h>
|
||||
#include <malloc.h>
|
||||
|
||||
#define SSL 1
|
||||
|
||||
|
||||
/* This is a simple WebSocket "sync" upgrade example.
|
||||
* You may compile it with "WITH_OPENSSL=1 make" or with "make" */
|
||||
|
||||
/* ws->getUserData returns one of these */
|
||||
struct PerSocketData {
|
||||
/* Fill with user data */
|
||||
};
|
||||
|
||||
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void* user_data)
|
||||
{
|
||||
if (listen_socket){
|
||||
printf("Listening on port wss://localhost:%d\n", config.port);
|
||||
}
|
||||
}
|
||||
|
||||
void open_handler(uws_websocket_t* ws){
|
||||
|
||||
/* Open event here, you may access uws_ws_get_user_data(ws) which points to a PerSocketData struct */
|
||||
}
|
||||
|
||||
void message_handler(uws_websocket_t* ws, const char* message, size_t length, uws_opcode_t opcode){
|
||||
uws_ws_send(SSL, ws, message, length, opcode);
|
||||
}
|
||||
|
||||
void close_handler(uws_websocket_t* ws, int code, const char* message, size_t length){
|
||||
|
||||
/* You may access uws_ws_get_user_data(ws) here, but sending or
|
||||
* doing any kind of I/O with the socket is not valid. */
|
||||
}
|
||||
|
||||
void drain_handler(uws_websocket_t* ws){
|
||||
/* Check uws_ws_get_buffered_amount(ws) here */
|
||||
}
|
||||
|
||||
void ping_handler(uws_websocket_t* ws, const char* message, size_t length){
|
||||
/* You don't need to handle this one, we automatically respond to pings as per standard */
|
||||
}
|
||||
|
||||
void pong_handler(uws_websocket_t* ws, const char* message, size_t length){
|
||||
|
||||
/* You don't need to handle this one either */
|
||||
}
|
||||
|
||||
|
||||
int main()
|
||||
{
|
||||
|
||||
|
||||
uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
|
||||
/* There are example certificates in uWebSockets.js repo */
|
||||
.key_file_name = "../misc/key.pem",
|
||||
.cert_file_name = "../misc/cert.pem",
|
||||
.passphrase = "1234"
|
||||
});
|
||||
|
||||
uws_ws(SSL, app, "/*", (uws_socket_behavior_t){
|
||||
.compression = uws_compress_options_t::SHARED_COMPRESSOR,
|
||||
.maxPayloadLength = 16 * 1024,
|
||||
.idleTimeout = 12,
|
||||
.maxBackpressure = 1 * 1024 * 1024,
|
||||
.upgrade = NULL,
|
||||
.open = open_handler,
|
||||
.message = message_handler,
|
||||
.drain = drain_handler,
|
||||
.ping = ping_handler,
|
||||
.pong = pong_handler,
|
||||
.close = close_handler,
|
||||
});
|
||||
|
||||
uws_app_listen(SSL, app, 9001, listen_handler, NULL);
|
||||
|
||||
|
||||
uws_app_run(SSL, app);
|
||||
}
|
||||
33
packages/bun-uws/capi/examples/HelloWorld.c
Normal file
@@ -0,0 +1,33 @@
#include "../libuwebsockets.h"
#include "libusockets.h"
#include <stdio.h>

#define SSL 1

void get_handler(uws_res_t *res, uws_req_t *req, void *user_data)
{
    uws_res_end(SSL, res, "Hello CAPI!", 11, false);
}

void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void *user_data)
{
    if (listen_socket)
    {
        printf("Listening on port https://localhost:%d now\n", config.port);
    }
}

int main()
{
    /* Overly simple hello world app */

    uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
        /* There are example certificates in uWebSockets.js repo */
        .key_file_name = "../misc/key.pem",
        .cert_file_name = "../misc/cert.pem",
        .passphrase = "1234"
    });
    uws_app_get(SSL, app, "/*", get_handler, NULL);
    uws_app_listen(SSL, app, 3000, listen_handler, NULL);
    uws_app_run(SSL, app);
}
123
packages/bun-uws/capi/examples/HelloWorldAsync.c
Normal file
@@ -0,0 +1,123 @@
|
||||
#include "../libuwebsockets.h"
|
||||
#include "libusockets.h"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <malloc.h>
|
||||
#include <string.h>
|
||||
|
||||
#define SSL 0
|
||||
|
||||
typedef struct {
|
||||
uws_res_t* res;
|
||||
bool aborted;
|
||||
} async_request_t;
|
||||
|
||||
//Timer close helper
|
||||
void uws_timer_close(struct us_timer_t *timer)
|
||||
{
|
||||
struct us_timer_t *t = (struct us_timer_t *)timer;
|
||||
struct timer_handler_data *data;
|
||||
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
|
||||
free(data);
|
||||
us_timer_close(t, 0);
|
||||
}
|
||||
//Timer create helper
|
||||
struct us_timer_t *uws_create_timer(int ms, int repeat_ms, void (*handler)(void *data), void *data)
|
||||
{
|
||||
struct us_loop_t *loop = uws_get_loop();
|
||||
struct us_timer_t *delayTimer = us_create_timer(loop, 0, sizeof(void *));
|
||||
|
||||
struct timer_handler_data
|
||||
{
|
||||
void *data;
|
||||
void (*handler)(void *data);
|
||||
bool repeat;
|
||||
};
|
||||
|
||||
struct timer_handler_data *timer_data = (struct timer_handler_data *)malloc(sizeof(timer_handler_data));
|
||||
timer_data->data = data;
|
||||
timer_data->handler = handler;
|
||||
timer_data->repeat = repeat_ms > 0;
|
||||
memcpy(us_timer_ext(delayTimer), &timer_data, sizeof(struct timer_handler_data *));
|
||||
|
||||
us_timer_set(
|
||||
delayTimer, [](struct us_timer_t *t)
|
||||
{
|
||||
/* We wrote the pointer to the timer's extension */
|
||||
struct timer_handler_data *data;
|
||||
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
|
||||
|
||||
data->handler(data->data);
|
||||
|
||||
if (!data->repeat)
|
||||
{
|
||||
free(data);
|
||||
us_timer_close(t, 0);
|
||||
}
|
||||
},
|
||||
ms, repeat_ms);
|
||||
|
||||
return (struct us_timer_t *)delayTimer;
|
||||
}
|
||||
|
||||
void on_res_aborted(uws_res_t *response, void* data){
|
||||
async_request_t* request_data = (async_request_t*)data;
|
||||
/* We don't implement any kind of cancellation here,
|
||||
* so simply flag us as aborted */
|
||||
request_data->aborted = true;
|
||||
}
|
||||
|
||||
void on_res_corked(uws_res_t *response, void* data){
|
||||
uws_res_end(SSL, response, "Hello CAPI!", 11, false);
|
||||
}
|
||||
void on_timer_done(void *data){
|
||||
async_request_t* request_data = (async_request_t*)data;
|
||||
/* Weren't we aborted before our async task finished? Okay, send a message! */
|
||||
if(!request_data->aborted){
|
||||
|
||||
uws_res_cork(SSL, request_data->res,on_res_corked, request_data);
|
||||
}
|
||||
}
|
||||
|
||||
void get_handler(uws_res_t *res, uws_req_t *req, void* user_data)
|
||||
{
|
||||
|
||||
/* We have to attach an abort handler for us to be aware
|
||||
* of disconnections while we perform async tasks */
|
||||
async_request_t* request_data = (async_request_t*) malloc(sizeof(async_request_t));
|
||||
request_data->res = res;
|
||||
request_data->aborted = false;
|
||||
|
||||
uws_res_on_aborted(SSL, res, on_res_aborted, request_data);
|
||||
|
||||
/* Simulate checking auth for 5 seconds. This looks like crap; never write
 * code that uses us_timer_t like this. Timers are high-cost and should
 * not be created and destroyed more than rarely!
 * Either way, here we go! */
|
||||
uws_create_timer(1, 0, on_timer_done, request_data);
|
||||
}
|
||||
|
||||
|
||||
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void* user_data)
|
||||
{
|
||||
if (listen_socket)
|
||||
{
|
||||
printf("Listening on port https://localhost:%d now\n", config.port);
|
||||
}
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
/* Overly simple hello world app with async response */
|
||||
|
||||
|
||||
uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
|
||||
/* There are example certificates in uWebSockets.js repo */
|
||||
.key_file_name = "../misc/key.pem",
|
||||
.cert_file_name = "../misc/cert.pem",
|
||||
.passphrase = "1234"
|
||||
});
|
||||
uws_app_get(SSL, app, "/*", get_handler, NULL);
|
||||
uws_app_listen(SSL, app, 3000, listen_handler, NULL);
|
||||
uws_app_run(SSL, app);
|
||||
}
|
||||
309
packages/bun-uws/capi/examples/RustHelloWorld.rs
Normal file
@@ -0,0 +1,309 @@
|
||||
/* automatically generated by rust-bindgen 0.59.2 */
|
||||
use std::convert::TryInto;
|
||||
use std::ffi::CString;
|
||||
|
||||
pub type SizeT = ::std::os::raw::c_ulong;
|
||||
pub type WcharT = ::std::os::raw::c_uint;
|
||||
#[repr(C)]
|
||||
#[repr(align(16))]
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct max_align_t {
|
||||
pub __clang_max_align_nonce1: ::std::os::raw::c_longlong,
|
||||
pub __bindgen_padding_0: u64,
|
||||
pub __clang_max_align_nonce2: u128,
|
||||
}
|
||||
#[test]
|
||||
fn bindgen_test_layout_max_align_t() {
|
||||
assert_eq!(
|
||||
::std::mem::size_of::<max_align_t>(),
|
||||
32usize,
|
||||
concat!("Size of: ", stringify!(max_align_t))
|
||||
);
|
||||
assert_eq!(
|
||||
::std::mem::align_of::<max_align_t>(),
|
||||
16usize,
|
||||
concat!("Alignment of ", stringify!(max_align_t))
|
||||
);
|
||||
assert_eq!(
|
||||
unsafe {
|
||||
&(*(::std::ptr::null::<max_align_t>())).__clang_max_align_nonce1 as *const _ as usize
|
||||
},
|
||||
0usize,
|
||||
concat!(
|
||||
"Offset of field: ",
|
||||
stringify!(max_align_t),
|
||||
"::",
|
||||
stringify!(__clang_max_align_nonce1)
|
||||
)
|
||||
);
|
||||
assert_eq!(
|
||||
unsafe {
|
||||
&(*(::std::ptr::null::<max_align_t>())).__clang_max_align_nonce2 as *const _ as usize
|
||||
},
|
||||
16usize,
|
||||
concat!(
|
||||
"Offset of field: ",
|
||||
stringify!(max_align_t),
|
||||
"::",
|
||||
stringify!(__clang_max_align_nonce2)
|
||||
)
|
||||
);
|
||||
}
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct uws_app_s {
|
||||
_unused: [u8; 0],
|
||||
}
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct uws_req_s {
|
||||
_unused: [u8; 0],
|
||||
}
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct uws_res_s {
|
||||
_unused: [u8; 0],
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct uws_app_listen_config_s {
|
||||
port: ::std::os::raw::c_int,
|
||||
host: *const ::std::os::raw::c_char,
|
||||
options: ::std::os::raw::c_int,
|
||||
}
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct us_socket_context_options_s {
|
||||
key_file_name: *const ::std::os::raw::c_char,
|
||||
cert_file_name: *const ::std::os::raw::c_char,
|
||||
passphrase: *const ::std::os::raw::c_char,
|
||||
dh_params_file_name: *const ::std::os::raw::c_char,
|
||||
ca_file_name: *const ::std::os::raw::c_char,
|
||||
ssl_prefer_low_memory_usage: ::std::os::raw::c_int,
|
||||
}
|
||||
|
||||
pub type UwsAppListenConfigT = uws_app_listen_config_s;
|
||||
pub type UsSocketContextOptionsT = us_socket_context_options_s;
|
||||
pub struct UsSocketContextOptions<'a> {
|
||||
key_file_name: &'a str,
|
||||
cert_file_name: &'a str,
|
||||
passphrase: &'a str,
|
||||
dh_params_file_name: &'a str,
|
||||
ca_file_name: &'a str,
|
||||
ssl_prefer_low_memory_usage: i32,
|
||||
}
|
||||
pub type UwsAppT = uws_app_s;
|
||||
pub type UwsReqT = uws_req_s;
|
||||
pub type UwsResT = uws_res_s;
|
||||
extern "C" {
|
||||
pub fn uws_create_app(
|
||||
ssl: ::std::os::raw::c_int,
|
||||
options: UsSocketContextOptionsT,
|
||||
) -> *mut UwsAppT;
|
||||
pub fn uws_app_get(
|
||||
ssl: ::std::os::raw::c_int,
|
||||
app: *mut UwsAppT,
|
||||
pattern: *const ::std::os::raw::c_char,
|
||||
handler: ::std::option::Option<
|
||||
unsafe extern "C" fn(
|
||||
res: *mut UwsResT,
|
||||
req: *mut UwsReqT,
|
||||
user_data: *mut ::std::os::raw::c_void,
|
||||
),
|
||||
>,
|
||||
user_data: *mut ::std::os::raw::c_void,
|
||||
);
|
||||
pub fn uws_app_run(ssl: ::std::os::raw::c_int, app: *mut UwsAppT);
|
||||
|
||||
pub fn uws_app_listen(
|
||||
ssl: ::std::os::raw::c_int,
|
||||
app: *mut UwsAppT,
|
||||
port: ::std::os::raw::c_int,
|
||||
handler: ::std::option::Option<
|
||||
unsafe extern "C" fn(
|
||||
listen_socket: *mut ::std::os::raw::c_void,
|
||||
config: UwsAppListenConfigT,
|
||||
user_data: *mut ::std::os::raw::c_void,
|
||||
),
|
||||
>,
|
||||
user_data: *mut ::std::os::raw::c_void,
|
||||
);
|
||||
pub fn uws_res_end(
|
||||
ssl: ::std::os::raw::c_int,
|
||||
res: *mut UwsResT,
|
||||
data: *const ::std::os::raw::c_char,
|
||||
length: SizeT,
|
||||
close_connection: bool,
|
||||
);
|
||||
}
|
||||
|
||||
pub struct AppResponse<const SSL: i32> {
|
||||
native: *mut UwsResT,
|
||||
}
|
||||
pub struct AppRequest {
|
||||
native: *mut UwsReqT,
|
||||
}
|
||||
impl AppRequest {
|
||||
pub fn new(native: *mut UwsReqT) -> AppRequest {
|
||||
AppRequest { native: native }
|
||||
}
|
||||
}
|
||||
impl<const SSL: i32> AppResponse<SSL> {
|
||||
pub fn new(native: *mut UwsResT) -> AppResponse<SSL> {
|
||||
AppResponse::<SSL> { native: native }
|
||||
}
|
||||
fn end(self, message: &str) -> AppResponse<SSL> {
|
||||
unsafe {
|
||||
let c_message =
|
||||
::std::ffi::CString::new(message).expect("Failed to create message CString");
|
||||
//This will now const fold :/ performance impact needs refactor
|
||||
uws_res_end(
|
||||
SSL,
|
||||
self.native,
|
||||
c_message.as_ptr(),
|
||||
message.len().try_into().unwrap(),
|
||||
false,
|
||||
);
|
||||
}
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
pub type UwsMethodHandler<const SSL: i32> = fn(res: AppResponse<SSL>, req: AppRequest);
|
||||
pub type UwsListenHandler =
|
||||
fn(listen_socket: *mut ::std::os::raw::c_void, config: UwsAppListenConfigT);
|
||||
|
||||
pub struct TemplateApp<const SSL: i32> {
|
||||
native: *mut UwsAppT,
|
||||
}
|
||||
|
||||
extern "C" fn uws_generic_listen_handler(
|
||||
listen_socket: *mut ::std::os::raw::c_void,
|
||||
config: UwsAppListenConfigT,
|
||||
user_data: *mut ::std::os::raw::c_void,
|
||||
) {
|
||||
unsafe {
|
||||
let callback = &mut *(user_data as *mut UwsListenHandler);
|
||||
callback(listen_socket, config);
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" fn uws_generic_method_handler(
|
||||
res: *mut UwsResT,
|
||||
req: *mut UwsReqT,
|
||||
user_data: *mut ::std::os::raw::c_void,
|
||||
) {
|
||||
unsafe {
|
||||
let response = AppResponse::<0>::new(res);
|
||||
let request = AppRequest::new(req);
|
||||
let callback = &mut *(user_data as *mut UwsMethodHandler<0>);
|
||||
callback(response, request);
|
||||
}
|
||||
}
|
||||
extern "C" fn uws_ssl_generic_method_handler(
|
||||
res: *mut UwsResT,
|
||||
req: *mut UwsReqT,
|
||||
user_data: *mut ::std::os::raw::c_void,
|
||||
) {
|
||||
unsafe {
|
||||
let response = AppResponse::<1>::new(res);
|
||||
let request = AppRequest::new(req);
|
||||
let callback = &mut *(user_data as *mut UwsMethodHandler<1>);
|
||||
callback(response, request);
|
||||
}
|
||||
}
|
||||
|
||||
impl<const SSL: i32> TemplateApp<SSL> {
|
||||
pub fn new(config: UsSocketContextOptions) -> TemplateApp<SSL> {
|
||||
unsafe {
|
||||
let key_file_name_s =
|
||||
CString::new(config.key_file_name).expect("Failed to create key_file_name CString");
|
||||
let cert_file_name_s = CString::new(config.cert_file_name)
|
||||
.expect("Failed to create cert_file_name CString");
|
||||
let passphrase_s =
|
||||
CString::new(config.passphrase).expect("Failed to create passphrase CString");
|
||||
let dh_params_file_name_s = CString::new(config.dh_params_file_name)
|
||||
.expect("Failed to create dh_params_file_name CString");
|
||||
let ca_file_name_s =
|
||||
CString::new(config.ca_file_name).expect("Failed to create ca_file_name CString");
|
||||
|
||||
let native_options = UsSocketContextOptionsT {
|
||||
key_file_name: key_file_name_s.as_ptr(),
|
||||
cert_file_name: cert_file_name_s.as_ptr(),
|
||||
passphrase: passphrase_s.as_ptr(),
|
||||
dh_params_file_name: dh_params_file_name_s.as_ptr(),
|
||||
ca_file_name: ca_file_name_s.as_ptr(),
|
||||
ssl_prefer_low_memory_usage: config.ssl_prefer_low_memory_usage,
|
||||
};
|
||||
TemplateApp::<SSL> {
|
||||
native: uws_create_app(SSL, native_options),
|
||||
}
|
||||
}
|
||||
}
|
||||
pub fn get(self, route: &str, mut handler: UwsMethodHandler<SSL>) -> TemplateApp<SSL> {
|
||||
unsafe {
|
||||
let c_route = ::std::ffi::CString::new(route).expect("Failed to create route CString");
|
||||
if SSL == 1 {
|
||||
uws_app_get(
|
||||
SSL,
|
||||
self.native,
|
||||
c_route.as_ptr(),
|
||||
std::option::Option::Some(uws_ssl_generic_method_handler),
|
||||
&mut handler as *mut _ as *mut ::std::os::raw::c_void,
|
||||
);
|
||||
} else {
|
||||
uws_app_get(
|
||||
SSL,
|
||||
self.native,
|
||||
c_route.as_ptr(),
|
||||
std::option::Option::Some(uws_generic_method_handler),
|
||||
&mut handler as *mut _ as *mut ::std::os::raw::c_void,
|
||||
);
|
||||
}
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
pub fn listen(self, port: i32, mut handler: UwsListenHandler) -> TemplateApp<SSL> {
|
||||
unsafe {
|
||||
uws_app_listen(
|
||||
SSL,
|
||||
self.native,
|
||||
port,
|
||||
::std::option::Option::Some(uws_generic_listen_handler),
|
||||
&mut handler as *mut _ as *mut ::std::os::raw::c_void,
|
||||
);
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
pub fn run(self) -> TemplateApp<SSL> {
|
||||
unsafe {
|
||||
uws_app_run(SSL, self.native);
|
||||
}
|
||||
self
|
||||
}
|
||||
}
|
||||
pub type App = TemplateApp<0>;
|
||||
pub type SSLApp = TemplateApp<1>;
|
||||
|
||||
fn main() {
|
||||
let config = UsSocketContextOptions {
|
||||
key_file_name: "../misc/key.pem",
|
||||
cert_file_name: "../misc/cert.pem",
|
||||
passphrase: "1234",
|
||||
ca_file_name: "",
|
||||
dh_params_file_name: "",
|
||||
ssl_prefer_low_memory_usage: 0,
|
||||
};
|
||||
|
||||
SSLApp::new(config)
|
||||
.get("/", |res, _req| {
|
||||
res.end("Hello Rust!");
|
||||
})
|
||||
.listen(3000, |_listen_socket, config| {
|
||||
println!("Listening on port https://127.0.0.1:{}", config.port);
|
||||
})
|
||||
.run();
|
||||
}
|
||||
59
packages/bun-uws/capi/examples/ServerName.c
Normal file
@@ -0,0 +1,59 @@
|
||||
#include "../libuwebsockets.h"
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#define SSL 1
|
||||
|
||||
|
||||
struct us_listen_socket_t *globalListenSocket;
|
||||
uws_app_t *app;
|
||||
void get_handler(uws_res_t *res, uws_req_t *req, void* user_data)
|
||||
{
|
||||
|
||||
uws_res_end(SSL, res, "Hello CAPI!", 11, false);
|
||||
}
|
||||
|
||||
void exit_handler(uws_res_t *res, uws_req_t *req, void* user_data)
|
||||
{
|
||||
uws_res_end(SSL, res, "Shutting down!",14, false);
|
||||
/* We use this to check graceful closedown */
|
||||
us_listen_socket_close(false, globalListenSocket);
|
||||
}
|
||||
|
||||
void missing_server_name_handler(const char *hostname, void* user_data){
|
||||
printf("We are missing server name: <%s>\n", hostname);
|
||||
|
||||
/* Assume it is localhost, so add it */
|
||||
uws_add_server_name(SSL, app, "localhost");
|
||||
}
|
||||
|
||||
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void* user_data)
|
||||
{
|
||||
if (listen_socket){
|
||||
printf("Listening on port https://localhost:%d\n", config.port);
|
||||
globalListenSocket = listen_socket;
|
||||
} else {
|
||||
printf("Failed to listen on port https://localhost:%d\n", config.port);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
/* Overly simple hello world app (SNI)*/
|
||||
|
||||
app = uws_create_app(SSL, (struct us_socket_context_options_t){
|
||||
/* There are example certificates in uWebSockets.js repo */
|
||||
.key_file_name = "../misc/key.pem",
|
||||
.cert_file_name = "../misc/cert.pem",
|
||||
.passphrase = "1234"
|
||||
});
|
||||
uws_missing_server_name(SSL, app, missing_server_name_handler, NULL);
|
||||
uws_app_get(SSL, app, "/*", get_handler, NULL);
|
||||
uws_app_get(SSL, app, "/exit", exit_handler, NULL);
|
||||
uws_app_listen(SSL, app, 3000, listen_handler, NULL);
|
||||
|
||||
/* Let's add a wildcard SNI to begin with */
|
||||
uws_add_server_name(SSL, app, "*.google.*");
|
||||
|
||||
uws_app_run(SSL, app);
|
||||
}
|
||||
255
packages/bun-uws/capi/examples/UpgradeAsync.c
Normal file
@@ -0,0 +1,255 @@
|
||||
#include "../libuwebsockets.h"
|
||||
#include "libusockets.h"
|
||||
#include <stdio.h>
|
||||
#include <malloc.h>
|
||||
#include <string.h>
|
||||
/* This is a simple WebSocket "sync" upgrade example.
|
||||
* You may compile it with "WITH_OPENSSL=1 make" or with "make" */
|
||||
|
||||
#define SSL 1
|
||||
|
||||
typedef struct
|
||||
{
|
||||
char *value;
|
||||
size_t length;
|
||||
} header_t;
|
||||
struct PerSocketData
|
||||
{
|
||||
/* Define your user data */
|
||||
int something;
|
||||
};
|
||||
|
||||
struct UpgradeData
|
||||
{
|
||||
header_t *secWebSocketKey;
|
||||
header_t *secWebSocketProtocol;
|
||||
header_t *secWebSocketExtensions;
|
||||
uws_socket_context_t *context;
|
||||
uws_res_t *response;
|
||||
bool aborted;
|
||||
};
|
||||
|
||||
header_t *create_header(size_t length, const char* value)
|
||||
{
|
||||
header_t *header = (header_t *)malloc(sizeof(header_t));
|
||||
if(length > 0){
|
||||
header->value = (char *)calloc(sizeof(char), length);
|
||||
header->length = length;
|
||||
memcpy(header->value, value, length);
|
||||
}else{
|
||||
header->value = NULL;
|
||||
header->length = 0;
|
||||
}
|
||||
return header;
|
||||
}
|
||||
void free_header(header_t *header)
|
||||
{
|
||||
|
||||
free(header->value);
|
||||
free(header);
|
||||
}
|
||||
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void *user_data)
|
||||
{
|
||||
if (listen_socket)
|
||||
{
|
||||
printf("Listening on port wss://localhost:%d\n", config.port);
|
||||
}
|
||||
}
|
||||
//Timer close helper
|
||||
void uws_timer_close(struct us_timer_t *timer)
|
||||
{
|
||||
struct us_timer_t *t = (struct us_timer_t *)timer;
|
||||
struct timer_handler_data *data;
|
||||
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
|
||||
free(data);
|
||||
us_timer_close(t, 0);
|
||||
}
|
||||
//Timer create helper
|
||||
struct us_timer_t *uws_create_timer(int ms, int repeat_ms, void (*handler)(void *data), void *data)
|
||||
{
|
||||
struct us_loop_t *loop = uws_get_loop();
|
||||
struct us_timer_t *delayTimer = us_create_timer(loop, 0, sizeof(void *));
|
||||
|
||||
struct timer_handler_data
|
||||
{
|
||||
void *data;
|
||||
void (*handler)(void *data);
|
||||
bool repeat;
|
||||
};
|
||||
|
||||
struct timer_handler_data *timer_data = (struct timer_handler_data *)malloc(sizeof(timer_handler_data));
|
||||
timer_data->data = data;
|
||||
timer_data->handler = handler;
|
||||
timer_data->repeat = repeat_ms > 0;
|
||||
memcpy(us_timer_ext(delayTimer), &timer_data, sizeof(struct timer_handler_data *));
|
||||
|
||||
us_timer_set(
|
||||
delayTimer, [](struct us_timer_t *t)
|
||||
{
|
||||
/* We wrote the pointer to the timer's extension */
|
||||
struct timer_handler_data *data;
|
||||
memcpy(&data, us_timer_ext(t), sizeof(struct timer_handler_data *));
|
||||
|
||||
data->handler(data->data);
|
||||
|
||||
if (!data->repeat)
|
||||
{
|
||||
free(data);
|
||||
us_timer_close(t, 0);
|
||||
}
|
||||
},
|
||||
ms, repeat_ms);
|
||||
|
||||
return (struct us_timer_t *)delayTimer;
|
||||
}
|
||||
void on_timer_done(void *data)
|
||||
{
|
||||
|
||||
struct UpgradeData *upgrade_data = (struct UpgradeData *)data;
|
||||
|
||||
/* Weren't we aborted before our async task finished? Okay, upgrade then! */
|
||||
if (!upgrade_data->aborted)
|
||||
{
|
||||
struct PerSocketData *socket_data = (struct PerSocketData *)malloc(sizeof(struct PerSocketData));
|
||||
socket_data->something = 15;
|
||||
printf("Async task done, upgrading to WebSocket now!\n");
|
||||
|
||||
uws_res_upgrade(SSL,
|
||||
upgrade_data->response,
|
||||
(void *)socket_data,
|
||||
upgrade_data->secWebSocketKey->value,
|
||||
upgrade_data->secWebSocketKey->length,
|
||||
upgrade_data->secWebSocketProtocol->value,
|
||||
upgrade_data->secWebSocketProtocol->length,
|
||||
upgrade_data->secWebSocketExtensions->value,
|
||||
upgrade_data->secWebSocketExtensions->length,
|
||||
upgrade_data->context);
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("Async task done, but the HTTP socket was closed. Skipping upgrade to WebSocket!\n");
|
||||
}
|
||||
free_header(upgrade_data->secWebSocketKey);
|
||||
free_header(upgrade_data->secWebSocketProtocol);
|
||||
free_header(upgrade_data->secWebSocketExtensions);
|
||||
free(upgrade_data);
|
||||
}
|
||||
|
||||
void on_res_aborted(uws_res_t *response, void *data)
|
||||
{
|
||||
struct UpgradeData *upgrade_data = (struct UpgradeData *)data;
|
||||
/* We don't implement any kind of cancellation here,
|
||||
* so simply flag us as aborted */
|
||||
upgrade_data->aborted = true;
|
||||
}
|
||||
void upgrade_handler(uws_res_t *response, uws_req_t *request, uws_socket_context_t *context)
|
||||
{
|
||||
|
||||
/* HttpRequest (req) is only valid in this very callback, so we must COPY the headers
|
||||
* we need later on while upgrading to WebSocket. You must not access req after first return.
|
||||
* Here we create a heap allocated struct holding everything we will need later on. */
|
||||
|
||||
struct UpgradeData *data = (struct UpgradeData *)malloc(sizeof(struct UpgradeData));
|
||||
data->aborted = false;
|
||||
data->context = context;
|
||||
data->response = response;
|
||||
|
||||
const char *ws_key = NULL;
|
||||
const char *ws_protocol = NULL;
|
||||
const char *ws_extensions = NULL;
|
||||
|
||||
size_t ws_key_length = uws_req_get_header(request, "sec-websocket-key", 17, &ws_key);
|
||||
size_t ws_protocol_length = uws_req_get_header(request, "sec-websocket-protocol", 22, &ws_protocol);
|
||||
size_t ws_extensions_length = uws_req_get_header(request, "sec-websocket-extensions", 24, &ws_extensions);
|
||||
|
||||
|
||||
data->secWebSocketKey = create_header(ws_key_length, ws_key);
|
||||
data->secWebSocketProtocol = create_header(ws_protocol_length, ws_protocol);
|
||||
data->secWebSocketExtensions = create_header(ws_extensions_length, ws_extensions);
|
||||
|
||||
/* We have to attach an abort handler for us to be aware
|
||||
* of disconnections while we perform async tasks */
|
||||
|
||||
uws_res_on_aborted(SSL, response, on_res_aborted, data);
|
||||
|
||||
/* Simulate checking auth for 5 seconds. This looks like crap; never write
 * code that uses us_timer_t like this. Timers are high-cost and should
 * not be created and destroyed more than rarely!
 * Either way, here we go! */
|
||||
uws_create_timer(5000, 0, on_timer_done, data);
|
||||
}
|
||||
|
||||
void open_handler(uws_websocket_t *ws)
|
||||
{
|
||||
|
||||
/* Open event here, you may access uws_ws_get_user_data(ws) which points to a PerSocketData struct.
|
||||
* Here we simply validate that indeed, something == 15 as set in upgrade handler. */
|
||||
|
||||
struct PerSocketData *data = (struct PerSocketData *)uws_ws_get_user_data(SSL, ws);
|
||||
data->something = 15;
|
||||
printf("Something is: %d\n", data->something);
|
||||
}
|
||||
|
||||
void message_handler(uws_websocket_t *ws, const char *message, size_t length, uws_opcode_t opcode)
|
||||
{
|
||||
|
||||
/* We simply echo whatever data we get */
|
||||
uws_ws_send(SSL, ws, message, length, opcode);
|
||||
}
|
||||
|
||||
void close_handler(uws_websocket_t *ws, int code, const char *message, size_t length)
|
||||
{
|
||||
|
||||
/* You may access uws_ws_get_user_data(ws) here, but sending or
|
||||
* doing any kind of I/O with the socket is not valid. */
|
||||
struct PerSocketData *data = (struct PerSocketData *)uws_ws_get_user_data(SSL, ws);
|
||||
if (data)
|
||||
{
|
||||
free(data);
|
||||
}
|
||||
}
|
||||
|
||||
void drain_handler(uws_websocket_t *ws)
|
||||
{
|
||||
/* Check uws_ws_get_buffered_amount(ws) here */
|
||||
}
|
||||
|
||||
void ping_handler(uws_websocket_t *ws, const char *message, size_t length)
|
||||
{
|
||||
/* You don't need to handle this one, we automatically respond to pings as per standard */
|
||||
}
|
||||
|
||||
void pong_handler(uws_websocket_t *ws, const char *message, size_t length)
|
||||
{
|
||||
|
||||
/* You don't need to handle this one either */
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
|
||||
uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
|
||||
/* There are example certificates in uWebSockets.js repo */
|
||||
.key_file_name = "../misc/key.pem",
|
||||
.cert_file_name = "../misc/cert.pem",
|
||||
.passphrase = "1234"
|
||||
});
|
||||
|
||||
uws_ws(SSL, app, "/*", (uws_socket_behavior_t){
|
||||
.compression = uws_compress_options_t::SHARED_COMPRESSOR,
|
||||
.maxPayloadLength = 16 * 1024,
|
||||
.idleTimeout = 12,
|
||||
.maxBackpressure = 1 * 1024 * 1024,
|
||||
.upgrade = upgrade_handler,
|
||||
.open = open_handler,
|
||||
.message = message_handler,
|
||||
.drain = drain_handler,
|
||||
.ping = ping_handler,
|
||||
.pong = pong_handler,
|
||||
.close = close_handler,
|
||||
});
|
||||
|
||||
uws_app_listen(SSL, app, 9001, listen_handler, NULL);
|
||||
|
||||
uws_app_run(SSL, app);
|
||||
}
|
||||
117
packages/bun-uws/capi/examples/UpgradeSync.c
Normal file
@@ -0,0 +1,117 @@
|
||||
#include "../libuwebsockets.h"
|
||||
#include <stdio.h>
|
||||
#include <malloc.h>
|
||||
|
||||
#define SSL 1
|
||||
|
||||
/* This is a simple WebSocket "sync" upgrade example.
|
||||
* You may compile it with "WITH_OPENSSL=1 make" or with "make" */
|
||||
|
||||
/* uws_ws_get_user_data(ws) returns one of these */
|
||||
|
||||
struct PerSocketData
|
||||
{
|
||||
/* Define your user data */
|
||||
int something;
|
||||
};
|
||||
|
||||
void listen_handler(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void *user_data)
|
||||
{
|
||||
if (listen_socket)
|
||||
{
|
||||
printf("Listening on port wss://localhost:%d\n", config.port);
|
||||
}
|
||||
}
|
||||
|
||||
void upgrade_handler(uws_res_t *response, uws_req_t *request, uws_socket_context_t *context)
|
||||
{
|
||||
|
||||
/* You may read from req only here, and COPY whatever you need into your PerSocketData.
|
||||
* PerSocketData is valid from .open to .close event, accessed with uws_ws_get_user_data(ws).
|
||||
* HttpRequest (req) is ONLY valid in this very callback, so any data you will need later
|
||||
* has to be COPIED into PerSocketData here. */
|
||||
|
||||
/* Immediately upgrading without doing anything "async" before, is simple */
|
||||
|
||||
struct PerSocketData *data = (struct PerSocketData *)malloc(sizeof(struct PerSocketData));
|
||||
data->something = 15;
|
||||
|
||||
const char *ws_key = NULL;
|
||||
const char *ws_protocol = NULL;
|
||||
const char *ws_extensions = NULL;
|
||||
|
||||
size_t ws_key_length = uws_req_get_header(request, "sec-websocket-key", 17, &ws_key);
|
||||
size_t ws_protocol_length = uws_req_get_header(request, "sec-websocket-protocol", 22, &ws_protocol);
|
||||
size_t ws_extensions_length = uws_req_get_header(request, "sec-websocket-extensions", 24, &ws_extensions);
|
||||
|
||||
uws_res_upgrade(SSL,
|
||||
response,
|
||||
(void *)data,
|
||||
ws_key,
|
||||
ws_key_length,
|
||||
ws_protocol,
|
||||
ws_protocol_length,
|
||||
ws_extensions,
|
||||
ws_extensions_length,
|
||||
context);
|
||||
}
|
||||
|
||||
void open_handler(uws_websocket_t *ws)
|
||||
{
|
||||
|
||||
/* Open event here, you may access uws_ws_get_user_data(ws) which points to a PerSocketData struct.
|
||||
* Here we simply validate that indeed, something == 15 as set in upgrade handler. */
|
||||
|
||||
struct PerSocketData *data = (struct PerSocketData *)uws_ws_get_user_data(SSL, ws);
|
||||
data->something = 15;
|
||||
printf("Something is: %d\n", data->something);
|
||||
}
|
||||
|
||||
void message_handler(uws_websocket_t *ws, const char *message, size_t length, uws_opcode_t opcode)
|
||||
{
|
||||
/* We simply echo whatever data we get */
|
||||
uws_ws_send(SSL, ws, message, length, opcode);
|
||||
}
|
||||
|
||||
void close_handler(uws_websocket_t *ws, int code, const char *message, size_t length)
|
||||
{
|
||||
|
||||
/* You may access uws_ws_get_user_data(ws) here, but sending or
|
||||
* doing any kind of I/O with the socket is not valid. */
|
||||
struct PerSocketData *data = (struct PerSocketData *)uws_ws_get_user_data(SSL, ws);
|
||||
if (data)
|
||||
free(data);
|
||||
}
|
||||
|
||||
void drain_handler(uws_websocket_t *ws)
|
||||
{
|
||||
/* Check uws_ws_get_buffered_amount(ws) here */
|
||||
}
|
||||
|
||||
void ping_handler(uws_websocket_t *ws, const char *message, size_t length)
|
||||
{
|
||||
/* You don't need to handle this one, we automatically respond to pings as per standard */
|
||||
}
|
||||
|
||||
void pong_handler(uws_websocket_t *ws, const char *message, size_t length)
|
||||
{
|
||||
|
||||
/* You don't need to handle this one either */
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
|
||||
uws_app_t *app = uws_create_app(SSL, (struct us_socket_context_options_t){
|
||||
/* There are example certificates in uWebSockets.js repo */
|
||||
.key_file_name = "../misc/key.pem",
|
||||
.cert_file_name = "../misc/cert.pem",
|
||||
.passphrase = "1234"
|
||||
});
|
||||
|
||||
uws_ws(SSL, app, "/*", (uws_socket_behavior_t){.compression = uws_compress_options_t::SHARED_COMPRESSOR, .maxPayloadLength = 16 * 1024, .idleTimeout = 12, .maxBackpressure = 1 * 1024 * 1024, .upgrade = upgrade_handler, .open = open_handler, .message = message_handler, .drain = drain_handler, .ping = ping_handler, .pong = pong_handler, .close = close_handler});
|
||||
|
||||
uws_app_listen(SSL, app, 9001, listen_handler, NULL);
|
||||
|
||||
uws_app_run(SSL, app);
|
||||
}
|
||||
1349
packages/bun-uws/capi/libuwebsockets.cpp
Normal file
File diff suppressed because it is too large
Load Diff
260
packages/bun-uws/capi/libuwebsockets.h
Normal file
@@ -0,0 +1,260 @@
|
||||
/*
|
||||
* Copyright 2022 Ciro Spaciari
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
// clang-format off
|
||||
#ifndef LIBUWS_CAPI_HEADER
|
||||
#define LIBUWS_CAPI_HEADER
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
#include "libusockets.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
#ifdef _WIN32
|
||||
# define DLL_EXPORT __declspec( dllexport )
|
||||
#else
|
||||
# define DLL_EXPORT
|
||||
#endif
|
||||
|
||||
DLL_EXPORT typedef enum
|
||||
{
|
||||
/* These are not actual compression options */
|
||||
_COMPRESSOR_MASK = 0x00FF,
|
||||
_DECOMPRESSOR_MASK = 0x0F00,
|
||||
/* Disabled, shared, shared are "special" values */
|
||||
DISABLED = 0,
|
||||
SHARED_COMPRESSOR = 1,
|
||||
SHARED_DECOMPRESSOR = 1 << 8,
|
||||
/* Highest 4 bits describe decompressor */
|
||||
DEDICATED_DECOMPRESSOR_32KB = 15 << 8,
|
||||
DEDICATED_DECOMPRESSOR_16KB = 14 << 8,
|
||||
DEDICATED_DECOMPRESSOR_8KB = 13 << 8,
|
||||
DEDICATED_DECOMPRESSOR_4KB = 12 << 8,
|
||||
DEDICATED_DECOMPRESSOR_2KB = 11 << 8,
|
||||
DEDICATED_DECOMPRESSOR_1KB = 10 << 8,
|
||||
DEDICATED_DECOMPRESSOR_512B = 9 << 8,
|
||||
/* Same as 32kb */
|
||||
DEDICATED_DECOMPRESSOR = 15 << 8,
|
||||
|
||||
/* Lowest 8 bit describe compressor */
|
||||
DEDICATED_COMPRESSOR_3KB = 9 << 4 | 1,
|
||||
DEDICATED_COMPRESSOR_4KB = 9 << 4 | 2,
|
||||
DEDICATED_COMPRESSOR_8KB = 10 << 4 | 3,
|
||||
DEDICATED_COMPRESSOR_16KB = 11 << 4 | 4,
|
||||
DEDICATED_COMPRESSOR_32KB = 12 << 4 | 5,
|
||||
DEDICATED_COMPRESSOR_64KB = 13 << 4 | 6,
|
||||
DEDICATED_COMPRESSOR_128KB = 14 << 4 | 7,
|
||||
DEDICATED_COMPRESSOR_256KB = 15 << 4 | 8,
|
||||
/* Same as 256kb */
|
||||
DEDICATED_COMPRESSOR = 15 << 4 | 8
|
||||
} uws_compress_options_t;
|
||||
|
||||
DLL_EXPORT typedef enum
|
||||
{
|
||||
CONTINUATION = 0,
|
||||
TEXT = 1,
|
||||
BINARY = 2,
|
||||
CLOSE = 8,
|
||||
PING = 9,
|
||||
PONG = 10
|
||||
} uws_opcode_t;
|
||||
|
||||
DLL_EXPORT typedef enum
|
||||
{
|
||||
BACKPRESSURE,
|
||||
SUCCESS,
|
||||
DROPPED
|
||||
} uws_sendstatus_t;
|
||||
|
||||
DLL_EXPORT typedef struct
|
||||
{
|
||||
|
||||
int port;
|
||||
const char *host;
|
||||
int options;
|
||||
} uws_app_listen_config_t;
|
||||
|
||||
DLL_EXPORT typedef struct {
|
||||
bool ok;
|
||||
bool has_responded;
|
||||
} uws_try_end_result_t;
|
||||
|
||||
DLL_EXPORT struct uws_app_s;
|
||||
DLL_EXPORT struct uws_req_s;
|
||||
DLL_EXPORT struct uws_res_s;
|
||||
DLL_EXPORT struct uws_websocket_s;
|
||||
DLL_EXPORT struct uws_header_iterator_s;
|
||||
DLL_EXPORT typedef struct uws_app_s uws_app_t;
|
||||
DLL_EXPORT typedef struct uws_req_s uws_req_t;
|
||||
DLL_EXPORT typedef struct uws_res_s uws_res_t;
|
||||
DLL_EXPORT typedef struct uws_socket_context_s uws_socket_context_t;
|
||||
DLL_EXPORT typedef struct uws_websocket_s uws_websocket_t;
|
||||
|
||||
DLL_EXPORT typedef void (*uws_websocket_handler)(uws_websocket_t *ws, void* user_data);
|
||||
DLL_EXPORT typedef void (*uws_websocket_message_handler)(uws_websocket_t *ws, const char *message, size_t length, uws_opcode_t opcode, void* user_data);
|
||||
DLL_EXPORT typedef void (*uws_websocket_ping_pong_handler)(uws_websocket_t *ws, const char *message, size_t length, void* user_data);
|
||||
DLL_EXPORT typedef void (*uws_websocket_close_handler)(uws_websocket_t *ws, int code, const char *message, size_t length, void* user_data);
|
||||
DLL_EXPORT typedef void (*uws_websocket_upgrade_handler)(uws_res_t *response, uws_req_t *request, uws_socket_context_t *context, void* user_data);
|
||||
DLL_EXPORT typedef void (*uws_websocket_subscription_handler)(uws_websocket_t *ws, const char *topic_name, size_t topic_name_length, int new_number_of_subscriber, int old_number_of_subscriber, void* user_data);
|
||||
|
||||
DLL_EXPORT typedef struct
|
||||
{
|
||||
uws_compress_options_t compression;
|
||||
/* Maximum message size we can receive */
|
||||
unsigned int maxPayloadLength;
|
||||
/* 2 minutes timeout is good */
|
||||
unsigned short idleTimeout;
|
||||
/* 64kb backpressure is probably good */
|
||||
unsigned int maxBackpressure;
|
||||
bool closeOnBackpressureLimit;
|
||||
/* This one depends on kernel timeouts and is a bad default */
|
||||
bool resetIdleTimeoutOnSend;
|
||||
/* A good default, esp. for newcomers */
|
||||
bool sendPingsAutomatically;
|
||||
/* Maximum socket lifetime in seconds before forced closure (defaults to disabled) */
|
||||
unsigned short maxLifetime;
|
||||
uws_websocket_upgrade_handler upgrade;
|
||||
uws_websocket_handler open;
|
||||
uws_websocket_message_handler message;
|
||||
uws_websocket_handler drain;
|
||||
uws_websocket_ping_pong_handler ping;
|
||||
uws_websocket_ping_pong_handler pong;
|
||||
uws_websocket_close_handler close;
|
||||
uws_websocket_subscription_handler subscription;
|
||||
} uws_socket_behavior_t;
|
||||
|
||||
DLL_EXPORT typedef void (*uws_listen_handler)(struct us_listen_socket_t *listen_socket, uws_app_listen_config_t config, void *user_data);
|
||||
DLL_EXPORT typedef void (*uws_listen_domain_handler)(struct us_listen_socket_t *listen_socket, const char* domain, size_t domain_length, int options, void *user_data);
|
||||
DLL_EXPORT typedef void (*uws_method_handler)(uws_res_t *response, uws_req_t *request, void *user_data);
|
||||
DLL_EXPORT typedef void (*uws_filter_handler)(uws_res_t *response, int, void *user_data);
|
||||
DLL_EXPORT typedef void (*uws_missing_server_handler)(const char *hostname, size_t hostname_length, void *user_data);
|
||||
DLL_EXPORT typedef void (*uws_get_headers_server_handler)(const char *header_name, size_t header_name_size, const char *header_value, size_t header_value_size, void *user_data);
|
||||
//Basic HTTP
|
||||
DLL_EXPORT uws_app_t *uws_create_app(int ssl, struct us_bun_socket_context_options_t options);
|
||||
DLL_EXPORT void uws_app_destroy(int ssl, uws_app_t *app);
|
||||
DLL_EXPORT void uws_app_get(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
|
||||
DLL_EXPORT void uws_app_post(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
|
||||
DLL_EXPORT void uws_app_options(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
|
||||
DLL_EXPORT void uws_app_delete(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
|
||||
DLL_EXPORT void uws_app_patch(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
|
||||
DLL_EXPORT void uws_app_put(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
|
||||
DLL_EXPORT void uws_app_head(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
|
||||
DLL_EXPORT void uws_app_connect(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
|
||||
DLL_EXPORT void uws_app_trace(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
|
||||
DLL_EXPORT void uws_app_any(int ssl, uws_app_t *app, const char *pattern, uws_method_handler handler, void *user_data);
|
||||
|
||||
DLL_EXPORT void uws_app_run(int ssl, uws_app_t *);
|
||||
|
||||
DLL_EXPORT void uws_app_listen(int ssl, uws_app_t *app, int port, uws_listen_handler handler, void *user_data);
|
||||
DLL_EXPORT void uws_app_listen_with_config(int ssl, uws_app_t *app, uws_app_listen_config_t config, uws_listen_handler handler, void *user_data);
|
||||
DLL_EXPORT void uws_app_listen_domain(int ssl, uws_app_t *app, const char *domain, size_t domain_length, uws_listen_domain_handler handler, void *user_data);
|
||||
DLL_EXPORT void uws_app_listen_domain_with_options(int ssl, uws_app_t *app, const char *domain,size_t domain_length, int options, uws_listen_domain_handler handler, void *user_data);
|
||||
DLL_EXPORT void uws_app_domain(int ssl, uws_app_t *app, const char* server_name, size_t server_name_length);
|
||||
|
||||
DLL_EXPORT bool uws_constructor_failed(int ssl, uws_app_t *app);
|
||||
DLL_EXPORT unsigned int uws_num_subscribers(int ssl, uws_app_t *app, const char *topic, size_t topic_length);
|
||||
DLL_EXPORT bool uws_publish(int ssl, uws_app_t *app, const char *topic, size_t topic_length, const char *message, size_t message_length, uws_opcode_t opcode, bool compress);
|
||||
DLL_EXPORT void *uws_get_native_handle(int ssl, uws_app_t *app);
|
||||
DLL_EXPORT void uws_remove_server_name(int ssl, uws_app_t *app, const char *hostname_pattern, size_t hostname_pattern_length);
|
||||
DLL_EXPORT void uws_add_server_name(int ssl, uws_app_t *app, const char *hostname_pattern, size_t hostname_pattern_length);
|
||||
DLL_EXPORT void uws_add_server_name_with_options(int ssl, uws_app_t *app, const char *hostname_pattern, size_t hostname_pattern_length, struct us_bun_socket_context_options_t options);
|
||||
DLL_EXPORT void uws_missing_server_name(int ssl, uws_app_t *app, uws_missing_server_handler handler, void *user_data);
|
||||
DLL_EXPORT void uws_filter(int ssl, uws_app_t *app, uws_filter_handler handler, void *user_data);
|
||||
|
||||
//WebSocket
|
||||
DLL_EXPORT void uws_ws(int ssl, uws_app_t *app, const char *pattern, uws_socket_behavior_t behavior, void* user_data);
|
||||
DLL_EXPORT void *uws_ws_get_user_data(int ssl, uws_websocket_t *ws);
|
||||
DLL_EXPORT void uws_ws_close(int ssl, uws_websocket_t *ws);
|
||||
DLL_EXPORT uws_sendstatus_t uws_ws_send(int ssl, uws_websocket_t *ws, const char *message, size_t length, uws_opcode_t opcode);
|
||||
DLL_EXPORT uws_sendstatus_t uws_ws_send_with_options(int ssl, uws_websocket_t *ws, const char *message, size_t length, uws_opcode_t opcode, bool compress, bool fin);
|
||||
DLL_EXPORT uws_sendstatus_t uws_ws_send_fragment(int ssl, uws_websocket_t *ws, const char *message, size_t length, bool compress);
|
||||
DLL_EXPORT uws_sendstatus_t uws_ws_send_first_fragment(int ssl, uws_websocket_t *ws, const char *message, size_t length, bool compress);
|
||||
DLL_EXPORT uws_sendstatus_t uws_ws_send_first_fragment_with_opcode(int ssl, uws_websocket_t *ws, const char *message, size_t length, uws_opcode_t opcode, bool compress);
|
||||
DLL_EXPORT uws_sendstatus_t uws_ws_send_last_fragment(int ssl, uws_websocket_t *ws, const char *message, size_t length, bool compress);
|
||||
DLL_EXPORT void uws_ws_end(int ssl, uws_websocket_t *ws, int code, const char *message, size_t length);
|
||||
DLL_EXPORT void uws_ws_cork(int ssl, uws_websocket_t *ws, void (*handler)(void *user_data), void *user_data);
|
||||
|
||||
DLL_EXPORT bool uws_ws_subscribe(int ssl, uws_websocket_t *ws, const char *topic, size_t length);
|
||||
DLL_EXPORT bool uws_ws_unsubscribe(int ssl, uws_websocket_t *ws, const char *topic, size_t length);
|
||||
DLL_EXPORT bool uws_ws_is_subscribed(int ssl, uws_websocket_t *ws, const char *topic, size_t length);
|
||||
DLL_EXPORT void uws_ws_iterate_topics(int ssl, uws_websocket_t *ws, void (*callback)(const char *topic, size_t length, void *user_data), void *user_data);
|
||||
DLL_EXPORT bool uws_ws_publish(int ssl, uws_websocket_t *ws, const char *topic, size_t topic_length, const char *message, size_t message_length);
|
||||
DLL_EXPORT bool uws_ws_publish_with_options(int ssl, uws_websocket_t *ws, const char *topic, size_t topic_length, const char *message, size_t message_length, uws_opcode_t opcode, bool compress);
|
||||
DLL_EXPORT unsigned int uws_ws_get_buffered_amount(int ssl, uws_websocket_t *ws);
|
||||
DLL_EXPORT size_t uws_ws_get_remote_address(int ssl, uws_websocket_t *ws, const char **dest);
|
||||
DLL_EXPORT size_t uws_ws_get_remote_address_as_text(int ssl, uws_websocket_t *ws, const char **dest);
|
||||
DLL_EXPORT void uws_res_get_remote_address_info(uws_res_t *res, const char **dest, size_t *length, unsigned int *port);
|
||||
|
||||
//Response
|
||||
DLL_EXPORT void uws_res_end(int ssl, uws_res_t *res, const char *data, size_t length, bool close_connection);
|
||||
DLL_EXPORT uws_try_end_result_t uws_res_try_end(int ssl, uws_res_t *res, const char *data, size_t length, uint64_t total_size, bool close_connection);
|
||||
DLL_EXPORT void uws_res_cork(int ssl, uws_res_t *res, void(*callback)(uws_res_t *res, void* user_data) ,void* user_data);
|
||||
DLL_EXPORT void uws_res_pause(int ssl, uws_res_t *res);
|
||||
DLL_EXPORT void uws_res_resume(int ssl, uws_res_t *res);
|
||||
DLL_EXPORT void uws_res_write_continue(int ssl, uws_res_t *res);
|
||||
DLL_EXPORT void uws_res_write_status(int ssl, uws_res_t *res, const char *status, size_t length);
|
||||
DLL_EXPORT void uws_res_write_header(int ssl, uws_res_t *res, const char *key, size_t key_length, const char *value, size_t value_length);
|
||||
|
||||
DLL_EXPORT void uws_res_write_header_int(int ssl, uws_res_t *res, const char *key, size_t key_length, uint64_t value);
|
||||
DLL_EXPORT void uws_res_end_without_body(int ssl, uws_res_t *res, bool close_connection);
|
||||
DLL_EXPORT bool uws_res_write(int ssl, uws_res_t *res, const char *data, size_t length);
|
||||
DLL_EXPORT uint64_t uws_res_get_write_offset(int ssl, uws_res_t *res);
|
||||
DLL_EXPORT void uws_res_override_write_offset(int ssl, uws_res_t *res, uint64_t offset);
|
||||
DLL_EXPORT bool uws_res_has_responded(int ssl, uws_res_t *res);
|
||||
DLL_EXPORT void uws_res_on_writable(int ssl, uws_res_t *res, bool (*handler)(uws_res_t *res, uint64_t, void *optional_data), void *user_data);
|
||||
DLL_EXPORT void uws_res_on_aborted(int ssl, uws_res_t *res, void (*handler)(uws_res_t *res, void *optional_data), void *optional_data);
|
||||
DLL_EXPORT void uws_res_on_data(int ssl, uws_res_t *res, void (*handler)(uws_res_t *res, const char *chunk, size_t chunk_length, bool is_end, void *optional_data), void *optional_data);
|
||||
DLL_EXPORT void uws_res_upgrade(int ssl, uws_res_t *res, void *data, const char *sec_web_socket_key, size_t sec_web_socket_key_length, const char *sec_web_socket_protocol, size_t sec_web_socket_protocol_length, const char *sec_web_socket_extensions, size_t sec_web_socket_extensions_length, uws_socket_context_t *ws);
|
||||
DLL_EXPORT size_t uws_res_get_remote_address(int ssl, uws_res_t *res, const char **dest);
|
||||
DLL_EXPORT size_t uws_res_get_remote_address_as_text(int ssl, uws_res_t *res, const char **dest);
|
||||
#ifdef UWS_WITH_PROXY
|
||||
DLL_EXPORT size_t uws_res_get_proxied_remote_address(int ssl, uws_res_t *res, const char **dest);
|
||||
DLL_EXPORT size_t uws_res_get_proxied_remote_address_as_text(int ssl, uws_res_t *res, const char **dest);
|
||||
#endif
|
||||
DLL_EXPORT void *uws_res_get_native_handle(int ssl, uws_res_t *res);
|
||||
|
||||
//Request
|
||||
DLL_EXPORT bool uws_req_is_ancient(uws_req_t *res);
|
||||
DLL_EXPORT bool uws_req_get_yield(uws_req_t *res);
|
||||
DLL_EXPORT void uws_req_set_yield(uws_req_t *res, bool yield);
|
||||
DLL_EXPORT size_t uws_req_get_url(uws_req_t *res, const char **dest);
|
||||
DLL_EXPORT size_t uws_req_get_full_url(uws_req_t *res, const char **dest);
|
||||
DLL_EXPORT size_t uws_req_get_method(uws_req_t *res, const char **dest);
|
||||
DLL_EXPORT size_t uws_req_get_case_sensitive_method(uws_req_t *res, const char **dest);
|
||||
|
||||
DLL_EXPORT size_t uws_req_get_header(uws_req_t *res, const char *lower_case_header, size_t lower_case_header_length, const char **dest);
|
||||
DLL_EXPORT void uws_req_for_each_header(uws_req_t *res, uws_get_headers_server_handler handler, void *user_data);
|
||||
DLL_EXPORT size_t uws_req_get_query(uws_req_t *res, const char *key, size_t key_length, const char **dest);
|
||||
DLL_EXPORT size_t uws_req_get_parameter(uws_req_t *res, unsigned short index, const char **dest);
|
||||
|
||||
DLL_EXPORT struct us_loop_t *uws_get_loop();
|
||||
DLL_EXPORT struct us_loop_t *uws_get_loop_with_native(void* existing_native_loop);
|
||||
DLL_EXPORT void uws_loop_defer(struct us_loop_t *loop, void( cb(void *user_data) ), void *user_data);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
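The compression enum above keeps two independent fields in one value: the low byte (_COMPRESSOR_MASK) selects the compressor and bits 8-11 (_DECOMPRESSOR_MASK) select the decompressor. As a hedged illustration (not part of this diff), a dedicated compressor and decompressor could therefore be combined with a bitwise OR when filling in uws_socket_behavior_t; the concrete sizes below are only an example:

uws_socket_behavior_t behavior = {0};
/* Hypothetical sketch: 4 KB dedicated compressor plus 16 KB dedicated decompressor. */
behavior.compression = (uws_compress_options_t)(DEDICATED_COMPRESSOR_4KB | DEDICATED_DECOMPRESSOR_16KB);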
@@ -124,7 +124,7 @@ private:
|
||||
|
||||
/* Signal broken HTTP request only if we have a pending request */
|
||||
if (httpResponseData->onAborted) {
|
||||
httpResponseData->onAborted((HttpResponse<SSL> *)s, httpResponseData->userData);
|
||||
httpResponseData->onAborted();
|
||||
}
|
||||
|
||||
/* Destruct socket ext */
|
||||
@@ -258,7 +258,7 @@ private:
|
||||
}
|
||||
|
||||
/* We might respond in the handler, so do not change timeout after this */
|
||||
httpResponseData->inStream(static_cast<HttpResponse<SSL>*>(user), data.data(), data.length(), fin, httpResponseData->userData);
|
||||
httpResponseData->inStream(data, fin);
|
||||
|
||||
/* Was the socket closed? */
|
||||
if (us_socket_is_closed(SSL, (struct us_socket_t *) user)) {
|
||||
@@ -366,7 +366,7 @@ private:
|
||||
|
||||
/* We expect the developer to return whether or not write was successful (true).
|
||||
* If write was never called, the developer should still return true so that we may drain. */
|
||||
bool success = httpResponseData->callOnWritable((HttpResponse<SSL> *)asyncSocket, httpResponseData->offset);
|
||||
bool success = httpResponseData->callOnWritable(httpResponseData->offset);
|
||||
|
||||
/* The developer indicated that their onWritable failed. */
|
||||
if (!success) {
|
||||
|
||||
@@ -558,11 +558,10 @@ public:
|
||||
}
|
||||
|
||||
/* Attach handler for writable HTTP response */
|
||||
HttpResponse *onWritable(void* userData, HttpResponseData<SSL>::OnWritableCallback handler) {
|
||||
HttpResponse *onWritable(MoveOnlyFunction<bool(uint64_t)> &&handler) {
|
||||
HttpResponseData<SSL> *httpResponseData = getHttpResponseData();
|
||||
|
||||
httpResponseData->userData = userData;
|
||||
httpResponseData->onWritable = handler;
|
||||
httpResponseData->onWritable = std::move(handler);
|
||||
return this;
|
||||
}
|
||||
|
||||
@@ -575,11 +574,10 @@ public:
|
||||
}
|
||||
|
||||
/* Attach handler for aborted HTTP request */
|
||||
HttpResponse *onAborted(void* userData, HttpResponseData<SSL>::OnAbortedCallback handler) {
|
||||
HttpResponse *onAborted(MoveOnlyFunction<void()> &&handler) {
|
||||
HttpResponseData<SSL> *httpResponseData = getHttpResponseData();
|
||||
|
||||
httpResponseData->userData = userData;
|
||||
httpResponseData->onAborted = handler;
|
||||
|
||||
httpResponseData->onAborted = std::move(handler);
|
||||
return this;
|
||||
}
|
||||
HttpResponse* clearOnWritableAndAborted() {
|
||||
@@ -596,10 +594,9 @@ public:
|
||||
return this;
|
||||
}
|
||||
/* Attach a read handler for data sent. Will be called with FIN set true if last segment. */
|
||||
void onData(void* userData, HttpResponseData<SSL>::OnDataCallback handler) {
|
||||
void onData(MoveOnlyFunction<void(std::string_view, bool)> &&handler) {
|
||||
HttpResponseData<SSL> *data = getHttpResponseData();
|
||||
data->userData = userData;
|
||||
data->inStream = handler;
|
||||
data->inStream = std::move(handler);
|
||||
|
||||
/* Always reset this counter here */
|
||||
data->received_bytes_per_timeout = 0;
|
||||
|
||||
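Taken together, the three hunks above swap the fork's userData-plus-function-pointer callbacks for upstream uWS's move-only callables, so per-response state can live in lambda captures instead of being threaded through a void* side channel. A hedged sketch of a call site under the new signatures follows; res, readBodyAndEcho, and the captured body string are illustrative and not taken from the diff.

// Fragment: assumes <string>, <string_view>, and the uWS headers are available,
// and that `res` is a uWS::HttpResponse<false> * obtained inside a route handler.
void readBodyAndEcho(uWS::HttpResponse<false> *res) {
    res->onData([res, body = std::string()](std::string_view chunk, bool isLast) mutable {
        body.append(chunk);
        if (isLast) {
            res->end(body);   // respond once the FIN (last) segment arrives
        }
    });

    res->onAborted([]() {
        // Peer disconnected before the body completed; release captured state here.
    });

    res->onWritable([](uint64_t /*offset*/) {
        // Return whether your own write succeeded; if you did not write at all,
        // return true anyway so uWS may continue draining.
        return true;
    });
}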
@@ -33,10 +33,6 @@ struct HttpResponseData : AsyncSocketData<SSL>, HttpParser {
|
||||
template <bool> friend struct HttpResponse;
|
||||
template <bool> friend struct HttpContext;
|
||||
public:
|
||||
using OnWritableCallback = bool (*)(uWS::HttpResponse<SSL>*, uint64_t, void*);
|
||||
using OnAbortedCallback = void (*)(uWS::HttpResponse<SSL>*, void*);
|
||||
using OnDataCallback = void (*)(uWS::HttpResponse<SSL>* response, const char* chunk, size_t chunk_length, bool, void*);
|
||||
|
||||
/* When we are done with a response we mark it like so */
|
||||
void markDone() {
|
||||
onAborted = nullptr;
|
||||
@@ -50,15 +46,15 @@ struct HttpResponseData : AsyncSocketData<SSL>, HttpParser {
|
||||
}
|
||||
|
||||
/* Caller of onWritable. It is possible onWritable calls markDone so we need to borrow it. */
|
||||
bool callOnWritable( uWS::HttpResponse<SSL>* response, uint64_t offset) {
|
||||
bool callOnWritable(uint64_t offset) {
|
||||
/* Borrow real onWritable */
|
||||
auto* borrowedOnWritable = std::move(onWritable);
|
||||
MoveOnlyFunction<bool(uint64_t)> borrowedOnWritable = std::move(onWritable);
|
||||
|
||||
/* Set onWritable to placeholder */
|
||||
onWritable = [](uWS::HttpResponse<SSL>*, uint64_t, void*) {return true;};
|
||||
onWritable = [](uint64_t) {return true;};
|
||||
|
||||
/* Run borrowed onWritable */
|
||||
bool ret = borrowedOnWritable(response, offset, userData);
|
||||
bool ret = borrowedOnWritable(offset);
|
||||
|
||||
/* If we still have onWritable (the placeholder) then move back the real one */
|
||||
if (onWritable) {
|
||||
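The borrow-and-restore dance here exists because the handler may call markDone, and therefore clear onWritable, while it is still executing; moving the callable out before invoking it keeps that safe. Below is a self-contained sketch of the same idiom using std::move_only_function (C++23) as a stand-in for uWS's MoveOnlyFunction; the type and names are hypothetical, and it assumes onWritable is non-empty when called, as in the real code.

#include <cstdint>
#include <functional>   // std::move_only_function, C++23

struct WritableSource {
    std::move_only_function<bool(uint64_t)> onWritable;

    void markDone() { onWritable = nullptr; }

    bool callOnWritable(uint64_t offset) {
        // Borrow the real handler; it may call markDone() on us while running.
        auto borrowed = std::move(onWritable);
        onWritable = [](uint64_t) { return true; };   // harmless placeholder

        bool ret = borrowed(offset);

        // If the placeholder is still installed (markDone was not called),
        // move the real handler back for the next writable event.
        if (onWritable) {
            onWritable = std::move(borrowed);
        }
        return ret;
    }
};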
@@ -78,13 +74,10 @@ struct HttpResponseData : AsyncSocketData<SSL>, HttpParser {
|
||||
HTTP_CONNECTION_CLOSE = 16 // used
|
||||
};
|
||||
|
||||
/* Shared context pointer */
|
||||
void* userData = nullptr;
|
||||
|
||||
/* Per socket event handlers */
|
||||
OnWritableCallback onWritable = nullptr;
|
||||
OnAbortedCallback onAborted = nullptr;
|
||||
OnDataCallback inStream = nullptr;
|
||||
MoveOnlyFunction<bool(uint64_t)> onWritable;
|
||||
MoveOnlyFunction<void()> onAborted;
|
||||
MoveOnlyFunction<void(std::string_view, bool)> inStream; // onData
|
||||
/* Outgoing offset */
|
||||
uint64_t offset = 0;
|
||||
|
||||
|
||||
@@ -20,8 +20,6 @@
|
||||
#ifndef UWS_PERMESSAGEDEFLATE_H
|
||||
#define UWS_PERMESSAGEDEFLATE_H
|
||||
|
||||
#define UWS_USE_LIBDEFLATE 1
|
||||
|
||||
#include <cstdint>
|
||||
#include <cstring>
|
||||
|
||||
@@ -136,9 +134,6 @@ struct ZlibContext {
|
||||
|
||||
struct DeflationStream {
|
||||
z_stream deflationStream = {};
|
||||
#ifdef UWS_USE_LIBDEFLATE
|
||||
unsigned char reset_buffer[4096 + 1];
|
||||
#endif
|
||||
|
||||
DeflationStream(CompressOptions compressOptions) {
|
||||
|
||||
@@ -159,11 +154,13 @@ struct DeflationStream {
|
||||
/* Run a fast path in case of shared_compressor */
|
||||
if (reset) {
|
||||
size_t written = 0;
|
||||
written = libdeflate_deflate_compress(zlibContext->compressor, raw.data(), raw.length(), reset_buffer, 4096);
|
||||
static unsigned char buf[1024 + 1];
|
||||
|
||||
written = libdeflate_deflate_compress(zlibContext->compressor, raw.data(), raw.length(), buf, 1024);
|
||||
|
||||
if (written) {
|
||||
memcpy(&reset_buffer[written], "\x00", 1);
|
||||
return std::string_view((char *) reset_buffer, written + 1);
|
||||
memcpy(&buf[written], "\x00", 1);
|
||||
return std::string_view((char *) buf, written + 1);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
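One detail worth spelling out about the fast path above: libdeflate_deflate_compress returns 0 when the compressed output does not fit in the destination buffer, which is why the code only takes the fast path when written is non-zero and otherwise falls through to the streaming zlib path. A minimal sketch of that contract follows; the 4096-byte scratch size and the trailing 0x00 byte mirror the diff, but the helper itself is an illustration, not code from it.

#include <libdeflate.h>
#include <string_view>

// Try to deflate `raw` in one shot into a fixed scratch buffer. Returns an empty
// view when libdeflate reports the output would not fit, signalling the caller
// to fall back to the z_stream path instead.
static std::string_view try_fast_deflate(libdeflate_compressor *compressor,
                                         std::string_view raw,
                                         unsigned char (&scratch)[4096 + 1]) {
    size_t written = libdeflate_deflate_compress(compressor,
                                                 raw.data(), raw.length(),
                                                 scratch, 4096);
    if (written == 0) {
        return {};                     // output buffer too small for this message
    }
    scratch[written] = 0x00;           // trailing byte, as appended in the diff
    return std::string_view(reinterpret_cast<char *>(scratch), written + 1);
}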
@@ -217,9 +214,6 @@ struct DeflationStream {
|
||||
|
||||
struct InflationStream {
|
||||
z_stream inflationStream = {};
|
||||
#ifdef UWS_USE_LIBDEFLATE
|
||||
char buf[4096];
|
||||
#endif
|
||||
|
||||
InflationStream(CompressOptions compressOptions) {
|
||||
/* Inflation windowBits are the top 8 bits of the 16 bit compressOptions */
|
||||
@@ -236,12 +230,13 @@ struct InflationStream {
|
||||
#ifdef UWS_USE_LIBDEFLATE
|
||||
/* Try fast path first */
|
||||
size_t written = 0;
|
||||
static char buf[1024];
|
||||
|
||||
/* We have to pad 9 bytes and restore those bytes when done since 9 is more than 6 of next WebSocket message */
|
||||
char tmp[9];
|
||||
memcpy(tmp, (char *) compressed.data() + compressed.length(), 9);
|
||||
memcpy((char *) compressed.data() + compressed.length(), "\x00\x00\xff\xff\x01\x00\x00\xff\xff", 9);
|
||||
libdeflate_result res = libdeflate_deflate_decompress(zlibContext->decompressor, compressed.data(), compressed.length() + 9, buf, 4096, &written);
|
||||
libdeflate_result res = libdeflate_deflate_decompress(zlibContext->decompressor, compressed.data(), compressed.length() + 9, buf, 1024, &written);
|
||||
memcpy((char *) compressed.data() + compressed.length(), tmp, 9);
|
||||
|
||||
if (res == 0) {
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
// clang-format off
|
||||
|
||||
#ifndef UWS_WEBSOCKETCONTEXT_H
|
||||
#define UWS_WEBSOCKETCONTEXT_H
|
||||
|
||||
@@ -270,7 +270,7 @@ private:
|
||||
webSocketData->subscriber = nullptr;
|
||||
|
||||
if (webSocketContextData->closeHandler) {
|
||||
webSocketContextData->closeHandler((WebSocket<SSL, isServer, USERDATA> *) s, 1006, reason != NULL && code > 0 ? std::string_view{(char *) reason, (size_t) code} : std::string_view());
|
||||
webSocketContextData->closeHandler((WebSocket<SSL, isServer, USERDATA> *) s, 1006, {(char *) reason, (size_t) code});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
#include <string_view>
|
||||
|
||||
// bun-specific
|
||||
#include "wtf/SIMDUTF.h"
|
||||
#include "simdutf.h"
|
||||
|
||||
namespace uWS {
|
||||
|
||||
|
||||
@@ -3,9 +3,9 @@ param(
|
||||
)
|
||||
|
||||
$ErrorActionPreference = 'Stop'
|
||||
|
||||
. (Join-Path $PSScriptRoot "env.ps1")
|
||||
if ($env:CI -eq "true") {
|
||||
|
||||
if ($env:CI) {
|
||||
& (Join-Path $PSScriptRoot "update-submodules.ps1")
|
||||
}
|
||||
|
||||
@@ -79,10 +79,6 @@ Build-Dependency `
|
||||
-Script "lshpack" `
|
||||
-Outputs @("lshpack.lib")
|
||||
|
||||
Build-Dependency `
|
||||
-Script "libdeflate" `
|
||||
-Outputs @("deflate.lib")
|
||||
|
||||
if (!($Script:DidAnything)) {
|
||||
Write-Host "(run with -Force to rebuild all)"
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ set -eo pipefail
|
||||
source "$(dirname -- "${BASH_SOURCE[0]}")/env.sh"
|
||||
|
||||
if [[ "$CI" ]]; then
|
||||
$(dirname -- "${BASH_SOURCE[0]}")/update-submodules.sh
|
||||
$(dirname -- "${BASH_SOURCE[0]}")/update-submodules.sh
|
||||
fi
|
||||
|
||||
FORCE=
|
||||
@@ -36,11 +36,9 @@ fi
|
||||
dep() {
|
||||
local submodule="$1"
|
||||
local script="$2"
|
||||
CACHE_KEY=
|
||||
if [ "$CACHE" == "1" ]; then
|
||||
local hash="$(echo "$SUBMODULES" | grep "$submodule" | awk '{print $1}')"
|
||||
local os="$(uname -s | tr '[:upper:]' '[:lower:]')"
|
||||
local arch="$(uname -m)"
|
||||
CACHE_KEY="$submodule/$hash-$os-$arch-$CPU_TARGET"
|
||||
CACHE_KEY="$submodule/$(echo "$SUBMODULES" | grep "$submodule" | git hash-object --stdin)"
|
||||
fi
|
||||
if [ -z "$FORCE" ]; then
|
||||
HAS_ALL_DEPS=1
|
||||
@@ -94,7 +92,6 @@ dep mimalloc mimalloc libmimalloc.a libmimalloc.o
|
||||
dep tinycc tinycc libtcc.a
|
||||
dep zlib zlib libz.a
|
||||
dep zstd zstd libzstd.a
|
||||
dep libdeflate libdeflate libdeflate.a
|
||||
dep ls-hpack lshpack liblshpack.a
|
||||
|
||||
if [ "$BUILT_ANY" -eq 0 ]; then
|
||||
|
||||
@@ -3,7 +3,6 @@ $ErrorActionPreference = 'Stop' # Setting strict mode, similar to 'set -euo pip
|
||||
|
||||
Push-Location (Join-Path $BUN_DEPS_DIR 'boringssl')
|
||||
try {
|
||||
Remove-Item -ErrorAction SilentlyContinue -Recurse -Force build
|
||||
Set-Location (mkdir -Force build)
|
||||
|
||||
Run cmake @CMAKE_FLAGS ..
|
||||
|
||||
@@ -1,29 +1,29 @@
|
||||
param (
|
||||
[switch] $Baseline = $False,
|
||||
[switch] $Fast = $False
|
||||
)
|
||||
|
||||
$ErrorActionPreference = 'Stop' # Setting strict mode, similar to 'set -euo pipefail' in bash
|
||||
|
||||
. (Join-Path $PSScriptRoot "env.ps1")
|
||||
if ($env:CI -eq "true") {
|
||||
$env:FORCE_UPDATE_SUBMODULES = "1"
|
||||
& (Join-Path $PSScriptRoot "update-submodules.ps1")
|
||||
& (Join-Path $PSScriptRoot "build-libuv.ps1") -CloneOnly $True
|
||||
}
|
||||
$Tag = If ($Baseline) { "-Baseline" } Else { "" }
|
||||
$UseBaselineBuild = If ($Baseline) { "ON" } Else { "OFF" }
|
||||
$UseLto = If ($Fast) { "OFF" } Else { "ON" }
|
||||
|
||||
# $CANARY_REVISION = if (Test-Path build/.canary_revision) { Get-Content build/.canary_revision } else { "0" }
|
||||
$CANARY_REVISION = 0
|
||||
.\scripts\env.ps1 $Tag
|
||||
.\scripts\update-submodules.ps1
|
||||
.\scripts\build-libuv.ps1 -CloneOnly $True
|
||||
cd build
|
||||
cmake .. @CMAKE_FLAGS `
|
||||
-G Ninja `
|
||||
-DCMAKE_BUILD_TYPE=Release `
|
||||
|
||||
cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release `
|
||||
-DNO_CODEGEN=0 `
|
||||
-DNO_CONFIGURE_DEPENDS=1 `
|
||||
-DBUN_CPP_ONLY=1
|
||||
"-DUSE_BASELINE_BUILD=${UseBaselineBuild}" `
|
||||
"-DUSE_LTO=${UseLto}" `
|
||||
"-DCANARY=${CANARY_REVISION}" `
|
||||
-DBUN_CPP_ONLY=1 $Flags
|
||||
if ($LASTEXITCODE -ne 0) { throw "CMake configuration failed" }
|
||||
|
||||
.\compile-cpp-only.ps1 -v -j $env:CPUS
|
||||
if ($LASTEXITCODE -ne 0) { throw "C++ compilation failed" }
|
||||
|
||||
# HACK: For some reason, the buildkite agent is hanging when uploading bun-cpp-objects.a
|
||||
# Best guess is that there is an issue when uploading files larger than 500 MB
|
||||
#
|
||||
# For now, use FileSplitter to split the file into smaller chunks:
|
||||
# https://www.powershellgallery.com/packages/FileSplitter/1.3
|
||||
if ($env:BUILDKITE) {
|
||||
Split-File -Path (Resolve-Path "bun-cpp-objects.a") -PartSizeBytes "50MB" -Verbose
|
||||
}
|
||||
.\compile-cpp-only.ps1 -v
|
||||
if ($LASTEXITCODE -ne 0) { throw "C++ compilation failed" }
|
||||
scripts/build-bun-cpp.sh (new executable file, 48 lines)
@@ -0,0 +1,48 @@
|
||||
#!/usr/bin/env bash
|
||||
set -exo pipefail
|
||||
source $(dirname -- "${BASH_SOURCE[0]}")/env.sh
|
||||
|
||||
export USE_LTO="${USE_LTO:-ON}"
|
||||
case "$(uname -m)" in
|
||||
aarch64|arm64)
|
||||
export CPU_TARGET="${CPU_TARGET:-native}"
|
||||
;;
|
||||
*)
|
||||
export CPU_TARGET="${CPU_TARGET:-haswell}"
|
||||
;;
|
||||
esac
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--fast|--no-lto)
|
||||
export USE_LTO="OFF"
|
||||
shift
|
||||
;;
|
||||
--baseline)
|
||||
export CPU_TARGET="nehalem"
|
||||
shift
|
||||
;;
|
||||
--cpu)
|
||||
export CPU_TARGET="$2"
|
||||
shift
|
||||
shift
|
||||
;;
|
||||
*|-*|--*)
|
||||
echo "Unknown option $1"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
mkdir -p build
|
||||
cd build
|
||||
mkdir -p tmp_modules tmp_functions js codegen
|
||||
cmake .. \
|
||||
-GNinja \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DUSE_LTO=${USE_LTO} \
|
||||
-DCPU_TARGET=${CPU_TARGET} \
|
||||
-DBUN_CPP_ONLY=1 \
|
||||
-DNO_CONFIGURE_DEPENDS=1
|
||||
chmod +x ./compile-cpp-only.sh
|
||||
bash ./compile-cpp-only.sh -v
|
||||
scripts/build-bun-zig.sh (new executable file, 95 lines)
@@ -0,0 +1,95 @@
|
||||
#!/usr/bin/env bash
|
||||
set -exo pipefail
|
||||
source $(dirname -- "${BASH_SOURCE[0]}")/env.sh
|
||||
|
||||
cwd=$(pwd)
|
||||
zig=
|
||||
|
||||
if [[ "$CI" ]]; then
|
||||
# Since the zig build depends on files from the zig submodule,
|
||||
# make sure to update the submodule before building.
|
||||
git submodule update --init --recursive --progress --depth=1 --checkout src/deps/zig
|
||||
|
||||
# Also update the correct version of zig in the submodule.
|
||||
$(dirname -- "${BASH_SOURCE[0]}")/download-zig.sh
|
||||
fi
|
||||
|
||||
if [ -f "$cwd/.cache/zig/zig" ]; then
|
||||
zig="$cwd/.cache/zig/zig"
|
||||
else
|
||||
zig=$(which zig)
|
||||
fi
|
||||
|
||||
ZIG_OPTIMIZE="${ZIG_OPTIMIZE:-ReleaseFast}"
|
||||
CANARY="${CANARY:-0}"
|
||||
GIT_SHA="${GIT_SHA:-$(git rev-parse HEAD)}"
|
||||
|
||||
BUILD_MACHINE_ARCH="${BUILD_MACHINE_ARCH:-$(uname -m)}"
|
||||
DOCKER_MACHINE_ARCH=""
|
||||
if [[ "$BUILD_MACHINE_ARCH" == "x86_64" || "$BUILD_MACHINE_ARCH" == "amd64" ]]; then
|
||||
BUILD_MACHINE_ARCH="x86_64"
|
||||
DOCKER_MACHINE_ARCH="amd64"
|
||||
elif [[ "$BUILD_MACHINE_ARCH" == "aarch64" || "$BUILD_MACHINE_ARCH" == "arm64" ]]; then
|
||||
BUILD_MACHINE_ARCH="aarch64"
|
||||
DOCKER_MACHINE_ARCH="arm64"
|
||||
fi
|
||||
|
||||
TARGET_OS="${1:-linux}"
|
||||
TARGET_ARCH="${2:-x64}"
|
||||
TARGET_CPU="${3:-${CPU_TARGET:-native}}"
|
||||
|
||||
BUILDARCH=""
|
||||
if [[ "$TARGET_ARCH" == "x64" || "$TARGET_ARCH" == "x86_64" || "$TARGET_ARCH" == "amd64" ]]; then
|
||||
TARGET_ARCH="x86_64"
|
||||
BUILDARCH="amd64"
|
||||
elif [[ "$TARGET_ARCH" == "aarch64" || "$TARGET_ARCH" == "arm64" ]]; then
|
||||
TARGET_ARCH="aarch64"
|
||||
BUILDARCH="arm64"
|
||||
fi
|
||||
|
||||
TRIPLET=""
|
||||
if [[ "$TARGET_OS" == "linux" ]]; then
|
||||
TRIPLET="$TARGET_ARCH-linux-gnu"
|
||||
elif [[ "$TARGET_OS" == "darwin" ]]; then
|
||||
TRIPLET="$TARGET_ARCH-macos-none"
|
||||
elif [[ "$TARGET_OS" == "windows" ]]; then
|
||||
TRIPLET="$TARGET_ARCH-windows-msvc"
|
||||
fi
|
||||
|
||||
echo "--- Building identifier-cache"
|
||||
$zig run src/js_lexer/identifier_data.zig
|
||||
|
||||
echo "--- Building node-fallbacks"
|
||||
cd src/node-fallbacks
|
||||
bun install --frozen-lockfile
|
||||
bun run build
|
||||
cd "$cwd"
|
||||
|
||||
echo "--- Building codegen"
|
||||
bun install --frozen-lockfile
|
||||
make runtime_js fallback_decoder bun_error
|
||||
|
||||
echo "--- Building modules"
|
||||
mkdir -p build
|
||||
bun run src/codegen/bundle-modules.ts --debug=OFF build
|
||||
|
||||
echo "--- Building zig"
|
||||
cd build
|
||||
cmake .. \
|
||||
-GNinja \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DUSE_LTO=ON \
|
||||
-DZIG_OPTIMIZE="${ZIG_OPTIMIZE}" \
|
||||
-DGIT_SHA="${GIT_SHA}" \
|
||||
-DARCH="${TARGET_ARCH}" \
|
||||
-DBUILDARCH="${BUILDARCH}" \
|
||||
-DCPU_TARGET="${TARGET_CPU}" \
|
||||
-DZIG_TARGET="${TRIPLET}" \
|
||||
-DASSERTIONS="OFF" \
|
||||
-DWEBKIT_DIR="omit" \
|
||||
-DNO_CONFIGURE_DEPENDS=1 \
|
||||
-DNO_CODEGEN=1 \
|
||||
-DBUN_ZIG_OBJ_DIR="$cwd/build" \
|
||||
-DCANARY="$CANARY" \
|
||||
-DZIG_LIB_DIR=src/deps/zig/lib
|
||||
ONLY_ZIG=1 ninja "$cwd/build/bun-zig.o" -v
|
||||
@@ -1,16 +0,0 @@
|
||||
$ErrorActionPreference = 'Stop' # Setting strict mode, similar to 'set -euo pipefail' in bash
|
||||
. (Join-Path $PSScriptRoot "env.ps1")
|
||||
|
||||
Push-Location (Join-Path $BUN_DEPS_DIR 'libdeflate')
|
||||
try {
|
||||
Remove-Item CMakeCache.txt, CMakeFiles, build -Recurse -ErrorAction SilentlyContinue
|
||||
mkdir -Force build
|
||||
|
||||
Run cmake -S "." -B build @CMAKE_FLAGS -DLIBDEFLATE_BUILD_STATIC_LIB=ON -DLIBDEFLATE_BUILD_SHARED_LIB=OFF -DLIBDEFLATE_BUILD_GZIP=OFF
|
||||
Run cmake --build build --clean-first --config Release
|
||||
|
||||
# In https://github.com/ebiggers/libdeflate/releases/tag/v1.20, it's outputting libdeflate.a even on Windows
|
||||
Copy-Item build/deflatestatic.lib $BUN_DEPS_OUT_DIR/deflate.lib
|
||||
Write-Host "-> deflate.lib"
|
||||
} finally { Pop-Location }
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -exo pipefail
|
||||
source $(dirname -- "${BASH_SOURCE[0]}")/env.sh
|
||||
|
||||
mkdir -p $BUN_DEPS_OUT_DIR
|
||||
cd $BUN_DEPS_DIR/libdeflate
|
||||
rm -rf build CMakeCache.txt CMakeFiles
|
||||
cmake "${CMAKE_FLAGS[@]}" -DLIBDEFLATE_BUILD_STATIC_LIB=ON -DLIBDEFLATE_BUILD_SHARED_LIB=OFF -DLIBDEFLATE_BUILD_GZIP=OFF -B build -S . -G Ninja
|
||||
ninja libdeflate.a -C build
|
||||
cp build/libdeflate.a $BUN_DEPS_OUT_DIR/libdeflate.a
|
||||
@@ -7,25 +7,22 @@ MIMALLOC_VALGRIND_ENABLED_FLAG=${MIMALLOC_VALGRIND_ENABLED_FLAG:-}
|
||||
|
||||
cd $BUN_DEPS_DIR/mimalloc
|
||||
|
||||
rm -rf CMakeCache* CMakeFiles build
|
||||
rm -rf CMakeCache* CMakeFiles
|
||||
|
||||
mkdir build
|
||||
|
||||
cd build
|
||||
|
||||
cmake "${CMAKE_FLAGS[@]}" .. \
|
||||
cmake "${CMAKE_FLAGS[@]}" . \
|
||||
-DCMAKE_BUILD_TYPE=Debug \
|
||||
-DMI_DEBUG_FULL=1 \
|
||||
-DMI_DEBUG=1 \
|
||||
-DMI_SKIP_COLLECT_ON_EXIT=1 \
|
||||
-DMI_BUILD_SHARED=OFF \
|
||||
-DMI_BUILD_STATIC=ON \
|
||||
-DMI_BUILD_TESTS=OFF \
|
||||
-DMI_OSX_ZONE=OFF \
|
||||
-DMI_OSX_INTERPOSE=OFF \
|
||||
-DMI_BUILD_OBJECT=ON \
|
||||
-DMI_OVERRIDE=OFF \
|
||||
-DMI_TRACK_VALGRIND=ON \
|
||||
-DMI_BUILD_OBJECT=ON \
|
||||
-DMI_USE_CXX=ON \
|
||||
-DMI_OVERRIDE=OFF \
|
||||
-DMI_OSX_ZONE=OFF \
|
||||
-GNinja
|
||||
|
||||
ninja
|
||||
|
||||
@@ -3,7 +3,6 @@ $ErrorActionPreference = 'Stop' # Setting strict mode, similar to 'set -euo pip
|
||||
|
||||
Push-Location (Join-Path $BUN_DEPS_DIR 'mimalloc')
|
||||
try {
|
||||
Remove-Item -ErrorAction SilentlyContinue -Recurse -Force build
|
||||
Set-Location (mkdir -Force build)
|
||||
|
||||
Run cmake .. @CMAKE_FLAGS `
|
||||
|
||||
@@ -20,6 +20,8 @@ try {
|
||||
Run clang-cl -DTCC_TARGET_PE -DTCC_TARGET_X86_64 config.h -DC2STR -o c2str.exe conftest.c
|
||||
Run .\c2str.exe .\include\tccdefs.h tccdefs_.h
|
||||
|
||||
$Baseline = $env:BUN_DEV_ENV_SET -eq "Baseline=True"
|
||||
|
||||
Run clang-cl @($env:CFLAGS -split ' ') libtcc.c -o tcc.obj "-DTCC_TARGET_PE" "-DTCC_TARGET_X86_64" "-O2" "-W2" "-Zi" "-MD" "-GS-" "-c" "-MT"
|
||||
Run llvm-lib "tcc.obj" "-OUT:tcc.lib"
|
||||
|
||||
|
||||
scripts/build.sh (new executable file, 1415 lines; file diff suppressed because it is too large)
@@ -1,13 +1,17 @@
|
||||
param(
|
||||
[switch]$Baseline = $false
|
||||
param (
|
||||
[switch] $Baseline = $False,
|
||||
[switch] $Fast = $False
|
||||
)
|
||||
|
||||
$ErrorActionPreference = 'Stop' # Setting strict mode, similar to 'set -euo pipefail' in bash
|
||||
|
||||
$Target = If ($Baseline) { "windows-x64-baseline" } Else { "windows-x64" }
|
||||
$Tag = "bun-$Target"
|
||||
$TagSuffix = If ($Baseline) { "-Baseline" } Else { "" }
|
||||
$UseBaselineBuild = If ($Baseline) { "ON" } Else { "OFF" }
|
||||
$UseLto = If ($Fast) { "OFF" } Else { "ON" }
|
||||
|
||||
. (Join-Path $PSScriptRoot "env.ps1")
|
||||
.\scripts\env.ps1 $TagSuffix
|
||||
|
||||
mkdir -Force build
|
||||
buildkite-agent artifact download "**" build --step "${Target}-build-zig"
|
||||
@@ -17,24 +21,29 @@ mv -Force -ErrorAction SilentlyContinue build\build\bun-deps\* build\bun-deps
|
||||
mv -Force -ErrorAction SilentlyContinue build\build\* build
|
||||
|
||||
Set-Location build
|
||||
|
||||
# HACK: See scripts/build-bun-cpp.ps1
|
||||
Join-File -Path "$(Resolve-Path .)\bun-cpp-objects.a" -Verbose -DeletePartFiles
|
||||
|
||||
cmake .. @CMAKE_FLAGS `
|
||||
-G Ninja `
|
||||
-DCMAKE_BUILD_TYPE=Release `
|
||||
$CANARY_REVISION = 0
|
||||
cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release `
|
||||
-DNO_CODEGEN=1 `
|
||||
-DNO_CONFIGURE_DEPENDS=1 `
|
||||
"-DCPU_TARGET=${CPU_TARGET}" `
|
||||
"-DCANARY=${CANARY_REVISION}" `
|
||||
-DBUN_LINK_ONLY=1 `
|
||||
"-DUSE_BASELINE_BUILD=${UseBaselineBuild}" `
|
||||
"-DUSE_LTO=${UseLto}" `
|
||||
"-DBUN_DEPS_OUT_DIR=$(Resolve-Path bun-deps)" `
|
||||
"-DBUN_CPP_ARCHIVE=$(Resolve-Path bun-cpp-objects.a)" `
|
||||
"-DBUN_ZIG_OBJ_DIR=$(Resolve-Path .)"
|
||||
"-DBUN_ZIG_OBJ_DIR=$(Resolve-Path .)" `
|
||||
"$Flags"
|
||||
if ($LASTEXITCODE -ne 0) { throw "CMake configuration failed" }
|
||||
|
||||
ninja -v -j $env:CPUS
|
||||
ninja -v
|
||||
if ($LASTEXITCODE -ne 0) { throw "Link failed!" }
|
||||
|
||||
ls
|
||||
if ($Fast) {
|
||||
$Tag = "$Tag-nolto"
|
||||
}
|
||||
|
||||
Set-Location ..
|
||||
$Dist = mkdir -Force "${Tag}"
|
||||
cp -r build\bun.exe "$Dist\bun.exe"
|
||||
|
||||
scripts/buildkite-link-bun.sh (new executable file, 80 lines)
@@ -0,0 +1,80 @@
|
||||
#!/usr/bin/env bash
|
||||
set -exo pipefail
|
||||
source $(dirname -- "${BASH_SOURCE[0]}")/env.sh
|
||||
|
||||
export USE_LTO="${USE_LTO:-ON}"
|
||||
case "$(uname -m)" in
|
||||
aarch64|arm64)
|
||||
export CPU_TARGET="${CPU_TARGET:-native}"
|
||||
;;
|
||||
*)
|
||||
export CPU_TARGET="${CPU_TARGET:-haswell}"
|
||||
;;
|
||||
esac
|
||||
|
||||
export TAG=""
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--tag)
|
||||
export TAG="$2"
|
||||
shift
|
||||
shift
|
||||
;;
|
||||
--fast|--no-lto)
|
||||
export USE_LTO="OFF"
|
||||
shift
|
||||
;;
|
||||
--baseline)
|
||||
export CPU_TARGET="nehalem"
|
||||
shift
|
||||
;;
|
||||
--cpu)
|
||||
export CPU_TARGET="$2"
|
||||
shift
|
||||
shift
|
||||
;;
|
||||
*|-*|--*)
|
||||
echo "Unknown option $1"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ -z "$TAG" ]]; then
|
||||
echo "--tag <name> is required"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -rf release
|
||||
mkdir -p release
|
||||
buildkite-agent artifact download '**' release --step $TAG-build-deps
|
||||
buildkite-agent artifact download '**' release --step $TAG-build-zig
|
||||
buildkite-agent artifact download '**' release --step $TAG-build-cpp
|
||||
|
||||
cd release
|
||||
cmake .. \
|
||||
-GNinja \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DCPU_TARGET=${CPU_TARGET} \
|
||||
-DUSE_LTO=${USE_LTO} \
|
||||
-DBUN_LINK_ONLY=1 \
|
||||
-DBUN_ZIG_OBJ_DIR="$(pwd)/build" \
|
||||
-DBUN_CPP_ARCHIVE="$(pwd)/build/bun-cpp-objects.a" \
|
||||
-DBUN_DEPS_OUT_DIR="$(pwd)/build/bun-deps" \
|
||||
-DNO_CONFIGURE_DEPENDS=1
|
||||
ninja -v
|
||||
|
||||
if [[ "${USE_LTO}" == "OFF" ]]; then
|
||||
TAG="${TAG}-nolto"
|
||||
fi
|
||||
|
||||
chmod +x bun-profile bun
|
||||
mkdir -p bun-$TAG-profile/ bun-$TAG/
|
||||
mv bun-profile bun-$TAG-profile/bun-profile
|
||||
mv bun bun-$TAG/bun
|
||||
zip -r bun-$TAG-profile.zip bun-$TAG-profile
|
||||
zip -r bun-$TAG.zip bun-$TAG
|
||||
|
||||
cd ..
|
||||
mv release/bun-$TAG.zip bun-$TAG.zip
|
||||
mv release/bun-$TAG-profile.zip bun-$TAG-profile.zip
|
||||
@@ -43,34 +43,11 @@ fi
|
||||
|
||||
rm -rf "$OUTDIR"
|
||||
|
||||
download () {
|
||||
local command="$1"
|
||||
local retries="$2"
|
||||
local options="$-"
|
||||
if [[ $options == *e* ]]; then
|
||||
set +e
|
||||
fi
|
||||
$command
|
||||
local exit_code=$?
|
||||
if [[ $options == *e* ]]; then
|
||||
set -e
|
||||
fi
|
||||
if [[ $exit_code -ne 0 && $retries -gt 0 ]]; then
|
||||
download "$command" $(($retries - 1))
|
||||
else
|
||||
return $exit_code
|
||||
fi
|
||||
}
|
||||
|
||||
# this is a big download so we will retry 5 times and ask curl to resume
|
||||
# download from where failure occurred if it fails and is rerun
|
||||
if [ ! -f "$tar" ]; then
|
||||
echo "-- Downloading WebKit"
|
||||
if ! download "curl -C - --http1.1 -o $tar.tmp -L $url" 5; then
|
||||
if ! curl -o "$tar" -L "$url"; then
|
||||
echo "Failed to download $url"
|
||||
exit 1
|
||||
else
|
||||
mv $tar.tmp $tar
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
@@ -1,3 +1,11 @@
|
||||
param(
|
||||
[switch]$Baseline = $false
|
||||
)
|
||||
|
||||
if ($ENV:BUN_DEV_ENV_SET -eq "Baseline=True") {
|
||||
$Baseline = $true
|
||||
}
|
||||
|
||||
$ErrorActionPreference = 'Stop' # Setting strict mode, similar to 'set -euo pipefail' in bash
|
||||
|
||||
# this is the environment script for building bun's dependencies
|
||||
@@ -30,27 +38,21 @@ if($Env:VSCMD_ARG_TGT_ARCH -eq "x86") {
|
||||
throw "Visual Studio environment is targetting 32 bit. This configuration is definetly a mistake."
|
||||
}
|
||||
|
||||
$ENV:BUN_DEV_ENV_SET = "Baseline=$Baseline";
|
||||
|
||||
$BUN_BASE_DIR = if ($env:BUN_BASE_DIR) { $env:BUN_BASE_DIR } else { Join-Path $ScriptDir '..' }
|
||||
$BUN_DEPS_DIR = if ($env:BUN_DEPS_DIR) { $env:BUN_DEPS_DIR } else { Join-Path $BUN_BASE_DIR 'src\deps' }
|
||||
$BUN_DEPS_OUT_DIR = if ($env:BUN_DEPS_OUT_DIR) { $env:BUN_DEPS_OUT_DIR } else { Join-Path $BUN_BASE_DIR 'build\bun-deps' }
|
||||
|
||||
$CPUS = if ($env:CPUS) { $env:CPUS } else { (Get-CimInstance -Class Win32_Processor).NumberOfCores }
|
||||
$Lto = if ($env:USE_LTO) { $env:USE_LTO -eq "1" } else { $False }
|
||||
$Baseline = if ($env:USE_BASELINE_BUILD) {
|
||||
$env:USE_BASELINE_BUILD -eq "1"
|
||||
} elseif ($env:BUILDKITE_STEP_KEY -match "baseline") {
|
||||
$True
|
||||
} else {
|
||||
$False
|
||||
}
|
||||
|
||||
$CC = "clang-cl"
|
||||
$CXX = "clang-cl"
|
||||
|
||||
$CFLAGS = '/O2 /Z7 /MT /O2 /Ob2 /DNDEBUG /U_DLL'
|
||||
$CXXFLAGS = '/O2 /Z7 /MT /O2 /Ob2 /DNDEBUG /U_DLL -Xclang -fno-c++-static-destructors '
|
||||
$CXXFLAGS = '/O2 /Z7 /MT /O2 /Ob2 /DNDEBUG /U_DLL'
|
||||
|
||||
if ($Lto) {
|
||||
if ($env:USE_LTO -eq "1") {
|
||||
$CXXFLAGS += " -fuse-ld=lld -flto -Xclang -emit-llvm-bc"
|
||||
$CFLAGS += " -fuse-ld=lld -flto -Xclang -emit-llvm-bc"
|
||||
}
|
||||
@@ -61,14 +63,6 @@ $env:CPU_TARGET = $CPU_NAME
|
||||
$CFLAGS += " -march=${CPU_NAME}"
|
||||
$CXXFLAGS += " -march=${CPU_NAME}"
|
||||
|
||||
$Canary = If ($env:CANARY) {
|
||||
$env:CANARY
|
||||
} ElseIf ($env:BUILDKITE -eq "true") {
|
||||
(buildkite-agent meta-data get canary)
|
||||
} Else {
|
||||
"1"
|
||||
}
|
||||
|
||||
$CMAKE_FLAGS = @(
|
||||
"-GNinja",
|
||||
"-DCMAKE_BUILD_TYPE=Release",
|
||||
@@ -78,15 +72,15 @@ $CMAKE_FLAGS = @(
|
||||
"-DCMAKE_CXX_FLAGS=$CXXFLAGS",
|
||||
"-DCMAKE_C_FLAGS_RELEASE=$CFLAGS",
|
||||
"-DCMAKE_CXX_FLAGS_RELEASE=$CXXFLAGS",
|
||||
"-DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded",
|
||||
"-DCANARY=$Canary"
|
||||
"-DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded"
|
||||
)
|
||||
|
||||
if (Get-Command llvm-lib -ErrorAction SilentlyContinue) {
|
||||
$AR_CMD = Get-Command llvm-lib -ErrorAction SilentlyContinue
|
||||
$AR = $AR_CMD.Path
|
||||
$env:AR = $AR
|
||||
$CMAKE_FLAGS += "-DCMAKE_AR=$AR"
|
||||
if ($env:USE_LTO -eq "1") {
|
||||
if (Get-Command lld-lib -ErrorAction SilentlyContinue) {
|
||||
$AR = Get-Command lld-lib -ErrorAction SilentlyContinue
|
||||
$env:AR = $AR
|
||||
$CMAKE_FLAGS += "-DCMAKE_AR=$AR"
|
||||
}
|
||||
}
|
||||
|
||||
$env:CC = "clang-cl"
|
||||
@@ -99,14 +93,7 @@ if ($Baseline) {
|
||||
$CMAKE_FLAGS += "-DUSE_BASELINE_BUILD=ON"
|
||||
}
|
||||
|
||||
if ($Lto) {
|
||||
$CMAKE_FLAGS += "-DUSE_LTO=ON"
|
||||
}
|
||||
|
||||
if (Get-Command ccache -ErrorAction SilentlyContinue) {
|
||||
$CMAKE_FLAGS += "-DCMAKE_C_COMPILER_LAUNCHER=ccache"
|
||||
$CMAKE_FLAGS += "-DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
|
||||
} elseif (Get-Command sccache -ErrorAction SilentlyContinue) {
|
||||
if (Get-Command sccache -ErrorAction SilentlyContinue) {
|
||||
# Continue with local compiler if sccache has an error
|
||||
$env:SCCACHE_IGNORE_SERVER_IO_ERROR = "1"
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Hack for buildkite sometimes not having the right path
|
||||
# Hack for Buildkite sometimes not having the right path
|
||||
if [[ "${CI:-}" == "1" || "${CI:-}" == "true" ]]; then
|
||||
if [ -f ~/.bashrc ]; then
|
||||
source ~/.bashrc
|
||||
@@ -24,7 +24,12 @@ export BUN_DEPS_OUT_DIR=${BUN_DEPS_OUT_DIR:-$BUN_BASE_DIR/build/bun-deps}
|
||||
export LC_CTYPE="en_US.UTF-8"
|
||||
export LC_ALL="en_US.UTF-8"
|
||||
|
||||
if [[ $(uname -s) == 'Darwin' ]]; then
|
||||
if [[ "$CI" != "1" && "$CI" != "true" ]]; then
|
||||
if [ -f $SCRIPT_DIR/env.local ]; then
|
||||
echo "Sourcing $SCRIPT_DIR/env.local"
|
||||
source $SCRIPT_DIR/env.local
|
||||
fi
|
||||
elif [[ $(uname -s) == 'Darwin' ]]; then
|
||||
export CXX="$(brew --prefix llvm)@$LLVM_VERSION/bin/clang++"
|
||||
export CC="$(brew --prefix llvm)@$LLVM_VERSION/bin/clang"
|
||||
export AR="$(brew --prefix llvm)@$LLVM_VERSION/bin/llvm-ar"
|
||||
@@ -32,11 +37,6 @@ if [[ $(uname -s) == 'Darwin' ]]; then
|
||||
export LIBTOOL="$(brew --prefix llvm)@$LLVM_VERSION/bin/llvm-libtool-darwin"
|
||||
export PATH="$(brew --prefix llvm)@$LLVM_VERSION/bin:$PATH"
|
||||
ln -sf $LIBTOOL "$(brew --prefix llvm)@$LLVM_VERSION/bin/libtool" || true
|
||||
elif [[ "$CI" != "1" && "$CI" != "true" ]]; then
|
||||
if [[ -f $SCRIPT_DIR/env.local ]]; then
|
||||
echo "Sourcing $SCRIPT_DIR/env.local"
|
||||
source $SCRIPT_DIR/env.local
|
||||
fi
|
||||
fi
|
||||
|
||||
# this compiler detection could be better
|
||||
@@ -60,7 +60,7 @@ export CXXFLAGS='-O3 -fno-exceptions -fno-rtti -fvisibility=hidden -fvisibility-
|
||||
|
||||
# Add flags for LTO
|
||||
# We cannot enable LTO on macOS for dependencies because it requires -fuse-ld=lld and lld causes many segfaults on macOS (likely related to stack size)
|
||||
if [ "$USE_LTO" == "1" ] || [ "$USE_LTO" == "ON" ]; then
|
||||
if [ "$BUN_ENABLE_LTO" == "1" ]; then
|
||||
export CFLAGS="$CFLAGS -flto=full "
|
||||
export CXXFLAGS="$CXXFLAGS -flto=full -fwhole-program-vtables -fforce-emit-vtables "
|
||||
export LDFLAGS="$LDFLAGS -flto=full -fwhole-program-vtables -fforce-emit-vtables "
|
||||
@@ -76,7 +76,7 @@ fi
|
||||
# https://gitlab.kitware.com/cmake/cmake/-/issues/25755
|
||||
if [[ $(uname -s) == 'Darwin' && $LLVM_VERSION == '18' ]]; then
|
||||
export CFLAGS="$CFLAGS -fno-define-target-os-macros "
|
||||
export CXXFLAGS="$CXXFLAGS -fno-define-target-os-macros -D_LIBCXX_ENABLE_ASSERTIONS=0 -D_LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_NONE "
|
||||
export CXXFLAGS="$CXXFLAGS -fno-define-target-os-macros "
|
||||
fi
|
||||
|
||||
# libarchive needs position-independent executables to compile successfully
|
||||
@@ -120,6 +120,7 @@ fi
|
||||
|
||||
if [[ $(uname -s) == 'Darwin' ]]; then
|
||||
export CMAKE_OSX_DEPLOYMENT_TARGET=${CMAKE_OSX_DEPLOYMENT_TARGET:-13.0}
|
||||
|
||||
CMAKE_FLAGS+=(-DCMAKE_OSX_DEPLOYMENT_TARGET=${CMAKE_OSX_DEPLOYMENT_TARGET})
|
||||
export CFLAGS="$CFLAGS -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET} -D__DARWIN_NON_CANCELABLE=1 "
|
||||
export CXXFLAGS="$CXXFLAGS -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET} -D__DARWIN_NON_CANCELABLE=1 "
|
||||
|
||||
@@ -6,162 +6,16 @@ import { copyFileSync, existsSync, mkdirSync, mkdtempSync, readFileSync, readdir
|
||||
import { basename, dirname, join } from "node:path";
|
||||
import { tmpdir } from "node:os";
|
||||
|
||||
const projectPath = dirname(import.meta.dirname);
|
||||
const vendorPath = process.env.BUN_VENDOR_PATH || join(projectPath, "vendor");
|
||||
|
||||
const isWindows = process.platform === "win32";
|
||||
const isMacOS = process.platform === "darwin";
|
||||
const isLinux = process.platform === "linux";
|
||||
|
||||
const cwd = dirname(import.meta.dirname);
|
||||
const spawnSyncTimeout = 1000 * 60;
|
||||
const spawnTimeout = 1000 * 60 * 3;
|
||||
|
||||
/**
|
||||
* @typedef {Object} S3UploadOptions
|
||||
* @property {string} [bucket]
|
||||
* @property {string} filename
|
||||
* @property {string} content
|
||||
* @property {Record<string, string>} [headers]
|
||||
*/
|
||||
|
||||
/**
|
||||
* @param {S3UploadOptions} options
|
||||
*/
|
||||
async function uploadFileToS3(options) {
|
||||
const { AwsV4Signer } = await import("aws4fetch");
|
||||
|
||||
const { bucket, filename, content, ...extra } = options;
|
||||
const baseUrl = getEnv(["S3_ENDPOINT", "S3_BASE_URL", "AWS_ENDPOINT"], "https://s3.amazonaws.com");
|
||||
const bucketUrl = new URL(bucket || getEnv(["S3_BUCKET", "AWS_BUCKET"]), baseUrl);
|
||||
|
||||
const signer = new AwsV4Signer({
|
||||
accessKeyId: getSecret(["S3_ACCESS_KEY_ID", "AWS_ACCESS_KEY_ID"]),
|
||||
secretAccessKey: getSecret(["S3_SECRET_ACCESS_KEY", "AWS_SECRET_ACCESS_KEY"]),
|
||||
url: new URL(filename, bucketUrl),
|
||||
method: "PUT",
|
||||
body: content,
|
||||
...extra,
|
||||
});
|
||||
|
||||
const { url, method, headers, body } = signer.sign();
|
||||
await fetchSafe(url, {
|
||||
method,
|
||||
headers,
|
||||
body,
|
||||
});
|
||||
|
||||
console.log("Uploaded file to S3:", {
|
||||
url: `${bucketUrl}`,
|
||||
filename,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* @typedef {Object} SentryRelease
|
||||
* @property {string} organizationId
|
||||
* @property {string} projectId
|
||||
* @property {string} version
|
||||
* @property {string} [url]
|
||||
* @property {string} [ref]
|
||||
* @property {string} [dateReleased]
|
||||
*/
|
||||
|
||||
/**
|
||||
* @param {SentryRelease} options
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
async function createSentryRelease(options) {
|
||||
const { organizationId, projectId, ...body } = options;
|
||||
|
||||
const baseUrl = getEnv("SENTRY_BASE_URL", "https://sentry.io");
|
||||
const url = new URL(`api/0/organizations/${organizationId}/releases`, baseUrl);
|
||||
const accessToken = getSecret(["SENTRY_AUTH_TOKEN", "SENTRY_TOKEN"]);
|
||||
|
||||
const release = await fetchSafe(url, {
|
||||
method: "POST",
|
||||
headers: {
|
||||
"Authorization": `Bearer ${accessToken}`,
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
body: JSON.stringify(body),
|
||||
format: "json",
|
||||
});
|
||||
|
||||
console.log("Created Sentry release:", release);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return {string}
|
||||
*/
|
||||
function getGithubToken() {
|
||||
const token = getEnv("GITHUB_TOKEN", null);
|
||||
if (token) {
|
||||
return token;
|
||||
}
|
||||
|
||||
const gh = which("gh");
|
||||
if (gh) {
|
||||
const { exitCode, stdout } = spawnSyncSafe(gh, ["auth", "token"]);
|
||||
if (exitCode === 0) {
|
||||
return stdout.trim();
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error("Failed to get GitHub token (set GITHUB_TOKEN or run `gh auth login`)");
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {string | string[]} name
|
||||
* @return {string}
|
||||
*/
|
||||
function getSecret(name) {
|
||||
return getEnv(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {string | string[]} name
|
||||
* @param {string | null} [defaultValue]
|
||||
* @returns {string | undefined}
|
||||
*/
|
||||
function getEnv(name, defaultValue) {
|
||||
let result = defaultValue;
|
||||
|
||||
for (const key of typeof name === "string" ? [name] : name) {
|
||||
const value = process.env[key];
|
||||
if (value) {
|
||||
result = value;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (result || result === null) {
|
||||
return result;
|
||||
}
|
||||
|
||||
throw new Error(`Environment variable is required: ${name}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* @typedef {Object} SpawnOptions
|
||||
* @property {boolean} [throwOnError]
|
||||
* @property {string} [cwd]
|
||||
* @property {string} [env]
|
||||
* @property {string} [encoding]
|
||||
* @property {number} [timeout]
|
||||
*/
|
||||
|
||||
/**
|
||||
* @typedef {Object} SpawnResult
|
||||
* @property {number | null} exitCode
|
||||
* @property {number | null} signalCode
|
||||
* @property {string} stdout
|
||||
* @property {string} stderr
|
||||
*/
|
||||
|
||||
/**
|
||||
* @param {string} command
|
||||
* @param {string[]} [args]
|
||||
* @param {SpawnOptions} [options]
|
||||
* @returns {Promise<SpawnResult>}
|
||||
*/
|
||||
async function spawnSafe(command, args, options = {}) {
|
||||
const result = new Promise((resolve, reject) => {
|
||||
let stdout = "";
|
||||
@@ -206,12 +60,6 @@ async function spawnSafe(command, args, options = {}) {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {string} command
|
||||
* @param {string[]} [args]
|
||||
* @param {SpawnOptions} [options]
|
||||
* @returns {SpawnResult}
|
||||
*/
|
||||
function spawnSyncSafe(command, args, options = {}) {
|
||||
try {
|
||||
const { error, status, signal, stdout, stderr } = spawnSync(command, args, {
|
||||
@@ -238,20 +86,6 @@ function spawnSyncSafe(command, args, options = {}) {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @typedef {Object} FetchOptions
|
||||
* @property {string} [method]
|
||||
* @property {Record<string, string>} [headers]
|
||||
* @property {string | Uint8Array} [body]
|
||||
* @property {"json" | "text" | "bytes"} [format]
|
||||
* @property {boolean} [throwOnError]
|
||||
*/
|
||||
|
||||
/**
|
||||
* @param {string | URL} url
|
||||
* @param {FetchOptions} [options]
|
||||
* @returns {Promise<Response | string | Uint8Array>}
|
||||
*/
|
||||
async function fetchSafe(url, options = {}) {
|
||||
let response;
|
||||
try {
|
||||
@@ -304,6 +138,47 @@ function which(command, path) {
|
||||
return result.trimEnd();
|
||||
}
|
||||
|
||||
function getZigTarget(os = process.platform, arch = process.arch) {
|
||||
if (arch === "x64") {
|
||||
if (os === "linux") return "linux-x86_64";
|
||||
if (os === "darwin") return "macos-x86_64";
|
||||
if (os === "win32") return "windows-x86_64";
|
||||
}
|
||||
if (arch === "arm64") {
|
||||
if (os === "linux") return "linux-aarch64";
|
||||
if (os === "darwin") return "macos-aarch64";
|
||||
}
|
||||
throw new Error(`Unsupported zig target: os=${os}, arch=${arch}`);
|
||||
}
|
||||
|
||||
function getRecommendedZigVersion() {
|
||||
const scriptPath = join(projectPath, "build.zig");
|
||||
try {
|
||||
const scriptContent = readFileSync(scriptPath, "utf-8");
|
||||
const match = scriptContent.match(/recommended_zig_version = "([^"]+)"/);
|
||||
if (!match) {
|
||||
throw new Error("File does not contain string: 'recommended_zig_version'");
|
||||
}
|
||||
return match[1];
|
||||
} catch (cause) {
|
||||
throw new Error("Failed to find recommended Zig version", { cause });
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @returns {Promise<string>}
|
||||
*/
|
||||
async function getLatestZigVersion() {
|
||||
try {
|
||||
const response = await fetchSafe("https://ziglang.org/download/index.json", { format: "json" });
|
||||
const { master } = response;
|
||||
const { version } = master;
|
||||
return version;
|
||||
} catch (cause) {
|
||||
throw new Error("Failed to get latest Zig version", { cause });
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {string} execPath
|
||||
* @returns {string | undefined}
|
||||
@@ -316,3 +191,110 @@ function getVersion(execPath) {
|
||||
}
|
||||
return result.trim();
|
||||
}
|
||||
|
||||
/**
|
||||
* @returns {string}
|
||||
*/
|
||||
function getTmpdir() {
|
||||
if (isMacOS && existsSync("/tmp")) {
|
||||
return "/tmp";
|
||||
}
|
||||
return tmpdir();
|
||||
}
|
||||
|
||||
/**
|
||||
* @returns {string}
|
||||
*/
|
||||
function mkTmpdir() {
|
||||
return mkdtempSync(join(getTmpdir(), "bun-"));
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {string} url
|
||||
* @param {string} [path]
|
||||
* @returns {Promise<string>}
|
||||
*/
|
||||
async function downloadFile(url, path) {
|
||||
const outPath = path || join(mkTmpdir(), basename(url));
|
||||
const bytes = await fetchSafe(url, { format: "bytes" });
|
||||
mkdirSync(dirname(outPath), { recursive: true });
|
||||
writeFileSync(outPath, bytes);
|
||||
return outPath;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {string} tarPath
|
||||
* @param {string} [path]
|
||||
* @returns {Promise<string>}
|
||||
*/
|
||||
async function extractFile(tarPath, path) {
|
||||
const outPath = path || join(mkTmpdir(), basename(tarPath));
|
||||
mkdirSync(outPath, { recursive: true });
|
||||
await spawnSafe("tar", ["-xf", tarPath, "-C", outPath, "--strip-components=1"]);
|
||||
return outPath;
|
||||
}
|
||||
|
||||
const dependencies = [
|
||||
{
|
||||
name: "zig",
|
||||
version: getRecommendedZigVersion(),
|
||||
download: downloadZig,
|
||||
},
|
||||
];
|
||||
|
||||
async function getDependencyPath(name) {
|
||||
let dependency;
|
||||
for (const entry of dependencies) {
|
||||
if (name === entry.name) {
|
||||
dependency = entry;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!dependency) {
|
||||
throw new Error(`Unknown dependency: ${name}`);
|
||||
}
|
||||
const { version, download } = dependency;
|
||||
mkdirSync(vendorPath, { recursive: true });
|
||||
for (const path of readdirSync(vendorPath)) {
|
||||
if (!path.startsWith(name)) {
|
||||
continue;
|
||||
}
|
||||
const dependencyPath = join(vendorPath, path);
|
||||
const dependencyVersion = getVersion(dependencyPath);
|
||||
if (dependencyVersion === version) {
|
||||
return dependencyPath;
|
||||
}
|
||||
}
|
||||
if (!download) {
|
||||
throw new Error(`Dependency not found: ${name}`);
|
||||
}
|
||||
return await download(version);
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {string} [version]
|
||||
*/
|
||||
async function downloadZig(version) {
|
||||
const target = getZigTarget();
|
||||
const expectedVersion = version || getRecommendedZigVersion();
|
||||
const url = `https://ziglang.org/builds/zig-${target}-${expectedVersion}.tar.xz`;
|
||||
const tarPath = await downloadFile(url);
|
||||
const extractedPath = await extractFile(tarPath);
|
||||
const zigPath = join(extractedPath, exePath("zig"));
|
||||
const actualVersion = getVersion(zigPath);
|
||||
const outPath = join(vendorPath, exePath(`zig-${actualVersion}`));
|
||||
mkdirSync(dirname(outPath), { recursive: true });
|
||||
copyFileSync(zigPath, outPath);
|
||||
return outPath;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {string} path
|
||||
* @returns {string}
|
||||
*/
|
||||
function exePath(path) {
|
||||
return isWindows ? `${path}.exe` : path;
|
||||
}
|
||||
|
||||
const execPath = await getDependencyPath("zig");
|
||||
console.log(execPath);
|
||||
|
||||
@@ -26,7 +26,7 @@ import { normalize as normalizeWindows } from "node:path/win32";
|
||||
import { isIP } from "node:net";
|
||||
import { parseArgs } from "node:util";
|
||||
|
||||
const spawnTimeout = 5_000;
|
||||
const spawnTimeout = 30_000;
|
||||
const testTimeout = 3 * 60_000;
|
||||
const integrationTimeout = 5 * 60_000;
|
||||
|
||||
@@ -104,19 +104,15 @@ async function printInfo() {
|
||||
console.log("Glibc:", getGlibcVersion());
|
||||
}
|
||||
console.log("Hostname:", getHostname());
|
||||
if (isCloud) {
|
||||
console.log("Public IP:", await getPublicIp());
|
||||
console.log("Cloud:", getCloud());
|
||||
}
|
||||
if (isCI) {
|
||||
console.log("CI:", getCI());
|
||||
console.log("Shard:", options["shard"], "/", options["max-shards"]);
|
||||
console.log("Build URL:", getBuildUrl());
|
||||
console.log("Environment:", process.env);
|
||||
if (isCloud) {
|
||||
console.log("Public IP:", await getPublicIp());
|
||||
console.log("Cloud:", getCloud());
|
||||
}
|
||||
const tailscaleIp = await getTailscaleIp();
|
||||
if (tailscaleIp) {
|
||||
console.log("Tailscale IP:", tailscaleIp);
|
||||
}
|
||||
}
|
||||
console.log("Cwd:", cwd);
|
||||
console.log("Tmpdir:", tmpPath);
|
||||
@@ -134,32 +130,7 @@ async function printInfo() {
|
||||
async function runTests() {
|
||||
let execPath;
|
||||
if (options["step"]) {
|
||||
downloadLoop: for (let i = 0; i < 10; i++) {
|
||||
execPath = await getExecPathFromBuildKite(options["step"]);
|
||||
for (let j = 0; j < 10; j++) {
|
||||
const { error } = spawnSync(execPath, ["--version"], {
|
||||
encoding: "utf-8",
|
||||
timeout: spawnTimeout,
|
||||
env: {
|
||||
PATH: process.env.PATH,
|
||||
BUN_DEBUG_QUIET_LOGS: 1,
|
||||
},
|
||||
});
|
||||
if (!error) {
|
||||
break;
|
||||
}
|
||||
const { code } = error;
|
||||
if (code === "EBUSY") {
|
||||
console.log("Bun appears to be busy, retrying...");
|
||||
continue;
|
||||
}
|
||||
if (code === "UNKNOWN") {
|
||||
console.log("Bun appears to be corrupted, downloading again...");
|
||||
rmSync(execPath, { force: true });
|
||||
continue downloadLoop;
|
||||
}
|
||||
}
|
||||
}
|
||||
execPath = await getExecPathFromBuildKite(options["step"]);
|
||||
} else {
|
||||
execPath = getExecPath(options["exec-path"]);
|
||||
}
|
||||
@@ -260,20 +231,18 @@ async function runTests() {
|
||||
*/
|
||||
|
||||
/**
|
||||
* @param {SpawnOptions} options
|
||||
* @param {SpawnOptions} request
|
||||
* @returns {Promise<SpawnResult>}
|
||||
*/
|
||||
async function spawnSafe(options) {
|
||||
const {
|
||||
command,
|
||||
args,
|
||||
cwd,
|
||||
env,
|
||||
timeout = spawnTimeout,
|
||||
stdout = process.stdout.write.bind(process.stdout),
|
||||
stderr = process.stderr.write.bind(process.stderr),
|
||||
retries = 0,
|
||||
} = options;
|
||||
async function spawnSafe({
|
||||
command,
|
||||
args,
|
||||
cwd,
|
||||
env,
|
||||
timeout = spawnTimeout,
|
||||
stdout = process.stdout.write.bind(process.stdout),
|
||||
stderr = process.stderr.write.bind(process.stderr),
|
||||
}) {
|
||||
let exitCode;
|
||||
let signalCode;
|
||||
let spawnError;
|
||||
@@ -349,16 +318,6 @@ async function spawnSafe(options) {
|
||||
resolve();
|
||||
}
|
||||
});
|
||||
if (spawnError && retries < 5) {
|
||||
const { code } = spawnError;
|
||||
if (code === "EBUSY" || code === "UNKNOWN") {
|
||||
await new Promise(resolve => setTimeout(resolve, 1000 * (retries + 1)));
|
||||
return spawnSafe({
|
||||
...options,
|
||||
retries: retries + 1,
|
||||
});
|
||||
}
|
||||
}
|
||||
let error;
|
||||
if (exitCode === 0) {
|
||||
// ...
|
||||
@@ -437,7 +396,7 @@ async function spawnBun(execPath, { args, cwd, timeout, env, stdout, stderr }) {
|
||||
BUN_FEATURE_FLAG_INTERNAL_FOR_TESTING: "1",
|
||||
BUN_DEBUG_QUIET_LOGS: "1",
|
||||
BUN_GARBAGE_COLLECTOR_LEVEL: "1",
|
||||
BUN_ENABLE_CRASH_REPORTING: "0", // change this to '1' if https://github.com/oven-sh/bun/issues/13012 is implemented
|
||||
BUN_ENABLE_CRASH_REPORTING: "1",
|
||||
BUN_RUNTIME_TRANSPILER_CACHE_PATH: "0",
|
||||
BUN_INSTALL_CACHE_DIR: tmpdirPath,
|
||||
SHELLOPTS: isWindows ? "igncr" : undefined, // ignore "\r" on Windows
|
||||
@@ -1002,7 +961,7 @@ async function getExecPathFromBuildKite(target) {
|
||||
if (isWindows) {
|
||||
await spawnSafe({
|
||||
command: "powershell",
|
||||
args: ["-Command", `Expand-Archive -Path ${zipPath} -DestinationPath ${releasePath} -Force`],
|
||||
args: ["-Command", `Expand-Archive -Path ${zipPath} -DestinationPath ${releasePath}`],
|
||||
});
|
||||
} else {
|
||||
await spawnSafe({
|
||||
@@ -1327,26 +1286,6 @@ async function getPublicIp() {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @returns {string | undefined}
|
||||
*/
|
||||
function getTailscaleIp() {
|
||||
try {
|
||||
const { status, stdout } = spawnSync("tailscale", ["ip", "--1"], {
|
||||
encoding: "utf-8",
|
||||
timeout: spawnTimeout,
|
||||
env: {
|
||||
PATH: process.env.PATH,
|
||||
},
|
||||
});
|
||||
if (status === 0) {
|
||||
return stdout.trim();
|
||||
}
|
||||
} catch {
|
||||
// ...
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {...string} paths
|
||||
* @returns {string}
|
||||
@@ -1393,7 +1332,7 @@ function formatTestToMarkdown(result, concise) {
|
||||
|
||||
let markdown = "";
|
||||
for (const { testPath, ok, tests, error, stdoutPreview: stdout } of results) {
|
||||
if (ok || error === "SIGTERM") {
|
||||
if (ok) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.