Mirror of https://github.com/oven-sh/bun, synced 2026-02-03 07:28:53 +00:00

Compare commits: kai/mimall...claude/nod
1 commit: 715413adc0
@@ -303,34 +303,9 @@ function getCppAgent(platform, options) {
  }

  return getEc2Agent(platform, options, {
    instanceType: arch === "aarch64" ? "c8g.4xlarge" : "c7i.4xlarge",
  });
}

/**
 * @param {Platform} platform
 * @param {PipelineOptions} options
 * @returns {string}
 */
function getLinkBunAgent(platform, options) {
  const { os, arch, distro } = platform;

  if (os === "darwin") {
    return {
      queue: `build-${os}`,
      os,
      arch,
    };
  }

  if (os === "windows") {
    return getEc2Agent(platform, options, {
      instanceType: arch === "aarch64" ? "r8g.large" : "r7i.large",
    });
  }

  return getEc2Agent(platform, options, {
    instanceType: arch === "aarch64" ? "r8g.xlarge" : "r7i.xlarge",
    instanceType: arch === "aarch64" ? "c8g.16xlarge" : "c7i.16xlarge",
    cpuCount: 32,
    threadsPerCore: 1,
  });
}

@@ -381,7 +356,7 @@ function getTestAgent(platform, options) {
    };
  }

  // TODO: delete this block when we upgrade to mimalloc v3
  // TODO: `dev-server-ssr-110.test.ts` and `next-build.test.ts` run out of memory at 8GB of memory, so use 16GB instead.
  if (os === "windows") {
    return getEc2Agent(platform, options, {
      instanceType: "c7i.2xlarge",
@@ -527,7 +502,7 @@ function getLinkBunStep(platform, options) {
    key: `${getTargetKey(platform)}-build-bun`,
    label: `${getTargetLabel(platform)} - build-bun`,
    depends_on: [`${getTargetKey(platform)}-build-cpp`, `${getTargetKey(platform)}-build-zig`],
    agents: getLinkBunAgent(platform, options),
    agents: getCppAgent(platform, options),
    retry: getRetry(),
    cancel_on_build_failing: isMergeQueue(),
    env: {
.github/workflows/CLAUDE.md (vendored, 103 changes)
@@ -1,103 +0,0 @@
# GitHub Actions Workflow Maintenance Guide

This document provides guidance for maintaining the GitHub Actions workflows in this repository.

## format.yml Workflow

### Overview
The `format.yml` workflow runs code formatters (Prettier, clang-format, and Zig fmt) on pull requests and pushes to main. It's optimized for speed by running all formatters in parallel.

### Key Components

#### 1. Clang-format Script (`scripts/run-clang-format.sh`)
- **Purpose**: Formats C++ source and header files
- **What it does**:
  - Reads C++ files from `cmake/sources/CxxSources.txt`
  - Finds all header files in `src/` and `packages/`
  - Excludes third-party directories (libuv, napi, deps, vendor, sqlite, etc.)
  - Requires a specific clang-format version (no fallbacks)

**Important exclusions**:
- `src/napi/` - Node API headers (third-party)
- `src/bun.js/bindings/libuv/` - libuv headers (third-party)
- `src/bun.js/bindings/sqlite/` - SQLite headers (third-party)
- `src/bun.js/api/ffi-*.h` - FFI headers (generated/third-party)
- `src/deps/` - Dependencies (third-party)
- Files in `vendor/`, `third_party/`, `generated/` directories

#### 2. Parallel Execution
The workflow runs all three formatters simultaneously (see the sketch after this list):
- Each formatter outputs with a prefix (`[prettier]`, `[clang-format]`, `[zig]`)
- Output is streamed in real-time without blocking
- Uses GitHub Actions groups (`::group::`) for collapsible sections
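A minimal sketch of this parallel pattern, assuming two placeholder commands `run-formatter-a` and `run-formatter-b` (not the real workflow steps):

```bash
set -o pipefail  # don't let the sed in the pipe mask a formatter failure

# Run both formatters in the background, prefixing each output line,
# then wait on each PID and fail the step if either one failed.
(run-formatter-a 2>&1 | sed 's/^/[a] /') &
A_PID=$!
(run-formatter-b 2>&1 | sed 's/^/[b] /') &
B_PID=$!

FAILED=0
wait "$A_PID" || { echo "::error::formatter a failed"; FAILED=1; }
wait "$B_PID" || { echo "::error::formatter b failed"; FAILED=1; }
exit "$FAILED"
```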
#### 3. Tool Installation

##### Clang-format-19
- Installs ONLY the `clang-format-19` package (not the entire LLVM toolchain)
- Uses `--no-install-recommends --no-install-suggests` to skip unnecessary packages
- Quiet installation with `-qq` and `-o=Dpkg::Use-Pty=0`

##### Zig
- Downloads from `oven-sh/zig` releases (musl build for static linking)
- URL: `https://github.com/oven-sh/zig/releases/download/autobuild-{COMMIT}/bootstrap-x86_64-linux-musl.zip`
- Extracts to a temp directory to avoid polluting the repository
- Directory structure: `bootstrap-x86_64-linux-musl/zig` (see the setup sketch after this list)
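A sketch of the Zig setup described above; the commands mirror the format.yml step, with `{COMMIT}` standing in for a real release hash:

```bash
# Download the static musl Zig bootstrap into a temp dir, put it on PATH,
# format the Zig sources, then clean up.
ZIG_TEMP=$(mktemp -d)
wget -q -O "$ZIG_TEMP/zig.zip" \
  "https://github.com/oven-sh/zig/releases/download/autobuild-{COMMIT}/bootstrap-x86_64-linux-musl.zip"
unzip -q -d "$ZIG_TEMP" "$ZIG_TEMP/zig.zip"
export PATH="$ZIG_TEMP/bootstrap-x86_64-linux-musl:$PATH"
zig fmt src
rm -rf "$ZIG_TEMP"
```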
### Updating the Workflow

#### To update the Zig version:
1. Find the new commit hash from https://github.com/oven-sh/zig/releases
2. Replace the hash in the wget URL (line 65 of format.yml)
3. Test that the URL is valid and the binary works

#### To update the clang-format version:
1. Update the `LLVM_VERSION_MAJOR` environment variable at the top of format.yml
2. Update the version check in `scripts/run-clang-format.sh`

#### To add/remove file exclusions:
1. Edit the exclusion patterns in `scripts/run-clang-format.sh` (lines 34-39)
2. Test locally to ensure the right files are being formatted

### Performance Optimizations
1. **Parallel execution**: All formatters run simultaneously
2. **Minimal installations**: Only required packages, no extras
3. **Temp directories**: Tools are downloaded to temp dirs and cleaned up after use
4. **Streaming output**: Real-time feedback without buffering
5. **Early start**: Formatting begins immediately after each tool is ready

### Troubleshooting

**If formatters appear to run sequentially:**
- Check if output is being buffered (each formatter should pipe through `sed` for line prefixing)
- Ensure background processes are started with `&` and collected with proper `wait` commands

**If third-party files are being formatted:**
- Review the exclusion patterns in `scripts/run-clang-format.sh`
- Check if new third-party directories were added that need exclusion

**If clang-format installation is slow:**
- Ensure the minimal package installation flags are being used
- Check if the apt cache needs updating
- Consider caching the clang-format binary between runs

### Testing Changes Locally

```bash
# Test the clang-format script
export LLVM_VERSION_MAJOR=19
./scripts/run-clang-format.sh format

# Test with check mode (no modifications)
./scripts/run-clang-format.sh check

# Test specific file exclusions
./scripts/run-clang-format.sh format 2>&1 | grep -E "(libuv|napi|deps)"
# Should return nothing if exclusions work correctly
```

### Important Notes
- The script defaults to **format** mode (which modifies files)
- Always test locally before pushing workflow changes
- The musl Zig build works on glibc systems due to static linking
- Keep the exclusion list updated as new third-party code is added
.github/workflows/auto-label-claude-prs.yml (vendored, 24 changes)
@@ -1,24 +0,0 @@
name: Auto-label Claude PRs

on:
  pull_request:
    types: [opened]

jobs:
  auto-label:
    if: github.event.pull_request.user.login == 'robobun'
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: write
    steps:
      - name: Add claude label to PRs from robobun
        uses: actions/github-script@v7
        with:
          script: |
            github.rest.issues.addLabels({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              labels: ['claude']
            });
.github/workflows/claude.yml (vendored, 48 changes)
@@ -13,55 +13,23 @@ on:
jobs:
  claude:
    if: |
      github.repository == 'oven-sh/bun' &&
      (
        (github.event_name == 'issue_comment' && (github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER' || github.event.comment.author_association == 'COLLABORATOR')) ||
        (github.event_name == 'pull_request_review_comment' && (github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER' || github.event.comment.author_association == 'COLLABORATOR')) ||
        (github.event_name == 'pull_request_review' && (github.event.review.author_association == 'MEMBER' || github.event.review.author_association == 'OWNER' || github.event.review.author_association == 'COLLABORATOR')) ||
        (github.event_name == 'issues' && (github.event.issue.author_association == 'MEMBER' || github.event.issue.author_association == 'OWNER' || github.event.issue.author_association == 'COLLABORATOR'))
      ) &&
      (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
      (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
      (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
      (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
    runs-on: claude
    env:
      IS_SANDBOX: 1
    container:
      image: localhost:5000/claude-bun:latest
      options: --privileged --user 1000:1000
    runs-on: ubuntu-latest
    permissions:
      contents: read
      id-token: write
    steps:
      - name: Checkout repository
        working-directory: /workspace/bun
        run: |
          git config --global user.email "claude-bot@bun.sh" && \
          git config --global user.name "Claude Bot" && \
          git config --global url."git@github.com:".insteadOf "https://github.com/" && \
          git config --global url."git@github.com:".insteadOf "http://github.com/" && \
          git config --global --add safe.directory /workspace/bun && \
          git config --global push.default current && \
          git config --global pull.rebase true && \
          git config --global init.defaultBranch main && \
          git config --global core.editor "vim" && \
          git config --global color.ui auto && \
          git config --global fetch.prune true && \
          git config --global diff.colorMoved zebra && \
          git config --global merge.conflictStyle diff3 && \
          git config --global rerere.enabled true && \
          git config --global core.autocrlf input
          git fetch origin ${{ github.event.pull_request.head.sha }}
          git checkout ${{ github.event.pull_request.head.ref }}
          git reset --hard origin/${{ github.event.pull_request.head.ref }}
        uses: actions/checkout@v4
        with:
          fetch-depth: 1

      - name: Run Claude Code
        id: claude
        # TODO: switch this out once they merge their v1
        uses: km-anthropic/claude-code-action@v1-dev
        uses: anthropics/claude-code-action@beta
        with:
          timeout_minutes: "180"
          claude_args: |
            --dangerously-skip-permissions
            --system-prompt "You are working on the Bun codebase"
          claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
.github/workflows/docs.yml (vendored, 2 changes)
@@ -6,8 +6,6 @@ on:
- "docs/**"
|
||||
- "packages/bun-types/**.d.ts"
|
||||
- "CONTRIBUTING.md"
|
||||
- "src/cli/install.sh"
|
||||
- "src/cli/install.ps1"
|
||||
branches:
|
||||
- main
|
||||
|
||||
|
||||
.github/workflows/format.yml (vendored, 82 changes)
@@ -37,72 +37,24 @@ jobs:
      - name: Setup Dependencies
        run: |
          bun install
      - name: Format Code
      - name: Install LLVM
        run: |
          # Start prettier in background with prefixed output
          echo "::group::Prettier"
          (bun run prettier 2>&1 | sed 's/^/[prettier] /' || echo "[prettier] Failed with exit code $?") &
          PRETTIER_PID=$!

          # Start clang-format installation and formatting in background with prefixed output
          echo "::group::Clang-format"
          (
            echo "[clang-format] Installing clang-format-${{ env.LLVM_VERSION_MAJOR }}..."
            wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc > /dev/null
            echo "deb http://apt.llvm.org/$(lsb_release -cs)/ llvm-toolchain-$(lsb_release -cs)-${{ env.LLVM_VERSION_MAJOR }} main" | sudo tee /etc/apt/sources.list.d/llvm.list > /dev/null
            sudo apt-get update -qq
            sudo apt-get install -y -qq --no-install-recommends --no-install-suggests -o=Dpkg::Use-Pty=0 clang-format-${{ env.LLVM_VERSION_MAJOR }}
            echo "[clang-format] Running clang-format..."
            LLVM_VERSION_MAJOR=${{ env.LLVM_VERSION_MAJOR }} ./scripts/run-clang-format.sh format 2>&1 | sed 's/^/[clang-format] /'
          ) &
          CLANG_PID=$!

          # Setup Zig in temp directory and run zig fmt in background with prefixed output
          echo "::group::Zig fmt"
          (
            ZIG_TEMP=$(mktemp -d)
            echo "[zig] Downloading Zig (musl build)..."
            wget -q -O "$ZIG_TEMP/zig.zip" https://github.com/oven-sh/zig/releases/download/autobuild-d1a4e0b0ddc75f37c6a090b97eef0cbb6335556e/bootstrap-x86_64-linux-musl.zip
            unzip -q -d "$ZIG_TEMP" "$ZIG_TEMP/zig.zip"
            export PATH="$ZIG_TEMP/bootstrap-x86_64-linux-musl:$PATH"
            echo "[zig] Running zig fmt..."
            zig fmt src 2>&1 | sed 's/^/[zig] /'
            ./scripts/sort-imports.ts src 2>&1 | sed 's/^/[zig] /'
            zig fmt src 2>&1 | sed 's/^/[zig] /'
            rm -rf "$ZIG_TEMP"
          ) &
          ZIG_PID=$!

          # Wait for all formatting tasks to complete
          echo ""
          echo "Running formatters in parallel..."
          FAILED=0

          if ! wait $PRETTIER_PID; then
            echo "::error::Prettier failed"
            FAILED=1
          fi
          echo "::endgroup::"

          if ! wait $CLANG_PID; then
            echo "::error::Clang-format failed"
            FAILED=1
          fi
          echo "::endgroup::"

          if ! wait $ZIG_PID; then
            echo "::error::Zig fmt failed"
            FAILED=1
          fi
          echo "::endgroup::"

          # Exit with error if any formatter failed
          if [ $FAILED -eq 1 ]; then
            echo "::error::One or more formatters failed"
            exit 1
          fi

          echo "✅ All formatters completed successfully"
          curl -fsSL https://apt.llvm.org/llvm.sh | sudo bash -s -- ${{ env.LLVM_VERSION_MAJOR }} all
      - name: Setup Zig
        uses: mlugg/setup-zig@v1
        with:
          version: 0.14.0
      - name: Zig Format
        run: |
          zig fmt src
          ./scripts/sort-imports.ts src
          zig fmt src
      - name: Prettier Format
        run: |
          bun run prettier
      - name: Clang Format
        run: |
          bun run clang-format
      - name: Ban Words
        run: |
          bun ./test/internal/ban-words.test.ts
.vscode/launch.json (generated, vendored, 3 changes)
@@ -22,9 +22,6 @@
"BUN_DEBUG_QUIET_LOGS": "1",
|
||||
"BUN_DEBUG_jest": "1",
|
||||
"BUN_GARBAGE_COLLECTOR_LEVEL": "1",
|
||||
// "BUN_JSC_validateExceptionChecks": "1",
|
||||
// "BUN_JSC_dumpSimulatedThrows": "1",
|
||||
// "BUN_JSC_unexpectedExceptionStackTraceLimit": "20",
|
||||
},
|
||||
"console": "internalConsole",
|
||||
"sourceMap": {
|
||||
|
||||
.vscode/settings.json (vendored, 2 changes)
@@ -168,5 +168,5 @@
"WebKit/WebInspectorUI": true,
|
||||
},
|
||||
"git.detectSubmodules": false,
|
||||
"bun.test.customScript": "./build/debug/bun-debug test",
|
||||
"bun.test.customScript": "./build/debug/bun-debug test"
|
||||
}
|
||||
|
||||
CLAUDE.md (14 changes)
@@ -43,12 +43,7 @@ Tests use Bun's Jest-compatible test runner with proper test fixtures:

```typescript
import { test, expect } from "bun:test";
import {
  bunEnv,
  bunExe,
  normalizeBunSnapshot,
  tempDirWithFiles,
} from "harness";
import { bunEnv, bunExe, tempDirWithFiles } from "harness";

test("my feature", async () => {
  // Create temp directory with test files
@@ -61,7 +56,6 @@ test("my feature", async () => {
    cmd: [bunExe(), "index.js"],
    env: bunEnv,
    cwd: dir,
    stderr: "pipe",
  });

  const [stdout, stderr, exitCode] = await Promise.all([
@@ -71,14 +65,11 @@ test("my feature", async () => {
  ]);

  expect(exitCode).toBe(0);
  // Prefer snapshot tests over expect(stdout).toBe("hello\n");
  expect(normalizeBunSnapshot(stdout, dir)).toMatchInlineSnapshot(`"hello"`);
  expect(stdout).toBe("hello\n");
});
```

- Always use `port: 0`. Do not hardcode ports. Do not use your own random port number function.
- Use `normalizeBunSnapshot` to normalize snapshot output of the test.
- NEVER write tests that check for no "panic" or "uncaught exception" or similar in the test output. That is NOT a valid test.

## Code Architecture

@@ -240,7 +231,6 @@ bun ci
9. **Cross-platform** - Run `bun run zig:check-all` to compile the Zig code on all platforms when making platform-specific changes
10. **Debug builds** - Use `BUN_DEBUG_QUIET_LOGS=1` to disable debug logging, or `BUN_DEBUG_<scope>=1` to enable specific scopes
11. **Be humble & honest** - NEVER overstate what you got done or what actually works in commits, PRs or in messages to the user.
12. **Branch names must start with `claude/`** - This is a requirement for the CI to work.

## Key APIs and Features
@@ -15,13 +15,11 @@
      "eventemitter3": "^5.0.0",
      "execa": "^8.0.1",
      "fast-glob": "3.3.1",
      "fastify": "^5.0.0",
      "fdir": "^6.1.0",
      "mitata": "^1.0.25",
      "react": "^18.3.1",
      "react-dom": "^18.3.1",
      "string-width": "7.1.0",
      "strip-ansi": "^7.1.0",
      "tinycolor2": "^1.6.0",
      "zx": "^7.2.3",
    },
@@ -95,18 +93,6 @@

    "@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.14.54", "", { "os": "linux", "cpu": "none" }, "sha512-bZBrLAIX1kpWelV0XemxBZllyRmM6vgFQQG2GdNb+r3Fkp0FOh1NJSvekXDs7jq70k4euu1cryLMfU+mTXlEpw=="],

    "@fastify/ajv-compiler": ["@fastify/ajv-compiler@4.0.2", "", { "dependencies": { "ajv": "^8.12.0", "ajv-formats": "^3.0.1", "fast-uri": "^3.0.0" } }, "sha512-Rkiu/8wIjpsf46Rr+Fitd3HRP+VsxUFDDeag0hs9L0ksfnwx2g7SPQQTFL0E8Qv+rfXzQOxBJnjUB9ITUDjfWQ=="],

    "@fastify/error": ["@fastify/error@4.2.0", "", {}, "sha512-RSo3sVDXfHskiBZKBPRgnQTtIqpi/7zhJOEmAxCiBcM7d0uwdGdxLlsCaLzGs8v8NnxIRlfG0N51p5yFaOentQ=="],

    "@fastify/fast-json-stringify-compiler": ["@fastify/fast-json-stringify-compiler@5.0.3", "", { "dependencies": { "fast-json-stringify": "^6.0.0" } }, "sha512-uik7yYHkLr6fxd8hJSZ8c+xF4WafPK+XzneQDPU+D10r5X19GW8lJcom2YijX2+qtFF1ENJlHXKFM9ouXNJYgQ=="],

    "@fastify/forwarded": ["@fastify/forwarded@3.0.0", "", {}, "sha512-kJExsp4JCms7ipzg7SJ3y8DwmePaELHxKYtg+tZow+k0znUTf3cb+npgyqm8+ATZOdmfgfydIebPDWM172wfyA=="],

    "@fastify/merge-json-schemas": ["@fastify/merge-json-schemas@0.2.1", "", { "dependencies": { "dequal": "^2.0.3" } }, "sha512-OA3KGBCy6KtIvLf8DINC5880o5iBlDX4SxzLQS8HorJAbqluzLRn80UXU0bxZn7UOFhFgpRJDasfwn9nG4FG4A=="],

    "@fastify/proxy-addr": ["@fastify/proxy-addr@5.0.0", "", { "dependencies": { "@fastify/forwarded": "^3.0.0", "ipaddr.js": "^2.1.0" } }, "sha512-37qVVA1qZ5sgH7KpHkkC4z9SK6StIsIcOmpjvMPXNb3vx2GQxhZocogVYbr2PbbeLCQxYIPDok307xEvRZOzGA=="],

    "@jridgewell/gen-mapping": ["@jridgewell/gen-mapping@0.1.1", "", { "dependencies": { "@jridgewell/set-array": "^1.0.0", "@jridgewell/sourcemap-codec": "^1.4.10" } }, "sha512-sQXCasFk+U8lWYEe66WxRDOE9PjVz4vSM51fTu3Hw+ClTpUSQb718772vH3pyS5pShp6lvQM7SxgIDXXXmOX7w=="],

    "@jridgewell/resolve-uri": ["@jridgewell/resolve-uri@3.1.0", "", {}, "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w=="],
@@ -157,20 +143,10 @@

    "@types/which": ["@types/which@3.0.3", "", {}, "sha512-2C1+XoY0huExTbs8MQv1DuS5FS86+SEjdM9F/+GS61gg5Hqbtj8ZiDSx8MfWcyei907fIPbfPGCOrNUTnVHY1g=="],

    "abstract-logging": ["abstract-logging@2.0.1", "", {}, "sha512-2BjRTZxTPvheOvGbBslFSYOUkr+SjPtOnrLP33f+VIWLzezQpZcqVg7ja3L4dBXmzzgwT+a029jRx5PCi3JuiA=="],

    "ajv": ["ajv@8.17.1", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g=="],

    "ajv-formats": ["ajv-formats@3.0.1", "", { "dependencies": { "ajv": "^8.0.0" } }, "sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ=="],

    "ansi-regex": ["ansi-regex@6.0.1", "", {}, "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA=="],

    "ansi-styles": ["ansi-styles@3.2.1", "", { "dependencies": { "color-convert": "^1.9.0" } }, "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA=="],

    "atomic-sleep": ["atomic-sleep@1.0.0", "", {}, "sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ=="],

    "avvio": ["avvio@9.1.0", "", { "dependencies": { "@fastify/error": "^4.0.0", "fastq": "^1.17.1" } }, "sha512-fYASnYi600CsH/j9EQov7lECAniYiBFiiAtBNuZYLA2leLe9qOvZzqYHFjtIj6gD2VMoMLP14834LFWvr4IfDw=="],

    "benchmark": ["benchmark@2.1.4", "", { "dependencies": { "lodash": "^4.17.4", "platform": "^1.3.3" } }, "sha512-l9MlfN4M1K/H2fbhfMy3B7vJd6AGKJVQn2h6Sg/Yx+KckoUA7ewS5Vv6TjSq18ooE1kS9hhAlQRH3AkXIh/aOQ=="],

    "braces": ["braces@3.0.2", "", { "dependencies": { "fill-range": "^7.0.1" } }, "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A=="],
@@ -191,16 +167,12 @@

    "convert-source-map": ["convert-source-map@1.9.0", "", {}, "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A=="],

    "cookie": ["cookie@1.0.2", "", {}, "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA=="],

    "cross-spawn": ["cross-spawn@7.0.3", "", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w=="],

    "data-uri-to-buffer": ["data-uri-to-buffer@4.0.1", "", {}, "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A=="],

    "debug": ["debug@4.3.4", "", { "dependencies": { "ms": "2.1.2" } }, "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ=="],

    "dequal": ["dequal@2.0.3", "", {}, "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA=="],

    "dir-glob": ["dir-glob@3.0.1", "", { "dependencies": { "path-type": "^4.0.0" } }, "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA=="],

    "duplexer": ["duplexer@0.1.2", "", {}, "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg=="],
@@ -261,22 +233,10 @@

    "execa": ["execa@8.0.1", "", { "dependencies": { "cross-spawn": "^7.0.3", "get-stream": "^8.0.1", "human-signals": "^5.0.0", "is-stream": "^3.0.0", "merge-stream": "^2.0.0", "npm-run-path": "^5.1.0", "onetime": "^6.0.0", "signal-exit": "^4.1.0", "strip-final-newline": "^3.0.0" } }, "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg=="],

    "fast-decode-uri-component": ["fast-decode-uri-component@1.0.1", "", {}, "sha512-WKgKWg5eUxvRZGwW8FvfbaH7AXSh2cL+3j5fMGzUMCxWBJ3dV3a7Wz8y2f/uQ0e3B6WmodD3oS54jTQ9HVTIIg=="],

    "fast-deep-equal": ["fast-deep-equal@3.1.3", "", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="],

    "fast-glob": ["fast-glob@3.3.1", "", { "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": "^4.0.4" } }, "sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg=="],

    "fast-json-stringify": ["fast-json-stringify@6.0.1", "", { "dependencies": { "@fastify/merge-json-schemas": "^0.2.0", "ajv": "^8.12.0", "ajv-formats": "^3.0.1", "fast-uri": "^3.0.0", "json-schema-ref-resolver": "^2.0.0", "rfdc": "^1.2.0" } }, "sha512-s7SJE83QKBZwg54dIbD5rCtzOBVD43V1ReWXXYqBgwCwHLYAAT0RQc/FmrQglXqWPpz6omtryJQOau5jI4Nrvg=="],

    "fast-querystring": ["fast-querystring@1.1.2", "", { "dependencies": { "fast-decode-uri-component": "^1.0.1" } }, "sha512-g6KuKWmFXc0fID8WWH0jit4g0AGBoJhCkJMb1RmbsSEUNvQ+ZC8D6CUZ+GtF8nMzSPXnhiePyyqqipzNNEnHjg=="],

    "fast-redact": ["fast-redact@3.5.0", "", {}, "sha512-dwsoQlS7h9hMeYUq1W++23NDcBLV4KqONnITDV9DjfS3q1SgDGVrBdvvTLUotWtPSD7asWDV9/CmsZPy8Hf70A=="],

    "fast-uri": ["fast-uri@3.0.6", "", {}, "sha512-Atfo14OibSv5wAp4VWNsFYE1AchQRTv9cBGWET4pZWHzYshFSS9NQI6I57rdKn9croWVMbYFbLhJ+yJvmZIIHw=="],

    "fastify": ["fastify@5.5.0", "", { "dependencies": { "@fastify/ajv-compiler": "^4.0.0", "@fastify/error": "^4.0.0", "@fastify/fast-json-stringify-compiler": "^5.0.0", "@fastify/proxy-addr": "^5.0.0", "abstract-logging": "^2.0.1", "avvio": "^9.0.0", "fast-json-stringify": "^6.0.0", "find-my-way": "^9.0.0", "light-my-request": "^6.0.0", "pino": "^9.0.0", "process-warning": "^5.0.0", "rfdc": "^1.3.1", "secure-json-parse": "^4.0.0", "semver": "^7.6.0", "toad-cache": "^3.7.0" } }, "sha512-ZWSWlzj3K/DcULCnCjEiC2zn2FBPdlZsSA/pnPa/dbUfLvxkD/Nqmb0XXMXLrWkeM4uQPUvjdJpwtXmTfriXqw=="],

    "fastq": ["fastq@1.15.0", "", { "dependencies": { "reusify": "^1.0.4" } }, "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw=="],

    "fdir": ["fdir@6.1.0", "", { "peerDependencies": { "picomatch": "2.x" } }, "sha512-274qhz5PxNnA/fybOu6apTCUnM0GnO3QazB6VH+oag/7DQskdYq8lm07ZSm90kEQuWYH5GvjAxGruuHrEr0bcg=="],
@@ -285,8 +245,6 @@

    "fill-range": ["fill-range@7.0.1", "", { "dependencies": { "to-regex-range": "^5.0.1" } }, "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ=="],

    "find-my-way": ["find-my-way@9.3.0", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-querystring": "^1.0.0", "safe-regex2": "^5.0.0" } }, "sha512-eRoFWQw+Yv2tuYlK2pjFS2jGXSxSppAs3hSQjfxVKxM5amECzIgYYc1FEI8ZmhSh/Ig+FrKEz43NLRKJjYCZVg=="],

    "formdata-polyfill": ["formdata-polyfill@4.0.10", "", { "dependencies": { "fetch-blob": "^3.1.2" } }, "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g=="],

    "from": ["from@0.1.7", "", {}, "sha512-twe20eF1OxVxp/ML/kq2p1uc6KvFK/+vs8WjEbeKmV2He22MKm7YF2ANIt+EOqhJ5L3K/SuuPhk0hWQDjOM23g=="],
@@ -315,8 +273,6 @@

    "ignore": ["ignore@5.3.0", "", {}, "sha512-g7dmpshy+gD7mh88OC9NwSGTKoc3kyLAZQRU1mt53Aw/vnvfXnbC+F/7F7QoYVKbV+KNvJx8wArewKy1vXMtlg=="],

    "ipaddr.js": ["ipaddr.js@2.2.0", "", {}, "sha512-Ag3wB2o37wslZS19hZqorUnrnzSkpOVy+IiiDEiTqNubEYpYuHWIf6K4psgN2ZWKExS4xhVCrRVfb/wfW8fWJA=="],

    "is-arrayish": ["is-arrayish@0.3.2", "", {}, "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ=="],

    "is-extglob": ["is-extglob@2.1.1", "", {}, "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ=="],
@@ -333,16 +289,10 @@

    "jsesc": ["jsesc@2.5.2", "", { "bin": { "jsesc": "bin/jsesc" } }, "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA=="],

    "json-schema-ref-resolver": ["json-schema-ref-resolver@2.0.1", "", { "dependencies": { "dequal": "^2.0.3" } }, "sha512-HG0SIB9X4J8bwbxCbnd5FfPEbcXAJYTi1pBJeP/QPON+w8ovSME8iRG+ElHNxZNX2Qh6eYn1GdzJFS4cDFfx0Q=="],

    "json-schema-traverse": ["json-schema-traverse@1.0.0", "", {}, "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="],

    "json5": ["json5@2.2.3", "", { "bin": { "json5": "lib/cli.js" } }, "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg=="],

    "jsonfile": ["jsonfile@6.1.0", "", { "dependencies": { "universalify": "^2.0.0" }, "optionalDependencies": { "graceful-fs": "^4.1.6" } }, "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ=="],

    "light-my-request": ["light-my-request@6.6.0", "", { "dependencies": { "cookie": "^1.0.1", "process-warning": "^4.0.0", "set-cookie-parser": "^2.6.0" } }, "sha512-CHYbu8RtboSIoVsHZ6Ye4cj4Aw/yg2oAFimlF7mNvfDV192LR7nDiKtSIfCuLT7KokPSTn/9kfVLm5OGN0A28A=="],

    "lodash": ["lodash@4.17.21", "", {}, "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="],

    "loose-envify": ["loose-envify@1.4.0", "", { "dependencies": { "js-tokens": "^3.0.0 || ^4.0.0" }, "bin": { "loose-envify": "cli.js" } }, "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q=="],
@@ -373,8 +323,6 @@

    "npm-run-path": ["npm-run-path@5.2.0", "", { "dependencies": { "path-key": "^4.0.0" } }, "sha512-W4/tgAXFqFA0iL7fk0+uQ3g7wkL8xJmx3XdK0VGb4cHW//eZTtKGvFBBoRKVTpY7n6ze4NL9ly7rgXcHufqXKg=="],

    "on-exit-leak-free": ["on-exit-leak-free@2.1.2", "", {}, "sha512-0eJJY6hXLGf1udHwfNftBqH+g73EU4B504nZeKpz1sYRKafAghwxEJunB2O7rDZkL4PGfsMVnTXZ2EjibbqcsA=="],

    "onetime": ["onetime@6.0.0", "", { "dependencies": { "mimic-fn": "^4.0.0" } }, "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ=="],

    "path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="],
@@ -387,50 +335,24 @@

    "picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="],

    "pino": ["pino@9.9.0", "", { "dependencies": { "atomic-sleep": "^1.0.0", "fast-redact": "^3.1.1", "on-exit-leak-free": "^2.1.0", "pino-abstract-transport": "^2.0.0", "pino-std-serializers": "^7.0.0", "process-warning": "^5.0.0", "quick-format-unescaped": "^4.0.3", "real-require": "^0.2.0", "safe-stable-stringify": "^2.3.1", "sonic-boom": "^4.0.1", "thread-stream": "^3.0.0" }, "bin": { "pino": "bin.js" } }, "sha512-zxsRIQG9HzG+jEljmvmZupOMDUQ0Jpj0yAgE28jQvvrdYTlEaiGwelJpdndMl/MBuRr70heIj83QyqJUWaU8mQ=="],

    "pino-abstract-transport": ["pino-abstract-transport@2.0.0", "", { "dependencies": { "split2": "^4.0.0" } }, "sha512-F63x5tizV6WCh4R6RHyi2Ml+M70DNRXt/+HANowMflpgGFMAym/VKm6G7ZOQRjqN7XbGxK1Lg9t6ZrtzOaivMw=="],

    "pino-std-serializers": ["pino-std-serializers@7.0.0", "", {}, "sha512-e906FRY0+tV27iq4juKzSYPbUj2do2X2JX4EzSca1631EB2QJQUqGbDuERal7LCtOpxl6x3+nvo9NPZcmjkiFA=="],

    "platform": ["platform@1.3.6", "", {}, "sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg=="],

    "process-warning": ["process-warning@5.0.0", "", {}, "sha512-a39t9ApHNx2L4+HBnQKqxxHNs1r7KF+Intd8Q/g1bUh6q0WIp9voPXJ/x0j+ZL45KF1pJd9+q2jLIRMfvEshkA=="],

    "ps-tree": ["ps-tree@1.2.0", "", { "dependencies": { "event-stream": "=3.3.4" }, "bin": { "ps-tree": "./bin/ps-tree.js" } }, "sha512-0VnamPPYHl4uaU/nSFeZZpR21QAWRz+sRv4iW9+v/GS/J5U5iZB5BNN6J0RMoOvdx2gWM2+ZFMIm58q24e4UYA=="],

    "queue-microtask": ["queue-microtask@1.2.3", "", {}, "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A=="],

    "quick-format-unescaped": ["quick-format-unescaped@4.0.4", "", {}, "sha512-tYC1Q1hgyRuHgloV/YXs2w15unPVh8qfu/qCTfhTYamaw7fyhumKa2yGpdSo87vY32rIclj+4fWYQXUMs9EHvg=="],

    "react": ["react@18.3.1", "", { "dependencies": { "loose-envify": "^1.1.0" } }, "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ=="],

    "react-dom": ["react-dom@18.3.1", "", { "dependencies": { "loose-envify": "^1.1.0", "scheduler": "^0.23.2" }, "peerDependencies": { "react": "^18.3.1" } }, "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw=="],

    "real-require": ["real-require@0.2.0", "", {}, "sha512-57frrGM/OCTLqLOAh0mhVA9VBMHd+9U7Zb2THMGdBUoZVOtGbJzjxsYGDJ3A9AYYCP4hn6y1TVbaOfzWtm5GFg=="],

    "require-from-string": ["require-from-string@2.0.2", "", {}, "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw=="],

    "ret": ["ret@0.5.0", "", {}, "sha512-I1XxrZSQ+oErkRR4jYbAyEEu2I0avBvvMM5JN+6EBprOGRCs63ENqZ3vjavq8fBw2+62G5LF5XelKwuJpcvcxw=="],

    "reusify": ["reusify@1.0.4", "", {}, "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw=="],

    "rfdc": ["rfdc@1.4.1", "", {}, "sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA=="],

    "run-parallel": ["run-parallel@1.2.0", "", { "dependencies": { "queue-microtask": "^1.2.2" } }, "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA=="],

    "safe-regex2": ["safe-regex2@5.0.0", "", { "dependencies": { "ret": "~0.5.0" } }, "sha512-YwJwe5a51WlK7KbOJREPdjNrpViQBI3p4T50lfwPuDhZnE3XGVTlGvi+aolc5+RvxDD6bnUmjVsU9n1eboLUYw=="],

    "safe-stable-stringify": ["safe-stable-stringify@2.5.0", "", {}, "sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA=="],

    "scheduler": ["scheduler@0.23.2", "", { "dependencies": { "loose-envify": "^1.1.0" } }, "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ=="],

    "secure-json-parse": ["secure-json-parse@4.0.0", "", {}, "sha512-dxtLJO6sc35jWidmLxo7ij+Eg48PM/kleBsxpC8QJE0qJICe+KawkDQmvCMZUr9u7WKVHgMW6vy3fQ7zMiFZMA=="],

    "semver": ["semver@6.3.0", "", { "bin": { "semver": "./bin/semver.js" } }, "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw=="],

    "set-cookie-parser": ["set-cookie-parser@2.7.1", "", {}, "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ=="],

    "shebang-command": ["shebang-command@2.0.0", "", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="],

    "shebang-regex": ["shebang-regex@3.0.0", "", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="],
@@ -441,12 +363,8 @@

    "slash": ["slash@4.0.0", "", {}, "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew=="],

    "sonic-boom": ["sonic-boom@4.2.0", "", { "dependencies": { "atomic-sleep": "^1.0.0" } }, "sha512-INb7TM37/mAcsGmc9hyyI6+QR3rR1zVRu36B0NeGXKnOOLiZOfER5SA+N7X7k3yUYRzLWafduTDvJAfDswwEww=="],

    "split": ["split@0.3.3", "", { "dependencies": { "through": "2" } }, "sha512-wD2AeVmxXRBoX44wAycgjVpMhvbwdI2aZjCkvfNcH1YqHQvJVa1duWc73OyVGJUc05fhFaTZeQ/PYsrmyH0JVA=="],

    "split2": ["split2@4.2.0", "", {}, "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg=="],

    "stream-combiner": ["stream-combiner@0.0.4", "", { "dependencies": { "duplexer": "~0.1.1" } }, "sha512-rT00SPnTVyRsaSz5zgSPma/aHSOic5U1prhYdRy5HS2kTZviFpmDgzilbtsJsxiroqACmayynDN/9VzIbX5DOw=="],

    "string-width": ["string-width@7.1.0", "", { "dependencies": { "emoji-regex": "^10.3.0", "get-east-asian-width": "^1.0.0", "strip-ansi": "^7.1.0" } }, "sha512-SEIJCWiX7Kg4c129n48aDRwLbFb2LJmXXFrWBG4NGaRtMQ3myKPKbwrD1BKqQn74oCoNMBVrfDEr5M9YxCsrkw=="],
@@ -457,8 +375,6 @@

    "supports-color": ["supports-color@5.5.0", "", { "dependencies": { "has-flag": "^3.0.0" } }, "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow=="],

    "thread-stream": ["thread-stream@3.1.0", "", { "dependencies": { "real-require": "^0.2.0" } }, "sha512-OqyPZ9u96VohAyMfJykzmivOrY2wfMSf3C5TtFJVgN+Hm6aj+voFhlK+kZEIv2FBh1X6Xp3DlnCOfEQ3B2J86A=="],

    "through": ["through@2.3.8", "", {}, "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg=="],

    "tinycolor2": ["tinycolor2@1.6.0", "", {}, "sha512-XPaBkWQJdsf3pLKJV9p4qN/S+fm2Oj8AIPo1BTUhg5oxkvm9+SVEGFdhyOz7tTdUTfvxMiAs4sp6/eZO2Ew+pw=="],
@@ -467,8 +383,6 @@

    "to-regex-range": ["to-regex-range@5.0.1", "", { "dependencies": { "is-number": "^7.0.0" } }, "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ=="],

    "toad-cache": ["toad-cache@3.7.0", "", {}, "sha512-/m8M+2BJUpoJdgAHoG+baCwBT+tf2VraSfkBgl0Y00qIWt41DJ8R5B8nsEw0I58YwF5IZH6z24/2TobDKnqSWw=="],

    "undici-types": ["undici-types@5.26.5", "", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="],

    "universalify": ["universalify@2.0.1", "", {}, "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw=="],
@@ -493,14 +407,8 @@

    "ansi-styles/color-convert": ["color-convert@1.9.3", "", { "dependencies": { "color-name": "1.1.3" } }, "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg=="],

    "avvio/fastq": ["fastq@1.19.1", "", { "dependencies": { "reusify": "^1.0.4" } }, "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ=="],

    "cross-spawn/which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="],

    "fastify/semver": ["semver@7.7.2", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA=="],

    "light-my-request/process-warning": ["process-warning@4.0.1", "", {}, "sha512-3c2LzQ3rY9d0hc1emcsHhfT9Jwz0cChib/QN89oME2R451w5fy3f0afAhERFZAwrbDU43wk12d0ORBpDVME50Q=="],

    "npm-run-path/path-key": ["path-key@4.0.0", "", {}, "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ=="],

    "ansi-styles/color-convert/color-name": ["color-name@1.1.3", "", {}, "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw=="],
@@ -18,7 +18,6 @@
    "react": "^18.3.1",
    "react-dom": "^18.3.1",
    "string-width": "7.1.0",
    "strip-ansi": "^7.1.0",
    "tinycolor2": "^1.6.0",
    "zx": "^7.2.3"
  },
@@ -1,77 +0,0 @@
// Benchmark for string fast path optimization in postMessage with Workers

import { bench, run } from "mitata";
import { Worker, isMainThread, parentPort } from "node:worker_threads";

// Test strings of different sizes
const strings = {
  small: "Hello world",
  medium: Buffer.alloc("Hello World!!!".length * 1024, "Hello World!!!").toString(),
  large: Buffer.alloc("Hello World!!!".length * 1024 * 256, "Hello World!!!").toString(),
};

let worker;
let receivedCount = new Int32Array(new SharedArrayBuffer(4));
let sentCount = 0;

function createWorker() {
  const workerCode = `
    import { parentPort, workerData } from "node:worker_threads";

    let int = workerData;

    parentPort?.on("message", data => {
      Atomics.add(int, 0, 1);
    });
  `;

  worker = new Worker(workerCode, { eval: true, workerData: receivedCount });

  worker.on("message", confirmationId => {});

  worker.on("error", error => {
    console.error("Worker error:", error);
  });
}

// Initialize worker before running benchmarks
createWorker();

function fmt(int) {
  if (int < 1000) {
    return `${int} chars`;
  }

  if (int < 100000) {
    return `${(int / 1024) | 0} KB`;
  }

  return `${(int / 1024 / 1024) | 0} MB`;
}

// Benchmark postMessage with pure strings (uses fast path)
bench("postMessage(" + fmt(strings.small.length) + " string)", async () => {
  sentCount++;
  worker.postMessage(strings.small);
});

bench("postMessage(" + fmt(strings.medium.length) + " string)", async () => {
  sentCount++;
  worker.postMessage(strings.medium);
});

bench("postMessage(" + fmt(strings.large.length) + " string)", async () => {
  sentCount++;
  worker.postMessage(strings.large);
});

await run();

await new Promise(resolve => setTimeout(resolve, 5000));

if (receivedCount[0] !== sentCount) {
  throw new Error("Expected " + receivedCount[0] + " to equal " + sentCount);
}

// Cleanup worker
worker?.terminate();
@@ -1,56 +0,0 @@
// Benchmark for string fast path optimization in postMessage and structuredClone

import { bench, run } from "mitata";

// Test strings of different sizes
const strings = {
  small: "Hello world",
  medium: "Hello World!!!".repeat(1024).split("").join(""),
  large: "Hello World!!!".repeat(1024).repeat(1024).split("").join(""),
};

console.log("String fast path benchmark");
console.log("Comparing pure strings (fast path) vs objects containing strings (traditional)");
console.log("For structuredClone, pure strings should have constant time regardless of size.");
console.log("");

// Benchmark structuredClone with pure strings (uses fast path)
bench("structuredClone small string (fast path)", () => {
  structuredClone(strings.small);
});

bench("structuredClone medium string (fast path)", () => {
  structuredClone(strings.medium);
});

bench("structuredClone large string (fast path)", () => {
  structuredClone(strings.large);
});

// Benchmark structuredClone with objects containing strings (traditional path)
bench("structuredClone object with small string", () => {
  structuredClone({ str: strings.small });
});

bench("structuredClone object with medium string", () => {
  structuredClone({ str: strings.medium });
});

bench("structuredClone object with large string", () => {
  structuredClone({ str: strings.large });
});

// Multiple string cloning benchmark
bench("structuredClone 100 small strings", () => {
  for (let i = 0; i < 100; i++) {
    structuredClone(strings.small);
  }
});

bench("structuredClone 100 small objects", () => {
  for (let i = 0; i < 100; i++) {
    structuredClone({ str: strings.small });
  }
});

await run();
@@ -1,37 +0,0 @@
import npmStripAnsi from "strip-ansi";
import { bench, run } from "../runner.mjs";

let bunStripANSI = null;
if (!process.env.FORCE_NPM) {
  bunStripANSI = globalThis?.Bun?.stripANSI;
}

const stripANSI = bunStripANSI || npmStripAnsi;
const formatter = new Intl.NumberFormat();
const format = n => {
  return formatter.format(n);
};

const inputs = [
  ["hello world", "no-ansi"],
  ["\x1b[31mred\x1b[39m", "ansi"],
  ["a".repeat(1024 * 16), "long-no-ansi"],
  ["\x1b[31mred\x1b[39m".repeat(1024 * 16), "long-ansi"],
];

const maxInputLength = Math.max(...inputs.map(([input]) => input.length));

for (const [input, textLabel] of inputs) {
  const label = bunStripANSI ? "Bun.stripANSI" : "npm/strip-ansi";
  const name = `${label} ${format(input.length).padStart(format(maxInputLength).length, " ")} chars ${textLabel}`;

  bench(name, () => {
    stripANSI(input);
  });

  if (bunStripANSI && bunStripANSI(input) !== npmStripAnsi(input)) {
    throw new Error("strip-ansi mismatch");
  }
}

await run();
bun.lock (13 changes)
@@ -6,7 +6,6 @@
"devDependencies": {
|
||||
"@lezer/common": "^1.2.3",
|
||||
"@lezer/cpp": "^1.1.3",
|
||||
"@types/bun": "workspace:*",
|
||||
"bun-tracestrings": "github:oven-sh/bun.report#912ca63e26c51429d3e6799aa2a6ab079b188fd8",
|
||||
"esbuild": "^0.21.4",
|
||||
"mitata": "^0.1.11",
|
||||
@@ -40,8 +39,8 @@
|
||||
},
|
||||
},
|
||||
"overrides": {
|
||||
"bun-types": "workspace:packages/bun-types",
|
||||
"@types/bun": "workspace:packages/@types/bun",
|
||||
"bun-types": "workspace:packages/bun-types",
|
||||
},
|
||||
"packages": {
|
||||
"@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.21.5", "", { "os": "aix", "cpu": "ppc64" }, "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ=="],
|
||||
@@ -148,7 +147,7 @@
|
||||
|
||||
"@octokit/webhooks-types": ["@octokit/webhooks-types@7.6.1", "", {}, "sha512-S8u2cJzklBC0FgTwWVLaM8tMrDuDMVE4xiTK4EYXM9GntyvrdbSoxqDQa+Fh57CCNApyIpyeqPhhFEmHPfrXgw=="],
|
||||
|
||||
"@sentry/types": ["@sentry/types@7.120.4", "", {}, "sha512-cUq2hSSe6/qrU6oZsEP4InMI5VVdD86aypE+ENrQ6eZEVLTCYm1w6XhW1NvIu3UuWh7gZec4a9J7AFpYxki88Q=="],
|
||||
"@sentry/types": ["@sentry/types@7.120.3", "", {}, "sha512-C4z+3kGWNFJ303FC+FxAd4KkHvxpNFYAFN8iMIgBwJdpIl25KZ8Q/VdGn0MLLUEHNLvjob0+wvwlcRBBNLXOow=="],
|
||||
|
||||
"@types/aws-lambda": ["@types/aws-lambda@8.10.152", "", {}, "sha512-soT/c2gYBnT5ygwiHPmd9a1bftj462NWVk2tKCc1PYHSIacB2UwbTS2zYG4jzag1mRDuzg/OjtxQjQ2NKRB6Rw=="],
|
||||
|
||||
@@ -160,9 +159,9 @@
|
||||
|
||||
"@types/ms": ["@types/ms@2.1.0", "", {}, "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA=="],
|
||||
|
||||
"@types/node": ["@types/node@24.2.1", "", { "dependencies": { "undici-types": "~7.10.0" } }, "sha512-DRh5K+ka5eJic8CjH7td8QpYEV6Zo10gfRkjHCO3weqZHWDtAaSTFtl4+VMqOJ4N5jcuhZ9/l+yy8rVgw7BQeQ=="],
|
||||
"@types/node": ["@types/node@24.1.0", "", { "dependencies": { "undici-types": "~7.8.0" } }, "sha512-ut5FthK5moxFKH2T1CUOC6ctR67rQRvvHdFLCD2Ql6KXmMuCrjsSsRI9UsLCm9M18BMwClv4pn327UvB7eeO1w=="],
|
||||
|
||||
"@types/react": ["@types/react@19.1.10", "", { "dependencies": { "csstype": "^3.0.2" } }, "sha512-EhBeSYX0Y6ye8pNebpKrwFJq7BoQ8J5SO6NlvNwwHjSj6adXJViPQrKlsyPw7hLBLvckEMO1yxeGdR82YBBlDg=="],
|
||||
"@types/react": ["@types/react@19.1.8", "", { "dependencies": { "csstype": "^3.0.2" } }, "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g=="],
|
||||
|
||||
"aggregate-error": ["aggregate-error@3.1.0", "", { "dependencies": { "clean-stack": "^2.0.0", "indent-string": "^4.0.0" } }, "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA=="],
|
||||
|
||||
@@ -312,7 +311,7 @@
|
||||
|
||||
"uglify-js": ["uglify-js@3.19.3", "", { "bin": { "uglifyjs": "bin/uglifyjs" } }, "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ=="],
|
||||
|
||||
"undici-types": ["undici-types@7.10.0", "", {}, "sha512-t5Fy/nfn+14LuOc2KNYg75vZqClpAiqscVvMygNnlsHBFpSXdJaYtXMcdNLpl/Qvc3P2cB3s6lOV51nqsFq4ag=="],
|
||||
"undici-types": ["undici-types@7.8.0", "", {}, "sha512-9UJ2xGDvQ43tYyVMpuHlsgApydB8ZKfVYTsLDhXkFL/6gfkp+U8xTGdh8pMJv1SpZna0zxG1DwsKZsreLbXBxw=="],
|
||||
|
||||
"universal-github-app-jwt": ["universal-github-app-jwt@1.2.0", "", { "dependencies": { "@types/jsonwebtoken": "^9.0.0", "jsonwebtoken": "^9.0.2" } }, "sha512-dncpMpnsKBk0eetwfN8D8OUHGfiDhhJ+mtsbMl+7PfW7mYjiH8LIcqRmYMtzYLgSh47HjfdBtrBwIQ/gizKR3g=="],
|
||||
|
||||
@@ -334,6 +333,8 @@
|
||||
|
||||
"@octokit/webhooks/@octokit/webhooks-methods": ["@octokit/webhooks-methods@4.1.0", "", {}, "sha512-zoQyKw8h9STNPqtm28UGOYFE7O6D4Il8VJwhAtMHFt2C4L0VQT1qGKLeefUOqHNs1mNRYSadVv7x0z8U2yyeWQ=="],
|
||||
|
||||
"bun-tracestrings/typescript": ["typescript@5.8.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ=="],
|
||||
|
||||
"camel-case/no-case": ["no-case@2.3.2", "", { "dependencies": { "lower-case": "^1.1.1" } }, "sha512-rmTZ9kz+f3rCvK2TD1Ue/oZlns7OGoIWP4fc3llxxRXlOkHKoWPPWJOfFYpITabSow43QJbRIoHQXtt10VldyQ=="],
|
||||
|
||||
"change-case/camel-case": ["camel-case@4.1.2", "", { "dependencies": { "pascal-case": "^3.1.2", "tslib": "^2.0.3" } }, "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw=="],
|
||||
|
||||
@@ -42,7 +42,6 @@ src/bun.js/bindings/DOMURL.cpp
src/bun.js/bindings/DOMWrapperWorld.cpp
src/bun.js/bindings/DoubleFormatter.cpp
src/bun.js/bindings/EncodeURIComponent.cpp
src/bun.js/bindings/EncodingTables.cpp
src/bun.js/bindings/ErrorCode.cpp
src/bun.js/bindings/ErrorStackFrame.cpp
src/bun.js/bindings/ErrorStackTrace.cpp
@@ -193,16 +192,7 @@ src/bun.js/bindings/ServerRouteList.cpp
src/bun.js/bindings/spawn.cpp
src/bun.js/bindings/SQLClient.cpp
src/bun.js/bindings/sqlite/JSSQLStatement.cpp
src/bun.js/bindings/stripANSI.cpp
src/bun.js/bindings/Strong.cpp
src/bun.js/bindings/TextCodec.cpp
src/bun.js/bindings/TextCodecCJK.cpp
src/bun.js/bindings/TextCodecReplacement.cpp
src/bun.js/bindings/TextCodecSingleByte.cpp
src/bun.js/bindings/TextCodecUserDefined.cpp
src/bun.js/bindings/TextCodecWrapper.cpp
src/bun.js/bindings/TextEncoding.cpp
src/bun.js/bindings/TextEncodingRegistry.cpp
src/bun.js/bindings/Uint8Array.cpp
src/bun.js/bindings/Undici.cpp
src/bun.js/bindings/URLDecomposition.cpp
@@ -65,12 +65,6 @@ src/js/internal/linkedlist.ts
src/js/internal/primordials.js
src/js/internal/promisify.ts
src/js/internal/shared.ts
src/js/internal/sql/errors.ts
src/js/internal/sql/postgres.ts
src/js/internal/sql/query.ts
src/js/internal/sql/shared.ts
src/js/internal/sql/sqlite.ts
src/js/internal/sql/utils.ts
src/js/internal/stream.promises.ts
src/js/internal/stream.ts
src/js/internal/streams/add-abort-signal.ts
@@ -1,8 +1,6 @@
|
||||
src/allocators.zig
|
||||
src/allocators/AllocationScope.zig
|
||||
src/allocators/basic.zig
|
||||
src/allocators/fallback.zig
|
||||
src/allocators/fallback/z.zig
|
||||
src/allocators/LinuxMemFdAllocator.zig
|
||||
src/allocators/MaxHeapAllocator.zig
|
||||
src/allocators/MemoryReportingAllocator.zig
|
||||
@@ -21,43 +19,19 @@ src/ast/base.zig
|
||||
src/ast/Binding.zig
|
||||
src/ast/BundledAst.zig
|
||||
src/ast/CharFreq.zig
|
||||
src/ast/ConvertESMExportsForHmr.zig
|
||||
src/ast/E.zig
|
||||
src/ast/Expr.zig
|
||||
src/ast/foldStringAddition.zig
|
||||
src/ast/G.zig
|
||||
src/ast/ImportScanner.zig
|
||||
src/ast/KnownGlobal.zig
|
||||
src/ast/Macro.zig
|
||||
src/ast/maybe.zig
|
||||
src/ast/NewStore.zig
|
||||
src/ast/Op.zig
|
||||
src/ast/P.zig
|
||||
src/ast/parse.zig
|
||||
src/ast/parseFn.zig
|
||||
src/ast/parseImportExport.zig
|
||||
src/ast/parseJSXElement.zig
|
||||
src/ast/parsePrefix.zig
|
||||
src/ast/parseProperty.zig
|
||||
src/ast/Parser.zig
|
||||
src/ast/parseStmt.zig
|
||||
src/ast/parseSuffix.zig
|
||||
src/ast/parseTypescript.zig
|
||||
src/ast/S.zig
|
||||
src/ast/Scope.zig
|
||||
src/ast/ServerComponentBoundary.zig
|
||||
src/ast/SideEffects.zig
|
||||
src/ast/skipTypescript.zig
|
||||
src/ast/Stmt.zig
|
||||
src/ast/Symbol.zig
|
||||
src/ast/symbols.zig
|
||||
src/ast/TS.zig
|
||||
src/ast/TypeScript.zig
|
||||
src/ast/UseDirective.zig
|
||||
src/ast/visit.zig
|
||||
src/ast/visitBinaryExpression.zig
|
||||
src/ast/visitExpr.zig
|
||||
src/ast/visitStmt.zig
|
||||
src/async/posix_event_loop.zig
|
||||
src/async/stub_event_loop.zig
|
||||
src/async/windows_event_loop.zig
@@ -98,11 +72,6 @@ src/bun.js/api/bun/spawn.zig
src/bun.js/api/bun/spawn/stdio.zig
src/bun.js/api/bun/ssl_wrapper.zig
src/bun.js/api/bun/subprocess.zig
src/bun.js/api/bun/subprocess/Readable.zig
src/bun.js/api/bun/subprocess/ResourceUsage.zig
src/bun.js/api/bun/subprocess/StaticPipeWriter.zig
src/bun.js/api/bun/subprocess/SubprocessPipeReader.zig
src/bun.js/api/bun/subprocess/Writable.zig
src/bun.js/api/bun/udp_socket.zig
src/bun.js/api/bun/x509.zig
src/bun.js/api/BunObject.zig
@@ -135,7 +104,6 @@ src/bun.js/api/server/StaticRoute.zig
src/bun.js/api/server/WebSocketServerContext.zig
src/bun.js/api/streams.classes.zig
src/bun.js/api/Timer.zig
src/bun.js/api/Timer/DateHeaderTimer.zig
src/bun.js/api/Timer/EventLoopTimer.zig
src/bun.js/api/Timer/ImmediateObject.zig
src/bun.js/api/Timer/TimeoutObject.zig
@@ -197,7 +165,6 @@ src/bun.js/bindings/SourceProvider.zig
src/bun.js/bindings/SourceType.zig
src/bun.js/bindings/static_export.zig
src/bun.js/bindings/SystemError.zig
src/bun.js/bindings/TextCodec.zig
src/bun.js/bindings/URL.zig
src/bun.js/bindings/URLSearchParams.zig
src/bun.js/bindings/VM.zig
@@ -283,84 +250,7 @@ src/bun.js/RuntimeTranspilerCache.zig
src/bun.js/SavedSourceMap.zig
src/bun.js/Strong.zig
src/bun.js/test/diff_format.zig
src/bun.js/test/diff/diff_match_patch.zig
src/bun.js/test/diff/printDiff.zig
src/bun.js/test/expect.zig
src/bun.js/test/expect/toBe.zig
src/bun.js/test/expect/toBeArray.zig
src/bun.js/test/expect/toBeArrayOfSize.zig
src/bun.js/test/expect/toBeBoolean.zig
src/bun.js/test/expect/toBeCloseTo.zig
src/bun.js/test/expect/toBeDate.zig
src/bun.js/test/expect/toBeDefined.zig
src/bun.js/test/expect/toBeEmpty.zig
src/bun.js/test/expect/toBeEmptyObject.zig
src/bun.js/test/expect/toBeEven.zig
src/bun.js/test/expect/toBeFalse.zig
src/bun.js/test/expect/toBeFalsy.zig
src/bun.js/test/expect/toBeFinite.zig
src/bun.js/test/expect/toBeFunction.zig
src/bun.js/test/expect/toBeGreaterThan.zig
src/bun.js/test/expect/toBeGreaterThanOrEqual.zig
src/bun.js/test/expect/toBeInstanceOf.zig
src/bun.js/test/expect/toBeInteger.zig
src/bun.js/test/expect/toBeLessThan.zig
src/bun.js/test/expect/toBeLessThanOrEqual.zig
src/bun.js/test/expect/toBeNaN.zig
src/bun.js/test/expect/toBeNegative.zig
src/bun.js/test/expect/toBeNil.zig
src/bun.js/test/expect/toBeNull.zig
src/bun.js/test/expect/toBeNumber.zig
src/bun.js/test/expect/toBeObject.zig
src/bun.js/test/expect/toBeOdd.zig
src/bun.js/test/expect/toBeOneOf.zig
src/bun.js/test/expect/toBePositive.zig
src/bun.js/test/expect/toBeString.zig
src/bun.js/test/expect/toBeSymbol.zig
src/bun.js/test/expect/toBeTrue.zig
src/bun.js/test/expect/toBeTruthy.zig
src/bun.js/test/expect/toBeTypeOf.zig
src/bun.js/test/expect/toBeUndefined.zig
src/bun.js/test/expect/toBeValidDate.zig
src/bun.js/test/expect/toBeWithin.zig
src/bun.js/test/expect/toContain.zig
src/bun.js/test/expect/toContainAllKeys.zig
src/bun.js/test/expect/toContainAllValues.zig
src/bun.js/test/expect/toContainAnyKeys.zig
src/bun.js/test/expect/toContainAnyValues.zig
src/bun.js/test/expect/toContainEqual.zig
src/bun.js/test/expect/toContainKey.zig
src/bun.js/test/expect/toContainKeys.zig
src/bun.js/test/expect/toContainValue.zig
src/bun.js/test/expect/toContainValues.zig
src/bun.js/test/expect/toEndWith.zig
src/bun.js/test/expect/toEqual.zig
src/bun.js/test/expect/toEqualIgnoringWhitespace.zig
src/bun.js/test/expect/toHaveBeenCalled.zig
src/bun.js/test/expect/toHaveBeenCalledOnce.zig
src/bun.js/test/expect/toHaveBeenCalledTimes.zig
src/bun.js/test/expect/toHaveBeenCalledWith.zig
src/bun.js/test/expect/toHaveBeenLastCalledWith.zig
src/bun.js/test/expect/toHaveBeenNthCalledWith.zig
src/bun.js/test/expect/toHaveLastReturnedWith.zig
src/bun.js/test/expect/toHaveLength.zig
src/bun.js/test/expect/toHaveNthReturnedWith.zig
src/bun.js/test/expect/toHaveProperty.zig
src/bun.js/test/expect/toHaveReturned.zig
src/bun.js/test/expect/toHaveReturnedTimes.zig
src/bun.js/test/expect/toHaveReturnedWith.zig
src/bun.js/test/expect/toInclude.zig
src/bun.js/test/expect/toIncludeRepeated.zig
src/bun.js/test/expect/toMatch.zig
src/bun.js/test/expect/toMatchInlineSnapshot.zig
src/bun.js/test/expect/toMatchObject.zig
src/bun.js/test/expect/toMatchSnapshot.zig
src/bun.js/test/expect/toSatisfy.zig
src/bun.js/test/expect/toStartWith.zig
src/bun.js/test/expect/toStrictEqual.zig
src/bun.js/test/expect/toThrow.zig
src/bun.js/test/expect/toThrowErrorMatchingInlineSnapshot.zig
src/bun.js/test/expect/toThrowErrorMatchingSnapshot.zig
src/bun.js/test/jest.zig
src/bun.js/test/pretty_format.zig
src/bun.js/test/snapshot.zig
@@ -597,6 +487,7 @@ src/defines.zig
src/deps/boringssl.translated.zig
src/deps/brotli_c.zig
src/deps/c_ares.zig
src/deps/diffz/DiffMatchPatch.zig
src/deps/libdeflate.zig
src/deps/libuv.zig
src/deps/lol-html.zig
@@ -791,9 +682,6 @@ src/Progress.zig
src/ptr.zig
src/ptr/Cow.zig
src/ptr/CowSlice.zig
src/ptr/meta.zig
src/ptr/owned.zig
src/ptr/owned/maybe.zig
src/ptr/ref_count.zig
src/ptr/tagged_pointer.zig
src/ptr/weak_ptr.zig
@@ -818,10 +706,8 @@ src/s3/multipart.zig
src/s3/simple_request.zig
src/s3/storage_class.zig
src/safety.zig
src/safety/alloc.zig
src/safety/alloc_ptr.zig
src/safety/CriticalSection.zig
src/safety/thread_id.zig
src/safety/ThreadLock.zig
src/semver.zig
src/semver/ExternalString.zig
src/semver/SemverObject.zig
@@ -4,7 +4,7 @@ register_repository(
  REPOSITORY
    oven-sh/mimalloc
  COMMIT
    c90e3981edcf6d75c8243e3d323e4300d700ebc6
    178534eeb7c0b4e2f438b513640c6f4d7338416a
)

set(MIMALLOC_CMAKE_ARGS
@@ -14,7 +14,7 @@ set(MIMALLOC_CMAKE_ARGS
  -DMI_BUILD_TESTS=OFF
  -DMI_USE_CXX=ON
  -DMI_SKIP_COLLECT_ON_EXIT=ON

# ```
# ❯ mimalloc_allow_large_os_pages=0 BUN_PORT=3004 mem bun http-hello.js
# Started development server: http://localhost:3004
@@ -39,23 +39,18 @@ set(MIMALLOC_CMAKE_ARGS
  -DMI_NO_THP=1
)

if (ABI STREQUAL "musl")
  list(APPEND MIMALLOC_CMAKE_ARGS -DMI_LIBC_MUSL=ON)
endif()

if(ENABLE_ASAN)
  list(APPEND MIMALLOC_CMAKE_ARGS -DMI_TRACK_ASAN=ON)
  list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OVERRIDE=OFF)
  list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OSX_ZONE=OFF)
  list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OSX_INTERPOSE=OFF)
  list(APPEND MIMALLOC_CMAKE_ARGS -DMI_DEBUG_UBSAN=ON)
elseif(APPLE OR LINUX)
  # Enable static override when ASAN is not enabled
  list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OVERRIDE=ON)
  if(APPLE)
    list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OVERRIDE=OFF)
    list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OSX_ZONE=OFF)
    list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OSX_INTERPOSE=OFF)
    list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OSX_ZONE=ON)
    list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OSX_INTERPOSE=ON)
  else()
    list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OVERRIDE=ON)
    list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OSX_ZONE=OFF)
    list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OSX_INTERPOSE=OFF)
  endif()
@@ -69,19 +64,7 @@ if(ENABLE_VALGRIND)
  list(APPEND MIMALLOC_CMAKE_ARGS -DMI_VALGRIND=ON)
endif()

# Enable SIMD optimizations when not building for baseline (older CPUs)
if(NOT ENABLE_BASELINE)
  list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OPT_ARCH=ON)
  list(APPEND MIMALLOC_CMAKE_ARGS -DMI_OPT_SIMD=ON)
endif()

if(WIN32)
  if(DEBUG)
    set(MIMALLOC_LIBRARY mimalloc-static-debug)
  else()
    set(MIMALLOC_LIBRARY mimalloc-static)
  endif()
elseif(DEBUG)
if(DEBUG)
  if (ENABLE_ASAN)
    set(MIMALLOC_LIBRARY mimalloc-asan-debug)
  else()

@@ -2,7 +2,7 @@ option(WEBKIT_VERSION "The version of WebKit to use")
option(WEBKIT_LOCAL "If a local version of WebKit should be used instead of downloading")

if(NOT WEBKIT_VERSION)
  set(WEBKIT_VERSION 53385bda2d2270223ac66f7b021a4aec3dd6df75)
  set(WEBKIT_VERSION 642e2252f6298387edb6d2f991a0408fd0320466)
endif()

string(SUBSTRING ${WEBKIT_VERSION} 0 16 WEBKIT_VERSION_PREFIX)
File diff suppressed because it is too large
@@ -320,6 +320,7 @@ Bun automatically sets the `Content-Type` header for request bodies when not exp

- For `Blob` objects, uses the blob's `type`
- For `FormData`, sets appropriate multipart boundary
- For JSON objects, sets `application/json`

## Debugging
369
docs/api/sql.md
@@ -1,20 +1,20 @@
Bun provides native bindings for working with SQL databases through a unified Promise-based API that supports both PostgreSQL and SQLite. The interface is designed to be simple and performant, using tagged template literals for queries and offering features like connection pooling, transactions, and prepared statements.
Bun provides native bindings for working with PostgreSQL databases with a modern, Promise-based API. The interface is designed to be simple and performant, using tagged template literals for queries and offering features like connection pooling, transactions, and prepared statements.

```ts
import { sql, SQL } from "bun";
import { sql } from "bun";

// PostgreSQL (default)
const users = await sql`
  SELECT * FROM users
  WHERE active = ${true}
  LIMIT ${10}
`;

// With a SQLite db
const sqlite = new SQL("sqlite://myapp.db");
const results = await sqlite`
  SELECT * FROM users
  WHERE active = ${1}
// Select with multiple conditions
const activeUsers = await sql`
  SELECT *
  FROM users
  WHERE active = ${true}
  AND age >= ${18}
`;
```

@@ -44,115 +44,6 @@ const results = await sqlite`

{% /features %}

## Database Support

Bun.SQL provides a unified API for multiple database systems:

### PostgreSQL

PostgreSQL is used when:

- The connection string doesn't match SQLite patterns (it's the fallback adapter)
- The connection string explicitly uses `postgres://` or `postgresql://` protocols
- No connection string is provided and environment variables point to PostgreSQL

```ts
import { sql } from "bun";
// Uses PostgreSQL if DATABASE_URL is not set or is a PostgreSQL URL
await sql`SELECT ...`;

import { SQL } from "bun";
const pg = new SQL("postgres://user:pass@localhost:5432/mydb");
await pg`SELECT ...`;
```

### SQLite

SQLite support is now built into Bun.SQL, providing the same tagged template literal interface as PostgreSQL:

```ts
import { SQL } from "bun";

// In-memory database
const memory = new SQL(":memory:");
const memory2 = new SQL("sqlite://:memory:");

// File-based database
const db = new SQL("sqlite://myapp.db");

// Using options object
const db2 = new SQL({
  adapter: "sqlite",
  filename: "./data/app.db",
});

// For simple filenames, specify adapter explicitly
const db3 = new SQL("myapp.db", { adapter: "sqlite" });
```

<details>
<summary>SQLite Connection String Formats</summary>

SQLite accepts various URL formats for connection strings:

```ts
// Standard sqlite:// protocol
new SQL("sqlite://path/to/database.db");
new SQL("sqlite:path/to/database.db"); // Without slashes

// file:// protocol (also recognized as SQLite)
new SQL("file://path/to/database.db");
new SQL("file:path/to/database.db");

// Special :memory: database
new SQL(":memory:");
new SQL("sqlite://:memory:");
new SQL("file://:memory:");

// Relative and absolute paths
new SQL("sqlite://./local.db"); // Relative to current directory
new SQL("sqlite://../parent/db.db"); // Parent directory
new SQL("sqlite:///absolute/path.db"); // Absolute path

// With query parameters
new SQL("sqlite://data.db?mode=ro"); // Read-only mode
new SQL("sqlite://data.db?mode=rw"); // Read-write mode (no create)
new SQL("sqlite://data.db?mode=rwc"); // Read-write-create mode (default)
```

**Note:** Simple filenames without a protocol (like `"myapp.db"`) require explicitly specifying `{ adapter: "sqlite" }` to avoid ambiguity with PostgreSQL.

</details>

<details>
<summary>SQLite-Specific Options</summary>

SQLite databases support additional configuration options:

```ts
const db = new SQL({
  adapter: "sqlite",
  filename: "app.db",

  // SQLite-specific options
  readonly: false, // Open in read-only mode
  create: true, // Create database if it doesn't exist
  readwrite: true, // Open for reading and writing

  // Additional Bun:sqlite options
  strict: true, // Enable strict mode
  safeIntegers: false, // Use JavaScript numbers for integers
});
```

Query parameters in the URL are parsed to set these options:

- `?mode=ro` → `readonly: true`
- `?mode=rw` → `readonly: false, create: false`
- `?mode=rwc` → `readonly: false, create: true` (default)

</details>

### Inserting data

You can pass JavaScript values directly to the SQL template literal and escaping will be handled for you.
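For example (a minimal sketch; the `users` table and its columns are hypothetical):

```ts
import { sql } from "bun";

// Hypothetical table and values, for illustration only
const name = "Alice";
const age = 30;

// Each ${...} interpolation is passed as a bound parameter,
// so no manual escaping is needed
await sql`INSERT INTO users (name, age) VALUES (${name}, ${age})`;
```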
@@ -360,55 +251,14 @@ await query;

## Database Environment Variables

`sql` connection parameters can be configured using environment variables. The client checks these variables in a specific order of precedence and automatically detects the database type based on the connection string format.
`sql` connection parameters can be configured using environment variables. The client checks these variables in a specific order of precedence.

### Automatic Database Detection

When using `Bun.sql()` without arguments or `new SQL()` with a connection string, the adapter is automatically detected based on the URL format. SQLite becomes the default adapter in these cases:

#### SQLite Auto-Detection

SQLite is automatically selected when the connection string matches these patterns:

- `:memory:` - In-memory database
- `sqlite://...` - SQLite protocol URLs
- `sqlite:...` - SQLite protocol without slashes
- `file://...` - File protocol URLs
- `file:...` - File protocol without slashes

```ts
// These all use SQLite automatically (no adapter needed)
const sql1 = new SQL(":memory:");
const sql2 = new SQL("sqlite://app.db");
const sql3 = new SQL("file://./database.db");

// Works with DATABASE_URL environment variable
DATABASE_URL=":memory:" bun run app.js
DATABASE_URL="sqlite://myapp.db" bun run app.js
DATABASE_URL="file://./data/app.db" bun run app.js
```

#### PostgreSQL Auto-Detection

PostgreSQL is the default for all other connection strings:

```bash
# PostgreSQL is detected for these patterns
DATABASE_URL="postgres://user:pass@localhost:5432/mydb" bun run app.js
DATABASE_URL="postgresql://user:pass@localhost:5432/mydb" bun run app.js

# Or any URL that doesn't match SQLite patterns
DATABASE_URL="localhost:5432/mydb" bun run app.js
```

### PostgreSQL Environment Variables

The following environment variables can be used to define the PostgreSQL connection:
The following environment variables can be used to define the connection URL:

| Environment Variable        | Description                                |
| --------------------------- | ------------------------------------------ |
| `POSTGRES_URL`              | Primary connection URL for PostgreSQL      |
| `DATABASE_URL`              | Alternative connection URL (auto-detected) |
| `DATABASE_URL`              | Alternative connection URL                 |
| `PGURL`                     | Alternative connection URL                 |
| `PG_URL`                    | Alternative connection URL                 |
| `TLS_POSTGRES_DATABASE_URL` | SSL/TLS-enabled connection URL             |
@@ -424,19 +274,6 @@ If no connection URL is provided, the system checks for the following individual
| `PGPASSWORD` | - | (empty)  | Database password |
| `PGDATABASE` | - | username | Database name     |

### SQLite Environment Variables

SQLite connections can be configured via `DATABASE_URL` when it contains a SQLite-compatible URL:

```bash
# These are all recognized as SQLite
DATABASE_URL=":memory:"
DATABASE_URL="sqlite://./app.db"
DATABASE_URL="file:///absolute/path/to/db.sqlite"
```

**Note:** PostgreSQL-specific environment variables (`POSTGRES_URL`, `PGHOST`, etc.) are ignored when using SQLite.

## Runtime Preconnection

Bun can preconnect to PostgreSQL at startup to improve performance by establishing database connections before your application code runs. This is useful for reducing connection latency on the first database query.
@@ -456,18 +293,16 @@ The `--sql-preconnect` flag will automatically establish a PostgreSQL connection

## Connection Options

You can configure your database connection manually by passing options to the SQL constructor. Options vary depending on the database adapter:

### PostgreSQL Options
You can configure your database connection manually by passing options to the SQL constructor:

```ts
import { SQL } from "bun";

const db = new SQL({
  // Connection details (adapter is auto-detected as PostgreSQL)
  // Required
  url: "postgres://user:pass@localhost:5432/dbname",

  // Alternative connection parameters
  // Optional configuration
  hostname: "localhost",
  port: 5432,
  database: "myapp",
@@ -495,53 +330,14 @@ const db = new SQL({

  // Callbacks
  onconnect: client => {
    console.log("Connected to PostgreSQL");
    console.log("Connected to database");
  },
  onclose: client => {
    console.log("PostgreSQL connection closed");
    console.log("Connection closed");
  },
});
```

### SQLite Options

```ts
import { SQL } from "bun";

const db = new SQL({
  // Required for SQLite
  adapter: "sqlite",
  filename: "./data/app.db", // or ":memory:" for in-memory database

  // SQLite-specific access modes
  readonly: false, // Open in read-only mode
  create: true, // Create database if it doesn't exist
  readwrite: true, // Allow read and write operations

  // SQLite data handling
  strict: true, // Enable strict mode for better type safety
  safeIntegers: false, // Use BigInt for integers exceeding JS number range

  // Callbacks
  onconnect: client => {
    console.log("SQLite database opened");
  },
  onclose: client => {
    console.log("SQLite database closed");
  },
});
```

<details>
<summary>SQLite Connection Notes</summary>

- **Connection Pooling**: SQLite doesn't use connection pooling as it's a file-based database. Each `SQL` instance represents a single connection.
- **Transactions**: SQLite supports nested transactions through savepoints, similar to PostgreSQL.
- **Concurrent Access**: SQLite handles concurrent access through file locking. Use WAL mode for better concurrency.
- **Memory Databases**: Using `:memory:` creates a temporary database that exists only for the connection lifetime.

</details>

## Dynamic passwords

When clients need to use alternative authentication schemes such as access tokens or connections to databases with rotating passwords, provide either a synchronous or asynchronous function that will resolve the dynamic password value at connection time.
@@ -557,66 +353,11 @@ const sql = new SQL(url, {
});
```

## SQLite-Specific Features

### Query Execution

SQLite executes queries synchronously, unlike PostgreSQL which uses asynchronous I/O. However, the API remains consistent using Promises:

```ts
const sqlite = new SQL("sqlite://app.db");

// Works the same as PostgreSQL, but executes synchronously under the hood
const users = await sqlite`SELECT * FROM users`;

// Parameters work identically
const user = await sqlite`SELECT * FROM users WHERE id = ${userId}`;
```

### SQLite Pragmas

You can use PRAGMA statements to configure SQLite behavior:

```ts
const sqlite = new SQL("sqlite://app.db");

// Enable foreign keys
await sqlite`PRAGMA foreign_keys = ON`;

// Set journal mode to WAL for better concurrency
await sqlite`PRAGMA journal_mode = WAL`;

// Check integrity
const integrity = await sqlite`PRAGMA integrity_check`;
```

### Data Type Differences

SQLite has a more flexible type system than PostgreSQL:

```ts
// SQLite stores data in 5 storage classes: NULL, INTEGER, REAL, TEXT, BLOB
const sqlite = new SQL("sqlite://app.db");

// SQLite is more lenient with types
await sqlite`
  CREATE TABLE flexible (
    id INTEGER PRIMARY KEY,
    data TEXT,     -- Can store numbers as strings
    value NUMERIC, -- Can store integers, reals, or text
    blob BLOB      -- Binary data
  )
`;

// JavaScript values are automatically converted
await sqlite`INSERT INTO flexible VALUES (${1}, ${"text"}, ${123.45}, ${Buffer.from("binary")})`;
```

## Transactions

To start a new transaction, use `sql.begin`. This method works for both PostgreSQL and SQLite. For PostgreSQL, it reserves a dedicated connection from the pool. For SQLite, it begins a transaction on the single connection.
To start a new transaction, use `sql.begin`. This method reserves a dedicated connection for the duration of the transaction and provides a scoped `sql` instance to use within the callback function. Once the callback completes, `sql.begin` resolves with the return value of the callback.

The `BEGIN` command is sent automatically, including any optional configurations you specify. If an error occurs during the transaction, a `ROLLBACK` is triggered to ensure the process continues smoothly.
The `BEGIN` command is sent automatically, including any optional configurations you specify. If an error occurs during the transaction, a `ROLLBACK` is triggered to release the reserved connection and ensure the process continues smoothly.

### Basic Transactions

@@ -811,34 +552,9 @@ Note that disabling prepared statements may impact performance for queries that

## Error Handling

The client provides typed errors for different failure scenarios. Errors are database-specific and extend from base error classes:
The client provides typed errors for different failure scenarios:

### Error Classes

```ts
import { SQL } from "bun";

try {
  await sql`SELECT * FROM users`;
} catch (error) {
  if (error instanceof SQL.PostgresError) {
    // PostgreSQL-specific error
    console.log(error.code); // PostgreSQL error code
    console.log(error.detail); // Detailed error message
    console.log(error.hint); // Helpful hint from PostgreSQL
  } else if (error instanceof SQL.SQLiteError) {
    // SQLite-specific error
    console.log(error.code); // SQLite error code (e.g., "SQLITE_CONSTRAINT")
    console.log(error.errno); // SQLite error number
    console.log(error.byteOffset); // Byte offset in SQL statement (if available)
  } else if (error instanceof SQL.SQLError) {
    // Generic SQL error (base class)
    console.log(error.message);
  }
}
```

### PostgreSQL Connection Errors
### Connection Errors

| Connection Errors                 | Description                                           |
| --------------------------------- | ----------------------------------------------------- |
@@ -903,50 +619,6 @@ try {
| `ERR_POSTGRES_UNSAFE_TRANSACTION` | Unsafe transaction operation detected |
| `ERR_POSTGRES_INVALID_TRANSACTION_STATE` | Invalid transaction state |

### SQLite-Specific Errors

SQLite errors provide error codes and numbers that correspond to SQLite's standard error codes:

<details>
<summary>Common SQLite Error Codes</summary>

| Error Code          | errno | Description                                          |
| ------------------- | ----- | ---------------------------------------------------- |
| `SQLITE_CONSTRAINT` | 19    | Constraint violation (UNIQUE, CHECK, NOT NULL, etc.) |
| `SQLITE_BUSY`       | 5     | Database is locked                                   |
| `SQLITE_LOCKED`     | 6     | Table in the database is locked                      |
| `SQLITE_READONLY`   | 8     | Attempt to write to a readonly database              |
| `SQLITE_IOERR`      | 10    | Disk I/O error                                       |
| `SQLITE_CORRUPT`    | 11    | Database disk image is malformed                     |
| `SQLITE_FULL`       | 13    | Database or disk is full                             |
| `SQLITE_CANTOPEN`   | 14    | Unable to open database file                         |
| `SQLITE_PROTOCOL`   | 15    | Database lock protocol error                         |
| `SQLITE_SCHEMA`     | 17    | Database schema has changed                          |
| `SQLITE_TOOBIG`     | 18    | String or BLOB exceeds size limit                    |
| `SQLITE_MISMATCH`   | 20    | Data type mismatch                                   |
| `SQLITE_MISUSE`     | 21    | Library used incorrectly                             |
| `SQLITE_AUTH`       | 23    | Authorization denied                                 |

Example error handling:

```ts
const sqlite = new SQL("sqlite://app.db");

try {
  await sqlite`INSERT INTO users (id, name) VALUES (1, 'Alice')`;
  await sqlite`INSERT INTO users (id, name) VALUES (1, 'Bob')`; // Duplicate ID
} catch (error) {
  if (error instanceof SQL.SQLiteError) {
    if (error.code === "SQLITE_CONSTRAINT") {
      console.log("Constraint violation:", error.message);
      // Handle unique constraint violation
    }
  }
}
```

</details>

## Numbers and BigInt

Bun's SQL client includes special handling for large numbers that exceed the range of a 53-bit integer. Here's how it works:
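A minimal sketch of the behavior described above, using the `safeIntegers` option shown earlier in the SQLite options (the table and values are hypothetical, and the exact return type is an assumption):

```ts
import { SQL } from "bun";

// With safeIntegers enabled, integers outside the ±(2^53 - 1) safe range
// can round-trip as BigInt instead of losing precision as doubles
const db = new SQL({ adapter: "sqlite", filename: ":memory:", safeIntegers: true });

await db`CREATE TABLE counters (id INTEGER PRIMARY KEY, value INTEGER)`;
// 9007199254740993n is 2^53 + 1, one past Number.MAX_SAFE_INTEGER
await db`INSERT INTO counters (id, value) VALUES (1, ${9007199254740993n})`;

const [row] = await db`SELECT value FROM counters WHERE id = 1`;
console.log(typeof row.value); // expected: "bigint" when safeIntegers is true
```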
@@ -980,6 +652,7 @@ There's still some things we haven't finished yet.

- Connection preloading via `--db-preconnect` Bun CLI flag
- MySQL support: [we're working on it](https://github.com/oven-sh/bun/pull/15274)
- SQLite support: planned, but not started. Ideally, we implement it natively instead of wrapping `bun:sqlite`.
- Column name transforms (e.g. `snake_case` to `camelCase`). This is mostly blocked on a unicode-aware implementation of changing the case in C++ using WebKit's `WTF::String`.
- Column type transforms

@@ -772,65 +772,6 @@ console.log(obj); // => { foo: "bar" }

Internally, [`structuredClone`](https://developer.mozilla.org/en-US/docs/Web/API/structuredClone) and [`postMessage`](https://developer.mozilla.org/en-US/docs/Web/API/Window/postMessage) serialize and deserialize the same way. This exposes the underlying [HTML Structured Clone Algorithm](https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Structured_clone_algorithm) to JavaScript as an ArrayBuffer.

## `Bun.stripANSI()` ~6-57x faster `strip-ansi` alternative

`Bun.stripANSI(text: string): string`

Strip ANSI escape codes from a string. This is useful for removing colors and formatting from terminal output.

```ts
const coloredText = "\u001b[31mHello\u001b[0m \u001b[32mWorld\u001b[0m";
const plainText = Bun.stripANSI(coloredText);
console.log(plainText); // => "Hello World"

// Works with various ANSI codes
const formatted = "\u001b[1m\u001b[4mBold and underlined\u001b[0m";
console.log(Bun.stripANSI(formatted)); // => "Bold and underlined"
```

`Bun.stripANSI` is significantly faster than the popular [`strip-ansi`](https://www.npmjs.com/package/strip-ansi) npm package:

```js
> bun bench/snippets/strip-ansi.mjs
cpu: Apple M3 Max
runtime: bun 1.2.21 (arm64-darwin)

benchmark                                     avg (min … max)  p75 / p99
------------------------------------------------------- ----------
Bun.stripANSI 11 chars no-ansi                  8.13 ns/iter    8.27 ns
                                       (7.45 ns … 33.59 ns)   10.29 ns

Bun.stripANSI 13 chars ansi                    51.68 ns/iter   52.51 ns
                                     (46.16 ns … 113.71 ns)   57.71 ns

Bun.stripANSI 16,384 chars long-no-ansi       298.39 ns/iter  305.44 ns
                                    (281.50 ns … 331.65 ns)  320.70 ns

Bun.stripANSI 212,992 chars long-ansi         227.65 µs/iter  234.50 µs
                                    (216.46 µs … 401.92 µs)  262.25 µs
```

```js
> node bench/snippets/strip-ansi.mjs
cpu: Apple M3 Max
runtime: node 24.6.0 (arm64-darwin)

benchmark                                     avg (min … max)  p75 / p99
-------------------------------------------------------- ---------
npm/strip-ansi 11 chars no-ansi               466.79 ns/iter  468.67 ns
                                    (454.08 ns … 570.67 ns)  543.67 ns

npm/strip-ansi 13 chars ansi                  546.77 ns/iter  550.23 ns
                                    (532.74 ns … 651.08 ns)  590.35 ns

npm/strip-ansi 16,384 chars long-no-ansi        4.85 µs/iter    4.89 µs
                                        (4.71 µs … 5.00 µs)    4.98 µs

npm/strip-ansi 212,992 chars long-ansi          1.36 ms/iter    1.38 ms
                                        (1.27 ms … 1.73 ms)    1.49 ms

```

## `estimateShallowMemoryUsageOf` in `bun:jsc`

The `estimateShallowMemoryUsageOf` function returns a best-effort estimate of the memory usage of an object in bytes, excluding the memory usage of properties or other objects it references. For accurate per-object memory usage, use `Bun.generateHeapSnapshot`.
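A minimal usage sketch (the object being measured is arbitrary):

```ts
import { estimateShallowMemoryUsageOf } from "bun:jsc";

const obj = { foo: "bar", baz: [1, 2, 3] };

// Shallow estimate only: the array referenced by `baz` is not included
console.log(estimateShallowMemoryUsageOf(obj));
```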
@@ -1,5 +1,5 @@
{% callout %}
**🚧** — The `Worker` API is still experimental (particularly for terminating workers). We are actively working on improving this.
**🚧** — The `Worker` API is still experimental and should not be considered ready for production.
{% /callout %}

[`Worker`](https://developer.mozilla.org/en-US/docs/Web/API/Worker) lets you start and communicate with a new JavaScript instance running on a separate thread while sharing I/O resources with the main thread.
@@ -496,36 +496,6 @@ Whether to generate a non-Bun lockfile alongside `bun.lock`. (A `bun.lock` will
print = "yarn"
```

### `install.linker`

Configure the default linker strategy. Default `"hoisted"`.

For complete documentation refer to [Package manager > Isolated installs](https://bun.com/docs/install/isolated).

```toml
[install]
linker = "hoisted"
```

Valid values are:

{% table %}

- Value
- Description

---

- `"hoisted"`
- Link dependencies in a shared `node_modules` directory.

---

- `"isolated"`
- Link dependencies inside each package installation.

{% /table %}

<!-- ## Debugging -->

<!--
@@ -148,7 +148,7 @@ This page is updated regularly to reflect compatibility status of the latest ver

### [`node:vm`](https://nodejs.org/api/vm.html)

🟡 Core functionality and ES modules are implemented, including `vm.Script`, `vm.createContext`, `vm.runInContext`, `vm.runInNewContext`, `vm.runInThisContext`, `vm.compileFunction`, `vm.isContext`, `vm.Module`, `vm.SourceTextModule`, `vm.SyntheticModule`, and `importModuleDynamically` support. Options like `timeout` and `breakOnSigint` are fully supported. Missing `vm.measureMemory` and some `cachedData` functionality.
🟡 Core functionality works, but experimental VM ES modules are not implemented, including `vm.Module`, `vm.SourceTextModule`, `vm.SyntheticModule`, `importModuleDynamically`, and `vm.measureMemory`. Options like `timeout`, `breakOnSigint`, `cachedData` are not implemented yet.

### [`node:wasi`](https://nodejs.org/api/wasi.html)

@@ -214,10 +214,6 @@ The table below lists all globals implemented by Node.js and Bun's current compa

🟢 Fully implemented.

### [`Atomics`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Atomics)

🟢 Fully implemented.

### [`BroadcastChannel`](https://developer.mozilla.org/en-US/docs/Web/API/BroadcastChannel)

🟢 Fully implemented.
@@ -532,74 +532,6 @@ Hello World! pwd=C:\Users\Demo

Bun Shell is a small programming language in Bun that is implemented in Zig. It includes a handwritten lexer, parser, and interpreter. Unlike bash, zsh, and other shells, Bun Shell runs operations concurrently.

## Security in the Bun shell

By design, the Bun shell _does not invoke a system shell_ (like `/bin/sh`) and
is instead a re-implementation of bash that runs in the same Bun process,
designed with security in mind.

When parsing command arguments, it treats all _interpolated variables_ as single, literal strings.

This protects the Bun shell against **command injection**:

```js
import { $ } from "bun";

const userInput = "my-file.txt; rm -rf /";

// SAFE: `userInput` is treated as a single quoted string
await $`ls ${userInput}`;
```

In the above example, `userInput` is treated as a single string. This causes
the `ls` command to try to read the contents of a single directory named
"my-file.txt; rm -rf /".

### Security considerations

While command injection is prevented by default, developers are still
responsible for security in certain scenarios.

Similar to the `Bun.spawn` or `node:child_process.exec()` APIs, you can intentionally
execute a command which spawns a new shell (e.g. `bash -c`) with arguments.

When you do this, you hand off control, and Bun's built-in protections no
longer apply to the string interpreted by that new shell.

```js
import { $ } from "bun";

const userInput = "world; touch /tmp/pwned";

// UNSAFE: You have explicitly started a new shell process with `bash -c`.
// This new shell will execute the `touch` command. Any user input
// passed this way must be rigorously sanitized.
await $`bash -c "echo ${userInput}"`;
```

### Argument injection

The Bun shell cannot know how an external command interprets its own
command-line arguments. An attacker can supply input that the target program
recognizes as one of its own options or flags, leading to unintended behavior.

```js
import { $ } from "bun";

// Malicious input formatted as a Git command-line flag
const branch = "--upload-pack=echo pwned";

// UNSAFE: While Bun safely passes the string as a single argument,
// the `git` program itself sees and acts upon the malicious flag.
await $`git ls-remote origin ${branch}`;
```

{% callout %}
**Recommendation** — As is best practice in every language, always sanitize
user-provided input before passing it as an argument to an external command.
The responsibility for validating arguments rests with your application code.
{% /callout %}

## Credits

Large parts of this API were inspired by [zx](https://github.com/google/zx), [dax](https://github.com/dsherret/dax), and [bnx](https://github.com/wobsoriano/bnx). Thank you to the authors of those projects.
@@ -1,724 +0,0 @@
|
||||
#!/usr/bin/env bun
|
||||
/**
|
||||
* CLI Flag Parser for Bun Commands
|
||||
*
|
||||
* This script reads the --help menu for every Bun command and generates JSON
|
||||
* containing all flag information, descriptions, and whether they support
|
||||
* positional or non-positional arguments.
|
||||
*
|
||||
* Handles complex cases like:
|
||||
* - Nested subcommands (bun pm cache rm)
|
||||
* - Command aliases (bun i = bun install, bun a = bun add)
|
||||
* - Dynamic completions (scripts, packages, files)
|
||||
* - Context-aware flags
|
||||
* - Special cases like bare 'bun' vs 'bun run'
|
||||
*
|
||||
* Output is saved to completions/bun-cli.json for use in generating
|
||||
* shell completions (fish, bash, zsh).
|
||||
*/
|
||||
|
||||
import { spawn } from "bun";
|
||||
import { mkdirSync, writeFileSync, mkdtempSync, rmSync } from "fs";
|
||||
import { join } from "path";
|
||||
|
||||
interface FlagInfo {
|
||||
name: string;
|
||||
shortName?: string;
|
||||
description: string;
|
||||
hasValue: boolean;
|
||||
valueType?: string;
|
||||
defaultValue?: string;
|
||||
choices?: string[];
|
||||
required?: boolean;
|
||||
multiple?: boolean;
|
||||
}
|
||||
|
||||
interface SubcommandInfo {
|
||||
name: string;
|
||||
description: string;
|
||||
flags?: FlagInfo[];
|
||||
subcommands?: Record<string, SubcommandInfo>;
|
||||
positionalArgs?: {
|
||||
name: string;
|
||||
description?: string;
|
||||
required: boolean;
|
||||
multiple: boolean;
|
||||
type?: string;
|
||||
completionType?: string;
|
||||
}[];
|
||||
examples?: string[];
|
||||
}
|
||||
|
||||
interface CommandInfo {
|
||||
name: string;
|
||||
aliases?: string[];
|
||||
description: string;
|
||||
usage?: string;
|
||||
flags: FlagInfo[];
|
||||
positionalArgs: {
|
||||
name: string;
|
||||
description?: string;
|
||||
required: boolean;
|
||||
multiple: boolean;
|
||||
type?: string;
|
||||
completionType?: string;
|
||||
}[];
|
||||
examples: string[];
|
||||
subcommands?: Record<string, SubcommandInfo>;
|
||||
documentationUrl?: string;
|
||||
dynamicCompletions?: {
|
||||
scripts?: boolean;
|
||||
packages?: boolean;
|
||||
files?: boolean;
|
||||
binaries?: boolean;
|
||||
};
|
||||
}
|
||||
|
||||
interface CompletionData {
|
||||
version: string;
|
||||
commands: Record<string, CommandInfo>;
|
||||
globalFlags: FlagInfo[];
|
||||
specialHandling: {
|
||||
bareCommand: {
|
||||
description: string;
|
||||
canRunFiles: boolean;
|
||||
dynamicCompletions: {
|
||||
scripts: boolean;
|
||||
files: boolean;
|
||||
binaries: boolean;
|
||||
};
|
||||
};
|
||||
};
|
||||
bunGetCompletes: {
|
||||
available: boolean;
|
||||
commands: {
|
||||
scripts: string; // "bun getcompletes s" or "bun getcompletes z"
|
||||
binaries: string; // "bun getcompletes b"
|
||||
packages: string; // "bun getcompletes a <prefix>"
|
||||
files: string; // "bun getcompletes j"
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
const BUN_EXECUTABLE = process.env.BUN_DEBUG_BUILD || "bun";
|
||||
|
||||
/**
|
||||
* Parse flag line from help output
|
||||
*/
|
||||
function parseFlag(line: string): FlagInfo | null {
|
||||
// Match patterns like:
|
||||
// -h, --help Display this menu and exit
|
||||
// --timeout=<val> Set the per-test timeout in milliseconds, default is 5000.
|
||||
// -r, --preload=<val> Import a module before other modules are loaded
|
||||
// --watch Automatically restart the process on file change
|
||||
|
||||
const patterns = [
|
||||
// Long flag with short flag and value: -r, --preload=<val>
|
||||
/^\s*(-[a-zA-Z]),\s+(--[a-zA-Z-]+)=(<[^>]+>)\s+(.+)$/,
|
||||
// Long flag with short flag: -h, --help
|
||||
/^\s*(-[a-zA-Z]),\s+(--[a-zA-Z-]+)\s+(.+)$/,
|
||||
// Long flag with value: --timeout=<val>
|
||||
/^\s+(--[a-zA-Z-]+)=(<[^>]+>)\s+(.+)$/,
|
||||
// Long flag without value: --watch
|
||||
/^\s+(--[a-zA-Z-]+)\s+(.+)$/,
|
||||
// Short flag only: -i
|
||||
/^\s+(-[a-zA-Z])\s+(.+)$/,
|
||||
];
|
||||
|
||||
for (const pattern of patterns) {
|
||||
const match = line.match(pattern);
|
||||
if (match) {
|
||||
let shortName: string | undefined;
|
||||
let longName: string;
|
||||
let valueSpec: string | undefined;
|
||||
let description: string;
|
||||
|
||||
if (match.length === 5) {
|
||||
// Pattern with short flag, long flag, and value
|
||||
[, shortName, longName, valueSpec, description] = match;
|
||||
} else if (match.length === 4) {
|
||||
if (match[1].startsWith("-") && match[1].length === 2) {
|
||||
// Short flag with long flag
|
||||
[, shortName, longName, description] = match;
|
||||
} else if (match[2].startsWith("<")) {
|
||||
// Long flag with value
|
||||
[, longName, valueSpec, description] = match;
|
||||
} else {
|
||||
// Long flag without value
|
||||
[, longName, description] = match;
|
||||
}
|
||||
} else if (match.length === 3) {
|
||||
if (match[1].length === 2) {
|
||||
// Short flag only
|
||||
[, shortName, description] = match;
|
||||
longName = shortName.replace("-", "--");
|
||||
} else {
|
||||
// Long flag without value
|
||||
[, longName, description] = match;
|
||||
}
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Extract additional info from description
|
||||
const hasValue = !!valueSpec;
|
||||
let valueType: string | undefined;
|
||||
let defaultValue: string | undefined;
|
||||
let choices: string[] | undefined;
|
||||
|
||||
if (valueSpec) {
|
||||
valueType = valueSpec.replace(/[<>]/g, "");
|
||||
}
|
||||
|
||||
// Look for default values in description
|
||||
const defaultMatch = description.match(/[Dd]efault(?:s?)\s*(?:is|to|:)\s*"?([^".\s,]+)"?/);
|
||||
if (defaultMatch) {
|
||||
defaultValue = defaultMatch[1];
|
||||
}
|
||||
|
||||
// Look for choices/enums
|
||||
const choicesMatch = description.match(/(?:One of|Valid (?:orders?|values?|options?)):?\s*"?([^"]+)"?/);
|
||||
if (choicesMatch) {
|
||||
choices = choicesMatch[1]
|
||||
.split(/[,\s]+/)
|
||||
.map(s => s.replace(/[",]/g, "").trim())
|
||||
.filter(Boolean);
|
||||
}
|
||||
|
||||
return {
|
||||
name: longName.replace(/^--/, ""),
|
||||
shortName: shortName?.replace(/^-/, ""),
|
||||
description: description.trim(),
|
||||
hasValue,
|
||||
valueType,
|
||||
defaultValue,
|
||||
choices,
|
||||
required: false, // We'll determine this from usage patterns
|
||||
multiple: description.toLowerCase().includes("multiple") || description.includes("[]"),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse usage line to extract positional arguments
|
||||
*/
|
||||
function parseUsage(usage: string): {
|
||||
name: string;
|
||||
description?: string;
|
||||
required: boolean;
|
||||
multiple: boolean;
|
||||
type?: string;
|
||||
completionType?: string;
|
||||
}[] {
|
||||
const args: {
|
||||
name: string;
|
||||
description?: string;
|
||||
required: boolean;
|
||||
multiple: boolean;
|
||||
type?: string;
|
||||
completionType?: string;
|
||||
}[] = [];
|
||||
|
||||
// Extract parts after command name
|
||||
const parts = usage.split(/\s+/).slice(2); // Skip "Usage:" and command name
|
||||
|
||||
for (const part of parts) {
|
||||
if (part.startsWith("[") || part.startsWith("<") || part.includes("...")) {
|
||||
let name = part;
|
||||
let required = false;
|
||||
let multiple = false;
|
||||
let completionType: string | undefined;
|
||||
|
||||
// Clean up the argument name
|
||||
name = name.replace(/[\[\]<>]/g, "");
|
||||
|
||||
if (part.startsWith("<")) {
|
||||
required = true;
|
||||
}
|
||||
|
||||
if (part.includes("...") || name.includes("...")) {
|
||||
multiple = true;
|
||||
name = name.replace(/\.{3}/g, "");
|
||||
}
|
||||
|
||||
// Skip flags
|
||||
if (!name.startsWith("-") && name.length > 0) {
|
||||
// Determine completion type based on argument name
|
||||
if (name.toLowerCase().includes("package")) {
|
||||
completionType = "package";
|
||||
} else if (name.toLowerCase().includes("script")) {
|
||||
completionType = "script";
|
||||
} else if (name.toLowerCase().includes("file") || name.includes(".")) {
|
||||
completionType = "file";
|
||||
}
|
||||
|
||||
args.push({
|
||||
name,
|
||||
required,
|
||||
multiple,
|
||||
type: "string", // Default type
|
||||
completionType,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return args;
|
||||
}
|
||||
|
||||
const temppackagejson = mkdtempSync("package");
|
||||
writeFileSync(
|
||||
join(temppackagejson, "package.json"),
|
||||
JSON.stringify({
|
||||
name: "test",
|
||||
version: "1.0.0",
|
||||
scripts: {},
|
||||
}),
|
||||
);
|
||||
process.once("beforeExit", () => {
|
||||
rmSync(temppackagejson, { recursive: true });
|
||||
});
|
||||
|
||||
/**
|
||||
* Execute bun command and get help output
|
||||
*/
|
||||
async function getHelpOutput(command: string[]): Promise<string> {
|
||||
try {
|
||||
const proc = spawn({
|
||||
cmd: [BUN_EXECUTABLE, ...command, "--help"],
|
||||
stdout: "pipe",
|
||||
stderr: "pipe",
|
||||
cwd: temppackagejson,
|
||||
});
|
||||
|
||||
const [stdout, stderr] = await Promise.all([new Response(proc.stdout).text(), new Response(proc.stderr).text()]);
|
||||
|
||||
await proc.exited;
|
||||
|
||||
return stdout || stderr || "";
|
||||
} catch (error) {
|
||||
console.error(`Failed to get help for command: ${command.join(" ")}`, error);
|
||||
return "";
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse PM subcommands from help output
|
||||
*/
|
||||
function parsePmSubcommands(helpText: string): Record<string, SubcommandInfo> {
|
||||
const lines = helpText.split("\n");
|
||||
const subcommands: Record<string, SubcommandInfo> = {};
|
||||
|
||||
let inCommands = false;
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim();
|
||||
|
||||
if (trimmed === "Commands:") {
|
||||
inCommands = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (inCommands && trimmed.startsWith("Learn more")) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (inCommands && line.match(/^\s+bun pm \w+/)) {
|
||||
// Parse lines like: "bun pm pack create a tarball of the current workspace"
|
||||
const match = line.match(/^\s+bun pm (\S+)(?:\s+(.+))?$/);
|
||||
if (match) {
|
||||
const [, name, description = ""] = match;
|
||||
subcommands[name] = {
|
||||
name,
|
||||
description: description.trim(),
|
||||
flags: [],
|
||||
positionalArgs: [],
|
||||
};
|
||||
|
||||
// Special handling for subcommands with their own subcommands
|
||||
if (name === "cache") {
|
||||
subcommands[name].subcommands = {
|
||||
rm: {
|
||||
name: "rm",
|
||||
description: "clear the cache",
|
||||
},
|
||||
};
|
||||
} else if (name === "pkg") {
|
||||
subcommands[name].subcommands = {
|
||||
get: { name: "get", description: "get values from package.json" },
|
||||
set: { name: "set", description: "set values in package.json" },
|
||||
delete: { name: "delete", description: "delete keys from package.json" },
|
||||
fix: { name: "fix", description: "auto-correct common package.json errors" },
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return subcommands;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse help output into CommandInfo
|
||||
*/
|
||||
function parseHelpOutput(helpText: string, commandName: string): CommandInfo {
|
||||
const lines = helpText.split("\n");
|
||||
const command: CommandInfo = {
|
||||
name: commandName,
|
||||
description: "",
|
||||
flags: [],
|
||||
positionalArgs: [],
|
||||
examples: [],
|
||||
};
|
||||
|
||||
let currentSection = "";
|
||||
let inFlags = false;
|
||||
let inExamples = false;
|
||||
|
||||
for (let i = 0; i < lines.length; i++) {
|
||||
const line = lines[i];
|
||||
const trimmed = line.trim();
|
||||
|
||||
// Extract command description (usually the first non-usage line)
|
||||
if (
|
||||
!command.description &&
|
||||
trimmed &&
|
||||
!trimmed.startsWith("Usage:") &&
|
||||
!trimmed.startsWith("Alias:") &&
|
||||
currentSection === ""
|
||||
) {
|
||||
command.description = trimmed;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Extract aliases
|
||||
if (trimmed.startsWith("Alias:")) {
|
||||
const aliasMatch = trimmed.match(/Alias:\s*(.+)/);
|
||||
if (aliasMatch) {
|
||||
command.aliases = aliasMatch[1]
|
||||
.split(/[,\s]+/)
|
||||
.map(a => a.trim())
|
||||
.filter(Boolean);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Extract usage and positional args
|
||||
if (trimmed.startsWith("Usage:")) {
|
||||
command.usage = trimmed;
|
||||
command.positionalArgs = parseUsage(trimmed);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Track sections
|
||||
if (trimmed === "Flags:") {
|
||||
inFlags = true;
|
||||
currentSection = "flags";
|
||||
continue;
|
||||
} else if (trimmed === "Examples:") {
|
||||
inExamples = true;
|
||||
inFlags = false;
|
||||
currentSection = "examples";
|
||||
continue;
|
||||
} else if (
|
||||
trimmed.startsWith("Full documentation") ||
|
||||
trimmed.startsWith("Learn more") ||
|
||||
trimmed.startsWith("A full list")
|
||||
) {
|
||||
const urlMatch = trimmed.match(/https?:\/\/[^\s]+/);
|
||||
if (urlMatch) {
|
||||
command.documentationUrl = urlMatch[0];
|
||||
}
|
||||
inFlags = false;
|
||||
inExamples = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Parse flags
|
||||
if (inFlags && line.match(/^\s+(-|\s+--)/)) {
|
||||
const flag = parseFlag(line);
|
||||
if (flag) {
|
||||
command.flags.push(flag);
|
||||
}
|
||||
}
|
||||
|
||||
// Parse examples
|
||||
if (inExamples && trimmed && !trimmed.startsWith("Full documentation")) {
|
||||
if (trimmed.startsWith("bun ") || trimmed.startsWith("./") || trimmed.startsWith("Bundle")) {
|
||||
command.examples.push(trimmed);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Special case for pm command
|
||||
if (commandName === "pm") {
|
||||
command.subcommands = parsePmSubcommands(helpText);
|
||||
}
|
||||
|
||||
// Add dynamic completion info based on command
|
||||
command.dynamicCompletions = {};
|
||||
if (commandName === "run") {
|
||||
command.dynamicCompletions.scripts = true;
|
||||
command.dynamicCompletions.files = true;
|
||||
command.dynamicCompletions.binaries = true;
|
||||
// Also add file type info for positional args
|
||||
for (const arg of command.positionalArgs) {
|
||||
if (arg.name.includes("file") || arg.name.includes("script")) {
|
||||
arg.completionType = "javascript_files";
|
||||
}
|
||||
}
|
||||
} else if (commandName === "add") {
|
||||
command.dynamicCompletions.packages = true;
|
||||
// Mark package args
|
||||
for (const arg of command.positionalArgs) {
|
||||
if (arg.name.includes("package") || arg.name === "name") {
|
||||
arg.completionType = "package";
|
||||
}
|
||||
}
|
||||
} else if (commandName === "remove") {
|
||||
command.dynamicCompletions.packages = true; // installed packages
|
||||
for (const arg of command.positionalArgs) {
|
||||
if (arg.name.includes("package") || arg.name === "name") {
|
||||
arg.completionType = "installed_package";
|
||||
}
|
||||
}
|
||||
} else if (["test"].includes(commandName)) {
|
||||
command.dynamicCompletions.files = true;
|
||||
for (const arg of command.positionalArgs) {
|
||||
if (arg.name.includes("pattern") || arg.name.includes("file")) {
|
||||
arg.completionType = "test_files";
|
||||
}
|
||||
}
|
||||
} else if (["build"].includes(commandName)) {
|
||||
command.dynamicCompletions.files = true;
|
||||
for (const arg of command.positionalArgs) {
|
||||
if (arg.name === "entrypoint" || arg.name.includes("file")) {
|
||||
arg.completionType = "javascript_files";
|
||||
}
|
||||
}
|
||||
} else if (commandName === "create") {
|
||||
// Create has special template completions
|
||||
for (const arg of command.positionalArgs) {
|
||||
if (arg.name.includes("template")) {
|
||||
arg.completionType = "create_template";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return command;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get list of main commands from bun --help
|
||||
*/
|
||||
async function getMainCommands(): Promise<string[]> {
|
||||
const helpText = await getHelpOutput([]);
|
||||
const lines = helpText.split("\n");
|
||||
const commands: string[] = [];
|
||||
|
||||
let inCommands = false;
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim();
|
||||
|
||||
if (trimmed === "Commands:") {
|
||||
inCommands = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Stop when we hit the "Flags:" section
|
||||
if (inCommands && trimmed === "Flags:") {
|
||||
break;
|
||||
}
|
||||
|
||||
if (inCommands && line.match(/^\s+\w+/)) {
|
||||
// Extract command name (first word after whitespace)
|
||||
const match = line.match(/^\s+(\w+)/);
|
||||
if (match) {
|
||||
commands.push(match[1]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const commandsToRemove = ["lint"];
|
||||
|
||||
return commands.filter(a => {
|
||||
if (commandsToRemove.includes(a)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract global flags from main help
|
||||
*/
|
||||
function parseGlobalFlags(helpText: string): FlagInfo[] {
|
||||
const lines = helpText.split("\n");
|
||||
const flags: FlagInfo[] = [];
|
||||
|
||||
let inFlags = false;
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim();
|
||||
|
||||
if (trimmed === "Flags:") {
|
||||
inFlags = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (inFlags && (trimmed === "" || trimmed.startsWith("("))) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (inFlags && line.match(/^\s+(-|\s+--)/)) {
|
||||
const flag = parseFlag(line);
|
||||
if (flag) {
|
||||
flags.push(flag);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return flags;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add command aliases based on common patterns
|
||||
*/
|
||||
function addCommandAliases(commands: Record<string, CommandInfo>): void {
|
||||
const aliasMap: Record<string, string[]> = {
|
||||
"install": ["i"],
|
||||
"add": ["a"],
|
||||
"remove": ["rm"],
|
||||
"create": ["c"],
|
||||
"x": ["bunx"], // bunx is an alias for bun x
|
||||
};
|
||||
|
||||
for (const [command, aliases] of Object.entries(aliasMap)) {
|
||||
if (commands[command]) {
|
||||
commands[command].aliases = aliases;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Main function to generate completion data
|
||||
*/
|
||||
async function generateCompletions(): Promise<void> {
|
||||
console.log("🔍 Discovering Bun commands...");
|
||||
|
||||
// Get main help and extract commands
|
||||
const mainHelpText = await getHelpOutput([]);
|
||||
const mainCommands = await getMainCommands();
|
||||
const globalFlags = parseGlobalFlags(mainHelpText);
|
||||
|
||||
console.log(`📋 Found ${mainCommands.length} main commands: ${mainCommands.join(", ")}`);
|
||||
|
||||
const completionData: CompletionData = {
|
||||
version: "1.1.0",
|
||||
commands: {},
|
||||
globalFlags,
|
||||
specialHandling: {
|
||||
bareCommand: {
|
||||
description: "Run JavaScript/TypeScript files directly or access package scripts and binaries",
|
||||
canRunFiles: true,
|
||||
dynamicCompletions: {
|
||||
scripts: true,
|
||||
files: true,
|
||||
binaries: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
    bunGetCompletes: {
      available: true,
      commands: {
        scripts: "bun getcompletes s", // or "bun getcompletes z" for scripts with descriptions
        binaries: "bun getcompletes b",
        packages: "bun getcompletes a", // takes prefix as argument
        files: "bun getcompletes j", // JavaScript/TypeScript files
      },
    },
  };

  // Parse each command
  for (const commandName of mainCommands) {
    console.log(`📖 Parsing help for: ${commandName}`);

    try {
      const helpText = await getHelpOutput([commandName]);
      if (helpText.trim()) {
        const commandInfo = parseHelpOutput(helpText, commandName);
        completionData.commands[commandName] = commandInfo;
      }
    } catch (error) {
      console.error(`❌ Failed to parse ${commandName}:`, error);
    }
  }

  // Add common aliases
  addCommandAliases(completionData.commands);

  // Also check some common subcommands that might have their own help
  const additionalCommands = ["pm"];
  for (const commandName of additionalCommands) {
    if (!completionData.commands[commandName]) {
      console.log(`📖 Parsing help for additional command: ${commandName}`);

      try {
        const helpText = await getHelpOutput([commandName]);
        if (helpText.trim() && !helpText.includes("error:") && !helpText.includes("Error:")) {
          const commandInfo = parseHelpOutput(helpText, commandName);
          completionData.commands[commandName] = commandInfo;
        }
      } catch (error) {
        console.error(`❌ Failed to parse ${commandName}:`, error);
      }
    }
  }

  // Ensure completions directory exists
  const completionsDir = join(process.cwd(), "completions");
  try {
    mkdirSync(completionsDir, { recursive: true });
  } catch (error) {
    // Directory might already exist
  }

  // Write the JSON file
  const outputPath = join(completionsDir, "bun-cli.json");
  const jsonData = JSON.stringify(completionData, null, 2);

  writeFileSync(outputPath, jsonData, "utf8");

  console.log(`✅ Generated CLI completion data at: ${outputPath}`);
  console.log(`📊 Statistics:`);
  console.log(`  - Commands: ${Object.keys(completionData.commands).length}`);
  console.log(`  - Global flags: ${completionData.globalFlags.length}`);

  let totalFlags = 0;
  let totalExamples = 0;
  let totalSubcommands = 0;
  for (const [name, cmd] of Object.entries(completionData.commands)) {
    totalFlags += cmd.flags.length;
    totalExamples += cmd.examples.length;
    const subcommandCount = cmd.subcommands ? Object.keys(cmd.subcommands).length : 0;
    totalSubcommands += subcommandCount;

    const aliasInfo = cmd.aliases ? ` (aliases: ${cmd.aliases.join(", ")})` : "";
    const subcommandInfo = subcommandCount > 0 ? `, ${subcommandCount} subcommands` : "";
    const dynamicInfo = cmd.dynamicCompletions ? ` [dynamic: ${Object.keys(cmd.dynamicCompletions).join(", ")}]` : "";

    console.log(
      `  - ${name}${aliasInfo}: ${cmd.flags.length} flags, ${cmd.positionalArgs.length} positional args, ${cmd.examples.length} examples${subcommandInfo}${dynamicInfo}`,
    );
  }

  console.log(`  - Total command flags: ${totalFlags}`);
  console.log(`  - Total examples: ${totalExamples}`);
  console.log(`  - Total subcommands: ${totalSubcommands}`);
}

// Run the script
if (import.meta.main) {
  generateCompletions().catch(console.error);
}
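For context, a minimal sketch of how the generated `completions/bun-cli.json` could be consumed. The top-level `commands`/`globalFlags` fields follow the statistics code above; the per-flag shape (`{ name: string }`) and the `suggestFlags` helper are assumptions for illustration only.

```ts
// Sketch: load the generated completion data and suggest flags for a command.
import { join } from "node:path";

type CommandInfo = { flags: { name: string }[]; aliases?: string[] }; // assumed shape

const data = await Bun.file(join(process.cwd(), "completions", "bun-cli.json")).json();

function suggestFlags(command: string, prefix: string): string[] {
  const cmd: CommandInfo | undefined = data.commands[command];
  if (!cmd) return [];
  return cmd.flags.map(f => f.name).filter(name => name.startsWith(prefix));
}

console.log(suggestFlags("install", "--d"));
```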
@@ -1,121 +0,0 @@
# LLDB Pretty Printers for Bun

This directory contains LLDB pretty printers for various Bun data structures to improve the debugging experience.

## Files

- `bun_pretty_printer.py` - Pretty printers for Bun-specific types (bun.String, WTFStringImpl, ZigString, BabyList, etc.)
- `lldb_pretty_printers.py` - Pretty printers for Zig language types from the Zig project
- `lldb_webkit.py` - Pretty printers for WebKit/JavaScriptCore types
- `init.lldb` - LLDB initialization commands

## Supported Types

### bun.String Types
- `bun.String` (or just `String`) - The main Bun string type
- `WTFStringImpl` - WebKit string implementation (Latin1/UTF16)
- `ZigString` - Zig string type (UTF8/Latin1/UTF16 with pointer tagging)

### Display Format

The pretty printers show string content directly, with additional metadata:

```
# bun.String examples:
"Hello, World!" [latin1]          # Regular ZigString
"UTF-8 String 🎉" [utf8]          # UTF-8 encoded
"Static content" [latin1 static]  # Static string
""                                # Empty string
<dead>                            # Dead/invalid string

# WTFStringImpl examples:
"WebKit String"                   # Shows the actual string content

# ZigString examples:
"Some text" [utf16 global]        # UTF16 globally allocated
"ASCII text" [latin1]             # Latin1 encoded
```

## Usage

### Option 1: Manual Loading
In your LLDB session:
```lldb
command script import /path/to/bun/misctools/lldb/bun_pretty_printer.py
```

### Option 2: Add to ~/.lldbinit
Add the following line to your `~/.lldbinit` file to load automatically:
```lldb
command script import /path/to/bun/misctools/lldb/bun_pretty_printer.py
```

### Option 3: Use init.lldb
```lldb
command source /path/to/bun/misctools/lldb/init.lldb
```

## Testing

To test the pretty printers:

1. Build a debug version of Bun:
```bash
bun bd
```

2. Create a test file that uses bun.String types

3. Debug with LLDB:
```bash
lldb ./build/debug/bun-debug
(lldb) command script import misctools/lldb/bun_pretty_printer.py
(lldb) breakpoint set --file your_test.zig --line <line_number>
(lldb) run your_test.zig
(lldb) frame variable
```

## Implementation Details

### ZigString Pointer Tagging
ZigString uses pointer tagging in the upper bits:
- Bit 63: 1 = UTF16, 0 = UTF8/Latin1
- Bit 62: 1 = Globally allocated (mimalloc)
- Bit 61: 1 = UTF8 encoding

The pretty printer automatically detects and handles these tags.
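To make the bit layout concrete, here is a small sketch (in TypeScript, mirroring the Python logic further down) of decoding those tag bits; the 53-bit address mask matches the untagging step the pretty printer uses:

```ts
// Decode the ZigString tag bits described above (bits 61-63 of the pointer).
function decodeZigStringPtr(tagged: bigint) {
  const isUtf16 = (tagged & (1n << 63n)) !== 0n;  // bit 63: UTF16
  const isGlobal = (tagged & (1n << 62n)) !== 0n; // bit 62: globally allocated
  const isUtf8 = (tagged & (1n << 61n)) !== 0n;   // bit 61: UTF8
  const address = tagged & ((1n << 53n) - 1n);    // same mask the pretty printer uses
  const encoding = isUtf16 ? "utf16" : isUtf8 ? "utf8" : "latin1";
  return { encoding, isGlobal, address };
}
```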

### WTFStringImpl Encoding
WTFStringImpl uses flags in `m_hashAndFlags`:
- Bit 2 (s_hashFlag8BitBuffer): 1 = Latin1, 0 = UTF16

### bun.String Tag Union
bun.String is a tagged union with these variants:
- Dead (0): Invalid/freed string
- WTFStringImpl (1): WebKit string
- ZigString (2): Regular Zig string
- StaticZigString (3): Static/immortal string
- Empty (4): Empty string ""

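A matching sketch for the two layouts above (the tag values and the 8-bit flag; illustrative only, not part of the codebase):

```ts
// bun.String tag values, in order, as listed above.
const BUN_STRING_TAGS = ["Dead", "WTFStringImpl", "ZigString", "StaticZigString", "Empty"] as const;

// WTFStringImpl: bit 2 of m_hashAndFlags selects Latin1 vs UTF16.
const s_hashFlag8BitBuffer = 1 << 2;
function wtfEncoding(hashAndFlags: number): "latin1" | "utf16" {
  return (hashAndFlags & s_hashFlag8BitBuffer) !== 0 ? "latin1" : "utf16";
}
```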
## Troubleshooting

If the pretty printers don't work:

1. Verify the Python script loaded:
```lldb
(lldb) script print("Python works")
```

2. Check if the category is enabled:
```lldb
(lldb) type category list
```

3. Enable the Bun category manually:
```lldb
(lldb) type category enable bun
```

4. For debugging the pretty printer itself, check for exceptions:
- The pretty printers catch all exceptions and return `<error>`
- Modify the code to print exceptions for debugging
@@ -10,8 +10,8 @@ class bun_BabyList_SynthProvider:

        try:
            self.ptr = self.value.GetChildMemberWithName('ptr')
            self.len = self.value.GetChildMemberWithName('len').GetValueAsUnsigned()
            self.cap = self.value.GetChildMemberWithName('cap').GetValueAsUnsigned()
            self.len = self.value.GetChildMemberWithName('len').unsigned
            self.cap = self.value.GetChildMemberWithName('cap').unsigned
            self.elem_type = self.ptr.type.GetPointeeType()
            self.elem_size = self.elem_type.size
        except:
@@ -46,7 +46,7 @@ def bun_BabyList_SummaryProvider(value, _=None):
        value = value.GetNonSyntheticValue()
        len_val = value.GetChildMemberWithName('len')
        cap_val = value.GetChildMemberWithName('cap')
        return 'len=%d cap=%d' % (len_val.GetValueAsUnsigned(), cap_val.GetValueAsUnsigned())
        return 'len=%d cap=%d' % (len_val.unsigned, cap_val.unsigned)
    except:
        return 'len=? cap=?'

@@ -67,241 +67,6 @@ def add(debugger, *, category, regex=False, type, identifier=None, synth=False,
        type
    ))

def WTFStringImpl_SummaryProvider(value, _=None):
    try:
        # Get the raw pointer (it's already a pointer type)
        value = value.GetNonSyntheticValue()

        # Check if it's a pointer type and dereference if needed
        if value.type.IsPointerType():
            struct = value.deref
        else:
            struct = value

        m_length = struct.GetChildMemberWithName('m_length').GetValueAsUnsigned()
        m_hashAndFlags = struct.GetChildMemberWithName('m_hashAndFlags').GetValueAsUnsigned()
        m_ptr = struct.GetChildMemberWithName('m_ptr')

        # Check if it's 8-bit (latin1) or 16-bit (utf16) string
        s_hashFlag8BitBuffer = 1 << 2
        is_8bit = (m_hashAndFlags & s_hashFlag8BitBuffer) != 0

        if m_length == 0:
            return '[%s] ""' % ('latin1' if is_8bit else 'utf16')

        # Limit memory reads to 1MB for performance
        MAX_BYTES = 1024 * 1024  # 1MB
        MAX_DISPLAY_CHARS = 200  # Maximum characters to display

        # Calculate how much to read
        bytes_per_char = 1 if is_8bit else 2
        total_bytes = m_length * bytes_per_char
        truncated = False

        if total_bytes > MAX_BYTES:
            # Read only first part of very large strings
            chars_to_read = MAX_BYTES // bytes_per_char
            bytes_to_read = chars_to_read * bytes_per_char
            truncated = True
        else:
            chars_to_read = m_length
            bytes_to_read = total_bytes

        if is_8bit:
            # Latin1 string
            latin1_ptr = m_ptr.GetChildMemberWithName('latin1')
            process = value.process
            error = lldb.SBError()
            ptr_addr = latin1_ptr.GetValueAsUnsigned()
            if ptr_addr:
                byte_data = process.ReadMemory(ptr_addr, min(chars_to_read, m_length), error)
                if error.Success():
                    string_val = byte_data.decode('latin1', errors='replace')
                else:
                    return '[latin1] <read error: %s>' % error
            else:
                return '[latin1] <null ptr>'
        else:
            # UTF16 string
            utf16_ptr = m_ptr.GetChildMemberWithName('utf16')
            process = value.process
            error = lldb.SBError()
            ptr_addr = utf16_ptr.GetValueAsUnsigned()
            if ptr_addr:
                byte_data = process.ReadMemory(ptr_addr, bytes_to_read, error)
                if error.Success():
                    # Properly decode UTF16LE to string
                    string_val = byte_data.decode('utf-16le', errors='replace')
                else:
                    return '[utf16] <read error: %s>' % error
            else:
                return '[utf16] <null ptr>'

        # Escape special characters
        string_val = string_val.replace('\\', '\\\\')
        string_val = string_val.replace('"', '\\"')
        string_val = string_val.replace('\n', '\\n')
        string_val = string_val.replace('\r', '\\r')
        string_val = string_val.replace('\t', '\\t')

        # Truncate display if too long
        display_truncated = truncated or len(string_val) > MAX_DISPLAY_CHARS
        if len(string_val) > MAX_DISPLAY_CHARS:
            string_val = string_val[:MAX_DISPLAY_CHARS]

        # Add encoding and size info at the beginning
        encoding = 'latin1' if is_8bit else 'utf16'

        if display_truncated:
            size_info = ' %d chars' % m_length
            if total_bytes >= 1024 * 1024:
                size_info += ' (%.1fMB)' % (total_bytes / (1024.0 * 1024.0))
            elif total_bytes >= 1024:
                size_info += ' (%.1fKB)' % (total_bytes / 1024.0)
            return '[%s%s] "%s..." <truncated>' % (encoding, size_info, string_val)
        else:
            return '[%s] "%s"' % (encoding, string_val)
    except:
        return '<error>'

def ZigString_SummaryProvider(value, _=None):
    try:
        value = value.GetNonSyntheticValue()

        ptr = value.GetChildMemberWithName('_unsafe_ptr_do_not_use').GetValueAsUnsigned()
        length = value.GetChildMemberWithName('len').GetValueAsUnsigned()

        # Check encoding flags
        is_16bit = (ptr & (1 << 63)) != 0
        is_utf8 = (ptr & (1 << 61)) != 0
        is_global = (ptr & (1 << 62)) != 0

        # Determine encoding
        encoding = 'utf16' if is_16bit else ('utf8' if is_utf8 else 'latin1')
        flags = ' global' if is_global else ''

        if length == 0:
            return '[%s%s] ""' % (encoding, flags)

        # Untag the pointer (keep only the lower 53 bits)
        untagged_ptr = ptr & ((1 << 53) - 1)

        # Limit memory reads to 1MB for performance
        MAX_BYTES = 1024 * 1024  # 1MB
        MAX_DISPLAY_CHARS = 200  # Maximum characters to display

        # Calculate how much to read
        bytes_per_char = 2 if is_16bit else 1
        total_bytes = length * bytes_per_char
        truncated = False

        if total_bytes > MAX_BYTES:
            # Read only first part of very large strings
            chars_to_read = MAX_BYTES // bytes_per_char
            bytes_to_read = chars_to_read * bytes_per_char
            truncated = True
        else:
            bytes_to_read = total_bytes

        # Read the string data
        process = value.process
        error = lldb.SBError()

        byte_data = process.ReadMemory(untagged_ptr, bytes_to_read, error)
        if not error.Success():
            return '[%s%s] <read error>' % (encoding, flags)

        # Decode based on encoding
        if is_16bit:
            string_val = byte_data.decode('utf-16le', errors='replace')
        elif is_utf8:
            string_val = byte_data.decode('utf-8', errors='replace')
        else:
            string_val = byte_data.decode('latin1', errors='replace')

        # Escape special characters
        string_val = string_val.replace('\\', '\\\\')
        string_val = string_val.replace('"', '\\"')
        string_val = string_val.replace('\n', '\\n')
        string_val = string_val.replace('\r', '\\r')
        string_val = string_val.replace('\t', '\\t')

        # Truncate display if too long
        display_truncated = truncated or len(string_val) > MAX_DISPLAY_CHARS
        if len(string_val) > MAX_DISPLAY_CHARS:
            string_val = string_val[:MAX_DISPLAY_CHARS]

        # Build the output
        if display_truncated:
            size_info = ' %d chars' % length
            if total_bytes >= 1024 * 1024:
                size_info += ' (%.1fMB)' % (total_bytes / (1024.0 * 1024.0))
            elif total_bytes >= 1024:
                size_info += ' (%.1fKB)' % (total_bytes / 1024.0)
            return '[%s%s%s] "%s..." <truncated>' % (encoding, flags, size_info, string_val)
        else:
            return '[%s%s] "%s"' % (encoding, flags, string_val)
    except:
        return '<error>'

def bun_String_SummaryProvider(value, _=None):
    try:
        value = value.GetNonSyntheticValue()

        # Debug: Show the actual type name LLDB sees
        type_name = value.GetTypeName()

        tag = value.GetChildMemberWithName('tag')
        if not tag or not tag.IsValid():
            # Try alternate field names
            tag = value.GetChildMemberWithName('Tag')
            if not tag or not tag.IsValid():
                # Show type name to help debug
                return '<no tag field in type: %s>' % type_name

        tag_value = tag.GetValueAsUnsigned()

        # Map tag values to names
        tag_names = {
            0: 'Dead',
            1: 'WTFStringImpl',
            2: 'ZigString',
            3: 'StaticZigString',
            4: 'Empty'
        }

        tag_name = tag_names.get(tag_value, 'Unknown')

        if tag_name == 'Empty':
            return '""'
        elif tag_name == 'Dead':
            return '<dead>'
        elif tag_name == 'WTFStringImpl':
            value_union = value.GetChildMemberWithName('value')
            if not value_union or not value_union.IsValid():
                return '<no value field>'
            impl_value = value_union.GetChildMemberWithName('WTFStringImpl')
            if not impl_value or not impl_value.IsValid():
                return '<no WTFStringImpl field>'
            return WTFStringImpl_SummaryProvider(impl_value, _)
        elif tag_name == 'ZigString' or tag_name == 'StaticZigString':
            value_union = value.GetChildMemberWithName('value')
            if not value_union or not value_union.IsValid():
                return '<no value field>'
            field_name = 'ZigString' if tag_name == 'ZigString' else 'StaticZigString'
            zig_string_value = value_union.GetChildMemberWithName(field_name)
            if not zig_string_value or not zig_string_value.IsValid():
                return '<no %s field>' % field_name
            result = ZigString_SummaryProvider(zig_string_value, _)
            # Add static marker if needed
            if tag_name == 'StaticZigString':
                result = result.replace(']', ' static]')
            return result
        else:
            return '<unknown tag %d>' % tag_value
    except Exception as e:
        return '<error: %s>' % str(e)

def __lldb_init_module(debugger, _=None):
    # Initialize Bun Category
    debugger.HandleCommand('type category define --language c99 bun')
@@ -309,30 +74,5 @@ def __lldb_init_module(debugger, _=None):
    # Initialize Bun Data Structures
    add(debugger, category='bun', regex=True, type='^baby_list\\.BabyList\\(.*\\)$', identifier='bun_BabyList', synth=True, expand=True, summary=True)

    # Add WTFStringImpl pretty printer - try multiple possible type names
    add(debugger, category='bun', type='WTFStringImpl', identifier='WTFStringImpl', summary=True)
    add(debugger, category='bun', type='*WTFStringImplStruct', identifier='WTFStringImpl', summary=True)
    add(debugger, category='bun', type='string.WTFStringImpl', identifier='WTFStringImpl', summary=True)
    add(debugger, category='bun', type='string.WTFStringImplStruct', identifier='WTFStringImpl', summary=True)
    add(debugger, category='bun', type='*string.WTFStringImplStruct', identifier='WTFStringImpl', summary=True)

    # Add ZigString pretty printer - try multiple possible type names
    add(debugger, category='bun', type='ZigString', identifier='ZigString', summary=True)
    add(debugger, category='bun', type='bun.js.bindings.ZigString', identifier='ZigString', summary=True)
    add(debugger, category='bun', type='bindings.ZigString', identifier='ZigString', summary=True)

    # Add bun.String pretty printer - try multiple possible type names
    add(debugger, category='bun', type='String', identifier='bun_String', summary=True)
    add(debugger, category='bun', type='bun.String', identifier='bun_String', summary=True)
    add(debugger, category='bun', type='string.String', identifier='bun_String', summary=True)
    add(debugger, category='bun', type='BunString', identifier='bun_String', summary=True)
    add(debugger, category='bun', type='bun::String', identifier='bun_String', summary=True)
    add(debugger, category='bun', type='bun::string::String', identifier='bun_String', summary=True)

    # Try regex patterns for more flexible matching
    add(debugger, category='bun', regex=True, type='.*String$', identifier='bun_String', summary=True)
    add(debugger, category='bun', regex=True, type='.*WTFStringImpl.*', identifier='WTFStringImpl', summary=True)
    add(debugger, category='bun', regex=True, type='.*ZigString.*', identifier='ZigString', summary=True)

    # Enable the category
    debugger.HandleCommand('type category enable bun')
14
package.json
@@ -1,16 +1,15 @@
{
  "private": true,
  "name": "bun",
  "version": "1.2.21",
  "version": "1.2.20",
  "workspaces": [
    "./packages/bun-types",
    "./packages/@types/bun"
  ],
  "devDependencies": {
    "bun-tracestrings": "github:oven-sh/bun.report#912ca63e26c51429d3e6799aa2a6ab079b188fd8",
    "@lezer/common": "^1.2.3",
    "@lezer/cpp": "^1.1.3",
    "@types/bun": "workspace:*",
    "bun-tracestrings": "github:oven-sh/bun.report#912ca63e26c51429d3e6799aa2a6ab079b188fd8",
    "esbuild": "^0.21.4",
    "mitata": "^0.1.11",
    "peechy": "0.4.34",
@@ -49,9 +48,6 @@
    "css-properties": "bun run src/css/properties/generate_properties.ts",
    "uv-posix-stubs": "bun run src/bun.js/bindings/libuv/generate_uv_posix_stubs.ts",
    "bump": "bun ./scripts/bump.ts",
    "jsc:build": "bun ./scripts/build-jsc.ts release",
    "jsc:build:debug": "bun ./scripts/build-jsc.ts debug",
    "jsc:build:lto": "bun ./scripts/build-jsc.ts lto",
    "typecheck": "tsc --noEmit && cd test && bun run typecheck",
    "fmt": "bun run prettier",
    "fmt:cpp": "bun run clang-format",
@@ -72,9 +68,9 @@
    "zig:check-windows": "bun run zig build check-windows --summary new",
    "analysis": "bun ./scripts/build.mjs -DCMAKE_BUILD_TYPE=Debug -DENABLE_ANALYSIS=ON -DENABLE_CCACHE=OFF -B build/analysis",
    "analysis:no-llvm": "bun run analysis -DENABLE_LLVM=OFF",
    "clang-format": "./scripts/run-clang-format.sh format",
    "clang-format:check": "./scripts/run-clang-format.sh check",
    "clang-format:diff": "./scripts/run-clang-format.sh diff",
    "clang-format": "bun run analysis --target clang-format",
    "clang-format:check": "bun run analysis --target clang-format-check",
    "clang-format:diff": "bun run analysis --target clang-format-diff",
    "clang-tidy": "bun run analysis --target clang-tidy",
    "clang-tidy:check": "bun run analysis --target clang-tidy-check",
    "clang-tidy:diff": "bun run analysis --target clang-tidy-diff",

834
packages/bun-types/bun.d.ts
vendored
@@ -14,6 +14,7 @@
 * This module aliases `globalThis.Bun`.
 */
declare module "bun" {
  type DistributedOmit<T, K extends PropertyKey> = T extends T ? Omit<T, K> : never;
  type PathLike = string | NodeJS.TypedArray | ArrayBufferLike | URL;
  type ArrayBufferView<TArrayBuffer extends ArrayBufferLike = ArrayBufferLike> =
    | NodeJS.TypedArray<TArrayBuffer>
@@ -67,31 +68,39 @@ declare module "bun" {
      ? T
      : Otherwise // Not defined in lib dom (or anywhere else), so no conflict. We can safely use our own definition
      : Otherwise; // Lib dom not loaded anyway, so no conflict. We can safely use our own definition

    /**
     * Like Omit, but correctly distributes over unions. Most useful for removing
     * properties from union options objects, like {@link Bun.SQL.Options}
     *
     * @example
     * ```ts
     * type X = Bun.DistributedOmit<{type?: 'a', url?: string} | {type?: 'b', flag?: boolean}, "url">
     * // `{type?: 'a'} | {type?: 'b', flag?: boolean}` (Omit applied to each union item instead of entire type)
     *
     * type X = Omit<{type?: 'a', url?: string} | {type?: 'b', flag?: boolean}, "url">;
     * // `{type?: "a" | "b" | undefined}` (Missing `flag` property and no longer a union)
     * ```
     */
    type DistributedOmit<T, K extends PropertyKey> = T extends T ? Omit<T, K> : never;

    type KeysInBoth<A, B> = Extract<keyof A, keyof B>;
    type MergeInner<A, B> = Omit<A, KeysInBoth<A, B>> &
      Omit<B, KeysInBoth<A, B>> & {
        [Key in KeysInBoth<A, B>]: A[Key] | B[Key];
      };
    type Merge<A, B> = MergeInner<A, B> & MergeInner<B, A>;
    type DistributedMerge<T, Else = T> = T extends T ? Merge<T, Exclude<Else, T>> : never;
  }

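To illustrate what these internal helpers compute (a sketch; the type names `A`, `B`, and `U` are made up for the example):

```ts
type A = { kind: "a"; x: number };
type B = { kind: "b"; y: string };

// Merge keeps keys unique to each side and widens shared keys to a union:
type M = Merge<A, B>; // { x: number; y: string; kind: "a" | "b" }

// DistributedMerge applies Merge pairwise across the members of a union:
type U = DistributedMerge<A | B>; // each member merged with the other member(s)
```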

  /** @deprecated This type is unused in Bun's types and might be removed in the near future */
  type Platform =
    | "aix"
    | "android"
    | "darwin"
    | "freebsd"
    | "haiku"
    | "linux"
    | "openbsd"
    | "sunos"
    | "win32"
    | "cygwin"
    | "netbsd";

  /** @deprecated This type is unused in Bun's types and might be removed in the near future */
  type Architecture = "arm" | "arm64" | "ia32" | "mips" | "mipsel" | "ppc" | "ppc64" | "s390" | "s390x" | "x64";

  /** @deprecated This type is unused in Bun's types and might be removed in the near future */
  type UncaughtExceptionListener = (error: Error, origin: UncaughtExceptionOrigin) => void;

  /**
   * Most of the time the unhandledRejection will be an Error, but this should not be relied upon
   * as *anything* can be thrown/rejected, it is therefore unsafe to assume that the value is an Error.
   *
   * @deprecated This type is unused in Bun's types and might be removed in the near future
   */
  type UnhandledRejectionListener = (reason: unknown, promise: Promise<unknown>) => void;

  /** @deprecated This type is unused in Bun's types and might be removed in the near future */
  type MultipleResolveListener = (type: MultipleResolveType, promise: Promise<unknown>, value: unknown) => void;

  interface ErrorEventInit extends EventInit {
    colno?: number;
    error?: any;
@@ -587,23 +596,6 @@ declare module "bun" {
    options?: StringWidthOptions,
  ): number;

  /**
   * Remove ANSI escape codes from a string.
   *
   * @category Utilities
   *
   * @param input The string to remove ANSI escape codes from.
   * @returns The string with ANSI escape codes removed.
   *
   * @example
   * ```ts
   * import { stripANSI } from "bun";
   *
   * console.log(stripANSI("\u001b[31mhello\u001b[39m")); // "hello"
   * ```
   */
  function stripANSI(input: string): string;

  /**
   * TOML related APIs
   */
@@ -1267,6 +1259,678 @@ declare module "bun" {
    stat(): Promise<import("node:fs").Stats>;
  }

  namespace SQL {
    type AwaitPromisesArray<T extends Array<PromiseLike<any>>> = {
      [K in keyof T]: Awaited<T[K]>;
    };

    type ContextCallbackResult<T> = T extends Array<PromiseLike<any>> ? AwaitPromisesArray<T> : Awaited<T>;
    type ContextCallback<T, SQL> = (sql: SQL) => Promise<T>;

    /**
     * Configuration options for SQL client connection and behavior
     *
     * @example
     * ```ts
     * const config: Bun.SQL.Options = {
     *   host: 'localhost',
     *   port: 5432,
     *   user: 'dbuser',
     *   password: 'secretpass',
     *   database: 'myapp',
     *   idleTimeout: 30,
     *   max: 20,
     *   onconnect: (client) => {
     *     console.log('Connected to database');
     *   }
     * };
     * ```
     */
    interface Options {
      /**
       * Connection URL (can be string or URL object)
       */
      url?: URL | string | undefined;

      /**
       * Database server hostname
       * @default "localhost"
       */
      host?: string | undefined;

      /**
       * Database server hostname (alias for host)
       * @deprecated Prefer {@link host}
       * @default "localhost"
       */
      hostname?: string | undefined;

      /**
       * Database server port number
       * @default 5432
       */
      port?: number | string | undefined;

      /**
       * Database user for authentication
       * @default "postgres"
       */
      username?: string | undefined;

      /**
       * Database user for authentication (alias for username)
       * @deprecated Prefer {@link username}
       * @default "postgres"
       */
      user?: string | undefined;

      /**
       * Database password for authentication
       * @default ""
       */
      password?: string | (() => MaybePromise<string>) | undefined;

      /**
       * Database password for authentication (alias for password)
       * @deprecated Prefer {@link password}
       * @default ""
       */
      pass?: string | (() => MaybePromise<string>) | undefined;

      /**
       * Name of the database to connect to
       * @default The username value
       */
      database?: string | undefined;

      /**
       * Name of the database to connect to (alias for database)
       * @deprecated Prefer {@link database}
       * @default The username value
       */
      db?: string | undefined;

      /**
       * Database adapter/driver to use
       * @default "postgres"
       */
      adapter?: "postgres" /*| "sqlite" | "mysql"*/ | (string & {}) | undefined;

      /**
       * Maximum time in seconds to wait for connection to become available
       * @default 0 (no timeout)
       */
      idleTimeout?: number | undefined;

      /**
       * Maximum time in seconds to wait for connection to become available (alias for idleTimeout)
       * @deprecated Prefer {@link idleTimeout}
       * @default 0 (no timeout)
       */
      idle_timeout?: number | undefined;

      /**
       * Maximum time in seconds to wait when establishing a connection
       * @default 30
       */
      connectionTimeout?: number | undefined;

      /**
       * Maximum time in seconds to wait when establishing a connection (alias for connectionTimeout)
       * @deprecated Prefer {@link connectionTimeout}
       * @default 30
       */
      connection_timeout?: number | undefined;

      /**
       * Maximum time in seconds to wait when establishing a connection (alias for connectionTimeout)
       * @deprecated Prefer {@link connectionTimeout}
       * @default 30
       */
      connectTimeout?: number | undefined;

      /**
       * Maximum time in seconds to wait when establishing a connection (alias for connectionTimeout)
       * @deprecated Prefer {@link connectionTimeout}
       * @default 30
       */
      connect_timeout?: number | undefined;

      /**
       * Maximum lifetime in seconds of a connection
       * @default 0 (no maximum lifetime)
       */
      maxLifetime?: number | undefined;

      /**
       * Maximum lifetime in seconds of a connection (alias for maxLifetime)
       * @deprecated Prefer {@link maxLifetime}
       * @default 0 (no maximum lifetime)
       */
      max_lifetime?: number | undefined;

      /**
       * Whether to use TLS/SSL for the connection
       * @default false
       */
      tls?: TLSOptions | boolean | undefined;

      /**
       * Whether to use TLS/SSL for the connection (alias for tls)
       * @default false
       */
      ssl?: TLSOptions | boolean | undefined;

      // `.path` is currently unsupported in Bun, the implementation is incomplete.
      //
      // /**
      //  * Unix domain socket path for connection
      //  * @default ""
      //  */
      // path?: string | undefined;

      /**
       * Callback function executed when a connection is established
       */
      onconnect?: ((client: SQL) => void) | undefined;

      /**
       * Callback function executed when a connection is closed
       */
      onclose?: ((client: SQL) => void) | undefined;

      /**
       * Postgres client runtime configuration options
       *
       * @see https://www.postgresql.org/docs/current/runtime-config-client.html
       */
      connection?: Record<string, string | boolean | number> | undefined;

      /**
       * Maximum number of connections in the pool
       * @default 10
       */
      max?: number | undefined;

      /**
       * By default values outside i32 range are returned as strings. If this is true, values outside i32 range are returned as BigInts.
       * @default false
       */
      bigint?: boolean | undefined;

      /**
       * Automatic creation of prepared statements
       * @default true
       */
      prepare?: boolean | undefined;
    }

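For example, the `bigint` option above changes how out-of-i32-range integers come back (a sketch; the connection string is a placeholder):

```ts
import { SQL } from "bun";

const sql = new SQL("postgres://localhost:5432/mydb", { bigint: true });
const [row] = await sql`select 9223372036854775807::int8 as big`;
// With bigint: true this is a BigInt; with the default it would be a string.
console.log(typeof row.big); // "bigint"
```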
    /**
     * Represents a SQL query that can be executed, with additional control methods
     * Extends Promise to allow for async/await usage
     */
    interface Query<T> extends Promise<T> {
      /**
       * Indicates if the query is currently executing
       */
      active: boolean;

      /**
       * Indicates if the query has been cancelled
       */
      cancelled: boolean;

      /**
       * Cancels the executing query
       */
      cancel(): Query<T>;

      /**
       * Executes the query as a simple query, no parameters are allowed but can execute multiple commands separated by semicolons
       */
      simple(): Query<T>;

      /**
       * Executes the query
       */
      execute(): Query<T>;

      /**
       * Returns the raw query result
       */
      raw(): Query<T>;

      /**
       * Returns only the values from the query result
       */
      values(): Query<T>;
    }
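A short sketch of how these `Query` controls compose (the table name is a placeholder):

```ts
import { sql } from "bun";

// A query is a Promise with extra controls; it can be cancelled before it settles.
const pending = sql`select id, name from users`;
pending.cancel();

// .values() returns rows as arrays of values instead of objects.
const rows = await sql`select id, name from users`.values();

// .simple() allows multiple semicolon-separated statements, but no parameters.
await sql`select 1; select 2`.simple();
```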

    /**
     * Callback function type for transaction contexts
     * @param sql Function to execute SQL queries within the transaction
     */
    type TransactionContextCallback<T> = ContextCallback<T, TransactionSQL>;

    /**
     * Callback function type for savepoint contexts
     * @param sql Function to execute SQL queries within the savepoint
     */
    type SavepointContextCallback<T> = ContextCallback<T, SavepointSQL>;

    /**
     * SQL.Helper represents a parameter or serializable
     * value inside of a query.
     *
     * @example
     * ```ts
     * const helper = sql(users, 'id');
     * await sql`insert into users ${helper}`;
     * ```
     */
    interface Helper<T> {
      readonly value: T[];
      readonly columns: (keyof T)[];
    }
  }

  /**
   * Main SQL client interface providing connection and transaction management
   */
  interface SQL extends AsyncDisposable {
    /**
     * Executes a SQL query using template literals
     * @example
     * ```ts
     * const [user] = await sql<Users[]>`select * from users where id = ${1}`;
     * ```
     */
    <T = any>(strings: TemplateStringsArray, ...values: unknown[]): SQL.Query<T>;

    /**
     * Execute a SQL query using a string
     *
     * @example
     * ```ts
     * const users = await sql<User[]>`SELECT * FROM users WHERE id = ${1}`;
     * ```
     */
    <T = any>(string: string): SQL.Query<T>;

    /**
     * Helper function for inserting an object into a query
     *
     * @example
     * ```ts
     * // Insert an object
     * const result = await sql`insert into users ${sql(users)} returning *`;
     *
     * // Or pick specific columns
     * const result = await sql`insert into users ${sql(users, "id", "name")} returning *`;
     *
     * // Or a single object
     * const result = await sql`insert into users ${sql(user)} returning *`;
     * ```
     */
    <T extends { [Key in PropertyKey]: unknown }>(obj: T | T[] | readonly T[]): SQL.Helper<T>;

    /**
     * Helper function for inserting an object into a query, supporting specific columns
     *
     * @example
     * ```ts
     * // Insert an object
     * const result = await sql`insert into users ${sql(users)} returning *`;
     *
     * // Or pick specific columns
     * const result = await sql`insert into users ${sql(users, "id", "name")} returning *`;
     *
     * // Or a single object
     * const result = await sql`insert into users ${sql(user)} returning *`;
     * ```
     */
    <T extends { [Key in PropertyKey]: unknown }, Keys extends keyof T = keyof T>(
      obj: T | T[] | readonly T[],
      ...columns: readonly Keys[]
    ): SQL.Helper<Pick<T, Keys>>;

    /**
     * Helper function for inserting any serializable value into a query
     *
     * @example
     * ```ts
     * const result = await sql`SELECT * FROM users WHERE id IN ${sql([1, 2, 3])}`;
     * ```
     */
    <T>(value: T): SQL.Helper<T>;

    /**
     * Commits a distributed transaction, also known as a prepared transaction in PostgreSQL or an XA transaction in MySQL
     *
     * @param name - The name of the distributed transaction
     *
     * @example
     * ```ts
     * await sql.commitDistributed("my_distributed_transaction");
     * ```
     */
    commitDistributed(name: string): Promise<void>;

    /**
     * Rolls back a distributed transaction, also known as a prepared transaction in PostgreSQL or an XA transaction in MySQL
     *
     * @param name - The name of the distributed transaction
     *
     * @example
     * ```ts
     * await sql.rollbackDistributed("my_distributed_transaction");
     * ```
     */
    rollbackDistributed(name: string): Promise<void>;

    /** Waits for the database connection to be established
     *
     * @example
     * ```ts
     * await sql.connect();
     * ```
     */
    connect(): Promise<SQL>;

    /**
     * Closes the database connection with an optional timeout in seconds. If timeout is 0, it will close immediately; if it is not provided, it will wait for all queries to finish before closing.
     *
     * @param options - The options for the close
     *
     * @example
     * ```ts
     * await sql.close({ timeout: 1 });
     * ```
     */
    close(options?: { timeout?: number }): Promise<void>;

    /**
     * Closes the database connection with an optional timeout in seconds. If timeout is 0, it will close immediately; if it is not provided, it will wait for all queries to finish before closing.
     * This is an alias of {@link SQL.close}
     *
     * @param options - The options for the close
     *
     * @example
     * ```ts
     * await sql.end({ timeout: 1 });
     * ```
     */
    end(options?: { timeout?: number }): Promise<void>;

    /**
     * Flushes any pending operations
     *
     * @example
     * ```ts
     * sql.flush();
     * ```
     */
    flush(): void;

    /**
     * The reserve method pulls out a connection from the pool, and returns a client that wraps the single connection.
     *
     * This can be used for running queries on an isolated connection.
     * Calling reserve on an already reserved Sql will return a new reserved connection, not the same connection (behavior matches the postgres package).
     *
     * @example
     * ```ts
     * const reserved = await sql.reserve();
     * await reserved`select * from users`;
     * await reserved.release();
     * // in a production scenario it would look more like
     * const reserved = await sql.reserve();
     * try {
     *   // ... queries
     * } finally {
     *   await reserved.release();
     * }
     *
     * // Bun supports Symbol.dispose and Symbol.asyncDispose
     * {
     *   // always release after context (safer)
     *   using reserved = await sql.reserve()
     *   await reserved`select * from users`
     * }
     * ```
     */
    reserve(): Promise<ReservedSQL>;

    /**
     * Begins a new transaction.
     *
     * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. sql.begin will resolve with the returned value from the callback function.
     * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue.
     * @example
     * const [user, account] = await sql.begin(async sql => {
     *   const [user] = await sql`
     *     insert into users (
     *       name
     *     ) values (
     *       'Murray'
     *     )
     *     returning *
     *   `
     *   const [account] = await sql`
     *     insert into accounts (
     *       user_id
     *     ) values (
     *       ${ user.user_id }
     *     )
     *     returning *
     *   `
     *   return [user, account]
     * })
     */
    begin<const T>(fn: SQL.TransactionContextCallback<T>): Promise<SQL.ContextCallbackResult<T>>;

    /**
     * Begins a new transaction with options.
     *
     * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. sql.begin will resolve with the returned value from the callback function.
     * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue.
     * @example
     * const [user, account] = await sql.begin("read write", async sql => {
     *   const [user] = await sql`
     *     insert into users (
     *       name
     *     ) values (
     *       'Murray'
     *     )
     *     returning *
     *   `
     *   const [account] = await sql`
     *     insert into accounts (
     *       user_id
     *     ) values (
     *       ${ user.user_id }
     *     )
     *     returning *
     *   `
     *   return [user, account]
     * })
     */
    begin<const T>(options: string, fn: SQL.TransactionContextCallback<T>): Promise<SQL.ContextCallbackResult<T>>;

    /**
     * Alternative method to begin a transaction.
     *
     * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. sql.transaction will resolve with the returned value from the callback function.
     * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue.
     * @alias begin
     * @example
     * const [user, account] = await sql.transaction(async sql => {
     *   const [user] = await sql`
     *     insert into users (
     *       name
     *     ) values (
     *       'Murray'
     *     )
     *     returning *
     *   `
     *   const [account] = await sql`
     *     insert into accounts (
     *       user_id
     *     ) values (
     *       ${ user.user_id }
     *     )
     *     returning *
     *   `
     *   return [user, account]
     * })
     */
    transaction<const T>(fn: SQL.TransactionContextCallback<T>): Promise<SQL.ContextCallbackResult<T>>;

    /**
     * Alternative method to begin a transaction with options
     * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. sql.transaction will resolve with the returned value from the callback function.
     * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue.
     *
     * @alias {@link begin}
     *
     * @example
     * const [user, account] = await sql.transaction("read write", async sql => {
     *   const [user] = await sql`
     *     insert into users (
     *       name
     *     ) values (
     *       'Murray'
     *     )
     *     returning *
     *   `
     *   const [account] = await sql`
     *     insert into accounts (
     *       user_id
     *     ) values (
     *       ${ user.user_id }
     *     )
     *     returning *
     *   `
     *   return [user, account]
     * });
     */
    transaction<const T>(options: string, fn: SQL.TransactionContextCallback<T>): Promise<SQL.ContextCallbackResult<T>>;

    /**
     * Begins a distributed transaction.
     * Also known as Two-Phase Commit: in a distributed transaction, Phase 1 involves the coordinator preparing nodes by ensuring data is written and ready to commit, while Phase 2 finalizes with nodes committing or rolling back based on the coordinator's decision, ensuring durability and releasing locks.
     * In PostgreSQL and MySQL distributed transactions persist beyond the original session, allowing privileged users or coordinators to commit/rollback them, ensuring support for distributed transactions, recovery, and administrative tasks.
     * beginDistributed will automatically roll back if any exceptions are not caught, and you can commit or roll back later if everything goes well.
     * PostgreSQL natively supports distributed transactions using PREPARE TRANSACTION, while MySQL uses XA Transactions, and MSSQL also supports distributed/XA transactions. However, in MSSQL, distributed transactions are tied to the original session, the DTC coordinator, and the specific connection.
     * These transactions are automatically committed or rolled back following the same rules as regular transactions, with no option for manual intervention from other sessions; in MSSQL, distributed transactions are used to coordinate transactions using Linked Servers.
     *
     * @example
     * await sql.beginDistributed("numbers", async sql => {
     *   await sql`create table if not exists numbers (a int)`;
     *   await sql`insert into numbers values(1)`;
     * });
     * // later you can call
     * await sql.commitDistributed("numbers");
     * // or await sql.rollbackDistributed("numbers");
     */
    beginDistributed<const T>(
      name: string,
      fn: SQL.TransactionContextCallback<T>,
    ): Promise<SQL.ContextCallbackResult<T>>;

    /** Alternative method to begin a distributed transaction
     * @alias {@link beginDistributed}
     */
    distributed<const T>(name: string, fn: SQL.TransactionContextCallback<T>): Promise<SQL.ContextCallbackResult<T>>;

    /** If you know what you're doing, you can use unsafe to pass any string you'd like.
     * Please note that this can lead to SQL injection if you're not careful.
     * You can also nest sql.unsafe within a safe sql expression. This is useful if only part of your query has unsafe elements.
     * @example
     * const result = await sql.unsafe(`select ${danger} from users where id = ${dragons}`)
     */
    unsafe<T = any>(string: string, values?: any[]): SQL.Query<T>;

    /**
     * Reads a file and uses the contents as a query.
     * Optional parameters can be used if the file includes $1, $2, etc.
     * @example
     * const result = await sql.file("query.sql", [1, 2, 3]);
     */
    file<T = any>(filename: string, values?: any[]): SQL.Query<T>;

    /**
     * Current client options
     */
    options: SQL.Options;
  }

  const SQL: {
    /**
     * Creates a new SQL client instance
     *
     * @param connectionString - The connection string for the SQL client
     *
     * @example
     * ```ts
     * const sql = new SQL("postgres://localhost:5432/mydb");
     * const sql = new SQL(new URL("postgres://localhost:5432/mydb"));
     * ```
     */
    new (connectionString: string | URL): SQL;

    /**
     * Creates a new SQL client instance with options
     *
     * @param connectionString - The connection string for the SQL client
     * @param options - The options for the SQL client
     *
     * @example
     * ```ts
     * const sql = new SQL("postgres://localhost:5432/mydb", { idleTimeout: 1000 });
     * ```
     */
    new (connectionString: string | URL, options: Omit<SQL.Options, "url">): SQL;

    /**
     * Creates a new SQL client instance with options
     *
     * @param options - The options for the SQL client
     *
     * @example
     * ```ts
     * const sql = new SQL({ url: "postgres://localhost:5432/mydb", idleTimeout: 1000 });
     * ```
     */
    new (options?: SQL.Options): SQL;
  };

  /**
   * Represents a reserved connection from the connection pool
   * Extends SQL with additional release functionality
   */
  interface ReservedSQL extends SQL, Disposable {
    /**
     * Releases the client back to the connection pool
     */
    release(): void;
  }

  /**
   * Represents a client within a transaction context
   * Extends SQL with savepoint functionality
   */
  interface TransactionSQL extends SQL {
    /** Creates a savepoint within the current transaction */
    savepoint<T>(name: string, fn: SQLSavepointContextCallback<T>): Promise<T>;
    savepoint<T>(fn: SQLSavepointContextCallback<T>): Promise<T>;
  }

  /**
   * Represents a savepoint within a transaction
   */
  interface SavepointSQL extends SQL {}

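A sketch of the savepoint flow inside a transaction (table names are placeholders):

```ts
import { sql } from "bun";

await sql.begin(async tx => {
  await tx`insert into users (name) values ('Alice')`;

  // A failed savepoint rolls back only its own work, not the outer transaction.
  try {
    await tx.savepoint(async sp => {
      await sp`insert into audit_log (msg) values ('step 1')`;
      throw new Error("undo just this part");
    });
  } catch {}

  // The outer insert still commits here.
});
```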
  type CSRFAlgorithm = "blake2b256" | "blake2b512" | "sha256" | "sha384" | "sha512" | "sha512-256";

  interface CSRFGenerateOptions {
@@ -1314,6 +1978,16 @@ declare module "bun" {
    maxAge?: number;
  }

  /**
   * SQL client
   */
  const sql: SQL;

  /**
   * SQL client for PostgreSQL
   */
  const postgres: SQL;

  /**
   * Generate and verify CSRF tokens
   *
@@ -1628,24 +2302,12 @@ declare module "bun" {
    kind: ImportKind;
  }

  namespace _BunBuildInterface {
    type Architecture = "x64" | "arm64";
    type Libc = "glibc" | "musl";
    type SIMD = "baseline" | "modern";
    type Target =
      | `bun-darwin-${Architecture}`
      | `bun-darwin-x64-${SIMD}`
      | `bun-linux-${Architecture}`
      | `bun-linux-${Architecture}-${Libc}`
      | "bun-windows-x64"
      | `bun-windows-x64-${SIMD}`
      | `bun-linux-x64-${SIMD}-${Libc}`;
  }
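For illustration, the template literal types above accept strings such as these (a sketch; whether every combination is meaningful at build time is not established by this diff):

```ts
type T = _BunBuildInterface.Target;

const a: T = "bun-darwin-arm64";          // `bun-darwin-${Architecture}`
const b: T = "bun-linux-x64-musl";        // `bun-linux-${Architecture}-${Libc}`
const c: T = "bun-linux-x64-modern-musl"; // `bun-linux-x64-${SIMD}-${Libc}`
const d: T = "bun-windows-x64-baseline";  // `bun-windows-x64-${SIMD}`
```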
  /**
   * @see [Bun.build API docs](https://bun.com/docs/bundler#api)
   */
  interface BuildConfigBase {
  interface BuildConfig {
    entrypoints: string[]; // list of file path
    outdir?: string; // output directory
    /**
     * @default "browser"
     */
@@ -1683,6 +2345,7 @@ declare module "bun" {
      asset?: string;
    }; // | string;
    root?: string; // project root
    splitting?: boolean; // default true, enable code splitting
    plugins?: BunPlugin[];
    // manifest?: boolean; // whether to return manifest
    external?: string[];
@@ -1831,57 +2494,8 @@ declare module "bun" {
     * ```
     */
    tsconfig?: string;

    outdir?: string;
  }

  interface CompileBuildOptions {
    target?: _BunBuildInterface.Target;
    execArgv?: string[];
    executablePath?: string;
    outfile?: string;
    windows?: {
      hideConsole?: boolean;
      icon?: string;
      title?: string;
    };
  }

  // Compile build config - uses outfile for executable output
  interface CompileBuildConfig extends BuildConfigBase {
    /**
     * Create a standalone executable
     *
     * When `true`, creates an executable for the current platform.
     * When a target string, creates an executable for that platform.
     *
     * @example
     * ```ts
     * // Create executable for current platform
     * await Bun.build({
     *   entrypoints: ['./app.js'],
     *   compile: {
     *     target: 'linux-x64',
     *   },
     *   outfile: './my-app'
     * });
     *
     * // Cross-compile for Linux x64
     * await Bun.build({
     *   entrypoints: ['./app.js'],
     *   compile: 'linux-x64',
     *   outfile: './my-app'
     * });
     * ```
     */
    compile: boolean | _BunBuildInterface.Target | CompileBuildOptions;
  }

  /**
   * @see [Bun.build API docs](https://bun.com/docs/bundler#api)
   */
  type BuildConfig = BuildConfigBase | CompileBuildConfig;

  /**
   * Hash and verify passwords using argon2 or bcrypt
   *
@@ -3752,11 +4366,11 @@ declare module "bun" {
   * The type of options that can be passed to {@link serve}, with support for `routes` and a safer requirement for `fetch`
   */
  type ServeFunctionOptions<T, R extends { [K in keyof R]: RouterTypes.RouteValue<Extract<K, string>> }> =
    | (__internal.DistributedOmit<Exclude<Serve<T>, WebSocketServeOptions<T>>, "fetch"> & {
    | (DistributedOmit<Exclude<Serve<T>, WebSocketServeOptions<T>>, "fetch"> & {
        routes: R;
        fetch?: (this: Server, request: Request, server: Server) => Response | Promise<Response>;
      })
    | (__internal.DistributedOmit<Exclude<Serve<T>, WebSocketServeOptions<T>>, "routes"> & {
    | (DistributedOmit<Exclude<Serve<T>, WebSocketServeOptions<T>>, "routes"> & {
        routes?: never;
        fetch: (this: Server, request: Request, server: Server) => Response | Promise<Response>;
      })
@@ -6915,11 +7529,10 @@ declare module "bun" {
   * Internally, this uses [posix_spawn(2)](https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/posix_spawn.2.html)
   */
  function spawnSync<
    const In extends SpawnOptions.Writable = "ignore",
    const Out extends SpawnOptions.Readable = "pipe",
    const Err extends SpawnOptions.Readable = "pipe",
    const Err extends SpawnOptions.Readable = "inherit",
  >(
    options: SpawnOptions.OptionsObject<In, Out, Err> & {
    options: SpawnOptions.OptionsObject<"ignore", Out, Err> & {
      /**
       * The command to run
       *
@@ -6951,9 +7564,8 @@ declare module "bun" {
   * Internally, this uses [posix_spawn(2)](https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/posix_spawn.2.html)
   */
  function spawnSync<
    const In extends SpawnOptions.Writable = "ignore",
    const Out extends SpawnOptions.Readable = "pipe",
    const Err extends SpawnOptions.Readable = "pipe",
    const Err extends SpawnOptions.Readable = "inherit",
  >(
    /**
     * The command to run
@@ -6970,7 +7582,7 @@ declare module "bun" {
     * ```
     */
    cmds: string[],
    options?: SpawnOptions.OptionsObject<In, Out, Err>,
    options?: SpawnOptions.OptionsObject<"ignore", Out, Err>,
  ): SyncSubprocess<Out, Err>;

  /** Utility type for any process from {@link Bun.spawn()} with both stdout and stderr set to `"pipe"` */

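The net effect of this signature change, sketched under the new defaults shown above (stdin `"ignore"`, stdout `"pipe"`, stderr `"inherit"`):

```ts
// With the new defaults, stderr is written straight to the parent's terminal
// unless overridden, while stdout is still captured.
const result = Bun.spawnSync({ cmd: ["echo", "hello"] });
console.log(result.stdout.toString()); // "hello\n"

// Opt back into capturing stderr explicitly:
const captured = Bun.spawnSync({ cmd: ["ls", "does-not-exist"], stderr: "pipe" });
console.log(captured.stderr.toString());
```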
31
packages/bun-types/deprecated.d.ts
vendored
@@ -1,35 +1,4 @@
declare module "bun" {
  /** @deprecated This type is unused in Bun's types and might be removed in the near future */
  type Platform =
    | "aix"
    | "android"
    | "darwin"
    | "freebsd"
    | "haiku"
    | "linux"
    | "openbsd"
    | "sunos"
    | "win32"
    | "cygwin"
    | "netbsd";

  /** @deprecated This type is unused in Bun's types and might be removed in the near future */
  type Architecture = "arm" | "arm64" | "ia32" | "mips" | "mipsel" | "ppc" | "ppc64" | "s390" | "s390x" | "x64";

  /** @deprecated This type is unused in Bun's types and might be removed in the near future */
  type UncaughtExceptionListener = (error: Error, origin: UncaughtExceptionOrigin) => void;

  /**
   * Most of the time the unhandledRejection will be an Error, but this should not be relied upon
   * as *anything* can be thrown/rejected, it is therefore unsafe to assume that the value is an Error.
   *
   * @deprecated This type is unused in Bun's types and might be removed in the near future
   */
  type UnhandledRejectionListener = (reason: unknown, promise: Promise<unknown>) => void;

  /** @deprecated This type is unused in Bun's types and might be removed in the near future */
  type MultipleResolveListener = (type: MultipleResolveType, promise: Promise<unknown>, value: unknown) => void;

  /**
   * Consume all data from a {@link ReadableStream} until it closes or errors.
   *

19
packages/bun-types/globals.d.ts
vendored
@@ -1888,25 +1888,6 @@ interface BunFetchRequestInit extends RequestInit {
   * ```
   */
  unix?: string;

  /**
   * Control automatic decompression of the response body.
   * When set to `false`, the response body will not be automatically decompressed,
   * and the `Content-Encoding` header will be preserved. This can improve performance
   * when you need to handle compressed data manually or forward it as-is.
   * This is a custom property that is not part of the Fetch API specification.
   *
   * @default true
   * @example
   * ```js
   * // Disable automatic decompression for a proxy server
   * const response = await fetch("https://example.com/api", {
   *   decompress: false
   * });
   * // response.headers.get('content-encoding') might be 'gzip' or 'br'
   * ```
   */
  decompress?: boolean;
}

/**

1
packages/bun-types/index.d.ts
vendored
@@ -21,7 +21,6 @@
/// <reference path="./redis.d.ts" />
/// <reference path="./shell.d.ts" />
/// <reference path="./experimental.d.ts" />
/// <reference path="./sql.d.ts" />

/// <reference path="./bun.ns.d.ts" />

6
packages/bun-types/overrides.d.ts
vendored
@@ -24,12 +24,6 @@ declare module "stream/web" {
  }
}

declare module "url" {
  interface URLSearchParams {
    toJSON(): Record<string, string>;
  }
}

declare global {
  namespace NodeJS {
    interface ProcessEnv extends Bun.Env {}

44
packages/bun-types/redis.d.ts
vendored
@@ -574,50 +574,6 @@ declare module "bun" {
   */
  getex(key: RedisClient.KeyLike): Promise<string | null>;

  /**
   * Get the value of a key and set its expiration in seconds
   * @param key The key to get
   * @param ex Set the specified expire time, in seconds
   * @param seconds The number of seconds until expiration
   * @returns Promise that resolves with the value of the key, or null if the key doesn't exist
   */
  getex(key: RedisClient.KeyLike, ex: "EX", seconds: number): Promise<string | null>;

  /**
   * Get the value of a key and set its expiration in milliseconds
   * @param key The key to get
   * @param px Set the specified expire time, in milliseconds
   * @param milliseconds The number of milliseconds until expiration
   * @returns Promise that resolves with the value of the key, or null if the key doesn't exist
   */
  getex(key: RedisClient.KeyLike, px: "PX", milliseconds: number): Promise<string | null>;

  /**
   * Get the value of a key and set its expiration at a specific Unix timestamp in seconds
   * @param key The key to get
   * @param exat Set the specified Unix time at which the key will expire, in seconds
   * @param timestampSeconds The Unix timestamp in seconds
   * @returns Promise that resolves with the value of the key, or null if the key doesn't exist
   */
  getex(key: RedisClient.KeyLike, exat: "EXAT", timestampSeconds: number): Promise<string | null>;

  /**
   * Get the value of a key and set its expiration at a specific Unix timestamp in milliseconds
   * @param key The key to get
   * @param pxat Set the specified Unix time at which the key will expire, in milliseconds
   * @param timestampMilliseconds The Unix timestamp in milliseconds
   * @returns Promise that resolves with the value of the key, or null if the key doesn't exist
   */
  getex(key: RedisClient.KeyLike, pxat: "PXAT", timestampMilliseconds: number): Promise<string | null>;

  /**
   * Get the value of a key and remove its expiration
   * @param key The key to get
   * @param persist Remove the expiration from the key
   * @returns Promise that resolves with the value of the key, or null if the key doesn't exist
   */
  getex(key: RedisClient.KeyLike, persist: "PERSIST"): Promise<string | null>;

  /**
   * Ping the server
   * @returns Promise that resolves with "PONG" if the server is reachable, or throws an error if the server is not reachable
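Since the `getex` overloads above differ only in their modifier literal, a short usage sketch may help. The key name and TTL values are illustrative assumptions; `redis` is Bun's default Redis client.

```ts
import { redis } from "bun";

// Read "session:42" and give it a fresh 60-second TTL in one round trip.
const session = await redis.getex("session:42", "EX", 60);

// Read the same key and drop its expiration entirely.
const pinned = await redis.getex("session:42", "PERSIST");
```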
805 packages/bun-types/sql.d.ts vendored
@@ -1,805 +0,0 @@
import type * as BunSQLite from "bun:sqlite";

declare module "bun" {
  /**
   * Represents a reserved connection from the connection pool. Extends SQL with
   * additional release functionality
   */
  interface ReservedSQL extends SQL, Disposable {
    /**
     * Releases the client back to the connection pool
     */
    release(): void;
  }

  /**
   * Represents a client within a transaction context. Extends SQL with savepoint
   * functionality
   */
  interface TransactionSQL extends SQL {
    /**
     * Creates a savepoint within the current transaction
     */
    savepoint<T>(name: string, fn: SQL.SavepointContextCallback<T>): Promise<T>;
    savepoint<T>(fn: SQL.SavepointContextCallback<T>): Promise<T>;

    /**
     * The reserve method pulls out a connection from the pool, and returns a
     * client that wraps the single connection.
     *
     * Using reserve() inside of a transaction will return a brand new
     * connection, not one related to the transaction. This matches the
     * behaviour of the `postgres` package.
     */
    reserve(): Promise<ReservedSQL>;
  }

  namespace SQL {
    class SQLError extends Error {
      constructor(message: string);
    }

    class PostgresError extends SQLError {
      public readonly code: string;
      public readonly errno: string | undefined;
      public readonly detail: string | undefined;
      public readonly hint: string | undefined;
      public readonly severity: string | undefined;
      public readonly position: string | undefined;
      public readonly internalPosition: string | undefined;
      public readonly internalQuery: string | undefined;
      public readonly where: string | undefined;
      public readonly schema: string | undefined;
      public readonly table: string | undefined;
      public readonly column: string | undefined;
      public readonly dataType: string | undefined;
      public readonly constraint: string | undefined;
      public readonly file: string | undefined;
      public readonly line: string | undefined;
      public readonly routine: string | undefined;

      constructor(
        message: string,
        options: {
          code: string;
          errno?: string | undefined;
          detail?: string;
          hint?: string | undefined;
          severity?: string | undefined;
          position?: string | undefined;
          internalPosition?: string;
          internalQuery?: string;
          where?: string | undefined;
          schema?: string;
          table?: string | undefined;
          column?: string | undefined;
          dataType?: string | undefined;
          constraint?: string;
          file?: string | undefined;
          line?: string | undefined;
          routine?: string | undefined;
        },
      );
    }

    class SQLiteError extends SQLError {
      public readonly code: string;
      public readonly errno: number;
      public readonly byteOffset?: number | undefined;

      constructor(message: string, options: { code: string; errno: number; byteOffset?: number | undefined });
    }

    type AwaitPromisesArray<T extends Array<PromiseLike<any>>> = {
      [K in keyof T]: Awaited<T[K]>;
    };

    type ContextCallbackResult<T> = T extends Array<PromiseLike<any>> ? AwaitPromisesArray<T> : Awaited<T>;
    type ContextCallback<T, SQL> = (sql: SQL) => Bun.MaybePromise<T>;

    interface SQLiteOptions extends BunSQLite.DatabaseOptions {
      adapter?: "sqlite";

      /**
       * Specify the path to the database file
       *
       * Examples:
       *
       * - `sqlite://:memory:`
       * - `sqlite://./path/to/database.db`
       * - `sqlite:///Users/bun/projects/my-app/database.db`
       * - `./dev.db`
       * - `:memory:`
       *
       * @default ":memory:"
       */
      filename?: URL | ":memory:" | (string & {}) | undefined;

      /**
       * Callback executed when a connection attempt completes (SQLite)
       * Receives an Error on failure, or null on success.
       */
      onconnect?: ((err: Error | null) => void) | undefined;

      /**
       * Callback executed when a connection is closed (SQLite)
       * Receives the closing Error or null.
       */
      onclose?: ((err: Error | null) => void) | undefined;
    }

    interface PostgresOptions {
      /**
       * Connection URL (can be string or URL object)
       */
      url?: URL | string | undefined;

      /**
       * Database server hostname
       * @default "localhost"
       */
      host?: string | undefined;

      /**
       * Database server hostname (alias for host)
       * @deprecated Prefer {@link host}
       * @default "localhost"
       */
      hostname?: string | undefined;

      /**
       * Database server port number
       * @default 5432
       */
      port?: number | string | undefined;

      /**
       * Database user for authentication
       * @default "postgres"
       */
      username?: string | undefined;

      /**
       * Database user for authentication (alias for username)
       * @deprecated Prefer {@link username}
       * @default "postgres"
       */
      user?: string | undefined;

      /**
       * Database password for authentication
       * @default ""
       */
      password?: string | (() => MaybePromise<string>) | undefined;

      /**
       * Database password for authentication (alias for password)
       * @deprecated Prefer {@link password}
       * @default ""
       */
      pass?: string | (() => MaybePromise<string>) | undefined;

      /**
       * Name of the database to connect to
       * @default The username value
       */
      database?: string | undefined;

      /**
       * Name of the database to connect to (alias for database)
       * @deprecated Prefer {@link database}
       * @default The username value
       */
      db?: string | undefined;

      /**
       * Database adapter/driver to use
       * @default "postgres"
       */
      adapter?: "postgres";

      /**
       * Maximum time in seconds to wait for connection to become available
       * @default 0 (no timeout)
       */
      idleTimeout?: number | undefined;

      /**
       * Maximum time in seconds to wait for connection to become available (alias for idleTimeout)
       * @deprecated Prefer {@link idleTimeout}
       * @default 0 (no timeout)
       */
      idle_timeout?: number | undefined;

      /**
       * Maximum time in seconds to wait when establishing a connection
       * @default 30
       */
      connectionTimeout?: number | undefined;

      /**
       * Maximum time in seconds to wait when establishing a connection (alias for connectionTimeout)
       * @deprecated Prefer {@link connectionTimeout}
       * @default 30
       */
      connection_timeout?: number | undefined;

      /**
       * Maximum time in seconds to wait when establishing a connection (alias
       * for connectionTimeout)
       * @deprecated Prefer {@link connectionTimeout}
       * @default 30
       */
      connectTimeout?: number | undefined;

      /**
       * Maximum time in seconds to wait when establishing a connection (alias
       * for connectionTimeout)
       * @deprecated Prefer {@link connectionTimeout}
       * @default 30
       */
      connect_timeout?: number | undefined;

      /**
       * Maximum lifetime in seconds of a connection
       * @default 0 (no maximum lifetime)
       */
      maxLifetime?: number | undefined;

      /**
       * Maximum lifetime in seconds of a connection (alias for maxLifetime)
       * @deprecated Prefer {@link maxLifetime}
       * @default 0 (no maximum lifetime)
       */
      max_lifetime?: number | undefined;

      /**
       * Whether to use TLS/SSL for the connection
       * @default false
       */
      tls?: TLSOptions | boolean | undefined;

      /**
       * Whether to use TLS/SSL for the connection (alias for tls)
       * @default false
       */
      ssl?: TLSOptions | boolean | undefined;

      // `.path` is currently unsupported in Bun, the implementation is
      // incomplete.
      //
      // /**
      //  * Unix domain socket path for connection
      //  * @default ""
      //  */
      // path?: string | undefined;

      /**
       * Callback executed when a connection attempt completes
       * Receives an Error on failure, or null on success.
       */
      onconnect?: ((err: Error | null) => void) | undefined;

      /**
       * Callback executed when a connection is closed
       * Receives the closing Error or null.
       */
      onclose?: ((err: Error | null) => void) | undefined;

      /**
       * Postgres client runtime configuration options
       *
       * @see https://www.postgresql.org/docs/current/runtime-config-client.html
       */
      connection?: Record<string, string | boolean | number> | undefined;

      /**
       * Maximum number of connections in the pool
       * @default 10
       */
      max?: number | undefined;

      /**
       * By default values outside i32 range are returned as strings. If this is
       * true, values outside i32 range are returned as BigInts.
       * @default false
       */
      bigint?: boolean | undefined;

      /**
       * Automatic creation of prepared statements
       * @default true
       */
      prepare?: boolean | undefined;
    }
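As a concrete sketch of the `PostgresOptions` shape above, here is a hypothetical client construction; every credential and tuning value is an illustrative assumption.

```ts
import { SQL } from "bun";

const sql = new SQL({
  adapter: "postgres",
  host: "localhost",
  port: 5432,
  username: "dbuser",
  password: "secretpass",
  database: "myapp",
  max: 20, // pool size
  bigint: true, // return out-of-i32-range integers as BigInt instead of string
  prepare: true, // allow automatic prepared statements
  onconnect: err => {
    if (err) console.error("connection failed:", err);
  },
});
```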
    /**
     * Configuration options for SQL client connection and behavior
     *
     * @example
     * ```ts
     * const config: Bun.SQL.Options = {
     *   host: 'localhost',
     *   port: 5432,
     *   user: 'dbuser',
     *   password: 'secretpass',
     *   database: 'myapp',
     *   idleTimeout: 30,
     *   max: 20,
     *   onconnect: (client) => {
     *     console.log('Connected to database');
     *   }
     * };
     * ```
     */
    type Options = SQLiteOptions | PostgresOptions;

    /**
     * Represents a SQL query that can be executed, with additional control
     * methods. Extends Promise to allow for async/await usage
     */
    interface Query<T> extends Promise<T> {
      /**
       * Indicates if the query is currently executing
       */
      active: boolean;

      /**
       * Indicates if the query has been cancelled
       */
      cancelled: boolean;

      /**
       * Cancels the executing query
       */
      cancel(): Query<T>;

      /**
       * Executes the query as a simple query, no parameters are allowed but can
       * execute multiple commands separated by semicolons
       */
      simple(): Query<T>;

      /**
       * Executes the query
       */
      execute(): Query<T>;

      /**
       * Returns the raw query result
       */
      raw(): Query<T>;

      /**
       * Returns only the values from the query result
       */
      values(): Query<T>;
    }
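A brief sketch of the `Query` control surface above, with an assumed `users` table; queries are promise-like objects, so these methods chain before awaiting.

```ts
import { sql } from "bun";

// A lazy query handle; nothing runs until it is awaited or executed.
const q = sql`select * from users`;

// Simple mode: several semicolon-separated statements, no parameters allowed.
await sql`select 1; select 2;`.simple();

// Fetch rows as arrays of values rather than objects.
const rows = await sql`select id, name from users`.values();

// Abandon a long-running query.
q.cancel();
```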
    /**
     * Callback function type for transaction contexts
     * @param sql Function to execute SQL queries within the transaction
     */
    type TransactionContextCallback<T> = ContextCallback<T, TransactionSQL>;

    /**
     * Callback function type for savepoint contexts
     * @param sql Function to execute SQL queries within the savepoint
     */
    type SavepointContextCallback<T> = ContextCallback<T, SavepointSQL>;

    /**
     * SQL.Helper represents a parameter or serializable
     * value inside of a query.
     *
     * @example
     * ```ts
     * const helper = sql(users, 'id');
     * await sql`insert into users ${helper}`;
     * ```
     */
    interface Helper<T> {
      readonly value: T[];
      readonly columns: (keyof T)[];
    }
  }

  interface SQL extends AsyncDisposable {
    /**
     * Executes a SQL query using template literals
     * @example
     * ```ts
     * const [user] = await sql<Users[]>`select * from users where id = ${1}`;
     * ```
     */
    <T = any>(strings: TemplateStringsArray, ...values: unknown[]): SQL.Query<T>;

    /**
     * Execute a SQL query using a string
     *
     * @example
     * ```ts
     * const users = await sql<User[]>`SELECT * FROM users WHERE id = ${1}`;
     * ```
     */
    <T = any>(string: string): SQL.Query<T>;

    /**
     * Helper function for inserting an object into a query
     *
     * @example
     * ```ts
     * // Insert an object
     * const result = await sql`insert into users ${sql(users)} returning *`;
     *
     * // Or pick specific columns
     * const result = await sql`insert into users ${sql(users, "id", "name")} returning *`;
     *
     * // Or a single object
     * const result = await sql`insert into users ${sql(user)} returning *`;
     * ```
     */
    <T extends { [Key in PropertyKey]: unknown }>(obj: T | T[] | readonly T[]): SQL.Helper<T>; // Contributor note: This is the same as the signature below with the exception of the columns and the Pick<T, Keys>

    /**
     * Helper function for inserting an object into a query, supporting specific columns
     *
     * @example
     * ```ts
     * // Insert an object
     * const result = await sql`insert into users ${sql(users)} returning *`;
     *
     * // Or pick specific columns
     * const result = await sql`insert into users ${sql(users, "id", "name")} returning *`;
     *
     * // Or a single object
     * const result = await sql`insert into users ${sql(user)} returning *`;
     * ```
     */
    <T extends { [Key in PropertyKey]: unknown }, Keys extends keyof T = keyof T>(
      obj: T | T[] | readonly T[],
      ...columns: readonly Keys[]
    ): SQL.Helper<Pick<T, Keys>>; // Contributor note: This is the same as the signature above with the exception of this signature tracking keys

    /**
     * Helper function for inserting any serializable value into a query
     *
     * @example
     * ```ts
     * const result = await sql`SELECT * FROM users WHERE id IN ${sql([1, 2, 3])}`;
     * ```
     */
    <T>(value: T): SQL.Helper<T>;
  }

  /**
   * Main SQL client interface providing connection and transaction management
   */
  class SQL {
    /**
     * Creates a new SQL client instance
     *
     * @param connectionString - The connection string for the SQL client
     *
     * @example
     * ```ts
     * const sql = new SQL("postgres://localhost:5432/mydb");
     * const sql = new SQL(new URL("postgres://localhost:5432/mydb"));
     * ```
     */
    constructor(connectionString: string | URL);

    /**
     * Creates a new SQL client instance with options
     *
     * @param connectionString - The connection string for the SQL client
     * @param options - The options for the SQL client
     *
     * @example
     * ```ts
     * const sql = new SQL("postgres://localhost:5432/mydb", { idleTimeout: 1000 });
     * ```
     */
    constructor(
      connectionString: string | URL,
      options: Bun.__internal.DistributedOmit<SQL.Options, "url" | "filename">,
    );

    /**
     * Creates a new SQL client instance with options
     *
     * @param options - The options for the SQL client
     *
     * @example
     * ```ts
     * const sql = new SQL({ url: "postgres://localhost:5432/mydb", idleTimeout: 1000 });
     * ```
     */
    constructor(options?: SQL.Options);

    /**
     * Current client options
     */
    options: Bun.__internal.DistributedMerge<SQL.Options>;

    /**
     * Commits a distributed transaction, also known as a prepared transaction in PostgreSQL or an XA transaction in MySQL
     *
     * @param name - The name of the distributed transaction
     *
     * @throws {Error} If the adapter does not support distributed transactions (e.g., SQLite)
     *
     * @example
     * ```ts
     * await sql.commitDistributed("my_distributed_transaction");
     * ```
     */
    commitDistributed(name: string): Promise<void>;

    /**
     * Rolls back a distributed transaction, also known as a prepared transaction in PostgreSQL or an XA transaction in MySQL
     *
     * @param name - The name of the distributed transaction
     *
     * @throws {Error} If the adapter does not support distributed transactions (e.g., SQLite)
     *
     * @example
     * ```ts
     * await sql.rollbackDistributed("my_distributed_transaction");
     * ```
     */
    rollbackDistributed(name: string): Promise<void>;

    /** Waits for the database connection to be established
     *
     * @example
     * ```ts
     * await sql.connect();
     * ```
     */
    connect(): Promise<SQL>;

    /**
     * Closes the database connection with optional timeout in seconds. If timeout is 0, it will close immediately; if it is not provided, it will wait for all queries to finish before closing.
     *
     * @param options - The options for the close
     *
     * @example
     * ```ts
     * await sql.close({ timeout: 1 });
     * ```
     */
    close(options?: { timeout?: number }): Promise<void>;

    /**
     * Closes the database connection with optional timeout in seconds. If timeout is 0, it will close immediately; if it is not provided, it will wait for all queries to finish before closing.
     * This is an alias of {@link SQL.close}
     *
     * @param options - The options for the close
     *
     * @example
     * ```ts
     * await sql.end({ timeout: 1 });
     * ```
     */
    end(options?: { timeout?: number }): Promise<void>;

    /**
     * Flushes any pending operations
     *
     * @throws {Error} If the adapter does not support flushing (e.g., SQLite)
     *
     * @example
     * ```ts
     * sql.flush();
     * ```
     */
    flush(): void;

    /**
     * The reserve method pulls out a connection from the pool, and returns a client that wraps the single connection.
     *
     * This can be used for running queries on an isolated connection.
     * Calling reserve on a reserved SQL instance will return a new reserved connection, not the same connection (behavior matches the `postgres` package).
     *
     * @throws {Error} If the adapter does not support connection pooling (e.g., SQLite)
     *
     * @example
     * ```ts
     * const reserved = await sql.reserve();
     * await reserved`select * from users`;
     * await reserved.release();
     * // in a production scenario it would be something more like
     * const reserved = await sql.reserve();
     * try {
     *   // ... queries
     * } finally {
     *   await reserved.release();
     * }
     *
     * // Bun supports Symbol.dispose and Symbol.asyncDispose
     * // always release after context (safer)
     * using reserved = await sql.reserve()
     * await reserved`select * from users`
     * ```
     */
    reserve(): Promise<ReservedSQL>;

    /**
     * Begins a new transaction.
     *
     * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. sql.begin will resolve with the returned value from the callback function.
     * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue.
     * @example
     * const [user, account] = await sql.begin(async sql => {
     *   const [user] = await sql`
     *     insert into users (
     *       name
     *     ) values (
     *       'Murray'
     *     )
     *     returning *
     *   `
     *   const [account] = await sql`
     *     insert into accounts (
     *       user_id
     *     ) values (
     *       ${ user.user_id }
     *     )
     *     returning *
     *   `
     *   return [user, account]
     * })
     */
    begin<const T>(fn: SQL.TransactionContextCallback<T>): Promise<SQL.ContextCallbackResult<T>>;

    /**
     * Begins a new transaction with options.
     *
     * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. sql.begin will resolve with the returned value from the callback function.
     * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue.
     * @example
     * const [user, account] = await sql.begin("read write", async sql => {
     *   const [user] = await sql`
     *     insert into users (
     *       name
     *     ) values (
     *       'Murray'
     *     )
     *     returning *
     *   `
     *   const [account] = await sql`
     *     insert into accounts (
     *       user_id
     *     ) values (
     *       ${ user.user_id }
     *     )
     *     returning *
     *   `
     *   return [user, account]
     * })
     */
    begin<const T>(options: string, fn: SQL.TransactionContextCallback<T>): Promise<SQL.ContextCallbackResult<T>>;

    /**
     * Alternative method to begin a transaction.
     *
     * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. sql.transaction will resolve with the returned value from the callback function.
     * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue.
     * @alias begin
     * @example
     * const [user, account] = await sql.transaction(async sql => {
     *   const [user] = await sql`
     *     insert into users (
     *       name
     *     ) values (
     *       'Murray'
     *     )
     *     returning *
     *   `
     *   const [account] = await sql`
     *     insert into accounts (
     *       user_id
     *     ) values (
     *       ${ user.user_id }
     *     )
     *     returning *
     *   `
     *   return [user, account]
     * })
     */
    transaction<const T>(fn: SQL.TransactionContextCallback<T>): Promise<SQL.ContextCallbackResult<T>>;

    /**
     * Alternative method to begin a transaction with options.
     * Will reserve a connection for the transaction and supply a scoped sql instance for all transaction uses in the callback function. sql.transaction will resolve with the returned value from the callback function.
     * BEGIN is automatically sent with the optional options, and if anything fails ROLLBACK will be called so the connection can be released and execution can continue.
     *
     * @alias {@link begin}
     *
     * @example
     * const [user, account] = await sql.transaction("read write", async sql => {
     *   const [user] = await sql`
     *     insert into users (
     *       name
     *     ) values (
     *       'Murray'
     *     )
     *     returning *
     *   `
     *   const [account] = await sql`
     *     insert into accounts (
     *       user_id
     *     ) values (
     *       ${ user.user_id }
     *     )
     *     returning *
     *   `
     *   return [user, account]
     * });
     */
    transaction<const T>(options: string, fn: SQL.TransactionContextCallback<T>): Promise<SQL.ContextCallbackResult<T>>;

    /**
     * Begins a distributed transaction.
     * Also known as Two-Phase Commit: in a distributed transaction, Phase 1 involves the coordinator preparing nodes by ensuring data is written and ready to commit, while Phase 2 finalizes with nodes committing or rolling back based on the coordinator's decision, ensuring durability and releasing locks.
     * In PostgreSQL and MySQL distributed transactions persist beyond the original session, allowing privileged users or coordinators to commit/rollback them, ensuring support for distributed transactions, recovery, and administrative tasks.
     * beginDistributed will automatically roll back if any exceptions are not caught, and you can commit and rollback later if everything goes well.
     * PostgreSQL natively supports distributed transactions using PREPARE TRANSACTION, while MySQL uses XA Transactions, and MSSQL also supports distributed/XA transactions. However, in MSSQL, distributed transactions are tied to the original session, the DTC coordinator, and the specific connection.
     * These transactions are automatically committed or rolled back following the same rules as regular transactions, with no option for manual intervention from other sessions; in MSSQL distributed transactions are used to coordinate transactions using Linked Servers.
     *
     * @throws {Error} If the adapter does not support distributed transactions (e.g., SQLite)
     *
     * @example
     * await sql.beginDistributed("numbers", async sql => {
     *   await sql`create table if not exists numbers (a int)`;
     *   await sql`insert into numbers values(1)`;
     * });
     * // later you can call
     * await sql.commitDistributed("numbers");
     * // or await sql.rollbackDistributed("numbers");
     */
    beginDistributed<const T>(
      name: string,
      fn: SQL.TransactionContextCallback<T>,
    ): Promise<SQL.ContextCallbackResult<T>>;

    /** Alternative method to begin a distributed transaction
     * @alias {@link beginDistributed}
     */
    distributed<const T>(name: string, fn: SQL.TransactionContextCallback<T>): Promise<SQL.ContextCallbackResult<T>>;

    /** If you know what you're doing, you can use unsafe to pass any string you'd like.
     * Please note that this can lead to SQL injection if you're not careful.
     * You can also nest sql.unsafe within a safe sql expression. This is useful if only part of your query has unsafe elements.
     * @example
     * const result = await sql.unsafe(`select ${danger} from users where id = ${dragons}`)
     */
    unsafe<T = any>(string: string, values?: any[]): SQL.Query<T>;
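The nesting the `unsafe` doc mentions is worth a concrete sketch: keep user data as bound parameters and confine the raw string to a trusted fragment. The column list here is an assumed allow-list value, never user input.

```ts
import { sql } from "bun";

// Only the column list is dynamic; the id stays a bound parameter.
const columns = "id, name"; // hypothetical trusted allow-list value
const result = await sql`select ${sql.unsafe(columns)} from users where id = ${1}`;
```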
    /**
     * Reads a file and uses the contents as a query.
     * Optional parameters can be used if the file includes $1, $2, etc
     * @example
     * const result = await sql.file("query.sql", [1, 2, 3]);
     */
    file<T = any>(filename: string, values?: any[]): SQL.Query<T>;
  }

  /**
   * SQL client
   */
  const sql: SQL;

  /**
   * SQL client for PostgreSQL
   *
   * @deprecated Prefer {@link Bun.sql}
   */
  const postgres: SQL;

  /**
   * Represents a savepoint within a transaction
   */
  interface SavepointSQL extends SQL {}
}
190 packages/bun-types/sqlite.d.ts vendored
@@ -24,66 +24,6 @@
 * | `null` | `NULL` |
 */
declare module "bun:sqlite" {
  /**
   * Options for {@link Database}
   */
  export interface DatabaseOptions {
    /**
     * Open the database as read-only (no write operations, no create).
     *
     * Equivalent to {@link constants.SQLITE_OPEN_READONLY}
     */
    readonly?: boolean;

    /**
     * Allow creating a new database
     *
     * Equivalent to {@link constants.SQLITE_OPEN_CREATE}
     */
    create?: boolean;

    /**
     * Open the database as read-write
     *
     * Equivalent to {@link constants.SQLITE_OPEN_READWRITE}
     */
    readwrite?: boolean;

    /**
     * When set to `true`, integers are returned as `bigint` types.
     *
     * When set to `false`, integers are returned as `number` types and truncated to 52 bits.
     *
     * @default false
     * @since v1.1.14
     */
    safeIntegers?: boolean;

    /**
     * When set to `false` or `undefined`:
     * - Queries missing bound parameters will NOT throw an error
     * - Bound named parameters in JavaScript need to exactly match the SQL query.
     *
     * @example
     * ```ts
     * const db = new Database(":memory:", { strict: false });
     * db.run("INSERT INTO foo (name) VALUES ($name)", { $name: "foo" });
     * ```
     *
     * When set to `true`:
     * - Queries missing bound parameters will throw an error
     * - Bound named parameters in JavaScript no longer need to be `$`, `:`, or `@`. The SQL query will remain prefixed.
     *
     * @example
     * ```ts
     * const db = new Database(":memory:", { strict: true });
     * db.run("INSERT INTO foo (name) VALUES ($name)", { name: "foo" });
     * ```
     * @since v1.1.14
     */
    strict?: boolean;
  }

  /**
   * A SQLite3 database
   *
@@ -113,6 +53,8 @@ declare module "bun:sqlite" {
   * ```ts
   * const db = new Database("mydb.sqlite", {readonly: true});
   * ```
   *
   * @category Database
   */
  export class Database implements Disposable {
    /**
@@ -121,19 +63,96 @@ declare module "bun:sqlite" {
     * @param filename The filename of the database to open. Pass an empty string (`""`) or `":memory:"` or undefined for an in-memory database.
     * @param options defaults to `{readwrite: true, create: true}`. If a number, then it's treated as `SQLITE_OPEN_*` constant flags.
     */
    constructor(filename?: string, options?: number | DatabaseOptions);
    constructor(
      filename?: string,
      options?:
        | number
        | {
            /**
             * Open the database as read-only (no write operations, no create).
             *
             * Equivalent to {@link constants.SQLITE_OPEN_READONLY}
             */
            readonly?: boolean;
            /**
             * Allow creating a new database
             *
             * Equivalent to {@link constants.SQLITE_OPEN_CREATE}
             */
            create?: boolean;
            /**
             * Open the database as read-write
             *
             * Equivalent to {@link constants.SQLITE_OPEN_READWRITE}
             */
            readwrite?: boolean;

            /**
             * When set to `true`, integers are returned as `bigint` types.
             *
             * When set to `false`, integers are returned as `number` types and truncated to 52 bits.
             *
             * @default false
             * @since v1.1.14
             */
            safeIntegers?: boolean;

            /**
             * When set to `false` or `undefined`:
             * - Queries missing bound parameters will NOT throw an error
             * - Bound named parameters in JavaScript need to exactly match the SQL query.
             *
             * @example
             * ```ts
             * const db = new Database(":memory:", { strict: false });
             * db.run("INSERT INTO foo (name) VALUES ($name)", { $name: "foo" });
             * ```
             *
             * When set to `true`:
             * - Queries missing bound parameters will throw an error
             * - Bound named parameters in JavaScript no longer need to be `$`, `:`, or `@`. The SQL query will remain prefixed.
             *
             * @example
             * ```ts
             * const db = new Database(":memory:", { strict: true });
             * db.run("INSERT INTO foo (name) VALUES ($name)", { name: "foo" });
             * ```
             * @since v1.1.14
             */
            strict?: boolean;
          },
    );

    /**
     * Open or create a SQLite3 database
     *
     * @param filename The filename of the database to open. Pass an empty string (`""`) or `":memory:"` or undefined for an in-memory database.
     * @param options defaults to `{readwrite: true, create: true}`. If a number, then it's treated as `SQLITE_OPEN_*` constant flags.
     *
     * This is an alias of `new Database()`
     *
     * See {@link Database}
     */
    static open(filename: string, options?: number | DatabaseOptions): Database;
    static open(
      filename: string,
      options?:
        | number
        | {
            /**
             * Open the database as read-only (no write operations, no create).
             *
             * Equivalent to {@link constants.SQLITE_OPEN_READONLY}
             */
            readonly?: boolean;
            /**
             * Allow creating a new database
             *
             * Equivalent to {@link constants.SQLITE_OPEN_CREATE}
             */
            create?: boolean;
            /**
             * Open the database as read-write
             *
             * Equivalent to {@link constants.SQLITE_OPEN_READWRITE}
             */
            readwrite?: boolean;
          },
    ): Database;

    /**
     * Execute a SQL query **without returning any results**.
@@ -184,11 +203,8 @@ declare module "bun:sqlite" {
     * @returns `Database` instance
     */
    run<ParamsType extends SQLQueryBindings[]>(sql: string, ...bindings: ParamsType[]): Changes;

    /**
     * This is an alias of {@link Database.run}
     *
     * @deprecated Prefer {@link Database.run}
     */
    exec<ParamsType extends SQLQueryBindings[]>(sql: string, ...bindings: ParamsType[]): Changes;

@@ -335,16 +351,6 @@ declare module "bun:sqlite" {
     */
    static setCustomSQLite(path: string): boolean;

    /**
     * Closes the database when using the async resource proposal
     *
     * @example
     * ```
     * using db = new Database("myapp.db");
     * doSomethingWithDatabase(db);
     * // Automatically closed when `db` goes out of scope
     * ```
     */
    [Symbol.dispose](): void;

    /**
@@ -738,30 +744,6 @@ declare module "bun:sqlite" {
     */
    values(...params: ParamsType): Array<Array<string | bigint | number | boolean | Uint8Array>>;

    /**
     * Execute the prepared statement and return all results as arrays of
     * `Uint8Array`s.
     *
     * This is similar to `values()` but returns all values as Uint8Array
     * objects, regardless of their original SQLite type.
     *
     * @param params optional values to bind to the statement. If omitted, the
     * statement is run with the last bound values or no parameters if there are
     * none.
     *
     * @example
     * ```ts
     * const stmt = db.prepare("SELECT * FROM foo WHERE bar = ?");
     *
     * stmt.raw("baz");
     * // => [[Uint8Array(24)]]
     *
     * stmt.raw();
     * // => [[Uint8Array(24)]]
     * ```
     */
    raw(...params: ParamsType): Array<Array<Uint8Array | null>>;

    /**
     * The names of the columns returned by the prepared statement.
     * @example
@@ -346,7 +346,6 @@ us_internal_ssl_socket_close(struct us_internal_ssl_socket_t *s, int code,

  // check if we are already closed
  if (us_internal_ssl_socket_is_closed(s)) return s;
  us_internal_set_loop_ssl_data(s);
  us_internal_update_handshake(s);

  if (s->handshake_state != HANDSHAKE_COMPLETED) {

@@ -30,17 +30,13 @@ extern void __attribute((__noreturn__)) Bun__panic(const char* message, size_t l
#define BUN_PANIC(message) Bun__panic(message, sizeof(message) - 1)
#endif

extern void Bun__internal_ensureDateHeaderTimerIsEnabled(struct us_loop_t *loop);

void sweep_timer_cb(struct us_internal_callback_t *cb);

void us_internal_enable_sweep_timer(struct us_loop_t *loop) {
  loop->data.sweep_timer_count++;
  if (loop->data.sweep_timer_count == 1) {
  if (loop->data.sweep_timer_count == 0) {
    us_timer_set(loop->data.sweep_timer, (void (*)(struct us_timer_t *)) sweep_timer_cb, LIBUS_TIMEOUT_GRANULARITY * 1000, LIBUS_TIMEOUT_GRANULARITY * 1000);
    Bun__internal_ensureDateHeaderTimerIsEnabled(loop);
  }

  loop->data.sweep_timer_count++;
}

void us_internal_disable_sweep_timer(struct us_loop_t *loop) {
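The sweep-timer hunk above moves the increment after the zero-check, so the timer is armed exactly on the 0 -> 1 transition of the reference count. A minimal TypeScript sketch of that pattern, with illustrative names that are not Bun APIs:

```ts
let sweepTimerCount = 0;

function enableSweepTimer(arm: () => void) {
  // Arm the underlying timer only when the count rises from 0 to 1.
  if (sweepTimerCount === 0) {
    arm();
  }
  sweepTimerCount++;
}

function disableSweepTimer(disarm: () => void) {
  sweepTimerCount--;
  // Disarm only when the count falls back to 0.
  if (sweepTimerCount === 0) {
    disarm();
  }
}
```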
@@ -222,78 +222,6 @@ namespace uWS
    return std::string_view(nullptr, 0);
  }

  struct TransferEncoding {
    bool has: 1 = false;
    bool chunked: 1 = false;
    bool invalid: 1 = false;
  };

  TransferEncoding getTransferEncoding()
  {
    TransferEncoding te;

    if (!bf.mightHave("transfer-encoding")) {
      return te;
    }

    for (Header *h = headers; (++h)->key.length();) {
      if (h->key.length() == 17 && !strncmp(h->key.data(), "transfer-encoding", 17)) {
        // Parse comma-separated values, ensuring "chunked" is last if present
        const auto value = h->value;
        size_t pos = 0;
        size_t lastTokenStart = 0;
        size_t lastTokenLen = 0;

        while (pos < value.length()) {
          // Skip leading whitespace
          while (pos < value.length() && (value[pos] == ' ' || value[pos] == '\t')) {
            pos++;
          }

          // Remember start of this token
          size_t tokenStart = pos;

          // Find end of token (until comma or end)
          while (pos < value.length() && value[pos] != ',') {
            pos++;
          }

          // Trim trailing whitespace from token
          size_t tokenEnd = pos;
          while (tokenEnd > tokenStart && (value[tokenEnd - 1] == ' ' || value[tokenEnd - 1] == '\t')) {
            tokenEnd--;
          }

          size_t tokenLen = tokenEnd - tokenStart;
          if (tokenLen > 0) {
            lastTokenStart = tokenStart;
            lastTokenLen = tokenLen;
          }

          // Move past comma if present
          if (pos < value.length() && value[pos] == ',') {
            pos++;
          }
        }

        if (te.chunked) [[unlikely]] {
          te.invalid = true;
          return te;
        }

        te.has = lastTokenLen > 0;

        // Check if the last token is "chunked"
        if (lastTokenLen == 7 && !strncmp(value.data() + lastTokenStart, "chunked", 7)) [[likely]] {
          te.chunked = true;
        }

      }
    }

    return te;
  }

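The parser above enforces RFC 9112's rule that `chunked`, if present, must be the final transfer coding. A rough TypeScript sketch of the same check for a single header value (an illustration, not the uWS implementation, which additionally flags repeated Transfer-Encoding headers):

```ts
function validateTransferEncoding(value: string): { has: boolean; chunked: boolean; invalid: boolean } {
  const tokens = value
    .split(",")
    .map(t => t.trim())
    .filter(t => t.length > 0);
  const last = tokens[tokens.length - 1];
  return {
    has: tokens.length > 0,
    chunked: last === "chunked",
    // "chunked" anywhere except the final position means the body
    // length cannot be framed reliably.
    invalid: tokens.slice(0, -1).includes("chunked"),
  };
}

// validateTransferEncoding("gzip, chunked") // => { has: true, chunked: true, invalid: false }
// validateTransferEncoding("chunked, gzip") // => { has: true, chunked: false, invalid: true }
```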
  std::string_view getUrl()
  {

@@ -843,16 +771,14 @@ namespace uWS
 * the Transfer-Encoding overrides the Content-Length. Such a message might indicate an attempt
 * to perform request smuggling (Section 11.2) or response splitting (Section 11.1) and
 * ought to be handled as an error. */
const std::string_view contentLengthString = req->getHeader("content-length");
const auto contentLengthStringLen = contentLengthString.length();

/* Check Transfer-Encoding header validity and conflicts */
HttpRequest::TransferEncoding transferEncoding = req->getTransferEncoding();
std::string_view transferEncodingString = req->getHeader("transfer-encoding");
std::string_view contentLengthString = req->getHeader("content-length");

transferEncoding.invalid = transferEncoding.invalid || (transferEncoding.has && (contentLengthStringLen || !transferEncoding.chunked));

if (transferEncoding.invalid) [[unlikely]] {
  /* Invalid Transfer-Encoding (multiple headers or chunked not last - request smuggling attempt) */
auto transferEncodingStringLen = transferEncodingString.length();
auto contentLengthStringLen = contentLengthString.length();
if (transferEncodingStringLen && contentLengthStringLen) {
  /* We could be smart and set an error in the context along with this, to indicate what
   * http error response we might want to return */
  return HttpParserResult::error(HTTP_ERROR_400_BAD_REQUEST, HTTP_PARSER_ERROR_INVALID_TRANSFER_ENCODING);
}

@@ -863,7 +789,7 @@ namespace uWS
// lets check if content len is valid before calling requestHandler
if(contentLengthStringLen) {
  remainingStreamingBytes = toUnsignedInteger(contentLengthString);
  if (remainingStreamingBytes == UINT64_MAX) [[unlikely]] {
  if (remainingStreamingBytes == UINT64_MAX) {
    /* Parser error */
    return HttpParserResult::error(HTTP_ERROR_400_BAD_REQUEST, HTTP_PARSER_ERROR_INVALID_CONTENT_LENGTH);
  }

@@ -887,8 +813,20 @@ namespace uWS
/* RFC 9112 6.3
 * If a message is received with both a Transfer-Encoding and a Content-Length header field,
 * the Transfer-Encoding overrides the Content-Length. */
if (transferEncoding.has) {
  /* We already validated that chunked is last if present, before calling the handler */
if (transferEncodingStringLen) {

  /* If a proxy sent us the transfer-encoding header that 100% means it must be chunked or else the proxy is
   * not RFC 9112 compliant. Therefore it is always better to assume this is the case, since that entirely eliminates
   * all forms of transfer-encoding obfuscation tricks. We just rely on the header. */

  /* RFC 9112 6.3
   * If a Transfer-Encoding header field is present in a request and the chunked transfer coding is not the
   * final encoding, the message body length cannot be determined reliably; the server MUST respond with the
   * 400 (Bad Request) status code and then close the connection. */

  /* In this case we fail later by having the wrong interpretation (assuming chunked).
   * This could be made stricter but makes no difference either way, unless forwarding the identical message as a proxy. */

  remainingStreamingBytes = STATE_IS_CHUNKED;
  /* If consume minimally, we do not want to consume anything but we want to mark this as being chunked */
  if constexpr (!ConsumeMinimally) {

@@ -897,7 +835,7 @@ namespace uWS
    for (auto chunk : uWS::ChunkIterator(&dataToConsume, &remainingStreamingBytes)) {
      dataHandler(user, chunk, chunk.length() == 0);
    }
    if (isParsingInvalidChunkedEncoding(remainingStreamingBytes)) [[unlikely]] {
    if (isParsingInvalidChunkedEncoding(remainingStreamingBytes)) {
      // TODO: what happens if we already responded?
      return HttpParserResult::error(HTTP_ERROR_400_BAD_REQUEST, HTTP_PARSER_ERROR_INVALID_CHUNKED_ENCODING);
    }

@@ -82,6 +82,19 @@ private:

static Loop *create(void *hint) {
  Loop *loop = ((Loop *) us_create_loop(hint, wakeupCb, preCb, postCb, sizeof(LoopData)))->init();

  /* We also need some timers (should live off the one 4 second timer rather) */
  LoopData *loopData = (LoopData *) us_loop_ext((struct us_loop_t *) loop);
  loopData->dateTimer = us_create_timer((struct us_loop_t *) loop, 1, sizeof(LoopData *));
  loopData->updateDate();

  memcpy(us_timer_ext(loopData->dateTimer), &loopData, sizeof(LoopData *));
  us_timer_set(loopData->dateTimer, [](struct us_timer_t *t) {
    LoopData *loopData;
    memcpy(&loopData, us_timer_ext(t), sizeof(LoopData *));
    loopData->updateDate();
  }, 1000, 1000);

  return loop;
}

@@ -133,7 +146,10 @@ public:
/* Freeing the default loop should be done once */
void free() {
  LoopData *loopData = (LoopData *) us_loop_ext((us_loop_t *) this);

  /* Stop and free dateTimer first */
  us_timer_close(loopData->dateTimer, 1);

  loopData->~LoopData();
  /* uSockets will track whether this loop is owned by us or a borrowed alien loop */
  us_loop_free((us_loop_t *) this);

@@ -151,6 +151,8 @@ public:
ZlibContext *zlibContext = nullptr;
InflationStream *inflationStream = nullptr;
DeflationStream *deflationStream = nullptr;

us_timer_t *dateTimer;
};

}
@@ -163,11 +163,8 @@ export class BunTestController implements vscode.Disposable {
    const ignoreGlobs = await this.buildIgnoreGlobs(cancellationToken);
    const tests = await vscode.workspace.findFiles(
      this.customFilePattern(),
      "**/node_modules/**",
      // 5k tests is more than enough for most projects.
      // If they need more, they can manually open the files themselves and they should be added to the test explorer.
      // This is needed because otherwise with too many tests, vscode OOMs.
      5_000,
      "node_modules",
      undefined,
      cancellationToken,
    );
@@ -1,215 +0,0 @@
#!/usr/bin/env bun
import { spawnSync } from "child_process";
import { existsSync, mkdirSync } from "fs";
import { arch, platform } from "os";
import { join, resolve } from "path";

// Build configurations
type BuildConfig = "debug" | "release" | "lto";

// Parse command line arguments
const args = process.argv.slice(2);
const buildConfig: BuildConfig = (args[0] as BuildConfig) || "debug";
const validConfigs = ["debug", "release", "lto"];

if (!validConfigs.includes(buildConfig)) {
  console.error(`Invalid build configuration: ${buildConfig}`);
  console.error(`Valid configurations: ${validConfigs.join(", ")}`);
  process.exit(1);
}

// Detect platform
const OS_NAME = platform().toLowerCase();
const ARCH_NAME_RAW = arch();
const IS_MAC = OS_NAME === "darwin";
const IS_LINUX = OS_NAME === "linux";
const IS_ARM64 = ARCH_NAME_RAW === "arm64" || ARCH_NAME_RAW === "aarch64";

// Paths
const ROOT_DIR = resolve(import.meta.dir, "..");
const WEBKIT_DIR = resolve(ROOT_DIR, "vendor/WebKit");
const WEBKIT_BUILD_DIR = join(WEBKIT_DIR, "WebKitBuild");
const WEBKIT_RELEASE_DIR = join(WEBKIT_BUILD_DIR, "Release");
const WEBKIT_DEBUG_DIR = join(WEBKIT_BUILD_DIR, "Debug");
const WEBKIT_RELEASE_DIR_LTO = join(WEBKIT_BUILD_DIR, "ReleaseLTO");

// Homebrew prefix detection
const HOMEBREW_PREFIX = IS_ARM64 ? "/opt/homebrew/" : "/usr/local/";

// Compiler detection
function findExecutable(names: string[]): string | null {
  for (const name of names) {
    const result = spawnSync("which", [name], { encoding: "utf8" });
    if (result.status === 0) {
      return result.stdout.trim();
    }
  }
  return null;
}

const CC = findExecutable(["clang-19", "clang"]) || "clang";
const CXX = findExecutable(["clang++-19", "clang++"]) || "clang++";

// Build directory based on config
const getBuildDir = (config: BuildConfig) => {
  switch (config) {
    case "debug":
      return WEBKIT_DEBUG_DIR;
    case "lto":
      return WEBKIT_RELEASE_DIR_LTO;
    default:
      return WEBKIT_RELEASE_DIR;
  }
};

// Common CMake flags
const getCommonFlags = () => {
  const flags = [
    "-DPORT=JSCOnly",
    "-DENABLE_STATIC_JSC=ON",
    "-DALLOW_LINE_AND_COLUMN_NUMBER_IN_BUILTINS=ON",
    "-DUSE_THIN_ARCHIVES=OFF",
    "-DUSE_BUN_JSC_ADDITIONS=ON",
    "-DUSE_BUN_EVENT_LOOP=ON",
    "-DENABLE_FTL_JIT=ON",
    "-G",
    "Ninja",
    `-DCMAKE_C_COMPILER=${CC}`,
    `-DCMAKE_CXX_COMPILER=${CXX}`,
  ];

  if (IS_MAC) {
    flags.push(
      "-DENABLE_SINGLE_THREADED_VM_ENTRY_SCOPE=ON",
      "-DBUN_FAST_TLS=ON",
      "-DPTHREAD_JIT_PERMISSIONS_API=1",
      "-DUSE_PTHREAD_JIT_PERMISSIONS_API=ON",
    );
  } else if (IS_LINUX) {
    flags.push(
      "-DJSEXPORT_PRIVATE=WTF_EXPORT_DECLARATION",
      "-DUSE_VISIBILITY_ATTRIBUTE=1",
      "-DENABLE_REMOTE_INSPECTOR=ON",
    );
  }

  return flags;
};

// Build-specific CMake flags
const getBuildFlags = (config: BuildConfig) => {
  const flags = [...getCommonFlags()];

  switch (config) {
    case "debug":
      flags.push(
        "-DCMAKE_BUILD_TYPE=Debug",
        "-DENABLE_BUN_SKIP_FAILING_ASSERTIONS=ON",
        "-DCMAKE_EXPORT_COMPILE_COMMANDS=ON",
        "-DENABLE_REMOTE_INSPECTOR=ON",
        "-DUSE_VISIBILITY_ATTRIBUTE=1",
      );

      if (IS_MAC) {
        // Enable address sanitizer by default on Mac debug builds
        flags.push("-DENABLE_SANITIZERS=address");
        // To disable asan, comment the line above and uncomment:
        // flags.push("-DENABLE_MALLOC_HEAP_BREAKDOWN=ON");
      }
      break;

    case "lto":
      flags.push("-DCMAKE_BUILD_TYPE=Release", "-DCMAKE_C_FLAGS=-flto=full", "-DCMAKE_CXX_FLAGS=-flto=full");
      break;

    default: // release
      flags.push("-DCMAKE_BUILD_TYPE=RelWithDebInfo");
      break;
  }

  return flags;
};

// Environment variables for the build
const getBuildEnv = () => {
  const env = { ...process.env };

  const cflags = ["-ffat-lto-objects"];
  const cxxflags = ["-ffat-lto-objects"];

  if (IS_LINUX && buildConfig !== "lto") {
    cflags.push("-Wl,--whole-archive");
    cxxflags.push("-Wl,--whole-archive", "-DUSE_BUN_JSC_ADDITIONS=ON", "-DUSE_BUN_EVENT_LOOP=ON");
  }

  env.CFLAGS = (env.CFLAGS || "") + " " + cflags.join(" ");
  env.CXXFLAGS = (env.CXXFLAGS || "") + " " + cxxflags.join(" ");

  if (IS_MAC) {
    env.ICU_INCLUDE_DIRS = `${HOMEBREW_PREFIX}opt/icu4c/include`;
  }

  return env;
};

// Run a command with proper error handling
function runCommand(command: string, args: string[], options: any = {}) {
  console.log(`Running: ${command} ${args.join(" ")}`);
  const result = spawnSync(command, args, {
    stdio: "inherit",
    ...options,
  });

  if (result.error) {
    console.error(`Failed to execute command: ${result.error.message}`);
    process.exit(1);
  }

  if (result.status !== 0) {
    console.error(`Command failed with exit code ${result.status}`);
    process.exit(result.status || 1);
  }
}

// Main build function
function buildJSC() {
  const buildDir = getBuildDir(buildConfig);
  const cmakeFlags = getBuildFlags(buildConfig);
  const env = getBuildEnv();

  console.log(`Building JSC with configuration: ${buildConfig}`);
  console.log(`Build directory: ${buildDir}`);

  // Create build directories
  if (!existsSync(buildDir)) {
    mkdirSync(buildDir, { recursive: true });
  }

  if (!existsSync(WEBKIT_DIR)) {
    mkdirSync(WEBKIT_DIR, { recursive: true });
  }

  // Configure with CMake
  console.log("\n📦 Configuring with CMake...");
  runCommand("cmake", [...cmakeFlags, WEBKIT_DIR, buildDir], {
    cwd: buildDir,
    env,
  });

  // Build with CMake
  console.log("\n🔨 Building JSC...");
  const buildType = buildConfig === "debug" ? "Debug" : buildConfig === "lto" ? "Release" : "RelWithDebInfo";

  runCommand("cmake", ["--build", buildDir, "--config", buildType, "--target", "jsc"], {
    cwd: buildDir,
    env,
  });

  console.log(`\n✅ JSC build completed successfully!`);
  console.log(`Build output: ${buildDir}`);
}

// Entry point
if (import.meta.main) {
  buildJSC();
}
@@ -1,176 +0,0 @@
#!/usr/bin/env bun

import { $ } from "bun";

interface ReleaseInfo {
  publishedAt: string;
  tag: string;
}

interface Issue {
  number: number;
  closedAt: string;
  stateReason: string;
}

interface Reaction {
  content: string;
}

interface Comment {
  id: number;
}

/**
 * Get release information for a given tag
 */
async function getReleaseInfo(tag: string): Promise<ReleaseInfo> {
  try {
    const result = await $`gh release view ${tag} --json publishedAt,tagName`.json();
    return {
      publishedAt: result.publishedAt,
      tag: result.tagName,
    };
  } catch (error) {
    throw new Error(`Failed to get release info for ${tag}: ${error}`);
  }
}

/**
 * Count issues closed as completed since a given date
 */
async function countCompletedIssues(sinceDate: string): Promise<{ count: number; issues: number[] }> {
  try {
    const result =
      (await $`gh issue list --state closed --search "closed:>=${sinceDate} reason:completed" --limit 1000 --json number,closedAt,stateReason`.json()) as Issue[];

    const completedIssues = result.filter(issue => issue.stateReason === "COMPLETED");

    return {
      count: completedIssues.length,
      issues: completedIssues.map(issue => issue.number),
    };
  } catch (error) {
    throw new Error(`Failed to count completed issues: ${error}`);
  }
}

/**
 * Get positive reactions for an issue (👍, ❤️, 🎉, 🚀)
 */
async function getIssueReactions(issueNumber: number): Promise<number> {
  try {
    const reactions = (await $`gh api "repos/oven-sh/bun/issues/${issueNumber}/reactions"`.json()) as Reaction[];
    return reactions.filter(r => ["+1", "heart", "hooray", "rocket"].includes(r.content)).length;
  } catch {
    return 0;
  }
}

/**
 * Get positive reactions for all comments on an issue
 */
async function getCommentReactions(issueNumber: number): Promise<number> {
  try {
    const comments = (await $`gh api "repos/oven-sh/bun/issues/${issueNumber}/comments"`.json()) as Comment[];

    let totalReactions = 0;
    for (const comment of comments) {
      try {
        const reactions =
          (await $`gh api "repos/oven-sh/bun/issues/comments/${comment.id}/reactions"`.json()) as Reaction[];
        totalReactions += reactions.filter(r => ["+1", "heart", "hooray", "rocket"].includes(r.content)).length;
      } catch {
        // Skip if we can't get reactions for this comment
      }
    }

    return totalReactions;
  } catch {
    return 0;
  }
}

/**
 * Count total positive reactions for issues and their comments
 */
async function countReactions(issueNumbers: number[], verbose = false): Promise<number> {
  let totalReactions = 0;

  for (const issueNumber of issueNumbers) {
    if (verbose) {
      console.log(`Processing issue #${issueNumber}...`);
    }

    const [issueReactions, commentReactions] = await Promise.all([
      getIssueReactions(issueNumber),
      getCommentReactions(issueNumber),
    ]);

    const issueTotal = issueReactions + commentReactions;
    totalReactions += issueTotal;

    if (verbose && issueTotal > 0) {
      console.log(
        `  Issue #${issueNumber}: ${issueReactions} issue + ${commentReactions} comment = ${issueTotal} total`,
      );
    }

    // Small delay to avoid rate limiting
    await Bun.sleep(50);
  }

  return totalReactions;
}

/**
 * Main function to collect GitHub metrics
 */
async function main() {
  const args = process.argv.slice(2);
  const releaseTag = args[0];
  const verbose = args.includes("--verbose") || args.includes("-v");

  if (!releaseTag) {
    console.error("Usage: bun run scripts/github-metrics.ts <release-tag> [--verbose]");
    console.error("Example: bun run scripts/github-metrics.ts bun-v1.2.19");
    process.exit(1);
  }

  try {
    console.log(`📊 Collecting GitHub metrics since ${releaseTag}...`);

    // Get release date
    const releaseInfo = await getReleaseInfo(releaseTag);
    const releaseDate = releaseInfo.publishedAt.split("T")[0]; // Extract date part

    if (verbose) {
      console.log(`📅 Release date: ${releaseDate}`);
    }

    // Count completed issues
    console.log("🔍 Counting completed issues...");
    const { count: issueCount, issues: issueNumbers } = await countCompletedIssues(releaseDate);

    // Count reactions
    console.log("👍 Counting positive reactions...");
    const reactionCount = await countReactions(issueNumbers, verbose);

    // Display results
    console.log("\n📈 Results:");
    console.log(`Issues closed as completed since ${releaseTag}: ${issueCount}`);
    console.log(`Total positive reactions (👍❤️🎉🚀): ${reactionCount}`);

    if (issueCount > 0) {
      console.log(`Average reactions per completed issue: ${(reactionCount / issueCount).toFixed(1)}`);
    }
  } catch (error) {
    console.error("❌ Error:", error.message);
    process.exit(1);
  }
}

// Run if this script is executed directly
if (import.meta.main) {
  main();
}
@@ -1,126 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

# Run clang-format on all C++ source and header files in the Bun project

# Get the directory where this script is located
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Get the project root directory (parent of scripts/)
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# Default to format mode (modify files)
MODE="${1:-format}"

# Use LLVM_VERSION_MAJOR from environment or default to 19
LLVM_VERSION="${LLVM_VERSION_MAJOR:-19}"

# Ensure we have the specific clang-format version
CLANG_FORMAT="clang-format-${LLVM_VERSION}"
if ! command -v "$CLANG_FORMAT" &> /dev/null; then
  echo "Error: $CLANG_FORMAT not found" >&2
  echo "Please install clang-format version $LLVM_VERSION" >&2
  exit 1
fi

cd "$PROJECT_ROOT"

# Array to hold all files to format
declare -a FILES_TO_FORMAT

# Find all header files in src/ and packages/, excluding third-party and generated code
echo "Finding header files..."
while IFS= read -r -d '' file; do
  # Additional filtering for specific files and patterns
  if [[ "$file" =~ src/bun\.js/api/ffi- ]] || \
     [[ "$file" =~ src/napi/ ]] || \
     [[ "$file" =~ src/bun\.js/bindings/libuv/ ]] || \
     [[ "$file" =~ src/bun\.js/bindings/sqlite/ ]] || \
     [[ "$file" =~ packages/bun-usockets/.*libuv ]] || \
     [[ "$file" =~ src/deps/ ]]; then
    continue
  fi
  FILES_TO_FORMAT+=("$file")
done < <(find src packages -type f \( -name "*.h" -o -name "*.hpp" \) \
  -not -path "*/vendor/*" \
  -not -path "*/third_party/*" \
  -not -path "*/thirdparty/*" \
  -not -path "*/generated/*" \
  -print0 2>/dev/null || true)

# Read C++ source files from CxxSources.txt
echo "Reading C++ source files from CxxSources.txt..."
if [ -f "cmake/sources/CxxSources.txt" ]; then
  while IFS= read -r file; do
    # Skip empty lines and comments
    if [[ -n "$file" && ! "$file" =~ ^[[:space:]]*# ]]; then
      # Check if file exists
      if [ -f "$file" ]; then
        FILES_TO_FORMAT+=("$file")
      fi
    fi
  done < "cmake/sources/CxxSources.txt"
else
  echo "Warning: cmake/sources/CxxSources.txt not found" >&2
fi

# Remove duplicates while preserving order
declare -a UNIQUE_FILES
declare -A seen
for file in "${FILES_TO_FORMAT[@]}"; do
  if [[ ! -v "seen[$file]" ]]; then
    seen["$file"]=1
    UNIQUE_FILES+=("$file")
  fi
done

echo "Processing ${#UNIQUE_FILES[@]} files..."

# Run clang-format based on mode
if [ "$MODE" = "check" ]; then
  # Check mode - verify formatting without modifying files
  FAILED=0
  for file in "${UNIQUE_FILES[@]}"; do
    # Find the nearest .clang-format file for this source file
    dir=$(dirname "$file")
    while [ "$dir" != "." ] && [ "$dir" != "/" ]; do
      if [ -f "$dir/.clang-format" ]; then
        break
      fi
      dir=$(dirname "$dir")
    done

    if ! $CLANG_FORMAT --dry-run --Werror "$file" 2>/dev/null; then
      echo "Format check failed: $file"
      FAILED=1
    fi
  done

  if [ $FAILED -eq 1 ]; then
    echo "Some files need formatting. Run 'bun run clang-format' to fix."
    exit 1
  else
    echo "All files are properly formatted."
  fi
elif [ "$MODE" = "format" ] || [ "$MODE" = "fix" ]; then
  # Format mode - modify files in place
  for file in "${UNIQUE_FILES[@]}"; do
    echo "Formatting: $file"
    $CLANG_FORMAT -i "$file"
  done
  echo "Formatting complete."
elif [ "$MODE" = "diff" ]; then
  # Diff mode - show what would change
  for file in "${UNIQUE_FILES[@]}"; do
    if ! $CLANG_FORMAT --dry-run --Werror "$file" 2>/dev/null; then
      echo "=== $file ==="
      diff -u "$file" <($CLANG_FORMAT "$file") || true
    fi
  done
else
  echo "Usage: $0 [check|format|fix|diff]" >&2
  echo "  check  - Check if files are formatted" >&2
  echo "  format - Format files in place (default)" >&2
  echo "  fix    - Same as format" >&2
  echo "  diff   - Show formatting differences" >&2
  exit 1
fi

@@ -82,10 +82,6 @@ function getNodeParallelTestTimeout(testPath) {
  return 10_000;
}

process.on("SIGTRAP", () => {
  console.warn("Test runner received SIGTRAP. Doing nothing.");
});

const { values: options, positionals: filters } = parseArgs({
  allowPositionals: true,
  options: {
@@ -182,37 +178,6 @@ if (options["quiet"]) {
  isQuiet = true;
}

let newFiles = [];
let prFileCount = 0;
if (isBuildkite) {
  try {
    console.log("on buildkite: collecting new files from PR");
    const per_page = 50;
    for (let i = 1; i <= 5; i++) {
      const res = await fetch(
        `https://api.github.com/repos/oven-sh/bun/pulls/${process.env.BUILDKITE_PULL_REQUEST}/files?per_page=${per_page}&page=${i}`,
        {
          headers: {
            Authorization: `Bearer ${getSecret("GITHUB_TOKEN")}`,
          },
        },
      );
      const doc = await res.json();
      console.log(`-> page ${i}, found ${doc.length} items`);
      if (doc.length === 0) break;
      if (doc.length < per_page) break;
      for (const { filename, status } of doc) {
        prFileCount += 1;
        if (status !== "added") continue;
        newFiles.push(filename);
      }
    }
    console.log(`- PR ${process.env.BUILDKITE_PULL_REQUEST}, ${prFileCount} files, ${newFiles.length} new files`);
  } catch (e) {
    console.error(e);
  }
}

let coresDir;

if (options["coredump-upload"]) {
@@ -455,7 +420,6 @@ async function runTests() {
        if (attempt >= maxAttempts || isAlwaysFailure(error)) {
          flaky = false;
          failedResults.push(failure);
          break;
        }
      }

@@ -565,7 +529,6 @@ async function runTests() {
    };
    if ((basename(execPath).includes("asan") || !isCI) && shouldValidateExceptions(testPath)) {
      env.BUN_JSC_validateExceptionChecks = "1";
      env.BUN_JSC_dumpSimulatedThrows = "1";
    }
    return runTest(title, async () => {
      const { ok, error, stdout, crashes } = await spawnBun(execPath, {
@@ -1289,7 +1252,6 @@ async function spawnBunTest(execPath, testPath, options = { cwd }) {
  };
  if ((basename(execPath).includes("asan") || !isCI) && shouldValidateExceptions(relative(cwd, absPath))) {
    env.BUN_JSC_validateExceptionChecks = "1";
    env.BUN_JSC_dumpSimulatedThrows = "1";
  }

  const { ok, error, stdout, crashes } = await spawnBun(execPath, {
@@ -2019,9 +1981,6 @@ function formatTestToMarkdown(result, concise, retries) {
    if (retries > 0) {
      markdown += ` (${retries} ${retries === 1 ? "retry" : "retries"})`;
    }
    if (newFiles.includes(testTitle)) {
      markdown += ` (new)`;
    }

    if (concise) {
      markdown += "</li>\n";
@@ -2227,7 +2186,6 @@ function isAlwaysFailure(error) {
    error.includes("illegal instruction") ||
    error.includes("sigtrap") ||
    error.includes("error: addresssanitizer") ||
    error.includes("internal assertion failure") ||
    error.includes("core dumped") ||
    error.includes("crash reported")
  );

@@ -14,7 +14,7 @@ const usage = String.raw`
/_____ \____/|__| |__| /_____ \__|__|_| / __/ \____/|__| |__| /____ >
\/ \/ \/|__| \/

Usage: bun scripts/sort-imports [options] <files...>
Usage: bun scripts/sortImports [options] <files...>

Options:
  --help        Show this help message
@@ -22,7 +22,7 @@ Options:
  --keep-unused Don't remove unused imports

Examples:
  bun scripts/sort-imports src
  bun scripts/sortImports src
`.slice(1);
if (args.includes("--help")) {
  console.log(usage);

@@ -47,7 +47,7 @@ fn createImportRecord(this: *HTMLScanner, input_path: []const u8, kind: ImportKi
    try this.import_records.push(this.allocator, record);
}

const debug = bun.Output.scoped(.HTMLScanner, .hidden);
const debug = bun.Output.scoped(.HTMLScanner, true);

pub fn onWriteHTML(_: *HTMLScanner, bytes: []const u8) void {
    _ = bytes; // bytes are not written in scan phase

@@ -6,7 +6,6 @@ pub const StandaloneModuleGraph = struct {
    bytes: []const u8 = "",
    files: bun.StringArrayHashMap(File),
    entry_point_id: u32 = 0,
    compile_exec_argv: []const u8 = "",

    // We never want to hit the filesystem for these files
    // We use the `/$bunfs/` prefix to indicate that it's a virtual path
@@ -55,7 +54,7 @@ pub const StandaloneModuleGraph = struct {

    // by normalized file path
    pub fn find(this: *const StandaloneModuleGraph, name: []const u8) ?*File {
        if (!isBunStandaloneFilePath(name)) {
        if (!isBunStandaloneFilePath(base_path)) {
            return null;
        }

@@ -280,7 +279,6 @@ pub const StandaloneModuleGraph = struct {
        byte_count: usize = 0,
        modules_ptr: bun.StringPointer = .{},
        entry_point_id: u32 = 0,
        compile_exec_argv_ptr: bun.StringPointer = .{},
    };

    const trailer = "\n---- Bun! ----\n";
@@ -325,7 +323,6 @@ pub const StandaloneModuleGraph = struct {
            .bytes = raw_bytes[0..offsets.byte_count],
            .files = modules,
            .entry_point_id = offsets.entry_point_id,
            .compile_exec_argv = sliceToZ(raw_bytes, offsets.compile_exec_argv_ptr),
        };
    }

@@ -341,14 +338,14 @@ pub const StandaloneModuleGraph = struct {
        return bytes[ptr.offset..][0..ptr.length :0];
    }

    pub fn toBytes(allocator: std.mem.Allocator, prefix: []const u8, output_files: []const bun.options.OutputFile, output_format: bun.options.Format, compile_exec_argv: []const u8) ![]u8 {
    pub fn toBytes(allocator: std.mem.Allocator, prefix: []const u8, output_files: []const bun.options.OutputFile, output_format: bun.options.Format) ![]u8 {
        var serialize_trace = bun.perf.trace("StandaloneModuleGraph.serialize");
        defer serialize_trace.end();

        var entry_point_id: ?usize = null;
        var string_builder = bun.StringBuilder{};
        var module_count: usize = 0;
        for (output_files) |*output_file| {
        for (output_files) |output_file| {
            string_builder.countZ(output_file.dest_path);
            string_builder.countZ(prefix);
            if (output_file.value == .buffer) {
@@ -382,7 +379,6 @@ pub const StandaloneModuleGraph = struct {
        string_builder.cap += trailer.len;
        string_builder.cap += 16;
        string_builder.cap += @sizeOf(Offsets);
        string_builder.countZ(compile_exec_argv);

        try string_builder.allocate(allocator);

@@ -395,7 +391,7 @@ pub const StandaloneModuleGraph = struct {
        var source_map_arena = bun.ArenaAllocator.init(allocator);
        defer source_map_arena.deinit();

        for (output_files) |*output_file| {
        for (output_files) |output_file| {
            if (!output_file.output_kind.isFileInStandaloneMode()) {
                continue;
            }
@@ -467,7 +463,6 @@ pub const StandaloneModuleGraph = struct {
        const offsets = Offsets{
            .entry_point_id = @as(u32, @truncate(entry_point_id.?)),
            .modules_ptr = string_builder.appendCount(std.mem.sliceAsBytes(modules.items)),
            .compile_exec_argv_ptr = string_builder.appendCountZ(compile_exec_argv),
            .byte_count = string_builder.len,
        };

@@ -496,21 +491,6 @@ pub const StandaloneModuleGraph = struct {
        windows_hide_console: bool = false,
    };

    pub const CompileResult = union(enum) {
        success: void,
        error_message: []const u8,

        pub fn fail(msg: []const u8) CompileResult {
            return .{ .error_message = msg };
        }

        pub fn deinit(this: *const @This()) void {
            if (this.* == .error_message) {
                bun.default_allocator.free(this.error_message);
            }
        }
    };

    pub fn inject(bytes: []const u8, self_exe: [:0]const u8, inject_options: InjectOptions, target: *const CompileTarget) bun.FileDescriptor {
        var buf: bun.PathBuffer = undefined;
        var zname: [:0]const u8 = bun.span(bun.fs.FileSystem.instance.tmpname("bun-build", &buf, @as(u64, @bitCast(std.time.milliTimestamp()))) catch |err| {
@@ -647,7 +627,6 @@ pub const StandaloneModuleGraph = struct {
                cleanup(zname, fd);
                Global.exit(1);
            };

            break :brk fd;
        };

@@ -837,43 +816,7 @@ pub const StandaloneModuleGraph = struct {
        var needs_download: bool = true;
        const dest_z = target.exePath(&exe_path_buf, version_str, env, &needs_download);
        if (needs_download) {
            target.downloadToPath(env, allocator, dest_z) catch |err| {
                // For CLI, provide detailed error messages and exit
                switch (err) {
                    error.TargetNotFound => {
                        Output.errGeneric(
                            \\Does this target and version of Bun exist?
                            \\
                            \\404 downloading {} from npm registry
                        , .{target.*});
                    },
                    error.NetworkError => {
                        Output.errGeneric(
                            \\Failed to download cross-compilation target.
                            \\
                            \\Network error downloading {} from npm registry
                        , .{target.*});
                    },
                    error.InvalidResponse => {
                        Output.errGeneric(
                            \\Failed to verify the integrity of the downloaded tarball.
                            \\
                            \\The downloaded content for {} appears to be corrupted
                        , .{target.*});
                    },
                    error.ExtractionFailed => {
                        Output.errGeneric(
                            \\Failed to extract the downloaded tarball.
                            \\
                            \\Could not extract executable for {}
                        , .{target.*});
                    },
                    else => {
                        Output.errGeneric("Failed to download {}: {s}", .{ target.*, @errorName(err) });
                    },
                }
                Global.exit(1);
            };
            try target.downloadToPath(env, allocator, dest_z);
        }

        return try allocator.dupeZ(u8, dest_z);
@@ -890,68 +833,27 @@ pub const StandaloneModuleGraph = struct {
        output_format: bun.options.Format,
        windows_hide_console: bool,
        windows_icon: ?[]const u8,
        compile_exec_argv: []const u8,
        self_exe_path: ?[]const u8,
    ) !CompileResult {
        const bytes = toBytes(allocator, module_prefix, output_files, output_format, compile_exec_argv) catch |err| {
            return CompileResult.fail(std.fmt.allocPrint(allocator, "failed to generate module graph bytes: {s}", .{@errorName(err)}) catch "failed to generate module graph bytes");
        };
        if (bytes.len == 0) return CompileResult.fail("no output files to bundle");
        defer allocator.free(bytes);
    ) !void {
        const bytes = try toBytes(allocator, module_prefix, output_files, output_format);
        if (bytes.len == 0) return;

        var free_self_exe = false;
        const self_exe = if (self_exe_path) |path| brk: {
            free_self_exe = true;
            break :brk allocator.dupeZ(u8, path) catch bun.outOfMemory();
        } else if (target.isDefault())
            bun.selfExePath() catch |err| {
                return CompileResult.fail(std.fmt.allocPrint(allocator, "failed to get self executable path: {s}", .{@errorName(err)}) catch "failed to get self executable path");
            }
        else blk: {
            var exe_path_buf: bun.PathBuffer = undefined;
            var version_str_buf: [1024]u8 = undefined;
            const version_str = std.fmt.bufPrintZ(&version_str_buf, "{}", .{target}) catch {
                return CompileResult.fail("failed to format target version string");
            };
            var needs_download: bool = true;
            const dest_z = target.exePath(&exe_path_buf, version_str, env, &needs_download);

            if (needs_download) {
                target.downloadToPath(env, allocator, dest_z) catch |err| {
                    const msg = switch (err) {
                        error.TargetNotFound => std.fmt.allocPrint(allocator, "Target platform '{}' is not available for download. Check if this version of Bun supports this target.", .{target}) catch "Target platform not available for download",
                        error.NetworkError => std.fmt.allocPrint(allocator, "Network error downloading executable for '{}'. Check your internet connection and proxy settings.", .{target}) catch "Network error downloading executable",
                        error.InvalidResponse => std.fmt.allocPrint(allocator, "Downloaded file for '{}' appears to be corrupted. Please try again.", .{target}) catch "Downloaded file is corrupted",
                        error.ExtractionFailed => std.fmt.allocPrint(allocator, "Failed to extract executable for '{}'. The download may be incomplete.", .{target}) catch "Failed to extract downloaded executable",
                        error.UnsupportedTarget => std.fmt.allocPrint(allocator, "Target '{}' is not supported", .{target}) catch "Unsupported target",
                        else => std.fmt.allocPrint(allocator, "Failed to download '{}': {s}", .{ target, @errorName(err) }) catch "Download failed",
                    };
                    return CompileResult.fail(msg);
                };
            }

            free_self_exe = true;
            break :blk allocator.dupeZ(u8, dest_z) catch bun.outOfMemory();
        };

        defer if (free_self_exe) {
            allocator.free(self_exe);
        };

        var fd = inject(
        const fd = inject(
            bytes,
            self_exe,
            if (target.isDefault())
                bun.selfExePath() catch |err| {
                    Output.err(err, "failed to get self executable path", .{});
                    Global.exit(1);
                }
            else
                download(allocator, target, env) catch |err| {
                    Output.err(err, "failed to download cross-compiled bun executable", .{});
                    Global.exit(1);
                },
            .{ .windows_hide_console = windows_hide_console },
            target,
        );
        defer if (fd != bun.invalid_fd) fd.close();
        bun.debugAssert(fd.kind == .system);

        if (Environment.isPosix) {
            // Set executable permissions (0o755 = rwxr-xr-x) - makes it executable for owner, readable/executable for group and others
            _ = Syscall.fchmod(fd, 0o755);
        }

        if (Environment.isWindows) {
            var outfile_buf: bun.OSPathBuffer = undefined;
            const outfile_slice = brk: {
@@ -963,59 +865,52 @@ pub const StandaloneModuleGraph = struct {
            };

            bun.windows.moveOpenedFileAtLoose(fd, .fromStdDir(root_dir), outfile_slice, true).unwrap() catch |err| {
                _ = bun.windows.deleteOpenedFile(fd);
                if (err == error.EISDIR) {
                    return CompileResult.fail(std.fmt.allocPrint(allocator, "{s} is a directory. Please choose a different --outfile or delete the directory", .{outfile}) catch "outfile is a directory");
                    Output.errGeneric("{} is a directory. Please choose a different --outfile or delete the directory", .{bun.fmt.utf16(outfile_slice)});
                } else {
                    return CompileResult.fail(std.fmt.allocPrint(allocator, "failed to move executable to result path: {s}", .{@errorName(err)}) catch "failed to move executable");
                    Output.err(err, "failed to move executable to result path", .{});
                }
            };

            _ = bun.windows.deleteOpenedFile(fd);

                Global.exit(1);
            };
            fd.close();
            fd = bun.invalid_fd;

            if (windows_icon) |icon_utf8| {
                var icon_buf: bun.OSPathBuffer = undefined;
                const icon = bun.strings.toWPathNormalized(&icon_buf, icon_utf8);
                bun.windows.rescle.setIcon(outfile_slice, icon) catch |err| {
                    Output.debug("Warning: Failed to set Windows icon for executable: {s}", .{@errorName(err)});
                bun.windows.rescle.setIcon(outfile_slice, icon) catch {
                    Output.warn("Failed to set executable icon", .{});
                };
            }
            return .success;
            return;
        }

        var buf: bun.PathBuffer = undefined;
        const temp_location = bun.getFdPath(fd, &buf) catch |err| {
            return CompileResult.fail(std.fmt.allocPrint(allocator, "failed to get path for fd: {s}", .{@errorName(err)}) catch "failed to get path for file descriptor");
        };
        const temp_posix = std.posix.toPosixPath(temp_location) catch |err| {
            return CompileResult.fail(std.fmt.allocPrint(allocator, "path too long: {s}", .{@errorName(err)}) catch "path too long");
        };
        const outfile_basename = std.fs.path.basename(outfile);
        const outfile_posix = std.posix.toPosixPath(outfile_basename) catch |err| {
            return CompileResult.fail(std.fmt.allocPrint(allocator, "outfile name too long: {s}", .{@errorName(err)}) catch "outfile name too long");
            Output.prettyErrorln("<r><red>error<r><d>:<r> failed to get path for fd: {s}", .{@errorName(err)});
            Global.exit(1);
        };

        bun.sys.moveFileZWithHandle(
            fd,
            bun.FD.cwd(),
            bun.sliceTo(&temp_posix, 0),
            bun.sliceTo(&(try std.posix.toPosixPath(temp_location)), 0),
            .fromStdDir(root_dir),
            bun.sliceTo(&outfile_posix, 0),
            bun.sliceTo(&(try std.posix.toPosixPath(std.fs.path.basename(outfile))), 0),
        ) catch |err| {
            fd.close();
            fd = bun.invalid_fd;

            _ = Syscall.unlink(&temp_posix);

            if (err == error.IsDir or err == error.EISDIR) {
                return CompileResult.fail(std.fmt.allocPrint(allocator, "{s} is a directory. Please choose a different --outfile or delete the directory", .{outfile}) catch "outfile is a directory");
                Output.prettyErrorln("<r><red>error<r><d>:<r> {} is a directory. Please choose a different --outfile or delete the directory", .{bun.fmt.quote(outfile)});
            } else {
                return CompileResult.fail(std.fmt.allocPrint(allocator, "failed to rename {s} to {s}: {s}", .{ temp_location, outfile, @errorName(err) }) catch "failed to rename file");
                Output.prettyErrorln("<r><red>error<r><d>:<r> failed to rename {s} to {s}: {s}", .{ temp_location, outfile, @errorName(err) });
            }
        };
        _ = Syscall.unlink(
            &(try std.posix.toPosixPath(temp_location)),
        );

        return .success;
            Global.exit(1);
        };
    }

    pub fn fromExecutable(allocator: std.mem.Allocator) !?StandaloneModuleGraph {

@@ -2,7 +2,7 @@

const Watcher = @This();

const DebugLogScope = bun.Output.Scoped(.watcher, .visible);
const DebugLogScope = bun.Output.Scoped(.watcher, false);
const log = DebugLogScope.log;

// This will always be [max_count]WatchEvent,
@@ -32,7 +32,7 @@ ctx: *anyopaque,
onFileUpdate: *const fn (this: *anyopaque, events: []WatchEvent, changed_files: []?[:0]u8, watchlist: WatchList) void,
onError: *const fn (this: *anyopaque, err: bun.sys.Error) void,

thread_lock: bun.safety.ThreadLock = .initUnlocked(),
thread_lock: bun.DebugThreadLock = bun.DebugThreadLock.unlocked,

pub const max_count = 128;
pub const requires_file_descriptors = switch (Environment.os) {

@@ -1,6 +1,5 @@
pub const c_allocator = basic.c_allocator;
pub const z_allocator = basic.z_allocator;
pub const freeWithoutSize = basic.freeWithoutSize;
pub const c_allocator = @import("./allocators/basic.zig").c_allocator;
pub const z_allocator = @import("./allocators/basic.zig").z_allocator;
pub const mimalloc = @import("./allocators/mimalloc.zig");
pub const MimallocArena = @import("./allocators/MimallocArena.zig");
pub const AllocationScope = @import("./allocators/AllocationScope.zig");
@@ -226,6 +225,7 @@ pub fn BSSList(comptime ValueType: type, comptime _count: anytype) type {
        }
    };

    const Allocator = std.mem.Allocator;
    const Self = @This();

    allocator: Allocator,
@@ -311,6 +311,7 @@ pub fn BSSStringList(comptime _count: usize, comptime _item_length: usize) type

    return struct {
        pub const Overflow = OverflowList([]const u8, count / 4);
        const Allocator = std.mem.Allocator;
        const Self = @This();

        backing_buf: [count * item_length]u8,
@@ -494,6 +495,7 @@ pub fn BSSStringList(comptime _count: usize, comptime _item_length: usize) type
pub fn BSSMap(comptime ValueType: type, comptime count: anytype, comptime store_keys: bool, comptime estimated_key_length: usize, comptime remove_trailing_slashes: bool) type {
    const max_index = count - 1;
    const BSSMapType = struct {
        const Allocator = std.mem.Allocator;
        const Self = @This();
        const Overflow = OverflowList(ValueType, count / 4);

@@ -770,44 +772,8 @@ pub fn BSSMap(comptime ValueType: type, comptime count: anytype, comptime store_
    };
}

pub fn isDefault(allocator: Allocator) bool {
    return allocator.vtable == c_allocator.vtable;
}

/// Allocate memory for a value of type `T` using the provided allocator, and initialize the memory
/// with `value`.
///
/// If `allocator` is `bun.default_allocator`, this will internally use `bun.tryNew` to benefit from
/// the added assertions.
pub fn create(comptime T: type, allocator: Allocator, value: T) OOM!*T {
    if ((comptime Environment.allow_assert) and isDefault(allocator)) {
        return bun.tryNew(T, value);
    }
    const ptr = try allocator.create(T);
    ptr.* = value;
    return ptr;
}

/// Free memory previously allocated by `create`.
///
/// The memory must have been allocated by the `create` function in this namespace, not
/// directly by `allocator.create`.
pub fn destroy(allocator: Allocator, ptr: anytype) void {
    if ((comptime Environment.allow_assert) and isDefault(allocator)) {
        bun.destroy(ptr);
    } else {
        allocator.destroy(ptr);
    }
}

const basic = if (bun.use_mimalloc)
    @import("./allocators/basic.zig")
else
    @import("./allocators/fallback.zig");

const Environment = @import("./env.zig");
const std = @import("std");
const Allocator = std.mem.Allocator;

const bun = @import("bun");
const OOM = bun.OOM;

@@ -1,6 +1,6 @@
const MemoryReportingAllocator = @This();

const log = bun.Output.scoped(.MEM, .visible);
const log = bun.Output.scoped(.MEM, false);

child_allocator: std.mem.Allocator,
memory_cost: std.atomic.Value(usize) = std.atomic.Value(usize).init(0),

@@ -1,58 +1,29 @@
const Self = @This();

heap: HeapPtr,
heap: ?*mimalloc.Heap = null,

const HeapPtr = if (safety_checks) *DebugHeap else *mimalloc.Heap;

const DebugHeap = struct {
    inner: *mimalloc.Heap,
    thread_lock: bun.safety.ThreadLock,
};

fn getMimallocHeap(self: Self) *mimalloc.Heap {
    return if (comptime safety_checks) self.heap.inner else self.heap;
}

fn fromOpaque(ptr: *anyopaque) Self {
    return .{ .heap = bun.cast(HeapPtr, ptr) };
}

fn assertThreadLock(self: Self) void {
    if (comptime safety_checks) self.heap.thread_lock.assertLocked();
}

threadlocal var thread_heap: if (safety_checks) ?DebugHeap else void = if (safety_checks) null;

fn getThreadHeap() HeapPtr {
    if (comptime !safety_checks) return mimalloc.mi_heap_get_default();
    if (thread_heap == null) {
        thread_heap = .{
            .inner = mimalloc.mi_heap_get_default(),
            .thread_lock = .initLocked(),
        };
    }
    return &thread_heap.?;
}

const log = bun.Output.scoped(.mimalloc, .hidden);
const log = bun.Output.scoped(.mimalloc, true);

/// Internally, mimalloc calls mi_heap_get_default()
/// to get the default heap.
/// It uses pthread_getspecific to do that.
/// We can save those extra calls if we just do it once in here
pub fn getThreadLocalDefault() Allocator {
    return Allocator{ .ptr = getThreadHeap(), .vtable = &c_allocator_vtable };
pub fn getThreadlocalDefault() Allocator {
    return Allocator{ .ptr = mimalloc.mi_heap_get_default(), .vtable = &c_allocator_vtable };
}

pub fn backingAllocator(_: Self) Allocator {
    return getThreadLocalDefault();
pub fn backingAllocator(self: Self) Allocator {
    var arena = Self{ .heap = self.heap.?.backing() };
    return arena.allocator();
}

pub fn allocator(self: Self) Allocator {
    return Allocator{ .ptr = self.heap, .vtable = &c_allocator_vtable };
    @setRuntimeSafety(false);
    return Allocator{ .ptr = self.heap.?, .vtable = &c_allocator_vtable };
}

pub fn dumpThreadStats(_: *Self) void {
pub fn dumpThreadStats(self: *Self) void {
    _ = self;
    const dump_fn = struct {
        pub fn dump(textZ: [*:0]const u8, _: ?*anyopaque) callconv(.C) void {
            const text = bun.span(textZ);
@@ -63,7 +34,8 @@ pub fn dumpThreadStats(_: *Self) void {
    bun.Output.flush();
}

pub fn dumpStats(_: *Self) void {
pub fn dumpStats(self: *Self) void {
    _ = self;
    const dump_fn = struct {
        pub fn dump(textZ: [*:0]const u8, _: ?*anyopaque) callconv(.C) void {
            const text = bun.span(textZ);
@@ -75,51 +47,37 @@ pub fn dumpStats(_: *Self) void {
}

pub fn deinit(self: *Self) void {
    const mimalloc_heap = self.getMimallocHeap();
    if (comptime safety_checks) {
        bun.destroy(self.heap);
    }
    mimalloc.mi_heap_destroy(mimalloc_heap);
    self.* = undefined;
    mimalloc.mi_heap_destroy(bun.take(&self.heap).?);
}

pub fn init() Self {
    const mimalloc_heap = mimalloc.mi_heap_new() orelse bun.outOfMemory();
    const heap = if (comptime safety_checks)
        bun.new(DebugHeap, .{
            .inner = mimalloc_heap,
            .thread_lock = .initLocked(),
        })
    else
        mimalloc_heap;
    return .{ .heap = heap };
pub fn init() !Self {
    return .{ .heap = mimalloc.mi_heap_new() orelse return error.OutOfMemory };
}

pub fn gc(self: Self) void {
    mimalloc.mi_heap_collect(self.getMimallocHeap(), false);
    mimalloc.mi_heap_collect(self.heap orelse return, false);
}

pub inline fn helpCatchMemoryIssues(self: Self) void {
    if (comptime bun.FeatureFlags.help_catch_memory_issues) {
    if (comptime FeatureFlags.help_catch_memory_issues) {
        self.gc();
        bun.mimalloc.mi_collect(false);
    }
}

pub fn ownsPtr(self: Self, ptr: *const anyopaque) bool {
    return mimalloc.mi_heap_check_owned(self.getMimallocHeap(), ptr);
    return mimalloc.mi_heap_check_owned(self.heap.?, ptr);
}
pub const supports_posix_memalign = true;

fn alignedAlloc(self: Self, len: usize, alignment: Alignment) ?[*]u8 {
fn alignedAlloc(heap: *mimalloc.Heap, len: usize, alignment: mem.Alignment) ?[*]u8 {
    log("Malloc: {d}\n", .{len});

    const heap = self.getMimallocHeap();
    const ptr: ?*anyopaque = if (mimalloc.mustUseAlignedAlloc(alignment))
        mimalloc.mi_heap_malloc_aligned(heap, len, alignment.toByteUnits())
    else
        mimalloc.mi_heap_malloc(heap, len);

    if (comptime bun.Environment.isDebug) {
    if (comptime Environment.isDebug) {
        const usable = mimalloc.mi_malloc_usable_size(ptr);
        if (usable < len) {
            std.debug.panic("mimalloc: allocated size is too small: {d} < {d}", .{ usable, len });
@@ -136,28 +94,30 @@ fn alignedAllocSize(ptr: [*]u8) usize {
    return mimalloc.mi_malloc_usable_size(ptr);
}

fn alloc(ptr: *anyopaque, len: usize, alignment: Alignment, _: usize) ?[*]u8 {
    const self = fromOpaque(ptr);
    self.assertThreadLock();
    return alignedAlloc(self, len, alignment);
fn alloc(arena: *anyopaque, len: usize, alignment: mem.Alignment, _: usize) ?[*]u8 {
    const self = bun.cast(*mimalloc.Heap, arena);

    return alignedAlloc(
        self,
        len,
        alignment,
    );
}

fn resize(ptr: *anyopaque, buf: []u8, _: Alignment, new_len: usize, _: usize) bool {
    const self = fromOpaque(ptr);
    self.assertThreadLock();
fn resize(_: *anyopaque, buf: []u8, _: mem.Alignment, new_len: usize, _: usize) bool {
    return mimalloc.mi_expand(buf.ptr, new_len) != null;
}

fn free(
    _: *anyopaque,
    buf: []u8,
    alignment: Alignment,
    alignment: mem.Alignment,
    _: usize,
) void {
    // mi_free_size internally just asserts the size
    // so it's faster if we don't pass that value through
    // but it's good to have that assertion
    if (comptime bun.Environment.isDebug) {
    if (comptime Environment.isDebug) {
        assert(mimalloc.mi_is_in_heap_region(buf.ptr));
        if (mimalloc.mustUseAlignedAlloc(alignment))
            mimalloc.mi_free_size_aligned(buf.ptr, buf.len, alignment.toByteUnits())
@@ -187,12 +147,9 @@ fn free(
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
fn remap(ptr: *anyopaque, buf: []u8, alignment: Alignment, new_len: usize, _: usize) ?[*]u8 {
    const self = fromOpaque(ptr);
    self.assertThreadLock();
    const heap = self.getMimallocHeap();
fn remap(self: *anyopaque, buf: []u8, alignment: mem.Alignment, new_len: usize, _: usize) ?[*]u8 {
    const aligned_size = alignment.toByteUnits();
    const value = mimalloc.mi_heap_realloc_aligned(heap, buf.ptr, new_len, aligned_size);
    const value = mimalloc.mi_heap_realloc_aligned(@ptrCast(self), buf.ptr, new_len, aligned_size);
    return @ptrCast(value);
}

@@ -207,12 +164,13 @@ const c_allocator_vtable = Allocator.VTable{
    .free = &Self.free,
};

const Environment = @import("../env.zig");
const FeatureFlags = @import("../feature_flags.zig");
const std = @import("std");

const bun = @import("bun");
const assert = bun.assert;
const mimalloc = bun.mimalloc;
const safety_checks = bun.Environment.ci_assert;

const Alignment = std.mem.Alignment;
const Allocator = std.mem.Allocator;
const mem = std.mem;
const Allocator = mem.Allocator;

@@ -1,9 +1,9 @@
const log = bun.Output.scoped(.mimalloc, .hidden);
const log = bun.Output.scoped(.mimalloc, true);

fn mimalloc_free(
    _: *anyopaque,
    buf: []u8,
    alignment: Alignment,
    alignment: mem.Alignment,
    _: usize,
) void {
    if (comptime Environment.enable_logs)
@@ -23,7 +23,8 @@ fn mimalloc_free(
}

const MimallocAllocator = struct {
    fn alignedAlloc(len: usize, alignment: Alignment) ?[*]u8 {
    pub const supports_posix_memalign = true;
    fn alignedAlloc(len: usize, alignment: mem.Alignment) ?[*]u8 {
        if (comptime Environment.enable_logs)
            log("mi_alloc({d}, {d})", .{ len, alignment.toByteUnits() });

@@ -48,15 +49,15 @@ const MimallocAllocator = struct {
        return mimalloc.mi_malloc_size(ptr);
    }

    fn alloc_with_default_allocator(_: *anyopaque, len: usize, alignment: Alignment, _: usize) ?[*]u8 {
    fn alloc_with_default_allocator(_: *anyopaque, len: usize, alignment: mem.Alignment, _: usize) ?[*]u8 {
        return alignedAlloc(len, alignment);
    }

    fn resize_with_default_allocator(_: *anyopaque, buf: []u8, _: Alignment, new_len: usize, _: usize) bool {
    fn resize_with_default_allocator(_: *anyopaque, buf: []u8, _: mem.Alignment, new_len: usize, _: usize) bool {
        return mimalloc.mi_expand(buf.ptr, new_len) != null;
    }

    fn remap_with_default_allocator(_: *anyopaque, buf: []u8, alignment: Alignment, new_len: usize, _: usize) ?[*]u8 {
    fn remap_with_default_allocator(_: *anyopaque, buf: []u8, alignment: mem.Alignment, new_len: usize, _: usize) ?[*]u8 {
        return @ptrCast(mimalloc.mi_realloc_aligned(buf.ptr, new_len, alignment.toByteUnits()));
    }

@@ -76,7 +77,9 @@ const c_allocator_vtable = &Allocator.VTable{
};

const ZAllocator = struct {
    fn alignedAlloc(len: usize, alignment: Alignment) ?[*]u8 {
    pub const supports_posix_memalign = true;

    fn alignedAlloc(len: usize, alignment: mem.Alignment) ?[*]u8 {
        log("ZAllocator.alignedAlloc: {d}\n", .{len});

        const ptr = if (mimalloc.mustUseAlignedAlloc(alignment))
@@ -100,11 +103,11 @@ const ZAllocator = struct {
        return mimalloc.mi_malloc_size(ptr);
    }

    fn alloc_with_z_allocator(_: *anyopaque, len: usize, alignment: Alignment, _: usize) ?[*]u8 {
    fn alloc_with_z_allocator(_: *anyopaque, len: usize, alignment: mem.Alignment, _: usize) ?[*]u8 {
        return alignedAlloc(len, alignment);
    }

    fn resize_with_z_allocator(_: *anyopaque, buf: []u8, _: Alignment, new_len: usize, _: usize) bool {
    fn resize_with_z_allocator(_: *anyopaque, buf: []u8, _: mem.Alignment, new_len: usize, _: usize) bool {
        if (new_len <= buf.len) {
            return true;
        }
@@ -135,20 +138,15 @@ pub const z_allocator = Allocator{
const z_allocator_vtable = Allocator.VTable{
    .alloc = &ZAllocator.alloc_with_z_allocator,
    .resize = &ZAllocator.resize_with_z_allocator,
    .remap = &Allocator.noRemap,
    .remap = &std.mem.Allocator.noRemap,
    .free = &ZAllocator.free_with_z_allocator,
};

/// mimalloc can free allocations without being given their size.
pub fn freeWithoutSize(ptr: ?*anyopaque) void {
    mimalloc.mi_free(ptr);
}

const Environment = @import("../env.zig");
const std = @import("std");

const bun = @import("bun");
const mimalloc = bun.mimalloc;

const Alignment = std.mem.Alignment;
const Allocator = std.mem.Allocator;
const mem = @import("std").mem;
const Allocator = mem.Allocator;

@@ -1,9 +0,0 @@
pub const c_allocator = std.heap.c_allocator;
pub const z_allocator = @import("./fallback/z.zig").allocator;

/// libc can free allocations without being given their size.
pub fn freeWithoutSize(ptr: ?*anyopaque) void {
    std.c.free(ptr);
}

const std = @import("std");
@@ -1,43 +0,0 @@
/// A fallback zero-initializing allocator.
pub const allocator = Allocator{
    .ptr = undefined,
    .vtable = &vtable,
};

const vtable = Allocator.VTable{
    .alloc = alloc,
    .resize = resize,
    .remap = Allocator.noRemap, // the mimalloc z_allocator doesn't support remap
    .free = free,
};

fn alloc(_: *anyopaque, len: usize, alignment: Alignment, return_address: usize) ?[*]u8 {
    const result = c_allocator.rawAlloc(len, alignment, return_address) orelse
        return null;
    @memset(result[0..len], 0);
    return result;
}

fn resize(
    _: *anyopaque,
    buf: []u8,
    alignment: Alignment,
    new_len: usize,
    return_address: usize,
) bool {
    if (!c_allocator.rawResize(buf, alignment, new_len, return_address)) {
        return false;
    }
    @memset(buf.ptr[buf.len..new_len], 0);
    return true;
}

fn free(_: *anyopaque, buf: []u8, alignment: Alignment, return_address: usize) void {
    c_allocator.rawFree(buf, alignment, return_address);
}

const std = @import("std");
const c_allocator = std.heap.c_allocator;

const Alignment = std.mem.Alignment;
const Allocator = std.mem.Allocator;
@@ -52,6 +52,10 @@ pub const Heap = opaque {
        return mi_heap_malloc(self, size);
    }

    pub fn backing(_: *Heap) *Heap {
        return mi_heap_get_default();
    }

    pub fn calloc(self: *Heap, count: usize, size: usize) ?*anyopaque {
        return mi_heap_calloc(self, count, size);
    }

@@ -111,7 +111,6 @@ pub const Features = struct {
    pub var csrf_generate: usize = 0;
    pub var unsupported_uv_function: usize = 0;
    pub var exited: usize = 0;
    pub var yarn_migration: usize = 0;

    comptime {
        @export(&napi_module_register, .{ .name = "Bun__napi_module_register_count" });

@@ -1,526 +0,0 @@
last_part: *js_ast.Part,
// files in node modules will not get hot updates, so the code generation
// can be a bit more concise for re-exports
is_in_node_modules: bool,
imports_seen: bun.StringArrayHashMapUnmanaged(ImportRef) = .{},
export_star_props: std.ArrayListUnmanaged(G.Property) = .{},
export_props: std.ArrayListUnmanaged(G.Property) = .{},
stmts: std.ArrayListUnmanaged(Stmt) = .{},

const ImportRef = struct {
    /// Index into ConvertESMExportsForHmr.stmts
    stmt_index: u32,
};

pub fn convertStmt(ctx: *ConvertESMExportsForHmr, p: anytype, stmt: Stmt) !void {
    const new_stmt = switch (stmt.data) {
        else => brk: {
            break :brk stmt;
        },
        .s_local => |st| stmt: {
            if (!st.is_export) {
                break :stmt stmt;
            }

            st.is_export = false;

            var new_len: usize = 0;
            for (st.decls.slice()) |*decl_ptr| {
                const decl = decl_ptr.*; // explicit copy to avoid aliasing
                const value = decl.value orelse {
                    st.decls.mut(new_len).* = decl;
                    new_len += 1;
                    try ctx.visitBindingToExport(p, decl.binding);
                    continue;
                };

                switch (decl.binding.data) {
                    .b_missing => {},

                    .b_identifier => |id| {
                        const symbol = p.symbols.items[id.ref.inner_index];

                        // if the symbol is not used, we don't need to preserve
                        // a binding in this scope. we can move it to the exports object.
                        if (symbol.use_count_estimate == 0 and value.canBeMoved()) {
                            try ctx.export_props.append(p.allocator, .{
                                .key = Expr.init(E.String, .{ .data = symbol.original_name }, decl.binding.loc),
                                .value = value,
                            });
                        } else {
                            st.decls.mut(new_len).* = decl;
                            new_len += 1;
                            try ctx.visitBindingToExport(p, decl.binding);
                        }
                    },

                    else => {
                        st.decls.mut(new_len).* = decl;
                        new_len += 1;
                        try ctx.visitBindingToExport(p, decl.binding);
                    },
                }
            }
            if (new_len == 0) {
                return;
            }
            st.decls.len = @intCast(new_len);

            break :stmt stmt;
        },
        .s_export_default => |st| stmt: {
            // When React Fast Refresh needs to tag the default export, the statement
            // cannot be moved, since a local reference is required.
            if (p.options.features.react_fast_refresh and
                st.value == .stmt and st.value.stmt.data == .s_function)
            fast_refresh_edge_case: {
                const symbol = st.value.stmt.data.s_function.func.name orelse
                    break :fast_refresh_edge_case;
                const name = p.symbols.items[symbol.ref.?.inner_index].original_name;
                if (ReactRefresh.isComponentishName(name)) {
                    // Lower to a function statement, and reference the function in the export list.
                    try ctx.export_props.append(p.allocator, .{
                        .key = Expr.init(E.String, .{ .data = "default" }, stmt.loc),
                        .value = Expr.initIdentifier(symbol.ref.?, stmt.loc),
                    });
                    break :stmt st.value.stmt;
                }
                // All other functions can be properly moved.
            }

            // Try to move the export default expression to the end.
            const can_be_moved_to_inner_scope = switch (st.value) {
                .stmt => |s| switch (s.data) {
                    .s_class => |c| c.class.canBeMoved() and (if (c.class.class_name) |name|
                        p.symbols.items[name.ref.?.inner_index].use_count_estimate == 0
                    else
                        true),
                    .s_function => |f| if (f.func.name) |name|
                        p.symbols.items[name.ref.?.inner_index].use_count_estimate == 0
                    else
                        true,
                    else => unreachable,
                },
                .expr => |e| switch (e.data) {
                    .e_identifier => true,
                    else => e.canBeMoved(),
                },
            };
            if (can_be_moved_to_inner_scope) {
                try ctx.export_props.append(p.allocator, .{
                    .key = Expr.init(E.String, .{ .data = "default" }, stmt.loc),
                    .value = st.value.toExpr(),
                });
                // no statement emitted
                return;
            }

            // Otherwise, an identifier must be exported
            switch (st.value) {
                .expr => {
                    const temp_id = p.generateTempRef("default_export");
                    try ctx.last_part.declared_symbols.append(p.allocator, .{ .ref = temp_id, .is_top_level = true });
                    try ctx.last_part.symbol_uses.putNoClobber(p.allocator, temp_id, .{ .count_estimate = 1 });
                    try p.current_scope.generated.push(p.allocator, temp_id);

                    try ctx.export_props.append(p.allocator, .{
                        .key = Expr.init(E.String, .{ .data = "default" }, stmt.loc),
                        .value = Expr.initIdentifier(temp_id, stmt.loc),
                    });

                    break :stmt Stmt.alloc(S.Local, .{
                        .kind = .k_const,
                        .decls = try G.Decl.List.fromSlice(p.allocator, &.{
                            .{
                                .binding = Binding.alloc(p.allocator, B.Identifier{ .ref = temp_id }, stmt.loc),
                                .value = st.value.toExpr(),
                            },
                        }),
                    }, stmt.loc);
                },
                .stmt => |s| {
                    try ctx.export_props.append(p.allocator, .{
                        .key = Expr.init(E.String, .{ .data = "default" }, stmt.loc),
                        .value = Expr.initIdentifier(switch (s.data) {
                            .s_class => |class| class.class.class_name.?.ref.?,
                            .s_function => |func| func.func.name.?.ref.?,
                            else => unreachable,
                        }, stmt.loc),
                    });
                    break :stmt s;
                },
            }
        },
        .s_class => |st| stmt: {

            // Strip the "export" keyword
            if (!st.is_export) {
                break :stmt stmt;
            }

            // Export as CommonJS
            try ctx.export_props.append(p.allocator, .{
                .key = Expr.init(E.String, .{
                    .data = p.symbols.items[st.class.class_name.?.ref.?.inner_index].original_name,
                }, stmt.loc),
                .value = Expr.initIdentifier(st.class.class_name.?.ref.?, stmt.loc),
            });

            st.is_export = false;

            break :stmt stmt;
        },
        .s_function => |st| stmt: {
            // Strip the "export" keyword
            if (!st.func.flags.contains(.is_export)) break :stmt stmt;

            st.func.flags.remove(.is_export);

            try ctx.visitRefToExport(
                p,
                st.func.name.?.ref.?,
                null,
                stmt.loc,
                false,
            );

            break :stmt stmt;
        },
        .s_export_clause => |st| {
            for (st.items) |item| {
                const ref = item.name.ref.?;
                try ctx.visitRefToExport(p, ref, item.alias, item.name.loc, false);
            }

            return; // do not emit a statement here
        },
        .s_export_from => |st| {
            const namespace_ref = try ctx.deduplicatedImport(
                p,
                st.import_record_index,
                st.namespace_ref,
                st.items,
                stmt.loc,
                null,
                stmt.loc,
            );
            for (st.items) |*item| {
                const ref = item.name.ref.?;
                const symbol = &p.symbols.items[ref.innerIndex()];
                if (symbol.namespace_alias == null) {
                    symbol.namespace_alias = .{
                        .namespace_ref = namespace_ref,
                        .alias = item.original_name,
                        .import_record_index = st.import_record_index,
                    };
                }
                try ctx.visitRefToExport(
                    p,
                    ref,
                    item.alias,
                    item.name.loc,
                    !ctx.is_in_node_modules, // live binding when this may be replaced
                );

                // imports and export statements have their alias +
                // original_name swapped. this is likely a design bug in
                // the parser but since everything uses these
                // assumptions, this hack is simpler than making it
                // proper
                const alias = item.alias;
                item.alias = item.original_name;
                item.original_name = alias;
            }
            return;
        },
        .s_export_star => |st| {
            const namespace_ref = try ctx.deduplicatedImport(
                p,
                st.import_record_index,
                st.namespace_ref,
                &.{},
                stmt.loc,
                null,
                stmt.loc,
            );

            if (st.alias) |alias| {
                // 'export * as ns from' creates one named property.
                try ctx.export_props.append(p.allocator, .{
                    .key = Expr.init(E.String, .{ .data = alias.original_name }, stmt.loc),
                    .value = Expr.initIdentifier(namespace_ref, stmt.loc),
                });
            } else {
                // 'export * from' creates a spread, hoisted at the top.
                try ctx.export_star_props.append(p.allocator, .{
                    .kind = .spread,
                    .value = Expr.initIdentifier(namespace_ref, stmt.loc),
                });
            }
            return;
        },
        // De-duplicate import statements. It is okay to disregard
        // named/default imports here as we always rewrite them as
        // fully qualified property accesses (needed for live-bindings)
        .s_import => |st| {
            _ = try ctx.deduplicatedImport(
                p,
                st.import_record_index,
                st.namespace_ref,
                st.items,
                st.star_name_loc,
                st.default_name,
                stmt.loc,
            );
            return;
        },
    };

    try ctx.stmts.append(p.allocator, new_stmt);
}

/// Deduplicates imports, returning a previously used Ref if present.
fn deduplicatedImport(
    ctx: *ConvertESMExportsForHmr,
    p: anytype,
    import_record_index: u32,
    namespace_ref: Ref,
    items: []js_ast.ClauseItem,
    star_name_loc: ?logger.Loc,
    default_name: ?js_ast.LocRef,
    loc: logger.Loc,
) !Ref {
    const ir = &p.import_records.items[import_record_index];
    const gop = try ctx.imports_seen.getOrPut(p.allocator, ir.path.text);
    if (gop.found_existing) {
        // Disable this one since an older record is getting used. It isn't
        // practical to delete this import record entry since an import or
        // require expression can exist.
        ir.is_unused = true;

        const stmt = ctx.stmts.items[gop.value_ptr.stmt_index].data.s_import;
        if (items.len > 0) {
            if (stmt.items.len == 0) {
                stmt.items = items;
            } else {
                stmt.items = try std.mem.concat(p.allocator, js_ast.ClauseItem, &.{ stmt.items, items });
            }
        }
        if (namespace_ref.isValid()) {
            if (!stmt.namespace_ref.isValid()) {
                stmt.namespace_ref = namespace_ref;
                return namespace_ref;
            } else {
                // Erase this namespace ref, but since it may be used in
                // existing AST trees, a link must be established.
                const symbol = &p.symbols.items[namespace_ref.innerIndex()];
                symbol.use_count_estimate = 0;
                symbol.link = stmt.namespace_ref;
                if (@hasField(@typeInfo(@TypeOf(p)).pointer.child, "symbol_uses")) {
                    _ = p.symbol_uses.swapRemove(namespace_ref);
                }
            }
        }
        if (stmt.star_name_loc == null) if (star_name_loc) |stl| {
            stmt.star_name_loc = stl;
        };
        if (stmt.default_name == null) if (default_name) |dn| {
            stmt.default_name = dn;
        };
        return stmt.namespace_ref;
    }

    try ctx.stmts.append(p.allocator, Stmt.alloc(S.Import, .{
        .import_record_index = import_record_index,
        .is_single_line = true,
        .default_name = default_name,
        .items = items,
        .namespace_ref = namespace_ref,
        .star_name_loc = star_name_loc,
    }, loc));

    gop.value_ptr.* = .{ .stmt_index = @intCast(ctx.stmts.items.len - 1) };
    return namespace_ref;
}

fn visitBindingToExport(ctx: *ConvertESMExportsForHmr, p: anytype, binding: Binding) !void {
    switch (binding.data) {
        .b_missing => {},
        .b_identifier => |id| {
            try ctx.visitRefToExport(p, id.ref, null, binding.loc, false);
        },
        .b_array => |array| {
            for (array.items) |item| {
                try ctx.visitBindingToExport(p, item.binding);
            }
        },
        .b_object => |object| {
            for (object.properties) |item| {
                try ctx.visitBindingToExport(p, item.value);
            }
        },
    }
}

fn visitRefToExport(
    ctx: *ConvertESMExportsForHmr,
    p: anytype,
    ref: Ref,
    export_symbol_name: ?[]const u8,
    loc: logger.Loc,
    is_live_binding_source: bool,
) !void {
    const symbol = p.symbols.items[ref.inner_index];
    const id = if (symbol.kind == .import)
        Expr.init(E.ImportIdentifier, .{ .ref = ref }, loc)
    else
        Expr.initIdentifier(ref, loc);
    if (is_live_binding_source or (symbol.kind == .import and !ctx.is_in_node_modules) or symbol.has_been_assigned_to) {
        // TODO (2024-11-24) instead of requiring getters for live-bindings,
        // a callback propagation system should be considered. mostly
        // because here, these might not even be live bindings, and
        // re-exports are so, so common.
        //
        // update(2025-03-05): HMRModule in ts now contains an exhaustive map
        // of importers. For local live bindings, these can just remember to
        // mutate the field in the exports object. Re-exports can just be
        // encoded into the module format, propagated in `replaceModules`
        const key = Expr.init(E.String, .{
            .data = export_symbol_name orelse symbol.original_name,
        }, loc);

        // This is technically incorrect in that we've marked this as a
        // top level symbol. but all we care about is preventing name
        // collisions, not necessarily the best minification (dev only)
        const arg1 = p.generateTempRef(symbol.original_name);
        try ctx.last_part.declared_symbols.append(p.allocator, .{ .ref = arg1, .is_top_level = true });
        try ctx.last_part.symbol_uses.putNoClobber(p.allocator, arg1, .{ .count_estimate = 1 });
        try p.current_scope.generated.push(p.allocator, arg1);

// 'get abc() { return abc }'
|
||||
try ctx.export_props.append(p.allocator, .{
|
||||
.kind = .get,
|
||||
.key = key,
|
||||
.value = Expr.init(E.Function, .{ .func = .{
|
||||
.body = .{
|
||||
.stmts = try p.allocator.dupe(Stmt, &.{
|
||||
Stmt.alloc(S.Return, .{ .value = id }, loc),
|
||||
}),
|
||||
.loc = loc,
|
||||
},
|
||||
} }, loc),
|
||||
});
|
||||
// no setter is added since live bindings are read-only
|
||||
} else {
|
||||
// 'abc,'
|
||||
try ctx.export_props.append(p.allocator, .{
|
||||
.key = Expr.init(E.String, .{
|
||||
.data = export_symbol_name orelse symbol.original_name,
|
||||
}, loc),
|
||||
.value = id,
|
||||
});
|
||||
}
|
||||
}
|
||||
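// Illustration (not from the original source, assuming the usual HMR output
// shape): for a module with `export let abc = 1` where `abc` is reassigned
// later, the getter branch above contributes `get abc() { return abc }` to
// the exports object, while the plain branch contributes a simple `abc,`
// property for bindings that can never change.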

pub fn finalize(ctx: *ConvertESMExportsForHmr, p: anytype, all_parts: []js_ast.Part) !void {
    if (ctx.export_star_props.items.len > 0) {
        if (ctx.export_props.items.len == 0) {
            ctx.export_props = ctx.export_star_props;
        } else {
            const export_star_len = ctx.export_star_props.items.len;
            try ctx.export_props.ensureUnusedCapacity(p.allocator, export_star_len);
            const len = ctx.export_props.items.len;
            ctx.export_props.items.len += export_star_len;
            bun.copy(G.Property, ctx.export_props.items[export_star_len..], ctx.export_props.items[0..len]);
            @memcpy(ctx.export_props.items[0..export_star_len], ctx.export_star_props.items);
        }
    }

    if (ctx.export_props.items.len > 0) {
        const obj = Expr.init(E.Object, .{
            .properties = G.Property.List.fromList(ctx.export_props),
        }, logger.Loc.Empty);

        // `hmr.exports = ...`
        try ctx.stmts.append(p.allocator, Stmt.alloc(S.SExpr, .{
            .value = Expr.assign(
                Expr.init(E.Dot, .{
                    .target = Expr.initIdentifier(p.hmr_api_ref, logger.Loc.Empty),
                    .name = "exports",
                    .name_loc = logger.Loc.Empty,
                }, logger.Loc.Empty),
                obj,
            ),
        }, logger.Loc.Empty));

        // mark a dependency on module_ref so it is renamed
        try ctx.last_part.symbol_uses.put(p.allocator, p.module_ref, .{ .count_estimate = 1 });
        try ctx.last_part.declared_symbols.append(p.allocator, .{ .ref = p.module_ref, .is_top_level = true });
    }

    if (p.options.features.react_fast_refresh and p.react_refresh.register_used) {
        try ctx.stmts.append(p.allocator, Stmt.alloc(S.SExpr, .{
            .value = Expr.init(E.Call, .{
                .target = Expr.init(E.Dot, .{
                    .target = Expr.initIdentifier(p.hmr_api_ref, .Empty),
                    .name = "reactRefreshAccept",
                    .name_loc = .Empty,
                }, .Empty),
                .args = .init(&.{}),
            }, .Empty),
        }, .Empty));
    }
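    // Illustration (assumption, not from the original source): the call built
    // above amounts to appending a bare `hmr.reactRefreshAccept();` to the
    // module body, presumably so Fast Refresh can accept the hot update
    // instead of forcing a full reload.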

    // Merge all part metadata into the last part.
    for (all_parts[0 .. all_parts.len - 1]) |*part| {
        try ctx.last_part.declared_symbols.appendList(p.allocator, part.declared_symbols);
        try ctx.last_part.import_record_indices.append(p.allocator, part.import_record_indices.slice());
        for (part.symbol_uses.keys(), part.symbol_uses.values()) |k, v| {
            const gop = try ctx.last_part.symbol_uses.getOrPut(p.allocator, k);
            if (!gop.found_existing) {
                gop.value_ptr.* = v;
            } else {
                gop.value_ptr.count_estimate += v.count_estimate;
            }
        }
        part.stmts = &.{};
        part.declared_symbols.entries.len = 0;
        part.tag = .dead_due_to_inlining;
        part.dependencies.clearRetainingCapacity();
        try part.dependencies.push(p.allocator, .{
            .part_index = @intCast(all_parts.len - 1),
            .source_index = p.source.index,
        });
    }

    try ctx.last_part.import_record_indices.append(p.allocator, p.import_records_for_current_part.items);
    try ctx.last_part.declared_symbols.appendList(p.allocator, p.declared_symbols);

    ctx.last_part.stmts = ctx.stmts.items;
    ctx.last_part.tag = .none;
}

const bun = @import("bun");
const logger = bun.logger;

const js_ast = bun.ast;
const B = js_ast.B;
const Binding = js_ast.Binding;
const E = js_ast.E;
const Expr = js_ast.Expr;
const LocRef = js_ast.LocRef;
const S = js_ast.S;
const Stmt = js_ast.Stmt;

const G = js_ast.G;
const Decl = G.Decl;
const Property = G.Property;

const js_parser = bun.js_parser;
const ConvertESMExportsForHmr = js_parser.ConvertESMExportsForHmr;
const ReactRefresh = js_parser.ReactRefresh;
const Ref = js_parser.Ref;
const options = js_parser.options;

const std = @import("std");
const List = std.ArrayListUnmanaged;
@@ -19,7 +19,7 @@ pub fn clone(this: Expr, allocator: std.mem.Allocator) !Expr {
    };
}

-pub fn deepClone(this: Expr, allocator: std.mem.Allocator) OOM!Expr {
+pub fn deepClone(this: Expr, allocator: std.mem.Allocator) anyerror!Expr {
    return .{
        .loc = this.loc,
        .data = try this.data.deepClone(allocator),

@@ -1,530 +0,0 @@
stmts: []Stmt = &.{},
kept_import_equals: bool = false,
removed_import_equals: bool = false,

pub fn scan(
    comptime P: type,
    p: *P,
    stmts: []Stmt,
    will_transform_to_common_js: bool,
    comptime hot_module_reloading_transformations: bool,
    hot_module_reloading_context: if (hot_module_reloading_transformations) *ConvertESMExportsForHmr else void,
) !ImportScanner {
    var scanner = ImportScanner{};
    var stmts_end: usize = 0;
    const allocator = p.allocator;
    const is_typescript_enabled: bool = comptime P.parser_features.typescript;

    for (stmts) |_stmt| {
        var stmt = _stmt; // copy
        switch (stmt.data) {
            .s_import => |import_ptr| {
                var st = import_ptr.*;
                defer import_ptr.* = st;

                const record: *ImportRecord = &p.import_records.items[st.import_record_index];

                if (record.path.isMacro()) {
                    record.is_unused = true;
                    record.path.is_disabled = true;
                    continue;
                }

                // The official TypeScript compiler always removes unused imported
                // symbols. However, we deliberately deviate from the official
                // TypeScript compiler's behavior by not doing this in a specific scenario:
                // we are not bundling, symbol renaming is off, and the tsconfig.json
                // "importsNotUsedAsValues" setting is present and is not set to
                // "remove".
                //
                // This exists to support the use case of compiling partial modules for
                // compile-to-JavaScript languages such as Svelte. These languages try
                // to reference imports in ways that are impossible for esbuild to know
                // about when esbuild is only given a partial module to compile. Here
                // is an example of some Svelte code that might use esbuild to convert
                // TypeScript to JavaScript:
                //
                //   <script lang="ts">
                //     import Counter from './Counter.svelte';
                //     export let name: string = 'world';
                //   </script>
                //   <main>
                //     <h1>Hello {name}!</h1>
                //     <Counter />
                //   </main>
                //
                // Tools that use esbuild to compile TypeScript code inside a Svelte
                // file like this only give esbuild the contents of the <script> tag.
                // These tools work around this missing import problem when using the
                // official TypeScript compiler by hacking the TypeScript AST to
                // remove the "unused import" flags. This isn't possible in esbuild
                // because esbuild deliberately does not expose an AST manipulation
                // API for performance reasons.
                //
                // We deviate from the TypeScript compiler's behavior in this specific
                // case because doing so is useful for these compile-to-JavaScript
                // languages and is benign in other cases. The rationale is as follows:
                //
                //   * If "importsNotUsedAsValues" is absent or set to "remove", then
                //     we don't know if these imports are values or types. It's not
                //     safe to keep them because if they are types, the missing imports
                //     will cause run-time failures because there will be no matching
                //     exports. It's only safe to keep imports if "importsNotUsedAsValues"
// is set to "preserve" or "error" because then we can assume that
|
||||
// none of the imports are types (since the TypeScript compiler
|
||||
// would generate an error in that case).
|
||||
//
|
||||
// * If we're bundling, then we know we aren't being used to compile
|
||||
// a partial module. The parser is seeing the entire code for the
|
||||
// module so it's safe to remove unused imports. And also we don't
|
||||
// want the linker to generate errors about missing imports if the
|
||||
// imported file is also in the bundle.
|
||||
//
|
||||
// * If identifier minification is enabled, then using esbuild as a
|
||||
// partial-module transform library wouldn't work anyway because
|
||||
// the names wouldn't match. And that means we're minifying so the
|
||||
// user is expecting the output to be as small as possible. So we
|
||||
// should omit unused imports.
|
||||
//
|
||||
var did_remove_star_loc = false;
|
||||
const keep_unused_imports = !p.options.features.trim_unused_imports;
|
||||
// TypeScript always trims unused imports. This is important for
|
||||
// correctness since some imports might be fake (only in the type
|
||||
// system and used for type-only imports).
|
||||
if (!keep_unused_imports) {
|
||||
var found_imports = false;
|
||||
var is_unused_in_typescript = true;
|
||||
|
||||
if (st.default_name) |default_name| {
|
||||
found_imports = true;
|
||||
const symbol = p.symbols.items[default_name.ref.?.innerIndex()];
|
||||
|
||||
// TypeScript has a separate definition of unused
|
||||
if (is_typescript_enabled and p.ts_use_counts.items[default_name.ref.?.innerIndex()] != 0) {
|
||||
is_unused_in_typescript = false;
|
||||
}
|
||||
|
||||
// Remove the symbol if it's never used outside a dead code region
|
||||
if (symbol.use_count_estimate == 0) {
|
||||
st.default_name = null;
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the star import if it's unused
|
||||
if (st.star_name_loc) |_| {
|
||||
found_imports = true;
|
||||
const symbol = p.symbols.items[st.namespace_ref.innerIndex()];
|
||||
|
||||
// TypeScript has a separate definition of unused
|
||||
if (is_typescript_enabled and p.ts_use_counts.items[st.namespace_ref.innerIndex()] != 0) {
|
||||
is_unused_in_typescript = false;
|
||||
}
|
||||
|
||||
// Remove the symbol if it's never used outside a dead code region
|
||||
if (symbol.use_count_estimate == 0) {
|
||||
// Make sure we don't remove this if it was used for a property
|
||||
// access while bundling
|
||||
var has_any = false;
|
||||
|
||||
if (p.import_items_for_namespace.get(st.namespace_ref)) |entry| {
|
||||
if (entry.count() > 0) {
|
||||
has_any = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (!has_any) {
|
||||
st.star_name_loc = null;
|
||||
did_remove_star_loc = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Remove items if they are unused
|
||||
if (st.items.len > 0) {
|
||||
found_imports = true;
|
||||
var items_end: usize = 0;
|
||||
for (st.items) |item| {
|
||||
const ref = item.name.ref.?;
|
||||
const symbol: Symbol = p.symbols.items[ref.innerIndex()];
|
||||
|
||||
// TypeScript has a separate definition of unused
|
||||
if (is_typescript_enabled and p.ts_use_counts.items[ref.innerIndex()] != 0) {
|
||||
is_unused_in_typescript = false;
|
||||
}
|
||||
|
||||
// Remove the symbol if it's never used outside a dead code region
|
||||
if (symbol.use_count_estimate != 0) {
|
||||
st.items[items_end] = item;
|
||||
items_end += 1;
|
||||
}
|
||||
}
|
||||
|
||||
st.items = st.items[0..items_end];
|
||||
}
|
||||
|
||||
// -- Original Comment --
|
||||
// Omit this statement if we're parsing TypeScript and all imports are
|
||||
// unused. Note that this is distinct from the case where there were
|
||||
// no imports at all (e.g. "import 'foo'"). In that case we want to keep
|
||||
// the statement because the user is clearly trying to import the module
|
||||
// for side effects.
|
||||
//
|
||||
// This culling is important for correctness when parsing TypeScript
|
||||
// because a) the TypeScript compiler does this and we want to match it
|
||||
// and b) this may be a fake module that only exists in the type system
|
||||
// and doesn't actually exist in reality.
|
||||
//
|
||||
// We do not want to do this culling in JavaScript though because the
|
||||
// module may have side effects even if all imports are unused.
|
||||
// -- Original Comment --
|
||||
|
||||
// jarred: I think, in this project, we want this behavior, even in JavaScript.
|
||||
// I think this would be a big performance improvement.
|
||||
// The less you import, the less code you transpile.
|
||||
// Side-effect imports are nearly always done through identifier-less imports
|
||||
// e.g. `import 'fancy-stylesheet-thing/style.css';`
|
||||
// This is a breaking change though. We can make it an option with some guardrail
|
||||
// so maybe if it errors, it shows a suggestion "retry without trimming unused imports"
|
||||
if ((is_typescript_enabled and found_imports and is_unused_in_typescript and !p.options.preserve_unused_imports_ts) or
|
||||
(!is_typescript_enabled and p.options.features.trim_unused_imports and found_imports and st.star_name_loc == null and st.items.len == 0 and st.default_name == null))
|
||||
{
|
||||
// internal imports are presumed to be always used
|
||||
// require statements cannot be stripped
|
||||
if (!record.is_internal and !record.was_originally_require) {
|
||||
record.is_unused = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
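                // Illustration (assumption, not from the original source): with
                // trimming enabled, `import { Foo } from './types'` where `Foo`
                // appears only in type positions reaches this point with a use
                // count of zero, so the record is marked unused and skipped.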

                const namespace_ref = st.namespace_ref;
                const convert_star_to_clause = !p.options.bundle and (p.symbols.items[namespace_ref.innerIndex()].use_count_estimate == 0);

                if (convert_star_to_clause and !keep_unused_imports) {
                    st.star_name_loc = null;
                }

                record.contains_default_alias = record.contains_default_alias or st.default_name != null;

                const existing_items: ImportItemForNamespaceMap = p.import_items_for_namespace.get(namespace_ref) orelse
                    ImportItemForNamespaceMap.init(allocator);

                if (p.options.bundle) {
                    if (st.star_name_loc != null and existing_items.count() > 0) {
                        const sorted = try allocator.alloc(string, existing_items.count());
                        defer allocator.free(sorted);
                        for (sorted, existing_items.keys()) |*result, alias| {
                            result.* = alias;
                        }
                        strings.sortDesc(sorted);
                        p.named_imports.ensureUnusedCapacity(p.allocator, sorted.len) catch bun.outOfMemory();

                        // Create named imports for these property accesses. This will
                        // cause missing imports to generate useful warnings.
                        //
                        // It will also improve bundling efficiency for internal imports
                        // by still converting property accesses off the namespace into
                        // bare identifiers even if the namespace is still needed.
                        for (sorted) |alias| {
                            const item = existing_items.get(alias).?;
                            p.named_imports.put(
                                p.allocator,
                                item.ref.?,
                                js_ast.NamedImport{
                                    .alias = alias,
                                    .alias_loc = item.loc,
                                    .namespace_ref = namespace_ref,
                                    .import_record_index = st.import_record_index,
                                },
                            ) catch bun.outOfMemory();

                            const name: LocRef = item;
                            const name_ref = name.ref.?;

                            // Make sure the printer prints this as a property access
                            var symbol: *Symbol = &p.symbols.items[name_ref.innerIndex()];

                            symbol.namespace_alias = G.NamespaceAlias{
                                .namespace_ref = namespace_ref,
                                .alias = alias,
                                .import_record_index = st.import_record_index,
                                .was_originally_property_access = st.star_name_loc != null and existing_items.contains(symbol.original_name),
                            };

                            // Also record these automatically-generated top-level namespace alias symbols
                            p.declared_symbols.append(p.allocator, .{
                                .ref = name_ref,
                                .is_top_level = true,
                            }) catch unreachable;
                        }
                    }
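                    // Illustration (assumption, not from the original source):
                    // given `import * as ns from './m'; ns.foo();`, the loop
                    // above registers "foo" as a named import so the printer can
                    // emit a bare identifier instead of the `ns.foo` access.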

                    p.named_imports.ensureUnusedCapacity(
                        p.allocator,
                        st.items.len + @as(usize, @intFromBool(st.default_name != null)) + @as(usize, @intFromBool(st.star_name_loc != null)),
                    ) catch bun.outOfMemory();

                    if (st.star_name_loc) |loc| {
                        record.contains_import_star = true;
                        p.named_imports.putAssumeCapacity(
                            namespace_ref,
                            js_ast.NamedImport{
                                .alias_is_star = true,
                                .alias = "",
                                .alias_loc = loc,
                                .namespace_ref = Ref.None,
                                .import_record_index = st.import_record_index,
                            },
                        );
                    }

                    if (st.default_name) |default| {
                        record.contains_default_alias = true;
                        p.named_imports.putAssumeCapacity(
                            default.ref.?,
                            .{
                                .alias = "default",
                                .alias_loc = default.loc,
                                .namespace_ref = namespace_ref,
                                .import_record_index = st.import_record_index,
                            },
                        );
                    }

                    for (st.items) |item| {
                        const name: LocRef = item.name;
                        const name_ref = name.ref.?;

                        p.named_imports.putAssumeCapacity(
                            name_ref,
                            js_ast.NamedImport{
                                .alias = item.alias,
                                .alias_loc = name.loc,
                                .namespace_ref = namespace_ref,
                                .import_record_index = st.import_record_index,
                            },
                        );
                    }
                } else {
                    // ESM requires live bindings
                    // CommonJS does not require live bindings
                    // We load ESM in browsers & in Bun.js
                    // We have to simulate live bindings for cases where the code is bundled
                    // We do not know at this stage whether or not the import statement is bundled
                    // This keeps track of the `namespace_alias` in case, at printing time, we determine that we should print it with the namespace
                    for (st.items) |item| {
                        record.contains_default_alias = record.contains_default_alias or strings.eqlComptime(item.alias, "default");

                        const name: LocRef = item.name;
                        const name_ref = name.ref.?;

                        try p.named_imports.put(p.allocator, name_ref, js_ast.NamedImport{
                            .alias = item.alias,
                            .alias_loc = name.loc,
                            .namespace_ref = namespace_ref,
                            .import_record_index = st.import_record_index,
                        });

                        // Make sure the printer prints this as a property access
                        var symbol: *Symbol = &p.symbols.items[name_ref.innerIndex()];
                        if (record.contains_import_star or st.star_name_loc != null)
                            symbol.namespace_alias = G.NamespaceAlias{
                                .namespace_ref = namespace_ref,
                                .alias = item.alias,
                                .import_record_index = st.import_record_index,
                                .was_originally_property_access = st.star_name_loc != null and existing_items.contains(symbol.original_name),
                            };
                    }

                    if (record.was_originally_require) {
                        var symbol = &p.symbols.items[namespace_ref.innerIndex()];
                        symbol.namespace_alias = G.NamespaceAlias{
                            .namespace_ref = namespace_ref,
                            .alias = "",
                            .import_record_index = st.import_record_index,
                            .was_originally_property_access = false,
                        };
                    }
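                    // Illustration (assumption, not from the original source):
                    // for `const ns = require('./m')` that was rewritten into an
                    // import, the empty alias above presumably makes the printer
                    // treat `ns` as the whole namespace object rather than one
                    // named export.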
                }

                try p.import_records_for_current_part.append(allocator, st.import_record_index);

                record.contains_import_star = record.contains_import_star or st.star_name_loc != null;
                record.contains_default_alias = record.contains_default_alias or st.default_name != null;

                for (st.items) |*item| {
                    record.contains_default_alias = record.contains_default_alias or strings.eqlComptime(item.alias, "default");
                    record.contains_es_module_alias = record.contains_es_module_alias or strings.eqlComptime(item.alias, "__esModule");
                }
            },

            .s_function => |st| {
                if (st.func.flags.contains(.is_export)) {
                    if (st.func.name) |name| {
                        const original_name = p.symbols.items[name.ref.?.innerIndex()].original_name;
                        try p.recordExport(name.loc, original_name, name.ref.?);
                    } else {
                        try p.log.addRangeError(p.source, logger.Range{ .loc = st.func.open_parens_loc, .len = 2 }, "Exported functions must have a name");
                    }
                }
            },
            .s_class => |st| {
                if (st.is_export) {
                    if (st.class.class_name) |name| {
                        try p.recordExport(name.loc, p.symbols.items[name.ref.?.innerIndex()].original_name, name.ref.?);
                    } else {
                        try p.log.addRangeError(p.source, logger.Range{ .loc = st.class.body_loc, .len = 0 }, "Exported classes must have a name");
                    }
                }
            },
            .s_local => |st| {
                if (st.is_export) {
                    for (st.decls.slice()) |decl| {
                        p.recordExportedBinding(decl.binding);
                    }
                }

                // Remove unused import-equals statements, since those likely
                // correspond to types instead of values
                if (st.was_ts_import_equals and !st.is_export and st.decls.len > 0) {
                    var decl = st.decls.ptr[0];

                    // Skip to the underlying reference
                    var value = decl.value;
                    if (decl.value != null) {
                        while (true) {
                            if (@as(Expr.Tag, value.?.data) == .e_dot) {
                                value = value.?.data.e_dot.target;
                            } else {
                                break;
                            }
                        }
                    }

                    // Is this an identifier reference and not a require() call?
                    if (value) |val| {
                        if (@as(Expr.Tag, val.data) == .e_identifier) {
                            // Is this import statement unused?
                            if (@as(Binding.Tag, decl.binding.data) == .b_identifier and p.symbols.items[decl.binding.data.b_identifier.ref.innerIndex()].use_count_estimate == 0) {
                                p.ignoreUsage(val.data.e_identifier.ref);

                                scanner.removed_import_equals = true;
                                continue;
                            } else {
                                scanner.kept_import_equals = true;
                            }
                        }
                    }
                }
            },
            .s_export_default => |st| {
                // This is defer'd so that we still record export default for identifiers
                defer {
                    if (st.default_name.ref) |ref| {
                        p.recordExport(st.default_name.loc, "default", ref) catch {};
                    }
                }

                // Rewrite this export to be:
                //   exports.default =
                // But only if it's anonymous
                if (!hot_module_reloading_transformations and will_transform_to_common_js and P != bun.bundle_v2.AstBuilder) {
                    const expr = st.value.toExpr();
                    var export_default_args = try p.allocator.alloc(Expr, 2);
                    export_default_args[0] = p.@"module.exports"(expr.loc);
                    export_default_args[1] = expr;
                    stmt = p.s(S.SExpr{ .value = p.callRuntime(expr.loc, "__exportDefault", export_default_args) }, expr.loc);
                }
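                // Illustration (assumption, not from the original source): under
                // this rewrite, `export default 123` becomes roughly
                // `__exportDefault(module.exports, 123)` via the runtime helper
                // referenced above.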
            },
            .s_export_clause => |st| {
                for (st.items) |item| {
                    try p.recordExport(item.alias_loc, item.alias, item.name.ref.?);
                }
            },
            .s_export_star => |st| {
                try p.import_records_for_current_part.append(allocator, st.import_record_index);

                if (st.alias) |alias| {
                    // "export * as ns from 'path'"
                    try p.named_imports.put(p.allocator, st.namespace_ref, js_ast.NamedImport{
                        .alias = null,
                        .alias_is_star = true,
                        .alias_loc = alias.loc,
                        .namespace_ref = Ref.None,
                        .import_record_index = st.import_record_index,
                        .is_exported = true,
                    });
                    try p.recordExport(alias.loc, alias.original_name, st.namespace_ref);
                    var record = &p.import_records.items[st.import_record_index];
                    record.contains_import_star = true;
                } else {
                    // "export * from 'path'"
                    try p.export_star_import_records.append(allocator, st.import_record_index);
                }
            },
            .s_export_from => |st| {
                try p.import_records_for_current_part.append(allocator, st.import_record_index);
                p.named_imports.ensureUnusedCapacity(p.allocator, st.items.len) catch unreachable;
                for (st.items) |item| {
                    const ref = item.name.ref orelse p.panic("Expected export from item to have a name {any}", .{st});
                    // Note that the imported alias is not item.Alias, which is the
                    // exported alias. This is somewhat confusing because each
                    // SExportFrom statement is basically SImport + SExportClause in one.
                    try p.named_imports.put(p.allocator, ref, js_ast.NamedImport{
                        .alias_is_star = false,
                        .alias = item.original_name,
                        .alias_loc = item.name.loc,
                        .namespace_ref = st.namespace_ref,
                        .import_record_index = st.import_record_index,
                        .is_exported = true,
                    });
                    try p.recordExport(item.name.loc, item.alias, ref);

                    var record = &p.import_records.items[st.import_record_index];
                    if (strings.eqlComptime(item.original_name, "default")) {
                        record.contains_default_alias = true;
                    } else if (strings.eqlComptime(item.original_name, "__esModule")) {
                        record.contains_es_module_alias = true;
                    }
                }
            },
            else => {},
        }

        if (hot_module_reloading_transformations) {
            try hot_module_reloading_context.convertStmt(p, stmt);
        } else {
            stmts[stmts_end] = stmt;
            stmts_end += 1;
        }
    }

    if (!hot_module_reloading_transformations)
        scanner.stmts = stmts[0..stmts_end];

    return scanner;
}

const string = []const u8;

const bun = @import("bun");
const ImportRecord = bun.ImportRecord;
const logger = bun.logger;
const strings = bun.strings;

const js_ast = bun.ast;
const Binding = js_ast.Binding;
const Expr = js_ast.Expr;
const G = js_ast.G;
const LocRef = js_ast.LocRef;
const S = js_ast.S;
const Stmt = js_ast.Stmt;
const Symbol = js_ast.Symbol;

const js_parser = bun.js_parser;
const ConvertESMExportsForHmr = js_parser.ConvertESMExportsForHmr;
const ImportItemForNamespaceMap = js_parser.ImportItemForNamespaceMap;
const ImportScanner = js_parser.ImportScanner;
const Ref = js_parser.Ref;
const TypeScript = js_parser.TypeScript;
const options = js_parser.options;
@@ -1,210 +0,0 @@
pub const KnownGlobal = enum {
    WeakSet,
    WeakMap,
    Date,
    Set,
    Map,
    Headers,
    Response,
    TextEncoder,
    TextDecoder,

    pub const map = bun.ComptimeEnumMap(KnownGlobal);
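    // Illustration (assumption, not from the original source): marking a
    // constructor pure lets dead-code elimination delete an unused
    // `const s = new Set([1, 2, 3]);` entirely, since evaluating it is known
    // to have no observable side effects.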

    pub noinline fn maybeMarkConstructorAsPure(noalias e: *E.New, symbols: []const Symbol) void {
        const id = if (e.target.data == .e_identifier) e.target.data.e_identifier.ref else return;
        const symbol = &symbols[id.innerIndex()];
        if (symbol.kind != .unbound)
            return;

        const constructor = map.get(symbol.original_name) orelse return;

        switch (constructor) {
            .WeakSet, .WeakMap => {
                const n = e.args.len;

                if (n == 0) {
                    // "new WeakSet()" is pure
                    e.can_be_unwrapped_if_unused = .if_unused;

                    return;
                }

                if (n == 1) {
                    switch (e.args.ptr[0].data) {
                        .e_null, .e_undefined => {
                            // "new WeakSet(null)" is pure
                            // "new WeakSet(void 0)" is pure
                            e.can_be_unwrapped_if_unused = .if_unused;
                        },
                        .e_array => |array| {
                            if (array.items.len == 0) {
                                // "new WeakSet([])" is pure
                                e.can_be_unwrapped_if_unused = .if_unused;
                            } else {
                                // "new WeakSet([x])" is impure because an exception is thrown if "x" is not an object
                            }
                        },
                        else => {
                            // "new WeakSet(x)" is impure because the iterator for "x" could have side effects
                        },
                    }
                }
            },
            .Date => {
                const n = e.args.len;

                if (n == 0) {
                    // "new Date()" is pure
                    e.can_be_unwrapped_if_unused = .if_unused;

                    return;
                }

                if (n == 1) {
                    switch (e.args.ptr[0].knownPrimitive()) {
                        .null, .undefined, .boolean, .number, .string => {
                            // "new Date('')" is pure
                            // "new Date(0)" is pure
                            // "new Date(null)" is pure
                            // "new Date(true)" is pure
                            // "new Date(false)" is pure
                            // "new Date(undefined)" is pure
                            e.can_be_unwrapped_if_unused = .if_unused;
                        },
                        else => {
                            // "new Date(x)" is impure because the argument could be a string with side effects
                        },
                    }
                }
            },

            .Set => {
                const n = e.args.len;

                if (n == 0) {
                    // "new Set()" is pure
                    e.can_be_unwrapped_if_unused = .if_unused;
                    return;
                }

                if (n == 1) {
                    switch (e.args.ptr[0].data) {
                        .e_array, .e_null, .e_undefined => {
                            // "new Set([a, b, c])" is pure
                            // "new Set(null)" is pure
                            // "new Set(void 0)" is pure
                            e.can_be_unwrapped_if_unused = .if_unused;
                        },
                        else => {
                            // "new Set(x)" is impure because the iterator for "x" could have side effects
                        },
                    }
                }
            },

            .Headers => {
                const n = e.args.len;

                if (n == 0) {
                    // "new Headers()" is pure
                    e.can_be_unwrapped_if_unused = .if_unused;

                    return;
                }
            },

            .Response => {
                const n = e.args.len;

                if (n == 0) {
                    // "new Response()" is pure
                    e.can_be_unwrapped_if_unused = .if_unused;

                    return;
                }

                if (n == 1) {
                    switch (e.args.ptr[0].knownPrimitive()) {
                        .null, .undefined, .boolean, .number, .string => {
                            // "new Response('')" is pure
                            // "new Response(0)" is pure
                            // "new Response(null)" is pure
                            // "new Response(true)" is pure
                            // "new Response(false)" is pure
                            // "new Response(undefined)" is pure

                            e.can_be_unwrapped_if_unused = .if_unused;
                        },
                        else => {
                            // "new Response(x)" is impure
                        },
                    }
                }
            },
            .TextDecoder, .TextEncoder => {
                const n = e.args.len;

                if (n == 0) {
                    // "new TextEncoder()" is pure
                    // "new TextDecoder()" is pure
                    e.can_be_unwrapped_if_unused = .if_unused;

                    return;
                }

                // We _could_ validate the encoding argument
                // But let's not bother
            },

            .Map => {
                const n = e.args.len;

                if (n == 0) {
                    // "new Map()" is pure
                    e.can_be_unwrapped_if_unused = .if_unused;
                    return;
                }

                if (n == 1) {
                    switch (e.args.ptr[0].data) {
                        .e_null, .e_undefined => {
                            // "new Map(null)" is pure
                            // "new Map(void 0)" is pure
                            e.can_be_unwrapped_if_unused = .if_unused;
                        },
                        .e_array => |array| {
                            var all_items_are_arrays = true;
                            for (array.items.slice()) |item| {
                                if (item.data != .e_array) {
                                    all_items_are_arrays = false;
                                    break;
                                }
                            }

                            if (all_items_are_arrays) {
                                // "new Map([[a, b], [c, d]])" is pure
                                e.can_be_unwrapped_if_unused = .if_unused;
                            }
                        },
                        else => {
                            // "new Map(x)" is impure because the iterator for "x" could have side effects
                        },
                    }
                }
            },
        }
    }
};

const string = []const u8;

const bun = @import("bun");

const js_ast = bun.ast;
const E = js_ast.E;
const Symbol = js_ast.Symbol;

const std = @import("std");
const Map = std.AutoHashMapUnmanaged;
@@ -22,7 +22,7 @@ pub fn NewStore(comptime types: []const type, comptime count: usize) type {

    const backing_allocator = bun.default_allocator;

-    const log = Output.scoped(.Store, .hidden);
+    const log = Output.scoped(.Store, true);

    return struct {
        const Store = @This();

src/ast/P.zig (6666 lines): file diff suppressed because it is too large
src/ast/Parser.zig (1528 lines): file diff suppressed because it is too large
@@ -1,887 +0,0 @@
pub const SideEffects = enum(u1) {
    could_have_side_effects,
    no_side_effects,

    pub const Result = struct {
        side_effects: SideEffects,
        ok: bool = false,
        value: bool = false,
    };

    pub fn canChangeStrictToLoose(lhs: Expr.Data, rhs: Expr.Data) bool {
        const left = lhs.knownPrimitive();
        const right = rhs.knownPrimitive();
        return left == right and left != .unknown and left != .mixed;
    }

    pub fn simplifyBoolean(p: anytype, expr: Expr) Expr {
        if (!p.options.features.dead_code_elimination) return expr;

        var result: Expr = expr;
        _simplifyBoolean(p, &result);
        return result;
    }

    fn _simplifyBoolean(p: anytype, expr: *Expr) void {
        while (true) {
            switch (expr.data) {
                .e_unary => |e| {
                    if (e.op == .un_not) {
                        // "!!a" => "a"
                        if (e.value.data == .e_unary and e.value.data.e_unary.op == .un_not) {
                            expr.* = e.value.data.e_unary.value;
                            continue;
                        }

                        _simplifyBoolean(p, &e.value);
                    }
                },
                .e_binary => |e| {
                    switch (e.op) {
                        .bin_logical_and => {
                            const effects = SideEffects.toBoolean(p, e.right.data);
                            if (effects.ok and effects.value and effects.side_effects == .no_side_effects) {
                                // "if (anything && truthyNoSideEffects)" => "if (anything)"
                                expr.* = e.left;
                                continue;
                            }
                        },
                        .bin_logical_or => {
                            const effects = SideEffects.toBoolean(p, e.right.data);
                            if (effects.ok and !effects.value and effects.side_effects == .no_side_effects) {
                                // "if (anything || falsyNoSideEffects)" => "if (anything)"
                                expr.* = e.left;
                                continue;
                            }
                        },
                        else => {},
                    }
                },
                else => {},
            }
            break;
        }
    }
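    // Illustration (assumption, not from the original source): these rules
    // turn `if (x() && true)` into `if (x())` and `if (!!y)` into `if (y)`;
    // the dropped operand is known truthy or falsy and has no side effects,
    // so the boolean outcome of the test is unchanged.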

    pub const toNumber = Expr.Data.toNumber;
    pub const typeof = Expr.Data.toTypeof;

    pub fn isPrimitiveToReorder(data: Expr.Data) bool {
        return switch (data) {
            .e_null,
            .e_undefined,
            .e_string,
            .e_boolean,
            .e_number,
            .e_big_int,
            .e_inlined_enum,
            .e_require_main,
            => true,
            else => false,
        };
    }

    pub fn simplifyUnusedExpr(p: anytype, expr: Expr) ?Expr {
        if (!p.options.features.dead_code_elimination) return expr;
        switch (expr.data) {
            .e_null,
            .e_undefined,
            .e_missing,
            .e_boolean,
            .e_number,
            .e_big_int,
            .e_string,
            .e_this,
            .e_reg_exp,
            .e_function,
            .e_arrow,
            .e_import_meta,
            .e_inlined_enum,
            => return null,

            .e_dot => |dot| {
                if (dot.can_be_removed_if_unused) {
                    return null;
                }
            },
            .e_identifier => |ident| {
                if (ident.must_keep_due_to_with_stmt) {
                    return expr;
                }

                if (ident.can_be_removed_if_unused or p.symbols.items[ident.ref.innerIndex()].kind != .unbound) {
                    return null;
                }
            },
            .e_if => |ternary| {
                ternary.yes = simplifyUnusedExpr(p, ternary.yes) orelse ternary.yes.toEmpty();
                ternary.no = simplifyUnusedExpr(p, ternary.no) orelse ternary.no.toEmpty();

                // "foo() ? 1 : 2" => "foo()"
                if (ternary.yes.isEmpty() and ternary.no.isEmpty()) {
                    return simplifyUnusedExpr(p, ternary.test_);
                }

                // "foo() ? 1 : bar()" => "foo() || bar()"
                if (ternary.yes.isEmpty()) {
                    return Expr.joinWithLeftAssociativeOp(
                        .bin_logical_or,
                        ternary.test_,
                        ternary.no,
                        p.allocator,
                    );
                }

                // "foo() ? bar() : 2" => "foo() && bar()"
                if (ternary.no.isEmpty()) {
                    return Expr.joinWithLeftAssociativeOp(
                        .bin_logical_and,
                        ternary.test_,
                        ternary.yes,
                        p.allocator,
                    );
                }
            },
            .e_unary => |un| {
                // These operators must not have any type conversions that can execute code
                // such as "toString" or "valueOf". They must also never throw any exceptions.
                switch (un.op) {
                    .un_void, .un_not => {
                        return simplifyUnusedExpr(p, un.value);
                    },
                    .un_typeof => {
// "typeof x" must not be transformed into if "x" since doing so could
|
||||
                        // cause an exception to be thrown. Instead we can just remove it since
                        // "typeof x" is special-cased in the standard to never throw.
                        if (std.meta.activeTag(un.value.data) == .e_identifier) {
                            return null;
                        }

                        return simplifyUnusedExpr(p, un.value);
                    },

                    else => {},
                }
            },

            inline .e_call, .e_new => |call| {
                // A call that has been marked "__PURE__" can be removed if all arguments
                // can be removed. The annotation causes us to ignore the target.
                if (call.can_be_unwrapped_if_unused != .never) {
                    if (call.args.len > 0) {
                        const joined = Expr.joinAllWithCommaCallback(call.args.slice(), @TypeOf(p), p, comptime simplifyUnusedExpr, p.allocator);
                        if (joined != null and call.can_be_unwrapped_if_unused == .if_unused_and_toString_safe) {
                            @branchHint(.unlikely);
                            // For now, only support this for 1 argument.
                            if (joined.?.data.isSafeToString()) {
                                return null;
                            }
                        }
                        return joined;
                    } else {
                        return null;
                    }
                }
            },

            .e_binary => |bin| {
                switch (bin.op) {
                    // These operators must not have any type conversions that can execute code
                    // such as "toString" or "valueOf". They must also never throw any exceptions.
                    .bin_strict_eq,
                    .bin_strict_ne,
                    .bin_comma,
                    => return simplifyUnusedBinaryCommaExpr(p, expr),

                    // We can simplify "==" and "!=" even though they can call "toString" and/or
                    // "valueOf" if we can statically determine that the types of both sides are
                    // primitives. In that case there won't be any chance for user-defined
                    // "toString" and/or "valueOf" to be called.
                    .bin_loose_eq,
                    .bin_loose_ne,
                    => {
                        if (isPrimitiveWithSideEffects(bin.left.data) and isPrimitiveWithSideEffects(bin.right.data)) {
                            return Expr.joinWithComma(
                                simplifyUnusedExpr(p, bin.left) orelse bin.left.toEmpty(),
                                simplifyUnusedExpr(p, bin.right) orelse bin.right.toEmpty(),
                                p.allocator,
                            );
                        }
                        // If one side is a number, the number can be printed as
                        // `0` since the result being unused doesn't matter; we
                        // only care to invoke the coercion.
                        if (bin.left.data == .e_number) {
                            bin.left.data = .{ .e_number = .{ .value = 0.0 } };
                        } else if (bin.right.data == .e_number) {
                            bin.right.data = .{ .e_number = .{ .value = 0.0 } };
                        }
                    },

                    .bin_logical_and, .bin_logical_or, .bin_nullish_coalescing => {
                        bin.right = simplifyUnusedExpr(p, bin.right) orelse bin.right.toEmpty();
                        // Preserve short-circuit behavior: the left expression is only unused if
                        // the right expression can be completely removed. Otherwise, the left
                        // expression is important for the branch.

                        if (bin.right.isEmpty())
                            return simplifyUnusedExpr(p, bin.left);
                    },

                    else => {},
                }
            },

            .e_object => {
                // Objects with "..." spread expressions can't be unwrapped because the
                // "..." triggers code evaluation via getters. In that case, just trim
                // the other items instead and leave the object expression there.
                var properties_slice = expr.data.e_object.properties.slice();
                var end: usize = 0;
                for (properties_slice) |spread| {
                    end = 0;
                    if (spread.kind == .spread) {
                        // Spread properties must always be evaluated
                        for (properties_slice) |prop_| {
                            var prop = prop_;
                            if (prop_.kind != .spread) {
                                const value = simplifyUnusedExpr(p, prop.value.?);
                                if (value != null) {
                                    prop.value = value;
                                } else if (!prop.flags.contains(.is_computed)) {
                                    continue;
                                } else {
                                    prop.value = p.newExpr(E.Number{ .value = 0.0 }, prop.value.?.loc);
                                }
                            }

                            // store the simplified copy, not the original
                            properties_slice[end] = prop;
                            end += 1;
                        }

                        properties_slice = properties_slice[0..end];
                        expr.data.e_object.properties = G.Property.List.init(properties_slice);
                        return expr;
                    }
                }

                var result = Expr.init(E.Missing, E.Missing{}, expr.loc);

                // Otherwise, the object can be completely removed. We only need to keep any
                // object properties with side effects. Apply this simplification recursively.
                for (properties_slice) |prop| {
                    if (prop.flags.contains(.is_computed)) {
                        // Make sure "ToString" is still evaluated on the key
                        result = result.joinWithComma(
                            p.newExpr(
                                E.Binary{
                                    .op = .bin_add,
                                    .left = prop.key.?,
                                    .right = p.newExpr(E.String{}, prop.key.?.loc),
                                },
                                prop.key.?.loc,
                            ),
                            p.allocator,
                        );
                    }
                    result = result.joinWithComma(
                        simplifyUnusedExpr(p, prop.value.?) orelse prop.value.?.toEmpty(),
                        p.allocator,
                    );
                }

                return result;
            },
            .e_array => {
                var items = expr.data.e_array.items.slice();

                for (items) |item| {
                    if (item.data == .e_spread) {
                        var end: usize = 0;
                        for (items) |item__| {
                            const item_ = item__;
                            if (item_.data != .e_missing) {
                                items[end] = item_;
                                end += 1;
                            }
                        }

                        // Only after the whole array has been filtered can the
                        // remaining items be sliced off and returned.
                        expr.data.e_array.items = ExprNodeList.init(items[0..end]);
                        return expr;
                    }
                }

                // Otherwise, the array can be completely removed. We only need to keep any
                // array items with side effects. Apply this simplification recursively.
                return Expr.joinAllWithCommaCallback(
                    items,
                    @TypeOf(p),
                    p,
                    comptime simplifyUnusedExpr,
                    p.allocator,
                );
            },

            else => {},
        }

        return expr;
    }

    pub const BinaryExpressionSimplifyVisitor = struct {
        bin: *E.Binary,
    };

    /// Iteratively simplify an unused chain of `,` / `===` / `!==` binary
    /// expressions, using an explicit stack so deeply nested chains cannot
    /// overflow the native call stack.
    fn simplifyUnusedBinaryCommaExpr(p: anytype, expr: Expr) ?Expr {
        if (Environment.allow_assert) {
            assert(expr.data == .e_binary);
            assert(switch (expr.data.e_binary.op) {
                .bin_strict_eq,
                .bin_strict_ne,
                .bin_comma,
                => true,
                else => false,
            });
        }
        const stack: *std.ArrayList(BinaryExpressionSimplifyVisitor) = &p.binary_expression_simplify_stack;
        const stack_bottom = stack.items.len;
        defer stack.shrinkRetainingCapacity(stack_bottom);

        stack.append(.{ .bin = expr.data.e_binary }) catch bun.outOfMemory();

        // Build stack up of expressions
        var left: Expr = expr.data.e_binary.left;
        while (left.data.as(.e_binary)) |left_bin| {
            switch (left_bin.op) {
                .bin_strict_eq,
                .bin_strict_ne,
                .bin_comma,
                => {
                    stack.append(.{ .bin = left_bin }) catch bun.outOfMemory();
                    left = left_bin.left;
                },
                else => break,
            }
        }

        // Ride the stack downwards
        var i = stack.items.len;
        var result = simplifyUnusedExpr(p, left) orelse Expr.empty;
        while (i > stack_bottom) {
            i -= 1;
            const top = stack.items[i];
            const visited_right = simplifyUnusedExpr(p, top.bin.right) orelse Expr.empty;
            result = result.joinWithComma(visited_right, p.allocator);
        }

        return if (result.isMissing()) null else result;
    }
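    // Illustration (assumption, not from the original source): for an unused
    // statement like `a() === b(), 1;`, the stack walk above keeps only the
    // side-effectful operands and produces `a(), b();`.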

    fn findIdentifiers(binding: Binding, decls: *std.ArrayList(G.Decl)) void {
        switch (binding.data) {
            .b_identifier => {
                decls.append(.{ .binding = binding }) catch unreachable;
            },
            .b_array => |array| {
                for (array.items) |item| {
                    findIdentifiers(item.binding, decls);
                }
            },
            .b_object => |obj| {
                for (obj.properties) |item| {
                    findIdentifiers(item.value, decls);
                }
            },
            else => {},
        }
    }

    fn shouldKeepStmtsInDeadControlFlow(stmts: []Stmt, allocator: Allocator) bool {
        for (stmts) |child| {
            if (shouldKeepStmtInDeadControlFlow(child, allocator)) {
                return true;
            }
        }
        return false;
    }

    /// If this is in a dead branch, then we want to trim as much dead code as we
    /// can. Everything can be trimmed except for hoisted declarations ("var" and
    /// "function"), which affect the parent scope. For example:
    ///
    ///   function foo() {
    ///     if (false) { var x; }
    ///     x = 1;
    ///   }
    ///
    /// We can't trim the entire branch as dead or calling foo() will incorrectly
    /// assign to a global variable instead.
    ///
    /// Caller is expected to first check `p.options.dead_code_elimination` so we only check it once.
    pub fn shouldKeepStmtInDeadControlFlow(stmt: Stmt, allocator: Allocator) bool {
        switch (stmt.data) {
            // Omit these statements entirely
            .s_empty, .s_expr, .s_throw, .s_return, .s_break, .s_continue, .s_class, .s_debugger => return false,

            .s_local => |local| {
                if (local.kind != .k_var) {
                    // Omit these statements entirely
                    return false;
                }

                // Omit everything except the identifiers

                // common case: single var foo = blah, don't need to allocate
                if (local.decls.len == 1 and local.decls.ptr[0].binding.data == .b_identifier) {
                    const prev = local.decls.ptr[0];
                    stmt.data.s_local.decls.ptr[0] = G.Decl{ .binding = prev.binding };
                    return true;
                }

                var decls = std.ArrayList(G.Decl).initCapacity(allocator, local.decls.len) catch unreachable;
                for (local.decls.slice()) |decl| {
                    findIdentifiers(decl.binding, &decls);
                }

                local.decls.update(decls);
                return true;
            },

            .s_block => |block| {
                return shouldKeepStmtsInDeadControlFlow(block.stmts, allocator);
            },

            .s_try => |try_stmt| {
                if (shouldKeepStmtsInDeadControlFlow(try_stmt.body, allocator)) {
                    return true;
                }

                if (try_stmt.catch_) |*catch_stmt| {
                    if (shouldKeepStmtsInDeadControlFlow(catch_stmt.body, allocator)) {
                        return true;
                    }
                }

                if (try_stmt.finally) |*finally_stmt| {
                    if (shouldKeepStmtsInDeadControlFlow(finally_stmt.stmts, allocator)) {
                        return true;
                    }
                }

                return false;
            },

            .s_if => |_if_| {
                if (shouldKeepStmtInDeadControlFlow(_if_.yes, allocator)) {
                    return true;
                }

                const no = _if_.no orelse return false;

                return shouldKeepStmtInDeadControlFlow(no, allocator);
            },

            .s_while => {
                return shouldKeepStmtInDeadControlFlow(stmt.data.s_while.body, allocator);
            },

            .s_do_while => {
                return shouldKeepStmtInDeadControlFlow(stmt.data.s_do_while.body, allocator);
            },

            .s_for => |__for__| {
                if (__for__.init) |init_| {
                    if (shouldKeepStmtInDeadControlFlow(init_, allocator)) {
                        return true;
                    }
                }

                return shouldKeepStmtInDeadControlFlow(__for__.body, allocator);
            },

            .s_for_in => |__for__| {
                return shouldKeepStmtInDeadControlFlow(__for__.init, allocator) or shouldKeepStmtInDeadControlFlow(__for__.body, allocator);
            },

            .s_for_of => |__for__| {
                return shouldKeepStmtInDeadControlFlow(__for__.init, allocator) or shouldKeepStmtInDeadControlFlow(__for__.body, allocator);
            },

            .s_label => |label| {
                return shouldKeepStmtInDeadControlFlow(label.stmt, allocator);
            },

            else => return true,
        }
    }

    // Returns true if this expression is known to result in a primitive value (i.e.
    // null, undefined, boolean, number, bigint, or string), even if the expression
    // cannot be removed due to side effects.
    pub fn isPrimitiveWithSideEffects(data: Expr.Data) bool {
        switch (data) {
            .e_null,
            .e_undefined,
            .e_boolean,
            .e_number,
            .e_big_int,
            .e_string,
            .e_inlined_enum,
            => {
                return true;
            },
            .e_unary => |e| {
                switch (e.op) {
                    // number or bigint
                    .un_pos,
                    .un_neg,
                    .un_cpl,
                    .un_pre_dec,
                    .un_pre_inc,
                    .un_post_dec,
                    .un_post_inc,
                    // boolean
                    .un_not,
                    .un_delete,
                    // undefined
                    .un_void,
                    // string
                    .un_typeof,
                    => {
                        return true;
                    },
                    else => {},
                }
            },
            .e_binary => |e| {
                switch (e.op) {
                    // boolean
                    .bin_lt,
                    .bin_le,
                    .bin_gt,
                    .bin_ge,
                    .bin_in,
                    .bin_instanceof,
                    .bin_loose_eq,
                    .bin_loose_ne,
                    .bin_strict_eq,
                    .bin_strict_ne,
                    // string, number, or bigint
                    .bin_add,
                    .bin_add_assign,
                    // number or bigint
                    .bin_sub,
                    .bin_mul,
                    .bin_div,
                    .bin_rem,
                    .bin_pow,
                    .bin_sub_assign,
                    .bin_mul_assign,
                    .bin_div_assign,
                    .bin_rem_assign,
                    .bin_pow_assign,
                    .bin_shl,
                    .bin_shr,
                    .bin_u_shr,
                    .bin_shl_assign,
                    .bin_shr_assign,
                    .bin_u_shr_assign,
                    .bin_bitwise_or,
                    .bin_bitwise_and,
                    .bin_bitwise_xor,
                    .bin_bitwise_or_assign,
                    .bin_bitwise_and_assign,
                    .bin_bitwise_xor_assign,
                    => {
                        return true;
                    },

                    // These always return one of the arguments unmodified
                    .bin_logical_and,
                    .bin_logical_or,
                    .bin_nullish_coalescing,
                    .bin_logical_and_assign,
                    .bin_logical_or_assign,
                    .bin_nullish_coalescing_assign,
                    => {
                        return isPrimitiveWithSideEffects(e.left.data) and isPrimitiveWithSideEffects(e.right.data);
                    },
                    .bin_comma => {
                        return isPrimitiveWithSideEffects(e.right.data);
                    },
                    else => {},
                }
            },
            .e_if => |e| {
                return isPrimitiveWithSideEffects(e.yes.data) and isPrimitiveWithSideEffects(e.no.data);
            },
            else => {},
        }
        return false;
    }
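    // Illustration (assumption, not from the original source): `x++ === 3` is
    // known to produce a boolean even though it cannot be removed, which is
    // exactly what the `==`/`!=` simplification in simplifyUnusedExpr relies on.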

    pub const toTypeOf = Expr.Data.typeof;

    pub fn toNullOrUndefined(p: anytype, exp: Expr.Data) Result {
        if (!p.options.features.dead_code_elimination) {
            // value should not be read if ok is false, all existing calls to this function already adhere to this
            return Result{ .ok = false, .value = undefined, .side_effects = .could_have_side_effects };
        }
        switch (exp) {
            // Never null or undefined
            .e_boolean, .e_number, .e_string, .e_reg_exp, .e_function, .e_arrow, .e_big_int => {
                return Result{ .value = false, .side_effects = .no_side_effects, .ok = true };
            },

            .e_object, .e_array, .e_class => {
                return Result{ .value = false, .side_effects = .could_have_side_effects, .ok = true };
            },

            // always a null or undefined
            .e_null, .e_undefined => {
                return Result{ .value = true, .side_effects = .no_side_effects, .ok = true };
            },

            .e_unary => |e| {
                switch (e.op) {
                    // Always number or bigint
                    .un_pos,
                    .un_neg,
                    .un_cpl,
                    .un_pre_dec,
                    .un_pre_inc,
                    .un_post_dec,
                    .un_post_inc,

                    // Always boolean
                    .un_not,
                    .un_typeof,
                    .un_delete,
                    => {
                        return Result{ .ok = true, .value = false, .side_effects = SideEffects.could_have_side_effects };
                    },

                    // Always undefined
                    .un_void => {
                        return Result{ .value = true, .side_effects = .could_have_side_effects, .ok = true };
                    },

                    else => {},
                }
            },

            .e_binary => |e| {
                switch (e.op) {
                    // always string or number or bigint
                    .bin_add,
                    .bin_add_assign,
                    // always number or bigint
                    .bin_sub,
                    .bin_mul,
                    .bin_div,
                    .bin_rem,
                    .bin_pow,
                    .bin_sub_assign,
                    .bin_mul_assign,
                    .bin_div_assign,
                    .bin_rem_assign,
                    .bin_pow_assign,
                    .bin_shl,
                    .bin_shr,
                    .bin_u_shr,
                    .bin_shl_assign,
                    .bin_shr_assign,
                    .bin_u_shr_assign,
                    .bin_bitwise_or,
                    .bin_bitwise_and,
                    .bin_bitwise_xor,
                    .bin_bitwise_or_assign,
                    .bin_bitwise_and_assign,
                    .bin_bitwise_xor_assign,
                    // always boolean
                    .bin_lt,
                    .bin_le,
                    .bin_gt,
                    .bin_ge,
                    .bin_in,
                    .bin_instanceof,
                    .bin_loose_eq,
                    .bin_loose_ne,
                    .bin_strict_eq,
                    .bin_strict_ne,
                    => {
                        return Result{ .ok = true, .value = false, .side_effects = SideEffects.could_have_side_effects };
                    },

                    .bin_comma => {
                        const res = toNullOrUndefined(p, e.right.data);
                        if (res.ok) {
                            return Result{ .ok = true, .value = res.value, .side_effects = SideEffects.could_have_side_effects };
                        }
                    },
                    else => {},
                }
            },
            .e_inlined_enum => |inlined| {
                return toNullOrUndefined(p, inlined.value.data);
            },
            else => {},
        }

        return Result{ .ok = false, .value = false, .side_effects = SideEffects.could_have_side_effects };
    }

    pub fn toBoolean(p: anytype, exp: Expr.Data) Result {
        // Only do this check once.
        if (!p.options.features.dead_code_elimination) {
            // value should not be read if ok is false, all existing calls to this function already adhere to this
            return Result{ .ok = false, .value = undefined, .side_effects = .could_have_side_effects };
        }

        return toBooleanWithoutDCECheck(exp);
    }
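    // Illustration (assumption, not from the original source): toBoolean
    // reports the test in `while (1) {}` as known-truthy with no side effects,
    // whereas `while (f()) {}` comes back with `ok == false` and is left alone.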
|
||||
// Avoid passing through *P
|
||||
// This is a very recursive function.
|
||||
fn toBooleanWithoutDCECheck(exp: Expr.Data) Result {
|
||||
switch (exp) {
|
||||
.e_null, .e_undefined => {
|
||||
return Result{ .ok = true, .value = false, .side_effects = .no_side_effects };
|
||||
},
|
||||
.e_boolean => |e| {
|
||||
return Result{ .ok = true, .value = e.value, .side_effects = .no_side_effects };
|
||||
},
|
||||
.e_number => |e| {
|
||||
return Result{ .ok = true, .value = e.value != 0.0 and !std.math.isNan(e.value), .side_effects = .no_side_effects };
|
||||
},
|
||||
.e_big_int => |e| {
|
||||
return Result{ .ok = true, .value = !strings.eqlComptime(e.value, "0"), .side_effects = .no_side_effects };
|
||||
},
|
||||
.e_string => |e| {
|
||||
return Result{ .ok = true, .value = e.isPresent(), .side_effects = .no_side_effects };
|
||||
},
|
||||
.e_function, .e_arrow, .e_reg_exp => {
|
||||
return Result{ .ok = true, .value = true, .side_effects = .no_side_effects };
|
||||
},
|
||||
.e_object, .e_array, .e_class => {
|
||||
return Result{ .ok = true, .value = true, .side_effects = .could_have_side_effects };
|
||||
},
|
||||
.e_unary => |e_| {
|
||||
switch (e_.op) {
|
||||
.un_void => {
|
||||
return Result{ .ok = true, .value = false, .side_effects = .could_have_side_effects };
|
||||
},
|
||||
.un_typeof => {
|
||||
// Never an empty string
|
||||
|
||||
return Result{ .ok = true, .value = true, .side_effects = .could_have_side_effects };
|
||||
},
|
||||
.un_not => {
|
||||
const result = toBooleanWithoutDCECheck(e_.value.data);
|
||||
if (result.ok) {
|
||||
return .{ .ok = true, .value = !result.value, .side_effects = result.side_effects };
|
||||
}
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
},
|
||||
.e_binary => |e_| {
|
||||
switch (e_.op) {
|
||||
.bin_logical_or => {
|
||||
// "anything || truthy" is truthy
|
||||
const result = toBooleanWithoutDCECheck(e_.right.data);
|
||||
if (result.value and result.ok) {
|
||||
return Result{ .ok = true, .value = true, .side_effects = .could_have_side_effects };
|
||||
}
|
||||
},
|
||||
.bin_logical_and => {
|
||||
// "anything && falsy" is falsy
|
||||
const result = toBooleanWithoutDCECheck(e_.right.data);
|
||||
if (!result.value and result.ok) {
|
||||
return Result{ .ok = true, .value = false, .side_effects = .could_have_side_effects };
|
||||
}
|
||||
},
|
||||
.bin_comma => {
|
||||
// "anything, truthy/falsy" is truthy/falsy
|
||||
var result = toBooleanWithoutDCECheck(e_.right.data);
|
||||
if (result.ok) {
|
||||
result.side_effects = .could_have_side_effects;
|
||||
return result;
|
||||
}
|
||||
},
|
||||
.bin_gt => {
|
||||
if (e_.left.data.toFiniteNumber()) |left_num| {
|
||||
if (e_.right.data.toFiniteNumber()) |right_num| {
|
||||
return Result{ .ok = true, .value = left_num > right_num, .side_effects = .no_side_effects };
|
||||
}
|
||||
}
|
||||
},
|
||||
.bin_lt => {
|
||||
if (e_.left.data.toFiniteNumber()) |left_num| {
|
||||
if (e_.right.data.toFiniteNumber()) |right_num| {
|
||||
return Result{ .ok = true, .value = left_num < right_num, .side_effects = .no_side_effects };
|
||||
}
|
||||
}
|
||||
},
|
||||
.bin_le => {
|
||||
if (e_.left.data.toFiniteNumber()) |left_num| {
|
||||
if (e_.right.data.toFiniteNumber()) |right_num| {
|
||||
return Result{ .ok = true, .value = left_num <= right_num, .side_effects = .no_side_effects };
|
||||
}
|
||||
}
|
||||
},
|
||||
.bin_ge => {
|
||||
if (e_.left.data.toFiniteNumber()) |left_num| {
|
||||
if (e_.right.data.toFiniteNumber()) |right_num| {
|
||||
return Result{ .ok = true, .value = left_num >= right_num, .side_effects = .no_side_effects };
|
||||
}
|
||||
}
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
},
|
||||
.e_inlined_enum => |inlined| {
|
||||
return toBooleanWithoutDCECheck(inlined.value.data);
|
||||
},
|
||||
.e_special => |special| switch (special) {
|
||||
.module_exports,
|
||||
.resolved_specifier_string,
|
||||
.hot_data,
|
||||
=> {},
|
||||
.hot_accept,
|
||||
.hot_accept_visited,
|
||||
.hot_enabled,
|
||||
=> return .{ .ok = true, .value = true, .side_effects = .no_side_effects },
|
||||
.hot_disabled,
|
||||
=> return .{ .ok = true, .value = false, .side_effects = .no_side_effects },
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
|
||||
return Result{ .ok = false, .value = false, .side_effects = SideEffects.could_have_side_effects };
|
||||
}
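    // Illustrative summary (added for clarity, not in the original source):
    // callers use these results to fold conditions at transpile time, e.g.
    //   if ("") {}          -> ok = true,  value = false (branch is removable)
    //   if (a && false) {}  -> ok = true,  value = false, but could_have_side_effects keeps "a"
    //   if (foo()) {}       -> ok = false (unknown; the expression is left alone)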
};

const string = []const u8;

const options = @import("../options.zig");

const bun = @import("bun");
const Environment = bun.Environment;
const assert = bun.assert;
const strings = bun.strings;

const js_ast = bun.ast;
const Binding = js_ast.Binding;
const E = js_ast.E;
const Expr = js_ast.Expr;
const ExprNodeList = js_ast.ExprNodeList;
const Stmt = js_ast.Stmt;

const G = js_ast.G;
const Decl = G.Decl;
const Property = G.Property;

const std = @import("std");
const List = std.ArrayListUnmanaged;
const Allocator = std.mem.Allocator;
@@ -1,472 +0,0 @@
// This function is taken from the official TypeScript compiler source code:
// https://github.com/microsoft/TypeScript/blob/master/src/compiler/parser.ts
pub fn canFollowTypeArgumentsInExpression(p: anytype) bool {
    return switch (p.lexer.token) {
        // These are the only tokens that can legally follow a type argument list, so we
        // definitely want to treat them as type arg lists.
        .t_open_paren, // foo<x>(
        .t_no_substitution_template_literal, // foo<T> `...`
        // foo<T> `...${100}...`
        .t_template_head,
        => true,

        // A type argument list followed by `<` never makes sense, and a type argument list followed
        // by `>` is ambiguous with a (re-scanned) `>>` operator, so we disqualify both. Also, in
        // this context, `+` and `-` are unary operators, not binary operators.
        .t_less_than,
        .t_greater_than,
        .t_plus,
        .t_minus,
        // TypeScript always sees "t_greater_than" instead of these tokens since
        // their scanner works a little differently than our lexer. So since
        // "t_greater_than" is forbidden above, we also forbid these.
        .t_greater_than_equals,
        .t_greater_than_greater_than,
        .t_greater_than_greater_than_equals,
        .t_greater_than_greater_than_greater_than,
        .t_greater_than_greater_than_greater_than_equals,
        => false,

        // We favor the type argument list interpretation when it is immediately followed by
        // a line break, a binary operator, or something that can't start an expression.
        else => p.lexer.has_newline_before or isBinaryOperator(p) or !isStartOfExpression(p),
    };
}
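// Illustrative examples (added for clarity, not in the original source):
//   "f<T>(x)"   -> "(" follows, so "<T>" is kept as type arguments
//   "a < b > c" -> "c" can start an expression, so this stays two comparisons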

pub const Metadata = union(enum) {
    m_none: void,

    m_never: void,
    m_unknown: void,
    m_any: void,
    m_void: void,
    m_null: void,
    m_undefined: void,
    m_function: void,
    m_array: void,
    m_boolean: void,
    m_string: void,
    m_object: void,
    m_number: void,
    m_bigint: void,
    m_symbol: void,
    m_promise: void,
    m_identifier: Ref,
    m_dot: List(Ref),

    pub const default: @This() = .m_none;

    // the logic in finishUnion, mergeUnion, finishIntersection and mergeIntersection is
    // translated from:
    // https://github.com/microsoft/TypeScript/blob/e0a324b0503be479f2b33fd2e17c6e86c94d1297/src/compiler/transformers/typeSerializer.ts#L402

    /// Return the final union type if possible, or return null to continue merging.
    ///
    /// If the current type is m_never, m_null, or m_undefined, assign the current type
    /// to m_none and return null to ensure it's always replaced by the next type.
    pub fn finishUnion(current: *@This(), p: anytype) ?@This() {
        return switch (current.*) {
            .m_identifier => |ref| {
                if (strings.eqlComptime(p.loadNameFromRef(ref), "Object")) {
                    return .m_object;
                }
                return null;
            },

            .m_unknown,
            .m_any,
            .m_object,
            => .m_object,

            .m_never,
            .m_null,
            .m_undefined,
            => {
                current.* = .m_none;
                return null;
            },

            else => null,
        };
    }

    pub fn mergeUnion(result: *@This(), left: @This()) void {
        if (left != .m_none) {
            if (std.meta.activeTag(result.*) != std.meta.activeTag(left)) {
                result.* = switch (result.*) {
                    .m_never,
                    .m_undefined,
                    .m_null,
                    => left,

                    else => .m_object,
                };
            } else {
                switch (result.*) {
                    .m_identifier => |ref| {
                        if (!ref.eql(left.m_identifier)) {
                            result.* = .m_object;
                        }
                    },
                    else => {},
                }
            }
        } else {
            // always take the next value if left is m_none
        }
    }
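    // For example (added for clarity, not in the original source): merging
    // m_string into an m_undefined result yields m_string, while m_string and
    // m_number widen to m_object, so "string | number" serializes as Object in
    // emitted decorator metadata.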

    /// Return the final intersection type if possible, or return null to continue merging.
    ///
    /// If the current type is m_unknown, m_null, or m_undefined, assign the current type
    /// to m_none and return null to ensure it's always replaced by the next type.
    pub fn finishIntersection(current: *@This(), p: anytype) ?@This() {
        return switch (current.*) {
            .m_identifier => |ref| {
                if (strings.eqlComptime(p.loadNameFromRef(ref), "Object")) {
                    return .m_object;
                }
                return null;
            },

            // ensure m_never is the final type
            .m_never => .m_never,

            .m_any,
            .m_object,
            => .m_object,

            .m_unknown,
            .m_null,
            .m_undefined,
            => {
                current.* = .m_none;
                return null;
            },

            else => null,
        };
    }

    pub fn mergeIntersection(result: *@This(), left: @This()) void {
        if (left != .m_none) {
            if (std.meta.activeTag(result.*) != std.meta.activeTag(left)) {
                result.* = switch (result.*) {
                    .m_unknown,
                    .m_undefined,
                    .m_null,
                    => left,

                    // ensure m_never is the final type
                    .m_never => .m_never,

                    else => .m_object,
                };
            } else {
                switch (result.*) {
                    .m_identifier => |ref| {
                        if (!ref.eql(left.m_identifier)) {
                            result.* = .m_object;
                        }
                    },
                    else => {},
                }
            }
        } else {
            // make sure intersection of only m_unknown serializes to "undefined"
            // instead of "Object"
            if (result.* == .m_unknown) {
                result.* = .m_undefined;
            }
        }
    }
};

pub fn isTSArrowFnJSX(p: anytype) !bool {
    const old_lexer = p.lexer;

    try p.lexer.next();
    // Look ahead to see if this should be an arrow function instead
    var is_ts_arrow_fn = false;

    if (p.lexer.token == .t_const) {
        try p.lexer.next();
    }
    if (p.lexer.token == .t_identifier) {
        try p.lexer.next();
        if (p.lexer.token == .t_comma) {
            is_ts_arrow_fn = true;
        } else if (p.lexer.token == .t_extends) {
            try p.lexer.next();
            is_ts_arrow_fn = p.lexer.token != .t_equals and p.lexer.token != .t_greater_than;
        }
    }

    // Restore the lexer
    p.lexer.restore(&old_lexer);
    return is_ts_arrow_fn;
}
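// e.g. in a .tsx file (illustrative, not in the original source):
//   "<T,>() => {}" and "<T extends object>() => {}" parse as generic arrows,
//   while "<T>...</T>" keeps its JSX interpretation.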

// This function is taken from the official TypeScript compiler source code:
// https://github.com/microsoft/TypeScript/blob/master/src/compiler/parser.ts
fn isBinaryOperator(p: anytype) bool {
    return switch (p.lexer.token) {
        .t_in => p.allow_in,

        .t_question_question,
        .t_bar_bar,
        .t_ampersand_ampersand,
        .t_bar,
        .t_caret,
        .t_ampersand,
        .t_equals_equals,
        .t_exclamation_equals,
        .t_equals_equals_equals,
        .t_exclamation_equals_equals,
        .t_less_than,
        .t_greater_than,
        .t_less_than_equals,
        .t_greater_than_equals,
        .t_instanceof,
        .t_less_than_less_than,
        .t_greater_than_greater_than,
        .t_greater_than_greater_than_greater_than,
        .t_plus,
        .t_minus,
        .t_asterisk,
        .t_slash,
        .t_percent,
        .t_asterisk_asterisk,
        => true,
        .t_identifier => p.lexer.isContextualKeyword("as") or p.lexer.isContextualKeyword("satisfies"),
        else => false,
    };
}

// This function is taken from the official TypeScript compiler source code:
// https://github.com/microsoft/TypeScript/blob/master/src/compiler/parser.ts
fn isStartOfLeftHandSideExpression(p: anytype) bool {
    return switch (p.lexer.token) {
        .t_this,
        .t_super,
        .t_null,
        .t_true,
        .t_false,
        .t_numeric_literal,
        .t_big_integer_literal,
        .t_string_literal,
        .t_no_substitution_template_literal,
        .t_template_head,
        .t_open_paren,
        .t_open_bracket,
        .t_open_brace,
        .t_function,
        .t_class,
        .t_new,
        .t_slash,
        .t_slash_equals,
        .t_identifier,
        => true,
        .t_import => lookAheadNextTokenIsOpenParenOrLessThanOrDot(p),
        else => isIdentifier(p),
    };
}

fn lookAheadNextTokenIsOpenParenOrLessThanOrDot(p: anytype) bool {
    const old_lexer = p.lexer;
    const old_log_disabled = p.lexer.is_log_disabled;
    p.lexer.is_log_disabled = true;
    defer {
        p.lexer.restore(&old_lexer);
        p.lexer.is_log_disabled = old_log_disabled;
    }
    p.lexer.next() catch {};

    return switch (p.lexer.token) {
        .t_open_paren, .t_less_than, .t_dot => true,
        else => false,
    };
}

// This function is taken from the official TypeScript compiler source code:
// https://github.com/microsoft/TypeScript/blob/master/src/compiler/parser.ts
fn isIdentifier(p: anytype) bool {
    if (p.lexer.token == .t_identifier) {
        // If we have a 'yield' keyword, and we're in the [yield] context, then 'yield' is
        // considered a keyword and is not an identifier.
        if (p.fn_or_arrow_data_parse.allow_yield != .allow_ident and strings.eqlComptime(p.lexer.identifier, "yield")) {
            return false;
        }

        // If we have an 'await' keyword, and we're in the [await] context, then 'await' is
        // considered a keyword and is not an identifier.
        if (p.fn_or_arrow_data_parse.allow_await != .allow_ident and strings.eqlComptime(p.lexer.identifier, "await")) {
            return false;
        }

        return true;
    }

    return false;
}

fn isStartOfExpression(p: anytype) bool {
    if (isStartOfLeftHandSideExpression(p))
        return true;

    switch (p.lexer.token) {
        .t_plus,
        .t_minus,
        .t_tilde,
        .t_exclamation,
        .t_delete,
        .t_typeof,
        .t_void,
        .t_plus_plus,
        .t_minus_minus,
        .t_less_than,
        .t_private_identifier,
        .t_at,
        => return true,
        else => {
            if (p.lexer.token == .t_identifier and (strings.eqlComptime(p.lexer.identifier, "await") or strings.eqlComptime(p.lexer.identifier, "yield"))) {
                // Yield/await always starts an expression. Either it is an identifier (in which case
                // it is definitely an expression), or it's a keyword (because we're in a generator
                // or async function, or in strict mode, or both) and it starts a yield or await expression.
                return true;
            }

            // Error tolerance. If we see the start of some binary operator, we consider
            // that the start of an expression. That way we'll parse out a missing identifier,
            // give a good message about an identifier being missing, and then consume the
            // rest of the binary expression.
            if (isBinaryOperator(p)) {
                return true;
            }

            return isIdentifier(p);
        },
    }

    unreachable;
}

pub const Identifier = struct {
    pub const StmtIdentifier = enum {
        s_type,

        s_namespace,

        s_abstract,

        s_module,

        s_interface,

        s_declare,
    };
    pub fn forStr(str: string) ?StmtIdentifier {
        switch (str.len) {
            "type".len => return if (strings.eqlComptimeIgnoreLen(str, "type"))
                .s_type
            else
                null,
            "interface".len => {
                if (strings.eqlComptime(str, "interface")) {
                    return .s_interface;
                } else if (strings.eqlComptime(str, "namespace")) {
                    return .s_namespace;
                } else {
                    return null;
                }
            },
            "abstract".len => {
                if (strings.eqlComptime(str, "abstract")) {
                    return .s_abstract;
                } else {
                    return null;
                }
            },
            "declare".len => {
                if (strings.eqlComptime(str, "declare")) {
                    return .s_declare;
                } else {
                    return null;
                }
            },
            "module".len => {
                if (strings.eqlComptime(str, "module")) {
                    return .s_module;
                } else {
                    return null;
                }
            },
            else => return null,
        }
    }
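    // Note (added for clarity): dispatching on str.len first rejects most
    // identifiers with a single integer comparison before any string compare.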
    pub const IMap = bun.ComptimeStringMap(Kind, .{
        .{ "unique", .unique },
        .{ "abstract", .abstract },
        .{ "asserts", .asserts },

        .{ "keyof", .prefix_keyof },
        .{ "readonly", .prefix_readonly },

        .{ "any", .primitive_any },
        .{ "never", .primitive_never },
        .{ "unknown", .primitive_unknown },
        .{ "undefined", .primitive_undefined },
        .{ "object", .primitive_object },
        .{ "number", .primitive_number },
        .{ "string", .primitive_string },
        .{ "boolean", .primitive_boolean },
        .{ "bigint", .primitive_bigint },
        .{ "symbol", .primitive_symbol },

        .{ "infer", .infer },
    });
    pub const Kind = enum {
        normal,
        unique,
        abstract,
        asserts,
        prefix_keyof,
        prefix_readonly,
        primitive_any,
        primitive_never,
        primitive_unknown,
        primitive_undefined,
        primitive_object,
        primitive_number,
        primitive_string,
        primitive_boolean,
        primitive_bigint,
        primitive_symbol,
        infer,
    };
};

pub const SkipTypeOptions = enum {
    is_return_type,
    is_index_signature,
    allow_tuple_labels,
    disallow_conditional_types,

    pub const Bitset = std.enums.EnumSet(@This());
    pub const empty = Bitset.initEmpty();
};

const string = []const u8;

const bun = @import("bun");
const strings = bun.strings;

const js_lexer = bun.js_lexer;
const T = js_lexer.T;

const js_parser = bun.js_parser;
const Ref = js_parser.Ref;
const TypeScript = js_parser.TypeScript;

const std = @import("std");
const List = std.ArrayListUnmanaged;
@@ -1,233 +0,0 @@
/// Concatenate two `E.String`s, mutating BOTH inputs
/// unless `has_inlined_enum_poison` is set.
///
/// Currently, inlined enum poison refers to cases where mutation would cause
/// output bugs due to inlined enum values sharing `E.String`s. If a new use
/// case besides inlined enums comes up to set this to true, please rename the
/// variable and document it.
fn joinStrings(left: *const E.String, right: *const E.String, has_inlined_enum_poison: bool) E.String {
    var new = if (has_inlined_enum_poison)
        // Inlined enums can be shared by multiple call sites. In
        // this case, we need to ensure that the ENTIRE rope is
        // cloned. In other situations, the lhs doesn't have any
        // other owner, so it is fine to mutate `lhs.data.end.next`.
        //
        // Consider the following case:
        //   const enum A {
        //     B = "a" + "b",
        //     D = B + "d",
        //   };
        //   console.log(A.B, A.D);
        left.cloneRopeNodes()
    else
        left.*;

    // Similarly, the right side has to be cloned for an enum rope too.
    //
    // Consider the following case:
    //   const enum A {
    //     B = "1" + "2",
    //     C = ("3" + B) + "4",
    //   };
    //   console.log(A.B, A.C);
    const rhs_clone = Expr.Data.Store.append(E.String, if (has_inlined_enum_poison)
        right.cloneRopeNodes()
    else
        right.*);

    new.push(rhs_clone);
    new.prefer_template = new.prefer_template or rhs_clone.prefer_template;

    return new;
}
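// Why the clone matters (added for clarity): when folding B + "d" above, the
// left rope is the same object that the inlined A.B still references; pushing
// onto it without cloning would make A.B print as "abd" too.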

/// Transforming the left operand into a string is not safe if it comes from a
/// nested AST node.
const FoldStringAdditionKind = enum {
    // "x" + "y" -> "xy"
    // 1 + "y" -> "1y"
    normal,
    // a + "x" + "y" -> a + "xy"
    // a + 1 + "y" -> a + 1 + "y"
    nested_left,
};

/// NOTE: unlike esbuild's js_ast_helpers.FoldStringAddition, this does mutate
/// the input AST in the case of rope strings
pub fn foldStringAddition(l: Expr, r: Expr, allocator: std.mem.Allocator, kind: FoldStringAdditionKind) ?Expr {
    // "See through" inline enum constants
    // TODO: implement foldAdditionPreProcess to fold some more things :)
    var lhs = l.unwrapInlined();
    var rhs = r.unwrapInlined();

    if (kind != .nested_left) {
        // See comment on `FoldStringAdditionKind` for examples
        switch (rhs.data) {
            .e_string, .e_template => {
                if (lhs.toStringExprWithoutSideEffects(allocator)) |str| {
                    lhs = str;
                }
            },
            else => {},
        }
    }

    switch (lhs.data) {
        .e_string => |left| {
            if (rhs.toStringExprWithoutSideEffects(allocator)) |str| {
                rhs = str;
            }

            if (left.isUTF8()) {
                switch (rhs.data) {
                    // "bar" + "baz" => "barbaz"
                    .e_string => |right| {
                        if (right.isUTF8()) {
                            const has_inlined_enum_poison =
                                l.data == .e_inlined_enum or
                                r.data == .e_inlined_enum;

                            return Expr.init(E.String, joinStrings(
                                left,
                                right,
                                has_inlined_enum_poison,
                            ), lhs.loc);
                        }
                    },
                    // "bar" + `baz${bar}` => `barbaz${bar}`
                    .e_template => |right| {
                        if (right.head.isUTF8()) {
                            return Expr.init(E.Template, E.Template{
                                .parts = right.parts,
                                .head = .{ .cooked = joinStrings(
                                    left,
                                    &right.head.cooked,
                                    l.data == .e_inlined_enum,
                                ) },
                            }, l.loc);
                        }
                    },
                    else => {
                        // other constant-foldable ast nodes would have been converted to .e_string
                    },
                }

                // "'x' + `y${z}`" => "`xy${z}`"
                if (rhs.data == .e_template and rhs.data.e_template.tag == null) {}
            }

            if (left.len() == 0 and rhs.knownPrimitive() == .string) {
                return rhs;
            }

            return null;
        },

        .e_template => |left| {
            // "`${x}` + 0" => "`${x}` + '0'"
            if (rhs.toStringExprWithoutSideEffects(allocator)) |str| {
                rhs = str;
            }

            if (left.tag == null) {
                switch (rhs.data) {
                    // `foo${bar}` + "baz" => `foo${bar}baz`
                    .e_string => |right| {
                        if (right.isUTF8()) {
                            // Mutation of this node is fine because it will not
                            // be shared by other places. Note that e_template will
                            // be treated by enums as strings, but will not be
                            // inlined unless they could be converted into
                            // .e_string.
                            if (left.parts.len > 0) {
                                const i = left.parts.len - 1;
                                const last = left.parts[i];
                                if (last.tail.isUTF8()) {
                                    left.parts[i].tail = .{ .cooked = joinStrings(
                                        &last.tail.cooked,
                                        right,
                                        r.data == .e_inlined_enum,
                                    ) };
                                    return lhs;
                                }
                            } else {
                                if (left.head.isUTF8()) {
                                    left.head = .{ .cooked = joinStrings(
                                        &left.head.cooked,
                                        right,
                                        r.data == .e_inlined_enum,
                                    ) };
                                    return lhs;
                                }
                            }
                        }
                    },
                    // `foo${bar}` + `a${hi}b` => `foo${bar}a${hi}b`
                    .e_template => |right| {
                        if (right.tag == null and right.head.isUTF8()) {
                            if (left.parts.len > 0) {
                                const i = left.parts.len - 1;
                                const last = left.parts[i];
                                if (last.tail.isUTF8() and right.head.isUTF8()) {
                                    left.parts[i].tail = .{ .cooked = joinStrings(
                                        &last.tail.cooked,
                                        &right.head.cooked,
                                        r.data == .e_inlined_enum,
                                    ) };

                                    left.parts = if (right.parts.len == 0)
                                        left.parts
                                    else
                                        std.mem.concat(
                                            allocator,
                                            E.TemplatePart,
                                            &.{ left.parts, right.parts },
                                        ) catch bun.outOfMemory();
                                    return lhs;
                                }
                            } else {
                                if (left.head.isUTF8() and right.head.isUTF8()) {
                                    left.head = .{ .cooked = joinStrings(
                                        &left.head.cooked,
                                        &right.head.cooked,
                                        r.data == .e_inlined_enum,
                                    ) };
                                    left.parts = right.parts;
                                    return lhs;
                                }
                            }
                        }
                    },
                    else => {
                        // other constant-foldable ast nodes would have been converted to .e_string
                    },
                }
            }
        },

        else => {
            // other constant-foldable ast nodes would have been converted to .e_string
        },
    }

    if (rhs.data.as(.e_string)) |right| {
        if (right.len() == 0 and lhs.knownPrimitive() == .string) {
            return lhs;
        }
    }

    return null;
}
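// Illustrative end-to-end example (added for clarity, not in the original
// source): folding ("foo" + "bar") + `baz${x}` first yields "foobar" via the
// e_string/e_string case, then the template case joins it into the head,
// producing `foobarbaz${x}`.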

const string = []const u8;

const std = @import("std");
const Allocator = std.mem.Allocator;

const bun = @import("bun");
const strings = bun.strings;

const js_ast = bun.ast;
const B = js_ast.B;
const E = js_ast.E;
const Expr = js_ast.Expr;
@@ -1,725 +0,0 @@
pub fn AstMaybe(
    comptime parser_feature__typescript: bool,
    comptime parser_feature__jsx: JSXTransformType,
    comptime parser_feature__scan_only: bool,
) type {
    return struct {
        const P = js_parser.NewParser_(parser_feature__typescript, parser_feature__jsx, parser_feature__scan_only);

        pub fn maybeRelocateVarsToTopLevel(p: *P, decls: []const G.Decl, mode: RelocateVars.Mode) RelocateVars {
            // Only do this when the scope is not already top-level and when we're not inside a function.
            if (p.current_scope == p.module_scope) {
                return .{ .ok = false };
            }

            var scope = p.current_scope;
            while (!scope.kindStopsHoisting()) {
                if (comptime Environment.allow_assert) assert(scope.parent != null);
                scope = scope.parent.?;
            }

            if (scope != p.module_scope) {
                return .{ .ok = false };
            }

            var value: Expr = Expr{ .loc = logger.Loc.Empty, .data = Expr.Data{ .e_missing = E.Missing{} } };

            for (decls) |decl| {
                const binding = Binding.toExpr(
                    &decl.binding,
                    p.to_expr_wrapper_hoisted,
                );
                if (decl.value) |decl_value| {
                    value = value.joinWithComma(Expr.assign(binding, decl_value), p.allocator);
                } else if (mode == .for_in_or_for_of) {
                    value = value.joinWithComma(binding, p.allocator);
                }
            }

            if (value.data == .e_missing) {
                return .{ .ok = true };
            }

            return .{ .stmt = p.s(S.SExpr{ .value = value }, value.loc), .ok = true };
        }
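        // For example (added for clarity): relocating "var x = 1" out of a
        // nested block leaves the hoisted declaration to be emitted at the top
        // level and replaces the statement here with the assignment "x = 1";
        // in for-in/for-of heads, the bare binding expression is kept instead.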

        // EDot nodes represent a property access. This function may return an
        // expression to replace the property access with. It assumes that the
        // target of the EDot expression has already been visited.
        pub fn maybeRewritePropertyAccess(
            p: *P,
            loc: logger.Loc,
            target: js_ast.Expr,
            name: string,
            name_loc: logger.Loc,
            identifier_opts: IdentifierOpts,
        ) ?Expr {
            sw: switch (target.data) {
                .e_identifier => |id| {
                    // Rewrite property accesses on explicit namespace imports as an identifier.
                    // This lets us replace them easily in the printer to rebind them to
                    // something else without paying the cost of a whole-tree traversal during
                    // module linking just to rewrite these EDot expressions.
                    if (p.options.bundle) {
                        if (p.import_items_for_namespace.getPtr(id.ref)) |import_items| {
                            const ref = (import_items.get(name) orelse brk: {
                                // Generate a new import item symbol in the module scope
                                const new_item = LocRef{
                                    .loc = name_loc,
                                    .ref = p.newSymbol(.import, name) catch unreachable,
                                };
                                p.module_scope.generated.push(p.allocator, new_item.ref.?) catch unreachable;

                                import_items.put(name, new_item) catch unreachable;
                                p.is_import_item.put(p.allocator, new_item.ref.?, {}) catch unreachable;

                                var symbol = &p.symbols.items[new_item.ref.?.innerIndex()];

                                // Mark this as generated in case it's missing. We don't want to
                                // generate errors for missing import items that are automatically
                                // generated.
                                symbol.import_item_status = .generated;

                                break :brk new_item;
                            }).ref.?;

                            // Undo the usage count for the namespace itself. This is used later
                            // to detect whether the namespace symbol has ever been "captured"
                            // or whether it has just been used to read properties off of.
                            //
                            // The benefit of doing this is that if both this module and the
                            // imported module end up in the same module group and the namespace
                            // symbol has never been captured, then we don't need to generate
                            // any code for the namespace at all.
                            p.ignoreUsage(id.ref);

                            // Track how many times we've referenced this symbol
                            p.recordUsage(ref);

                            return p.handleIdentifier(
                                name_loc,
                                E.Identifier{ .ref = ref },
                                name,
                                .{
                                    .assign_target = identifier_opts.assign_target,
                                    .is_call_target = identifier_opts.is_call_target,
                                    .is_delete_target = identifier_opts.is_delete_target,

                                    // If this expression is used as the target of a call expression, make
                                    // sure the value of "this" is preserved.
                                    .was_originally_identifier = false,
                                },
                            );
                        }
                    }

                    if (!p.is_control_flow_dead and id.ref.eql(p.module_ref)) {
                        // Rewrite "module.require()" to "require()" for Webpack compatibility.
                        // See https://github.com/webpack/webpack/pull/7750 for more info.
                        // This also makes correctness a little easier.
                        if (identifier_opts.is_call_target and strings.eqlComptime(name, "require")) {
                            p.ignoreUsage(p.module_ref);
                            return p.valueForRequire(name_loc);
                        } else if (!p.commonjs_named_exports_deoptimized and strings.eqlComptime(name, "exports")) {
                            if (identifier_opts.assign_target != .none) {
                                p.commonjs_module_exports_assigned_deoptimized = true;
                            }

                            // Detect if we are doing
                            //
                            //   module.exports = {
                            //     foo: "bar"
                            //   }
                            //
                            // Note that it cannot be any of these:
                            //
                            //   module.exports += { };
                            //   delete module.exports = {};
                            //   module.exports()
                            if (!(identifier_opts.is_call_target or identifier_opts.is_delete_target) and
                                identifier_opts.assign_target == .replace and
                                p.stmt_expr_value == .e_binary and
                                p.stmt_expr_value.e_binary.op == .bin_assign)
                            {
                                if (
                                    // if it's not top-level, don't do this
                                    p.module_scope != p.current_scope or
                                    // if you do
                                    //
                                    //   exports.foo = 123;
                                    //   module.exports = {};
                                    //
                                    // that's a de-opt.
                                    p.commonjs_named_exports.count() > 0 or

                                    // anything which is not module.exports = {} is a de-opt.
                                    p.stmt_expr_value.e_binary.right.data != .e_object or
                                    p.stmt_expr_value.e_binary.left.data != .e_dot or
                                    !strings.eqlComptime(p.stmt_expr_value.e_binary.left.data.e_dot.name, "exports") or
                                    p.stmt_expr_value.e_binary.left.data.e_dot.target.data != .e_identifier or
                                    !p.stmt_expr_value.e_binary.left.data.e_dot.target.data.e_identifier.ref.eql(p.module_ref))
                                {
                                    p.deoptimizeCommonJSNamedExports();
                                    return null;
                                }

                                const props: []const G.Property = p.stmt_expr_value.e_binary.right.data.e_object.properties.slice();
                                for (props) |prop| {
                                    // if it's not a trivial object literal, de-opt
                                    if (prop.kind != .normal or
                                        prop.key == null or
                                        prop.key.?.data != .e_string or
                                        prop.flags.contains(Flags.Property.is_method) or
                                        prop.flags.contains(Flags.Property.is_computed) or
                                        prop.flags.contains(Flags.Property.is_spread) or
                                        prop.flags.contains(Flags.Property.is_static) or
                                        // If it creates a new scope, we can't do this optimization right now
                                        // Our scope order verification stuff will get mad
                                        // But we should let you do module.exports = { bar: foo(), baz: 123 }
                                        // just not module.exports = { bar: function() {} }
                                        // just not module.exports = { bar() {} }
                                        switch (prop.value.?.data) {
                                            .e_commonjs_export_identifier, .e_import_identifier, .e_identifier => false,
                                            .e_call => |call| switch (call.target.data) {
                                                .e_commonjs_export_identifier, .e_import_identifier, .e_identifier => false,
                                                else => |call_target| !@as(Expr.Tag, call_target).isPrimitiveLiteral(),
                                            },
                                            else => !prop.value.?.isPrimitiveLiteral(),
                                        })
                                    {
                                        p.deoptimizeCommonJSNamedExports();
                                        return null;
                                    }
                                } else {
                                    // empty object de-opts because otherwise the statement becomes
                                    //   <empty space> = {};
                                    p.deoptimizeCommonJSNamedExports();
                                    return null;
                                }

                                var stmts = std.ArrayList(Stmt).initCapacity(p.allocator, props.len * 2) catch unreachable;
                                var decls = p.allocator.alloc(Decl, props.len) catch unreachable;
                                var clause_items = p.allocator.alloc(js_ast.ClauseItem, props.len) catch unreachable;

                                for (props) |prop| {
                                    const key = prop.key.?.data.e_string.string(p.allocator) catch unreachable;
                                    const visited_value = p.visitExpr(prop.value.?);
                                    const value = SideEffects.simplifyUnusedExpr(p, visited_value) orelse visited_value;

                                    // We are doing `module.exports = { ... }`,
                                    // so let's rewrite it to a series of what will become export assignments
                                    const named_export_entry = p.commonjs_named_exports.getOrPut(p.allocator, key) catch unreachable;
                                    if (!named_export_entry.found_existing) {
                                        const new_ref = p.newSymbol(
                                            .other,
                                            std.fmt.allocPrint(p.allocator, "${any}", .{bun.fmt.fmtIdentifier(key)}) catch unreachable,
                                        ) catch unreachable;
                                        p.module_scope.generated.push(p.allocator, new_ref) catch unreachable;
                                        named_export_entry.value_ptr.* = .{
                                            .loc_ref = LocRef{
                                                .loc = name_loc,
                                                .ref = new_ref,
                                            },
                                            .needs_decl = false,
                                        };
                                    }
                                    const ref = named_export_entry.value_ptr.loc_ref.ref.?;
                                    // module.exports = {
                                    //   foo: "bar",
                                    //   baz: "qux",
                                    // }
                                    // ->
                                    // exports.foo = "bar", exports.baz = "qux"
                                    // Which will become
                                    //   $foo = "bar";
                                    //   $baz = "qux";
                                    //   export { $foo as foo, $baz as baz }

                                    decls[0] = .{
                                        .binding = p.b(B.Identifier{ .ref = ref }, prop.key.?.loc),
                                        .value = value,
                                    };
                                    // we have to ensure these are known to be top-level
                                    p.declared_symbols.append(p.allocator, .{
                                        .ref = ref,
                                        .is_top_level = true,
                                    }) catch unreachable;
                                    p.had_commonjs_named_exports_this_visit = true;
                                    clause_items[0] = js_ast.ClauseItem{
                                        // We want the generated name to not conflict
                                        .alias = key,
                                        .alias_loc = prop.key.?.loc,
                                        .name = named_export_entry.value_ptr.loc_ref,
                                    };

                                    stmts.appendSlice(
                                        &[_]Stmt{
                                            p.s(
                                                S.Local{
                                                    .kind = .k_var,
                                                    .is_export = false,
                                                    .was_commonjs_export = true,
                                                    .decls = G.Decl.List.init(decls[0..1]),
                                                },
                                                prop.key.?.loc,
                                            ),
                                            p.s(
                                                S.ExportClause{
                                                    .items = clause_items[0..1],
                                                    .is_single_line = true,
                                                },
                                                prop.key.?.loc,
                                            ),
                                        },
                                    ) catch unreachable;
                                    decls = decls[1..];
                                    clause_items = clause_items[1..];
                                }

                                p.ignoreUsage(p.module_ref);
                                p.commonjs_replacement_stmts = stmts.items;
                                return p.newExpr(E.Missing{}, name_loc);
                            }

                            // Deoptimizations:
                            //   delete module.exports
                            //   module.exports();
                            if (identifier_opts.is_call_target or identifier_opts.is_delete_target or identifier_opts.assign_target != .none) {
                                p.deoptimizeCommonJSNamedExports();
                                return null;
                            }

                            // rewrite `module.exports` to `exports`
                            return .{ .data = .{ .e_special = .module_exports }, .loc = name_loc };
                        } else if (p.options.bundle and strings.eqlComptime(name, "id") and identifier_opts.assign_target == .none) {
                            // inline module.id
                            p.ignoreUsage(p.module_ref);
                            return p.newExpr(E.String.init(p.source.path.pretty), name_loc);
                        } else if (p.options.bundle and strings.eqlComptime(name, "filename") and identifier_opts.assign_target == .none) {
                            // inline module.filename
                            p.ignoreUsage(p.module_ref);
                            return p.newExpr(E.String.init(p.source.path.name.filename), name_loc);
                        } else if (p.options.bundle and strings.eqlComptime(name, "path") and identifier_opts.assign_target == .none) {
                            // inline module.path
                            p.ignoreUsage(p.module_ref);
                            return p.newExpr(E.String.init(p.source.path.pretty), name_loc);
                        }
                    }

                    if (p.shouldUnwrapCommonJSToESM()) {
                        if (!p.is_control_flow_dead and id.ref.eql(p.exports_ref)) {
                            if (!p.commonjs_named_exports_deoptimized) {
                                if (identifier_opts.is_delete_target) {
                                    p.deoptimizeCommonJSNamedExports();
                                    return null;
                                }

                                const named_export_entry = p.commonjs_named_exports.getOrPut(p.allocator, name) catch unreachable;
                                if (!named_export_entry.found_existing) {
                                    const new_ref = p.newSymbol(
                                        .other,
                                        std.fmt.allocPrint(p.allocator, "${any}", .{bun.fmt.fmtIdentifier(name)}) catch unreachable,
                                    ) catch unreachable;
                                    p.module_scope.generated.push(p.allocator, new_ref) catch unreachable;
                                    named_export_entry.value_ptr.* = .{
                                        .loc_ref = LocRef{
                                            .loc = name_loc,
                                            .ref = new_ref,
                                        },
                                        .needs_decl = true,
                                    };
                                    if (p.commonjs_named_exports_needs_conversion == std.math.maxInt(u32))
                                        p.commonjs_named_exports_needs_conversion = @as(u32, @truncate(p.commonjs_named_exports.count() - 1));
                                }

                                const ref = named_export_entry.value_ptr.*.loc_ref.ref.?;
                                p.ignoreUsage(id.ref);
                                p.recordUsage(ref);

                                return p.newExpr(
                                    E.CommonJSExportIdentifier{
                                        .ref = ref,
                                    },
                                    name_loc,
                                );
                            } else if (p.options.features.commonjs_at_runtime and identifier_opts.assign_target != .none) {
                                p.has_commonjs_export_names = true;
                            }
                        }
                    }

                    // Handle references to namespaces or namespace members
                    if (p.ts_namespace.expr == .e_identifier and
                        id.ref.eql(p.ts_namespace.expr.e_identifier.ref) and
                        identifier_opts.assign_target == .none and
                        !identifier_opts.is_delete_target)
                    {
                        return maybeRewritePropertyAccessForNamespace(p, name, &target, loc, name_loc);
                    }
                },
                .e_string => |str| {
                    if (p.options.features.minify_syntax) {
                        // minify "long-string".length to 11
                        if (strings.eqlComptime(name, "length")) {
                            if (str.javascriptLength()) |len| {
                                return p.newExpr(E.Number{ .value = @floatFromInt(len) }, loc);
                            }
                        }
                    }
                },
                .e_inlined_enum => |ie| {
                    continue :sw ie.value.data;
                },
                .e_object => |obj| {
                    if (comptime FeatureFlags.inline_properties_in_transpiler) {
                        if (p.options.features.minify_syntax) {
                            // Rewrite a property access like this:
                            //   { f: () => {} }.f
                            // To:
                            //   () => {}
                            //
                            // To avoid thinking too much about edge cases, only do this for:
                            //   1) Objects with a single property
                            //   2) Not a method, not a computed property
                            if (obj.properties.len == 1 and
                                !identifier_opts.is_delete_target and
                                identifier_opts.assign_target == .none and !identifier_opts.is_call_target)
                            {
                                const prop: G.Property = obj.properties.ptr[0];
                                if (prop.value != null and
                                    prop.flags.count() == 0 and
                                    prop.key != null and
                                    prop.key.?.data == .e_string and
                                    prop.key.?.data.e_string.eql([]const u8, name) and
                                    !bun.strings.eqlComptime(name, "__proto__"))
                                {
                                    return prop.value.?;
                                }
                            }
                        }
                    }
                },
                .e_import_meta => {
                    if (strings.eqlComptime(name, "main")) {
                        return p.valueForImportMetaMain(false, target.loc);
                    }

                    if (strings.eqlComptime(name, "hot")) {
                        return .{ .data = .{
                            .e_special = if (p.options.features.hot_module_reloading) .hot_enabled else .hot_disabled,
                        }, .loc = loc };
                    }

                    // Inline import.meta properties for Bake
                    if (p.options.framework != null) {
                        if (strings.eqlComptime(name, "dir") or strings.eqlComptime(name, "dirname")) {
                            // Inline import.meta.dir
                            return p.newExpr(E.String.init(p.source.path.name.dir), name_loc);
                        } else if (strings.eqlComptime(name, "file")) {
                            // Inline import.meta.file (filename only)
                            return p.newExpr(E.String.init(p.source.path.name.filename), name_loc);
                        } else if (strings.eqlComptime(name, "path")) {
                            // Inline import.meta.path (full path)
                            return p.newExpr(E.String.init(p.source.path.text), name_loc);
                        } else if (strings.eqlComptime(name, "url")) {
                            // Inline import.meta.url as a file:// URL
                            const bunstr = bun.String.fromBytes(p.source.path.text);
                            defer bunstr.deref();
                            const url = std.fmt.allocPrint(p.allocator, "{s}", .{jsc.URL.fileURLFromString(bunstr)}) catch unreachable;
                            return p.newExpr(E.String.init(url), name_loc);
                        }
                    }

                    // Make all property accesses on `import.meta.url` side effect free.
                    return p.newExpr(
                        E.Dot{
                            .target = target,
                            .name = name,
                            .name_loc = name_loc,
                            .can_be_removed_if_unused = true,
                        },
                        target.loc,
                    );
                },
                .e_require_call_target => {
                    if (strings.eqlComptime(name, "main")) {
                        return .{ .loc = loc, .data = .e_require_main };
                    }
                },
                .e_import_identifier => |id| {
                    // Symbol uses due to a property access off of an imported symbol are tracked
                    // specially. This lets us do tree shaking for cross-file TypeScript enums.
                    if (p.options.bundle and !p.is_control_flow_dead) {
                        const use = p.symbol_uses.getPtr(id.ref).?;
                        use.count_estimate -|= 1;
                        // note: this use is not removed as we assume it exists later

                        // Add a special symbol use instead
                        const gop = p.import_symbol_property_uses.getOrPutValue(
                            p.allocator,
                            id.ref,
                            .{},
                        ) catch bun.outOfMemory();
                        const inner_use = gop.value_ptr.getOrPutValue(
                            p.allocator,
                            name,
                            .{},
                        ) catch bun.outOfMemory();
                        inner_use.value_ptr.count_estimate += 1;
                    }
                },
                inline .e_dot, .e_index => |data, tag| {
                    if (p.ts_namespace.expr == tag and
                        data == @field(p.ts_namespace.expr, @tagName(tag)) and
                        identifier_opts.assign_target == .none and
                        !identifier_opts.is_delete_target)
                    {
                        return maybeRewritePropertyAccessForNamespace(p, name, &target, loc, name_loc);
                    }
                },
                .e_special => |special| switch (special) {
                    .module_exports => {
                        if (p.shouldUnwrapCommonJSToESM()) {
                            if (!p.is_control_flow_dead) {
                                if (!p.commonjs_named_exports_deoptimized) {
                                    if (identifier_opts.is_delete_target) {
                                        p.deoptimizeCommonJSNamedExports();
                                        return null;
                                    }

                                    const named_export_entry = p.commonjs_named_exports.getOrPut(p.allocator, name) catch unreachable;
                                    if (!named_export_entry.found_existing) {
                                        const new_ref = p.newSymbol(
                                            .other,
                                            std.fmt.allocPrint(p.allocator, "${any}", .{bun.fmt.fmtIdentifier(name)}) catch unreachable,
                                        ) catch unreachable;
                                        p.module_scope.generated.push(p.allocator, new_ref) catch unreachable;
                                        named_export_entry.value_ptr.* = .{
                                            .loc_ref = LocRef{
                                                .loc = name_loc,
                                                .ref = new_ref,
                                            },
                                            .needs_decl = true,
                                        };
                                        if (p.commonjs_named_exports_needs_conversion == std.math.maxInt(u32))
                                            p.commonjs_named_exports_needs_conversion = @as(u32, @truncate(p.commonjs_named_exports.count() - 1));
                                    }

                                    const ref = named_export_entry.value_ptr.*.loc_ref.ref.?;
                                    p.recordUsage(ref);

                                    return p.newExpr(
                                        E.CommonJSExportIdentifier{
                                            .ref = ref,
                                            // Record this as coming from module.exports
                                            .base = .module_dot_exports,
                                        },
                                        name_loc,
                                    );
                                } else if (p.options.features.commonjs_at_runtime and identifier_opts.assign_target != .none) {
                                    p.has_commonjs_export_names = true;
                                }
                            }
                        }
                    },
                    .hot_enabled, .hot_disabled => {
                        const enabled = p.options.features.hot_module_reloading;
                        if (bun.strings.eqlComptime(name, "data")) {
                            return if (enabled)
                                .{ .data = .{ .e_special = .hot_data }, .loc = loc }
                            else
                                Expr.init(E.Object, .{}, loc);
                        }
                        if (bun.strings.eqlComptime(name, "accept")) {
                            if (!enabled) {
                                p.method_call_must_be_replaced_with_undefined = true;
                                return .{ .data = .e_undefined, .loc = loc };
                            }
                            return .{ .data = .{
                                .e_special = .hot_accept,
                            }, .loc = loc };
                        }
                        const lookup_table = comptime bun.ComptimeStringMap(void, [_]struct { [:0]const u8, void }{
                            .{ "decline", {} },
                            .{ "dispose", {} },
                            .{ "prune", {} },
                            .{ "invalidate", {} },
                            .{ "on", {} },
                            .{ "off", {} },
                            .{ "send", {} },
                        });
                        if (lookup_table.has(name)) {
                            if (enabled) {
                                return Expr.init(E.Dot, .{
                                    .target = Expr.initIdentifier(p.hmr_api_ref, target.loc),
                                    .name = name,
                                    .name_loc = name_loc,
                                }, loc);
                            } else {
                                p.method_call_must_be_replaced_with_undefined = true;
                                return .{ .data = .e_undefined, .loc = loc };
                            }
                        } else {
                            // This error is a bit out of place since the HMR
                            // API is validated in the parser instead of at
                            // runtime. When the API is not validated in this
                            // way, the developer may unintentionally read or
                            // write internal fields of HMRModule.
                            p.log.addError(
                                p.source,
                                loc,
                                std.fmt.allocPrint(
                                    p.allocator,
                                    "import.meta.hot.{s} does not exist",
                                    .{name},
                                ) catch bun.outOfMemory(),
                            ) catch bun.outOfMemory();
                            return .{ .data = .e_undefined, .loc = loc };
                        }
                    },
                    else => {},
                },
                else => {},
            }

            return null;
        }

        fn maybeRewritePropertyAccessForNamespace(
            p: *P,
            name: string,
            target: *const Expr,
            loc: logger.Loc,
            name_loc: logger.Loc,
        ) ?Expr {
            if (p.ts_namespace.map.?.get(name)) |value| {
                switch (value.data) {
                    .enum_number => |num| {
                        p.ignoreUsageOfIdentifierInDotChain(target.*);
                        return p.wrapInlinedEnum(
                            .{ .loc = loc, .data = .{ .e_number = .{ .value = num } } },
                            name,
                        );
                    },

                    .enum_string => |str| {
                        p.ignoreUsageOfIdentifierInDotChain(target.*);
                        return p.wrapInlinedEnum(
                            .{ .loc = loc, .data = .{ .e_string = str } },
                            name,
                        );
                    },

                    .namespace => |namespace| {
                        // If this isn't a constant, return a clone of this property access
                        // but with the namespace member data associated with it so that
                        // more property accesses off of this property access are recognized.
                        const expr = if (js_lexer.isIdentifier(name))
                            p.newExpr(E.Dot{
                                .target = target.*,
                                .name = name,
                                .name_loc = name_loc,
                            }, loc)
                        else
                            p.newExpr(E.Dot{
                                .target = target.*,
                                .name = name,
                                .name_loc = name_loc,
                            }, loc);

                        p.ts_namespace = .{
                            .expr = expr.data,
                            .map = namespace,
                        };

                        return expr;
                    },

                    else => {},
                }
            }

            return null;
        }

        pub fn checkIfDefinedHelper(p: *P, expr: Expr) !Expr {
            return p.newExpr(
                E.Binary{
                    .op = .bin_strict_eq,
                    .left = p.newExpr(
                        E.Unary{
                            .op = .un_typeof,
                            .value = expr,
                        },
                        logger.Loc.Empty,
                    ),
                    .right = p.newExpr(
                        E.String{ .data = "undefined" },
                        logger.Loc.Empty,
                    ),
                },
                logger.Loc.Empty,
            );
        }
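        // Generates the AST for: typeof expr === "undefined"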

        pub fn maybeDefinedHelper(p: *P, identifier_expr: Expr) !Expr {
            return p.newExpr(
                E.If{
                    .test_ = try p.checkIfDefinedHelper(identifier_expr),
                    .yes = p.newExpr(
                        E.Identifier{
                            .ref = (p.findSymbol(logger.Loc.Empty, "Object") catch unreachable).ref,
                        },
                        logger.Loc.Empty,
                    ),
                    .no = identifier_expr,
                },
                logger.Loc.Empty,
            );
        }
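        // Generates the AST for: typeof ident === "undefined" ? Object : ident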

        pub fn maybeCommaSpreadError(p: *P, _comma_after_spread: ?logger.Loc) void {
            const comma_after_spread = _comma_after_spread orelse return;
            if (comma_after_spread.start == -1) return;

            p.log.addRangeError(p.source, logger.Range{ .loc = comma_after_spread, .len = 1 }, "Unexpected \",\" after rest pattern") catch unreachable;
        }
    };
}

const string = []const u8;

const bun = @import("bun");
const Environment = bun.Environment;
const FeatureFlags = bun.FeatureFlags;
const assert = bun.assert;
const js_lexer = bun.js_lexer;
const jsc = bun.jsc;
const logger = bun.logger;
const strings = bun.strings;

const js_ast = bun.ast;
const B = js_ast.B;
const Binding = js_ast.Binding;
const E = js_ast.E;
const Expr = js_ast.Expr;
const Flags = js_ast.Flags;
const LocRef = js_ast.LocRef;
const S = js_ast.S;
const Stmt = js_ast.Stmt;
const Symbol = js_ast.Symbol;

const G = js_ast.G;
const Decl = G.Decl;
const Property = G.Property;

const js_parser = bun.js_parser;
const IdentifierOpts = js_parser.IdentifierOpts;
const JSXTransformType = js_parser.JSXTransformType;
const RelocateVars = js_parser.RelocateVars;
const SideEffects = js_parser.SideEffects;
const TypeScript = js_parser.TypeScript;
const options = js_parser.options;

const std = @import("std");
const List = std.ArrayListUnmanaged;
src/ast/parse.zig (1396 lines): file diff suppressed because it is too large.
@@ -1,517 +0,0 @@
|
||||
pub fn ParseFn(
|
||||
comptime parser_feature__typescript: bool,
|
||||
comptime parser_feature__jsx: JSXTransformType,
|
||||
comptime parser_feature__scan_only: bool,
|
||||
) type {
|
||||
return struct {
|
||||
const P = js_parser.NewParser_(parser_feature__typescript, parser_feature__jsx, parser_feature__scan_only);
|
||||
const is_typescript_enabled = P.is_typescript_enabled;
|
||||
|
||||
/// This assumes the "function" token has already been parsed
|
||||
pub fn parseFnStmt(noalias p: *P, loc: logger.Loc, noalias opts: *ParseStatementOptions, asyncRange: ?logger.Range) !Stmt {
|
||||
const is_generator = p.lexer.token == T.t_asterisk;
|
||||
const is_async = asyncRange != null;
|
||||
|
||||
if (is_generator) {
|
||||
// p.markSyntaxFeature(compat.Generator, p.lexer.Range())
|
||||
try p.lexer.next();
|
||||
} else if (is_async) {
|
||||
// p.markLoweredSyntaxFeature(compat.AsyncAwait, asyncRange, compat.Generator)
|
||||
}
|
||||
|
||||
switch (opts.lexical_decl) {
|
||||
.forbid => {
|
||||
try p.forbidLexicalDecl(loc);
|
||||
},
|
||||
|
||||
// Allow certain function statements in certain single-statement contexts
|
||||
.allow_fn_inside_if, .allow_fn_inside_label => {
|
||||
if (opts.is_typescript_declare or is_generator or is_async) {
|
||||
try p.forbidLexicalDecl(loc);
|
||||
}
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
|
||||
var name: ?js_ast.LocRef = null;
|
||||
var nameText: string = "";
|
||||
|
||||
// The name is optional for "export default function() {}" pseudo-statements
|
||||
if (!opts.is_name_optional or p.lexer.token == T.t_identifier) {
|
||||
const nameLoc = p.lexer.loc();
|
||||
nameText = p.lexer.identifier;
|
||||
try p.lexer.expect(T.t_identifier);
|
||||
// Difference
|
||||
const ref = try p.newSymbol(Symbol.Kind.other, nameText);
|
||||
name = js_ast.LocRef{
|
||||
.loc = nameLoc,
|
||||
.ref = ref,
|
||||
};
|
||||
}
|
||||
|
||||
// Even anonymous functions can have TypeScript type parameters
|
||||
if (is_typescript_enabled) {
|
||||
_ = try p.skipTypeScriptTypeParameters(.{ .allow_const_modifier = true });
|
||||
}
|
||||
|
||||
// Introduce a fake block scope for function declarations inside if statements
|
||||
var ifStmtScopeIndex: usize = 0;
|
||||
const hasIfScope = opts.lexical_decl == .allow_fn_inside_if;
|
||||
if (hasIfScope) {
|
||||
ifStmtScopeIndex = try p.pushScopeForParsePass(js_ast.Scope.Kind.block, loc);
|
||||
}
|
||||
|
||||
var scopeIndex: usize = 0;
|
||||
var pushedScopeForFunctionArgs = false;
|
||||
// Push scope if the current lexer token is an open parenthesis token.
|
||||
// That is, the parser is about parsing function arguments
|
||||
if (p.lexer.token == .t_open_paren) {
|
||||
scopeIndex = try p.pushScopeForParsePass(js_ast.Scope.Kind.function_args, p.lexer.loc());
|
||||
pushedScopeForFunctionArgs = true;
|
||||
}
|
||||
|
||||
var func = try p.parseFn(name, FnOrArrowDataParse{
|
||||
.needs_async_loc = loc,
|
||||
.async_range = asyncRange orelse logger.Range.None,
|
||||
.has_async_range = asyncRange != null,
|
||||
.allow_await = if (is_async) AwaitOrYield.allow_expr else AwaitOrYield.allow_ident,
|
||||
.allow_yield = if (is_generator) AwaitOrYield.allow_expr else AwaitOrYield.allow_ident,
|
||||
.is_typescript_declare = opts.is_typescript_declare,
|
||||
|
||||
// Only allow omitting the body if we're parsing TypeScript
|
||||
.allow_missing_body_for_type_script = is_typescript_enabled,
|
||||
});
|
||||
p.fn_or_arrow_data_parse.has_argument_decorators = false;
|
||||
|
||||
if (comptime is_typescript_enabled) {
|
||||
// Don't output anything if it's just a forward declaration of a function
if ((opts.is_typescript_declare or func.flags.contains(.is_forward_declaration)) and pushedScopeForFunctionArgs) {
p.popAndDiscardScope(scopeIndex);

// Balance the fake block scope introduced above
if (hasIfScope) {
p.popScope();
}

if (opts.is_typescript_declare and opts.is_namespace_scope and opts.is_export) {
p.has_non_local_export_declare_inside_namespace = true;
}

return p.s(S.TypeScript{}, loc);
}
}

if (pushedScopeForFunctionArgs) {
p.popScope();
}

// Only declare the function after we know if it had a body or not. Otherwise
// TypeScript code such as this will double-declare the symbol:
//
// function foo(): void;
// function foo(): void {}
//
if (name != null) {
const kind = if (is_generator or is_async)
Symbol.Kind.generator_or_async_function
else
Symbol.Kind.hoisted_function;

name.?.ref = try p.declareSymbol(kind, name.?.loc, nameText);
func.name = name;
}

func.flags.setPresent(.has_if_scope, hasIfScope);
func.flags.setPresent(.is_export, opts.is_export);

// Balance the fake block scope introduced above
if (hasIfScope) {
p.popScope();
}

return p.s(
S.Function{
.func = func,
},
loc,
);
}

pub fn parseFn(p: *P, name: ?js_ast.LocRef, opts: FnOrArrowDataParse) anyerror!G.Fn {
// if data.allowAwait and data.allowYield {
// p.markSyntaxFeature(compat.AsyncGenerator, data.asyncRange)
// }

var func = G.Fn{
.name = name,

.flags = Flags.Function.init(.{
.has_rest_arg = false,
.is_async = opts.allow_await == .allow_expr,
.is_generator = opts.allow_yield == .allow_expr,
}),

.arguments_ref = null,
.open_parens_loc = p.lexer.loc(),
};
try p.lexer.expect(T.t_open_paren);

// Await and yield are not allowed in function arguments
var old_fn_or_arrow_data = std.mem.toBytes(p.fn_or_arrow_data_parse);

p.fn_or_arrow_data_parse.allow_await = if (opts.allow_await == .allow_expr)
AwaitOrYield.forbid_all
else
AwaitOrYield.allow_ident;

p.fn_or_arrow_data_parse.allow_yield = if (opts.allow_yield == .allow_expr)
AwaitOrYield.forbid_all
else
AwaitOrYield.allow_ident;

// Don't suggest inserting "async" before anything if "await" is found
p.fn_or_arrow_data_parse.needs_async_loc = logger.Loc.Empty;

// If "super()" is allowed in the body, it's allowed in the arguments
p.fn_or_arrow_data_parse.allow_super_call = opts.allow_super_call;
p.fn_or_arrow_data_parse.allow_super_property = opts.allow_super_property;

var rest_arg: bool = false;
var arg_has_decorators: bool = false;
var args = List(G.Arg){};
while (p.lexer.token != T.t_close_paren) {
// Skip over "this" type annotations
if (is_typescript_enabled and p.lexer.token == T.t_this) {
try p.lexer.next();
if (p.lexer.token == T.t_colon) {
try p.lexer.next();
try p.skipTypeScriptType(.lowest);
}
if (p.lexer.token != T.t_comma) {
break;
}

try p.lexer.next();
continue;
}

var ts_decorators: []ExprNodeIndex = &([_]ExprNodeIndex{});
if (opts.allow_ts_decorators) {
ts_decorators = try p.parseTypeScriptDecorators();
if (ts_decorators.len > 0) {
arg_has_decorators = true;
}
}

if (!func.flags.contains(.has_rest_arg) and p.lexer.token == T.t_dot_dot_dot) {
// p.markSyntaxFeature
try p.lexer.next();
rest_arg = true;
func.flags.insert(.has_rest_arg);
}

var is_typescript_ctor_field = false;
const is_identifier = p.lexer.token == T.t_identifier;
var text = p.lexer.identifier;
var arg = try p.parseBinding(.{});
var ts_metadata = TypeScript.Metadata.default;

if (comptime is_typescript_enabled) {
if (is_identifier and opts.is_constructor) {
// Skip over TypeScript accessibility modifiers, which turn this argument
// into a class field when used inside a class constructor. This is known
// as a "parameter property" in TypeScript.
while (true) {
switch (p.lexer.token) {
.t_identifier, .t_open_brace, .t_open_bracket => {
if (!js_lexer.TypeScriptAccessibilityModifier.has(text)) {
break;
}

is_typescript_ctor_field = true;

// TypeScript requires an identifier binding
if (p.lexer.token != .t_identifier) {
try p.lexer.expect(.t_identifier);
}
text = p.lexer.identifier;

// Re-parse the binding (the current binding is the TypeScript keyword)
arg = try p.parseBinding(.{});
},
else => {
break;
},
}
}
}

// "function foo(a?) {}"
if (p.lexer.token == .t_question) {
try p.lexer.next();
}

// "function foo(a: any) {}"
if (p.lexer.token == .t_colon) {
try p.lexer.next();
if (!rest_arg) {
if (p.options.features.emit_decorator_metadata and
opts.allow_ts_decorators and
(opts.has_argument_decorators or opts.has_decorators or arg_has_decorators))
{
ts_metadata = try p.skipTypeScriptTypeWithMetadata(.lowest);
} else {
try p.skipTypeScriptType(.lowest);
}
} else {
// rest parameter is always object, leave metadata as m_none
try p.skipTypeScriptType(.lowest);
}
}
}

var parseStmtOpts = ParseStatementOptions{};
p.declareBinding(.hoisted, &arg, &parseStmtOpts) catch unreachable;

var default_value: ?ExprNodeIndex = null;
if (!func.flags.contains(.has_rest_arg) and p.lexer.token == .t_equals) {
// p.markSyntaxFeature
try p.lexer.next();
default_value = try p.parseExpr(.comma);
}

args.append(p.allocator, G.Arg{
.ts_decorators = ExprNodeList.init(ts_decorators),
.binding = arg,
.default = default_value,

// We need to track this because it affects code generation
.is_typescript_ctor_field = is_typescript_ctor_field,
.ts_metadata = ts_metadata,
}) catch unreachable;

if (p.lexer.token != .t_comma) {
break;
}

if (func.flags.contains(.has_rest_arg)) {
// JavaScript does not allow a comma after a rest argument
if (opts.is_typescript_declare) {
// TypeScript does allow a comma after a rest argument in a "declare" context
try p.lexer.next();
} else {
try p.lexer.expect(.t_close_paren);
}

break;
}

try p.lexer.next();
rest_arg = false;
}
if (args.items.len > 0) {
func.args = args.items;
}

// Reserve the special name "arguments" in this scope. This ensures that it
// shadows any variable called "arguments" in any parent scopes. But only do
// this if it wasn't already declared above because arguments are allowed to
// be called "arguments", in which case the real "arguments" is inaccessible.
if (!p.current_scope.members.contains("arguments")) {
func.arguments_ref = p.declareSymbolMaybeGenerated(.arguments, func.open_parens_loc, arguments_str, false) catch unreachable;
p.symbols.items[func.arguments_ref.?.innerIndex()].must_not_be_renamed = true;
}

try p.lexer.expect(.t_close_paren);
p.fn_or_arrow_data_parse = std.mem.bytesToValue(@TypeOf(p.fn_or_arrow_data_parse), &old_fn_or_arrow_data);

p.fn_or_arrow_data_parse.has_argument_decorators = arg_has_decorators;

// "function foo(): any {}"
if (is_typescript_enabled) {
if (p.lexer.token == .t_colon) {
try p.lexer.next();

if (p.options.features.emit_decorator_metadata and opts.allow_ts_decorators and (opts.has_argument_decorators or opts.has_decorators)) {
func.return_ts_metadata = try p.skipTypescriptReturnTypeWithMetadata();
} else {
try p.skipTypescriptReturnType();
}
} else if (p.options.features.emit_decorator_metadata and opts.allow_ts_decorators and (opts.has_argument_decorators or opts.has_decorators)) {
if (func.flags.contains(.is_async)) {
func.return_ts_metadata = .m_promise;
} else {
func.return_ts_metadata = .m_undefined;
}
}
}

// "function foo(): any;"
if (opts.allow_missing_body_for_type_script and p.lexer.token != .t_open_brace) {
try p.lexer.expectOrInsertSemicolon();
func.flags.insert(.is_forward_declaration);
return func;
}
var tempOpts = opts;
func.body = try p.parseFnBody(&tempOpts);

return func;
}

pub fn parseFnExpr(p: *P, loc: logger.Loc, is_async: bool, async_range: logger.Range) !Expr {
try p.lexer.next();
const is_generator = p.lexer.token == T.t_asterisk;
if (is_generator) {
// p.markSyntaxFeature()
try p.lexer.next();
} else if (is_async) {
// p.markLoweredSyntaxFeature(compat.AsyncAwait, asyncRange, compat.Generator)
}

var name: ?js_ast.LocRef = null;

_ = p.pushScopeForParsePass(.function_args, loc) catch unreachable;

// The name is optional
if (p.lexer.token == .t_identifier) {
const text = p.lexer.identifier;

// Don't declare the name "arguments" since it's shadowed and inaccessible
name = js_ast.LocRef{
.loc = p.lexer.loc(),
.ref = if (text.len > 0 and !strings.eqlComptime(text, "arguments"))
try p.declareSymbol(.hoisted_function, p.lexer.loc(), text)
else
try p.newSymbol(.hoisted_function, text),
};

try p.lexer.next();
}

// Even anonymous functions can have TypeScript type parameters
if (comptime is_typescript_enabled) {
_ = try p.skipTypeScriptTypeParameters(.{ .allow_const_modifier = true });
}

const func = try p.parseFn(name, FnOrArrowDataParse{
.needs_async_loc = loc,
.async_range = async_range,
.allow_await = if (is_async) .allow_expr else .allow_ident,
.allow_yield = if (is_generator) .allow_expr else .allow_ident,
});
p.fn_or_arrow_data_parse.has_argument_decorators = false;

p.validateFunctionName(func, .expr);
p.popScope();

return p.newExpr(js_ast.E.Function{
.func = func,
}, loc);
}

pub fn parseFnBody(p: *P, data: *FnOrArrowDataParse) !G.FnBody {
const oldFnOrArrowData = p.fn_or_arrow_data_parse;
const oldAllowIn = p.allow_in;
p.fn_or_arrow_data_parse = data.*;
p.allow_in = true;

const loc = p.lexer.loc();
var pushedScopeForFunctionBody = false;
if (p.lexer.token == .t_open_brace) {
_ = try p.pushScopeForParsePass(Scope.Kind.function_body, p.lexer.loc());
pushedScopeForFunctionBody = true;
}

try p.lexer.expect(.t_open_brace);
var opts = ParseStatementOptions{};
const stmts = try p.parseStmtsUpTo(.t_close_brace, &opts);
try p.lexer.next();

if (pushedScopeForFunctionBody) p.popScope();

p.allow_in = oldAllowIn;
p.fn_or_arrow_data_parse = oldFnOrArrowData;
return G.FnBody{ .loc = loc, .stmts = stmts };
}

pub fn parseArrowBody(p: *P, args: []js_ast.G.Arg, data: *FnOrArrowDataParse) !E.Arrow {
const arrow_loc = p.lexer.loc();

// Newlines are not allowed before "=>"
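// For example: a line break between "x" and "=>" is rejected here, while a
// line break after the "=>" is fine.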
if (p.lexer.has_newline_before) {
try p.log.addRangeError(p.source, p.lexer.range(), "Unexpected newline before \"=>\"");
return error.SyntaxError;
}

try p.lexer.expect(T.t_equals_greater_than);

for (args) |*arg| {
var opts = ParseStatementOptions{};
try p.declareBinding(Symbol.Kind.hoisted, &arg.binding, &opts);
}

// The ability to use "this" and "super()" is inherited by arrow functions
data.allow_super_call = p.fn_or_arrow_data_parse.allow_super_call;
data.allow_super_property = p.fn_or_arrow_data_parse.allow_super_property;
data.is_this_disallowed = p.fn_or_arrow_data_parse.is_this_disallowed;

if (p.lexer.token == .t_open_brace) {
const body = try p.parseFnBody(data);
p.after_arrow_body_loc = p.lexer.loc();
return E.Arrow{ .args = args, .body = body };
}

_ = try p.pushScopeForParsePass(Scope.Kind.function_body, arrow_loc);
defer p.popScope();

var old_fn_or_arrow_data = std.mem.toBytes(p.fn_or_arrow_data_parse);

p.fn_or_arrow_data_parse = data.*;
const expr = try p.parseExpr(Level.comma);
p.fn_or_arrow_data_parse = std.mem.bytesToValue(@TypeOf(p.fn_or_arrow_data_parse), &old_fn_or_arrow_data);

var stmts = try p.allocator.alloc(Stmt, 1);
stmts[0] = p.s(S.Return{ .value = expr }, expr.loc);
return E.Arrow{ .args = args, .prefer_expr = true, .body = G.FnBody{ .loc = arrow_loc, .stmts = stmts } };
}
};
}

const string = []const u8;

const bun = @import("bun");
const logger = bun.logger;
const strings = bun.strings;

const js_ast = bun.ast;
const E = js_ast.E;
const Expr = js_ast.Expr;
const ExprNodeIndex = js_ast.ExprNodeIndex;
const ExprNodeList = js_ast.ExprNodeList;
const Flags = js_ast.Flags;
const LocRef = js_ast.LocRef;
const S = js_ast.S;
const Scope = js_ast.Scope;
const Stmt = js_ast.Stmt;
const Symbol = js_ast.Symbol;

const G = js_ast.G;
const Arg = G.Arg;

const Op = js_ast.Op;
const Level = js_ast.Op.Level;

const js_lexer = bun.js_lexer;
const T = js_lexer.T;

const js_parser = bun.js_parser;
const AwaitOrYield = js_parser.AwaitOrYield;
const FnOrArrowDataParse = js_parser.FnOrArrowDataParse;
const JSXTransformType = js_parser.JSXTransformType;
const ParseStatementOptions = js_parser.ParseStatementOptions;
const TypeScript = js_parser.TypeScript;
const arguments_str = js_parser.arguments_str;
const options = js_parser.options;

const std = @import("std");
const List = std.ArrayListUnmanaged;

@@ -1,437 +0,0 @@

pub fn ParseImportExport(
comptime parser_feature__typescript: bool,
comptime parser_feature__jsx: JSXTransformType,
comptime parser_feature__scan_only: bool,
) type {
return struct {
const P = js_parser.NewParser_(parser_feature__typescript, parser_feature__jsx, parser_feature__scan_only);
const is_typescript_enabled = P.is_typescript_enabled;
const only_scan_imports_and_do_not_visit = P.only_scan_imports_and_do_not_visit;

/// Note: The caller has already parsed the "import" keyword
pub fn parseImportExpr(noalias p: *P, loc: logger.Loc, level: Level) anyerror!Expr {
// Parse an "import.meta" expression
if (p.lexer.token == .t_dot) {
p.esm_import_keyword = js_lexer.rangeOfIdentifier(p.source, loc);
try p.lexer.next();
if (p.lexer.isContextualKeyword("meta")) {
try p.lexer.next();
p.has_import_meta = true;
return p.newExpr(E.ImportMeta{}, loc);
} else {
try p.lexer.expectedString("\"meta\"");
}
}

if (level.gt(.call)) {
const r = js_lexer.rangeOfIdentifier(p.source, loc);
p.log.addRangeError(p.source, r, "Cannot use an \"import\" expression here without parentheses") catch unreachable;
}

// allow "in" inside call arguments;
const old_allow_in = p.allow_in;
p.allow_in = true;

p.lexer.preserve_all_comments_before = true;
try p.lexer.expect(.t_open_paren);

// const comments = try p.lexer.comments_to_preserve_before.toOwnedSlice();
p.lexer.comments_to_preserve_before.clearRetainingCapacity();

p.lexer.preserve_all_comments_before = false;

const value = try p.parseExpr(.comma);

var import_options = Expr.empty;
if (p.lexer.token == .t_comma) {
// "import('./foo.json', )"
try p.lexer.next();

if (p.lexer.token != .t_close_paren) {
// "import('./foo.json', { assert: { type: 'json' } })"
import_options = try p.parseExpr(.comma);

if (p.lexer.token == .t_comma) {
// "import('./foo.json', { assert: { type: 'json' } }, )"
try p.lexer.next();
}
}
}

try p.lexer.expect(.t_close_paren);

p.allow_in = old_allow_in;

if (comptime only_scan_imports_and_do_not_visit) {
if (value.data == .e_string and value.data.e_string.isUTF8() and value.data.e_string.isPresent()) {
const import_record_index = p.addImportRecord(.dynamic, value.loc, value.data.e_string.slice(p.allocator));

return p.newExpr(E.Import{
.expr = value,
// .leading_interior_comments = comments,
.import_record_index = import_record_index,
.options = import_options,
}, loc);
}
}

// _ = comments; // TODO: leading_interior comments

return p.newExpr(E.Import{
.expr = value,
// .leading_interior_comments = comments,
.import_record_index = std.math.maxInt(u32),
.options = import_options,
}, loc);
}

pub fn parseImportClause(
p: *P,
) !ImportClause {
var items = ListManaged(js_ast.ClauseItem).init(p.allocator);
try p.lexer.expect(.t_open_brace);
var is_single_line = !p.lexer.has_newline_before;
// this variable should not exist if we're not in a typescript file
var had_type_only_imports = if (comptime is_typescript_enabled)
false;

while (p.lexer.token != .t_close_brace) {
// The alias may be a keyword;
const isIdentifier = p.lexer.token == .t_identifier;
const alias_loc = p.lexer.loc();
const alias = try p.parseClauseAlias("import");
var name = LocRef{ .loc = alias_loc, .ref = try p.storeNameInRef(alias) };
var original_name = alias;
try p.lexer.next();

const probably_type_only_import = if (comptime is_typescript_enabled)
strings.eqlComptime(alias, "type") and
p.lexer.token != .t_comma and
p.lexer.token != .t_close_brace
else
false;

// "import { type xx } from 'mod'"
// "import { type xx as yy } from 'mod'"
// "import { type 'xx' as yy } from 'mod'"
// "import { type as } from 'mod'"
// "import { type as as } from 'mod'"
// "import { type as as as } from 'mod'"
if (probably_type_only_import) {
if (p.lexer.isContextualKeyword("as")) {
try p.lexer.next();
if (p.lexer.isContextualKeyword("as")) {
original_name = p.lexer.identifier;
name = LocRef{ .loc = p.lexer.loc(), .ref = try p.storeNameInRef(original_name) };
try p.lexer.next();

if (p.lexer.token == .t_identifier) {

// "import { type as as as } from 'mod'"
// "import { type as as foo } from 'mod'"
had_type_only_imports = true;
try p.lexer.next();
} else {
// "import { type as as } from 'mod'"

try items.append(.{
.alias = alias,
.alias_loc = alias_loc,
.name = name,
.original_name = original_name,
});
}
} else if (p.lexer.token == .t_identifier) {
had_type_only_imports = true;

// "import { type as xxx } from 'mod'"
original_name = p.lexer.identifier;
name = LocRef{ .loc = p.lexer.loc(), .ref = try p.storeNameInRef(original_name) };
try p.lexer.expect(.t_identifier);

if (isEvalOrArguments(original_name)) {
const r = p.source.rangeOfString(name.loc);
try p.log.addRangeErrorFmt(p.source, r, p.allocator, "Cannot use {s} as an identifier here", .{original_name});
}

try items.append(.{
.alias = alias,
.alias_loc = alias_loc,
.name = name,
.original_name = original_name,
});
}
} else {
const is_identifier = p.lexer.token == .t_identifier;

// "import { type xx } from 'mod'"
// "import { type xx as yy } from 'mod'"
// "import { type if as yy } from 'mod'"
// "import { type 'xx' as yy } from 'mod'"
_ = try p.parseClauseAlias("import");
try p.lexer.next();

if (p.lexer.isContextualKeyword("as")) {
try p.lexer.next();

try p.lexer.expect(.t_identifier);
} else if (!is_identifier) {
// An import where the name is a keyword must have an alias
try p.lexer.expectedString("\"as\"");
}
had_type_only_imports = true;
}
} else {
if (p.lexer.isContextualKeyword("as")) {
try p.lexer.next();
original_name = p.lexer.identifier;
name = LocRef{ .loc = alias_loc, .ref = try p.storeNameInRef(original_name) };
try p.lexer.expect(.t_identifier);
} else if (!isIdentifier) {
// An import where the name is a keyword must have an alias
try p.lexer.expectedString("\"as\"");
}

// Reject forbidden names
if (isEvalOrArguments(original_name)) {
const r = js_lexer.rangeOfIdentifier(p.source, name.loc);
try p.log.addRangeErrorFmt(p.source, r, p.allocator, "Cannot use \"{s}\" as an identifier here", .{original_name});
}

try items.append(js_ast.ClauseItem{
.alias = alias,
.alias_loc = alias_loc,
.name = name,
.original_name = original_name,
});
}

if (p.lexer.token != .t_comma) {
break;
}

if (p.lexer.has_newline_before) {
is_single_line = false;
}

try p.lexer.next();

if (p.lexer.has_newline_before) {
is_single_line = false;
}
}

if (p.lexer.has_newline_before) {
is_single_line = false;
}

try p.lexer.expect(.t_close_brace);
return ImportClause{
.items = items.items,
.is_single_line = is_single_line,
.had_type_only_imports = if (comptime is_typescript_enabled)
had_type_only_imports
else
false,
};
}

pub fn parseExportClause(p: *P) !ExportClauseResult {
var items = ListManaged(js_ast.ClauseItem).initCapacity(p.allocator, 1) catch unreachable;
try p.lexer.expect(.t_open_brace);
var is_single_line = !p.lexer.has_newline_before;
var first_non_identifier_loc = logger.Loc{ .start = 0 };
var had_type_only_exports = false;

while (p.lexer.token != .t_close_brace) {
var alias = try p.parseClauseAlias("export");
var alias_loc = p.lexer.loc();

const name = LocRef{
.loc = alias_loc,
.ref = p.storeNameInRef(alias) catch unreachable,
};
const original_name = alias;

// The name can actually be a keyword if we're really an "export from"
// statement. However, we won't know until later. Allow keywords as
// identifiers for now and throw an error later if there's no "from".
//
// // This is fine
// export { default } from 'path'
//
// // This is a syntax error
// export { default }
//
if (p.lexer.token != .t_identifier and first_non_identifier_loc.start == 0) {
first_non_identifier_loc = p.lexer.loc();
}
try p.lexer.next();

if (comptime is_typescript_enabled) {
if (strings.eqlComptime(alias, "type") and p.lexer.token != .t_comma and p.lexer.token != .t_close_brace) {
if (p.lexer.isContextualKeyword("as")) {
try p.lexer.next();

if (p.lexer.isContextualKeyword("as")) {
alias = try p.parseClauseAlias("export");
alias_loc = p.lexer.loc();
try p.lexer.next();

if (p.lexer.token != .t_comma and p.lexer.token != .t_close_brace) {
// "export { type as as as }"
// "export { type as as foo }"
// "export { type as as 'foo' }"
_ = p.parseClauseAlias("export") catch "";
had_type_only_exports = true;
try p.lexer.next();
} else {
// "export { type as as }"
items.append(js_ast.ClauseItem{
.alias = alias,
.alias_loc = alias_loc,
.name = name,
.original_name = original_name,
}) catch unreachable;
}
} else if (p.lexer.token != .t_comma and p.lexer.token != .t_close_brace) {
// "export { type as xxx }"
// "export { type as 'xxx' }"
alias = try p.parseClauseAlias("export");
alias_loc = p.lexer.loc();
try p.lexer.next();

items.append(js_ast.ClauseItem{
.alias = alias,
.alias_loc = alias_loc,
.name = name,
.original_name = original_name,
}) catch unreachable;
} else {
had_type_only_exports = true;
}
} else {
// The name can actually be a keyword if we're really an "export from"
// statement. However, we won't know until later. Allow keywords as
// identifiers for now and throw an error later if there's no "from".
//
// // This is fine
// export { default } from 'path'
//
// // This is a syntax error
// export { default }
//
if (p.lexer.token != .t_identifier and first_non_identifier_loc.start == 0) {
first_non_identifier_loc = p.lexer.loc();
}

// "export { type xx }"
// "export { type xx as yy }"
// "export { type xx as if }"
// "export { type default } from 'path'"
// "export { type default as if } from 'path'"
// "export { type xx as 'yy' }"
// "export { type 'xx' } from 'mod'"
_ = p.parseClauseAlias("export") catch "";
try p.lexer.next();

if (p.lexer.isContextualKeyword("as")) {
try p.lexer.next();
_ = p.parseClauseAlias("export") catch "";
try p.lexer.next();
}

had_type_only_exports = true;
}
} else {
if (p.lexer.isContextualKeyword("as")) {
try p.lexer.next();
alias = try p.parseClauseAlias("export");
alias_loc = p.lexer.loc();

try p.lexer.next();
}

items.append(js_ast.ClauseItem{
.alias = alias,
.alias_loc = alias_loc,
.name = name,
.original_name = original_name,
}) catch unreachable;
}
} else {
if (p.lexer.isContextualKeyword("as")) {
try p.lexer.next();
alias = try p.parseClauseAlias("export");
alias_loc = p.lexer.loc();

try p.lexer.next();
}

items.append(js_ast.ClauseItem{
.alias = alias,
.alias_loc = alias_loc,
.name = name,
.original_name = original_name,
}) catch unreachable;
}

// we're done if there's no comma
if (p.lexer.token != .t_comma) {
break;
}

if (p.lexer.has_newline_before) {
is_single_line = false;
}
try p.lexer.next();
if (p.lexer.has_newline_before) {
is_single_line = false;
}
}

if (p.lexer.has_newline_before) {
is_single_line = false;
}
try p.lexer.expect(.t_close_brace);

// Throw an error here if we found a keyword earlier and this isn't an
// "export from" statement after all
if (first_non_identifier_loc.start != 0 and !p.lexer.isContextualKeyword("from")) {
const r = js_lexer.rangeOfIdentifier(p.source, first_non_identifier_loc);
try p.lexer.addRangeError(r, "Expected identifier but found \"{s}\"", .{p.source.textForRange(r)}, true);
return error.SyntaxError;
}

return ExportClauseResult{
.clauses = items.items,
.is_single_line = is_single_line,
.had_type_only_exports = had_type_only_exports,
};
}
};
}

const bun = @import("bun");
const assert = bun.assert;
const js_lexer = bun.js_lexer;
const logger = bun.logger;
const strings = bun.strings;

const js_ast = bun.ast;
const E = js_ast.E;
const Expr = js_ast.Expr;
const LocRef = js_ast.LocRef;

const Op = js_ast.Op;
const Level = js_ast.Op.Level;

const js_parser = bun.js_parser;
const ExportClauseResult = js_parser.ExportClauseResult;
const ImportClause = js_parser.ImportClause;
const JSXTransformType = js_parser.JSXTransformType;
const isEvalOrArguments = js_parser.isEvalOrArguments;
const options = js_parser.options;

const std = @import("std");
const ListManaged = std.ArrayList;
@@ -1,319 +0,0 @@

pub fn ParseJSXElement(
comptime parser_feature__typescript: bool,
comptime parser_feature__jsx: JSXTransformType,
comptime parser_feature__scan_only: bool,
) type {
return struct {
const P = js_parser.NewParser_(parser_feature__typescript, parser_feature__jsx, parser_feature__scan_only);
const is_typescript_enabled = P.is_typescript_enabled;
const only_scan_imports_and_do_not_visit = P.only_scan_imports_and_do_not_visit;

pub fn parseJSXElement(noalias p: *P, loc: logger.Loc) anyerror!Expr {
if (only_scan_imports_and_do_not_visit) {
p.needs_jsx_import = true;
}

const tag = try JSXTag.parse(P, p);

// The tag may have TypeScript type arguments: "<Foo<T>/>"
if (is_typescript_enabled) {
// Pass a flag to the type argument skipper because we need to call
// nextInsideJSXElement() after the closing ">"
_ = try p.skipTypeScriptTypeArguments(true);
}

var previous_string_with_backslash_loc = logger.Loc{};
var properties = G.Property.List{};
var key_prop_i: i32 = -1;
var flags = Flags.JSXElement.Bitset{};
var start_tag: ?ExprNodeIndex = null;

// Fragments don't have props
// Fragments of the form "React.Fragment" are not parsed as fragments.
if (@as(JSXTag.TagType, tag.data) == .tag) {
start_tag = tag.data.tag;

var spread_loc: logger.Loc = logger.Loc.Empty;
var props = ListManaged(G.Property).init(p.allocator);
var first_spread_prop_i: i32 = -1;
var i: i32 = 0;
parse_attributes: while (true) {
switch (p.lexer.token) {
.t_identifier => {
defer i += 1;
// Parse the prop name
const key_range = p.lexer.range();
const prop_name_literal = p.lexer.identifier;
const special_prop = E.JSXElement.SpecialProp.Map.get(prop_name_literal) orelse E.JSXElement.SpecialProp.any;
try p.lexer.nextInsideJSXElement();

if (special_prop == .key) {
// <ListItem key>
if (p.lexer.token != .t_equals) {
// Unlike Babel, we're going to just warn here and move on.
try p.log.addWarning(p.source, key_range.loc, "\"key\" prop ignored. Must be a string, number or symbol.");
continue;
}

key_prop_i = i;
}

const prop_name = p.newExpr(E.String{ .data = prop_name_literal }, key_range.loc);

// Parse the value
var value: Expr = undefined;
if (p.lexer.token != .t_equals) {

// Implicitly true value
// <button selected>
value = p.newExpr(E.Boolean{ .value = true }, logger.Loc{ .start = key_range.loc.start + key_range.len });
} else {
value = try p.parseJSXPropValueIdentifier(&previous_string_with_backslash_loc);
}

try props.append(G.Property{ .key = prop_name, .value = value });
},
.t_open_brace => {
defer i += 1;
// Use Next() not ExpectInsideJSXElement() so we can parse "..."
try p.lexer.next();

switch (p.lexer.token) {
.t_dot_dot_dot => {
try p.lexer.next();

if (first_spread_prop_i == -1) first_spread_prop_i = i;
spread_loc = p.lexer.loc();
try props.append(G.Property{ .value = try p.parseExpr(.comma), .kind = .spread });
},
// This implements
// <div {foo} />
// ->
// <div foo={foo} />
T.t_identifier => {
// we need to figure out which key they mean
// to do that, we must determine the key name
const expr = try p.parseExpr(Level.lowest);

const key = brk: {
switch (expr.data) {
.e_import_identifier => |ident| {
break :brk p.newExpr(E.String{ .data = p.loadNameFromRef(ident.ref) }, expr.loc);
},
.e_commonjs_export_identifier => |ident| {
break :brk p.newExpr(E.String{ .data = p.loadNameFromRef(ident.ref) }, expr.loc);
},
.e_identifier => |ident| {
break :brk p.newExpr(E.String{ .data = p.loadNameFromRef(ident.ref) }, expr.loc);
},
.e_dot => |dot| {
break :brk p.newExpr(E.String{ .data = dot.name }, dot.name_loc);
},
.e_index => |index| {
if (index.index.data == .e_string) {
break :brk index.index;
}
},
else => {},
}

// If we get here, it's invalid
try p.log.addError(p.source, expr.loc, "Invalid JSX prop shorthand, must be identifier, dot or string");
return error.SyntaxError;
};

try props.append(G.Property{ .value = expr, .key = key, .kind = .normal });
},
// This implements
// <div {"foo"} />
// <div {'foo'} />
// ->
// <div foo="foo" />
// note: template literals are not supported, operations on strings are not supported either
T.t_string_literal => {
const key = p.newExpr(try p.lexer.toEString(), p.lexer.loc());
try p.lexer.next();
try props.append(G.Property{ .value = key, .key = key, .kind = .normal });
},

else => try p.lexer.unexpected(),
}

try p.lexer.nextInsideJSXElement();
},
else => {
break :parse_attributes;
},
}
}

const is_key_after_spread = key_prop_i > -1 and first_spread_prop_i > -1 and key_prop_i > first_spread_prop_i;
flags.setPresent(.is_key_after_spread, is_key_after_spread);
properties = G.Property.List.fromList(props);
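// Illustrative example: "<div {...props} key={k} />" places "key" after a
// spread, so the check below falls back to the classic JSX runtime.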
if (is_key_after_spread and p.options.jsx.runtime == .automatic and !p.has_classic_runtime_warned) {
try p.log.addWarning(p.source, spread_loc, "\"key\" prop after a {...spread} is deprecated in JSX. Falling back to classic runtime.");
p.has_classic_runtime_warned = true;
}
}

// People sometimes try to use the output of "JSON.stringify()" as a JSX
// attribute when automatically-generating JSX code. Doing so is incorrect
// because JSX strings work like XML instead of like JS (since JSX is XML-in-
// JS). Specifically, using a backslash before a quote does not cause it to
// be escaped:
//
// JSX ends the "content" attribute here and sets "content" to 'some so-called \\'
// v
// <Button content="some so-called \"button text\"" />
// ^
// There is no "=" after the JSX attribute "text", so we expect a ">"
//
// This code special-cases this error to provide a less obscure error message.
if (p.lexer.token == .t_syntax_error and strings.eqlComptime(p.lexer.raw(), "\\") and previous_string_with_backslash_loc.start > 0) {
const r = p.lexer.range();
// Not dealing with this right now.
try p.log.addRangeError(p.source, r, "Invalid JSX escape - use XML entity codes quotes or pass a JavaScript string instead");
return error.SyntaxError;
}

// A slash here is a self-closing element
if (p.lexer.token == .t_slash) {
const close_tag_loc = p.lexer.loc();
// Use NextInsideJSXElement() not Next() so we can parse ">>" as ">"

try p.lexer.nextInsideJSXElement();

if (p.lexer.token != .t_greater_than) {
try p.lexer.expected(.t_greater_than);
}

return p.newExpr(E.JSXElement{
.tag = start_tag,
.properties = properties,
.key_prop_index = key_prop_i,
.flags = flags,
.close_tag_loc = close_tag_loc,
}, loc);
}

// Use ExpectJSXElementChild() so we parse child strings
try p.lexer.expectJSXElementChild(.t_greater_than);
var children = ListManaged(Expr).init(p.allocator);
// var last_element_i: usize = 0;

while (true) {
switch (p.lexer.token) {
.t_string_literal => {
try children.append(p.newExpr(try p.lexer.toEString(), loc));
try p.lexer.nextJSXElementChild();
},
.t_open_brace => {
// Use Next() instead of NextJSXElementChild() here since the next token is an expression
try p.lexer.next();

const is_spread = p.lexer.token == .t_dot_dot_dot;
if (is_spread) {
try p.lexer.next();
}

// The expression is optional, and may be absent
if (p.lexer.token != .t_close_brace) {
var item = try p.parseExpr(.lowest);
if (is_spread) {
item = p.newExpr(E.Spread{ .value = item }, loc);
}
try children.append(item);
}

// Use ExpectJSXElementChild() so we parse child strings
try p.lexer.expectJSXElementChild(.t_close_brace);
},
.t_less_than => {
const less_than_loc = p.lexer.loc();
try p.lexer.nextInsideJSXElement();

if (p.lexer.token != .t_slash) {
// This is a child element

children.append(try p.parseJSXElement(less_than_loc)) catch unreachable;

// The call to parseJSXElement() above doesn't consume the last
// TGreaterThan because the caller knows what Next() function to call.
// Use NextJSXElementChild() here since the next token is an element
// child.
try p.lexer.nextJSXElementChild();
continue;
}

// This is the closing element
try p.lexer.nextInsideJSXElement();
const end_tag = try JSXTag.parse(P, p);

if (!strings.eql(end_tag.name, tag.name)) {
try p.log.addRangeErrorFmtWithNote(
p.source,
end_tag.range,
p.allocator,
"Expected closing JSX tag to match opening tag \"\\<{s}\\>\"",
.{tag.name},
"Opening tag here:",
.{},
tag.range,
);
return error.SyntaxError;
}

if (p.lexer.token != .t_greater_than) {
try p.lexer.expected(.t_greater_than);
}

return p.newExpr(E.JSXElement{
.tag = end_tag.data.asExpr(),
.children = ExprNodeList.fromList(children),
.properties = properties,
.key_prop_index = key_prop_i,
.flags = flags,
.close_tag_loc = end_tag.range.loc,
}, loc);
},
else => {
try p.lexer.unexpected();
return error.SyntaxError;
},
}
}
}
};
}

const string = []const u8;

const bun = @import("bun");
const logger = bun.logger;
const strings = bun.strings;

const js_ast = bun.ast;
const E = js_ast.E;
const Expr = js_ast.Expr;
const ExprNodeIndex = js_ast.ExprNodeIndex;
const ExprNodeList = js_ast.ExprNodeList;
const Flags = js_ast.Flags;

const G = js_ast.G;
const Property = G.Property;

const Op = js_ast.Op;
const Level = js_ast.Op.Level;

const js_lexer = bun.js_lexer;
const T = js_lexer.T;

const js_parser = bun.js_parser;
const JSXTag = js_parser.JSXTag;
const JSXTransformType = js_parser.JSXTransformType;
const TypeScript = js_parser.TypeScript;
const options = js_parser.options;

const std = @import("std");
const List = std.ArrayListUnmanaged;
const ListManaged = std.ArrayList;
const Map = std.AutoHashMapUnmanaged;
@@ -1,763 +0,0 @@

pub fn ParsePrefix(
comptime parser_feature__typescript: bool,
comptime parser_feature__jsx: JSXTransformType,
comptime parser_feature__scan_only: bool,
) type {
return struct {
const P = js_parser.NewParser_(parser_feature__typescript, parser_feature__jsx, parser_feature__scan_only);
const is_jsx_enabled = P.is_jsx_enabled;
const is_typescript_enabled = P.is_typescript_enabled;

fn t_super(noalias p: *P, level: Level) anyerror!Expr {
const loc = p.lexer.loc();
const l = @intFromEnum(level);
const superRange = p.lexer.range();
try p.lexer.next();

switch (p.lexer.token) {
.t_open_paren => {
if (l < @intFromEnum(Level.call) and p.fn_or_arrow_data_parse.allow_super_call) {
return p.newExpr(E.Super{}, loc);
}
},
.t_dot, .t_open_bracket => {
if (p.fn_or_arrow_data_parse.allow_super_property) {
return p.newExpr(E.Super{}, loc);
}
},
else => {},
}

p.log.addRangeError(p.source, superRange, "Unexpected \"super\"") catch unreachable;
return p.newExpr(E.Super{}, loc);
}
fn t_open_paren(noalias p: *P, level: Level) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.next();

// Arrow functions aren't allowed in the middle of expressions
if (level.gt(.assign)) {
// Allow "in" inside parentheses
const oldAllowIn = p.allow_in;
p.allow_in = true;

var value = try p.parseExpr(Level.lowest);
p.markExprAsParenthesized(&value);
try p.lexer.expect(.t_close_paren);

p.allow_in = oldAllowIn;
return value;
}

return p.parseParenExpr(loc, level, ParenExprOpts{});
}
fn t_false(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.next();
return p.newExpr(E.Boolean{ .value = false }, loc);
}
fn t_true(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.next();
return p.newExpr(E.Boolean{ .value = true }, loc);
}
fn t_null(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.next();
return p.newExpr(E.Null{}, loc);
}
fn t_this(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
if (p.fn_or_arrow_data_parse.is_this_disallowed) {
p.log.addRangeError(p.source, p.lexer.range(), "Cannot use \"this\" here") catch unreachable;
}
try p.lexer.next();
return Expr{ .data = Prefill.Data.This, .loc = loc };
}
fn t_private_identifier(noalias p: *P, level: Level) anyerror!Expr {
const loc = p.lexer.loc();
if (!p.allow_private_identifiers or !p.allow_in or level.gte(.compare)) {
try p.lexer.unexpected();
return error.SyntaxError;
}

const name = p.lexer.identifier;
try p.lexer.next();

// Check for "#foo in bar"
if (p.lexer.token != .t_in) {
try p.lexer.expected(.t_in);
}

return p.newExpr(E.PrivateIdentifier{ .ref = try p.storeNameInRef(name) }, loc);
}
fn t_identifier(noalias p: *P, level: Level) anyerror!Expr {
const loc = p.lexer.loc();
const name = p.lexer.identifier;
const name_range = p.lexer.range();
const raw = p.lexer.raw();

try p.lexer.next();

// Handle async and await expressions
switch (AsyncPrefixExpression.find(name)) {
.is_async => {
if ((raw.ptr == name.ptr and raw.len == name.len) or AsyncPrefixExpression.find(raw) == .is_async) {
return try p.parseAsyncPrefixExpr(name_range, level);
}
},

.is_await => {
switch (p.fn_or_arrow_data_parse.allow_await) {
.forbid_all => {
p.log.addRangeError(p.source, name_range, "The keyword \"await\" cannot be used here") catch unreachable;
},
.allow_expr => {
if (AsyncPrefixExpression.find(raw) != .is_await) {
p.log.addRangeError(p.source, name_range, "The keyword \"await\" cannot be escaped") catch unreachable;
} else {
if (p.fn_or_arrow_data_parse.is_top_level) {
p.top_level_await_keyword = name_range;
}

if (p.fn_or_arrow_data_parse.track_arrow_arg_errors) {
p.fn_or_arrow_data_parse.arrow_arg_errors.invalid_expr_await = name_range;
}

const value = try p.parseExpr(.prefix);
if (p.lexer.token == T.t_asterisk_asterisk) {
try p.lexer.unexpected();
return error.SyntaxError;
}

return p.newExpr(E.Await{ .value = value }, loc);
}
},
.allow_ident => {
p.lexer.prev_token_was_await_keyword = true;
p.lexer.await_keyword_loc = name_range.loc;
p.lexer.fn_or_arrow_start_loc = p.fn_or_arrow_data_parse.needs_async_loc;
},
}
},

.is_yield => {
switch (p.fn_or_arrow_data_parse.allow_yield) {
.forbid_all => {
p.log.addRangeError(p.source, name_range, "The keyword \"yield\" cannot be used here") catch unreachable;
},
.allow_expr => {
if (AsyncPrefixExpression.find(raw) != .is_yield) {
p.log.addRangeError(p.source, name_range, "The keyword \"yield\" cannot be escaped") catch unreachable;
} else {
if (level.gt(.assign)) {
p.log.addRangeError(p.source, name_range, "Cannot use a \"yield\" here without parentheses") catch unreachable;
}

if (p.fn_or_arrow_data_parse.track_arrow_arg_errors) {
p.fn_or_arrow_data_parse.arrow_arg_errors.invalid_expr_yield = name_range;
}

return p.parseYieldExpr(loc);
}
},
// .allow_ident => {

// },
else => {
// Try to gracefully recover if "yield" is used in the wrong place
if (!p.lexer.has_newline_before) {
switch (p.lexer.token) {
.t_null, .t_identifier, .t_false, .t_true, .t_numeric_literal, .t_big_integer_literal, .t_string_literal => {
p.log.addRangeError(p.source, name_range, "Cannot use \"yield\" outside a generator function") catch unreachable;
},
else => {},
}
}
},
}
},
.none => {},
}

// Handle the start of an arrow expression
if (p.lexer.token == .t_equals_greater_than and level.lte(.assign)) {
const ref = p.storeNameInRef(name) catch unreachable;
var args = p.allocator.alloc(Arg, 1) catch unreachable;
args[0] = Arg{ .binding = p.b(B.Identifier{
.ref = ref,
}, loc) };

_ = p.pushScopeForParsePass(.function_args, loc) catch unreachable;
defer p.popScope();

var fn_or_arrow_data = FnOrArrowDataParse{
.needs_async_loc = loc,
};
return p.newExpr(try p.parseArrowBody(args, &fn_or_arrow_data), loc);
}

const ref = p.storeNameInRef(name) catch unreachable;

return Expr.initIdentifier(ref, loc);
}
fn t_template_head(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
const head = try p.lexer.toEString();

const parts = try p.parseTemplateParts(false);

// Check if TemplateLiteral is unsupported. We don't care for this product.
// if ()

return p.newExpr(E.Template{
.head = .{ .cooked = head },
.parts = parts,
}, loc);
}
fn t_numeric_literal(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
const value = p.newExpr(E.Number{ .value = p.lexer.number }, loc);
// p.checkForLegacyOctalLiteral()
try p.lexer.next();
return value;
}
fn t_big_integer_literal(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
const value = p.lexer.identifier;
// markSyntaxFeature bigInt
try p.lexer.next();
return p.newExpr(E.BigInt{ .value = value }, loc);
}
fn t_slash(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.scanRegExp();
// always set regex_flags_start to null to make sure we don't accidentally use the wrong value later
defer p.lexer.regex_flags_start = null;
const value = p.lexer.raw();
try p.lexer.next();

return p.newExpr(E.RegExp{ .value = value, .flags_offset = p.lexer.regex_flags_start }, loc);
}
fn t_void(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.next();
const value = try p.parseExpr(.prefix);
if (p.lexer.token == .t_asterisk_asterisk) {
try p.lexer.unexpected();
return error.SyntaxError;
}

return p.newExpr(E.Unary{
.op = .un_void,
.value = value,
}, loc);
}
fn t_typeof(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.next();
const value = try p.parseExpr(.prefix);
if (p.lexer.token == .t_asterisk_asterisk) {
try p.lexer.unexpected();
return error.SyntaxError;
}

return p.newExpr(E.Unary{ .op = .un_typeof, .value = value }, loc);
}
fn t_delete(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.next();
const value = try p.parseExpr(.prefix);
if (p.lexer.token == .t_asterisk_asterisk) {
try p.lexer.unexpected();
return error.SyntaxError;
}
if (value.data == .e_index) {
if (value.data.e_index.index.data == .e_private_identifier) {
const private = value.data.e_index.index.data.e_private_identifier;
const name = p.loadNameFromRef(private.ref);
const range = logger.Range{ .loc = value.loc, .len = @as(i32, @intCast(name.len)) };
p.log.addRangeErrorFmt(p.source, range, p.allocator, "Deleting the private name \"{s}\" is forbidden", .{name}) catch unreachable;
}
}

return p.newExpr(E.Unary{ .op = .un_delete, .value = value }, loc);
}
fn t_plus(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.next();
const value = try p.parseExpr(.prefix);
if (p.lexer.token == .t_asterisk_asterisk) {
try p.lexer.unexpected();
return error.SyntaxError;
}

return p.newExpr(E.Unary{ .op = .un_pos, .value = value }, loc);
}
fn t_minus(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.next();
const value = try p.parseExpr(.prefix);
if (p.lexer.token == .t_asterisk_asterisk) {
try p.lexer.unexpected();
return error.SyntaxError;
}

return p.newExpr(E.Unary{ .op = .un_neg, .value = value }, loc);
}
fn t_tilde(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.next();
const value = try p.parseExpr(.prefix);
if (p.lexer.token == .t_asterisk_asterisk) {
try p.lexer.unexpected();
return error.SyntaxError;
}

return p.newExpr(E.Unary{ .op = .un_cpl, .value = value }, loc);
}
fn t_exclamation(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.next();
const value = try p.parseExpr(.prefix);
if (p.lexer.token == .t_asterisk_asterisk) {
try p.lexer.unexpected();
return error.SyntaxError;
}

return p.newExpr(E.Unary{ .op = .un_not, .value = value }, loc);
}
fn t_minus_minus(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.next();
return p.newExpr(E.Unary{ .op = .un_pre_dec, .value = try p.parseExpr(.prefix) }, loc);
}
fn t_plus_plus(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.next();
return p.newExpr(E.Unary{ .op = .un_pre_inc, .value = try p.parseExpr(.prefix) }, loc);
}
fn t_function(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
return try p.parseFnExpr(loc, false, logger.Range.None);
}
fn t_class(noalias p: *P) anyerror!Expr {
const loc = p.lexer.loc();
const classKeyword = p.lexer.range();
// markSyntaxFeature class
try p.lexer.next();
var name: ?js_ast.LocRef = null;

_ = p.pushScopeForParsePass(.class_name, loc) catch unreachable;

// Parse an optional class name
if (p.lexer.token == .t_identifier) {
const name_text = p.lexer.identifier;
if (!is_typescript_enabled or !strings.eqlComptime(name_text, "implements")) {
if (p.fn_or_arrow_data_parse.allow_await != .allow_ident and strings.eqlComptime(name_text, "await")) {
p.log.addRangeError(p.source, p.lexer.range(), "Cannot use \"await\" as an identifier here") catch unreachable;
}

name = js_ast.LocRef{
.loc = p.lexer.loc(),
.ref = p.newSymbol(
.other,
name_text,
) catch unreachable,
};
try p.lexer.next();
}
}

// Even anonymous classes can have TypeScript type parameters
if (is_typescript_enabled) {
_ = try p.skipTypeScriptTypeParameters(.{ .allow_in_out_variance_annotations = true, .allow_const_modifier = true });
}

const class = try p.parseClass(classKeyword, name, ParseClassOptions{});
p.popScope();

return p.newExpr(class, loc);
}
fn t_new(noalias p: *P, flags: Expr.EFlags) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.next();

// Special-case the weird "new.target" expression here
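// For example: in "function F() { return new.target; }", "new.target" is
// only set when F is invoked with "new".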
if (p.lexer.token == .t_dot) {
try p.lexer.next();

if (p.lexer.token != .t_identifier or !strings.eqlComptime(p.lexer.raw(), "target")) {
try p.lexer.unexpected();
return error.SyntaxError;
}
const range = logger.Range{ .loc = loc, .len = p.lexer.range().end().start - loc.start };

try p.lexer.next();
return p.newExpr(E.NewTarget{ .range = range }, loc);
}

// This will become the new expr
var new = p.newExpr(E.New{
.target = undefined,
.args = undefined,
.close_parens_loc = undefined,
}, loc);

try p.parseExprWithFlags(.member, flags, &new.data.e_new.target);

if (comptime is_typescript_enabled) {
// Skip over TypeScript type arguments here if there are any
if (p.lexer.token == .t_less_than) {
_ = p.trySkipTypeScriptTypeArgumentsWithBacktracking();
}
}

if (p.lexer.token == .t_open_paren) {
const call_args = try p.parseCallArgs();
new.data.e_new.args = call_args.list;
new.data.e_new.close_parens_loc = call_args.loc;
} else {
new.data.e_new.close_parens_loc = .Empty;
new.data.e_new.args = .{};
}

return new;
}
fn t_open_bracket(noalias p: *P, noalias errors: ?*DeferredErrors) anyerror!Expr {
const loc = p.lexer.loc();
try p.lexer.next();
var is_single_line = !p.lexer.has_newline_before;
var items = ListManaged(Expr).init(p.allocator);
var self_errors = DeferredErrors{};
var comma_after_spread = logger.Loc{};

// Allow "in" inside arrays
const old_allow_in = p.allow_in;
p.allow_in = true;

while (p.lexer.token != .t_close_bracket) {
switch (p.lexer.token) {
.t_comma => {
items.append(Expr{ .data = Prefill.Data.EMissing, .loc = p.lexer.loc() }) catch unreachable;
},
.t_dot_dot_dot => {
if (errors != null)
errors.?.array_spread_feature = p.lexer.range();

const dots_loc = p.lexer.loc();
try p.lexer.next();
try items.ensureUnusedCapacity(1);
const spread_expr: *Expr = &items.unusedCapacitySlice()[0];
spread_expr.* = p.newExpr(E.Spread{ .value = undefined }, dots_loc);
try p.parseExprOrBindings(.comma, &self_errors, &spread_expr.data.e_spread.value);
items.items.len += 1;

// Commas are not allowed here when destructuring
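// For example: "[...rest,] = x" is a syntax error, while the plain array
// literal "[...rest,]" is allowed.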
|
||||
if (p.lexer.token == .t_comma) {
|
||||
comma_after_spread = p.lexer.loc();
|
||||
}
|
||||
},
|
||||
else => {
|
||||
try items.ensureUnusedCapacity(1);
|
||||
const item: *Expr = &items.unusedCapacitySlice()[0];
|
||||
try p.parseExprOrBindings(.comma, &self_errors, item);
|
||||
items.items.len += 1;
|
||||
},
|
||||
}
|
||||
|
||||
if (p.lexer.token != .t_comma) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (p.lexer.has_newline_before) {
|
||||
is_single_line = false;
|
||||
}
|
||||
|
||||
try p.lexer.next();
|
||||
|
||||
if (p.lexer.has_newline_before) {
|
||||
is_single_line = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (p.lexer.has_newline_before) {
|
||||
is_single_line = false;
|
||||
}
|
||||
|
||||
const close_bracket_loc = p.lexer.loc();
|
||||
try p.lexer.expect(.t_close_bracket);
|
||||
p.allow_in = old_allow_in;
|
||||
|
||||
// Is this a binding pattern?
|
||||
if (p.willNeedBindingPattern()) {
|
||||
// noop
|
||||
} else if (errors == null) {
|
||||
// Is this an expression?
|
||||
p.logExprErrors(&self_errors);
|
||||
} else {
|
||||
// In this case, we can't distinguish between the two yet
|
||||
self_errors.mergeInto(errors.?);
|
||||
}
|
||||
return p.newExpr(E.Array{
|
||||
.items = ExprNodeList.fromList(items),
|
||||
.comma_after_spread = comma_after_spread.toNullable(),
|
||||
.is_single_line = is_single_line,
|
||||
.close_bracket_loc = close_bracket_loc,
|
||||
}, loc);
|
||||
}
|
||||
        fn t_open_brace(noalias p: *P, noalias errors: ?*DeferredErrors) anyerror!Expr {
            const loc = p.lexer.loc();
            try p.lexer.next();
            var is_single_line = !p.lexer.has_newline_before;
            var properties = ListManaged(G.Property).init(p.allocator);
            var self_errors = DeferredErrors{};
            var comma_after_spread: logger.Loc = logger.Loc{};

            // Allow "in" inside object literals
            const old_allow_in = p.allow_in;
            p.allow_in = true;

            while (p.lexer.token != .t_close_brace) {
                if (p.lexer.token == .t_dot_dot_dot) {
                    try p.lexer.next();
                    try properties.ensureUnusedCapacity(1);
                    const property: *G.Property = &properties.unusedCapacitySlice()[0];
                    property.* = .{
                        .kind = .spread,
                        .value = Expr.empty,
                    };

                    try p.parseExprOrBindings(
                        .comma,
                        &self_errors,
                        &(property.value.?),
                    );
                    properties.items.len += 1;

                    // Commas are not allowed here when destructuring
                    if (p.lexer.token == .t_comma) {
                        comma_after_spread = p.lexer.loc();
                    }
                } else {
                    // This property may turn out to be a type in TypeScript, which should be ignored
                    var propertyOpts = PropertyOpts{};
                    if (try p.parseProperty(.normal, &propertyOpts, &self_errors)) |prop| {
                        if (comptime Environment.allow_assert) {
                            assert(prop.key != null or prop.value != null);
                        }
                        properties.append(prop) catch unreachable;
                    }
                }

                if (p.lexer.token != .t_comma) {
                    break;
                }

                if (p.lexer.has_newline_before) {
                    is_single_line = false;
                }

                try p.lexer.next();

                if (p.lexer.has_newline_before) {
                    is_single_line = false;
                }
            }

            if (p.lexer.has_newline_before) {
                is_single_line = false;
            }

            const close_brace_loc = p.lexer.loc();
            try p.lexer.expect(.t_close_brace);
            p.allow_in = old_allow_in;

            if (p.willNeedBindingPattern()) {
                // Is this a binding pattern?
            } else if (errors == null) {
                // Is this an expression?
                p.logExprErrors(&self_errors);
            } else {
                // In this case, we can't distinguish between the two yet
                self_errors.mergeInto(errors.?);
            }

            return p.newExpr(E.Object{
                .properties = G.Property.List.fromList(properties),
                .comma_after_spread = if (comma_after_spread.start > 0)
                    comma_after_spread
                else
                    null,
                .is_single_line = is_single_line,
                .close_brace_loc = close_brace_loc,
            }, loc);
        }
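
        // Editor's note (a hypothetical pair of inputs, not from the original
        // source) showing why this function defers errors the same way:
        //
        //   ({a = 1} = obj);  // valid: "=" is a destructuring default value
        //   ({a = 1});        // invalid: object literals have no default values
        //
        // invalid_expr_default_value is stashed in self_errors and surfaced only
        // if this "{" turns out to begin an expression rather than a pattern.
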
        fn t_less_than(noalias p: *P, level: Level, noalias errors: ?*DeferredErrors, flags: Expr.EFlags) anyerror!Expr {
            const loc = p.lexer.loc();
            // This is a very complicated and highly ambiguous area of TypeScript
            // syntax. Many similar-looking things are overloaded.
            //
            // TS:
            //
            //   A type cast:
            //     <A>(x)
            //     <[]>(x)
            //     <A[]>(x)
            //
            //   An arrow function with type parameters:
            //     <A>(x) => {}
            //     <A, B>(x) => {}
            //     <A = B>(x) => {}
            //     <A extends B>(x) => {}
            //
            // TSX:
            //
            //   A JSX element:
            //     <A>(x) => {}</A>
            //     <A extends>(x) => {}</A>
            //     <A extends={false}>(x) => {}</A>
            //
            //   An arrow function with type parameters:
            //     <A, B>(x) => {}
            //     <A extends B>(x) => {}
            //
            //   A syntax error:
            //     <[]>(x)
            //     <A[]>(x)
            //     <A>(x) => {}
            //     <A = B>(x) => {}
            if (comptime is_typescript_enabled and is_jsx_enabled) {
                if (try TypeScript.isTSArrowFnJSX(p)) {
                    _ = try p.skipTypeScriptTypeParameters(TypeParameterFlag{
                        .allow_const_modifier = true,
                    });
                    try p.lexer.expect(.t_open_paren);
                    return try p.parseParenExpr(loc, level, ParenExprOpts{ .force_arrow_fn = true });
                }
            }

            if (is_jsx_enabled) {
                // Use nextInsideJSXElement() instead of next() so we parse "<<" as "<"
                try p.lexer.nextInsideJSXElement();
                const element = try p.parseJSXElement(loc);

                // The call to parseJSXElement() above doesn't consume the last
                // t_greater_than because the caller knows which next() function to call.
                // Use next() instead of nextInsideJSXElement() here since the next
                // token is an expression.
                try p.lexer.next();
                return element;
            }

            if (is_typescript_enabled) {
                // This is either an old-style type cast or a generic lambda function

                // "<T>(x)"
                // "<T>(x) => {}"
                switch (p.trySkipTypeScriptTypeParametersThenOpenParenWithBacktracking()) {
                    .did_not_skip_anything => {},
                    else => |result| {
                        try p.lexer.expect(.t_open_paren);
                        return p.parseParenExpr(loc, level, ParenExprOpts{
                            .force_arrow_fn = result == .definitely_type_parameters,
                        });
                    },
                }

                // "<T>x"
                try p.lexer.next();
                try p.skipTypeScriptType(.lowest);
                try p.lexer.expectGreaterThan(false);
                return p.parsePrefix(level, errors, flags);
            }

            try p.lexer.unexpected();
            return error.SyntaxError;
        }
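
        // Editor's note (illustrative, not from the original source): in .tsx
        // files the usual way to force the "arrow function" reading over the
        // "JSX element" reading is to make the type parameter list unambiguous:
        //
        //   const f = <T,>(x: T) => x;                 // trailing comma
        //   const g = <T extends unknown>(x: T) => x;  // "extends" clause
        //
        // which is the kind of shape isTSArrowFnJSX() probes for above.
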
        fn t_import(noalias p: *P, level: Level) anyerror!Expr {
            const loc = p.lexer.loc();
            try p.lexer.next();
            return p.parseImportExpr(loc, level);
        }

        // Before splitting this up, this used 3 KB of stack space per call.
        pub fn parsePrefix(noalias p: *P, level: Level, noalias errors: ?*DeferredErrors, flags: Expr.EFlags) anyerror!Expr {
            return switch (p.lexer.token) {
                .t_open_bracket => t_open_bracket(p, errors),
                .t_open_brace => t_open_brace(p, errors),
                .t_less_than => t_less_than(p, level, errors, flags),
                .t_import => t_import(p, level),
                .t_open_paren => t_open_paren(p, level),
                .t_private_identifier => t_private_identifier(p, level),
                .t_identifier => t_identifier(p, level),
                .t_false => t_false(p),
                .t_true => t_true(p),
                .t_null => t_null(p),
                .t_this => t_this(p),
                .t_template_head => t_template_head(p),
                .t_numeric_literal => t_numeric_literal(p),
                .t_big_integer_literal => t_big_integer_literal(p),
                .t_string_literal, .t_no_substitution_template_literal => p.parseStringLiteral(),
                .t_slash_equals, .t_slash => t_slash(p),
                .t_void => t_void(p),
                .t_typeof => t_typeof(p),
                .t_delete => t_delete(p),
                .t_plus => t_plus(p),
                .t_minus => t_minus(p),
                .t_tilde => t_tilde(p),
                .t_exclamation => t_exclamation(p),
                .t_minus_minus => t_minus_minus(p),
                .t_plus_plus => t_plus_plus(p),
                .t_function => t_function(p),
                .t_class => t_class(p),
                .t_new => t_new(p, flags),
                .t_super => t_super(p, level),
                else => {
                    @branchHint(.cold);
                    try p.lexer.unexpected();
                    return error.SyntaxError;
                },
            };
        }
    };
}

const bun = @import("bun");
const Environment = bun.Environment;
const assert = bun.assert;
const logger = bun.logger;
const strings = bun.strings;

const js_ast = bun.ast;
const B = js_ast.B;
const E = js_ast.E;
const Expr = js_ast.Expr;
const ExprNodeList = js_ast.ExprNodeList;
const LocRef = js_ast.LocRef;

const G = js_ast.G;
const Arg = G.Arg;
const Property = G.Property;

const Op = js_ast.Op;
const Level = js_ast.Op.Level;

const js_lexer = bun.js_lexer;
const T = js_lexer.T;

const js_parser = bun.js_parser;
const AsyncPrefixExpression = js_parser.AsyncPrefixExpression;
const DeferredErrors = js_parser.DeferredErrors;
const FnOrArrowDataParse = js_parser.FnOrArrowDataParse;
const JSXTransformType = js_parser.JSXTransformType;
const ParenExprOpts = js_parser.ParenExprOpts;
const ParseClassOptions = js_parser.ParseClassOptions;
const Prefill = js_parser.Prefill;
const PropertyOpts = js_parser.PropertyOpts;
const TypeParameterFlag = js_parser.TypeParameterFlag;
const TypeScript = js_parser.TypeScript;

const std = @import("std");
const List = std.ArrayListUnmanaged;
const ListManaged = std.ArrayList;
@@ -1,575 +0,0 @@
pub fn ParseProperty(
    comptime parser_feature__typescript: bool,
    comptime parser_feature__jsx: JSXTransformType,
    comptime parser_feature__scan_only: bool,
) type {
    return struct {
        const P = js_parser.NewParser_(parser_feature__typescript, parser_feature__jsx, parser_feature__scan_only);
        const is_typescript_enabled = P.is_typescript_enabled;

        fn parseMethodExpression(p: *P, kind: Property.Kind, opts: *PropertyOpts, is_computed: bool, key: *Expr, key_range: logger.Range) anyerror!?G.Property {
            if (p.lexer.token == .t_open_paren and kind != .get and kind != .set) {
                // markSyntaxFeature object extensions
            }

            const loc = p.lexer.loc();
            const scope_index = p.pushScopeForParsePass(.function_args, loc) catch unreachable;
            var is_constructor = false;

            // Forbid the names "constructor" and "prototype" in some cases
            if (opts.is_class and !is_computed) {
                switch (key.data) {
                    .e_string => |str| {
                        if (!opts.is_static and str.eqlComptime("constructor")) {
                            if (kind == .get) {
                                p.log.addRangeError(p.source, key_range, "Class constructor cannot be a getter") catch unreachable;
                            } else if (kind == .set) {
                                p.log.addRangeError(p.source, key_range, "Class constructor cannot be a setter") catch unreachable;
                            } else if (opts.is_async) {
                                p.log.addRangeError(p.source, key_range, "Class constructor cannot be an async function") catch unreachable;
                            } else if (opts.is_generator) {
                                p.log.addRangeError(p.source, key_range, "Class constructor cannot be a generator function") catch unreachable;
                            } else {
                                is_constructor = true;
                            }
                        } else if (opts.is_static and str.eqlComptime("prototype")) {
                            p.log.addRangeError(p.source, key_range, "Invalid static method name \"prototype\"") catch unreachable;
                        }
                    },
                    else => {},
                }
            }

            var func = try p.parseFn(null, FnOrArrowDataParse{
                .async_range = opts.async_range,
                .needs_async_loc = key.loc,
                .has_async_range = !opts.async_range.isEmpty(),
                .allow_await = if (opts.is_async) AwaitOrYield.allow_expr else AwaitOrYield.allow_ident,
                .allow_yield = if (opts.is_generator) AwaitOrYield.allow_expr else AwaitOrYield.allow_ident,
                .allow_super_call = opts.class_has_extends and is_constructor,
                .allow_super_property = true,
                .allow_ts_decorators = opts.allow_ts_decorators,
                .is_constructor = is_constructor,
                .has_decorators = opts.ts_decorators.len > 0 or (opts.has_class_decorators and is_constructor),

                // Only allow omitting the body if we're parsing TypeScript class
                .allow_missing_body_for_type_script = is_typescript_enabled and opts.is_class,
            });

            opts.has_argument_decorators = opts.has_argument_decorators or p.fn_or_arrow_data_parse.has_argument_decorators;
            p.fn_or_arrow_data_parse.has_argument_decorators = false;

            // "class Foo { foo(): void; foo(): void {} }"
            if (func.flags.contains(.is_forward_declaration)) {
                // Skip this property entirely
                p.popAndDiscardScope(scope_index);
                return null;
            }

            p.popScope();
            func.flags.insert(.is_unique_formal_parameters);
            const value = p.newExpr(E.Function{ .func = func }, loc);

            // Enforce argument rules for accessors
            switch (kind) {
                .get => {
                    if (func.args.len > 0) {
                        const r = js_lexer.rangeOfIdentifier(p.source, func.args[0].binding.loc);
                        p.log.addRangeErrorFmt(p.source, r, p.allocator, "Getter {s} must have zero arguments", .{p.keyNameForError(key)}) catch unreachable;
                    }
                },
                .set => {
                    if (func.args.len != 1) {
                        var r = js_lexer.rangeOfIdentifier(p.source, if (func.args.len > 0) func.args[0].binding.loc else loc);
                        if (func.args.len > 1) {
                            r = js_lexer.rangeOfIdentifier(p.source, func.args[1].binding.loc);
                        }
                        p.log.addRangeErrorFmt(p.source, r, p.allocator, "Setter {s} must have exactly 1 argument (there are {d})", .{ p.keyNameForError(key), func.args.len }) catch unreachable;
                    }
                },
                else => {},
            }

            // Special-case private identifiers
            switch (key.data) {
                .e_private_identifier => |*private| {
                    const declare: Symbol.Kind = switch (kind) {
                        .get => if (opts.is_static)
                            .private_static_get
                        else
                            .private_get,

                        .set => if (opts.is_static)
                            .private_static_set
                        else
                            .private_set,
                        else => if (opts.is_static)
                            .private_static_method
                        else
                            .private_method,
                    };

                    const name = p.loadNameFromRef(private.ref);
                    if (strings.eqlComptime(name, "#constructor")) {
                        p.log.addRangeError(p.source, key_range, "Invalid method name \"#constructor\"") catch unreachable;
                    }
                    private.ref = p.declareSymbol(declare, key.loc, name) catch unreachable;
                },
                else => {},
            }

            return G.Property{
                .ts_decorators = ExprNodeList.init(opts.ts_decorators),
                .kind = kind,
                .flags = Flags.Property.init(.{
                    .is_computed = is_computed,
                    .is_method = true,
                    .is_static = opts.is_static,
                }),
                .key = key.*,
                .value = value,
                .ts_metadata = .m_function,
            };
        }
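
        // Editor's note (illustrative, not from the original source):
        // parseMethodExpression covers every function-valued property shape, e.g.:
        //
        //   const o = { foo() {}, get x() { return 1; }, set x(v) {}, async bar() {} };
        //   class C { constructor() {} static baz() {} #priv() {} }
        //
        // The accessor checks above are why "get x(a) {}" and "set x() {}" are
        // rejected with range errors instead of parsing as ordinary methods.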

        pub fn parseProperty(p: *P, kind_: Property.Kind, opts: *PropertyOpts, errors_: ?*DeferredErrors) anyerror!?G.Property {
            var kind = kind_;
            var errors = errors_;
            // This while loop exists to conserve stack space by reducing (but not completely eliminating) recursion.
            restart: while (true) {
                var key: Expr = Expr{ .loc = logger.Loc.Empty, .data = .{ .e_missing = E.Missing{} } };
                const key_range = p.lexer.range();
                var is_computed = false;

                switch (p.lexer.token) {
                    .t_numeric_literal => {
                        key = p.newExpr(E.Number{
                            .value = p.lexer.number,
                        }, p.lexer.loc());
                        // p.checkForLegacyOctalLiteral()
                        try p.lexer.next();
                    },
                    .t_string_literal => {
                        key = try p.parseStringLiteral();
                    },
                    .t_big_integer_literal => {
                        key = p.newExpr(E.BigInt{ .value = p.lexer.identifier }, p.lexer.loc());
                        // markSyntaxFeature
                        try p.lexer.next();
                    },
                    .t_private_identifier => {
                        if (!opts.is_class or opts.ts_decorators.len > 0) {
                            try p.lexer.expected(.t_identifier);
                        }

                        key = p.newExpr(E.PrivateIdentifier{ .ref = p.storeNameInRef(p.lexer.identifier) catch unreachable }, p.lexer.loc());
                        try p.lexer.next();
                    },
                    .t_open_bracket => {
                        is_computed = true;
                        // p.markSyntaxFeature(compat.objectExtensions, p.lexer.range())
                        try p.lexer.next();
                        const wasIdentifier = p.lexer.token == .t_identifier;
                        const expr = try p.parseExpr(.comma);

                        if (comptime is_typescript_enabled) {
                            // Handle index signatures
                            if (p.lexer.token == .t_colon and wasIdentifier and opts.is_class) {
                                switch (expr.data) {
                                    .e_identifier => {
                                        try p.lexer.next();
                                        try p.skipTypeScriptType(.lowest);
                                        try p.lexer.expect(.t_close_bracket);
                                        try p.lexer.expect(.t_colon);
                                        try p.skipTypeScriptType(.lowest);
                                        try p.lexer.expectOrInsertSemicolon();

                                        // Skip this property entirely
                                        return null;
                                    },
                                    else => {},
                                }
                            }
                        }

                        try p.lexer.expect(.t_close_bracket);
                        key = expr;
                    },
                    .t_asterisk => {
                        if (kind != .normal or opts.is_generator) {
                            try p.lexer.unexpected();
                            return error.SyntaxError;
                        }

                        try p.lexer.next();
                        opts.is_generator = true;
                        kind = .normal;
                        continue :restart;
                    },

                    else => {
                        const name = p.lexer.identifier;
                        const raw = p.lexer.raw();
                        const name_range = p.lexer.range();

                        if (!p.lexer.isIdentifierOrKeyword()) {
                            try p.lexer.expect(.t_identifier);
                        }

                        try p.lexer.next();

                        // Support contextual keywords
                        if (kind == .normal and !opts.is_generator) {
                            // Does the following token look like a key?
                            const couldBeModifierKeyword = p.lexer.isIdentifierOrKeyword() or switch (p.lexer.token) {
                                .t_open_bracket, .t_numeric_literal, .t_string_literal, .t_asterisk, .t_private_identifier => true,
                                else => false,
                            };

                            // If so, check for a modifier keyword
                            if (couldBeModifierKeyword) {
                                // TODO: micro-optimization, use a smaller list for non-typescript files.
                                if (js_lexer.PropertyModifierKeyword.List.get(name)) |keyword| {
                                    switch (keyword) {
                                        .p_get => {
                                            if (!opts.is_async and (js_lexer.PropertyModifierKeyword.List.get(raw) orelse .p_static) == .p_get) {
                                                kind = .get;
                                                errors = null;
                                                continue :restart;
                                            }
                                        },

                                        .p_set => {
                                            if (!opts.is_async and (js_lexer.PropertyModifierKeyword.List.get(raw) orelse .p_static) == .p_set) {
                                                // p.markSyntaxFeature(ObjectAccessors, name_range)
                                                kind = .set;
                                                errors = null;
                                                continue :restart;
                                            }
                                        },
                                        .p_async => {
                                            if (!opts.is_async and (js_lexer.PropertyModifierKeyword.List.get(raw) orelse .p_static) == .p_async and !p.lexer.has_newline_before) {
                                                opts.is_async = true;
                                                opts.async_range = name_range;

                                                // p.markSyntaxFeature(ObjectAccessors, name_range)

                                                errors = null;
                                                continue :restart;
                                            }
                                        },
                                        .p_static => {
                                            if (!opts.is_static and !opts.is_async and opts.is_class and (js_lexer.PropertyModifierKeyword.List.get(raw) orelse .p_get) == .p_static) {
                                                opts.is_static = true;
                                                kind = .normal;
                                                errors = null;
                                                continue :restart;
                                            }
                                        },
                                        .p_declare => {
                                            // skip declare keyword entirely
                                            // https://github.com/oven-sh/bun/issues/1907
                                            if (opts.is_class and is_typescript_enabled and strings.eqlComptime(raw, "declare")) {
                                                const scope_index = p.scopes_in_order.items.len;
                                                if (try p.parseProperty(kind, opts, null)) |_prop| {
                                                    var prop = _prop;
                                                    if (prop.kind == .normal and prop.value == null and opts.ts_decorators.len > 0) {
                                                        prop.kind = .declare;
                                                        return prop;
                                                    }
                                                }

                                                p.discardScopesUpTo(scope_index);
                                                return null;
                                            }
                                        },
                                        .p_abstract => {
                                            if (opts.is_class and is_typescript_enabled and !opts.is_ts_abstract and strings.eqlComptime(raw, "abstract")) {
                                                opts.is_ts_abstract = true;
                                                const scope_index = p.scopes_in_order.items.len;
                                                if (try p.parseProperty(kind, opts, null)) |*prop| {
                                                    if (prop.kind == .normal and prop.value == null and opts.ts_decorators.len > 0) {
                                                        var prop_ = prop.*;
                                                        prop_.kind = .abstract;
                                                        return prop_;
                                                    }
                                                }
                                                p.discardScopesUpTo(scope_index);
                                                return null;
                                            }
                                        },
                                        .p_private, .p_protected, .p_public, .p_readonly, .p_override => {
                                            // Skip over TypeScript keywords
                                            if (opts.is_class and is_typescript_enabled and (js_lexer.PropertyModifierKeyword.List.get(raw) orelse .p_static) == keyword) {
                                                errors = null;
                                                continue :restart;
                                            }
                                        },
                                    }
                                }
                            } else if (p.lexer.token == .t_open_brace and strings.eqlComptime(name, "static")) {
                                const loc = p.lexer.loc();
                                try p.lexer.next();

                                const old_fn_or_arrow_data_parse = p.fn_or_arrow_data_parse;
                                p.fn_or_arrow_data_parse = .{
                                    .is_return_disallowed = true,
                                    .allow_super_property = true,
                                    .allow_await = .forbid_all,
                                };

                                _ = try p.pushScopeForParsePass(.class_static_init, loc);
                                var _parse_opts = ParseStatementOptions{};
                                const stmts = try p.parseStmtsUpTo(.t_close_brace, &_parse_opts);

                                p.popScope();

                                p.fn_or_arrow_data_parse = old_fn_or_arrow_data_parse;
                                try p.lexer.expect(.t_close_brace);

                                const block = p.allocator.create(
                                    G.ClassStaticBlock,
                                ) catch unreachable;

                                block.* = G.ClassStaticBlock{
                                    .stmts = js_ast.BabyList(Stmt).init(stmts),
                                    .loc = loc,
                                };

                                return G.Property{
                                    .kind = .class_static_block,
                                    .class_static_block = block,
                                };
                            }
                        }

                        // Handle invalid identifiers in property names
                        // https://github.com/oven-sh/bun/issues/12039
                        if (p.lexer.token == .t_syntax_error) {
                            p.log.addRangeErrorFmt(p.source, name_range, p.allocator, "Unexpected {}", .{bun.fmt.quote(name)}) catch bun.outOfMemory();
                            return error.SyntaxError;
                        }

                        key = p.newExpr(E.String{ .data = name }, name_range.loc);

                        // Parse a shorthand property
                        const isShorthandProperty = !opts.is_class and
                            kind == .normal and
                            p.lexer.token != .t_colon and
                            p.lexer.token != .t_open_paren and
                            p.lexer.token != .t_less_than and
                            !opts.is_generator and
                            !opts.is_async and
                            !js_lexer.Keywords.has(name);

                        if (isShorthandProperty) {
                            if ((p.fn_or_arrow_data_parse.allow_await != .allow_ident and
                                strings.eqlComptime(name, "await")) or
                                (p.fn_or_arrow_data_parse.allow_yield != .allow_ident and
                                strings.eqlComptime(name, "yield")))
                            {
                                if (strings.eqlComptime(name, "await")) {
                                    p.log.addRangeError(p.source, name_range, "Cannot use \"await\" here") catch unreachable;
                                } else {
                                    p.log.addRangeError(p.source, name_range, "Cannot use \"yield\" here") catch unreachable;
                                }
                            }

                            const ref = p.storeNameInRef(name) catch unreachable;
                            const value = p.newExpr(E.Identifier{ .ref = ref }, key.loc);

                            // Destructuring patterns have an optional default value
                            var initializer: ?Expr = null;
                            if (errors != null and p.lexer.token == .t_equals) {
                                errors.?.invalid_expr_default_value = p.lexer.range();
                                try p.lexer.next();
                                initializer = try p.parseExpr(.comma);
                            }

                            return G.Property{
                                .kind = kind,
                                .key = key,
                                .value = value,
                                .initializer = initializer,
                                .flags = Flags.Property.init(.{
                                    .was_shorthand = true,
                                }),
                            };
                        }
                    },
                }
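
                // Editor's note (illustrative, not from the original source): a
                // modifier keyword is only treated as a modifier when another key
                // follows it, so these parse differently:
                //
                //   const o = { get: 1, async: 2 };  // plain properties named "get"/"async"
                //   const q = { get x() {} };        // an actual getter
                //
                // What follows decides among a TypeScript class field, a method,
                // and an ordinary "key: value" pair.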

                var has_type_parameters = false;
                var has_definite_assignment_assertion_operator = false;

                if (comptime is_typescript_enabled) {
                    if (opts.is_class) {
                        if (p.lexer.token == .t_question) {
                            // "class X { foo?: number }"
                            // "class X { foo!: number }"
                            try p.lexer.next();
                        } else if (p.lexer.token == .t_exclamation and
                            !p.lexer.has_newline_before and
                            kind == .normal and
                            !opts.is_async and
                            !opts.is_generator)
                        {
                            // "class X { foo!: number }"
                            try p.lexer.next();
                            has_definite_assignment_assertion_operator = true;
                        }
                    }

                    // "class X { foo?<T>(): T }"
                    // "const x = { foo<T>(): T {} }"
                    if (!has_definite_assignment_assertion_operator) {
                        has_type_parameters = try p.skipTypeScriptTypeParameters(.{ .allow_const_modifier = true }) != .did_not_skip_anything;
                    }
                }

                // Parse a class field with an optional initial value
                if (opts.is_class and
                    kind == .normal and !opts.is_async and
                    !opts.is_generator and
                    p.lexer.token != .t_open_paren and
                    !has_type_parameters and
                    (p.lexer.token != .t_open_paren or has_definite_assignment_assertion_operator))
                {
                    var initializer: ?Expr = null;
                    var ts_metadata = TypeScript.Metadata.default;

                    // Forbid the names "constructor" and "prototype" in some cases
                    if (!is_computed) {
                        switch (key.data) {
                            .e_string => |str| {
                                if (str.eqlComptime("constructor") or (opts.is_static and str.eqlComptime("prototype"))) {
                                    // TODO: fmt error message to include string value.
                                    p.log.addRangeError(p.source, key_range, "Invalid field name") catch unreachable;
                                }
                            },
                            else => {},
                        }
                    }

                    if (comptime is_typescript_enabled) {
                        // Skip over types
                        if (p.lexer.token == .t_colon) {
                            try p.lexer.next();
                            if (p.options.features.emit_decorator_metadata and opts.is_class and opts.ts_decorators.len > 0) {
                                ts_metadata = try p.skipTypeScriptTypeWithMetadata(.lowest);
                            } else {
                                try p.skipTypeScriptType(.lowest);
                            }
                        }
                    }

                    if (p.lexer.token == .t_equals) {
                        if (comptime is_typescript_enabled) {
                            if (!opts.declare_range.isEmpty()) {
                                try p.log.addRangeError(p.source, p.lexer.range(), "Class fields that use \"declare\" cannot be initialized");
                            }
                        }

                        try p.lexer.next();

                        // "this" and "super" property access is allowed in field initializers
                        const old_is_this_disallowed = p.fn_or_arrow_data_parse.is_this_disallowed;
                        const old_allow_super_property = p.fn_or_arrow_data_parse.allow_super_property;
                        p.fn_or_arrow_data_parse.is_this_disallowed = false;
                        p.fn_or_arrow_data_parse.allow_super_property = true;

                        initializer = try p.parseExpr(.comma);

                        p.fn_or_arrow_data_parse.is_this_disallowed = old_is_this_disallowed;
                        p.fn_or_arrow_data_parse.allow_super_property = old_allow_super_property;
                    }

                    // Special-case private identifiers
                    switch (key.data) {
                        .e_private_identifier => |*private| {
                            const name = p.loadNameFromRef(private.ref);
                            if (strings.eqlComptime(name, "#constructor")) {
                                p.log.addRangeError(p.source, key_range, "Invalid field name \"#constructor\"") catch unreachable;
                            }

                            const declare: js_ast.Symbol.Kind = if (opts.is_static)
                                .private_static_field
                            else
                                .private_field;

                            private.ref = p.declareSymbol(declare, key.loc, name) catch unreachable;
                        },
                        else => {},
                    }

                    try p.lexer.expectOrInsertSemicolon();

                    return G.Property{
                        .ts_decorators = ExprNodeList.init(opts.ts_decorators),
                        .kind = kind,
                        .flags = Flags.Property.init(.{
                            .is_computed = is_computed,
                            .is_static = opts.is_static,
                        }),
                        .key = key,
                        .initializer = initializer,
                        .ts_metadata = ts_metadata,
                    };
                }

                // Parse a method expression
                if (p.lexer.token == .t_open_paren or kind != .normal or opts.is_class or opts.is_async or opts.is_generator) {
                    return parseMethodExpression(p, kind, opts, is_computed, &key, key_range);
                }

                // Parse an object key/value pair
                try p.lexer.expect(.t_colon);
                var property: G.Property = .{
                    .kind = kind,
                    .flags = Flags.Property.init(.{
                        .is_computed = is_computed,
                    }),
                    .key = key,
                    .value = Expr{ .data = .e_missing, .loc = .{} },
                };

                try p.parseExprOrBindings(.comma, errors, &property.value.?);
                return property;
            }
        }
    };
}

const string = []const u8;

const bun = @import("bun");
const logger = bun.logger;
const strings = bun.strings;

const js_ast = bun.ast;
const E = js_ast.E;
const Expr = js_ast.Expr;
const ExprNodeList = js_ast.ExprNodeList;
const Flags = js_ast.Flags;
const Stmt = js_ast.Stmt;
const Symbol = js_ast.Symbol;

const G = js_ast.G;
const Property = G.Property;

const js_lexer = bun.js_lexer;
const T = js_lexer.T;

const js_parser = bun.js_parser;
const AwaitOrYield = js_parser.AwaitOrYield;
const DeferredErrors = js_parser.DeferredErrors;
const FnOrArrowDataParse = js_parser.FnOrArrowDataParse;
const JSXTransformType = js_parser.JSXTransformType;
const ParseStatementOptions = js_parser.ParseStatementOptions;
const PropertyOpts = js_parser.PropertyOpts;
const TypeScript = js_parser.TypeScript;
const options = js_parser.options;

const std = @import("std");
const List = std.ArrayListUnmanaged;
File diff suppressed because it is too large
@@ -1,956 +0,0 @@
pub fn ParseSuffix(
    comptime parser_feature__typescript: bool,
    comptime parser_feature__jsx: JSXTransformType,
    comptime parser_feature__scan_only: bool,
) type {
    return struct {
        const P = js_parser.NewParser_(parser_feature__typescript, parser_feature__jsx, parser_feature__scan_only);
        const is_typescript_enabled = P.is_typescript_enabled;

        fn handleTypescriptAs(p: *P, level: Level) anyerror!Continuation {
            if (is_typescript_enabled and level.lt(.compare) and !p.lexer.has_newline_before and (p.lexer.isContextualKeyword("as") or p.lexer.isContextualKeyword("satisfies"))) {
                try p.lexer.next();
                try p.skipTypeScriptType(.lowest);

                // These tokens are not allowed to follow a cast expression. This isn't
                // an outright error because it may be on a new line, in which case it's
                // the start of a new expression when it's after a cast:
                //
                //   x = y as z
                //   (something);
                //
                switch (p.lexer.token) {
                    .t_plus_plus,
                    .t_minus_minus,
                    .t_no_substitution_template_literal,
                    .t_template_head,
                    .t_open_paren,
                    .t_open_bracket,
                    .t_question_dot,
                    => {
                        p.forbid_suffix_after_as_loc = p.lexer.loc();
                        return .done;
                    },
                    else => {},
                }

                if (p.lexer.token.isAssign()) {
                    p.forbid_suffix_after_as_loc = p.lexer.loc();
                    return .done;
                }
                return .next;
            }
            return .done;
        }

        fn t_dot(p: *P, optional_chain: *?OptionalChain, old_optional_chain: ?OptionalChain, left: *Expr) anyerror!Continuation {
            try p.lexer.next();
            const target = left.*;

            if (p.lexer.token == .t_private_identifier and p.allow_private_identifiers) {
                // "a.#b"
                // "a?.b.#c"
                switch (left.data) {
                    .e_super => {
                        try p.lexer.expected(.t_identifier);
                    },
                    else => {},
                }

                const name = p.lexer.identifier;
                const name_loc = p.lexer.loc();
                try p.lexer.next();
                const ref = p.storeNameInRef(name) catch unreachable;
                left.* = p.newExpr(E.Index{
                    .target = target,
                    .index = p.newExpr(
                        E.PrivateIdentifier{
                            .ref = ref,
                        },
                        name_loc,
                    ),
                    .optional_chain = old_optional_chain,
                }, left.loc);
            } else {
                // "a.b"
                // "a?.b.c"
                if (!p.lexer.isIdentifierOrKeyword()) {
                    try p.lexer.expect(.t_identifier);
                }

                const name = p.lexer.identifier;
                const name_loc = p.lexer.loc();
                try p.lexer.next();

                left.* = p.newExpr(
                    E.Dot{
                        .target = target,
                        .name = name,
                        .name_loc = name_loc,
                        .optional_chain = old_optional_chain,
                    },
                    left.loc,
                );
            }
            optional_chain.* = old_optional_chain;
            return .next;
        }
        fn t_question_dot(p: *P, level: Level, optional_chain: *?OptionalChain, left: *Expr) anyerror!Continuation {
            try p.lexer.next();
            var optional_start: ?OptionalChain = OptionalChain.start;

            // Remove unnecessary optional chains
            if (p.options.features.minify_syntax) {
                const result = SideEffects.toNullOrUndefined(p, left.data);
                if (result.ok and !result.value) {
                    optional_start = null;
                }
            }

            switch (p.lexer.token) {
                .t_open_bracket => {
                    // "a?.[b]"
                    try p.lexer.next();

                    // Allow "in" inside the brackets
                    const old_allow_in = p.allow_in;
                    p.allow_in = true;

                    const index = try p.parseExpr(.lowest);

                    p.allow_in = old_allow_in;

                    try p.lexer.expect(.t_close_bracket);
                    left.* = p.newExpr(
                        E.Index{ .target = left.*, .index = index, .optional_chain = optional_start },
                        left.loc,
                    );
                },

                .t_open_paren => {
                    // "a?.()"
                    if (level.gte(.call)) {
                        return .done;
                    }

                    const list_loc = try p.parseCallArgs();
                    left.* = p.newExpr(E.Call{
                        .target = left.*,
                        .args = list_loc.list,
                        .close_paren_loc = list_loc.loc,
                        .optional_chain = optional_start,
                    }, left.loc);
                },
                .t_less_than, .t_less_than_less_than => {
                    // "a?.<T>()"
                    if (comptime !is_typescript_enabled) {
                        try p.lexer.expected(.t_identifier);
                        return error.SyntaxError;
                    }

                    _ = try p.skipTypeScriptTypeArguments(false);
                    if (p.lexer.token != .t_open_paren) {
                        try p.lexer.expected(.t_open_paren);
                    }

                    if (level.gte(.call)) {
                        return .done;
                    }

                    const list_loc = try p.parseCallArgs();
                    left.* = p.newExpr(E.Call{
                        .target = left.*,
                        .args = list_loc.list,
                        .close_paren_loc = list_loc.loc,
                        .optional_chain = optional_start,
                    }, left.loc);
                },
                else => {
                    if (p.lexer.token == .t_private_identifier and p.allow_private_identifiers) {
                        // "a?.#b"
                        const name = p.lexer.identifier;
                        const name_loc = p.lexer.loc();
                        try p.lexer.next();
                        const ref = p.storeNameInRef(name) catch unreachable;
                        left.* = p.newExpr(E.Index{
                            .target = left.*,
                            .index = p.newExpr(
                                E.PrivateIdentifier{
                                    .ref = ref,
                                },
                                name_loc,
                            ),
                            .optional_chain = optional_start,
                        }, left.loc);
                    } else {
                        // "a?.b"
                        if (!p.lexer.isIdentifierOrKeyword()) {
                            try p.lexer.expect(.t_identifier);
                        }
                        const name = p.lexer.identifier;
                        const name_loc = p.lexer.loc();
                        try p.lexer.next();

                        left.* = p.newExpr(E.Dot{
                            .target = left.*,
                            .name = name,
                            .name_loc = name_loc,
                            .optional_chain = optional_start,
                        }, left.loc);
                    }
                },
            }

            // Only continue if we have started
            if ((optional_start orelse .continuation) == .start) {
                optional_chain.* = .continuation;
            }

            return .next;
        }
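
        // Editor's note (illustrative, not from the original source): the
        // start/continuation distinction above is what makes a whole chain
        // short-circuit together:
        //
        //   a?.b.c.d   // ".b" starts the chain; ".c" and ".d" continue it, so all
        //              // three accesses are skipped when "a" is null or undefined
        //   (a?.b).c   // parentheses end the chain; ".c" can throw on undefined
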
        fn t_no_substitution_template_literal(p: *P, _: Level, _: *?OptionalChain, old_optional_chain: ?OptionalChain, left: *Expr) anyerror!Continuation {
            if (old_optional_chain != null) {
                p.log.addRangeError(p.source, p.lexer.range(), "Template literals cannot have an optional chain as a tag") catch unreachable;
            }
            // p.markSyntaxFeature(compat.TemplateLiteral, p.lexer.Range());
            const head = p.lexer.rawTemplateContents();
            try p.lexer.next();

            left.* = p.newExpr(E.Template{
                .tag = left.*,
                .head = .{ .raw = head },
            }, left.loc);
            return .next;
        }
        fn t_template_head(p: *P, _: Level, _: *?OptionalChain, old_optional_chain: ?OptionalChain, left: *Expr) anyerror!Continuation {
            if (old_optional_chain != null) {
                p.log.addRangeError(p.source, p.lexer.range(), "Template literals cannot have an optional chain as a tag") catch unreachable;
            }
            // p.markSyntaxFeature(compat.TemplateLiteral, p.lexer.Range());
            const head = p.lexer.rawTemplateContents();
            const partsGroup = try p.parseTemplateParts(true);
            const tag = left.*;
            left.* = p.newExpr(E.Template{
                .tag = tag,
                .head = .{ .raw = head },
                .parts = partsGroup,
            }, left.loc);
            return .next;
        }
        fn t_open_bracket(p: *P, optional_chain: *?OptionalChain, old_optional_chain: ?OptionalChain, left: *Expr, flags: Expr.EFlags) anyerror!Continuation {
            // When parsing a decorator, ignore EIndex expressions since they may be
            // part of a computed property:
            //
            //   class Foo {
            //     @foo ['computed']() {}
            //   }
            //
            // This matches the behavior of the TypeScript compiler.
            if (flags == .ts_decorator) {
                return .done;
            }

            try p.lexer.next();

            // Allow "in" inside the brackets
            const old_allow_in = p.allow_in;
            p.allow_in = true;

            const index = try p.parseExpr(.lowest);

            p.allow_in = old_allow_in;

            try p.lexer.expect(.t_close_bracket);

            left.* = p.newExpr(E.Index{
                .target = left.*,
                .index = index,
                .optional_chain = old_optional_chain,
            }, left.loc);
            optional_chain.* = old_optional_chain;
            return .next;
        }
        fn t_open_paren(p: *P, level: Level, optional_chain: *?OptionalChain, old_optional_chain: ?OptionalChain, left: *Expr) anyerror!Continuation {
            if (level.gte(.call)) {
                return .done;
            }

            const list_loc = try p.parseCallArgs();
            left.* = p.newExpr(
                E.Call{
                    .target = left.*,
                    .args = list_loc.list,
                    .close_paren_loc = list_loc.loc,
                    .optional_chain = old_optional_chain,
                },
                left.loc,
            );
            optional_chain.* = old_optional_chain;
            return .next;
        }
        fn t_question(p: *P, level: Level, noalias errors: ?*DeferredErrors, left: *Expr) anyerror!Continuation {
            if (level.gte(.conditional)) {
                return .done;
            }
            try p.lexer.next();

            // Stop now if we're parsing one of these:
            // "(a?) => {}"
            // "(a?: b) => {}"
            // "(a?, b?) => {}"
            if (is_typescript_enabled and left.loc.start == p.latest_arrow_arg_loc.start and (p.lexer.token == .t_colon or
                p.lexer.token == .t_close_paren or p.lexer.token == .t_comma))
            {
                if (errors == null) {
                    try p.lexer.unexpected();
                    return error.SyntaxError;
                }
                errors.?.invalid_expr_after_question = p.lexer.range();
                return .done;
            }

            const ternary = p.newExpr(E.If{
                .test_ = left.*,
                .yes = undefined,
                .no = undefined,
            }, left.loc);

            // Allow "in" in between "?" and ":"
            const old_allow_in = p.allow_in;
            p.allow_in = true;

            // condition ? yes : no
            //             ^
            try p.parseExprWithFlags(.comma, .none, &ternary.data.e_if.yes);

            p.allow_in = old_allow_in;

            // condition ? yes : no
            //                 ^
            try p.lexer.expect(.t_colon);

            // condition ? yes : no
            //                   ^
            try p.parseExprWithFlags(.comma, .none, &ternary.data.e_if.no);

            // condition ? yes : no
            //                     ^

            left.* = ternary;
            return .next;
        }
        fn t_exclamation(p: *P, optional_chain: *?OptionalChain, old_optional_chain: ?OptionalChain) anyerror!Continuation {
            // Skip over TypeScript non-null assertions
            if (p.lexer.has_newline_before) {
                return .done;
            }

            if (!is_typescript_enabled) {
                try p.lexer.unexpected();
                return error.SyntaxError;
            }

            try p.lexer.next();
            optional_chain.* = old_optional_chain;

            return .next;
        }
        fn t_minus_minus(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (p.lexer.has_newline_before or level.gte(.postfix)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Unary{ .op = .un_post_dec, .value = left.* }, left.loc);
            return .next;
        }
        fn t_plus_plus(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (p.lexer.has_newline_before or level.gte(.postfix)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Unary{ .op = .un_post_inc, .value = left.* }, left.loc);
            return .next;
        }
        fn t_comma(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.comma)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_comma, .left = left.*, .right = try p.parseExpr(.comma) }, left.loc);
            return .next;
        }
        fn t_plus(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.add)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_add, .left = left.*, .right = try p.parseExpr(.add) }, left.loc);
            return .next;
        }
        fn t_plus_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.assign)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_add_assign, .left = left.*, .right = try p.parseExpr(@as(Op.Level, @enumFromInt(@intFromEnum(Op.Level.assign) - 1))) }, left.loc);
            return .next;
        }
        fn t_minus(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.add)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_sub, .left = left.*, .right = try p.parseExpr(.add) }, left.loc);
            return .next;
        }
        fn t_minus_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.assign)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_sub_assign, .left = left.*, .right = try p.parseExpr(Op.Level.sub(Op.Level.assign, 1)) }, left.loc);
            return .next;
        }
        fn t_asterisk(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.multiply)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_mul, .left = left.*, .right = try p.parseExpr(.multiply) }, left.loc);
            return .next;
        }
        fn t_asterisk_asterisk(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.exponentiation)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_pow, .left = left.*, .right = try p.parseExpr(Op.Level.exponentiation.sub(1)) }, left.loc);
            return .next;
        }
        fn t_asterisk_asterisk_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.assign)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_pow_assign, .left = left.*, .right = try p.parseExpr(Op.Level.assign.sub(1)) }, left.loc);
            return .next;
        }
        fn t_asterisk_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.assign)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_mul_assign, .left = left.*, .right = try p.parseExpr(Op.Level.assign.sub(1)) }, left.loc);
            return .next;
        }
        fn t_percent(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.multiply)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_rem, .left = left.*, .right = try p.parseExpr(Op.Level.multiply) }, left.loc);
            return .next;
        }
        fn t_percent_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.assign)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_rem_assign, .left = left.*, .right = try p.parseExpr(Level.assign.sub(1)) }, left.loc);
            return .next;
        }
        fn t_slash(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.multiply)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_div, .left = left.*, .right = try p.parseExpr(Level.multiply) }, left.loc);
            return .next;
        }
        fn t_slash_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.assign)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_div_assign, .left = left.*, .right = try p.parseExpr(Level.assign.sub(1)) }, left.loc);
            return .next;
        }
        fn t_equals_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.equals)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_loose_eq, .left = left.*, .right = try p.parseExpr(Level.equals) }, left.loc);
            return .next;
        }
        fn t_exclamation_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.equals)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_loose_ne, .left = left.*, .right = try p.parseExpr(Level.equals) }, left.loc);
            return .next;
        }
        fn t_equals_equals_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.equals)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_strict_eq, .left = left.*, .right = try p.parseExpr(Level.equals) }, left.loc);
            return .next;
        }
        fn t_exclamation_equals_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.equals)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_strict_ne, .left = left.*, .right = try p.parseExpr(Level.equals) }, left.loc);
            return .next;
        }
        fn t_less_than(p: *P, level: Level, optional_chain: *?OptionalChain, old_optional_chain: ?OptionalChain, left: *Expr) anyerror!Continuation {
            // TypeScript allows type arguments to be specified with angle brackets
            // inside an expression. Unlike in other languages, this unfortunately
            // appears to require backtracking to parse.
            if (is_typescript_enabled and p.trySkipTypeScriptTypeArgumentsWithBacktracking()) {
                optional_chain.* = old_optional_chain;
                return .next;
            }

            if (level.gte(.compare)) {
                return .done;
            }
            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_lt, .left = left.*, .right = try p.parseExpr(.compare) }, left.loc);
            return .next;
        }
        fn t_less_than_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.compare)) {
                return .done;
            }
            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_le, .left = left.*, .right = try p.parseExpr(.compare) }, left.loc);
            return .next;
        }
        fn t_greater_than(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.compare)) {
                return .done;
            }
            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_gt, .left = left.*, .right = try p.parseExpr(.compare) }, left.loc);
            return .next;
        }
        fn t_greater_than_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.compare)) {
                return .done;
            }
            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_ge, .left = left.*, .right = try p.parseExpr(.compare) }, left.loc);
            return .next;
        }
        fn t_less_than_less_than(p: *P, level: Level, optional_chain: *?OptionalChain, old_optional_chain: ?OptionalChain, left: *Expr) anyerror!Continuation {
            // TypeScript allows type arguments to be specified with angle brackets
            // inside an expression. Unlike in other languages, this unfortunately
            // appears to require backtracking to parse.
            if (is_typescript_enabled and p.trySkipTypeScriptTypeArgumentsWithBacktracking()) {
                optional_chain.* = old_optional_chain;
                return .next;
            }

            if (level.gte(.shift)) {
                return .done;
            }
            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_shl, .left = left.*, .right = try p.parseExpr(.shift) }, left.loc);
            return .next;
        }
        fn t_less_than_less_than_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.assign)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_shl_assign, .left = left.*, .right = try p.parseExpr(Level.assign.sub(1)) }, left.loc);
            return .next;
        }
        fn t_greater_than_greater_than(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.shift)) {
                return .done;
            }
            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_shr, .left = left.*, .right = try p.parseExpr(.shift) }, left.loc);
            return .next;
        }
        fn t_greater_than_greater_than_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.assign)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_shr_assign, .left = left.*, .right = try p.parseExpr(Level.assign.sub(1)) }, left.loc);
            return .next;
        }
        fn t_greater_than_greater_than_greater_than(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.shift)) {
                return .done;
            }
            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_u_shr, .left = left.*, .right = try p.parseExpr(.shift) }, left.loc);
            return .next;
        }
        fn t_greater_than_greater_than_greater_than_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.assign)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_u_shr_assign, .left = left.*, .right = try p.parseExpr(Level.assign.sub(1)) }, left.loc);
            return .next;
        }
        fn t_question_question(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.nullish_coalescing)) {
                return .done;
            }
            try p.lexer.next();
            const prev = left.*;
            left.* = p.newExpr(E.Binary{ .op = .bin_nullish_coalescing, .left = prev, .right = try p.parseExpr(.nullish_coalescing) }, left.loc);
            return .next;
        }
        fn t_question_question_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
            if (level.gte(.assign)) {
                return .done;
            }

            try p.lexer.next();
            left.* = p.newExpr(E.Binary{ .op = .bin_nullish_coalescing_assign, .left = left.*, .right = try p.parseExpr(Level.assign.sub(1)) }, left.loc);
            return .next;
        }
        fn t_bar_bar(p: *P, level: Level, left: *Expr, flags: Expr.EFlags) anyerror!Continuation {
            if (level.gte(.logical_or)) {
                return .done;
            }

            // Prevent "||" inside "??" from the right
            if (level.eql(.nullish_coalescing)) {
                try p.lexer.unexpected();
                return error.SyntaxError;
            }

            try p.lexer.next();
            const right = try p.parseExpr(.logical_or);
            left.* = p.newExpr(E.Binary{ .op = Op.Code.bin_logical_or, .left = left.*, .right = right }, left.loc);

            if (level.lt(.nullish_coalescing)) {
                try p.parseSuffix(left, Level.nullish_coalescing.addF(1), null, flags);

                if (p.lexer.token == .t_question_question) {
                    try p.lexer.unexpected();
                    return error.SyntaxError;
                }
            }
            return .next;
        }
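
        // Editor's note (illustrative, not from the original source): the
        // unexpected-token checks above and in t_ampersand_ampersand implement the
        // JavaScript rule that "??" may not be mixed with "||" or "&&" without
        // parentheses:
        //
        //   a ?? b || c    // SyntaxError
        //   (a ?? b) || c  // OK
        //   a ?? (b || c)  // OK
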
fn t_bar_bar_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
|
||||
if (level.gte(.assign)) {
|
||||
return .done;
|
||||
}
|
||||
|
||||
try p.lexer.next();
|
||||
left.* = p.newExpr(E.Binary{ .op = .bin_logical_or_assign, .left = left.*, .right = try p.parseExpr(Level.assign.sub(1)) }, left.loc);
|
||||
return .next;
|
||||
}
|
||||
fn t_ampersand_ampersand(p: *P, level: Level, left: *Expr, flags: Expr.EFlags) anyerror!Continuation {
|
||||
if (level.gte(.logical_and)) {
|
||||
return .done;
|
||||
}
|
||||
|
||||
// Prevent "&&" inside "??" from the right
|
||||
if (level.eql(.nullish_coalescing)) {
|
||||
try p.lexer.unexpected();
|
||||
return error.SyntaxError;
|
||||
}
|
||||
|
||||
try p.lexer.next();
|
||||
left.* = p.newExpr(E.Binary{ .op = .bin_logical_and, .left = left.*, .right = try p.parseExpr(.logical_and) }, left.loc);
|
||||
|
||||
// Prevent "&&" inside "??" from the left
|
||||
if (level.lt(.nullish_coalescing)) {
|
||||
try p.parseSuffix(left, Level.nullish_coalescing.addF(1), null, flags);
|
||||
|
||||
if (p.lexer.token == .t_question_question) {
|
||||
try p.lexer.unexpected();
|
||||
return error.SyntaxError;
|
||||
}
|
||||
}
|
||||
return .next;
|
||||
}
|
||||
fn t_ampersand_ampersand_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
|
||||
if (level.gte(.assign)) {
|
||||
return .done;
|
||||
}
|
||||
|
||||
try p.lexer.next();
|
||||
left.* = p.newExpr(E.Binary{ .op = .bin_logical_and_assign, .left = left.*, .right = try p.parseExpr(Level.assign.sub(1)) }, left.loc);
|
||||
return .next;
|
||||
}
|
||||
fn t_bar(p: *P, level: Level, left: *Expr) anyerror!Continuation {
|
||||
if (level.gte(.bitwise_or)) {
return .done;
}

try p.lexer.next();
left.* = p.newExpr(E.Binary{ .op = .bin_bitwise_or, .left = left.*, .right = try p.parseExpr(.bitwise_or) }, left.loc);
return .next;
}
fn t_bar_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
if (level.gte(.assign)) {
return .done;
}

try p.lexer.next();
left.* = p.newExpr(E.Binary{ .op = .bin_bitwise_or_assign, .left = left.*, .right = try p.parseExpr(Level.assign.sub(1)) }, left.loc);
return .next;
}
fn t_ampersand(p: *P, level: Level, left: *Expr) anyerror!Continuation {
if (level.gte(.bitwise_and)) {
return .done;
}

try p.lexer.next();
left.* = p.newExpr(E.Binary{ .op = .bin_bitwise_and, .left = left.*, .right = try p.parseExpr(.bitwise_and) }, left.loc);
return .next;
}
fn t_ampersand_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
if (level.gte(.assign)) {
return .done;
}

try p.lexer.next();
left.* = p.newExpr(E.Binary{ .op = .bin_bitwise_and_assign, .left = left.*, .right = try p.parseExpr(Level.assign.sub(1)) }, left.loc);
return .next;
}
fn t_caret(p: *P, level: Level, left: *Expr) anyerror!Continuation {
if (level.gte(.bitwise_xor)) {
return .done;
}

try p.lexer.next();
left.* = p.newExpr(E.Binary{ .op = .bin_bitwise_xor, .left = left.*, .right = try p.parseExpr(.bitwise_xor) }, left.loc);
return .next;
}
fn t_caret_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
if (level.gte(.assign)) {
return .done;
}

try p.lexer.next();
left.* = p.newExpr(E.Binary{ .op = .bin_bitwise_xor_assign, .left = left.*, .right = try p.parseExpr(Level.assign.sub(1)) }, left.loc);
return .next;
}
fn t_equals(p: *P, level: Level, left: *Expr) anyerror!Continuation {
if (level.gte(.assign)) {
return .done;
}

try p.lexer.next();

left.* = p.newExpr(E.Binary{ .op = .bin_assign, .left = left.*, .right = try p.parseExpr(Level.assign.sub(1)) }, left.loc);
return .next;
}
fn t_in(p: *P, level: Level, left: *Expr) anyerror!Continuation {
if (level.gte(.compare) or !p.allow_in) {
return .done;
}

// Warn about "!a in b" instead of "!(a in b)"
switch (left.data) {
.e_unary => |unary| {
if (unary.op == .un_not) {
// TODO:
// p.log.addRangeWarning(source: ?Source, r: Range, text: string)
}
},
else => {},
}

try p.lexer.next();
left.* = p.newExpr(E.Binary{ .op = .bin_in, .left = left.*, .right = try p.parseExpr(.compare) }, left.loc);
return .next;
}
fn t_instanceof(p: *P, level: Level, left: *Expr) anyerror!Continuation {
if (level.gte(.compare)) {
return .done;
}

// Warn about "!a instanceof b" instead of "!(a instanceof b)". Here's an
// example of code with this problem: https://github.com/mrdoob/three.js/pull/11182.
if (!p.options.suppress_warnings_about_weird_code) {
switch (left.data) {
.e_unary => |unary| {
if (unary.op == .un_not) {
// TODO:
// p.log.addRangeWarning(source: ?Source, r: Range, text: string)
}
},
else => {},
}
}
try p.lexer.next();
left.* = p.newExpr(E.Binary{ .op = .bin_instanceof, .left = left.*, .right = try p.parseExpr(.compare) }, left.loc);
return .next;
}

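// A minimal sketch of the shape every t_* handler above shares (an editorial
// illustration, not part of the original file; "t_example" is hypothetical):
// left-associative operators re-parse the right-hand side at their own
// precedence, so a following operator of the same level returns .done and
// control falls back to the parseSuffix loop; right-associative assignment
// operators instead parse the right side at Level.assign.sub(1), which lets
// "a = b = c" nest as "a = (b = c)".
//
// fn t_example(p: *P, level: Level, left: *Expr) anyerror!Continuation {
//     if (level.gte(.bitwise_or)) return .done; // caller binds at least as tightly
//     try p.lexer.next(); // consume the operator token
//     left.* = p.newExpr(E.Binary{
//         .op = .bin_bitwise_or,
//         .left = left.*,
//         .right = try p.parseExpr(.bitwise_or),
//     }, left.loc);
//     return .next; // the parseSuffix loop keeps scanning for more suffixes
// }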
pub fn parseSuffix(p: *P, left_and_out: *Expr, level: Level, noalias errors: ?*DeferredErrors, flags: Expr.EFlags) anyerror!void {
var left_value = left_and_out.*;
// Zig has a bug where it creates a new address for stack locals on each `&` usage.
const left = &left_value;

var optional_chain_: ?OptionalChain = null;
const optional_chain = &optional_chain_;
while (true) {
if (p.lexer.loc().start == p.after_arrow_body_loc.start) {
while (true) {
switch (p.lexer.token) {
.t_comma => {
if (level.gte(.comma)) {
break;
}

try p.lexer.next();
left.* = p.newExpr(E.Binary{
.op = .bin_comma,
.left = left.*,
.right = try p.parseExpr(.comma),
}, left.loc);
},
else => {
break;
},
}
}
}

if (comptime is_typescript_enabled) {
// Stop now if this token is forbidden to follow a TypeScript "as" cast
if (p.forbid_suffix_after_as_loc.start > -1 and p.lexer.loc().start == p.forbid_suffix_after_as_loc.start) {
break;
}
}

// Reset the optional chain flag by default. That way we won't accidentally
// treat "c.d" as OptionalChainContinue in "a?.b + c.d".
const old_optional_chain = optional_chain.*;
optional_chain.* = null;

// Each of these tokens is split into a function to conserve
// stack space. Currently in Zig, the compiler does not reuse
// stack space between scopes. This means that having a large
// function with many scopes and local variables consumes
// enormous amounts of stack space.
const continuation = switch (p.lexer.token) {
inline .t_ampersand,
.t_ampersand_ampersand_equals,
.t_ampersand_equals,
.t_asterisk,
.t_asterisk_asterisk,
.t_asterisk_asterisk_equals,
.t_asterisk_equals,
.t_bar,
.t_bar_bar_equals,
.t_bar_equals,
.t_caret,
.t_caret_equals,
.t_comma,
.t_equals,
.t_equals_equals,
.t_equals_equals_equals,
.t_exclamation_equals,
.t_exclamation_equals_equals,
.t_greater_than,
.t_greater_than_equals,
.t_greater_than_greater_than,
.t_greater_than_greater_than_equals,
.t_greater_than_greater_than_greater_than,
.t_greater_than_greater_than_greater_than_equals,
.t_in,
.t_instanceof,
.t_less_than_equals,
.t_less_than_less_than_equals,
.t_minus,
.t_minus_equals,
.t_minus_minus,
.t_percent,
.t_percent_equals,
.t_plus,
.t_plus_equals,
.t_plus_plus,
.t_question_question,
.t_question_question_equals,
.t_slash,
.t_slash_equals,
=> |tag| @field(@This(), @tagName(tag))(p, level, left),
.t_exclamation => t_exclamation(p, optional_chain, old_optional_chain),
.t_bar_bar => t_bar_bar(p, level, left, flags),
.t_ampersand_ampersand => t_ampersand_ampersand(p, level, left, flags),
.t_question => t_question(p, level, errors, left),
.t_question_dot => t_question_dot(p, level, optional_chain, left),
.t_template_head => t_template_head(p, level, optional_chain, old_optional_chain, left),
.t_less_than => t_less_than(p, level, optional_chain, old_optional_chain, left),
.t_open_paren => t_open_paren(p, level, optional_chain, old_optional_chain, left),
.t_no_substitution_template_literal => t_no_substitution_template_literal(p, level, optional_chain, old_optional_chain, left),
.t_open_bracket => t_open_bracket(p, optional_chain, old_optional_chain, left, flags),
.t_dot => t_dot(p, optional_chain, old_optional_chain, left),
.t_less_than_less_than => t_less_than_less_than(p, level, optional_chain, old_optional_chain, left),
else => handleTypescriptAs(p, level),
};

switch (try continuation) {
.next => {},
.done => break,
}
}

left_and_out.* = left_value;
}
};
}
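// How the `inline` switch in parseSuffix resolves handlers (an editorial
// sketch, not part of the original file; assumes handler functions named
// exactly after the lexer token tags): `inline` prongs make the captured tag
// comptime-known, so @field(@This(), @tagName(tag)) looks each function up by
// name at compile time, with no function-pointer table at runtime.
//
// const Demo = struct {
//     fn t_plus() u8 { return '+'; }
//     fn t_minus() u8 { return '-'; }
//     fn dispatch(tag: enum { t_plus, t_minus }) u8 {
//         return switch (tag) {
//             inline .t_plus, .t_minus => |t| @field(@This(), @tagName(t))(),
//         };
//     }
// };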
const Continuation = enum { next, done };
const string = []const u8;

const bun = @import("bun");

const js_ast = bun.ast;
const E = js_ast.E;
const Expr = js_ast.Expr;
const OptionalChain = js_ast.OptionalChain;

const Op = js_ast.Op;
const Level = js_ast.Op.Level;

const js_lexer = bun.js_lexer;
const T = js_lexer.T;

const js_parser = bun.js_parser;
const DeferredErrors = js_parser.DeferredErrors;
const JSXTransformType = js_parser.JSXTransformType;
const SideEffects = js_parser.SideEffects;
const TypeScript = js_parser.TypeScript;
const options = js_parser.options;
@@ -1,460 +0,0 @@
pub fn ParseTypescript(
comptime parser_feature__typescript: bool,
comptime parser_feature__jsx: JSXTransformType,
comptime parser_feature__scan_only: bool,
) type {
return struct {
const P = js_parser.NewParser_(parser_feature__typescript, parser_feature__jsx, parser_feature__scan_only);
const is_typescript_enabled = P.is_typescript_enabled;

pub fn parseTypeScriptDecorators(p: *P) ![]ExprNodeIndex {
if (!is_typescript_enabled) {
return &([_]ExprNodeIndex{});
}

var decorators = ListManaged(ExprNodeIndex).init(p.allocator);
while (p.lexer.token == T.t_at) {
try p.lexer.next();

// Parse a new/call expression with "exprFlagTSDecorator" so we ignore
// EIndex expressions, since they may be part of a computed property:
//
// class Foo {
// @foo ['computed']() {}
// }
//
// This matches the behavior of the TypeScript compiler.
try decorators.ensureUnusedCapacity(1);
try p.parseExprWithFlags(.new, Expr.EFlags.ts_decorator, &decorators.unusedCapacitySlice()[0]);
decorators.items.len += 1;
}

return decorators.items;
}

pub fn parseTypeScriptNamespaceStmt(p: *P, loc: logger.Loc, opts: *ParseStatementOptions) anyerror!Stmt {
// "namespace foo {}";
const name_loc = p.lexer.loc();
const name_text = p.lexer.identifier;
try p.lexer.next();

// Generate the namespace object
const ts_namespace = p.getOrCreateExportedNamespaceMembers(name_text, opts.is_export, false);
const exported_members = ts_namespace.exported_members;
const ns_member_data = js_ast.TSNamespaceMember.Data{ .namespace = exported_members };

// Declare the namespace and create the scope
var name = LocRef{ .loc = name_loc, .ref = null };
const scope_index = try p.pushScopeForParsePass(.entry, loc);
p.current_scope.ts_namespace = ts_namespace;

const old_has_non_local_export_declare_inside_namespace = p.has_non_local_export_declare_inside_namespace;
p.has_non_local_export_declare_inside_namespace = false;

// Parse the statements inside the namespace
var stmts: ListManaged(Stmt) = ListManaged(Stmt).init(p.allocator);
if (p.lexer.token == .t_dot) {
const dot_loc = p.lexer.loc();
try p.lexer.next();

var _opts = ParseStatementOptions{
.is_export = true,
.is_namespace_scope = true,
.is_typescript_declare = opts.is_typescript_declare,
};
stmts.append(try p.parseTypeScriptNamespaceStmt(dot_loc, &_opts)) catch unreachable;
} else if (opts.is_typescript_declare and p.lexer.token != .t_open_brace) {
try p.lexer.expectOrInsertSemicolon();
} else {
try p.lexer.expect(.t_open_brace);
var _opts = ParseStatementOptions{
.is_namespace_scope = true,
.is_typescript_declare = opts.is_typescript_declare,
};
stmts = ListManaged(Stmt).fromOwnedSlice(p.allocator, try p.parseStmtsUpTo(.t_close_brace, &_opts));
try p.lexer.next();
}
const has_non_local_export_declare_inside_namespace = p.has_non_local_export_declare_inside_namespace;
p.has_non_local_export_declare_inside_namespace = old_has_non_local_export_declare_inside_namespace;

// Add any exported members from this namespace's body as members of the
// associated namespace object.
for (stmts.items) |stmt| {
switch (stmt.data) {
.s_function => |func| {
if (func.func.flags.contains(.is_export)) {
const locref = func.func.name.?;
const fn_name = p.symbols.items[locref.ref.?.inner_index].original_name;
try exported_members.put(p.allocator, fn_name, .{
.loc = locref.loc,
.data = .property,
});
try p.ref_to_ts_namespace_member.put(
p.allocator,
locref.ref.?,
.property,
);
}
},
.s_class => |class| {
if (class.is_export) {
const locref = class.class.class_name.?;
const class_name = p.symbols.items[locref.ref.?.inner_index].original_name;
try exported_members.put(p.allocator, class_name, .{
.loc = locref.loc,
.data = .property,
});
try p.ref_to_ts_namespace_member.put(
p.allocator,
locref.ref.?,
.property,
);
}
},
inline .s_namespace, .s_enum => |ns| {
if (ns.is_export) {
if (p.ref_to_ts_namespace_member.get(ns.name.ref.?)) |member_data| {
try exported_members.put(
p.allocator,
p.symbols.items[ns.name.ref.?.inner_index].original_name,
.{
.data = member_data,
.loc = ns.name.loc,
},
);
try p.ref_to_ts_namespace_member.put(
p.allocator,
ns.name.ref.?,
member_data,
);
}
}
},
.s_local => |local| {
if (local.is_export) {
for (local.decls.slice()) |decl| {
try p.defineExportedNamespaceBinding(
exported_members,
decl.binding,
);
}
}
},
else => {},
}
}

// Import assignments may be only used in type expressions, not value
// expressions. If this is the case, the TypeScript compiler removes
// them entirely from the output. That can cause the namespace itself
// to be considered empty and thus be removed.
var import_equal_count: usize = 0;
for (stmts.items) |stmt| {
switch (stmt.data) {
.s_local => |local| {
if (local.was_ts_import_equals and !local.is_export) {
import_equal_count += 1;
}
},
else => {},
}
}

// TypeScript omits namespaces without values. These namespaces
// are only allowed to be used in type expressions. They are
// allowed to be exported, but can also only be used in type
// expressions when imported. So we shouldn't count them as a
// real export either.
//
// TypeScript also strangely counts namespaces containing only
// "export declare" statements as non-empty even though "declare"
// statements are only type annotations. We cannot omit the namespace
// in that case. See https://github.com/evanw/esbuild/issues/1158.
if ((stmts.items.len == import_equal_count and !has_non_local_export_declare_inside_namespace) or opts.is_typescript_declare) {
p.popAndDiscardScope(scope_index);
if (opts.is_module_scope) {
p.local_type_names.put(p.allocator, name_text, true) catch unreachable;
}
return p.s(S.TypeScript{}, loc);
}

var arg_ref = Ref.None;
if (!opts.is_typescript_declare) {
// Avoid a collision with the namespace closure argument variable if the
// namespace exports a symbol with the same name as the namespace itself:
//
// namespace foo {
// export let foo = 123
// console.log(foo)
// }
//
// TypeScript generates the following code in this case:
//
// var foo;
// (function (foo_1) {
// foo_1.foo = 123;
// console.log(foo_1.foo);
// })(foo || (foo = {}));
//
if (p.current_scope.members.contains(name_text)) {
// Add a "_" to make tests easier to read, since non-bundler tests don't
// run the renamer. For external-facing things the renamer will avoid
// collisions automatically so this isn't important for correctness.
arg_ref = p.newSymbol(.hoisted, strings.cat(p.allocator, "_", name_text) catch unreachable) catch unreachable;
p.current_scope.generated.push(p.allocator, arg_ref) catch unreachable;
} else {
arg_ref = p.newSymbol(.hoisted, name_text) catch unreachable;
}
ts_namespace.arg_ref = arg_ref;
}
p.popScope();

if (!opts.is_typescript_declare) {
name.ref = p.declareSymbol(.ts_namespace, name_loc, name_text) catch bun.outOfMemory();
try p.ref_to_ts_namespace_member.put(p.allocator, name.ref.?, ns_member_data);
}

return p.s(S.Namespace{
.name = name,
.arg = arg_ref,
.stmts = stmts.items,
.is_export = opts.is_export,
}, loc);
}

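// Note: both accepted forms below, "import ns = require('x')" and
// "import Foo = Bar.Baz", lower to a single const declaration; the
// `was_ts_import_equals` flag on the resulting S.Local is what lets the
// namespace code above treat unexported, type-only import assignments as
// elidable.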
pub fn parseTypeScriptImportEqualsStmt(p: *P, loc: logger.Loc, opts: *ParseStatementOptions, default_name_loc: logger.Loc, default_name: string) anyerror!Stmt {
try p.lexer.expect(.t_equals);

const kind = S.Local.Kind.k_const;
const name = p.lexer.identifier;
const target = p.newExpr(E.Identifier{ .ref = p.storeNameInRef(name) catch unreachable }, p.lexer.loc());
var value = target;
try p.lexer.expect(.t_identifier);

if (strings.eqlComptime(name, "require") and p.lexer.token == .t_open_paren) {
// "import ns = require('x')"
try p.lexer.next();
const path = p.newExpr(try p.lexer.toEString(), p.lexer.loc());
try p.lexer.expect(.t_string_literal);
try p.lexer.expect(.t_close_paren);
if (!opts.is_typescript_declare) {
const args = try ExprNodeList.one(p.allocator, path);
value = p.newExpr(E.Call{ .target = target, .close_paren_loc = p.lexer.loc(), .args = args }, loc);
}
} else {
// "import Foo = Bar"
// "import Foo = Bar.Baz"
var prev_value = value;
while (p.lexer.token == .t_dot) : (prev_value = value) {
try p.lexer.next();
value = p.newExpr(E.Dot{ .target = prev_value, .name = p.lexer.identifier, .name_loc = p.lexer.loc() }, loc);
try p.lexer.expect(.t_identifier);
}
}

try p.lexer.expectOrInsertSemicolon();

if (opts.is_typescript_declare) {
// "import type foo = require('bar');"
// "import type foo = bar.baz;"
return p.s(S.TypeScript{}, loc);
}

const ref = p.declareSymbol(.constant, default_name_loc, default_name) catch unreachable;
var decls = p.allocator.alloc(Decl, 1) catch unreachable;
decls[0] = Decl{
.binding = p.b(B.Identifier{ .ref = ref }, default_name_loc),
.value = value,
};
return p.s(S.Local{ .kind = kind, .decls = Decl.List.init(decls), .is_export = opts.is_export, .was_ts_import_equals = true }, loc);
}

pub fn parseTypescriptEnumStmt(p: *P, loc: logger.Loc, opts: *ParseStatementOptions) anyerror!Stmt {
try p.lexer.expect(.t_enum);
const name_loc = p.lexer.loc();
const name_text = p.lexer.identifier;
try p.lexer.expect(.t_identifier);
var name = LocRef{ .loc = name_loc, .ref = Ref.None };

// Generate the namespace object
var arg_ref: Ref = undefined;
const ts_namespace = p.getOrCreateExportedNamespaceMembers(name_text, opts.is_export, true);
const exported_members = ts_namespace.exported_members;
const enum_member_data = js_ast.TSNamespaceMember.Data{ .namespace = exported_members };

// Declare the enum and create the scope
const scope_index = p.scopes_in_order.items.len;
if (!opts.is_typescript_declare) {
name.ref = try p.declareSymbol(.ts_enum, name_loc, name_text);
_ = try p.pushScopeForParsePass(.entry, loc);
p.current_scope.ts_namespace = ts_namespace;
p.ref_to_ts_namespace_member.putNoClobber(p.allocator, name.ref.?, enum_member_data) catch bun.outOfMemory();
}

try p.lexer.expect(.t_open_brace);

// Parse the body
var values = std.ArrayList(js_ast.EnumValue).init(p.allocator);
while (p.lexer.token != .t_close_brace) {
var value = js_ast.EnumValue{ .loc = p.lexer.loc(), .ref = Ref.None, .name = undefined, .value = null };
var needs_symbol = false;

// Parse the name
if (p.lexer.token == .t_string_literal) {
value.name = (try p.lexer.toUTF8EString()).slice8();
needs_symbol = js_lexer.isIdentifier(value.name);
} else if (p.lexer.isIdentifierOrKeyword()) {
value.name = p.lexer.identifier;
needs_symbol = true;
} else {
try p.lexer.expect(.t_identifier);
// error early, name is still `undefined`
return error.SyntaxError;
}
try p.lexer.next();

// Identifiers can be referenced by other values
if (!opts.is_typescript_declare and needs_symbol) {
value.ref = try p.declareSymbol(.other, value.loc, value.name);
}

// Parse the initializer
if (p.lexer.token == .t_equals) {
try p.lexer.next();
value.value = try p.parseExpr(.comma);
}

values.append(value) catch unreachable;

exported_members.put(p.allocator, value.name, .{
.loc = value.loc,
.data = .enum_property,
}) catch bun.outOfMemory();

if (p.lexer.token != .t_comma and p.lexer.token != .t_semicolon) {
break;
}

try p.lexer.next();
}

if (!opts.is_typescript_declare) {
// Avoid a collision with the enum closure argument variable if the
// enum exports a symbol with the same name as the enum itself:
//
// enum foo {
// foo = 123,
// bar = foo,
// }
//
// TypeScript generates the following code in this case:
//
// var foo;
// (function (foo) {
// foo[foo["foo"] = 123] = "foo";
// foo[foo["bar"] = 123] = "bar";
// })(foo || (foo = {}));
//
// Whereas in this case:
//
// enum foo {
// bar = foo as any,
// }
//
// TypeScript generates the following code:
//
// var foo;
// (function (foo) {
// foo[foo["bar"] = foo] = "bar";
// })(foo || (foo = {}));
if (p.current_scope.members.contains(name_text)) {
// Add a "_" to make tests easier to read, since non-bundler tests don't
// run the renamer. For external-facing things the renamer will avoid
// collisions automatically so this isn't important for correctness.
arg_ref = p.newSymbol(.hoisted, strings.cat(p.allocator, "_", name_text) catch unreachable) catch unreachable;
p.current_scope.generated.push(p.allocator, arg_ref) catch unreachable;
} else {
arg_ref = p.declareSymbol(.hoisted, name_loc, name_text) catch unreachable;
}
p.ref_to_ts_namespace_member.put(p.allocator, arg_ref, enum_member_data) catch bun.outOfMemory();
ts_namespace.arg_ref = arg_ref;

p.popScope();
}

try p.lexer.expect(.t_close_brace);

if (opts.is_typescript_declare) {
if (opts.is_namespace_scope and opts.is_export) {
p.has_non_local_export_declare_inside_namespace = true;
}

return p.s(S.TypeScript{}, loc);
}

// Save these for when we do out-of-order enum visiting
//
// Make a copy of "scopesInOrder" instead of a slice or index since
// the original array may be flattened in the future by
// "popAndFlattenScope"
p.scopes_in_order_for_enum.putNoClobber(
p.allocator,
loc,
scope_order_clone: {
var count: usize = 0;
for (p.scopes_in_order.items[scope_index..]) |i| {
if (i != null) count += 1;
}

const items = p.allocator.alloc(ScopeOrder, count) catch bun.outOfMemory();
var i: usize = 0;
for (p.scopes_in_order.items[scope_index..]) |item| {
items[i] = item orelse continue;
i += 1;
}
break :scope_order_clone items;
},
) catch bun.outOfMemory();

return p.s(S.Enum{
.name = name,
.arg = arg_ref,
.values = values.items,
.is_export = opts.is_export,
}, loc);
}
};
}

const string = []const u8;

const bun = @import("bun");
const logger = bun.logger;
const strings = bun.strings;

const js_ast = bun.ast;
const B = js_ast.B;
const E = js_ast.E;
const Expr = js_ast.Expr;
const ExprNodeIndex = js_ast.ExprNodeIndex;
const ExprNodeList = js_ast.ExprNodeList;
const LocRef = js_ast.LocRef;
const S = js_ast.S;
const Stmt = js_ast.Stmt;

const G = js_ast.G;
const Decl = G.Decl;

const js_lexer = bun.js_lexer;
const T = js_lexer.T;

const js_parser = bun.js_parser;
const JSXTransformType = js_parser.JSXTransformType;
const ParseStatementOptions = js_parser.ParseStatementOptions;
const Ref = js_parser.Ref;
const ScopeOrder = js_parser.ScopeOrder;
const TypeScript = js_parser.TypeScript;

const std = @import("std");
const List = std.ArrayListUnmanaged;
const ListManaged = std.ArrayList;
File diff suppressed because it is too large
@@ -1,133 +0,0 @@
pub fn Symbols(
comptime parser_feature__typescript: bool,
comptime parser_feature__jsx: JSXTransformType,
comptime parser_feature__scan_only: bool,
) type {
return struct {
const P = js_parser.NewParser_(parser_feature__typescript, parser_feature__jsx, parser_feature__scan_only);

pub fn findSymbol(noalias p: *P, loc: logger.Loc, name: string) !FindSymbolResult {
return findSymbolWithRecordUsage(p, loc, name, true);
}

pub fn findSymbolWithRecordUsage(noalias p: *P, loc: logger.Loc, name: string, comptime record_usage: bool) !FindSymbolResult {
var declare_loc: logger.Loc = logger.Loc.Empty;
var is_inside_with_scope = false;
// This function can show up in profiling.
// That's part of why we do this.
// Instead of rehashing `name` for every scope, we do it just once.
const hash = Scope.getMemberHash(name);
const allocator = p.allocator;

const ref: Ref = brk: {
var current: ?*Scope = p.current_scope;

var did_forbid_arguments = false;

while (current) |scope| : (current = current.?.parent) {
// Track if we're inside a "with" statement body
if (scope.kind == .with) {
is_inside_with_scope = true;
}

// Forbid referencing "arguments" inside class bodies
if (scope.forbid_arguments and !did_forbid_arguments and strings.eqlComptime(name, "arguments")) {
const r = js_lexer.rangeOfIdentifier(p.source, loc);
p.log.addRangeErrorFmt(p.source, r, allocator, "Cannot access \"{s}\" here", .{name}) catch unreachable;
did_forbid_arguments = true;
}

// Is the symbol a member of this scope?
if (scope.getMemberWithHash(name, hash)) |member| {
declare_loc = member.loc;
break :brk member.ref;
}

// Is the symbol a member of this scope's TypeScript namespace?
if (scope.ts_namespace) |ts_namespace| {
if (ts_namespace.exported_members.get(name)) |member| {
if (member.data.isEnum() == ts_namespace.is_enum_scope) {
declare_loc = member.loc;
// If this is an identifier from a sibling TypeScript namespace, then we're
// going to have to generate a property access instead of a simple reference.
// Lazily generate an identifier that represents this property access.
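// e.g. inside "namespace A { export const x = 1; f(x); }", the reference
// to "x" below is rewritten into a property access on the namespace
// closure argument (roughly "<ns arg>.x") via `namespace_alias`.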
const gop = try ts_namespace.property_accesses.getOrPut(p.allocator, name);
if (!gop.found_existing) {
const ref = try p.newSymbol(.other, name);
gop.value_ptr.* = ref;
p.symbols.items[ref.inner_index].namespace_alias = .{
.namespace_ref = ts_namespace.arg_ref,
.alias = name,
};
break :brk ref;
}
break :brk gop.value_ptr.*;
}
}
}
}

// Allocate an "unbound" symbol
p.checkForNonBMPCodePoint(loc, name);
if (comptime !record_usage) {
return FindSymbolResult{
.ref = Ref.None,
.declare_loc = loc,
.is_inside_with_scope = is_inside_with_scope,
};
}

const gpe = p.module_scope.getOrPutMemberWithHash(allocator, name, hash) catch unreachable;

// I don't think this happens?
if (gpe.found_existing) {
const existing = gpe.value_ptr.*;
declare_loc = existing.loc;
break :brk existing.ref;
}

const _ref = p.newSymbol(.unbound, name) catch unreachable;

gpe.key_ptr.* = name;
gpe.value_ptr.* = js_ast.Scope.Member{ .ref = _ref, .loc = loc };

declare_loc = loc;

break :brk _ref;
};

// If we had to pass through a "with" statement body to get to the symbol
// declaration, then this reference could potentially also refer to a
// property on the target object of the "with" statement. We must not rename
// it or we risk changing the behavior of the code.
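// e.g. in "with (obj) { x = 1 }", the write to "x" may actually hit
// "obj.x" at runtime, so renaming "x" could silently change which
// binding is assigned.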
if (is_inside_with_scope) {
p.symbols.items[ref.innerIndex()].must_not_be_renamed = true;
}

// Track how many times we've referenced this symbol
if (comptime record_usage) p.recordUsage(ref);

return FindSymbolResult{
.ref = ref,
.declare_loc = declare_loc,
.is_inside_with_scope = is_inside_with_scope,
};
}
};
}

const string = []const u8;

const bun = @import("bun");
const js_lexer = bun.js_lexer;
const logger = bun.logger;
const strings = bun.strings;

const js_ast = bun.ast;
const Scope = js_ast.Scope;

const js_parser = bun.js_parser;
const FindSymbolResult = js_parser.FindSymbolResult;
const JSXTransformType = js_parser.JSXTransformType;
const Ref = js_parser.Ref;
const TypeScript = js_parser.TypeScript;
1367 src/ast/visit.zig
File diff suppressed because it is too large
@@ -1,451 +0,0 @@
pub fn CreateBinaryExpressionVisitor(
comptime parser_feature__typescript: bool,
comptime parser_feature__jsx: JSXTransformType,
comptime parser_feature__scan_only: bool,
) type {
return struct {
const P = js_parser.NewParser_(parser_feature__typescript, parser_feature__jsx, parser_feature__scan_only);

pub const BinaryExpressionVisitor = struct {
e: *E.Binary,
loc: logger.Loc,
in: ExprIn,

/// Input for visiting the left child
left_in: ExprIn,

/// "Local variables" passed from "checkAndPrepare" to "visitRightAndFinish"
is_stmt_expr: bool = false,

pub fn visitRightAndFinish(
v: *BinaryExpressionVisitor,
p: *P,
) Expr {
var e_ = v.e;
const is_call_target = @as(Expr.Tag, p.call_target) == .e_binary and e_ == p.call_target.e_binary;
// const is_stmt_expr = @as(Expr.Tag, p.stmt_expr_value) == .e_binary and expr.data.e_binary == p.stmt_expr_value.e_binary;
const was_anonymous_named_expr = e_.right.isAnonymousNamed();

// Mark the control flow as dead if the branch is never taken
switch (e_.op) {
.bin_logical_or => {
const side_effects = SideEffects.toBoolean(p, e_.left.data);
if (side_effects.ok and side_effects.value) {
// "true || dead"
const old = p.is_control_flow_dead;
p.is_control_flow_dead = true;
e_.right = p.visitExpr(e_.right);
p.is_control_flow_dead = old;
} else {
e_.right = p.visitExpr(e_.right);
}
},
.bin_logical_and => {
const side_effects = SideEffects.toBoolean(p, e_.left.data);
if (side_effects.ok and !side_effects.value) {
// "false && dead"
const old = p.is_control_flow_dead;
p.is_control_flow_dead = true;
e_.right = p.visitExpr(e_.right);
p.is_control_flow_dead = old;
} else {
e_.right = p.visitExpr(e_.right);
}
},
.bin_nullish_coalescing => {
const side_effects = SideEffects.toNullOrUndefined(p, e_.left.data);
if (side_effects.ok and !side_effects.value) {
// "notNullOrUndefined ?? dead"
const old = p.is_control_flow_dead;
p.is_control_flow_dead = true;
e_.right = p.visitExpr(e_.right);
p.is_control_flow_dead = old;
} else {
e_.right = p.visitExpr(e_.right);
}
},
else => {
e_.right = p.visitExpr(e_.right);
},
}

// Always put constants on the right for equality comparisons to help
// reduce the number of cases we have to check during pattern matching. We
// can only reorder expressions that do not have any side effects.
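// e.g. "1 === x" is reordered to "x === 1", while "x === 1" and
// "1 === 2" already match the expected shape and are left alone.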
switch (e_.op) {
.bin_loose_eq, .bin_loose_ne, .bin_strict_eq, .bin_strict_ne => {
if (SideEffects.isPrimitiveToReorder(e_.left.data) and !SideEffects.isPrimitiveToReorder(e_.right.data)) {
const _left = e_.left;
const _right = e_.right;
e_.left = _right;
e_.right = _left;
}
},
else => {},
}

switch (e_.op) {
.bin_comma => {
// "(1, 2)" => "2"
// "(sideEffects(), 2)" => "(sideEffects(), 2)"
// "(0, this.fn)" => "this.fn"
// "(0, this.fn)()" => "(0, this.fn)()"
if (p.options.features.minify_syntax) {
if (SideEffects.simplifyUnusedExpr(p, e_.left)) |simplified_left| {
e_.left = simplified_left;
} else {
// The left operand has no side effects, but we need to preserve
// the comma operator semantics when used as a call target
if (is_call_target and e_.right.hasValueForThisInCall()) {
// Keep the comma expression to strip "this" binding
e_.left = Expr{ .data = Prefill.Data.Zero, .loc = e_.left.loc };
} else {
return e_.right;
}
}
}
},
.bin_loose_eq => {
const equality = e_.left.data.eql(e_.right.data, p, .loose);
if (equality.ok) {
if (equality.is_require_main_and_module) {
p.ignoreUsageOfRuntimeRequire();
p.ignoreUsage(p.module_ref);
return p.valueForImportMetaMain(false, v.loc);
}

return p.newExpr(
E.Boolean{ .value = equality.equal },
v.loc,
);
}

if (p.options.features.minify_syntax) {
// "x == void 0" => "x == null"
if (e_.left.data == .e_undefined) {
e_.left.data = .{ .e_null = E.Null{} };
} else if (e_.right.data == .e_undefined) {
e_.right.data = .{ .e_null = E.Null{} };
}
}

// const after_op_loc = locAfterOp(e_.);
// TODO: warn about equality check
// TODO: warn about typeof string

},
.bin_strict_eq => {
const equality = e_.left.data.eql(e_.right.data, p, .strict);
if (equality.ok) {
if (equality.is_require_main_and_module) {
p.ignoreUsage(p.module_ref);
p.ignoreUsageOfRuntimeRequire();
return p.valueForImportMetaMain(false, v.loc);
}

return p.newExpr(E.Boolean{ .value = equality.equal }, v.loc);
}

// const after_op_loc = locAfterOp(e_.);
// TODO: warn about equality check
// TODO: warn about typeof string
},
.bin_loose_ne => {
const equality = e_.left.data.eql(e_.right.data, p, .loose);
if (equality.ok) {
if (equality.is_require_main_and_module) {
p.ignoreUsage(p.module_ref);
p.ignoreUsageOfRuntimeRequire();
return p.valueForImportMetaMain(true, v.loc);
}

return p.newExpr(E.Boolean{ .value = !equality.equal }, v.loc);
}
// const after_op_loc = locAfterOp(e_.);
// TODO: warn about equality check
// TODO: warn about typeof string

// "x != void 0" => "x != null"
if (@as(Expr.Tag, e_.right.data) == .e_undefined) {
e_.right = p.newExpr(E.Null{}, e_.right.loc);
}
},
.bin_strict_ne => {
const equality = e_.left.data.eql(e_.right.data, p, .strict);
if (equality.ok) {
if (equality.is_require_main_and_module) {
p.ignoreUsage(p.module_ref);
p.ignoreUsageOfRuntimeRequire();
return p.valueForImportMetaMain(true, v.loc);
}

return p.newExpr(E.Boolean{ .value = !equality.equal }, v.loc);
}
},
.bin_nullish_coalescing => {
const nullorUndefined = SideEffects.toNullOrUndefined(p, e_.left.data);
if (nullorUndefined.ok) {
if (!nullorUndefined.value) {
return e_.left;
} else if (nullorUndefined.side_effects == .no_side_effects) {
// "(null ?? fn)()" => "fn()"
// "(null ?? this.fn)" => "this.fn"
// "(null ?? this.fn)()" => "(0, this.fn)()"
if (is_call_target and e_.right.hasValueForThisInCall()) {
return Expr.joinWithComma(Expr{ .data = .{ .e_number = .{ .value = 0.0 } }, .loc = e_.left.loc }, e_.right, p.allocator);
}

return e_.right;
}
}
},
.bin_logical_or => {
const side_effects = SideEffects.toBoolean(p, e_.left.data);
if (side_effects.ok and side_effects.value) {
return e_.left;
} else if (side_effects.ok and side_effects.side_effects == .no_side_effects) {
// "(0 || fn)()" => "fn()"
// "(0 || this.fn)" => "this.fn"
// "(0 || this.fn)()" => "(0, this.fn)()"
if (is_call_target and e_.right.hasValueForThisInCall()) {
return Expr.joinWithComma(Expr{ .data = Prefill.Data.Zero, .loc = e_.left.loc }, e_.right, p.allocator);
}

return e_.right;
}
},
.bin_logical_and => {
const side_effects = SideEffects.toBoolean(p, e_.left.data);
if (side_effects.ok) {
if (!side_effects.value) {
return e_.left;
} else if (side_effects.side_effects == .no_side_effects) {
// "(1 && fn)()" => "fn()"
// "(1 && this.fn)" => "this.fn"
// "(1 && this.fn)()" => "(0, this.fn)()"
if (is_call_target and e_.right.hasValueForThisInCall()) {
return Expr.joinWithComma(Expr{ .data = Prefill.Data.Zero, .loc = e_.left.loc }, e_.right, p.allocator);
}

return e_.right;
}
}
},
.bin_add => {
if (p.should_fold_typescript_constant_expressions) {
if (Expr.extractNumericValues(e_.left.data, e_.right.data)) |vals| {
return p.newExpr(E.Number{ .value = vals[0] + vals[1] }, v.loc);
}

// "'abc' + 'xyz'" => "'abcxyz'"
if (foldStringAddition(e_.left, e_.right, p.allocator, .normal)) |res| {
return res;
}

// "(x + 'abc') + 'xyz'" => "x + 'abcxyz'"
if (e_.left.data.as(.e_binary)) |left| {
if (left.op == .bin_add) {
if (foldStringAddition(left.right, e_.right, p.allocator, .nested_left)) |result| {
return p.newExpr(E.Binary{
.left = left.left,
.right = result,
.op = .bin_add,
}, e_.left.loc);
}
}
}
}
},
.bin_sub => {
if (p.should_fold_typescript_constant_expressions) {
if (Expr.extractNumericValues(e_.left.data, e_.right.data)) |vals| {
return p.newExpr(E.Number{ .value = vals[0] - vals[1] }, v.loc);
}
}
},
.bin_mul => {
if (p.should_fold_typescript_constant_expressions) {
if (Expr.extractNumericValues(e_.left.data, e_.right.data)) |vals| {
return p.newExpr(E.Number{ .value = vals[0] * vals[1] }, v.loc);
}
}
},
.bin_div => {
if (p.should_fold_typescript_constant_expressions) {
if (Expr.extractNumericValues(e_.left.data, e_.right.data)) |vals| {
return p.newExpr(E.Number{ .value = vals[0] / vals[1] }, v.loc);
}
}
},
.bin_rem => {
if (p.should_fold_typescript_constant_expressions) {
if (Expr.extractNumericValues(e_.left.data, e_.right.data)) |vals| {
const fmod = @extern(*const fn (f64, f64) callconv(.C) f64, .{ .name = "fmod" });
return p.newExpr(
// Use libc fmod here to be consistent with what JavaScriptCore does
// https://github.com/oven-sh/WebKit/blob/7a0b13626e5db69aa5a32d037431d381df5dfb61/Source/JavaScriptCore/runtime/MathCommon.cpp#L574-L597
E.Number{ .value = if (comptime Environment.isNative) fmod(vals[0], vals[1]) else std.math.mod(f64, vals[0], vals[1]) catch 0 },
v.loc,
);
}
}
},
.bin_pow => {
if (p.should_fold_typescript_constant_expressions) {
if (Expr.extractNumericValues(e_.left.data, e_.right.data)) |vals| {
return p.newExpr(E.Number{ .value = jsc.math.pow(vals[0], vals[1]) }, v.loc);
}
}
},
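// JavaScript masks shift counts to the low 5 bits, so e.g. "1 << 33"
// folds to 2 ((33 % 32) == 1); the "% 32" in each case below mirrors
// that semantic.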
.bin_shl => {
if (p.should_fold_typescript_constant_expressions) {
if (Expr.extractNumericValues(e_.left.data, e_.right.data)) |vals| {
const left = floatToInt32(vals[0]);
const right: u8 = @intCast(@as(u32, @bitCast(floatToInt32(vals[1]))) % 32);
const result: i32 = @bitCast(std.math.shl(i32, left, right));
return p.newExpr(E.Number{
.value = @floatFromInt(result),
}, v.loc);
}
}
},
.bin_shr => {
if (p.should_fold_typescript_constant_expressions) {
if (Expr.extractNumericValues(e_.left.data, e_.right.data)) |vals| {
const left = floatToInt32(vals[0]);
const right: u8 = @intCast(@as(u32, @bitCast(floatToInt32(vals[1]))) % 32);
const result: i32 = @bitCast(std.math.shr(i32, left, right));
return p.newExpr(E.Number{
.value = @floatFromInt(result),
}, v.loc);
}
}
},
.bin_u_shr => {
if (p.should_fold_typescript_constant_expressions) {
if (Expr.extractNumericValues(e_.left.data, e_.right.data)) |vals| {
const left: u32 = @bitCast(floatToInt32(vals[0]));
const right: u8 = @intCast(@as(u32, @bitCast(floatToInt32(vals[1]))) % 32);
const result: u32 = std.math.shr(u32, left, right);
return p.newExpr(E.Number{
.value = @floatFromInt(result),
}, v.loc);
}
}
},
.bin_bitwise_and => {
if (p.should_fold_typescript_constant_expressions) {
if (Expr.extractNumericValues(e_.left.data, e_.right.data)) |vals| {
return p.newExpr(E.Number{
.value = @floatFromInt((floatToInt32(vals[0]) & floatToInt32(vals[1]))),
}, v.loc);
}
}
},
.bin_bitwise_or => {
if (p.should_fold_typescript_constant_expressions) {
if (Expr.extractNumericValues(e_.left.data, e_.right.data)) |vals| {
return p.newExpr(E.Number{
.value = @floatFromInt((floatToInt32(vals[0]) | floatToInt32(vals[1]))),
}, v.loc);
}
}
},
.bin_bitwise_xor => {
if (p.should_fold_typescript_constant_expressions) {
if (Expr.extractNumericValues(e_.left.data, e_.right.data)) |vals| {
return p.newExpr(E.Number{
.value = @floatFromInt((floatToInt32(vals[0]) ^ floatToInt32(vals[1]))),
}, v.loc);
}
}
},
// ---------------------------------------------------------------------------------------------------
.bin_assign => {
// Optionally preserve the name
if (e_.left.data == .e_identifier) {
e_.right = p.maybeKeepExprSymbolName(e_.right, p.symbols.items[e_.left.data.e_identifier.ref.innerIndex()].original_name, was_anonymous_named_expr);
}
},
.bin_nullish_coalescing_assign, .bin_logical_or_assign => {
// Special case `{}.field ??= value` to minify to `value`
// This optimization is specifically to target this pattern in HMR:
// `import.meta.hot.data.etc ??= init()`
if (e_.left.data.as(.e_dot)) |dot| {
if (dot.target.data.as(.e_object)) |obj| {
if (obj.properties.len == 0) {
if (!bun.strings.eqlComptime(dot.name, "__proto__"))
return e_.right;
}
}
}
},
else => {},
}

return Expr{ .loc = v.loc, .data = .{ .e_binary = e_ } };
}

pub fn checkAndPrepare(v: *BinaryExpressionVisitor, p: *P) ?Expr {
var e_ = v.e;
switch (e_.left.data) {
// Special-case private identifiers
.e_private_identifier => |_private| {
if (e_.op == .bin_in) {
var private = _private;
const name = p.loadNameFromRef(private.ref);
const result = p.findSymbol(e_.left.loc, name) catch unreachable;
private.ref = result.ref;

// Unlike regular identifiers, there are no unbound private identifiers
const kind: Symbol.Kind = p.symbols.items[result.ref.innerIndex()].kind;
if (!Symbol.isKindPrivate(kind)) {
const r = logger.Range{ .loc = e_.left.loc, .len = @as(i32, @intCast(name.len)) };
p.log.addRangeErrorFmt(p.source, r, p.allocator, "Private name \"{s}\" must be declared in an enclosing class", .{name}) catch unreachable;
}

e_.right = p.visitExpr(e_.right);
e_.left = .{ .data = .{ .e_private_identifier = private }, .loc = e_.left.loc };

// privateSymbolNeedsToBeLowered
return Expr{ .loc = v.loc, .data = .{ .e_binary = e_ } };
}
},
else => {},
}

v.is_stmt_expr = p.stmt_expr_value == .e_binary and p.stmt_expr_value.e_binary == e_;

v.left_in = ExprIn{
.assign_target = e_.op.binaryAssignTarget(),
};

return null;
}
};
};
}

const string = []const u8;

const std = @import("std");

const bun = @import("bun");
const Environment = bun.Environment;
const jsc = bun.jsc;
const logger = bun.logger;
const strings = bun.strings;

const js_ast = bun.ast;
const E = js_ast.E;
const Expr = js_ast.Expr;
const Symbol = js_ast.Symbol;

const js_parser = bun.js_parser;
const ExprIn = js_parser.ExprIn;
const JSXTransformType = js_parser.JSXTransformType;
const Prefill = js_parser.Prefill;
const SideEffects = js_parser.SideEffects;
const floatToInt32 = js_parser.floatToInt32;
const foldStringAddition = js_parser.foldStringAddition;
const options = js_parser.options;
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -5,7 +5,7 @@ pub const Loop = uws.Loop;
pub const KeepAlive = struct {
status: Status = .inactive,

const log = Output.scoped(.KeepAlive, .visible);
const log = Output.scoped(.KeepAlive, false);

const Status = enum { active, inactive, done };

@@ -541,7 +541,7 @@ pub const FilePoll = struct {
pending_free_head: ?*FilePoll = null,
pending_free_tail: ?*FilePoll = null,

const log = Output.scoped(.FilePoll, .visible);
const log = Output.scoped(.FilePoll, false);

pub fn init() Store {
return .{

@@ -3,7 +3,7 @@ pub const Loop = uv.Loop;
pub const KeepAlive = struct {
status: Status = .inactive,

const log = Output.scoped(.KeepAlive, .visible);
const log = Output.scoped(.KeepAlive, false);

const Status = enum { active, inactive, done };

@@ -121,7 +121,7 @@ pub const FilePoll = struct {
pub const Flags = Posix.FilePoll.Flags;
pub const Owner = Posix.FilePoll.Owner;

const log = Output.scoped(.FilePoll, .visible);
const log = Output.scoped(.FilePoll, false);

pub inline fn isActive(this: *const FilePoll) bool {
return this.flags.contains(.has_incremented_poll_count);
@@ -305,7 +305,7 @@ pub const FilePoll = struct {
pending_free_head: ?*FilePoll = null,
pending_free_tail: ?*FilePoll = null,

const log = Output.scoped(.FilePoll, .visible);
const log = Output.scoped(.FilePoll, false);

pub fn init() Store {
return .{

10 src/bake.zig
@@ -721,12 +721,7 @@ pub const Framework = struct {
out.options.react_fast_refresh = mode == .development and renderer == .client and framework.react_fast_refresh != null;
out.options.server_components = framework.server_components != null;

out.options.conditions = try bun.options.ESMConditions.init(
arena,
out.options.target.defaultConditions(),
out.options.target.isServerSide(),
bundler_options.conditions.keys(),
);
out.options.conditions = try bun.options.ESMConditions.init(arena, out.options.target.defaultConditions());
if (renderer == .server and framework.server_components != null) {
try out.options.conditions.appendSlice(&.{"react-server"});
}
@@ -739,6 +734,9 @@ pub const Framework = struct {
if (renderer == .server or renderer == .ssr) {
try out.options.conditions.appendSlice(&.{"node"});
}
if (bundler_options.conditions.count() > 0) {
try out.options.conditions.appendSlice(bundler_options.conditions.keys());
}

out.options.production = mode != .development;
out.options.tree_shaking = mode != .development;

@@ -2,4 +2,4 @@
#include "headers-handwritten.h"

namespace Bake {
} // namespace Bake
} // namespace Bake
@@ -10,9 +10,9 @@

const DevServer = @This();

pub const debug = bun.Output.Scoped(.DevServer, .visible);
pub const igLog = bun.Output.scoped(.IncrementalGraph, .visible);
pub const mapLog = bun.Output.scoped(.SourceMapStore, .visible);
pub const debug = bun.Output.Scoped(.DevServer, false);
pub const igLog = bun.Output.scoped(.IncrementalGraph, false);
pub const mapLog = bun.Output.scoped(.SourceMapStore, false);

pub const Options = struct {
/// Arena must live until DevServer.deinit()
@@ -63,10 +63,10 @@ server: ?bun.jsc.API.AnyServer,
router: FrameworkRouter,
/// Every navigable route has bundling state here.
route_bundles: ArrayListUnmanaged(RouteBundle),
/// All access into IncrementalGraph is guarded by a ThreadLock. This is
/// All access into IncrementalGraph is guarded by a DebugThreadLock. This is
/// only a debug assertion as contention to this is always a bug; If a bundle is
/// active and a file is changed, that change is placed into the next bundle.
graph_safety_lock: bun.safety.ThreadLock,
graph_safety_lock: bun.DebugThreadLock,
client_graph: IncrementalGraph(.client),
server_graph: IncrementalGraph(.server),
/// State populated during bundling and hot updates. Often cleared
@@ -205,6 +205,8 @@ deferred_request_pool: bun.HiveArray(DeferredRequest.Node, DeferredRequest.max_p
/// UWS can handle closing the websocket connections themselves
active_websocket_connections: std.AutoHashMapUnmanaged(*HmrSocket, void),

relative_path_buf: DebugGuardedValue(bun.PathBuffer),

// Debugging

dump_dir: if (bun.FeatureFlags.bake_debugging_features) ?std.fs.Dir else void,
@@ -282,7 +284,7 @@ pub fn init(options: Options) bun.JSOOM!*DevServer {
.server_fetch_function_callback = .empty,
.server_register_update_callback = .empty,
.generation = 0,
.graph_safety_lock = .initUnlocked(),
.graph_safety_lock = .unlocked,
.dump_dir = dump_dir,
.framework = options.framework,
.bundler_options = options.bundler_options,
@@ -333,6 +335,7 @@ pub fn init(options: Options) bun.JSOOM!*DevServer {
.watcher_atomics = undefined,
.log = undefined,
.deferred_request_pool = undefined,
.relative_path_buf = .init(undefined, bun.DebugThreadLock.unlocked),
});
errdefer bun.destroy(dev);
const allocator = dev.allocation_scope.allocator();
@@ -563,6 +566,7 @@ pub fn deinit(dev: *DevServer) void {
.server = {},
.server_transpiler = {},
.ssr_transpiler = {},
.relative_path_buf = {},
.vm = {},

// WebSockets should be deinitialized before other parts
@@ -1249,8 +1253,8 @@ fn onFrameworkRequestWithBundle(
// routerTypeMain
router_type.server_file_string.get() orelse str: {
const name = dev.server_graph.bundled_files.keys()[fromOpaqueFileId(.server, router_type.server_file).get()];
const relative_path_buf = bun.path_buffer_pool.get();
defer bun.path_buffer_pool.put(relative_path_buf);
const relative_path_buf = dev.relative_path_buf.lock();
defer dev.relative_path_buf.unlock();
const str = try bun.String.createUTF8ForJS(dev.vm.global, dev.relativePath(relative_path_buf, name));
router_type.server_file_string = .create(str, dev.vm.global);
break :str str;
@@ -1268,16 +1272,16 @@ fn onFrameworkRequestWithBundle(
const arr = try JSValue.createEmptyArray(global, n);
route = dev.router.routePtr(bundle.route_index);
{
const relative_path_buf = bun.path_buffer_pool.get();
defer bun.path_buffer_pool.put(relative_path_buf);
const relative_path_buf = dev.relative_path_buf.lock();
defer dev.relative_path_buf.unlock();
var route_name = bun.String.cloneUTF8(dev.relativePath(relative_path_buf, keys[fromOpaqueFileId(.server, route.file_page.unwrap().?).get()]));
try arr.putIndex(global, 0, route_name.transferToJS(global));
}
n = 1;
while (true) {
if (route.file_layout.unwrap()) |layout| {
const relative_path_buf = bun.path_buffer_pool.get();
defer bun.path_buffer_pool.put(relative_path_buf);
const relative_path_buf = dev.relative_path_buf.lock();
defer dev.relative_path_buf.unlock();
var layout_name = bun.String.cloneUTF8(dev.relativePath(
relative_path_buf,
keys[fromOpaqueFileId(.server, layout).get()],
@@ -1584,7 +1588,7 @@ pub const DeferredRequest = struct {
/// such as for bundling failures or aborting the server.
/// Does not free the underlying `DeferredRequest.Node`
fn deinitImpl(this: *DeferredRequest) void {
this.ref_count.assertNoRefs();
bun.assert(this.ref_count.active_counts == 0);

defer this.dev.deferred_request_pool.put(@fieldParentPtr("data", this));
switch (this.handler) {
@@ -1646,7 +1650,7 @@ pub fn startAsyncBundle(
// Ref server to keep it from closing.
if (dev.server) |server| server.onPendingRequest();

var heap = ThreadLocalArena.init();
var heap = try ThreadLocalArena.init();
errdefer heap.deinit();
const allocator = heap.allocator();
const ast_memory_allocator = try allocator.create(bun.ast.ASTMemoryAllocator);
@@ -1925,8 +1929,8 @@ fn makeArrayForServerComponentsPatch(dev: *DevServer, global: *jsc.JSGlobalObjec
const arr = try jsc.JSArray.createEmpty(global, items.len);
const names = dev.server_graph.bundled_files.keys();
for (items, 0..) |item, i| {
const relative_path_buf = bun.path_buffer_pool.get();
defer bun.path_buffer_pool.put(relative_path_buf);
const relative_path_buf = dev.relative_path_buf.lock();
defer dev.relative_path_buf.unlock();
const str = bun.String.cloneUTF8(dev.relativePath(relative_path_buf, names[item.get()]));
defer str.deref();
try arr.putIndex(global, @intCast(i), str.toJS(global));
@@ -2599,8 +2603,8 @@ pub fn finalizeBundle(
// Intentionally creating a new scope here so we can limit the lifetime
// of the `relative_path_buf`
{
const relative_path_buf = bun.path_buffer_pool.get();
defer bun.path_buffer_pool.put(relative_path_buf);
const relative_path_buf = dev.relative_path_buf.lock();
defer dev.relative_path_buf.unlock();

// Compute a file name to display
const file_name: ?[]const u8 = if (current_bundle.had_reload_event)
@@ -2903,18 +2907,22 @@ fn encodeSerializedFailures(
buf: *std.ArrayList(u8),
inspector_agent: ?*BunFrontendDevServerAgent,
) bun.OOM!void {
var all_failures_len: usize = 0;
for (failures) |fail| all_failures_len += fail.data.len;
var all_failures = try std.ArrayListUnmanaged(u8).initCapacity(dev.allocator, all_failures_len);
defer all_failures.deinit(dev.allocator);
for (failures) |fail| all_failures.appendSliceAssumeCapacity(fail.data);

const failures_start_buf_pos = buf.items.len;
for (failures) |fail| {
const len = bun.base64.encodeLen(fail.data);

const len = bun.base64.encodeLen(all_failures.items);
try buf.ensureUnusedCapacity(len);
const to_write_into = buf.unusedCapacitySlice();
buf.items.len += bun.base64.encode(to_write_into, all_failures.items);
try buf.ensureUnusedCapacity(len);
const start = buf.items.len;
buf.items.len += len;
const to_write_into = buf.items[start..];

var encoded = to_write_into[0..bun.base64.encode(to_write_into, fail.data)];
while (encoded.len > 0 and encoded[encoded.len - 1] == '=') {
encoded.len -= 1;
}

buf.items.len = start + encoded.len;
}

// Re-use the encoded buffer to avoid encoding failures more times than necessary.
if (inspector_agent) |agent| {
|
||||
@@ -3327,8 +3335,8 @@ pub fn writeVisualizerMessage(dev: *DevServer, payload: *std.ArrayList(u8)) !voi
|
||||
g.bundled_files.values(),
|
||||
0..,
|
||||
) |k, v, i| {
|
||||
const relative_path_buf = bun.path_buffer_pool.get();
|
||||
defer bun.path_buffer_pool.put(relative_path_buf);
|
||||
const relative_path_buf = dev.relative_path_buf.lock();
|
||||
defer dev.relative_path_buf.unlock();
|
||||
const normalized_key = dev.relativePath(relative_path_buf, k);
|
||||
try w.writeInt(u32, @intCast(normalized_key.len), .little);
|
||||
if (k.len == 0) continue;
|
||||
@@ -3760,8 +3768,8 @@ pub fn onRouterCollisionError(dev: *DevServer, rel_path: []const u8, other_id: O
|
||||
},
|
||||
});
|
||||
Output.prettyErrorln(" - <blue>{s}<r>", .{rel_path});
|
||||
const relative_path_buf = bun.path_buffer_pool.get();
|
||||
defer bun.path_buffer_pool.put(relative_path_buf);
|
||||
const relative_path_buf = dev.relative_path_buf.lock();
|
||||
defer dev.relative_path_buf.unlock();
|
||||
Output.prettyErrorln(" - <blue>{s}<r>", .{
|
||||
dev.relativePath(relative_path_buf, dev.server_graph.bundled_files.keys()[fromOpaqueFileId(.server, other_id).get()]),
|
||||
});
|
||||
@@ -3789,9 +3797,16 @@ fn fromOpaqueFileId(comptime side: bake.Side, id: OpaqueFileId) IncrementalGraph
}

/// Returns posix style path, suitable for URLs and reproducible hashes.
/// Calculate the relative path from the dev server root.
/// The caller must provide a PathBuffer from the pool.
/// To avoid overwriting memory, this has a lock for the buffer.
///
///
/// You must pass the pathbuffer contained within `dev.relative_path_buffer`!
pub fn relativePath(dev: *DevServer, relative_path_buf: *bun.PathBuffer, path: []const u8) []const u8 {
// You must pass the pathbuffer contained within `dev.relative_path_buffer`!
bun.assert_eql(
@intFromPtr(relative_path_buf),
@intFromPtr(&dev.relative_path_buf.unsynchronized_value),
);
bun.assert(dev.root[dev.root.len - 1] != '/');

if (!std.fs.path.isAbsolute(path)) {
@@ -4061,6 +4076,7 @@ const SourceMap = bun.sourcemap;
const Watcher = bun.Watcher;
const assert = bun.assert;
const bake = bun.bake;
const DebugGuardedValue = bun.threading.DebugGuardedValue;
const DynamicBitSetUnmanaged = bun.bit_set.DynamicBitSetUnmanaged;
const Log = bun.logger.Log;
const MimeType = bun.http.MimeType;

@@ -124,21 +124,9 @@ pub fn runWithBody(ctx: *ErrorReportRequest, body: []const u8, r: AnyResponse) !
}
const result: *const SourceMapStore.GetResult = &(gop.value_ptr.* orelse continue);

// When before the first generated line, remap to the HMR runtime.
//
// Reminder that the HMR runtime is *not* sourcemapped. And appears
// first in the bundle. This means that the mappings usually look like
// this:
//
// AAAA;;;;;;;;;;;ICGA,qCAA4B;
// ^ ^ generated_mappings[1], actual code
// ^
// ^ generated_mappings[0], we always start it with this
//
// So we can know if the frame is inside the HMR runtime if
// `frame.position.line < generated_mappings[1].lines`.
// When before the first generated line, remap to the HMR runtime
const generated_mappings = result.mappings.generated();
if (generated_mappings.len <= 1 or frame.position.line.zeroBased() < generated_mappings[1].lines.zeroBased()) {
if (frame.position.line.oneBased() < generated_mappings[1].lines) {
frame.source_url = .init(runtime_name); // matches value in source map
frame.position = .invalid;
continue;
@@ -159,12 +147,12 @@ pub fn runWithBody(ctx: *ErrorReportRequest, body: []const u8, r: AnyResponse) !
if (index >= 1 and (index - 1) < result.file_paths.len) {
const abs_path = result.file_paths[@intCast(index - 1)];
frame.source_url = .init(abs_path);
const relative_path_buf = bun.path_buffer_pool.get();
defer bun.path_buffer_pool.put(relative_path_buf);
const relative_path_buf = ctx.dev.relative_path_buf.lock();
const rel_path = ctx.dev.relativePath(relative_path_buf, abs_path);
if (bun.strings.eql(frame.function_name.value.ZigString.slice(), rel_path)) {
frame.function_name = .empty;
}
ctx.dev.relative_path_buf.unlock();
frame.remapped = true;

if (runtime_lines == null) {
@@ -253,8 +241,7 @@ pub fn runWithBody(ctx: *ErrorReportRequest, body: []const u8, r: AnyResponse) !

const src_to_write = frame.source_url.value.ZigString.slice();
if (bun.strings.hasPrefixComptime(src_to_write, "/")) {
const relative_path_buf = bun.path_buffer_pool.get();
defer bun.path_buffer_pool.put(relative_path_buf);
const relative_path_buf = ctx.dev.relative_path_buf.lock();
const file = ctx.dev.relativePath(relative_path_buf, src_to_write);
try w.writeInt(u32, @intCast(file.len), .little);
try w.writeAll(file);

@@ -182,7 +182,7 @@ pub fn IncrementalGraph(side: bake.Side) type {
};

comptime {
if (!Environment.ci_assert) {
if (@import("builtin").mode == .ReleaseFast or @import("builtin").mode == .ReleaseSmall) {
bun.assert_eql(@sizeOf(@This()), @sizeOf(u64) * 5);
bun.assert_eql(@alignOf(@This()), @alignOf([*]u8));
}
@@ -614,7 +614,7 @@ pub fn IncrementalGraph(side: bake.Side) type {
bundle_graph_index: bun.ast.Index,
temp_alloc: Allocator,
) bun.OOM!void {
const log = bun.Output.scoped(.processChunkDependencies, .visible);
const log = bun.Output.scoped(.processChunkDependencies, false);
const file_index: FileIndex = ctx.getCachedIndex(side, bundle_graph_index).*.unwrap() orelse
@panic("unresolved index"); // do not process for failed chunks
log("index id={d} {}:", .{
@@ -715,7 +715,7 @@ pub fn IncrementalGraph(side: bake.Side) type {
fn disconnectEdgeFromDependencyList(g: *@This(), edge_index: EdgeIndex) void {
const edge = &g.edges.items[edge_index.get()];
const imported = edge.imported.get();
const log = bun.Output.scoped(.disconnectEdgeFromDependencyList, .hidden);
const log = bun.Output.scoped(.disconnectEdgeFromDependencyList, true);
log("detach edge={d} | id={d} {} -> id={d} {} (first_dep={d})", .{
edge_index.get(),
edge.dependency.get(),
@@ -804,7 +804,7 @@ pub fn IncrementalGraph(side: bake.Side) type {
css,
},
) bun.OOM!enum { @"continue", stop } {
const log = bun.Output.scoped(.processEdgeAttachment, .visible);
const log = bun.Output.scoped(.processEdgeAttachment, false);

// When an import record is duplicated, it gets marked unused.
// This happens in `ConvertESMExportsForHmr.deduplicatedImport`
@@ -923,7 +923,7 @@ pub fn IncrementalGraph(side: bake.Side) type {
// don't call this function for CSS sources
bun.assert(ctx.loaders[index.get()] != .css);

const log = bun.Output.scoped(.processChunkDependencies, .visible);
const log = bun.Output.scoped(.processChunkDependencies, false);
for (ctx.import_records[index.get()].slice()) |import_record| {
// When an import record is duplicated, it gets marked unused.
// This happens in `ConvertESMExportsForHmr.deduplicatedImport`
@@ -1413,8 +1413,8 @@ pub fn IncrementalGraph(side: bake.Side) type {
// the error list as it changes while also supporting a REPL
log.print(Output.errorWriter()) catch {};
const failure = failure: {
const relative_path_buf = bun.path_buffer_pool.get();
defer bun.path_buffer_pool.put(relative_path_buf);
const relative_path_buf = dev.relative_path_buf.lock();
defer dev.relative_path_buf.unlock();
// this string is just going to be memcpy'd into the log buffer
const owner_display_name = dev.relativePath(relative_path_buf, gop.key_ptr.*);
break :failure try SerializedFailure.initFromLog(
@@ -1637,8 +1637,8 @@ pub fn IncrementalGraph(side: bake.Side) type {
try w.writeAll("}, {\n main: ");
const initial_response_entry_point = options.initial_response_entry_point;
if (initial_response_entry_point.len > 0) {
const relative_path_buf = bun.path_buffer_pool.get();
defer bun.path_buffer_pool.put(relative_path_buf);
const relative_path_buf = g.owner().relative_path_buf.lock();
defer g.owner().relative_path_buf.unlock();
try bun.js_printer.writeJSONString(
g.owner().relativePath(relative_path_buf, initial_response_entry_point),
@TypeOf(w),
@@ -1663,8 +1663,8 @@ pub fn IncrementalGraph(side: bake.Side) type {

if (options.react_refresh_entry_point.len > 0) {
try w.writeAll(",\n refresh: ");
const relative_path_buf = bun.path_buffer_pool.get();
defer bun.path_buffer_pool.put(relative_path_buf);
const relative_path_buf = g.owner().relative_path_buf.lock();
defer g.owner().relative_path_buf.unlock();
try bun.js_printer.writeJSONString(
g.owner().relativePath(relative_path_buf, options.react_refresh_entry_point),
@TypeOf(w),

@@ -80,7 +80,7 @@ pub fn quotedContents(self: *const @This()) []u8 {
}

comptime {
if (!Environment.ci_assert) {
if (!Environment.isDebug) {
assert_eql(@sizeOf(@This()), @sizeOf(usize) * 7);
assert_eql(@alignOf(@This()), @alignOf(usize));
}

@@ -48,6 +48,7 @@ pub fn memoryCostDetailed(dev: *DevServer) MemoryCost {
.server_register_update_callback = {},
.server_fetch_function_callback = {},
.watcher_atomics = {},
.relative_path_buf = {},

// pointers that are not considered a part of DevServer
.vm = {},

@@ -1,5 +1,5 @@
//! Implements building a Bake application to production
const log = bun.Output.scoped(.production, .visible);
const log = bun.Output.scoped(.production, false);

pub fn buildCommand(ctx: bun.cli.Command.Context) !void {
bun.bake.printWarning();
@@ -26,7 +26,7 @@ pub fn buildCommand(ctx: bun.cli.Command.Context) !void {
bun.ast.Expr.Data.Store.create();
bun.ast.Stmt.Data.Store.create();

var arena = bun.MimallocArena.init();
var arena = try bun.MimallocArena.init();
defer arena.deinit();

const vm = try VirtualMachine.initBake(.{
Some files were not shown because too many files have changed in this diff
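
Editor's note: the recurring change in this diff swaps per-call pooled path buffers (`bun.path_buffer_pool.get()` / `put()`) for a single shared `dev.relative_path_buf` accessed through `lock()` / `unlock()`, using the newly imported `bun.threading.DebugGuardedValue`. The sketch below is an illustrative approximation only, not Bun's implementation: the type name `GuardedValue`, the constant `is_debug`, and the choice of `std.Thread.Mutex` are all assumptions; the diff itself only establishes the `lock()`/`unlock()` pair and the `unsynchronized_value` field.

// Hypothetical sketch (assumed names) of a debug-guarded shared buffer.
const std = @import("std");
const builtin = @import("builtin");

pub fn GuardedValue(comptime T: type) type {
    return struct {
        unsynchronized_value: T,
        // Real mutex in debug builds; zero-sized in release builds.
        mutex: if (is_debug) std.Thread.Mutex else void = if (is_debug) .{} else {},

        const is_debug = builtin.mode == .Debug;

        // In debug builds this blocks on (and thus flags) concurrent or
        // re-entrant use; in release builds it is a plain pointer handout.
        pub fn lock(self: *@This()) *T {
            if (is_debug) self.mutex.lock();
            return &self.unsynchronized_value;
        }

        pub fn unlock(self: *@This()) void {
            if (is_debug) self.mutex.unlock();
        }
    };
}

Usage would mirror the call sites in the hunks above: `const buf = dev.relative_path_buf.lock(); defer dev.relative_path_buf.unlock();` then pass `buf` into `relativePath`, which (per the diff) asserts it was handed the guard's own storage via `@intFromPtr(&dev.relative_path_buf.unsynchronized_value)`.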