Mirror of https://github.com/oven-sh/bun, synced 2026-02-17 22:32:06 +00:00

Compare commits: jarred/nes...claude/fix (6 commits)

- 4784ddd69a
- d27b7b01a8
- 730e044c53
- b64edcb490
- 4feede90f5
- fc4624c672

@@ -31,13 +31,6 @@ execute_process(
  ERROR_QUIET
)

if(MACOS_VERSION VERSION_LESS ${CMAKE_OSX_DEPLOYMENT_TARGET})
  message(FATAL_ERROR "Your computer is running macOS ${MACOS_VERSION}, which is older than the target macOS SDK ${CMAKE_OSX_DEPLOYMENT_TARGET}. To fix this, either:\n"
    " - Upgrade your computer to macOS ${CMAKE_OSX_DEPLOYMENT_TARGET} or newer\n"
    " - Download a newer version of the macOS SDK from Apple: https://developer.apple.com/download/all/?q=xcode\n"
    " - Set -DCMAKE_OSX_DEPLOYMENT_TARGET=${MACOS_VERSION}\n")
endif()

execute_process(
  COMMAND xcrun --sdk macosx --show-sdk-path
  OUTPUT_VARIABLE DEFAULT_CMAKE_OSX_SYSROOT

@@ -2,7 +2,7 @@ option(WEBKIT_VERSION "The version of WebKit to use")
option(WEBKIT_LOCAL "If a local version of WebKit should be used instead of downloading")

if(NOT WEBKIT_VERSION)
  set(WEBKIT_VERSION cc5e0bddf7eae1d820cf673158845fe9bd83c094)
  set(WEBKIT_VERSION 9a2cc42ae1bf693a0fd0ceb9b1d7d965d9cfd3ea)
endif()

# Use preview build URL for Windows ARM64 until the fix is merged to main

@@ -365,6 +365,23 @@ The `--bytecode` argument enables bytecode compilation. Every time you run JavaS
console.log(process.execArgv); // ["--smol", "--user-agent=MyBot"]
```

### Runtime arguments via `BUN_OPTIONS`

The `BUN_OPTIONS` environment variable is applied to standalone executables, allowing you to pass runtime flags without recompiling:

```bash terminal icon="terminal"
# Enable CPU profiling on a compiled executable
BUN_OPTIONS="--cpu-prof" ./myapp

# Enable heap profiling with markdown output
BUN_OPTIONS="--heap-prof-md" ./myapp

# Combine multiple flags
BUN_OPTIONS="--smol --cpu-prof-md" ./myapp
```

This is useful for debugging or profiling production executables without rebuilding them.

---

## Automatic config loading

@@ -1333,6 +1333,50 @@ Generate metadata about the build in a structured format. The metafile contains
</Tab>
</Tabs>

#### Markdown metafile

Use `--metafile-md` to generate a markdown metafile, which is LLM-friendly and easy to read in the terminal:

```bash terminal icon="terminal"
bun build ./src/index.ts --outdir ./dist --metafile-md ./dist/meta.md
```

Both `--metafile` and `--metafile-md` can be used together:

```bash terminal icon="terminal"
bun build ./src/index.ts --outdir ./dist --metafile ./dist/meta.json --metafile-md ./dist/meta.md
```

#### `metafile` option formats

In the JavaScript API, `metafile` accepts several forms:

```ts title="build.ts" icon="/icons/typescript.svg"
// Boolean — include metafile in the result object
await Bun.build({
  entrypoints: ["./src/index.ts"],
  outdir: "./dist",
  metafile: true,
});

// String — write JSON metafile to a specific path
await Bun.build({
  entrypoints: ["./src/index.ts"],
  outdir: "./dist",
  metafile: "./dist/meta.json",
});

// Object — specify separate paths for JSON and markdown output
await Bun.build({
  entrypoints: ["./src/index.ts"],
  outdir: "./dist",
  metafile: {
    json: "./dist/meta.json",
    markdown: "./dist/meta.md",
  },
});
```
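
For the boolean form, here is a small consumption sketch. It assumes the metafile is exposed on the build result as `result.metafile` with esbuild-style `inputs`/`outputs` maps (the structure documented below):

```ts
const result = await Bun.build({
  entrypoints: ["./src/index.ts"],
  outdir: "./dist",
  metafile: true,
});

// Walk the outputs map and report each artifact's size.
for (const [path, output] of Object.entries(result.metafile?.outputs ?? {})) {
  console.log(`${path}: ${output.bytes} bytes`);
}
```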

The metafile structure contains:

```ts

@@ -227,6 +227,26 @@ bun --cpu-prof script.js

This generates a `.cpuprofile` file you can open in Chrome DevTools (Performance tab → Load profile) or VS Code's CPU profiler.

### Markdown output

Use `--cpu-prof-md` to generate a markdown CPU profile, which is grep-friendly and designed for LLM analysis:

```sh terminal icon="terminal"
bun --cpu-prof-md script.js
```

Both `--cpu-prof` and `--cpu-prof-md` can be used together to generate both formats at once:

```sh terminal icon="terminal"
bun --cpu-prof --cpu-prof-md script.js
```

You can also trigger profiling via the `BUN_OPTIONS` environment variable:

```sh terminal icon="terminal"
BUN_OPTIONS="--cpu-prof-md" bun script.js
```

### Options

```sh terminal icon="terminal"
@@ -234,8 +254,43 @@ bun --cpu-prof --cpu-prof-name my-profile.cpuprofile script.js
bun --cpu-prof --cpu-prof-dir ./profiles script.js
```

| Flag                         | Description          |
| ---------------------------- | -------------------- |
| `--cpu-prof`                 | Enable profiling     |
| `--cpu-prof-name <filename>` | Set output filename  |
| `--cpu-prof-dir <dir>`       | Set output directory |

| Flag                         | Description                                                 |
| ---------------------------- | ----------------------------------------------------------- |
| `--cpu-prof`                 | Generate a `.cpuprofile` JSON file (Chrome DevTools format) |
| `--cpu-prof-md`              | Generate a markdown CPU profile (grep/LLM-friendly)         |
| `--cpu-prof-name <filename>` | Set output filename                                         |
| `--cpu-prof-dir <dir>`       | Set output directory                                        |

## Heap profiling

Generate heap snapshots on exit to analyze memory usage and find memory leaks.

```sh terminal icon="terminal"
bun --heap-prof script.js
```

This generates a V8 `.heapsnapshot` file that can be loaded in Chrome DevTools (Memory tab → Load).

### Markdown output

Use `--heap-prof-md` to generate a markdown heap profile for CLI analysis:

```sh terminal icon="terminal"
bun --heap-prof-md script.js
```

<Note>If both `--heap-prof` and `--heap-prof-md` are specified, the markdown format is used.</Note>

### Options

```sh terminal icon="terminal"
bun --heap-prof --heap-prof-name my-snapshot.heapsnapshot script.js
bun --heap-prof --heap-prof-dir ./profiles script.js
```

| Flag                          | Description                                |
| ----------------------------- | ------------------------------------------ |
| `--heap-prof`                 | Generate a V8 `.heapsnapshot` file on exit |
| `--heap-prof-md`              | Generate a markdown heap profile on exit   |
| `--heap-prof-name <filename>` | Set output filename                        |
| `--heap-prof-dir <dir>`       | Set output directory                       |

@@ -165,7 +165,7 @@ This page is updated regularly to reflect compatibility status of the latest ver

### [`node:inspector`](https://nodejs.org/api/inspector.html)

🔴 Not implemented.
🟡 Partially implemented. `Profiler` API is supported (`Profiler.enable`, `Profiler.disable`, `Profiler.start`, `Profiler.stop`, `Profiler.setSamplingInterval`). Other inspector APIs are not yet implemented.
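
A minimal sketch of what this enables, assuming the Node-style `Session` API is the transport for these methods (hypothetical usage, not Bun-specific documentation):

```ts
import inspector from "node:inspector";

const session = new inspector.Session();
session.connect();

// Only the Profiler domain is claimed as supported here.
session.post("Profiler.enable", () => {
  session.post("Profiler.start", () => {
    // ... run the code you want to profile ...
    session.post("Profiler.stop", (err, result) => {
      if (!err) console.log(result.profile.nodes.length, "profile nodes");
      session.disconnect();
    });
  });
});
```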

### [`node:repl`](https://nodejs.org/api/repl.html)

@@ -135,6 +135,18 @@ await s3file.write(JSON.stringify({ name: "John", age: 30 }), {
  type: "application/json",
});

// Write with content encoding (e.g. for pre-compressed data)
await s3file.write(compressedData, {
  type: "application/json",
  contentEncoding: "gzip",
});

// Write with content disposition
await s3file.write(pdfData, {
  type: "application/pdf",
  contentDisposition: 'attachment; filename="report.pdf"',
});

// Write using a writer (streaming)
const writer = s3file.writer({ type: "application/json" });
writer.write("Hello");
@@ -188,7 +200,13 @@ const download = s3.presign("my-file.txt"); // GET, text/plain, expires in 24 ho
const upload = s3.presign("my-file", {
  expiresIn: 3600, // 1 hour
  method: "PUT",
  type: "application/json", // No extension for inferring, so we can specify the content type to be JSON
  type: "application/json", // Sets response-content-type in the presigned URL
});

// Presign with content disposition (e.g. force download with a specific filename)
const downloadUrl = s3.presign("report.pdf", {
  expiresIn: 3600,
  contentDisposition: 'attachment; filename="quarterly-report.pdf"',
});

// You can call .presign() on a file reference, but avoid doing so

@@ -460,7 +460,7 @@ console.log(result); // Blob(13) { size: 13, type: "text/plain" }
For cross-platform compatibility, Bun Shell implements a set of builtin commands, in addition to reading commands from the PATH environment variable.

- `cd`: change the working directory
- `ls`: list files in a directory
- `ls`: list files in a directory (supports `-l` for long listing format)
- `rm`: remove files and directories
- `echo`: print text
- `pwd`: print the working directory
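
For example, the `-l` support added above makes a long listing work the same everywhere, since it runs the builtin rather than the platform's `ls` (a small usage sketch):

```ts
import { $ } from "bun";

// Runs Bun Shell's builtin `ls`, so `-l` behaves identically on
// Windows, macOS, and Linux.
const listing = await $`ls -l`.text();
console.log(listing);
```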
@@ -880,6 +880,94 @@ npm/strip-ansi 212,992 chars long-ansi 1.36 ms/iter 1.38 ms

---

## `Bun.wrapAnsi()`

<Note>Drop-in replacement for `wrap-ansi` npm package</Note>

`Bun.wrapAnsi(input: string, columns: number, options?: WrapAnsiOptions): string`

Wrap text to a specified column width while preserving ANSI escape codes and hyperlinks, and handling Unicode/emoji width correctly. This is a native, high-performance alternative to the popular [`wrap-ansi`](https://www.npmjs.com/package/wrap-ansi) npm package.

```ts
// Basic wrapping at 20 columns
Bun.wrapAnsi("The quick brown fox jumps over the lazy dog", 20);
// => "The quick brown fox\njumps over the lazy\ndog"

// Preserves ANSI escape codes
Bun.wrapAnsi("\u001b[31mThe quick brown fox jumps over the lazy dog\u001b[0m", 20);
// => "\u001b[31mThe quick brown fox\njumps over the lazy\ndog\u001b[0m"
```

### Options

```ts
Bun.wrapAnsi("Hello World", 5, {
  hard: true, // Break words that exceed column width (default: false)
  wordWrap: true, // Wrap at word boundaries (default: true)
  trim: true, // Trim leading/trailing whitespace per line (default: true)
  ambiguousIsNarrow: true, // Treat ambiguous-width characters as narrow (default: true)
});
```

| Option              | Default | Description                                                                                                      |
| ------------------- | ------- | ---------------------------------------------------------------------------------------------------------------- |
| `hard`              | `false` | If `true`, break words in the middle if they exceed the column width.                                             |
| `wordWrap`          | `true`  | If `true`, wrap at word boundaries. If `false`, only break at explicit newlines.                                  |
| `trim`              | `true`  | If `true`, trim leading and trailing whitespace from each line.                                                   |
| `ambiguousIsNarrow` | `true`  | If `true`, treat ambiguous-width Unicode characters as 1 column wide. If `false`, treat them as 2 columns wide.   |
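
To make `hard` concrete, a short sketch; the expected outputs are illustrative and follow `wrap-ansi` semantics:

```ts
const word = "supercalifragilistic"; // a single 20-character word

// Default (hard: false): words are never split, so the line overflows.
Bun.wrapAnsi(word, 5);
// => "supercalifragilistic"

// hard: true: the word is broken at the 5-column boundary.
Bun.wrapAnsi(word, 5, { hard: true });
// => "super\ncalif\nragil\nistic"
```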

TypeScript definition:

```ts expandable
namespace Bun {
  export function wrapAnsi(
    /**
     * The string to wrap
     */
    input: string,
    /**
     * The maximum column width
     */
    columns: number,
    /**
     * Wrapping options
     */
    options?: {
      /**
       * If `true`, break words in the middle if they don't fit on a line.
       * If `false`, only break at word boundaries.
       *
       * @default false
       */
      hard?: boolean;
      /**
       * If `true`, wrap at word boundaries when possible.
       * If `false`, don't perform word wrapping (only wrap at explicit newlines).
       *
       * @default true
       */
      wordWrap?: boolean;
      /**
       * If `true`, trim leading and trailing whitespace from each line.
       * If `false`, preserve whitespace.
       *
       * @default true
       */
      trim?: boolean;
      /**
       * If `true`, count ambiguous-width characters as 1 column wide.
       * If `false`, count them as 2 columns wide.
       *
       * @default true
       */
      ambiguousIsNarrow?: boolean;
    },
  ): string;
}
```

---

## `serialize` & `deserialize` in `bun:jsc`

To save a JavaScript value into an ArrayBuffer & back, use `serialize` and `deserialize` from the `"bun:jsc"` module.
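
A minimal round-trip looks like this:

```ts
import { serialize, deserialize } from "bun:jsc";

// Structured-clone a value through a buffer and back.
const buffer = serialize({ hello: "world" });
const value = deserialize(buffer);
console.log(value); // { hello: "world" }
```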
@@ -799,6 +799,8 @@ void NodeVMGlobalObject::finishCreation(JSC::VM& vm)
    auto* parentGlobalObject = defaultGlobalObject(this);
    if (parentGlobalObject && parentGlobalObject->m_asyncContextData) {
        m_asyncContextData.set(vm, this, parentGlobalObject->m_asyncContextData.get());
        if (parentGlobalObject->isAsyncContextTrackingEnabled())
            setAsyncContextTrackingEnabled(true);
    }
}

@@ -311,8 +311,11 @@ pub const BundleV2 = struct {
        }
    }

    const is_js = v.all_loaders[source_index.get()].isJavaScriptLike();
    const is_css = v.all_loaders[source_index.get()].isCSS();
    const loader = v.all_loaders[source_index.get()];
    // HTML is included because it can reference files (e.g., <img src>) that may also
    // be inlined in CSS, and we need to ensure those files are emitted for HTML.
    const is_js_or_html = loader.isJavaScriptLike() or loader == .html;
    const is_css = loader.isCSS();

    const import_record_list_id = source_index;
    // when there are no import records, v index will be invalid
@@ -341,9 +344,9 @@ pub const BundleV2 = struct {
        }
    }

    // Mark if the file is imported by JS and its URL is inlined for CSS
    // Mark if the file is imported by JS/HTML and its URL is inlined for CSS
    const is_inlined = import_record.source_index.isValid() and v.all_urls_for_css[import_record.source_index.get()].len > 0;
    if (is_js and is_inlined) {
    if (is_js_or_html and is_inlined) {
        v.additional_files_imported_by_js_and_inlined_in_css.set(import_record.source_index.get());
    } else if (is_css and is_inlined) {
        v.additional_files_imported_by_css_and_inlined.set(import_record.source_index.get());

@@ -103,21 +103,6 @@ peer_dependencies: bun.LinearFifo(DependencyID, .Dynamic) = .init(default_alloca
// name hash from alias package name -> aliased package dependency version info
known_npm_aliases: NpmAliasMap = .{},

/// Maps PackageID → OverrideMap.NodeID
/// Tracks which override tree node is the context for each resolved package's children.
/// Public: accessed by PackageManagerEnqueue, PackageManagerResolution, and install_with_manager.
pkg_override_ctx: std.AutoHashMapUnmanaged(PackageID, OverrideMap.NodeID) = .{},

/// Maps DependencyID → OverrideMap.NodeID
/// Temporary: holds the override context for a dependency between enqueue and resolution.
/// Public: written in PackageManagerEnqueue, read in PackageManagerResolution.
dep_pending_override: std.AutoHashMapUnmanaged(DependencyID, OverrideMap.NodeID) = .{},

/// Precomputed reverse mapping: DependencyID → owning PackageID.
/// Built lazily to avoid O(N) scans per dependency in the enqueue path.
/// Public: accessed by PackageManagerEnqueue.
dep_parent_map: std.ArrayListUnmanaged(PackageID) = .{},

event_loop: jsc.AnyEventLoop,

// During `installPackages` we learn exactly what dependencies from --trust
@@ -1232,7 +1217,6 @@ pub const assignResolution = resolution.assignResolution;
pub const assignRootResolution = resolution.assignRootResolution;
pub const formatLaterVersionInCache = resolution.formatLaterVersionInCache;
pub const getInstalledVersionsFromDiskCache = resolution.getInstalledVersionsFromDiskCache;
pub const populateOverrideContexts = resolution.populateOverrideContexts;
pub const resolveFromDiskCache = resolution.resolveFromDiskCache;
pub const scopeForPackageName = resolution.scopeForPackageName;
pub const verifyResolutions = resolution.verifyResolutions;
@@ -1338,5 +1322,4 @@ const TaskCallbackContext = bun.install.TaskCallbackContext;
const initializeStore = bun.install.initializeStore;

const Lockfile = bun.install.Lockfile;
const OverrideMap = Lockfile.OverrideMap;
const Package = Lockfile.Package;

@@ -478,64 +478,6 @@ pub fn enqueueDependencyWithMainAndSuccessFn(
    // allow overriding all dependencies unless the dependency is coming directly from an alias, "npm:<this dep>" or
    // if it's a workspaceOnly dependency
    if (!dependency.behavior.isWorkspace() and (dependency.version.tag != .npm or !dependency.version.value.npm.is_alias)) {
        // Phase 1: Tree-based nested override check
        if (this.lockfile.overrides.hasTree()) tree_check: {
            const parent_pkg_id = getParentPackageIdFromMap(this, id);
            const parent_ctx = if (parent_pkg_id != invalid_package_id)
                this.pkg_override_ctx.get(parent_pkg_id) orelse 0
            else
                0;

            // Walk up from context through ancestors, checking each level for matching children.
            // If a child matches name_hash but fails key_spec, try the next sibling with the same name.
            var ctx = parent_ctx;
            while (true) {
                var candidate = this.lockfile.overrides.findChild(ctx, name_hash);
                while (candidate) |child_id| {
                    const child = this.lockfile.overrides.nodes.items[child_id];

                    // Check version constraint on the matched node (e.g., "express@^4.0.0")
                    if (!child.key_spec.isEmpty()) {
                        if (!isKeySpecCompatible(child.key_spec, dependency, this.lockfile.buffers.string_bytes.items)) {
                            // Try next sibling with the same name_hash
                            candidate = this.lockfile.overrides.findChildAfter(ctx, name_hash, child_id);
                            continue;
                        }
                    }

                    // Store context for propagation when this dep resolves
                    this.dep_pending_override.put(this.allocator, id, child_id) catch {};

                    if (child.value) |val| {
                        // Apply the override
                        debug("nested override: {s} -> {s}", .{ this.lockfile.str(&dependency.version.literal), this.lockfile.str(&val.version.literal) });
                        name, name_hash = updateNameAndNameHashFromVersionReplacement(this.lockfile, name, name_hash, val.version);

                        if (val.version.tag == .catalog) {
                            if (this.lockfile.catalogs.get(this.lockfile, val.version.value.catalog, name)) |catalog_dep| {
                                name, name_hash = updateNameAndNameHashFromVersionReplacement(this.lockfile, name, name_hash, catalog_dep.version);
                                break :version catalog_dep.version;
                            }
                        }

                        break :version val.version;
                    }

                    break :tree_check;
                }

                // Move up to parent context
                if (ctx == 0) break;
                const parent = this.lockfile.overrides.nodes.items[ctx].parent;
                if (parent == OverrideMap.invalid_node_id) break;
                ctx = parent;
            }

            // Inherit parent's context even if no override value applied
            this.dep_pending_override.put(this.allocator, id, parent_ctx) catch {};
        }

        // Phase 2: Fall back to flat global override (existing behavior)
        if (this.lockfile.overrides.get(name_hash)) |new| {
            debug("override: {s} -> {s}", .{ this.lockfile.str(&dependency.version.literal), this.lockfile.str(&new.literal) });
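
For orientation, the override shapes this tree walk matches look roughly like the following (a sketch expressed as a TypeScript object literal, assembled from the migration snapshots later in this diff; flat keys take the Phase 2 path, while nested objects and `name@spec` keys take the Phase 1 tree path):

```ts
// Hypothetical package.json "overrides" value:
const overrides = {
  "acorn": "8.11.3", // flat global override
  "webpack": {
    "acorn": "8.11.2", // nested: applies only beneath webpack
  },
  "negotiator@>0.6.0": "0.6.2", // key with a version constraint (key_spec)
};
```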
@@ -1385,104 +1327,6 @@ fn enqueueLocalTarball(
    return &task.threadpool_task;
}

/// Look up the parent PackageID for a given DependencyID using a precomputed
/// reverse mapping, building/extending it lazily as needed.
fn getParentPackageIdFromMap(this: *PackageManager, dep_id: DependencyID) PackageID {
    const total_deps = this.lockfile.buffers.dependencies.items.len;
    if (total_deps == 0) return invalid_package_id;

    // Rebuild/extend the map when new dependencies have been added since last build.
    if (dep_id >= this.dep_parent_map.items.len) {
        const old_len = this.dep_parent_map.items.len;
        this.dep_parent_map.ensureTotalCapacityPrecise(this.allocator, total_deps) catch return invalid_package_id;
        this.dep_parent_map.appendNTimesAssumeCapacity(@as(PackageID, invalid_package_id), total_deps - old_len);

        const dep_lists = this.lockfile.packages.items(.dependencies);
        for (dep_lists, 0..) |dep_slice, pkg_id| {
            const end = dep_slice.off +| dep_slice.len;
            // Only fill entries that are new (>= old_len) or were never built.
            if (end <= old_len) continue;
            const start = @max(dep_slice.off, @as(u32, @intCast(old_len)));
            var i: u32 = start;
            while (i < end) : (i += 1) {
                if (i < this.dep_parent_map.items.len) {
                    this.dep_parent_map.items[i] = @intCast(pkg_id);
                }
            }
        }
    }

    if (dep_id >= this.dep_parent_map.items.len) return invalid_package_id;
    return this.dep_parent_map.items[dep_id];
}

/// Check if a dependency's version range is compatible with a key_spec constraint.
/// For example, if key_spec is "^4.0.0" and the dependency version is "4.18.2" or "^4.0.0",
/// checks if they can intersect (i.e., some version could satisfy both).
fn isKeySpecCompatible(key_spec: String, dependency: *const Dependency, buf: string) bool {
    if (key_spec.isEmpty()) return true;

    // Only check npm dependencies with semver ranges
    if (dependency.version.tag != .npm) return true;

    const key_spec_str = key_spec.slice(buf);
    if (key_spec_str.len == 0) return true;

    // Parse key_spec as a semver query. The parsed query's internal strings
    // reference key_spec_str, so we must use key_spec_str as the list_buf
    // when calling satisfies on key_spec_group.
    const sliced = Semver.SlicedString.init(key_spec_str, key_spec_str);
    var key_spec_group = Semver.Query.parse(
        bun.default_allocator,
        key_spec_str,
        sliced,
    ) catch return true; // on parse error, allow optimistically
    defer key_spec_group.deinit();

    // Check if any boundary version of the dependency's range satisfies the key_spec.
    // Walk the dependency's query list checking left/right boundary versions.
    // Note: dep versions reference `buf` (lockfile strings), key_spec_group references `key_spec_str`.
    const dep_group = dependency.version.value.npm.version;
    var dep_list: ?*const Semver.Query.List = &dep_group.head;
    while (dep_list) |queries| {
        var curr: ?*const Semver.Query = &queries.head;
        while (curr) |query| {
            // key_spec_group's strings are in key_spec_str, version's strings are in buf
            if (query.range.hasLeft()) {
                if (key_spec_group.head.satisfies(query.range.left.version, key_spec_str, buf))
                    return true;
            }
            if (query.range.hasRight()) {
                if (key_spec_group.head.satisfies(query.range.right.version, key_spec_str, buf))
                    return true;
            }
            curr = query.next;
        }
        dep_list = queries.next;
    }

    // Also check if any key_spec boundary satisfies the dependency range
    // dep_group's strings are in buf, key_spec version's strings are in key_spec_str
    var ks_list: ?*const Semver.Query.List = &key_spec_group.head;
    while (ks_list) |queries| {
        var curr: ?*const Semver.Query = &queries.head;
        while (curr) |query| {
            if (query.range.hasLeft()) {
                if (dep_group.head.satisfies(query.range.left.version, buf, key_spec_str))
                    return true;
            }
            if (query.range.hasRight()) {
                if (dep_group.head.satisfies(query.range.right.version, buf, key_spec_str))
                    return true;
            }
            curr = query.next;
        }
        ks_list = queries.next;
    }

    return false;
}

fn updateNameAndNameHashFromVersionReplacement(
    lockfile: *const Lockfile,
    original_name: String,
@@ -2053,7 +1897,6 @@ const TaskCallbackContext = bun.install.TaskCallbackContext;
const invalid_package_id = bun.install.invalid_package_id;

const Lockfile = bun.install.Lockfile;
const OverrideMap = Lockfile.OverrideMap;
const Package = Lockfile.Package;

const NetworkTask = bun.install.NetworkTask;

@@ -152,14 +152,6 @@ pub fn assignResolution(this: *PackageManager, dependency_id: DependencyID, pack
        dep.name = this.lockfile.packages.items(.name)[package_id];
        dep.name_hash = this.lockfile.packages.items(.name_hash)[package_id];
    }

    // Propagate override context (first-write-wins for shared packages)
    if (this.dep_pending_override.get(dependency_id)) |ctx_id| {
        const gop = this.pkg_override_ctx.getOrPut(this.allocator, package_id) catch return;
        if (!gop.found_existing) {
            gop.value_ptr.* = ctx_id;
        }
    }
}

pub fn assignRootResolution(this: *PackageManager, dependency_id: DependencyID, package_id: PackageID) void {
@@ -176,14 +168,6 @@ pub fn assignRootResolution(this: *PackageManager, dependency_id: DependencyID,
        dep.name = this.lockfile.packages.items(.name)[package_id];
        dep.name_hash = this.lockfile.packages.items(.name_hash)[package_id];
    }

    // Propagate override context for root resolution
    if (this.dep_pending_override.get(dependency_id)) |ctx_id| {
        const gop = this.pkg_override_ctx.getOrPut(this.allocator, package_id) catch return;
        if (!gop.found_existing) {
            gop.value_ptr.* = ctx_id;
        }
    }
}

pub fn verifyResolutions(this: *PackageManager, log_level: PackageManager.Options.LogLevel) void {
@@ -233,136 +217,6 @@ pub fn verifyResolutions(this: *PackageManager, log_level: PackageManager.Option
    if (any_failed) this.crash();
}

/// Pre-populate override contexts for all resolved packages.
/// This is needed during re-resolution when overrides change,
/// because existing packages were resolved without context tracking.
/// Does a BFS from root, propagating override tree node IDs along the dependency graph.
pub fn populateOverrideContexts(this: *PackageManager) void {
    if (!this.lockfile.overrides.hasTree()) return;

    const OverrideMap = Lockfile.OverrideMap;
    const packages = this.lockfile.packages.slice();
    const dep_lists = packages.items(.dependencies);
    const res_lists = packages.items(.resolutions);
    const name_hashes = packages.items(.name_hash);
    const resolutions = packages.items(.resolution);
    const buf = this.lockfile.buffers.string_bytes.items;

    // Use a simple worklist (BFS queue)
    const QueueItem = struct { pkg_id: PackageID, ctx: OverrideMap.NodeID };
    var queue = std.ArrayListUnmanaged(QueueItem){};
    defer queue.deinit(this.allocator);

    // Start from root package
    this.pkg_override_ctx.put(this.allocator, 0, 0) catch return;
    queue.append(this.allocator, .{ .pkg_id = 0, .ctx = 0 }) catch return;

    // BFS using index-based iteration to avoid O(N) shifts from orderedRemove(0)
    var queue_idx: usize = 0;
    while (queue_idx < queue.items.len) {
        const item = queue.items[queue_idx];
        queue_idx += 1;
        const deps = dep_lists[item.pkg_id].get(this.lockfile.buffers.dependencies.items);
        const ress = res_lists[item.pkg_id].get(this.lockfile.buffers.resolutions.items);

        for (deps, ress) |dep, resolved_pkg_id| {
            if (resolved_pkg_id >= packages.len) continue;

            // Determine child context: walk siblings with key_spec validation
            // (mirrors the enqueue path's sibling-walk logic)
            var child_ctx = item.ctx;
            const resolved_version = if (resolved_pkg_id < resolutions.len and resolutions[resolved_pkg_id].tag == .npm)
                resolutions[resolved_pkg_id].value.npm.version
            else
                null;

            child_ctx = findValidChild(
                &this.lockfile.overrides,
                item.ctx,
                dep.name_hash,
                resolved_version,
                buf,
            ) orelse blk: {
                // Also check root if current context is not root
                if (item.ctx != 0) {
                    break :blk findValidChild(
                        &this.lockfile.overrides,
                        0,
                        dep.name_hash,
                        resolved_version,
                        buf,
                    ) orelse item.ctx;
                }
                break :blk item.ctx;
            };

            // Also check by resolved package's name_hash (in case dep name differs from pkg name)
            if (child_ctx == item.ctx and resolved_pkg_id < name_hashes.len) {
                const pkg_name_hash = name_hashes[resolved_pkg_id];
                if (pkg_name_hash != dep.name_hash) {
                    child_ctx = findValidChild(
                        &this.lockfile.overrides,
                        item.ctx,
                        pkg_name_hash,
                        resolved_version,
                        buf,
                    ) orelse child_ctx;
                }
            }

            const gop = this.pkg_override_ctx.getOrPut(this.allocator, resolved_pkg_id) catch continue;
            if (!gop.found_existing) {
                gop.value_ptr.* = child_ctx;
                queue.append(this.allocator, .{ .pkg_id = resolved_pkg_id, .ctx = child_ctx }) catch continue;
            }
        }
    }
}

/// Find a child matching name_hash under parent_ctx, walking siblings to skip
/// nodes whose key_spec doesn't match the resolved version.
fn findValidChild(
    overrides: *const Lockfile.OverrideMap,
    parent_ctx: Lockfile.OverrideMap.NodeID,
    name_hash: PackageNameHash,
    resolved_version: ?Semver.Version,
    buf: string,
) ?Lockfile.OverrideMap.NodeID {
    var candidate = overrides.findChild(parent_ctx, name_hash);
    while (candidate) |child_id| {
        const child = overrides.nodes.items[child_id];
        if (!child.key_spec.isEmpty()) {
            if (!isKeySpecSatisfiedByVersion(child.key_spec, resolved_version, buf)) {
                candidate = overrides.findChildAfter(parent_ctx, name_hash, child_id);
                continue;
            }
        }
        return child_id;
    }
    return null;
}

/// Check if a resolved Semver.Version satisfies a key_spec constraint.
/// Used during BFS context propagation where we have actual resolved versions.
fn isKeySpecSatisfiedByVersion(key_spec: String, resolved_version: ?Semver.Version, buf: string) bool {
    if (key_spec.isEmpty()) return true;
    const version = resolved_version orelse return true; // non-npm: match optimistically

    const key_spec_str = key_spec.slice(buf);
    if (key_spec_str.len == 0) return true;

    const sliced = Semver.SlicedString.init(key_spec_str, key_spec_str);
    var key_spec_group = Semver.Query.parse(
        bun.default_allocator,
        key_spec_str,
        sliced,
    ) catch return true; // on parse error, allow optimistically
    defer key_spec_group.deinit();

    // key_spec_group's strings are in key_spec_str, version's strings are in buf
    return key_spec_group.head.satisfies(version, key_spec_str, buf);
}

const string = []const u8;

const std = @import("std");

@@ -237,29 +237,12 @@ pub fn installWithManager(

    const all_name_hashes: []PackageNameHash = brk: {
        if (!manager.summary.overrides_changed) break :brk &.{};

        // Collect hashes from flat maps
        const flat_hashes_len = manager.lockfile.overrides.map.entries.len + lockfile.overrides.map.entries.len;

        // Collect hashes from tree leaf nodes
        const old_tree_hashes = try manager.lockfile.overrides.collectTreeLeafHashes(bun.default_allocator);
        defer if (old_tree_hashes.len > 0) bun.default_allocator.free(old_tree_hashes);
        const new_tree_hashes = try lockfile.overrides.collectTreeLeafHashes(bun.default_allocator);
        defer if (new_tree_hashes.len > 0) bun.default_allocator.free(new_tree_hashes);

        const total_len = flat_hashes_len + old_tree_hashes.len + new_tree_hashes.len;
        if (total_len == 0) break :brk &.{};

        var all_name_hashes = try bun.default_allocator.alloc(PackageNameHash, total_len);
        const hashes_len = manager.lockfile.overrides.map.entries.len + lockfile.overrides.map.entries.len;
        if (hashes_len == 0) break :brk &.{};
        var all_name_hashes = try bun.default_allocator.alloc(PackageNameHash, hashes_len);
        @memcpy(all_name_hashes[0..manager.lockfile.overrides.map.entries.len], manager.lockfile.overrides.map.keys());
        @memcpy(all_name_hashes[manager.lockfile.overrides.map.entries.len .. manager.lockfile.overrides.map.entries.len + lockfile.overrides.map.entries.len], lockfile.overrides.map.keys());
        var dest = manager.lockfile.overrides.map.entries.len + lockfile.overrides.map.entries.len;
        @memcpy(all_name_hashes[dest .. dest + old_tree_hashes.len], old_tree_hashes);
        dest += old_tree_hashes.len;
        @memcpy(all_name_hashes[dest .. dest + new_tree_hashes.len], new_tree_hashes);

        // Deduplicate
        var i: usize = manager.lockfile.overrides.map.entries.len;
        @memcpy(all_name_hashes[manager.lockfile.overrides.map.entries.len..], lockfile.overrides.map.keys());
        var i = manager.lockfile.overrides.map.entries.len;
        while (i < all_name_hashes.len) {
            if (std.mem.indexOfScalar(PackageNameHash, all_name_hashes[0..i], all_name_hashes[i]) != null) {
                all_name_hashes[i] = all_name_hashes[all_name_hashes.len - 1];
@@ -378,10 +361,6 @@ pub fn installWithManager(
    builder.clamp();

    if (manager.summary.overrides_changed and all_name_hashes.len > 0) {
        // Pre-populate override contexts for existing resolved packages
        // so that re-enqueued deps can find their override tree context.
        manager.populateOverrideContexts();

        for (manager.lockfile.buffers.dependencies.items, 0..) |*dependency, dependency_i| {
            if (std.mem.indexOfScalar(PackageNameHash, all_name_hashes, dependency.name_hash)) |_| {
                manager.lockfile.buffers.resolutions.items[dependency_i] = invalid_package_id;

(File diff suppressed because it is too large.)

@@ -596,22 +596,6 @@ pub fn Package(comptime SemverIntType: type) type {
        }
    }

    // Also compare override trees
    if (!summary.overrides_changed) {
        from_lockfile.overrides.sort(from_lockfile);
        to_lockfile.overrides.sort(to_lockfile);
        if (!from_lockfile.overrides.treeEquals(
            &to_lockfile.overrides,
            from_lockfile.buffers.string_bytes.items,
            to_lockfile.buffers.string_bytes.items,
        )) {
            summary.overrides_changed = true;
            if (PackageManager.verbose_install) {
                Output.prettyErrorln("Override tree changed since last install", .{});
            }
        }
    }

    if (is_root) catalogs: {

    // don't sort if lengths are different

@@ -300,7 +300,7 @@ pub const Stringifier = struct {
        );
    }

    if (lockfile.overrides.map.count() > 0 or lockfile.overrides.hasTree()) {
    if (lockfile.overrides.map.count() > 0) {
        lockfile.overrides.sort(lockfile);

        try writeIndent(writer, indent);
@@ -309,33 +309,12 @@ pub const Stringifier = struct {
        \\
        );
        indent.* += 1;

        if (lockfile.overrides.hasTree()) {
            // Write tree nodes recursively, starting from root's children
            try writeOverrideTree(writer, &lockfile.overrides, buf, indent);
        } else {
            // Write flat overrides
            for (lockfile.overrides.map.values()) |override_dep| {
                try writeIndent(writer, indent);
                try writer.print(
                    \\{f}: {f},
                    \\
                , .{ override_dep.name.fmtJson(buf, .{}), override_dep.version.literal.fmtJson(buf, .{}) });
            }
        }

        // Also write flat-only overrides that are not in the tree
        if (lockfile.overrides.hasTree()) {
            for (lockfile.overrides.map.values()) |override_dep| {
                const name_hash = override_dep.name_hash;
                // Skip if this override is already represented in the tree
                if (lockfile.overrides.findChild(0, name_hash) != null) continue;
                try writeIndent(writer, indent);
                try writer.print(
                    \\{f}: {f},
                    \\
                , .{ override_dep.name.fmtJson(buf, .{}), override_dep.version.literal.fmtJson(buf, .{}) });
            }
        for (lockfile.overrides.map.values()) |override_dep| {
            try writeIndent(writer, indent);
            try writer.print(
                \\{f}: {f},
                \\
            , .{ override_dep.name.fmtJson(buf, .{}), override_dep.version.literal.fmtJson(buf, .{}) });
        }

        try decIndent(writer, indent);

@@ -982,63 +961,6 @@ pub const Stringifier = struct {
        try writer.writeAll("},");
    }

    fn writeOverrideTree(writer: *std.Io.Writer, overrides: *const OverrideMap, buf: string, indent: *u32) std.Io.Writer.Error!void {
        if (overrides.nodes.items.len == 0) return;
        try writeOverrideNodeChildren(writer, overrides, 0, buf, indent);
    }

    fn writeOverrideNodeChildren(writer: *std.Io.Writer, overrides: *const OverrideMap, node_id: OverrideMap.NodeID, buf: string, indent: *u32) std.Io.Writer.Error!void {
        if (node_id >= overrides.nodes.items.len) return;
        var child_id = overrides.nodes.items[node_id].first_child;
        while (child_id != OverrideMap.invalid_node_id) {
            if (child_id >= overrides.nodes.items.len) break;
            const child = overrides.nodes.items[child_id];

            try writeIndent(writer, indent);

            if (child.first_child != OverrideMap.invalid_node_id) {
                // Has children: write as object with key = name or name@key_spec
                try writeOverrideNodeKey(writer, child, buf);
                try writer.writeAll(": {\n");
                indent.* += 1;
                if (child.value) |val| {
                    try writeIndent(writer, indent);
                    try writer.print(
                        \\".": {f},
                        \\
                    , .{val.version.literal.fmtJson(buf, .{})});
                }
                try writeOverrideNodeChildren(writer, overrides, child_id, buf, indent);
                try decIndent(writer, indent);
                try writer.writeAll("},\n");
            } else if (child.value) |val| {
                // Leaf with value: write key = name or name@key_spec
                try writeOverrideNodeKey(writer, child, buf);
                try writer.print(
                    \\: {f},
                    \\
                , .{val.version.literal.fmtJson(buf, .{})});
            }

            child_id = child.next_sibling;
        }
    }

    /// Write the JSON key for an override node: "name" or "name@key_spec"
    fn writeOverrideNodeKey(writer: *std.Io.Writer, node: OverrideMap.OverrideNode, buf: string) std.Io.Writer.Error!void {
        const key_spec_str = node.key_spec.slice(buf);
        if (key_spec_str.len > 0) {
            // Write "name@key_spec" as a single JSON string with proper escaping
            try writer.writeAll("\"");
            try writer.print("{f}", .{node.name.fmtJson(buf, .{ .quote = false })});
            try writer.writeAll("@");
            try writer.print("{f}", .{node.key_spec.fmtJson(buf, .{ .quote = false })});
            try writer.writeAll("\"");
        } else {
            try writer.print("{f}", .{node.name.fmtJson(buf, .{})});
        }
    }

    fn writeIndent(writer: *std.Io.Writer, indent: *const u32) std.Io.Writer.Error!void {
        for (0..indent.*) |_| {
            try writer.writeAll(" " ** indent_scalar);

@@ -1301,7 +1223,49 @@ pub fn parseIntoBinaryLockfile(
            return error.InvalidOverridesObject;
        }

        try parseOverridesFromLockfileObj(lockfile, overrides_expr, allocator, &string_buf, log, source, manager, 0);
        for (overrides_expr.data.e_object.properties.slice()) |prop| {
            const key = prop.key.?;
            const value = prop.value.?;

            if (!key.isString() or key.data.e_string.len() == 0) {
                try log.addError(source, key.loc, "Expected a non-empty string");
                return error.InvalidOverridesObject;
            }

            const name_str = key.asString(allocator).?;
            const name_hash = String.Builder.stringHash(name_str);
            const name = try string_buf.appendWithHash(name_str, name_hash);

            // TODO(dylan-conway) also accept object when supported
            if (!value.isString()) {
                try log.addError(source, value.loc, "Expected a string");
                return error.InvalidOverridesObject;
            }

            const version_str = value.asString(allocator).?;
            const version_hash = String.Builder.stringHash(version_str);
            const version = try string_buf.appendWithHash(version_str, version_hash);
            const version_sliced = version.sliced(string_buf.bytes.items);

            const dep: Dependency = .{
                .name = name,
                .name_hash = name_hash,
                .version = Dependency.parse(
                    allocator,
                    name,
                    name_hash,
                    version_sliced.slice,
                    &version_sliced,
                    log,
                    manager,
                ) orelse {
                    try log.addError(source, value.loc, "Invalid override version");
                    return error.InvalidOverridesObject;
                },
            };

            try lockfile.overrides.map.put(allocator, name_hash, dep);
        }
    }

    if (root.get("catalog")) |catalog_expr| {
@@ -2074,139 +2038,6 @@ pub fn parseIntoBinaryLockfile(
    }
}

fn parseOverridesFromLockfileObj(
    lockfile: *BinaryLockfile,
    expr: Expr,
    allocator: std.mem.Allocator,
    string_buf: *String.Buf,
    log: *logger.Log,
    source: *const logger.Source,
    manager: ?*PackageManager,
    parent_node_id: OverrideMap.NodeID,
) !void {
    if (!expr.isObject()) return;

    for (expr.data.e_object.properties.slice()) |prop| {
        const key = prop.key.?;
        const value = prop.value.?;

        if (!key.isString() or key.data.e_string.len() == 0) {
            try log.addError(source, key.loc, "Expected a non-empty string");
            return error.InvalidOverridesObject;
        }

        const raw_key_str = key.asString(allocator).?;
        // Skip "." key (handled by parent)
        if (strings.eql(raw_key_str, ".")) continue;

        // Parse key: "name" or "name@key_spec"
        const parsed_key = OverrideMap.parseKeyWithVersion(raw_key_str);
        const name_str = parsed_key.name;
        const key_spec_str = parsed_key.spec;

        const name_hash = String.Builder.stringHash(name_str);
        const name = try string_buf.appendWithHash(name_str, name_hash);
        const key_spec_s = if (key_spec_str.len > 0) try string_buf.append(key_spec_str) else String{};

        if (value.isString()) {
            const version_str = value.asString(allocator).?;
            const version_hash = String.Builder.stringHash(version_str);
            const version_s = try string_buf.appendWithHash(version_str, version_hash);
            const version_sliced = version_s.sliced(string_buf.bytes.items);

            const dep: Dependency = .{
                .name = name,
                .name_hash = name_hash,
                .version = Dependency.parse(
                    allocator,
                    name,
                    name_hash,
                    version_sliced.slice,
                    &version_sliced,
                    log,
                    manager,
                ) orelse {
                    try log.addError(source, value.loc, "Invalid override version");
                    return error.InvalidOverridesObject;
                },
            };

            if (parent_node_id == 0 and lockfile.overrides.nodes.items.len == 0) {
                try lockfile.overrides.map.put(allocator, name_hash, dep);
            } else {
                try lockfile.overrides.ensureRootNode(allocator);
                _ = try lockfile.overrides.getOrAddChild(allocator, parent_node_id, .{
                    .name = name,
                    .name_hash = name_hash,
                    .key_spec = key_spec_s,
                    .value = dep,
                    .first_child = OverrideMap.invalid_node_id,
                    .next_sibling = OverrideMap.invalid_node_id,
                    .parent = OverrideMap.invalid_node_id,
                }, string_buf.bytes.items);
            }
        } else if (value.isObject()) {
            var self_dep: ?Dependency = null;

            if (value.asProperty(".")) |dot_prop| {
                if (dot_prop.expr.isString()) {
                    const dot_str = dot_prop.expr.asString(allocator).?;
                    const dot_hash = String.Builder.stringHash(dot_str);
                    const dot_s = try string_buf.appendWithHash(dot_str, dot_hash);
                    const dot_sliced = dot_s.sliced(string_buf.bytes.items);
                    self_dep = .{
                        .name = name,
                        .name_hash = name_hash,
                        .version = Dependency.parse(
                            allocator,
                            name,
                            name_hash,
                            dot_sliced.slice,
                            &dot_sliced,
                            log,
                            manager,
                        ) orelse {
                            try log.addError(source, dot_prop.expr.loc, "Invalid override version");
                            return error.InvalidOverridesObject;
                        },
                    };
                }
            }

            var has_children = false;
            for (value.data.e_object.properties.slice()) |child_prop| {
                const ck = child_prop.key.?.asString(allocator).?;
                if (!strings.eql(ck, ".")) {
                    has_children = true;
                    break;
                }
            }

            if (!has_children and self_dep != null and parent_node_id == 0 and lockfile.overrides.nodes.items.len == 0) {
                try lockfile.overrides.map.put(allocator, name_hash, self_dep.?);
            } else {
                try lockfile.overrides.ensureRootNode(allocator);
                if (self_dep != null and parent_node_id == 0) {
                    try lockfile.overrides.map.put(allocator, name_hash, self_dep.?);
                }
                const node_id = try lockfile.overrides.getOrAddChild(allocator, parent_node_id, .{
                    .name = name,
                    .name_hash = name_hash,
                    .key_spec = key_spec_s,
                    .value = self_dep,
                    .first_child = OverrideMap.invalid_node_id,
                    .next_sibling = OverrideMap.invalid_node_id,
                    .parent = OverrideMap.invalid_node_id,
                }, string_buf.bytes.items);
                try parseOverridesFromLockfileObj(lockfile, value, allocator, string_buf, log, source, manager, node_id);
            }
        } else {
            try log.addError(source, value.loc, "Expected a string or object");
            return error.InvalidOverridesObject;
        }
    }
}

fn mapDepToPkg(dep: *Dependency, dep_id: DependencyID, pkg_id: PackageID, lockfile: *BinaryLockfile, pkg_resolutions: []const Resolution) void {
    lockfile.buffers.resolutions.items[dep_id] = pkg_id;

@@ -2423,7 +2254,6 @@ const invalid_package_id = Install.invalid_package_id;
const BinaryLockfile = bun.install.Lockfile;
const DependencySlice = BinaryLockfile.DependencySlice;
const LoadResult = BinaryLockfile.LoadResult;
const OverrideMap = BinaryLockfile.OverrideMap;
const Meta = BinaryLockfile.Package.Meta;

const Npm = Install.Npm;

@@ -8,7 +8,6 @@ const has_workspace_package_ids_tag: u64 = @bitCast(@as([8]u8, "wOrKsPaC".*));
const has_trusted_dependencies_tag: u64 = @bitCast(@as([8]u8, "tRuStEDd".*));
const has_empty_trusted_dependencies_tag: u64 = @bitCast(@as([8]u8, "eMpTrUsT".*));
const has_overrides_tag: u64 = @bitCast(@as([8]u8, "oVeRriDs".*));
const has_nested_overrides_tag: u64 = @bitCast(@as([8]u8, "nStOvRd\x00".*));
const has_catalogs_tag: u64 = @bitCast(@as([8]u8, "cAtAlOgS".*));
const has_config_version_tag: u64 = @bitCast(@as([8]u8, "cNfGvRsN".*));

@@ -157,29 +156,6 @@ pub fn save(this: *Lockfile, options: *const PackageManager.Options, bytes: *std
    );
}

// Write nested override tree (if any)
if (this.overrides.hasTree()) {
    try writer.writeAll(std.mem.asBytes(&has_nested_overrides_tag));

    const node_count: u32 = @intCast(this.overrides.nodes.items.len);
    try writer.writeAll(std.mem.asBytes(&node_count));

    var external_nodes = try std.ArrayListUnmanaged(OverrideMap.OverrideNode.External).initCapacity(z_allocator, node_count);
    defer external_nodes.deinit(z_allocator);
    external_nodes.items.len = node_count;
    for (external_nodes.items, this.overrides.nodes.items) |*dest, src| {
        dest.* = src.toExternal();
    }
    try Lockfile.Buffers.writeArray(
        StreamType,
        stream,
        @TypeOf(writer),
        writer,
        []OverrideMap.OverrideNode.External,
        external_nodes.items,
    );
}

if (this.patched_dependencies.entries.len > 0) {
    for (this.patched_dependencies.values()) |patched_dep| bun.assert(!patched_dep.patchfile_hash_is_null);

@@ -499,42 +475,6 @@ pub fn load(
        }
    }

    // Read nested override tree
    {
        const remaining_in_buffer = total_buffer_size -| stream.pos;

        if (remaining_in_buffer > 8 and total_buffer_size <= stream.buffer.len) {
            const next_num = try reader.readInt(u64, .little);
            if (next_num == has_nested_overrides_tag) {
                const node_count = try reader.readInt(u32, .little);
                if (node_count > 0) {
                    const external_nodes = try Lockfile.Buffers.readArray(
                        stream,
                        allocator,
                        std.ArrayListUnmanaged(OverrideMap.OverrideNode.External),
                    );
                    if (external_nodes.items.len != node_count) {
                        return error.MalformedLockfile;
                    }
                    const context: Dependency.Context = .{
                        .allocator = allocator,
                        .log = log,
                        .buffer = lockfile.buffers.string_bytes.items,
                        .package_manager = manager,
                    };
                    var nodes = &lockfile.overrides.nodes;
                    try nodes.ensureTotalCapacity(allocator, external_nodes.items.len);
                    for (external_nodes.items) |ext_node| {
                        nodes.appendAssumeCapacity(OverrideMap.OverrideNode.fromExternal(ext_node, context));
                    }
                    lockfile.overrides.rebuildParentPointers();
                }
            } else {
                stream.pos -= 8;
            }
        }
    }

    {
        const remaining_in_buffer = total_buffer_size -| stream.pos;

@@ -694,7 +634,6 @@ const PatchedDep = install.PatchedDep;
const alignment_bytes_to_repeat_buffer = install.alignment_bytes_to_repeat_buffer;

const Lockfile = install.Lockfile;
const OverrideMap = Lockfile.OverrideMap;
const PackageIndex = Lockfile.PackageIndex;
const Stream = Lockfile.Stream;
const StringPool = Lockfile.StringPool;

@@ -263,8 +263,8 @@ exports[`pnpm comprehensive migration tests pnpm with patches and overrides: pat
    "express@4.18.2": "patches/express@4.18.2.patch",
  },
  "overrides": {
    "negotiator@>0.6.0": "0.6.2",
    "mime-types": "2.1.33",
    "negotiator@>0.6.0": "0.6.2",
  },
  "packages": {
    "accepts": ["accepts@1.3.8", "", { "dependencies": { "mime-types": "2.1.33", "negotiator": "0.6.2" } }, "sha512-acceptsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=="],

@@ -206,9 +206,6 @@ exports[`yarn.lock migration basic yarn.lock with resolutions: resolutions-yarn-
    },
  },
  "overrides": {
    "webpack": {
      "acorn": "8.11.2",
    },
    "acorn": "8.11.3",
  },
  "packages": {

(File diff suppressed because it is too large.)

@@ -44,4 +44,4 @@
  "undefined != ": 0,
  "undefined == ": 0,
  "usingnamespace": 0
  }
}

test/regression/issue/26575.test.ts (new file, 92 lines)

@@ -0,0 +1,92 @@
import { describe, expect } from "bun:test";
import { readdirSync } from "fs";
import { itBundled } from "../../bundler/expectBundled";

// https://github.com/oven-sh/bun/issues/26575
// When an image is referenced from both HTML (via <img src>) and CSS (via url()),
// and the image is small enough to be inlined in CSS, the image file should still
// be emitted to the output directory for the HTML reference.
describe("issue #26575", () => {
  itBundled("html/image-referenced-by-html-and-css-inlined", {
    outdir: "out/",
    files: {
      "/index.html": `<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="./styles.css">
</head>
<body>
<img src="./img.webp">
</body>
</html>`,
      "/styles.css": `body {
background-image: url("./img.webp");
}`,
      // Small image that will be inlined in CSS (under the inlining threshold)
      // This is a minimal valid WebP file (34 bytes)
      "/img.webp": Buffer.from([
        0x52, 0x49, 0x46, 0x46, 0x1a, 0x00, 0x00, 0x00, 0x57, 0x45, 0x42, 0x50, 0x56, 0x50, 0x38, 0x4c, 0x0d, 0x00,
        0x00, 0x00, 0x2f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      ]),
    },
    entryPoints: ["/index.html"],
    onAfterBundle(api) {
      // The image should be inlined in the CSS (as a data URL)
      const htmlContent = api.readFile("out/index.html");
      const cssMatch = htmlContent.match(/href="(.*\.css)"/);
      expect(cssMatch).not.toBeNull();
      const cssContent = api.readFile("out/" + cssMatch![1]);
      expect(cssContent).toContain("data:image/webp;base64,");

      // The HTML should reference the hashed image file (not inline it)
      expect(htmlContent).not.toContain("data:image/webp");
      const imgSrcMatch = htmlContent.match(/src="(\.\/[^"]+\.webp)"/);
      expect(imgSrcMatch).not.toBeNull();

      // Verify the referenced image file actually exists in the output directory
      const imgFilename = imgSrcMatch![1].replace("./", "");
      const outputFiles = readdirSync(api.outdir);
      expect(outputFiles).toContain(imgFilename);
    },
  });

  // Also test with a larger image that won't be inlined
  itBundled("html/image-referenced-by-html-and-css-not-inlined", {
    outdir: "out/",
    files: {
      "/index.html": `<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="./styles.css">
</head>
<body>
<img src="./img.png">
</body>
</html>`,
      "/styles.css": `body {
background-image: url("./img.png");
}`,
      // Large image content that won't be inlined (over 128KB threshold)
      "/img.png": Buffer.alloc(150000, "x"),
    },
    entryPoints: ["/index.html"],
    onAfterBundle(api) {
      // The image should NOT be inlined in the CSS
      const htmlContent = api.readFile("out/index.html");
      const cssMatch = htmlContent.match(/href="(.*\.css)"/);
      expect(cssMatch).not.toBeNull();
      const cssContent = api.readFile("out/" + cssMatch![1]);
      expect(cssContent).not.toContain("data:image/png;base64,");
      expect(cssContent).toMatch(/url\(".*\.png"\)/);

      // The HTML should reference the hashed image file
      const imgSrcMatch = htmlContent.match(/src="(\.\/[^"]+\.png)"/);
      expect(imgSrcMatch).not.toBeNull();

      // Verify the referenced image file actually exists in the output directory
      const imgFilename = imgSrcMatch![1].replace("./", "");
      const outputFiles = readdirSync(api.outdir);
      expect(outputFiles).toContain(imgFilename);
    },
  });
});