Mirror of https://github.com/oven-sh/bun, synced 2026-02-09 10:28:47 +00:00
feat(install): add nested/scoped dependency overrides

Support npm nested overrides, yarn resolution paths (`parent/child`), and pnpm `>` syntax
(`parent>child`) to scope overrides to specific dependency subtrees. This extends OverrideMap
with a tree structure that tracks override context through the dependency graph during
resolution, enabling overrides like `{ express: { bytes: "1.0.0" } }` to affect `bytes` only
when it appears under `express`. Includes serialization for both the bun.lock and bun.lockb
formats, version-constrained parent keys, and multi-level nesting.

Co-Authored-By: Claude <noreply@anthropic.com>
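For orientation, the three equivalent ways to scope the same override, shown as raw JSON excerpts embedded in Zig multiline string literals (illustrative values only; the shapes follow the npm, yarn, and pnpm conventions named above):

// Hypothetical package.json excerpts for pinning "bytes" only under "express".
const npm_style =
    \\{ "overrides": { "express": { "bytes": "1.0.0" } } }
;
const yarn_style =
    \\{ "resolutions": { "express/bytes": "1.0.0" } }
;
const pnpm_style =
    \\{ "pnpm": { "overrides": { "express>bytes": "1.0.0" } } }
;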
@@ -103,6 +103,18 @@ peer_dependencies: bun.LinearFifo(DependencyID, .Dynamic) = .init(default_alloca
// name hash from alias package name -> aliased package dependency version info
known_npm_aliases: NpmAliasMap = .{},

/// Maps PackageID → OverrideMap.NodeID
/// Tracks which override tree node is the context for each resolved package's children.
pkg_override_ctx: std.AutoHashMapUnmanaged(PackageID, OverrideMap.NodeID) = .{},

/// Maps DependencyID → OverrideMap.NodeID
/// Temporary: holds the override context for a dependency between enqueue and resolution.
dep_pending_override: std.AutoHashMapUnmanaged(DependencyID, OverrideMap.NodeID) = .{},

/// Precomputed reverse mapping: DependencyID → owning PackageID.
/// Built lazily to avoid O(N) scans per dependency in the enqueue path.
dep_parent_map: std.ArrayListUnmanaged(PackageID) = .{},

event_loop: jsc.AnyEventLoop,

// During `installPackages` we learn exactly what dependencies from --trust

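A minimal standalone sketch of the handoff between these two maps (toy integer IDs, not bun's actual types): the node recorded under a DependencyID at enqueue time becomes the package's context once that dependency resolves, and the first writer wins for packages shared across subtrees.

const std = @import("std");

const DependencyID = u32;
const PackageID = u32;
const NodeID = u32;

test "override context handoff" {
    const a = std.testing.allocator;
    var pending: std.AutoHashMapUnmanaged(DependencyID, NodeID) = .{};
    defer pending.deinit(a);
    var pkg_ctx: std.AutoHashMapUnmanaged(PackageID, NodeID) = .{};
    defer pkg_ctx.deinit(a);

    // Enqueue: dependency 7 will resolve under override tree node 3.
    try pending.put(a, 7, 3);

    // Resolve: dependency 7 became package 42; first write wins.
    if (pending.get(7)) |ctx| {
        const gop = try pkg_ctx.getOrPut(a, 42);
        if (!gop.found_existing) gop.value_ptr.* = ctx;
    }
    try std.testing.expectEqual(@as(NodeID, 3), pkg_ctx.get(42).?);
}
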
@@ -1217,6 +1229,7 @@ pub const assignResolution = resolution.assignResolution;
pub const assignRootResolution = resolution.assignRootResolution;
pub const formatLaterVersionInCache = resolution.formatLaterVersionInCache;
pub const getInstalledVersionsFromDiskCache = resolution.getInstalledVersionsFromDiskCache;
pub const populateOverrideContexts = resolution.populateOverrideContexts;
pub const resolveFromDiskCache = resolution.resolveFromDiskCache;
pub const scopeForPackageName = resolution.scopeForPackageName;
pub const verifyResolutions = resolution.verifyResolutions;

@@ -1322,4 +1335,5 @@ const TaskCallbackContext = bun.install.TaskCallbackContext;
const initializeStore = bun.install.initializeStore;

const Lockfile = bun.install.Lockfile;
const OverrideMap = Lockfile.OverrideMap;
const Package = Lockfile.Package;

@@ -478,6 +478,64 @@ pub fn enqueueDependencyWithMainAndSuccessFn(
// allow overriding all dependencies unless the dependency is coming directly from an alias, "npm:<this dep>" or
// if it's a workspaceOnly dependency
if (!dependency.behavior.isWorkspace() and (dependency.version.tag != .npm or !dependency.version.value.npm.is_alias)) {
    // Phase 1: Tree-based nested override check
    if (this.lockfile.overrides.hasTree()) tree_check: {
        const parent_pkg_id = getParentPackageIdFromMap(this, id);
        const parent_ctx = if (parent_pkg_id != invalid_package_id)
            this.pkg_override_ctx.get(parent_pkg_id) orelse 0
        else
            0;

        // Walk up from context through ancestors, checking each level for matching children.
        // If a child matches name_hash but fails key_spec, try the next sibling with the same name.
        var ctx = parent_ctx;
        while (true) {
            var candidate = this.lockfile.overrides.findChild(ctx, name_hash);
            while (candidate) |child_id| {
                const child = this.lockfile.overrides.nodes.items[child_id];

                // Check version constraint on the matched node (e.g., "express@^4.0.0")
                if (!child.key_spec.isEmpty()) {
                    if (!isKeySpecCompatible(child.key_spec, dependency, this.lockfile.buffers.string_bytes.items)) {
                        // Try next sibling with the same name_hash
                        candidate = this.lockfile.overrides.findChildAfter(ctx, name_hash, child_id);
                        continue;
                    }
                }

                // Store context for propagation when this dep resolves
                this.dep_pending_override.put(this.allocator, id, child_id) catch {};

                if (child.value) |val| {
                    // Apply the override
                    debug("nested override: {s} -> {s}", .{ this.lockfile.str(&dependency.version.literal), this.lockfile.str(&val.version.literal) });
                    name, name_hash = updateNameAndNameHashFromVersionReplacement(this.lockfile, name, name_hash, val.version);

                    if (val.version.tag == .catalog) {
                        if (this.lockfile.catalogs.get(this.lockfile, val.version.value.catalog, name)) |catalog_dep| {
                            name, name_hash = updateNameAndNameHashFromVersionReplacement(this.lockfile, name, name_hash, catalog_dep.version);
                            break :version catalog_dep.version;
                        }
                    }

                    break :version val.version;
                }

                break :tree_check;
            }

            // Move up to parent context
            if (ctx == 0) break;
            const parent = this.lockfile.overrides.nodes.items[ctx].parent;
            if (parent == OverrideMap.invalid_node_id) break;
            ctx = parent;
        }

        // Inherit parent's context even if no override value applied
        this.dep_pending_override.put(this.allocator, id, parent_ctx) catch {};
    }

    // Phase 2: Fall back to flat global override (existing behavior)
    if (this.lockfile.overrides.get(name_hash)) |new| {
        debug("override: {s} -> {s}", .{ this.lockfile.str(&dependency.version.literal), this.lockfile.str(&new.literal) });

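A self-contained sketch of that ancestor walk over the same sibling-linked node layout (toy types and hashes; the key_spec check and findChildAfter retry are elided):

const std = @import("std");

const invalid: u32 = std.math.maxInt(u32);

const Node = struct {
    name_hash: u64,
    parent: u32 = invalid,
    first_child: u32 = invalid,
    next_sibling: u32 = invalid,
};

fn findChild(nodes: []const Node, ctx: u32, name_hash: u64) ?u32 {
    var id = nodes[ctx].first_child;
    while (id != invalid) : (id = nodes[id].next_sibling) {
        if (nodes[id].name_hash == name_hash) return id;
    }
    return null;
}

fn findOverride(nodes: []const Node, start_ctx: u32, name_hash: u64) ?u32 {
    var ctx = start_ctx;
    while (true) {
        if (findChild(nodes, ctx, name_hash)) |id| return id;
        if (ctx == 0 or nodes[ctx].parent == invalid) return null;
        ctx = nodes[ctx].parent;
    }
}

test "a match can come from an ancestor level" {
    // root(0) -> express(1) -> bytes(2)
    const nodes = [_]Node{
        .{ .name_hash = 0, .first_child = 1 },
        .{ .name_hash = 111, .parent = 0, .first_child = 2 },
        .{ .name_hash = 222, .parent = 1 },
    };
    // From express's context, "bytes" matches a direct child.
    try std.testing.expectEqual(@as(?u32, 2), findOverride(&nodes, 1, 222));
    // From bytes's own context, walking up still finds it under express.
    try std.testing.expectEqual(@as(?u32, 2), findOverride(&nodes, 2, 222));
}
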
@@ -1327,6 +1385,104 @@ fn enqueueLocalTarball(
    return &task.threadpool_task;
}

/// Look up the parent PackageID for a given DependencyID using a precomputed
/// reverse mapping, building/extending it lazily as needed.
fn getParentPackageIdFromMap(this: *PackageManager, dep_id: DependencyID) PackageID {
    const total_deps = this.lockfile.buffers.dependencies.items.len;
    if (total_deps == 0) return invalid_package_id;

    // Rebuild/extend the map when new dependencies have been added since last build.
    if (dep_id >= this.dep_parent_map.items.len) {
        const old_len = this.dep_parent_map.items.len;
        this.dep_parent_map.ensureTotalCapacityPrecise(this.allocator, total_deps) catch return invalid_package_id;
        this.dep_parent_map.appendNTimesAssumeCapacity(@as(PackageID, invalid_package_id), total_deps - old_len);

        const dep_lists = this.lockfile.packages.items(.dependencies);
        for (dep_lists, 0..) |dep_slice, pkg_id| {
            const end = dep_slice.off +| dep_slice.len;
            // Only fill entries that are new (>= old_len) or were never built.
            if (end <= old_len) continue;
            const start = @max(dep_slice.off, @as(u32, @intCast(old_len)));
            var i: u32 = start;
            while (i < end) : (i += 1) {
                if (i < this.dep_parent_map.items.len) {
                    this.dep_parent_map.items[i] = @intCast(pkg_id);
                }
            }
        }
    }

    if (dep_id >= this.dep_parent_map.items.len) return invalid_package_id;
    return this.dep_parent_map.items[dep_id];
}

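The reverse mapping works because each package owns a contiguous [off, off+len) slice of the global dependency buffer. A toy sketch of filling dep -> parent in one pass (assumed layout, not bun's actual types):

const std = @import("std");

const Slice = struct { off: u32, len: u32 };

test "dependency -> owning package" {
    const a = std.testing.allocator;
    // package 0 owns deps 0..2, package 1 owns deps 2..5
    const dep_lists = [_]Slice{ .{ .off = 0, .len = 2 }, .{ .off = 2, .len = 3 } };

    var parent = std.ArrayListUnmanaged(u32){};
    defer parent.deinit(a);
    try parent.appendNTimes(a, std.math.maxInt(u32), 5);

    for (dep_lists, 0..) |s, pkg_id| {
        for (s.off..s.off + s.len) |dep_id| {
            parent.items[dep_id] = @intCast(pkg_id);
        }
    }
    try std.testing.expectEqual(@as(u32, 1), parent.items[3]);
}
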
/// Check if a dependency's version range is compatible with a key_spec constraint.
/// For example, if key_spec is "^4.0.0" and the dependency version is "4.18.2" or "^4.0.0",
/// checks if they can intersect (i.e., some version could satisfy both).
fn isKeySpecCompatible(key_spec: String, dependency: *const Dependency, buf: string) bool {
    if (key_spec.isEmpty()) return true;

    // Only check npm dependencies with semver ranges
    if (dependency.version.tag != .npm) return true;

    const key_spec_str = key_spec.slice(buf);
    if (key_spec_str.len == 0) return true;

    // Parse key_spec as a semver query. The parsed query's internal strings
    // reference key_spec_str, so we must use key_spec_str as the list_buf
    // when calling satisfies on key_spec_group.
    const sliced = Semver.SlicedString.init(key_spec_str, key_spec_str);
    var key_spec_group = Semver.Query.parse(
        bun.default_allocator,
        key_spec_str,
        sliced,
    ) catch return true; // on parse error, allow optimistically
    defer key_spec_group.deinit();

    // Check if any boundary version of the dependency's range satisfies the key_spec.
    // Walk the dependency's query list checking left/right boundary versions.
    // Note: dep versions reference `buf` (lockfile strings), key_spec_group references `key_spec_str`.
    const dep_group = dependency.version.value.npm.version;
    var dep_list: ?*const Semver.Query.List = &dep_group.head;
    while (dep_list) |queries| {
        var curr: ?*const Semver.Query = &queries.head;
        while (curr) |query| {
            // key_spec_group's strings are in key_spec_str, version's strings are in buf
            if (query.range.hasLeft()) {
                if (key_spec_group.head.satisfies(query.range.left.version, key_spec_str, buf))
                    return true;
            }
            if (query.range.hasRight()) {
                if (key_spec_group.head.satisfies(query.range.right.version, key_spec_str, buf))
                    return true;
            }
            curr = query.next;
        }
        dep_list = queries.next;
    }

    // Also check if any key_spec boundary satisfies the dependency range.
    // dep_group's strings are in buf, key_spec version's strings are in key_spec_str.
    var ks_list: ?*const Semver.Query.List = &key_spec_group.head;
    while (ks_list) |queries| {
        var curr: ?*const Semver.Query = &queries.head;
        while (curr) |query| {
            if (query.range.hasLeft()) {
                if (dep_group.head.satisfies(query.range.left.version, buf, key_spec_str))
                    return true;
            }
            if (query.range.hasRight()) {
                if (dep_group.head.satisfies(query.range.right.version, buf, key_spec_str))
                    return true;
            }
            curr = query.next;
        }
        ks_list = queries.next;
    }

    return false;
}

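The function above approximates range intersection by testing boundary versions against the opposite range. A simplified standalone model of that boundary test on closed integer ranges (for closed intervals the test is exact; semver ranges with open bounds make it a heuristic):

const std = @import("std");

const Range = struct { lo: i64, hi: i64 };

fn contains(r: Range, v: i64) bool {
    return v >= r.lo and v <= r.hi;
}

// Two closed intervals intersect iff a boundary of one lies inside the other.
fn mayIntersect(a: Range, b: Range) bool {
    return contains(b, a.lo) or contains(b, a.hi) or
        contains(a, b.lo) or contains(a, b.hi);
}

test "boundary-based intersection" {
    try std.testing.expect(mayIntersect(.{ .lo = 4, .hi = 5 }, .{ .lo = 4, .hi = 4 }));
    try std.testing.expect(!mayIntersect(.{ .lo = 1, .hi = 2 }, .{ .lo = 3, .hi = 4 }));
}
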
fn updateNameAndNameHashFromVersionReplacement(
    lockfile: *const Lockfile,
    original_name: String,

@@ -1897,6 +2053,7 @@ const TaskCallbackContext = bun.install.TaskCallbackContext;
const invalid_package_id = bun.install.invalid_package_id;

const Lockfile = bun.install.Lockfile;
const OverrideMap = Lockfile.OverrideMap;
const Package = Lockfile.Package;

const NetworkTask = bun.install.NetworkTask;

@@ -152,6 +152,14 @@ pub fn assignResolution(this: *PackageManager, dependency_id: DependencyID, pack
    dep.name = this.lockfile.packages.items(.name)[package_id];
    dep.name_hash = this.lockfile.packages.items(.name_hash)[package_id];
}

// Propagate override context (first-write-wins for shared packages)
if (this.dep_pending_override.get(dependency_id)) |ctx_id| {
    const gop = this.pkg_override_ctx.getOrPut(this.allocator, package_id) catch return;
    if (!gop.found_existing) {
        gop.value_ptr.* = ctx_id;
    }
}
}

pub fn assignRootResolution(this: *PackageManager, dependency_id: DependencyID, package_id: PackageID) void {

@@ -168,6 +176,14 @@ pub fn assignRootResolution(this: *PackageManager, dependency_id: DependencyID,
    dep.name = this.lockfile.packages.items(.name)[package_id];
    dep.name_hash = this.lockfile.packages.items(.name_hash)[package_id];
}

// Propagate override context for root resolution
if (this.dep_pending_override.get(dependency_id)) |ctx_id| {
    const gop = this.pkg_override_ctx.getOrPut(this.allocator, package_id) catch return;
    if (!gop.found_existing) {
        gop.value_ptr.* = ctx_id;
    }
}
}

pub fn verifyResolutions(this: *PackageManager, log_level: PackageManager.Options.LogLevel) void {

@@ -217,6 +233,70 @@ pub fn verifyResolutions(this: *PackageManager, log_level: PackageManager.Option
    if (any_failed) this.crash();
}

/// Pre-populate override contexts for all resolved packages.
/// This is needed during re-resolution when overrides change,
/// because existing packages were resolved without context tracking.
/// Does a BFS from root, propagating override tree node IDs along the dependency graph.
pub fn populateOverrideContexts(this: *PackageManager) void {
    if (!this.lockfile.overrides.hasTree()) return;

    const OverrideMap = Lockfile.OverrideMap;
    const packages = this.lockfile.packages.slice();
    const dep_lists = packages.items(.dependencies);
    const res_lists = packages.items(.resolutions);
    const name_hashes = packages.items(.name_hash);

    // Use a simple worklist (BFS queue)
    const QueueItem = struct { pkg_id: PackageID, ctx: OverrideMap.NodeID };
    var queue = std.ArrayListUnmanaged(QueueItem){};
    defer queue.deinit(this.allocator);

    // Start from root package
    this.pkg_override_ctx.put(this.allocator, 0, 0) catch return;
    queue.append(this.allocator, .{ .pkg_id = 0, .ctx = 0 }) catch return;

    // BFS using index-based iteration to avoid O(N) shifts from orderedRemove(0)
    var queue_idx: usize = 0;
    while (queue_idx < queue.items.len) {
        const item = queue.items[queue_idx];
        queue_idx += 1;
        const deps = dep_lists[item.pkg_id].get(this.lockfile.buffers.dependencies.items);
        const ress = res_lists[item.pkg_id].get(this.lockfile.buffers.resolutions.items);

        for (deps, ress) |dep, resolved_pkg_id| {
            if (resolved_pkg_id >= packages.len) continue;

            // Determine child context: if the dep matches a child in the override tree, use that child node
            var child_ctx = item.ctx;
            if (this.lockfile.overrides.findChild(item.ctx, dep.name_hash)) |child_id| {
                child_ctx = child_id;
            } else if (item.ctx != 0) {
                // Also check if the dep matches a child of root (for packages that match
                // a root-level entry in the tree but are discovered via a non-matching path)
                if (this.lockfile.overrides.findChild(0, dep.name_hash)) |child_id| {
                    child_ctx = child_id;
                }
            }

            // Also check by resolved package's name_hash (in case dep name differs from pkg name)
            if (child_ctx == item.ctx and resolved_pkg_id < name_hashes.len) {
                const pkg_name_hash = name_hashes[resolved_pkg_id];
                if (pkg_name_hash != dep.name_hash) {
                    if (this.lockfile.overrides.findChild(item.ctx, pkg_name_hash)) |child_id| {
                        child_ctx = child_id;
                    }
                }
            }

            const gop = this.pkg_override_ctx.getOrPut(this.allocator, resolved_pkg_id) catch continue;
            if (!gop.found_existing) {
                gop.value_ptr.* = child_ctx;
                queue.append(this.allocator, .{ .pkg_id = resolved_pkg_id, .ctx = child_ctx }) catch continue;
            }
        }
    }
}

const string = []const u8;

const std = @import("std");

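The same worklist pattern in isolation (toy adjacency list): appending while reading with a moving index yields BFS order without the O(N) shifting that orderedRemove(0) would cost:

const std = @import("std");

test "index-based BFS worklist" {
    const a = std.testing.allocator;
    // adjacency: 0 -> {1, 2}, 1 -> {2}, 2 -> {}
    const adj = [_][]const u32{ &.{ 1, 2 }, &.{2}, &.{} };

    var visited = [_]bool{false} ** 3;
    var queue = std.ArrayListUnmanaged(u32){};
    defer queue.deinit(a);

    visited[0] = true;
    try queue.append(a, 0);

    var idx: usize = 0;
    while (idx < queue.items.len) : (idx += 1) {
        const node = queue.items[idx];
        for (adj[node]) |next| {
            if (!visited[next]) {
                visited[next] = true;
                try queue.append(a, next); // amortized O(1); nothing is shifted
            }
        }
    }
    try std.testing.expectEqual(@as(usize, 3), queue.items.len);
}
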
@@ -237,12 +237,29 @@ pub fn installWithManager(

 const all_name_hashes: []PackageNameHash = brk: {
     if (!manager.summary.overrides_changed) break :brk &.{};
-    const hashes_len = manager.lockfile.overrides.map.entries.len + lockfile.overrides.map.entries.len;
-    if (hashes_len == 0) break :brk &.{};
-    var all_name_hashes = try bun.default_allocator.alloc(PackageNameHash, hashes_len);
+
+    // Collect hashes from flat maps
+    const flat_hashes_len = manager.lockfile.overrides.map.entries.len + lockfile.overrides.map.entries.len;
+
+    // Collect hashes from tree leaf nodes
+    const old_tree_hashes = try manager.lockfile.overrides.collectTreeLeafHashes(bun.default_allocator);
+    defer if (old_tree_hashes.len > 0) bun.default_allocator.free(old_tree_hashes);
+    const new_tree_hashes = try lockfile.overrides.collectTreeLeafHashes(bun.default_allocator);
+    defer if (new_tree_hashes.len > 0) bun.default_allocator.free(new_tree_hashes);
+
+    const total_len = flat_hashes_len + old_tree_hashes.len + new_tree_hashes.len;
+    if (total_len == 0) break :brk &.{};
+
+    var all_name_hashes = try bun.default_allocator.alloc(PackageNameHash, total_len);
     @memcpy(all_name_hashes[0..manager.lockfile.overrides.map.entries.len], manager.lockfile.overrides.map.keys());
-    @memcpy(all_name_hashes[manager.lockfile.overrides.map.entries.len..], lockfile.overrides.map.keys());
-    var i = manager.lockfile.overrides.map.entries.len;
+    @memcpy(all_name_hashes[manager.lockfile.overrides.map.entries.len .. manager.lockfile.overrides.map.entries.len + lockfile.overrides.map.entries.len], lockfile.overrides.map.keys());
+    var dest = manager.lockfile.overrides.map.entries.len + lockfile.overrides.map.entries.len;
+    @memcpy(all_name_hashes[dest .. dest + old_tree_hashes.len], old_tree_hashes);
+    dest += old_tree_hashes.len;
+    @memcpy(all_name_hashes[dest .. dest + new_tree_hashes.len], new_tree_hashes);
+
+    // Deduplicate
+    var i: usize = manager.lockfile.overrides.map.entries.len;
     while (i < all_name_hashes.len) {
         if (std.mem.indexOfScalar(PackageNameHash, all_name_hashes[0..i], all_name_hashes[i]) != null) {
             all_name_hashes[i] = all_name_hashes[all_name_hashes.len - 1];

@@ -361,6 +378,10 @@ pub fn installWithManager(
builder.clamp();

if (manager.summary.overrides_changed and all_name_hashes.len > 0) {
    // Pre-populate override contexts for existing resolved packages
    // so that re-enqueued deps can find their override tree context.
    manager.populateOverrideContexts();

    for (manager.lockfile.buffers.dependencies.items, 0..) |*dependency, dependency_i| {
        if (std.mem.indexOfScalar(PackageNameHash, all_name_hashes, dependency.name_hash)) |_| {
            manager.lockfile.buffers.resolutions.items[dependency_i] = invalid_package_id;

File diff suppressed because it is too large
@@ -596,6 +596,22 @@ pub fn Package(comptime SemverIntType: type) type {
    }
}

// Also compare override trees
if (!summary.overrides_changed) {
    from_lockfile.overrides.sort(from_lockfile);
    to_lockfile.overrides.sort(to_lockfile);
    if (!from_lockfile.overrides.treeEquals(
        &to_lockfile.overrides,
        from_lockfile.buffers.string_bytes.items,
        to_lockfile.buffers.string_bytes.items,
    )) {
        summary.overrides_changed = true;
        if (PackageManager.verbose_install) {
            Output.prettyErrorln("Override tree changed since last install", .{});
        }
    }
}

if (is_root) catalogs: {

// don't sort if lengths are different

@@ -300,7 +300,7 @@ pub const Stringifier = struct {
     );
 }

-if (lockfile.overrides.map.count() > 0) {
+if (lockfile.overrides.map.count() > 0 or lockfile.overrides.hasTree()) {
     lockfile.overrides.sort(lockfile);

     try writeIndent(writer, indent);

@@ -309,12 +309,33 @@ pub const Stringifier = struct {
     \\
     );
     indent.* += 1;
-    for (lockfile.overrides.map.values()) |override_dep| {
-        try writeIndent(writer, indent);
-        try writer.print(
-            \\{f}: {f},
-            \\
-        , .{ override_dep.name.fmtJson(buf, .{}), override_dep.version.literal.fmtJson(buf, .{}) });
-    }
+
+    if (lockfile.overrides.hasTree()) {
+        // Write tree nodes recursively, starting from root's children
+        try writeOverrideTree(writer, &lockfile.overrides, buf, indent);
+    } else {
+        // Write flat overrides
+        for (lockfile.overrides.map.values()) |override_dep| {
+            try writeIndent(writer, indent);
+            try writer.print(
+                \\{f}: {f},
+                \\
+            , .{ override_dep.name.fmtJson(buf, .{}), override_dep.version.literal.fmtJson(buf, .{}) });
+        }
+    }
+
+    // Also write flat-only overrides that are not in the tree
+    if (lockfile.overrides.hasTree()) {
+        for (lockfile.overrides.map.values()) |override_dep| {
+            const name_hash = override_dep.name_hash;
+            // Skip if this override is already represented in the tree
+            if (lockfile.overrides.findChild(0, name_hash) != null) continue;
+            try writeIndent(writer, indent);
+            try writer.print(
+                \\{f}: {f},
+                \\
+            , .{ override_dep.name.fmtJson(buf, .{}), override_dep.version.literal.fmtJson(buf, .{}) });
+        }
+    }

     try decIndent(writer, indent);

@@ -961,6 +982,64 @@ pub const Stringifier = struct {
    try writer.writeAll("},");
}

fn writeOverrideTree(writer: *std.Io.Writer, overrides: *const OverrideMap, buf: string, indent: *u32) std.Io.Writer.Error!void {
    if (overrides.nodes.items.len == 0) return;
    try writeOverrideNodeChildren(writer, overrides, 0, buf, indent);
}

fn writeOverrideNodeChildren(writer: *std.Io.Writer, overrides: *const OverrideMap, node_id: OverrideMap.NodeID, buf: string, indent: *u32) std.Io.Writer.Error!void {
    if (node_id >= overrides.nodes.items.len) return;
    var child_id = overrides.nodes.items[node_id].first_child;
    while (child_id != OverrideMap.invalid_node_id) {
        if (child_id >= overrides.nodes.items.len) break;
        const child = overrides.nodes.items[child_id];

        try writeIndent(writer, indent);

        if (child.first_child != OverrideMap.invalid_node_id) {
            // Has children: write as object with key = name or name@key_spec
            try writeOverrideNodeKey(writer, child, buf);
            try writer.writeAll(": {\n");
            indent.* += 1;
            if (child.value) |val| {
                try writeIndent(writer, indent);
                try writer.print(
                    \\".": {f},
                    \\
                , .{val.version.literal.fmtJson(buf, .{})});
            }
            try writeOverrideNodeChildren(writer, overrides, child_id, buf, indent);
            try decIndent(writer, indent);
            try writer.writeAll("},\n");
        } else if (child.value) |val| {
            // Leaf with value: write key = name or name@key_spec
            try writeOverrideNodeKey(writer, child, buf);
            try writer.print(
                \\: {f},
                \\
            , .{val.version.literal.fmtJson(buf, .{})});
        }

        child_id = child.next_sibling;
    }
}

/// Write the JSON key for an override node: "name" or "name@key_spec"
fn writeOverrideNodeKey(writer: *std.Io.Writer, node: OverrideMap.OverrideNode, buf: string) std.Io.Writer.Error!void {
    const key_spec_str = node.key_spec.slice(buf);
    if (key_spec_str.len > 0) {
        // Write "name@key_spec" as a single JSON string
        const name_str = node.name.slice(buf);
        try writer.writeAll("\"");
        try writer.writeAll(name_str);
        try writer.writeAll("@");
        try writer.writeAll(key_spec_str);
        try writer.writeAll("\"");
    } else {
        try writer.print("{f}", .{node.name.fmtJson(buf, .{})});
    }
}

fn writeIndent(writer: *std.Io.Writer, indent: *const u32) std.Io.Writer.Error!void {
    for (0..indent.*) |_| {
        try writer.writeAll(" " ** indent_scalar);

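Given the writers above, a plausible (assumed, not verified against actual output) shape of the serialized overrides block in bun.lock, embedded as a Zig string literal: "." carries the parent's own override and a "name@spec" key carries a version-constrained parent.

// Hypothetical bun.lock excerpt produced by writeOverrideTree/writeOverrideNodeKey.
const example_overrides_block =
    \\"overrides": {
    \\  "express@^4.0.0": {
    \\    ".": "4.18.2",
    \\    "bytes": "1.0.0",
    \\  },
    \\},
;
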
@@ -1223,49 +1302,7 @@ pub fn parseIntoBinaryLockfile(
     return error.InvalidOverridesObject;
 }

-for (overrides_expr.data.e_object.properties.slice()) |prop| {
-    const key = prop.key.?;
-    const value = prop.value.?;
-
-    if (!key.isString() or key.data.e_string.len() == 0) {
-        try log.addError(source, key.loc, "Expected a non-empty string");
-        return error.InvalidOverridesObject;
-    }
-
-    const name_str = key.asString(allocator).?;
-    const name_hash = String.Builder.stringHash(name_str);
-    const name = try string_buf.appendWithHash(name_str, name_hash);
-
-    // TODO(dylan-conway) also accept object when supported
-    if (!value.isString()) {
-        try log.addError(source, value.loc, "Expected a string");
-        return error.InvalidOverridesObject;
-    }
-
-    const version_str = value.asString(allocator).?;
-    const version_hash = String.Builder.stringHash(version_str);
-    const version = try string_buf.appendWithHash(version_str, version_hash);
-    const version_sliced = version.sliced(string_buf.bytes.items);
-
-    const dep: Dependency = .{
-        .name = name,
-        .name_hash = name_hash,
-        .version = Dependency.parse(
-            allocator,
-            name,
-            name_hash,
-            version_sliced.slice,
-            &version_sliced,
-            log,
-            manager,
-        ) orelse {
-            try log.addError(source, value.loc, "Invalid override version");
-            return error.InvalidOverridesObject;
-        },
-    };
-
-    try lockfile.overrides.map.put(allocator, name_hash, dep);
-}
+try parseOverridesFromLockfileObj(lockfile, overrides_expr, allocator, &string_buf, log, source, manager, 0);
 }

 if (root.get("catalog")) |catalog_expr| {

@@ -2038,6 +2075,139 @@ pub fn parseIntoBinaryLockfile(
    }
}

fn parseOverridesFromLockfileObj(
    lockfile: *BinaryLockfile,
    expr: Expr,
    allocator: std.mem.Allocator,
    string_buf: *String.Buf,
    log: *logger.Log,
    source: *const logger.Source,
    manager: ?*PackageManager,
    parent_node_id: OverrideMap.NodeID,
) !void {
    if (!expr.isObject()) return;

    for (expr.data.e_object.properties.slice()) |prop| {
        const key = prop.key.?;
        const value = prop.value.?;

        if (!key.isString() or key.data.e_string.len() == 0) {
            try log.addError(source, key.loc, "Expected a non-empty string");
            return error.InvalidOverridesObject;
        }

        const raw_key_str = key.asString(allocator).?;
        // Skip "." key (handled by parent)
        if (strings.eql(raw_key_str, ".")) continue;

        // Parse key: "name" or "name@key_spec"
        const parsed_key = OverrideMap.parseKeyWithVersion(raw_key_str);
        const name_str = parsed_key.name;
        const key_spec_str = parsed_key.spec;

        const name_hash = String.Builder.stringHash(name_str);
        const name = try string_buf.appendWithHash(name_str, name_hash);
        const key_spec_s = if (key_spec_str.len > 0) try string_buf.append(key_spec_str) else String{};

        if (value.isString()) {
            const version_str = value.asString(allocator).?;
            const version_hash = String.Builder.stringHash(version_str);
            const version_s = try string_buf.appendWithHash(version_str, version_hash);
            const version_sliced = version_s.sliced(string_buf.bytes.items);

            const dep: Dependency = .{
                .name = name,
                .name_hash = name_hash,
                .version = Dependency.parse(
                    allocator,
                    name,
                    name_hash,
                    version_sliced.slice,
                    &version_sliced,
                    log,
                    manager,
                ) orelse {
                    try log.addError(source, value.loc, "Invalid override version");
                    return error.InvalidOverridesObject;
                },
            };

            if (parent_node_id == 0 and lockfile.overrides.nodes.items.len == 0) {
                try lockfile.overrides.map.put(allocator, name_hash, dep);
            } else {
                try lockfile.overrides.ensureRootNode(allocator);
                _ = try lockfile.overrides.getOrAddChild(allocator, parent_node_id, .{
                    .name = name,
                    .name_hash = name_hash,
                    .key_spec = key_spec_s,
                    .value = dep,
                    .first_child = OverrideMap.invalid_node_id,
                    .next_sibling = OverrideMap.invalid_node_id,
                    .parent = OverrideMap.invalid_node_id,
                }, string_buf.bytes.items);
            }
        } else if (value.isObject()) {
            var self_dep: ?Dependency = null;

            if (value.asProperty(".")) |dot_prop| {
                if (dot_prop.expr.isString()) {
                    const dot_str = dot_prop.expr.asString(allocator).?;
                    const dot_hash = String.Builder.stringHash(dot_str);
                    const dot_s = try string_buf.appendWithHash(dot_str, dot_hash);
                    const dot_sliced = dot_s.sliced(string_buf.bytes.items);
                    self_dep = .{
                        .name = name,
                        .name_hash = name_hash,
                        .version = Dependency.parse(
                            allocator,
                            name,
                            name_hash,
                            dot_sliced.slice,
                            &dot_sliced,
                            log,
                            manager,
                        ) orelse {
                            try log.addError(source, dot_prop.expr.loc, "Invalid override version");
                            return error.InvalidOverridesObject;
                        },
                    };
                }
            }

            var has_children = false;
            for (value.data.e_object.properties.slice()) |child_prop| {
                const ck = child_prop.key.?.asString(allocator).?;
                if (!strings.eql(ck, ".")) {
                    has_children = true;
                    break;
                }
            }

            if (!has_children and self_dep != null and parent_node_id == 0 and lockfile.overrides.nodes.items.len == 0) {
                try lockfile.overrides.map.put(allocator, name_hash, self_dep.?);
            } else {
                try lockfile.overrides.ensureRootNode(allocator);
                if (self_dep != null and parent_node_id == 0) {
                    try lockfile.overrides.map.put(allocator, name_hash, self_dep.?);
                }
                const node_id = try lockfile.overrides.getOrAddChild(allocator, parent_node_id, .{
                    .name = name,
                    .name_hash = name_hash,
                    .key_spec = key_spec_s,
                    .value = self_dep,
                    .first_child = OverrideMap.invalid_node_id,
                    .next_sibling = OverrideMap.invalid_node_id,
                    .parent = OverrideMap.invalid_node_id,
                }, string_buf.bytes.items);
                try parseOverridesFromLockfileObj(lockfile, value, allocator, string_buf, log, source, manager, node_id);
            }
        } else {
            try log.addError(source, value.loc, "Expected a string or object");
            return error.InvalidOverridesObject;
        }
    }
}

fn mapDepToPkg(dep: *Dependency, dep_id: DependencyID, pkg_id: PackageID, lockfile: *BinaryLockfile, pkg_resolutions: []const Resolution) void {
    lockfile.buffers.resolutions.items[dep_id] = pkg_id;

@@ -2253,6 +2423,7 @@ const invalid_package_id = Install.invalid_package_id;

const BinaryLockfile = bun.install.Lockfile;
const DependencySlice = BinaryLockfile.DependencySlice;
const OverrideMap = BinaryLockfile.OverrideMap;
const LoadResult = BinaryLockfile.LoadResult;
const Meta = BinaryLockfile.Package.Meta;

@@ -8,6 +8,7 @@ const has_workspace_package_ids_tag: u64 = @bitCast(@as([8]u8, "wOrKsPaC".*));
const has_trusted_dependencies_tag: u64 = @bitCast(@as([8]u8, "tRuStEDd".*));
const has_empty_trusted_dependencies_tag: u64 = @bitCast(@as([8]u8, "eMpTrUsT".*));
const has_overrides_tag: u64 = @bitCast(@as([8]u8, "oVeRriDs".*));
const has_nested_overrides_tag: u64 = @bitCast(@as([8]u8, "nStOvRd\x00".*));
const has_catalogs_tag: u64 = @bitCast(@as([8]u8, "cAtAlOgS".*));
const has_config_version_tag: u64 = @bitCast(@as([8]u8, "cNfGvRsN".*));

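A standalone sketch of the tag scheme used by these constants: an 8-byte ASCII marker reinterpreted as a u64, so detecting an optional section costs one integer comparison (note "nStOvRd\x00" is padded to exactly 8 bytes):

const std = @import("std");

const demo_tag: u64 = @bitCast(@as([8]u8, "nStOvRd\x00".*));

test "tag round-trips through little-endian bytes" {
    var buf: [8]u8 = undefined;
    std.mem.writeInt(u64, &buf, demo_tag, .little);
    try std.testing.expectEqual(demo_tag, std.mem.readInt(u64, &buf, .little));
}
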
@@ -156,6 +157,29 @@ pub fn save(this: *Lockfile, options: *const PackageManager.Options, bytes: *std
    );
}

// Write nested override tree (if any)
if (this.overrides.hasTree()) {
    try writer.writeAll(std.mem.asBytes(&has_nested_overrides_tag));

    const node_count: u32 = @intCast(this.overrides.nodes.items.len);
    try writer.writeAll(std.mem.asBytes(&node_count));

    var external_nodes = try std.ArrayListUnmanaged(OverrideMap.OverrideNode.External).initCapacity(z_allocator, node_count);
    defer external_nodes.deinit(z_allocator);
    external_nodes.items.len = node_count;
    for (external_nodes.items, this.overrides.nodes.items) |*dest, src| {
        dest.* = src.toExternal();
    }
    try Lockfile.Buffers.writeArray(
        StreamType,
        stream,
        @TypeOf(writer),
        writer,
        []OverrideMap.OverrideNode.External,
        external_nodes.items,
    );
}

if (this.patched_dependencies.entries.len > 0) {
    for (this.patched_dependencies.values()) |patched_dep| bun.assert(!patched_dep.patchfile_hash_is_null);

@@ -475,6 +499,39 @@ pub fn load(
    }
}

// Read nested override tree
{
    const remaining_in_buffer = total_buffer_size -| stream.pos;

    if (remaining_in_buffer > 8 and total_buffer_size <= stream.buffer.len) {
        const next_num = try reader.readInt(u64, .little);
        if (next_num == has_nested_overrides_tag) {
            const node_count = try reader.readInt(u32, .little);
            if (node_count > 0) {
                const external_nodes = try Lockfile.Buffers.readArray(
                    stream,
                    allocator,
                    std.ArrayListUnmanaged(OverrideMap.OverrideNode.External),
                );
                const context: Dependency.Context = .{
                    .allocator = allocator,
                    .log = log,
                    .buffer = lockfile.buffers.string_bytes.items,
                    .package_manager = manager,
                };
                var nodes = &lockfile.overrides.nodes;
                try nodes.ensureTotalCapacity(allocator, external_nodes.items.len);
                for (external_nodes.items) |ext_node| {
                    nodes.appendAssumeCapacity(OverrideMap.OverrideNode.fromExternal(ext_node, context));
                }
                lockfile.overrides.rebuildParentPointers();
            }
        } else {
            // Not our tag: rewind so the next section's reader sees it
            stream.pos -= 8;
        }
    }
}

{
    const remaining_in_buffer = total_buffer_size -| stream.pos;

@@ -634,6 +691,7 @@ const PatchedDep = install.PatchedDep;
const alignment_bytes_to_repeat_buffer = install.alignment_bytes_to_repeat_buffer;

const Lockfile = install.Lockfile;
const OverrideMap = Lockfile.OverrideMap;
const PackageIndex = Lockfile.PackageIndex;
const Stream = Lockfile.Stream;
const StringPool = Lockfile.StringPool;

File diff suppressed because it is too large