Mirror of https://github.com/oven-sh/bun (synced 2026-02-09 10:28:47 +00:00)
fix(install): harden nested overrides implementation
- Fix writeOverrideNodeKey to JSON-escape name and key_spec via fmtJson
- Validate node_count matches deserialized nodes in bun.lockb reader
- Add overflow detection in splitResolutionPath (max 8 segments)
- Prevent version-scoped root overrides from being promoted to flat map
- Add bounds validation in findOverrideInContext for invalid node IDs
- Add key_spec sibling-walk validation in populateOverrideContexts BFS
- Document public override fields in PackageManager

Co-Authored-By: Claude <noreply@anthropic.com>
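As background for the key_spec sibling-walk bullet above, here is a minimal, self-contained Zig sketch of the idea. The NodeID and Node types and the exact-match version check are illustrative stand-ins only, not bun's actual OverrideMap or Semver code; the sketch runs under zig test.

const std = @import("std");

// Simplified stand-ins for the override tree; illustrative types, not bun's OverrideMap.
const NodeID = u32;
const invalid_node_id: NodeID = std.math.maxInt(NodeID);

const Node = struct {
    name_hash: u64,
    key_spec: []const u8, // e.g. "2.0.0"; empty means "any version"
    parent: NodeID,
};

// Walk every child of `parent` that shares `name_hash` and return the first one
// whose key_spec accepts the resolved version. A plain string comparison stands
// in for the real semver range check.
fn findValidChild(nodes: []const Node, parent: NodeID, name_hash: u64, resolved_version: []const u8) ?NodeID {
    for (nodes, 0..) |node, i| {
        if (node.parent != parent or node.name_hash != name_hash) continue;
        if (node.key_spec.len == 0) return @as(NodeID, @intCast(i));
        if (std.mem.eql(u8, node.key_spec, resolved_version)) return @as(NodeID, @intCast(i));
        // key_spec did not match: keep walking siblings with the same name.
    }
    return null;
}

test "a version-scoped sibling is skipped when its spec does not match" {
    const nodes = [_]Node{
        .{ .name_hash = 0, .key_spec = "", .parent = invalid_node_id }, // 0: root
        .{ .name_hash = 1, .key_spec = "2.0.0", .parent = 0 }, // 1: "foo@2.0.0" override
        .{ .name_hash = 1, .key_spec = "", .parent = 0 }, // 2: unscoped "foo" override
    };
    // The resolved version 1.5.0 rejects node 1 but matches the unscoped node 2.
    try std.testing.expectEqual(@as(?NodeID, 2), findValidChild(&nodes, 0, 1, "1.5.0"));
}

In the real implementation below, the sibling walk goes through findChild/findChildAfter and the version check is Semver.Query's satisfies(); the shape of the loop is the same.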
@@ -105,14 +105,17 @@ known_npm_aliases: NpmAliasMap = .{},
/// Maps PackageID → OverrideMap.NodeID
/// Tracks which override tree node is the context for each resolved package's children.
+/// Public: accessed by PackageManagerEnqueue, PackageManagerResolution, and install_with_manager.
pkg_override_ctx: std.AutoHashMapUnmanaged(PackageID, OverrideMap.NodeID) = .{},

/// Maps DependencyID → OverrideMap.NodeID
/// Temporary: holds the override context for a dependency between enqueue and resolution.
+/// Public: written in PackageManagerEnqueue, read in PackageManagerResolution.
dep_pending_override: std.AutoHashMapUnmanaged(DependencyID, OverrideMap.NodeID) = .{},

/// Precomputed reverse mapping: DependencyID → owning PackageID.
/// Built lazily to avoid O(N) scans per dependency in the enqueue path.
+/// Public: accessed by PackageManagerEnqueue.
dep_parent_map: std.ArrayListUnmanaged(PackageID) = .{},

event_loop: jsc.AnyEventLoop,

@@ -245,6 +245,8 @@ pub fn populateOverrideContexts(this: *PackageManager) void {
const dep_lists = packages.items(.dependencies);
const res_lists = packages.items(.resolutions);
const name_hashes = packages.items(.name_hash);
+const resolutions = packages.items(.resolution);
+const buf = this.lockfile.buffers.string_bytes.items;

// Use a simple worklist (BFS queue)
const QueueItem = struct { pkg_id: PackageID, ctx: OverrideMap.NodeID };

@@ -266,25 +268,45 @@ pub fn populateOverrideContexts(this: *PackageManager) void {
for (deps, ress) |dep, resolved_pkg_id| {
if (resolved_pkg_id >= packages.len) continue;

-// Determine child context: if the dep matches a child in the override tree, use that child node
+// Determine child context: walk siblings with key_spec validation
+// (mirrors the enqueue path's sibling-walk logic)
var child_ctx = item.ctx;
-if (this.lockfile.overrides.findChild(item.ctx, dep.name_hash)) |child_id| {
-child_ctx = child_id;
-} else if (item.ctx != 0) {
-// Also check if the dep matches a child of root (for packages that match
-// a root-level entry in the tree but are discovered via a non-matching path)
-if (this.lockfile.overrides.findChild(0, dep.name_hash)) |child_id| {
-child_ctx = child_id;
+const resolved_version = if (resolved_pkg_id < resolutions.len and resolutions[resolved_pkg_id].tag == .npm)
+resolutions[resolved_pkg_id].value.npm.version
+else
+null;
+
+child_ctx = findValidChild(
+&this.lockfile.overrides,
+item.ctx,
+dep.name_hash,
+resolved_version,
+buf,
+) orelse blk: {
+// Also check root if current context is not root
+if (item.ctx != 0) {
+break :blk findValidChild(
+&this.lockfile.overrides,
+0,
+dep.name_hash,
+resolved_version,
+buf,
+) orelse item.ctx;
}
-}
+break :blk item.ctx;
+};

// Also check by resolved package's name_hash (in case dep name differs from pkg name)
if (child_ctx == item.ctx and resolved_pkg_id < name_hashes.len) {
const pkg_name_hash = name_hashes[resolved_pkg_id];
if (pkg_name_hash != dep.name_hash) {
-if (this.lockfile.overrides.findChild(item.ctx, pkg_name_hash)) |child_id| {
-child_ctx = child_id;
-}
+child_ctx = findValidChild(
+&this.lockfile.overrides,
+item.ctx,
+pkg_name_hash,
+resolved_version,
+buf,
+) orelse child_ctx;
}
}

@@ -297,6 +319,50 @@ pub fn populateOverrideContexts(this: *PackageManager) void {
}
}

+/// Find a child matching name_hash under parent_ctx, walking siblings to skip
+/// nodes whose key_spec doesn't match the resolved version.
+fn findValidChild(
+overrides: *const Lockfile.OverrideMap,
+parent_ctx: Lockfile.OverrideMap.NodeID,
+name_hash: PackageNameHash,
+resolved_version: ?Semver.Version,
+buf: string,
+) ?Lockfile.OverrideMap.NodeID {
+var candidate = overrides.findChild(parent_ctx, name_hash);
+while (candidate) |child_id| {
+const child = overrides.nodes.items[child_id];
+if (!child.key_spec.isEmpty()) {
+if (!isKeySpecSatisfiedByVersion(child.key_spec, resolved_version, buf)) {
+candidate = overrides.findChildAfter(parent_ctx, name_hash, child_id);
+continue;
+}
+}
+return child_id;
+}
+return null;
+}
+
+/// Check if a resolved Semver.Version satisfies a key_spec constraint.
+/// Used during BFS context propagation where we have actual resolved versions.
+fn isKeySpecSatisfiedByVersion(key_spec: String, resolved_version: ?Semver.Version, buf: string) bool {
+if (key_spec.isEmpty()) return true;
+const version = resolved_version orelse return true; // non-npm: match optimistically
+
+const key_spec_str = key_spec.slice(buf);
+if (key_spec_str.len == 0) return true;
+
+const sliced = Semver.SlicedString.init(key_spec_str, key_spec_str);
+var key_spec_group = Semver.Query.parse(
+bun.default_allocator,
+key_spec_str,
+sliced,
+) catch return true; // on parse error, allow optimistically
+defer key_spec_group.deinit();
+
+// key_spec_group's strings are in key_spec_str, version's strings are in buf
+return key_spec_group.head.satisfies(version, key_spec_str, buf);
+}
+
const string = []const u8;

const std = @import("std");

@@ -90,10 +90,12 @@ pub fn findChildAfter(this: *const OverrideMap, parent_node_id: NodeID, name_has
/// for a match. Returns the most specific (deepest) matching child.
/// This implements npm's "ruleset" semantics where closer overrides shadow ancestor overrides.
pub fn findOverrideInContext(this: *const OverrideMap, context_node_id: NodeID, name_hash: PackageNameHash) ?NodeID {
+if (context_node_id == invalid_node_id or context_node_id >= this.nodes.items.len) return null;
var ctx = context_node_id;
while (true) {
if (this.findChild(ctx, name_hash)) |child_id| return child_id;
if (ctx == 0) return null;
+if (ctx >= this.nodes.items.len) return null;
const parent = this.nodes.items[ctx].parent;
if (parent == invalid_node_id) return null;
ctx = parent;

@@ -619,11 +621,11 @@ fn parseOverrideObject(
version_str,
builder,
)) |version| {
-if (is_root_level and parent_node_id == 0) {
-// Global override: add to flat map
+if (is_root_level and parent_node_id == 0 and key_spec_str.len == 0) {
+// Global unscoped override: add to flat map
this.map.putAssumeCapacity(name_hash, version);
} else {
-// Nested override: add to tree only
+// Nested or version-scoped override: add to tree only
try this.ensureRootNode(lockfile.allocator);
const key_spec = if (key_spec_str.len > 0) builder.append(String, key_spec_str) else String{};
_ = try this.getOrAddChild(lockfile.allocator, parent_node_id, .{

@@ -671,16 +673,16 @@ fn parseOverrideObject(
}
}

-if (is_root_level and parent_node_id == 0 and self_value != null and !has_children) {
-// Simple case: only "." key at root level, treat as flat override
+if (is_root_level and parent_node_id == 0 and key_spec_str.len == 0 and self_value != null and !has_children) {
+// Simple case: only "." key at root level, unscoped, treat as flat override
this.map.putAssumeCapacity(name_hash, self_value.?);
} else {
// Add to tree
try this.ensureRootNode(lockfile.allocator);
const key_spec = if (key_spec_str.len > 0) builder.append(String, key_spec_str) else String{};

-if (is_root_level and self_value != null) {
-// Also add to flat map for backward compat
+if (is_root_level and key_spec_str.len == 0 and self_value != null) {
+// Also add to flat map for unscoped root-level overrides for backward compat
this.map.putAssumeCapacity(name_hash, self_value.?);
}

@@ -820,6 +822,10 @@ pub fn parseFromResolutions(

// Parse path segments (e.g., "parent/child" or "@scope/parent/child")
const segments = splitResolutionPath(k);
+if (segments.overflow) {
+try log.addWarningFmt(source, key.loc, lockfile.allocator, "Resolution path has too many segments (max 8): \"{s}\"", .{k});
+continue;
+}
if (segments.count == 1) {
// Simple resolution (no nesting)
if (try parseOverrideValue(

@@ -889,6 +895,7 @@ const ResolutionSegments = struct {
segments: [8][]const u8 = undefined,
count: usize = 0,
last: []const u8 = "",
+overflow: bool = false,

fn get(this: *const ResolutionSegments, idx: usize) []const u8 {
return this.segments[idx];

@@ -901,7 +908,11 @@ fn splitResolutionPath(k: []const u8) ResolutionSegments {
var result = ResolutionSegments{};
var remaining = k;

-while (remaining.len > 0 and result.count < 8) {
+while (remaining.len > 0) {
+if (result.count >= result.segments.len) {
+result.overflow = true;
+break;
+}
// Strip **/ prefixes
while (strings.hasPrefixComptime(remaining, "**/")) remaining = remaining[3..];
if (remaining.len == 0) break;

@@ -1028,12 +1028,11 @@ pub const Stringifier = struct {
fn writeOverrideNodeKey(writer: *std.Io.Writer, node: OverrideMap.OverrideNode, buf: string) std.Io.Writer.Error!void {
const key_spec_str = node.key_spec.slice(buf);
if (key_spec_str.len > 0) {
-// Write "name@key_spec" as a single JSON string
-const name_str = node.name.slice(buf);
+// Write "name@key_spec" as a single JSON string with proper escaping
try writer.writeAll("\"");
-try writer.writeAll(name_str);
+try writer.print("{f}", .{node.name.fmtJson(buf, .{ .quote = false })});
try writer.writeAll("@");
-try writer.writeAll(key_spec_str);
+try writer.print("{f}", .{node.key_spec.fmtJson(buf, .{ .quote = false })});
try writer.writeAll("\"");
} else {
try writer.print("{f}", .{node.name.fmtJson(buf, .{})});

@@ -513,6 +513,9 @@ pub fn load(
allocator,
std.ArrayListUnmanaged(OverrideMap.OverrideNode.External),
);
+if (external_nodes.items.len != node_count) {
+return error.MalformedLockfile;
+}
const context: Dependency.Context = .{
.allocator = allocator,
.log = log,
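For reference, a similar stand-alone sketch of the ancestor walk and the added bounds checks in findOverrideInContext; the types are again simplified stand-ins rather than the lockfile's real node storage, and the linear scan stands in for findChild.

const std = @import("std");

const NodeID = u32;
const invalid_node_id: NodeID = std.math.maxInt(NodeID);

const Node = struct {
    name_hash: u64,
    parent: NodeID,
};

// Look for a child named `name_hash` under `context`, then under each ancestor
// up to the root (node 0). The closest match wins, so deeper overrides shadow
// root-level ones. Out-of-range or invalid node IDs return null instead of
// indexing past the node list.
fn findOverrideInContext(nodes: []const Node, context: NodeID, name_hash: u64) ?NodeID {
    if (context == invalid_node_id or context >= nodes.len) return null;
    var ctx = context;
    while (true) {
        for (nodes, 0..) |node, i| {
            if (node.parent == ctx and node.name_hash == name_hash) {
                return @as(NodeID, @intCast(i));
            }
        }
        if (ctx == 0) return null; // already at the root; nothing matched
        if (ctx >= nodes.len) return null;
        const parent = nodes[ctx].parent;
        if (parent == invalid_node_id) return null;
        ctx = parent;
    }
}

test "a nested override shadows the root-level one for the same name" {
    const nodes = [_]Node{
        .{ .name_hash = 0, .parent = invalid_node_id }, // 0: root
        .{ .name_hash = 10, .parent = 0 }, // 1: "a" overridden at the root
        .{ .name_hash = 20, .parent = 0 }, // 2: "b" at the root
        .{ .name_hash = 10, .parent = 2 }, // 3: "a" overridden only under "b"
    };
    // In the context of "b" (node 2), the nested node 3 is found before node 1.
    try std.testing.expectEqual(@as(?NodeID, 3), findOverrideInContext(&nodes, 2, 10));
    // An out-of-range context ID is rejected instead of walking garbage.
    try std.testing.expectEqual(@as(?NodeID, null), findOverrideInContext(&nodes, 99, 10));
}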