Compare commits

...

1 Commits

Author SHA1 Message Date
Claude Bot
aac89964c7 fix(install): use hash map for O(1) lookups in dependency hoisting
The `hoistDependency` function performed a linear scan of all
dependencies in each tree node to find matching package names. In
workspace configurations with many overlapping transitive dependencies,
this caused O(n^2) behavior as the root tree accumulated hundreds of
dependencies and each new dependency scanned through the entire list.

Replace the linear scan with a per-tree `NameHashMap` (PackageNameHash
-> DependencyID) that is maintained alongside the dependency lists.
This reduces the name lookup from O(n) to O(1), fixing the quadratic
slowdown reported in #27033 where workspace installs with many shared
tamagui packages would hang indefinitely.

Closes #27033

Co-Authored-By: Claude <noreply@anthropic.com>
2026-02-14 20:32:58 +00:00
2 changed files with 73 additions and 6 deletions

View File

@@ -277,6 +277,7 @@ pub fn Builder(comptime method: BuilderMethod) type {
pub const Entry = struct {
tree: Tree,
dependencies: Lockfile.DependencyIDList,
name_hash_map: NameHashMap,
};
pub const CleanResult = struct {
@@ -299,8 +300,11 @@ pub fn Builder(comptime method: BuilderMethod) type {
var dep_ids = try DependencyIDList.initCapacity(this.allocator, total);
for (trees, dependencies) |*tree, *child| {
const name_hash_maps = slice.items(.name_hash_map);
for (trees, dependencies, name_hash_maps) |*tree, *child, *name_map| {
defer child.deinit(this.allocator);
defer name_map.deinit(this.allocator);
const off: u32 = @intCast(dep_ids.items.len);
for (child.items) |dep_id| {
@@ -471,11 +475,13 @@ pub fn processSubtree(
.dependency_id = dependency_id,
},
.dependencies = .{},
.name_hash_map = .{},
});
const list_slice = builder.list.slice();
const trees = list_slice.items(.tree);
const dependency_lists = list_slice.items(.dependencies);
const name_hash_maps = list_slice.items(.name_hash_map);
const next: *Tree = &trees[builder.list.len - 1];
const pkgs = builder.lockfile.packages.slice();
@@ -553,6 +559,7 @@ pub fn processSubtree(
pkg_id,
&dependency,
dependency_lists,
name_hash_maps,
trees,
method,
builder,
@@ -573,6 +580,7 @@ pub fn processSubtree(
pkg_id,
&dependency,
dependency_lists,
name_hash_maps,
trees,
method,
builder,
@@ -617,6 +625,8 @@ pub fn processSubtree(
placed_dep_id.* = dep_id;
}
}
// Update the name_hash_map to point to the new dep_id
name_hash_maps[replace.id].put(builder.allocator, dependency.name_hash, dep_id) catch bun.outOfMemory();
if (pkg_id != invalid_package_id and builder.resolution_lists[pkg_id].len > 0) {
try builder.queue.writeItem(.{
.tree_id = replace.id,
@@ -638,6 +648,7 @@ pub fn processSubtree(
},
.placement => |dest| {
bun.handleOom(dependency_lists[dest.id].append(builder.allocator, dep_id));
name_hash_maps[dest.id].put(builder.allocator, dependency.name_hash, dep_id) catch bun.outOfMemory();
trees[dest.id].dependencies.len += 1;
if (pkg_id != invalid_package_id and builder.resolution_lists[pkg_id].len > 0) {
try builder.queue.writeItem(.{
@@ -669,16 +680,13 @@ fn hoistDependency(
package_id: PackageID,
dependency: *const Dependency,
dependency_lists: []Lockfile.DependencyIDList,
name_hash_maps: []NameHashMap,
trees: []Tree,
comptime method: BuilderMethod,
builder: *Builder(method),
) !HoistDependencyResult {
const this_dependencies = this.dependencies.get(dependency_lists[this.id].items);
for (0..this_dependencies.len) |i| {
const dep_id = this_dependencies[i];
if (name_hash_maps[this.id].get(dependency.name_hash)) |dep_id| {
const dep = builder.dependencies[dep_id];
if (dep.name_hash != dependency.name_hash) continue;
const res_id = builder.resolutions[dep_id];
if (res_id == invalid_package_id and package_id == invalid_package_id) {
@@ -758,6 +766,7 @@ fn hoistDependency(
package_id,
dependency,
dependency_lists,
name_hash_maps,
trees,
method,
builder,
@@ -769,6 +778,8 @@ fn hoistDependency(
return .{ .placement = .{ .id = this.id } }; // 2
}
pub const NameHashMap = std.AutoHashMapUnmanaged(PackageNameHash, DependencyID);
pub const FillItem = struct {
tree_id: Tree.Id,
dependency_id: DependencyID,

View File

@@ -0,0 +1,56 @@
import { expect, test } from "bun:test";
import { bunEnv, bunExe, tempDir } from "harness";
// Regression test for #27033: `bun install` had a quadratic slowdown with workspace
// configurations containing many packages with overlapping transitive dependencies.
// The root cause was an O(n) linear scan in hoistDependency() for each dependency being
// hoisted, resulting in O(n^2) behavior when many deps are hoisted to the root tree.
test("workspace install with many overlapping workspace dependencies does not hang", async () => {
  // Build a workspace in which every package depends on every other package.
  // With 30 packages this yields roughly 900 dependency edges, all of which
  // must be hoisted — exercising the code path that was O(n^2) before the fix.
  const packageCount = 30;
  const workspaceFiles: Record<string, string> = {};
  for (let idx = 0; idx < packageCount; idx++) {
    const dependencies: Record<string, string> = {};
    for (let other = 0; other < packageCount; other++) {
      if (other === idx) continue; // a package does not depend on itself
      dependencies[`pkg-${other}`] = "workspace:*";
    }
    workspaceFiles[`packages/pkg-${idx}/package.json`] = JSON.stringify({
      name: `pkg-${idx}`,
      version: "1.0.0",
      dependencies,
    });
  }
  using dir = tempDir("issue-27033", {
    "package.json": JSON.stringify({
      name: "workspace-root",
      private: true,
      workspaces: ["packages/*"],
    }),
    ...workspaceFiles,
  });
  // Prior to the fix this install would hang (or run >60s); afterwards it
  // completes in a few seconds, well inside the 30s timeout below.
  await using proc = Bun.spawn({
    cmd: [bunExe(), "install"],
    env: bunEnv,
    cwd: String(dir),
    stdout: "pipe",
    stderr: "pipe",
  });
  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);
  expect(stderr).not.toContain("panic:");
  expect(stderr).not.toContain("error:");
  expect(stdout).toContain("31 packages");
  expect(exitCode).toBe(0);
}, 30_000);