Compare commits

...

19 Commits

Author           SHA1        Message                                                  Date
Dylan Conway     e84dcdac25  Merge branch 'main' into dylan/faster-peer-resolution    2025-12-29 15:58:16 -08:00
Dylan Conway     13a22d478e  update                                                   2025-11-14 12:34:33 -08:00
Dylan Conway     78b859d76e  use bun.safety.CriticalSection                           2025-11-14 11:41:56 -08:00
Dylan Conway     1a7821ff73  update                                                   2025-11-14 11:22:37 -08:00
Dylan Conway     81440bf024  update                                                   2025-11-14 10:53:28 -08:00
Dylan Conway     608b2c866e  update                                                   2025-11-14 10:50:56 -08:00
Dylan Conway     a2314ed85d  Merge branch 'main' into dylan/faster-peer-resolution    2025-11-14 10:32:45 -08:00
Dylan Conway     ed8128a4e4  update                                                   2025-11-13 20:37:54 -08:00
Dylan Conway     66df05c81e  wait for completed tasks                                 2025-11-13 20:01:41 -08:00
Dylan Conway     86b3528954  address pr feedback                                      2025-11-13 16:00:13 -08:00
autofix-ci[bot]  6c72d90b2b  [autofix.ci] apply automated fixes                       2025-11-13 23:47:18 +00:00
Dylan Conway     6edbcf1efb  fixup merge                                              2025-11-13 15:45:31 -08:00
Dylan Conway     d420acc384  Merge branch 'main' into dylan/faster-peer-resolution    2025-11-13 12:33:39 -08:00
autofix-ci[bot]  8acbe57b47  [autofix.ci] apply automated fixes                       2025-10-03 01:39:04 +00:00
Dylan Conway     4c75664c3b  fix merge                                                2025-10-02 18:36:20 -07:00
Dylan Conway     98d086b405  Merge branch 'main' into dylan/faster-peer-resolution    2025-10-02 18:35:43 -07:00
autofix-ci[bot]  af0a6992e9  [autofix.ci] apply automated fixes                       2025-08-13 19:23:50 +00:00
Dylan Conway     1d25370b59  Merge branch 'main' into dylan/faster-peer-resolution    2025-08-13 12:22:22 -07:00
Dylan Conway     cfd381e829  deduplicate more nodes                                   2025-08-03 18:16:56 -07:00
4 changed files with 845 additions and 622 deletions

View File

@@ -10,609 +10,13 @@ pub fn installIsolatedPackages(
) OOM!PackageInstall.Summary {
bun.analytics.Features.isolated_bun_install += 1;
const lockfile = manager.lockfile;
const store: Store = store: {
var timer = std.time.Timer.start() catch unreachable;
const pkgs = lockfile.packages.slice();
const pkg_dependency_slices = pkgs.items(.dependencies);
const pkg_resolutions = pkgs.items(.resolution);
const pkg_names = pkgs.items(.name);
const resolutions = lockfile.buffers.resolutions.items;
const dependencies = lockfile.buffers.dependencies.items;
const string_buf = lockfile.buffers.string_bytes.items;
var nodes: Store.Node.List = .empty;
const QueuedNode = struct {
parent_id: Store.Node.Id,
dep_id: DependencyID,
pkg_id: PackageID,
};
var node_queue: bun.LinearFifo(QueuedNode, .Dynamic) = .init(lockfile.allocator);
defer node_queue.deinit();
try node_queue.writeItem(.{
.parent_id = .invalid,
.dep_id = invalid_dependency_id,
.pkg_id = 0,
});
var dep_ids_sort_buf: std.ArrayListUnmanaged(DependencyID) = .empty;
defer dep_ids_sort_buf.deinit(lockfile.allocator);
// Used by leaves and linked dependencies. They can be deduplicated early
// because peers won't change them.
//
// In the pnpm repo without this map: 772,471 nodes
// and with this map: 314,022 nodes
var early_dedupe: std.AutoHashMap(PackageID, Store.Node.Id) = .init(lockfile.allocator);
defer early_dedupe.deinit();
var peer_dep_ids: std.array_list.Managed(DependencyID) = .init(lockfile.allocator);
defer peer_dep_ids.deinit();
var visited_parent_node_ids: std.array_list.Managed(Store.Node.Id) = .init(lockfile.allocator);
defer visited_parent_node_ids.deinit();
// First pass: create full dependency tree with resolved peers
next_node: while (node_queue.readItem()) |entry| {
check_cycle: {
// check for cycles
const nodes_slice = nodes.slice();
const node_pkg_ids = nodes_slice.items(.pkg_id);
const node_dep_ids = nodes_slice.items(.dep_id);
const node_parent_ids = nodes_slice.items(.parent_id);
const node_nodes = nodes_slice.items(.nodes);
var curr_id = entry.parent_id;
while (curr_id != .invalid) {
if (node_pkg_ids[curr_id.get()] == entry.pkg_id) {
// skip the new node, and add the previously added node to parent so it appears in
// 'node_modules/.bun/parent@version/node_modules'.
const dep_id = node_dep_ids[curr_id.get()];
if (dep_id == invalid_dependency_id and entry.dep_id == invalid_dependency_id) {
node_nodes[entry.parent_id.get()].appendAssumeCapacity(curr_id);
continue :next_node;
}
if (dep_id == invalid_dependency_id or entry.dep_id == invalid_dependency_id) {
// one is the root package, one is a dependency on the root package (it has a valid dep_id)
// create a new node for it.
break :check_cycle;
}
const curr_dep = dependencies[dep_id];
const entry_dep = dependencies[entry.dep_id];
// ensure the dependency name is the same before skipping the cycle. if they aren't
// we lose dependency name information for the symlinks
if (curr_dep.name_hash == entry_dep.name_hash and
// also ensure workspace self deps are not skipped.
// implicit workspace dep != explicit workspace dep
curr_dep.behavior.workspace == entry_dep.behavior.workspace)
{
node_nodes[entry.parent_id.get()].appendAssumeCapacity(curr_id);
continue :next_node;
}
}
curr_id = node_parent_ids[curr_id.get()];
}
}
const node_id: Store.Node.Id = .from(@intCast(nodes.len));
const pkg_deps = pkg_dependency_slices[entry.pkg_id];
// for skipping dependencies of workspace packages and the root package. the dependencies
// of these packages should only be pulled in once, but we might need to create more than
// one entry if there are multiple dependencies on the workspace or root package.
var skip_dependencies = entry.pkg_id == 0 and entry.dep_id != invalid_dependency_id;
if (entry.dep_id != invalid_dependency_id) {
const entry_dep = dependencies[entry.dep_id];
if (pkg_deps.len == 0 or entry_dep.version.tag == .workspace) dont_dedupe: {
const dedupe_entry = try early_dedupe.getOrPut(entry.pkg_id);
if (dedupe_entry.found_existing) {
const dedupe_node_id = dedupe_entry.value_ptr.*;
const nodes_slice = nodes.slice();
const node_nodes = nodes_slice.items(.nodes);
const node_dep_ids = nodes_slice.items(.dep_id);
const dedupe_dep_id = node_dep_ids[dedupe_node_id.get()];
if (dedupe_dep_id == invalid_dependency_id) {
break :dont_dedupe;
}
const dedupe_dep = dependencies[dedupe_dep_id];
if (dedupe_dep.name_hash != entry_dep.name_hash) {
break :dont_dedupe;
}
if (dedupe_dep.version.tag == .workspace and entry_dep.version.tag == .workspace) {
if (dedupe_dep.behavior.isWorkspace() != entry_dep.behavior.isWorkspace()) {
// only attach the dependencies to one of the workspaces
skip_dependencies = true;
break :dont_dedupe;
}
}
node_nodes[entry.parent_id.get()].appendAssumeCapacity(dedupe_node_id);
continue;
}
dedupe_entry.value_ptr.* = node_id;
}
}
try nodes.append(lockfile.allocator, .{
.pkg_id = entry.pkg_id,
.dep_id = entry.dep_id,
.parent_id = entry.parent_id,
.nodes = if (skip_dependencies) .empty else try .initCapacity(lockfile.allocator, pkg_deps.len),
.dependencies = if (skip_dependencies) .empty else try .initCapacity(lockfile.allocator, pkg_deps.len),
});
const nodes_slice = nodes.slice();
const node_parent_ids = nodes_slice.items(.parent_id);
const node_dependencies = nodes_slice.items(.dependencies);
const node_peers = nodes_slice.items(.peers);
const node_nodes = nodes_slice.items(.nodes);
if (entry.parent_id.tryGet()) |parent_id| {
node_nodes[parent_id].appendAssumeCapacity(node_id);
}
if (skip_dependencies) {
continue;
}
dep_ids_sort_buf.clearRetainingCapacity();
try dep_ids_sort_buf.ensureUnusedCapacity(lockfile.allocator, pkg_deps.len);
for (pkg_deps.begin()..pkg_deps.end()) |_dep_id| {
const dep_id: DependencyID = @intCast(_dep_id);
dep_ids_sort_buf.appendAssumeCapacity(dep_id);
}
// TODO: make this sort in an order that allows peers to be resolved last
// and devDependency handling to match `hoistDependency`
std.sort.pdq(
DependencyID,
dep_ids_sort_buf.items,
Lockfile.DepSorter{
.lockfile = lockfile,
},
Lockfile.DepSorter.isLessThan,
);
peer_dep_ids.clearRetainingCapacity();
queue_deps: {
if (packages_to_install) |packages| {
if (node_id == .root) { // TODO: print an error when scanner is actually a dependency of a workspace (we should not support this)
for (dep_ids_sort_buf.items) |dep_id| {
const pkg_id = resolutions[dep_id];
if (pkg_id == invalid_package_id) {
continue;
}
for (packages) |package_to_install| {
if (package_to_install == pkg_id) {
node_dependencies[node_id.get()].appendAssumeCapacity(.{ .dep_id = dep_id, .pkg_id = pkg_id });
try node_queue.writeItem(.{
.parent_id = node_id,
.dep_id = dep_id,
.pkg_id = pkg_id,
});
break;
}
}
}
break :queue_deps;
}
}
for (dep_ids_sort_buf.items) |dep_id| {
if (Tree.isFilteredDependencyOrWorkspace(
dep_id,
entry.pkg_id,
workspace_filters,
install_root_dependencies,
manager,
lockfile,
)) {
continue;
}
const pkg_id = resolutions[dep_id];
const dep = dependencies[dep_id];
// TODO: handle duplicate dependencies. should use logic similar
// to what we have for dev dependencies in `hoistDependency`
if (!dep.behavior.isPeer()) {
// simple case:
// - add it as a dependency
// - queue it
node_dependencies[node_id.get()].appendAssumeCapacity(.{ .dep_id = dep_id, .pkg_id = pkg_id });
try node_queue.writeItem(.{
.parent_id = node_id,
.dep_id = dep_id,
.pkg_id = pkg_id,
});
continue;
}
try peer_dep_ids.append(dep_id);
}
}
for (peer_dep_ids.items) |peer_dep_id| {
const resolved_pkg_id, const auto_installed = resolved_pkg_id: {
// Go through the peer's parents looking for a package with the same name.
// If none is found, use the current best version. Parents visited must have
// the package id for the chosen peer marked as a transitive peer. Nodes
// are deduplicated only if their package id and their transitive peer package
// ids are equal.
const peer_dep = dependencies[peer_dep_id];
// TODO: double check this
// Start with the current package. A package
// can satisfy its own peers.
var curr_id = node_id;
visited_parent_node_ids.clearRetainingCapacity();
while (curr_id != .invalid) {
for (node_dependencies[curr_id.get()].items) |ids| {
const dep = dependencies[ids.dep_id];
if (dep.name_hash != peer_dep.name_hash) {
continue;
}
const res = pkg_resolutions[ids.pkg_id];
if (peer_dep.version.tag != .npm or res.tag != .npm) {
// TODO: print warning for this? we don't have a version
// to compare to say if this satisfies or not.
break :resolved_pkg_id .{ ids.pkg_id, false };
}
const peer_dep_version = peer_dep.version.value.npm.version;
const res_version = res.value.npm.version;
if (!peer_dep_version.satisfies(res_version, string_buf, string_buf)) {
// TODO: add warning!
}
break :resolved_pkg_id .{ ids.pkg_id, false };
}
const curr_peers = node_peers[curr_id.get()];
for (curr_peers.list.items) |ids| {
const transitive_peer_dep = dependencies[ids.dep_id];
if (transitive_peer_dep.name_hash != peer_dep.name_hash) {
continue;
}
// A transitive peer with the same name has already passed
// through this node
if (!ids.auto_installed) {
// The resolution was found here or above. Choose the same
// peer resolution. No need to mark this node or above.
// TODO: add warning if not satisfies()!
break :resolved_pkg_id .{ ids.pkg_id, false };
}
// It didn't find a matching name and was auto-installed
// from somewhere this peer can't reach. Choose the best
// version. Only mark all parents if resolution is
// different from this transitive peer.
const best_version = resolutions[peer_dep_id];
if (best_version == invalid_package_id) {
break :resolved_pkg_id .{ invalid_package_id, true };
}
if (best_version == ids.pkg_id) {
break :resolved_pkg_id .{ ids.pkg_id, true };
}
// add the remaining parent ids
while (curr_id != .invalid) {
try visited_parent_node_ids.append(curr_id);
curr_id = node_parent_ids[curr_id.get()];
}
break :resolved_pkg_id .{ best_version, true };
}
// TODO: prevent marking workspace and symlink deps with transitive peers
// add to visited parents after searching for a peer resolution.
// if a node resolves a transitive peer, it can still be deduplicated
try visited_parent_node_ids.append(curr_id);
curr_id = node_parent_ids[curr_id.get()];
}
// choose the current best version
break :resolved_pkg_id .{ resolutions[peer_dep_id], true };
};
if (resolved_pkg_id == invalid_package_id) {
// these are optional peers that failed to find any dependency with a matching
// name. they are completely excluded
continue;
}
for (visited_parent_node_ids.items) |visited_parent_id| {
const ctx: Store.Node.TransitivePeer.OrderedArraySetCtx = .{
.string_buf = string_buf,
.pkg_names = pkg_names,
};
const peer: Store.Node.TransitivePeer = .{
.dep_id = peer_dep_id,
.pkg_id = resolved_pkg_id,
.auto_installed = auto_installed,
};
try node_peers[visited_parent_id.get()].insert(lockfile.allocator, peer, &ctx);
}
if (visited_parent_node_ids.items.len != 0) {
// visited parents length == 0 means the node satisfied its own
// peer. don't queue.
node_dependencies[node_id.get()].appendAssumeCapacity(.{ .dep_id = peer_dep_id, .pkg_id = resolved_pkg_id });
try node_queue.writeItem(.{
.parent_id = node_id,
.dep_id = peer_dep_id,
.pkg_id = resolved_pkg_id,
});
}
}
}
if (manager.options.log_level.isVerbose()) {
const full_tree_end = timer.read();
timer.reset();
Output.prettyErrorln("Resolved peers [{f}]", .{bun.fmt.fmtDurationOneDecimal(full_tree_end)});
}
const DedupeInfo = struct {
entry_id: Store.Entry.Id,
dep_id: DependencyID,
peers: Store.OrderedArraySet(Store.Node.TransitivePeer, Store.Node.TransitivePeer.OrderedArraySetCtx),
};
var dedupe: std.AutoHashMapUnmanaged(PackageID, std.ArrayListUnmanaged(DedupeInfo)) = .empty;
defer dedupe.deinit(lockfile.allocator);
var res_fmt_buf: std.array_list.Managed(u8) = .init(lockfile.allocator);
defer res_fmt_buf.deinit();
const nodes_slice = nodes.slice();
const node_pkg_ids = nodes_slice.items(.pkg_id);
const node_dep_ids = nodes_slice.items(.dep_id);
const node_peers: []const Store.Node.Peers = nodes_slice.items(.peers);
const node_nodes = nodes_slice.items(.nodes);
var store: Store.Entry.List = .empty;
const QueuedEntry = struct {
node_id: Store.Node.Id,
entry_parent_id: Store.Entry.Id,
};
var entry_queue: bun.LinearFifo(QueuedEntry, .Dynamic) = .init(lockfile.allocator);
defer entry_queue.deinit();
try entry_queue.writeItem(.{
.node_id = .from(0),
.entry_parent_id = .invalid,
});
var public_hoisted: bun.StringArrayHashMap(void) = .init(manager.allocator);
defer public_hoisted.deinit();
var hidden_hoisted: bun.StringArrayHashMap(void) = .init(manager.allocator);
defer hidden_hoisted.deinit();
// Second pass: Deduplicate nodes when the pkg_id and peer set match an existing entry.
next_entry: while (entry_queue.readItem()) |entry| {
const pkg_id = node_pkg_ids[entry.node_id.get()];
const dedupe_entry = try dedupe.getOrPut(lockfile.allocator, pkg_id);
if (!dedupe_entry.found_existing) {
dedupe_entry.value_ptr.* = .{};
} else {
const curr_peers = node_peers[entry.node_id.get()];
const curr_dep_id = node_dep_ids[entry.node_id.get()];
for (dedupe_entry.value_ptr.items) |info| {
if (info.dep_id == invalid_dependency_id or curr_dep_id == invalid_dependency_id) {
if (info.dep_id != curr_dep_id) {
continue;
}
}
if (info.dep_id != invalid_dependency_id and curr_dep_id != invalid_dependency_id) {
const curr_dep = dependencies[curr_dep_id];
const existing_dep = dependencies[info.dep_id];
if (existing_dep.version.tag == .workspace and curr_dep.version.tag == .workspace) {
if (existing_dep.behavior.isWorkspace() != curr_dep.behavior.isWorkspace()) {
continue;
}
}
}
const eql_ctx: Store.Node.TransitivePeer.OrderedArraySetCtx = .{
.string_buf = string_buf,
.pkg_names = pkg_names,
};
if (info.peers.eql(&curr_peers, &eql_ctx)) {
// dedupe! depend on the already created entry
const entries = store.slice();
const entry_dependencies = entries.items(.dependencies);
const entry_parents = entries.items(.parents);
var parents = &entry_parents[info.entry_id.get()];
if (curr_dep_id != invalid_dependency_id and dependencies[curr_dep_id].behavior.isWorkspace()) {
try parents.append(lockfile.allocator, entry.entry_parent_id);
continue :next_entry;
}
const ctx: Store.Entry.DependenciesOrderedArraySetCtx = .{
.string_buf = string_buf,
.dependencies = dependencies,
};
try entry_dependencies[entry.entry_parent_id.get()].insert(
lockfile.allocator,
.{ .entry_id = info.entry_id, .dep_id = curr_dep_id },
&ctx,
);
try parents.append(lockfile.allocator, entry.entry_parent_id);
continue :next_entry;
}
}
// nothing matched - create a new entry
}
const new_entry_peer_hash: Store.Entry.PeerHash = peer_hash: {
const peers = node_peers[entry.node_id.get()];
if (peers.len() == 0) {
break :peer_hash .none;
}
var hasher = bun.Wyhash11.init(0);
for (peers.slice()) |peer_ids| {
const pkg_name = pkg_names[peer_ids.pkg_id];
hasher.update(pkg_name.slice(string_buf));
const pkg_res = pkg_resolutions[peer_ids.pkg_id];
res_fmt_buf.clearRetainingCapacity();
try res_fmt_buf.writer().print("{f}", .{pkg_res.fmt(string_buf, .posix)});
hasher.update(res_fmt_buf.items);
}
break :peer_hash .from(hasher.final());
};
const new_entry_dep_id = node_dep_ids[entry.node_id.get()];
const new_entry_is_root = new_entry_dep_id == invalid_dependency_id;
const new_entry_is_workspace = !new_entry_is_root and dependencies[new_entry_dep_id].version.tag == .workspace;
const new_entry_dependencies: Store.Entry.Dependencies = if (dedupe_entry.found_existing and new_entry_is_workspace)
.empty
else
try .initCapacity(lockfile.allocator, node_nodes[entry.node_id.get()].items.len);
var new_entry_parents: std.ArrayListUnmanaged(Store.Entry.Id) = try .initCapacity(lockfile.allocator, 1);
new_entry_parents.appendAssumeCapacity(entry.entry_parent_id);
const hoisted = hoisted: {
if (new_entry_dep_id == invalid_dependency_id) {
break :hoisted false;
}
const dep_name = dependencies[new_entry_dep_id].name.slice(string_buf);
const hoist_pattern = manager.options.hoist_pattern orelse {
const hoist_entry = try hidden_hoisted.getOrPut(dep_name);
break :hoisted !hoist_entry.found_existing;
};
if (hoist_pattern.isMatch(dep_name)) {
const hoist_entry = try hidden_hoisted.getOrPut(dep_name);
break :hoisted !hoist_entry.found_existing;
}
break :hoisted false;
};
const new_entry: Store.Entry = .{
.node_id = entry.node_id,
.dependencies = new_entry_dependencies,
.parents = new_entry_parents,
.peer_hash = new_entry_peer_hash,
.hoisted = hoisted,
};
const new_entry_id: Store.Entry.Id = .from(@intCast(store.len));
try store.append(lockfile.allocator, new_entry);
if (entry.entry_parent_id.tryGet()) |entry_parent_id| skip_adding_dependency: {
if (new_entry_dep_id != invalid_dependency_id and dependencies[new_entry_dep_id].behavior.isWorkspace()) {
// skip implicit workspace dependencies on the root.
break :skip_adding_dependency;
}
const entries = store.slice();
const entry_dependencies = entries.items(.dependencies);
const ctx: Store.Entry.DependenciesOrderedArraySetCtx = .{
.string_buf = string_buf,
.dependencies = dependencies,
};
try entry_dependencies[entry_parent_id].insert(
lockfile.allocator,
.{ .entry_id = new_entry_id, .dep_id = new_entry_dep_id },
&ctx,
);
if (new_entry_dep_id != invalid_dependency_id) {
if (entry.entry_parent_id == .root) {
// make sure direct dependencies are not replaced
const dep_name = dependencies[new_entry_dep_id].name.slice(string_buf);
try public_hoisted.put(dep_name, {});
} else {
// transitive dependencies (also direct dependencies of workspaces!)
const dep_name = dependencies[new_entry_dep_id].name.slice(string_buf);
if (manager.options.public_hoist_pattern) |public_hoist_pattern| {
if (public_hoist_pattern.isMatch(dep_name)) {
const hoist_entry = try public_hoisted.getOrPut(dep_name);
if (!hoist_entry.found_existing) {
try entry_dependencies[0].insert(
lockfile.allocator,
.{ .entry_id = new_entry_id, .dep_id = new_entry_dep_id },
&ctx,
);
}
}
}
}
}
}
try dedupe_entry.value_ptr.append(lockfile.allocator, .{
.entry_id = new_entry_id,
.dep_id = new_entry_dep_id,
.peers = node_peers[entry.node_id.get()],
});
for (node_nodes[entry.node_id.get()].items) |node_id| {
try entry_queue.writeItem(.{
.node_id = node_id,
.entry_parent_id = new_entry_id,
});
}
}
if (manager.options.log_level.isVerbose()) {
const dedupe_end = timer.read();
Output.prettyErrorln("Created store [{f}]", .{bun.fmt.fmtDurationOneDecimal(dedupe_end)});
}
break :store .{
.entries = store,
.nodes = nodes,
};
};
const store: Store = try .init(
manager,
install_root_dependencies,
workspace_filters,
packages_to_install,
);
defer store.deinit();
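// note: the ~600 lines of inline tree-building and dedupe logic that previously lived here
// were moved into `Store.init` (see the Store.zig hunk below), so this call is now the single
// entry point for constructing the isolated store.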
// setup node_modules/.bun
const is_new_bun_modules = is_new_bun_modules: {
@@ -676,8 +80,8 @@ pub fn installIsolatedPackages(
}
// 3
for (lockfile.workspace_paths.values()) |workspace_path| {
var workspace_node_modules: bun.AutoRelPath = .from(workspace_path.slice(lockfile.buffers.string_bytes.items));
for (manager.lockfile.workspace_paths.values()) |workspace_path| {
var workspace_node_modules: bun.AutoRelPath = .from(workspace_path.slice(manager.lockfile.buffers.string_bytes.items));
defer workspace_node_modules.deinit();
const basename = workspace_node_modules.basename();
@@ -742,8 +146,8 @@ pub fn installIsolatedPackages(
rename_path.undo(1);
// 5
for (lockfile.workspace_paths.values()) |workspace_path| {
var workspace_node_modules: bun.AutoRelPath = .from(workspace_path.slice(lockfile.buffers.string_bytes.items));
for (manager.lockfile.workspace_paths.values()) |workspace_path| {
var workspace_node_modules: bun.AutoRelPath = .from(workspace_path.slice(manager.lockfile.buffers.string_bytes.items));
defer workspace_node_modules.deinit();
const basename = workspace_node_modules.basename();
@@ -799,6 +203,7 @@ pub fn installIsolatedPackages(
const entry_dependencies = entries.items(.dependencies);
const entry_hoisted = entries.items(.hoisted);
const lockfile = manager.lockfile;
const string_buf = lockfile.buffers.string_bytes.items;
const pkgs = lockfile.packages.slice();
@@ -867,7 +272,7 @@ pub fn installIsolatedPackages(
continue;
},
.root => {
if (dep_id == invalid_dependency_id) {
if (dep_id == install.invalid_dependency_id) {
// .monotonic is okay in this block because the task isn't running on another
// thread.
entry_steps[entry_id.get()].store(.symlink_dependencies, .monotonic);
@@ -1166,6 +571,8 @@ pub fn installIsolatedPackages(
Output.err(err, "failed to install packages", .{});
Global.exit(1);
}
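// block until every queued install task has finished before continuing (added in the
// "wait for completed tasks" commit), presumably so worker tasks cannot outlive the
// buffers they reference.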
manager.thread_pool.waitForAll();
}
if (manager.options.log_level.showProgress()) {
@@ -1235,16 +642,10 @@ const sys = bun.sys;
const Command = bun.cli.Command;
const install = bun.install;
const DependencyID = install.DependencyID;
const PackageID = install.PackageID;
const PackageInstall = install.PackageInstall;
const Resolution = install.Resolution;
const Store = install.Store;
const invalid_dependency_id = install.invalid_dependency_id;
const invalid_package_id = install.invalid_package_id;
const Lockfile = install.Lockfile;
const Tree = Lockfile.Tree;
const PackageManager = install.PackageManager;
const ProgressStrings = PackageManager.ProgressStrings;

View File

@@ -270,7 +270,7 @@ pub const Installer = struct {
const dep = this.lockfile.buffers.dependencies.items[dep_id];
if (dep.behavior.isWorkspace()) {
if (dep.behavior.isWorkspace() or dep.version.tag == .workspace) {
break :state .{ node_id, .skipped };
}
@@ -339,6 +339,8 @@ pub const Installer = struct {
result: Result,
critical_section: bun.safety.CriticalSection = .{},
const Result = union(enum) {
none,
err: Error,
@@ -1123,6 +1125,9 @@ pub const Installer = struct {
pub fn callback(task: *ThreadPool.Task) void {
const this: *Task = @fieldParentPtr("task", task);
this.critical_section.begin();
defer this.critical_section.end();
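// note: `bun.safety.CriticalSection` appears to be a safety-check helper whose
// begin()/end() pair asserts this task body is never entered concurrently or
// re-entered; treated here as a debug-time guard rather than a lock.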
const res = this.run() catch |err| switch (err) {
error.OutOfMemory => bun.outOfMemory(),
};

View File

@@ -5,8 +5,23 @@ const Ids = struct {
pub const Store = struct {
/// Accessed from multiple threads
entries: Entry.List,
nodes: Node.List,
entries: Entry.List = .empty,
nodes: Node.List = .empty,
// allocator used for `entries` and `nodes`
allocator: std.mem.Allocator,
pub fn appendNode(this: *Store, node: Node) OOM!Node.Id {
const node_id: Node.Id = .from(@intCast(this.nodes.len));
try this.nodes.append(this.allocator, node);
return node_id;
}
pub fn appendEntry(this: *Store, entry: Entry) OOM!Entry.Id {
const entry_id: Entry.Id = .from(@intCast(this.entries.len));
try this.entries.append(this.allocator, entry);
return entry_id;
}
const log = Output.scoped(.Store, .visible);
@@ -51,6 +66,718 @@ pub const Store = struct {
pub const Installer = @import("./Installer.zig").Installer;
const NextNode = struct {
parent_id: Node.Id,
dep_id: DependencyID,
pkg_id: PackageID,
// no deinit because each field does not need to
// be deinitialized. see `bun.memory.deinit`
pub const deinit = void;
};
// struct holding up-to-date pointers to the multi array list fields,
// plus some code moved into functions for reuse
const CreateCtx = struct {
store: Store,
allocator: std.mem.Allocator,
// lockfile buffers
string_buf: []const u8,
dependencies: []const Dependency,
resolutions: []const PackageID,
pkg_names: []const String,
pkg_resolutions: []const Resolution,
pkg_name_hashes: []const PackageNameHash,
pkg_dependency_slices: []const DependencySlice,
node_dep_ids: []DependencyID,
node_pkg_ids: []PackageID,
node_parent_ids: []Node.Id,
node_dependencies: []std.ArrayListUnmanaged(Ids),
node_peers: []Node.Peers,
node_nodes: []std.ArrayListUnmanaged(Node.Id),
node_dedupe: std.AutoArrayHashMap(PackageID, Node.Id),
entry_dependencies: []Entry.Dependencies,
entry_parents: []std.ArrayListUnmanaged(Entry.Id),
pub fn init(allocator: std.mem.Allocator, lockfile: *const Lockfile) OOM!@This() {
const pkgs = lockfile.packages.slice();
var ctx: @This() = .{
.store = .{ .allocator = allocator },
.allocator = allocator,
.string_buf = lockfile.buffers.string_bytes.items,
.dependencies = lockfile.buffers.dependencies.items,
.resolutions = lockfile.buffers.resolutions.items,
.pkg_names = pkgs.items(.name),
.pkg_resolutions = pkgs.items(.resolution),
.pkg_name_hashes = pkgs.items(.name_hash),
.pkg_dependency_slices = pkgs.items(.dependencies),
.node_dep_ids = &.{},
.node_pkg_ids = &.{},
.node_parent_ids = &.{},
.node_dependencies = &.{},
.node_peers = &.{},
.node_nodes = &.{},
.node_dedupe = .init(allocator),
.entry_dependencies = &.{},
.entry_parents = &.{},
};
// Both of these will be similar in size to packages.len. Peer dependencies will make them slightly larger.
try ctx.store.nodes.ensureUnusedCapacity(ctx.store.allocator, ctx.pkg_names.len);
try ctx.store.entries.ensureUnusedCapacity(ctx.store.allocator, ctx.pkg_names.len);
return ctx;
}
pub fn deinit(this: *@This()) void {
this.node_dedupe.deinit();
}
const NodeParentIterator = struct {
next_id: Node.Id,
node_parent_ids: []const Node.Id,
pub fn next(this: *@This()) ?Node.Id {
if (this.next_id == .invalid) {
return null;
}
const curr_id = this.next_id;
this.next_id = this.node_parent_ids[curr_id.get()];
return curr_id;
}
};
pub fn iterateNodeParents(this: *const @This(), first_parent_id: Node.Id) NodeParentIterator {
return .{ .next_id = first_parent_id, .node_parent_ids = this.node_parent_ids };
}
const AppendNodeResult = union(enum) {
new_node: Node.Id,
deduplicated,
};
pub fn appendNode(this: *@This(), next_node: NextNode) OOM!AppendNodeResult {
if (this.node_dedupe.get(next_node.pkg_id)) |dedupe_node_id| create_new_node: {
const node_dep = this.dependencies[next_node.dep_id];
const dedupe_dep_id = this.node_dep_ids[dedupe_node_id.get()];
const dedupe_dep = this.dependencies[dedupe_dep_id];
if (dedupe_dep.name_hash != node_dep.name_hash or
dedupe_dep.behavior.workspace != node_dep.behavior.workspace)
{
// create a new node if it's an alias so we don't lose the alias name
break :create_new_node;
}
try this.addNodeToParentNodes(next_node.parent_id, dedupe_node_id);
return .deduplicated;
}
const pkg_deps = this.pkg_dependency_slices[next_node.pkg_id];
const node_id = try this.store.appendNode(.{
.pkg_id = next_node.pkg_id,
.dep_id = next_node.dep_id,
.parent_id = next_node.parent_id,
// capacity is set to the expected size after we
// find the exact dependency count
.nodes = .empty,
.dependencies = try .initCapacity(this.allocator, pkg_deps.len),
});
// update pointers
const nodes = this.store.nodes.slice();
this.node_dep_ids = nodes.items(.dep_id);
this.node_pkg_ids = nodes.items(.pkg_id);
this.node_parent_ids = nodes.items(.parent_id);
this.node_dependencies = nodes.items(.dependencies);
this.node_peers = nodes.items(.peers);
this.node_nodes = nodes.items(.nodes);
return .{ .new_node = node_id };
}
pub fn addNodeToParentNodes(this: *@This(), parent_id: Node.Id, node_id: Node.Id) OOM!void {
this.node_nodes[parent_id.get()].appendAssumeCapacity(node_id);
if (this.node_nodes[parent_id.get()].items.len == this.node_dependencies[parent_id.get()].items.len) {
// we've visited all of the parent's child nodes; see if we can add it to the dedupe map.
try this.maybeAddNodeToDedupeMap(parent_id);
}
}
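// Dedupe-map eligibility (checked in maybeAddNodeToDedupeMap below): the node must not be
// the root, must not be an aliased dependency, and must carry no direct or transitive
// peers. Eligible nodes are recorded once, and later dependencies on the same package are
// pointed at the existing node by appendNode instead of growing the tree.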
pub fn maybeAddNodeToDedupeMap(this: *@This(), node_id: Node.Id) OOM!void {
if (this.node_peers[node_id.get()].list.items.len != 0) {
// only nodes without peers (transitive or direct) are added to the map.
return;
}
const dep_id = this.node_dep_ids[node_id.get()];
if (dep_id == invalid_dependency_id) {
// no need to add the root package
return;
}
const dep = this.dependencies[dep_id];
const pkg_id = this.node_pkg_ids[node_id.get()];
if (dep.name_hash != this.pkg_name_hashes[pkg_id]) {
// don't add to the dedupe map if the dependency name does not match
// the package name. this means it's an alias, and won't be as common
// as a normal dependency on this package.
return;
}
const dedupe = try this.node_dedupe.getOrPut(pkg_id);
if (dedupe.found_existing) {
bun.debugAssert(dep.version.tag == .workspace);
return;
}
dedupe.value_ptr.* = node_id;
}
pub fn appendEntry(this: *@This(), entry: Entry) OOM!Entry.Id {
const entry_id = try this.store.appendEntry(entry);
// update pointers
const entries = this.store.entries.slice();
this.entry_dependencies = entries.items(.dependencies);
this.entry_parents = entries.items(.parents);
return entry_id;
}
};
pub fn init(
manager: *PackageManager,
install_root_dependencies: bool,
workspace_filters: []const WorkspaceFilter,
packages_to_install: ?[]const PackageID,
) OOM!Store {
var timer = std.time.Timer.start() catch unreachable;
var next_node_stack: bun.collections.ArrayListDefault(NextNode) = .init();
defer next_node_stack.deinit();
try next_node_stack.append(.{
.parent_id = .invalid,
.dep_id = invalid_dependency_id,
.pkg_id = 0,
});
var ctx: CreateCtx = try .init(manager.allocator, manager.lockfile);
defer ctx.deinit();
var dep_ids_sort_buf: bun.collections.ArrayListDefault(DependencyID) = .init();
defer dep_ids_sort_buf.deinit();
var peer_dep_ids_buf: bun.collections.ArrayListDefault(DependencyID) = .init();
defer peer_dep_ids_buf.deinit();
var visited_node_ids_buf: std.array_list.Managed(Node.Id) = .init(ctx.allocator);
defer visited_node_ids_buf.deinit();
// First pass: create full dependency tree with resolved peers
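// (illustrative example, not from the diff: if the root depends on `a`, `a` depends on `b`,
// and `b` declares a peer on `a`, this pass builds nodes root -> a -> b, resolves b's peer
// to the `a` already in the tree, and records that choice as a transitive peer on the nodes
// it walked through, so the second pass only merges nodes whose peer sets match)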
next_node: while (next_node_stack.pop()) |next_node| {
check_cycle: {
// check for cycles
var parent_iter = ctx.iterateNodeParents(next_node.parent_id);
while (parent_iter.next()) |parent_id| {
if (ctx.node_pkg_ids[parent_id.get()] != next_node.pkg_id) {
continue;
}
// pkg_id is the same. skip the new node, and add the previously added node
// to parent so it appears in 'node_modules/.bun/parent@version/node_modules'.
const dep_id = ctx.node_dep_ids[parent_id.get()];
if (dep_id == invalid_dependency_id and next_node.dep_id == invalid_dependency_id) {
try ctx.addNodeToParentNodes(next_node.parent_id, parent_id);
continue :next_node;
}
if (dep_id == invalid_dependency_id or next_node.dep_id == invalid_dependency_id) {
// one is the root package, one is a dependency on the root package (it has a valid dep_id)
// create a new node for it.
break :check_cycle;
}
const parent_dep = ctx.dependencies[dep_id];
const node_dep = ctx.dependencies[next_node.dep_id];
// ensure the dependency name is the same before skipping the cycle. if they aren't
// we lose dependency name information for the symlinks
if (parent_dep.name_hash == node_dep.name_hash and
// also ensure workspace self deps are not skipped.
// implicit workspace dep != explicit workspace dep
parent_dep.behavior.workspace == node_dep.behavior.workspace)
{
try ctx.addNodeToParentNodes(next_node.parent_id, parent_id);
continue :next_node;
}
}
}
const node_id = switch (try ctx.appendNode(next_node)) {
.new_node => |id| id,
.deduplicated => continue,
};
const pkg_deps = ctx.pkg_dependency_slices[next_node.pkg_id];
dep_ids_sort_buf.clearRetainingCapacity();
try dep_ids_sort_buf.ensureUnusedCapacity(pkg_deps.len);
for (pkg_deps.begin()..pkg_deps.end()) |_dep_id| {
const dep_id: DependencyID = @intCast(_dep_id);
dep_ids_sort_buf.appendAssumeCapacity(dep_id);
}
// TODO: make this sort in an order that allows peers to be resolved last
// and devDependency handling to match `hoistDependency`
std.sort.pdq(
DependencyID,
dep_ids_sort_buf.items(),
Lockfile.DepSorter{ .lockfile = manager.lockfile },
Lockfile.DepSorter.isLessThan,
);
peer_dep_ids_buf.clearRetainingCapacity();
queue_deps: {
if (packages_to_install) |packages| {
if (node_id == .root) { // TODO: print an error when scanner is actually a dependency of a workspace (we should not support this)
for (dep_ids_sort_buf.items()) |dep_id| {
const pkg_id = ctx.resolutions[dep_id];
if (pkg_id == invalid_package_id) {
continue;
}
for (packages) |package_to_install| {
if (package_to_install == pkg_id) {
ctx.node_dependencies[node_id.get()].appendAssumeCapacity(.{ .dep_id = dep_id, .pkg_id = pkg_id });
try next_node_stack.append(.{
.parent_id = node_id,
.dep_id = dep_id,
.pkg_id = pkg_id,
});
break;
}
}
}
break :queue_deps;
}
}
for (dep_ids_sort_buf.items()) |dep_id| {
if (Tree.isFilteredDependencyOrWorkspace(
dep_id,
next_node.pkg_id,
workspace_filters,
install_root_dependencies,
manager,
manager.lockfile,
)) {
continue;
}
const pkg_id = ctx.resolutions[dep_id];
const dep = ctx.dependencies[dep_id];
// TODO: handle duplicate dependencies. should use logic similar
// to what we have for dev dependencies in `hoistDependency`
if (dep.behavior.isPeer()) {
try peer_dep_ids_buf.append(dep_id);
continue;
}
// simple case:
// - add it as a dependency
// - queue it
ctx.node_dependencies[node_id.get()].appendAssumeCapacity(.{ .dep_id = dep_id, .pkg_id = pkg_id });
try next_node_stack.append(.{
.parent_id = node_id,
.dep_id = dep_id,
.pkg_id = pkg_id,
});
}
}
for (peer_dep_ids_buf.items()) |peer_dep_id| {
const resolved_pkg_id, const auto_installed = resolved_pkg_id: {
// Go through the peer's parents looking for a package with the same name.
// If none is found, use the current best version. Parents visited must have
// the package id for the chosen peer marked as a transitive peer. Nodes
// are deduplicated only if their package id and their transitive peer package
// ids are equal.
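// (`auto_installed` distinguishes a peer satisfied by a package actually present in an
// ancestor (false) from one that fell back to the lockfile's best candidate (true); later
// lookups that hit a transitive-peer marker use the flag to decide whether the recorded
// choice is binding or may be re-resolved.)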
const peer_dep = ctx.dependencies[peer_dep_id];
// TODO: double check this
// Start with the current package. A package
// can satisfy its own peers.
var parent_iter = ctx.iterateNodeParents(node_id);
visited_node_ids_buf.clearRetainingCapacity();
while (parent_iter.next()) |parent_id| {
for (ctx.node_dependencies[parent_id.get()].items) |ids| {
const dep = ctx.dependencies[ids.dep_id];
if (dep.name_hash != peer_dep.name_hash) {
continue;
}
const res = ctx.pkg_resolutions[ids.pkg_id];
if (peer_dep.version.tag != .npm or res.tag != .npm) {
// TODO: print warning for this? we don't have a version
// to compare to say if this satisfies or not.
break :resolved_pkg_id .{ ids.pkg_id, false };
}
const peer_dep_version = peer_dep.version.value.npm.version;
const res_version = res.value.npm.version;
if (!peer_dep_version.satisfies(res_version, ctx.string_buf, ctx.string_buf)) {
// TODO: add warning!
}
break :resolved_pkg_id .{ ids.pkg_id, false };
}
const curr_peers = ctx.node_peers[parent_id.get()];
for (curr_peers.list.items) |ids| {
const transitive_peer_dep = ctx.dependencies[ids.dep_id];
if (transitive_peer_dep.name_hash != peer_dep.name_hash) {
continue;
}
// A transitive peer with the same name has already passed
// through this node
if (!ids.auto_installed) {
// The resolution was found here or above. Choose the same
// peer resolution. No need to mark this node or above.
// TODO: add warning if not satisfies()!
break :resolved_pkg_id .{ ids.pkg_id, false };
}
// It didn't find a matching name and was auto-installed
// from somewhere this peer can't reach. Choose the best
// version. Only mark all parents if resolution is
// different from this transitive peer.
const best_version = ctx.resolutions[peer_dep_id];
if (best_version == invalid_package_id) {
break :resolved_pkg_id .{ invalid_package_id, true };
}
if (best_version == ids.pkg_id) {
break :resolved_pkg_id .{ ids.pkg_id, true };
}
// add the remaining parent ids
try visited_node_ids_buf.append(parent_id);
while (parent_iter.next()) |remaining_parent_id| {
try visited_node_ids_buf.append(remaining_parent_id);
}
break :resolved_pkg_id .{ best_version, true };
}
// TODO: prevent marking workspace and symlink deps with transitive peers
// add to visited parents after searching for a peer resolution.
// if a node resolves a transitive peer, it can still be deduplicated
try visited_node_ids_buf.append(parent_id);
}
// choose the current best version
break :resolved_pkg_id .{ ctx.resolutions[peer_dep_id], true };
};
if (resolved_pkg_id == invalid_package_id) {
// these are optional peers that failed to find any dependency with a matching
// name. they are completely excluded.
continue;
}
for (visited_node_ids_buf.items) |visited_id| {
const insert_ctx: Node.TransitivePeer.OrderedArraySetCtx = .{
.string_buf = ctx.string_buf,
.pkg_names = ctx.pkg_names,
};
const peer: Node.TransitivePeer = .{
.dep_id = peer_dep_id,
.pkg_id = resolved_pkg_id,
.auto_installed = auto_installed,
};
try ctx.node_peers[visited_id.get()].insert(ctx.allocator, peer, &insert_ctx);
}
if (visited_node_ids_buf.items.len != 0) {
// visited parents length == 0 means the node satisfied its own
// peer. don't queue
ctx.node_dependencies[node_id.get()].appendAssumeCapacity(.{ .dep_id = peer_dep_id, .pkg_id = resolved_pkg_id });
try next_node_stack.append(.{
.parent_id = node_id,
.dep_id = peer_dep_id,
.pkg_id = resolved_pkg_id,
});
}
}
const node_dependencies_count = ctx.node_dependencies[node_id.get()].items.len;
try ctx.node_nodes[node_id.get()].ensureTotalCapacityPrecise(ctx.allocator, node_dependencies_count);
if (node_dependencies_count == 0) {
// it's a leaf. we can try adding it to the dedupe map now
try ctx.maybeAddNodeToDedupeMap(node_id);
}
if (next_node.parent_id != .invalid) {
try ctx.addNodeToParentNodes(next_node.parent_id, node_id);
}
}
if (manager.options.log_level.isVerbose()) {
const full_tree_end = timer.read();
timer.reset();
Output.prettyErrorln("Resolved peers: {d} nodes [{f}]", .{
ctx.store.nodes.len,
bun.fmt.fmtDurationOneDecimal(full_tree_end),
});
}
const EntryDedupe = struct {
entry_id: Entry.Id,
dep_id: DependencyID,
peers: OrderedArraySet(Node.TransitivePeer, Node.TransitivePeer.OrderedArraySetCtx),
};
var entry_dedupe: std.AutoArrayHashMap(PackageID, bun.collections.ArrayListDefault(EntryDedupe)) = .init(ctx.allocator);
defer entry_dedupe.deinit();
var res_fmt_buf: bun.collections.ArrayListDefault(u8) = .init();
defer res_fmt_buf.deinit();
const NextEntry = struct {
node_id: Node.Id,
parent_id: Entry.Id,
};
var next_entry_queue: bun.LinearFifo(NextEntry, .Dynamic) = .init(ctx.allocator);
defer next_entry_queue.deinit();
try next_entry_queue.writeItem(.{
.node_id = .from(0),
.parent_id = .invalid,
});
var public_hoisted: bun.StringArrayHashMap(void) = .init(ctx.allocator);
defer public_hoisted.deinit();
var hidden_hoisted: bun.StringArrayHashMap(void) = .init(ctx.allocator);
defer hidden_hoisted.deinit();
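// Two hoisting sets: `hidden_hoisted` records the first entry whose name matches
// `hoist_pattern` (its `hoisted` flag is set below, presumably so the linker exposes it in
// the shared store-level node_modules), while `public_hoisted` records names exposed in the
// project root's node_modules, either as direct root dependencies or via
// `public_hoist_pattern`. This mirrors pnpm's hoist-pattern / public-hoist-pattern split.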
// Second pass: Deduplicate nodes when the pkg_id and peer set match an existing entry.
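// (illustrative: two nodes for the same `c@1.0.0` collapse into a single store entry only
// when their transitive peer sets compare equal via `info.peers.eql`; if one resolved a
// peer to `d@1.0.0` and the other to `d@2.0.0`, they stay separate entries)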
next_entry: while (next_entry_queue.readItem()) |next_entry| {
const pkg_id = ctx.node_pkg_ids[next_entry.node_id.get()];
const dep_id = ctx.node_dep_ids[next_entry.node_id.get()];
const dedupe = try entry_dedupe.getOrPut(pkg_id);
if (!dedupe.found_existing) {
dedupe.value_ptr.* = .init();
} else {
const peers = ctx.node_peers[next_entry.node_id.get()];
for (dedupe.value_ptr.items()) |info| {
if (info.dep_id == invalid_dependency_id or dep_id == invalid_dependency_id) {
if (info.dep_id != dep_id) {
continue;
}
}
if (info.dep_id != invalid_dependency_id and dep_id != invalid_dependency_id) {
const curr_dep = ctx.dependencies[dep_id];
const existing_dep = ctx.dependencies[info.dep_id];
if (existing_dep.version.tag == .workspace and curr_dep.version.tag == .workspace) {
if (existing_dep.behavior.isWorkspace() != curr_dep.behavior.isWorkspace()) {
continue;
}
}
}
const eql_ctx: Node.TransitivePeer.OrderedArraySetCtx = .{
.string_buf = ctx.string_buf,
.pkg_names = ctx.pkg_names,
};
if (info.peers.eql(&peers, &eql_ctx)) {
// dedupe! depend on the already created entry
var parents = &ctx.entry_parents[info.entry_id.get()];
if (dep_id != invalid_dependency_id and ctx.dependencies[dep_id].behavior.isWorkspace()) {
try parents.append(ctx.allocator, next_entry.parent_id);
continue :next_entry;
}
const insert_ctx: Entry.DependenciesOrderedArraySetCtx = .{
.string_buf = ctx.string_buf,
.dependencies = ctx.dependencies,
};
try ctx.entry_dependencies[next_entry.parent_id.get()].insert(
ctx.allocator,
.{ .entry_id = info.entry_id, .dep_id = dep_id },
&insert_ctx,
);
try parents.append(ctx.allocator, next_entry.parent_id);
continue :next_entry;
}
}
// nothing matched - create a new entry
}
const entry_id = try ctx.appendEntry(.{
.node_id = next_entry.node_id,
.dependencies = dependencies: {
if (dedupe.found_existing and dep_id != invalid_dependency_id and ctx.dependencies[dep_id].version.tag == .workspace) {
break :dependencies .empty;
}
break :dependencies try .initCapacity(ctx.allocator, ctx.node_nodes[next_entry.node_id.get()].items.len);
},
.parents = parents: {
var parents: std.ArrayListUnmanaged(Entry.Id) = try .initCapacity(ctx.allocator, 1);
parents.appendAssumeCapacity(next_entry.parent_id);
break :parents parents;
},
.peer_hash = peer_hash: {
const peers = ctx.node_peers[next_entry.node_id.get()];
if (peers.len() == 0) {
break :peer_hash .none;
}
var hasher = bun.Wyhash11.init(0);
for (peers.slice()) |peer_ids| {
const pkg_name = ctx.pkg_names[peer_ids.pkg_id];
hasher.update(pkg_name.slice(ctx.string_buf));
const pkg_res = ctx.pkg_resolutions[peer_ids.pkg_id];
res_fmt_buf.clearRetainingCapacity();
try res_fmt_buf.writer().print("{f}", .{pkg_res.fmt(ctx.string_buf, .posix)});
hasher.update(res_fmt_buf.items());
}
break :peer_hash .from(hasher.final());
},
.hoisted = hoisted: {
if (dep_id == invalid_dependency_id) {
break :hoisted false;
}
const dep_name = ctx.dependencies[dep_id].name.slice(ctx.string_buf);
const hoist_pattern = manager.options.hoist_pattern orelse {
const hoist_entry = try hidden_hoisted.getOrPut(dep_name);
break :hoisted !hoist_entry.found_existing;
};
if (hoist_pattern.isMatch(dep_name)) {
const hoist_entry = try hidden_hoisted.getOrPut(dep_name);
break :hoisted !hoist_entry.found_existing;
}
break :hoisted false;
},
});
if (next_entry.parent_id != .invalid) skip_adding_dependency: {
if (dep_id != invalid_dependency_id and ctx.dependencies[dep_id].behavior.isWorkspace()) {
// skip implicit workspace dependencies on the root.
break :skip_adding_dependency;
}
const insert_ctx: Entry.DependenciesOrderedArraySetCtx = .{
.string_buf = ctx.string_buf,
.dependencies = ctx.dependencies,
};
try ctx.entry_dependencies[next_entry.parent_id.get()].insert(
ctx.allocator,
.{ .entry_id = entry_id, .dep_id = dep_id },
&insert_ctx,
);
if (dep_id == invalid_dependency_id) {
break :skip_adding_dependency;
}
const dep_name = ctx.dependencies[dep_id].name.slice(ctx.string_buf);
if (next_entry.parent_id == .root) {
// make sure direct dependencies are not replaced
try public_hoisted.put(dep_name, {});
} else {
// transitive dependencies (including direct dependencies of workspaces!)
const public_hoist_pattern = manager.options.public_hoist_pattern orelse {
break :skip_adding_dependency;
};
if (!public_hoist_pattern.isMatch(dep_name)) {
break :skip_adding_dependency;
}
const hoist_entry = try public_hoisted.getOrPut(dep_name);
if (hoist_entry.found_existing) {
break :skip_adding_dependency;
}
try ctx.entry_dependencies[0].insert(
ctx.allocator,
.{ .entry_id = entry_id, .dep_id = dep_id },
&insert_ctx,
);
}
}
try dedupe.value_ptr.append(.{
.entry_id = entry_id,
.dep_id = dep_id,
.peers = ctx.node_peers[next_entry.node_id.get()],
});
for (ctx.node_nodes[next_entry.node_id.get()].items) |node_id| {
try next_entry_queue.writeItem(.{
.node_id = node_id,
.parent_id = entry_id,
});
}
}
if (manager.options.log_level.isVerbose()) {
const dedupe_end = timer.read();
Output.prettyErrorln("Created store: {d} entries [{f}]", .{
ctx.store.entries.len,
bun.fmt.fmtDurationOneDecimal(dedupe_end),
});
}
return ctx.store;
}
pub fn deinit(this: *const Store) void {
var nodes = this.nodes;
nodes.deinit(this.allocator);
var entries = this.entries;
entries.deinit(this.allocator);
}
/// Called from multiple threads. `parent_dedupe` should not be shared between threads.
pub fn isCycle(this: *const Store, id: Entry.Id, maybe_parent_id: Entry.Id, parent_dedupe: *std.AutoArrayHashMap(Entry.Id, void)) bool {
var i: usize = 0;
@@ -555,7 +1282,15 @@ const install = bun.install;
const Dependency = install.Dependency;
const DependencyID = install.DependencyID;
const PackageID = install.PackageID;
const PackageNameHash = install.PackageNameHash;
const Resolution = install.Resolution;
const invalid_dependency_id = install.invalid_dependency_id;
const invalid_package_id = install.invalid_package_id;
const Lockfile = install.Lockfile;
const DependencySlice = Lockfile.DependencySlice;
const Package = Lockfile.Package;
const Tree = Lockfile.Tree;
const PackageManager = install.PackageManager;
const WorkspaceFilter = PackageManager.WorkspaceFilter;

View File

@@ -1,7 +1,7 @@
import { file, spawn, write } from "bun";
import { afterAll, beforeAll, describe, expect, test } from "bun:test";
import { existsSync, lstatSync, readlinkSync } from "fs";
import { mkdir, readlink, rm, symlink } from "fs/promises";
import { exists, mkdir, readlink, rm, symlink } from "fs/promises";
import { VerdaccioRegistry, bunEnv, bunExe, readdirSorted, runBunInstall } from "harness";
import { join } from "path";
@@ -344,9 +344,7 @@ test("can install folder dependencies on root package", async () => {
});
describe("isolated workspaces", () => {
test("basic", async () => {
const { packageJson, packageDir } = await registry.createTestDir({ bunfigOpts: { linker: "isolated" } });
async function createWorkspace(packageJson, packageDir) {
await Promise.all([
write(
packageJson,
@@ -383,6 +381,11 @@ describe("isolated workspaces", () => {
}),
),
]);
}
test("basic", async () => {
const { packageJson, packageDir } = await registry.createTestDir({ bunfigOpts: { linker: "isolated" } });
await createWorkspace(packageJson, packageDir);
await runBunInstall(bunEnv, packageDir);
@@ -417,6 +420,86 @@ describe("isolated workspaces", () => {
});
});
test("--filter only includes matched workspaces and transitively workspaces", async () => {
const { packageJson, packageDir } = await registry.createTestDir({ bunfigOpts: { linker: "isolated" } });
await createWorkspace(packageJson, packageDir);
let { exited } = spawn({
cmd: [bunExe(), "install", "--filter", "test-pkg-workspaces"],
cwd: packageDir,
stdout: "ignore",
stderr: "ignore",
env: bunEnv,
});
expect(await exited).toBe(0);
// only the root workspace should end up with an installed node_modules
expect(
await Promise.all([
readdirSorted(join(packageDir, "node_modules")),
readdirSorted(join(packageDir, "node_modules", ".bun")),
exists(join(packageDir, "pkg-1", "node_modules")),
exists(join(packageDir, "pkg-2", "node_modules")),
]),
).toEqual([[".bun", "no-deps"], ["no-deps@1.0.0", "node_modules"], false, false]);
await rm(join(packageDir, "node_modules"), { recursive: true });
// Should install pkg-1, and also pkg-2 because pkg-1
// depends on pkg-2.
({ exited } = spawn({
cmd: [bunExe(), "install", "--filter", "pkg-1"],
cwd: packageDir,
env: bunEnv,
stdout: "ignore",
stderr: "ignore",
}));
expect(await exited).toBe(0);
expect(
await Promise.all([
readdirSorted(join(packageDir, "node_modules")),
readdirSorted(join(packageDir, "node_modules", ".bun")),
readdirSorted(join(packageDir, "pkg-1", "node_modules")),
readdirSorted(join(packageDir, "pkg-2", "node_modules")),
]),
).toEqual([
[".bun"],
["@types+is-number@1.0.0", "a-dep-b@1.0.0", "a-dep@1.0.1", "b-dep-a@1.0.0", "node_modules"],
["@types", "a-dep", "pkg-2"],
["b-dep-a"],
]);
await Promise.all([
rm(join(packageDir, "node_modules"), { recursive: true }),
rm(join(packageDir, "pkg-1", "node_modules"), { recursive: true }),
rm(join(packageDir, "pkg-2", "node_modules"), { recursive: true }),
]);
// only pkg-2 should be installed
({ exited } = spawn({
cmd: [bunExe(), "install", "--filter", "pkg-2"],
cwd: packageDir,
env: bunEnv,
stdout: "ignore",
stderr: "ignore",
}));
expect(await exited).toBe(0);
expect(
await Promise.all([
readdirSorted(join(packageDir, "node_modules")),
readdirSorted(join(packageDir, "node_modules", ".bun")),
exists(join(packageDir, "pkg-1", "node_modules")),
readdirSorted(join(packageDir, "pkg-2", "node_modules")),
]),
).toEqual([[".bun"], ["a-dep-b@1.0.0", "b-dep-a@1.0.0", "node_modules"], false, ["b-dep-a"]]);
});
test("workspace self dependencies create symlinks", async () => {
const { packageDir } = await registry.createTestDir({
bunfigOpts: { linker: "isolated" },
@@ -595,7 +678,6 @@ describe("optional peers", () => {
}
await checkInstall();
await checkInstall();
});
});