mirror of https://github.com/oven-sh/bun
synced 2026-03-01 13:01:06 +01:00

Compare commits: claude/fix...dylan/fast
31 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 55e03cbb21 |  |
|  | dc31aa1c8a |  |
|  | 68d8bb5ce5 |  |
|  | 3c5b3d3547 |  |
|  | 977506c329 |  |
|  | f06119ad0c |  |
|  | 7ef4b1be38 |  |
|  | f931515906 |  |
|  | fabc0523c8 |  |
|  | ea212ca159 |  |
|  | 1034fc922c |  |
|  | 6800f0b8ec |  |
|  | e84dcdac25 |  |
|  | 13a22d478e |  |
|  | 78b859d76e |  |
|  | 1a7821ff73 |  |
|  | 81440bf024 |  |
|  | 608b2c866e |  |
|  | a2314ed85d |  |
|  | ed8128a4e4 |  |
|  | 66df05c81e |  |
|  | 86b3528954 |  |
|  | 6c72d90b2b |  |
|  | 6edbcf1efb |  |
|  | d420acc384 |  |
|  | 8acbe57b47 |  |
|  | 4c75664c3b |  |
|  | 98d086b405 |  |
|  | af0a6992e9 |  |
|  | 1d25370b59 |  |
|  | cfd381e829 |  |
@@ -4,7 +4,7 @@ register_repository(
   REPOSITORY
     cloudflare/lol-html
   COMMIT
-    e9e16dca48dd4a8ffbc77642bc4be60407585f11
+    e3aa54798602dd27250fafde1b5a66f080046252
 )

 set(LOLHTML_CWD ${VENDOR_PATH}/lolhtml/c-api)
@@ -515,6 +515,8 @@ pub const TransformTask = struct {
         .path = source.path,
         .virtual_source = &source,
         .replace_exports = this.replace_exports,
+        .experimental_decorators = if (this.tsconfig) |ts| ts.experimental_decorators else false,
+        .emit_decorator_metadata = if (this.tsconfig) |ts| ts.emit_decorator_metadata else false,
     };

     const parse_result = this.transpiler.parse(parse_options, null) orelse {
@@ -584,9 +586,8 @@ pub const TransformTask = struct {
         this.log.deinit();
         this.input_code.deinitAndUnprotect();
         this.output_code.deref();
-        if (this.tsconfig) |tsconfig| {
-            tsconfig.deinit();
-        }
+        // tsconfig is owned by JSTranspiler, not by TransformTask.
+        // Do not free it here — JSTranspiler.deinit handles it.
         this.js_instance.deref();
         bun.destroy(this);
     }
@@ -660,6 +661,9 @@ pub fn constructor(globalThis: *jsc.JSGlobalObject, callframe: *jsc.CallFrame) b
     });
     errdefer {
         this.config.log.deinit();
+        if (this.config.tsconfig) |tsconfig| {
+            tsconfig.deinit();
+        }
         this.arena.deinit();
         this.ref_count.clearWithoutDestructor();
         bun.destroy(this);
@@ -744,6 +748,9 @@ pub fn deinit(this: *JSTranspiler) void {
         this.buffer_writer.?.buffer.deinit();
     }

+    if (this.config.tsconfig) |tsconfig| {
+        tsconfig.deinit();
+    }
     this.arena.deinit();
     bun.destroy(this);
 }
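
A standalone sketch, not part of the diff, of the ownership rule these three hunks enforce: exactly one owner deinitializes the shared resource, both on the constructor's error path (via errdefer) and in the owner's deinit, while borrowers never free it. The Config/Owner types here are hypothetical stand-ins, not bun's real ones:

const std = @import("std");

const Config = struct {
    data: []u8,
    fn deinit(self: *Config, gpa: std.mem.Allocator) void {
        gpa.free(self.data);
    }
};

const Owner = struct {
    config: ?Config,
    fn init(gpa: std.mem.Allocator) !Owner {
        var config: Config = .{ .data = try gpa.alloc(u8, 16) };
        errdefer config.deinit(gpa); // cleaned up only if a later step fails
        // ... further fallible setup would go here ...
        return .{ .config = config };
    }
    fn deinit(self: *Owner, gpa: std.mem.Allocator) void {
        // The owner frees the config exactly once; borrowers must not.
        if (self.config) |*config| config.deinit(gpa);
    }
};

test "single owner frees the config" {
    var owner = try Owner.init(std.testing.allocator);
    defer owner.deinit(std.testing.allocator);
}
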
@@ -806,7 +813,8 @@ fn getParseResult(this: *JSTranspiler, allocator: std.mem.Allocator, code: []con
         .virtual_source = source,
         .replace_exports = this.config.runtime.replace_exports,
         .macro_js_ctx = macro_js_ctx,
-        // .allocator = this.
+        .experimental_decorators = if (this.config.tsconfig) |ts| ts.experimental_decorators else false,
+        .emit_decorator_metadata = if (this.config.tsconfig) |ts| ts.emit_decorator_metadata else false,
     };

     return this.transpiler.parse(parse_options, null);
@@ -287,7 +287,7 @@ pub const UnicodeRange = struct {
         if (digit < 10) return digit;
         // Force the 6th bit to be set to ensure ascii is lower case.
         // digit = (@as(u32, b) | 0b10_0000).wrapping_sub('a' as u32).saturating_add(10);
-        digit = (@as(u32, b) | 0b10_0000) -% (@as(u32, 'a') +% 10);
+        digit = ((@as(u32, b) | 0b10_0000) -% @as(u32, 'a')) +| 10;
        return if (digit < 16) digit else null;
    }
};
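
A standalone sketch of the fix above, not part of the diff. The old expression folded the Rust original's `.wrapping_sub('a').saturating_add(10)` into one wrapping subtraction of `('a' + 10)`, which computes (b - 'a') - 10 rather than (b - 'a') + 10, so 'a'..'f' wrapped out of range and parsed as null. The saturating `+|` also matters: for '`' (one below 'a'), the wrapping subtraction yields 0xFFFFFFFF, and a plain wrapping +10 would land back at 9, a false positive:

const std = @import("std");

fn hexDigit(b: u8) ?u32 {
    var digit = @as(u32, b) -% '0';
    if (digit < 10) return digit;
    // Force the 6th bit to be set to ensure ascii is lower case.
    digit = ((@as(u32, b) | 0b10_0000) -% @as(u32, 'a')) +| 10;
    return if (digit < 16) digit else null;
}

test "hex digits" {
    try std.testing.expectEqual(@as(?u32, 7), hexDigit('7'));
    try std.testing.expectEqual(@as(?u32, 10), hexDigit('a'));
    try std.testing.expectEqual(@as(?u32, 15), hexDigit('F'));
    try std.testing.expectEqual(@as(?u32, null), hexDigit('g'));
    try std.testing.expectEqual(@as(?u32, null), hexDigit('`')); // saturation keeps this out of range
}
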
@@ -696,7 +696,7 @@ pub const FontFaceDeclarationParser = struct {
                 return .{ .result = .{ .font_stretch = c } };
             }
         }
-    } else if (bun.strings.eqlCaseInsensitiveASCIIICheckLength(name, "unicode-renage")) {
+    } else if (bun.strings.eqlCaseInsensitiveASCIIICheckLength(name, "unicode-range")) {
         if (input.parseList(UnicodeRange, UnicodeRange.parse).asValue()) |c| {
             if (input.expectExhausted().isOk()) {
                 return .{ .result = .{ .unicode_range = c } };
@@ -31,9 +31,6 @@ pub const enable_keepalive = true;

 pub const atomic_file_watcher = env.isLinux;

-// This change didn't seem to make a meaningful difference in microbenchmarks
-pub const latin1_is_now_ascii = false;
-
 pub const http_buffer_pooling = true;

 pub const disable_lolhtml = false;
@@ -584,7 +584,13 @@ pub fn installWithManager(
        try waitForEverythingExceptPeers(manager);
    }

    if (manager.peer_dependencies.readableLength() > 0) {
        // Resolving a peer dep can create a NEW package whose own peer deps
        // get re-queued to `peer_dependencies` during `drainDependencyList`.
        // When all manifests are cached (synchronous resolution), no I/O tasks
        // are spawned, so `pendingTaskCount() == 0`. We must drain the peer
        // queue iteratively here — entering the event loop (`waitForPeers`)
        // with zero pending I/O would block forever.
        while (manager.peer_dependencies.readableLength() > 0) {
            try manager.processPeerDependencyList();
            manager.drainDependencyList();
        }
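
A standalone sketch of the drain pattern in the hunk above, not part of the diff: processing one item can enqueue more (a resolved peer's own peers get re-queued), and with no pending I/O an event-loop wait would block forever, so the queue is drained iteratively until it stays empty. This assumes the `std.fifo.LinearFifo` API that bun's `bun.LinearFifo` mirrors:

const std = @import("std");

test "drain a self-refilling queue" {
    var queue = std.fifo.LinearFifo(u32, .Dynamic).init(std.testing.allocator);
    defer queue.deinit();

    try queue.writeItem(3);
    var processed: u32 = 0;
    while (queue.readableLength() > 0) {
        const item = queue.readItem().?;
        processed += 1;
        // "Resolving" an item may queue follow-up work, like a newly
        // created package whose own peer deps must also be processed.
        if (item > 0) try queue.writeItem(item - 1);
    }
    try std.testing.expectEqual(@as(u32, 4), processed);
}
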
@@ -10,609 +10,13 @@ pub fn installIsolatedPackages(
) OOM!PackageInstall.Summary {
    bun.analytics.Features.isolated_bun_install += 1;

    const lockfile = manager.lockfile;

    const store: Store = store: {
        var timer = std.time.Timer.start() catch unreachable;
        const pkgs = lockfile.packages.slice();
        const pkg_dependency_slices = pkgs.items(.dependencies);
        const pkg_resolutions = pkgs.items(.resolution);
        const pkg_names = pkgs.items(.name);

        const resolutions = lockfile.buffers.resolutions.items;
        const dependencies = lockfile.buffers.dependencies.items;
        const string_buf = lockfile.buffers.string_bytes.items;

        var nodes: Store.Node.List = .empty;

        const QueuedNode = struct {
            parent_id: Store.Node.Id,
            dep_id: DependencyID,
            pkg_id: PackageID,
        };

        var node_queue: bun.LinearFifo(QueuedNode, .Dynamic) = .init(lockfile.allocator);
        defer node_queue.deinit();

        try node_queue.writeItem(.{
            .parent_id = .invalid,
            .dep_id = invalid_dependency_id,
            .pkg_id = 0,
        });

        var dep_ids_sort_buf: std.ArrayListUnmanaged(DependencyID) = .empty;
        defer dep_ids_sort_buf.deinit(lockfile.allocator);

        // Used by leaves and linked dependencies. They can be deduplicated early
        // because peers won't change them.
        //
        // In the pnpm repo without this map: 772,471 nodes
        // and with this map: 314,022 nodes
        var early_dedupe: std.AutoHashMap(PackageID, Store.Node.Id) = .init(lockfile.allocator);
        defer early_dedupe.deinit();

        var peer_dep_ids: std.array_list.Managed(DependencyID) = .init(lockfile.allocator);
        defer peer_dep_ids.deinit();

        var visited_parent_node_ids: std.array_list.Managed(Store.Node.Id) = .init(lockfile.allocator);
        defer visited_parent_node_ids.deinit();

        // First pass: create full dependency tree with resolved peers
        next_node: while (node_queue.readItem()) |entry| {
            check_cycle: {
                // check for cycles
                const nodes_slice = nodes.slice();
                const node_pkg_ids = nodes_slice.items(.pkg_id);
                const node_dep_ids = nodes_slice.items(.dep_id);
                const node_parent_ids = nodes_slice.items(.parent_id);
                const node_nodes = nodes_slice.items(.nodes);

                var curr_id = entry.parent_id;
                while (curr_id != .invalid) {
                    if (node_pkg_ids[curr_id.get()] == entry.pkg_id) {
                        // skip the new node, and add the previously added node to parent so it appears in
                        // 'node_modules/.bun/parent@version/node_modules'.

                        const dep_id = node_dep_ids[curr_id.get()];
                        if (dep_id == invalid_dependency_id and entry.dep_id == invalid_dependency_id) {
                            node_nodes[entry.parent_id.get()].appendAssumeCapacity(curr_id);
                            continue :next_node;
                        }

                        if (dep_id == invalid_dependency_id or entry.dep_id == invalid_dependency_id) {
                            // one is the root package, one is a dependency on the root package (it has a valid dep_id)
                            // create a new node for it.
                            break :check_cycle;
                        }

                        const curr_dep = dependencies[dep_id];
                        const entry_dep = dependencies[entry.dep_id];

                        // ensure the dependency name is the same before skipping the cycle. if they aren't
                        // we lose dependency name information for the symlinks
                        if (curr_dep.name_hash == entry_dep.name_hash and
                            // also ensure workspace self deps are not skipped.
                            // implicit workspace dep != explicit workspace dep
                            curr_dep.behavior.workspace == entry_dep.behavior.workspace)
                        {
                            node_nodes[entry.parent_id.get()].appendAssumeCapacity(curr_id);
                            continue :next_node;
                        }
                    }
                    curr_id = node_parent_ids[curr_id.get()];
                }
            }

            const node_id: Store.Node.Id = .from(@intCast(nodes.len));
            const pkg_deps = pkg_dependency_slices[entry.pkg_id];

            // for skipping dependencies of workspace packages and the root package. the dependencies
            // of these packages should only be pulled in once, but we might need to create more than
            // one entry if there's multiple dependencies on the workspace or root package.
            var skip_dependencies = entry.pkg_id == 0 and entry.dep_id != invalid_dependency_id;

            if (entry.dep_id != invalid_dependency_id) {
                const entry_dep = dependencies[entry.dep_id];
                if (pkg_deps.len == 0 or entry_dep.version.tag == .workspace) dont_dedupe: {
                    const dedupe_entry = try early_dedupe.getOrPut(entry.pkg_id);
                    if (dedupe_entry.found_existing) {
                        const dedupe_node_id = dedupe_entry.value_ptr.*;

                        const nodes_slice = nodes.slice();
                        const node_nodes = nodes_slice.items(.nodes);
                        const node_dep_ids = nodes_slice.items(.dep_id);

                        const dedupe_dep_id = node_dep_ids[dedupe_node_id.get()];
                        if (dedupe_dep_id == invalid_dependency_id) {
                            break :dont_dedupe;
                        }
                        const dedupe_dep = dependencies[dedupe_dep_id];

                        if (dedupe_dep.name_hash != entry_dep.name_hash) {
                            break :dont_dedupe;
                        }

                        if (dedupe_dep.version.tag == .workspace and entry_dep.version.tag == .workspace) {
                            if (dedupe_dep.behavior.isWorkspace() != entry_dep.behavior.isWorkspace()) {
                                // only attach the dependencies to one of the workspaces
                                skip_dependencies = true;
                                break :dont_dedupe;
                            }
                        }

                        node_nodes[entry.parent_id.get()].appendAssumeCapacity(dedupe_node_id);
                        continue;
                    }

                    dedupe_entry.value_ptr.* = node_id;
                }
            }

            try nodes.append(lockfile.allocator, .{
                .pkg_id = entry.pkg_id,
                .dep_id = entry.dep_id,
                .parent_id = entry.parent_id,
                .nodes = if (skip_dependencies) .empty else try .initCapacity(lockfile.allocator, pkg_deps.len),
                .dependencies = if (skip_dependencies) .empty else try .initCapacity(lockfile.allocator, pkg_deps.len),
            });

            const nodes_slice = nodes.slice();
            const node_parent_ids = nodes_slice.items(.parent_id);
            const node_dependencies = nodes_slice.items(.dependencies);
            const node_peers = nodes_slice.items(.peers);
            const node_nodes = nodes_slice.items(.nodes);

            if (entry.parent_id.tryGet()) |parent_id| {
                node_nodes[parent_id].appendAssumeCapacity(node_id);
            }

            if (skip_dependencies) {
                continue;
            }

            dep_ids_sort_buf.clearRetainingCapacity();
            try dep_ids_sort_buf.ensureUnusedCapacity(lockfile.allocator, pkg_deps.len);
            for (pkg_deps.begin()..pkg_deps.end()) |_dep_id| {
                const dep_id: DependencyID = @intCast(_dep_id);
                dep_ids_sort_buf.appendAssumeCapacity(dep_id);
            }

            // TODO: make this sort in an order that allows peers to be resolved last
            // and devDependency handling to match `hoistDependency`
            std.sort.pdq(
                DependencyID,
                dep_ids_sort_buf.items,
                Lockfile.DepSorter{
                    .lockfile = lockfile,
                },
                Lockfile.DepSorter.isLessThan,
            );

            peer_dep_ids.clearRetainingCapacity();

            queue_deps: {
                if (packages_to_install) |packages| {
                    if (node_id == .root) { // TODO: print an error when scanner is actually a dependency of a workspace (we should not support this)
                        for (dep_ids_sort_buf.items) |dep_id| {
                            const pkg_id = resolutions[dep_id];
                            if (pkg_id == invalid_package_id) {
                                continue;
                            }

                            for (packages) |package_to_install| {
                                if (package_to_install == pkg_id) {
                                    node_dependencies[node_id.get()].appendAssumeCapacity(.{ .dep_id = dep_id, .pkg_id = pkg_id });
                                    try node_queue.writeItem(.{
                                        .parent_id = node_id,
                                        .dep_id = dep_id,
                                        .pkg_id = pkg_id,
                                    });
                                    break;
                                }
                            }
                        }
                        break :queue_deps;
                    }
                }

                for (dep_ids_sort_buf.items) |dep_id| {
                    if (Tree.isFilteredDependencyOrWorkspace(
                        dep_id,
                        entry.pkg_id,
                        workspace_filters,
                        install_root_dependencies,
                        manager,
                        lockfile,
                    )) {
                        continue;
                    }

                    const pkg_id = resolutions[dep_id];
                    const dep = dependencies[dep_id];

                    // TODO: handle duplicate dependencies. should be similar logic
                    // like we have for dev dependencies in `hoistDependency`

                    if (!dep.behavior.isPeer()) {
                        // simple case:
                        // - add it as a dependency
                        // - queue it
                        node_dependencies[node_id.get()].appendAssumeCapacity(.{ .dep_id = dep_id, .pkg_id = pkg_id });
                        try node_queue.writeItem(.{
                            .parent_id = node_id,
                            .dep_id = dep_id,
                            .pkg_id = pkg_id,
                        });
                        continue;
                    }

                    try peer_dep_ids.append(dep_id);
                }
            }

            for (peer_dep_ids.items) |peer_dep_id| {
                const resolved_pkg_id, const auto_installed = resolved_pkg_id: {

                    // Go through the peers parents looking for a package with the same name.
                    // If none is found, use current best version. Parents visited must have
                    // the package id for the chosen peer marked as a transitive peer. Nodes
                    // are deduplicated only if their package id and their transitive peer package
                    // ids are equal.
                    const peer_dep = dependencies[peer_dep_id];

                    // TODO: double check this
                    // Start with the current package. A package
                    // can satisfy its own peers.
                    var curr_id = node_id;

                    visited_parent_node_ids.clearRetainingCapacity();
                    while (curr_id != .invalid) {
                        for (node_dependencies[curr_id.get()].items) |ids| {
                            const dep = dependencies[ids.dep_id];

                            if (dep.name_hash != peer_dep.name_hash) {
                                continue;
                            }

                            const res = pkg_resolutions[ids.pkg_id];

                            if (peer_dep.version.tag != .npm or res.tag != .npm) {
                                // TODO: print warning for this? we don't have a version
                                // to compare to say if this satisfies or not.
                                break :resolved_pkg_id .{ ids.pkg_id, false };
                            }

                            const peer_dep_version = peer_dep.version.value.npm.version;
                            const res_version = res.value.npm.version;

                            if (!peer_dep_version.satisfies(res_version, string_buf, string_buf)) {
                                // TODO: add warning!
                            }

                            break :resolved_pkg_id .{ ids.pkg_id, false };
                        }

                        const curr_peers = node_peers[curr_id.get()];
                        for (curr_peers.list.items) |ids| {
                            const transitive_peer_dep = dependencies[ids.dep_id];

                            if (transitive_peer_dep.name_hash != peer_dep.name_hash) {
                                continue;
                            }

                            // A transitive peer with the same name has already passed
                            // through this node

                            if (!ids.auto_installed) {
                                // The resolution was found here or above. Choose the same
                                // peer resolution. No need to mark this node or above.

                                // TODO: add warning if not satisfies()!
                                break :resolved_pkg_id .{ ids.pkg_id, false };
                            }

                            // It didn't find a matching name and auto installed
                            // from somewhere this peer can't reach. Choose best
                            // version. Only mark all parents if resolution is
                            // different from this transitive peer.

                            const best_version = resolutions[peer_dep_id];

                            if (best_version == invalid_package_id) {
                                break :resolved_pkg_id .{ invalid_package_id, true };
                            }

                            if (best_version == ids.pkg_id) {
                                break :resolved_pkg_id .{ ids.pkg_id, true };
                            }

                            // add the remaining parent ids
                            while (curr_id != .invalid) {
                                try visited_parent_node_ids.append(curr_id);
                                curr_id = node_parent_ids[curr_id.get()];
                            }

                            break :resolved_pkg_id .{ best_version, true };
                        }

                        // TODO: prevent marking workspace and symlink deps with transitive peers

                        // add to visited parents after searching for a peer resolution.
                        // if a node resolves a transitive peer, it can still be deduplicated
                        try visited_parent_node_ids.append(curr_id);
                        curr_id = node_parent_ids[curr_id.get()];
                    }

                    // choose the current best version
                    break :resolved_pkg_id .{ resolutions[peer_dep_id], true };
                };

                if (resolved_pkg_id == invalid_package_id) {
                    // these are optional peers that failed to find any dependency with a matching
                    // name. they are completely excluded
                    continue;
                }

                for (visited_parent_node_ids.items) |visited_parent_id| {
                    const ctx: Store.Node.TransitivePeer.OrderedArraySetCtx = .{
                        .string_buf = string_buf,
                        .pkg_names = pkg_names,
                    };
                    const peer: Store.Node.TransitivePeer = .{
                        .dep_id = peer_dep_id,
                        .pkg_id = resolved_pkg_id,
                        .auto_installed = auto_installed,
                    };
                    try node_peers[visited_parent_id.get()].insert(lockfile.allocator, peer, &ctx);
                }

                if (visited_parent_node_ids.items.len != 0) {
                    // visited parents length == 0 means the node satisfied its own
                    // peer. don't queue.
                    node_dependencies[node_id.get()].appendAssumeCapacity(.{ .dep_id = peer_dep_id, .pkg_id = resolved_pkg_id });
                    try node_queue.writeItem(.{
                        .parent_id = node_id,
                        .dep_id = peer_dep_id,
                        .pkg_id = resolved_pkg_id,
                    });
                }
            }
        }

        if (manager.options.log_level.isVerbose()) {
            const full_tree_end = timer.read();
            timer.reset();
            Output.prettyErrorln("Resolved peers [{f}]", .{bun.fmt.fmtDurationOneDecimal(full_tree_end)});
        }

        const DedupeInfo = struct {
            entry_id: Store.Entry.Id,
            dep_id: DependencyID,
            peers: Store.OrderedArraySet(Store.Node.TransitivePeer, Store.Node.TransitivePeer.OrderedArraySetCtx),
        };

        var dedupe: std.AutoHashMapUnmanaged(PackageID, std.ArrayListUnmanaged(DedupeInfo)) = .empty;
        defer dedupe.deinit(lockfile.allocator);

        var res_fmt_buf: std.array_list.Managed(u8) = .init(lockfile.allocator);
        defer res_fmt_buf.deinit();

        const nodes_slice = nodes.slice();
        const node_pkg_ids = nodes_slice.items(.pkg_id);
        const node_dep_ids = nodes_slice.items(.dep_id);
        const node_peers: []const Store.Node.Peers = nodes_slice.items(.peers);
        const node_nodes = nodes_slice.items(.nodes);

        var store: Store.Entry.List = .empty;

        const QueuedEntry = struct {
            node_id: Store.Node.Id,
            entry_parent_id: Store.Entry.Id,
        };
        var entry_queue: bun.LinearFifo(QueuedEntry, .Dynamic) = .init(lockfile.allocator);
        defer entry_queue.deinit();

        try entry_queue.writeItem(.{
            .node_id = .from(0),
            .entry_parent_id = .invalid,
        });

        var public_hoisted: bun.StringArrayHashMap(void) = .init(manager.allocator);
        defer public_hoisted.deinit();

        var hidden_hoisted: bun.StringArrayHashMap(void) = .init(manager.allocator);
        defer hidden_hoisted.deinit();

        // Second pass: Deduplicate nodes when the pkg_id and peer set match an existing entry.
        next_entry: while (entry_queue.readItem()) |entry| {
            const pkg_id = node_pkg_ids[entry.node_id.get()];

            const dedupe_entry = try dedupe.getOrPut(lockfile.allocator, pkg_id);
            if (!dedupe_entry.found_existing) {
                dedupe_entry.value_ptr.* = .{};
            } else {
                const curr_peers = node_peers[entry.node_id.get()];
                const curr_dep_id = node_dep_ids[entry.node_id.get()];

                for (dedupe_entry.value_ptr.items) |info| {
                    if (info.dep_id == invalid_dependency_id or curr_dep_id == invalid_dependency_id) {
                        if (info.dep_id != curr_dep_id) {
                            continue;
                        }
                    }
                    if (info.dep_id != invalid_dependency_id and curr_dep_id != invalid_dependency_id) {
                        const curr_dep = dependencies[curr_dep_id];
                        const existing_dep = dependencies[info.dep_id];

                        if (existing_dep.version.tag == .workspace and curr_dep.version.tag == .workspace) {
                            if (existing_dep.behavior.isWorkspace() != curr_dep.behavior.isWorkspace()) {
                                continue;
                            }
                        }
                    }

                    const eql_ctx: Store.Node.TransitivePeer.OrderedArraySetCtx = .{
                        .string_buf = string_buf,
                        .pkg_names = pkg_names,
                    };

                    if (info.peers.eql(&curr_peers, &eql_ctx)) {
                        // dedupe! depend on the already created entry

                        const entries = store.slice();
                        const entry_dependencies = entries.items(.dependencies);
                        const entry_parents = entries.items(.parents);

                        var parents = &entry_parents[info.entry_id.get()];

                        if (curr_dep_id != invalid_dependency_id and dependencies[curr_dep_id].behavior.isWorkspace()) {
                            try parents.append(lockfile.allocator, entry.entry_parent_id);
                            continue :next_entry;
                        }
                        const ctx: Store.Entry.DependenciesOrderedArraySetCtx = .{
                            .string_buf = string_buf,
                            .dependencies = dependencies,
                        };
                        try entry_dependencies[entry.entry_parent_id.get()].insert(
                            lockfile.allocator,
                            .{ .entry_id = info.entry_id, .dep_id = curr_dep_id },
                            &ctx,
                        );
                        try parents.append(lockfile.allocator, entry.entry_parent_id);
                        continue :next_entry;
                    }
                }

                // nothing matched - create a new entry
            }

            const new_entry_peer_hash: Store.Entry.PeerHash = peer_hash: {
                const peers = node_peers[entry.node_id.get()];
                if (peers.len() == 0) {
                    break :peer_hash .none;
                }
                var hasher = bun.Wyhash11.init(0);
                for (peers.slice()) |peer_ids| {
                    const pkg_name = pkg_names[peer_ids.pkg_id];
                    hasher.update(pkg_name.slice(string_buf));
                    const pkg_res = pkg_resolutions[peer_ids.pkg_id];
                    res_fmt_buf.clearRetainingCapacity();
                    try res_fmt_buf.writer().print("{f}", .{pkg_res.fmt(string_buf, .posix)});
                    hasher.update(res_fmt_buf.items);
                }
                break :peer_hash .from(hasher.final());
            };

            const new_entry_dep_id = node_dep_ids[entry.node_id.get()];

            const new_entry_is_root = new_entry_dep_id == invalid_dependency_id;
            const new_entry_is_workspace = !new_entry_is_root and dependencies[new_entry_dep_id].version.tag == .workspace;

            const new_entry_dependencies: Store.Entry.Dependencies = if (dedupe_entry.found_existing and new_entry_is_workspace)
                .empty
            else
                try .initCapacity(lockfile.allocator, node_nodes[entry.node_id.get()].items.len);

            var new_entry_parents: std.ArrayListUnmanaged(Store.Entry.Id) = try .initCapacity(lockfile.allocator, 1);
            new_entry_parents.appendAssumeCapacity(entry.entry_parent_id);

            const hoisted = hoisted: {
                if (new_entry_dep_id == invalid_dependency_id) {
                    break :hoisted false;
                }

                const dep_name = dependencies[new_entry_dep_id].name.slice(string_buf);

                const hoist_pattern = manager.options.hoist_pattern orelse {
                    const hoist_entry = try hidden_hoisted.getOrPut(dep_name);
                    break :hoisted !hoist_entry.found_existing;
                };

                if (hoist_pattern.isMatch(dep_name)) {
                    const hoist_entry = try hidden_hoisted.getOrPut(dep_name);
                    break :hoisted !hoist_entry.found_existing;
                }

                break :hoisted false;
            };

            const new_entry: Store.Entry = .{
                .node_id = entry.node_id,
                .dependencies = new_entry_dependencies,
                .parents = new_entry_parents,
                .peer_hash = new_entry_peer_hash,
                .hoisted = hoisted,
            };

            const new_entry_id: Store.Entry.Id = .from(@intCast(store.len));
            try store.append(lockfile.allocator, new_entry);

            if (entry.entry_parent_id.tryGet()) |entry_parent_id| skip_adding_dependency: {
                if (new_entry_dep_id != invalid_dependency_id and dependencies[new_entry_dep_id].behavior.isWorkspace()) {
                    // skip implicit workspace dependencies on the root.
                    break :skip_adding_dependency;
                }

                const entries = store.slice();
                const entry_dependencies = entries.items(.dependencies);
                const ctx: Store.Entry.DependenciesOrderedArraySetCtx = .{
                    .string_buf = string_buf,
                    .dependencies = dependencies,
                };
                try entry_dependencies[entry_parent_id].insert(
                    lockfile.allocator,
                    .{ .entry_id = new_entry_id, .dep_id = new_entry_dep_id },
                    &ctx,
                );

                if (new_entry_dep_id != invalid_dependency_id) {
                    if (entry.entry_parent_id == .root) {
                        // make sure direct dependencies are not replaced
                        const dep_name = dependencies[new_entry_dep_id].name.slice(string_buf);
                        try public_hoisted.put(dep_name, {});
                    } else {
                        // transitive dependencies (also direct dependencies of workspaces!)
                        const dep_name = dependencies[new_entry_dep_id].name.slice(string_buf);
                        if (manager.options.public_hoist_pattern) |public_hoist_pattern| {
                            if (public_hoist_pattern.isMatch(dep_name)) {
                                const hoist_entry = try public_hoisted.getOrPut(dep_name);
                                if (!hoist_entry.found_existing) {
                                    try entry_dependencies[0].insert(
                                        lockfile.allocator,
                                        .{ .entry_id = new_entry_id, .dep_id = new_entry_dep_id },
                                        &ctx,
                                    );
                                }
                            }
                        }
                    }
                }
            }

            try dedupe_entry.value_ptr.append(lockfile.allocator, .{
                .entry_id = new_entry_id,
                .dep_id = new_entry_dep_id,
                .peers = node_peers[entry.node_id.get()],
            });

            for (node_nodes[entry.node_id.get()].items) |node_id| {
                try entry_queue.writeItem(.{
                    .node_id = node_id,
                    .entry_parent_id = new_entry_id,
                });
            }
        }

        if (manager.options.log_level.isVerbose()) {
            const dedupe_end = timer.read();
            Output.prettyErrorln("Created store [{f}]", .{bun.fmt.fmtDurationOneDecimal(dedupe_end)});
        }

        break :store .{
            .entries = store,
            .nodes = nodes,
        };
    };
    const store: Store = try .init(
        manager,
        install_root_dependencies,
        workspace_filters,
        packages_to_install,
    );
    defer store.deinit();

    // setup node_modules/.bun
    const is_new_bun_modules = is_new_bun_modules: {
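
A standalone sketch, not part of the diff, of the peer-hash idea behind Entry.PeerHash above: two nodes for the same package may only share a store entry when the (name, resolution) pairs of their resolved peers hash identically. bun uses its own Wyhash11; std.hash.Wyhash stands in here, and PeerIds is a hypothetical simplification:

const std = @import("std");

const PeerIds = struct { name: []const u8, resolution: []const u8 };

fn peerSetHash(peers: []const PeerIds) u64 {
    var hasher = std.hash.Wyhash.init(0);
    for (peers) |peer| {
        // Hash each resolved peer's package name and formatted resolution,
        // in order, so equal peer sets produce equal hashes.
        hasher.update(peer.name);
        hasher.update(peer.resolution);
    }
    return hasher.final();
}

test "equal peer sets hash equally" {
    const a = peerSetHash(&.{.{ .name = "react", .resolution = "18.2.0" }});
    const b = peerSetHash(&.{.{ .name = "react", .resolution = "18.2.0" }});
    const c = peerSetHash(&.{.{ .name = "react", .resolution = "17.0.2" }});
    try std.testing.expect(a == b);
    try std.testing.expect(a != c);
}
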
@@ -676,8 +80,8 @@ pub fn installIsolatedPackages(
     }

     // 3
-    for (lockfile.workspace_paths.values()) |workspace_path| {
-        var workspace_node_modules: bun.AutoRelPath = .from(workspace_path.slice(lockfile.buffers.string_bytes.items));
+    for (manager.lockfile.workspace_paths.values()) |workspace_path| {
+        var workspace_node_modules: bun.AutoRelPath = .from(workspace_path.slice(manager.lockfile.buffers.string_bytes.items));
         defer workspace_node_modules.deinit();

         const basename = workspace_node_modules.basename();
@@ -742,8 +146,8 @@ pub fn installIsolatedPackages(
     rename_path.undo(1);

     // 5
-    for (lockfile.workspace_paths.values()) |workspace_path| {
-        var workspace_node_modules: bun.AutoRelPath = .from(workspace_path.slice(lockfile.buffers.string_bytes.items));
+    for (manager.lockfile.workspace_paths.values()) |workspace_path| {
+        var workspace_node_modules: bun.AutoRelPath = .from(workspace_path.slice(manager.lockfile.buffers.string_bytes.items));
         defer workspace_node_modules.deinit();

         const basename = workspace_node_modules.basename();
@@ -799,6 +203,7 @@ pub fn installIsolatedPackages(
     const entry_dependencies = entries.items(.dependencies);
     const entry_hoisted = entries.items(.hoisted);

+    const lockfile = manager.lockfile;
     const string_buf = lockfile.buffers.string_bytes.items;

     const pkgs = lockfile.packages.slice();
@@ -867,7 +272,7 @@ pub fn installIsolatedPackages(
         continue;
     },
     .root => {
-        if (dep_id == invalid_dependency_id) {
+        if (dep_id == install.invalid_dependency_id) {
             // .monotonic is okay in this block because the task isn't running on another
             // thread.
             entry_steps[entry_id.get()].store(.symlink_dependencies, .monotonic);
@@ -1167,6 +572,8 @@ pub fn installIsolatedPackages(
         Output.err(err, "failed to install packages", .{});
         Global.exit(1);
     }
+
+    manager.thread_pool.waitForAll();
 }

 if (manager.options.log_level.showProgress()) {
@@ -1236,16 +643,10 @@ const sys = bun.sys;
const Command = bun.cli.Command;

const install = bun.install;
const DependencyID = install.DependencyID;
const PackageID = install.PackageID;
const PackageInstall = install.PackageInstall;
const Resolution = install.Resolution;
const Store = install.Store;
const invalid_dependency_id = install.invalid_dependency_id;
const invalid_package_id = install.invalid_package_id;

const Lockfile = install.Lockfile;
const Tree = Lockfile.Tree;

const PackageManager = install.PackageManager;
const ProgressStrings = PackageManager.ProgressStrings;
@@ -270,7 +270,7 @@ pub const Installer = struct {

     const dep = this.lockfile.buffers.dependencies.items[dep_id];

-    if (dep.behavior.isWorkspace()) {
+    if (dep.behavior.isWorkspace() or dep.version.tag == .workspace) {
         break :state .{ node_id, .skipped };
     }
@@ -339,6 +339,8 @@ pub const Installer = struct {

     result: Result,

+    critical_section: bun.safety.CriticalSection = .{},
+
     const Result = union(enum) {
         none,
         err: Error,
@@ -1125,47 +1127,60 @@ pub const Installer = struct {
     pub fn callback(task: *ThreadPool.Task) void {
         const this: *Task = @fieldParentPtr("task", task);

+        this.critical_section.begin();
+
         const res = this.run() catch |err| switch (err) {
             error.OutOfMemory => bun.outOfMemory(),
         };

+        // Hold locals to avoid touching `this` after push.
+        const installer = this.installer;
+
         switch (res) {
-            .yield => {},
+            .yield => {
+                this.critical_section.end();
+            },
             .run_scripts => |list| {
                 if (comptime Environment.ci_assert) {
-                    bun.assertWithLocation(this.installer.store.entries.items(.scripts)[this.entry_id.get()] != null, @src());
+                    bun.assertWithLocation(installer.store.entries.items(.scripts)[this.entry_id.get()] != null, @src());
                 }
                 this.result = .{ .run_scripts = list };
-                this.installer.task_queue.push(this);
-                this.installer.manager.wake();
+                // End the critical section before pushing: after push, the main thread may
+                // reschedule this task and another worker could enter the critical section.
+                this.critical_section.end();
+                installer.task_queue.push(this);
+                installer.manager.wake();
             },
             .done => {
                 if (comptime Environment.ci_assert) {
                     // .monotonic is okay because this should have been set by this thread.
-                    bun.assertWithLocation(this.installer.store.entries.items(.step)[this.entry_id.get()].load(.monotonic) == .done, @src());
+                    bun.assertWithLocation(installer.store.entries.items(.step)[this.entry_id.get()].load(.monotonic) == .done, @src());
                 }
                 this.result = .done;
-                this.installer.task_queue.push(this);
-                this.installer.manager.wake();
+                this.critical_section.end();
+                installer.task_queue.push(this);
+                installer.manager.wake();
             },
             .blocked => {
                 if (comptime Environment.ci_assert) {
                     // .monotonic is okay because this should have been set by this thread.
-                    bun.assertWithLocation(this.installer.store.entries.items(.step)[this.entry_id.get()].load(.monotonic) == .check_if_blocked, @src());
+                    bun.assertWithLocation(installer.store.entries.items(.step)[this.entry_id.get()].load(.monotonic) == .check_if_blocked, @src());
                 }
                 this.result = .blocked;
-                this.installer.task_queue.push(this);
-                this.installer.manager.wake();
+                this.critical_section.end();
+                installer.task_queue.push(this);
+                installer.manager.wake();
             },
             .fail => |err| {
                 if (comptime Environment.ci_assert) {
                     // .monotonic is okay because this should have been set by this thread.
-                    bun.assertWithLocation(this.installer.store.entries.items(.step)[this.entry_id.get()].load(.monotonic) != .done, @src());
+                    bun.assertWithLocation(installer.store.entries.items(.step)[this.entry_id.get()].load(.monotonic) != .done, @src());
                 }
-                this.installer.store.entries.items(.step)[this.entry_id.get()].store(.done, .release);
+                installer.store.entries.items(.step)[this.entry_id.get()].store(.done, .release);
                 this.result = .{ .err = err };
-                this.installer.task_queue.push(this);
-                this.installer.manager.wake();
+                this.critical_section.end();
+                installer.task_queue.push(this);
+                installer.manager.wake();
             },
         }
     }
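
A standalone sketch, not part of the diff, of the ordering rule in this hunk: after a task is pushed to the shared queue, the main thread may reschedule it and another worker may claim it, so the worker must capture any locals it still needs and end its critical section before the push. Task, Installer, and Queue here are hypothetical stand-ins, not bun's bun.safety types:

const std = @import("std");

const Task = struct {
    installer: *Installer,
    busy: bool = false, // stand-in for a critical-section guard

    fn finish(task: *Task) void {
        const installer = task.installer; // hold locals; no touching `task` after push
        task.busy = false; // end the critical section first...
        installer.queue.push(task); // ...then publish; another worker may now claim it
    }
};

const Installer = struct {
    queue: Queue = .{},
};

const Queue = struct {
    mutex: std.Thread.Mutex = .{},
    items: [4]*Task = undefined,
    len: usize = 0,

    fn push(queue: *Queue, task: *Task) void {
        queue.mutex.lock();
        defer queue.mutex.unlock();
        queue.items[queue.len] = task;
        queue.len += 1;
    }
};

test "publish only after leaving the critical section" {
    var installer: Installer = .{};
    var task: Task = .{ .installer = &installer, .busy = true };
    task.finish();
    try std.testing.expect(!task.busy);
    try std.testing.expectEqual(@as(usize, 1), installer.queue.len);
}
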
@@ -5,8 +5,23 @@ const Ids = struct {

 pub const Store = struct {
     /// Accessed from multiple threads
-    entries: Entry.List,
-    nodes: Node.List,
+    entries: Entry.List = .empty,
+    nodes: Node.List = .empty,
+
+    // allocator used for `entries` and `nodes`
+    allocator: std.mem.Allocator,
+
+    pub fn appendNode(this: *Store, node: Node) OOM!Node.Id {
+        const node_id: Node.Id = .from(@intCast(this.nodes.len));
+        try this.nodes.append(this.allocator, node);
+        return node_id;
+    }
+
+    pub fn appendEntry(this: *Store, entry: Entry) OOM!Entry.Id {
+        const entry_id: Entry.Id = .from(@intCast(this.entries.len));
+        try this.entries.append(this.allocator, entry);
+        return entry_id;
+    }

     const log = Output.scoped(.Store, .visible);
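
A standalone sketch, not part of the diff, of why CreateCtx (in the next hunk) re-captures its `items(...)` slices after every append: Store's node and entry lists are MultiArrayList-style containers, and an append may reallocate, invalidating previously captured field slices. The Node type here is a hypothetical simplification:

const std = @import("std");

const Node = struct { pkg_id: u32, parent_id: u32 };

test "refresh field slices after append" {
    var nodes: std.MultiArrayList(Node) = .{};
    defer nodes.deinit(std.testing.allocator);

    try nodes.append(std.testing.allocator, .{ .pkg_id = 1, .parent_id = 0 });
    var pkg_ids = nodes.items(.pkg_id); // may dangle after the next append

    try nodes.append(std.testing.allocator, .{ .pkg_id = 2, .parent_id = 0 });
    pkg_ids = nodes.items(.pkg_id); // refresh, as appendNode/appendEntry do
    try std.testing.expectEqual(@as(u32, 2), pkg_ids[1]);
}
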
@@ -51,6 +66,718 @@ pub const Store = struct {
|
||||
|
||||
pub const Installer = @import("./Installer.zig").Installer;
|
||||
|
||||
const NextNode = struct {
|
||||
parent_id: Node.Id,
|
||||
dep_id: DependencyID,
|
||||
pkg_id: PackageID,
|
||||
|
||||
// no deinit because each field does not need to
|
||||
// be deinitialized. see `bun.memory.deinit`
|
||||
pub const deinit = void;
|
||||
};
|
||||
|
||||
// struct holding up-to-date pointers to multi array list fields
|
||||
// and some code moved into functions for reuse
|
||||
const CreateCtx = struct {
|
||||
store: Store,
|
||||
allocator: std.mem.Allocator,
|
||||
|
||||
// lockfile buffers
|
||||
string_buf: []const u8,
|
||||
dependencies: []const Dependency,
|
||||
resolutions: []const PackageID,
|
||||
pkg_names: []const String,
|
||||
pkg_resolutions: []const Resolution,
|
||||
pkg_name_hashes: []const PackageNameHash,
|
||||
pkg_dependency_slices: []const DependencySlice,
|
||||
|
||||
node_dep_ids: []DependencyID,
|
||||
node_pkg_ids: []PackageID,
|
||||
node_parent_ids: []Node.Id,
|
||||
node_dependencies: []std.ArrayListUnmanaged(Ids),
|
||||
node_peers: []Node.Peers,
|
||||
node_nodes: []std.ArrayListUnmanaged(Node.Id),
|
||||
|
||||
node_dedupe: std.AutoArrayHashMap(PackageID, Node.Id),
|
||||
|
||||
entry_dependencies: []Entry.Dependencies,
|
||||
entry_parents: []std.ArrayListUnmanaged(Entry.Id),
|
||||
|
||||
pub fn init(allocator: std.mem.Allocator, lockfile: *const Lockfile) OOM!@This() {
|
||||
const pkgs = lockfile.packages.slice();
|
||||
var ctx: @This() = .{
|
||||
.store = .{ .allocator = allocator },
|
||||
.allocator = allocator,
|
||||
.string_buf = lockfile.buffers.string_bytes.items,
|
||||
.dependencies = lockfile.buffers.dependencies.items,
|
||||
.resolutions = lockfile.buffers.resolutions.items,
|
||||
.pkg_names = pkgs.items(.name),
|
||||
.pkg_resolutions = pkgs.items(.resolution),
|
||||
.pkg_name_hashes = pkgs.items(.name_hash),
|
||||
.pkg_dependency_slices = pkgs.items(.dependencies),
|
||||
.node_dep_ids = &.{},
|
||||
.node_pkg_ids = &.{},
|
||||
.node_parent_ids = &.{},
|
||||
.node_dependencies = &.{},
|
||||
.node_peers = &.{},
|
||||
.node_nodes = &.{},
|
||||
.node_dedupe = .init(allocator),
|
||||
.entry_dependencies = &.{},
|
||||
.entry_parents = &.{},
|
||||
};
|
||||
|
||||
// Both of these will be similar in size to packages.len. Peer dependencies will make them slightly larger.
|
||||
try ctx.store.nodes.ensureUnusedCapacity(ctx.store.allocator, ctx.pkg_names.len);
|
||||
try ctx.store.entries.ensureUnusedCapacity(ctx.store.allocator, ctx.pkg_names.len);
|
||||
|
||||
return ctx;
|
||||
}
|
||||
|
||||
pub fn deinit(this: *@This()) void {
|
||||
this.node_dedupe.deinit();
|
||||
}
|
||||
|
||||
const NodeParentIterator = struct {
|
||||
next_id: Node.Id,
|
||||
node_parent_ids: []const Node.Id,
|
||||
|
||||
pub fn next(this: *@This()) ?Node.Id {
|
||||
if (this.next_id == .invalid) {
|
||||
return null;
|
||||
}
|
||||
const curr_id = this.next_id;
|
||||
this.next_id = this.node_parent_ids[curr_id.get()];
|
||||
return curr_id;
|
||||
}
|
||||
};
|
||||
|
||||
pub fn iterateNodeParents(this: *const @This(), first_parent_id: Node.Id) NodeParentIterator {
|
||||
return .{ .next_id = first_parent_id, .node_parent_ids = this.node_parent_ids };
|
||||
}
|
||||
|
||||
const AppendNodeResult = union(enum) {
|
||||
new_node: Node.Id,
|
||||
deduplicated,
|
||||
};
|
||||
|
||||
pub fn appendNode(this: *@This(), next_node: NextNode) OOM!AppendNodeResult {
|
||||
if (this.node_dedupe.get(next_node.pkg_id)) |dedupe_node_id| create_new_node: {
|
||||
const node_dep = this.dependencies[next_node.dep_id];
|
||||
|
||||
const dedupe_dep_id = this.node_dep_ids[dedupe_node_id.get()];
|
||||
const dedupe_dep = this.dependencies[dedupe_dep_id];
|
||||
|
||||
if (dedupe_dep.name_hash != node_dep.name_hash or
|
||||
dedupe_dep.behavior.workspace != node_dep.behavior.workspace)
|
||||
{
|
||||
// create a new node if it's an alias so we don't lose the alias name
|
||||
break :create_new_node;
|
||||
}
|
||||
|
||||
try this.addNodeToParentNodes(next_node.parent_id, dedupe_node_id);
|
||||
return .deduplicated;
|
||||
}
|
||||
|
||||
const pkg_deps = this.pkg_dependency_slices[next_node.pkg_id];
|
||||
|
||||
const node_id = try this.store.appendNode(.{
|
||||
.pkg_id = next_node.pkg_id,
|
||||
.dep_id = next_node.dep_id,
|
||||
.parent_id = next_node.parent_id,
|
||||
// capacity is set to the expected size after we
|
||||
// find the exact dependency count
|
||||
.nodes = .empty,
|
||||
.dependencies = try .initCapacity(this.allocator, pkg_deps.len),
|
||||
});
|
||||
|
||||
// update pointers
|
||||
const nodes = this.store.nodes.slice();
|
||||
this.node_dep_ids = nodes.items(.dep_id);
|
||||
this.node_pkg_ids = nodes.items(.pkg_id);
|
||||
this.node_parent_ids = nodes.items(.parent_id);
|
||||
this.node_dependencies = nodes.items(.dependencies);
|
||||
this.node_peers = nodes.items(.peers);
|
||||
this.node_nodes = nodes.items(.nodes);
|
||||
|
||||
return .{ .new_node = node_id };
|
||||
}
|
||||
|
||||
pub fn addNodeToParentNodes(this: *@This(), parent_id: Node.Id, node_id: Node.Id) OOM!void {
|
||||
this.node_nodes[parent_id.get()].appendAssumeCapacity(node_id);
|
||||
|
||||
if (this.node_nodes[parent_id.get()].items.len == this.node_dependencies[parent_id.get()].items.len) {
|
||||
// we've visited all the children nodes of the parent, see if we can add to the dedupe map.
|
||||
try this.maybeAddNodeToDedupeMap(parent_id);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn maybeAddNodeToDedupeMap(this: *@This(), node_id: Node.Id) OOM!void {
|
||||
if (this.node_peers[node_id.get()].list.items.len != 0) {
|
||||
// only nodes without peers (transitive or direct) are added to the map.
|
||||
return;
|
||||
}
|
||||
|
||||
const dep_id = this.node_dep_ids[node_id.get()];
|
||||
if (dep_id == invalid_dependency_id) {
|
||||
// no need to add the root package
|
||||
return;
|
||||
}
|
||||
|
||||
const dep = this.dependencies[dep_id];
|
||||
const pkg_id = this.node_pkg_ids[node_id.get()];
|
||||
|
||||
if (dep.name_hash != this.pkg_name_hashes[pkg_id]) {
|
||||
// don't add to the dedupe map if the dependency name does not match
|
||||
// the package name. this means it's an alias, and won't be as common
|
||||
// as a normal dependency on this package.
|
||||
return;
|
||||
}
|
||||
|
||||
const dedupe = try this.node_dedupe.getOrPut(pkg_id);
|
||||
|
||||
if (dedupe.found_existing) {
|
||||
bun.debugAssert(dep.version.tag == .workspace);
|
||||
return;
|
||||
}
|
||||
|
||||
dedupe.value_ptr.* = node_id;
|
||||
}
|
||||
|
||||
pub fn appendEntry(this: *@This(), entry: Entry) OOM!Entry.Id {
|
||||
const entry_id = try this.store.appendEntry(entry);
|
||||
|
||||
// update pointers
|
||||
const entries = this.store.entries.slice();
|
||||
this.entry_dependencies = entries.items(.dependencies);
|
||||
this.entry_parents = entries.items(.parents);
|
||||
|
||||
return entry_id;
|
||||
}
|
||||
};
|
||||
|
||||
pub fn init(
|
||||
manager: *PackageManager,
|
||||
install_root_dependencies: bool,
|
||||
workspace_filters: []const WorkspaceFilter,
|
||||
packages_to_install: ?[]const PackageID,
|
||||
) OOM!Store {
|
||||
var timer = std.time.Timer.start() catch unreachable;
|
||||
|
||||
var next_node_stack: bun.collections.ArrayListDefault(NextNode) = .init();
|
||||
defer next_node_stack.deinit();
|
||||
|
||||
try next_node_stack.append(.{
|
||||
.parent_id = .invalid,
|
||||
.dep_id = invalid_dependency_id,
|
||||
.pkg_id = 0,
|
||||
});
|
||||
|
||||
var ctx: CreateCtx = try .init(manager.allocator, manager.lockfile);
|
||||
defer ctx.deinit();
|
||||
|
||||
var dep_ids_sort_buf: bun.collections.ArrayListDefault(DependencyID) = .init();
|
||||
defer dep_ids_sort_buf.deinit();
|
||||
|
||||
var peer_dep_ids_buf: bun.collections.ArrayListDefault(DependencyID) = .init();
|
||||
defer peer_dep_ids_buf.deinit();
|
||||
|
||||
var visited_node_ids_buf: std.array_list.Managed(Node.Id) = .init(ctx.allocator);
|
||||
defer visited_node_ids_buf.deinit();
|
||||
|
||||
// First pass: create full dependency tree with resolved peers
|
||||
next_node: while (next_node_stack.pop()) |next_node| {
|
||||
check_cycle: {
|
||||
// check for cycles
|
||||
var parent_iter = ctx.iterateNodeParents(next_node.parent_id);
|
||||
while (parent_iter.next()) |parent_id| {
|
||||
if (ctx.node_pkg_ids[parent_id.get()] != next_node.pkg_id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// pkg_id is the same. skip the new node, and add the previously added node
|
||||
// to parent so it appears in 'node_modules/.bun/parent@version/node_modules'.
|
||||
|
||||
const dep_id = ctx.node_dep_ids[parent_id.get()];
|
||||
if (dep_id == invalid_dependency_id and next_node.dep_id == invalid_dependency_id) {
|
||||
try ctx.addNodeToParentNodes(next_node.parent_id, parent_id);
|
||||
continue :next_node;
|
||||
}
|
||||
|
||||
if (dep_id == invalid_dependency_id or next_node.dep_id == invalid_dependency_id) {
|
||||
// one is the root package, one is a dependency on the root package (it has a valid dep_id)
|
||||
// create a new node for it.
|
||||
break :check_cycle;
|
||||
}
|
||||
|
||||
const parent_dep = ctx.dependencies[dep_id];
|
||||
const node_dep = ctx.dependencies[next_node.dep_id];
|
||||
|
||||
// ensure the dependency name is the same before skipping the cycle. if they aren't
|
||||
// we lose dependency name information for the symlinks
|
||||
if (parent_dep.name_hash == node_dep.name_hash and
|
||||
// also ensure workspace self deps are not skipped.
|
||||
// implicit workspace dep != explicit workspace dep
|
||||
parent_dep.behavior.workspace == node_dep.behavior.workspace)
|
||||
{
|
||||
try ctx.addNodeToParentNodes(next_node.parent_id, parent_id);
|
||||
continue :next_node;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const node_id = switch (try ctx.appendNode(next_node)) {
|
||||
.new_node => |id| id,
|
||||
.deduplicated => continue,
|
||||
};
|
||||
|
||||
const pkg_deps = ctx.pkg_dependency_slices[next_node.pkg_id];
|
||||
dep_ids_sort_buf.clearRetainingCapacity();
|
||||
try dep_ids_sort_buf.ensureUnusedCapacity(pkg_deps.len);
|
||||
for (pkg_deps.begin()..pkg_deps.end()) |_dep_id| {
|
||||
const dep_id: DependencyID = @intCast(_dep_id);
|
||||
dep_ids_sort_buf.appendAssumeCapacity(dep_id);
|
||||
}
|
||||
|
||||
// TODO: make this sort in an order that allows peers to be resolved last
|
||||
// and devDependency handling to match `hoistDependency`
|
||||
std.sort.pdq(
|
||||
DependencyID,
|
||||
dep_ids_sort_buf.items(),
|
||||
Lockfile.DepSorter{ .lockfile = manager.lockfile },
|
||||
Lockfile.DepSorter.isLessThan,
|
||||
);
|
||||
|
||||
peer_dep_ids_buf.clearRetainingCapacity();
|
||||
queue_deps: {
|
||||
if (packages_to_install) |packages| {
|
||||
if (node_id == .root) { // TODO: print an error when scanner is actually a dependency of a workspace (we should not support this)
|
||||
for (dep_ids_sort_buf.items()) |dep_id| {
|
||||
const pkg_id = ctx.resolutions[dep_id];
|
||||
if (pkg_id == invalid_package_id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (packages) |package_to_install| {
|
||||
if (package_to_install == pkg_id) {
|
||||
ctx.node_dependencies[node_id.get()].appendAssumeCapacity(.{ .dep_id = dep_id, .pkg_id = pkg_id });
|
||||
try next_node_stack.append(.{
|
||||
.parent_id = node_id,
|
||||
.dep_id = dep_id,
|
||||
.pkg_id = pkg_id,
|
||||
});
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
break :queue_deps;
|
||||
}
|
||||
}
|
||||
|
||||
for (dep_ids_sort_buf.items()) |dep_id| {
|
||||
if (Tree.isFilteredDependencyOrWorkspace(
|
||||
dep_id,
|
||||
next_node.pkg_id,
|
||||
workspace_filters,
|
||||
install_root_dependencies,
|
||||
manager,
|
||||
manager.lockfile,
|
||||
)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const pkg_id = ctx.resolutions[dep_id];
|
||||
const dep = ctx.dependencies[dep_id];
|
||||
|
||||
// TODO: handle duplicate dependencies. should be similar logic
|
||||
// like we have for dev dependencies in `hoistDependency`
|
||||
|
||||
if (dep.behavior.isPeer()) {
|
||||
try peer_dep_ids_buf.append(dep_id);
|
||||
continue;
|
||||
}
|
||||
|
||||
// simple case:
|
||||
// - add it as a dependency
|
||||
// - queue it
|
||||
ctx.node_dependencies[node_id.get()].appendAssumeCapacity(.{ .dep_id = dep_id, .pkg_id = pkg_id });
|
||||
try next_node_stack.append(.{
|
||||
.parent_id = node_id,
|
||||
.dep_id = dep_id,
|
||||
.pkg_id = pkg_id,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
for (peer_dep_ids_buf.items()) |peer_dep_id| {
|
||||
const resolved_pkg_id, const auto_installed = resolved_pkg_id: {
|
||||
|
||||
// Go through the peers parents looking for a package with the same name.
|
||||
// If none is found, use current best version. Parents visited must have
|
||||
// the package id for the chosen peer marked as a transitive peer. Nodes
|
||||
// are deduplicated only if their package id and their transitive peer package
|
||||
// ids are equal.
|
||||
const peer_dep = ctx.dependencies[peer_dep_id];
|
||||
|
||||
// TODO: double check this
|
||||
// Start with the current package. A package
|
||||
// can satisfy it's own peers.
|
||||
var parent_iter = ctx.iterateNodeParents(node_id);
|
||||
|
||||
visited_node_ids_buf.clearRetainingCapacity();
|
||||
while (parent_iter.next()) |parent_id| {
|
||||
for (ctx.node_dependencies[parent_id.get()].items) |ids| {
|
||||
const dep = ctx.dependencies[ids.dep_id];
|
||||
|
||||
if (dep.name_hash != peer_dep.name_hash) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const res = ctx.pkg_resolutions[ids.pkg_id];
|
||||
|
||||
if (peer_dep.version.tag != .npm or res.tag != .npm) {
|
||||
// TODO: print warning for this? we don't have a version
|
||||
// to compare to say if this satisfies or not.
|
||||
break :resolved_pkg_id .{ ids.pkg_id, false };
|
||||
}
|
||||
|
||||
const peer_dep_version = peer_dep.version.value.npm.version;
|
||||
const res_version = res.value.npm.version;
|
||||
|
||||
if (!peer_dep_version.satisfies(res_version, ctx.string_buf, ctx.string_buf)) {
|
||||
// TODO: add warning!
|
||||
}
|
||||
|
||||
break :resolved_pkg_id .{ ids.pkg_id, false };
|
||||
}
|
||||
|
||||
const curr_peers = ctx.node_peers[parent_id.get()];
|
||||
for (curr_peers.list.items) |ids| {
|
||||
const transitive_peer_dep = ctx.dependencies[ids.dep_id];
|
||||
|
||||
if (transitive_peer_dep.name_hash != peer_dep.name_hash) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// A transitive peer with the same name has already passed
|
||||
// through this node
|
||||
|
||||
if (!ids.auto_installed) {
|
||||
// The resolution was found here or above. Choose the same
|
||||
// peer resolution. No need to mark this node or above.
|
||||
|
||||
// TODO: add warning if not satisfies()!
|
||||
break :resolved_pkg_id .{ ids.pkg_id, false };
|
||||
}
|
||||
|
||||
// It didn't find a matching name and auto installed
|
||||
// from somewhere this peer can't reach. Choose best
|
||||
// version. Only mark all parents if resolution is
|
||||
// different from this transitive peer.
|
||||
|
||||
const best_version = ctx.resolutions[peer_dep_id];
|
||||
|
||||
if (best_version == invalid_package_id) {
|
||||
break :resolved_pkg_id .{ invalid_package_id, true };
|
||||
}
|
||||
|
||||
if (best_version == ids.pkg_id) {
|
||||
break :resolved_pkg_id .{ ids.pkg_id, true };
|
||||
}
|
||||
|
||||
// add the remaining parent ids
|
||||
try visited_node_ids_buf.append(parent_id);
|
||||
while (parent_iter.next()) |remaining_parent_id| {
|
||||
try visited_node_ids_buf.append(remaining_parent_id);
|
||||
}
|
||||
|
||||
break :resolved_pkg_id .{ best_version, true };
|
||||
}
|
||||
|
||||
// TODO: prevent marking workspace and symlink deps with transitive peers
|
||||
|
||||
// add to visited parents after searching for a peer resolution.
|
||||
// if a node resolves a transitive peer, it can still be deduplicated
|
||||
try visited_node_ids_buf.append(parent_id);
|
||||
}
|
||||
|
||||
// choose the current best version
|
||||
break :resolved_pkg_id .{ ctx.resolutions[peer_dep_id], true };
|
||||
};
|
||||
|
||||
if (resolved_pkg_id == invalid_package_id) {
|
||||
// these are optional peers that failed to find any dependency with a matching
|
||||
// name. they are completely excluded.
|
||||
continue;
|
||||
}
|
||||
|
||||
for (visited_node_ids_buf.items) |visited_id| {
|
||||
const insert_ctx: Node.TransitivePeer.OrderedArraySetCtx = .{
|
||||
.string_buf = ctx.string_buf,
|
||||
.pkg_names = ctx.pkg_names,
|
||||
};
|
||||
const peer: Node.TransitivePeer = .{
|
||||
.dep_id = peer_dep_id,
|
||||
.pkg_id = resolved_pkg_id,
|
||||
.auto_installed = auto_installed,
|
||||
};
|
||||
try ctx.node_peers[visited_id.get()].insert(ctx.allocator, peer, &insert_ctx);
|
||||
}
|
||||
|
||||
if (visited_node_ids_buf.items.len != 0) {
|
||||
// visited parents length == 0 means the node satisfied it's own
|
||||
// peer. don't queue
|
||||
ctx.node_dependencies[node_id.get()].appendAssumeCapacity(.{ .dep_id = peer_dep_id, .pkg_id = resolved_pkg_id });
|
||||
try next_node_stack.append(.{
|
||||
.parent_id = node_id,
|
||||
.dep_id = peer_dep_id,
|
||||
.pkg_id = resolved_pkg_id,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
const node_dependencies_count = ctx.node_dependencies[node_id.get()].items.len;
|
||||
|
||||
try ctx.node_nodes[node_id.get()].ensureTotalCapacityPrecise(ctx.allocator, node_dependencies_count);
|
||||
|
||||
if (node_dependencies_count == 0) {
|
||||
// it's a leaf. we can try adding it to the dedupe map now
|
||||
try ctx.maybeAddNodeToDedupeMap(node_id);
|
||||
}
|
||||
|
||||
if (next_node.parent_id != .invalid) {
|
||||
try ctx.addNodeToParentNodes(next_node.parent_id, node_id);
|
||||
}
|
||||
}
|
||||
|
||||
if (manager.options.log_level.isVerbose()) {
|
||||
const full_tree_end = timer.read();
|
||||
timer.reset();
|
||||
Output.prettyErrorln("Resolved peers: {d} nodes [{f}]", .{
|
||||
ctx.store.nodes.len,
|
||||
bun.fmt.fmtDurationOneDecimal(full_tree_end),
|
||||
});
|
||||
}
|
||||
|
||||
const EntryDedupe = struct {
    entry_id: Entry.Id,
    dep_id: DependencyID,
    peers: OrderedArraySet(Node.TransitivePeer, Node.TransitivePeer.OrderedArraySetCtx),
};

var entry_dedupe: std.AutoArrayHashMap(PackageID, bun.collections.ArrayListDefault(EntryDedupe)) = .init(ctx.allocator);
defer entry_dedupe.deinit();

var res_fmt_buf: bun.collections.ArrayListDefault(u8) = .init();
defer res_fmt_buf.deinit();

const NextEntry = struct {
    node_id: Node.Id,
    parent_id: Entry.Id,
};

var next_entry_queue: bun.LinearFifo(NextEntry, .Dynamic) = .init(ctx.allocator);
defer next_entry_queue.deinit();

try next_entry_queue.writeItem(.{
    .node_id = .from(0),
    .parent_id = .invalid,
});

var public_hoisted: bun.StringArrayHashMap(void) = .init(ctx.allocator);
defer public_hoisted.deinit();

var hidden_hoisted: bun.StringArrayHashMap(void) = .init(ctx.allocator);
defer hidden_hoisted.deinit();

// Second pass: Deduplicate nodes when the pkg_id and peer set match an existing entry.
next_entry: while (next_entry_queue.readItem()) |next_entry| {
    const pkg_id = ctx.node_pkg_ids[next_entry.node_id.get()];
    const dep_id = ctx.node_dep_ids[next_entry.node_id.get()];

    const dedupe = try entry_dedupe.getOrPut(pkg_id);
    if (!dedupe.found_existing) {
        dedupe.value_ptr.* = .init();
    } else {
        const peers = ctx.node_peers[next_entry.node_id.get()];

        for (dedupe.value_ptr.items()) |info| {
            if (info.dep_id == invalid_dependency_id or dep_id == invalid_dependency_id) {
                if (info.dep_id != dep_id) {
                    continue;
                }
            }
            if (info.dep_id != invalid_dependency_id and dep_id != invalid_dependency_id) {
                const curr_dep = ctx.dependencies[dep_id];
                const existing_dep = ctx.dependencies[info.dep_id];

                if (existing_dep.version.tag == .workspace and curr_dep.version.tag == .workspace) {
                    if (existing_dep.behavior.isWorkspace() != curr_dep.behavior.isWorkspace()) {
                        continue;
                    }
                }
            }

            const eql_ctx: Node.TransitivePeer.OrderedArraySetCtx = .{
                .string_buf = ctx.string_buf,
                .pkg_names = ctx.pkg_names,
            };

            if (info.peers.eql(&peers, &eql_ctx)) {
                // dedupe! depend on the already created entry

                var parents = &ctx.entry_parents[info.entry_id.get()];

                if (dep_id != invalid_dependency_id and ctx.dependencies[dep_id].behavior.isWorkspace()) {
                    try parents.append(ctx.allocator, next_entry.parent_id);
                    continue :next_entry;
                }
                const insert_ctx: Entry.DependenciesOrderedArraySetCtx = .{
                    .string_buf = ctx.string_buf,
                    .dependencies = ctx.dependencies,
                };
                try ctx.entry_dependencies[next_entry.parent_id.get()].insert(
                    ctx.allocator,
                    .{ .entry_id = info.entry_id, .dep_id = dep_id },
                    &insert_ctx,
                );
                try parents.append(ctx.allocator, next_entry.parent_id);
                continue :next_entry;
            }
        }

        // nothing matched - create a new entry
    }

    const entry_id = try ctx.appendEntry(.{
        .node_id = next_entry.node_id,
        .dependencies = dependencies: {
            if (dedupe.found_existing and dep_id != invalid_dependency_id and ctx.dependencies[dep_id].version.tag == .workspace) {
                break :dependencies .empty;
            }

            break :dependencies try .initCapacity(ctx.allocator, ctx.node_nodes[next_entry.node_id.get()].items.len);
        },
        .parents = parents: {
            var parents: std.ArrayListUnmanaged(Entry.Id) = try .initCapacity(ctx.allocator, 1);
            parents.appendAssumeCapacity(next_entry.parent_id);
            break :parents parents;
        },
        .peer_hash = peer_hash: {
            const peers = ctx.node_peers[next_entry.node_id.get()];
            if (peers.len() == 0) {
                break :peer_hash .none;
            }
            var hasher = bun.Wyhash11.init(0);
            for (peers.slice()) |peer_ids| {
                const pkg_name = ctx.pkg_names[peer_ids.pkg_id];
                hasher.update(pkg_name.slice(ctx.string_buf));
                const pkg_res = ctx.pkg_resolutions[peer_ids.pkg_id];
                res_fmt_buf.clearRetainingCapacity();
                try res_fmt_buf.writer().print("{f}", .{pkg_res.fmt(ctx.string_buf, .posix)});
                hasher.update(res_fmt_buf.items());
            }
            break :peer_hash .from(hasher.final());
        },
        .hoisted = hoisted: {
            if (dep_id == invalid_dependency_id) {
                break :hoisted false;
            }

            const dep_name = ctx.dependencies[dep_id].name.slice(ctx.string_buf);

            const hoist_pattern = manager.options.hoist_pattern orelse {
                const hoist_entry = try hidden_hoisted.getOrPut(dep_name);
                break :hoisted !hoist_entry.found_existing;
            };

            if (hoist_pattern.isMatch(dep_name)) {
                const hoist_entry = try hidden_hoisted.getOrPut(dep_name);
                break :hoisted !hoist_entry.found_existing;
            }

            break :hoisted false;
        },
    });

    if (next_entry.parent_id != .invalid) skip_adding_dependency: {
        if (dep_id != invalid_dependency_id and ctx.dependencies[dep_id].behavior.isWorkspace()) {
            // skip implicit workspace dependencies on the root.
            break :skip_adding_dependency;
        }

        const insert_ctx: Entry.DependenciesOrderedArraySetCtx = .{
            .string_buf = ctx.string_buf,
            .dependencies = ctx.dependencies,
        };
        try ctx.entry_dependencies[next_entry.parent_id.get()].insert(
            ctx.allocator,
            .{ .entry_id = entry_id, .dep_id = dep_id },
            &insert_ctx,
        );

        if (dep_id == invalid_dependency_id) {
            break :skip_adding_dependency;
        }

        const dep_name = ctx.dependencies[dep_id].name.slice(ctx.string_buf);
        if (next_entry.parent_id == .root) {
            // make sure direct dependencies are not replaced
            try public_hoisted.put(dep_name, {});
        } else {
            // transitive dependencies (including direct dependencies of workspaces!)
            const public_hoist_pattern = manager.options.public_hoist_pattern orelse {
                break :skip_adding_dependency;
            };

            if (!public_hoist_pattern.isMatch(dep_name)) {
                break :skip_adding_dependency;
            }

            const hoist_entry = try public_hoisted.getOrPut(dep_name);
            if (hoist_entry.found_existing) {
                break :skip_adding_dependency;
            }

            try ctx.entry_dependencies[0].insert(
                ctx.allocator,
                .{ .entry_id = entry_id, .dep_id = dep_id },
                &insert_ctx,
            );
        }
    }

    try dedupe.value_ptr.append(.{
        .entry_id = entry_id,
        .dep_id = dep_id,
        .peers = ctx.node_peers[next_entry.node_id.get()],
    });

    for (ctx.node_nodes[next_entry.node_id.get()].items) |node_id| {
        try next_entry_queue.writeItem(.{
            .node_id = node_id,
            .parent_id = entry_id,
        });
    }
}
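The loop above is the whole dedupe idea: two nodes collapse into one store entry only when they resolve to the same package and carry an identical ordered set of transitive peers. A minimal TypeScript sketch of that keying scheme (peerDigest, entryFor, and the Peer shape are illustrative, not Bun's internals):

// Sketch: dedupe store entries by (package id, digest of the ordered peer
// set), mirroring entry_dedupe and the peer_hash block above. Illustrative.
interface Peer {
  name: string;
  resolution: string;
}

// Peers are kept in a stable order, so concatenation is deterministic,
// just like hashing name + formatted resolution with Wyhash11 above.
function peerDigest(peers: Peer[]): string {
  return peers.map(p => `${p.name}@${p.resolution}`).join("\0");
}

const entryByKey = new Map<string, number>(); // dedupe key -> entry id
let nextEntryId = 0;

function entryFor(pkgId: number, peers: Peer[]): number {
  const key = `${pkgId}:${peerDigest(peers)}`;
  const existing = entryByKey.get(key);
  if (existing !== undefined) return existing; // dedupe! reuse the entry
  const id = nextEntryId++;
  entryByKey.set(key, id);
  return id;
}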

if (manager.options.log_level.isVerbose()) {
    const dedupe_end = timer.read();
    Output.prettyErrorln("Created store: {d} entries [{f}]", .{
        ctx.store.entries.len,
        bun.fmt.fmtDurationOneDecimal(dedupe_end),
    });
}

return ctx.store;
}
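The hoisted / public_hoisted maps above implement a first-come-wins claim over two namespaces: the hidden store root and the public root node_modules. A small TypeScript sketch of that decision, assuming the pattern objects behave like simple matchers (decideHoist and all names are illustrative):

// Sketch: first-come-wins hoisting over two namespaces, mirroring the
// hidden_hoisted / public_hoisted maps above. All names are illustrative.
function decideHoist(
  depName: string,
  isDirectRootDep: boolean,
  hoistPattern: { isMatch(name: string): boolean } | null, // manager.options.hoist_pattern
  publicHoistPattern: { isMatch(name: string): boolean } | null,
  hiddenHoisted: Set<string>,
  publicHoisted: Set<string>,
): { hidden: boolean; public: boolean } {
  // Hidden hoist: no pattern means "hoist everything"; otherwise only names
  // the pattern matches. The first claimant of a name wins.
  let hidden = false;
  if (hoistPattern === null || hoistPattern.isMatch(depName)) {
    hidden = !hiddenHoisted.has(depName);
    hiddenHoisted.add(depName);
  }

  // Public hoist: direct root dependencies always own their name; transitive
  // dependencies only get the root node_modules slot when a pattern matches
  // and the name is still unclaimed.
  let pub = isDirectRootDep;
  if (isDirectRootDep) {
    publicHoisted.add(depName);
  } else if (publicHoistPattern !== null && publicHoistPattern.isMatch(depName) && !publicHoisted.has(depName)) {
    publicHoisted.add(depName);
    pub = true;
  }

  return { hidden, public: pub };
}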

pub fn deinit(this: *const Store) void {
    var nodes = this.nodes;
    nodes.deinit(this.allocator);
    var entries = this.entries;
    entries.deinit(this.allocator);
}

/// Called from multiple threads. `parent_dedupe` should not be shared between threads.
pub fn isCycle(this: *const Store, id: Entry.Id, maybe_parent_id: Entry.Id, parent_dedupe: *std.AutoArrayHashMap(Entry.Id, void)) bool {
    var i: usize = 0;
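The body of isCycle is cut off by the next hunk, but the signature and the threading note suggest an upward walk over entry parents with a caller-owned visited set. A hedged TypeScript sketch of that shape (the traversal details are assumed, not taken from the source):

// Sketch: is `id` reachable by walking upward from `maybeParentId` through
// entry parents? The visited set is caller-owned, one per thread, matching
// the "parent_dedupe should not be shared" note above. Illustrative only.
function isCycle(
  entryParents: number[][],
  id: number,
  maybeParentId: number,
  visited: Set<number>,
): boolean {
  const stack = [maybeParentId];
  while (stack.length > 0) {
    const current = stack.pop()!;
    if (current === id) return true; // found a path back to `id`: cycle
    if (visited.has(current)) continue;
    visited.add(current);
    for (const parent of entryParents[current] ?? []) stack.push(parent);
  }
  return false;
}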
@@ -555,7 +1282,15 @@ const install = bun.install;
const Dependency = install.Dependency;
const DependencyID = install.DependencyID;
const PackageID = install.PackageID;
const PackageNameHash = install.PackageNameHash;
const Resolution = install.Resolution;
const invalid_dependency_id = install.invalid_dependency_id;
const invalid_package_id = install.invalid_package_id;

const Lockfile = install.Lockfile;
const DependencySlice = Lockfile.DependencySlice;
const Package = Lockfile.Package;
const Tree = Lockfile.Tree;

const PackageManager = install.PackageManager;
const WorkspaceFilter = PackageManager.WorkspaceFilter;

@@ -510,6 +510,12 @@ fn initRedirections(
},
.jsbuf => |val| {
    const globalObject = interpreter.event_loop.js.global;

+    if (file.jsbuf.idx >= interpreter.jsobjs.len) {
+        globalObject.throw("Invalid JS object reference in shell", .{}) catch {};
+        return .failed;
+    }

    if (interpreter.jsobjs[file.jsbuf.idx].asArrayBuffer(globalObject)) |buf| {
        const arraybuf: BuiltinIO.ArrayBuf = .{ .buf = jsc.ArrayBuffer.Strong{
            .array_buffer = buf,

@@ -792,13 +792,14 @@ pub const Interpreter = struct {
out_parser: *?bun.shell.Parser,
out_lex_result: *?shell.LexResult,
) !ast.Script {
+ const jsobjs_len: u32 = @intCast(jsobjs.len);
const lex_result = brk: {
    if (bun.strings.isAllASCII(script)) {
-        var lexer = bun.shell.LexerAscii.new(arena_allocator, script, jsstrings_to_escape);
+        var lexer = bun.shell.LexerAscii.new(arena_allocator, script, jsstrings_to_escape, jsobjs_len);
        try lexer.lex();
        break :brk lexer.get_result();
    }
-    var lexer = bun.shell.LexerUnicode.new(arena_allocator, script, jsstrings_to_escape);
+    var lexer = bun.shell.LexerUnicode.new(arena_allocator, script, jsstrings_to_escape, jsobjs_len);
    try lexer.lex();
    break :brk lexer.get_result();
};

@@ -2334,6 +2334,9 @@ pub fn NewLexer(comptime encoding: StringEncoding) type {
/// Not owned by this struct
string_refs: []bun.String,

+ /// Number of JS object references expected (for bounds validation)
+ jsobjs_len: u32 = 0,

const SubShellKind = enum {
    /// (echo hi; echo hello)
    normal,
@@ -2363,13 +2366,14 @@ pub fn NewLexer(comptime encoding: StringEncoding) type {
delimit_quote: bool,
};

- pub fn new(alloc: Allocator, src: []const u8, strings_to_escape: []bun.String) @This() {
+ pub fn new(alloc: Allocator, src: []const u8, strings_to_escape: []bun.String, jsobjs_len: u32) @This() {
    return .{
        .chars = Chars.init(src),
        .tokens = ArrayList(Token).init(alloc),
        .strpool = ArrayList(u8).init(alloc),
        .errors = ArrayList(LexError).init(alloc),
        .string_refs = strings_to_escape,
+        .jsobjs_len = jsobjs_len,
    };
}

@@ -2400,6 +2404,7 @@ pub fn NewLexer(comptime encoding: StringEncoding) type {
.word_start = self.word_start,
.j = self.j,
.string_refs = self.string_refs,
+ .jsobjs_len = self.jsobjs_len,
};
sublexer.chars.state = .Normal;
return sublexer;
@@ -3358,7 +3363,7 @@ pub fn NewLexer(comptime encoding: StringEncoding) type {
}

fn validateJSObjRefIdx(self: *@This(), idx: usize) bool {
-    if (idx >= std.math.maxInt(u32)) {
+    if (idx >= self.jsobjs_len) {
        self.add_error("Invalid JS object ref (out of bounds)");
        return false;
    }
@@ -4129,7 +4134,7 @@ pub const ShellSrcBuilder = struct {
};

/// Characters that need to be escaped
- const SPECIAL_CHARS = [_]u8{ '~', '[', ']', '#', ';', '\n', '*', '{', ',', '}', '`', '$', '=', '(', ')', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '|', '>', '<', '&', '\'', '"', ' ', '\\' };
+ const SPECIAL_CHARS = [_]u8{ '~', '[', ']', '#', ';', '\n', '*', '{', ',', '}', '`', '$', '=', '(', ')', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '|', '>', '<', '&', '\'', '"', ' ', '\\', SPECIAL_JS_CHAR };
const SPECIAL_CHARS_TABLE: bun.bit_set.IntegerBitSet(256) = brk: {
    var table = bun.bit_set.IntegerBitSet(256).initEmpty();
    for (SPECIAL_CHARS) |c| {
@@ -4554,15 +4559,16 @@ pub const TestingAPIs = struct {
var script = std.array_list.Managed(u8).init(arena.allocator());
try shellCmdFromJS(globalThis, string_args, &template_args, &jsobjs, &jsstrings, &script, marked_argument_buffer);

+ const jsobjs_len: u32 = @intCast(jsobjs.items.len);
const lex_result = brk: {
    if (bun.strings.isAllASCII(script.items[0..])) {
-        var lexer = LexerAscii.new(arena.allocator(), script.items[0..], jsstrings.items[0..]);
+        var lexer = LexerAscii.new(arena.allocator(), script.items[0..], jsstrings.items[0..], jsobjs_len);
        lexer.lex() catch |err| {
            return globalThis.throwError(err, "failed to lex shell");
        };
        break :brk lexer.get_result();
    }
-    var lexer = LexerUnicode.new(arena.allocator(), script.items[0..], jsstrings.items[0..]);
+    var lexer = LexerUnicode.new(arena.allocator(), script.items[0..], jsstrings.items[0..], jsobjs_len);
    lexer.lex() catch |err| {
        return globalThis.throwError(err, "failed to lex shell");
    };

@@ -556,6 +556,10 @@ fn initRedirections(this: *Cmd, spawn_args: *Subprocess.SpawnArgs) bun.JSError!?
if (this.base.eventLoop() != .js) @panic("JS values not allowed in this context");
const global = this.base.eventLoop().js.global;

+ if (val.idx >= this.base.interpreter.jsobjs.len) {
+     return global.throw("Invalid JS object reference in shell", .{});
+ }

if (this.base.interpreter.jsobjs[val.idx].asArrayBuffer(global)) |buf| {
    const stdio: bun.shell.subproc.Stdio = .{ .array_buffer = jsc.ArrayBuffer.Strong{
        .array_buffer = buf,
@@ -568,9 +572,9 @@ fn initRedirections(this: *Cmd, spawn_args: *Subprocess.SpawnArgs) bun.JSError!?
if (this.node.redirect.stdin) {
    try spawn_args.stdio[stdin_no].extractBlob(global, .{ .Blob = blob }, stdin_no);
} else if (this.node.redirect.stdout) {
-    try spawn_args.stdio[stdin_no].extractBlob(global, .{ .Blob = blob }, stdout_no);
+    try spawn_args.stdio[stdout_no].extractBlob(global, .{ .Blob = blob }, stdout_no);
} else if (this.node.redirect.stderr) {
-    try spawn_args.stdio[stdin_no].extractBlob(global, .{ .Blob = blob }, stderr_no);
+    try spawn_args.stdio[stderr_no].extractBlob(global, .{ .Blob = blob }, stderr_no);
}
} else if (try jsc.WebCore.ReadableStream.fromJS(this.base.interpreter.jsobjs[val.idx], global)) |rstream| {
    _ = rstream;

@@ -426,12 +426,6 @@ pub const EncodeIntoResult = struct {
    written: u32 = 0,
};
pub fn allocateLatin1IntoUTF8(allocator: std.mem.Allocator, latin1_: []const u8) ![]u8 {
-    if (comptime bun.FeatureFlags.latin1_is_now_ascii) {
-        var out = try allocator.alloc(u8, latin1_.len);
-        @memcpy(out[0..latin1_.len], latin1_);
-        return out;
-    }

    const list = try std.array_list.Managed(u8).initCapacity(allocator, latin1_.len);
    var foo = try allocateLatin1IntoUTF8WithList(list, 0, latin1_);
    return try foo.toOwnedSlice();
@@ -685,13 +679,6 @@ pub fn copyLatin1IntoUTF8(buf_: []u8, latin1_: []const u8) EncodeIntoResult {
}

pub fn copyLatin1IntoUTF8StopOnNonASCII(buf_: []u8, latin1_: []const u8, comptime stop: bool) EncodeIntoResult {
-    if (comptime bun.FeatureFlags.latin1_is_now_ascii) {
-        const to_copy = @as(u32, @truncate(@min(buf_.len, latin1_.len)));
-        @memcpy(buf_[0..to_copy], latin1_[0..to_copy]);
-
-        return .{ .written = to_copy, .read = to_copy };
-    }

    var buf = buf_;
    var latin1 = latin1_;

@@ -589,7 +589,6 @@ function expectBundled(
dotenv ||
typeof production !== "undefined" ||
bundling === false ||
(run && target === "node") ||
emitDCEAnnotations ||
bundleWarnings ||
env ||

@@ -1,8 +1,8 @@
import { file, spawn, write } from "bun";
import { afterAll, beforeAll, describe, expect, test } from "bun:test";
import { existsSync, lstatSync, readlinkSync } from "fs";
- import { mkdir, readlink, rm, symlink } from "fs/promises";
- import { VerdaccioRegistry, bunEnv, bunExe, readdirSorted, runBunInstall } from "harness";
+ import { exists, mkdir, readlink, rm, symlink } from "fs/promises";
+ import { VerdaccioRegistry, bunEnv, bunExe, readdirSorted, runBunInstall, tempDir } from "harness";
import { join } from "path";

const registry = new VerdaccioRegistry();
@@ -344,9 +344,7 @@ test("can install folder dependencies on root package", async () => {
});

describe("isolated workspaces", () => {
-  test("basic", async () => {
-    const { packageJson, packageDir } = await registry.createTestDir({ bunfigOpts: { linker: "isolated" } });

+  async function createWorkspace(packageJson, packageDir) {
    await Promise.all([
      write(
        packageJson,
@@ -383,6 +381,11 @@ describe("isolated workspaces", () => {
        }),
      ),
    ]);
+  }
+  test("basic", async () => {
+    const { packageJson, packageDir } = await registry.createTestDir({ bunfigOpts: { linker: "isolated" } });

+    await createWorkspace(packageJson, packageDir);

    await runBunInstall(bunEnv, packageDir);

@@ -417,6 +420,86 @@ describe("isolated workspaces", () => {
  });
});

test("--filter only includes matched workspaces and transitively workspaces", async () => {
|
||||
const { packageJson, packageDir } = await registry.createTestDir({ bunfigOpts: { linker: "isolated" } });
|
||||
|
||||
await createWorkspace(packageJson, packageDir);
|
||||
|
||||
let { exited } = spawn({
|
||||
cmd: [bunExe(), "install", "--filter", "test-pkg-workspaces"],
|
||||
cwd: packageDir,
|
||||
stdout: "ignore",
|
||||
stderr: "ignore",
|
||||
env: bunEnv,
|
||||
});
|
||||
|
||||
expect(await exited).toBe(0);
|
||||
|
||||
// only the root workspace should have installed node_modules
|
||||
expect(
|
||||
await Promise.all([
|
||||
readdirSorted(join(packageDir, "node_modules")),
|
||||
readdirSorted(join(packageDir, "node_modules", ".bun")),
|
||||
exists(join(packageDir, "pkg-1", "node_modules")),
|
||||
exists(join(packageDir, "pkg-2", "node_modules")),
|
||||
]),
|
||||
).toEqual([[".bun", "no-deps"], ["no-deps@1.0.0", "node_modules"], false, false]);
|
||||
|
||||
await rm(join(packageDir, "node_modules"), { recursive: true });
|
||||
|
||||
// Should install pkg-1, and also pkg-2 because pkg-1
|
||||
// depends on pkg-2.
|
||||
({ exited } = spawn({
|
||||
cmd: [bunExe(), "install", "--filter", "pkg-1"],
|
||||
cwd: packageDir,
|
||||
env: bunEnv,
|
||||
stdout: "ignore",
|
||||
stderr: "ignore",
|
||||
}));
|
||||
|
||||
expect(await exited).toBe(0);
|
||||
|
||||
expect(
|
||||
await Promise.all([
|
||||
readdirSorted(join(packageDir, "node_modules")),
|
||||
readdirSorted(join(packageDir, "node_modules", ".bun")),
|
||||
readdirSorted(join(packageDir, "pkg-1", "node_modules")),
|
||||
readdirSorted(join(packageDir, "pkg-2", "node_modules")),
|
||||
]),
|
||||
).toEqual([
|
||||
[".bun"],
|
||||
["@types+is-number@1.0.0", "a-dep-b@1.0.0", "a-dep@1.0.1", "b-dep-a@1.0.0", "node_modules"],
|
||||
["@types", "a-dep", "pkg-2"],
|
||||
["b-dep-a"],
|
||||
]);
|
||||
|
||||
await Promise.all([
|
||||
rm(join(packageDir, "node_modules"), { recursive: true }),
|
||||
rm(join(packageDir, "pkg-1", "node_modules"), { recursive: true }),
|
||||
rm(join(packageDir, "pkg-2", "node_modules"), { recursive: true }),
|
||||
]);
|
||||
|
||||
// only pkg-2 should be installed
|
||||
({ exited } = spawn({
|
||||
cmd: [bunExe(), "install", "--filter", "pkg-2"],
|
||||
cwd: packageDir,
|
||||
env: bunEnv,
|
||||
stdout: "ignore",
|
||||
stderr: "ignore",
|
||||
}));
|
||||
|
||||
expect(await exited).toBe(0);
|
||||
|
||||
expect(
|
||||
await Promise.all([
|
||||
readdirSorted(join(packageDir, "node_modules")),
|
||||
readdirSorted(join(packageDir, "node_modules", ".bun")),
|
||||
exists(join(packageDir, "pkg-1", "node_modules")),
|
||||
readdirSorted(join(packageDir, "pkg-2", "node_modules")),
|
||||
]),
|
||||
).toEqual([[".bun"], ["a-dep-b@1.0.0", "b-dep-a@1.0.0", "node_modules"], false, ["b-dep-a"]]);
|
||||
});
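The assertions above encode the --filter rule: a matched workspace pulls in every workspace it transitively depends on, and unmatched workspaces get no node_modules at all. A small TypeScript sketch of that closure computation (the graph shape is illustrative):

// Sketch: the set of workspaces `bun install --filter <pattern>` installs is
// the matched workspaces plus their transitive workspace dependencies.
function filterClosure(workspaceDeps: Map<string, string[]>, matched: string[]): Set<string> {
  const result = new Set<string>();
  const queue = [...matched];
  while (queue.length > 0) {
    const name = queue.shift()!;
    if (result.has(name)) continue;
    result.add(name);
    for (const dep of workspaceDeps.get(name) ?? []) queue.push(dep);
  }
  return result;
}

// With pkg-1 -> [pkg-2], filterClosure(deps, ["pkg-1"]) yields
// {"pkg-1", "pkg-2"}, matching the second install above; filtering on
// pkg-2 yields {"pkg-2"} alone, matching the third.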

test("workspace self dependencies create symlinks", async () => {
  const { packageDir } = await registry.createTestDir({
    bunfigOpts: { linker: "isolated" },
@@ -595,7 +678,6 @@ describe("optional peers", () => {
  }

  await checkInstall();
-  await checkInstall();
});
});

@@ -1234,3 +1316,112 @@ test("runs lifecycle scripts correctly", async () => {
  expect(lifecyclePostinstallDir).toEqual(["lifecycle-postinstall"]);
  expect(allLifecycleScriptsDir).toEqual(["all-lifecycle-scripts"]);
});

// When an auto-installed peer dependency has its OWN peer deps, those
// transitive peers get re-queued during peer processing. If all manifest
// loads are synchronous (cached with valid max-age) AND the transitive peer's
// version constraint doesn't match what's already in the lockfile,
// pendingTaskCount() stays at 0 and waitForPeers is skipped, leaving
// the transitive peer's resolution unset (= invalid_package_id → filtered
// from the install).
test("transitive peer deps are resolved when resolution is fully synchronous", async () => {
  const packagesDir = join(import.meta.dir, "registry", "packages");

  // Self-contained HTTP server that serves package manifests & tarballs
  // directly from the Verdaccio fixtures, with Cache-Control: max-age=300
  // to replicate npmjs.org behavior (fully synchronous on warm cache).
  using server = Bun.serve({
    port: 0,
    async fetch(req) {
      const url = new URL(req.url);
      const pathname = url.pathname;

      // Tarball: /<name>/-/<name>-<version>.tgz
      if (pathname.endsWith(".tgz")) {
        const match = pathname.match(/\/([^/]+)\/-\/(.+\.tgz)$/);
        if (match) {
          const tarball = file(join(packagesDir, match[1], match[2]));
          if (await tarball.exists()) {
            return new Response(tarball, {
              headers: { "Content-Type": "application/octet-stream" },
            });
          }
        }
        return new Response("Not found", { status: 404 });
      }

      // Manifest: /<name>
      const packageName = decodeURIComponent(pathname.slice(1));
      const metaFile = file(join(packagesDir, packageName, "package.json"));
      if (!(await metaFile.exists())) {
        return new Response("Not found", { status: 404 });
      }

      // Rewrite tarball URLs to point at this server
      const meta = await metaFile.json();
      const port = server.port;
      for (const [ver, info] of Object.entries(meta.versions ?? {}) as [string, any][]) {
        if (info?.dist?.tarball) {
          info.dist.tarball = `http://localhost:${port}/${packageName}/-/${packageName}-${ver}.tgz`;
        }
      }

      return new Response(JSON.stringify(meta), {
        headers: {
          "Content-Type": "application/json",
          "Cache-Control": "public, max-age=300",
        },
      });
    },
  });

  using packageDir = tempDir("transitive-peer-test-", {});
  const packageJson = join(String(packageDir), "package.json");
  const cacheDir = join(String(packageDir), ".bun-cache");
  const bunfig = `[install]\ncache = "${cacheDir.replaceAll("\\", "\\\\")}"\nregistry = "http://localhost:${server.port}/"\nlinker = "isolated"\n`;
  await write(join(String(packageDir), "bunfig.toml"), bunfig);

  await write(
    packageJson,
    JSON.stringify({
      name: "test-transitive-peer",
      dependencies: {
        // Chain: uses-strict-peer → (peer) strict-peer-dep → (peer) no-deps@^2.0.0
        // Root has no-deps@1.0.0, which does NOT satisfy ^2.0.0. This forces
        // strict-peer-dep's peer `no-deps` through the full resolution pass
        // (can't reuse root's no-deps via getPackageID).
        "no-deps": "1.0.0",
        "uses-strict-peer": "1.0.0",
      },
    }),
  );

  // First install: populates manifest cache (with max-age=300 from server)
  await runBunInstall(bunEnv, String(packageDir), { allowWarnings: true });

  // Second install with NO lockfile and WARM cache. Manifests are fresh
  // (within max-age) so all loads are synchronous — this is the bug trigger.
  await rm(join(String(packageDir), "node_modules"), { recursive: true, force: true });
  await rm(join(String(packageDir), "bun.lock"), { force: true });
  await runBunInstall(bunEnv, String(packageDir), { allowWarnings: true });

  // Entry names have peer hashes; find them dynamically
  const bunDir = join(String(packageDir), "node_modules", ".bun");
  const entries = await readdirSorted(bunDir);
  const strictPeerEntry = entries.find(e => e.startsWith("strict-peer-dep@1.0.0"));
  const usesStrictEntry = entries.find(e => e.startsWith("uses-strict-peer@1.0.0"));

  // strict-peer-dep must exist (auto-installed via uses-strict-peer's peer)
  expect(strictPeerEntry).toBeDefined();
  expect(usesStrictEntry).toBeDefined();

  // strict-peer-dep's own peer `no-deps` must be resolved and symlinked.
  // Without the fix: this symlink is missing because the transitive peer
  // queue was never drained after drainDependencyList re-queued it.
  expect(existsSync(join(bunDir, strictPeerEntry!, "node_modules", "no-deps"))).toBe(true);

  // Verify the chain is intact
  expect(readlinkSync(join(bunDir, usesStrictEntry!, "node_modules", "strict-peer-dep"))).toBe(
    join("..", "..", strictPeerEntry!, "node_modules", "strict-peer-dep"),
  );
});
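The comment at the top of this test describes the trigger as pure control flow. A hedged pseudocode sketch in TypeScript (pendingTaskCount, waitForPeers, and drainDependencyList are names taken from the comments above, not a real API):

// Pseudocode sketch of the described bug, not real Bun internals: transitive
// peers re-queued by drainDependencyList were only drained on the async path.
function finishResolution(state: {
  pendingTaskCount(): number;
  waitForPeers(): void; // drains the peer queue while tasks are in flight
  drainPeerQueue(): void;
}) {
  if (state.pendingTaskCount() > 0) {
    state.waitForPeers(); // warm cache: count is 0, so this never ran
  }
  // What the test above exercises: the re-queued transitive peers must be
  // drained even when every manifest load completed synchronously.
  state.drainPeerQueue();
}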

@@ -0,0 +1,41 @@
{
  "name": "strict-peer-dep",
  "versions": {
    "1.0.0": {
      "name": "strict-peer-dep",
      "version": "1.0.0",
      "peerDependencies": {
        "no-deps": "^2.0.0"
      },
      "_id": "strict-peer-dep@1.0.0",
      "_nodeVersion": "22.2.0",
      "_npmVersion": "10.8.1",
      "dist": {
        "integrity": "sha512-bz2RC/Fp4Nvc9aIiHB6Szko9m6sxNy/clIHnTAGeD9VSpQJTvlPAJqJ09lWo7N3q4JNLEqDTf3Mn+zNUsYOKWQ==",
        "shasum": "1548927b5ca502c008c3ab091fb707f96181ecaf",
        "tarball": "http://localhost:4873/strict-peer-dep/-/strict-peer-dep-1.0.0.tgz"
      },
      "contributors": []
    }
  },
  "time": {
    "modified": "2026-02-28T00:00:00.000Z",
    "created": "2026-02-28T00:00:00.000Z",
    "1.0.0": "2026-02-28T00:00:00.000Z"
  },
  "users": {},
  "dist-tags": {
    "latest": "1.0.0"
  },
  "_uplinks": {},
  "_distfiles": {},
  "_attachments": {
    "strict-peer-dep-1.0.0.tgz": {
      "shasum": "1548927b5ca502c008c3ab091fb707f96181ecaf",
      "version": "1.0.0"
    }
  },
  "_rev": "",
  "_id": "strict-peer-dep",
  "readme": "ERROR: No README data found!"
}
Binary file not shown.
@@ -0,0 +1,41 @@
{
  "name": "uses-strict-peer",
  "versions": {
    "1.0.0": {
      "name": "uses-strict-peer",
      "version": "1.0.0",
      "peerDependencies": {
        "strict-peer-dep": "1.0.0"
      },
      "_id": "uses-strict-peer@1.0.0",
      "_nodeVersion": "22.2.0",
      "_npmVersion": "10.8.1",
      "dist": {
        "integrity": "sha512-RbQ5blabFjzZxf/5rXghqXxa2+Dmv/owDb1YzHwNOOBmxGJZTqPt3OIYHlsGX/wnPVjAP6gBwJl3nxLxU0pzlw==",
        "shasum": "7cff9823abdca5ab698f2c6b73410b87004960e9",
        "tarball": "http://localhost:4873/uses-strict-peer/-/uses-strict-peer-1.0.0.tgz"
      },
      "contributors": []
    }
  },
  "time": {
    "modified": "2026-02-28T00:00:00.000Z",
    "created": "2026-02-28T00:00:00.000Z",
    "1.0.0": "2026-02-28T00:00:00.000Z"
  },
  "users": {},
  "dist-tags": {
    "latest": "1.0.0"
  },
  "_uplinks": {},
  "_distfiles": {},
  "_attachments": {
    "uses-strict-peer-1.0.0.tgz": {
      "shasum": "7cff9823abdca5ab698f2c6b73410b87004960e9",
      "version": "1.0.0"
    }
  },
  "_rev": "",
  "_id": "uses-strict-peer",
  "readme": "ERROR: No README data found!"
}
Binary file not shown.
test/js/bun/shell/shell-sentinel-hardening.test.ts (new file, 58 lines)
@@ -0,0 +1,58 @@
import { $ } from "bun";
import { describe, expect, test } from "bun:test";
import { bunEnv, bunExe, tempDir } from "harness";

describe("shell sentinel character hardening", () => {
  test("string matching internal obj-ref prefix round-trips through interpolation", async () => {
    // \x08 is the shell's internal sentinel byte. When followed by "__bun_"
    // and then non-digit characters, the old code didn't escape \x08 (it wasn't
    // in SPECIAL_CHARS), so the raw bytes were injected into the script buffer.
    // The lexer then misinterpreted them as a malformed internal object
    // reference pattern and produced a lex error.
    // The suffix must contain non-digit, non-special chars so that:
    // 1. needsEscape() returns false without the \x08 fix
    // 2. looksLikeJSObjRef() matches the __bun_ prefix
    // 3. eatJSObjRef() fails because it finds no digit index
    const str = "\x08__bun_abc";
    const result = await $`echo ${str}`.text();
    expect(result).toBe(str + "\n");
  });

  test("string matching internal str-ref prefix round-trips through interpolation", async () => {
    // Same issue but for the __bunstr_ prefix pattern.
    const str = "\x08__bunstr_abc";
    const result = await $`echo ${str}`.text();
    expect(result).toBe(str + "\n");
  });

  test("raw sentinel injection with out-of-bounds index does not crash", async () => {
    // { raw: ... } bypasses string escaping, allowing injection of a sentinel
    // pattern with a digit suffix into the script buffer. The old
    // validateJSObjRefIdx only rejected indices >= maxInt(u32), so index 9999
    // was accepted. At execution time, accessing jsobjs[9999] on an empty
    // array caused a segfault. The fix checks against actual jsobjs.len.
    // Run in a subprocess so a crash on old bun doesn't kill the test runner.
    const testScript = [
      'import { $ } from "bun";',
      "const sentinel = String.fromCharCode(8) + '__bun_9999';",
      "try { await $`echo hello > ${{ raw: sentinel }}`; } catch {}",
      'console.log("OK");',
    ].join("\n");

    using dir = tempDir("sentinel-test", {
      "test.js": testScript,
    });

    await using proc = Bun.spawn({
      cmd: [bunExe(), "test.js"],
      cwd: String(dir),
      env: bunEnv,
      stdout: "pipe",
      stderr: "pipe",
    });

    const [stdout, exitCode] = await Promise.all([proc.stdout.text(), proc.exited]);
    expect(stdout.trim()).toBe("OK");
    expect(exitCode).toBe(0);
  });
});
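The first two tests hinge on the escaping predicate: an interpolated string must be escaped whenever it contains any byte the shell treats as special, which now includes the \x08 sentinel. A minimal TypeScript sketch of that check (needsEscape is named after the comment above; the character set mirrors SPECIAL_CHARS):

// Sketch: an interpolated string needs escaping when it contains any byte
// with special meaning to the shell. The fix adds the \x08 sentinel to the
// set, so "\x08__bun_abc" is escaped instead of being copied raw.
const SPECIAL = new Set([..."~[]#;\n*{,}`$=()0123456789|><&'\" \\", "\x08"]);

function needsEscape(s: string): boolean {
  for (const ch of s) {
    if (SPECIAL.has(ch)) return true;
  }
  return false;
}

// needsEscape("\x08__bun_abc") is now true, which is why the interpolation
// round-trips; { raw: ... } skips this check entirely, hence the bounds test.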

test/js/bun/transpiler/transpiler-tsconfig-uaf.test.ts (new file, 76 lines)
@@ -0,0 +1,76 @@
import { describe, expect, test } from "bun:test";

describe("Transpiler tsconfig lifetime", () => {
  test("multiple async transform() calls with tsconfig do not crash", async () => {
    const transpiler = new Bun.Transpiler({
      loader: "tsx",
      tsconfig: JSON.stringify({
        compilerOptions: {
          experimentalDecorators: true,
          jsx: "react",
          jsxFactory: "React.createElement",
        },
      }),
    });

    // First async transform
    const result1 = await transpiler.transform("const x: number = 1;");
    expect(result1).toContain("const x = 1");

    // Second async transform — would crash before the fix due to use-after-free
    // on the tsconfig pointer that was freed by the first TransformTask.deinit
    const result2 = await transpiler.transform("const y: number = 2;");
    expect(result2).toContain("const y = 2");

    // Third call to be safe
    const result3 = await transpiler.transform("const z: number = 3;");
    expect(result3).toContain("const z = 3");
  });

  test("async transform() followed by transformSync() with tsconfig does not crash", async () => {
    const transpiler = new Bun.Transpiler({
      loader: "tsx",
      tsconfig: JSON.stringify({
        compilerOptions: {
          experimentalDecorators: true,
          jsx: "react",
          jsxFactory: "React.createElement",
        },
      }),
    });

    // Before this fix, async transform freed tsconfig in TransformTask.deinit
    const result1 = await transpiler.transform("const a: string = 'hello';");
    expect(result1).toContain('const a = "hello"');

    // Sync transform would read freed memory without the fix
    const result2 = transpiler.transformSync("const b: string = 'world';");
    expect(result2).toContain('const b = "world"');
  });

  test("tsconfig jsx settings are preserved across multiple async transforms", async () => {
    const transpiler = new Bun.Transpiler({
      loader: "tsx",
      tsconfig: JSON.stringify({
        compilerOptions: {
          jsx: "react",
          jsxFactory: "h",
          jsxFragmentFactory: "Fragment",
        },
      }),
    });

    const code = "export default <div>hello</div>;";

    const result1 = await transpiler.transform(code);
    expect(result1).toContain("h(");

    // After the first async transform, tsconfig should still be valid
    const result2 = await transpiler.transform(code);
    expect(result2).toContain("h(");

    // Sync should also work
    const result3 = transpiler.transformSync(code);
    expect(result3).toContain("h(");
  });
});
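Because the fix pins tsconfig ownership to the Transpiler instance rather than to individual transform tasks, one instance can be reused freely, including for overlapping async transforms. A short usage sketch (assertions omitted; this is an illustration, not part of the test file):

// Usage sketch: one Transpiler instance, overlapping async transforms. Each
// transform task now borrows the instance's tsconfig instead of freeing it.
const shared = new Bun.Transpiler({
  loader: "ts",
  tsconfig: JSON.stringify({ compilerOptions: { experimentalDecorators: true } }),
});

const outputs = await Promise.all([
  shared.transform("const a: number = 1;"),
  shared.transform("const b: number = 2;"),
  shared.transform("const c: number = 3;"),
]);
// All three resolve without touching freed memory; transformSync on the
// same instance afterwards is also safe.
console.log(outputs.length); // 3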

@@ -195,7 +195,7 @@ test/js/node/test/parallel/test-http-server-stale-close.js
test/js/third_party/comlink/comlink.test.ts
test/regression/issue/22635/22635.test.ts
test/js/node/test/parallel/test-http-url.parse-https.request.js
- test/bundler/bundler_compile_autoload.test.ts
+ test/bundler/bundler_compile_autoload.test.ts

# Bun::JSNodeHTTPServerSocket::clearSocketData
test/js/node/test/parallel/test-http-server-keep-alive-max-requests-null.js

test/regression/issue/27575.test.ts (new file, 115 lines)
@@ -0,0 +1,115 @@
import { expect, test } from "bun:test";

// https://github.com/oven-sh/bun/issues/27575
// Bun.Transpiler ignored experimentalDecorators: true from tsconfig,
// always emitting TC39-style decorators instead of legacy TypeScript decorators.

test("Bun.Transpiler respects experimentalDecorators: true from tsconfig", () => {
  const transpiler = new Bun.Transpiler({
    loader: "ts",
    target: "browser",
    tsconfig: JSON.stringify({
      compilerOptions: {
        experimentalDecorators: true,
      },
    }),
  });

  const code = `
    function Prop() { return function(target: any, key: string) {}; }

    class Foo {
      @Prop() bar: number = 0;
    }
  `;

  const result = transpiler.transformSync(code);

  // Legacy decorators use __legacyDecorateClassTS, NOT TC39 helpers
  expect(result).not.toContain("__decorateElement");
  expect(result).not.toContain("__decoratorStart");
  expect(result).not.toContain("__runInitializers");

  // Legacy decorators produce __legacyDecorateClassTS calls
  expect(result).toContain("__legacyDecorateClassTS");
});

test("Bun.Transpiler respects emitDecoratorMetadata: true from tsconfig", () => {
  const transpiler = new Bun.Transpiler({
    loader: "ts",
    target: "browser",
    tsconfig: JSON.stringify({
      compilerOptions: {
        experimentalDecorators: true,
        emitDecoratorMetadata: true,
      },
    }),
  });

  const code = `
    function Dec() { return function(target: any, key: string) {}; }

    class Foo {
      @Dec() bar: string = "";
    }
  `;

  const result = transpiler.transformSync(code);

  // Should emit legacy decorators with metadata
  expect(result).not.toContain("__decorateElement");
  expect(result).toContain("__legacyDecorateClassTS");
  expect(result).toContain("__legacyMetadataTS");
});

test("Bun.Transpiler emits TC39 decorators when experimentalDecorators is not set", () => {
  const transpiler = new Bun.Transpiler({
    loader: "ts",
    target: "browser",
    tsconfig: JSON.stringify({
      compilerOptions: {},
    }),
  });

  const code = `
    function Prop() { return function(target: any, key: string) {}; }

    class Foo {
      @Prop() bar: number = 0;
    }
  `;

  const result = transpiler.transformSync(code);

  // TC39 decorators use __decorateElement / __decoratorStart / __runInitializers
  expect(result).toContain("__decorateElement");
  expect(result).not.toContain("__legacyDecorateClassTS");
});

test("Bun.Transpiler.transform (async) respects experimentalDecorators: true", async () => {
  const transpiler = new Bun.Transpiler({
    loader: "ts",
    target: "browser",
    tsconfig: JSON.stringify({
      compilerOptions: {
        experimentalDecorators: true,
      },
    }),
  });

  const code = `
    function Prop() { return function(target: any, key: string) {}; }

    class Foo {
      @Prop() bar: number = 0;
    }
  `;

  const result = await transpiler.transform(code);

  // Legacy decorators use __legacyDecorateClassTS, NOT TC39 helpers
  expect(result).not.toContain("__decorateElement");
  expect(result).not.toContain("__decoratorStart");
  expect(result).not.toContain("__runInitializers");
  expect(result).toContain("__legacyDecorateClassTS");
});
test/regression/issue/27598.test.ts (new file, 40 lines)
@@ -0,0 +1,40 @@
import { cssInternals } from "bun:internal-for-testing";
import { expect, test } from "bun:test";

const { minifyTest, testWithOptions } = cssInternals;

test("unicode-range in @font-face is preserved", () => {
  const source = `@font-face {
  font-family: "Roboto Variable";
  unicode-range: U+0000-00FF, U+0131, U+0152-0153;
}`;
  const expected = `@font-face {
  font-family: Roboto Variable;
  unicode-range: U+??, U+131, U+152-153;
}`;
  expect(testWithOptions(source, expected)).toEqualIgnoringWhitespace(expected);
});

test("unicode-range in @font-face is preserved when minified", () => {
  const source = `@font-face { font-family: "Roboto Variable"; unicode-range: U+0000-00FF, U+0131, U+0152-0153; }`;
  const expected = `@font-face{font-family:Roboto Variable;unicode-range:U+??,U+131,U+152-153}`;
  expect(minifyTest(source, expected)).toEqual(expected);
});

test("unicode-range wildcard in @font-face is preserved", () => {
  const source = `@font-face { font-family: "Test"; unicode-range: U+4??; }`;
  const expected = `@font-face{font-family:Test;unicode-range:U+4??}`;
  expect(minifyTest(source, expected)).toEqual(expected);
});

test("unicode-range with hex letters in @font-face is preserved", () => {
  const source = `@font-face { font-family: "Test"; unicode-range: U+A640-A69F; }`;
  const expected = `@font-face{font-family:Test;unicode-range:U+a640-a69f}`;
  expect(minifyTest(source, expected)).toEqual(expected);
});

test("unicode-range single hex value in @font-face is preserved", () => {
  const source = `@font-face { font-family: "Test"; unicode-range: U+00FF; }`;
  const expected = `@font-face{font-family:Test;unicode-range:U+ff}`;
  expect(minifyTest(source, expected)).toEqual(expected);
});