Compare commits

...

7 Commits

Author           SHA1        Message                                   Date
Dylan Conway     7affa5ecb4  fix merge                                 2025-11-02 12:01:03 -08:00
Dylan Conway     2b3b6dc479  Merge branch 'main' into dylan/fix-peers  2025-11-02 11:57:04 -08:00
Dylan Conway     476ac83f4c  test                                      2025-10-24 16:32:14 -07:00
autofix-ci[bot]  d36ef12cfc  [autofix.ci] apply automated fixes        2025-10-24 07:33:24 +00:00
Dylan Conway     8376f9ef7e  remove incorrect alias resolving          2025-10-24 00:30:27 -07:00
Dylan Conway     3ca1acb4f9  change Tree.Id to enum                    2025-10-23 19:01:51 -07:00
Dylan Conway     23b075051d  remove early resolve                      2025-10-23 14:34:14 -07:00
31 changed files with 221 additions and 507 deletions

View File

@@ -115,7 +115,6 @@ pub const AsyncModule = struct {
.onPackageDownloadError = onPackageDownloadError,
.progress_bar = true,
},
true,
PackageManager.Options.LogLevel.default,
) catch unreachable;
} else {
@@ -128,7 +127,6 @@ pub const AsyncModule = struct {
.onPackageManifestError = onPackageManifestError,
.onPackageDownloadError = onPackageDownloadError,
},
true,
PackageManager.Options.LogLevel.default_no_progress,
) catch unreachable;
}

View File

@@ -340,7 +340,6 @@ pub const BunxCommand = struct {
defer requests_buf.deinit(ctx.allocator);
const update_requests = UpdateRequest.parse(
ctx.allocator,
null,
ctx.log,
&.{opts.package_name},
&requests_buf,

View File

@@ -24,7 +24,7 @@ pub const PackageInstaller = struct {
successfully_installed: Bitset,
tree_iterator: *Lockfile.Tree.Iterator(.node_modules),
command_ctx: Command.Context,
current_tree_id: Lockfile.Tree.Id = Lockfile.Tree.invalid_id,
current_tree_id: Lockfile.Tree.Id = .invalid,
// fields used for running lifecycle scripts when it's safe
//
@@ -48,7 +48,7 @@ pub const PackageInstaller = struct {
const debug = Output.scoped(.PackageInstaller, .hidden);
pub const NodeModulesFolder = struct {
tree_id: Lockfile.Tree.Id = 0,
tree_id: Lockfile.Tree.Id = .root,
path: std.ArrayList(u8) = std.ArrayList(u8).init(bun.default_allocator),
pub fn deinit(this: *NodeModulesFolder) void {
@@ -208,27 +208,27 @@ pub const PackageInstaller = struct {
log_level: Options.LogLevel,
) void {
if (comptime Environment.allow_assert) {
bun.assertWithLocation(tree_id != Lockfile.Tree.invalid_id, @src());
bun.assertWithLocation(tree_id != .invalid, @src());
}
const tree = &this.trees[tree_id];
const tree = &this.trees[tree_id.get()];
const current_count = tree.install_count;
const max = this.lockfile.buffers.trees.items[tree_id].dependencies.len;
const max = this.lockfile.buffers.trees.items[tree_id.get()].dependencies.len;
if (current_count == std.math.maxInt(usize)) {
if (comptime Environment.allow_assert)
Output.panic("Installed more packages than expected for tree id: {d}. Expected: {d}", .{ tree_id, max });
Output.panic("Installed more packages than expected for tree id: {d}. Expected: {d}", .{ tree_id.get(), max });
return;
}
const is_not_done = current_count + 1 < max;
this.trees[tree_id].install_count = if (is_not_done) current_count + 1 else std.math.maxInt(usize);
this.trees[tree_id.get()].install_count = if (is_not_done) current_count + 1 else std.math.maxInt(usize);
if (is_not_done) return;
this.completed_trees.set(tree_id);
this.completed_trees.set(tree_id.get());
if (tree.binaries.count() > 0) {
this.seen_bin_links.clearRetainingCapacity();
@@ -286,7 +286,7 @@ pub const PackageInstaller = struct {
// tree (0).
const global = if (!this.manager.options.global)
false
else if (tree_id != 0)
else if (tree_id != .root)
false
else global: {
for (this.manager.update_requests) |request| {
@@ -333,7 +333,7 @@ pub const PackageInstaller = struct {
this.node_modules.path.items.len = strings.withoutTrailingSlash(FileSystem.instance.top_level_dir).len + 1;
const rel_path, _ = Lockfile.Tree.relativePathAndDepth(
lockfile,
@intCast(tree_id),
.from(@intCast(tree_id)),
&node_modules_rel_path_buf,
&depth_buf,
.node_modules,
@@ -341,7 +341,7 @@ pub const PackageInstaller = struct {
bun.handleOom(this.node_modules.path.appendSlice(rel_path));
this.linkTreeBins(tree, @intCast(tree_id), &link_target_buf, &link_dest_buf, &link_rel_buf, log_level);
this.linkTreeBins(tree, .from(@intCast(tree_id)), &link_target_buf, &link_dest_buf, &link_rel_buf, log_level);
}
}
}
@@ -400,7 +400,7 @@ pub const PackageInstaller = struct {
const resolutions = lockfile.buffers.resolutions.items;
for (this.trees, 0..) |*tree, i| {
if (force or this.canInstallPackageForTree(this.lockfile.buffers.trees.items, @intCast(i))) {
if (force or this.canInstallPackageForTree(this.lockfile.buffers.trees.items, .from(@intCast(i)))) {
defer tree.pending_installs.clearRetainingCapacity();
// If installing these packages completes the tree, we don't allow it
@@ -488,7 +488,7 @@ pub const PackageInstaller = struct {
/// Check if a tree is ready to start running lifecycle scripts
pub fn canRunScripts(this: *PackageInstaller, scripts_tree_id: Lockfile.Tree.Id) bool {
const deps = this.tree_ids_to_trees_the_id_depends_on.at(scripts_tree_id);
const deps = this.tree_ids_to_trees_the_id_depends_on.at(scripts_tree_id.get());
// .monotonic is okay because this value isn't modified from any other thread.
return (deps.subsetOf(this.completed_trees) or
deps.eql(this.completed_trees)) and
@@ -498,10 +498,10 @@ pub const PackageInstaller = struct {
/// A tree can start installing packages when the parent has installed all its packages. If the parent
/// isn't finished, we need to wait because it's possible a package installed in this tree will be deleted by the parent.
pub fn canInstallPackageForTree(this: *const PackageInstaller, trees: []Lockfile.Tree, package_tree_id: Lockfile.Tree.Id) bool {
var curr_tree_id = trees[package_tree_id].parent;
while (curr_tree_id != Lockfile.Tree.invalid_id) {
if (!this.completed_trees.isSet(curr_tree_id)) return false;
curr_tree_id = trees[curr_tree_id].parent;
var curr_tree_id = trees[package_tree_id.get()].parent;
while (curr_tree_id != .invalid) {
if (!this.completed_trees.isSet(curr_tree_id.get())) return false;
curr_tree_id = trees[curr_tree_id.get()].parent;
}
return true;
@@ -1020,7 +1020,7 @@ pub const PackageInstaller = struct {
}
if (!is_pending_package_install and !this.canInstallPackageForTree(this.lockfile.buffers.trees.items, this.current_tree_id)) {
this.trees[this.current_tree_id].pending_installs.append(this.manager.allocator, .{
this.trees[this.current_tree_id.get()].pending_installs.append(this.manager.allocator, .{
.dependency_id = dependency_id,
.tree_id = this.current_tree_id,
.path = bun.handleOom(this.node_modules.path.clone()),
@@ -1084,7 +1084,7 @@ pub const PackageInstaller = struct {
}
if (this.bins[package_id].tag != .none) {
bun.handleOom(this.trees[this.current_tree_id].binaries.add(dependency_id));
bun.handleOom(this.trees[this.current_tree_id.get()].binaries.add(dependency_id));
}
const dep = this.lockfile.buffers.dependencies.items[dependency_id];
@@ -1238,7 +1238,7 @@ pub const PackageInstaller = struct {
}
} else {
if (this.bins[package_id].tag != .none) {
bun.handleOom(this.trees[this.current_tree_id].binaries.add(dependency_id));
bun.handleOom(this.trees[this.current_tree_id.get()].binaries.add(dependency_id));
}
var destination_dir: LazyPackageDestinationDir = .{

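Several hunks above replace raw `u32` tree ids with the enum-typed `Lockfile.Tree.Id` defined near the end of this diff (`root = 0`, `invalid = maxInt(u32)`, plus `get`/`from`/`inc` helpers). A minimal standalone sketch of that pattern, simplified from the diff (the `names` array is illustrative only):

const std = @import("std");

const Id = enum(u32) {
    root = 0,
    invalid = std.math.maxInt(u32),
    _,

    pub fn get(id: Id) u32 {
        // simplified from bun.debugAssert in the diff
        std.debug.assert(id != .invalid);
        return @intFromEnum(id);
    }

    pub fn from(raw: u32) Id {
        return @enumFromInt(raw);
    }

    pub fn inc(id: *Id) void {
        id.* = @enumFromInt(@intFromEnum(id.*) + 1);
    }
};

pub fn main() void {
    const names = [_][]const u8{ "root", "left", "right" };
    var id: Id = .root;
    while (id.get() < names.len) : (id.inc()) {
        // slice indexing now requires an explicit .get(), so a bare
        // u32 can no longer be passed where a Tree.Id is expected
        std.debug.print("tree {d}: {s}\n", .{ id.get(), names[id.get()] });
    }
}

The effect is visible throughout the hunks above: raw indices pass through an explicit `.get()` or `.from(...)`, and sentinel comparisons read as `!= .invalid` rather than `!= Lockfile.Tree.invalid_id`.
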
View File

@@ -97,11 +97,6 @@ global_link_dir_path: string = "",
onWake: WakeHandler = .{},
ci_mode: bun.LazyBool(computeIsContinuousIntegration, @This(), "ci_mode") = .{},
peer_dependencies: std.fifo.LinearFifo(DependencyID, .Dynamic) = .init(default_allocator),
// name hash from alias package name -> aliased package dependency version info
known_npm_aliases: NpmAliasMap = .{},
event_loop: jsc.AnyEventLoop,
// During `installPackages` we learn exactly what dependencies from --trust
@@ -1139,7 +1134,6 @@ const PreallocatedNetworkTasks = bun.HiveArray(NetworkTask, 128).Fallback;
const ResolveTaskQueue = bun.UnboundedQueue(Task, .next);
const RepositoryMap = std.HashMapUnmanaged(Task.Id, bun.FileDescriptor, IdentityContext(Task.Id), 80);
const NpmAliasMap = std.HashMapUnmanaged(PackageNameHash, Dependency.Version, IdentityContext(u64), 80);
const NetworkQueue = std.fifo.LinearFifo(*NetworkTask, .{ .Static = 32 });
const PatchTaskFifo = std.fifo.LinearFifo(*PatchTask, .{ .Static = 32 });

View File

@@ -4,13 +4,11 @@ pub fn enqueueDependencyWithMain(
/// This must be a *const to prevent UB
dependency: *const Dependency,
resolution: PackageID,
install_peer: bool,
) !void {
return this.enqueueDependencyWithMainAndSuccessFn(
id,
dependency,
resolution,
install_peer,
assignResolution,
null,
);
@@ -62,7 +60,6 @@ pub fn enqueueDependencyList(
i,
&dependency,
resolution,
false,
) catch |err| {
const note = .{
.fmt = "error occurred while resolving {}",
@@ -304,7 +301,7 @@ pub fn enqueueDependencyToRoot(
builder.allocate() catch |err| return .{ .failure = err };
const dep = dummy.cloneWithDifferentBuffers(this, name, version_buf, @TypeOf(&builder), &builder) catch unreachable;
const dep = dummy.cloneWithDifferentBuffers(name, version_buf, @TypeOf(&builder), &builder) catch unreachable;
builder.clamp();
const index = this.lockfile.buffers.dependencies.items.len;
this.lockfile.buffers.dependencies.append(this.allocator, dep) catch unreachable;
@@ -318,7 +315,6 @@ pub fn enqueueDependencyToRoot(
dep_id,
&this.lockfile.buffers.dependencies.items[dep_id],
invalid_package_id,
false,
assignRootResolution,
failRootResolution,
) catch |err| {
@@ -348,7 +344,6 @@ pub fn enqueueDependencyToRoot(
.onPackageManifestError = {},
.onPackageDownloadError = {},
},
false,
manager.options.log_level,
) catch |err| {
closure.err = err;
@@ -439,7 +434,6 @@ pub fn enqueueDependencyWithMainAndSuccessFn(
/// This must be a *const to prevent UB
dependency: *const Dependency,
resolution: PackageID,
install_peer: bool,
comptime successFn: SuccessFn,
comptime failFn: ?FailFn,
) !void {
@@ -452,29 +446,6 @@ pub fn enqueueDependencyWithMainAndSuccessFn(
};
const version = version: {
if (dependency.version.tag == .npm) {
if (this.known_npm_aliases.get(name_hash)) |aliased| {
const group = dependency.version.value.npm.version;
const buf = this.lockfile.buffers.string_bytes.items;
var curr_list: ?*const Semver.Query.List = &aliased.value.npm.version.head;
while (curr_list) |queries| {
var curr: ?*const Semver.Query = &queries.head;
while (curr) |query| {
if (group.satisfies(query.range.left.version, buf, buf) or group.satisfies(query.range.right.version, buf, buf)) {
name = aliased.value.npm.name;
name_hash = String.Builder.stringHash(this.lockfile.str(&name));
break :version aliased;
}
curr = query.next;
}
curr_list = queries.next;
}
// fallthrough. a package that matches the name of an alias but does not match
// the version should be enqueued as a normal npm dependency, overrides allowed
}
}
// allow overriding all dependencies unless the dependency is coming directly from an alias, "npm:<this dep>" or
// if it's a workspaceOnly dependency
if (!dependency.behavior.isWorkspace() and (dependency.version.tag != .npm or !dependency.version.value.npm.is_alias)) {
@@ -525,7 +496,6 @@ pub fn enqueueDependencyWithMainAndSuccessFn(
dependency.behavior,
id,
resolution,
install_peer,
successFn,
);
@@ -705,86 +675,79 @@ pub fn enqueueDependencyWithMainAndSuccessFn(
},
);
if (!dependency.behavior.isPeer() or install_peer) {
if (!this.hasCreatedNetworkTask(task_id, dependency.behavior.isRequired())) {
const needs_extended_manifest = this.options.minimum_release_age_ms != null;
if (this.options.enable.manifest_cache) {
var expired = false;
if (this.manifests.byNameHashAllowExpired(
this,
this.scopeForPackageName(name_str),
name_hash,
&expired,
.load_from_memory_fallback_to_disk,
needs_extended_manifest,
)) |manifest| {
loaded_manifest = manifest.*;
if (!this.hasCreatedNetworkTask(task_id, dependency.behavior.isRequired())) {
const needs_extended_manifest = this.options.minimum_release_age_ms != null;
if (this.options.enable.manifest_cache) {
var expired = false;
if (this.manifests.byNameHashAllowExpired(
this,
this.scopeForPackageName(name_str),
name_hash,
&expired,
.load_from_memory_fallback_to_disk,
needs_extended_manifest,
)) |manifest| {
loaded_manifest = manifest.*;
// If it's an exact package version already living in the cache
// We can skip the network request, even if it's beyond the caching period
if (version.tag == .npm and version.value.npm.version.isExact()) {
if (loaded_manifest.?.findByVersion(version.value.npm.version.head.head.range.left.version)) |find_result| {
if (this.options.minimum_release_age_ms) |min_age_ms| {
if (!loaded_manifest.?.shouldExcludeFromAgeFilter(this.options.minimum_release_age_excludes) and Npm.PackageManifest.isPackageVersionTooRecent(find_result.package, min_age_ms)) {
const package_name = this.lockfile.str(&name);
const min_age_seconds = min_age_ms / std.time.ms_per_s;
this.log.addErrorFmt(null, logger.Loc.Empty, this.allocator, "Version \"{s}@{}\" was published within minimum release age of {d} seconds", .{ package_name, find_result.version.fmt(this.lockfile.buffers.string_bytes.items), min_age_seconds }) catch {};
return;
}
}
if (getOrPutResolvedPackageWithFindResult(
this,
name_hash,
name,
dependency,
version,
id,
dependency.behavior,
&loaded_manifest.?,
find_result,
install_peer,
successFn,
) catch null) |new_resolve_result| {
resolve_result_ = new_resolve_result;
_ = this.network_dedupe_map.remove(task_id);
continue :retry_with_new_resolve_result;
// If it's an exact package version already living in the cache
// We can skip the network request, even if it's beyond the caching period
if (version.tag == .npm and version.value.npm.version.isExact()) {
if (loaded_manifest.?.findByVersion(version.value.npm.version.head.head.range.left.version)) |find_result| {
if (this.options.minimum_release_age_ms) |min_age_ms| {
if (!loaded_manifest.?.shouldExcludeFromAgeFilter(this.options.minimum_release_age_excludes) and Npm.PackageManifest.isPackageVersionTooRecent(find_result.package, min_age_ms)) {
const package_name = this.lockfile.str(&name);
const min_age_seconds = min_age_ms / std.time.ms_per_s;
this.log.addErrorFmt(null, logger.Loc.Empty, this.allocator, "Version \"{s}@{}\" was published within minimum release age of {d} seconds", .{ package_name, find_result.version.fmt(this.lockfile.buffers.string_bytes.items), min_age_seconds }) catch {};
return;
}
}
}
// Was it recent enough to just load it without the network call?
if (this.options.enable.manifest_cache_control and !expired) {
_ = this.network_dedupe_map.remove(task_id);
continue :retry_from_manifests_ptr;
if (getOrPutResolvedPackageWithFindResult(
this,
name_hash,
name,
dependency,
version,
id,
&loaded_manifest.?,
find_result,
successFn,
) catch null) |new_resolve_result| {
resolve_result_ = new_resolve_result;
_ = this.network_dedupe_map.remove(task_id);
continue :retry_with_new_resolve_result;
}
}
}
// Was it recent enough to just load it without the network call?
if (this.options.enable.manifest_cache_control and !expired) {
_ = this.network_dedupe_map.remove(task_id);
continue :retry_from_manifests_ptr;
}
}
if (PackageManager.verbose_install) {
Output.prettyErrorln("Enqueue package manifest for download: {s}", .{name_str});
}
var network_task = this.getNetworkTask();
network_task.* = .{
.package_manager = this,
.callback = undefined,
.task_id = task_id,
.allocator = this.allocator,
};
try network_task.forManifest(
name_str,
this.allocator,
this.scopeForPackageName(name_str),
if (loaded_manifest) |*manifest| manifest else null,
dependency.behavior.isOptional(),
needs_extended_manifest,
);
this.enqueueNetworkTask(network_task);
}
} else {
try this.peer_dependencies.writeItem(id);
return;
if (PackageManager.verbose_install) {
Output.prettyErrorln("Enqueue package manifest for download: {s}", .{name_str});
}
var network_task = this.getNetworkTask();
network_task.* = .{
.package_manager = this,
.callback = undefined,
.task_id = task_id,
.allocator = this.allocator,
};
try network_task.forManifest(
name_str,
this.allocator,
this.scopeForPackageName(name_str),
if (loaded_manifest) |*manifest| manifest else null,
dependency.behavior.isOptional(),
needs_extended_manifest,
);
this.enqueueNetworkTask(network_task);
}
var manifest_entry_parse = try this.task_queue.getOrPutContext(this.allocator, task_id, .{});
@@ -810,7 +773,7 @@ pub fn enqueueDependencyWithMainAndSuccessFn(
};
// First: see if we already loaded the git package in-memory
if (this.lockfile.getPackageID(name_hash, null, &res)) |pkg_id| {
if (this.lockfile.getPackageID(name_hash, &res)) |pkg_id| {
successFn(this, id, pkg_id);
return;
}
@@ -854,13 +817,6 @@ pub fn enqueueDependencyWithMainAndSuccessFn(
try entry.value_ptr.append(this.allocator, ctx);
}
if (dependency.behavior.isPeer()) {
if (!install_peer) {
try this.peer_dependencies.writeItem(id);
return;
}
}
if (this.hasCreatedNetworkTask(checkout_id, dependency.behavior.isRequired())) return;
this.task_batch.push(ThreadPool.Batch.from(this.enqueueGitCheckout(
@@ -877,13 +833,6 @@ pub fn enqueueDependencyWithMainAndSuccessFn(
if (!entry.found_existing) entry.value_ptr.* = .{};
try entry.value_ptr.append(this.allocator, ctx);
if (dependency.behavior.isPeer()) {
if (!install_peer) {
try this.peer_dependencies.writeItem(id);
return;
}
}
if (this.hasCreatedNetworkTask(clone_id, dependency.behavior.isRequired())) return;
this.task_batch.push(ThreadPool.Batch.from(enqueueGitClone(this, clone_id, alias, dep, id, dependency, &res, null)));
@@ -899,7 +848,7 @@ pub fn enqueueDependencyWithMainAndSuccessFn(
};
// First: see if we already loaded the github package in-memory
if (this.lockfile.getPackageID(name_hash, null, &res)) |pkg_id| {
if (this.lockfile.getPackageID(name_hash, &res)) |pkg_id| {
successFn(this, id, pkg_id);
return;
}
@@ -927,13 +876,6 @@ pub fn enqueueDependencyWithMainAndSuccessFn(
const callback_tag = comptime if (successFn == assignRootResolution) "root_dependency" else "dependency";
try entry.value_ptr.append(this.allocator, @unionInit(TaskCallbackContext, callback_tag, id));
if (dependency.behavior.isPeer()) {
if (!install_peer) {
try this.peer_dependencies.writeItem(id);
return;
}
}
if (try this.generateNetworkTaskForTarball(
task_id,
url,
@@ -960,7 +902,6 @@ pub fn enqueueDependencyWithMainAndSuccessFn(
dependency.behavior,
id,
resolution,
install_peer,
successFn,
) catch |err| brk: {
if (err == error.MissingPackageJSON) {
@@ -1085,7 +1026,7 @@ pub fn enqueueDependencyWithMainAndSuccessFn(
};
// First: see if we already loaded the tarball package in-memory
if (this.lockfile.getPackageID(name_hash, null, &res)) |pkg_id| {
if (this.lockfile.getPackageID(name_hash, &res)) |pkg_id| {
successFn(this, id, pkg_id);
return;
}
@@ -1115,13 +1056,6 @@ pub fn enqueueDependencyWithMainAndSuccessFn(
const callback_tag = comptime if (successFn == assignRootResolution) "root_dependency" else "dependency";
try entry.value_ptr.append(this.allocator, @unionInit(TaskCallbackContext, callback_tag, id));
if (dependency.behavior.isPeer()) {
if (!install_peer) {
try this.peer_dependencies.writeItem(id);
return;
}
}
switch (version.value.tarball.uri) {
.local => {
if (this.hasCreatedNetworkTask(task_id, dependency.behavior.isRequired())) return;
@@ -1366,23 +1300,23 @@ fn getOrPutResolvedPackageWithFindResult(
dependency: *const Dependency,
version: Dependency.Version,
dependency_id: DependencyID,
behavior: Behavior,
manifest: *const Npm.PackageManifest,
find_result: Npm.PackageManifest.FindResult,
install_peer: bool,
comptime successFn: SuccessFn,
) !?ResolvedPackageResult {
const should_update = this.to_update and
// If updating, only update packages in the current workspace
this.lockfile.isRootDependency(this, dependency_id) and
// no need to do a look up if update requests are empty (`bun update` with no args)
(this.update_requests.len == 0 or
this.updating_packages.contains(dependency.name.slice(this.lockfile.buffers.string_bytes.items)));
_ = version;
// TODO
// const should_update = this.to_update and
// // If updating, only update packages in the current workspace
// this.lockfile.isRootDependency(this, dependency_id) and
// // no need to do a look up if update requests are empty (`bun update` with no args)
// (this.update_requests.len == 0 or
// this.updating_packages.contains(dependency.name.slice(this.lockfile.buffers.string_bytes.items)));
// Was this package already allocated? Let's reuse the existing one.
if (this.lockfile.getPackageID(
name_hash,
if (should_update) null else version,
&.{
.tag = .npm,
.value = .{
@@ -1398,13 +1332,10 @@ fn getOrPutResolvedPackageWithFindResult(
.package = this.lockfile.packages.get(id),
.is_first_time = false,
};
} else if (behavior.isPeer() and !install_peer) {
return null;
}
// appendPackage sets the PackageID on the package
const package = try this.lockfile.appendPackage(try Lockfile.Package.fromNPM(
this,
this.allocator,
this.lockfile,
this.log,
@@ -1497,87 +1428,8 @@ fn getOrPutResolvedPackage(
behavior: Behavior,
dependency_id: DependencyID,
resolution: PackageID,
install_peer: bool,
comptime successFn: SuccessFn,
) !?ResolvedPackageResult {
if (install_peer and behavior.isPeer()) {
if (this.lockfile.package_index.get(name_hash)) |index| {
const resolutions: []Resolution = this.lockfile.packages.items(.resolution);
switch (index) {
.id => |existing_id| {
if (existing_id < resolutions.len) {
const existing_resolution = resolutions[existing_id];
if (resolutionSatisfiesDependency(this, existing_resolution, version)) {
successFn(this, dependency_id, existing_id);
return .{
// we must fetch it from the packages array again, in case the package array mutates the value in the `successFn`
.package = this.lockfile.packages.get(existing_id),
};
}
const res_tag = resolutions[existing_id].tag;
const ver_tag = version.tag;
if ((res_tag == .npm and ver_tag == .npm) or (res_tag == .git and ver_tag == .git) or (res_tag == .github and ver_tag == .github)) {
const existing_package = this.lockfile.packages.get(existing_id);
this.log.addWarningFmt(
null,
logger.Loc.Empty,
this.allocator,
"incorrect peer dependency \"{}@{}\"",
.{
existing_package.name.fmt(this.lockfile.buffers.string_bytes.items),
existing_package.resolution.fmt(this.lockfile.buffers.string_bytes.items, .auto),
},
) catch unreachable;
successFn(this, dependency_id, existing_id);
return .{
// we must fetch it from the packages array again, in case the package array mutates the value in the `successFn`
.package = this.lockfile.packages.get(existing_id),
};
}
}
},
.ids => |list| {
for (list.items) |existing_id| {
if (existing_id < resolutions.len) {
const existing_resolution = resolutions[existing_id];
if (resolutionSatisfiesDependency(this, existing_resolution, version)) {
successFn(this, dependency_id, existing_id);
return .{
.package = this.lockfile.packages.get(existing_id),
};
}
}
}
if (list.items[0] < resolutions.len) {
const res_tag = resolutions[list.items[0]].tag;
const ver_tag = version.tag;
if ((res_tag == .npm and ver_tag == .npm) or (res_tag == .git and ver_tag == .git) or (res_tag == .github and ver_tag == .github)) {
const existing_package_id = list.items[0];
const existing_package = this.lockfile.packages.get(existing_package_id);
this.log.addWarningFmt(
null,
logger.Loc.Empty,
this.allocator,
"incorrect peer dependency \"{}@{}\"",
.{
existing_package.name.fmt(this.lockfile.buffers.string_bytes.items),
existing_package.resolution.fmt(this.lockfile.buffers.string_bytes.items, .auto),
},
) catch unreachable;
successFn(this, dependency_id, list.items[0]);
return .{
// we must fetch it from the packages array again, in case the package array mutates the value in the `successFn`
.package = this.lockfile.packages.get(existing_package_id),
};
}
}
},
}
}
}
if (resolution < this.lockfile.packages.len) {
return .{ .package = this.lockfile.packages.get(resolution) };
}
@@ -1712,10 +1564,8 @@ fn getOrPutResolvedPackage(
dependency,
version,
dependency_id,
behavior,
manifest,
find_result,
install_peer,
successFn,
);
},

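The manifest-cache branch above encodes a small decision: a cached manifest is used when it contains an exact version match (even beyond the caching period) or when it has not yet expired; anything else falls through to a network task. A hedged distillation of that predicate (hypothetical helper, not the real API):

// Hypothetical distillation of the manifest-cache branch above.
fn shouldUseCachedManifest(has_cached: bool, exact_version_in_cache: bool, expired: bool) bool {
    if (!has_cached) return false;
    // an exact version already living in the cache skips the network
    // request, even if the manifest is beyond the caching period
    if (exact_version_in_cache) return true;
    // otherwise only a still-fresh manifest avoids the network call
    return !expired;
}
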
View File

@@ -124,7 +124,6 @@ pub fn populateManifestCache(manager: *PackageManager, packages: Packages) !void
.progress_bar = true,
.manifests_only = true,
},
true,
closure.manager.options.log_level,
) catch |err| {
closure.err = err;

View File

@@ -81,7 +81,7 @@ pub fn fromJS(globalThis: *jsc.JSGlobalObject, input: jsc.JSValue) bun.JSError!j
var array = Array{};
const update_requests = parseWithError(allocator, null, &log, all_positionals.items, &array, .add, false) catch {
const update_requests = parseWithError(allocator, &log, all_positionals.items, &array, .add, false) catch {
return globalThis.throwValue(try log.toJS(globalThis, bun.default_allocator, "Failed to parse dependencies"));
};
if (update_requests.len == 0) return .js_undefined;
@@ -103,18 +103,16 @@ pub fn fromJS(globalThis: *jsc.JSGlobalObject, input: jsc.JSValue) bun.JSError!j
pub fn parse(
allocator: std.mem.Allocator,
pm: ?*PackageManager,
log: *logger.Log,
positionals: []const string,
update_requests: *Array,
subcommand: Subcommand,
) []UpdateRequest {
return parseWithError(allocator, pm, log, positionals, update_requests, subcommand, true) catch Global.crash();
return parseWithError(allocator, log, positionals, update_requests, subcommand, true) catch Global.crash();
}
fn parseWithError(
allocator: std.mem.Allocator,
pm: ?*PackageManager,
log: *logger.Log,
positionals: []const string,
update_requests: *Array,
@@ -165,7 +163,6 @@ fn parseWithError(
null,
&SlicedString.init(input, value),
log,
pm,
) orelse {
if (fatal) {
Output.errGeneric("unrecognised dependency format: {s}", .{
@@ -188,7 +185,6 @@ fn parseWithError(
null,
&SlicedString.init(input, input),
log,
pm,
)) |ver| {
alias = null;
version = ver;

View File

@@ -250,8 +250,8 @@ pub fn installWithManager(
break :brk all_name_hashes;
};
manager.lockfile.overrides = try lockfile.overrides.clone(manager, &lockfile, manager.lockfile, builder);
manager.lockfile.catalogs = try lockfile.catalogs.clone(manager, &lockfile, manager.lockfile, builder);
manager.lockfile.overrides = try lockfile.overrides.clone(&lockfile, manager.lockfile, builder);
manager.lockfile.catalogs = try lockfile.catalogs.clone(&lockfile, manager.lockfile, builder);
manager.lockfile.trusted_dependencies = if (lockfile.trusted_dependencies) |trusted_dependencies|
try trusted_dependencies.clone(manager.lockfile.allocator)
@@ -274,7 +274,7 @@ pub fn installWithManager(
manager.lockfile.buffers.resolutions.items = manager.lockfile.buffers.resolutions.items.ptr[0 .. off + len];
for (new_dependencies, 0..) |new_dep, i| {
dependencies[i] = try new_dep.clone(manager, lockfile.buffers.string_bytes.items, *Lockfile.StringBuilder, builder);
dependencies[i] = try new_dep.clone(lockfile.buffers.string_bytes.items, *Lockfile.StringBuilder, builder);
if (mapping[i] != invalid_package_id) {
resolutions[i] = old_resolutions[mapping[i]];
}
@@ -364,7 +364,6 @@ pub fn installWithManager(
@truncate(dependency_i),
dependency,
invalid_package_id,
false,
);
}
}
@@ -380,7 +379,6 @@ pub fn installWithManager(
dep_id,
dep,
invalid_package_id,
false,
);
}
}
@@ -401,7 +399,6 @@ pub fn installWithManager(
dependency_i,
&dependency,
manager.lockfile.buffers.resolutions.items[dependency_i],
false,
);
}
}
@@ -482,7 +479,7 @@ pub fn installWithManager(
manager.drainDependencyList();
}
if (manager.pendingTaskCount() > 0 or manager.peer_dependencies.readableLength() > 0) {
if (manager.pendingTaskCount() > 0) {
if (root.dependencies.len > 0) {
_ = manager.getCacheDirectory();
_ = manager.getTemporaryDirectory();
@@ -496,17 +493,12 @@ pub fn installWithManager(
}
const runAndWaitFn = struct {
pub fn runAndWaitFn(comptime check_peers: bool, comptime only_pre_patch: bool) *const fn (*PackageManager) anyerror!void {
pub fn runAndWaitFn(comptime only_pre_patch: bool) *const fn (*PackageManager) anyerror!void {
return struct {
manager: *PackageManager,
err: ?anyerror = null,
pub fn isDone(closure: *@This()) bool {
var this = closure.manager;
if (comptime check_peers)
this.processPeerDependencyList() catch |err| {
closure.err = err;
return true;
};
this.drainDependencyList();
@@ -520,19 +512,12 @@ pub fn installWithManager(
.onPackageDownloadError = {},
.progress_bar = true,
},
check_peers,
this.options.log_level,
) catch |err| {
closure.err = err;
return true;
};
if (comptime check_peers) {
if (this.peer_dependencies.readableLength() > 0) {
return false;
}
}
if (comptime only_pre_patch) {
const pending_patch = this.pending_pre_calc_hashes.load(.monotonic);
return pending_patch == 0;
@@ -562,25 +547,15 @@ pub fn installWithManager(
}
}.runAndWaitFn;
const waitForCalcingPatchHashes = runAndWaitFn(false, true);
const waitForEverythingExceptPeers = runAndWaitFn(false, false);
const waitForPeers = runAndWaitFn(true, false);
const waitForCalcingPatchHashes = runAndWaitFn(true);
const waitForEverything = runAndWaitFn(false);
if (manager.lockfile.patched_dependencies.entries.len > 0) {
try waitForCalcingPatchHashes(manager);
}
if (manager.pendingTaskCount() > 0) {
try waitForEverythingExceptPeers(manager);
}
if (manager.peer_dependencies.readableLength() > 0) {
try manager.processPeerDependencyList();
manager.drainDependencyList();
}
if (manager.pendingTaskCount() > 0) {
try waitForPeers(manager);
try waitForEverything(manager);
}
if (log_level.showProgress()) {

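`runAndWaitFn` above is a comptime function factory: a comptime flag picks the variant, and each call site receives its own concrete function pointer (`waitForCalcingPatchHashes` and `waitForEverything` in the new code). A minimal sketch of the shape, with an illustrative predicate standing in for the real closure that drains task queues:

const std = @import("std");

fn runAndWaitFn(comptime only_pre_patch: bool) *const fn (u32) bool {
    return &struct {
        fn isDone(pending: u32) bool {
            // illustrative condition; the real isDone drains the
            // dependency list and checks pending patch hashes
            if (comptime only_pre_patch) return pending == 0;
            return pending <= 1;
        }
    }.isDone;
}

pub fn main() void {
    const waitForCalcingPatchHashes = runAndWaitFn(true);
    const waitForEverything = runAndWaitFn(false);
    std.debug.print("{} {}\n", .{ waitForCalcingPatchHashes(0), waitForEverything(1) });
}
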
View File

@@ -235,7 +235,6 @@ pub fn processDependencyListItem(
this: *PackageManager,
item: TaskCallbackContext,
any_root: ?*bool,
install_peer: bool,
) !void {
switch (item) {
.dependency => |dependency_id| {
@@ -246,7 +245,6 @@ pub fn processDependencyListItem(
dependency_id,
&dependency,
resolution,
install_peer,
);
},
.root_dependency => |dependency_id| {
@@ -257,7 +255,6 @@ pub fn processDependencyListItem(
dependency_id,
&dependency,
resolution,
install_peer,
assignRootResolution,
failRootResolution,
);
@@ -272,35 +269,18 @@ pub fn processDependencyListItem(
}
}
pub fn processPeerDependencyList(
this: *PackageManager,
) !void {
while (this.peer_dependencies.readItem()) |peer_dependency_id| {
const dependency = this.lockfile.buffers.dependencies.items[peer_dependency_id];
const resolution = this.lockfile.buffers.resolutions.items[peer_dependency_id];
try this.enqueueDependencyWithMain(
peer_dependency_id,
&dependency,
resolution,
true,
);
}
}
pub fn processDependencyList(
this: *PackageManager,
dep_list: TaskCallbackList,
comptime Ctx: type,
ctx: Ctx,
comptime callbacks: anytype,
install_peer: bool,
) !void {
if (dep_list.items.len > 0) {
var dependency_list = dep_list;
var any_root = false;
for (dependency_list.items) |item| {
try this.processDependencyListItem(item, &any_root, install_peer);
try this.processDependencyListItem(item, &any_root);
}
if (comptime @TypeOf(callbacks) != void and @TypeOf(callbacks.onResolve) != void) {

View File

@@ -4,7 +4,6 @@ pub fn runTasks(
comptime Ctx: type,
extract_ctx: Ctx,
comptime callbacks: anytype,
install_peer: bool,
log_level: Options.LogLevel,
) !void {
var has_updated_this_run = false;
@@ -319,7 +318,6 @@ pub fn runTasks(
Ctx,
extract_ctx,
callbacks,
install_peer,
);
continue;
@@ -560,7 +558,7 @@ pub fn runTasks(
const dependency_list = dependency_list_entry.value_ptr.*;
dependency_list_entry.value_ptr.* = .{};
try manager.processDependencyList(dependency_list, Ctx, extract_ctx, callbacks, install_peer);
try manager.processDependencyList(dependency_list, Ctx, extract_ctx, callbacks);
if (log_level.showProgress()) {
if (!has_updated_this_run) {
@@ -678,7 +676,7 @@ pub fn runTasks(
// will still have the original.
else => {},
}
try manager.processDependencyListItem(dep, &any_root, install_peer);
try manager.processDependencyListItem(dep, &any_root);
},
else => {
// if it's a node_module folder to install, handle that after we process all the dependencies within the onExtract callback.
@@ -693,7 +691,7 @@ pub fn runTasks(
const dependency_list = dependency_list_entry.value_ptr.*;
dependency_list_entry.value_ptr.* = .{};
try manager.processDependencyList(dependency_list, void, {}, {}, install_peer);
try manager.processDependencyList(dependency_list, void, {}, {});
}
manager.setPreinstallState(package_id, manager.lockfile, .done);
@@ -779,7 +777,7 @@ pub fn runTasks(
const dependency_list = dependency_list_entry.value_ptr.*;
dependency_list_entry.value_ptr.* = .{};
try manager.processDependencyList(dependency_list, Ctx, extract_ctx, callbacks, install_peer);
try manager.processDependencyList(dependency_list, Ctx, extract_ctx, callbacks);
}
if (log_level.showProgress()) {
@@ -865,7 +863,7 @@ pub fn runTasks(
var repo = &manager.lockfile.buffers.dependencies.items[id].version.value.git;
repo.resolved = pkg.resolution.value.git.resolved;
repo.package_name = pkg.name;
try manager.processDependencyListItem(dep, &any_root, install_peer);
try manager.processDependencyListItem(dep, &any_root);
},
else => {
// if it's a node_module folder to install, handle that after we process all the dependencies within the onExtract callback.
@@ -935,7 +933,6 @@ fn doFlushDependencyQueue(this: *PackageManager) void {
i,
&dependency,
lockfile.buffers.resolutions.items[i],
false,
) catch {};
}
}

View File

@@ -46,7 +46,7 @@ fn updatePackageJSONAndInstallWithManagerWithUpdatesAndUpdateRequests(
var updates: []UpdateRequest = if (manager.subcommand == .@"patch-commit" or manager.subcommand == .patch)
&[_]UpdateRequest{}
else
UpdateRequest.parse(ctx.allocator, manager, ctx.log, positionals, update_requests, manager.subcommand);
UpdateRequest.parse(ctx.allocator, ctx.log, positionals, update_requests, manager.subcommand);
try updatePackageJSONAndInstallWithManagerWithUpdates(
manager,
ctx,

View File

@@ -61,11 +61,11 @@ pub fn count(this: *const Dependency, buf: []const u8, comptime StringBuilder: t
this.countWithDifferentBuffers(buf, buf, StringBuilder, builder);
}
pub fn clone(this: *const Dependency, package_manager: *PackageManager, buf: []const u8, comptime StringBuilder: type, builder: StringBuilder) !Dependency {
return this.cloneWithDifferentBuffers(package_manager, buf, buf, StringBuilder, builder);
pub fn clone(this: *const Dependency, buf: []const u8, comptime StringBuilder: type, builder: StringBuilder) !Dependency {
return this.cloneWithDifferentBuffers(buf, buf, StringBuilder, builder);
}
pub fn cloneWithDifferentBuffers(this: *const Dependency, package_manager: *PackageManager, name_buf: []const u8, version_buf: []const u8, comptime StringBuilder: type, builder: StringBuilder) !Dependency {
pub fn cloneWithDifferentBuffers(this: *const Dependency, name_buf: []const u8, version_buf: []const u8, comptime StringBuilder: type, builder: StringBuilder) !Dependency {
const out_slice = builder.lockfile.buffers.string_bytes.items;
const new_literal = builder.append(String, this.version.literal.slice(version_buf));
const sliced = new_literal.sliced(out_slice);
@@ -82,7 +82,6 @@ pub fn cloneWithDifferentBuffers(this: *const Dependency, package_manager: *Pack
this.version.tag,
&sliced,
null,
package_manager,
) orelse Dependency.Version{},
.behavior = this.behavior,
};
@@ -99,7 +98,6 @@ pub const Context = struct {
allocator: std.mem.Allocator,
log: *logger.Log,
buffer: []const u8,
package_manager: ?*PackageManager,
};
/// Get the name of the package as it should appear in a remote registry.
@@ -396,7 +394,6 @@ pub const Version = struct {
tag,
sliced,
ctx.log,
ctx.package_manager,
) orelse Dependency.Version.zeroed;
}
@@ -872,10 +869,9 @@ pub inline fn parse(
dependency: string,
sliced: *const SlicedString,
log: ?*logger.Log,
manager: ?*PackageManager,
) ?Version {
const dep = std.mem.trimLeft(u8, dependency, " \t\n\r");
return parseWithTag(allocator, alias, alias_hash, dep, Version.Tag.infer(dep), sliced, log, manager);
return parseWithTag(allocator, alias, alias_hash, dep, Version.Tag.infer(dep), sliced, log);
}
pub fn parseWithOptionalTag(
@@ -886,7 +882,6 @@ pub fn parseWithOptionalTag(
tag: ?Dependency.Version.Tag,
sliced: *const SlicedString,
log: ?*logger.Log,
package_manager: ?*PackageManager,
) ?Version {
const dep = std.mem.trimLeft(u8, dependency, " \t\n\r");
return parseWithTag(
@@ -897,7 +892,6 @@ pub fn parseWithOptionalTag(
tag orelse Version.Tag.infer(dep),
sliced,
log,
package_manager,
);
}
@@ -909,7 +903,6 @@ pub fn parseWithTag(
tag: Dependency.Version.Tag,
sliced: *const SlicedString,
log_: ?*logger.Log,
package_manager: ?*PackageManager,
) ?Version {
switch (tag) {
.npm => {
@@ -968,16 +961,6 @@ pub fn parseWithTag(
.tag = .npm,
};
if (is_alias) {
if (package_manager) |pm| {
pm.known_npm_aliases.put(
allocator,
alias_hash.?,
result,
) catch unreachable;
}
}
return result;
},
.dist_tag => {
@@ -1294,7 +1277,7 @@ pub fn fromJS(globalThis: *jsc.JSGlobalObject, callframe: *jsc.CallFrame) bun.JS
var log = logger.Log.init(allocator);
const sliced = SlicedString.init(buf, name);
const dep: Version = Dependency.parse(allocator, SlicedString.init(buf, alias).value(), null, buf, &sliced, &log, null) orelse {
const dep: Version = Dependency.parse(allocator, SlicedString.init(buf, alias).value(), null, buf, &sliced, &log) orelse {
if (log.msgs.items.len > 0) {
return globalThis.throwValue(try log.toJS(globalThis, bun.default_allocator, "Failed to parse dependency"));
}

View File

@@ -99,14 +99,14 @@ pub fn installHoistedPackages(
// ids as dependents for the current tree parent
var deps = try Bitset.initEmpty(this.allocator, trees.len);
defer deps.deinit(this.allocator);
for (trees) |_curr| {
var curr = _curr;
tree_ids_to_trees_the_id_depends_on.set(curr.id, curr.id);
for (trees) |tree| {
tree_ids_to_trees_the_id_depends_on.set(tree.id.get(), tree.id.get());
while (curr.parent != Lockfile.Tree.invalid_id) {
deps.set(curr.id);
tree_ids_to_trees_the_id_depends_on.setUnion(curr.parent, deps);
curr = trees[curr.parent];
var curr = tree;
while (curr.parent != .invalid) {
deps.set(curr.id.get());
tree_ids_to_trees_the_id_depends_on.setUnion(curr.parent.get(), deps);
curr = trees[curr.parent.get()];
}
deps.setAll(false);
@@ -161,7 +161,7 @@ pub fn installHoistedPackages(
strings.withoutTrailingSlash(FileSystem.instance.top_level_dir),
),
),
.tree_id = 0,
.tree_id = .root,
},
.progress = progress,
.skip_verify_installed_version_number = skip_verify_installed_version_number,
@@ -205,7 +205,7 @@ pub fn installHoistedPackages(
installer.current_tree_id = node_modules.tree_id;
if (comptime Environment.allow_assert) {
bun.assert(node_modules.dependencies.len == this.lockfile.buffers.trees.items[installer.current_tree_id].dependencies.len);
bun.assert(node_modules.dependencies.len == this.lockfile.buffers.trees.items[installer.current_tree_id.get()].dependencies.len);
}
// cache line is 64 bytes on ARM64 and x64
@@ -232,7 +232,6 @@ pub fn installHoistedPackages(
.onPackageManifestError = {},
.onPackageDownloadError = {},
},
true,
log_level,
);
if (!installer.options.do.install_packages) return error.InstallFailed;
@@ -254,7 +253,6 @@ pub fn installHoistedPackages(
.onPackageManifestError = {},
.onPackageDownloadError = {},
},
true,
log_level,
);
if (!installer.options.do.install_packages) return error.InstallFailed;
@@ -280,7 +278,6 @@ pub fn installHoistedPackages(
.onPackageManifestError = {},
.onPackageDownloadError = {},
},
true,
pm.options.log_level,
) catch |err| {
closure.err = err;

View File

@@ -212,8 +212,8 @@ pub const ExtractData = struct {
};
pub const DependencyInstallContext = struct {
tree_id: Lockfile.Tree.Id = 0,
path: std.ArrayList(u8) = std.ArrayList(u8).init(bun.default_allocator),
tree_id: Lockfile.Tree.Id,
path: std.ArrayList(u8),
dependency_id: DependencyID,
};

View File

@@ -413,7 +413,7 @@ pub fn installIsolatedPackages(
defer entry_queue.deinit();
try entry_queue.writeItem(.{
.node_id = .from(0),
.node_id = .root,
.entry_parent_id = .invalid,
});
@@ -1134,7 +1134,6 @@ pub fn installIsolatedPackages(
.onPackageManifestError = {},
.onPackageDownloadError = {},
},
true,
pkg_manager.options.log_level,
) catch |err| {
wait.err = err;

View File

@@ -281,7 +281,7 @@ pub fn loadFromDir(
};
};
TextLockfile.parseIntoBinaryLockfile(this, allocator, json, source, log, manager) catch |err| {
TextLockfile.parseIntoBinaryLockfile(this, allocator, json, source, log) catch |err| {
switch (err) {
error.OutOfMemory => bun.outOfMemory(),
else => {
@@ -336,7 +336,7 @@ pub fn loadFromDir(
Output.panic("failed to print valid json from binary lockfile: {s}", .{@errorName(err)});
};
TextLockfile.parseIntoBinaryLockfile(this, allocator, json, source, log, manager) catch |err| {
TextLockfile.parseIntoBinaryLockfile(this, allocator, json, source, log) catch |err| {
Output.panic("failed to parse text lockfile converted from binary lockfile: {s}", .{@errorName(err)});
};
@@ -521,7 +521,6 @@ fn preprocessUpdateRequests(old: *Lockfile, manager: *PackageManager, updates: [
sliced.slice,
&sliced,
null,
manager,
) orelse Dependency.Version{};
}
}
@@ -594,7 +593,7 @@ pub fn getWorkspacePkgIfWorkspaceDep(this: *const Lockfile, id: DependencyID) Pa
/// Does this tree id belong to a workspace (including workspace root)?
/// TODO(dylan-conway) fix!
pub fn isWorkspaceTreeId(this: *const Lockfile, id: Tree.Id) bool {
return id == 0 or this.buffers.dependencies.items[this.buffers.trees.items[id].dependency_id].behavior.isWorkspace();
return id == .root or this.buffers.dependencies.items[this.buffers.trees.items[id.get()].dependency_id].behavior.isWorkspace();
}
/// Returns the package id of the workspace the install is taking place in.
@@ -663,8 +662,8 @@ pub fn cleanWithLogger(
old.overrides.count(old, &builder);
old.catalogs.count(old, &builder);
try builder.allocate();
new.overrides = try old.overrides.clone(manager, old, new, &builder);
new.catalogs = try old.catalogs.clone(manager, old, new, &builder);
new.overrides = try old.overrides.clone(old, new, &builder);
new.catalogs = try old.catalogs.clone(old, new, &builder);
}
// Step 1. Recreate the lockfile with only the packages that are still alive
@@ -687,7 +686,7 @@ pub fn cleanWithLogger(
};
// try clone_queue.ensureUnusedCapacity(root.dependencies.len);
_ = try root.clone(manager, old, new, package_id_mapping, &cloner);
_ = try root.clone(old, new, package_id_mapping, &cloner);
// Clone workspace_paths and workspace_versions at the end.
if (old.workspace_paths.count() > 0 or old.workspace_versions.count() > 0) {
@@ -859,7 +858,6 @@ pub const Cloner = struct {
const old_package = this.old.packages.get(to_clone.old_resolution);
this.lockfile.buffers.resolutions.items[to_clone.resolve_id] = try old_package.clone(
this.manager,
this.old,
this.lockfile,
this.mapping,
@@ -930,14 +928,14 @@ pub fn hoist(
try (Tree{}).processSubtree(
Tree.root_dep_id,
Tree.invalid_id,
.invalid,
method,
&builder,
);
// This goes breadth-first
while (builder.queue.readItem()) |item| {
try builder.list.items(.tree)[item.tree_id].processSubtree(
try builder.list.items(.tree)[item.tree_id.get()].processSubtree(
item.dependency_id,
item.hoist_root_id,
method,
@@ -1351,15 +1349,10 @@ pub fn getPackageID(
name_hash: u64,
// If non-null, attempt to use an existing package
// that satisfies this version range.
version: ?Dependency.Version,
resolution: *const Resolution,
) ?PackageID {
const entry = this.package_index.get(name_hash) orelse return null;
const resolutions: []const Resolution = this.packages.items(.resolution);
const npm_version = if (version) |v| switch (v.tag) {
.npm => v.value.npm.version,
else => null,
} else null;
const buf = this.buffers.string_bytes.items;
switch (entry) {
@@ -1369,10 +1362,6 @@ pub fn getPackageID(
if (resolutions[id].eql(resolution, buf, buf)) {
return id;
}
if (resolutions[id].tag == .npm and npm_version != null) {
if (npm_version.?.satisfies(resolutions[id].value.npm.version, buf, buf)) return id;
}
},
.ids => |ids| {
for (ids.items) |id| {
@@ -1381,10 +1370,6 @@ pub fn getPackageID(
if (resolutions[id].eql(resolution, buf, buf)) {
return id;
}
if (resolutions[id].tag == .npm and npm_version != null) {
if (npm_version.?.satisfies(resolutions[id].value.npm.version, buf, buf)) return id;
}
}
},
}
@@ -1512,7 +1497,7 @@ pub fn appendPackage(this: *Lockfile, package_: Lockfile.Package) OOM!Lockfile.P
pub fn appendPackageWithID(this: *Lockfile, package_: Lockfile.Package, id: PackageID) OOM!Lockfile.Package {
defer {
if (comptime Environment.allow_assert) {
assert(this.getPackageID(package_.name_hash, null, &package_.resolution) != null);
assert(this.getPackageID(package_.name_hash, &package_.resolution) != null);
}
}
var package = package_;

View File

@@ -1,6 +1,7 @@
const Buffers = @This();
trees: Tree.List = .{},
/// List of all dependency ids installed by `trees`. To be sliced with `tree.dependencies`.
hoisted_dependencies: DependencyIDList = .{},
/// This is the underlying buffer used for the `resolutions` external slices inside of `Package`
/// Should be the same length as `dependencies`
@@ -273,7 +274,7 @@ pub fn save(
pub fn legacyPackageToDependencyID(this: Buffers, dependency_visited: ?*Bitset, package_id: PackageID) !DependencyID {
switch (package_id) {
0 => return Tree.root_dep_id,
invalid_package_id => return invalid_package_id,
invalid_package_id => return invalid_dependency_id,
else => for (this.resolutions.items, 0..) |pkg_id, dep_id| {
if (pkg_id == package_id) {
if (dependency_visited) |visited| {
@@ -338,7 +339,6 @@ pub fn load(stream: *Stream, allocator: Allocator, log: *logger.Log, pm_: ?*Pack
.log = log,
.allocator = allocator,
.buffer = string_buf,
.package_manager = pm_,
};
this.dependencies.expandToCapacity();
@@ -393,6 +393,7 @@ const Dependency = bun.install.Dependency;
const DependencyID = install.DependencyID;
const PackageID = install.PackageID;
const PackageManager = bun.install.PackageManager;
const invalid_dependency_id = install.invalid_dependency_id;
const invalid_package_id = install.invalid_package_id;
const Lockfile = bun.install.Lockfile;

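The doc comment added at the top of this file describes the layout: the `hoisted_dependencies` buffer is shared by all trees, and each tree's `dependencies` field is an offset/length pair into it. A hypothetical sketch of that external-slice shape, inferred from the `.off`/`.len` fields serialized in `Tree.toTree` later in this diff:

const DependencyID = u32;

// stand-in for ExternalSlice(DependencyID); the real type lives in the
// lockfile code and is only assumed here
const DependencyIDSlice = struct {
    off: u32 = 0,
    len: u32 = 0,

    fn get(this: DependencyIDSlice, buf: []const DependencyID) []const DependencyID {
        return buf[this.off..][0..this.len];
    }
};

Usage then mirrors the tree iterator later in the diff: `tree.dependencies.get(lockfile.buffers.hoisted_dependencies.items)` yields the dependency ids installed into that tree's node_modules.
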
View File

@@ -109,7 +109,6 @@ pub fn parseCount(_: *CatalogMap, lockfile: *Lockfile, expr: Expr, builder: *Loc
pub fn parseAppend(
this: *CatalogMap,
pm: *PackageManager,
lockfile: *Lockfile,
log: *logger.Log,
source: *const logger.Source,
@@ -141,7 +140,6 @@ pub fn parseAppend(
version_sliced.slice,
&version_sliced,
log,
pm,
) orelse {
try log.addError(source, item.value.?.loc, "Invalid dependency version");
continue;
@@ -202,7 +200,6 @@ pub fn parseAppend(
version_sliced.slice,
&version_sliced,
log,
pm,
) orelse {
try log.addError(source, item.value.?.loc, "Invalid dependency version");
continue;
@@ -305,7 +302,6 @@ fn putEntriesFromPnpmLockfile(
version_sliced.slice,
&version_sliced,
log,
null,
) orelse {
return error.InvalidPnpmLockfile;
},
@@ -407,7 +403,7 @@ pub fn count(this: *CatalogMap, lockfile: *Lockfile, builder: *Lockfile.StringBu
}
}
pub fn clone(this: *CatalogMap, pm: *PackageManager, old: *Lockfile, new: *Lockfile, builder: *Lockfile.StringBuilder) OOM!CatalogMap {
pub fn clone(this: *CatalogMap, old: *Lockfile, new: *Lockfile, builder: *Lockfile.StringBuilder) OOM!CatalogMap {
var new_catalog: CatalogMap = .{};
try new_catalog.default.ensureTotalCapacity(new.allocator, this.default.count());
@@ -418,7 +414,7 @@ pub fn clone(this: *CatalogMap, pm: *PackageManager, old: *Lockfile, new: *Lockf
const dep = entry.value_ptr;
new_catalog.default.putAssumeCapacityContext(
builder.append(String, dep_name.slice(old.buffers.string_bytes.items)),
try dep.clone(pm, old.buffers.string_bytes.items, @TypeOf(builder), builder),
try dep.clone(old.buffers.string_bytes.items, @TypeOf(builder), builder),
String.arrayHashContext(new, null),
);
}
@@ -439,7 +435,7 @@ pub fn clone(this: *CatalogMap, pm: *PackageManager, old: *Lockfile, new: *Lockf
const dep = entry.value_ptr;
new_group.putAssumeCapacityContext(
builder.append(String, dep_name.slice(old.buffers.string_bytes.items)),
try dep.clone(pm, old.buffers.string_bytes.items, @TypeOf(builder), builder),
try dep.clone(old.buffers.string_bytes.items, @TypeOf(builder), builder),
String.arrayHashContext(new, null),
);
}
@@ -468,4 +464,3 @@ const String = bun.Semver.String;
const Dependency = bun.install.Dependency;
const Lockfile = bun.install.Lockfile;
const PackageManager = bun.install.PackageManager;

View File

@@ -54,14 +54,14 @@ pub fn count(this: *OverrideMap, lockfile: *Lockfile, builder: *Lockfile.StringB
}
}
pub fn clone(this: *OverrideMap, pm: *PackageManager, old_lockfile: *Lockfile, new_lockfile: *Lockfile, new_builder: *Lockfile.StringBuilder) !OverrideMap {
pub fn clone(this: *OverrideMap, old_lockfile: *Lockfile, new_lockfile: *Lockfile, new_builder: *Lockfile.StringBuilder) !OverrideMap {
var new = OverrideMap{};
try new.map.ensureTotalCapacity(new_lockfile.allocator, this.map.entries.len);
for (this.map.keys(), this.map.values()) |k, v| {
new.map.putAssumeCapacity(
k,
try v.clone(pm, old_lockfile.buffers.string_bytes.items, @TypeOf(new_builder), new_builder),
try v.clone(old_lockfile.buffers.string_bytes.items, @TypeOf(new_builder), new_builder),
);
}
@@ -111,7 +111,6 @@ pub fn parseCount(
/// It is assumed the input map is uninitialized (zero entries)
pub fn parseAppend(
this: *OverrideMap,
pm: *PackageManager,
lockfile: *Lockfile,
root_package: *Lockfile.Package,
log: *logger.Log,
@@ -123,9 +122,9 @@ pub fn parseAppend(
assert(this.map.entries.len == 0); // only call parse once
}
if (expr.asProperty("overrides")) |overrides| {
try this.parseFromOverrides(pm, lockfile, root_package, json_source, log, overrides.expr, builder);
try this.parseFromOverrides(lockfile, root_package, json_source, log, overrides.expr, builder);
} else if (expr.asProperty("resolutions")) |resolutions| {
try this.parseFromResolutions(pm, lockfile, root_package, json_source, log, resolutions.expr, builder);
try this.parseFromResolutions(lockfile, root_package, json_source, log, resolutions.expr, builder);
}
debug("parsed {d} overrides", .{this.map.entries.len});
}
@@ -133,7 +132,6 @@ pub fn parseAppend(
/// https://docs.npmjs.com/cli/v9/configuring-npm/package-json#overrides
pub fn parseFromOverrides(
this: *OverrideMap,
pm: *PackageManager,
lockfile: *Lockfile,
root_package: *Lockfile.Package,
source: *const logger.Source,
@@ -193,7 +191,6 @@ pub fn parseFromOverrides(
if (try parseOverrideValue(
"override",
lockfile,
pm,
root_package,
source,
value.loc,
@@ -211,7 +208,6 @@ pub fn parseFromOverrides(
/// yarn berry: https://yarnpkg.com/configuration/manifest#resolutions
pub fn parseFromResolutions(
this: *OverrideMap,
pm: *PackageManager,
lockfile: *Lockfile,
root_package: *Lockfile.Package,
source: *const logger.Source,
@@ -265,7 +261,6 @@ pub fn parseFromResolutions(
if (try parseOverrideValue(
"resolution",
lockfile,
pm,
root_package,
source,
value.loc,
@@ -283,7 +278,6 @@ pub fn parseFromResolutions(
pub fn parseOverrideValue(
comptime field: []const u8,
lockfile: *Lockfile,
package_manager: *PackageManager,
root_package: *Lockfile.Package,
source: *const logger.Source,
loc: logger.Loc,
@@ -331,7 +325,6 @@ pub fn parseOverrideValue(
literalSliced.slice,
&literalSliced,
log,
package_manager,
) orelse {
try log.addWarningFmt(source, loc, lockfile.allocator, "Invalid " ++ field ++ " value \"{s}\"", .{value});
return null;
@@ -356,5 +349,4 @@ const String = bun.Semver.String;
const Dependency = bun.install.Dependency;
const Lockfile = bun.install.Lockfile;
const PackageManager = bun.install.PackageManager;
const PackageNameHash = bun.install.PackageNameHash;

View File

@@ -79,7 +79,6 @@ pub fn Package(comptime SemverIntType: type) type {
pub fn clone(
this: *const @This(),
pm: *PackageManager,
old: *Lockfile,
new: *Lockfile,
package_id_mapping: []PackageID,
@@ -176,7 +175,6 @@ pub fn Package(comptime SemverIntType: type) type {
for (old_dependencies, dependencies) |old_dep, *new_dep| {
new_dep.* = try old_dep.clone(
pm,
old_string_buf,
*Lockfile.StringBuilder,
builder,
@@ -210,7 +208,6 @@ pub fn Package(comptime SemverIntType: type) type {
pub fn fromPackageJSON(
lockfile: *Lockfile,
pm: *PackageManager,
package_json: *PackageJSON,
comptime features: Features,
) !@This() {
@@ -270,7 +267,7 @@ pub fn Package(comptime SemverIntType: type) type {
for (package_dependencies) |dep| {
if (!dep.behavior.isEnabled(features)) continue;
dependencies[0] = try dep.clone(pm, source_buf, @TypeOf(&string_builder), &string_builder);
dependencies[0] = try dep.clone(source_buf, @TypeOf(&string_builder), &string_builder);
dependencies = dependencies[1..];
if (dependencies.len == 0) break;
}
@@ -300,7 +297,6 @@ pub fn Package(comptime SemverIntType: type) type {
}
pub fn fromNPM(
pm: *PackageManager,
allocator: Allocator,
lockfile: *Lockfile,
log: *logger.Log,
@@ -463,7 +459,6 @@ pub fn Package(comptime SemverIntType: type) type {
sliced.slice,
&sliced,
log,
pm,
) orelse Dependency.Version{},
};
@@ -1031,7 +1026,6 @@ pub fn Package(comptime SemverIntType: type) type {
tag,
&sliced,
log,
pm,
) orelse Dependency.Version{};
var workspace_range: ?Semver.Query.Group = null;
const name_hash = switch (dependency_version.tag) {
@@ -1100,7 +1094,6 @@ pub fn Package(comptime SemverIntType: type) type {
.workspace,
&path,
log,
pm,
)) |dep| {
dependency_version.tag = dep.tag;
dependency_version.value = dep.value;
@@ -1941,12 +1934,12 @@ pub fn Package(comptime SemverIntType: type) type {
// This function depends on package.dependencies being set, so it is done at the very end.
if (comptime features.is_main) {
try lockfile.overrides.parseAppend(pm, lockfile, package, log, source, json, &string_builder);
try lockfile.overrides.parseAppend(lockfile, package, log, source, json, &string_builder);
var found_any_catalog_or_catalog_object = false;
var has_workspaces = false;
if (json.get("workspaces")) |workspaces_expr| {
found_any_catalog_or_catalog_object = try lockfile.catalogs.parseAppend(pm, lockfile, log, source, workspaces_expr, &string_builder);
found_any_catalog_or_catalog_object = try lockfile.catalogs.parseAppend(lockfile, log, source, workspaces_expr, &string_builder);
has_workspaces = true;
}
@@ -1955,7 +1948,7 @@ pub fn Package(comptime SemverIntType: type) type {
// allow "catalog" and "catalogs" in top-level "package.json"
// so it's easier to guess.
if (!found_any_catalog_or_catalog_object and has_workspaces) {
_ = try lockfile.catalogs.parseAppend(pm, lockfile, log, source, json, &string_builder);
_ = try lockfile.catalogs.parseAppend(lockfile, log, source, json, &string_builder);
}
}

View File

@@ -1,6 +1,6 @@
const Tree = @This();
id: Id = invalid_id,
id: Id = .invalid,
// Should not be used for anything other than name
// through `folderName()`. There is no guarantee a dependency
@@ -8,14 +8,33 @@ id: Id = invalid_id,
// same version literal for packages hoisted.
dependency_id: DependencyID = invalid_dependency_id,
parent: Id = invalid_id,
parent: Id = .invalid,
dependencies: Lockfile.DependencyIDSlice = .{},
pub const external_size = @sizeOf(Id) + @sizeOf(PackageID) + @sizeOf(Id) + @sizeOf(Lockfile.DependencyIDSlice);
pub const External = [external_size]u8;
pub const Slice = ExternalSlice(Tree);
pub const List = std.ArrayListUnmanaged(Tree);
pub const Id = u32;
pub const Id = enum(u32) {
root = 0,
invalid = max,
_,
const max = std.math.maxInt(u32);
pub fn get(id: Id) u32 {
bun.debugAssert(id != .invalid);
return @intFromEnum(id);
}
pub fn from(id: u32) Id {
return @enumFromInt(id);
}
pub fn inc(id: *Id) void {
id.* = @enumFromInt(@intFromEnum(id.*) + 1);
}
};
pub fn folderName(this: *const Tree, deps: []const Dependency, buf: string) string {
const dep_id = this.dependency_id;
@@ -36,9 +55,9 @@ pub fn toExternal(this: Tree) External {
pub fn toTree(out: External) Tree {
return .{
.id = @bitCast(out[0..4].*),
.id = .from(@bitCast(out[0..4].*)),
.dependency_id = @bitCast(out[4..8].*),
.parent = @bitCast(out[8..12].*),
.parent = .from(@bitCast(out[8..12].*)),
.dependencies = .{
.off = @bitCast(out[12..16].*),
.len = @bitCast(out[16..20].*),
@@ -46,8 +65,7 @@ pub fn toTree(out: External) Tree {
};
}
pub const root_dep_id: DependencyID = invalid_package_id - 1;
pub const invalid_id: Id = std.math.maxInt(Id);
pub const root_dep_id: DependencyID = invalid_dependency_id - 1;
pub const HoistDependencyResult = union(enum) {
dependency_loop,
@@ -95,7 +113,7 @@ pub fn Iterator(comptime path_style: IteratorPathStyle) type {
pub fn init(lockfile: *const Lockfile) @This() {
var iter: @This() = .{
.tree_id = 0,
.tree_id = .root,
.lockfile = lockfile,
};
if (comptime path_style == .node_modules) {
@@ -105,7 +123,7 @@ pub fn Iterator(comptime path_style: IteratorPathStyle) type {
}
pub fn reset(this: *@This()) void {
this.tree_id = 0;
this.tree_id = .root;
}
pub const Next = struct {
@@ -126,20 +144,20 @@ pub fn Iterator(comptime path_style: IteratorPathStyle) type {
pub fn next(this: *@This(), completed_trees: if (path_style == .node_modules) ?*Bitset else void) ?Next {
const trees = this.lockfile.buffers.trees.items;
if (this.tree_id >= trees.len) return null;
if (this.tree_id.get() >= trees.len) return null;
while (trees[this.tree_id].dependencies.len == 0) {
while (trees[this.tree_id.get()].dependencies.len == 0) {
if (comptime path_style == .node_modules) {
if (completed_trees) |_completed_trees| {
_completed_trees.set(this.tree_id);
_completed_trees.set(this.tree_id.get());
}
}
this.tree_id += 1;
if (this.tree_id >= trees.len) return null;
this.tree_id.inc();
if (this.tree_id.get() >= trees.len) return null;
}
const current_tree_id = this.tree_id;
const tree = trees[current_tree_id];
const tree = trees[current_tree_id.get()];
const tree_dependencies = tree.dependencies.get(this.lockfile.buffers.hoisted_dependencies.items);
const relative_path, const depth = relativePathAndDepth(
@@ -150,7 +168,7 @@ pub fn Iterator(comptime path_style: IteratorPathStyle) type {
path_style,
);
this.tree_id += 1;
this.tree_id.inc();
return .{
.relative_path = relative_path,
@@ -173,7 +191,7 @@ pub fn relativePathAndDepth(
const trees = lockfile.buffers.trees.items;
var depth: usize = 0;
const tree = trees[tree_id];
const tree = trees[tree_id.get()];
var parent_id = tree.id;
var path_written: usize = switch (comptime path_style) {
@@ -181,16 +199,16 @@ pub fn relativePathAndDepth(
.pkg_path => 0,
};
depth_buf[0] = 0;
depth_buf[0] = .root;
if (tree.id > 0) {
if (tree.id != .root) {
const dependencies = lockfile.buffers.dependencies.items;
const buf = lockfile.buffers.string_bytes.items;
var depth_buf_len: usize = 1;
while (parent_id > 0 and parent_id < trees.len) {
while (parent_id != .root and parent_id.get() < trees.len) {
depth_buf[depth_buf_len] = parent_id;
parent_id = trees[parent_id].parent;
parent_id = trees[parent_id.get()].parent;
depth_buf_len += 1;
}
@@ -209,7 +227,7 @@ pub fn relativePathAndDepth(
}
const id = depth_buf[depth_buf_len];
const name = trees[id].folderName(dependencies, buf);
const name = trees[id.get()].folderName(dependencies, buf);
@memcpy(path_buf[path_written..][0..name.len], name);
path_written += name.len;
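
The hunks above walk the parent chain into `depth_buf` and then write folder names from the root back down. A hypothetical, heavily simplified version of that two-pass walk:

const std = @import("std");

const Node = struct { parent: u32, name: []const u8 };
const root: u32 = 0;

fn relativePath(nodes: []const Node, start: u32, buf: []u8) []const u8 {
    var chain: [16]u32 = undefined; // depth_buf analogue
    var chain_len: usize = 0;
    // First pass: collect ids from the target tree up to the root.
    var id = start;
    while (id != root) : (id = nodes[id].parent) {
        chain[chain_len] = id;
        chain_len += 1;
    }
    // Second pass: emit folder names from the root downward.
    var written: usize = 0;
    while (chain_len > 0) {
        chain_len -= 1;
        const name = nodes[chain[chain_len]].name;
        if (written != 0) {
            buf[written] = '/';
            written += 1;
        }
        @memcpy(buf[written..][0..name.len], name);
        written += name.len;
    }
    return buf[0..written];
}

test "walk up, then write names top-down" {
    const nodes = [_]Node{
        .{ .parent = 0, .name = "" }, // root
        .{ .parent = 0, .name = "a" },
        .{ .parent = 1, .name = "b" },
    };
    var buf: [32]u8 = undefined;
    try std.testing.expectEqualStrings("a/b", relativePath(&nodes, 2, &buf));
}

The real code additionally reuses preallocated `path_buf`/`depth_buf` scratch space and adjusts the written path according to the iterator's path style.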
@@ -457,7 +475,7 @@ pub fn processSubtree(
try builder.list.append(builder.allocator, .{
.tree = .{
.parent = this.id,
.id = @as(Id, @truncate(builder.list.len)),
.id = .from(@truncate(builder.list.len)),
.dependency_id = dependency_id,
},
.dependencies = .{},
@@ -599,7 +617,7 @@ pub fn processSubtree(
builder.resolutions[unresolved_dep_id] = pkg_id;
}
}
for (dependency_lists[replace.id].items) |*placed_dep_id| {
for (dependency_lists[replace.id.get()].items) |*placed_dep_id| {
if (placed_dep_id.* == replace.dep_id) {
placed_dep_id.* = dep_id;
}
@@ -623,8 +641,8 @@ pub fn processSubtree(
try entry.value_ptr.append(dep_id);
},
.placement => |dest| {
bun.handleOom(dependency_lists[dest.id].append(builder.allocator, dep_id));
trees[dest.id].dependencies.len += 1;
bun.handleOom(dependency_lists[dest.id.get()].append(builder.allocator, dep_id));
trees[dest.id.get()].dependencies.len += 1;
if (pkg_id != invalid_package_id and builder.resolution_lists[pkg_id].len > 0) {
try builder.queue.writeItem(.{
.tree_id = dest.id,
@@ -639,7 +657,7 @@ pub fn processSubtree(
}
if (next.dependencies.len == 0) {
if (comptime Environment.allow_assert) assert(builder.list.len == next.id + 1);
if (comptime Environment.allow_assert) assert(builder.list.len == next.id.get() + 1);
_ = builder.list.pop();
}
}
@@ -659,7 +677,7 @@ fn hoistDependency(
comptime method: BuilderMethod,
builder: *Builder(method),
) !HoistDependencyResult {
const this_dependencies = this.dependencies.get(dependency_lists[this.id].items);
const this_dependencies = this.dependencies.get(dependency_lists[this.id.get()].items);
for (0..this_dependencies.len) |i| {
const dep_id = this_dependencies[i];
const dep = builder.dependencies[dep_id];
@@ -705,18 +723,13 @@ fn hoistDependency(
// or hoist if peer version allows it
if (dependency.behavior.isPeer()) {
if (dependency.version.tag == .npm) {
const resolution: Resolution = builder.lockfile.packages.items(.resolution)[res_id];
const version = dependency.version.value.npm.version;
if (resolution.tag == .npm and version.satisfies(resolution.value.npm.version, builder.buf(), builder.buf())) {
return .hoisted; // 1
}
}
return .hoisted;
}
// Root dependencies are manually chosen by the user. Allow them
// to hoist other peers even if they don't satisfy the version
if (builder.lockfile.isWorkspaceRootDependency(dep_id)) {
// TODO: warning about peer dependency version mismatch
if (dependency.version.tag == .npm) {
const resolution: Resolution = builder.lockfile.packages.items(.resolution)[builder.resolutions[dep_id]];
const version = dependency.version.value.npm.version;
if (resolution.tag == .npm and version.satisfies(resolution.value.npm.version, builder.buf(), builder.buf())) {
return .hoisted; // 1
}
}
@@ -737,8 +750,8 @@ fn hoistDependency(
}
// this dependency was not found in this tree, try hoisting or placing in the next parent
if (this.parent != invalid_id and this.id != hoist_root_id) {
const id = trees[this.parent].hoistDependency(
if (this.parent != .invalid and this.id != hoist_root_id) {
const id = trees[this.parent.get()].hoistDependency(
false,
hoist_root_id,
package_id,
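
For orientation, a hypothetical distillation of the upward walk at the end of `hoistDependency`: recursion proceeds from the current tree toward the root, stopping at the hoist root or at the first conflicting name. This omits the dedup-on-matching-version path and the peer/workspace-root special cases shown above:

const std = @import("std");

const TreeNode = struct {
    parent: ?u32, // null = the root tree
    names: []const []const u8,

    fn has(self: TreeNode, name: []const u8) bool {
        for (self.names) |n| {
            if (std.mem.eql(u8, n, name)) return true;
        }
        return false;
    }
};

// Returns the id of the highest conflict-free tree between `start` and
// `hoist_root`, i.e. the tree the dependency would be placed into.
fn hoistTarget(trees: []const TreeNode, start: u32, hoist_root: u32, name: []const u8) u32 {
    var id = start;
    while (true) {
        const parent = trees[id].parent orelse return id; // reached the root
        if (id == hoist_root) return id; // may not hoist past the hoist root
        if (trees[parent].has(name)) return id; // parent already pins this name
        id = parent;
    }
}

test "hoists to the root when nothing conflicts" {
    const trees = [_]TreeNode{
        .{ .parent = null, .names = &.{"x"} },
        .{ .parent = 0, .names = &.{} },
    };
    try std.testing.expectEqual(@as(u32, 0), hoistTarget(&trees, 1, 0, "y"));
    try std.testing.expectEqual(@as(u32, 1), hoistTarget(&trees, 1, 0, "x"));
}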

View File

@@ -1118,7 +1118,6 @@ pub fn parseIntoBinaryLockfile(
root: JSON.Expr,
source: *const logger.Source,
log: *logger.Log,
manager: ?*PackageManager,
) ParseError!void {
lockfile.initEmpty(allocator);
@@ -1241,7 +1240,6 @@ pub fn parseIntoBinaryLockfile(
version_sliced.slice,
&version_sliced,
log,
manager,
) orelse {
try log.addError(source, value.loc, "Invalid override version");
return error.InvalidOverridesObject;
@@ -1291,7 +1289,6 @@ pub fn parseIntoBinaryLockfile(
version_sliced.slice,
&version_sliced,
log,
manager,
) orelse {
try log.addError(source, value.loc, "Invalid catalog version");
return error.InvalidCatalogObject;
@@ -1371,7 +1368,6 @@ pub fn parseIntoBinaryLockfile(
version_sliced.slice,
&version_sliced,
log,
manager,
) orelse {
try log.addError(source, value.loc, "Invalid catalog version");
return error.InvalidCatalogsObject;
@@ -2131,7 +2127,6 @@ fn parseAppendDependencies(
version_sliced.slice,
&version_sliced,
log,
null,
) orelse {
try log.addError(source, value.loc, "Invalid dependency version");
return error.InvalidDependencyVersion;
@@ -2222,7 +2217,6 @@ const Bin = Install.Bin;
const Dependency = Install.Dependency;
const DependencyID = Install.DependencyID;
const PackageID = Install.PackageID;
const PackageManager = bun.install.PackageManager;
const PackageNameHash = Install.PackageNameHash;
const Repository = Install.Repository;
const Resolution = Install.Resolution;

View File

@@ -459,7 +459,6 @@ pub fn load(
.allocator = allocator,
.log = log,
.buffer = lockfile.buffers.string_bytes.items,
.package_manager = manager,
};
for (overrides_name_hashes.items, override_versions_external.items) |name, value| {
map.putAssumeCapacity(name, Dependency.toDependency(value, context));
@@ -523,7 +522,6 @@ pub fn load(
.allocator = allocator,
.log = log,
.buffer = lockfile.buffers.string_bytes.items,
.package_manager = manager,
};
for (default_dep_names.items, default_deps.items) |dep_name, dep| {

View File

@@ -192,7 +192,7 @@ pub fn jsonStringify(this: *const Lockfile, w: anytype) !void {
const relative_path, const depth = Lockfile.Tree.relativePathAndDepth(
this,
@intCast(tree_id),
.from(@intCast(tree_id)),
&path_buf,
&depth_buf,
.node_modules,

View File

@@ -769,7 +769,6 @@ pub fn migrateNPMLockfile(
sliced.slice,
&sliced,
log,
manager,
) orelse {
return error.InvalidNPMLockfile;
};
@@ -850,7 +849,6 @@ pub fn migrateNPMLockfile(
tag,
&dep_resolved_sliced,
log,
manager,
) orelse return error.InvalidNPMLockfile;
break :dep_resolved dep_resolved;
@@ -1062,7 +1060,7 @@ pub fn migrateNPMLockfile(
// but after we write all the data, there is no excuse for this to fail.
//
// If this is hit, it means getOrPutID was not called on this package id. Look for where 'resolution[i]' is set
bun.assert(this.getPackageID(this.packages.items(.name_hash)[i], null, &r) != null);
bun.assert(this.getPackageID(this.packages.items(.name_hash)[i], &r) != null);
}
}
if (is_missing_resolutions) {

View File

@@ -283,7 +283,7 @@ pub const PatchTask = struct {
const dummy_node_modules: PackageManager.PackageInstaller.NodeModulesFolder = .{
.path = std.ArrayList(u8).init(this.manager.allocator),
.tree_id = 0,
.tree_id = .root,
};
const resolution_label, const resolution_tag = brk: {

View File

@@ -174,7 +174,6 @@ pub fn migratePnpmLockfile(
version_sliced.slice,
&version_sliced,
log,
manager,
) orelse {
return invalidPnpmLockfile();
},
@@ -908,7 +907,6 @@ fn parseAppendPackageDependencies(
version_sliced.slice,
&version_sliced,
log,
null,
) orelse {
return invalidPnpmLockfile();
},
@@ -1015,7 +1013,6 @@ fn parseAppendPackageDependencies(
version_sliced.slice,
&version_sliced,
log,
null,
) orelse {
return invalidPnpmLockfile();
},
@@ -1038,7 +1035,6 @@ fn parseAppendPackageDependencies(
version_sliced.slice,
&version_sliced,
log,
null,
) orelse {
return invalidPnpmLockfile();
},
@@ -1157,7 +1153,6 @@ fn parseAppendImporterDependencies(
specifier_sliced.slice,
&specifier_sliced,
log,
null,
) orelse {
return invalidPnpmLockfile();
},

View File

@@ -150,7 +150,6 @@ pub const FolderResolution = union(Tag) {
fn readPackageJSONFromDisk(
manager: *PackageManager,
abs: stringZ,
version: Dependency.Version,
comptime features: Features,
comptime ResolverType: type,
resolver: *ResolverType,
@@ -224,7 +223,7 @@ pub const FolderResolution = union(Tag) {
package.meta.setHasInstallScript(has_scripts);
if (manager.lockfile.getPackageID(package.name_hash, version, &package.resolution)) |existing_id| {
if (manager.lockfile.getPackageID(package.name_hash, &package.resolution)) |existing_id| {
package.meta.id = existing_id;
manager.lockfile.packages.set(existing_id, package);
return manager.lockfile.packages.get(existing_id);
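
With the `version` argument removed from `getPackageID`, the lookup (as far as this diff shows) keys purely on the name hash plus the concrete resolution. A hypothetical minimal model of that narrowed lookup, with illustrative stand-ins for Bun's `PackageNameHash` and `Resolution` types:

const std = @import("std");

const Pkg = struct { name_hash: u64, resolution: u64, id: u32 };

fn getPackageId(pkgs: []const Pkg, name_hash: u64, resolution: u64) ?u32 {
    for (pkgs) |pkg| {
        // No version-based matching anymore: only an exact resolution match counts.
        if (pkg.name_hash == name_hash and pkg.resolution == resolution) return pkg.id;
    }
    return null;
}

test "exact match only" {
    const pkgs = [_]Pkg{.{ .name_hash = 1, .resolution = 10, .id = 7 }};
    try std.testing.expectEqual(@as(?u32, 7), getPackageId(&pkgs, 1, 10));
    try std.testing.expectEqual(@as(?u32, null), getPackageId(&pkgs, 1, 11));
}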
@@ -265,7 +264,6 @@ pub const FolderResolution = union(Tag) {
break :global readPackageJSONFromDisk(
manager,
abs,
version,
Features.link,
SymlinkResolver,
&resolver,
@@ -279,7 +277,6 @@ pub const FolderResolution = union(Tag) {
break :folder readPackageJSONFromDisk(
manager,
abs,
version,
Features.folder,
Resolver,
&resolver,
@@ -292,7 +289,6 @@ pub const FolderResolution = union(Tag) {
break :workspace readPackageJSONFromDisk(
manager,
abs,
version,
Features.workspace,
WorkspaceResolver,
&resolver,
@@ -307,7 +303,6 @@ pub const FolderResolution = union(Tag) {
break :cache_folder readPackageJSONFromDisk(
manager,
abs,
version,
Features.npm,
CacheFolderResolver,
&resolver,

View File

@@ -483,7 +483,6 @@ fn processDeps(
deps_buf: []Dependency,
res_buf: []Install.PackageID,
log: *logger.Log,
manager: *Install.PackageManager,
yarn_entry_to_package_id: []const Install.PackageID,
) ![]Install.PackageID {
var deps_it = deps.iterator();
@@ -520,7 +519,6 @@ fn processDeps(
parsed_version,
&Semver.SlicedString.init(parsed_version, parsed_version),
log,
manager,
) orelse Dependency.Version{},
.behavior = .{
.prod = dep_type == .production,
@@ -714,7 +712,7 @@ pub fn migrateYarnLockfile(
if (string_builder.cap > 0) {
try string_builder.allocate();
}
try this.overrides.parseAppend(manager, this, &root_package, log, &package_json_source, package_json, &string_builder);
try this.overrides.parseAppend(this, &root_package, log, &package_json_source, package_json, &string_builder);
this.packages.set(0, root_package);
}
}
@@ -1043,7 +1041,6 @@ pub fn migrateYarnLockfile(
version_string.slice(this.buffers.string_bytes.items),
&version_string.sliced(this.buffers.string_bytes.items),
log,
manager,
) orelse Dependency.Version{},
.behavior = .{
.prod = dep.dep_type == .production,
@@ -1079,25 +1076,25 @@ pub fn migrateYarnLockfile(
const dependencies_start = dependencies_buf.ptr;
const resolutions_start = resolutions_buf.ptr;
if (entry.dependencies) |deps| {
const processed = try processDeps(deps, .production, &yarn_lock, &string_buf, dependencies_buf, resolutions_buf, log, manager, yarn_entry_to_package_id);
const processed = try processDeps(deps, .production, &yarn_lock, &string_buf, dependencies_buf, resolutions_buf, log, yarn_entry_to_package_id);
dependencies_buf = dependencies_buf[processed.len..];
resolutions_buf = resolutions_buf[processed.len..];
}
if (entry.optionalDependencies) |deps| {
const processed = try processDeps(deps, .optional, &yarn_lock, &string_buf, dependencies_buf, resolutions_buf, log, manager, yarn_entry_to_package_id);
const processed = try processDeps(deps, .optional, &yarn_lock, &string_buf, dependencies_buf, resolutions_buf, log, yarn_entry_to_package_id);
dependencies_buf = dependencies_buf[processed.len..];
resolutions_buf = resolutions_buf[processed.len..];
}
if (entry.peerDependencies) |deps| {
const processed = try processDeps(deps, .peer, &yarn_lock, &string_buf, dependencies_buf, resolutions_buf, log, manager, yarn_entry_to_package_id);
const processed = try processDeps(deps, .peer, &yarn_lock, &string_buf, dependencies_buf, resolutions_buf, log, yarn_entry_to_package_id);
dependencies_buf = dependencies_buf[processed.len..];
resolutions_buf = resolutions_buf[processed.len..];
}
if (entry.devDependencies) |deps| {
const processed = try processDeps(deps, .development, &yarn_lock, &string_buf, dependencies_buf, resolutions_buf, log, manager, yarn_entry_to_package_id);
const processed = try processDeps(deps, .development, &yarn_lock, &string_buf, dependencies_buf, resolutions_buf, log, yarn_entry_to_package_id);
dependencies_buf = dependencies_buf[processed.len..];
resolutions_buf = resolutions_buf[processed.len..];
}
@@ -1120,8 +1117,8 @@ pub fn migrateYarnLockfile(
try this.buffers.hoisted_dependencies.ensureTotalCapacity(allocator, this.buffers.dependencies.items.len * 2);
try this.buffers.trees.append(allocator, Tree{
.id = 0,
.parent = Tree.invalid_id,
.id = .root,
.parent = .invalid,
.dependency_id = Tree.root_dep_id,
.dependencies = .{
.off = 0,
@@ -1435,7 +1432,6 @@ pub fn migrateYarnLockfile(
dep_version_string.slice(this.buffers.string_bytes.items),
&sliced_string,
log,
manager,
) orelse Dependency.Version{};
parsed_version.literal = dep_version_string;
@@ -1501,7 +1497,6 @@ pub fn migrateYarnLockfile(
dep_version_string.slice(this.buffers.string_bytes.items),
&sliced_string,
log,
manager,
) orelse Dependency.Version{};
parsed_version.literal = dep_version_string;
@@ -1545,7 +1540,6 @@ pub fn migrateYarnLockfile(
dep_version_string.slice(this.buffers.string_bytes.items),
&sliced_string,
log,
manager,
) orelse Dependency.Version{};
parsed_version.literal = dep_version_string;
@@ -1589,7 +1583,6 @@ pub fn migrateYarnLockfile(
dep_version_string.slice(this.buffers.string_bytes.items),
&sliced_string,
log,
manager,
) orelse Dependency.Version{};
parsed_version.literal = dep_version_string;
@@ -1633,7 +1626,6 @@ pub fn migrateYarnLockfile(
dep_version_string.slice(this.buffers.string_bytes.items),
&sliced_string,
log,
manager,
) orelse Dependency.Version{};
parsed_version.literal = dep_version_string;

View File

@@ -921,7 +921,6 @@ pub const PackageJSON = struct {
.npm,
&sliced,
r.log,
pm,
)) |dependency_version| {
if (dependency_version.value.npm.version.isExact()) {
if (pm.lockfile.resolvePackageFromNameAndVersion(package_json.name, dependency_version)) |resolved| {
@@ -1043,7 +1042,6 @@ pub const PackageJSON = struct {
version_str,
&sliced_str,
r.log,
r.package_manager,
)) |dependency_version| {
const dependency = Dependency{
.name = name,

View File

@@ -1986,7 +1986,6 @@ pub const Resolver = struct {
esm.version,
&sliced_string,
r.log,
manager,
) orelse break :load_module_from_cache;
}
@@ -2309,7 +2308,6 @@ pub const Resolver = struct {
if (package_json_) |package_json| {
package = Package.fromPackageJSON(
pm.lockfile,
pm,
package_json,
Install.Features{
.dev_dependencies = true,