Track peer dependencies but don't install them

This commit is contained in:
Jarred Sumner
2021-12-09 21:21:43 -08:00
parent 0dc0d6c31c
commit 461c769ac4
7 changed files with 393 additions and 113 deletions

View File

@@ -32,6 +32,28 @@ pub fn addPicoHTTP(step: *std.build.LibExeObjStep, comptime with_obj: bool) void
// homebrew-provided icu4c
}
/// Registers Bun's internal Zig packages ("io", "strings", "clap") on `step`.
/// The "io" package path is selected per platform: darwin gets io_darwin.zig,
/// everything else falls back to io_linux.zig.
/// The allocPrint'd path string is owned by `allocator`'s owner (build-time only).
fn addInternalPackages(step: *std.build.LibExeObjStep, allocator: *std.mem.Allocator, target: anytype) !void {
    // Never mutated — declare const instead of var.
    const platform_label: []const u8 = if (target.isDarwin())
        "darwin"
    else
        "linux";

    step.addPackage(.{
        .name = "io",
        .path = .{ .path = try std.fmt.allocPrint(allocator, "src/io/io_{s}.zig", .{platform_label}) },
    });
    step.addPackage(.{
        .name = "strings",
        .path = .{ .path = "src/string_immutable.zig" },
    });
    step.addPackage(.{
        .name = "clap",
        .path = .{ .path = "src/deps/zig-clap/clap.zig" },
    });
}
fn panicIfNotFound(comptime filepath: []const u8) []const u8 {
var file = std.fs.cwd().openFile(filepath, .{ .read = true }) catch |err| {
const linux_only = "\nOn Linux, you'll need to compile libiconv manually and copy the .a file into src/deps.";
@@ -153,10 +175,6 @@ pub fn build(b: *std.build.Builder) !void {
// exe.setLibCFile("libc.txt");
exe.linkLibC();
// exe.linkLibCpp();
exe.addPackage(.{
.name = "clap",
.path = .{ .path = "src/deps/zig-clap/clap.zig" },
});
exe.setOutputDir(output_dir);
var cwd_dir = std.fs.cwd();
@@ -266,6 +284,7 @@ pub fn build(b: *std.build.Builder) !void {
step,
true,
);
try addInternalPackages(step, b.allocator, target);
step.addObjectFile(panicIfNotFound("src/deps/libJavaScriptCore.a"));
step.addObjectFile(panicIfNotFound("src/deps/libWTF.a"));
@@ -316,23 +335,12 @@ pub fn build(b: *std.build.Builder) !void {
var obj = b.addObject(bun_executable_name, exe.root_src.?.path);
obj.setTarget(target);
addPicoHTTP(obj, false);
obj.addPackage(.{
.name = "clap",
.path = .{ .path = "src/deps/zig-clap/clap.zig" },
});
var platform_label = if (target.isDarwin())
"darwin"
else
"linux";
obj.addPackage(.{
.name = "io",
.path = .{ .path = try std.fmt.allocPrint(b.allocator, "src/io/io_{s}.zig", .{platform_label}) },
});
exe.addPackage(.{
.name = "io",
.path = .{ .path = try std.fmt.allocPrint(b.allocator, "src/io/io_{s}.zig", .{platform_label}) },
});
try addInternalPackages(
obj,
b.allocator,
target,
);
{
obj_step.dependOn(&b.addLog(

View File

@@ -40,6 +40,7 @@ const UpgradeCommand = @import("./cli/upgrade_command.zig").UpgradeCommand;
const InstallCommand = @import("./cli/install_command.zig").InstallCommand;
const AddCommand = @import("./cli/add_command.zig").AddCommand;
const RemoveCommand = @import("./cli/remove_command.zig").RemoveCommand;
const PackageManagerCommand = @import("./cli/package_manager_command.zig").PackageManagerCommand;
const InstallCompletionsCommand = @import("./cli/install_completions_command.zig").InstallCompletionsCommand;
const ShellCompletions = @import("./cli/shell_completions.zig");
@@ -490,6 +491,7 @@ const HelpCommand = struct {
\\> <r> <b><green>install<r> Install dependencies for a package.json <d>(bun i)<r>
\\> <r> <b><blue>add <r><d> {s:<16}<r> Add a dependency to package.json <d>(bun a)<r>
\\> <r> remove <r><d> {s:<16}<r> Remove a dependency from package.json <d>(bun rm)<r>
\\> <r> pm <r> More package manager-related subcommands
\\
\\> <r> <b><blue>upgrade <r> Get the latest version of Bun
\\> <r> <b><d>completions<r> Install shell completions for tab-completion
@@ -615,6 +617,8 @@ pub const Command = struct {
RootCommandMatcher.case("i"), RootCommandMatcher.case("install") => .InstallCommand,
RootCommandMatcher.case("c"), RootCommandMatcher.case("create") => .CreateCommand,
RootCommandMatcher.case("pm") => .PackageManagerCommand,
RootCommandMatcher.case("add"), RootCommandMatcher.case("update"), RootCommandMatcher.case("a") => .AddCommand,
RootCommandMatcher.case("remove"), RootCommandMatcher.case("rm") => .RemoveCommand,
@@ -694,6 +698,12 @@ pub const Command = struct {
try RemoveCommand.exec(ctx);
return;
},
.PackageManagerCommand => {
const ctx = try Command.Context.create(allocator, log, .PackageManagerCommand);
try PackageManagerCommand.exec(ctx);
return;
},
.GetCompletionsCommand => {
const ctx = try Command.Context.create(allocator, log, .GetCompletionsCommand);
var filter = ctx.positionals;
@@ -834,12 +844,14 @@ pub const Command = struct {
InstallCompletionsCommand,
RunCommand,
UpgradeCommand,
PackageManagerCommand,
pub const uses_global_options: std.EnumArray(Tag, bool) = std.EnumArray(Tag, bool).initDefault(true, .{
.CreateCommand = false,
.InstallCommand = false,
.AddCommand = false,
.RemoveCommand = false,
.PackageManagerCommand = false,
});
};
};

View File

@@ -0,0 +1,24 @@
const Command = @import("../cli.zig").Command;
const PackageManager = @import("../install/install.zig").PackageManager;
const std = @import("std");
const strings = @import("strings");
/// Implements `bun pm <subcommand>` — package-manager-related subcommands.
pub const PackageManagerCommand = struct {
    /// Print usage for `bun pm`. Help text is not implemented yet.
    pub fn printHelp(allocator: *std.mem.Allocator) void {
        _ = allocator; // placeholder until help output is written
    }

    /// Entry point for the `pm` root command. Strips argv[0] and the
    /// leading "pm" token, then (eventually) dispatches on the subcommand.
    pub fn exec(ctx: Command.Context) !void {
        var args = try std.process.argsAlloc(ctx.allocator);
        // Drop the executable name (argv[0]).
        args = args[1..];

        // Guard before indexing: the original read args[0] here without
        // checking the length, which is out-of-bounds when the process was
        // started with no arguments after the executable name.
        if (args.len > 0 and strings.eqlComptime(std.mem.span(args[0]), "pm")) {
            args = args[1..];
        }

        if (args.len == 0) {
            printHelp(ctx.allocator);
            std.os.exit(0);
        }

        // Subcommand name; dispatch is not implemented yet in this commit.
        const first = std.mem.span(args[0]);
        _ = first;
    }
};

View File

@@ -49,7 +49,9 @@ behavior: Behavior = Behavior.uninitialized,
/// Sorting order for dependencies is:
/// 1. [`dependencies`, `devDependencies`, `optionalDependencies`, `peerDependencies`]
/// 2. name
/// 2. name ASC
/// "name" must be ASC so that later, when we rebuild the lockfile
/// we insert it back in reverse order without an extra sorting pass
pub fn isLessThan(string_buf: []const u8, lhs: Dependency, rhs: Dependency) bool {
const behavior = lhs.behavior.cmp(rhs.behavior);
if (behavior != .eq) {
@@ -535,7 +537,7 @@ pub const Behavior = enum(u8) {
pub const peer: u8 = 1 << 4;
pub inline fn isOptional(this: Behavior) bool {
return (@enumToInt(this) & Behavior.optional) != 0;
return (@enumToInt(this) & Behavior.optional) != 0 and (@enumToInt(this) & Behavior.peer) == 0;
}
pub inline fn isDev(this: Behavior) bool {
@@ -550,6 +552,10 @@ pub const Behavior = enum(u8) {
return (@enumToInt(this) & Behavior.normal) != 0;
}
/// Returns a copy of `this` with the `optional` bit set or cleared per `value`.
/// The previous implementation OR-ed `boolToInt(value) << 2`, which could only
/// ever set the bit — passing `value == false` on an already-optional Behavior
/// was a silent no-op. It also hard-coded shift 2 instead of reusing
/// `Behavior.optional` like the sibling `isOptional` does.
pub inline fn setOptional(this: Behavior, value: bool) Behavior {
    if (value) {
        return @intToEnum(Behavior, @enumToInt(this) | Behavior.optional);
    }
    return @intToEnum(Behavior, @enumToInt(this) & ~Behavior.optional);
}
pub inline fn cmp(lhs: Behavior, rhs: Behavior) std.math.Order {
if (@enumToInt(lhs) == @enumToInt(rhs)) {
return .eq;

View File

@@ -339,7 +339,7 @@ pub const Features = struct {
optional_dependencies: bool = false,
dev_dependencies: bool = false,
scripts: bool = false,
peer_dependencies: bool = false,
peer_dependencies: bool = true,
is_main: bool = false,
check_for_duplicate_dependencies: bool = false,
@@ -485,57 +485,74 @@ pub const Lockfile = struct {
try new.packages.ensureTotalCapacity(old.allocator, old.packages.len);
try new.buffers.preallocate(old.buffers, old.allocator);
const InstallOrder = struct {
parent: PackageID,
children: PackageIDSlice,
};
old.scratch.dependency_list_queue.head = 0;
// Step 1. Recreate the lockfile with only the packages that are still alive
const root = old.rootPackage() orelse return error.NoPackage;
var slices = old.packages.slice();
var package_id_mapping = try old.allocator.alloc(PackageID, old.packages.len);
std.mem.set(
PackageID,
package_id_mapping,
invalid_package_id,
);
var clone_queue_ = PendingResolutions.init(old.allocator);
var clone_queue = &clone_queue_;
try clone_queue.ensureUnusedCapacity(root.dependencies.len);
var clone_queue_ = PendingResolutions.init();
var cloner = Cloner{
.old = old,
.lockfile = new,
.mapping = package_id_mapping,
.clone_queue = clone_queue_,
};
// try clone_queue.ensureUnusedCapacity(root.dependencies.len);
var duplicate_resolutions_bitset = try std.DynamicBitSetUnmanaged.initEmpty(old.buffers.resolutions.items.len, old.allocator);
var duplicate_resolutions_bitset_ptr = &duplicate_resolutions_bitset;
_ = try root.clone(old, new, package_id_mapping, clone_queue, duplicate_resolutions_bitset_ptr);
while (clone_queue.readItem()) |to_clone_| {
const to_clone: PendingResolution = to_clone_;
const mapping = package_id_mapping[to_clone.old_resolution];
if (mapping < max_package_id) {
new.buffers.resolutions.items[to_clone.resolve_id] = package_id_mapping[to_clone.old_resolution];
continue;
}
const old_package = old.packages.get(to_clone.old_resolution);
new.buffers.resolutions.items[to_clone.resolve_id] = try old_package.clone(
old,
new,
package_id_mapping,
clone_queue,
duplicate_resolutions_bitset_ptr,
);
}
_ = try root.clone(old, new, package_id_mapping, &cloner);
try cloner.flush();
return new;
}
/// Carries the state needed while cloning live packages from `old` into
/// `lockfile`: the pending-resolution work queue and the old→new id mapping.
const Cloner = struct {
    clone_queue: PendingResolutions,
    lockfile: *Lockfile,
    old: *Lockfile,
    mapping: []PackageID,

    /// Drain the pending-resolution queue. Each entry either resolves to a
    /// package that was already cloned (its mapping slot is filled) or triggers
    /// a clone of the old package — which may enqueue further work onto
    /// `clone_queue`, so we loop until the queue is empty.
    pub fn flush(this: *Cloner) anyerror!void {
        const max_package_id = this.old.packages.len;
        while (this.clone_queue.readItem()) |pending| {
            const prior_id = pending.old_resolution;
            const mapped = this.mapping[prior_id];
            if (mapped < max_package_id) {
                // Already cloned: just record the new id.
                this.lockfile.buffers.resolutions.items[pending.resolve_id] = mapped;
            } else {
                // Not cloned yet: clone it now and record the id it received.
                const source_package = this.old.packages.get(prior_id);
                this.lockfile.buffers.resolutions.items[pending.resolve_id] = try source_package.clone(
                    this.old,
                    this.lockfile,
                    this.mapping,
                    this,
                );
            }
        }
    }
};
/// Work-queue entry describing one dependency edge that still needs its
/// resolved package cloned from the old lockfile into the new one.
const PendingResolution = struct {
    // Package id in the *old* lockfile that must be cloned (looked up via
    // old.packages.get / the old→new mapping in Cloner.flush).
    old_resolution: PackageID,
    // Slot in the new lockfile's resolutions buffer to write the cloned id into.
    resolve_id: PackageID,
    // New-lockfile id of the dependent package (set from new_package.meta.id
    // at enqueue time).
    parent: PackageID,
};
const PendingResolutions = std.fifo.LinearFifo(PendingResolution, .Dynamic);
const PendingResolutions = std.fifo.LinearFifo(PendingResolution, .{ .Static = 32 });
pub const Printer = struct {
lockfile: *Lockfile,
@@ -657,6 +674,60 @@ pub const Lockfile = struct {
}
}
/// Tree-style printer for the lockfile. In this commit it emits a flat,
/// one-line-per-package listing (name@version + dependency count).
pub const Tree = struct {
    pub fn print(
        this: *Printer,
        comptime Writer: type,
        writer: Writer,
        comptime enable_ansi_colors: bool,
    ) !void {
        var lockfile = this.lockfile;
        // Pair of package id and its depth — declared but not yet used below.
        const IDDepthPair = struct {
            depth: u16 = 0,
            id: PackageID,
        };
        var visited = try std.DynamicBitSetUnmanaged.initEmpty(this.lockfile.packages.len, this.lockfile.allocator);
        var slice = this.lockfile.packages.slice();
        const names: []const String = slice.items(.name);
        const resolved: []const Resolution = slice.items(.resolution);
        // NOTE(review): metas, resolutions_list, resolutions_buffer,
        // dependencies_buffer, package_count, and root are computed but unused
        // in this version — presumably staging for the real tree walk.
        const metas: []const Lockfile.Package.Meta = slice.items(.meta);
        // Nothing to print for an empty lockfile.
        if (names.len == 0) return;
        const dependency_lists = slice.items(.dependencies);
        const resolutions_list = slice.items(.resolutions);
        const resolutions_buffer = this.lockfile.buffers.resolutions.items;
        const dependencies_buffer = this.lockfile.buffers.dependencies.items;
        const package_count = @truncate(PackageID, names.len);
        const string_buf = this.lockfile.buffers.string_bytes.items;
        const root = this.lockfile.rootPackage() orelse return;
        // Mark the root so it is not revisited.
        visited.set(0);
        for (names) |name, package_id| {
            const package_name = name.slice(string_buf);
            const dependency_list = dependency_lists[package_id];
            try writer.print(
                comptime Output.prettyFmt(" <r><b>{s}<r><d>@<b>{}<r><d> ({d} dependencies)<r>\n", enable_ansi_colors),
                .{
                    package_name,
                    resolved[package_id].fmt(string_buf),
                    dependency_list.len,
                },
            );
            // NOTE(review): the visited check happens *after* printing, so it
            // never suppresses any output — confirm whether it was meant to
            // precede the writer.print call.
            if (visited.isSet(package_id)) {
                continue;
            }
            visited.set(package_id);
        }
    }
};
pub const Yarn = struct {
pub fn print(
this: *Printer,
@@ -944,10 +1015,12 @@ pub const Lockfile = struct {
pub fn getPackageID(
this: *Lockfile,
name_hash: u64,
// if it's a peer dependency
version: ?Dependency.Version,
resolution: Resolution,
) ?PackageID {
const entry = this.package_index.get(name_hash) orelse return null;
const resolutions = this.packages.items(.resolution);
const resolutions: []const Resolution = this.packages.items(.resolution);
switch (entry) {
.PackageID => |id| {
if (comptime Environment.isDebug or Environment.isTest) {
@@ -961,10 +1034,23 @@ pub const Lockfile = struct {
this.buffers.string_bytes.items,
)) {
return id;
} else if (version) |version_| {
switch (version_.tag) {
.npm => {
// is it a peerDependency satisfied by a parent package?
if (version_.value.npm.satisfies(resolutions[id].value.npm)) {
return id;
}
},
else => return null,
}
}
},
.PackageIDMultiple => |multi_| {
const multi = std.mem.span(multi_);
const can_satisfy = version != null and version.?.tag == .npm;
for (multi) |id| {
if (comptime Environment.isDebug or Environment.isTest) {
std.debug.assert(id != invalid_package_id);
@@ -975,6 +1061,10 @@ pub const Lockfile = struct {
if (resolutions[id].eql(resolution, this.buffers.string_bytes.items, this.buffers.string_bytes.items)) {
return id;
}
if (can_satisfy and version.?.value.npm.satisfies(resolutions[id].value.npm)) {
return id;
}
}
},
}
@@ -1045,8 +1135,8 @@ pub const Lockfile = struct {
pub fn appendPackageWithID(this: *Lockfile, package_: Lockfile.Package, id: PackageID) !Lockfile.Package {
defer {
if (comptime Environment.isDebug) {
std.debug.assert(this.getPackageID(package_.name_hash, package_.resolution) != null);
std.debug.assert(this.getPackageID(package_.name_hash, package_.resolution).? == id);
std.debug.assert(this.getPackageID(package_.name_hash, null, package_.resolution) != null);
std.debug.assert(this.getPackageID(package_.name_hash, null, package_.resolution).? == id);
}
}
var package = package_;
@@ -1227,11 +1317,19 @@ pub const Lockfile = struct {
const DependencySlice = ExternalSlice(Dependency);
const PackageIDSlice = ExternalSlice(PackageID);
const NodeModulesFolderSlice = ExternalSlice(NodeModulesFolder);
const PackageIDList = std.ArrayListUnmanaged(PackageID);
const DependencyList = std.ArrayListUnmanaged(Dependency);
const StringBuffer = std.ArrayListUnmanaged(u8);
const SmallExternalStringBuffer = std.ArrayListUnmanaged(String);
const NodeModulesFolder = extern struct {
in: PackageID = 0,
packages: PackageIDSlice = PackageIDSlice{},
children: NodeModulesFolderSlice = NodeModulesFolderSlice{},
};
pub const Package = extern struct {
const DependencyGroup = struct {
prop: string,
@@ -1267,8 +1365,7 @@ pub const Lockfile = struct {
old: *Lockfile,
new: *Lockfile,
package_id_mapping: []PackageID,
clone_queue: *PendingResolutions,
duplicate_resolutions_bitset: *std.DynamicBitSetUnmanaged,
cloner: *Cloner,
) !PackageID {
const old_string_buf = old.buffers.string_bytes.items;
var builder_ = new.stringBuilder();
@@ -1280,6 +1377,7 @@ pub const Lockfile = struct {
const old_dependencies: []const Dependency = this.dependencies.get(old.buffers.dependencies.items);
const old_resolutions: []const PackageID = this.resolutions.get(old.buffers.resolutions.items);
for (old_dependencies) |dependency, i| {
dependency.count(old_string_buf, *Lockfile.StringBuilder, builder);
}
@@ -1292,7 +1390,7 @@ pub const Lockfile = struct {
const prev_len = @truncate(u32, new.buffers.dependencies.items.len);
const end = prev_len + @truncate(u32, old_dependencies.len);
const max_package_id = @truncate(u32, old.packages.len);
const max_package_id = @truncate(PackageID, old.packages.len);
new.buffers.dependencies.items = new.buffers.dependencies.items.ptr[0..end];
new.buffers.resolutions.items = new.buffers.resolutions.items.ptr[0..end];
@@ -1333,30 +1431,33 @@ pub const Lockfile = struct {
*Lockfile.StringBuilder,
builder,
);
const old_resolution = old_resolutions[i];
if (old_resolution < max_package_id) {
const mapped = package_id_mapping[old_resolution];
const resolve_id = new_package.resolutions.off + @truncate(u32, i);
if (!old.unique_packages.isSet(old_resolution)) duplicate_resolutions_bitset.set(resolve_id);
if (mapped < max_package_id) {
resolutions[i] = mapped;
} else {
try clone_queue.writeItem(
PendingResolution{
.old_resolution = old_resolution,
.parent = new_package.meta.id,
.resolve_id = resolve_id,
},
);
}
}
}
builder.clamp();
for (old_resolutions) |old_resolution, i| {
if (old_resolution >= max_package_id) continue;
if (cloner.clone_queue.writableLength() == 0) {
try cloner.flush();
}
const mapped = package_id_mapping[old_resolution];
const resolve_id = new_package.resolutions.off + @intCast(PackageID, i);
if (mapped < max_package_id) {
resolutions[i] = mapped;
} else {
cloner.clone_queue.writeItemAssumeCapacity(
PendingResolution{
.old_resolution = old_resolution,
.parent = new_package.meta.id,
.resolve_id = resolve_id,
},
);
}
}
return new_package.meta.id;
}
@@ -1466,16 +1567,21 @@ pub const Lockfile = struct {
const version_strings = map.value.get(manifest.external_strings_for_versions);
if (comptime Environment.isDebug) std.debug.assert(keys.len == version_strings.len);
const is_peer = comptime strings.eqlComptime(group.field, "peer_dependencies");
for (keys) |key, i| {
const version_string_ = version_strings[i];
const name: ExternalString = string_builder.appendWithHash(ExternalString, key.slice(string_buf), key.hash);
const dep_version = string_builder.appendWithHash(String, version_string_.slice(string_buf), version_string_.hash);
const sliced = dep_version.sliced(lockfile.buffers.string_bytes.items);
const dependency = Dependency{
.name = name.value,
.name_hash = name.hash,
.behavior = group.behavior,
.behavior = if (comptime is_peer)
group.behavior.setOptional(package_version.optional_peer_dependencies_len > i)
else
group.behavior,
.version = Dependency.parse(
allocator,
sliced.slice,
@@ -1983,6 +2089,8 @@ pub const Lockfile = struct {
resolutions: PackageIDList = PackageIDList{},
dependencies: DependencyList = DependencyList{},
extern_strings: SmallExternalStringBuffer = SmallExternalStringBuffer{},
// node_modules_folders: NodeModulesFolderList = NodeModulesFolderList{},
// node_modules_package_ids: PackageIDList = PackageIDList{},
string_bytes: StringBuffer = StringBuffer{},
pub fn preallocate(this: *Buffers, that: Buffers, allocator: *std.mem.Allocator) !void {
@@ -2941,15 +3049,20 @@ pub const PackageManager = struct {
name: String,
version: Dependency.Version,
dependency_id: PackageID,
is_peer: bool,
manifest: *const Npm.PackageManifest,
find_result: Npm.PackageManifest.FindResult,
) !?ResolvedPackageResult {
// Was this package already allocated? Let's reuse the existing one.
if (this.lockfile.getPackageID(name_hash, .{
.tag = .npm,
.value = .{ .npm = find_result.version },
})) |id| {
if (this.lockfile.getPackageID(
name_hash,
version,
.{
.tag = .npm,
.value = .{ .npm = find_result.version },
},
)) |id| {
return ResolvedPackageResult{
.package = this.lockfile.packages.get(id),
.is_first_time = false,
@@ -3105,6 +3218,7 @@ pub const PackageManager = struct {
name_hash: PackageNameHash,
name: String,
version: Dependency.Version,
is_peer: bool,
dependency_id: PackageID,
resolution: PackageID,
) !?ResolvedPackageResult {
@@ -3126,7 +3240,7 @@ pub const PackageManager = struct {
else => unreachable,
};
return try getOrPutResolvedPackageWithFindResult(this, name_hash, name, version, dependency_id, manifest, find_result);
return try getOrPutResolvedPackageWithFindResult(this, name_hash, name, version, dependency_id, is_peer, manifest, find_result);
},
else => return null,
@@ -3259,7 +3373,14 @@ pub const PackageManager = struct {
switch (dependency.version.tag) {
.npm, .dist_tag => {
retry_from_manifests_ptr: while (true) {
var resolve_result_ = this.getOrPutResolvedPackage(name_hash, name, version, id, resolution);
var resolve_result_ = this.getOrPutResolvedPackage(
name_hash,
name,
version,
dependency.behavior.isPeer(),
id,
resolution,
);
retry_with_new_resolve_result: while (true) {
const resolve_result = resolve_result_ catch |err| {
@@ -3327,7 +3448,7 @@ pub const PackageManager = struct {
this.enqueueNetworkTask(network_task);
}
}
} else {
} else if (!dependency.behavior.isPeer()) {
const task_id = Task.Id.forManifest(Task.Tag.package_manifest, this.lockfile.str(name));
var network_entry = try this.network_dedupe_map.getOrPutContext(this.allocator, task_id, .{});
if (!network_entry.found_existing) {
@@ -3349,6 +3470,7 @@ pub const PackageManager = struct {
name,
version,
id,
dependency.behavior.isPeer(),
&loaded_manifest.?,
find_result,
) catch null) |new_resolve_result| {
@@ -4921,7 +5043,6 @@ pub const PackageManager = struct {
const cache_dir = this.cache_directory;
lockfile.unique_packages.unset(0);
var toplevel_node_modules = lockfile.unique_packages.iterator(.{});
// If there was already a valid lockfile and so we did not resolve, i.e. there was zero network activity
// the packages could still not be in the cache dir
@@ -4953,12 +5074,29 @@ pub const PackageManager = struct {
var summary = PackageInstall.Summary{};
{
const toplevel_count = lockfile.unique_packages.count();
var parts = lockfile.packages.slice();
var metas = parts.items(.meta);
var names = parts.items(.name);
var dependency_lists = parts.items(.dependencies);
var dependencies = lockfile.buffers.dependencies.items;
var resolutions_buffer = lockfile.buffers.resolutions.items;
var resolution_lists = parts.items(.resolutions);
var resolutions = parts.items(.resolution);
const pending_task_offset = this.total_tasks;
const root_dependency_list = dependency_lists[0];
const root_resolution_list = resolution_lists[0];
var toplevel_packages = try lockfile.unique_packages.clone(this.allocator);
const max_package_id = @truncate(PackageID, names.len);
for (root_resolution_list.get(resolutions_buffer)) |package_id| {
if (package_id > max_package_id) continue;
toplevel_packages.set(package_id);
}
const toplevel_count = toplevel_packages.count();
var toplevel_node_modules = toplevel_packages.iterator(.{});
var installer = PackageInstaller{
.manager = this,
.metas = metas,
@@ -4975,8 +5113,6 @@ pub const PackageManager = struct {
.install_count = toplevel_count,
};
const pending_task_offset = this.total_tasks;
// When it's a Good Idea, run the install in single-threaded
// From benchmarking, apfs clonefile() is ~6x faster than copyfile() on macOS
// Running it in parallel is the same or slower.
@@ -5389,6 +5525,18 @@ pub const PackageManager = struct {
}
}
if (manager.options.log_level != .silent) {
var printer = Lockfile.Printer{
.lockfile = manager.lockfile,
.options = manager.options,
};
if (Output.enable_ansi_colors) {
try Lockfile.Printer.Tree.print(&printer, Output.WriterType, Output.writer(), true);
} else {
try Lockfile.Printer.Tree.print(&printer, Output.WriterType, Output.writer(), false);
}
}
Output.prettyln(" <green>+{d}<r> add | <cyan>{d}<r> update | <r><red>-{d}<r> remove | {d} installed | {d} deduped | {d} skipped | {d} failed", .{
manager.summary.add,
manager.summary.update,

View File

@@ -341,6 +341,7 @@ pub const PackageVersion = extern struct {
optional_dependencies: ExternalStringMap = ExternalStringMap{},
/// `"peerDependencies"` in [package.json](https://docs.npmjs.com/cli/v8/configuring-npm/package-json#peerdependencies)
/// if `optional_peer_dependencies_len` is > 0, then instead of alphabetical, the first N items are optional
peer_dependencies: ExternalStringMap = ExternalStringMap{},
/// `"devDependencies"` in [package.json](https://docs.npmjs.com/cli/v8/configuring-npm/package-json#devdependencies)
@@ -355,7 +356,8 @@ pub const PackageVersion = extern struct {
engines: ExternalStringMap = ExternalStringMap{},
/// `"peerDependenciesMeta"` in [package.json](https://docs.npmjs.com/cli/v8/configuring-npm/package-json#peerdependenciesmeta)
optional_peer_dependencies: ExternalStringMap = ExternalStringMap{},
/// if `optional_peer_dependencies_len` is > 0, then instead of alphabetical, the first N items of `peer_dependencies` are optional
optional_peer_dependencies_len: u32 = 0,
man_dir: ExternalString = ExternalString{},
@@ -705,6 +707,9 @@ pub const PackageManifest = struct {
threadlocal var external_string_maps_: ExternalStringMapDeduper = undefined;
threadlocal var external_string_maps_loaded: bool = false;
threadlocal var optional_peer_dep_names_: std.ArrayList(u64) = undefined;
threadlocal var optional_peer_dep_names_loaded: bool = false;
/// This parses [Abbreviated metadata](https://github.com/npm/registry/blob/master/docs/responses/package-metadata.md#abbreviated-metadata-format)
pub fn parse(
allocator: *std.mem.Allocator,
@@ -740,13 +745,21 @@ pub const PackageManifest = struct {
external_string_maps_loaded = true;
}
if (!optional_peer_dep_names_loaded) {
optional_peer_dep_names_ = std.ArrayList(u64).init(default_allocator);
optional_peer_dep_names_loaded = true;
}
var string_pool = string_pool_;
string_pool.clearRetainingCapacity();
var external_string_maps = external_string_maps_;
external_string_maps.clearRetainingCapacity();
var optional_peer_dep_names = optional_peer_dep_names_;
optional_peer_dep_names.clearRetainingCapacity();
defer string_pool_ = string_pool;
defer external_string_maps_ = external_string_maps;
defer optional_peer_dep_names_ = optional_peer_dep_names;
var string_builder = String.Builder{
.string_pool = string_pool,
@@ -1093,6 +1106,8 @@ pub const PackageManifest = struct {
}
}
var peer_dependency_len: usize = 0;
inline for (dependency_groups) |pair| {
if (prop.value.?.asProperty(comptime pair.prop)) |versioned_deps| {
const items = versioned_deps.expr.data.e_object.properties;
@@ -1104,7 +1119,30 @@ pub const PackageManifest = struct {
var name_hasher = std.hash.Wyhash.init(0);
var version_hasher = std.hash.Wyhash.init(0);
const is_peer = comptime strings.eqlComptime(pair.prop, "peerDependencies");
if (comptime is_peer) {
optional_peer_dep_names.clearRetainingCapacity();
if (prop.value.?.asProperty("peerDependenciesMeta")) |meta| {
if (meta.expr.data == .e_object) {
const meta_props = meta.expr.data.e_object.properties;
try optional_peer_dep_names.ensureUnusedCapacity(meta_props.len);
for (meta_props) |meta_prop| {
if (meta_prop.value.?.asProperty("optional")) |optional| {
if (optional.expr.data != .e_boolean or !optional.expr.data.e_boolean.value) {
continue;
}
optional_peer_dep_names.appendAssumeCapacity(String.Builder.stringHash(meta_prop.key.?.asString(allocator) orelse unreachable));
}
}
}
}
}
var i: usize = 0;
for (items) |item| {
const name_str = item.key.?.asString(allocator) orelse if (comptime Environment.allow_assert) unreachable else continue;
const version_str = item.value.?.asString(allocator) orelse if (comptime Environment.allow_assert) unreachable else continue;
@@ -1112,10 +1150,36 @@ pub const PackageManifest = struct {
this_names[i] = string_builder.append(ExternalString, name_str);
this_versions[i] = string_builder.append(ExternalString, version_str);
const names_hash_bytes = @bitCast([8]u8, this_names[i].hash);
name_hasher.update(&names_hash_bytes);
const versions_hash_bytes = @bitCast([8]u8, this_versions[i].hash);
version_hasher.update(&versions_hash_bytes);
if (comptime is_peer) {
if (std.mem.indexOfScalar(u64, optional_peer_dep_names.items, this_names[i].hash) != null) {
// For optional peer dependencies, we store a length instead of a whole separate array
// To make that work, we have to move optional peer dependencies to the front of the array
//
if (peer_dependency_len != i) {
const current_name = this_names[i];
this_names[i] = this_names[peer_dependency_len];
this_names[peer_dependency_len] = current_name;
const current_version = this_versions[i];
this_versions[i] = this_versions[peer_dependency_len];
this_versions[peer_dependency_len] = current_version;
peer_dependency_len += 1;
}
}
if (optional_peer_dep_names.items.len == 0) {
const names_hash_bytes = @bitCast([8]u8, this_names[i].hash);
name_hasher.update(&names_hash_bytes);
const versions_hash_bytes = @bitCast([8]u8, this_versions[i].hash);
version_hasher.update(&versions_hash_bytes);
}
} else {
const names_hash_bytes = @bitCast([8]u8, this_names[i].hash);
name_hasher.update(&names_hash_bytes);
const versions_hash_bytes = @bitCast([8]u8, this_versions[i].hash);
version_hasher.update(&versions_hash_bytes);
}
i += 1;
}
@@ -1126,6 +1190,19 @@ pub const PackageManifest = struct {
var version_list = ExternalStringList.init(version_extern_strings, this_versions);
if (count > 0) {
if (comptime is_peer) {
if (optional_peer_dep_names.items.len > 0) {
for (this_names[0..count]) |byte_str| {
const bytes = @bitCast([8]u8, byte_str.hash);
name_hasher.update(&bytes);
}
for (this_versions[0..count]) |byte_str| {
const bytes = @bitCast([8]u8, byte_str.hash);
version_hasher.update(&bytes);
}
}
}
const name_map_hash = name_hasher.final();
const version_map_hash = version_hasher.final();
@@ -1153,6 +1230,10 @@ pub const PackageManifest = struct {
.value = version_list,
};
if (comptime is_peer) {
package_version.optional_peer_dependencies_len = @truncate(u32, peer_dependency_len);
}
if (comptime Environment.allow_assert) {
const dependencies_list = @field(package_version, pair.field);

View File

@@ -107,10 +107,28 @@ pub const String = extern struct {
}
}
/// Compact (offset, length) view describing where a sub-slice lives
/// inside an external string buffer.
pub const Pointer = extern struct {
    off: u32 = 0,
    len: u32 = 0,

    /// Build a Pointer locating `in` within `buf`.
    /// Asserts that `in` is entirely contained inside `buf`.
    pub inline fn init(
        buf: string,
        in: string,
    ) Pointer {
        const buf_start = @ptrToInt(buf.ptr);
        const in_start = @ptrToInt(in.ptr);
        // `in` must begin at or after `buf` and end at or before buf's end.
        std.debug.assert(buf_start <= in_start and ((in_start + in.len) <= (buf_start + buf.len)));
        return .{
            .off = @truncate(u32, in_start - buf_start),
            .len = @truncate(u32, in.len),
        };
    }
};
pub inline fn ptr(this: String) Pointer {
return @bitCast(Pointer, @as(u64, @truncate(u63, @bitCast(u64, this))));
}
// String must be a pointer because we reference it as a slice. It will become a dead pointer if it is copied.
pub fn slice(this: *const String, buf: string) string {
switch (this.bytes[max_inline_len - 1] & 128) {
0 => {
@@ -256,23 +274,6 @@ pub const String = extern struct {
}
};
/// Compact (offset, length) view into an external string buffer.
pub const Pointer = extern struct {
    off: u32 = 0,
    len: u32 = 0,

    /// Build a Pointer locating `in` within `buf`.
    /// Asserts that `in` is entirely contained inside `buf`.
    pub inline fn init(
        buf: string,
        in: string,
    ) Pointer {
        std.debug.assert(@ptrToInt(buf.ptr) <= @ptrToInt(in.ptr) and ((@ptrToInt(in.ptr) + in.len) <= (@ptrToInt(buf.ptr) + buf.len)));
        return Pointer{
            .off = @truncate(u32, @ptrToInt(in.ptr) - @ptrToInt(buf.ptr)),
            .len = @truncate(u32, in.len),
        };
    }
};
comptime {
if (@sizeOf(String) != @sizeOf(Pointer)) {
@compileError("String types must be the same size");