diff --git a/cmake/sources/ZigSources.txt b/cmake/sources/ZigSources.txt
index 1060e7fac6..2261399b49 100644
--- a/cmake/sources/ZigSources.txt
+++ b/cmake/sources/ZigSources.txt
@@ -562,6 +562,11 @@ src/install/hoisted_install.zig
 src/install/install_binding.zig
 src/install/install.zig
 src/install/integrity.zig
+src/install/isolated_install.zig
+src/install/isolated_install/Hardlinker.zig
+src/install/isolated_install/Installer.zig
+src/install/isolated_install/Store.zig
+src/install/isolated_install/Symlinker.zig
 src/install/lifecycle_script_runner.zig
 src/install/lockfile.zig
 src/install/lockfile/Buffers.zig
@@ -644,6 +649,10 @@ src/options.zig
 src/output.zig
 src/OutputFile.zig
 src/patch.zig
+src/paths.zig
+src/paths/EnvPath.zig
+src/paths/path_buffer_pool.zig
+src/paths/Path.zig
 src/perf.zig
 src/pool.zig
 src/Progress.zig
diff --git a/package.json b/package.json
index b6eb9de202..c5bcfc55a8 100644
--- a/package.json
+++ b/package.json
@@ -74,7 +74,7 @@
     "clang-tidy:diff": "bun run analysis --target clang-tidy-diff",
     "zig-format": "bun run analysis:no-llvm --target zig-format",
     "zig-format:check": "bun run analysis:no-llvm --target zig-format-check",
-    "prettier": "bunx prettier@latest --plugin=prettier-plugin-organize-imports --config .prettierrc --write scripts packages src docs 'test/**/*.{test,spec}.{ts,tsx,js,jsx,mts,mjs,cjs,cts}' '!test/**/*fixture*.*'",
+    "prettier": "bunx --bun prettier@latest --plugin=prettier-plugin-organize-imports --config .prettierrc --write scripts packages src docs 'test/**/*.{test,spec}.{ts,tsx,js,jsx,mts,mjs,cjs,cts}' '!test/**/*fixture*.*'",
     "node:test": "node ./scripts/runner.node.mjs --quiet --exec-path=$npm_execpath --node-tests ",
     "node:test:cp": "bun ./scripts/fetch-node-test.ts ",
     "clean:zig": "rm -rf build/debug/cache/zig build/debug/CMakeCache.txt 'build/debug/*.o' .zig-cache zig-out || true",
diff --git a/src/Watcher.zig b/src/Watcher.zig
index c810af324a..6ebd50faf7 100644
--- a/src/Watcher.zig
+++ b/src/Watcher.zig
@@ -463,9 +463,9 @@ fn appendDirectoryAssumeCapacity(
             null,
         );
     } else if (Environment.isLinux) {
-        const buf = bun.PathBufferPool.get();
+        const buf = bun.path_buffer_pool.get();
         defer {
-            bun.PathBufferPool.put(buf);
+            bun.path_buffer_pool.put(buf);
         }
         const path: [:0]const u8 = if (clone_file_path and file_path_.len > 0 and file_path_[file_path_.len - 1] == 0)
             file_path_[0 .. file_path_.len - 1 :0]
diff --git a/src/analytics/analytics_thread.zig b/src/analytics/analytics_thread.zig
index c6a4e58b94..2a70bea13c 100644
--- a/src/analytics/analytics_thread.zig
+++ b/src/analytics/analytics_thread.zig
@@ -93,6 +93,8 @@ pub const Features = struct {
    pub var loaders: usize = 0;
    pub var lockfile_migration_from_package_lock: usize = 0;
    pub var text_lockfile: usize = 0;
+    pub var isolated_bun_install: usize = 0;
+    pub var hoisted_bun_install: usize = 0;
    pub var macros: usize = 0;
    pub var no_avx2: usize = 0;
    pub var no_avx: usize = 0;
diff --git a/src/bake/DevServer.zig b/src/bake/DevServer.zig
index 9bdc249913..71acfebac7 100644
--- a/src/bake/DevServer.zig
+++ b/src/bake/DevServer.zig
@@ -642,8 +642,8 @@ pub fn init(options: Options) bun.JSOOM!*DevServer {

     errdefer types.deinit(allocator);
     for (options.framework.file_system_router_types, 0..) |fsr, i| {
-        const buf = bun.PathBufferPool.get();
-        defer bun.PathBufferPool.put(buf);
+        const buf = bun.path_buffer_pool.get();
+        defer bun.path_buffer_pool.put(buf);
         const joined_root = bun.path.joinAbsStringBuf(dev.root, buf, &.{fsr.root}, .auto);
         const entry = dev.server_transpiler.resolver.readDirInfoIgnoreError(joined_root) orelse
             continue;
@@ -5180,8 +5180,8 @@ pub fn IncrementalGraph(side: bake.Side) type {
         dev.relative_path_buf_lock.lock();
         defer dev.relative_path_buf_lock.unlock();

-        const buf = bun.PathBufferPool.get();
-        defer bun.PathBufferPool.put(buf);
+        const buf = bun.path_buffer_pool.get();
+        defer bun.path_buffer_pool.put(buf);

         var file_paths = try ArrayListUnmanaged([]const u8).initCapacity(gpa, g.current_chunk_parts.items.len);
         errdefer file_paths.deinit(gpa);
@@ -5464,8 +5464,8 @@ const DirectoryWatchStore = struct {
            => bun.debugAssert(false),
        }

-        const buf = bun.PathBufferPool.get();
-        defer bun.PathBufferPool.put(buf);
+        const buf = bun.path_buffer_pool.get();
+        defer bun.path_buffer_pool.put(buf);
        const joined = bun.path.joinAbsStringBuf(bun.path.dirname(import_source, .auto), buf, &.{specifier}, .auto);
        const dir = bun.path.dirname(joined, .auto);
@@ -5894,8 +5894,8 @@ pub const SerializedFailure = struct {

 // For debugging, it is helpful to be able to see bundles.
 fn dumpBundle(dump_dir: std.fs.Dir, graph: bake.Graph, rel_path: []const u8, chunk: []const u8, wrap: bool) !void {
-    const buf = bun.PathBufferPool.get();
-    defer bun.PathBufferPool.put(buf);
+    const buf = bun.path_buffer_pool.get();
+    defer bun.path_buffer_pool.put(buf);
     const name = bun.path.joinAbsStringBuf("/", buf, &.{
         @tagName(graph),
         rel_path,
@@ -7648,8 +7648,8 @@ pub const SourceMapStore = struct {
         dev.relative_path_buf_lock.lock();
         defer dev.relative_path_buf_lock.unlock();
-        const buf = bun.PathBufferPool.get();
-        defer bun.PathBufferPool.put(buf);
+        const buf = bun.path_buffer_pool.get();
+        defer bun.path_buffer_pool.put(buf);

         for (paths) |native_file_path| {
             try source_map_strings.appendSlice(",");
diff --git a/src/bun.js/api/BunObject.zig b/src/bun.js/api/BunObject.zig
index a21108959e..6f1d6e5897 100644
--- a/src/bun.js/api/BunObject.zig
+++ b/src/bun.js/api/BunObject.zig
@@ -271,8 +271,8 @@ pub fn braces(global: *JSC.JSGlobalObject, brace_str: bun.String, opts: gen.Brac

 pub fn which(globalThis: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSError!JSC.JSValue {
     const arguments_ = callframe.arguments_old(2);
-    const path_buf = bun.PathBufferPool.get();
-    defer bun.PathBufferPool.put(path_buf);
+    const path_buf = bun.path_buffer_pool.get();
+    defer bun.path_buffer_pool.put(path_buf);
     var arguments = JSC.CallFrame.ArgumentsSlice.init(globalThis.bunVM(), arguments_.slice());
     defer arguments.deinit();
     const path_arg = arguments.nextEat() orelse {
diff --git a/src/bun.js/api/bun/process.zig b/src/bun.js/api/bun/process.zig
index 8a96256605..7812fc120f 100644
--- a/src/bun.js/api/bun/process.zig
+++ b/src/bun.js/api/bun/process.zig
@@ -132,7 +132,7 @@ pub const PidFDType = if (Environment.isLinux) fd_t else u0;

 pub const Process = struct {
     const Self = @This();

-    const RefCount = bun.ptr.RefCount(@This(), "ref_count", deinit, .{});
+    const RefCount = bun.ptr.ThreadSafeRefCount(@This(), "ref_count", deinit, .{});
     pub const ref = RefCount.ref;
     pub const deref = RefCount.deref;
diff --git a/src/bun.js/api/server.zig b/src/bun.js/api/server.zig
index 60f6644d6f..5e3d78dbb5 100644
--- a/src/bun.js/api/server.zig
+++ b/src/bun.js/api/server.zig
@@ -2462,8 +2462,8 @@ pub fn NewServer(protocol_enum: enum { http, https }, development_kind: enum { d

             // So we first use a hash of the main field:
             const first_hash_segment: [8]u8 = brk: {
-                const buffer = bun.PathBufferPool.get();
-                defer bun.PathBufferPool.put(buffer);
+                const buffer = bun.path_buffer_pool.get();
+                defer bun.path_buffer_pool.put(buffer);
                 const main = JSC.VirtualMachine.get().main;
                 const len = @min(main.len, buffer.len);
                 break :brk @bitCast(bun.hash(bun.strings.copyLowercase(main[0..len], buffer[0..len])));
@@ -2471,8 +2471,8 @@ pub fn NewServer(protocol_enum: enum { http, https }, development_kind: enum { d

             // And then we use a hash of their project root directory:
             const second_hash_segment: [8]u8 = brk: {
-                const buffer = bun.PathBufferPool.get();
-                defer bun.PathBufferPool.put(buffer);
+                const buffer = bun.path_buffer_pool.get();
+                defer bun.path_buffer_pool.put(buffer);
                 const root = this.dev_server.?.root;
                 const len = @min(root.len, buffer.len);
                 break :brk @bitCast(bun.hash(bun.strings.copyLowercase(root[0..len], buffer[0..len])));
diff --git a/src/bun.js/node/dir_iterator.zig b/src/bun.js/node/dir_iterator.zig
index c9ba26706b..d6a23f3fe1 100644
--- a/src/bun.js/node/dir_iterator.zig
+++ b/src/bun.js/node/dir_iterator.zig
@@ -18,6 +18,7 @@ const IteratorError = error{ AccessDenied, SystemResources } || posix.Unexpected
 const mem = std.mem;
 const strings = bun.strings;
 const Maybe = JSC.Maybe;
+const FD = bun.FD;

 pub const IteratorResult = struct {
     name: PathString,
@@ -50,7 +51,7 @@ pub const IteratorW = NewIterator(true);
 pub fn NewIterator(comptime use_windows_ospath: bool) type {
     return switch (builtin.os.tag) {
         .macos, .ios, .freebsd, .netbsd, .dragonfly, .openbsd, .solaris => struct {
-            dir: Dir,
+            dir: FD,
             seek: i64,
             buf: [8192]u8 align(@alignOf(std.posix.system.dirent)),
             index: usize,
@@ -61,10 +62,6 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type {

             pub const Error = IteratorError;

-            fn fd(self: *Self) posix.fd_t {
-                return self.dir.fd;
-            }
-
             /// Memory such as file names referenced in this returned entry becomes invalid
             /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized.
             pub const next = switch (builtin.os.tag) {
@@ -94,7 +91,7 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type {
                    self.buf[self.buf.len - 4 ..][0..4].* = .{ 0, 0, 0, 0 };

                    const rc = posix.system.__getdirentries64(
-                        self.dir.fd,
+                        self.dir.cast(),
                        &self.buf,
                        self.buf.len,
                        &self.seek,
@@ -146,7 +143,7 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type {
            }
        },
        .linux => struct {
-            dir: Dir,
+            dir: FD,
            // The if guard is solely there to prevent compile errors from missing `linux.dirent64`
            // definition when compiling for other OSes. It doesn't do anything when compiling for Linux.
            buf: [8192]u8 align(if (builtin.os.tag != .linux) 1 else @alignOf(linux.dirent64)),
@@ -158,16 +155,12 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type {

             pub const Error = IteratorError;

-            fn fd(self: *Self) posix.fd_t {
-                return self.dir.fd;
-            }
-
             /// Memory such as file names referenced in this returned entry becomes invalid
             /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized.
             pub fn next(self: *Self) Result {
                 start_over: while (true) {
                     if (self.index >= self.end_index) {
-                        const rc = linux.getdents64(self.dir.fd, &self.buf, self.buf.len);
+                        const rc = linux.getdents64(self.dir.cast(), &self.buf, self.buf.len);
                         if (Result.errnoSys(rc, .getdents64)) |err| return err;
                         if (rc == 0) return .{ .result = null };
                         self.index = 0;
@@ -208,7 +201,7 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type {
            // While the official api docs guarantee FILE_BOTH_DIR_INFORMATION to be aligned properly
            // this may not always be the case (e.g. due to faulty VM/Sandboxing tools)
            const FILE_DIRECTORY_INFORMATION_PTR = *align(2) FILE_DIRECTORY_INFORMATION;
-            dir: Dir,
+            dir: FD,

            // This structure must be aligned on a LONGLONG (8-byte) boundary.
            // If a buffer contains two or more of these structures, the
@@ -227,10 +220,6 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type {

             const ResultT = if (use_windows_ospath) ResultW else Result;

-            fn fd(self: *Self) posix.fd_t {
-                return self.dir.fd;
-            }
-
             /// Memory such as file names referenced in this returned entry becomes invalid
             /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized.
             pub fn next(self: *Self) ResultT {
@@ -244,7 +233,7 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type {
                    }

                    const rc = w.ntdll.NtQueryDirectoryFile(
-                        self.dir.fd,
+                        self.dir.cast(),
                        null,
                        null,
                        null,
@@ -259,14 +248,14 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type {
                    self.first = false;

                    if (io.Information == 0) {
-                        bun.sys.syslog("NtQueryDirectoryFile({}) = 0", .{bun.FD.fromStdDir(self.dir)});
+                        bun.sys.syslog("NtQueryDirectoryFile({}) = 0", .{self.dir});
                        return .{ .result = null };
                    }

                    self.index = 0;
                    self.end_index = io.Information;
                    // If the handle is not a directory, we'll get STATUS_INVALID_PARAMETER.
                    if (rc == .INVALID_PARAMETER) {
-                        bun.sys.syslog("NtQueryDirectoryFile({}) = {s}", .{ bun.FD.fromStdDir(self.dir), @tagName(rc) });
+                        bun.sys.syslog("NtQueryDirectoryFile({}) = {s}", .{ self.dir, @tagName(rc) });
                        return .{
                            .err = .{
                                .errno = @intFromEnum(bun.sys.SystemErrno.ENOTDIR),
@@ -276,13 +265,13 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type {
                    }

                    if (rc == .NO_MORE_FILES) {
-                        bun.sys.syslog("NtQueryDirectoryFile({}) = {s}", .{ bun.FD.fromStdDir(self.dir), @tagName(rc) });
+                        bun.sys.syslog("NtQueryDirectoryFile({}) = {s}", .{ self.dir, @tagName(rc) });
                        self.end_index = self.index;
                        return .{ .result = null };
                    }

                    if (rc != .SUCCESS) {
-                        bun.sys.syslog("NtQueryDirectoryFile({}) = {s}", .{ bun.FD.fromStdDir(self.dir), @tagName(rc) });
+                        bun.sys.syslog("NtQueryDirectoryFile({}) = {s}", .{ self.dir, @tagName(rc) });
                        if ((bun.windows.Win32Error.fromNTStatus(rc).toSystemErrno())) |errno| {
                            return .{
@@ -301,7 +290,7 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type {
                        };
                    }

-                    bun.sys.syslog("NtQueryDirectoryFile({}) = {d}", .{ bun.FD.fromStdDir(self.dir), self.end_index });
+                    bun.sys.syslog("NtQueryDirectoryFile({}) = {d}", .{ self.dir, self.end_index });
                }

                const dir_info: FILE_DIRECTORY_INFORMATION_PTR = @ptrCast(@alignCast(&self.buf[self.index]));
@@ -356,7 +345,7 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type {
            }
        },
        .wasi => struct {
-            dir: Dir,
+            dir: FD,
            buf: [8192]u8, // TODO align(@alignOf(os.wasi.dirent_t)),
            cookie: u64,
            index: usize,
@@ -366,10 +355,6 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type {

             pub const Error = IteratorError;

-            fn fd(self: *Self) posix.fd_t {
-                return self.dir.fd;
-            }
-
             /// Memory such as file names referenced in this returned entry becomes invalid
             /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized.
             pub fn next(self: *Self) Result {
@@ -380,7 +365,7 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type {
                start_over: while (true) {
                    if (self.index >= self.end_index) {
                        var bufused: usize = undefined;
-                        switch (w.fd_readdir(self.fd, &self.buf, self.buf.len, self.cookie, &bufused)) {
+                        switch (w.fd_readdir(self.dir.cast(), &self.buf, self.buf.len, self.cookie, &bufused)) {
                            .SUCCESS => {},
                            .BADF => unreachable, // Dir is invalid or was opened without iteration ability
                            .FAULT => unreachable,
@@ -440,13 +425,9 @@ pub fn NewWrappedIterator(comptime path_type: PathType) type {
            return self.iter.next();
        }

-        pub inline fn fd(self: *Self) posix.fd_t {
-            return self.iter.fd();
-        }
-
        pub const Error = IteratorError;

-        pub fn init(dir: Dir) Self {
+        pub fn init(dir: FD) Self {
            return Self{
                .iter = switch (builtin.os.tag) {
                    .macos,
@@ -494,6 +475,6 @@ pub fn NewWrappedIterator(comptime path_type: PathType) type {
 pub const WrappedIterator = NewWrappedIterator(.u8);
 pub const WrappedIteratorW = NewWrappedIterator(.u16);

-pub fn iterate(self: Dir, comptime path_type: PathType) NewWrappedIterator(path_type) {
+pub fn iterate(self: FD, comptime path_type: PathType) NewWrappedIterator(path_type) {
     return NewWrappedIterator(path_type).init(self);
 }
diff --git a/src/bun.js/node/node_fs.zig b/src/bun.js/node/node_fs.zig
index bb06f08248..324a45e3de 100644
--- a/src/bun.js/node/node_fs.zig
+++ b/src/bun.js/node/node_fs.zig
@@ -269,7 +269,7 @@ pub const Async = struct {
                this.result = @field(NodeFS, "uv_" ++ @tagName(FunctionEnum))(&node_fs, this.args, @intFromEnum(req.result));

                if (this.result == .err) {
-                    this.result.err = this.result.err.clone(bun.default_allocator) catch bun.outOfMemory();
+                    this.result.err = this.result.err.clone(bun.default_allocator);
                    std.mem.doNotOptimizeAway(&node_fs);
                }
@@ -283,7 +283,7 @@
                this.result = @field(NodeFS, "uv_" ++ @tagName(FunctionEnum))(&node_fs, this.args, req, @intFromEnum(req.result));

                if (this.result == .err) {
-                    this.result.err = this.result.err.clone(bun.default_allocator) catch bun.outOfMemory();
+                    this.result.err = this.result.err.clone(bun.default_allocator);
                    std.mem.doNotOptimizeAway(&node_fs);
                }
@@ -382,7 +382,7 @@
            this.result = function(&node_fs, this.args, .@"async");

            if (this.result == .err) {
-                this.result.err = this.result.err.clone(bun.default_allocator) catch bun.outOfMemory();
+                this.result.err = this.result.err.clone(bun.default_allocator);
                std.mem.doNotOptimizeAway(&node_fs);
            }
@@ -642,7 +642,7 @@ pub fn NewAsyncCpTask(comptime is_shell: bool) type {
            this.result = result;

            if (this.result == .err) {
-                this.result.err = this.result.err.clone(bun.default_allocator) catch bun.outOfMemory();
+                this.result.err = this.result.err.clone(bun.default_allocator);
            }

            if (this.evtloop == .js) {
@@ -859,8 +859,7 @@ pub fn NewAsyncCpTask(comptime is_shell: bool) type {
                },
            }

-            const dir = fd.stdDir();
-            var iterator = DirIterator.iterate(dir, if (Environment.isWindows) .u16 else .u8);
+            var iterator = DirIterator.iterate(fd, if (Environment.isWindows) .u16 else .u8);
            var entry = iterator.next();
            while (switch (entry) {
                .err => |err| {
@@ -3722,8 +3721,8 @@
        }

        if (comptime Environment.isWindows) {
-            const dest_buf = bun.OSPathBufferPool.get();
-            defer bun.OSPathBufferPool.put(dest_buf);
+            const dest_buf = bun.os_path_buffer_pool.get();
+            defer bun.os_path_buffer_pool.put(dest_buf);
            const src = bun.strings.toKernel32Path(bun.reinterpretSlice(u16, &fs.sync_error_buf), args.src.slice());
            const dest = bun.strings.toKernel32Path(dest_buf, args.dest.slice());
@@ -3913,8 +3912,8 @@
    }

    pub fn mkdirRecursiveImpl(this: *NodeFS, args: Arguments.Mkdir, comptime Ctx: type, ctx: Ctx) Maybe(Return.Mkdir) {
-        const buf = bun.PathBufferPool.get();
-        defer bun.PathBufferPool.put(buf);
+        const buf = bun.path_buffer_pool.get();
+        defer bun.path_buffer_pool.put(buf);
        const path = args.path.osPathKernel32(buf);

        return switch (args.always_return_none) {
@@ -4425,7 +4424,6 @@
        comptime ExpectedType: type,
        entries: *std.ArrayList(ExpectedType),
    ) Maybe(void) {
-        const dir = fd.stdDir();
        const is_u16 = comptime Environment.isWindows and (ExpectedType == bun.String or ExpectedType == bun.JSC.Node.Dirent);

        var dirent_path: bun.String = bun.String.dead;
@@ -4433,15 +4431,15 @@
            dirent_path.deref();
        }

-        var iterator = DirIterator.iterate(dir, comptime if (is_u16) .u16 else .u8);
+        var iterator = DirIterator.iterate(fd, comptime if (is_u16) .u16 else .u8);
        var entry = iterator.next();

        const re_encoding_buffer: ?*bun.PathBuffer = if (is_u16 and args.encoding != .utf8)
-            bun.PathBufferPool.get()
+            bun.path_buffer_pool.get()
        else
            null;
        defer if (is_u16 and args.encoding != .utf8)
-            bun.PathBufferPool.put(re_encoding_buffer.?);
+            bun.path_buffer_pool.put(re_encoding_buffer.?);

        while (switch (entry) {
            .err => |err| {
@@ -4573,7 +4571,7 @@
            }
        }

-        var iterator = DirIterator.iterate(fd.stdDir(), .u8);
+        var iterator = DirIterator.iterate(fd, .u8);
        var entry = iterator.next();
        var dirent_path_prev: bun.String = bun.String.empty;
        defer {
@@ -4727,7 +4725,7 @@
            }
        }

-        var iterator = DirIterator.iterate(fd.stdDir(), .u8);
+        var iterator = DirIterator.iterate(fd, .u8);
        var entry = iterator.next();
        var dirent_path_prev: bun.String = bun.String.dead;
        defer {
@@ -5973,8 +5971,8 @@

    pub fn osPathIntoSyncErrorBufOverlap(this: *NodeFS, slice: anytype) []const u8 {
        if (Environment.isWindows) {
-            const tmp = bun.OSPathBufferPool.get();
-            defer bun.OSPathBufferPool.put(tmp);
+            const tmp = bun.os_path_buffer_pool.get();
+            defer bun.os_path_buffer_pool.put(tmp);
            @memcpy(tmp[0..slice.len], slice);
            return bun.strings.fromWPath(&this.sync_error_buf, tmp[0..slice.len]);
        }
@@ -6088,10 +6086,7 @@
            .result => {},
        }

-        var iterator = iterator: {
-            const dir = fd.stdDir();
-            break :iterator DirIterator.iterate(dir, if (Environment.isWindows) .u16 else .u8);
-        };
+        var iterator = DirIterator.iterate(fd, if (Environment.isWindows) .u16 else .u8);
        var entry = iterator.next();
        while (switch (entry) {
            .err => |err| {
@@ -6483,8 +6478,8 @@
            .err => |err| return .{ .err = err },
            .result => |src_fd| src_fd,
        };
-        const wbuf = bun.OSPathBufferPool.get();
-        defer bun.OSPathBufferPool.put(wbuf);
+        const wbuf = bun.os_path_buffer_pool.get();
+        defer bun.os_path_buffer_pool.put(wbuf);
        const len = bun.windows.GetFinalPathNameByHandleW(handle.cast(), wbuf, wbuf.len, 0);
        if (len == 0) {
            return ret.errnoSysP(0, .copyfile, this.osPathIntoSyncErrorBuf(dest)) orelse dst_enoent_maybe;
diff --git a/src/bun.js/node/node_fs_stat_watcher.zig b/src/bun.js/node/node_fs_stat_watcher.zig
index 281a11b964..6052c180be 100644
--- a/src/bun.js/node/node_fs_stat_watcher.zig
+++ b/src/bun.js/node/node_fs_stat_watcher.zig
@@ -465,8 +465,8 @@ pub const StatWatcher = struct {
    pub fn init(args: Arguments) !*StatWatcher {
        log("init", .{});

-        const buf = bun.PathBufferPool.get();
-        defer bun.PathBufferPool.put(buf);
+        const buf = bun.path_buffer_pool.get();
+        defer bun.path_buffer_pool.put(buf);
        var slice = args.path.slice();
        if (bun.strings.startsWith(slice, "file://")) {
            slice = slice[6..];
diff --git a/src/bun.js/node/node_fs_watcher.zig b/src/bun.js/node/node_fs_watcher.zig
index f886f8ea56..80f2e87820 100644
--- a/src/bun.js/node/node_fs_watcher.zig
+++ b/src/bun.js/node/node_fs_watcher.zig
@@ -167,7 +167,7 @@ pub const FSWatcher = struct {
        pub fn dupe(event: Event) !Event {
            return switch (event) {
                inline .rename, .change => |path, t| @unionInit(Event, @tagName(t), try bun.default_allocator.dupe(u8, path)),
-                .@"error" => |err| .{ .@"error" = try err.clone(bun.default_allocator) },
+                .@"error" => |err| .{ .@"error" = err.clone(bun.default_allocator) },
                inline else => |value, t| @unionInit(Event, @tagName(t), value),
            };
        }
@@ -643,11 +643,11 @@ pub const FSWatcher = struct {
    }

    pub fn init(args: Arguments) bun.JSC.Maybe(*FSWatcher) {
-        const joined_buf = bun.PathBufferPool.get();
-        defer bun.PathBufferPool.put(joined_buf);
+        const joined_buf = bun.path_buffer_pool.get();
+        defer bun.path_buffer_pool.put(joined_buf);
        const file_path: [:0]const u8 = brk: {
-            const buf = bun.PathBufferPool.get();
-            defer bun.PathBufferPool.put(buf);
+            const buf = bun.path_buffer_pool.get();
+            defer bun.path_buffer_pool.put(buf);
            var slice = args.path.slice();
            if (bun.strings.startsWith(slice, "file://")) {
                slice = slice[6..];
diff --git a/src/bun.js/node/types.zig b/src/bun.js/node/types.zig
index f884d5c411..4ff5a6c332 100644
--- a/src/bun.js/node/types.zig
+++ b/src/bun.js/node/types.zig
@@ -569,8 +569,8 @@ pub const PathLike = union(enum) {
            if (std.fs.path.isAbsolute(sliced)) {
                if (sliced.len > 2 and bun.path.isDriveLetter(sliced[0]) and sliced[1] == ':' and bun.path.isSepAny(sliced[2])) {
                    // Add the long path syntax. This affects most of node:fs
-                    const drive_resolve_buf = bun.PathBufferPool.get();
-                    defer bun.PathBufferPool.put(drive_resolve_buf);
+                    const drive_resolve_buf = bun.path_buffer_pool.get();
+                    defer bun.path_buffer_pool.put(drive_resolve_buf);
                    const rest = path_handler.PosixToWinNormalizer.resolveCWDWithExternalBufZ(drive_resolve_buf, sliced) catch @panic("Error while resolving path.");
                    buf[0..4].* = bun.windows.long_path_prefix_u8;
                    // When long path syntax is used, the entire string should be normalized
@@ -619,8 +619,8 @@ pub const PathLike = union(enum) {
    pub fn osPathKernel32(this: PathLike, buf: *bun.PathBuffer) callconv(bun.callconv_inline) bun.OSPathSliceZ {
        if (comptime Environment.isWindows) {
            const s = this.slice();
-            const b = bun.PathBufferPool.get();
-            defer bun.PathBufferPool.put(b);
+            const b = bun.path_buffer_pool.get();
+            defer bun.path_buffer_pool.put(b);
            if (s.len > 0 and bun.path.isSepAny(s[0])) {
                const resolve = path_handler.PosixToWinNormalizer.resolveCWDWithExternalBuf(buf, s) catch @panic("Error while resolving path.");
                const normal = path_handler.normalizeBuf(resolve, b, .windows);
diff --git a/src/bun.zig b/src/bun.zig
index b875783824..a39e0cb9b9 100644
--- a/src/bun.zig
+++ b/src/bun.zig
@@ -254,13 +254,22 @@ pub const stringZ = StringTypes.stringZ;
 pub const string = StringTypes.string;
 pub const CodePoint = StringTypes.CodePoint;

-pub const MAX_PATH_BYTES: usize = if (Environment.isWasm) 1024 else std.fs.max_path_bytes;
-pub const PathBuffer = [MAX_PATH_BYTES]u8;
-pub const WPathBuffer = [std.os.windows.PATH_MAX_WIDE]u16;
-pub const OSPathChar = if (Environment.isWindows) u16 else u8;
-pub const OSPathSliceZ = [:0]const OSPathChar;
-pub const OSPathSlice = []const OSPathChar;
-pub const OSPathBuffer = if (Environment.isWindows) WPathBuffer else PathBuffer;
+pub const paths = @import("./paths.zig");
+pub const MAX_PATH_BYTES = paths.MAX_PATH_BYTES;
+pub const PathBuffer = paths.PathBuffer;
+pub const PATH_MAX_WIDE = paths.PATH_MAX_WIDE;
+pub const WPathBuffer = paths.WPathBuffer;
+pub const OSPathChar = paths.OSPathChar;
+pub const OSPathSliceZ = paths.OSPathSliceZ;
+pub const OSPathSlice = paths.OSPathSlice;
+pub const OSPathBuffer = paths.OSPathBuffer;
+pub const Path = paths.Path;
+pub const AbsPath = paths.AbsPath;
+pub const RelPath = paths.RelPath;
+pub const EnvPath = paths.EnvPath;
+pub const path_buffer_pool = paths.path_buffer_pool;
+pub const w_path_buffer_pool = paths.w_path_buffer_pool;
+pub const os_path_buffer_pool = paths.os_path_buffer_pool;

 pub inline fn cast(comptime To: type, value: anytype) To {
     if (@typeInfo(@TypeOf(value)) == .int) {
@@ -752,14 +761,18 @@ pub fn openDirA(dir: std.fs.Dir, path_: []const u8) !std.fs.Dir {
    }
 }

-pub fn openDirForIteration(dir: std.fs.Dir, path_: []const u8) !std.fs.Dir {
+pub fn openDirForIteration(dir: FD, path_: []const u8) sys.Maybe(FD) {
    if (comptime Environment.isWindows) {
-        const res = try sys.openDirAtWindowsA(.fromStdDir(dir), path_, .{ .iterable = true, .can_rename_or_delete = false, .read_only = true }).unwrap();
-        return res.stdDir();
-    } else {
-        const fd = try sys.openatA(.fromStdDir(dir), path_, O.DIRECTORY | O.CLOEXEC | O.RDONLY, 0).unwrap();
-        return fd.stdDir();
+        return sys.openDirAtWindowsA(dir, path_, .{ .iterable = true, .can_rename_or_delete = false, .read_only = true });
    }
+    return sys.openatA(dir, path_, O.DIRECTORY | O.CLOEXEC | O.RDONLY, 0);
+}
+
+pub fn openDirForIterationOSPath(dir: FD, path_: []const OSPathChar) sys.Maybe(FD) {
+    if (comptime Environment.isWindows) {
+        return sys.openDirAtWindows(dir, path_, .{ .iterable = true, .can_rename_or_delete = false, .read_only = true });
+    }
+    return sys.openatA(dir, path_, O.DIRECTORY | O.CLOEXEC | O.RDONLY, 0);
 }

 pub fn openDirAbsolute(path_: []const u8) !std.fs.Dir {
@@ -2712,8 +2725,8 @@ pub fn exitThread() noreturn {
 pub fn deleteAllPoolsForThreadExit() void {
     const pools_to_delete = .{
         JSC.WebCore.ByteListPool,
-        bun.WPathBufferPool,
-        bun.PathBufferPool,
+        bun.w_path_buffer_pool,
+        bun.path_buffer_pool,
         bun.JSC.ConsoleObject.Formatter.Visited.Pool,
         bun.js_parser.StringVoidMap.Pool,
     };
@@ -2766,7 +2779,7 @@ pub fn errnoToZigErr(err: anytype) anyerror {

 pub const brotli = @import("./brotli.zig");

-pub fn iterateDir(dir: std.fs.Dir) DirIterator.Iterator {
+pub fn iterateDir(dir: FD) DirIterator.Iterator {
     return DirIterator.iterate(dir, .u8).iter;
 }
@@ -3642,6 +3655,15 @@ pub inline fn clear(val: anytype, allocator: std.mem.Allocator) void {
    }
 }

+pub inline fn move(val: anytype) switch (@typeInfo(@TypeOf(val))) {
+    .pointer => |p| p.child,
+    else => @compileError("unexpected move type"),
+} {
+    const tmp = val.*;
+    @constCast(val).* = undefined;
+    return tmp;
+}
+
 pub inline fn wrappingNegation(val: anytype) @TypeOf(val) {
     return 0 -% val;
 }
@@ -3712,37 +3734,6 @@ pub noinline fn throwStackOverflow() StackOverflow!void {
 }

 const StackOverflow = error{StackOverflow};

-// This pool exists because on Windows, each path buffer costs 64 KB.
-// This makes the stack memory usage very unpredictable, which means we can't really know how much stack space we have left.
-// This pool is a workaround to make the stack memory usage more predictable.
-// We keep up to 4 path buffers alive per thread at a time.
-pub fn PathBufferPoolT(comptime T: type) type {
-    return struct {
-        const Pool = ObjectPool(T, null, true, 4);
-
-        pub fn get() *T {
-            // use a threadlocal allocator so mimalloc deletes it on thread deinit.
-            return &Pool.get(bun.threadlocalAllocator()).data;
-        }
-
-        pub fn put(buffer: *T) void {
-            var node: *Pool.Node = @alignCast(@fieldParentPtr("data", buffer));
-            node.release();
-        }
-
-        pub fn deleteAll() void {
-            Pool.deleteAll();
-        }
-    };
-}
-
-pub const PathBufferPool = PathBufferPoolT(bun.PathBuffer);
-pub const WPathBufferPool = if (Environment.isWindows) PathBufferPoolT(bun.WPathBuffer) else struct {
-    // So it can be used in code that deletes all the pools.
-    pub fn deleteAll() void {}
-};
-pub const OSPathBufferPool = if (Environment.isWindows) WPathBufferPool else PathBufferPool;
-
 pub const S3 = @import("./s3/client.zig");

 pub const ptr = @import("ptr.zig");
@@ -3764,13 +3755,12 @@ pub const highway = @import("./highway.zig");

 pub const MemoryReportingAllocator = @import("allocators/MemoryReportingAllocator.zig");

-pub fn move(dest: []u8, src: []const u8) void {
-    if (comptime Environment.allow_assert) {
-        if (src.len != dest.len) {
-            bun.Output.panic("Move: src.len != dest.len, {d} != {d}", .{ src.len, dest.len });
-        }
-    }
-    _ = bun.c.memmove(dest.ptr, src.ptr, src.len);
-}
-
 pub const mach_port = if (Environment.isMac) std.c.mach_port_t else u32;
+
+pub fn contains(item: anytype, list: *const std.ArrayListUnmanaged(@TypeOf(item))) bool {
+    const T = @TypeOf(item);
+    return switch (T) {
+        u8 => strings.containsChar(list.items, item),
+        else => std.mem.indexOfScalar(T, list.items, item) != null,
+    };
+}
diff --git a/src/bundler/bundle_v2.zig b/src/bundler/bundle_v2.zig
index dd4d7fd766..8844b3207d 100644
--- a/src/bundler/bundle_v2.zig
+++ b/src/bundler/bundle_v2.zig
@@ -2953,8 +2953,8 @@ pub const BundleV2 = struct {
                ) catch bun.outOfMemory();
            }
        } else {
-            const buf = bun.PathBufferPool.get();
-            defer bun.PathBufferPool.put(buf);
+            const buf = bun.path_buffer_pool.get();
+            defer bun.path_buffer_pool.put(buf);
            const specifier_to_use = if (loader == .html and bun.strings.hasPrefix(import_record.path.text, bun.fs.FileSystem.instance.top_level_dir)) brk: {
                const specifier_to_use = import_record.path.text[bun.fs.FileSystem.instance.top_level_dir.len..];
                if (Environment.isWindows) {
diff --git a/src/cli/bunx_command.zig b/src/cli/bunx_command.zig
index 08faf854f9..ab8aa095f5 100644
--- a/src/cli/bunx_command.zig
+++ b/src/cli/bunx_command.zig
@@ -196,8 +196,7 @@ pub const BunxCommand = struct {
            if (bin_prop.expr.asString(transpiler.allocator)) |dir_name| {
                const bin_dir = try bun.sys.openatA(dir_fd, dir_name, bun.O.RDONLY | bun.O.DIRECTORY, 0).unwrap();
                defer bin_dir.close();
-                const dir = std.fs.Dir{ .fd = bin_dir.cast() };
-                var iterator = bun.DirIterator.iterate(dir, .u8);
+                var iterator = bun.DirIterator.iterate(bin_dir, .u8);
                var entry = iterator.next();
                while (true) : (entry = iterator.next()) {
                    const current = switch (entry) {
diff --git a/src/cli/create_command.zig b/src/cli/create_command.zig
index 119fa520b2..e9064147b4 100644
--- a/src/cli/create_command.zig
+++ b/src/cli/create_command.zig
@@ -484,7 +484,7 @@ pub const CreateCommand = struct {
        const destination_dir = destination_dir__;
        const Walker = @import("../walker_skippable.zig");
-        var walker_ = try Walker.walk(template_dir, ctx.allocator, skip_files, skip_dirs);
+        var walker_ = try Walker.walk(.fromStdDir(template_dir), ctx.allocator, skip_files, skip_dirs);
        defer walker_.deinit();

        const FileCopier = struct {
@@ -498,7 +498,7 @@ pub const CreateCommand = struct {
                src_base_len: if (Environment.isWindows) usize else void,
                src_buf: if (Environment.isWindows) *bun.WPathBuffer else void,
            ) !void {
-                while (try walker.next()) |entry| {
+                while (try walker.next().unwrap()) |entry| {
                    if (comptime Environment.isWindows) {
                        if (entry.kind != .file and entry.kind != .directory) continue;
@@ -561,7 +561,7 @@ pub const CreateCommand = struct {
                    defer outfile.close();
                    defer node_.completeOne();

-                    const infile = bun.FD.fromStdFile(try entry.dir.openFile(entry.basename, .{ .mode = .read_only }));
+                    const infile = try entry.dir.openat(entry.basename, bun.O.RDONLY, 0).unwrap();
                    defer infile.close();

                    // Assumption: you only really care about making sure something that was executable is still executable
diff --git a/src/cli/init_command.zig b/src/cli/init_command.zig
index 8ba5a7fc50..51cb2b146d 100644
--- a/src/cli/init_command.zig
+++ b/src/cli/init_command.zig
@@ -531,7 +531,7 @@ pub const InitCommand = struct {
        // Find any source file
        var dir = std.fs.cwd().openDir(".", .{ .iterate = true }) catch break :infer;
        defer dir.close();
-        var it = bun.DirIterator.iterate(dir, .u8);
+        var it = bun.DirIterator.iterate(.fromStdDir(dir), .u8);
        while (try it.next().unwrap()) |file| {
            if (file.kind != .file) continue;
            const loader = bun.options.Loader.fromString(std.fs.path.extension(file.name.slice())) orelse
@@ -1021,8 +1021,8 @@ const Template = enum {
            return false;
        }

-        const pathbuffer = bun.PathBufferPool.get();
-        defer bun.PathBufferPool.put(pathbuffer);
+        const pathbuffer = bun.path_buffer_pool.get();
+        defer bun.path_buffer_pool.put(pathbuffer);

        return bun.which(pathbuffer, bun.getenvZ("PATH") orelse return false, bun.fs.FileSystem.instance.top_level_dir, "claude") != null;
    }
@@ -1097,8 +1097,8 @@ const Template = enum {

        if (Environment.isWindows) {
            if (bun.getenvZAnyCase("USER")) |user| {
-                const pathbuf = bun.PathBufferPool.get();
-                defer bun.PathBufferPool.put(pathbuf);
+                const pathbuf = bun.path_buffer_pool.get();
+                defer bun.path_buffer_pool.put(pathbuf);
                const path = std.fmt.bufPrintZ(pathbuf, "C:\\Users\\{s}\\AppData\\Local\\Programs\\Cursor\\Cursor.exe", .{user}) catch {
                    return false;
                };
diff --git a/src/cli/link_command.zig b/src/cli/link_command.zig
index 0c57ba7329..c66462721b 100644
--- a/src/cli/link_command.zig
+++ b/src/cli/link_command.zig
@@ -100,7 +100,7 @@ fn link(ctx: Command.Context) !void {
            );
            link_path_buf[top_level.len] = 0;
            const link_path = link_path_buf[0..top_level.len :0];
-            const global_path = try manager.globalLinkDirPath();
+            const global_path = manager.globalLinkDirPath();
            const dest_path = Path.joinAbsStringZ(global_path, &.{name}, .windows);
            switch (bun.sys.sys_uv.symlinkUV(
                link_path,
@@ -128,16 +128,18 @@ fn link(ctx: Command.Context) !void {
        var link_target_buf: bun.PathBuffer = undefined;
        var link_dest_buf: bun.PathBuffer = undefined;
        var link_rel_buf: bun.PathBuffer = undefined;
-        var node_modules_path_buf: bun.PathBuffer = undefined;
+
+        var node_modules_path = bun.AbsPath(.{}).initFdPath(.fromStdDir(node_modules)) catch |err| {
+            if (manager.options.log_level != .silent) {
+                Output.err(err, "failed to link binary", .{});
+            }
+            Global.crash();
+        };
+        defer node_modules_path.deinit();

        var bin_linker = Bin.Linker{
            .bin = package.bin,
-            .node_modules = .fromStdDir(node_modules),
-            .node_modules_path = bun.getFdPath(.fromStdDir(node_modules), &node_modules_path_buf) catch |err| {
-                if (manager.options.log_level != .silent) {
-                    Output.err(err, "failed to link binary", .{});
-                }
-                Global.crash();
-            },
+            .node_modules_path = &node_modules_path,
            .global_bin_path = manager.options.bin_path,
            // .destination_dir_subpath = destination_dir_subpath,
diff --git a/src/cli/pack_command.zig b/src/cli/pack_command.zig
index 5fb3cd3255..056c4a2a2b 100644
--- a/src/cli/pack_command.zig
+++ b/src/cli/pack_command.zig
@@ -297,7 +297,7 @@ pub const PackCommand = struct {
                }
            }

-            var dir_iter = DirIterator.iterate(dir, .u8);
+            var dir_iter = DirIterator.iterate(.fromStdDir(dir), .u8);
            while (dir_iter.next().unwrap() catch null) |entry| {
                if (entry.kind != .file and entry.kind != .directory) continue;
@@ -451,7 +451,7 @@ pub const PackCommand = struct {
                }
            }

- var iter = DirIterator.iterate(dir, .u8); + var iter = DirIterator.iterate(.fromStdDir(dir), .u8); while (iter.next().unwrap() catch null) |entry| { if (entry.kind != .file and entry.kind != .directory) continue; @@ -565,7 +565,7 @@ pub const PackCommand = struct { var additional_bundled_deps: std.ArrayListUnmanaged(DirInfo) = .{}; defer additional_bundled_deps.deinit(ctx.allocator); - var iter = DirIterator.iterate(dir, .u8); + var iter = DirIterator.iterate(.fromStdDir(dir), .u8); while (iter.next().unwrap() catch null) |entry| { if (entry.kind != .directory) continue; @@ -579,7 +579,7 @@ pub const PackCommand = struct { }; defer scoped_dir.close(); - var scoped_iter = DirIterator.iterate(scoped_dir, .u8); + var scoped_iter = DirIterator.iterate(.fromStdDir(scoped_dir), .u8); while (scoped_iter.next().unwrap() catch null) |sub_entry| { const entry_name = try entrySubpath(ctx.allocator, _entry_name, sub_entry.name.slice()); @@ -689,7 +689,7 @@ pub const PackCommand = struct { var dir, const dir_subpath, const dir_depth = dir_info; defer dir.close(); - var iter = DirIterator.iterate(dir, .u8); + var iter = DirIterator.iterate(.fromStdDir(dir), .u8); while (iter.next().unwrap() catch null) |entry| { if (entry.kind != .file and entry.kind != .directory) continue; @@ -849,7 +849,7 @@ pub const PackCommand = struct { } } - var dir_iter = DirIterator.iterate(dir, .u8); + var dir_iter = DirIterator.iterate(.fromStdDir(dir), .u8); while (dir_iter.next().unwrap() catch null) |entry| { if (entry.kind != .file and entry.kind != .directory) continue; diff --git a/src/cli/pm_trusted_command.zig b/src/cli/pm_trusted_command.zig index 6edfc0fdec..21f5ca301f 100644 --- a/src/cli/pm_trusted_command.zig +++ b/src/cli/pm_trusted_command.zig @@ -11,7 +11,6 @@ const String = bun.Semver.String; const PackageManager = Install.PackageManager; const PackageManagerCommand = @import("./package_manager_command.zig").PackageManagerCommand; const Lockfile = Install.Lockfile; -const Fs = @import("../fs.zig"); const Global = bun.Global; const DependencyID = Install.DependencyID; const ArrayIdentityContext = bun.ArrayIdentityContext; @@ -73,22 +72,14 @@ pub const UntrustedCommand = struct { var tree_iterator = Lockfile.Tree.Iterator(.node_modules).init(pm.lockfile); - const top_level_without_trailing_slash = strings.withoutTrailingSlash(Fs.FileSystem.instance.top_level_dir); - var abs_node_modules_path: std.ArrayListUnmanaged(u8) = .{}; - defer abs_node_modules_path.deinit(ctx.allocator); - try abs_node_modules_path.appendSlice(ctx.allocator, top_level_without_trailing_slash); - try abs_node_modules_path.append(ctx.allocator, std.fs.path.sep); + var node_modules_path: bun.AbsPath(.{ .sep = .auto }) = .initTopLevelDir(); + defer node_modules_path.deinit(); while (tree_iterator.next(null)) |node_modules| { - // + 1 because we want to keep the path separator - abs_node_modules_path.items.len = top_level_without_trailing_slash.len + 1; - try abs_node_modules_path.appendSlice(ctx.allocator, node_modules.relative_path); + const node_modules_path_save = node_modules_path.save(); + defer node_modules_path_save.restore(); - var node_modules_dir = bun.openDir(std.fs.cwd(), node_modules.relative_path) catch |err| { - if (err == error.ENOENT) continue; - return err; - }; - defer node_modules_dir.close(); + node_modules_path.append(node_modules.relative_path); for (node_modules.dependencies) |dep_id| { if (untrusted_dep_ids.contains(dep_id)) { @@ -97,12 +88,15 @@ pub const UntrustedCommand = struct { const package_id = 
pm.lockfile.buffers.resolutions.items[dep_id]; const resolution = &resolutions[package_id]; var package_scripts = scripts[package_id]; - var not_lazy: PackageManager.PackageInstaller.LazyPackageDestinationDir = .{ .dir = node_modules_dir }; + + const folder_name_save = node_modules_path.save(); + defer folder_name_save.restore(); + node_modules_path.append(alias); + const maybe_scripts_list = package_scripts.getList( pm.log, pm.lockfile, - ¬_lazy, - abs_node_modules_path.items, + &node_modules_path, alias, resolution, ) catch |err| { @@ -227,11 +221,8 @@ pub const TrustCommand = struct { // in the correct order as they would during a normal install var tree_iter = Lockfile.Tree.Iterator(.node_modules).init(pm.lockfile); - const top_level_without_trailing_slash = strings.withoutTrailingSlash(Fs.FileSystem.instance.top_level_dir); - var abs_node_modules_path: std.ArrayListUnmanaged(u8) = .{}; - defer abs_node_modules_path.deinit(ctx.allocator); - try abs_node_modules_path.appendSlice(ctx.allocator, top_level_without_trailing_slash); - try abs_node_modules_path.append(ctx.allocator, std.fs.path.sep); + var node_modules_path: bun.AbsPath(.{ .sep = .auto }) = .initTopLevelDir(); + defer node_modules_path.deinit(); var package_names_to_add: bun.StringArrayHashMapUnmanaged(void) = .{}; var scripts_at_depth: std.AutoArrayHashMapUnmanaged(usize, std.ArrayListUnmanaged(struct { @@ -243,8 +234,9 @@ pub const TrustCommand = struct { var scripts_count: usize = 0; while (tree_iter.next(null)) |node_modules| { - abs_node_modules_path.items.len = top_level_without_trailing_slash.len + 1; - try abs_node_modules_path.appendSlice(ctx.allocator, node_modules.relative_path); + const node_modules_path_save = node_modules_path.save(); + defer node_modules_path_save.restore(); + node_modules_path.append(node_modules.relative_path); var node_modules_dir = bun.openDir(std.fs.cwd(), node_modules.relative_path) catch |err| { if (err == error.ENOENT) continue; @@ -262,12 +254,15 @@ pub const TrustCommand = struct { } const resolution = &resolutions[package_id]; var package_scripts = scripts[package_id]; - var not_lazy = PackageManager.PackageInstaller.LazyPackageDestinationDir{ .dir = node_modules_dir }; + + var folder_save = node_modules_path.save(); + defer folder_save.restore(); + node_modules_path.append(alias); + const maybe_scripts_list = package_scripts.getList( pm.log, pm.lockfile, - ¬_lazy, - abs_node_modules_path.items, + &node_modules_path, alias, resolution, ) catch |err| { @@ -344,6 +339,7 @@ pub const TrustCommand = struct { info.scripts_list, optional, output_in_foreground, + null, ); if (pm.options.log_level.showProgress()) { diff --git a/src/cli/publish_command.zig b/src/cli/publish_command.zig index ae9cd7df01..c4257c5226 100644 --- a/src/cli/publish_command.zig +++ b/src/cli/publish_command.zig @@ -1149,7 +1149,7 @@ pub const PublishCommand = struct { var dir, const dir_subpath, const close_dir = dir_info; defer if (close_dir) dir.close(); - var iter = bun.DirIterator.iterate(dir, .u8); + var iter = bun.DirIterator.iterate(.fromStdDir(dir), .u8); while (iter.next().unwrap() catch null) |entry| { const name, const subpath = name_and_subpath: { const name = entry.name.slice(); diff --git a/src/cli/run_command.zig b/src/cli/run_command.zig index 4ff919dbe3..51872fb067 100644 --- a/src/cli/run_command.zig +++ b/src/cli/run_command.zig @@ -84,26 +84,24 @@ pub const RunCommand = struct { /// Find the "best" shell to use /// Cached to only run once pub fn findShell(PATH: string, cwd: string) ?stringZ { - 
const bufs = struct { - pub var shell_buf_once: bun.PathBuffer = undefined; - pub var found_shell: [:0]const u8 = ""; + const Once = struct { + var shell_buf: bun.PathBuffer = undefined; + pub var once = bun.once(struct { + pub fn run(PATH_: string, cwd_: string) ?stringZ { + if (findShellImpl(PATH_, cwd_)) |found| { + if (found.len < shell_buf.len) { + @memcpy(shell_buf[0..found.len], found); + shell_buf[found.len] = 0; + return shell_buf[0..found.len :0]; + } + } + + return null; + } + }.run); }; - if (bufs.found_shell.len > 0) { - return bufs.found_shell; - } - if (findShellImpl(PATH, cwd)) |found| { - if (found.len < bufs.shell_buf_once.len) { - @memcpy(bufs.shell_buf_once[0..found.len], found); - bufs.shell_buf_once[found.len] = 0; - bufs.found_shell = bufs.shell_buf_once[0..found.len :0]; - return bufs.found_shell; - } - - return found; - } - - return null; + return Once.once.call(.{ PATH, cwd }); } const BUN_BIN_NAME = if (Environment.isDebug) "bun-debug" else "bun"; diff --git a/src/cli/unlink_command.zig b/src/cli/unlink_command.zig index 85c9a9328e..a06648e331 100644 --- a/src/cli/unlink_command.zig +++ b/src/cli/unlink_command.zig @@ -55,7 +55,7 @@ fn unlink(ctx: Command.Context) !void { } } - switch (Syscall.lstat(Path.joinAbsStringZ(try manager.globalLinkDirPath(), &.{name}, .auto))) { + switch (Syscall.lstat(Path.joinAbsStringZ(manager.globalLinkDirPath(), &.{name}, .auto))) { .result => |stat| { if (!bun.S.ISLNK(@intCast(stat.mode))) { Output.prettyErrorln("success: package \"{s}\" is not globally linked, so there's nothing to do.", .{name}); @@ -91,17 +91,18 @@ fn unlink(ctx: Command.Context) !void { var link_target_buf: bun.PathBuffer = undefined; var link_dest_buf: bun.PathBuffer = undefined; var link_rel_buf: bun.PathBuffer = undefined; - var node_modules_path_buf: bun.PathBuffer = undefined; + + var node_modules_path = bun.AbsPath(.{}).initFdPath(.fromStdDir(node_modules)) catch |err| { + if (manager.options.log_level != .silent) { + Output.err(err, "failed to link binary", .{}); + } + Global.crash(); + }; + defer node_modules_path.deinit(); var bin_linker = Bin.Linker{ .bin = package.bin, - .node_modules = .fromStdDir(node_modules), - .node_modules_path = bun.getFdPath(.fromStdDir(node_modules), &node_modules_path_buf) catch |err| { - if (manager.options.log_level != .silent) { - Output.err(err, "failed to link binary", .{}); - } - Global.crash(); - }, + .node_modules_path = &node_modules_path, .global_bin_path = manager.options.bin_path, .package_name = strings.StringOrTinyString.init(name), .string_buf = lockfile.buffers.string_bytes.items, diff --git a/src/fd.zig b/src/fd.zig index afdf886a2e..d2a9f8383c 100644 --- a/src/fd.zig +++ b/src/fd.zig @@ -538,6 +538,36 @@ pub const FD = packed struct(backing_int) { return @enumFromInt(@as(backing_int, @bitCast(fd))); } + pub fn makePath(dir: FD, comptime T: type, subpath: []const T) !void { + return switch (T) { + u8 => bun.makePath(dir.stdDir(), subpath), + u16 => bun.makePathW(dir.stdDir(), subpath), + else => @compileError("unexpected type"), + }; + } + + pub fn makeOpenPath(dir: FD, comptime T: type, subpath: []const T) !FD { + return switch (T) { + u8 => { + if (comptime Environment.isWindows) { + return bun.sys.openDirAtWindowsA(dir, subpath, .{ .can_rename_or_delete = false, .create = true, .read_only = false }).unwrap(); + } + + return FD.fromStdDir(try dir.stdDir().makeOpenPath(subpath, .{ .iterate = true, .access_sub_paths = true })); + }, + u16 => { + if (comptime !Environment.isWindows) 
@compileError("unexpected type"); + return bun.sys.openDirAtWindows(dir, subpath, .{ .can_rename_or_delete = false, .create = true, .read_only = false }).unwrap(); + }, + else => @compileError("unexpected type"), + }; + } + + // TODO: make our own version of deleteTree + pub fn deleteTree(dir: FD, subpath: []const u8) !void { + try dir.stdDir().deleteTree(subpath); + } + // The following functions are from bun.sys but with the 'f' prefix dropped // where it is relevant. These functions all take FD as the first argument, // so that makes them Zig methods, even when declared in a separate file. diff --git a/src/feature_flags.zig b/src/feature_flags.zig index cb01ba3fac..a18d3c4add 100644 --- a/src/feature_flags.zig +++ b/src/feature_flags.zig @@ -15,8 +15,8 @@ pub const RuntimeFeatureFlag = enum { BUN_ENABLE_EXPERIMENTAL_SHELL_BUILTINS, BUN_FEATURE_FLAG_DISABLE_ADDRCONFIG, BUN_FEATURE_FLAG_DISABLE_ASYNC_TRANSPILER, - BUN_FEATURE_FLAG_DISABLE_DNS_CACHE, BUN_FEATURE_FLAG_DISABLE_DNS_CACHE_LIBINFO, + BUN_FEATURE_FLAG_DISABLE_DNS_CACHE, BUN_FEATURE_FLAG_DISABLE_INSTALL_INDEX, BUN_FEATURE_FLAG_DISABLE_IO_POOL, BUN_FEATURE_FLAG_DISABLE_IPV4, @@ -28,6 +28,7 @@ pub const RuntimeFeatureFlag = enum { BUN_FEATURE_FLAG_DISABLE_UV_FS_COPYFILE, BUN_FEATURE_FLAG_EXPERIMENTAL_BAKE, BUN_FEATURE_FLAG_FORCE_IO_POOL, + BUN_FEATURE_FLAG_FORCE_WINDOWS_JUNCTIONS, BUN_FEATURE_FLAG_LAST_MODIFIED_PRETEND_304, BUN_FEATURE_FLAG_NO_LIBDEFLATE, BUN_INSTRUMENTS, diff --git a/src/fs.zig b/src/fs.zig index c3a8533a24..a4d5b822ca 100644 --- a/src/fs.zig +++ b/src/fs.zig @@ -624,7 +624,7 @@ pub const FileSystem = struct { var existing = this.entries.atIndex(index) orelse return null; if (existing.* == .entries) { if (existing.entries.generation < generation) { - var handle = bun.openDirForIteration(std.fs.cwd(), existing.entries.dir) catch |err| { + var handle = bun.openDirForIteration(FD.cwd(), existing.entries.dir).unwrap() catch |err| { existing.entries.data.clearAndFree(bun.fs_allocator); return this.readDirectoryError(existing.entries.dir, err) catch unreachable; @@ -636,7 +636,7 @@ pub const FileSystem = struct { &existing.entries.data, existing.entries.dir, generation, - handle, + handle.stdDir(), void, void{}, @@ -982,7 +982,7 @@ pub const FileSystem = struct { ) !DirEntry { _ = fs; - var iter = bun.iterateDir(handle); + var iter = bun.iterateDir(.fromStdDir(handle)); var dir = DirEntry.init(_dir, generation); const allocator = bun.fs_allocator; errdefer dir.deinit(allocator); @@ -1382,10 +1382,10 @@ pub const FileSystem = struct { if (comptime bun.Environment.isWindows) { var file = bun.sys.getFileAttributes(absolute_path_c) orelse return error.FileNotFound; var depth: usize = 0; - const buf2: *bun.PathBuffer = bun.PathBufferPool.get(); - defer bun.PathBufferPool.put(buf2); - const buf3: *bun.PathBuffer = bun.PathBufferPool.get(); - defer bun.PathBufferPool.put(buf3); + const buf2: *bun.PathBuffer = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(buf2); + const buf3: *bun.PathBuffer = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(buf3); var current_buf: *bun.PathBuffer = buf2; var other_buf: *bun.PathBuffer = &outpath; diff --git a/src/glob/GlobWalker.zig b/src/glob/GlobWalker.zig index 2fb0b13f2d..b69fe5aabe 100644 --- a/src/glob/GlobWalker.zig +++ b/src/glob/GlobWalker.zig @@ -153,7 +153,7 @@ pub const SyscallAccessor = struct { } pub inline fn iterate(dir: Handle) DirIter { - return .{ .value = DirIterator.WrappedIterator.init(dir.value.stdDir()) }; + return .{ .value = 
DirIterator.WrappedIterator.init(dir.value) }; } }; diff --git a/src/hive_array.zig b/src/hive_array.zig index 042c4d2387..c1881a5c4b 100644 --- a/src/hive_array.zig +++ b/src/hive_array.zig @@ -3,6 +3,7 @@ const bun = @import("bun"); const assert = bun.assert; const mem = std.mem; const testing = std.testing; +const OOM = bun.OOM; /// An array that efficiently tracks which elements are in use. /// The pointers are intended to be stable @@ -114,7 +115,7 @@ pub fn HiveArray(comptime T: type, comptime capacity: u16) type { return self.allocator.create(T) catch bun.outOfMemory(); } - pub fn tryGet(self: *This) !*T { + pub fn tryGet(self: *This) OOM!*T { if (comptime capacity > 0) { if (self.hive.get()) |value| { return value; diff --git a/src/identity_context.zig b/src/identity_context.zig index 5e0cfe987e..171c253540 100644 --- a/src/identity_context.zig +++ b/src/identity_context.zig @@ -1,7 +1,11 @@ pub fn IdentityContext(comptime Key: type) type { return struct { pub fn hash(_: @This(), key: Key) u64 { - return key; + return switch (comptime @typeInfo(Key)) { + .@"enum" => @intFromEnum(key), + .int => key, + else => @compileError("unexpected identity context type"), + }; } pub fn eql(_: @This(), a: Key, b: Key) bool { diff --git a/src/install/NetworkTask.zig b/src/install/NetworkTask.zig index 8469ba5bd2..8f0e711f06 100644 --- a/src/install/NetworkTask.zig +++ b/src/install/NetworkTask.zig @@ -1,6 +1,6 @@ unsafe_http_client: AsyncHTTP = undefined, response: bun.http.HTTPClientResult = .{}, -task_id: u64, +task_id: Task.Id, url_buf: []const u8 = &[_]u8{}, retried: u16 = 0, allocator: std.mem.Allocator, @@ -24,7 +24,7 @@ next: ?*NetworkTask = null, pub const DedupeMapEntry = struct { is_required: bool, }; -pub const DedupeMap = std.HashMap(u64, DedupeMapEntry, IdentityContext(u64), 80); +pub const DedupeMap = std.HashMap(Task.Id, DedupeMapEntry, IdentityContext(Task.Id), 80); pub fn notify(this: *NetworkTask, async_http: *AsyncHTTP, result: bun.http.HTTPClientResult) void { defer this.package_manager.wake(); diff --git a/src/install/PackageInstall.zig b/src/install/PackageInstall.zig index 9736414a83..c65dfc098d 100644 --- a/src/install/PackageInstall.zig +++ b/src/install/PackageInstall.zig @@ -43,7 +43,7 @@ pub const PackageInstall = struct { package_name: String, package_version: string, - patch: Patch, + patch: ?Patch, // TODO: this is never read file_count: u32 = 0, @@ -53,15 +53,8 @@ pub const PackageInstall = struct { const ThisPackageInstall = @This(); pub const Patch = struct { - root_project_dir: ?[]const u8 = null, - patch_path: string = undefined, - patch_contents_hash: u64 = 0, - - pub const NULL = Patch{}; - - pub fn isNull(this: Patch) bool { - return this.root_project_dir == null; - } + path: string, + contents_hash: u64, }; const debug = Output.scoped(.install, true); @@ -140,12 +133,11 @@ pub const PackageInstall = struct { /// fn verifyPatchHash( this: *@This(), + patch: *const Patch, root_node_modules_dir: std.fs.Dir, ) bool { - bun.debugAssert(!this.patch.isNull()); - // hash from the .patch file, to be checked against bun tag - const patchfile_contents_hash = this.patch.patch_contents_hash; + const patchfile_contents_hash = patch.contents_hash; var buf: BuntagHashBuf = undefined; const bunhashtag = buntaghashbuf_make(&buf, patchfile_contents_hash); @@ -211,9 +203,12 @@ pub const PackageInstall = struct { this.verifyTransitiveSymlinkedFolder(root_node_modules_dir), else => this.verifyPackageJSONNameAndVersion(root_node_modules_dir, resolution.tag), }; - if 
(this.patch.isNull()) return verified; - if (!verified) return false; - return this.verifyPatchHash(root_node_modules_dir); + + if (this.patch) |*patch| { + if (!verified) return false; + return this.verifyPatchHash(patch, root_node_modules_dir); + } + return verified; } // Only check for destination directory in node_modules. We can't use package.json because @@ -415,7 +410,7 @@ pub const PackageInstall = struct { var cached_package_dir = bun.openDir(this.cache_dir, this.cache_dir_subpath) catch |err| return Result.fail(err, .opening_cache_dir, @errorReturnTrace()); defer cached_package_dir.close(); var walker_ = Walker.walk( - cached_package_dir, + .fromStdDir(cached_package_dir), this.allocator, &[_]bun.OSPathSlice{}, &[_]bun.OSPathSlice{}, @@ -429,7 +424,7 @@ pub const PackageInstall = struct { ) !u32 { var real_file_count: u32 = 0; var stackpath: [bun.MAX_PATH_BYTES]u8 = undefined; - while (try walker.next()) |entry| { + while (try walker.next().unwrap()) |entry| { switch (entry.kind) { .directory => { _ = bun.sys.mkdirat(.fromStdDir(destination_dir_), entry.path, 0o755); @@ -440,7 +435,7 @@ pub const PackageInstall = struct { const path: [:0]u8 = stackpath[0..entry.path.len :0]; const basename: [:0]u8 = stackpath[entry.path.len - entry.basename.len .. entry.path.len :0]; switch (bun.c.clonefileat( - entry.dir.fd, + entry.dir.cast(), basename, destination_dir_.fd, path, @@ -549,7 +544,7 @@ pub const PackageInstall = struct { return Result.fail(err, .opening_cache_dir, @errorReturnTrace()); state.walker = Walker.walk( - state.cached_package_dir, + .fromStdDir(state.cached_package_dir), this.allocator, &[_]bun.OSPathSlice{}, if (method == .symlink and this.cache_dir_subpath.len == 1 and this.cache_dir_subpath[0] == '.') @@ -635,7 +630,7 @@ pub const PackageInstall = struct { var copy_file_state: bun.CopyFileState = .{}; - while (try walker.next()) |entry| { + while (try walker.next().unwrap()) |entry| { if (comptime Environment.isWindows) { switch (entry.kind) { .directory, .file => {}, @@ -688,10 +683,9 @@ pub const PackageInstall = struct { } else { if (entry.kind != .file) continue; real_file_count += 1; - const openFile = std.fs.Dir.openFile; const createFile = std.fs.Dir.createFile; - var in_file = try openFile(entry.dir, entry.basename, .{ .mode = .read_only }); + var in_file = try entry.dir.openat(entry.basename, bun.O.RDONLY, 0).unwrap(); defer in_file.close(); debug("createFile {} {s}\n", .{ destination_dir_.fd, entry.path }); @@ -712,11 +706,11 @@ pub const PackageInstall = struct { defer outfile.close(); if (comptime Environment.isPosix) { - const stat = in_file.stat() catch continue; + const stat = in_file.stat().unwrap() catch continue; _ = bun.c.fchmod(outfile.handle, @intCast(stat.mode)); } - bun.copyFileWithState(.fromStdFile(in_file), .fromStdFile(outfile), ©_file_state).unwrap() catch |err| { + bun.copyFileWithState(in_file, .fromStdFile(outfile), ©_file_state).unwrap() catch |err| { if (progress_) |progress| { progress.root.end(); progress.refresh(); @@ -910,20 +904,20 @@ pub const PackageInstall = struct { var real_file_count: u32 = 0; var queue = if (Environment.isWindows) HardLinkWindowsInstallTask.getQueue(); - while (try walker.next()) |entry| { + while (try walker.next().unwrap()) |entry| { if (comptime Environment.isPosix) { switch (entry.kind) { .directory => { bun.MakePath.makePath(std.meta.Elem(@TypeOf(entry.path)), destination_dir, entry.path) catch {}; }, .file => { - std.posix.linkat(entry.dir.fd, entry.basename, destination_dir.fd, entry.path, 0) catch 
|err| { + std.posix.linkatZ(entry.dir.cast(), entry.basename, destination_dir.fd, entry.path, 0) catch |err| { if (err != error.PathAlreadyExists) { return err; } - std.posix.unlinkat(destination_dir.fd, entry.path, 0) catch {}; - try std.posix.linkat(entry.dir.fd, entry.basename, destination_dir.fd, entry.path, 0); + std.posix.unlinkatZ(destination_dir.fd, entry.path, 0) catch {}; + try std.posix.linkatZ(entry.dir.cast(), entry.basename, destination_dir.fd, entry.path, 0); }; real_file_count += 1; @@ -1019,7 +1013,7 @@ pub const PackageInstall = struct { head2: []if (Environment.isWindows) u16 else u8, ) !u32 { var real_file_count: u32 = 0; - while (try walker.next()) |entry| { + while (try walker.next().unwrap()) |entry| { if (comptime Environment.isPosix) { switch (entry.kind) { .directory => { @@ -1181,7 +1175,7 @@ pub const PackageInstall = struct { var unintall_task: *@This() = @fieldParentPtr("task", task); var debug_timer = bun.Output.DebugTimer.start(); defer { - _ = PackageManager.get().decrementPendingTasks(); + PackageManager.get().decrementPendingTasks(); PackageManager.get().wake(); } @@ -1334,12 +1328,12 @@ pub const PackageInstall = struct { // https://github.com/npm/cli/blob/162c82e845d410ede643466f9f8af78a312296cc/workspaces/arborist/lib/arborist/reify.js#L738 // https://github.com/npm/cli/commit/0e58e6f6b8f0cd62294642a502c17561aaf46553 - switch (bun.sys.symlinkOrJunction(dest_z, target_z)) { + switch (bun.sys.symlinkOrJunction(dest_z, target_z, null)) { .err => |err_| brk: { var err = err_; if (err.getErrno() == .EXIST) { _ = bun.sys.rmdirat(.fromStdDir(destination_dir), this.destination_dir_subpath); - switch (bun.sys.symlinkOrJunction(dest_z, target_z)) { + switch (bun.sys.symlinkOrJunction(dest_z, target_z, null)) { .err => |e| err = e, .result => break :brk, } @@ -1380,7 +1374,7 @@ pub const PackageInstall = struct { return switch (state) { .done => false, else => brk: { - if (this.patch.isNull()) { + if (this.patch == null) { const exists = switch (resolution_tag) { .npm => package_json_exists: { var buf = &PackageManager.cached_package_folder_name_buf; diff --git a/src/install/PackageInstaller.zig b/src/install/PackageInstaller.zig index 89f610522a..5f654a58cf 100644 --- a/src/install/PackageInstaller.zig +++ b/src/install/PackageInstaller.zig @@ -242,7 +242,6 @@ pub const PackageInstaller = struct { pub fn incrementTreeInstallCount( this: *PackageInstaller, tree_id: Lockfile.Tree.Id, - maybe_destination_dir: ?*LazyPackageDestinationDir, comptime should_install_packages: bool, log_level: Options.LogLevel, ) void { @@ -269,19 +268,13 @@ pub const PackageInstaller = struct { this.completed_trees.set(tree_id); - // Avoid opening this directory if we don't need to. if (tree.binaries.count() > 0) { - // Don't close this directory in here. It will be closed by the caller. 
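The hardlink fast path above switches to the Z-suffixed syscall wrappers but keeps the same replace-on-collision strategy: link, and if the destination already exists, unlink it and link again. A minimal sketch of that strategy in plain std.posix (an illustration under the assumption that raw std calls stand in for bun.sys wrappers; linkWithReplace is an invented name):

    const std = @import("std");

    // Try to hardlink basename from src_dir into dest_dir/dest_path; if a
    // file is already there, remove it and link again. Mirrors the
    // error.PathAlreadyExists fallback in the hunk above.
    fn linkWithReplace(
        src_dir: std.posix.fd_t,
        basename: []const u8,
        dest_dir: std.posix.fd_t,
        dest_path: []const u8,
    ) !void {
        std.posix.linkat(src_dir, basename, dest_dir, dest_path, 0) catch |err| {
            if (err != error.PathAlreadyExists) return err;
            std.posix.unlinkat(dest_dir, dest_path, 0) catch {};
            try std.posix.linkat(src_dir, basename, dest_dir, dest_path, 0);
        };
    }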
- if (maybe_destination_dir) |maybe| { - if (maybe.getDir() catch null) |destination_dir| { - this.seen_bin_links.clearRetainingCapacity(); + this.seen_bin_links.clearRetainingCapacity(); - var link_target_buf: bun.PathBuffer = undefined; - var link_dest_buf: bun.PathBuffer = undefined; - var link_rel_buf: bun.PathBuffer = undefined; - this.linkTreeBins(tree, tree_id, destination_dir, &link_target_buf, &link_dest_buf, &link_rel_buf, log_level); - } - } + var link_target_buf: bun.PathBuffer = undefined; + var link_dest_buf: bun.PathBuffer = undefined; + var link_rel_buf: bun.PathBuffer = undefined; + this.linkTreeBins(tree, tree_id, &link_target_buf, &link_dest_buf, &link_rel_buf, log_level); } if (comptime should_install_packages) { @@ -295,7 +288,6 @@ pub const PackageInstaller = struct { this: *PackageInstaller, tree: *TreeContext, tree_id: TreeContext.Id, - destination_dir: std.fs.Dir, link_target_buf: []u8, link_dest_buf: []u8, link_rel_buf: []u8, @@ -303,6 +295,9 @@ pub const PackageInstaller = struct { ) void { const lockfile = this.lockfile; const string_buf = lockfile.buffers.string_bytes.items; + var node_modules_path: bun.AbsPath(.{}) = .from(this.node_modules.path.items); + defer node_modules_path.deinit(); + while (tree.binaries.removeOrNull()) |dep_id| { bun.assertWithLocation(dep_id < lockfile.buffers.dependencies.items.len, @src()); const package_id = lockfile.buffers.resolutions.items[dep_id]; @@ -319,8 +314,7 @@ pub const PackageInstaller = struct { .string_buf = string_buf, .extern_string_buf = lockfile.buffers.extern_strings.items, .seen = &this.seen_bin_links, - .node_modules_path = this.node_modules.path.items, - .node_modules = .fromStdDir(destination_dir), + .node_modules_path = &node_modules_path, .abs_target_buf = link_target_buf, .abs_dest_buf = link_dest_buf, .rel_buf = link_rel_buf, @@ -385,18 +379,7 @@ pub const PackageInstaller = struct { this.node_modules.path.appendSlice(rel_path) catch bun.outOfMemory(); - var destination_dir = this.node_modules.openDir(this.root_node_modules_folder) catch |err| { - if (log_level != .silent) { - Output.err(err, "Failed to open node_modules folder at {s}", .{ - bun.fmt.fmtPath(u8, this.node_modules.path.items, .{}), - }); - } - - continue; - }; - defer destination_dir.close(); - - this.linkTreeBins(tree, @intCast(tree_id), destination_dir, &link_target_buf, &link_dest_buf, &link_rel_buf, log_level); + this.linkTreeBins(tree, @intCast(tree_id), &link_target_buf, &link_dest_buf, &link_rel_buf, log_level); } } } @@ -417,6 +400,7 @@ pub const PackageInstaller = struct { entry.list, optional, output_in_foreground, + null, ) catch |err| { if (log_level != .silent) { const fmt = "\nerror: failed to spawn life-cycle scripts for {s}: {s}\n"; @@ -498,7 +482,7 @@ pub const PackageInstaller = struct { const optional = entry.optional; const output_in_foreground = false; - this.manager.spawnPackageLifecycleScripts(this.command_ctx, entry.list, optional, output_in_foreground) catch |err| { + this.manager.spawnPackageLifecycleScripts(this.command_ctx, entry.list, optional, output_in_foreground, null) catch |err| { if (log_level != .silent) { const fmt = "\nerror: failed to spawn life-cycle scripts for {s}: {s}\n"; const args = .{ package_name, @errorName(err) }; @@ -594,39 +578,24 @@ pub const PackageInstaller = struct { /// Install versions of a package which are waiting on a network request pub fn installEnqueuedPackagesAfterExtraction( this: *PackageInstaller, + task_id: Task.Id, dependency_id: DependencyID, data: *const ExtractData, 
log_level: Options.LogLevel, ) void { const package_id = this.lockfile.buffers.resolutions.items[dependency_id]; const name = this.names[package_id]; - const resolution = &this.resolutions[package_id]; - const task_id = switch (resolution.tag) { - .git => Task.Id.forGitCheckout(data.url, data.resolved), - .github => Task.Id.forTarball(data.url), - .local_tarball => Task.Id.forTarball(this.lockfile.str(&resolution.value.local_tarball)), - .remote_tarball => Task.Id.forTarball(this.lockfile.str(&resolution.value.remote_tarball)), - .npm => Task.Id.forNPMPackage(name.slice(this.lockfile.buffers.string_bytes.items), resolution.value.npm.version), - else => unreachable, - }; - if (!this.installEnqueuedPackagesImpl(name, task_id, log_level)) { - if (comptime Environment.allow_assert) { - Output.panic("Ran callback to install enqueued packages, but there was no task associated with it. {}:{} (dependency_id: {d})", .{ - bun.fmt.quote(name.slice(this.lockfile.buffers.string_bytes.items)), - bun.fmt.quote(data.url), - dependency_id, - }); - } - } - } + // const resolution = &this.resolutions[package_id]; + // const task_id = switch (resolution.tag) { + // .git => Task.Id.forGitCheckout(data.url, data.resolved), + // .github => Task.Id.forTarball(data.url), + // .local_tarball => Task.Id.forTarball(this.lockfile.str(&resolution.value.local_tarball)), + // .remote_tarball => Task.Id.forTarball(this.lockfile.str(&resolution.value.remote_tarball)), + // .npm => Task.Id.forNPMPackage(name.slice(this.lockfile.buffers.string_bytes.items), resolution.value.npm.version), + // else => unreachable, + // }; - pub fn installEnqueuedPackagesImpl( - this: *PackageInstaller, - name: String, - task_id: Task.Id.Type, - log_level: Options.LogLevel, - ) bool { if (this.manager.task_queue.fetchRemove(task_id)) |removed| { var callbacks = removed.value; defer callbacks.deinit(this.manager.allocator); @@ -638,7 +607,7 @@ pub const PackageInstaller = struct { if (callbacks.items.len == 0) { debug("Unexpected state: no callbacks for async task.", .{}); - return true; + return; } for (callbacks.items) |*cb| { @@ -664,9 +633,16 @@ pub const PackageInstaller = struct { ); this.node_modules.deinit(); } - return true; + return; + } + + if (comptime Environment.allow_assert) { + Output.panic("Ran callback to install enqueued packages, but there was no task associated with it. 
{}:{} (dependency_id: {d})", .{ + bun.fmt.quote(name.slice(this.lockfile.buffers.string_bytes.items)), + bun.fmt.quote(data.url), + dependency_id, + }); } - return false; } fn getInstalledPackageScriptsCount( @@ -674,7 +650,7 @@ pub const PackageInstaller = struct { alias: string, package_id: PackageID, resolution_tag: Resolution.Tag, - node_modules_folder: *LazyPackageDestinationDir, + folder_path: *bun.AbsPath(.{ .sep = .auto }), log_level: Options.LogLevel, ) usize { if (comptime Environment.allow_assert) { @@ -696,8 +672,7 @@ pub const PackageInstaller = struct { this.lockfile.allocator, &string_builder, this.manager.log, - node_modules_folder, - alias, + folder_path, ) catch |err| { if (log_level != .silent) { Output.errGeneric("failed to fill lifecycle scripts for {s}: {s}", .{ @@ -835,11 +810,10 @@ pub const PackageInstaller = struct { .destination_dir_subpath_buf = &this.destination_dir_subpath_buf, .allocator = this.lockfile.allocator, .package_name = pkg_name, - .patch = if (patch_patch) |p| PackageInstall.Patch{ - .patch_contents_hash = patch_contents_hash.?, - .patch_path = p, - .root_project_dir = FileSystem.instance.top_level_dir, - } else PackageInstall.Patch.NULL, + .patch = if (patch_patch) |p| .{ + .contents_hash = patch_contents_hash.?, + .path = p, + } else null, .package_version = package_version, .node_modules = &this.node_modules, .lockfile = this.lockfile, @@ -848,7 +822,6 @@ pub const PackageInstaller = struct { pkg_name.slice(this.lockfile.buffers.string_bytes.items), resolution.fmt(this.lockfile.buffers.string_bytes.items, .posix), }); - const pkg_has_patch = !installer.patch.isNull(); switch (resolution.tag) { .npm => { @@ -917,32 +890,7 @@ pub const PackageInstaller = struct { installer.cache_dir = std.fs.cwd(); }, .symlink => { - const directory = this.manager.globalLinkDir() catch |err| { - if (log_level != .silent) { - const fmt = "\nerror: unable to access global directory while installing {s}: {s}\n"; - const args = .{ pkg_name.slice(this.lockfile.buffers.string_bytes.items), @errorName(err) }; - - if (log_level.showProgress()) { - switch (Output.enable_ansi_colors) { - inline else => |enable_ansi_colors| { - this.progress.log(comptime Output.prettyFmt(fmt, enable_ansi_colors), args); - }, - } - } else { - Output.prettyErrorln(fmt, args); - } - } - - if (this.manager.options.enable.fail_early) { - Global.exit(1); - } - - Output.flush(); - this.summary.fail += 1; - - if (!installer.patch.isNull()) this.incrementTreeInstallCount(this.current_tree_id, null, !is_pending_package_install, log_level); - return; - }; + const directory = this.manager.globalLinkDir(); const folder = resolution.value.symlink.slice(this.lockfile.buffers.string_bytes.items); @@ -950,7 +898,7 @@ pub const PackageInstaller = struct { installer.cache_dir_subpath = "."; installer.cache_dir = std.fs.cwd(); } else { - const global_link_dir = this.manager.globalLinkDirPath() catch unreachable; + const global_link_dir = this.manager.globalLinkDirPath(); var ptr = &this.folder_path_buf; var remain: []u8 = this.folder_path_buf[0..]; @memcpy(ptr[0..global_link_dir.len], global_link_dir); @@ -971,7 +919,7 @@ pub const PackageInstaller = struct { if (comptime Environment.allow_assert) { @panic("Internal assertion failure: unexpected resolution tag"); } - if (!installer.patch.isNull()) this.incrementTreeInstallCount(this.current_tree_id, null, !is_pending_package_install, log_level); + this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); return; }, } @@ 
-983,7 +931,7 @@ pub const PackageInstaller = struct { this.summary.skipped += @intFromBool(!needs_install); if (needs_install) { - if (!remove_patch and resolution.tag.canEnqueueInstallTask() and installer.packageMissingFromCache(this.manager, package_id, resolution.tag)) { + if (resolution.tag.canEnqueueInstallTask() and installer.packageMissingFromCache(this.manager, package_id, resolution.tag)) { if (comptime Environment.allow_assert) { bun.assertWithLocation(resolution.canEnqueueInstallTask(), @src()); } @@ -1017,7 +965,6 @@ pub const PackageInstaller = struct { ) catch |err| switch (err) { error.OutOfMemory => bun.outOfMemory(), error.InvalidURL => this.failWithInvalidUrl( - pkg_has_patch, is_pending_package_install, log_level, ), @@ -1041,7 +988,6 @@ pub const PackageInstaller = struct { ) catch |err| switch (err) { error.OutOfMemory => bun.outOfMemory(), error.InvalidURL => this.failWithInvalidUrl( - pkg_has_patch, is_pending_package_install, log_level, ), @@ -1070,7 +1016,6 @@ pub const PackageInstaller = struct { ) catch |err| switch (err) { error.OutOfMemory => bun.outOfMemory(), error.InvalidURL => this.failWithInvalidUrl( - pkg_has_patch, is_pending_package_install, log_level, ), @@ -1080,7 +1025,7 @@ pub const PackageInstaller = struct { if (comptime Environment.allow_assert) { @panic("unreachable, handled above"); } - if (!installer.patch.isNull()) this.incrementTreeInstallCount(this.current_tree_id, null, !is_pending_package_install, log_level); + this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); this.summary.fail += 1; }, } @@ -1090,12 +1035,12 @@ pub const PackageInstaller = struct { // above checks if unpatched package is in cache, if not null apply patch in temp directory, copy // into cache, then install into node_modules - if (!installer.patch.isNull()) { + if (installer.patch) |patch| { if (installer.patchedPackageMissingFromCache(this.manager, package_id)) { const task = PatchTask.newApplyPatchHash( this.manager, package_id, - installer.patch.patch_contents_hash, + patch.contents_hash, patch_name_and_version_hash.?, ); task.callback.apply.install_context = .{ @@ -1126,7 +1071,7 @@ pub const PackageInstaller = struct { }); } this.summary.fail += 1; - if (!pkg_has_patch) this.incrementTreeInstallCount(this.current_tree_id, null, !is_pending_package_install, log_level); + this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); return; }; @@ -1185,10 +1130,14 @@ pub const PackageInstaller = struct { }; if (resolution.tag != .root and (resolution.tag == .workspace or is_trusted)) { + var folder_path: bun.AbsPath(.{ .sep = .auto }) = .from(this.node_modules.path.items); + defer folder_path.deinit(); + folder_path.append(alias.slice(this.lockfile.buffers.string_bytes.items)); + if (this.enqueueLifecycleScripts( alias.slice(this.lockfile.buffers.string_bytes.items), log_level, - &lazy_package_dir, + &folder_path, package_id, dep.behavior.optional, resolution, @@ -1212,11 +1161,15 @@ pub const PackageInstaller = struct { else => if (!is_trusted and this.metas[package_id].hasInstallScript()) { // Check if the package actually has scripts. `hasInstallScript` can be false positive if a package is published with // an auto binding.gyp rebuild script but binding.gyp is excluded from the published files. 
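The lifecycle-script paths in this file stop passing an open node_modules directory handle and instead build an absolute package folder path from the tree's node_modules path plus the dependency alias. A std-only sketch of that construction (assuming std.fs.path in place of bun's internal AbsPath builder; packageFolderPath is an invented name):

    const std = @import("std");

    // Join the tree's node_modules path with the package alias, e.g.
    // "/repo/node_modules" + "left-pad" -> "/repo/node_modules/left-pad".
    // Caller owns and frees the returned slice.
    fn packageFolderPath(
        allocator: std.mem.Allocator,
        node_modules_path: []const u8,
        alias: []const u8,
    ) ![]u8 {
        return std.fs.path.join(allocator, &.{ node_modules_path, alias });
    }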
+ var folder_path: bun.AbsPath(.{ .sep = .auto }) = .from(this.node_modules.path.items); + defer folder_path.deinit(); + folder_path.append(alias.slice(this.lockfile.buffers.string_bytes.items)); + const count = this.getInstalledPackageScriptsCount( alias.slice(this.lockfile.buffers.string_bytes.items), package_id, resolution.tag, - &lazy_package_dir, + &folder_path, log_level, ); if (count > 0) { @@ -1234,7 +1187,7 @@ pub const PackageInstaller = struct { }, } - if (!pkg_has_patch) this.incrementTreeInstallCount(this.current_tree_id, &lazy_package_dir, !is_pending_package_install, log_level); + this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); }, .failure => |cause| { if (comptime Environment.allow_assert) { @@ -1243,7 +1196,7 @@ pub const PackageInstaller = struct { // even if the package failed to install, we still need to increment the install // counter for this tree - if (!pkg_has_patch) this.incrementTreeInstallCount(this.current_tree_id, &lazy_package_dir, !is_pending_package_install, log_level); + this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); if (cause.err == error.DanglingSymlink) { Output.prettyErrorln( @@ -1333,7 +1286,7 @@ pub const PackageInstaller = struct { destination_dir.close(); } - defer if (!pkg_has_patch) this.incrementTreeInstallCount(this.current_tree_id, &destination_dir, !is_pending_package_install, log_level); + defer this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); const dep = this.lockfile.buffers.dependencies.items[dependency_id]; const truncated_dep_name_hash: TruncatedPackageNameHash = @truncate(dep.name_hash); @@ -1349,10 +1302,14 @@ pub const PackageInstaller = struct { }; if (resolution.tag != .root and is_trusted) { + var folder_path: bun.AbsPath(.{ .sep = .auto }) = .from(this.node_modules.path.items); + defer folder_path.deinit(); + folder_path.append(alias.slice(this.lockfile.buffers.string_bytes.items)); + if (this.enqueueLifecycleScripts( alias.slice(this.lockfile.buffers.string_bytes.items), log_level, - &destination_dir, + &folder_path, package_id, dep.behavior.optional, resolution, @@ -1375,12 +1332,11 @@ pub const PackageInstaller = struct { fn failWithInvalidUrl( this: *PackageInstaller, - pkg_has_patch: bool, comptime is_pending_package_install: bool, log_level: Options.LogLevel, ) void { this.summary.fail += 1; - if (!pkg_has_patch) this.incrementTreeInstallCount(this.current_tree_id, null, !is_pending_package_install, log_level); + this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); } // returns true if scripts are enqueued @@ -1388,7 +1344,7 @@ pub const PackageInstaller = struct { this: *PackageInstaller, folder_name: string, log_level: Options.LogLevel, - node_modules_folder: *LazyPackageDestinationDir, + package_path: *bun.AbsPath(.{ .sep = .auto }), package_id: PackageID, optional: bool, resolution: *const Resolution, @@ -1397,8 +1353,7 @@ pub const PackageInstaller = struct { const scripts_list = scripts.getList( this.manager.log, this.lockfile, - node_modules_folder, - this.node_modules.path.items, + package_path, folder_name, resolution, ) catch |err| { diff --git a/src/install/PackageManager.zig b/src/install/PackageManager.zig index d6af0e00ac..d4425eeaed 100644 --- a/src/install/PackageManager.zig +++ b/src/install/PackageManager.zig @@ -1,6 +1,5 @@ cache_directory_: ?std.fs.Dir = null, -// TODO(dylan-conway): remove this field when we move away from 
`std.ChildProcess` in repository.zig cache_directory_path: stringZ = "", temp_dir_: ?std.fs.Dir = null, temp_dir_path: stringZ = "", @@ -96,7 +95,6 @@ preinstall_state: std.ArrayListUnmanaged(PreinstallState) = .{}, global_link_dir: ?std.fs.Dir = null, global_dir: ?std.fs.Dir = null, global_link_dir_path: string = "", -wait_count: std.atomic.Value(usize) = std.atomic.Value(usize).init(0), onWake: WakeHandler = .{}, ci_mode: bun.LazyBool(computeIsContinuousIntegration, @This(), "ci_mode") = .{}, @@ -307,60 +305,69 @@ pub fn hasEnoughTimePassedBetweenWaitingMessages() bool { return false; } -pub fn configureEnvForScripts(this: *PackageManager, ctx: Command.Context, log_level: Options.LogLevel) !*transpiler.Transpiler { - if (this.env_configure) |*env_configure| { - return &env_configure.transpiler; - } - - // We need to figure out the PATH and other environment variables - // to do that, we re-use the code from bun run - // this is expensive, it traverses the entire directory tree going up to the root - // so we really only want to do it when strictly necessary - this.env_configure = .{ - .root_dir_info = undefined, - .transpiler = undefined, - }; - const this_transpiler: *transpiler.Transpiler = &this.env_configure.?.transpiler; - - const root_dir_info = try RunCommand.configureEnvForRun( - ctx, - this_transpiler, - this.env, - log_level != .silent, - false, - ); - - const init_cwd_entry = try this.env.map.getOrPutWithoutValue("INIT_CWD"); - if (!init_cwd_entry.found_existing) { - init_cwd_entry.key_ptr.* = try ctx.allocator.dupe(u8, init_cwd_entry.key_ptr.*); - init_cwd_entry.value_ptr.* = .{ - .value = try ctx.allocator.dupe(u8, strings.withoutTrailingSlash(FileSystem.instance.top_level_dir)), - .conditional = false, - }; - } - - this.env.loadCCachePath(this_transpiler.fs); - - { - var node_path: bun.PathBuffer = undefined; - if (this.env.getNodePath(this_transpiler.fs, &node_path)) |node_pathZ| { - _ = try this.env.loadNodeJSConfig(this_transpiler.fs, bun.default_allocator.dupe(u8, node_pathZ) catch bun.outOfMemory()); - } else brk: { - const current_path = this.env.get("PATH") orelse ""; - var PATH = try std.ArrayList(u8).initCapacity(bun.default_allocator, current_path.len); - try PATH.appendSlice(current_path); - var bun_path: string = ""; - RunCommand.createFakeTemporaryNodeExecutable(&PATH, &bun_path) catch break :brk; - try this.env.map.put("PATH", PATH.items); - _ = try this.env.loadNodeJSConfig(this_transpiler.fs, bun.default_allocator.dupe(u8, bun_path) catch bun.outOfMemory()); - } - } - - this.env_configure.?.root_dir_info = root_dir_info; - - return this_transpiler; +pub fn configureEnvForScripts(this: *PackageManager, ctx: Command.Context, log_level: Options.LogLevel) !transpiler.Transpiler { + return configureEnvForScriptsOnce.call(.{ this, ctx, log_level }); } +pub var configureEnvForScriptsOnce = bun.once(struct { + pub fn run(this: *PackageManager, ctx: Command.Context, log_level: Options.LogLevel) !transpiler.Transpiler { + + // We need to figure out the PATH and other environment variables + // to do that, we re-use the code from bun run + // this is expensive, it traverses the entire directory tree going up to the root + // so we really only want to do it when strictly necessary + var this_transpiler: transpiler.Transpiler = undefined; + _ = try RunCommand.configureEnvForRun( + ctx, + &this_transpiler, + this.env, + log_level != .silent, + false, + ); + + const init_cwd_entry = try this.env.map.getOrPutWithoutValue("INIT_CWD"); + if (!init_cwd_entry.found_existing) { 
+ init_cwd_entry.key_ptr.* = try ctx.allocator.dupe(u8, init_cwd_entry.key_ptr.*); + init_cwd_entry.value_ptr.* = .{ + .value = try ctx.allocator.dupe(u8, strings.withoutTrailingSlash(FileSystem.instance.top_level_dir)), + .conditional = false, + }; + } + + this.env.loadCCachePath(this_transpiler.fs); + + { + // Run node-gyp jobs in parallel. + // https://github.com/nodejs/node-gyp/blob/7d883b5cf4c26e76065201f85b0be36d5ebdcc0e/lib/build.js#L150-L184 + const thread_count = bun.getThreadCount(); + if (thread_count > 2) { + if (!this_transpiler.env.has("JOBS")) { + var int_buf: [10]u8 = undefined; + const jobs_str = std.fmt.bufPrint(&int_buf, "{d}", .{thread_count}) catch unreachable; + this_transpiler.env.map.putAllocValue(bun.default_allocator, "JOBS", jobs_str) catch unreachable; + } + } + } + + { + var node_path: bun.PathBuffer = undefined; + if (this.env.getNodePath(this_transpiler.fs, &node_path)) |node_pathZ| { + _ = try this.env.loadNodeJSConfig(this_transpiler.fs, bun.default_allocator.dupe(u8, node_pathZ) catch bun.outOfMemory()); + } else brk: { + const current_path = this.env.get("PATH") orelse ""; + var PATH = try std.ArrayList(u8).initCapacity(bun.default_allocator, current_path.len); + try PATH.appendSlice(current_path); + var bun_path: string = ""; + RunCommand.createFakeTemporaryNodeExecutable(&PATH, &bun_path) catch break :brk; + try this.env.map.put("PATH", PATH.items); + _ = try this.env.loadNodeJSConfig(this_transpiler.fs, bun.default_allocator.dupe(u8, bun_path) catch bun.outOfMemory()); + } + } + + return this_transpiler; + } +}.run); + pub fn httpProxy(this: *PackageManager, url: URL) ?URL { return this.env.getHttpProxyFor(url); } @@ -409,7 +416,6 @@ pub fn wake(this: *PackageManager) void { this.onWake.getHandler()(ctx, this); } - _ = this.wait_count.fetchAdd(1, .monotonic); this.event_loop.wakeup(); } @@ -418,7 +424,7 @@ pub fn sleepUntil(this: *PackageManager, closure: anytype, comptime isDoneFn: an this.event_loop.tick(closure, isDoneFn); } -pub var cached_package_folder_name_buf: bun.PathBuffer = undefined; +pub threadlocal var cached_package_folder_name_buf: bun.PathBuffer = undefined; const Holder = struct { pub var ptr: *PackageManager = undefined; @@ -437,6 +443,96 @@ pub const FailFn = *const fn (*PackageManager, *const Dependency, PackageID, any pub const debug = Output.scoped(.PackageManager, true); +pub fn ensureTempNodeGypScript(this: *PackageManager) !void { + return ensureTempNodeGypScriptOnce.call(.{this}); +} + +var ensureTempNodeGypScriptOnce = bun.once(struct { + pub fn run(manager: *PackageManager) !void { + if (manager.node_gyp_tempdir_name.len > 0) return; + + const tempdir = manager.getTemporaryDirectory(); + var path_buf: bun.PathBuffer = undefined; + const node_gyp_tempdir_name = bun.span(try Fs.FileSystem.instance.tmpname("node-gyp", &path_buf, 12345)); + + // used later for adding to path for scripts + manager.node_gyp_tempdir_name = try manager.allocator.dupe(u8, node_gyp_tempdir_name); + + var node_gyp_tempdir = tempdir.makeOpenPath(manager.node_gyp_tempdir_name, .{}) catch |err| { + if (err == error.EEXIST) { + // it should not exist + Output.prettyErrorln("error: node-gyp tempdir already exists", .{}); + Global.crash(); + } + Output.prettyErrorln("error: {s} creating node-gyp tempdir", .{@errorName(err)}); + Global.crash(); + }; + defer node_gyp_tempdir.close(); + + const file_name = switch (Environment.os) { + else => "node-gyp", + .windows => "node-gyp.cmd", + }; + const mode = switch (Environment.os) { + else => 0o755, + .windows => 
0, // windows does not have an executable bit + }; + + var node_gyp_file = node_gyp_tempdir.createFile(file_name, .{ .mode = mode }) catch |err| { + Output.prettyErrorln("error: {s} creating node-gyp tempdir", .{@errorName(err)}); + Global.crash(); + }; + defer node_gyp_file.close(); + + const content = switch (Environment.os) { + .windows => + \\if not defined npm_config_node_gyp ( + \\ bun x --silent node-gyp %* + \\) else ( + \\ node "%npm_config_node_gyp%" %* + \\) + \\ + , + else => + \\#!/bin/sh + \\if [ "x$npm_config_node_gyp" = "x" ]; then + \\ bun x --silent node-gyp $@ + \\else + \\ "$npm_config_node_gyp" $@ + \\fi + \\ + , + }; + + node_gyp_file.writeAll(content) catch |err| { + Output.prettyErrorln("error: {s} writing to " ++ file_name ++ " file", .{@errorName(err)}); + Global.crash(); + }; + + // Add our node-gyp tempdir to the path + const existing_path = manager.env.get("PATH") orelse ""; + var PATH = try std.ArrayList(u8).initCapacity(bun.default_allocator, existing_path.len + 1 + manager.temp_dir_name.len + 1 + manager.node_gyp_tempdir_name.len); + try PATH.appendSlice(existing_path); + if (existing_path.len > 0 and existing_path[existing_path.len - 1] != std.fs.path.delimiter) + try PATH.append(std.fs.path.delimiter); + try PATH.appendSlice(strings.withoutTrailingSlash(manager.temp_dir_name)); + try PATH.append(std.fs.path.sep); + try PATH.appendSlice(manager.node_gyp_tempdir_name); + try manager.env.map.put("PATH", PATH.items); + + const npm_config_node_gyp = try std.fmt.bufPrint(&path_buf, "{s}{s}{s}{s}{s}", .{ + strings.withoutTrailingSlash(manager.temp_dir_name), + std.fs.path.sep_str, + strings.withoutTrailingSlash(manager.node_gyp_tempdir_name), + std.fs.path.sep_str, + file_name, + }); + + const node_gyp_abs_dir = std.fs.path.dirname(npm_config_node_gyp).?; + try manager.env.map.putAllocKeyAndValue(manager.allocator, "BUN_WHICH_IGNORE_CWD", node_gyp_abs_dir); + } +}.run); + fn httpThreadOnInitError(err: HTTP.InitError, opts: HTTP.HTTPThread.InitOpts) noreturn { switch (err) { error.LoadCAFile => { @@ -728,6 +824,10 @@ pub fn init( bun.spawn.process.WaiterThread.setShouldUseWaiterThread(); } + if (bun.getRuntimeFeatureFlag(.BUN_FEATURE_FLAG_FORCE_WINDOWS_JUNCTIONS)) { + bun.sys.WindowsSymlinkOptions.has_failed_to_create_symlink = true; + } + if (PackageManager.verbose_install) { Output.prettyErrorln("Cache Dir: {s}", .{options.cache_directory}); Output.flush(); @@ -1016,18 +1116,20 @@ const default_max_simultaneous_requests_for_bun_install = 64; const default_max_simultaneous_requests_for_bun_install_for_proxies = 64; pub const TaskCallbackList = std.ArrayListUnmanaged(TaskCallbackContext); -const TaskDependencyQueue = std.HashMapUnmanaged(u64, TaskCallbackList, IdentityContext(u64), 80); +const TaskDependencyQueue = std.HashMapUnmanaged(Task.Id, TaskCallbackList, IdentityContext(Task.Id), 80); const PreallocatedTaskStore = bun.HiveArray(Task, 64).Fallback; const PreallocatedNetworkTasks = bun.HiveArray(NetworkTask, 128).Fallback; const ResolveTaskQueue = bun.UnboundedQueue(Task, .next); -const RepositoryMap = std.HashMapUnmanaged(u64, bun.FileDescriptor, IdentityContext(u64), 80); +const RepositoryMap = std.HashMapUnmanaged(Task.Id, bun.FileDescriptor, IdentityContext(Task.Id), 80); const NpmAliasMap = std.HashMapUnmanaged(PackageNameHash, Dependency.Version, IdentityContext(u64), 80); const NetworkQueue = std.fifo.LinearFifo(*NetworkTask, .{ .Static = 32 }); const PatchTaskFifo = std.fifo.LinearFifo(*PatchTask, .{ .Static = 32 }); +// pub const 
ensureTempNodeGypScript = directories.ensureTempNodeGypScript; + // @sortImports pub const CommandLineArguments = @import("./PackageManager/CommandLineArguments.zig"); @@ -1055,11 +1157,12 @@ pub const cachedNPMPackageFolderPrintBasename = directories.cachedNPMPackageFold pub const cachedTarballFolderName = directories.cachedTarballFolderName; pub const cachedTarballFolderNamePrint = directories.cachedTarballFolderNamePrint; pub const computeCacheDirAndSubpath = directories.computeCacheDirAndSubpath; -pub const ensureTempNodeGypScript = directories.ensureTempNodeGypScript; pub const fetchCacheDirectoryPath = directories.fetchCacheDirectoryPath; pub const getCacheDirectory = directories.getCacheDirectory; +pub const getCacheDirectoryAndAbsPath = directories.getCacheDirectoryAndAbsPath; pub const getTemporaryDirectory = directories.getTemporaryDirectory; pub const globalLinkDir = directories.globalLinkDir; +pub const globalLinkDirAndPath = directories.globalLinkDirAndPath; pub const globalLinkDirPath = directories.globalLinkDirPath; pub const isFolderInCache = directories.isFolderInCache; pub const pathForCachedNPMPath = directories.pathForCachedNPMPath; @@ -1089,6 +1192,7 @@ const lifecycle = @import("PackageManager/PackageManagerLifecycle.zig"); const LifecycleScriptTimeLog = lifecycle.LifecycleScriptTimeLog; pub const determinePreinstallState = lifecycle.determinePreinstallState; pub const ensurePreinstallStateListCapacity = lifecycle.ensurePreinstallStateListCapacity; +pub const findTrustedDependenciesFromUpdateRequests = lifecycle.findTrustedDependenciesFromUpdateRequests; pub const getPreinstallState = lifecycle.getPreinstallState; pub const hasNoMorePendingLifecycleScripts = lifecycle.hasNoMorePendingLifecycleScripts; pub const loadRootLifecycleScripts = lifecycle.loadRootLifecycleScripts; diff --git a/src/install/PackageManager/PackageManagerDirectories.zig b/src/install/PackageManager/PackageManagerDirectories.zig index 7b5ba804b5..98aa1f9cfa 100644 --- a/src/install/PackageManager/PackageManagerDirectories.zig +++ b/src/install/PackageManager/PackageManagerDirectories.zig @@ -5,6 +5,11 @@ pub inline fn getCacheDirectory(this: *PackageManager) std.fs.Dir { }; } +pub inline fn getCacheDirectoryAndAbsPath(this: *PackageManager) struct { FD, bun.AbsPath(.{}) } { + const cache_dir = this.getCacheDirectory(); + return .{ .fromStdDir(cache_dir), .from(this.cache_directory_path) }; +} + pub inline fn getTemporaryDirectory(this: *PackageManager) std.fs.Dir { return this.temp_dir_ orelse brk: { this.temp_dir_ = ensureTemporaryDirectory(this); @@ -356,23 +361,43 @@ pub fn setupGlobalDir(manager: *PackageManager, ctx: Command.Context) !void { manager.options.bin_path = path.ptr[0..path.len :0]; } -pub fn globalLinkDir(this: *PackageManager) !std.fs.Dir { +pub fn globalLinkDir(this: *PackageManager) std.fs.Dir { return this.global_link_dir orelse brk: { - var global_dir = try Options.openGlobalDir(this.options.explicit_global_directory); + var global_dir = Options.openGlobalDir(this.options.explicit_global_directory) catch |err| switch (err) { + error.@"No global directory found" => { + Output.errGeneric("failed to find a global directory for package caching and global link directories", .{}); + Global.exit(1); + }, + else => { + Output.err(err, "failed to open the global directory", .{}); + Global.exit(1); + }, + }; this.global_dir = global_dir; - this.global_link_dir = try global_dir.makeOpenPath("node_modules", .{}); + this.global_link_dir = global_dir.makeOpenPath("node_modules", .{}) catch 
|err| { + Output.err(err, "failed to open global link dir node_modules at '{}'", .{FD.fromStdDir(global_dir)}); + Global.exit(1); + }; var buf: bun.PathBuffer = undefined; - const _path = try bun.getFdPath(.fromStdDir(this.global_link_dir.?), &buf); - this.global_link_dir_path = try Fs.FileSystem.DirnameStore.instance.append([]const u8, _path); + const _path = bun.getFdPath(.fromStdDir(this.global_link_dir.?), &buf) catch |err| { + Output.err(err, "failed to get the full path of the global directory", .{}); + Global.exit(1); + }; + this.global_link_dir_path = Fs.FileSystem.DirnameStore.instance.append([]const u8, _path) catch bun.outOfMemory(); break :brk this.global_link_dir.?; }; } -pub fn globalLinkDirPath(this: *PackageManager) ![]const u8 { - _ = try this.globalLinkDir(); +pub fn globalLinkDirPath(this: *PackageManager) []const u8 { + _ = this.globalLinkDir(); return this.global_link_dir_path; } +pub fn globalLinkDirAndPath(this: *PackageManager) struct { std.fs.Dir, []const u8 } { + const dir = this.globalLinkDir(); + return .{ dir, this.global_link_dir_path }; +} + pub fn pathForCachedNPMPath( this: *PackageManager, buf: *bun.PathBuffer, @@ -492,14 +517,7 @@ pub fn computeCacheDirAndSubpath( cache_dir = std.fs.cwd(); }, .symlink => { - const directory = manager.globalLinkDir() catch |err| { - const fmt = "\nerror: unable to access global directory while installing {s}: {s}\n"; - const args = .{ name, @errorName(err) }; - - Output.prettyErrorln(fmt, args); - - Global.exit(1); - }; + const directory = manager.globalLinkDir(); const folder = resolution.value.symlink.slice(buf); @@ -507,7 +525,7 @@ pub fn computeCacheDirAndSubpath( cache_dir_subpath = "."; cache_dir = std.fs.cwd(); } else { - const global_link_dir = manager.globalLinkDirPath() catch unreachable; + const global_link_dir = manager.globalLinkDirPath(); var ptr = folder_path_buf; var remain: []u8 = folder_path_buf[0..]; @memcpy(ptr[0..global_link_dir.len], global_link_dir); @@ -719,90 +737,6 @@ const PatchHashFmt = struct { } }; -pub fn ensureTempNodeGypScript(this: *PackageManager) !void { - if (this.node_gyp_tempdir_name.len > 0) return; - - const tempdir = this.getTemporaryDirectory(); - var path_buf: bun.PathBuffer = undefined; - const node_gyp_tempdir_name = bun.span(try Fs.FileSystem.instance.tmpname("node-gyp", &path_buf, 12345)); - - // used later for adding to path for scripts - this.node_gyp_tempdir_name = try this.allocator.dupe(u8, node_gyp_tempdir_name); - - var node_gyp_tempdir = tempdir.makeOpenPath(this.node_gyp_tempdir_name, .{}) catch |err| { - if (err == error.EEXIST) { - // it should not exist - Output.prettyErrorln("error: node-gyp tempdir already exists", .{}); - Global.crash(); - } - Output.prettyErrorln("error: {s} creating node-gyp tempdir", .{@errorName(err)}); - Global.crash(); - }; - defer node_gyp_tempdir.close(); - - const file_name = switch (Environment.os) { - else => "node-gyp", - .windows => "node-gyp.cmd", - }; - const mode = switch (Environment.os) { - else => 0o755, - .windows => 0, // windows does not have an executable bit - }; - - var node_gyp_file = node_gyp_tempdir.createFile(file_name, .{ .mode = mode }) catch |err| { - Output.prettyErrorln("error: {s} creating node-gyp tempdir", .{@errorName(err)}); - Global.crash(); - }; - defer node_gyp_file.close(); - - const content = switch (Environment.os) { - .windows => - \\if not defined npm_config_node_gyp ( - \\ bun x --silent node-gyp %* - \\) else ( - \\ node "%npm_config_node_gyp%" %* - \\) - \\ - , - else => - \\#!/bin/sh - \\if [ 
"x$npm_config_node_gyp" = "x" ]; then - \\ bun x --silent node-gyp $@ - \\else - \\ "$npm_config_node_gyp" $@ - \\fi - \\ - , - }; - - node_gyp_file.writeAll(content) catch |err| { - Output.prettyErrorln("error: {s} writing to " ++ file_name ++ " file", .{@errorName(err)}); - Global.crash(); - }; - - // Add our node-gyp tempdir to the path - const existing_path = this.env.get("PATH") orelse ""; - var PATH = try std.ArrayList(u8).initCapacity(bun.default_allocator, existing_path.len + 1 + this.temp_dir_name.len + 1 + this.node_gyp_tempdir_name.len); - try PATH.appendSlice(existing_path); - if (existing_path.len > 0 and existing_path[existing_path.len - 1] != std.fs.path.delimiter) - try PATH.append(std.fs.path.delimiter); - try PATH.appendSlice(strings.withoutTrailingSlash(this.temp_dir_name)); - try PATH.append(std.fs.path.sep); - try PATH.appendSlice(this.node_gyp_tempdir_name); - try this.env.map.put("PATH", PATH.items); - - const npm_config_node_gyp = try std.fmt.bufPrint(&path_buf, "{s}{s}{s}{s}{s}", .{ - strings.withoutTrailingSlash(this.temp_dir_name), - std.fs.path.sep_str, - strings.withoutTrailingSlash(this.node_gyp_tempdir_name), - std.fs.path.sep_str, - file_name, - }); - - const node_gyp_abs_dir = std.fs.path.dirname(npm_config_node_gyp).?; - try this.env.map.putAllocKeyAndValue(this.allocator, "BUN_WHICH_IGNORE_CWD", node_gyp_abs_dir); -} - var using_fallback_temp_dir: bool = false; // @sortImports @@ -821,7 +755,6 @@ const Progress = bun.Progress; const default_allocator = bun.default_allocator; const string = bun.string; const stringZ = bun.stringZ; -const strings = bun.strings; const Command = bun.CLI.Command; const File = bun.sys.File; diff --git a/src/install/PackageManager/PackageManagerEnqueue.zig b/src/install/PackageManager/PackageManagerEnqueue.zig index 739582561e..ae89ab3ed0 100644 --- a/src/install/PackageManager/PackageManagerEnqueue.zig +++ b/src/install/PackageManager/PackageManagerEnqueue.zig @@ -209,7 +209,7 @@ pub fn enqueueGitForCheckout( pub fn enqueueParseNPMPackage( this: *PackageManager, - task_id: u64, + task_id: Task.Id, name: strings.StringOrTinyString, network_task: *NetworkTask, ) *ThreadPool.Task { @@ -652,7 +652,7 @@ pub fn enqueueDependencyWithMainAndSuccessFn( const name_str = this.lockfile.str(&name); const task_id = Task.Id.forManifest(name_str); - if (comptime Environment.allow_assert) bun.assert(task_id != 0); + if (comptime Environment.allow_assert) bun.assert(task_id.get() != 0); if (comptime Environment.allow_assert) debug( @@ -1132,7 +1132,7 @@ pub fn enqueueExtractNPMPackage( fn enqueueGitClone( this: *PackageManager, - task_id: u64, + task_id: Task.Id, name: string, repository: *const Repository, dep_id: DependencyID, @@ -1182,7 +1182,7 @@ fn enqueueGitClone( pub fn enqueueGitCheckout( this: *PackageManager, - task_id: u64, + task_id: Task.Id, dir: bun.FileDescriptor, dependency_id: DependencyID, name: string, @@ -1238,7 +1238,7 @@ pub fn enqueueGitCheckout( fn enqueueLocalTarball( this: *PackageManager, - task_id: u64, + task_id: Task.Id, dependency_id: DependencyID, name: string, path: string, @@ -1641,6 +1641,12 @@ fn getOrPutResolvedPackage( // .auto, // ); }; + + // if (strings.eqlLong(strings.withoutTrailingSlash(folder_path_abs), strings.withoutTrailingSlash(FileSystem.instance.top_level_dir), true)) { + // successFn(this, dependency_id, 0); + // return .{ .package = this.lockfile.packages.get(0) }; + // } + break :res FolderResolution.getOrPut(.{ .relative = .folder }, version, folder_path_abs, this); } @@ -1720,7 +1726,7 @@ 
fn getOrPutResolvedPackage( } }, .symlink => { - const res = FolderResolution.getOrPut(.{ .global = try this.globalLinkDirPath() }, version, this.lockfile.str(&version.value.symlink), this); + const res = FolderResolution.getOrPut(.{ .global = this.globalLinkDirPath() }, version, this.lockfile.str(&version.value.symlink), this); switch (res) { .err => |err| return err, diff --git a/src/install/PackageManager/PackageManagerLifecycle.zig b/src/install/PackageManager/PackageManagerLifecycle.zig index dff3c0ae80..1ba1aec35d 100644 --- a/src/install/PackageManager/PackageManagerLifecycle.zig +++ b/src/install/PackageManager/PackageManagerLifecycle.zig @@ -59,7 +59,7 @@ pub fn ensurePreinstallStateListCapacity(this: *PackageManager, count: usize) vo @memset(this.preinstall_state.items[offset..], PreinstallState.unknown); } -pub fn setPreinstallState(this: *PackageManager, package_id: PackageID, lockfile: *Lockfile, value: PreinstallState) void { +pub fn setPreinstallState(this: *PackageManager, package_id: PackageID, lockfile: *const Lockfile, value: PreinstallState) void { this.ensurePreinstallStateListCapacity(lockfile.packages.len); this.preinstall_state.items[package_id] = value; } @@ -218,7 +218,9 @@ pub fn loadRootLifecycleScripts(this: *PackageManager, root_package: Package) vo const buf = this.lockfile.buffers.string_bytes.items; // need to clone because this is a copy before Lockfile.cleanWithLogger const name = root_package.name.slice(buf); - const top_level_dir_without_trailing_slash = strings.withoutTrailingSlash(FileSystem.instance.top_level_dir); + + var top_level_dir: bun.AbsPath(.{ .sep = .auto }) = .initTopLevelDir(); + defer top_level_dir.deinit(); if (root_package.scripts.hasAny()) { const add_node_gyp_rebuild_script = root_package.scripts.install.isEmpty() and root_package.scripts.preinstall.isEmpty() and Syscall.exists(binding_dot_gyp_path); @@ -226,7 +228,7 @@ pub fn loadRootLifecycleScripts(this: *PackageManager, root_package: Package) vo this.root_lifecycle_scripts = root_package.scripts.createList( this.lockfile, buf, - top_level_dir_without_trailing_slash, + &top_level_dir, name, .root, add_node_gyp_rebuild_script, @@ -237,7 +239,7 @@ pub fn loadRootLifecycleScripts(this: *PackageManager, root_package: Package) vo this.root_lifecycle_scripts = root_package.scripts.createList( this.lockfile, buf, - top_level_dir_without_trailing_slash, + &top_level_dir, name, .root, true, @@ -252,6 +254,7 @@ pub fn spawnPackageLifecycleScripts( list: Lockfile.Package.Scripts.List, optional: bool, foreground: bool, + install_ctx: ?LifecycleScriptSubprocess.InstallCtx, ) !void { const log_level = this.options.log_level; var any_scripts = false; @@ -268,55 +271,99 @@ pub fn spawnPackageLifecycleScripts( try this.ensureTempNodeGypScript(); const cwd = list.cwd; - const this_transpiler = try this.configureEnvForScripts(ctx, log_level); - const original_path = this_transpiler.env.get("PATH") orelse ""; + var this_transpiler = try this.configureEnvForScripts(ctx, log_level); - var PATH = try std.ArrayList(u8).initCapacity(bun.default_allocator, original_path.len + 1 + "node_modules/.bin".len + cwd.len + 1); - var current_dir: ?*DirInfo = this_transpiler.resolver.readDirInfo(cwd) catch null; - bun.assert(current_dir != null); - while (current_dir) |dir| { - if (PATH.items.len > 0 and PATH.items[PATH.items.len - 1] != std.fs.path.delimiter) { - try PATH.append(std.fs.path.delimiter); - } - try PATH.appendSlice(strings.withoutTrailingSlash(dir.abs_path)); - if (!(dir.abs_path.len == 1 and 
dir.abs_path[0] == std.fs.path.sep)) { - try PATH.append(std.fs.path.sep); - } - try PATH.appendSlice(this.options.bin_path); - current_dir = dir.getParent(); + var script_env = try this_transpiler.env.map.cloneWithAllocator(bun.default_allocator); + defer script_env.map.deinit(); + + const original_path = script_env.get("PATH") orelse ""; + + var PATH: bun.EnvPath(.{}) = try .initCapacity(bun.default_allocator, original_path.len + 1 + "node_modules/.bin".len + cwd.len + 1); + defer PATH.deinit(); + + var parent: ?string = cwd; + + while (parent) |dir| { + var builder = PATH.pathComponentBuilder(); + builder.append(dir); + builder.append("node_modules/.bin"); + try builder.apply(); + + parent = std.fs.path.dirname(dir); } - if (original_path.len > 0) { - if (PATH.items.len > 0 and PATH.items[PATH.items.len - 1] != std.fs.path.delimiter) { - try PATH.append(std.fs.path.delimiter); + try PATH.append(original_path); + try script_env.put("PATH", PATH.slice()); + + const envp = try script_env.createNullDelimitedEnvMap(this.allocator); + + const shell_bin = shell_bin: { + if (comptime Environment.isWindows) { + break :shell_bin null; } - try PATH.appendSlice(original_path); - } + if (this.env.get("PATH")) |env_path| { + break :shell_bin bun.CLI.RunCommand.findShell(env_path, cwd); + } - this_transpiler.env.map.put("PATH", PATH.items) catch unreachable; + break :shell_bin null; + }; - // Run node-gyp jobs in parallel. - // https://github.com/nodejs/node-gyp/blob/7d883b5cf4c26e76065201f85b0be36d5ebdcc0e/lib/build.js#L150-L184 - const thread_count = bun.getThreadCount(); - if (thread_count > 2) { - if (!this_transpiler.env.has("JOBS")) { - var int_buf: [10]u8 = undefined; - const jobs_str = std.fmt.bufPrint(&int_buf, "{d}", .{thread_count}) catch unreachable; - this_transpiler.env.map.putAllocValue(bun.default_allocator, "JOBS", jobs_str) catch unreachable; + try LifecycleScriptSubprocess.spawnPackageScripts(this, list, envp, shell_bin, optional, log_level, foreground, install_ctx); +} + +pub fn findTrustedDependenciesFromUpdateRequests(this: *PackageManager) std.AutoArrayHashMapUnmanaged(TruncatedPackageNameHash, void) { + const parts = this.lockfile.packages.slice(); + // find all deps originating from --trust packages from cli + var set: std.AutoArrayHashMapUnmanaged(TruncatedPackageNameHash, void) = .{}; + if (this.options.do.trust_dependencies_from_args and this.lockfile.packages.len > 0) { + const root_deps = parts.items(.dependencies)[this.root_package_id.get(this.lockfile, this.workspace_name_hash)]; + var dep_id = root_deps.off; + const end = dep_id +| root_deps.len; + while (dep_id < end) : (dep_id += 1) { + const root_dep = this.lockfile.buffers.dependencies.items[dep_id]; + for (this.update_requests) |request| { + if (request.matches(root_dep, this.lockfile.buffers.string_bytes.items)) { + const package_id = this.lockfile.buffers.resolutions.items[dep_id]; + if (package_id == invalid_package_id) continue; + + const entry = set.getOrPut(this.lockfile.allocator, @truncate(root_dep.name_hash)) catch bun.outOfMemory(); + if (!entry.found_existing) { + const dependency_slice = parts.items(.dependencies)[package_id]; + addDependenciesToSet(&set, this.lockfile, dependency_slice); + } + break; + } + } } } - const envp = try this_transpiler.env.map.createNullDelimitedEnvMap(this.allocator); - try this_transpiler.env.map.put("PATH", original_path); - PATH.deinit(); + return set; +} - try LifecycleScriptSubprocess.spawnPackageScripts(this, list, envp, optional, log_level, foreground); +fn 
addDependenciesToSet( + names: *std.AutoArrayHashMapUnmanaged(TruncatedPackageNameHash, void), + lockfile: *Lockfile, + dependencies_slice: Lockfile.DependencySlice, +) void { + const begin = dependencies_slice.off; + const end = begin +| dependencies_slice.len; + var dep_id = begin; + while (dep_id < end) : (dep_id += 1) { + const package_id = lockfile.buffers.resolutions.items[dep_id]; + if (package_id == invalid_package_id) continue; + + const dep = lockfile.buffers.dependencies.items[dep_id]; + const entry = names.getOrPut(lockfile.allocator, @truncate(dep.name_hash)) catch bun.outOfMemory(); + if (!entry.found_existing) { + const dependency_slice = lockfile.packages.items(.dependencies)[package_id]; + addDependenciesToSet(names, lockfile, dependency_slice); + } + } } // @sortImports -const DirInfo = @import("../../resolver/dir_info.zig"); const std = @import("std"); const bun = @import("bun"); @@ -326,7 +373,6 @@ const Path = bun.path; const Syscall = bun.sys; const default_allocator = bun.default_allocator; const string = bun.string; -const strings = bun.strings; const Command = bun.CLI.Command; const Semver = bun.Semver; @@ -339,6 +385,8 @@ const LifecycleScriptSubprocess = bun.install.LifecycleScriptSubprocess; const PackageID = bun.install.PackageID; const PackageManager = bun.install.PackageManager; const PreinstallState = bun.install.PreinstallState; +const TruncatedPackageNameHash = bun.install.TruncatedPackageNameHash; +const invalid_package_id = bun.install.invalid_package_id; const Lockfile = bun.install.Lockfile; const Package = Lockfile.Package; diff --git a/src/install/PackageManager/install_with_manager.zig b/src/install/PackageManager/install_with_manager.zig index 34e1826504..c68a904239 100644 --- a/src/install/PackageManager/install_with_manager.zig +++ b/src/install/PackageManager/install_with_manager.zig @@ -199,6 +199,8 @@ pub fn installWithManager( lockfile.catalogs.count(&lockfile, builder); maybe_root.scripts.count(lockfile.buffers.string_bytes.items, *Lockfile.StringBuilder, builder); + manager.lockfile.node_linker = lockfile.node_linker; + const off = @as(u32, @truncate(manager.lockfile.buffers.dependencies.items.len)); const len = @as(u32, @truncate(new_dependencies.len)); var packages = manager.lockfile.packages.slice(); @@ -468,7 +470,6 @@ pub fn installWithManager( this, .{ .onExtract = {}, - .onPatch = {}, .onResolve = {}, .onPackageManifestError = {}, .onPackageDownloadError = {}, @@ -735,16 +736,33 @@ pub fn installWithManager( } } - var install_summary = PackageInstall.Summary{}; - if (manager.options.do.install_packages) { - install_summary = try @import("../hoisted_install.zig").installHoistedPackages( + const install_summary: PackageInstall.Summary = install_summary: { + if (!manager.options.do.install_packages) { + break :install_summary .{}; + } + + if (manager.lockfile.node_linker == .hoisted or + // TODO + manager.lockfile.node_linker == .auto) + { + break :install_summary try installHoistedPackages( + manager, + ctx, + workspace_filters.items, + install_root_dependencies, + log_level, + ); + } + + break :install_summary installIsolatedPackages( manager, ctx, - workspace_filters.items, install_root_dependencies, - log_level, - ); - } + workspace_filters.items, + ) catch |err| switch (err) { + error.OutOfMemory => bun.outOfMemory(), + }; + }; if (log_level != .silent) { try manager.log.print(Output.errorWriter()); @@ -821,7 +839,7 @@ pub fn installWithManager( // have finished, and lockfiles have been saved const optional = false; const 
output_in_foreground = true; - try manager.spawnPackageLifecycleScripts(ctx, scripts, optional, output_in_foreground); + try manager.spawnPackageLifecycleScripts(ctx, scripts, optional, output_in_foreground, null); while (manager.pending_lifecycle_script_tasks.load(.monotonic) > 0) { manager.reportSlowLifecycleScripts(); @@ -964,6 +982,8 @@ fn printBlockedPackagesInfo(summary: *const PackageInstall.Summary, global: bool // @sortImports const std = @import("std"); +const installHoistedPackages = @import("../hoisted_install.zig").installHoistedPackages; +const installIsolatedPackages = @import("../isolated_install.zig").installIsolatedPackages; const bun = @import("bun"); const Environment = bun.Environment; diff --git a/src/install/PackageManager/patchPackage.zig b/src/install/PackageManager/patchPackage.zig index cb78280b21..f443ea0a85 100644 --- a/src/install/PackageManager/patchPackage.zig +++ b/src/install/PackageManager/patchPackage.zig @@ -760,10 +760,9 @@ fn overwritePackageInNodeModulesFolder( var pathbuf2: bun.PathBuffer = undefined; // _ = pathbuf; // autofix - while (try walker.next()) |entry| { + while (try walker.next().unwrap()) |entry| { if (entry.kind != .file) continue; real_file_count += 1; - const openFile = std.fs.Dir.openFile; const createFile = std.fs.Dir.createFile; // 1. rename original file in node_modules to tmp_dir_in_node_modules @@ -807,7 +806,7 @@ fn overwritePackageInNodeModulesFolder( Global.crash(); }; } else if (comptime Environment.isPosix) { - var in_file = try openFile(entry.dir, entry.basename, .{ .mode = .read_only }); + var in_file = try entry.dir.openat(entry.basename, bun.O.RDONLY, 0).unwrap(); defer in_file.close(); @memcpy(pathbuf[0..entry.path.len], entry.path); @@ -824,10 +823,10 @@ fn overwritePackageInNodeModulesFolder( var outfile = try createFile(destination_dir_, entry.path, .{}); defer outfile.close(); - const stat = in_file.stat() catch continue; + const stat = in_file.stat().unwrap() catch continue; _ = bun.c.fchmod(outfile.handle, @intCast(stat.mode)); - bun.copyFileWithState(.fromStdFile(in_file), .fromStdFile(outfile), ©_file_state).unwrap() catch |err| { + bun.copyFileWithState(in_file, .fromStdFile(outfile), ©_file_state).unwrap() catch |err| { Output.prettyError("{s}: copying file {}", .{ @errorName(err), bun.fmt.fmtOSPath(entry.path, .{}) }); Global.crash(); }; @@ -840,7 +839,7 @@ fn overwritePackageInNodeModulesFolder( var pkg_in_cache_dir = try cache_dir.openDir(cache_dir_subpath, .{ .iterate = true }); defer pkg_in_cache_dir.close(); - var walker = Walker.walk(pkg_in_cache_dir, manager.allocator, &.{}, IGNORED_PATHS) catch bun.outOfMemory(); + var walker = Walker.walk(.fromStdDir(pkg_in_cache_dir), manager.allocator, &.{}, IGNORED_PATHS) catch bun.outOfMemory(); defer walker.deinit(); var buf1: if (bun.Environment.isWindows) bun.WPathBuffer else void = undefined; diff --git a/src/install/PackageManager/processDependencyList.zig b/src/install/PackageManager/processDependencyList.zig index 663305d6f5..f2ce90170b 100644 --- a/src/install/PackageManager/processDependencyList.zig +++ b/src/install/PackageManager/processDependencyList.zig @@ -291,8 +291,8 @@ pub fn processPeerDependencyList( pub fn processDependencyList( this: *PackageManager, dep_list: TaskCallbackList, - comptime Context: type, - ctx: Context, + comptime Ctx: type, + ctx: Ctx, comptime callbacks: anytype, install_peer: bool, ) !void { diff --git a/src/install/PackageManager/runTasks.zig b/src/install/PackageManager/runTasks.zig index 9f6ebcff78..000f3d8907 100644 
--- a/src/install/PackageManager/runTasks.zig +++ b/src/install/PackageManager/runTasks.zig @@ -1,7 +1,7 @@ pub fn runTasks( manager: *PackageManager, - comptime ExtractCompletionContext: type, - extract_ctx: ExtractCompletionContext, + comptime Ctx: type, + extract_ctx: Ctx, comptime callbacks: anytype, install_peer: bool, log_level: Options.LogLevel, @@ -33,7 +33,7 @@ pub fn runTasks( var patch_tasks_iter = patch_tasks_batch.iterator(); while (patch_tasks_iter.next()) |ptask| { if (comptime Environment.allow_assert) bun.assert(manager.pendingTaskCount() > 0); - _ = manager.decrementPendingTasks(); + manager.decrementPendingTasks(); defer ptask.deinit(); try ptask.runFromMainThread(manager, log_level); if (ptask.callback == .apply) { @@ -42,7 +42,7 @@ pub fn runTasks( if (ptask.callback.apply.task_id) |task_id| { _ = task_id; // autofix - } else if (ExtractCompletionContext == *PackageInstaller) { + } else if (Ctx == *PackageInstaller) { if (ptask.callback.apply.install_context) |*ctx| { var installer: *PackageInstaller = extract_ctx; const path = ctx.path; @@ -68,11 +68,41 @@ pub fn runTasks( } } + if (Ctx == *Store.Installer) { + const installer: *Store.Installer = extract_ctx; + const batch = installer.tasks.popBatch(); + var iter = batch.iterator(); + while (iter.next()) |task| { + defer installer.preallocated_tasks.put(task); + switch (task.result) { + .none => { + if (comptime Environment.ci_assert) { + bun.assertWithLocation(false, @src()); + } + installer.onTaskComplete(task.entry_id, .success); + }, + .err => |err| { + installer.onTaskFail(task.entry_id, err); + }, + .blocked => { + installer.onTaskBlocked(task.entry_id); + }, + .done => { + if (comptime Environment.ci_assert) { + const step = installer.store.entries.items(.step)[task.entry_id.get()].load(.monotonic); + bun.assertWithLocation(step == .done, @src()); + } + installer.onTaskComplete(task.entry_id, .success); + }, + } + } + } + var network_tasks_batch = manager.async_network_task_queue.popBatch(); var network_tasks_iter = network_tasks_batch.iterator(); while (network_tasks_iter.next()) |task| { if (comptime Environment.allow_assert) bun.assert(manager.pendingTaskCount() > 0); - _ = manager.decrementPendingTasks(); + manager.decrementPendingTasks(); // We cannot free the network task at the end of this scope. // It may continue to be referenced in a future task. 
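runTasks is now generic over an installer context, and the new Store.Installer branch is selected the same way the existing PackageInstaller branch is: by a comptime comparison on the context type. A reduced sketch of that dispatch shape (HoistedCtx and IsolatedCtx are illustrative stand-ins, not bun's types):

    const HoistedCtx = struct {
        fn onTaskDone(_: *HoistedCtx, _: u64) void {}
    };
    const IsolatedCtx = struct {
        fn onTaskDone(_: *IsolatedCtx, _: u64) void {}
    };

    // Resolved entirely at compile time; instantiating dispatch with an
    // unsupported context type fails the build instead of reaching runtime.
    fn dispatch(comptime Ctx: type, ctx: Ctx, task_id: u64) void {
        switch (Ctx) {
            *HoistedCtx, *IsolatedCtx => ctx.onTaskDone(task_id),
            else => @compileError("unexpected context type"),
        }
    }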
@@ -256,7 +286,7 @@ pub fn runTasks( try manager.processDependencyList( dependency_list, - ExtractCompletionContext, + Ctx, extract_ctx, callbacks, install_peer, @@ -449,7 +479,7 @@ pub fn runTasks( while (resolve_tasks_iter.next()) |task| { if (comptime Environment.allow_assert) bun.assert(manager.pendingTaskCount() > 0); defer manager.preallocated_resolve_tasks.put(task); - _ = manager.decrementPendingTasks(); + manager.decrementPendingTasks(); if (task.log.msgs.items.len > 0) { try task.log.print(Output.errorWriter()); @@ -500,7 +530,7 @@ pub fn runTasks( const dependency_list = dependency_list_entry.value_ptr.*; dependency_list_entry.value_ptr.* = .{}; - try manager.processDependencyList(dependency_list, ExtractCompletionContext, extract_ctx, callbacks, install_peer); + try manager.processDependencyList(dependency_list, Ctx, extract_ctx, callbacks, install_peer); if (log_level.showProgress()) { if (!has_updated_this_run) { @@ -561,14 +591,26 @@ pub fn runTasks( manager.extracted_count += 1; bun.Analytics.Features.extracted_packages += 1; - if (comptime @TypeOf(callbacks.onExtract) != void and ExtractCompletionContext == *PackageInstaller) { - extract_ctx.fixCachedLockfilePackageSlices(); - callbacks.onExtract( - extract_ctx, - dependency_id, - &task.data.extract, - log_level, - ); + if (comptime @TypeOf(callbacks.onExtract) != void) { + switch (Ctx) { + *PackageInstaller => { + extract_ctx.fixCachedLockfilePackageSlices(); + callbacks.onExtract( + extract_ctx, + task.id, + dependency_id, + &task.data.extract, + log_level, + ); + }, + *Store.Installer => { + callbacks.onExtract( + extract_ctx, + task.id, + ); + }, + else => @compileError("unexpected context type"), + } } else if (manager.processExtractedTarballPackage(&package_id, dependency_id, resolution, &task.data.extract, log_level)) |pkg| handle_pkg: { // In the middle of an install, you could end up needing to download the github tarball for a dependency // We need to make sure we resolve the dependencies first before calling the onExtract callback @@ -626,11 +668,6 @@ pub fn runTasks( manager.setPreinstallState(package_id, manager.lockfile, .done); - if (comptime @TypeOf(callbacks.onExtract) != void and ExtractCompletionContext != *PackageInstaller) { - // handled *PackageInstaller above - callbacks.onExtract(extract_ctx, dependency_id, &task.data.extract, log_level); - } - if (log_level.showProgress()) { if (!has_updated_this_run) { manager.setNodeName(manager.downloads_node.?, alias, ProgressStrings.extract_emoji, true); @@ -671,7 +708,7 @@ pub fn runTasks( continue; } - if (comptime @TypeOf(callbacks.onExtract) != void and ExtractCompletionContext == *PackageInstaller) { + if (comptime @TypeOf(callbacks.onExtract) != void and Ctx == *PackageInstaller) { // Installing! // this dependency might be something other than a git dependency! only need the name and // behavior, use the resolution from the task.
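`runTasks` now selects the `onExtract` shape with a comptime `switch` on the context type instead of chained `if`s, so an unexpected context becomes a compile error rather than a silently skipped callback. A self-contained sketch of that dispatch style (the two context structs here are hypothetical):

```zig
const std = @import("std");

const Hoisted = struct { count: u32 = 0 };
const Isolated = struct { count: u32 = 0 };

// The switch operand is a type, so it is resolved at compile time and
// only the matching arm is analyzed for each instantiation.
fn onExtract(comptime Ctx: type, ctx: Ctx, task_id: u64) void {
    switch (Ctx) {
        *Hoisted => {
            // the hoisted installer receives extra arguments in the real code
            ctx.count += 1;
            std.debug.print("hoisted extract, task {d}\n", .{task_id});
        },
        *Isolated => {
            ctx.count += 1;
            std.debug.print("isolated extract, task {d}\n", .{task_id});
        },
        else => @compileError("unexpected context type"),
    }
}

pub fn main() void {
    var h = Hoisted{};
    var i = Isolated{};
    onExtract(*Hoisted, &h, 1);
    onExtract(*Isolated, &i, 2);
}
```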
@@ -712,7 +749,7 @@ pub fn runTasks( const dependency_list = dependency_list_entry.value_ptr.*; dependency_list_entry.value_ptr.* = .{}; - try manager.processDependencyList(dependency_list, ExtractCompletionContext, extract_ctx, callbacks, install_peer); + try manager.processDependencyList(dependency_list, Ctx, extract_ctx, callbacks, install_peer); } if (log_level.showProgress()) { @@ -745,20 +782,32 @@ pub fn runTasks( continue; } - if (comptime @TypeOf(callbacks.onExtract) != void and ExtractCompletionContext == *PackageInstaller) { + if (comptime @TypeOf(callbacks.onExtract) != void) { // We've populated the cache, package already exists in memory. Call the package installer callback // and don't enqueue dependencies + switch (Ctx) { + *PackageInstaller => { - // TODO(dylan-conway) most likely don't need to call this now that the package isn't appended, but - // keeping just in case for now - extract_ctx.fixCachedLockfilePackageSlices(); + // TODO(dylan-conway) most likely don't need to call this now that the package isn't appended, but + // keeping just in case for now + extract_ctx.fixCachedLockfilePackageSlices(); - callbacks.onExtract( - extract_ctx, - git_checkout.dependency_id, - &task.data.git_checkout, - log_level, - ); + callbacks.onExtract( + extract_ctx, + task.id, + git_checkout.dependency_id, + &task.data.git_checkout, + log_level, + ); + }, + *Store.Installer => { + callbacks.onExtract( + extract_ctx, + task.id, + ); + }, + else => @compileError("unexpected context type"), + } } else if (manager.processExtractedTarballPackage( &package_id, git_checkout.dependency_id, @@ -795,13 +844,8 @@ pub fn runTasks( } } - if (comptime @TypeOf(callbacks.onExtract) != void) { - callbacks.onExtract( - extract_ctx, - git_checkout.dependency_id, - &task.data.git_checkout, - log_level, - ); + if (@TypeOf(callbacks.onExtract) != void) { + @compileError("ctx should be void"); } } @@ -825,8 +869,8 @@ pub inline fn incrementPendingTasks(manager: *PackageManager, count: u32) u32 { return manager.pending_tasks.fetchAdd(count, .monotonic); } -pub inline fn decrementPendingTasks(manager: *PackageManager) u32 { - return manager.pending_tasks.fetchSub(1, .monotonic); +pub inline fn decrementPendingTasks(manager: *PackageManager) void { + _ = manager.pending_tasks.fetchSub(1, .monotonic); } pub fn flushNetworkQueue(this: *PackageManager) void { @@ -934,7 +978,7 @@ pub fn allocGitHubURL(this: *const PackageManager, repository: *const Repository ) catch unreachable; } -pub fn hasCreatedNetworkTask(this: *PackageManager, task_id: u64, is_required: bool) bool { +pub fn hasCreatedNetworkTask(this: *PackageManager, task_id: Task.Id, is_required: bool) bool { const gpe = this.network_dedupe_map.getOrPut(task_id) catch bun.outOfMemory(); // if there's an existing network task that is optional, we want to make it non-optional if this one would be required @@ -946,13 +990,13 @@ pub fn hasCreatedNetworkTask(this: *PackageManager, task_id: u64, is_required: b return gpe.found_existing; } -pub fn isNetworkTaskRequired(this: *const PackageManager, task_id: u64) bool { +pub fn isNetworkTaskRequired(this: *const PackageManager, task_id: Task.Id) bool { return (this.network_dedupe_map.get(task_id) orelse return true).is_required; } pub fn generateNetworkTaskForTarball( this: *PackageManager, - task_id: u64, + task_id: Task.Id, url: string, is_required: bool, dependency_id: DependencyID, @@ -1035,6 +1079,7 @@ const PackageID = bun.install.PackageID; const PackageManifestError = bun.install.PackageManifestError; const 
PatchTask = bun.install.PatchTask; const Repository = bun.install.Repository; +const Store = bun.install.Store; const Task = bun.install.Task; const invalid_package_id = bun.install.invalid_package_id; diff --git a/src/install/PackageManagerTask.zig b/src/install/PackageManagerTask.zig index 7f3ce838b1..c0d17a0d06 100644 --- a/src/install/PackageManagerTask.zig +++ b/src/install/PackageManagerTask.zig @@ -7,64 +7,66 @@ data: Data, status: Status = Status.waiting, threadpool_task: ThreadPool.Task = ThreadPool.Task{ .callback = &callback }, log: logger.Log, -id: u64, +id: Id, err: ?anyerror = null, package_manager: *PackageManager, apply_patch_task: ?*PatchTask = null, next: ?*Task = null, /// An ID that lets us register a callback without keeping the same pointer around -pub fn NewID(comptime Hasher: type, comptime IDType: type) type { - return struct { - pub const Type = IDType; - pub fn forNPMPackage(package_name: string, package_version: Semver.Version) IDType { - var hasher = Hasher.init(0); - hasher.update("npm-package:"); - hasher.update(package_name); - hasher.update("@"); - hasher.update(std.mem.asBytes(&package_version)); - return hasher.final(); - } +pub const Id = enum(u64) { + _, - pub fn forBinLink(package_id: PackageID) IDType { - var hasher = Hasher.init(0); - hasher.update("bin-link:"); - hasher.update(std.mem.asBytes(&package_id)); - return hasher.final(); - } + pub fn get(this: @This()) u64 { + return @intFromEnum(this); + } - pub fn forManifest(name: string) IDType { - var hasher = Hasher.init(0); - hasher.update("manifest:"); - hasher.update(name); - return hasher.final(); - } + pub fn forNPMPackage(package_name: string, package_version: Semver.Version) Id { + var hasher = bun.Wyhash11.init(0); + hasher.update("npm-package:"); + hasher.update(package_name); + hasher.update("@"); + hasher.update(std.mem.asBytes(&package_version)); + return @enumFromInt(hasher.final()); + } - pub fn forTarball(url: string) IDType { - var hasher = Hasher.init(0); - hasher.update("tarball:"); - hasher.update(url); - return hasher.final(); - } + pub fn forBinLink(package_id: PackageID) Id { + var hasher = bun.Wyhash11.init(0); + hasher.update("bin-link:"); + hasher.update(std.mem.asBytes(&package_id)); + return @enumFromInt(hasher.final()); + } - // These cannot change: - // We persist them to the filesystem. - pub fn forGitClone(url: string) IDType { - var hasher = Hasher.init(0); - hasher.update(url); - return @as(u64, 4 << 61) | @as(u64, @as(u61, @truncate(hasher.final()))); - } + pub fn forManifest(name: string) Id { + var hasher = bun.Wyhash11.init(0); + hasher.update("manifest:"); + hasher.update(name); + return @enumFromInt(hasher.final()); + } - pub fn forGitCheckout(url: string, resolved: string) IDType { - var hasher = Hasher.init(0); - hasher.update(url); - hasher.update("@"); - hasher.update(resolved); - return @as(u64, 5 << 61) | @as(u64, @as(u61, @truncate(hasher.final()))); - } - }; -} -pub const Id = NewID(bun.Wyhash11, u64); + pub fn forTarball(url: string) Id { + var hasher = bun.Wyhash11.init(0); + hasher.update("tarball:"); + hasher.update(url); + return @enumFromInt(hasher.final()); + } + + // These cannot change: + // We persist them to the filesystem. 
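Wrapping the hash in a non-exhaustive `enum(u64)` means a `Task.Id` can no longer be mixed up with an arbitrary integer, while the git constructors that follow keep stamping a 3-bit tag into the top bits because those ids are persisted to disk and must stay stable. A sketch of the layout, using `std.hash.Wyhash` as a stand-in for `bun.Wyhash11` (so the hash values differ; the bit layout is the point):

```zig
const std = @import("std");

const Id = enum(u64) {
    _,

    // Truncate the hash to 61 bits, then reserve the top 3 bits as a
    // kind tag (4 = git clone in this sketch), mirroring the diff above.
    fn forGitClone(url: []const u8) Id {
        var hasher = std.hash.Wyhash.init(0);
        hasher.update(url);
        return @enumFromInt(@as(u64, 4 << 61) | @as(u64, @as(u61, @truncate(hasher.final()))));
    }

    fn get(this: Id) u64 {
        return @intFromEnum(this);
    }
};

pub fn main() void {
    const id = Id.forGitClone("https://example.com/repo.git");
    // the top 3 bits always read back as the clone tag
    std.debug.print("tag = {d}\n", .{id.get() >> 61}); // prints 4
}
```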
+ pub fn forGitClone(url: string) Id { + var hasher = bun.Wyhash11.init(0); + hasher.update(url); + return @enumFromInt(@as(u64, 4 << 61) | @as(u64, @as(u61, @truncate(hasher.final())))); + } + + pub fn forGitCheckout(url: string, resolved: string) Id { + var hasher = bun.Wyhash11.init(0); + hasher.update(url); + hasher.update("@"); + hasher.update(resolved); + return @enumFromInt(@as(u64, 5 << 61) | @as(u64, @as(u61, @truncate(hasher.final())))); + } +}; pub fn callback(task: *ThreadPool.Task) void { Output.Source.configureThread(); diff --git a/src/install/bin.zig b/src/install/bin.zig index 34d8f4ccb0..225c2eb075 100644 --- a/src/install/bin.zig +++ b/src/install/bin.zig @@ -563,8 +563,7 @@ pub const Bin = extern struct { // linking each tree. seen: ?*bun.StringHashMap(void), - node_modules: bun.FileDescriptor, - node_modules_path: []const u8, + node_modules_path: *bun.AbsPath(.{}), /// Used for generating relative paths package_name: strings.StringOrTinyString, @@ -692,7 +691,11 @@ pub const Bin = extern struct { return; } - bun.makePath(this.node_modules.stdDir(), ".bin") catch {}; + const node_modules_path_save = this.node_modules_path.save(); + this.node_modules_path.append(".bin"); + bun.makePath(std.fs.cwd(), this.node_modules_path.slice()) catch {}; + node_modules_path_save.restore(); + break :bunx_file bun.sys.File.openatOSPath(bun.invalid_fd, abs_bunx_file, bun.O.WRONLY | bun.O.CREAT | bun.O.TRUNC, 0o664).unwrap() catch |real_err| { this.err = real_err; return; @@ -785,7 +788,11 @@ pub const Bin = extern struct { return; } - bun.makePath(this.node_modules.stdDir(), ".bin") catch {}; + const node_modules_path_save = this.node_modules_path.save(); + this.node_modules_path.append(".bin"); + bun.makePath(std.fs.cwd(), this.node_modules_path.slice()) catch {}; + node_modules_path_save.restore(); + switch (bun.sys.symlink(rel_target, abs_dest)) { .err => |real_error| { // It was just created, no need to delete destination and symlink again @@ -815,7 +822,7 @@ pub const Bin = extern struct { /// uses `this.abs_target_buf` pub fn buildTargetPackageDir(this: *const Linker) []const u8 { - const dest_dir_without_trailing_slash = strings.withoutTrailingSlash(this.node_modules_path); + const dest_dir_without_trailing_slash = strings.withoutTrailingSlash(this.node_modules_path.slice()); var remain = this.abs_target_buf; @@ -834,7 +841,7 @@ pub const Bin = extern struct { } pub fn buildDestinationDir(this: *const Linker, global: bool) []u8 { - const dest_dir_without_trailing_slash = strings.withoutTrailingSlash(this.node_modules_path); + const dest_dir_without_trailing_slash = strings.withoutTrailingSlash(this.node_modules_path.slice()); var remain = this.abs_dest_buf; if (global) { diff --git a/src/install/dependency.zig b/src/install/dependency.zig index 217160edb7..4ea40ac399 100644 --- a/src/install/dependency.zig +++ b/src/install/dependency.zig @@ -165,6 +165,12 @@ pub fn toExternal(this: Dependency) External { return bytes; } +// Needed when a dependency uses workspace: protocol and isn't +// marked with workspace behavior. 
+pub fn isWorkspaceDep(this: *const Dependency) bool { + return this.behavior.isWorkspace() or this.version.tag == .workspace; +} + pub inline fn isSCPLikePath(dependency: string) bool { // Shortest valid expression: h:p if (dependency.len < 3) return false; @@ -1399,6 +1405,14 @@ pub const Behavior = packed struct(u8) { return .eq; } + if (lhs.isWorkspaceOnly() != rhs.isWorkspaceOnly()) { + // ensure isWorkspaceOnly deps are placed at the beginning + return if (lhs.isWorkspaceOnly()) + .lt + else + .gt; + } + if (lhs.isProd() != rhs.isProd()) { return if (lhs.isProd()) .gt diff --git a/src/install/hoisted_install.zig b/src/install/hoisted_install.zig index 79cce4ef2e..8cca648f7c 100644 --- a/src/install/hoisted_install.zig +++ b/src/install/hoisted_install.zig @@ -16,32 +16,9 @@ const ProgressStrings = PackageManager.ProgressStrings; const Bin = install.Bin; const PackageInstaller = PackageManager.PackageInstaller; const Bitset = bun.bit_set.DynamicBitSetUnmanaged; -const TruncatedPackageNameHash = install.TruncatedPackageNameHash; const PackageID = install.PackageID; -const invalid_package_id = install.invalid_package_id; const TreeContext = PackageInstaller.TreeContext; -fn addDependenciesToSet( - names: *std.AutoArrayHashMapUnmanaged(TruncatedPackageNameHash, void), - lockfile: *Lockfile, - dependencies_slice: Lockfile.DependencySlice, -) void { - const begin = dependencies_slice.off; - const end = begin +| dependencies_slice.len; - var dep_id = begin; - while (dep_id < end) : (dep_id += 1) { - const package_id = lockfile.buffers.resolutions.items[dep_id]; - if (package_id == invalid_package_id) continue; - - const dep = lockfile.buffers.dependencies.items[dep_id]; - const entry = names.getOrPut(lockfile.allocator, @truncate(dep.name_hash)) catch bun.outOfMemory(); - if (!entry.found_existing) { - const dependency_slice = lockfile.packages.items(.dependencies)[package_id]; - addDependenciesToSet(names, lockfile, dependency_slice); - } - } -} - pub fn installHoistedPackages( this: *PackageManager, ctx: Command.Context, @@ -49,6 +26,8 @@ pub fn installHoistedPackages( install_root_dependencies: bool, log_level: PackageManager.Options.LogLevel, ) !PackageInstall.Summary { + bun.Analytics.Features.hoisted_bun_install += 1; + const original_trees = this.lockfile.buffers.trees; const original_tree_dep_ids = this.lockfile.buffers.hoisted_dependencies; @@ -182,35 +161,6 @@ pub fn installHoistedPackages( // to make mistakes harder var parts = this.lockfile.packages.slice(); - const trusted_dependencies_from_update_requests: std.AutoArrayHashMapUnmanaged(TruncatedPackageNameHash, void) = trusted_deps: { - - // find all deps originating from --trust packages from cli - var set: std.AutoArrayHashMapUnmanaged(TruncatedPackageNameHash, void) = .{}; - if (this.options.do.trust_dependencies_from_args and this.lockfile.packages.len > 0) { - const root_deps = parts.items(.dependencies)[this.root_package_id.get(this.lockfile, this.workspace_name_hash)]; - var dep_id = root_deps.off; - const end = dep_id +| root_deps.len; - while (dep_id < end) : (dep_id += 1) { - const root_dep = this.lockfile.buffers.dependencies.items[dep_id]; - for (this.update_requests) |request| { - if (request.matches(root_dep, this.lockfile.buffers.string_bytes.items)) { - const package_id = this.lockfile.buffers.resolutions.items[dep_id]; - if (package_id == invalid_package_id) continue; - - const entry = set.getOrPut(this.lockfile.allocator, @truncate(root_dep.name_hash)) catch bun.outOfMemory(); - if (!entry.found_existing) { - 
const dependency_slice = parts.items(.dependencies)[package_id]; - addDependenciesToSet(&set, this.lockfile, dependency_slice); - } - break; - } - } - } - } - - break :trusted_deps set; - }; - break :brk PackageInstaller{ .manager = this, .options = &this.options, @@ -258,7 +208,7 @@ pub fn installHoistedPackages( } break :trees trees; }, - .trusted_dependencies_from_update_requests = trusted_dependencies_from_update_requests, + .trusted_dependencies_from_update_requests = this.findTrustedDependenciesFromUpdateRequests(), .seen_bin_links = bun.StringHashMap(void).init(this.allocator), }; }; @@ -298,7 +248,6 @@ pub fn installHoistedPackages( &installer, .{ .onExtract = PackageInstaller.installEnqueuedPackagesAfterExtraction, - .onPatch = PackageInstaller.installEnqueuedPackagesImpl, .onResolve = {}, .onPackageManifestError = {}, .onPackageDownloadError = {}, @@ -321,7 +270,6 @@ pub fn installHoistedPackages( &installer, .{ .onExtract = PackageInstaller.installEnqueuedPackagesAfterExtraction, - .onPatch = PackageInstaller.installEnqueuedPackagesImpl, .onResolve = {}, .onPackageManifestError = {}, .onPackageDownloadError = {}, @@ -348,7 +296,6 @@ pub fn installHoistedPackages( closure.installer, .{ .onExtract = PackageInstaller.installEnqueuedPackagesAfterExtraction, - .onPatch = PackageInstaller.installEnqueuedPackagesImpl, .onResolve = {}, .onPackageManifestError = {}, .onPackageDownloadError = {}, diff --git a/src/install/install.zig b/src/install/install.zig index e71aae1ad4..32464ef26f 100644 --- a/src/install/install.zig +++ b/src/install/install.zig @@ -16,6 +16,31 @@ pub fn buntaghashbuf_make(buf: *BuntagHashBuf, patch_hash: u64) [:0]u8 { return bunhashtag; } +pub const StorePathFormatter = struct { + str: string, + + pub fn format(this: StorePathFormatter, comptime _: string, _: std.fmt.FormatOptions, writer: anytype) @TypeOf(writer).Error!void { + // if (!this.opts.replace_slashes) { + // try writer.writeAll(this.str); + // return; + // } + + for (this.str) |c| { + switch (c) { + '/' => try writer.writeByte('+'), + '\\' => try writer.writeByte('+'), + else => try writer.writeByte(c), + } + } + } +}; + +pub fn fmtStorePath(str: string) StorePathFormatter { + return .{ + .str = str, + }; +} + // these bytes are skipped // so we just make it repeat bun bun bun bun bun bun bun bun bun // because why not @@ -192,6 +217,7 @@ pub const DependencyInstallContext = struct { pub const TaskCallbackContext = union(enum) { dependency: DependencyID, dependency_install_context: DependencyInstallContext, + isolated_package_install_context: Store.Entry.Id, root_dependency: DependencyID, root_request_id: PackageID, }; @@ -227,6 +253,7 @@ pub const LifecycleScriptSubprocess = @import("./lifecycle_script_runner.zig").L pub const PackageInstall = @import("./PackageInstall.zig").PackageInstall; pub const Repository = @import("./repository.zig").Repository; pub const Resolution = @import("./resolution.zig").Resolution; +pub const Store = @import("./isolated_install/Store.zig").Store; pub const ArrayIdentityContext = @import("../identity_context.zig").ArrayIdentityContext; pub const IdentityContext = @import("../identity_context.zig").IdentityContext; diff --git a/src/install/isolated_install.zig b/src/install/isolated_install.zig new file mode 100644 index 0000000000..5052fa7a50 --- /dev/null +++ b/src/install/isolated_install.zig @@ -0,0 +1,985 @@ +const log = Output.scoped(.IsolatedInstall, false); + +pub fn installIsolatedPackages( + manager: *PackageManager, + command_ctx: Command.Context, + 
install_root_dependencies: bool, + workspace_filters: []const WorkspaceFilter, +) OOM!PackageInstall.Summary { + bun.Analytics.Features.isolated_bun_install += 1; + + const lockfile = manager.lockfile; + + const store: Store = store: { + var timer = std.time.Timer.start() catch unreachable; + const pkgs = lockfile.packages.slice(); + const pkg_dependency_slices = pkgs.items(.dependencies); + const pkg_resolutions = pkgs.items(.resolution); + const pkg_names = pkgs.items(.name); + + const resolutions = lockfile.buffers.resolutions.items; + const dependencies = lockfile.buffers.dependencies.items; + const string_buf = lockfile.buffers.string_bytes.items; + + var nodes: Store.Node.List = .empty; + + const QueuedNode = struct { + parent_id: Store.Node.Id, + dep_id: DependencyID, + pkg_id: PackageID, + }; + + var node_queue: std.fifo.LinearFifo(QueuedNode, .Dynamic) = .init(lockfile.allocator); + defer node_queue.deinit(); + + try node_queue.writeItem(.{ + .parent_id = .invalid, + .dep_id = invalid_dependency_id, + .pkg_id = 0, + }); + + var dep_ids_sort_buf: std.ArrayListUnmanaged(DependencyID) = .empty; + defer dep_ids_sort_buf.deinit(lockfile.allocator); + + // Used by leaves and linked dependencies. They can be deduplicated early + // because peers won't change them. + // + // In the pnpm repo without this map: 772,471 nodes + // and with this map: 314,022 nodes + var early_dedupe: std.AutoHashMapUnmanaged(PackageID, Store.Node.Id) = .empty; + defer early_dedupe.deinit(lockfile.allocator); + + var peer_dep_ids: std.ArrayListUnmanaged(DependencyID) = .empty; + defer peer_dep_ids.deinit(lockfile.allocator); + + var visited_parent_node_ids: std.ArrayListUnmanaged(Store.Node.Id) = .empty; + defer visited_parent_node_ids.deinit(lockfile.allocator); + + // First pass: create full dependency tree with resolved peers + next_node: while (node_queue.readItem()) |entry| { + { + // check for cycles + const nodes_slice = nodes.slice(); + const node_pkg_ids = nodes_slice.items(.pkg_id); + const node_parent_ids = nodes_slice.items(.parent_id); + const node_nodes = nodes_slice.items(.nodes); + + var curr_id = entry.parent_id; + while (curr_id != .invalid) { + if (node_pkg_ids[curr_id.get()] == entry.pkg_id) { + // skip the new node, and add the previously added node to parent so it appears in + // 'node_modules/.bun/parent@version/node_modules' + node_nodes[entry.parent_id.get()].appendAssumeCapacity(curr_id); + continue :next_node; + } + curr_id = node_parent_ids[curr_id.get()]; + } + } + + const node_id: Store.Node.Id = .from(@intCast(nodes.len)); + const pkg_deps = pkg_dependency_slices[entry.pkg_id]; + + var skip_dependencies_of_workspace_node = false; + if (entry.dep_id != invalid_dependency_id) { + const entry_dep = dependencies[entry.dep_id]; + if (pkg_deps.len == 0 or entry_dep.isWorkspaceDep()) dont_dedupe: { + const dedupe_entry = try early_dedupe.getOrPut(lockfile.allocator, entry.pkg_id); + if (dedupe_entry.found_existing) { + const dedupe_node_id = dedupe_entry.value_ptr.*; + + const nodes_slice = nodes.slice(); + const node_nodes = nodes_slice.items(.nodes); + const node_dep_ids = nodes_slice.items(.dep_id); + + const dedupe_dep_id = node_dep_ids[dedupe_node_id.get()]; + const dedupe_dep = dependencies[dedupe_dep_id]; + + if (dedupe_dep.name_hash != entry_dep.name_hash) { + break :dont_dedupe; + } + + if (dedupe_dep.isWorkspaceDep() and entry_dep.isWorkspaceDep()) { + if (dedupe_dep.behavior.isWorkspaceOnly() != entry_dep.behavior.isWorkspaceOnly()) { + // only attach the dependencies to one 
of the workspaces + skip_dependencies_of_workspace_node = true; + break :dont_dedupe; + } + } + + node_nodes[entry.parent_id.get()].appendAssumeCapacity(dedupe_node_id); + continue; + } + + dedupe_entry.value_ptr.* = node_id; + } + } + + try nodes.append(lockfile.allocator, .{ + .pkg_id = entry.pkg_id, + .dep_id = entry.dep_id, + .parent_id = entry.parent_id, + .nodes = if (skip_dependencies_of_workspace_node) .empty else try .initCapacity(lockfile.allocator, pkg_deps.len), + .dependencies = if (skip_dependencies_of_workspace_node) .empty else try .initCapacity(lockfile.allocator, pkg_deps.len), + }); + + const nodes_slice = nodes.slice(); + const node_parent_ids = nodes_slice.items(.parent_id); + const node_dependencies = nodes_slice.items(.dependencies); + const node_peers = nodes_slice.items(.peers); + const node_nodes = nodes_slice.items(.nodes); + + if (entry.parent_id.tryGet()) |parent_id| { + node_nodes[parent_id].appendAssumeCapacity(node_id); + } + + if (skip_dependencies_of_workspace_node) { + continue; + } + + dep_ids_sort_buf.clearRetainingCapacity(); + try dep_ids_sort_buf.ensureUnusedCapacity(lockfile.allocator, pkg_deps.len); + for (pkg_deps.begin()..pkg_deps.end()) |_dep_id| { + const dep_id: DependencyID = @intCast(_dep_id); + dep_ids_sort_buf.appendAssumeCapacity(dep_id); + } + + // TODO: make this sort in an order that allows peers to be resolved last + // and devDependency handling to match `hoistDependency` + std.sort.pdq( + DependencyID, + dep_ids_sort_buf.items, + Lockfile.DepSorter{ + .lockfile = lockfile, + }, + Lockfile.DepSorter.isLessThan, + ); + + peer_dep_ids.clearRetainingCapacity(); + for (dep_ids_sort_buf.items) |dep_id| { + if (Tree.isFilteredDependencyOrWorkspace( + dep_id, + entry.pkg_id, + workspace_filters, + install_root_dependencies, + manager, + lockfile, + )) { + continue; + } + + const pkg_id = resolutions[dep_id]; + const dep = dependencies[dep_id]; + + // TODO: handle duplicate dependencies. should be similar logic + // to what we have for dev dependencies in `hoistDependency` + + if (!dep.behavior.isPeer()) { + // simple case: + // - add it as a dependency + // - queue it + node_dependencies[node_id.get()].appendAssumeCapacity(.{ .dep_id = dep_id, .pkg_id = pkg_id }); + try node_queue.writeItem(.{ + .parent_id = node_id, + .dep_id = dep_id, + .pkg_id = pkg_id, + }); + continue; + } + + try peer_dep_ids.append(lockfile.allocator, dep_id); + } + + next_peer: for (peer_dep_ids.items) |peer_dep_id| { + const resolved_pkg_id, const auto_installed = resolved_pkg_id: { + + // Go through the peer's parents looking for a package with the same name. + // If none is found, use current best version. Parents visited must have + // the package id for the chosen peer marked as a transitive peer. Nodes + // are deduplicated only if their package id and their transitive peer package + // ids are equal. + const peer_dep = dependencies[peer_dep_id]; + + // TODO: double check this + // Start with the current package. A package + // can satisfy its own peers. + var curr_id = node_id; + + visited_parent_node_ids.clearRetainingCapacity(); + while (curr_id != .invalid) { + for (node_dependencies[curr_id.get()].items) |ids| { + const dep = dependencies[ids.dep_id]; + + if (dep.name_hash != peer_dep.name_hash) { + continue; + } + + const res = pkg_resolutions[ids.pkg_id]; + + if (peer_dep.version.tag != .npm or res.tag != .npm) { + // TODO: print warning for this? we don't have a version + // to compare to say if this satisfies or not.
+ break :resolved_pkg_id .{ ids.pkg_id, false }; + } + + const peer_dep_version = peer_dep.version.value.npm.version; + const res_version = res.value.npm.version; + + if (!peer_dep_version.satisfies(res_version, string_buf, string_buf)) { + // TODO: add warning! + } + + break :resolved_pkg_id .{ ids.pkg_id, false }; + } + + const curr_peers = node_peers[curr_id.get()]; + for (curr_peers.list.items) |ids| { + const transitive_peer_dep = dependencies[ids.dep_id]; + + if (transitive_peer_dep.name_hash != peer_dep.name_hash) { + continue; + } + + // A transitive peer with the same name has already passed + // through this node + + if (!ids.auto_installed) { + // The resolution was found here or above. Choose the same + // peer resolution. No need to mark this node or above. + + // TODO: add warning if not satisfies()! + break :resolved_pkg_id .{ ids.pkg_id, false }; + } + + // It didn't find a matching name and was auto-installed + // from somewhere this peer can't reach. Choose best + // version. Only mark all parents if resolution is + // different from this transitive peer. + + if (peer_dep.behavior.isOptionalPeer()) { + // exclude it + continue :next_peer; + } + + const best_version = resolutions[peer_dep_id]; + + if (best_version == ids.pkg_id) { + break :resolved_pkg_id .{ ids.pkg_id, true }; + } + + // add the remaining parent ids + while (curr_id != .invalid) { + try visited_parent_node_ids.append(lockfile.allocator, curr_id); + curr_id = node_parent_ids[curr_id.get()]; + } + + break :resolved_pkg_id .{ best_version, true }; + } + + // TODO: prevent marking workspace and symlink deps with transitive peers + + // add to visited parents after searching for a peer resolution. + // if a node resolves a transitive peer, it can still be deduplicated + try visited_parent_node_ids.append(lockfile.allocator, curr_id); + curr_id = node_parent_ids[curr_id.get()]; + } + + if (peer_dep.behavior.isOptionalPeer()) { + // exclude it + continue; + } + + // choose the current best version + break :resolved_pkg_id .{ resolutions[peer_dep_id], true }; + }; + + bun.debugAssert(resolved_pkg_id != invalid_package_id); + + for (visited_parent_node_ids.items) |visited_parent_id| { + const ctx: Store.Node.TransitivePeer.OrderedArraySetCtx = .{ + .string_buf = string_buf, + .pkg_names = pkg_names, + }; + const peer: Store.Node.TransitivePeer = .{ + .dep_id = peer_dep_id, + .pkg_id = resolved_pkg_id, + .auto_installed = auto_installed, + }; + try node_peers[visited_parent_id.get()].insert(lockfile.allocator, peer, &ctx); + } + + if (visited_parent_node_ids.items.len != 0) { + // visited parents length == 0 means the node satisfied its own + // peer. don't queue.
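Condensed to its core, the walk above climbs from the requesting node toward the root and takes the first same-named dependency it finds as the peer's resolution; only when the walk exhausts the chain does the lockfile's best version get auto-installed. A toy model of that parent walk (the `Node` shape and the two-package example are illustrative, not the real store types):

```zig
const std = @import("std");

const Node = struct {
    parent: ?*const Node,
    // names of the dependencies this node resolves
    deps: []const []const u8,
};

// Walk from `start` toward the root; the first node whose dependencies
// contain `peer_name` decides the peer's resolution. Returns null when
// nothing on the path provides it (the caller then auto-installs the
// lockfile's best version).
fn resolvePeer(start: *const Node, peer_name: []const u8) ?*const Node {
    var curr: ?*const Node = start;
    while (curr) |node| : (curr = node.parent) {
        for (node.deps) |dep_name| {
            if (std.mem.eql(u8, dep_name, peer_name)) return node;
        }
    }
    return null;
}

pub fn main() void {
    const root = Node{ .parent = null, .deps = &.{"react"} };
    const mid = Node{ .parent = &root, .deps = &.{"lodash"} };
    const leaf = Node{ .parent = &mid, .deps = &.{} };
    // "react" is found two levels up; "vue" falls back to auto-install
    std.debug.print("react found: {}\n", .{resolvePeer(&leaf, "react") != null});
    std.debug.print("vue found: {}\n", .{resolvePeer(&leaf, "vue") != null});
}
```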
+ node_dependencies[node_id.get()].appendAssumeCapacity(.{ .dep_id = peer_dep_id, .pkg_id = resolved_pkg_id }); + try node_queue.writeItem(.{ + .parent_id = node_id, + .dep_id = peer_dep_id, + .pkg_id = resolved_pkg_id, + }); + } + } + } + + if (manager.options.log_level.isVerbose()) { + const full_tree_end = timer.read(); + timer.reset(); + Output.prettyErrorln("Resolved peers [{}]", .{bun.fmt.fmtDurationOneDecimal(full_tree_end)}); + } + + const DedupeInfo = struct { + entry_id: Store.Entry.Id, + dep_id: DependencyID, + peers: Store.OrderedArraySet(Store.Node.TransitivePeer, Store.Node.TransitivePeer.OrderedArraySetCtx), + }; + + var dedupe: std.AutoHashMapUnmanaged(PackageID, std.ArrayListUnmanaged(DedupeInfo)) = .empty; + defer dedupe.deinit(lockfile.allocator); + + var res_fmt_buf: std.ArrayList(u8) = .init(lockfile.allocator); + defer res_fmt_buf.deinit(); + + const nodes_slice = nodes.slice(); + const node_pkg_ids = nodes_slice.items(.pkg_id); + const node_dep_ids = nodes_slice.items(.dep_id); + const node_peers: []const Store.Node.Peers = nodes_slice.items(.peers); + const node_nodes = nodes_slice.items(.nodes); + + var store: Store.Entry.List = .empty; + + const QueuedEntry = struct { + node_id: Store.Node.Id, + entry_parent_id: Store.Entry.Id, + }; + var entry_queue: std.fifo.LinearFifo(QueuedEntry, .Dynamic) = .init(lockfile.allocator); + defer entry_queue.deinit(); + + try entry_queue.writeItem(.{ + .node_id = .from(0), + .entry_parent_id = .invalid, + }); + + // Second pass: Deduplicate nodes when the pkg_id and peer set match an existing entry. + next_entry: while (entry_queue.readItem()) |entry| { + const pkg_id = node_pkg_ids[entry.node_id.get()]; + + const dedupe_entry = try dedupe.getOrPut(lockfile.allocator, pkg_id); + if (!dedupe_entry.found_existing) { + dedupe_entry.value_ptr.* = .{}; + } else { + const curr_peers = node_peers[entry.node_id.get()]; + const curr_dep_id = node_dep_ids[entry.node_id.get()]; + + for (dedupe_entry.value_ptr.items) |info| { + if (info.dep_id != invalid_dependency_id and curr_dep_id != invalid_dependency_id) { + const curr_dep = dependencies[curr_dep_id]; + const existing_dep = dependencies[info.dep_id]; + + if (existing_dep.isWorkspaceDep() and curr_dep.isWorkspaceDep()) { + if (existing_dep.behavior.isWorkspaceOnly() != curr_dep.behavior.isWorkspaceOnly()) { + continue; + } + } + } + + const eql_ctx: Store.Node.TransitivePeer.OrderedArraySetCtx = .{ + .string_buf = string_buf, + .pkg_names = pkg_names, + }; + + if (info.peers.eql(&curr_peers, &eql_ctx)) { + // dedupe! 
depend on the already created entry + + const entries = store.slice(); + const entry_dependencies = entries.items(.dependencies); + const entry_parents = entries.items(.parents); + + var parents = &entry_parents[info.entry_id.get()]; + + if (curr_dep_id != invalid_dependency_id and dependencies[curr_dep_id].behavior.isWorkspaceOnly()) { + try parents.append(lockfile.allocator, entry.entry_parent_id); + continue :next_entry; + } + const ctx: Store.Entry.DependenciesOrderedArraySetCtx = .{ + .string_buf = string_buf, + .dependencies = dependencies, + }; + try entry_dependencies[entry.entry_parent_id.get()].insert( + lockfile.allocator, + .{ .entry_id = info.entry_id, .dep_id = curr_dep_id }, + &ctx, + ); + try parents.append(lockfile.allocator, entry.entry_parent_id); + continue :next_entry; + } + } + + // nothing matched - create a new entry + } + + const new_entry_peer_hash: Store.Entry.PeerHash = peer_hash: { + const peers = node_peers[entry.node_id.get()]; + if (peers.len() == 0) { + break :peer_hash .none; + } + var hasher = bun.Wyhash11.init(0); + for (peers.slice()) |peer_ids| { + const pkg_name = pkg_names[peer_ids.pkg_id]; + hasher.update(pkg_name.slice(string_buf)); + const pkg_res = pkg_resolutions[peer_ids.pkg_id]; + res_fmt_buf.clearRetainingCapacity(); + try res_fmt_buf.writer().print("{}", .{pkg_res.fmt(string_buf, .posix)}); + hasher.update(res_fmt_buf.items); + } + break :peer_hash .from(hasher.final()); + }; + + const new_entry_dep_id = node_dep_ids[entry.node_id.get()]; + + const new_entry_is_root = new_entry_dep_id == invalid_dependency_id; + const new_entry_is_workspace = !new_entry_is_root and dependencies[new_entry_dep_id].isWorkspaceDep(); + + const new_entry_dependencies: Store.Entry.Dependencies = if (dedupe_entry.found_existing and new_entry_is_workspace) + .empty + else + try .initCapacity(lockfile.allocator, node_nodes[entry.node_id.get()].items.len); + + var new_entry_parents: std.ArrayListUnmanaged(Store.Entry.Id) = try .initCapacity(lockfile.allocator, 1); + new_entry_parents.appendAssumeCapacity(entry.entry_parent_id); + + const new_entry: Store.Entry = .{ + .node_id = entry.node_id, + .dependencies = new_entry_dependencies, + .parents = new_entry_parents, + .peer_hash = new_entry_peer_hash, + }; + + const new_entry_id: Store.Entry.Id = .from(@intCast(store.len)); + try store.append(lockfile.allocator, new_entry); + + if (entry.entry_parent_id.tryGet()) |entry_parent_id| skip_adding_dependency: { + if (new_entry_dep_id != invalid_dependency_id and dependencies[new_entry_dep_id].behavior.isWorkspaceOnly()) { + // skip implicit workspace dependencies on the root. 
+ break :skip_adding_dependency; + } + + const entries = store.slice(); + const entry_dependencies = entries.items(.dependencies); + const ctx: Store.Entry.DependenciesOrderedArraySetCtx = .{ + .string_buf = string_buf, + .dependencies = dependencies, + }; + try entry_dependencies[entry_parent_id].insert( + lockfile.allocator, + .{ .entry_id = new_entry_id, .dep_id = new_entry_dep_id }, + &ctx, + ); + } + + try dedupe_entry.value_ptr.append(lockfile.allocator, .{ + .entry_id = new_entry_id, + .dep_id = new_entry_dep_id, + .peers = node_peers[entry.node_id.get()], + }); + + for (node_nodes[entry.node_id.get()].items) |node_id| { + try entry_queue.writeItem(.{ + .node_id = node_id, + .entry_parent_id = new_entry_id, + }); + } + } + + if (manager.options.log_level.isVerbose()) { + const dedupe_end = timer.read(); + Output.prettyErrorln("Created store [{}]", .{bun.fmt.fmtDurationOneDecimal(dedupe_end)}); + } + + break :store .{ + .entries = store, + .nodes = nodes, + }; + }; + + const cwd = FD.cwd(); + + const root_node_modules_dir, const is_new_root_node_modules, const bun_modules_dir, const is_new_bun_modules = root_dirs: { + const node_modules_path = bun.OSPathLiteral("node_modules"); + const bun_modules_path = bun.OSPathLiteral("node_modules/" ++ Store.modules_dir_name); + const existing_root_node_modules_dir = sys.openatOSPath(cwd, node_modules_path, bun.O.DIRECTORY | bun.O.RDONLY, 0o755).unwrap() catch { + sys.mkdirat(cwd, node_modules_path, 0o755).unwrap() catch |err| { + Output.err(err, "failed to create the './node_modules' directory", .{}); + Global.exit(1); + }; + + sys.mkdirat(cwd, bun_modules_path, 0o755).unwrap() catch |err| { + Output.err(err, "failed to create the './node_modules/.bun' directory", .{}); + Global.exit(1); + }; + + const new_root_node_modules_dir = sys.openatOSPath(cwd, node_modules_path, bun.O.DIRECTORY | bun.O.RDONLY, 0o755).unwrap() catch |err| { + Output.err(err, "failed to open the './node_modules' directory", .{}); + Global.exit(1); + }; + + const new_bun_modules_dir = sys.openatOSPath(cwd, bun_modules_path, bun.O.DIRECTORY | bun.O.RDONLY, 0o755).unwrap() catch |err| { + Output.err(err, "failed to open the './node_modules/.bun' directory", .{}); + Global.exit(1); + }; + + break :root_dirs .{ + new_root_node_modules_dir, + true, + new_bun_modules_dir, + true, + }; + }; + + const existing_bun_modules_dir = sys.openatOSPath(cwd, bun_modules_path, bun.O.DIRECTORY | bun.O.RDONLY, 0o755).unwrap() catch { + sys.mkdirat(cwd, bun_modules_path, 0o755).unwrap() catch |err| { + Output.err(err, "failed to create the './node_modules/.bun' directory", .{}); + Global.exit(1); + }; + + const new_bun_modules_dir = sys.openatOSPath(cwd, bun_modules_path, bun.O.DIRECTORY | bun.O.RDONLY, 0o755).unwrap() catch |err| { + Output.err(err, "failed to open the './node_modules/.bun' directory", .{}); + Global.exit(1); + }; + + break :root_dirs .{ + existing_root_node_modules_dir, + false, + new_bun_modules_dir, + true, + }; + }; + + break :root_dirs .{ + existing_root_node_modules_dir, + false, + existing_bun_modules_dir, + false, + }; + }; + _ = root_node_modules_dir; + _ = is_new_root_node_modules; + _ = bun_modules_dir; + // _ = is_new_bun_modules; + + { + var root_node: *Progress.Node = undefined; + // var download_node: Progress.Node = undefined; + var install_node: Progress.Node = undefined; + var scripts_node: Progress.Node = undefined; + var progress = &manager.progress; + + if (manager.options.log_level.showProgress()) { + progress.supports_ansi_escape_codes = 
Output.enable_ansi_colors_stderr; + root_node = progress.start("", 0); + // download_node = root_node.start(ProgressStrings.download(), 0); + install_node = root_node.start(ProgressStrings.install(), store.entries.len); + scripts_node = root_node.start(ProgressStrings.script(), 0); + + manager.downloads_node = null; + manager.scripts_node = &scripts_node; + } + + const nodes_slice = store.nodes.slice(); + const node_pkg_ids = nodes_slice.items(.pkg_id); + const node_dep_ids = nodes_slice.items(.dep_id); + + const entries = store.entries.slice(); + const entry_node_ids = entries.items(.node_id); + const entry_steps = entries.items(.step); + const entry_dependencies = entries.items(.dependencies); + + const string_buf = lockfile.buffers.string_bytes.items; + + const pkgs = lockfile.packages.slice(); + const pkg_names = pkgs.items(.name); + const pkg_name_hashes = pkgs.items(.name_hash); + const pkg_resolutions = pkgs.items(.resolution); + + var seen_entry_ids: std.AutoHashMapUnmanaged(Store.Entry.Id, void) = .empty; + defer seen_entry_ids.deinit(lockfile.allocator); + try seen_entry_ids.ensureTotalCapacity(lockfile.allocator, @intCast(store.entries.len)); + + // TODO: delete + var seen_workspace_ids: std.AutoHashMapUnmanaged(PackageID, void) = .empty; + defer seen_workspace_ids.deinit(lockfile.allocator); + + var installer: Store.Installer = .{ + .lockfile = lockfile, + .manager = manager, + .command_ctx = command_ctx, + .installed = try .initEmpty(manager.allocator, lockfile.packages.len), + .install_node = if (manager.options.log_level.showProgress()) &install_node else null, + .scripts_node = if (manager.options.log_level.showProgress()) &scripts_node else null, + .store = &store, + .preallocated_tasks = .init(bun.default_allocator), + .trusted_dependencies_mutex = .{}, + .trusted_dependencies_from_update_requests = manager.findTrustedDependenciesFromUpdateRequests(), + }; + + // add the pending task count upfront + _ = manager.incrementPendingTasks(@intCast(store.entries.len)); + + for (0..store.entries.len) |_entry_id| { + const entry_id: Store.Entry.Id = .from(@intCast(_entry_id)); + + const node_id = entry_node_ids[entry_id.get()]; + const pkg_id = node_pkg_ids[node_id.get()]; + + const pkg_name = pkg_names[pkg_id]; + const pkg_name_hash = pkg_name_hashes[pkg_id]; + const pkg_res: Resolution = pkg_resolutions[pkg_id]; + + switch (pkg_res.tag) { + else => { + // this is `uninitialized` or `single_file_module`. + bun.debugAssert(false); + entry_steps[entry_id.get()].store(.done, .monotonic); + installer.onTaskComplete(entry_id, .skipped); + continue; + }, + .root => { + if (entry_id == .root) { + entry_steps[entry_id.get()].store(.symlink_dependencies, .monotonic); + installer.startTask(entry_id); + continue; + } + entry_steps[entry_id.get()].store(.done, .monotonic); + installer.onTaskComplete(entry_id, .skipped); + continue; + }, + .workspace => { + // if injected=true this might be false + if (!(try seen_workspace_ids.getOrPut(lockfile.allocator, pkg_id)).found_existing) { + entry_steps[entry_id.get()].store(.symlink_dependencies, .monotonic); + installer.startTask(entry_id); + continue; + } + entry_steps[entry_id.get()].store(.done, .monotonic); + installer.onTaskComplete(entry_id, .success); + continue; + }, + .symlink => { + // no installation required, will only need to be linked to packages that depend on it. 
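The surrounding switch boils down to a per-entry decision table keyed on the resolution tag: roots and workspaces only symlink their dependencies, symlinks are handled by their dependents, folders are always re-linked, and remote packages install from cache or get queued for download. Roughly, with tag and plan names simplified from the real code:

```zig
const std = @import("std");

const ResolutionTag = enum { root, workspace, symlink, folder, npm, git, remote_tarball };

const Plan = enum { symlink_dependencies_only, skip, hardlink_from_cache, fetch_then_install };

// Condensed decision table: every store entry picks an install plan from
// its resolution tag before any work is queued.
fn plan(tag: ResolutionTag, present_in_cache: bool) Plan {
    return switch (tag) {
        .root, .workspace => .symlink_dependencies_only,
        .symlink => .skip, // linked later by its dependents
        .folder => .hardlink_from_cache, // folders are always re-linked
        .npm, .git, .remote_tarball => if (present_in_cache) .hardlink_from_cache else .fetch_then_install,
    };
}

pub fn main() void {
    std.debug.print("{s}\n", .{@tagName(plan(.npm, false))}); // fetch_then_install
    std.debug.print("{s}\n", .{@tagName(plan(.workspace, true))}); // symlink_dependencies_only
}
```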
+ bun.debugAssert(entry_dependencies[entry_id.get()].list.items.len == 0); + entry_steps[entry_id.get()].store(.done, .monotonic); + installer.onTaskComplete(entry_id, .skipped); + continue; + }, + .folder => { + // folders are always hardlinked to keep them up-to-date + installer.startTask(entry_id); + continue; + }, + + inline .npm, + .git, + .github, + .local_tarball, + .remote_tarball, + => |pkg_res_tag| { + const patch_info = try installer.packagePatchInfo(pkg_name, pkg_name_hash, &pkg_res); + + const needs_install = + manager.options.enable.force_install or + is_new_bun_modules or + patch_info == .remove or + needs_install: { + var store_path: bun.AbsPath(.{}) = .initTopLevelDir(); + defer store_path.deinit(); + installer.appendStorePath(&store_path, entry_id); + const exists = sys.existsZ(store_path.sliceZ()); + + break :needs_install switch (patch_info) { + .none => !exists, + // checked above + .remove => unreachable, + .patch => |patch| { + var hash_buf: install.BuntagHashBuf = undefined; + const hash = install.buntaghashbuf_make(&hash_buf, patch.contents_hash); + var patch_tag_path: bun.AbsPath(.{}) = .initTopLevelDir(); + defer patch_tag_path.deinit(); + installer.appendStorePath(&patch_tag_path, entry_id); + patch_tag_path.append(hash); + break :needs_install !sys.existsZ(patch_tag_path.sliceZ()); + }, + }; + }; + + if (!needs_install) { + entry_steps[entry_id.get()].store(.done, .monotonic); + installer.onTaskComplete(entry_id, .skipped); + continue; + } + + var pkg_cache_dir_subpath: bun.RelPath(.{ .sep = .auto }) = .from(switch (pkg_res_tag) { + .npm => manager.cachedNPMPackageFolderName(pkg_name.slice(string_buf), pkg_res.value.npm.version, patch_info.contentsHash()), + .git => manager.cachedGitFolderName(&pkg_res.value.git, patch_info.contentsHash()), + .github => manager.cachedGitHubFolderName(&pkg_res.value.github, patch_info.contentsHash()), + .local_tarball => manager.cachedTarballFolderName(pkg_res.value.local_tarball, patch_info.contentsHash()), + .remote_tarball => manager.cachedTarballFolderName(pkg_res.value.remote_tarball, patch_info.contentsHash()), + + else => comptime unreachable, + }); + defer pkg_cache_dir_subpath.deinit(); + + const cache_dir, const cache_dir_path = manager.getCacheDirectoryAndAbsPath(); + defer cache_dir_path.deinit(); + + const missing_from_cache = switch (manager.getPreinstallState(pkg_id)) { + .done => false, + else => missing_from_cache: { + if (patch_info == .none) { + const exists = switch (pkg_res_tag) { + .npm => exists: { + var cache_dir_path_save = pkg_cache_dir_subpath.save(); + defer cache_dir_path_save.restore(); + pkg_cache_dir_subpath.append("package.json"); + break :exists sys.existsAt(cache_dir, pkg_cache_dir_subpath.sliceZ()); + }, + else => sys.directoryExistsAt(cache_dir, pkg_cache_dir_subpath.sliceZ()).unwrapOr(false), + }; + if (exists) { + manager.setPreinstallState(pkg_id, installer.lockfile, .done); + } + break :missing_from_cache !exists; + } + + // TODO: why does this look like it will never work? 
+ break :missing_from_cache true; + }, + }; + + if (!missing_from_cache) { + installer.startTask(entry_id); + continue; + } + + const ctx: install.TaskCallbackContext = .{ + .isolated_package_install_context = entry_id, + }; + + const dep_id = node_dep_ids[node_id.get()]; + const dep = lockfile.buffers.dependencies.items[dep_id]; + switch (pkg_res_tag) { + .npm => { + manager.enqueuePackageForDownload( + pkg_name.slice(string_buf), + dep_id, + pkg_id, + pkg_res.value.npm.version, + pkg_res.value.npm.url.slice(string_buf), + ctx, + patch_info.nameAndVersionHash(), + ) catch |err| switch (err) { + error.OutOfMemory => |oom| return oom, + error.InvalidURL => { + Output.err(err, "failed to enqueue package for download: {s}@{}", .{ + pkg_name.slice(string_buf), + pkg_res.fmt(string_buf, .auto), + }); + Output.flush(); + if (manager.options.enable.fail_early) { + Global.exit(1); + } + entry_steps[entry_id.get()].store(.done, .monotonic); + installer.onTaskComplete(entry_id, .fail); + continue; + }, + }; + }, + .git => { + manager.enqueueGitForCheckout( + dep_id, + dep.name.slice(string_buf), + &pkg_res, + ctx, + patch_info.nameAndVersionHash(), + ); + }, + .github => { + const url = manager.allocGitHubURL(&pkg_res.value.git); + defer manager.allocator.free(url); + manager.enqueueTarballForDownload( + dep_id, + pkg_id, + url, + ctx, + patch_info.nameAndVersionHash(), + ) catch |err| switch (err) { + error.OutOfMemory => bun.outOfMemory(), + error.InvalidURL => { + Output.err(err, "failed to enqueue github package for download: {s}@{}", .{ + pkg_name.slice(string_buf), + pkg_res.fmt(string_buf, .auto), + }); + Output.flush(); + if (manager.options.enable.fail_early) { + Global.exit(1); + } + entry_steps[entry_id.get()].store(.done, .monotonic); + installer.onTaskComplete(entry_id, .fail); + continue; + }, + }; + }, + .local_tarball => { + manager.enqueueTarballForReading( + dep_id, + dep.name.slice(string_buf), + &pkg_res, + ctx, + ); + }, + .remote_tarball => { + manager.enqueueTarballForDownload( + dep_id, + pkg_id, + pkg_res.value.remote_tarball.slice(string_buf), + ctx, + patch_info.nameAndVersionHash(), + ) catch |err| switch (err) { + error.OutOfMemory => bun.outOfMemory(), + error.InvalidURL => { + Output.err(err, "failed to enqueue tarball for download: {s}@{}", .{ + pkg_name.slice(string_buf), + pkg_res.fmt(string_buf, .auto), + }); + Output.flush(); + if (manager.options.enable.fail_early) { + Global.exit(1); + } + entry_steps[entry_id.get()].store(.done, .monotonic); + installer.onTaskComplete(entry_id, .fail); + continue; + }, + }; + }, + else => comptime unreachable, + } + }, + } + } + + if (manager.pendingTaskCount() > 0) { + const Wait = struct { + installer: *Store.Installer, + manager: *PackageManager, + err: ?anyerror = null, + + pub fn isDone(wait: *@This()) bool { + wait.manager.runTasks( + *Store.Installer, + wait.installer, + .{ + .onExtract = Store.Installer.onPackageExtracted, + .onResolve = {}, + .onPackageManifestError = {}, + .onPackageDownloadError = {}, + }, + true, + wait.manager.options.log_level, + ) catch |err| { + wait.err = err; + return true; + }; + + return wait.manager.pendingTaskCount() == 0; + } + }; + + var wait: Wait = .{ + .manager = manager, + .installer = &installer, + }; + + manager.sleepUntil(&wait, &Wait.isDone); + + if (wait.err) |err| { + Output.err(err, "failed to install packages", .{}); + Global.exit(1); + } + } + + if (manager.options.log_level.showProgress()) { + progress.root.end(); + progress.* = .{}; + } + + if (comptime 
Environment.ci_assert) { + var done = true; + next_entry: for (store.entries.items(.step), 0..) |entry_step, _entry_id| { + const entry_id: Store.Entry.Id = .from(@intCast(_entry_id)); + const step = entry_step.load(.monotonic); + + if (step == .done) { + continue; + } + + done = false; + + log("entry not done: {d}, {s}\n", .{ entry_id, @tagName(step) }); + + const deps = store.entries.items(.dependencies)[entry_id.get()]; + for (deps.slice()) |dep| { + const dep_step = entry_steps[dep.entry_id.get()].load(.monotonic); + if (dep_step != .done) { + log(", parents:\n - ", .{}); + const parent_ids = Store.Entry.debugGatherAllParents(entry_id, installer.store); + for (parent_ids) |parent_id| { + if (parent_id == .root) { + log("root ", .{}); + } else { + log("{d} ", .{parent_id.get()}); + } + } + + log("\n", .{}); + continue :next_entry; + } + } + + log(" and is able to run\n", .{}); + } + + bun.debugAssert(done); + } + + installer.summary.successfully_installed = installer.installed; + + return installer.summary; + } +} + +// @sortImports + +const std = @import("std"); + +const bun = @import("bun"); +const Environment = bun.Environment; +const FD = bun.FD; +const Global = bun.Global; +const OOM = bun.OOM; +const Output = bun.Output; +const Progress = bun.Progress; +const sys = bun.sys; +const Command = bun.CLI.Command; + +const install = bun.install; +const DependencyID = install.DependencyID; +const PackageID = install.PackageID; +const PackageInstall = install.PackageInstall; +const Resolution = install.Resolution; +const Store = install.Store; +const invalid_dependency_id = install.invalid_dependency_id; +const invalid_package_id = install.invalid_package_id; + +const Lockfile = install.Lockfile; +const Tree = Lockfile.Tree; + +const PackageManager = install.PackageManager; +const ProgressStrings = PackageManager.ProgressStrings; +const WorkspaceFilter = PackageManager.WorkspaceFilter; diff --git a/src/install/isolated_install/Hardlinker.zig b/src/install/isolated_install/Hardlinker.zig new file mode 100644 index 0000000000..f3b2a968f9 --- /dev/null +++ b/src/install/isolated_install/Hardlinker.zig @@ -0,0 +1,128 @@ +pub const Hardlinker = struct { + src_dir: FD, + src: bun.AbsPath(.{ .sep = .auto, .unit = .os }), + dest: bun.RelPath(.{ .sep = .auto, .unit = .os }), + + pub fn link(this: *Hardlinker, skip_dirnames: []const bun.OSPathSlice) OOM!sys.Maybe(void) { + var walker: Walker = try .walk( + this.src_dir, + bun.default_allocator, + &.{}, + skip_dirnames, + ); + defer walker.deinit(); + + if (comptime Environment.isWindows) { + while (switch (walker.next()) { + .result => |res| res, + .err => |err| return .initErr(err), + }) |entry| { + var src_save = this.src.save(); + defer src_save.restore(); + + this.src.append(entry.path); + + var dest_save = this.dest.save(); + defer dest_save.restore(); + + this.dest.append(entry.path); + + switch (entry.kind) { + .directory => { + FD.cwd().makePath(u16, this.dest.sliceZ()) catch {}; + }, + .file => { + switch (sys.link(u16, this.src.sliceZ(), this.dest.sliceZ())) { + .result => {}, + .err => |link_err1| switch (link_err1.getErrno()) { + .UV_EEXIST, + .EXIST, + => { + _ = sys.unlinkW(this.dest.sliceZ()); + switch (sys.link(u16, this.src.sliceZ(), this.dest.sliceZ())) { + .result => {}, + .err => |link_err2| return .initErr(link_err2), + } + }, + .UV_ENOENT, + .NOENT, + => { + const dest_parent = this.dest.dirname() orelse { + return .initErr(link_err1); + }; + + FD.cwd().makePath(u16, dest_parent) catch {}; + switch (sys.link(u16, 
this.src.sliceZ(), this.dest.sliceZ())) { + .result => {}, + .err => |link_err2| return .initErr(link_err2), + } + }, + else => return .initErr(link_err1), + }, + } + }, + else => {}, + } + } + + return .success; + } + + while (switch (walker.next()) { + .result => |res| res, + .err => |err| return .initErr(err), + }) |entry| { + var dest_save = this.dest.save(); + defer dest_save.restore(); + + this.dest.append(entry.path); + + switch (entry.kind) { + .directory => { + FD.cwd().makePath(u8, this.dest.sliceZ()) catch {}; + }, + .file => { + switch (sys.linkatZ(entry.dir, entry.basename, FD.cwd(), this.dest.sliceZ())) { + .result => {}, + .err => |link_err1| { + switch (link_err1.getErrno()) { + .EXIST => { + FD.cwd().deleteTree(this.dest.slice()) catch {}; + switch (sys.linkatZ(entry.dir, entry.basename, FD.cwd(), this.dest.sliceZ())) { + .result => {}, + .err => |link_err2| return .initErr(link_err2), + } + }, + .NOENT => { + const dest_parent = this.dest.dirname() orelse { + return .initErr(link_err1); + }; + + FD.cwd().makePath(u8, dest_parent) catch {}; + switch (sys.linkatZ(entry.dir, entry.basename, FD.cwd(), this.dest.sliceZ())) { + .result => {}, + .err => |link_err2| return .initErr(link_err2), + } + }, + else => return .initErr(link_err1), + } + }, + } + }, + else => {}, + } + } + + return .success; + } +}; + +// @sortImports + +const Walker = @import("../../walker_skippable.zig"); + +const bun = @import("bun"); +const Environment = bun.Environment; +const FD = bun.FD; +const OOM = bun.OOM; +const sys = bun.sys; diff --git a/src/install/isolated_install/Installer.zig b/src/install/isolated_install/Installer.zig new file mode 100644 index 0000000000..2209558e77 --- /dev/null +++ b/src/install/isolated_install/Installer.zig @@ -0,0 +1,1139 @@ +pub const Installer = struct { + trusted_dependencies_mutex: bun.Mutex, + // this is not const for `lockfile.trusted_dependencies` + lockfile: *Lockfile, + + summary: PackageInstall.Summary = .{ .successfully_installed = .empty }, + installed: Bitset, + install_node: ?*Progress.Node, + scripts_node: ?*Progress.Node, + + manager: *PackageManager, + command_ctx: Command.Context, + + store: *const Store, + + tasks: bun.UnboundedQueue(Task, .next) = .{}, + preallocated_tasks: Task.Preallocated, + + trusted_dependencies_from_update_requests: std.AutoArrayHashMapUnmanaged(TruncatedPackageNameHash, void), + + pub fn deinit(this: *const Installer) void { + this.trusted_dependencies_from_update_requests.deinit(this.lockfile.allocator); + } + + pub fn startTask(this: *Installer, entry_id: Store.Entry.Id) void { + const task = this.preallocated_tasks.get(); + + task.* = .{ + .entry_id = entry_id, + .installer = this, + }; + + this.manager.thread_pool.schedule(.from(&task.task)); + } + + pub fn onPackageExtracted(this: *Installer, task_id: install.Task.Id) void { + if (this.manager.task_queue.fetchRemove(task_id)) |removed| { + for (removed.value.items) |install_ctx| { + const entry_id = install_ctx.isolated_package_install_context; + this.startTask(entry_id); + } + } + } + + pub fn onTaskFail(this: *Installer, entry_id: Store.Entry.Id, err: Task.Error) void { + const string_buf = this.lockfile.buffers.string_bytes.items; + + const entries = this.store.entries.slice(); + const entry_node_ids = entries.items(.node_id); + + const nodes = this.store.nodes.slice(); + const node_pkg_ids = nodes.items(.pkg_id); + + const pkgs = this.lockfile.packages.slice(); + const pkg_names = pkgs.items(.name); + const pkg_resolutions = pkgs.items(.resolution); + + const 
node_id = entry_node_ids[entry_id.get()]; + const pkg_id = node_pkg_ids[node_id.get()]; + + const pkg_name = pkg_names[pkg_id]; + const pkg_res = pkg_resolutions[pkg_id]; + + switch (err) { + .link_package => |link_err| { + Output.err(link_err, "failed to link package: {s}@{}", .{ + pkg_name.slice(string_buf), + pkg_res.fmt(string_buf, .auto), + }); + }, + .symlink_dependencies => |symlink_err| { + Output.err(symlink_err, "failed to symlink dependencies for package: {s}@{}", .{ + pkg_name.slice(string_buf), + pkg_res.fmt(string_buf, .auto), + }); + }, + else => {}, + } + Output.flush(); + + // attempt deleting the package so the next install will install it again + switch (pkg_res.tag) { + .uninitialized, + .single_file_module, + .root, + .workspace, + .symlink, + => {}, + + _ => {}, + + // to be safe make sure we only delete packages in the store + .npm, + .git, + .github, + .local_tarball, + .remote_tarball, + .folder, + => { + var store_path: bun.RelPath(.{ .sep = .auto }) = .init(); + defer store_path.deinit(); + + store_path.appendFmt("node_modules/{}", .{ + Store.Entry.fmtStorePath(entry_id, this.store, this.lockfile), + }); + + _ = sys.unlink(store_path.sliceZ()); + }, + } + + if (this.manager.options.enable.fail_early) { + Global.exit(1); + } + + this.summary.fail += 1; + + this.decrementPendingTasks(entry_id); + this.resumeUnblockedTasks(); + } + + pub fn decrementPendingTasks(this: *Installer, entry_id: Store.Entry.Id) void { + _ = entry_id; + this.manager.decrementPendingTasks(); + } + + pub fn onTaskBlocked(this: *Installer, entry_id: Store.Entry.Id) void { + + // race: a task decides it is blocked because one of its dependencies has not finished. + // before the task can mark itself as blocked, the dependency finishes its install, + // causing the task to never finish because resumeUnblockedTasks is called before + // its state is set to blocked. + // + // fix: check if the task is unblocked after the task returns blocked, and only set/unset + // blocked from the main thread.
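A reduced model of that fix: the worker merely proposes "blocked", and the main thread re-checks the dependency state before committing it, so a dependency that finished in the gap flips the entry straight to a resumable step instead. The types below are simplified stand-ins for the real entry state:

```zig
const std = @import("std");

const Step = enum(u8) { linking, blocked, resuming, done };

// Only this main-thread function decides whether an entry actually stays
// blocked; workers never write the blocked state themselves, so a
// dependency finishing in between cannot strand the task.
fn onTaskBlocked(step: *std.atomic.Value(Step), deps_done: *const std.atomic.Value(bool)) void {
    if (deps_done.load(.monotonic)) {
        // the dependency finished while the worker was deciding; resume
        step.store(.resuming, .monotonic);
        return;
    }
    step.store(.blocked, .monotonic);
}

pub fn main() void {
    var step = std.atomic.Value(Step).init(.linking);
    var deps_done = std.atomic.Value(bool).init(true);
    onTaskBlocked(&step, &deps_done);
    std.debug.print("{s}\n", .{@tagName(step.load(.monotonic))}); // resuming
}
```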
+ + var parent_dedupe: std.AutoArrayHashMap(Store.Entry.Id, void) = .init(bun.default_allocator); + defer parent_dedupe.deinit(); + + if (this.isTaskUnblocked(entry_id, &parent_dedupe)) { + this.store.entries.items(.step)[entry_id.get()].store(.symlink_dependency_binaries, .monotonic); + this.startTask(entry_id); + return; + } + + this.store.entries.items(.step)[entry_id.get()].store(.blocked, .monotonic); + } + + fn isTaskUnblocked(this: *Installer, entry_id: Store.Entry.Id, parent_dedupe: *std.AutoArrayHashMap(Store.Entry.Id, void)) bool { + const entries = this.store.entries.slice(); + const entry_deps = entries.items(.dependencies); + const entry_steps = entries.items(.step); + + const deps = entry_deps[entry_id.get()]; + for (deps.slice()) |dep| { + if (entry_steps[dep.entry_id.get()].load(.monotonic) != .done) { + parent_dedupe.clearRetainingCapacity(); + if (this.store.isCycle(entry_id, dep.entry_id, parent_dedupe)) { + continue; + } + + return false; + } + } + + return true; + } + + pub fn onTaskComplete(this: *Installer, entry_id: Store.Entry.Id, state: enum { success, skipped, fail }) void { + if (comptime Environment.ci_assert) { + bun.assertWithLocation(this.store.entries.items(.step)[entry_id.get()].load(.monotonic) == .done, @src()); + } + + this.decrementPendingTasks(entry_id); + this.resumeUnblockedTasks(); + + if (this.install_node) |node| { + node.completeOne(); + } + + switch (state) { + .success => { + this.summary.success += 1; + }, + .skipped => { + this.summary.skipped += 1; + }, + .fail => { + this.summary.fail += 1; + return; + }, + } + + const pkg_id = pkg_id: { + if (entry_id == .root) { + return; + } + + const node_id = this.store.entries.items(.node_id)[entry_id.get()]; + const nodes = this.store.nodes.slice(); + + const dep_id = nodes.items(.dep_id)[node_id.get()]; + + if (dep_id == invalid_dependency_id) { + // should be covered by `entry_id == .root` above, but + // just in case + return; + } + + const dep = this.lockfile.buffers.dependencies.items[dep_id]; + + if (dep.behavior.isWorkspaceOnly()) { + return; + } + + break :pkg_id nodes.items(.pkg_id)[node_id.get()]; + }; + + const is_duplicate = this.installed.isSet(pkg_id); + this.summary.success += @intFromBool(!is_duplicate); + this.installed.set(pkg_id); + } + + // This function runs only on the main thread.
The installer's task threads + // will be changing values in `entry_step`, but the blocked state is only + // set on the main thread, allowing the code between + // `entry_steps[entry_id.get()].load(.monotonic)` + // and + // `entry_steps[entry_id.get()].store(.symlink_dependency_binaries, .monotonic)` + // to run without the step changing underneath it. + pub fn resumeUnblockedTasks(this: *Installer) void { + const entries = this.store.entries.slice(); + const entry_steps = entries.items(.step); + + var parent_dedupe: std.AutoArrayHashMap(Store.Entry.Id, void) = .init(bun.default_allocator); + defer parent_dedupe.deinit(); + + for (0..this.store.entries.len) |_entry_id| { + const entry_id: Store.Entry.Id = .from(@intCast(_entry_id)); + + const entry_step = entry_steps[entry_id.get()].load(.monotonic); + if (entry_step != .blocked) { + continue; + } + + if (!this.isTaskUnblocked(entry_id, &parent_dedupe)) { + continue; + } + + entry_steps[entry_id.get()].store(.symlink_dependency_binaries, .monotonic); + this.startTask(entry_id); + } + } + + pub const Task = struct { + const Preallocated = bun.HiveArray(Task, 128).Fallback; + + entry_id: Store.Entry.Id, + installer: *Installer, + + task: ThreadPool.Task = .{ .callback = &callback }, + next: ?*Task = null, + + result: Result = .none, + + const Result = union(enum) { + none, + err: Error, + blocked, + done, + }; + + const Error = union(Step) { + link_package: sys.Error, + symlink_dependencies: sys.Error, + check_if_blocked, + symlink_dependency_binaries, + run_preinstall: anyerror, + binaries: anyerror, + @"run (post)install and (pre/post)prepare": anyerror, + done, + blocked, + + pub fn clone(this: *const Error, allocator: std.mem.Allocator) Error { + return switch (this.*) { + .link_package => |err| .{ .link_package = err.clone(allocator) }, + .symlink_dependencies => |err| .{ .symlink_dependencies = err.clone(allocator) }, + .check_if_blocked => .check_if_blocked, + .symlink_dependency_binaries => .symlink_dependency_binaries, + .run_preinstall => |err| .{ .run_preinstall = err }, + .binaries => |err| .{ .binaries = err }, + .@"run (post)install and (pre/post)prepare" => |err| .{ .@"run (post)install and (pre/post)prepare" = err }, + .done => .done, + .blocked => .blocked, + }; + } + }; + + pub const Step = enum(u8) { + link_package, + symlink_dependencies, + + check_if_blocked, + + // blocked can only happen here + + symlink_dependency_binaries, + run_preinstall, + + // pause here while preinstall runs + + binaries, + @"run (post)install and (pre/post)prepare", + + // pause again while remaining scripts run.
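+ // + // Happy path through the pipeline (mirrors nextStep below): + // link_package -> symlink_dependencies -> check_if_blocked + // -> symlink_dependency_binaries -> run_preinstall -> binaries + // -> run (post)install and (pre/post)prepare -> done + // .blocked is a main-thread-only detour between check_if_blocked and + // symlink_dependency_binaries.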
+ + done, + + // only the main thread sets blocked, and only the main thread + // sets a blocked task to symlink_dependency_binaries + blocked, + }; + + fn nextStep(this: *Task, comptime current_step: Step) Step { + const next_step: Step = switch (comptime current_step) { + .link_package => .symlink_dependencies, + .symlink_dependencies => .check_if_blocked, + .check_if_blocked => .symlink_dependency_binaries, + .symlink_dependency_binaries => .run_preinstall, + .run_preinstall => .binaries, + .binaries => .@"run (post)install and (pre/post)prepare", + .@"run (post)install and (pre/post)prepare" => .done, + + .done, + .blocked, + => @compileError("unexpected step"), + }; + + this.installer.store.entries.items(.step)[this.entry_id.get()].store(next_step, .monotonic); + + return next_step; + } + + const Yield = union(enum) { + yield, + done, + blocked, + fail: Error, + + pub fn failure(e: Error) Yield { + return .{ .fail = e }; + } + }; + + fn run(this: *Task) OOM!Yield { + const installer = this.installer; + const manager = installer.manager; + const lockfile = installer.lockfile; + + const pkgs = installer.lockfile.packages.slice(); + const pkg_names = pkgs.items(.name); + const pkg_name_hashes = pkgs.items(.name_hash); + const pkg_resolutions = pkgs.items(.resolution); + const pkg_bins = pkgs.items(.bin); + const pkg_script_lists = pkgs.items(.scripts); + + const entries = installer.store.entries.slice(); + const entry_node_ids = entries.items(.node_id); + const entry_dependencies = entries.items(.dependencies); + const entry_steps = entries.items(.step); + const entry_scripts = entries.items(.scripts); + + const nodes = installer.store.nodes.slice(); + const node_pkg_ids = nodes.items(.pkg_id); + const node_dep_ids = nodes.items(.dep_id); + + const node_id = entry_node_ids[this.entry_id.get()]; + const pkg_id = node_pkg_ids[node_id.get()]; + const dep_id = node_dep_ids[node_id.get()]; + + const pkg_name = pkg_names[pkg_id]; + const pkg_name_hash = pkg_name_hashes[pkg_id]; + const pkg_res = pkg_resolutions[pkg_id]; + + return next_step: switch (entry_steps[this.entry_id.get()].load(.monotonic)) { + inline .link_package => |current_step| { + const string_buf = lockfile.buffers.string_bytes.items; + + var pkg_cache_dir_subpath: bun.RelPath(.{ .sep = .auto }) = .from(switch (pkg_res.tag) { + else => |tag| pkg_cache_dir_subpath: { + const patch_info = try installer.packagePatchInfo( + pkg_name, + pkg_name_hash, + &pkg_res, + ); + + break :pkg_cache_dir_subpath switch (tag) { + .npm => manager.cachedNPMPackageFolderName(pkg_name.slice(string_buf), pkg_res.value.npm.version, patch_info.contentsHash()), + .git => manager.cachedGitFolderName(&pkg_res.value.git, patch_info.contentsHash()), + .github => manager.cachedGitHubFolderName(&pkg_res.value.github, patch_info.contentsHash()), + .local_tarball => manager.cachedTarballFolderName(pkg_res.value.local_tarball, patch_info.contentsHash()), + .remote_tarball => manager.cachedTarballFolderName(pkg_res.value.remote_tarball, patch_info.contentsHash()), + + else => { + if (comptime Environment.ci_assert) { + bun.assertWithLocation(false, @src()); + } + + continue :next_step this.nextStep(current_step); + }, + }; + }, + + .folder => { + // the folder does not exist in the cache + const folder_dir = switch (bun.openDirForIteration(FD.cwd(), pkg_res.value.folder.slice(string_buf))) { + .result => |fd| fd, + .err => |err| return .failure(.{ .link_package = err }), + }; + defer folder_dir.close(); + + var src: bun.AbsPath(.{ .unit = .os, .sep = .auto }) = 
.initTopLevelDir(); + defer src.deinit(); + src.append(pkg_res.value.folder.slice(string_buf)); + + var dest: bun.RelPath(.{ .unit = .os, .sep = .auto }) = .init(); + defer dest.deinit(); + + installer.appendStorePath(&dest, this.entry_id); + + var hardlinker: Hardlinker = .{ + .src_dir = folder_dir, + .src = src, + .dest = dest, + }; + + switch (try hardlinker.link(&.{comptime bun.OSPathLiteral("node_modules")})) { + .result => {}, + .err => |err| return .failure(.{ .link_package = err }), + } + + continue :next_step this.nextStep(current_step); + }, + }); + defer pkg_cache_dir_subpath.deinit(); + + const cache_dir, const cache_dir_path = manager.getCacheDirectoryAndAbsPath(); + defer cache_dir_path.deinit(); + + var dest_subpath: bun.RelPath(.{ .sep = .auto, .unit = .os }) = .init(); + defer dest_subpath.deinit(); + + installer.appendStorePath(&dest_subpath, this.entry_id); + + // link the package + if (comptime Environment.isMac) { + if (install.PackageInstall.supported_method == .clonefile) hardlink_fallback: { + switch (sys.clonefileat(cache_dir, pkg_cache_dir_subpath.sliceZ(), FD.cwd(), dest_subpath.sliceZ())) { + .result => { + // success! move to next step + continue :next_step this.nextStep(current_step); + }, + .err => |clonefile_err1| { + switch (clonefile_err1.getErrno()) { + .XDEV => break :hardlink_fallback, + .OPNOTSUPP => break :hardlink_fallback, + .NOENT => { + const parent_dest_dir = std.fs.path.dirname(dest_subpath.slice()) orelse { + return .failure(.{ .link_package = clonefile_err1 }); + }; + + FD.cwd().makePath(u8, parent_dest_dir) catch {}; + + switch (sys.clonefileat(cache_dir, pkg_cache_dir_subpath.sliceZ(), FD.cwd(), dest_subpath.sliceZ())) { + .result => { + continue :next_step this.nextStep(current_step); + }, + .err => |clonefile_err2| { + return .failure(.{ .link_package = clonefile_err2 }); + }, + } + }, + else => { + break :hardlink_fallback; + }, + } + }, + } + } + } + + const cached_package_dir = cached_package_dir: { + if (comptime Environment.isWindows) { + break :cached_package_dir switch (sys.openDirAtWindowsA( + cache_dir, + pkg_cache_dir_subpath.slice(), + .{ .iterable = true, .can_rename_or_delete = false, .read_only = true }, + )) { + .result => |dir_fd| dir_fd, + .err => |err| { + return .failure(.{ .link_package = err }); + }, + }; + } + break :cached_package_dir switch (sys.openat( + cache_dir, + pkg_cache_dir_subpath.sliceZ(), + bun.O.DIRECTORY | bun.O.CLOEXEC | bun.O.RDONLY, + 0, + )) { + .result => |fd| fd, + .err => |err| { + return .failure(.{ .link_package = err }); + }, + }; + }; + defer cached_package_dir.close(); + + var src: bun.AbsPath(.{ .sep = .auto, .unit = .os }) = .from(cache_dir_path.slice()); + defer src.deinit(); + src.append(pkg_cache_dir_subpath.slice()); + + var hardlinker: Hardlinker = .{ + .src_dir = cached_package_dir, + .src = src, + .dest = dest_subpath, + }; + + switch (try hardlinker.link(&.{})) { + .result => {}, + .err => |err| return .failure(.{ .link_package = err }), + } + + continue :next_step this.nextStep(current_step); + }, + inline .symlink_dependencies => |current_step| { + const string_buf = lockfile.buffers.string_bytes.items; + const dependencies = lockfile.buffers.dependencies.items; + + for (entry_dependencies[this.entry_id.get()].slice()) |dep| { + const dep_node_id = entry_node_ids[dep.entry_id.get()]; + const dep_dep_id = node_dep_ids[dep_node_id.get()]; + const dep_name = dependencies[dep_dep_id].name; + + var dest: bun.Path(.{ .sep = .auto }) = .initTopLevelDir(); + defer dest.deinit(); + + 
installer.appendStoreNodeModulesPath(&dest, this.entry_id); + dest.append(dep_name.slice(string_buf)); + + var dep_store_path: bun.AbsPath(.{ .sep = .auto }) = .initTopLevelDir(); + defer dep_store_path.deinit(); + + installer.appendStorePath(&dep_store_path, dep.entry_id); + + const target = target: { + var dest_save = dest.save(); + defer dest_save.restore(); + + dest.undo(1); + break :target dest.relative(&dep_store_path); + }; + defer target.deinit(); + + const symlinker: Symlinker = .{ + .dest = dest, + .target = target, + .fallback_junction_target = dep_store_path, + }; + + const link_strategy: Symlinker.Strategy = if (pkg_res.tag == .root or pkg_res.tag == .workspace) + // root and workspace packages ensure their dependency symlinks + // exist unconditionally. To make sure it's fast, first readlink + // then create the symlink if necessary + .expect_existing + else + .expect_missing; + + switch (symlinker.ensureSymlink(link_strategy)) { + .result => {}, + .err => |err| { + return .failure(.{ .symlink_dependencies = err }); + }, + } + } + continue :next_step this.nextStep(current_step); + }, + inline .check_if_blocked => |current_step| { + // preinstall scripts need to run before binaries can be linked. Block here if any dependencies + // of this entry are not finished. Do not count cycles towards blocking. + + var parent_dedupe: std.AutoArrayHashMap(Store.Entry.Id, void) = .init(bun.default_allocator); + defer parent_dedupe.deinit(); + + if (!installer.isTaskUnblocked(this.entry_id, &parent_dedupe)) { + return .blocked; + } + + continue :next_step this.nextStep(current_step); + }, + inline .symlink_dependency_binaries => |current_step| { + installer.linkDependencyBins(this.entry_id) catch |err| { + return .failure(.{ .binaries = err }); + }; + + switch (pkg_res.tag) { + .uninitialized, + .root, + .workspace, + .folder, + .symlink, + .single_file_module, + => {}, + + _ => {}, + + .npm, + .git, + .github, + .local_tarball, + .remote_tarball, + => { + const string_buf = lockfile.buffers.string_bytes.items; + + var hidden_hoisted_node_modules: bun.Path(.{ .sep = .auto }) = .init(); + defer hidden_hoisted_node_modules.deinit(); + + hidden_hoisted_node_modules.append( + "node_modules" ++ std.fs.path.sep_str ++ ".bun" ++ std.fs.path.sep_str ++ "node_modules", + ); + hidden_hoisted_node_modules.append(pkg_name.slice(installer.lockfile.buffers.string_bytes.items)); + + var target: bun.RelPath(.{ .sep = .auto }) = .init(); + defer target.deinit(); + + target.append(".."); + if (strings.containsChar(pkg_name.slice(installer.lockfile.buffers.string_bytes.items), '/')) { + target.append(".."); + } + + target.appendFmt("{}/node_modules/{s}", .{ + Store.Entry.fmtStorePath(this.entry_id, installer.store, installer.lockfile), + pkg_name.slice(string_buf), + }); + + var full_target: bun.AbsPath(.{ .sep = .auto }) = .initTopLevelDir(); + defer full_target.deinit(); + + installer.appendStorePath(&full_target, this.entry_id); + + const symlinker: Symlinker = .{ + .dest = hidden_hoisted_node_modules, + .target = target, + .fallback_junction_target = full_target, + }; + _ = symlinker.ensureSymlink(.ignore_failure); + }, + } + + continue :next_step this.nextStep(current_step); + }, + inline .run_preinstall => |current_step| { + if (!installer.manager.options.do.run_scripts or this.entry_id == .root) { + continue :next_step this.nextStep(current_step); + } + + const string_buf = installer.lockfile.buffers.string_bytes.items; + + const dep = installer.lockfile.buffers.dependencies.items[dep_id]; + const 
truncated_dep_name_hash: TruncatedPackageNameHash = @truncate(dep.name_hash); + + const is_trusted, const is_trusted_through_update_request = brk: { + if (installer.trusted_dependencies_from_update_requests.contains(truncated_dep_name_hash)) { + break :brk .{ true, true }; + } + if (installer.lockfile.hasTrustedDependency(dep.name.slice(string_buf))) { + break :brk .{ true, false }; + } + break :brk .{ false, false }; + }; + + var pkg_cwd: bun.AbsPath(.{ .sep = .auto }) = .initTopLevelDir(); + defer pkg_cwd.deinit(); + + installer.appendStorePath(&pkg_cwd, this.entry_id); + + if (pkg_res.tag != .root and (pkg_res.tag == .workspace or is_trusted)) { + const pkg_scripts: *Package.Scripts = &pkg_script_lists[pkg_id]; + + var log = bun.logger.Log.init(bun.default_allocator); + defer log.deinit(); + + const scripts_list = pkg_scripts.getList( + &log, + installer.lockfile, + &pkg_cwd, + dep.name.slice(string_buf), + &pkg_res, + ) catch |err| { + return .failure(.{ .run_preinstall = err }); + }; + + if (scripts_list) |list| { + entry_scripts[this.entry_id.get()] = bun.create(bun.default_allocator, Package.Scripts.List, list); + + if (is_trusted_through_update_request) { + const trusted_dep_to_add = try installer.manager.allocator.dupe(u8, dep.name.slice(string_buf)); + + installer.trusted_dependencies_mutex.lock(); + defer installer.trusted_dependencies_mutex.unlock(); + + try installer.manager.trusted_deps_to_add_to_package_json.append( + installer.manager.allocator, + trusted_dep_to_add, + ); + if (installer.lockfile.trusted_dependencies == null) { + installer.lockfile.trusted_dependencies = .{}; + } + try installer.lockfile.trusted_dependencies.?.put(installer.manager.allocator, truncated_dep_name_hash, {}); + } + + if (list.first_index != 0) { + // has scripts but not a preinstall + continue :next_step this.nextStep(current_step); + } + + installer.manager.spawnPackageLifecycleScripts( + installer.command_ctx, + list, + dep.behavior.optional, + false, + .{ + .entry_id = this.entry_id, + .installer = installer, + }, + ) catch |err| { + return .failure(.{ .run_preinstall = err }); + }; + + return .yield; + } + } + + continue :next_step this.nextStep(current_step); + }, + inline .binaries => |current_step| { + if (this.entry_id == .root) { + continue :next_step this.nextStep(current_step); + } + + const bin = pkg_bins[pkg_id]; + if (bin.tag == .none) { + continue :next_step this.nextStep(current_step); + } + + const string_buf = installer.lockfile.buffers.string_bytes.items; + const dependencies = installer.lockfile.buffers.dependencies.items; + + const dep_name = dependencies[dep_id].name.slice(string_buf); + + const abs_target_buf = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(abs_target_buf); + const abs_dest_buf = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(abs_dest_buf); + const rel_buf = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(rel_buf); + + var seen: bun.StringHashMap(void) = .init(bun.default_allocator); + defer seen.deinit(); + + var node_modules_path: bun.AbsPath(.{}) = .initTopLevelDir(); + defer node_modules_path.deinit(); + + installer.appendStoreNodeModulesPath(&node_modules_path, this.entry_id); + + var bin_linker: Bin.Linker = .{ + .bin = bin, + .global_bin_path = installer.manager.options.bin_path, + .package_name = strings.StringOrTinyString.init(dep_name), + .string_buf = string_buf, + .extern_string_buf = installer.lockfile.buffers.extern_strings.items, + .seen = &seen, + .node_modules_path = &node_modules_path, + 
.abs_target_buf = abs_target_buf, + .abs_dest_buf = abs_dest_buf, + .rel_buf = rel_buf, + }; + + bin_linker.link(false); + + if (bin_linker.err) |err| { + return .failure(.{ .binaries = err }); + } + + continue :next_step this.nextStep(current_step); + }, + inline .@"run (post)install and (pre/post)prepare" => |current_step| { + if (!installer.manager.options.do.run_scripts or this.entry_id == .root) { + continue :next_step this.nextStep(current_step); + } + + var list = entry_scripts[this.entry_id.get()] orelse { + continue :next_step this.nextStep(current_step); + }; + + if (list.first_index == 0) { + for (list.items[1..], 1..) |item, i| { + if (item != null) { + list.first_index = @intCast(i); + break; + } + } + } + + if (list.first_index == 0) { + continue :next_step this.nextStep(current_step); + } + + const dep = installer.lockfile.buffers.dependencies.items[dep_id]; + + installer.manager.spawnPackageLifecycleScripts( + installer.command_ctx, + list.*, + dep.behavior.optional, + false, + .{ + .entry_id = this.entry_id, + .installer = installer, + }, + ) catch |err| { + return .failure(.{ .@"run (post)install and (pre/post)prepare" = err }); + }; + + // when these scripts finish, the package install will be + // complete. the task does not have any more work to complete, + // so it does not return to the thread pool. + + return .yield; + }, + + .done => { + return .done; + }, + + .blocked => { + bun.debugAssert(false); + return .yield; + }, + }; + } + + pub fn callback(task: *ThreadPool.Task) void { + const this: *Task = @fieldParentPtr("task", task); + + const res = this.run() catch |err| switch (err) { + error.OutOfMemory => bun.outOfMemory(), + }; + + switch (res) { + .yield => {}, + .done => { + if (comptime Environment.ci_assert) { + bun.assertWithLocation(this.installer.store.entries.items(.step)[this.entry_id.get()].load(.monotonic) == .done, @src()); + } + this.result = .done; + this.installer.tasks.push(this); + this.installer.manager.wake(); + }, + .blocked => { + if (comptime Environment.ci_assert) { + bun.assertWithLocation(this.installer.store.entries.items(.step)[this.entry_id.get()].load(.monotonic) == .check_if_blocked, @src()); + } + this.result = .blocked; + this.installer.tasks.push(this); + this.installer.manager.wake(); + }, + .fail => |err| { + if (comptime Environment.ci_assert) { + bun.assertWithLocation(this.installer.store.entries.items(.step)[this.entry_id.get()].load(.monotonic) != .done, @src()); + } + this.installer.store.entries.items(.step)[this.entry_id.get()].store(.done, .monotonic); + this.result = .{ .err = err.clone(bun.default_allocator) }; + this.installer.tasks.push(this); + this.installer.manager.wake(); + }, + } + } + }; + + const PatchInfo = union(enum) { + none, + remove: struct { + name_and_version_hash: u64, + }, + patch: struct { + name_and_version_hash: u64, + patch_path: string, + contents_hash: u64, + }, + + pub fn contentsHash(this: *const @This()) ?u64 { + return switch (this.*) { + .none, .remove => null, + .patch => |patch| patch.contents_hash, + }; + } + + pub fn nameAndVersionHash(this: *const @This()) ?u64 { + return switch (this.*) { + .none, .remove => null, + .patch => |patch| patch.name_and_version_hash, + }; + } + }; + + pub fn packagePatchInfo( + this: *Installer, + pkg_name: String, + pkg_name_hash: PackageNameHash, + pkg_res: *const Resolution, + ) OOM!PatchInfo { + if (this.lockfile.patched_dependencies.entries.len == 0 and this.manager.patched_dependencies_to_remove.entries.len == 0) { + return .none; + } + + const
string_buf = this.lockfile.buffers.string_bytes.items; + + var version_buf: std.ArrayListUnmanaged(u8) = .empty; + defer version_buf.deinit(bun.default_allocator); + + var writer = version_buf.writer(this.lockfile.allocator); + try writer.print("{s}@", .{pkg_name.slice(string_buf)}); + + switch (pkg_res.tag) { + .workspace => { + if (this.lockfile.workspace_versions.get(pkg_name_hash)) |workspace_version| { + try writer.print("{}", .{workspace_version.fmt(string_buf)}); + } + }, + else => { + try writer.print("{}", .{pkg_res.fmt(string_buf, .posix)}); + }, + } + + const name_and_version_hash = String.Builder.stringHash(version_buf.items); + + if (this.lockfile.patched_dependencies.get(name_and_version_hash)) |patch| { + return .{ + .patch = .{ + .name_and_version_hash = name_and_version_hash, + .patch_path = patch.path.slice(string_buf), + .contents_hash = patch.patchfileHash().?, + }, + }; + } + + if (this.manager.patched_dependencies_to_remove.contains(name_and_version_hash)) { + return .{ + .remove = .{ + .name_and_version_hash = name_and_version_hash, + }, + }; + } + + return .none; + } + + pub fn linkDependencyBins(this: *const Installer, parent_entry_id: Store.Entry.Id) !void { + const lockfile = this.lockfile; + const store = this.store; + + const string_buf = lockfile.buffers.string_bytes.items; + const extern_string_buf = lockfile.buffers.extern_strings.items; + + const entries = store.entries.slice(); + const entry_node_ids = entries.items(.node_id); + const entry_deps = entries.items(.dependencies); + + const nodes = store.nodes.slice(); + const node_pkg_ids = nodes.items(.pkg_id); + const node_dep_ids = nodes.items(.dep_id); + + const pkgs = lockfile.packages.slice(); + const pkg_bins = pkgs.items(.bin); + + const link_target_buf = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(link_target_buf); + const link_dest_buf = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(link_dest_buf); + const link_rel_buf = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(link_rel_buf); + + var seen: bun.StringHashMap(void) = .init(bun.default_allocator); + defer seen.deinit(); + + var node_modules_path: bun.AbsPath(.{}) = .initTopLevelDir(); + defer node_modules_path.deinit(); + + this.appendStoreNodeModulesPath(&node_modules_path, parent_entry_id); + + for (entry_deps[parent_entry_id.get()].slice()) |dep| { + const node_id = entry_node_ids[dep.entry_id.get()]; + const dep_id = node_dep_ids[node_id.get()]; + const pkg_id = node_pkg_ids[node_id.get()]; + const bin = pkg_bins[pkg_id]; + if (bin.tag == .none) { + continue; + } + + const alias = lockfile.buffers.dependencies.items[dep_id].name; + + var bin_linker: Bin.Linker = .{ + .bin = bin, + .global_bin_path = this.manager.options.bin_path, + .package_name = strings.StringOrTinyString.init(alias.slice(string_buf)), + .string_buf = string_buf, + .extern_string_buf = extern_string_buf, + .seen = &seen, + .node_modules_path = &node_modules_path, + .abs_target_buf = link_target_buf, + .abs_dest_buf = link_dest_buf, + .rel_buf = link_rel_buf, + }; + + bin_linker.link(false); + + if (bin_linker.err) |err| { + return err; + } + } + } + + pub fn appendStoreNodeModulesPath(this: *const Installer, buf: anytype, entry_id: Store.Entry.Id) void { + const string_buf = this.lockfile.buffers.string_bytes.items; + + const entries = this.store.entries.slice(); + const entry_node_ids = entries.items(.node_id); + + const nodes = this.store.nodes.slice(); + const node_pkg_ids = nodes.items(.pkg_id); + + const pkgs = 
this.lockfile.packages.slice(); + const pkg_resolutions = pkgs.items(.resolution); + + const node_id = entry_node_ids[entry_id.get()]; + const pkg_id = node_pkg_ids[node_id.get()]; + const pkg_res = pkg_resolutions[pkg_id]; + + switch (pkg_res.tag) { + .root => { + buf.append("node_modules"); + }, + .workspace => { + buf.append(pkg_res.value.workspace.slice(string_buf)); + buf.append("node_modules"); + }, + else => { + buf.appendFmt("node_modules/" ++ Store.modules_dir_name ++ "/{}/node_modules", .{ + Store.Entry.fmtStorePath(entry_id, this.store, this.lockfile), + }); + }, + } + } + + pub fn appendStorePath(this: *const Installer, buf: anytype, entry_id: Store.Entry.Id) void { + const string_buf = this.lockfile.buffers.string_bytes.items; + + const entries = this.store.entries.slice(); + const entry_node_ids = entries.items(.node_id); + + const nodes = this.store.nodes.slice(); + const node_pkg_ids = nodes.items(.pkg_id); + // const node_peers = nodes.items(.peers); + + const pkgs = this.lockfile.packages.slice(); + const pkg_names = pkgs.items(.name); + const pkg_resolutions = pkgs.items(.resolution); + + const node_id = entry_node_ids[entry_id.get()]; + // const peers = node_peers[node_id.get()]; + const pkg_id = node_pkg_ids[node_id.get()]; + const pkg_res = pkg_resolutions[pkg_id]; + + switch (pkg_res.tag) { + .root => {}, + .workspace => { + buf.append(pkg_res.value.workspace.slice(string_buf)); + }, + .symlink => { + const symlink_dir_path = this.manager.globalLinkDirPath(); + + buf.clear(); + buf.append(symlink_dir_path); + buf.append(pkg_res.value.symlink.slice(string_buf)); + }, + else => { + const pkg_name = pkg_names[pkg_id]; + buf.append("node_modules/" ++ Store.modules_dir_name); + buf.appendFmt("{}", .{ + Store.Entry.fmtStorePath(entry_id, this.store, this.lockfile), + }); + buf.append("node_modules"); + buf.append(pkg_name.slice(string_buf)); + }, + } + } +}; + +// @sortImports + +const std = @import("std"); +const Hardlinker = @import("./Hardlinker.zig").Hardlinker; +const Symlinker = @import("./Symlinker.zig").Symlinker; + +const bun = @import("bun"); +const Environment = bun.Environment; +const FD = bun.FD; +const Global = bun.Global; +const OOM = bun.OOM; +const Output = bun.Output; +const Progress = bun.Progress; +const ThreadPool = bun.ThreadPool; +const string = bun.string; +const strings = bun.strings; +const sys = bun.sys; +const Bitset = bun.bit_set.DynamicBitSetUnmanaged; +const Command = bun.CLI.Command; +const String = bun.Semver.String; + +const install = bun.install; +const Bin = install.Bin; +const PackageInstall = install.PackageInstall; +const PackageManager = install.PackageManager; +const PackageNameHash = install.PackageNameHash; +const Resolution = install.Resolution; +const Store = install.Store; +const TruncatedPackageNameHash = install.TruncatedPackageNameHash; +const invalid_dependency_id = install.invalid_dependency_id; + +const Lockfile = install.Lockfile; +const Package = Lockfile.Package; diff --git a/src/install/isolated_install/Store.zig b/src/install/isolated_install/Store.zig new file mode 100644 index 0000000000..c3dea7ad9f --- /dev/null +++ b/src/install/isolated_install/Store.zig @@ -0,0 +1,548 @@ +const Ids = struct { + dep_id: DependencyID, + pkg_id: PackageID, +}; + +pub const Store = struct { + entries: Entry.List, + nodes: Node.List, + + const log = Output.scoped(.Store, false); + + pub const modules_dir_name = ".bun"; + + fn NewId(comptime T: type) type { + return enum(u32) { + comptime { + _ = T; + } + + root = 0, + invalid = max, 
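+ // non-exhaustive: every remaining u32 value is an ordinary id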
+ _, + + const max = std.math.maxInt(u32); + + pub fn from(id: u32) @This() { + bun.debugAssert(id != max); + return @enumFromInt(id); + } + + pub fn get(id: @This()) u32 { + bun.debugAssert(id != .invalid); + return @intFromEnum(id); + } + + pub fn tryGet(id: @This()) ?u32 { + return if (id == .invalid) null else @intFromEnum(id); + } + + pub fn getOr(id: @This(), default: u32) u32 { + return if (id == .invalid) default else @intFromEnum(id); + } + }; + } + + comptime { + bun.assert(NewId(Entry) != NewId(Node)); + bun.assert(NewId(Entry) == NewId(Entry)); + } + + pub const Installer = @import("./Installer.zig").Installer; + + pub fn isCycle(this: *const Store, id: Entry.Id, maybe_parent_id: Entry.Id, parent_dedupe: *std.AutoArrayHashMap(Entry.Id, void)) bool { + var i: usize = 0; + var len: usize = 0; + + const entry_parents = this.entries.items(.parents); + + for (entry_parents[id.get()].items) |parent_id| { + if (parent_id == .invalid) { + continue; + } + if (parent_id == maybe_parent_id) { + return true; + } + parent_dedupe.put(parent_id, {}) catch bun.outOfMemory(); + } + + len = parent_dedupe.count(); + while (i < len) { + for (entry_parents[parent_dedupe.keys()[i].get()].items) |parent_id| { + if (parent_id == .invalid) { + continue; + } + if (parent_id == maybe_parent_id) { + return true; + } + parent_dedupe.put(parent_id, {}) catch bun.outOfMemory(); + len = parent_dedupe.count(); + } + i += 1; + } + + return false; + } + + // A unique entry in the store. Its path looks like: + // './node_modules/.bun/name@version/node_modules/name' + // or if peers are involved: + // './node_modules/.bun/name@version_peer1@version+peer2@version/node_modules/name' + // + // Entries are created for workspaces (including the root), but only in memory. If + // a module depends on a workspace, a symlink is created pointing outside the store + // directory to the workspace.
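+ // + // For example (hypothetical package names; the peer suffix below is the hex + // peer_hash emitted by StorePathFormatter): + // './node_modules/.bun/is-even@1.0.0/node_modules/is-even' + // './node_modules/.bun/is-even@1.0.0+a1b2c3d4e5f6/node_modules/is-even'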
+ pub const Entry = struct { + // Used to get dependency name for destination path and peers + // for store path + node_id: Node.Id, + // parent_id: Id, + dependencies: Dependencies, + parents: std.ArrayListUnmanaged(Id) = .empty, + step: std.atomic.Value(Installer.Task.Step) = .init(.link_package), + + peer_hash: PeerHash, + + scripts: ?*Package.Scripts.List = null, + + pub const PeerHash = enum(u64) { + none = 0, + _, + + pub fn from(int: u64) @This() { + return @enumFromInt(int); + } + + pub fn cast(this: @This()) u64 { + return @intFromEnum(this); + } + }; + + const StorePathFormatter = struct { + entry_id: Id, + store: *const Store, + lockfile: *const Lockfile, + + pub fn format(this: @This(), comptime _: string, _: std.fmt.FormatOptions, writer: anytype) @TypeOf(writer).Error!void { + const store = this.store; + const entries = store.entries.slice(); + const entry_peer_hashes = entries.items(.peer_hash); + const entry_node_ids = entries.items(.node_id); + + const peer_hash = entry_peer_hashes[this.entry_id.get()]; + const node_id = entry_node_ids[this.entry_id.get()]; + const pkg_id = store.nodes.items(.pkg_id)[node_id.get()]; + + const string_buf = this.lockfile.buffers.string_bytes.items; + + const pkgs = this.lockfile.packages.slice(); + const pkg_names = pkgs.items(.name); + const pkg_resolutions = pkgs.items(.resolution); + + const pkg_name = pkg_names[pkg_id]; + const pkg_res = pkg_resolutions[pkg_id]; + + switch (pkg_res.tag) { + .folder => { + try writer.print("{}@file+{}", .{ + pkg_name.fmtStorePath(string_buf), + pkg_res.value.folder.fmtStorePath(string_buf), + }); + }, + else => { + try writer.print("{}@{}", .{ + pkg_name.fmtStorePath(string_buf), + pkg_res.fmtStorePath(string_buf), + }); + }, + } + + if (peer_hash != .none) { + try writer.print("+{}", .{ + bun.fmt.hexIntLower(peer_hash.cast()), + }); + } + } + }; + + pub fn fmtStorePath(entry_id: Id, store: *const Store, lockfile: *const Lockfile) StorePathFormatter { + return .{ .entry_id = entry_id, .store = store, .lockfile = lockfile }; + } + + pub fn debugGatherAllParents(entry_id: Id, store: *const Store) []const Id { + var i: usize = 0; + var len: usize = 0; + + const entry_parents = store.entries.items(.parents); + + var parents: std.AutoArrayHashMapUnmanaged(Entry.Id, void) = .empty; + // defer parents.deinit(bun.default_allocator); + + for (entry_parents[entry_id.get()].items) |parent_id| { + if (parent_id == .invalid) { + continue; + } + parents.put(bun.default_allocator, parent_id, {}) catch bun.outOfMemory(); + } + + len = parents.count(); + while (i < len) { + for (entry_parents[parents.entries.items(.key)[i].get()].items) |parent_id| { + if (parent_id == .invalid) { + continue; + } + parents.put(bun.default_allocator, parent_id, {}) catch bun.outOfMemory(); + len = parents.count(); + } + i += 1; + } + + return parents.keys(); + } + + pub const List = bun.MultiArrayList(Entry); + + const DependenciesItem = struct { + entry_id: Id, + + // TODO: this can be removed, and instead dep_id can be retrieved through: + // entry_id -> node_id -> node_dep_ids + dep_id: DependencyID, + }; + pub const Dependencies = OrderedArraySet(DependenciesItem, DependenciesOrderedArraySetCtx); + + pub const DependenciesOrderedArraySetCtx = struct { + string_buf: string, + dependencies: []const Dependency, + + pub fn eql(ctx: *const DependenciesOrderedArraySetCtx, l_item: DependenciesItem, r_item: DependenciesItem) bool { + if (l_item.entry_id != r_item.entry_id) { + return false; + } + + const dependencies = ctx.dependencies; + 
const l_dep = dependencies[l_item.dep_id]; + const r_dep = dependencies[r_item.dep_id]; + + return l_dep.name_hash == r_dep.name_hash; + } + + pub fn order(ctx: *const DependenciesOrderedArraySetCtx, l: DependenciesItem, r: DependenciesItem) std.math.Order { + const dependencies = ctx.dependencies; + const l_dep = dependencies[l.dep_id]; + const r_dep = dependencies[r.dep_id]; + + if (l.entry_id == r.entry_id and l_dep.name_hash == r_dep.name_hash) { + return .eq; + } + + // TODO: why are we doing this? + if (l.entry_id == .invalid) { + if (r.entry_id == .invalid) { + return .eq; + } + return .lt; + } else if (r.entry_id == .invalid) { + if (l.entry_id == .invalid) { + return .eq; + } + return .gt; + } + + const string_buf = ctx.string_buf; + const l_dep_name = l_dep.name; + const r_dep_name = r_dep.name; + + return l_dep_name.order(&r_dep_name, string_buf, string_buf); + } + }; + + pub const Id = NewId(Entry); + + pub fn debugPrintList(list: *const List, lockfile: *Lockfile) void { + const string_buf = lockfile.buffers.string_bytes.items; + + const pkgs = lockfile.packages.slice(); + const pkg_names = pkgs.items(.name); + const pkg_resolutions = pkgs.items(.resolution); + + for (0..list.len) |entry_id| { + const entry = list.get(entry_id); + const entry_pkg_name = pkg_names[entry.pkg_id].slice(string_buf); + log( + \\entry ({d}): '{s}@{}' + \\ dep_name: {s} + \\ pkg_id: {d} + \\ parent_id: {} + \\ + , .{ + entry_id, + entry_pkg_name, + pkg_resolutions[entry.pkg_id].fmt(string_buf, .posix), + entry.dep_name.slice(string_buf), + entry.pkg_id, + entry.parent_id, + }); + + log(" dependencies ({d}):\n", .{entry.dependencies.items.len}); + for (entry.dependencies.items) |dep_entry_id| { + const dep_entry = list.get(dep_entry_id.get()); + log(" {s}@{}\n", .{ + pkg_names[dep_entry.pkg_id].slice(string_buf), + pkg_resolutions[dep_entry.pkg_id].fmt(string_buf, .posix), + }); + } + } + } + }; + + pub fn OrderedArraySet(comptime T: type, comptime Ctx: type) type { + return struct { + list: std.ArrayListUnmanaged(T) = .empty, + + pub const empty: @This() = .{}; + + pub fn initCapacity(allocator: std.mem.Allocator, n: usize) OOM!@This() { + const list: std.ArrayListUnmanaged(T) = try .initCapacity(allocator, n); + return .{ .list = list }; + } + + pub fn deinit(this: *@This(), allocator: std.mem.Allocator) void { + this.list.deinit(allocator); + } + + pub fn slice(this: *const @This()) []const T { + return this.list.items; + } + + pub fn len(this: *const @This()) usize { + return this.list.items.len; + } + + pub fn eql(l: *const @This(), r: *const @This(), ctx: *const Ctx) bool { + if (l.list.items.len != r.list.items.len) { + return false; + } + + for (l.list.items, r.list.items) |l_item, r_item| { + if (!ctx.eql(l_item, r_item)) { + return false; + } + } + + return true; + } + + pub fn insert(this: *@This(), allocator: std.mem.Allocator, new: T, ctx: *const Ctx) OOM!void { + for (0..this.list.items.len) |i| { + const existing = this.list.items[i]; + if (ctx.eql(new, existing)) { + return; + } + + const order = ctx.order(new, existing); + + if (order == .eq) { + return; + } + + if (order == .lt) { + try this.list.insert(allocator, i, new); + return; + } + } + + try this.list.append(allocator, new); + } + + pub fn insertAssumeCapacity(this: *@This(), new: T, ctx: *const Ctx) void { + for (0..this.list.items.len) |i| { + const existing = this.list.items[i]; + if (ctx.eql(new, existing)) { + return; + } + + const order = ctx.order(new, existing); + + if (order == .eq) { + return; + } + + if (order == .lt) {
this.list.insertAssumeCapacity(i, new); + return; + } + } + + this.list.appendAssumeCapacity(new); + } + }; + } + + // A node used to represent the full dependency tree. Uniqueness is determined + // from `pkg_id` and `peers` + pub const Node = struct { + dep_id: DependencyID, + pkg_id: PackageID, + parent_id: Id, + + dependencies: std.ArrayListUnmanaged(Ids) = .empty, + peers: Peers = .empty, + + // each node in this list becomes a symlink in the package's node_modules + nodes: std.ArrayListUnmanaged(Id) = .empty, + + pub const Peers = OrderedArraySet(TransitivePeer, TransitivePeer.OrderedArraySetCtx); + + pub const TransitivePeer = struct { + dep_id: DependencyID, + pkg_id: PackageID, + auto_installed: bool, + + pub const OrderedArraySetCtx = struct { + string_buf: string, + pkg_names: []const String, + + pub fn eql(ctx: *const OrderedArraySetCtx, l_item: TransitivePeer, r_item: TransitivePeer) bool { + _ = ctx; + return l_item.pkg_id == r_item.pkg_id; + } + + pub fn order(ctx: *const OrderedArraySetCtx, l: TransitivePeer, r: TransitivePeer) std.math.Order { + const l_pkg_id = l.pkg_id; + const r_pkg_id = r.pkg_id; + if (l_pkg_id == r_pkg_id) { + return .eq; + } + + const string_buf = ctx.string_buf; + const pkg_names = ctx.pkg_names; + const l_pkg_name = pkg_names[l_pkg_id]; + const r_pkg_name = pkg_names[r_pkg_id]; + + return l_pkg_name.order(&r_pkg_name, string_buf, string_buf); + } + }; + }; + + pub const List = bun.MultiArrayList(Node); + + pub fn deinitList(list: *const List, allocator: std.mem.Allocator) void { + list.deinit(allocator); + } + + pub fn debugPrint(this: *const Node, id: Id, lockfile: *const Lockfile) void { + const pkgs = lockfile.packages.slice(); + const pkg_names = pkgs.items(.name); + const pkg_resolutions = pkgs.items(.resolution); + + const string_buf = lockfile.buffers.string_bytes.items; + const deps = lockfile.buffers.dependencies.items; + + const dep_name = if (this.dep_id == invalid_dependency_id) "root" else deps[this.dep_id].name.slice(string_buf); + const dep_version = if (this.dep_id == invalid_dependency_id) "root" else deps[this.dep_id].version.literal.slice(string_buf); + + log( + \\node({d}) + \\ deps: {s}@{s} + \\ res: {s}@{} + \\ + , .{ + id, + dep_name, + dep_version, + pkg_names[this.pkg_id].slice(string_buf), + pkg_resolutions[this.pkg_id].fmt(string_buf, .posix), + }); + } + + pub const Id = NewId(Node); + + pub fn debugPrintList(list: *const List, lockfile: *const Lockfile) void { + const string_buf = lockfile.buffers.string_bytes.items; + const dependencies = lockfile.buffers.dependencies.items; + + const pkgs = lockfile.packages.slice(); + const pkg_names = pkgs.items(.name); + const pkg_resolutions = pkgs.items(.resolution); + + for (0..list.len) |node_id| { + const node = list.get(node_id); + const node_pkg_name = pkg_names[node.pkg_id].slice(string_buf); + log( + \\node ({d}): '{s}' + \\ dep_id: {d} + \\ pkg_id: {d} + \\ parent_id: {} + \\ + , .{ + node_id, + node_pkg_name, + node.dep_id, + node.pkg_id, + node.parent_id, + }); + + log(" dependencies ({d}):\n", .{node.dependencies.items.len}); + for (node.dependencies.items) |ids| { + const dep = dependencies[ids.dep_id]; + const dep_name = dep.name.slice(string_buf); + + const pkg_name = pkg_names[ids.pkg_id].slice(string_buf); + const pkg_res = pkg_resolutions[ids.pkg_id]; + + log(" {s}@{} ({s}@{s})\n", .{ + pkg_name, + pkg_res.fmt(string_buf, .posix), + dep_name, + dep.version.literal.slice(string_buf), + }); + } + + log(" nodes ({d}): ", .{node.nodes.items.len}); + for 
(node.nodes.items, 0..) |id, i| { + log("{d}", .{id.get()}); + if (i != node.nodes.items.len - 1) { + log(",", .{}); + } + } + + log("\n peers ({d}):\n", .{node.peers.list.items.len}); + for (node.peers.list.items) |ids| { + const dep = dependencies[ids.dep_id]; + const dep_name = dep.name.slice(string_buf); + const pkg_name = pkg_names[ids.pkg_id].slice(string_buf); + const pkg_res = pkg_resolutions[ids.pkg_id]; + + log(" {s}@{} ({s}@{s})\n", .{ + pkg_name, + pkg_res.fmt(string_buf, .posix), + dep_name, + dep.version.literal.slice(string_buf), + }); + } + } + } + }; +}; + +// @sortImports + +const std = @import("std"); + +const bun = @import("bun"); +const OOM = bun.OOM; +const Output = bun.Output; +const string = bun.string; + +const Semver = bun.Semver; +const String = Semver.String; + +const install = bun.install; +const Dependency = install.Dependency; +const DependencyID = install.DependencyID; +const PackageID = install.PackageID; +const invalid_dependency_id = install.invalid_dependency_id; + +const Lockfile = install.Lockfile; +const Package = Lockfile.Package; diff --git a/src/install/isolated_install/Symlinker.zig b/src/install/isolated_install/Symlinker.zig new file mode 100644 index 0000000000..48c4233b24 --- /dev/null +++ b/src/install/isolated_install/Symlinker.zig @@ -0,0 +1,115 @@ +pub const Symlinker = struct { + dest: bun.Path(.{ .sep = .auto }), + target: bun.RelPath(.{ .sep = .auto }), + fallback_junction_target: bun.AbsPath(.{ .sep = .auto }), + + pub fn symlink(this: *const @This()) sys.Maybe(void) { + if (comptime Environment.isWindows) { + return sys.symlinkOrJunction(this.dest.sliceZ(), this.target.sliceZ(), this.fallback_junction_target.sliceZ()); + } + return sys.symlink(this.target.sliceZ(), this.dest.sliceZ()); + } + + pub const Strategy = enum { + expect_existing, + expect_missing, + ignore_failure, + }; + + pub fn ensureSymlink( + this: *const @This(), + strategy: Strategy, + ) sys.Maybe(void) { + return switch (strategy) { + .ignore_failure => { + return switch (this.symlink()) { + .result => .success, + .err => |symlink_err| switch (symlink_err.getErrno()) { + .NOENT => { + const dest_parent = this.dest.dirname() orelse { + return .success; + }; + + FD.cwd().makePath(u8, dest_parent) catch {}; + _ = this.symlink(); + return .success; + }, + else => .success, + }, + }; + }, + .expect_missing => { + return switch (this.symlink()) { + .result => .success, + .err => |symlink_err1| switch (symlink_err1.getErrno()) { + .NOENT => { + const dest_parent = this.dest.dirname() orelse { + return .initErr(symlink_err1); + }; + + FD.cwd().makePath(u8, dest_parent) catch {}; + return this.symlink(); + }, + .EXIST => { + FD.cwd().deleteTree(this.dest.sliceZ()) catch {}; + return this.symlink(); + }, + else => .initErr(symlink_err1), + }, + }; + }, + .expect_existing => { + const current_link_buf = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(current_link_buf); + var current_link: []const u8 = switch (sys.readlink(this.dest.sliceZ(), current_link_buf)) { + .result => |res| res, + .err => |readlink_err| return switch (readlink_err.getErrno()) { + .NOENT => switch (this.symlink()) { + .result => .success, + .err => |symlink_err| switch (symlink_err.getErrno()) { + .NOENT => { + const dest_parent = this.dest.dirname() orelse { + return .initErr(symlink_err); + }; + + FD.cwd().makePath(u8, dest_parent) catch {}; + return this.symlink(); + }, + else => .initErr(symlink_err), + }, + }, + else => { + FD.cwd().deleteTree(this.dest.sliceZ()) catch {}; + return 
this.symlink(); + }, + }, + }; + + // libuv adds a trailing slash to junctions. + current_link = strings.withoutTrailingSlash(current_link); + + if (strings.eqlLong(current_link, this.target.sliceZ(), true)) { + return .success; + } + + if (comptime Environment.isWindows) { + if (strings.eqlLong(current_link, this.fallback_junction_target.slice(), true)) { + return .success; + } + } + + // this existing link is pointing to the wrong package + _ = sys.unlink(this.dest.sliceZ()); + return this.symlink(); + }, + }; + } +}; + +// @sortImports + +const bun = @import("bun"); +const Environment = bun.Environment; +const FD = bun.FD; +const strings = bun.strings; +const sys = bun.sys; diff --git a/src/install/lifecycle_script_runner.zig b/src/install/lifecycle_script_runner.zig index 0ccb974c52..2b249771b8 100644 --- a/src/install/lifecycle_script_runner.zig +++ b/src/install/lifecycle_script_runner.zig @@ -8,6 +8,7 @@ const Global = bun.Global; const JSC = bun.JSC; const Timer = std.time.Timer; const string = bun.string; +const Store = bun.install.Store; const Process = bun.spawn.Process; const log = Output.scoped(.Script, false); @@ -24,6 +25,7 @@ pub const LifecycleScriptSubprocess = struct { has_called_process_exit: bool = false, manager: *PackageManager, envp: [:null]?[*:0]const u8, + shell_bin: ?[:0]const u8, timer: ?Timer = null, @@ -33,8 +35,15 @@ pub const LifecycleScriptSubprocess = struct { optional: bool = false, started_at: u64 = 0, + ctx: ?InstallCtx, + heap: bun.io.heap.IntrusiveField(LifecycleScriptSubprocess) = .{}, + pub const InstallCtx = struct { + entry_id: Store.Entry.Id, + installer: *Store.Installer, + }; + pub const List = bun.io.heap.Intrusive(LifecycleScriptSubprocess, *PackageManager, sortByStartedAt); fn sortByStartedAt(_: *PackageManager, a: *LifecycleScriptSubprocess, b: *LifecycleScriptSubprocess) bool { @@ -94,9 +103,6 @@ pub const LifecycleScriptSubprocess = struct { this.handleExit(process.status); } - // This is only used on the main thread. - var cwd_z_buf: bun.PathBuffer = undefined; - fn resetOutputFlags(output: *OutputReader, fd: bun.FileDescriptor) void { output.flags.nonblocking = true; output.flags.socket = true; @@ -139,7 +145,6 @@ pub const LifecycleScriptSubprocess = struct { const manager = this.manager; const original_script = this.scripts.items[next_script_index].?; const cwd = this.scripts.cwd; - const env = manager.env; this.stdout.setParent(this); this.stderr.setParent(this); @@ -148,11 +153,9 @@ pub const LifecycleScriptSubprocess = struct { this.current_script_index = next_script_index; this.has_called_process_exit = false; - const shell_bin = if (Environment.isWindows) null else bun.CLI.RunCommand.findShell(env.get("PATH") orelse "", cwd) orelse null; - - var copy_script = try std.ArrayList(u8).initCapacity(manager.allocator, original_script.script.len + 1); + var copy_script = try std.ArrayList(u8).initCapacity(manager.allocator, original_script.len + 1); defer copy_script.deinit(); - try bun.CLI.RunCommand.replacePackageManagerRun(©_script, original_script.script); + try bun.CLI.RunCommand.replacePackageManagerRun(©_script, original_script); try copy_script.append(0); const combined_script: [:0]u8 = copy_script.items[0 .. 
copy_script.items.len - 1 :0]; @@ -174,8 +177,8 @@ pub const LifecycleScriptSubprocess = struct { log("{s} - {s} $ {s}", .{ this.package_name, this.scriptName(), combined_script }); - var argv = if (shell_bin != null and !Environment.isWindows) [_]?[*:0]const u8{ - shell_bin.?, + var argv = if (this.shell_bin != null and !Environment.isWindows) [_]?[*:0]const u8{ + this.shell_bin.?, "-c", combined_script, null, @@ -347,6 +350,10 @@ pub const LifecycleScriptSubprocess = struct { if (exit.code > 0) { if (this.optional) { + if (this.ctx) |ctx| { + ctx.installer.store.entries.items(.step)[ctx.entry_id.get()].store(.done, .monotonic); + ctx.installer.onTaskComplete(ctx.entry_id, .fail); + } _ = this.manager.pending_lifecycle_script_tasks.fetchSub(1, .monotonic); this.deinitAndDeletePackage(); return; @@ -383,6 +390,21 @@ pub const LifecycleScriptSubprocess = struct { } } + if (this.ctx) |ctx| { + switch (this.current_script_index) { + // preinstall + 0 => { + const previous_step = ctx.installer.store.entries.items(.step)[ctx.entry_id.get()].swap(.binaries, .monotonic); + bun.debugAssert(previous_step == .run_preinstall); + ctx.installer.startTask(ctx.entry_id); + _ = this.manager.pending_lifecycle_script_tasks.fetchSub(1, .monotonic); + this.deinit(); + return; + }, + else => {}, + } + } + for (this.current_script_index + 1..Lockfile.Scripts.names.len) |new_script_index| { if (this.scripts.items[new_script_index] != null) { this.resetPolls(); @@ -403,6 +425,15 @@ pub const LifecycleScriptSubprocess = struct { }); } + if (this.ctx) |ctx| { + const previous_step = ctx.installer.store.entries.items(.step)[ctx.entry_id.get()].swap(.done, .monotonic); + if (comptime Environment.ci_assert) { + bun.assertWithLocation(this.current_script_index != 0, @src()); + bun.assertWithLocation(previous_step == .@"run (post)install and (pre/post)prepare", @src()); + } + ctx.installer.onTaskComplete(ctx.entry_id, .success); + } + // the last script finished _ = this.manager.pending_lifecycle_script_tasks.fetchSub(1, .monotonic); @@ -422,6 +453,10 @@ pub const LifecycleScriptSubprocess = struct { }, .err => |err| { if (this.optional) { + if (this.ctx) |ctx| { + ctx.installer.store.entries.items(.step)[ctx.entry_id.get()].store(.done, .monotonic); + ctx.installer.onTaskComplete(ctx.entry_id, .fail); + } _ = this.manager.pending_lifecycle_script_tasks.fetchSub(1, .monotonic); this.deinitAndDeletePackage(); return; @@ -506,17 +541,21 @@ pub const LifecycleScriptSubprocess = struct { manager: *PackageManager, list: Lockfile.Package.Scripts.List, envp: [:null]?[*:0]const u8, + shell_bin: ?[:0]const u8, optional: bool, log_level: PackageManager.Options.LogLevel, foreground: bool, + ctx: ?InstallCtx, ) !void { var lifecycle_subprocess = LifecycleScriptSubprocess.new(.{ .manager = manager, .envp = envp, + .shell_bin = shell_bin, .scripts = list, .package_name = list.package_name, .foreground = foreground, .optional = optional, + .ctx = ctx, }); if (log_level.isVerbose()) { diff --git a/src/install/lockfile.zig b/src/install/lockfile.zig index 1bb7834bc4..209af71f1a 100644 --- a/src/install/lockfile.zig +++ b/src/install/lockfile.zig @@ -26,15 +26,52 @@ patched_dependencies: PatchedDependenciesMap = .{}, overrides: OverrideMap = .{}, catalogs: CatalogMap = .{}, +node_linker: NodeLinker = .auto, + +pub const NodeLinker = enum(u8) { + // If workspaces are used: isolated + // If not: hoisted + // Used when nodeLinker is absent from package.json/bun.lock/bun.lockb + auto, + + hoisted, + isolated, + + pub fn fromStr(input: 
string) ?NodeLinker { + if (strings.eqlComptime(input, "hoisted")) { + return .hoisted; + } + if (strings.eqlComptime(input, "isolated")) { + return .isolated; + } + return null; + } +}; + +pub const DepSorter = struct { + lockfile: *const Lockfile, + + pub fn isLessThan(sorter: @This(), l: DependencyID, r: DependencyID) bool { + const deps_buf = sorter.lockfile.buffers.dependencies.items; + const string_buf = sorter.lockfile.buffers.string_bytes.items; + + const l_dep = &deps_buf[l]; + const r_dep = &deps_buf[r]; + + return switch (l_dep.behavior.cmp(r_dep.behavior)) { + .lt => true, + .gt => false, + .eq => strings.order(l_dep.name.slice(string_buf), r_dep.name.slice(string_buf)) == .lt, + }; + } +}; + pub const Stream = std.io.FixedBufferStream([]u8); pub const default_filename = "bun.lockb"; pub const Scripts = struct { const MAX_PARALLEL_PROCESSES = 10; - pub const Entry = struct { - script: string, - }; - pub const Entries = std.ArrayListUnmanaged(Entry); + pub const Entries = std.ArrayListUnmanaged(string); pub const names = [_]string{ "preinstall", @@ -73,7 +110,7 @@ pub const Scripts = struct { inline for (Scripts.names) |hook| { const list = &@field(this, hook); for (list.items) |entry| { - allocator.free(entry.script); + allocator.free(entry); } list.deinit(allocator); } @@ -618,6 +655,8 @@ pub fn cleanWithLogger( try new.buffers.preallocate(old.buffers, old.allocator); try new.patched_dependencies.ensureTotalCapacity(old.allocator, old.patched_dependencies.entries.len); + new.node_linker = old.node_linker; + old.scratch.dependency_list_queue.head = 0; { @@ -873,8 +912,6 @@ pub fn hoist( const allocator = lockfile.allocator; var slice = lockfile.packages.slice(); - var path_buf: bun.PathBuffer = undefined; - var builder = Tree.Builder(method){ .name_hashes = slice.items(.name_hash), .queue = .init(allocator), @@ -885,7 +922,6 @@ pub fn hoist( .log = log, .lockfile = lockfile, .manager = manager, - .path_buf = &path_buf, .install_root_dependencies = install_root_dependencies, .workspace_filters = workspace_filters, }; @@ -895,7 +931,6 @@ pub fn hoist( Tree.invalid_id, method, &builder, - if (method == .filter) manager.options.log_level, ); // This goes breadth-first @@ -905,7 +940,6 @@ pub fn hoist( item.hoist_root_id, method, &builder, - if (method == .filter) manager.options.log_level, ); } @@ -1207,6 +1241,7 @@ pub fn initEmpty(this: *Lockfile, allocator: Allocator) void { .workspace_versions = .{}, .overrides = .{}, .catalogs = .{}, + .node_linker = .auto, .meta_hash = zero_hash, }; } @@ -1807,8 +1842,8 @@ pub fn generateMetaHash(this: *Lockfile, print_name_version_string: bool, packag inline for (comptime std.meta.fieldNames(Lockfile.Scripts)) |field_name| { const scripts = @field(this.scripts, field_name); for (scripts.items) |script| { - if (script.script.len > 0) { - string_builder.fmtCount("{s}: {s}\n", .{ field_name, script.script }); + if (script.len > 0) { + string_builder.fmtCount("{s}: {s}\n", .{ field_name, script }); has_scripts = true; } } @@ -1843,8 +1878,8 @@ pub fn generateMetaHash(this: *Lockfile, print_name_version_string: bool, packag inline for (comptime std.meta.fieldNames(Lockfile.Scripts)) |field_name| { const scripts = @field(this.scripts, field_name); for (scripts.items) |script| { - if (script.script.len > 0) { - _ = string_builder.fmt("{s}: {s}\n", .{ field_name, script.script }); + if (script.len > 0) { + _ = string_builder.fmt("{s}: {s}\n", .{ field_name, script }); } } } @@ -1952,17 +1987,17 @@ pub const default_trusted_dependencies = brk: { 
@compileError("default-trusted-dependencies.txt is too large, please increase 'max_default_trusted_dependencies' in lockfile.zig"); } - // just in case there's duplicates from truncating - if (map.has(dep)) @compileError("Duplicate hash due to u64 -> u32 truncation"); - - map.putAssumeCapacity(dep, {}); + const entry = map.getOrPutAssumeCapacity(dep); + if (entry.found_existing) { + @compileError("Duplicate trusted dependency: " ++ dep); + } } const final = map; break :brk &final; }; -pub fn hasTrustedDependency(this: *Lockfile, name: []const u8) bool { +pub fn hasTrustedDependency(this: *const Lockfile, name: []const u8) bool { if (this.trusted_dependencies) |trusted_dependencies| { const hash = @as(u32, @truncate(String.Builder.stringHash(name))); return trusted_dependencies.contains(hash); diff --git a/src/install/lockfile/Package.zig b/src/install/lockfile/Package.zig index 09e1ebb295..8edae3a083 100644 --- a/src/install/lockfile/Package.zig +++ b/src/install/lockfile/Package.zig @@ -527,6 +527,7 @@ pub const Package = extern struct { update: u32 = 0, overrides_changed: bool = false, catalogs_changed: bool = false, + node_linker_changed: bool = false, // bool for if this dependency should be added to lockfile trusted dependencies. // it is false when the new trusted dependency is coming from the default list. @@ -543,6 +544,7 @@ pub const Package = extern struct { pub inline fn hasDiffs(this: Summary) bool { return this.add > 0 or this.remove > 0 or this.update > 0 or this.overrides_changed or this.catalogs_changed or + this.node_linker_changed or this.added_trusted_dependencies.count() > 0 or this.removed_trusted_dependencies.count() > 0 or this.patched_dependencies_changed; @@ -658,6 +660,10 @@ pub const Package = extern struct { } } } + + if (from_lockfile.node_linker != to_lockfile.node_linker) { + summary.node_linker_changed = true; + } } trusted_dependencies: { @@ -1576,6 +1582,19 @@ pub const Package = extern struct { if (json.get("workspaces")) |workspaces_expr| { lockfile.catalogs.parseCount(lockfile, workspaces_expr, &string_builder); + + if (workspaces_expr.get("nodeLinker")) |node_linker_expr| { + if (!node_linker_expr.isString()) { + try log.addError(source, node_linker_expr.loc, "Expected one of \"isolated\" or \"hoisted\""); + return error.InvalidPackageJSON; + } + + const node_linker_str = node_linker_expr.data.e_string.slice(allocator); + lockfile.node_linker = Lockfile.NodeLinker.fromStr(node_linker_str) orelse { + try log.addError(source, node_linker_expr.loc, "Expected one of \"isolated\" or \"hoisted\""); + return error.InvalidPackageJSON; + }; + } } } diff --git a/src/install/lockfile/Package/Scripts.zig b/src/install/lockfile/Package/Scripts.zig index a93ce33020..516a8017e8 100644 --- a/src/install/lockfile/Package/Scripts.zig +++ b/src/install/lockfile/Package/Scripts.zig @@ -17,12 +17,22 @@ pub const Scripts = extern struct { } pub const List = struct { - items: [Lockfile.Scripts.names.len]?Lockfile.Scripts.Entry, + items: [Lockfile.Scripts.names.len]?string, first_index: u8, total: u8, cwd: stringZ, package_name: string, + pub fn initPreinstall(allocator: std.mem.Allocator, preinstall: string, cwd: string, package_name: string) @This() { + return .{ + .items = .{ allocator.dupe(u8, preinstall) catch bun.outOfMemory(), null, null, null, null, null }, + .first_index = 0, + .total = 1, + .cwd = allocator.dupeZ(u8, cwd) catch bun.outOfMemory(), + .package_name = allocator.dupe(u8, package_name) catch bun.outOfMemory(), + }; + } + pub fn printScripts( this: 
Package.Scripts.List, resolution: *const Resolution, @@ -51,28 +61,28 @@ pub const Scripts = extern struct { if (maybe_script) |script| { Output.pretty(fmt, .{ Lockfile.Scripts.names[script_index], - script.script, + script, }); } } } - pub fn first(this: Package.Scripts.List) Lockfile.Scripts.Entry { + pub fn first(this: Package.Scripts.List) string { if (comptime Environment.allow_assert) { assert(this.items[this.first_index] != null); } return this.items[this.first_index].?; } - pub fn deinit(this: Package.Scripts.List, allocator: std.mem.Allocator) void { - for (this.items) |maybe_item| { - if (maybe_item) |item| { - allocator.free(item.script); - } - } + // pub fn deinit(this: Package.Scripts.List, allocator: std.mem.Allocator) void { + // for (this.items) |maybe_item| { + // if (maybe_item) |item| { + // allocator.free(item); + // } + // } - allocator.free(this.cwd); - } + // allocator.free(this.cwd); + // } pub fn appendToLockfile(this: Package.Scripts.List, lockfile: *Lockfile) void { inline for (this.items, 0..) |maybe_script, i| { @@ -110,37 +120,31 @@ pub const Scripts = extern struct { pub fn getScriptEntries( this: *const Package.Scripts, - lockfile: *Lockfile, + lockfile: *const Lockfile, lockfile_buf: string, resolution_tag: Resolution.Tag, add_node_gyp_rebuild_script: bool, // return: first_index, total, entries - ) struct { i8, u8, [Lockfile.Scripts.names.len]?Lockfile.Scripts.Entry } { + ) struct { i8, u8, [Lockfile.Scripts.names.len]?string } { const allocator = lockfile.allocator; var script_index: u8 = 0; var first_script_index: i8 = -1; - var scripts: [6]?Lockfile.Scripts.Entry = .{null} ** 6; + var scripts: [6]?string = .{null} ** 6; var counter: u8 = 0; if (add_node_gyp_rebuild_script) { { script_index += 1; - const entry: Lockfile.Scripts.Entry = .{ - .script = allocator.dupe(u8, "node-gyp rebuild") catch unreachable, - }; if (first_script_index == -1) first_script_index = @intCast(script_index); - scripts[script_index] = entry; + scripts[script_index] = allocator.dupe(u8, "node-gyp rebuild") catch unreachable; script_index += 1; counter += 1; } // missing install and preinstall, only need to check postinstall if (!this.postinstall.isEmpty()) { - const entry: Lockfile.Scripts.Entry = .{ - .script = allocator.dupe(u8, this.preinstall.slice(lockfile_buf)) catch unreachable, - }; if (first_script_index == -1) first_script_index = @intCast(script_index); - scripts[script_index] = entry; + scripts[script_index] = allocator.dupe(u8, this.preinstall.slice(lockfile_buf)) catch unreachable; counter += 1; } script_index += 1; @@ -154,11 +158,8 @@ pub const Scripts = extern struct { inline for (install_scripts) |hook| { const script = @field(this, hook); if (!script.isEmpty()) { - const entry: Lockfile.Scripts.Entry = .{ - .script = allocator.dupe(u8, script.slice(lockfile_buf)) catch unreachable, - }; if (first_script_index == -1) first_script_index = @intCast(script_index); - scripts[script_index] = entry; + scripts[script_index] = allocator.dupe(u8, script.slice(lockfile_buf)) catch unreachable; counter += 1; } script_index += 1; @@ -176,11 +177,8 @@ pub const Scripts = extern struct { inline for (prepare_scripts) |hook| { const script = @field(this, hook); if (!script.isEmpty()) { - const entry: Lockfile.Scripts.Entry = .{ - .script = allocator.dupe(u8, script.slice(lockfile_buf)) catch unreachable, - }; if (first_script_index == -1) first_script_index = @intCast(script_index); - scripts[script_index] = entry; + scripts[script_index] = allocator.dupe(u8, 
script.slice(lockfile_buf)) catch unreachable; counter += 1; } script_index += 1; @@ -189,11 +187,8 @@ pub const Scripts = extern struct { .workspace => { script_index += 1; if (!this.prepare.isEmpty()) { - const entry: Lockfile.Scripts.Entry = .{ - .script = allocator.dupe(u8, this.prepare.slice(lockfile_buf)) catch unreachable, - }; if (first_script_index == -1) first_script_index = @intCast(script_index); - scripts[script_index] = entry; + scripts[script_index] = allocator.dupe(u8, this.prepare.slice(lockfile_buf)) catch unreachable; counter += 1; } script_index += 2; @@ -206,9 +201,9 @@ pub const Scripts = extern struct { pub fn createList( this: *const Package.Scripts, - lockfile: *Lockfile, + lockfile: *const Lockfile, lockfile_buf: []const u8, - cwd_: string, + cwd_: *bun.AbsPath(.{ .sep = .auto }), package_name: string, resolution_tag: Resolution.Tag, add_node_gyp_rebuild_script: bool, @@ -219,16 +214,10 @@ pub const Scripts = extern struct { var cwd_buf: if (Environment.isWindows) bun.PathBuffer else void = undefined; const cwd = if (comptime !Environment.isWindows) - cwd_ + cwd_.slice() else brk: { - @memcpy(cwd_buf[0..cwd_.len], cwd_); - cwd_buf[cwd_.len] = 0; - const cwd_handle = bun.openDirNoRenamingOrDeletingWindows(bun.invalid_fd, cwd_buf[0..cwd_.len :0]) catch break :brk cwd_; - - var buf: bun.WPathBuffer = undefined; - const new_cwd = bun.windows.GetFinalPathNameByHandle(cwd_handle.fd, .{}, &buf) catch break :brk cwd_; - - break :brk strings.convertUTF16toUTF8InBuffer(&cwd_buf, new_cwd) catch break :brk cwd_; + const cwd_handle = bun.openDirNoRenamingOrDeletingWindows(bun.invalid_fd, cwd_.sliceZ()) catch break :brk cwd_.slice(); + break :brk FD.fromStdDir(cwd_handle).getFdPath(&cwd_buf) catch break :brk cwd_.slice(); }; return .{ @@ -274,54 +263,36 @@ pub const Scripts = extern struct { pub fn getList( this: *Package.Scripts, log: *logger.Log, - lockfile: *Lockfile, - node_modules: *PackageManager.PackageInstaller.LazyPackageDestinationDir, - abs_node_modules_path: string, + lockfile: *const Lockfile, + folder_path: *bun.AbsPath(.{ .sep = .auto }), folder_name: string, resolution: *const Resolution, ) !?Package.Scripts.List { - var path_buf: [bun.MAX_PATH_BYTES * 2]u8 = undefined; if (this.hasAny()) { const add_node_gyp_rebuild_script = if (lockfile.hasTrustedDependency(folder_name) and this.install.isEmpty() and this.preinstall.isEmpty()) brk: { - const binding_dot_gyp_path = Path.joinAbsStringZ( - abs_node_modules_path, - &[_]string{ folder_name, "binding.gyp" }, - .auto, - ); + var save = folder_path.save(); + defer save.restore(); + folder_path.append("binding.gyp"); - break :brk bun.sys.exists(binding_dot_gyp_path); + break :brk bun.sys.exists(folder_path.slice()); } else false; - const cwd = Path.joinAbsStringBufZTrailingSlash( - abs_node_modules_path, - &path_buf, - &[_]string{folder_name}, - .auto, - ); - return this.createList( lockfile, lockfile.buffers.string_bytes.items, - cwd, + folder_path, folder_name, resolution.tag, add_node_gyp_rebuild_script, ); } else if (!this.filled) { - const abs_folder_path = Path.joinAbsStringBufZTrailingSlash( - abs_node_modules_path, - &path_buf, - &[_]string{folder_name}, - .auto, - ); return this.createFromPackageJSON( log, lockfile, - node_modules, - abs_folder_path, + folder_path, folder_name, resolution.tag, ); @@ -335,14 +306,16 @@ pub const Scripts = extern struct { allocator: std.mem.Allocator, string_builder: *Lockfile.StringBuilder, log: *logger.Log, - node_modules: 
*PackageManager.PackageInstaller.LazyPackageDestinationDir, - folder_name: string, + folder_path: *bun.AbsPath(.{ .sep = .auto }), ) !void { const json = brk: { + var save = folder_path.save(); + defer save.restore(); + folder_path.append("package.json"); + const json_src = brk2: { - const json_path = bun.path.joinZ([_]string{ folder_name, "package.json" }, .auto); - const buf = try bun.sys.File.readFrom(try node_modules.getDir(), json_path, allocator).unwrap(); - break :brk2 logger.Source.initPathString(json_path, buf); + const buf = try bun.sys.File.readFrom(bun.FD.cwd(), folder_path.sliceZ(), allocator).unwrap(); + break :brk2 logger.Source.initPathString(folder_path.slice(), buf); }; initializeStore(); @@ -362,9 +335,8 @@ pub const Scripts = extern struct { pub fn createFromPackageJSON( this: *Package.Scripts, log: *logger.Log, - lockfile: *Lockfile, - node_modules: *PackageManager.PackageInstaller.LazyPackageDestinationDir, - abs_folder_path: string, + lockfile: *const Lockfile, + folder_path: *bun.AbsPath(.{ .sep = .auto }), folder_name: string, resolution_tag: Resolution.Tag, ) !?Package.Scripts.List { @@ -372,22 +344,20 @@ pub const Scripts = extern struct { tmp.initEmpty(lockfile.allocator); defer tmp.deinit(); var builder = tmp.stringBuilder(); - try this.fillFromPackageJSON(lockfile.allocator, &builder, log, node_modules, folder_name); + try this.fillFromPackageJSON(lockfile.allocator, &builder, log, folder_path); const add_node_gyp_rebuild_script = if (this.install.isEmpty() and this.preinstall.isEmpty()) brk: { - const binding_dot_gyp_path = Path.joinAbsStringZ( - abs_folder_path, - &[_]string{"binding.gyp"}, - .auto, - ); + const save = folder_path.save(); + defer save.restore(); + folder_path.append("binding.gyp"); - break :brk bun.sys.exists(binding_dot_gyp_path); + break :brk bun.sys.exists(folder_path.slice()); } else false; return this.createList( lockfile, tmp.buffers.string_bytes.items, - abs_folder_path, + folder_path, folder_name, resolution_tag, add_node_gyp_rebuild_script, @@ -402,8 +372,6 @@ const JSAst = bun.JSAst; const JSON = bun.JSON; const Lockfile = install.Lockfile; const Output = bun.Output; -const PackageManager = install.PackageManager; -const Path = bun.path; const Resolution = bun.install.Resolution; const Semver = bun.Semver; const String = Semver.String; @@ -419,3 +387,4 @@ const stringZ = [:0]const u8; const strings = bun.strings; const Package = Lockfile.Package; const debug = Output.scoped(.Lockfile, true); +const FD = bun.FD; diff --git a/src/install/lockfile/Tree.zig b/src/install/lockfile/Tree.zig index f20ff9b013..46da478468 100644 --- a/src/install/lockfile/Tree.zig +++ b/src/install/lockfile/Tree.zig @@ -244,7 +244,6 @@ pub fn Builder(comptime method: BuilderMethod) type { sort_buf: std.ArrayListUnmanaged(DependencyID) = .{}, workspace_filters: if (method == .filter) []const WorkspaceFilter else void = if (method == .filter) &.{}, install_root_dependencies: if (method == .filter) bool else void, - path_buf: []u8, pub fn maybeReportError(this: *@This(), comptime fmt: string, args: anytype) void { this.log.addErrorFmt(null, logger.Loc.Empty, this.allocator, fmt, args) catch {}; @@ -316,13 +315,120 @@ pub fn Builder(comptime method: BuilderMethod) type { }; } +pub fn isFilteredDependencyOrWorkspace( + dep_id: DependencyID, + parent_pkg_id: PackageID, + workspace_filters: []const WorkspaceFilter, + install_root_dependencies: bool, + manager: *const PackageManager, + lockfile: *const Lockfile, +) bool { + const pkg_id = 
lockfile.buffers.resolutions.items[dep_id]; + if (pkg_id >= lockfile.packages.len) { + return true; + } + + const pkgs = lockfile.packages.slice(); + const pkg_names = pkgs.items(.name); + const pkg_metas = pkgs.items(.meta); + const pkg_resolutions = pkgs.items(.resolution); + + const dep = lockfile.buffers.dependencies.items[dep_id]; + const res = &pkg_resolutions[pkg_id]; + const parent_res = &pkg_resolutions[parent_pkg_id]; + + if (pkg_metas[pkg_id].isDisabled()) { + if (manager.options.log_level.isVerbose()) { + const meta = &pkg_metas[pkg_id]; + const name = lockfile.str(&pkg_names[pkg_id]); + if (!meta.os.isMatch() and !meta.arch.isMatch()) { + Output.prettyErrorln("Skip installing {s} - cpu & os mismatch", .{name}); + } else if (!meta.os.isMatch()) { + Output.prettyErrorln("Skip installing {s} - os mismatch", .{name}); + } else if (!meta.arch.isMatch()) { + Output.prettyErrorln("Skip installing {s} - cpu mismatch", .{name}); + } + } + return true; + } + + if (dep.behavior.isBundled()) { + return true; + } + + const dep_features = switch (parent_res.tag) { + .root, .workspace, .folder => manager.options.local_package_features, + else => manager.options.remote_package_features, + }; + + if (!dep.behavior.isEnabled(dep_features)) { + return true; + } + + // Filtering only applies to the root package dependencies. Also + // --filter has a different meaning if a new package is being installed. + if (manager.subcommand != .install or parent_pkg_id != 0) { + return false; + } + + if (!dep.behavior.isWorkspaceOnly()) { + if (!install_root_dependencies) { + return true; + } + + return false; + } + + var workspace_matched = workspace_filters.len == 0; + + for (workspace_filters) |filter| { + var filter_path: bun.AbsPath(.{ .sep = .posix }) = .initTopLevelDir(); + defer filter_path.deinit(); + + const pattern, const name_or_path = switch (filter) { + .all => { + workspace_matched = true; + continue; + }, + .name => |name_pattern| .{ + name_pattern, + pkg_names[pkg_id].slice(lockfile.buffers.string_bytes.items), + }, + .path => |path_pattern| path_pattern: { + if (res.tag != .workspace) { + return false; + } + + filter_path.join(&.{res.value.workspace.slice(lockfile.buffers.string_bytes.items)}); + + break :path_pattern .{ path_pattern, filter_path.slice() }; + }, + }; + + switch (bun.glob.match(undefined, pattern, name_or_path)) { + .match, .negate_match => workspace_matched = true, + + .negate_no_match => { + // always skip if a pattern specifically says "!" 
+ workspace_matched = false; + break; + }, + + .no_match => { + // keep looking + }, + } + } + + return !workspace_matched; +} + pub fn processSubtree( this: *const Tree, dependency_id: DependencyID, hoist_root_id: Tree.Id, comptime method: BuilderMethod, builder: *Builder(method), - log_level: if (method == .filter) PackageManager.Options.LogLevel else void, ) SubtreeError!void { const parent_pkg_id = switch (dependency_id) { root_dep_id => 0, @@ -350,8 +456,6 @@ pub fn processSubtree( const pkgs = builder.lockfile.packages.slice(); const pkg_resolutions = pkgs.items(.resolution); - const pkg_metas = pkgs.items(.meta); - const pkg_names = pkgs.items(.name); builder.sort_buf.clearRetainingCapacity(); try builder.sort_buf.ensureUnusedCapacity(builder.allocator, resolution_list.len); @@ -360,31 +464,13 @@ pub fn processSubtree( builder.sort_buf.appendAssumeCapacity(@intCast(dep_id)); } - const DepSorter = struct { - lockfile: *const Lockfile, - - pub fn isLessThan(sorter: @This(), l: DependencyID, r: DependencyID) bool { - const deps_buf = sorter.lockfile.buffers.dependencies.items; - const string_buf = sorter.lockfile.buffers.string_bytes.items; - - const l_dep = deps_buf[l]; - const r_dep = deps_buf[r]; - - return switch (l_dep.behavior.cmp(r_dep.behavior)) { - .lt => true, - .gt => false, - .eq => strings.order(l_dep.name.slice(string_buf), r_dep.name.slice(string_buf)) == .lt, - }; - } - }; - std.sort.pdq( DependencyID, builder.sort_buf.items, - DepSorter{ + Lockfile.DepSorter{ .lockfile = builder.lockfile, }, - DepSorter.isLessThan, + Lockfile.DepSorter.isLessThan, ); for (builder.sort_buf.items) |dep_id| { @@ -394,101 +480,16 @@ pub fn processSubtree( // filter out disabled dependencies if (comptime method == .filter) { - if (builder.lockfile.isResolvedDependencyDisabled( + if (isFilteredDependencyOrWorkspace( dep_id, - switch (pkg_resolutions[parent_pkg_id].tag) { - .root, .workspace, .folder => builder.manager.options.local_package_features, - else => builder.manager.options.remote_package_features, - }, - &pkg_metas[pkg_id], + parent_pkg_id, + builder.workspace_filters, + builder.install_root_dependencies, + builder.manager, + builder.lockfile, )) { - if (log_level.isVerbose()) { - const meta = &pkg_metas[pkg_id]; - const name = builder.lockfile.str(&pkg_names[pkg_id]); - if (!meta.os.isMatch() and !meta.arch.isMatch()) { - Output.prettyErrorln("Skip installing '{s}' cpu & os mismatch", .{name}); - } else if (!meta.os.isMatch()) { - Output.prettyErrorln("Skip installing '{s}' os mismatch", .{name}); - } else if (!meta.arch.isMatch()) { - Output.prettyErrorln("Skip installing '{s}' cpu mismatch", .{name}); - } - } - continue; } - - if (builder.manager.subcommand == .install) dont_skip: { - // only do this when parent is root. 
workspaces are always dependencies of the root - // package, and the root package is always called with `processSubtree` - if (parent_pkg_id == 0 and builder.workspace_filters.len > 0) { - if (!builder.dependencies[dep_id].behavior.isWorkspaceOnly()) { - if (builder.install_root_dependencies) { - break :dont_skip; - } - - continue; - } - - var match = false; - - for (builder.workspace_filters) |workspace_filter| { - const res_id = builder.resolutions[dep_id]; - - const pattern, const path_or_name = switch (workspace_filter) { - .name => |pattern| .{ pattern, pkg_names[res_id].slice(builder.buf()) }, - - .path => |pattern| path: { - const res = &pkg_resolutions[res_id]; - if (res.tag != .workspace) { - break :dont_skip; - } - const res_path = res.value.workspace.slice(builder.buf()); - - // occupy `builder.path_buf` - var abs_res_path = strings.withoutTrailingSlash(bun.path.joinAbsStringBuf( - FileSystem.instance.top_level_dir, - builder.path_buf, - &.{res_path}, - .auto, - )); - - if (comptime Environment.isWindows) { - abs_res_path = abs_res_path[Path.windowsVolumeNameLen(abs_res_path)[0]..]; - Path.dangerouslyConvertPathToPosixInPlace(u8, builder.path_buf[0..abs_res_path.len]); - } - - break :path .{ - pattern, - abs_res_path, - }; - }, - - .all => { - match = true; - continue; - }, - }; - - switch (bun.glob.walk.matchImpl(builder.allocator, pattern, path_or_name)) { - .match, .negate_match => match = true, - - .negate_no_match => { - // always skip if a pattern specifically says "!" - match = false; - break; - }, - - .no_match => { - // keep current - }, - } - } - - if (!match) { - continue; - } - } - } } const hoisted: HoistDependencyResult = hoisted: { @@ -646,7 +647,6 @@ const DependencyID = install.DependencyID; const DependencyIDList = Lockfile.DependencyIDList; const Environment = bun.Environment; const ExternalSlice = Lockfile.ExternalSlice; -const FileSystem = bun.fs.FileSystem; const Lockfile = install.Lockfile; const OOM = bun.OOM; const Output = bun.Output; @@ -666,7 +666,6 @@ const invalid_package_id = install.invalid_package_id; const logger = bun.logger; const string = []const u8; const stringZ = bun.stringZ; -const strings = bun.strings; const z_allocator = bun.z_allocator; const bun = @import("bun"); diff --git a/src/install/lockfile/bun.lock.zig b/src/install/lockfile/bun.lock.zig index 7b782d5f69..db50bdf193 100644 --- a/src/install/lockfile/bun.lock.zig +++ b/src/install/lockfile/bun.lock.zig @@ -91,6 +91,14 @@ pub const Stringifier = struct { try writer.print("\"lockfileVersion\": {d},\n", .{@intFromEnum(Version.current)}); try writeIndent(writer, indent); + if (lockfile.node_linker != .auto) { + try writer.print( + \\"nodeLinker": "{s}", + \\ + , .{@tagName(lockfile.node_linker)}); + try writeIndent(writer, indent); + } + try writer.writeAll("\"workspaces\": {\n"); try incIndent(writer, indent); { @@ -1002,6 +1010,7 @@ const ParseError = OOM || error{ InvalidOverridesObject, InvalidCatalogObject, InvalidCatalogsObject, + InvalidNodeLinkerValue, InvalidDependencyName, InvalidDependencyVersion, InvalidPackageResolution, @@ -1344,7 +1353,7 @@ pub fn parseIntoBinaryLockfile( if (!key.isString() or key.data.e_string.len() == 0) { try log.addError(source, key.loc, "Expected a non-empty string"); - return error.InvalidCatalogObject; + return error.InvalidCatalogsObject; } const dep_name_str = key.asString(allocator).?; @@ -1353,7 +1362,7 @@ pub fn parseIntoBinaryLockfile( if (!value.isString()) { try log.addError(source, value.loc, "Expected a string"); - return 
error.InvalidCatalogObject; + return error.InvalidCatalogsObject; } const version_str = value.asString(allocator).?; @@ -1374,7 +1383,7 @@ pub fn parseIntoBinaryLockfile( manager, ) orelse { try log.addError(source, value.loc, "Invalid catalog version"); - return error.InvalidCatalogObject; + return error.InvalidCatalogsObject; }, }; @@ -1386,7 +1395,7 @@ pub fn parseIntoBinaryLockfile( if (entry.found_existing) { try log.addError(source, key.loc, "Duplicate catalog entry"); - return error.InvalidCatalogObject; + return error.InvalidCatalogsObject; } entry.value_ptr.* = dep; @@ -1394,6 +1403,21 @@ pub fn parseIntoBinaryLockfile( } } + if (root.get("nodeLinker")) |node_linker_expr| { + if (!node_linker_expr.isString()) { + try log.addError(source, node_linker_expr.loc, "Expected a string"); + return error.InvalidNodeLinkerValue; + } + + const node_linker_str = node_linker_expr.data.e_string.slice(allocator); + lockfile.node_linker = BinaryLockfile.NodeLinker.fromStr(node_linker_str) orelse { + try log.addError(source, node_linker_expr.loc, "Expected one of \"isolated\" or \"hoisted\""); + return error.InvalidNodeLinkerValue; + }; + } else { + lockfile.node_linker = .auto; + } + const workspaces_obj = root.getObject("workspaces") orelse { try log.addError(source, root.loc, "Missing a workspaces object property"); return error.InvalidWorkspaceObject; diff --git a/src/install/lockfile/bun.lockb.zig b/src/install/lockfile/bun.lockb.zig index 8f546724f4..29f46c22eb 100644 --- a/src/install/lockfile/bun.lockb.zig +++ b/src/install/lockfile/bun.lockb.zig @@ -7,6 +7,7 @@ const has_trusted_dependencies_tag: u64 = @bitCast(@as([8]u8, "tRuStEDd".*)); const has_empty_trusted_dependencies_tag: u64 = @bitCast(@as([8]u8, "eMpTrUsT".*)); const has_overrides_tag: u64 = @bitCast(@as([8]u8, "oVeRriDs".*)); const has_catalogs_tag: u64 = @bitCast(@as([8]u8, "cAtAlOgS".*)); +const has_node_linker_tag: u64 = @bitCast(@as([8]u8, "nOdLiNkR".*)); pub fn save(this: *Lockfile, verbose_log: bool, bytes: *std.ArrayList(u8), total_size: *usize, end_pos: *usize) !void { @@ -244,6 +245,11 @@ pub fn save(this: *Lockfile, verbose_log: bool, bytes: *std.ArrayList(u8), total } } + if (this.node_linker != .auto) { + try writer.writeAll(std.mem.asBytes(&has_node_linker_tag)); + try writer.writeInt(u8, @intFromEnum(this.node_linker), .little); + } + total_size.* = try stream.getPos(); try writer.writeAll(&alignment_bytes_to_repeat_buffer); @@ -520,6 +526,21 @@ pub fn load( } } + { + lockfile.node_linker = .auto; + + const remaining_in_buffer = total_buffer_size -| stream.pos; + + if (remaining_in_buffer > 8 and total_buffer_size <= stream.buffer.len) { + const next_num = try reader.readInt(u64, .little); + if (next_num == has_node_linker_tag) { + lockfile.node_linker = try reader.readEnum(Lockfile.NodeLinker, .little); + } else { + stream.pos -= 8; + } + } + } + lockfile.scratch = Lockfile.Scratch.init(allocator); lockfile.package_index = PackageIndex.Map.initContext(allocator, .{}); lockfile.string_pool = StringPool.init(allocator); diff --git a/src/install/patch_install.zig b/src/install/patch_install.zig index 20463990ae..7723582ab6 100644 --- a/src/install/patch_install.zig +++ b/src/install/patch_install.zig @@ -84,7 +84,7 @@ pub const PatchTask = struct { cache_dir_subpath_without_patch_hash: stringZ, /// this is non-null if this was called before a Task, for example extracting - task_id: ?Task.Id.Type = null, + task_id: ?Task.Id = null, install_context: ?struct { dependency_id: DependencyID, tree_id: Lockfile.Tree.Id, @@ 
-324,7 +324,7 @@ pub const PatchTask = struct { .cache_dir_subpath = this.callback.apply.cache_dir_subpath_without_patch_hash, .destination_dir_subpath = tempdir_name, .destination_dir_subpath_buf = tmpname_buf[0..], - .patch = .{}, + .patch = null, .progress = null, .package_name = pkg_name, .package_version = resolution_label, diff --git a/src/install/repository.zig b/src/install/repository.zig index a41a81af60..09f8953c76 100644 --- a/src/install/repository.zig +++ b/src/install/repository.zig @@ -293,11 +293,52 @@ pub const Repository = extern struct { return lhs.resolved.eql(rhs.resolved, lhs_buf, rhs_buf); } - pub fn formatAs(this: *const Repository, label: string, buf: []const u8, comptime layout: []const u8, opts: std.fmt.FormatOptions, writer: anytype) !void { + pub fn formatAs(this: *const Repository, label: string, buf: []const u8, comptime layout: []const u8, opts: std.fmt.FormatOptions, writer: anytype) @TypeOf(writer).Error!void { const formatter = Formatter{ .label = label, .repository = this, .buf = buf }; return try formatter.format(layout, opts, writer); } + pub fn fmtStorePath(this: *const Repository, label: string, string_buf: string) StorePathFormatter { + return .{ + .repo = this, + .label = label, + .string_buf = string_buf, + }; + } + + pub const StorePathFormatter = struct { + repo: *const Repository, + label: string, + string_buf: string, + + pub fn format(this: StorePathFormatter, comptime _: string, _: std.fmt.FormatOptions, writer: anytype) @TypeOf(writer).Error!void { + try writer.print("{}", .{Install.fmtStorePath(this.label)}); + + if (!this.repo.owner.isEmpty()) { + try writer.print("{}", .{this.repo.owner.fmtStorePath(this.string_buf)}); + // try writer.writeByte(if (this.opts.replace_slashes) '+' else '/'); + try writer.writeByte('+'); + } else if (Dependency.isSCPLikePath(this.repo.repo.slice(this.string_buf))) { + // try writer.print("ssh:{s}", .{if (this.opts.replace_slashes) "++" else "//"}); + try writer.writeAll("ssh:++"); + } + + try writer.print("{}", .{this.repo.repo.fmtStorePath(this.string_buf)}); + + if (!this.repo.resolved.isEmpty()) { + try writer.writeByte('+'); // this would be '#' but it's not valid on windows + var resolved = this.repo.resolved.slice(this.string_buf); + if (strings.lastIndexOfChar(resolved, '-')) |i| { + resolved = resolved[i + 1 ..]; + } + try writer.print("{}", .{Install.fmtStorePath(resolved)}); + } else if (!this.repo.committish.isEmpty()) { + try writer.writeByte('+'); // this would be '#' but it's not valid on windows + try writer.print("{}", .{this.repo.committish.fmtStorePath(this.string_buf)}); + } + } + }; + pub fn fmt(this: *const Repository, label: string, buf: []const u8) Formatter { return .{ .repository = this, @@ -310,7 +351,7 @@ pub const Repository = extern struct { label: []const u8 = "", buf: []const u8, repository: *const Repository, - pub fn format(formatter: Formatter, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void { + pub fn format(formatter: Formatter, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) @TypeOf(writer).Error!void { if (comptime Environment.allow_assert) bun.assert(formatter.label.len > 0); try writer.writeAll(formatter.label); @@ -458,14 +499,14 @@ pub const Repository = extern struct { env: DotEnv.Map, log: *logger.Log, cache_dir: std.fs.Dir, - task_id: u64, + task_id: Install.Task.Id, name: string, url: string, attempt: u8, ) !std.fs.Dir { bun.Analytics.Features.git_dependencies += 1; const folder_name = try 
std.fmt.bufPrintZ(&folder_name_buf, "{any}.git", .{ - bun.fmt.hexIntLower(task_id), + bun.fmt.hexIntLower(task_id.get()), }); return if (cache_dir.openDirZ(folder_name, .{})) |dir| fetch: { @@ -523,10 +564,10 @@ pub const Repository = extern struct { repo_dir: std.fs.Dir, name: string, committish: string, - task_id: u64, + task_id: Install.Task.Id, ) !string { const path = Path.joinAbsString(PackageManager.get().cache_directory_path, &.{try std.fmt.bufPrint(&folder_name_buf, "{any}.git", .{ - bun.fmt.hexIntLower(task_id), + bun.fmt.hexIntLower(task_id.get()), })}, .auto); _ = repo_dir; diff --git a/src/install/resolution.zig b/src/install/resolution.zig index 50fe0d936e..b30775c6aa 100644 --- a/src/install/resolution.zig +++ b/src/install/resolution.zig @@ -189,6 +189,37 @@ pub const Resolution = extern struct { }; } + const StorePathFormatter = struct { + res: *const Resolution, + string_buf: string, + // opts: String.StorePathFormatter.Options, + + pub fn format(this: StorePathFormatter, comptime _: string, _: std.fmt.FormatOptions, writer: anytype) @TypeOf(writer).Error!void { + const string_buf = this.string_buf; + const res = this.res.value; + switch (this.res.tag) { + .root => try writer.writeAll("root"), + .npm => try writer.print("{}", .{res.npm.version.fmt(string_buf)}), + .local_tarball => try writer.print("{}", .{res.local_tarball.fmtStorePath(string_buf)}), + .remote_tarball => try writer.print("{}", .{res.remote_tarball.fmtStorePath(string_buf)}), + .folder => try writer.print("{}", .{res.folder.fmtStorePath(string_buf)}), + .git => try writer.print("{}", .{res.git.fmtStorePath("git+", string_buf)}), + .github => try writer.print("{}", .{res.github.fmtStorePath("github+", string_buf)}), + .workspace => try writer.print("{}", .{res.workspace.fmtStorePath(string_buf)}), + .symlink => try writer.print("{}", .{res.symlink.fmtStorePath(string_buf)}), + .single_file_module => try writer.print("{}", .{res.single_file_module.fmtStorePath(string_buf)}), + else => {}, + } + } + }; + + pub fn fmtStorePath(this: *const Resolution, string_buf: string) StorePathFormatter { + return .{ + .res = this, + .string_buf = string_buf, + }; + } + pub fn fmtURL(this: *const Resolution, string_bytes: []const u8) URLFormatter { return URLFormatter{ .resolution = this, .buf = string_bytes }; } @@ -257,7 +288,7 @@ pub const Resolution = extern struct { buf: []const u8, - pub fn format(formatter: URLFormatter, comptime layout: []const u8, opts: std.fmt.FormatOptions, writer: anytype) !void { + pub fn format(formatter: URLFormatter, comptime layout: []const u8, opts: std.fmt.FormatOptions, writer: anytype) @TypeOf(writer).Error!void { const buf = formatter.buf; const value = formatter.resolution.value; switch (formatter.resolution.tag) { @@ -280,7 +311,7 @@ pub const Resolution = extern struct { buf: []const u8, path_sep: bun.fmt.PathFormatOptions.Sep, - pub fn format(formatter: Formatter, comptime layout: []const u8, opts: std.fmt.FormatOptions, writer: anytype) !void { + pub fn format(formatter: Formatter, comptime layout: []const u8, opts: std.fmt.FormatOptions, writer: anytype) @TypeOf(writer).Error!void { const buf = formatter.buf; const value = formatter.resolution.value; switch (formatter.resolution.tag) { diff --git a/src/macho.zig b/src/macho.zig index 9ada9746e6..051a0628b8 100644 --- a/src/macho.zig +++ b/src/macho.zig @@ -169,7 +169,7 @@ pub const MachoFile = struct { // We need to shift [...data after __BUN] forward by size_diff bytes. 
const after_bun_slice = self.data.items[original_data_end + @as(usize, @intCast(size_diff)) ..]; const prev_after_bun_slice = prev_data_slice[original_segsize..]; - bun.move(after_bun_slice, prev_after_bun_slice); + bun.memmove(after_bun_slice, prev_after_bun_slice); // Now we copy the u32 size header std.mem.writeInt(u32, self.data.items[original_fileoff..][0..4], @intCast(data.len), .little); diff --git a/src/multi_array_list.zig b/src/multi_array_list.zig index b544d3043d..a9a4276490 100644 --- a/src/multi_array_list.zig +++ b/src/multi_array_list.zig @@ -184,9 +184,9 @@ pub fn MultiArrayList(comptime T: type) type { }; /// Release all allocated memory. - pub fn deinit(self: *Self, gpa: Allocator) void { + pub fn deinit(self: *const Self, gpa: Allocator) void { gpa.free(self.allocatedBytes()); - self.* = undefined; + @constCast(self).* = undefined; } /// The caller owns the returned memory. Empties this MultiArrayList. diff --git a/src/paths.zig b/src/paths.zig new file mode 100644 index 0000000000..dd9aea3a0f --- /dev/null +++ b/src/paths.zig @@ -0,0 +1,24 @@ +const std = @import("std"); +const bun = @import("bun"); +const Environment = bun.Environment; + +const paths = @import("./paths/Path.zig"); +pub const Path = paths.Path; +pub const AbsPath = paths.AbsPath; +pub const RelPath = paths.RelPath; + +pub const EnvPath = @import("./paths/EnvPath.zig").EnvPath; + +const pools = @import("./paths/path_buffer_pool.zig"); +pub const path_buffer_pool = pools.path_buffer_pool; +pub const w_path_buffer_pool = pools.w_path_buffer_pool; +pub const os_path_buffer_pool = pools.os_path_buffer_pool; + +pub const MAX_PATH_BYTES: usize = if (Environment.isWasm) 1024 else std.fs.max_path_bytes; +pub const PathBuffer = [MAX_PATH_BYTES]u8; +pub const PATH_MAX_WIDE = std.os.windows.PATH_MAX_WIDE; +pub const WPathBuffer = [PATH_MAX_WIDE]u16; +pub const OSPathChar = if (Environment.isWindows) u16 else u8; +pub const OSPathSliceZ = [:0]const OSPathChar; +pub const OSPathSlice = []const OSPathChar; +pub const OSPathBuffer = if (Environment.isWindows) WPathBuffer else PathBuffer; diff --git a/src/paths/EnvPath.zig b/src/paths/EnvPath.zig new file mode 100644 index 0000000000..63e86bef49 --- /dev/null +++ b/src/paths/EnvPath.zig @@ -0,0 +1,90 @@ +const std = @import("std"); +const bun = @import("bun"); +const AbsPath = bun.AbsPath; +const string = bun.string; +const strings = bun.strings; +const OOM = bun.OOM; + +pub const EnvPathOptions = struct { + // +}; + +fn trimPathDelimiters(input: string) string { + var trimmed = input; + while (trimmed.len > 0 and trimmed[0] == std.fs.path.delimiter) { + trimmed = trimmed[1..]; + } + while (trimmed.len > 0 and trimmed[trimmed.len - 1] == std.fs.path.delimiter) { + trimmed = trimmed[0 .. 
trimmed.len - 1]; + } + return trimmed; +} + +pub fn EnvPath(comptime opts: EnvPathOptions) type { + return struct { + allocator: std.mem.Allocator, + buf: std.ArrayListUnmanaged(u8) = .empty, + + pub fn init(allocator: std.mem.Allocator) @This() { + return .{ .allocator = allocator }; + } + + pub fn initCapacity(allocator: std.mem.Allocator, capacity: usize) OOM!@This() { + return .{ .allocator = allocator, .buf = try .initCapacity(allocator, capacity) }; + } + + pub fn deinit(this: *const @This()) void { + @constCast(this).buf.deinit(this.allocator); + } + + pub fn slice(this: *const @This()) string { + return this.buf.items; + } + + pub fn append(this: *@This(), input: anytype) OOM!void { + const trimmed: string = switch (@TypeOf(input)) { + []u8, []const u8 => strings.withoutTrailingSlash(trimPathDelimiters(input)), + + // assume already trimmed + else => input.slice(), + }; + + if (trimmed.len == 0) { + return; + } + + if (this.buf.items.len != 0) { + try this.buf.ensureUnusedCapacity(this.allocator, trimmed.len + 1); + this.buf.appendAssumeCapacity(std.fs.path.delimiter); + this.buf.appendSliceAssumeCapacity(trimmed); + } else { + try this.buf.appendSlice(this.allocator, trimmed); + } + } + + pub const PathComponentBuilder = struct { + env_path: *EnvPath(opts), + path_buf: AbsPath(.{ .sep = .auto }), + + pub fn append(this: *@This(), component: string) void { + this.path_buf.append(component); + } + + pub fn appendFmt(this: *@This(), comptime component_fmt: string, component_args: anytype) void { + this.path_buf.appendFmt(component_fmt, component_args); + } + + pub fn apply(this: *@This()) OOM!void { + try this.env_path.append(&this.path_buf); + this.path_buf.deinit(); + } + }; + + pub fn pathComponentBuilder(this: *@This()) PathComponentBuilder { + return .{ + .env_path = this, + .path_buf = .init(), + }; + } + }; +} diff --git a/src/paths/Path.zig b/src/paths/Path.zig new file mode 100644 index 0000000000..61339c44d4 --- /dev/null +++ b/src/paths/Path.zig @@ -0,0 +1,808 @@ +const std = @import("std"); +const bun = @import("bun"); +const Output = bun.Output; +const PathBuffer = bun.PathBuffer; +const WPathBuffer = bun.WPathBuffer; +const Environment = bun.Environment; +const FD = bun.FD; + +const Options = struct { + check_length: CheckLength = .assume_always_less_than_max_path, + sep: PathSeparators = .any, + kind: Kind = .any, + buf_type: BufType = .pool, + unit: Unit = .u8, + + const Unit = enum { + u8, + u16, + os, + }; + + const BufType = enum { + pool, + // stack, + // array_list, + }; + + const Kind = enum { + abs, + rel, + + // not recommended, but useful when you don't know + any, + }; + + const CheckLength = enum { + assume_always_less_than_max_path, + check_for_greater_than_max_path, + }; + + const PathSeparators = enum { + any, + auto, + posix, + windows, + + pub fn char(comptime sep: @This()) u8 { + return switch (sep) { + .any => @compileError("use the existing slash"), + .auto => std.fs.path.sep, + .posix => std.fs.path.sep_posix, + .windows => std.fs.path.sep_windows, + }; + } + }; + + pub fn pathUnit(comptime opts: @This()) type { + return switch (opts.unit) { + .u8 => u8, + .u16 => u16, + .os => if (Environment.isWindows) u16 else u8, + }; + } + + pub fn notPathUnit(comptime opts: @This()) type { + return switch (opts.unit) { + .u8 => u16, + .u16 => u8, + .os => if (Environment.isWindows) u8 else u16, + }; + } + + pub fn maxPathLength(comptime opts: @This()) usize { + switch (comptime opts.check_length) { + .assume_always_less_than_max_path => @compileError("max 
path length is not needed"), + .check_for_greater_than_max_path => { + return switch (comptime opts.unit) { + .u8 => bun.MAX_PATH_BYTES, + .u16 => bun.PATH_MAX_WIDE, + .os => if (Environment.isWindows) bun.PATH_MAX_WIDE else bun.MAX_PATH_BYTES, + }; + }, + } + } + + pub fn Buf(comptime opts: @This()) type { + return switch (opts.buf_type) { + .pool => struct { + pooled: switch (opts.unit) { + .u8 => *PathBuffer, + .u16 => *WPathBuffer, + .os => if (Environment.isWindows) *WPathBuffer else *PathBuffer, + }, + len: usize, + + pub fn setLength(this: *@This(), new_len: usize) void { + this.len = new_len; + } + + pub fn append(this: *@This(), characters: anytype, add_separator: bool) void { + if (add_separator) { + switch (comptime opts.sep) { + .any, .auto => this.pooled[this.len] = std.fs.path.sep, + .posix => this.pooled[this.len] = std.fs.path.sep_posix, + .windows => this.pooled[this.len] = std.fs.path.sep_windows, + } + this.len += 1; + } + + if (opts.inputChildType(@TypeOf(characters)) == opts.pathUnit()) { + switch (comptime opts.sep) { + .any => { + @memcpy(this.pooled[this.len..][0..characters.len], characters); + this.len += characters.len; + }, + .auto, .posix, .windows => { + for (characters) |c| { + switch (c) { + '/', '\\' => this.pooled[this.len] = opts.sep.char(), + else => this.pooled[this.len] = c, + } + this.len += 1; + } + }, + } + } else { + switch (opts.inputChildType(@TypeOf(characters))) { + u8 => { + const converted = bun.strings.convertUTF8toUTF16InBuffer(this.pooled[this.len..], characters); + if (comptime opts.sep != .any) { + for (this.pooled[this.len..][0..converted.len], 0..) |c, off| { + switch (c) { + '/', '\\' => this.pooled[this.len + off] = opts.sep.char(), + else => {}, + } + } + } + this.len += converted.len; + }, + u16 => { + const converted = bun.strings.convertUTF16toUTF8InBuffer(this.pooled[this.len..], characters) catch unreachable; + if (comptime opts.sep != .any) { + for (this.pooled[this.len..][0..converted.len], 0..) 
|c, off| { + switch (c) { + '/', '\\' => this.pooled[this.len + off] = opts.sep.char(), + else => {}, + } + } + } + this.len += converted.len; + }, + else => @compileError("unexpected character type"), + } + } + + // switch (@TypeOf(characters)) { + // []u8, []const u8, [:0]u8, [:0]const u8 => { + // if (opts.unit == .u8) { + // this.appendT() + // } + // } + // } + } + + // fn append(this: *@This(), characters: []const opts.pathUnit(), add_separator: bool) void { + // if (add_separator) {} + // switch (comptime opts.sep) { + // .any => { + // @memcpy(this.pooled[this.len..][0..characters.len], characters); + // this.len += characters.len; + // }, + // .auto, .posix, .windows => { + // for (characters) |c| { + // switch (c) { + // '/', '\\' => this.pooled[this.len] = opts.sep.char(), + // else => this.pooled[this.len] = c, + // } + // this.len += 1; + // } + // }, + // } + // } + + fn convertAppend(this: *@This(), characters: []const opts.notPathUnit()) void { + _ = this; + _ = characters; + // switch (comptime opts.sep) { + // .any => { + // switch (opts.notPathUnit()) { + // .u8 => { + // const converted = bun.strings.convertUTF8toUTF16InBuffer(this.pooled[this.len..], characters); + // }, + // } + // }, + // } + } + }, + // .stack => struct { + // buf: PathBuffer, + // len: u16, + // }, + // .array_list => struct { + // list: std.ArrayList(opts.pathUnit()), + // }, + + }; + } + + const Error = error{MaxPathExceeded}; + + pub fn ResultFn(comptime opts: @This()) fn (comptime T: type) type { + return struct { + pub fn Result(comptime T: type) type { + return switch (opts.check_length) { + .assume_always_less_than_max_path => T, + .check_for_greater_than_max_path => Error!T, + }; + } + }.Result; + } + + pub fn inputChildType(comptime opts: @This(), comptime InputType: type) type { + _ = opts; + return switch (@typeInfo(std.meta.Child(InputType))) { + // handle string literals + .array => |array| array.child, + else => std.meta.Child(InputType), + }; + } +}; + +pub fn AbsPath(comptime opts: Options) type { + var copy = opts; + copy.kind = .abs; + return Path(copy); +} + +pub fn RelPath(comptime opts: Options) type { + var copy = opts; + copy.kind = .rel; + return Path(copy); +} + +pub fn Path(comptime opts: Options) type { + const Result = opts.ResultFn(); + + // if (opts.unit == .u16 and !Environment.isWindows) { + // @compileError("utf16 not supported"); + // } + + // const log = Output.scoped(.Path, false); + + return struct { + _buf: opts.Buf(), + + pub fn init() @This() { + switch (comptime opts.buf_type) { + .pool => { + return .{ + ._buf = .{ + .pooled = switch (opts.unit) { + .u8 => bun.path_buffer_pool.get(), + .u16 => bun.w_path_buffer_pool.get(), + .os => if (comptime Environment.isWindows) + bun.w_path_buffer_pool.get() + else + bun.path_buffer_pool.get(), + }, + .len = 0, + }, + }; + }, + } + } + + pub fn deinit(this: *const @This()) void { + switch (comptime opts.buf_type) { + .pool => { + switch (opts.unit) { + .u8 => bun.path_buffer_pool.put(this._buf.pooled), + .u16 => bun.w_path_buffer_pool.put(this._buf.pooled), + .os => if (comptime Environment.isWindows) + bun.w_path_buffer_pool.put(this._buf.pooled) + else + bun.path_buffer_pool.put(this._buf.pooled), + } + }, + } + @constCast(this).* = undefined; + } + + pub fn move(this: *const @This()) @This() { + const moved = this.*; + @constCast(this).* = undefined; + return moved; + } + + pub fn initTopLevelDir() @This() { + bun.debugAssert(bun.fs.FileSystem.instance_loaded); + const top_level_dir = 
bun.fs.FileSystem.instance.top_level_dir; + + const trimmed = switch (comptime opts.kind) { + .abs => trimmed: { + bun.debugAssert(isInputAbsolute(top_level_dir)); + break :trimmed trimInput(.abs, top_level_dir); + }, + .rel => @compileError("cannot create a relative path from top_level_dir"), + .any => trimInput(.abs, top_level_dir), + }; + + var this = init(); + this._buf.append(trimmed, false); + return this; + } + + pub fn initFdPath(fd: FD) !@This() { + switch (comptime opts.kind) { + .abs => {}, + .rel => @compileError("cannot create a relative path from getFdPath"), + .any => {}, + } + + var this = init(); + switch (comptime opts.buf_type) { + .pool => { + const raw = try fd.getFdPath(this._buf.pooled); + const trimmed = trimInput(.abs, raw); + this._buf.len = trimmed.len; + }, + } + + return this; + } + + pub fn from(input: anytype) Result(@This()) { + switch (comptime @TypeOf(input)) { + []u8, []const u8, [:0]u8, [:0]const u8 => {}, + []u16, []const u16, [:0]u16, [:0]const u16 => {}, + else => @compileError("unsupported type: " ++ @typeName(@TypeOf(input))), + } + const trimmed = switch (comptime opts.kind) { + .abs => trimmed: { + bun.debugAssert(isInputAbsolute(input)); + break :trimmed trimInput(.abs, input); + }, + .rel => trimmed: { + bun.debugAssert(!isInputAbsolute(input)); + break :trimmed trimInput(.rel, input); + }, + .any => trimInput(if (isInputAbsolute(input)) .abs else .rel, input), + }; + + if (comptime opts.check_length == .check_for_greater_than_max_path) { + if (trimmed.len >= opts.maxPathLength()) { + return error.MaxPathExceeded; + } + } + + var this = init(); + this._buf.append(trimmed, false); + return this; + } + + pub fn isAbsolute(this: *const @This()) bool { + return switch (comptime opts.kind) { + .abs => @compileError("already known to be absolute"), + .rel => @compileError("already known to not be absolute"), + .any => isInputAbsolute(this.slice()), + }; + } + + pub fn basename(this: *const @This()) []const opts.pathUnit() { + return bun.strings.basename(opts.pathUnit(), this.slice()); + } + + pub fn basenameZ(this: *const @This()) [:0]const opts.pathUnit() { + const full = this.sliceZ(); + const base = bun.strings.basename(opts.pathUnit(), full); + return full[full.len - base.len ..][0..base.len :0]; + } + + pub fn dirname(this: *const @This()) ?[]const opts.pathUnit() { + return bun.Dirname.dirname(opts.pathUnit(), this.slice()); + } + + pub fn slice(this: *const @This()) []const opts.pathUnit() { + switch (comptime opts.buf_type) { + .pool => return this._buf.pooled[0..this._buf.len], + } + } + + pub fn sliceZ(this: *const @This()) [:0]const opts.pathUnit() { + switch (comptime opts.buf_type) { + .pool => { + this._buf.pooled[this._buf.len] = 0; + return this._buf.pooled[0..this._buf.len :0]; + }, + } + } + + // pub fn buf(this: *const @This()) []opts.pathUnit() { + // switch (comptime opts.buf_type) { + // .pool => { + // return this._buf.pooled; + // }, + // } + // } + + pub fn len(this: *const @This()) usize { + switch (comptime opts.buf_type) { + .pool => { + return this._buf.len; + }, + } + } + + pub fn clone(this: *const @This()) @This() { + switch (comptime opts.buf_type) { + .pool => { + var cloned = init(); + @memcpy(cloned._buf.pooled[0..this._buf.len], this._buf.pooled[0..this._buf.len]); + cloned._buf.len = this._buf.len; + return cloned; + }, + } + } + + pub fn clear(this: *@This()) void { + this._buf.setLength(0); + } + + pub fn rootLen(input: anytype) ?usize { + if (comptime Environment.isWindows) { + if (input.len > 2 and input[1] == 
':' and switch (input[2]) { + '/', '\\' => true, + else => false, + }) { + const letter = input[0]; + if (('a' <= letter and letter <= 'z') or ('A' <= letter and letter <= 'Z')) { + // C:\ + return 3; + } + } + + if (input.len > 5 and + switch (input[0]) { + '/', '\\' => true, + else => false, + } and + switch (input[1]) { + '/', '\\' => true, + else => false, + } and + switch (input[2]) { + '\\', '.' => false, + else => true, + }) + { + var i: usize = 3; + // \\network\share\ + // ^ + while (i < input.len and switch (input[i]) { + '/', '\\' => false, + else => true, + }) { + i += 1; + } + + i += 1; + // \\network\share\ + // ^ + const start = i; + while (i < input.len and switch (input[i]) { + '/', '\\' => false, + else => true, + }) { + i += 1; + } + + if (start != i and i < input.len and switch (input[i]) { + '/', '\\' => true, + else => false, + }) { + // \\network\share\ + // ^ + if (i + 1 < input.len) { + return i + 1; + } + return i; + } + } + + if (input.len > 0 and switch (input[0]) { + '/', '\\' => true, + else => false, + }) { + // \ + return 1; + } + + return null; + } + + if (input.len > 0 and input[0] == '/') { + // / + return 1; + } + + return null; + } + + const TrimInputKind = enum { + abs, + rel, + }; + + fn trimInput(kind: TrimInputKind, input: anytype) []const opts.inputChildType(@TypeOf(input)) { + var trimmed: []const opts.inputChildType(@TypeOf(input)) = input[0..]; + + if (comptime Environment.isWindows) { + switch (kind) { + .abs => { + const root_len = rootLen(input) orelse 0; + while (trimmed.len > root_len and switch (trimmed[trimmed.len - 1]) { + '/', '\\' => true, + else => false, + }) { + trimmed = trimmed[0 .. trimmed.len - 1]; + } + }, + .rel => { + if (trimmed.len > 1 and trimmed[0] == '.') { + const c = trimmed[1]; + if (c == '/' or c == '\\') { + trimmed = trimmed[2..]; + } + } + while (trimmed.len > 0 and switch (trimmed[0]) { + '/', '\\' => true, + else => false, + }) { + trimmed = trimmed[1..]; + } + while (trimmed.len > 0 and switch (trimmed[trimmed.len - 1]) { + '/', '\\' => true, + else => false, + }) { + trimmed = trimmed[0 .. trimmed.len - 1]; + } + }, + } + + return trimmed; + } + + switch (kind) { + .abs => { + const root_len = rootLen(input) orelse 0; + while (trimmed.len > root_len and trimmed[trimmed.len - 1] == '/') { + trimmed = trimmed[0 .. trimmed.len - 1]; + } + }, + .rel => { + if (trimmed.len > 1 and trimmed[0] == '.' and trimmed[1] == '/') { + trimmed = trimmed[2..]; + } + while (trimmed.len > 0 and trimmed[0] == '/') { + trimmed = trimmed[1..]; + } + + while (trimmed.len > 0 and trimmed[trimmed.len - 1] == '/') { + trimmed = trimmed[0 .. 
trimmed.len - 1]; + } + }, + } + + return trimmed; + } + + fn isInputAbsolute(input: anytype) bool { + if (input.len == 0) { + return false; + } + + if (input[0] == '/') { + return true; + } + + if (comptime Environment.isWindows) { + if (input[0] == '\\') { + return true; + } + + if (input.len < 3) { + return false; + } + + if (input[1] == ':' and switch (input[2]) { + '/', '\\' => true, + else => false, + }) { + return true; + } + } + + return false; + } + + pub fn append(this: *@This(), input: anytype) Result(void) { + const needs_sep = this.len() > 0 and switch (comptime opts.sep) { + .any => switch (this.slice()[this.len() - 1]) { + '/', '\\' => false, + else => true, + }, + else => this.slice()[this.len() - 1] != opts.sep.char(), + }; + + switch (comptime opts.kind) { + .abs => { + const has_root = this.len() > 0; + + if (comptime Environment.isDebug) { + if (has_root) { + bun.debugAssert(!isInputAbsolute(input)); + } else { + bun.debugAssert(isInputAbsolute(input)); + } + } + + const trimmed = trimInput(if (has_root) .rel else .abs, input); + + if (trimmed.len == 0) { + return; + } + + if (comptime opts.check_length == .check_for_greater_than_max_path) { + if (this.len() + trimmed.len + @intFromBool(needs_sep) >= opts.maxPathLength()) { + return error.MaxPathExceeded; + } + } + + this._buf.append(trimmed, needs_sep); + }, + .rel => { + bun.debugAssert(!isInputAbsolute(input)); + + const trimmed = trimInput(.rel, input); + + if (trimmed.len == 0) { + return; + } + + if (comptime opts.check_length == .check_for_greater_than_max_path) { + if (this.len() + trimmed.len + @intFromBool(needs_sep) >= opts.maxPathLength()) { + return error.MaxPathExceeded; + } + } + + this._buf.append(trimmed, needs_sep); + }, + .any => { + const input_is_absolute = isInputAbsolute(input); + + if (comptime Environment.isDebug) { + if (needs_sep) { + bun.debugAssert(!input_is_absolute); + } + } + + const trimmed = trimInput(if (this.len() > 0) + // anything appended to an existing path should be trimmed + // as a relative path + .rel + else if (isInputAbsolute(input)) + // path is empty, trim based on input + .abs + else + .rel, input); + + if (trimmed.len == 0) { + return; + } + + if (comptime opts.check_length == .check_for_greater_than_max_path) { + if (this.len() + trimmed.len + @intFromBool(needs_sep) >= opts.maxPathLength()) { + return error.MaxPathExceeded; + } + } + + this._buf.append(trimmed, needs_sep); + }, + } + } + + pub fn appendFmt(this: *@This(), comptime fmt: []const u8, args: anytype) Result(void) { + // TODO: there's probably a better way to do this. 
needed for trimming slashes + var temp: Path(.{ .buf_type = .pool }) = .init(); + defer temp.deinit(); + + const input = switch (comptime opts.buf_type) { + .pool => std.fmt.bufPrint(temp._buf.pooled, fmt, args) catch { + if (comptime opts.check_length == .check_for_greater_than_max_path) { + return error.MaxPathExceeded; + } + unreachable; + }, + }; + + return this.append(input); + } + + pub fn join(this: *@This(), parts: []const []const opts.pathUnit()) Result(void) { + switch (comptime opts.unit) { + .u8 => {}, + .u16 => @compileError("unsupported unit type"), + .os => if (Environment.isWindows) @compileError("unsupported unit type"), + } + + switch (comptime opts.kind) { + .abs => {}, + .rel => @compileError("cannot join with relative path"), + .any => { + bun.debugAssert(this.isAbsolute()); + }, + } + + const cloned = this.clone(); + defer cloned.deinit(); + + switch (comptime opts.buf_type) { + .pool => { + const joined = bun.path.joinAbsStringBuf( + cloned.slice(), + this._buf.pooled, + parts, + switch (opts.sep) { + .any, .auto => .auto, + .posix => .posix, + .windows => .windows, + }, + ); + + const trimmed = trimInput(.abs, joined); + this._buf.len = trimmed.len; + }, + } + } + + pub fn relative(this: *const @This(), to: anytype) RelPath(opts) { + switch (comptime opts.buf_type) { + .pool => { + var output: RelPath(opts) = .init(); + const rel = bun.path.relativeBufZ(output._buf.pooled, this.slice(), to.slice()); + const trimmed = trimInput(.rel, rel); + output._buf.len = trimmed.len; + return output; + }, + } + } + + pub fn undo(this: *@This(), n_components: usize) void { + const min_len = switch (comptime opts.kind) { + .abs => rootLen(this.slice()) orelse 0, + .rel => 0, + .any => min_len: { + if (this.isAbsolute()) { + break :min_len rootLen(this.slice()) orelse 0; + } + break :min_len 0; + }, + }; + + var i: usize = 0; + while (i < n_components) { + const slash = switch (comptime opts.sep) { + .any => std.mem.lastIndexOfAny(opts.pathUnit(), this.slice(), &.{ std.fs.path.sep_posix, std.fs.path.sep_windows }), + .auto => std.mem.lastIndexOfScalar(opts.pathUnit(), this.slice(), std.fs.path.sep), + .posix => std.mem.lastIndexOfScalar(opts.pathUnit(), this.slice(), std.fs.path.sep_posix), + .windows => std.mem.lastIndexOfScalar(opts.pathUnit(), this.slice(), std.fs.path.sep_windows), + } orelse { + this._buf.setLength(min_len); + return; + }; + + if (slash < min_len) { + this._buf.setLength(min_len); + return; + } + + this._buf.setLength(slash); + i += 1; + } + } + + const ResetScope = struct { + path: *Path(opts), + saved_len: usize, + + pub fn restore(this: *const ResetScope) void { + this.path._buf.setLength(this.saved_len); + } + }; + + pub fn save(this: *@This()) ResetScope { + return .{ .path = this, .saved_len = this.len() }; + } + }; +} diff --git a/src/paths/path_buffer_pool.zig b/src/paths/path_buffer_pool.zig new file mode 100644 index 0000000000..3489d920a0 --- /dev/null +++ b/src/paths/path_buffer_pool.zig @@ -0,0 +1,34 @@ +const bun = @import("bun"); +const Environment = bun.Environment; +const ObjectPool = bun.ObjectPool; +const PathBuffer = bun.PathBuffer; +const WPathBuffer = bun.WPathBuffer; + +// This pool exists because on Windows, each path buffer costs 64 KB. +// This makes the stack memory usage very unpredictable, which means we can't really know how much stack space we have left. +// This pool is a workaround to make the stack memory usage more predictable. +// We keep up to 4 path buffers alive per thread at a time. 
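// An illustrative usage sketch (editor's annotation, not part of this file;
// `base_dir` and `sub_path` are placeholder names — the get/defer-put pairing
// is the pattern the call sites in this diff follow):
//
//     const buf = bun.path_buffer_pool.get();
//     defer bun.path_buffer_pool.put(buf);
//     const joined = bun.path.joinAbsStringBuf(base_dir, buf, &.{sub_path}, .auto);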
+fn PathBufferPoolT(comptime T: type) type { + return struct { + const Pool = ObjectPool(T, null, true, 4); + + pub fn get() *T { + // use a threadlocal allocator so mimalloc deletes it on thread deinit. + return &Pool.get(bun.threadlocalAllocator()).data; + } + + pub fn put(buffer: *const T) void { + // there's no deinit function on T so @constCast is fine + var node: *Pool.Node = @alignCast(@fieldParentPtr("data", @constCast(buffer))); + node.release(); + } + + pub fn deleteAll() void { + Pool.deleteAll(); + } + }; +} + +pub const path_buffer_pool = PathBufferPoolT(PathBuffer); +pub const w_path_buffer_pool = PathBufferPoolT(WPathBuffer); +pub const os_path_buffer_pool = if (Environment.isWindows) w_path_buffer_pool else path_buffer_pool; diff --git a/src/resolver/resolver.zig b/src/resolver/resolver.zig index 733127d023..24d52e4a00 100644 --- a/src/resolver/resolver.zig +++ b/src/resolver/resolver.zig @@ -89,7 +89,7 @@ const bufs = struct { pub threadlocal var esm_absolute_package_path_joined: bun.PathBuffer = undefined; pub threadlocal var dir_entry_paths_to_resolve: [256]DirEntryResolveQueueItem = undefined; - pub threadlocal var open_dirs: [256]std.fs.Dir = undefined; + pub threadlocal var open_dirs: [256]FD = undefined; pub threadlocal var resolve_without_remapping: bun.PathBuffer = undefined; pub threadlocal var index: bun.PathBuffer = undefined; pub threadlocal var dir_info_uncached_filename: bun.PathBuffer = undefined; @@ -2216,7 +2216,7 @@ pub const Resolver = struct { var dir_entries_option: *Fs.FileSystem.RealFS.EntriesOption = undefined; var needs_iter = true; var in_place: ?*Fs.FileSystem.DirEntry = null; - const open_dir = bun.openDirForIteration(std.fs.cwd(), dir_path) catch |err| { + const open_dir = bun.openDirForIteration(FD.cwd(), dir_path).unwrap() catch |err| { // TODO: handle this error better r.log.addErrorFmt( null, @@ -2264,10 +2264,10 @@ pub const Resolver = struct { dir_entries_ptr.* = new_entry; if (r.store_fd) { - dir_entries_ptr.fd = .fromStdDir(open_dir); + dir_entries_ptr.fd = open_dir; } - bun.fs.debug("readdir({}, {s}) = {d}", .{ bun.FD.fromStdDir(open_dir), dir_path, dir_entries_ptr.data.count() }); + bun.fs.debug("readdir({}, {s}) = {d}", .{ open_dir, dir_path, dir_entries_ptr.data.count() }); dir_entries_option = rfs.entries.put(&cached_dir_entry_result, .{ .entries = dir_entries_ptr, @@ -2288,7 +2288,7 @@ pub const Resolver = struct { // to check for a parent package.json null, allocators.NotFound, - .fromStdDir(open_dir), + open_dir, package_id, ); return dir_info_ptr; @@ -2783,9 +2783,9 @@ pub const Resolver = struct { // When this function halts, any item not processed means it's not found. defer { if (open_dir_count > 0 and (!r.store_fd or r.fs.fs.needToCloseFiles())) { - const open_dirs: []std.fs.Dir = bufs(.open_dirs)[0..open_dir_count]; + const open_dirs = bufs(.open_dirs)[0..open_dir_count]; for (open_dirs) |open_dir| { - bun.FD.fromStdDir(open_dir).close(); + open_dir.close(); } } } @@ -2810,8 +2810,8 @@ pub const Resolver = struct { defer top_parent = queue_top.result; queue_slice.len -= 1; - const open_dir: std.fs.Dir = if (queue_top.fd.isValid()) - queue_top.fd.stdDir() + const open_dir: FD = if (queue_top.fd.isValid()) + queue_top.fd else open_dir: { // This saves us N copies of .toPosixPath // which was likely the perf gain from resolving directories relative to the parent directory, anyway. 
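A minimal sketch of the calling convention the resolver hunks above migrate to, assuming only what the diff itself shows: `bun.openDirForIteration` now takes a `bun.FD`, returns a `bun.sys`-style result whose `.unwrap()` yields a Zig error union, and the resulting `FD` is closed directly:

    const open_dir: FD = bun.openDirForIteration(FD.cwd(), dir_path).unwrap() catch |err| {
        // surface the failure the same way the resolver does (via r.log), then bail
        return err;
    };
    defer open_dir.close(); // FD exposes close() directly; no std.fs.Dir round-trip
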
@@ -2820,19 +2820,20 @@ pub const Resolver = struct { defer path.ptr[queue_top.unsafe_path.len] = prev_char; const sentinel = path.ptr[0..queue_top.unsafe_path.len :0]; - const open_req = if (comptime Environment.isPosix) - std.fs.openDirAbsoluteZ( + const open_req = if (comptime Environment.isPosix) open_req: { + const dir_result = std.fs.openDirAbsoluteZ( sentinel, .{ .no_follow = !follow_symlinks, .iterate = true }, - ) - else if (comptime Environment.isWindows) open_req: { + ) catch |err| break :open_req err; + break :open_req FD.fromStdDir(dir_result); + } else if (comptime Environment.isWindows) open_req: { const dirfd_result = bun.sys.openDirAtWindowsA(bun.invalid_fd, sentinel, .{ .iterable = true, .no_follow = !follow_symlinks, .read_only = true, }); if (dirfd_result.unwrap()) |result| { - break :open_req result.stdDir(); + break :open_req result; } else |err| { break :open_req err; } @@ -2879,7 +2880,7 @@ pub const Resolver = struct { }; if (!queue_top.fd.isValid()) { - Fs.FileSystem.setMaxFd(open_dir.fd); + Fs.FileSystem.setMaxFd(open_dir.cast()); // these objects mostly just wrap the file descriptor, so it's fine to keep it. bufs(.open_dirs)[open_dir_count] = open_dir; open_dir_count += 1; @@ -2945,13 +2946,13 @@ pub const Resolver = struct { if (in_place) |existing| { existing.data.clearAndFree(allocator); } - new_entry.fd = if (r.store_fd) .fromStdDir(open_dir) else .invalid; + new_entry.fd = if (r.store_fd) open_dir else .invalid; var dir_entries_ptr = in_place orelse allocator.create(Fs.FileSystem.DirEntry) catch unreachable; dir_entries_ptr.* = new_entry; dir_entries_option = try rfs.entries.put(&cached_dir_entry_result, .{ .entries = dir_entries_ptr, }); - bun.fs.debug("readdir({}, {s}) = {d}", .{ bun.FD.fromStdDir(open_dir), dir_path, dir_entries_ptr.data.count() }); + bun.fs.debug("readdir({}, {s}) = {d}", .{ open_dir, dir_path, dir_entries_ptr.data.count() }); } // We must initialize it as empty so that the result index is correct. 
@@ -2966,7 +2967,7 @@ pub const Resolver = struct { cached_dir_entry_result.index, r.dir_cache.atIndex(top_parent.index), top_parent.index, - .fromStdDir(open_dir), + open_dir, null, ); diff --git a/src/semver/SemverString.zig b/src/semver/SemverString.zig index 9f278ac087..ee30c31e1a 100644 --- a/src/semver/SemverString.zig +++ b/src/semver/SemverString.zig @@ -135,7 +135,7 @@ pub const String = extern struct { str: *const String, buf: string, - pub fn format(formatter: Formatter, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void { + pub fn format(formatter: Formatter, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) @TypeOf(writer).Error!void { const str = formatter.str; try writer.writeAll(str.slice(formatter.buf)); } @@ -159,11 +159,33 @@ pub const String = extern struct { quote: bool = true, }; - pub fn format(formatter: JsonFormatter, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void { + pub fn format(formatter: JsonFormatter, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) @TypeOf(writer).Error!void { try writer.print("{}", .{bun.fmt.formatJSONStringUTF8(formatter.str.slice(formatter.buf), .{ .quote = formatter.opts.quote })}); } }; + pub inline fn fmtStorePath(self: *const String, buf: []const u8) StorePathFormatter { + return .{ + .buf = buf, + .str = self, + }; + } + + pub const StorePathFormatter = struct { + str: *const String, + buf: string, + + pub fn format(this: StorePathFormatter, comptime _: string, _: std.fmt.FormatOptions, writer: anytype) @TypeOf(writer).Error!void { + for (this.str.slice(this.buf)) |c| { + switch (c) { + '/' => try writer.writeByte('+'), + '\\' => try writer.writeByte('+'), + else => try writer.writeByte(c), + } + } + } + }; + pub fn Sorter(comptime direction: enum { asc, desc }) type { return struct { lhs_buf: []const u8, diff --git a/src/shell/builtin/ls.zig b/src/shell/builtin/ls.zig index c3c3da25bd..06f75972c3 100644 --- a/src/shell/builtin/ls.zig +++ b/src/shell/builtin/ls.zig @@ -323,7 +323,7 @@ pub const ShellLsTask = struct { std.fmt.format(writer, "{s}:\n", .{this.path}) catch bun.outOfMemory(); } - var iterator = DirIterator.iterate(fd.stdDir(), .u8); + var iterator = DirIterator.iterate(fd, .u8); var entry = iterator.next(); // If `-a` is used, "." and ".." should show up as results. 
However, diff --git a/src/shell/builtin/rm.zig b/src/shell/builtin/rm.zig index 31c6268adf..8eca2e16da 100644 --- a/src/shell/builtin/rm.zig +++ b/src/shell/builtin/rm.zig @@ -860,7 +860,7 @@ pub const ShellRmTask = struct { return Maybe(void).success; } - var iterator = DirIterator.iterate(fd.stdDir(), .u8); + var iterator = DirIterator.iterate(fd, .u8); var entry = iterator.next(); var remove_child_vtable = RemoveFileVTable{ diff --git a/src/shell/builtin/which.zig b/src/shell/builtin/which.zig index 29eba3c740..50b55ee488 100644 --- a/src/shell/builtin/which.zig +++ b/src/shell/builtin/which.zig @@ -30,8 +30,8 @@ pub fn start(this: *Which) Yield { } if (this.bltn().stdout.needsIO() == null) { - const path_buf = bun.PathBufferPool.get(); - defer bun.PathBufferPool.put(path_buf); + const path_buf = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(path_buf); const PATH = this.bltn().parentCmd().base.shell.export_env.get(EnvStr.initSlice("PATH")) orelse EnvStr.initSlice(""); var had_not_found = false; for (args) |arg_raw| { @@ -68,8 +68,8 @@ pub fn next(this: *Which) Yield { const arg_raw = multiargs.args_slice[multiargs.arg_idx]; const arg = arg_raw[0..std.mem.len(arg_raw)]; - const path_buf = bun.PathBufferPool.get(); - defer bun.PathBufferPool.put(path_buf); + const path_buf = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(path_buf); const PATH = this.bltn().parentCmd().base.shell.export_env.get(EnvStr.initSlice("PATH")) orelse EnvStr.initSlice(""); const resolved = which(path_buf, PATH.slice(), this.bltn().parentCmd().base.shell.cwdZ(), arg) orelse { diff --git a/src/shell/interpreter.zig b/src/shell/interpreter.zig index 1da175db59..a62c7e12b5 100644 --- a/src/shell/interpreter.zig +++ b/src/shell/interpreter.zig @@ -820,8 +820,8 @@ pub const Interpreter = struct { }; // Avoid the large stack allocation on Windows. 
- const pathbuf = bun.PathBufferPool.get(); - defer bun.PathBufferPool.put(pathbuf); + const pathbuf = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(pathbuf); const cwd: [:0]const u8 = switch (Syscall.getcwdZ(pathbuf)) { .result => |cwd| cwd, .err => |err| { @@ -1701,15 +1701,15 @@ pub const ShellSyscall = struct { pub fn statat(dir: bun.FileDescriptor, path_: [:0]const u8) Maybe(bun.Stat) { if (bun.Environment.isWindows) { - const buf: *bun.PathBuffer = bun.PathBufferPool.get(); - defer bun.PathBufferPool.put(buf); + const buf: *bun.PathBuffer = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(buf); const path = switch (getPath(dir, path_, buf)) { .err => |e| return .{ .err = e }, .result => |p| p, }; return switch (Syscall.stat(path)) { - .err => |e| .{ .err = e.clone(bun.default_allocator) catch bun.outOfMemory() }, + .err => |e| .{ .err = e.clone(bun.default_allocator) }, .result => |s| .{ .result = s }, }; } @@ -1723,8 +1723,8 @@ pub const ShellSyscall = struct { if (bun.Environment.isWindows) { if (flags & bun.O.DIRECTORY != 0) { if (ResolvePath.Platform.posix.isAbsolute(path[0..path.len])) { - const buf: *bun.PathBuffer = bun.PathBufferPool.get(); - defer bun.PathBufferPool.put(buf); + const buf: *bun.PathBuffer = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(buf); const p = switch (getPath(dir, path, buf)) { .result => |p| p, .err => |e| return .{ .err = e }, @@ -1740,8 +1740,8 @@ pub const ShellSyscall = struct { }; } - const buf: *bun.PathBuffer = bun.PathBufferPool.get(); - defer bun.PathBufferPool.put(buf); + const buf: *bun.PathBuffer = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(buf); const p = switch (getPath(dir, path, buf)) { .result => |p| p, .err => |e| return .{ .err = e }, diff --git a/src/shell/states/Cmd.zig b/src/shell/states/Cmd.zig index c5bf72fbe2..71cfa06a08 100644 --- a/src/shell/states/Cmd.zig +++ b/src/shell/states/Cmd.zig @@ -487,8 +487,8 @@ fn initSubproc(this: *Cmd) Yield { return this.exec.bltn.start(); } - const path_buf = bun.PathBufferPool.get(); - defer bun.PathBufferPool.put(path_buf); + const path_buf = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(path_buf); const resolved = which(path_buf, spawn_args.PATH, spawn_args.cwd, first_arg_real) orelse blk: { if (bun.strings.eqlComptime(first_arg_real, "bun") or bun.strings.eqlComptime(first_arg_real, "bun-debug")) blk2: { break :blk bun.selfExePath() catch break :blk2; diff --git a/src/sourcemap/sourcemap.zig b/src/sourcemap/sourcemap.zig index e664edf77d..b580dfe61d 100644 --- a/src/sourcemap/sourcemap.zig +++ b/src/sourcemap/sourcemap.zig @@ -1100,8 +1100,8 @@ pub fn getSourceMapImpl( // try to load a .map file if (load_hint != .is_inline_map) try_external: { - var load_path_buf: *bun.PathBuffer = bun.PathBufferPool.get(); - defer bun.PathBufferPool.put(load_path_buf); + var load_path_buf: *bun.PathBuffer = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(load_path_buf); if (source_filename.len + 4 > load_path_buf.len) break :try_external; @memcpy(load_path_buf[0..source_filename.len], source_filename); diff --git a/src/string/paths.zig b/src/string/paths.zig index 07813f037e..0a7eba3dc7 100644 --- a/src/string/paths.zig +++ b/src/string/paths.zig @@ -118,18 +118,6 @@ pub fn toNTPath16(wbuf: []u16, path: []const u16) [:0]u16 { return wbuf[0 .. 
toWPathNormalized16(wbuf[prefix.len..], path).len + prefix.len :0]; } -pub fn toNTMaxPath(buf: []u8, utf8: []const u8) [:0]const u8 { - if (!std.fs.path.isAbsoluteWindows(utf8) or utf8.len <= 260) { - @memcpy(buf[0..utf8.len], utf8); - buf[utf8.len] = 0; - return buf[0..utf8.len :0]; - } - - const prefix = bun.windows.nt_maxpath_prefix_u8; - buf[0..prefix.len].* = prefix; - return buf[0 .. toPathNormalized(buf[prefix.len..], utf8).len + prefix.len :0]; -} - pub fn addNTPathPrefix(wbuf: []u16, utf16: []const u16) [:0]u16 { wbuf[0..bun.windows.nt_object_prefix.len].* = bun.windows.nt_object_prefix; @memcpy(wbuf[bun.windows.nt_object_prefix.len..][0..utf16.len], utf16); @@ -155,6 +143,11 @@ pub const toNTDir = toNTPath; pub fn toExtendedPathNormalized(wbuf: []u16, utf8: []const u8) [:0]const u16 { bun.unsafeAssert(wbuf.len > 4); + if (hasPrefixComptime(utf8, bun.windows.long_path_prefix_u8) or + hasPrefixComptime(utf8, bun.windows.nt_object_prefix_u8)) + { + return toWPathNormalized(wbuf, utf8); + } wbuf[0..4].* = bun.windows.long_path_prefix; return wbuf[0 .. toWPathNormalized(wbuf[4..], utf8).len + 4 :0]; } @@ -168,8 +161,8 @@ pub fn toWPathNormalizeAutoExtend(wbuf: []u16, utf8: []const u8) [:0]const u16 { } pub fn toWPathNormalized(wbuf: []u16, utf8: []const u8) [:0]u16 { - const renormalized = bun.PathBufferPool.get(); - defer bun.PathBufferPool.put(renormalized); + const renormalized = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(renormalized); var path_to_use = normalizeSlashesOnly(renormalized, utf8, '\\'); @@ -195,8 +188,8 @@ pub fn toWPathNormalized16(wbuf: []u16, path: []const u16) [:0]u16 { } pub fn toPathNormalized(buf: []u8, utf8: []const u8) [:0]const u8 { - const renormalized = bun.PathBufferPool.get(); - defer bun.PathBufferPool.put(renormalized); + const renormalized = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(renormalized); var path_to_use = normalizeSlashesOnly(renormalized, utf8, '\\'); @@ -235,12 +228,12 @@ pub fn normalizeSlashesOnly(buf: []u8, utf8: []const u8, comptime desired_slash: pub fn toWDirNormalized(wbuf: []u16, utf8: []const u8) [:0]const u16 { var renormalized: ?*bun.PathBuffer = null; - defer if (renormalized) |r| bun.PathBufferPool.put(r); + defer if (renormalized) |r| bun.path_buffer_pool.put(r); var path_to_use = utf8; if (bun.strings.containsChar(utf8, '/')) { - renormalized = bun.PathBufferPool.get(); + renormalized = bun.path_buffer_pool.get(); @memcpy(renormalized.?[0..utf8.len], utf8); for (renormalized.?[0..utf8.len]) |*c| { if (c.* == '/') { @@ -447,6 +440,67 @@ pub fn removeLeadingDotSlash(slice: []const u8) callconv(bun.callconv_inline) [] return slice; } +// Copied from std.fs.path, modified to be generic over the character type. +pub fn basename(comptime T: type, input: []const T) []const T { + if (comptime Environment.isWindows) { + return basenameWindows(T, input); + } + return basenamePosix(T, input); +} + +fn basenamePosix(comptime T: type, input: []const T) []const T { + if (input.len == 0) + return &.{}; + + var end_index: usize = input.len - 1; + while (input[end_index] == '/') { + if (end_index == 0) + return &.{}; + end_index -= 1; + } + var start_index: usize = end_index; + end_index += 1; + while (input[start_index] != '/') { + if (start_index == 0) + return input[0..end_index]; + start_index -= 1; + } + + return input[start_index + 1 .. 
end_index]; +} + +fn basenameWindows(comptime T: type, input: []const T) []const T { + if (input.len == 0) + return &.{}; + + var end_index: usize = input.len - 1; + while (true) { + const byte = input[end_index]; + if (byte == '/' or byte == '\\') { + if (end_index == 0) + return &.{}; + end_index -= 1; + continue; + } + if (byte == ':' and end_index == 1) { + return &.{}; + } + break; + } + + var start_index: usize = end_index; + end_index += 1; + while (input[start_index] != '/' and input[start_index] != '\\' and + !(input[start_index] == ':' and start_index == 1)) + { + if (start_index == 0) + return input[0..end_index]; + start_index -= 1; + } + + return input[start_index + 1 .. end_index]; +} + const bun = @import("bun"); const std = @import("std"); const Environment = bun.Environment; diff --git a/src/string_immutable.zig b/src/string_immutable.zig index 7c4f6637f3..2a78d74483 100644 --- a/src/string_immutable.zig +++ b/src/string_immutable.zig @@ -2329,7 +2329,6 @@ pub const startsWithWindowsDriveLetter = _paths.startsWithWindowsDriveLetter; pub const startsWithWindowsDriveLetterT = _paths.startsWithWindowsDriveLetterT; pub const toExtendedPathNormalized = _paths.toExtendedPathNormalized; pub const toKernel32Path = _paths.toKernel32Path; -pub const toNTMaxPath = _paths.toNTMaxPath; pub const toNTPath = _paths.toNTPath; pub const toNTPath16 = _paths.toNTPath16; pub const toPath = _paths.toPath; @@ -2347,6 +2346,7 @@ pub const withoutLeadingSlash = _paths.withoutLeadingSlash; pub const withoutNTPrefix = _paths.withoutNTPrefix; pub const withoutTrailingSlash = _paths.withoutTrailingSlash; pub const withoutTrailingSlashWindowsPath = _paths.withoutTrailingSlashWindowsPath; +pub const basename = _paths.basename; pub const log = bun.Output.scoped(.STR, true); pub const grapheme = @import("./grapheme.zig"); diff --git a/src/sys.zig b/src/sys.zig index 84490643ba..6ab9d10606 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -57,6 +57,7 @@ const Environment = bun.Environment; const JSC = bun.JSC; const MAX_PATH_BYTES = bun.MAX_PATH_BYTES; const SystemError = JSC.SystemError; +const FD = bun.FD; const linux = syscall; @@ -215,6 +216,7 @@ pub const Tag = enum(u8) { chmod, chown, clonefile, + clonefileat, close, copy_file_range, copyfile, @@ -343,10 +345,10 @@ pub const Error = struct { syscall: sys.Tag = sys.Tag.TODO, dest: []const u8 = "", - pub fn clone(this: *const Error, allocator: std.mem.Allocator) !Error { + pub fn clone(this: *const Error, allocator: std.mem.Allocator) Error { var copy = this.*; - copy.path = try allocator.dupe(u8, copy.path); - copy.dest = try allocator.dupe(u8, copy.dest); + copy.path = allocator.dupe(u8, copy.path) catch bun.outOfMemory(); + copy.dest = allocator.dupe(u8, copy.dest) catch bun.outOfMemory(); return copy; } @@ -665,8 +667,8 @@ pub fn getcwdZ(buf: *bun.PathBuffer) Maybe([:0]const u8) { buf[0] = 0; if (comptime Environment.isWindows) { - var wbuf = bun.WPathBufferPool.get(); - defer bun.WPathBufferPool.put(wbuf); + var wbuf = bun.w_path_buffer_pool.get(); + defer bun.w_path_buffer_pool.put(wbuf); const len: windows.DWORD = kernel32.GetCurrentDirectoryW(wbuf.len, wbuf); if (Result.errnoSysP(len, .getcwd, buf)) |err| return err; return Result{ .result = bun.strings.fromWPath(buf, wbuf[0..len]) }; @@ -758,8 +760,8 @@ pub fn chdirOSPath(path: bun.stringZ, destination: if (Environment.isPosix) bun. 
} if (comptime Environment.isWindows) { - const wbuf = bun.WPathBufferPool.get(); - defer bun.WPathBufferPool.put(wbuf); + const wbuf = bun.w_path_buffer_pool.get(); + defer bun.w_path_buffer_pool.put(wbuf); if (c.SetCurrentDirectoryW(bun.strings.toWDirPath(wbuf, destination)) == windows.FALSE) { log("SetCurrentDirectory({s}) = {d}", .{ destination, kernel32.GetLastError() }); return Maybe(void).errnoSysPD(0, .chdir, path, destination) orelse Maybe(void).success; @@ -906,8 +908,8 @@ pub fn lutimes(path: [:0]const u8, atime: JSC.Node.TimeLike, mtime: JSC.Node.Tim } pub fn mkdiratA(dir_fd: bun.FileDescriptor, file_path: []const u8) Maybe(void) { - const buf = bun.WPathBufferPool.get(); - defer bun.WPathBufferPool.put(buf); + const buf = bun.w_path_buffer_pool.get(); + defer bun.w_path_buffer_pool.put(buf); return mkdiratW(dir_fd, bun.strings.toWPathNormalized(buf, file_path)); } @@ -932,7 +934,7 @@ pub const mkdirat = if (Environment.isWindows) else mkdiratPosix; -pub fn mkdiratW(dir_fd: bun.FileDescriptor, file_path: []const u16, _: i32) Maybe(void) { +pub fn mkdiratW(dir_fd: bun.FileDescriptor, file_path: [:0]const u16, _: i32) Maybe(void) { const dir_to_make = openDirAtWindowsNtPath(dir_fd, file_path, .{ .iterable = false, .can_rename_or_delete = true, .create = true }); if (dir_to_make == .err) { return .{ .err = dir_to_make.err }; @@ -968,8 +970,8 @@ pub fn mkdir(file_path: [:0]const u8, flags: mode_t) Maybe(void) { .linux => Maybe(void).errnoSysP(syscall.mkdir(file_path, flags), .mkdir, file_path) orelse Maybe(void).success, .windows => { - const wbuf = bun.WPathBufferPool.get(); - defer bun.WPathBufferPool.put(wbuf); + const wbuf = bun.w_path_buffer_pool.get(); + defer bun.w_path_buffer_pool.put(wbuf); return Maybe(void).errnoSysP( bun.windows.CreateDirectoryW(bun.strings.toKernel32Path(wbuf, file_path).ptr, null), .mkdir, @@ -1001,8 +1003,8 @@ pub fn mkdirA(file_path: []const u8, flags: mode_t) Maybe(void) { } if (comptime Environment.isWindows) { - const wbuf = bun.WPathBufferPool.get(); - defer bun.WPathBufferPool.put(wbuf); + const wbuf = bun.w_path_buffer_pool.get(); + defer bun.w_path_buffer_pool.put(wbuf); const wpath = bun.strings.toKernel32Path(wbuf, file_path); assertIsValidWindowsPath(u16, wpath); return Maybe(void).errnoSysP( @@ -1066,8 +1068,8 @@ pub fn normalizePathWindows( if (comptime T != u8 and T != u16) { @compileError("normalizePathWindows only supports u8 and u16 character types"); } - const wbuf = if (T != u16) bun.WPathBufferPool.get(); - defer if (T != u16) bun.WPathBufferPool.put(wbuf); + const wbuf = if (T != u16) bun.w_path_buffer_pool.get(); + defer if (T != u16) bun.w_path_buffer_pool.put(wbuf); var path = if (T == u16) path_ else bun.strings.convertUTF8toUTF16InBuffer(wbuf, path_); if (std.fs.path.isAbsoluteWindowsWTF16(path)) { @@ -1137,8 +1139,8 @@ pub fn normalizePathWindows( path = path[2..]; } - const buf1 = bun.WPathBufferPool.get(); - defer bun.WPathBufferPool.put(buf1); + const buf1 = bun.w_path_buffer_pool.get(); + defer bun.w_path_buffer_pool.put(buf1); @memcpy(buf1[0..base_path.len], base_path); buf1[base_path.len] = '\\'; @memcpy(buf1[base_path.len + 1 .. 
base_path.len + 1 + path.len], path); @@ -1292,8 +1294,8 @@ fn openDirAtWindowsT( path: []const T, options: WindowsOpenDirOptions, ) Maybe(bun.FileDescriptor) { - const wbuf = bun.WPathBufferPool.get(); - defer bun.WPathBufferPool.put(wbuf); + const wbuf = bun.w_path_buffer_pool.get(); + defer bun.w_path_buffer_pool.put(wbuf); const norm = switch (normalizePathWindows(T, dirFd, path, wbuf, .{})) { .err => |err| return .{ .err = err }, @@ -1611,8 +1613,8 @@ pub fn openFileAtWindowsT( path: []const T, options: NtCreateFileOptions, ) Maybe(bun.FileDescriptor) { - const wbuf = bun.WPathBufferPool.get(); - defer bun.WPathBufferPool.put(wbuf); + const wbuf = bun.w_path_buffer_pool.get(); + defer bun.w_path_buffer_pool.put(wbuf); const norm = switch (normalizePathWindows(T, dirFd, path, wbuf, .{})) { .err => |err| return .{ .err = err }, @@ -2555,11 +2557,11 @@ pub fn renameat2(from_dir: bun.FileDescriptor, from: [:0]const u8, to_dir: bun.F pub fn renameat(from_dir: bun.FileDescriptor, from: [:0]const u8, to_dir: bun.FileDescriptor, to: [:0]const u8) Maybe(void) { if (Environment.isWindows) { - const w_buf_from = bun.WPathBufferPool.get(); - const w_buf_to = bun.WPathBufferPool.get(); + const w_buf_from = bun.w_path_buffer_pool.get(); + const w_buf_to = bun.w_path_buffer_pool.get(); defer { - bun.WPathBufferPool.put(w_buf_from); - bun.WPathBufferPool.put(w_buf_to); + bun.w_path_buffer_pool.put(w_buf_from); + bun.w_path_buffer_pool.put(w_buf_to); } const rc = bun.windows.renameAtW( @@ -2599,8 +2601,10 @@ pub fn symlink(target: [:0]const u8, dest: [:0]const u8) Maybe(void) { while (true) { if (Maybe(void).errnoSys(syscall.symlink(target, dest), .symlink)) |err| { if (err.getErrno() == .INTR) continue; + log("symlink({s}, {s}) = {s}", .{ target, dest, @tagName(err.getErrno()) }); return err; } + log("symlink({s}, {s}) = 0", .{ target, dest }); return Maybe(void).success; } } @@ -2609,8 +2613,10 @@ pub fn symlinkat(target: [:0]const u8, dirfd: bun.FileDescriptor, dest: [:0]cons while (true) { if (Maybe(void).errnoSys(syscall.symlinkat(target, dirfd.cast(), dest), .symlinkat)) |err| { if (err.getErrno() == .INTR) continue; + log("symlinkat({s}, {}, {s}) = {s}", .{ target, dirfd, dest, @tagName(err.getErrno()) }); return err; } + log("symlinkat({s}, {}, {s}) = 0", .{ target, dirfd, dest }); return Maybe(void).success; } } @@ -2634,15 +2640,21 @@ pub const WindowsSymlinkOptions = packed struct { pub var has_failed_to_create_symlink = false; }; -pub fn symlinkOrJunction(dest: [:0]const u8, target: [:0]const u8) Maybe(void) { - if (comptime !Environment.isWindows) @compileError("symlinkOrJunction is windows only"); +/// Symlinks on Windows can be relative or absolute, and junctions can +/// only be absolute. Passing `null` for `abs_fallback_junction_target` +/// is saying `target` is already absolute. 
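+///
+/// A sketch of the intended call pattern (the paths here are hypothetical):
+///
+///   _ = symlinkOrJunction(
+///       "C:\\proj\\node_modules\\foo",                                 // dest
+///       "..\\.bun\\foo@1.0.0\\node_modules\\foo",                      // (possibly relative) symlink target
+///       "C:\\proj\\node_modules\\.bun\\foo@1.0.0\\node_modules\\foo",  // absolute target used if we fall back to a junction
+///   );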
+pub fn symlinkOrJunction(dest: [:0]const u8, target: [:0]const u8, abs_fallback_junction_target: ?[:0]const u8) Maybe(void) { + if (comptime !Environment.isWindows) { + // On POSIX a plain symlink(target, dest) would suffice. + @compileError("symlinkOrJunction is Windows-only"); + } if (!WindowsSymlinkOptions.has_failed_to_create_symlink) { - const sym16 = bun.WPathBufferPool.get(); - const target16 = bun.WPathBufferPool.get(); + const sym16 = bun.w_path_buffer_pool.get(); + const target16 = bun.w_path_buffer_pool.get(); defer { - bun.WPathBufferPool.put(sym16); - bun.WPathBufferPool.put(target16); + bun.w_path_buffer_pool.put(sym16); + bun.w_path_buffer_pool.put(target16); } const sym_path = bun.strings.toWPathNormalizeAutoExtend(sym16, dest); const target_path = bun.strings.toWPathNormalizeAutoExtend(target16, target); @@ -2651,14 +2663,26 @@ pub fn symlinkOrJunction(dest: [:0]const u8, target: [:0]const u8) Maybe(void) { return Maybe(void).success; }, .err => |err| { - if (err.getErrno() == .EXIST) { - return .{ .err = err }; + switch (err.getErrno()) { + .EXIST, .NOENT => { + // if the destination already exists, or a component + // of the destination doesn't exist, return the error + // without trying junctions. + return .{ .err = err }; + }, + else => { + // fallthrough to junction + }, } }, } } - return sys_uv.symlinkUV(target, dest, bun.windows.libuv.UV_FS_SYMLINK_JUNCTION); + return sys_uv.symlinkUV( + abs_fallback_junction_target orelse target, + dest, + bun.windows.libuv.UV_FS_SYMLINK_JUNCTION, + ); } pub fn symlinkW(dest: [:0]const u16, target: [:0]const u16, options: WindowsSymlinkOptions) Maybe(void) { @@ -2684,6 +2708,20 @@ pub fn symlinkW(dest: [:0]const u16, target: [:0]const u16, options: WindowsSyml } if (errno.toSystemErrno()) |err| { + switch (err) { + .ENOENT, + .EEXIST, + => { + return .{ + .err = .{ + .errno = @intFromEnum(err), + .syscall = .symlink, + }, + }; + }, + + else => {}, + } WindowsSymlinkOptions.has_failed_to_create_symlink = true; return .{ .err = .{ @@ -2712,12 +2750,46 @@ pub fn clonefile(from: [:0]const u8, to: [:0]const u8) Maybe(void) { while (true) { if (Maybe(void).errnoSys(c.clonefile(from, to, 0), .clonefile)) |err| { if (err.getErrno() == .INTR) continue; + log("clonefile({s}, {s}) = {s}", .{ from, to, @tagName(err.getErrno()) }); return err; } + log("clonefile({s}, {s}) = 0", .{ from, to }); return Maybe(void).success; } } +pub fn clonefileat(from: FD, from_path: [:0]const u8, to: FD, to_path: [:0]const u8) Maybe(void) { + if (comptime !Environment.isMac) { + @compileError("macOS only"); + } + + while (true) { + if (Maybe(void).errnoSys(c.clonefileat(from.cast(), from_path, to.cast(), to_path, 0), .clonefileat)) |err| { + if (err.getErrno() == .INTR) continue; + log( + \\clonefileat( + \\ {}, + \\ {s}, + \\ {}, + \\ {s}, + \\) = {s} + \\ + , .{ from, from_path, to, to_path, @tagName(err.getErrno()) }); + return err; + } + log( + \\clonefileat( + \\ {}, + \\ {s}, + \\ {}, + \\ {s}, + \\) = 0 + \\ + , .{ from, from_path, to, to_path }); + return .success; + } +} + pub fn copyfile(from: [:0]const u8, to: [:0]const u8, flags: posix.system.COPYFILE) Maybe(void) { if (comptime !Environment.isMac) @compileError("macOS only"); @@ -2743,8 +2815,10 @@ pub fn fcopyfile(fd_in: bun.FileDescriptor, fd_out: bun.FileDescriptor, flags: p } pub fn unlinkW(from: [:0]const u16) Maybe(void) { - if (windows.DeleteFileW(from.ptr) != 0) { - return .{ .err = Error.fromCode(bun.windows.getLastErrno(), .unlink) }; + const ret = windows.DeleteFileW(from); + if (Maybe(void).errnoSys(ret, .unlink)) |err| { + 
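// DeleteFileW returns FALSE (0) on failure; the previous check was inverted and returned an error on success. +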
log("DeleteFileW({s}) = {s}", .{ bun.fmt.fmtPath(u16, from, .{}), @tagName(err.getErrno()) }); + return err; } return Maybe(void).success; @@ -2752,14 +2826,15 @@ pub fn unlinkW(from: [:0]const u16) Maybe(void) { pub fn unlink(from: [:0]const u8) Maybe(void) { if (comptime Environment.isWindows) { - const w_buf = bun.WPathBufferPool.get(); - defer bun.WPathBufferPool.put(w_buf); - return unlinkW(bun.strings.toNTPath(w_buf, from)); + const w_buf = bun.w_path_buffer_pool.get(); + defer bun.w_path_buffer_pool.put(w_buf); + return unlinkW(bun.strings.toWPathNormalizeAutoExtend(w_buf, from)); } while (true) { if (Maybe(void).errnoSysP(syscall.unlink(from), .unlink, from)) |err| { if (err.getErrno() == .INTR) continue; + log("unlink({s}) = {s}", .{ from, @tagName(err.getErrno()) }); return err; } @@ -2775,8 +2850,8 @@ pub fn rmdirat(dirfd: bun.FileDescriptor, to: anytype) Maybe(void) { pub fn unlinkatWithFlags(dirfd: bun.FileDescriptor, to: anytype, flags: c_uint) Maybe(void) { if (Environment.isWindows) { if (comptime std.meta.Elem(@TypeOf(to)) == u8) { - const w_buf = bun.WPathBufferPool.get(); - defer bun.WPathBufferPool.put(w_buf); + const w_buf = bun.w_path_buffer_pool.get(); + defer bun.w_path_buffer_pool.put(w_buf); return unlinkatWithFlags(dirfd, bun.strings.toNTPath(w_buf, bun.span(to)), flags); } @@ -2790,7 +2865,7 @@ pub fn unlinkatWithFlags(dirfd: bun.FileDescriptor, to: anytype, flags: c_uint) if (Maybe(void).errnoSysFP(syscall.unlinkat(dirfd.cast(), to, flags), .unlink, dirfd, to)) |err| { if (err.getErrno() == .INTR) continue; if (comptime Environment.allow_assert) - log("unlinkat({}, {s}) = {d}", .{ dirfd, bun.sliceTo(to, 0), @intFromEnum(err.getErrno()) }); + log("unlinkat({}, {s}) = {s}", .{ dirfd, bun.sliceTo(to, 0), @tagName(err.getErrno()) }); return err; } if (comptime Environment.allow_assert) @@ -2808,7 +2883,7 @@ pub fn unlinkat(dirfd: bun.FileDescriptor, to: anytype) Maybe(void) { if (Maybe(void).errnoSysFP(syscall.unlinkat(dirfd.cast(), to, 0), .unlink, dirfd, to)) |err| { if (err.getErrno() == .INTR) continue; if (comptime Environment.allow_assert) - log("unlinkat({}, {s}) = {d}", .{ dirfd, bun.sliceTo(to, 0), @intFromEnum(err.getErrno()) }); + log("unlinkat({}, {s}) = {s}", .{ dirfd, bun.sliceTo(to, 0), @tagName(err.getErrno()) }); return err; } if (comptime Environment.allow_assert) @@ -3232,8 +3307,8 @@ pub fn getFileAttributes(path: anytype) ?WindowsFileAttributes { const attributes: WindowsFileAttributes = @bitCast(dword); return attributes; } else { - const wbuf = bun.WPathBufferPool.get(); - defer bun.WPathBufferPool.put(wbuf); + const wbuf = bun.w_path_buffer_pool.get(); + defer bun.w_path_buffer_pool.put(wbuf); const path_to_use = bun.strings.toKernel32Path(wbuf, path); return getFileAttributes(path_to_use); } @@ -3434,8 +3509,8 @@ pub const ExistsAtType = enum { }; pub fn existsAtType(fd: bun.FileDescriptor, subpath: anytype) Maybe(ExistsAtType) { if (comptime Environment.isWindows) { - const wbuf = bun.WPathBufferPool.get(); - defer bun.WPathBufferPool.put(wbuf); + const wbuf = bun.w_path_buffer_pool.get(); + defer bun.w_path_buffer_pool.put(wbuf); var path = if (std.meta.Child(@TypeOf(subpath)) == u16) bun.strings.toNTPath16(wbuf, subpath) else @@ -3496,8 +3571,8 @@ pub fn existsAtType(fd: bun.FileDescriptor, subpath: anytype) Maybe(ExistsAtType } if (std.meta.sentinel(@TypeOf(subpath)) == null) { - const path_buf = bun.PathBufferPool.get(); - defer bun.PathBufferPool.put(path_buf); + const path_buf = bun.path_buffer_pool.get(); + defer 
bun.path_buffer_pool.put(path_buf); @memcpy(path_buf[0..subpath.len], subpath); path_buf[subpath.len] = 0; const slice: [:0]const u8 = @ptrCast(path_buf); @@ -3701,28 +3776,69 @@ pub fn dup(fd: bun.FileDescriptor) Maybe(bun.FileDescriptor) { return dupWithFlags(fd, 0); } -pub fn linkat(dir_fd: bun.FileDescriptor, basename: []const u8, dest_dir_fd: bun.FileDescriptor, dest_name: []const u8) Maybe(void) { - return Maybe(void).errnoSysP( - std.c.linkat( - @intCast(dir_fd), - &(std.posix.toPosixPath(basename) catch return .{ - .err = .{ - .errno = @intFromEnum(E.NOMEM), - .syscall = .open, - }, - }), - @intCast(dest_dir_fd), - &(std.posix.toPosixPath(dest_name) catch return .{ - .err = .{ - .errno = @intFromEnum(E.NOMEM), - .syscall = .open, - }, - }), - 0, - ), - .link, - basename, - ) orelse Maybe(void).success; +pub fn link(comptime T: type, src: [:0]const T, dest: [:0]const T) Maybe(void) { + if (comptime Environment.isWindows) { + if (T == u8) { + return sys_uv.link(src, dest); + } + + const ret = bun.windows.CreateHardLinkW(dest, src, null); + if (Maybe(void).errnoSys(ret, .link)) |err| { + log("CreateHardLinkW({s}, {s}) = {s}", .{ + bun.fmt.fmtPath(T, dest, .{}), + bun.fmt.fmtPath(T, src, .{}), + @tagName(err.getErrno()), + }); + return err; + } + + log("CreateHardLinkW({s}, {s}) = 0", .{ + bun.fmt.fmtPath(T, dest, .{}), + bun.fmt.fmtPath(T, src, .{}), + }); + return .success; + } + + if (T == u16) { + @compileError("unexpected path type"); + } + + const ret = std.c.link(src, dest); + if (Maybe(void).errnoSysP(ret, .link, src)) |err| { + log("link({s}, {s}) = {s}", .{ src, dest, @tagName(err.getErrno()) }); + return err; + } + log("link({s}, {s}) = 0", .{ src, dest }); + return .success; +} + +pub fn linkat(src: bun.FileDescriptor, src_path: []const u8, dest: bun.FileDescriptor, dest_path: []const u8) Maybe(void) { + return linkatZ( + src, + &(std.posix.toPosixPath(src_path) catch return .{ + .err = .{ + .errno = @intFromEnum(E.NOMEM), + .syscall = .link, + }, + }), + dest, + &(std.posix.toPosixPath(dest_path) catch return .{ + .err = .{ + .errno = @intFromEnum(E.NOMEM), + .syscall = .link, + }, + }), + ); +} + +pub fn linkatZ(src: FD, src_path: [:0]const u8, dest: FD, dest_path: [:0]const u8) Maybe(void) { + const ret = std.c.linkat(src.cast(), src_path, dest.cast(), dest_path, 0); + if (Maybe(void).errnoSysP(ret, .link, src_path)) |err| { + log("linkat({}, {s}, {}, {s}) = {s}", .{ src, src_path, dest, dest_path, @tagName(err.getErrno()) }); + return err; + } + log("linkat({}, {s}, {}, {s}) = 0", .{ src, src_path, dest, dest_path }); + return .success; } pub fn linkatTmpfile(tmpfd: bun.FileDescriptor, dirfd: bun.FileDescriptor, name: [:0]const u8) Maybe(void) { diff --git a/src/walker_skippable.zig b/src/walker_skippable.zig index 679c5192dd..a352a32059 100644 --- a/src/walker_skippable.zig +++ b/src/walker_skippable.zig @@ -6,6 +6,9 @@ const path = std.fs.path; const DirIterator = bun.DirIterator; const Environment = bun.Environment; const OSPathSlice = bun.OSPathSlice; +const OSPathSliceZ = bun.OSPathSliceZ; +const OOM = bun.OOM; +const FD = bun.FD; stack: std.ArrayList(StackItem), name_buffer: NameBufferList, @@ -16,17 +19,16 @@ seed: u64 = 0, const NameBufferList = std.ArrayList(bun.OSPathChar); -const Dir = std.fs.Dir; const WrappedIterator = DirIterator.NewWrappedIterator(if (Environment.isWindows) .u16 else .u8); pub const WalkerEntry = struct { /// The containing directory. 
This can be used to operate directly on `basename` /// rather than `path`, avoiding `error.NameTooLong` for deeply nested paths. /// The directory remains open until `next` or `deinit` is called. - dir: Dir, - basename: OSPathSlice, - path: OSPathSlice, - kind: Dir.Entry.Kind, + dir: FD, + basename: OSPathSliceZ, + path: OSPathSliceZ, + kind: std.fs.Dir.Entry.Kind, }; const StackItem = struct { @@ -37,13 +39,13 @@ const StackItem = struct { /// After each call to this function, and on deinit(), the memory returned /// from this function becomes invalid. A copy must be made in order to keep /// a reference to the path. -pub fn next(self: *Walker) !?WalkerEntry { +pub fn next(self: *Walker) bun.sys.Maybe(?WalkerEntry) { while (self.stack.items.len != 0) { // `top` becomes invalid after appending to `self.stack` var top = &self.stack.items[self.stack.items.len - 1]; var dirname_len = top.dirname_len; switch (top.iter.next()) { - .err => |err| return bun.errnoToZigErr(err.errno), + .err => |err| return .initErr(err), .result => |res| { if (res) |base| { switch (base.kind) { @@ -79,37 +81,32 @@ pub fn next(self: *Walker) !?WalkerEntry { self.name_buffer.shrinkRetainingCapacity(dirname_len); if (self.name_buffer.items.len != 0) { - try self.name_buffer.append(path.sep); + self.name_buffer.append(path.sep) catch bun.outOfMemory(); dirname_len += 1; } - try self.name_buffer.appendSlice(base.name.slice()); + self.name_buffer.appendSlice(base.name.slice()) catch bun.outOfMemory(); const cur_len = self.name_buffer.items.len; - try self.name_buffer.append(0); - self.name_buffer.shrinkRetainingCapacity(cur_len); + self.name_buffer.append(0) catch bun.outOfMemory(); if (base.kind == .directory) { - var new_dir = (if (Environment.isWindows) - top.iter.iter.dir.openDirW(base.name.sliceAssumeZ(), .{ .iterate = true }) - else - top.iter.iter.dir.openDir(base.name.slice(), .{ .iterate = true })) catch |err| switch (err) { - error.NameTooLong => unreachable, // no path sep in base.name - else => |e| return e, + const new_dir = switch (bun.openDirForIterationOSPath(top.iter.iter.dir, base.name.slice())) { + .result => |fd| fd, + .err => |err| return .initErr(err), }; { - errdefer new_dir.close(); - try self.stack.append(StackItem{ + self.stack.append(StackItem{ .iter = DirIterator.iterate(new_dir, if (Environment.isWindows) .u16 else .u8), - .dirname_len = self.name_buffer.items.len, - }); + .dirname_len = cur_len, + }) catch bun.outOfMemory(); top = &self.stack.items[self.stack.items.len - 1]; } } - return WalkerEntry{ + return .initResult(WalkerEntry{ .dir = top.iter.iter.dir, - .basename = self.name_buffer.items[dirname_len..], - .path = self.name_buffer.items, + .basename = self.name_buffer.items[dirname_len..cur_len :0], + .path = self.name_buffer.items[0..cur_len :0], .kind = base.kind, - }; + }); } else { var item = self.stack.pop().?; if (self.stack.items.len != 0) { @@ -119,7 +116,7 @@ pub fn next(self: *Walker) !?WalkerEntry { }, } } - return null; + return .initResult(null); } pub fn deinit(self: *Walker) void { @@ -142,11 +139,11 @@ pub fn deinit(self: *Walker) void { /// The order of returned file system entries is undefined. /// `self` will not be closed after walking it. 
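+/// A walking sketch (hypothetical caller; assumes an already-open `dir_fd`
+/// and an enclosing function that returns a `bun.sys.Maybe`):
+///
+///   var walker = try walk(dir_fd, allocator, &.{}, &.{});
+///   defer walker.deinit();
+///   while (switch (walker.next()) {
+///       .result => |entry| entry,
+///       .err => |err| return .initErr(err),
+///   }) |entry| {
+///       _ = entry; // entry.dir is an FD; entry.path and entry.basename are null-terminated
+///   }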
pub fn walk( - self: Dir, + self: FD, allocator: Allocator, skip_filenames: []const OSPathSlice, skip_dirnames: []const OSPathSlice, -) !Walker { +) OOM!Walker { var name_buffer = NameBufferList.init(allocator); errdefer name_buffer.deinit(); diff --git a/src/which.zig b/src/which.zig index 34bed1b26b..2bb4f57340 100644 --- a/src/which.zig +++ b/src/which.zig @@ -20,8 +20,8 @@ pub fn which(buf: *bun.PathBuffer, path: []const u8, cwd: []const u8, bin: []con bun.Output.scoped(.which, true)("path={s} cwd={s} bin={s}", .{ path, cwd, bin }); if (bun.Environment.os == .windows) { - const convert_buf = bun.WPathBufferPool.get(); - defer bun.WPathBufferPool.put(convert_buf); + const convert_buf = bun.w_path_buffer_pool.get(); + defer bun.w_path_buffer_pool.put(convert_buf); const result = whichWin(convert_buf, path, cwd, bin) orelse return null; const result_converted = bun.strings.convertUTF16toUTF8InBuffer(buf, result) catch unreachable; buf[result_converted.len] = 0; @@ -133,8 +133,8 @@ fn searchBinInPath(buf: *bun.WPathBuffer, path_buf: *bun.PathBuffer, path: []con /// It is similar to Get-Command in powershell. pub fn whichWin(buf: *bun.WPathBuffer, path: []const u8, cwd: []const u8, bin: []const u8) ?[:0]const u16 { if (bin.len == 0) return null; - const path_buf = bun.PathBufferPool.get(); - defer bun.PathBufferPool.put(path_buf); + const path_buf = bun.path_buffer_pool.get(); + defer bun.path_buffer_pool.put(path_buf); const check_windows_extensions = !endsWithExtension(bin); diff --git a/test/cli/install/bun-pack.test.ts b/test/cli/install/bun-pack.test.ts index ea3693d1e6..31fa4e005d 100644 --- a/test/cli/install/bun-pack.test.ts +++ b/test/cli/install/bun-pack.test.ts @@ -12,7 +12,7 @@ beforeEach(() => { packageDir = tmpdirSync(); }); -async function packExpectError(cwd: string, env: NodeJS.ProcessEnv, ...args: string[]) { +async function packExpectError(cwd: string, env: NodeJS.Dict<string>, ...args: string[]) { const { stdout, stderr, exited } = spawn({ cmd: [bunExe(), "pm", "pack", ...args], cwd, diff --git a/test/cli/install/isolated-install.test.ts b/test/cli/install/isolated-install.test.ts new file mode 100644 index 0000000000..bf8aac23f6 --- /dev/null +++ b/test/cli/install/isolated-install.test.ts @@ -0,0 +1,433 @@ +import { file, write } from "bun"; +import { afterAll, beforeAll, describe, expect, setDefaultTimeout, test } from "bun:test"; +import { existsSync, readlinkSync } from "fs"; +import { VerdaccioRegistry, bunEnv, readdirSorted, runBunInstall } from "harness"; +import { join } from "path"; + +const registry = new VerdaccioRegistry(); + +beforeAll(async () => { + setDefaultTimeout(10 * 60 * 1000); + await registry.start(); +}); + +afterAll(() => { + registry.stop(); +}); + +describe("basic", () => { + test("single dependency", async () => { + const { packageJson, packageDir } = await registry.createTestDir(); + + await write( + packageJson, + JSON.stringify({ + name: "test-pkg-1", + workspaces: { + nodeLinker: "isolated", + }, + dependencies: { + "no-deps": "1.0.0", + }, + }), + ); + + await runBunInstall(bunEnv, packageDir); + + expect(readlinkSync(join(packageDir, "node_modules", "no-deps"))).toBe( + join(".bun", "no-deps@1.0.0", "node_modules", "no-deps"), + ); + expect(readlinkSync(join(packageDir, "node_modules", ".bun", "node_modules", "no-deps"))).toBe( + join("..", "no-deps@1.0.0", "node_modules", "no-deps"), + ); + expect( + await file( + join(packageDir, "node_modules", ".bun", "no-deps@1.0.0", "node_modules", "no-deps", "package.json"), + ).json(), + ).toEqual({ + 
name: "no-deps", + version: "1.0.0", + }); + }); + + test("scope package", async () => { + const { packageJson, packageDir } = await registry.createTestDir(); + + await write( + packageJson, + JSON.stringify({ + name: "test-pkg-2", + workspaces: { + nodeLinker: "isolated", + }, + dependencies: { + "@types/is-number": "1.0.0", + }, + }), + ); + + await runBunInstall(bunEnv, packageDir); + + expect(readlinkSync(join(packageDir, "node_modules", "@types", "is-number"))).toBe( + join("..", ".bun", "@types+is-number@1.0.0", "node_modules", "@types", "is-number"), + ); + expect(readlinkSync(join(packageDir, "node_modules", ".bun", "node_modules", "@types", "is-number"))).toBe( + join("..", "..", "@types+is-number@1.0.0", "node_modules", "@types", "is-number"), + ); + expect( + await file( + join( + packageDir, + "node_modules", + ".bun", + "@types+is-number@1.0.0", + "node_modules", + "@types", + "is-number", + "package.json", + ), + ).json(), + ).toEqual({ + name: "@types/is-number", + version: "1.0.0", + }); + }); + + test("transitive dependencies", async () => { + const { packageJson, packageDir } = await registry.createTestDir(); + + await write( + packageJson, + JSON.stringify({ + name: "test-pkg-3", + workspaces: { + nodeLinker: "isolated", + }, + dependencies: { + "two-range-deps": "1.0.0", + }, + }), + ); + + await runBunInstall(bunEnv, packageDir); + + expect(await readdirSorted(join(packageDir, "node_modules"))).toEqual([".bun", "two-range-deps"]); + expect(readlinkSync(join(packageDir, "node_modules", "two-range-deps"))).toBe( + join(".bun", "two-range-deps@1.0.0", "node_modules", "two-range-deps"), + ); + expect(readlinkSync(join(packageDir, "node_modules", ".bun", "node_modules", "two-range-deps"))).toBe( + join("..", "two-range-deps@1.0.0", "node_modules", "two-range-deps"), + ); + expect(readlinkSync(join(packageDir, "node_modules", ".bun", "node_modules", "no-deps"))).toBe( + join("..", "no-deps@1.1.0", "node_modules", "no-deps"), + ); + expect(readlinkSync(join(packageDir, "node_modules", ".bun", "node_modules", "@types", "is-number"))).toBe( + join("..", "..", "@types+is-number@2.0.0", "node_modules", "@types", "is-number"), + ); + expect( + await file( + join( + packageDir, + "node_modules", + ".bun", + "two-range-deps@1.0.0", + "node_modules", + "two-range-deps", + "package.json", + ), + ).json(), + ).toEqual({ + name: "two-range-deps", + version: "1.0.0", + dependencies: { + "no-deps": "^1.0.0", + "@types/is-number": ">=1.0.0", + }, + }); + expect( + await readdirSorted(join(packageDir, "node_modules", ".bun", "two-range-deps@1.0.0", "node_modules")), + ).toEqual(["@types", "no-deps", "two-range-deps"]); + expect( + readlinkSync( + join(packageDir, "node_modules", ".bun", "two-range-deps@1.0.0", "node_modules", "@types", "is-number"), + ), + ).toBe(join("..", "..", "..", "@types+is-number@2.0.0", "node_modules", "@types", "is-number")); + expect( + readlinkSync(join(packageDir, "node_modules", ".bun", "two-range-deps@1.0.0", "node_modules", "no-deps")), + ).toBe(join("..", "..", "no-deps@1.1.0", "node_modules", "no-deps")); + expect( + await file( + join(packageDir, "node_modules", ".bun", "no-deps@1.1.0", "node_modules", "no-deps", "package.json"), + ).json(), + ).toEqual({ + name: "no-deps", + version: "1.1.0", + }); + expect( + await file( + join( + packageDir, + "node_modules", + ".bun", + "@types+is-number@2.0.0", + "node_modules", + "@types", + "is-number", + "package.json", + ), + ).json(), + ).toEqual({ + name: "@types/is-number", + version: "2.0.0", + }); + }); +}); 
+ +test("handles cyclic dependencies", async () => { + const { packageJson, packageDir } = await registry.createTestDir(); + + await write( + packageJson, + JSON.stringify({ + name: "test-pkg-cyclic", + workspaces: { + nodeLinker: "isolated", + }, + dependencies: { + "a-dep-b": "1.0.0", + }, + }), + ); + + await runBunInstall(bunEnv, packageDir); + + expect(readlinkSync(join(packageDir, "node_modules", "a-dep-b"))).toBe( + join(".bun", "a-dep-b@1.0.0", "node_modules", "a-dep-b"), + ); + expect(readlinkSync(join(packageDir, "node_modules", ".bun", "node_modules", "a-dep-b"))).toBe( + join("..", "a-dep-b@1.0.0", "node_modules", "a-dep-b"), + ); + expect(readlinkSync(join(packageDir, "node_modules", ".bun", "node_modules", "b-dep-a"))).toBe( + join("..", "b-dep-a@1.0.0", "node_modules", "b-dep-a"), + ); + expect( + await file( + join(packageDir, "node_modules", ".bun", "a-dep-b@1.0.0", "node_modules", "a-dep-b", "package.json"), + ).json(), + ).toEqual({ + name: "a-dep-b", + version: "1.0.0", + dependencies: { + "b-dep-a": "1.0.0", + }, + }); + + expect(readlinkSync(join(packageDir, "node_modules", ".bun", "a-dep-b@1.0.0", "node_modules", "b-dep-a"))).toBe( + join("..", "..", "b-dep-a@1.0.0", "node_modules", "b-dep-a"), + ); + expect( + await file( + join(packageDir, "node_modules", ".bun", "a-dep-b@1.0.0", "node_modules", "b-dep-a", "package.json"), + ).json(), + ).toEqual({ + name: "b-dep-a", + version: "1.0.0", + dependencies: { + "a-dep-b": "1.0.0", + }, + }); +}); + +test("can install folder dependencies", async () => { + const { packageJson, packageDir } = await registry.createTestDir(); + + await write( + packageJson, + JSON.stringify({ + name: "test-pkg-folder-deps", + workspaces: { + nodeLinker: "isolated", + }, + dependencies: { + "folder-dep": "file:./pkg-1", + }, + }), + ); + + await write(join(packageDir, "pkg-1", "package.json"), JSON.stringify({ name: "folder-dep", version: "1.0.0" })); + + await runBunInstall(bunEnv, packageDir); + + expect(readlinkSync(join(packageDir, "node_modules", "folder-dep"))).toBe( + join(".bun", "folder-dep@file+pkg-1", "node_modules", "folder-dep"), + ); + expect( + await file( + join(packageDir, "node_modules", ".bun", "folder-dep@file+pkg-1", "node_modules", "folder-dep", "package.json"), + ).json(), + ).toEqual({ + name: "folder-dep", + version: "1.0.0", + }); + + await write(join(packageDir, "pkg-1", "index.js"), "module.exports = 'hello from pkg-1';"); + + await runBunInstall(bunEnv, packageDir, { savesLockfile: false }); + expect(readlinkSync(join(packageDir, "node_modules", "folder-dep"))).toBe( + join(".bun", "folder-dep@file+pkg-1", "node_modules", "folder-dep"), + ); + expect( + await file( + join(packageDir, "node_modules", ".bun", "folder-dep@file+pkg-1", "node_modules", "folder-dep", "index.js"), + ).text(), + ).toBe("module.exports = 'hello from pkg-1';"); +}); + +describe("isolated workspaces", () => { + test("basic", async () => { + const { packageJson, packageDir } = await registry.createTestDir(); + + await Promise.all([ + write( + packageJson, + JSON.stringify({ + name: "test-pkg-workspaces", + workspaces: { + nodeLinker: "isolated", + packages: ["pkg-1", "pkg-2"], + }, + dependencies: { + "no-deps": "1.0.0", + }, + }), + ), + write( + join(packageDir, "pkg-1", "package.json"), + JSON.stringify({ + name: "pkg-1", + version: "1.0.0", + dependencies: { + "a-dep": "1.0.1", + "pkg-2": "workspace:", + "@types/is-number": "1.0.0", + }, + }), + ), + write( + join(packageDir, "pkg-2", "package.json"), + JSON.stringify({ + name: "pkg-2", + 
version: "1.0.0", + dependencies: { + "b-dep-a": "1.0.0", + }, + }), + ), + ]); + + await runBunInstall(bunEnv, packageDir); + + expect(existsSync(join(packageDir, "node_modules", "pkg-1"))).toBeFalse(); + expect(readlinkSync(join(packageDir, "pkg-1", "node_modules", "pkg-2"))).toBe(join("..", "..", "pkg-2")); + expect(await readdirSorted(join(packageDir, "node_modules"))).toEqual([".bun", "no-deps"]); + expect(readlinkSync(join(packageDir, "node_modules", "no-deps"))).toBe( + join(".bun", "no-deps@1.0.0", "node_modules", "no-deps"), + ); + + expect(await readdirSorted(join(packageDir, "pkg-1", "node_modules"))).toEqual(["@types", "a-dep", "pkg-2"]); + expect(await readdirSorted(join(packageDir, "pkg-2", "node_modules"))).toEqual(["b-dep-a"]); + expect(await readdirSorted(join(packageDir, "node_modules", ".bun"))).toEqual([ + "@types+is-number@1.0.0", + "a-dep-b@1.0.0", + "a-dep@1.0.1", + "b-dep-a@1.0.0", + "no-deps@1.0.0", + "node_modules", + ]); + + expect(readlinkSync(join(packageDir, "node_modules", ".bun", "node_modules", "no-deps"))).toBe( + join("..", "no-deps@1.0.0", "node_modules", "no-deps"), + ); + expect( + await file( + join(packageDir, "node_modules", ".bun", "no-deps@1.0.0", "node_modules", "no-deps", "package.json"), + ).json(), + ).toEqual({ + name: "no-deps", + version: "1.0.0", + }); + }); +}); + +test("many transitive dependencies", async () => { + const { packageJson, packageDir } = await registry.createTestDir(); + + await write( + packageJson, + JSON.stringify({ + name: "test-pkg-many-transitive-deps", + workspaces: { + nodeLinker: "isolated", + }, + dependencies: { + "alias-loop-1": "1.0.0", + "alias-loop-2": "1.0.0", + "1-peer-dep-a": "1.0.0", + "basic-1": "1.0.0", + "is-number": "1.0.0", + }, + }), + ); + + await runBunInstall(bunEnv, packageDir); + + expect(await readdirSorted(join(packageDir, "node_modules"))).toEqual([ + ".bun", + "1-peer-dep-a", + "alias-loop-1", + "alias-loop-2", + "basic-1", + "is-number", + ]); + expect(readlinkSync(join(packageDir, "node_modules", "alias-loop-1"))).toBe( + join(".bun", "alias-loop-1@1.0.0", "node_modules", "alias-loop-1"), + ); + expect(readlinkSync(join(packageDir, "node_modules", ".bun", "node_modules", "alias-loop-1"))).toBe( + join("..", "alias-loop-1@1.0.0", "node_modules", "alias-loop-1"), + ); + expect(readlinkSync(join(packageDir, "node_modules", ".bun", "node_modules", "alias-loop-2"))).toBe( + join("..", "alias-loop-2@1.0.0", "node_modules", "alias-loop-2"), + ); + expect( + await file( + join(packageDir, "node_modules", ".bun", "alias-loop-1@1.0.0", "node_modules", "alias-loop-1", "package.json"), + ).json(), + ).toEqual({ + name: "alias-loop-1", + version: "1.0.0", + dependencies: { + "alias1": "npm:alias-loop-2@*", + }, + }); + expect( + await file( + join(packageDir, "node_modules", ".bun", "alias-loop-2@1.0.0", "node_modules", "alias-loop-2", "package.json"), + ).json(), + ).toEqual({ + name: "alias-loop-2", + version: "1.0.0", + dependencies: { + "alias2": "npm:alias-loop-1@*", + }, + }); + // expect(await readdirSorted(join(packageDir, "node_modules", ".bun", "alias-loop-1@1.0.0", "node_modules"))).toEqual([ + // "alias1", + // "alias-loop-1", + // ]); + // expect(readlinkSync(join(packageDir, "node_modules", ".bun", "alias-loop-1@1.0.0", "node_modules", "alias1"))).toBe( + // join("..", "..", "alias-loop-2@1.0.0", "node_modules", "alias-loop-2"), + // ); + // expect(readlinkSync(join(packageDir, "node_modules", ".bun", "alias-loop-2@1.0.0", "node_modules", "alias2"))).toBe( + // join("..", "..", 
"alias-loop-1@1.0.0", "node_modules", "alias-loop-1"), + // ); +}); diff --git a/test/cli/install/registry/packages/a-dep-b/a-dep-b-1.0.0.tgz b/test/cli/install/registry/packages/a-dep-b/a-dep-b-1.0.0.tgz new file mode 100644 index 0000000000..a97705bcbe Binary files /dev/null and b/test/cli/install/registry/packages/a-dep-b/a-dep-b-1.0.0.tgz differ diff --git a/test/cli/install/registry/packages/a-dep-b/package.json b/test/cli/install/registry/packages/a-dep-b/package.json new file mode 100644 index 0000000000..6313607fcf --- /dev/null +++ b/test/cli/install/registry/packages/a-dep-b/package.json @@ -0,0 +1,44 @@ +{ + "name": "a-dep-b", + "versions": { + "1.0.0": { + "name": "a-dep-b", + "version": "1.0.0", + "dependencies": { + "b-dep-a": "1.0.0" + }, + "_id": "a-dep-b@1.0.0", + "_integrity": "sha512-PW1l4ruYaxcIw4rMkOVzb9zcR2srZhTPv2H2aH7QFc7vVxkD7EEMGHg1GPT8ycLFb8vriydUXEPwOy1FcbodaQ==", + "_nodeVersion": "22.6.0", + "_npmVersion": "10.8.3", + "integrity": "sha512-PW1l4ruYaxcIw4rMkOVzb9zcR2srZhTPv2H2aH7QFc7vVxkD7EEMGHg1GPT8ycLFb8vriydUXEPwOy1FcbodaQ==", + "shasum": "ed69ada9bf7341ed905c41f1282bd87713cc315f", + "dist": { + "integrity": "sha512-PW1l4ruYaxcIw4rMkOVzb9zcR2srZhTPv2H2aH7QFc7vVxkD7EEMGHg1GPT8ycLFb8vriydUXEPwOy1FcbodaQ==", + "shasum": "ed69ada9bf7341ed905c41f1282bd87713cc315f", + "tarball": "http://http://localhost:4873/a-dep-b/-/a-dep-b-1.0.0.tgz" + }, + "contributors": [] + } + }, + "time": { + "modified": "2025-06-01T20:45:08.728Z", + "created": "2025-06-01T20:45:08.728Z", + "1.0.0": "2025-06-01T20:45:08.728Z" + }, + "users": {}, + "dist-tags": { + "latest": "1.0.0" + }, + "_uplinks": {}, + "_distfiles": {}, + "_attachments": { + "a-dep-b-1.0.0.tgz": { + "shasum": "ed69ada9bf7341ed905c41f1282bd87713cc315f", + "version": "1.0.0" + } + }, + "_rev": "", + "_id": "a-dep-b", + "readme": "" +} \ No newline at end of file diff --git a/test/cli/install/registry/packages/b-dep-a/b-dep-a-1.0.0.tgz b/test/cli/install/registry/packages/b-dep-a/b-dep-a-1.0.0.tgz new file mode 100644 index 0000000000..7dd885257c Binary files /dev/null and b/test/cli/install/registry/packages/b-dep-a/b-dep-a-1.0.0.tgz differ diff --git a/test/cli/install/registry/packages/b-dep-a/package.json b/test/cli/install/registry/packages/b-dep-a/package.json new file mode 100644 index 0000000000..349d09efc8 --- /dev/null +++ b/test/cli/install/registry/packages/b-dep-a/package.json @@ -0,0 +1,44 @@ +{ + "name": "b-dep-a", + "versions": { + "1.0.0": { + "name": "b-dep-a", + "version": "1.0.0", + "dependencies": { + "a-dep-b": "1.0.0" + }, + "_id": "b-dep-a@1.0.0", + "_integrity": "sha512-1owp4Wy5QE893BGgjDQGZm9Oayk38MA++fXmPTQA1WY/NFQv7CcCVpK2Ht/4mU4KejDeHOxaAj7qbzv1dSQA2w==", + "_nodeVersion": "22.6.0", + "_npmVersion": "10.8.3", + "integrity": "sha512-1owp4Wy5QE893BGgjDQGZm9Oayk38MA++fXmPTQA1WY/NFQv7CcCVpK2Ht/4mU4KejDeHOxaAj7qbzv1dSQA2w==", + "shasum": "3d94682ad5231596f47745e03ef3d59af5945e1d", + "dist": { + "integrity": "sha512-1owp4Wy5QE893BGgjDQGZm9Oayk38MA++fXmPTQA1WY/NFQv7CcCVpK2Ht/4mU4KejDeHOxaAj7qbzv1dSQA2w==", + "shasum": "3d94682ad5231596f47745e03ef3d59af5945e1d", + "tarball": "http://http://localhost:4873/b-dep-a/-/b-dep-a-1.0.0.tgz" + }, + "contributors": [] + } + }, + "time": { + "modified": "2025-06-01T20:45:23.481Z", + "created": "2025-06-01T20:45:23.481Z", + "1.0.0": "2025-06-01T20:45:23.481Z" + }, + "users": {}, + "dist-tags": { + "latest": "1.0.0" + }, + "_uplinks": {}, + "_distfiles": {}, + "_attachments": { + "b-dep-a-1.0.0.tgz": { + "shasum": "3d94682ad5231596f47745e03ef3d59af5945e1d", + 
"version": "1.0.0" + } + }, + "_rev": "", + "_id": "b-dep-a", + "readme": "" +} \ No newline at end of file diff --git a/test/cli/install/registry/packages/diff-peer-1/diff-peer-1-1.0.0.tgz b/test/cli/install/registry/packages/diff-peer-1/diff-peer-1-1.0.0.tgz new file mode 100644 index 0000000000..6efe2d9c80 Binary files /dev/null and b/test/cli/install/registry/packages/diff-peer-1/diff-peer-1-1.0.0.tgz differ diff --git a/test/cli/install/registry/packages/diff-peer-1/package.json b/test/cli/install/registry/packages/diff-peer-1/package.json new file mode 100644 index 0000000000..3630c213f8 --- /dev/null +++ b/test/cli/install/registry/packages/diff-peer-1/package.json @@ -0,0 +1,42 @@ +{ + "name": "diff-peer-1", + "versions": { + "1.0.0": { + "name": "diff-peer-1", + "version": "1.0.0", + "dependencies": { + "has-peer": "1.0.0", + "peer-no-deps": "1.0.0" + }, + "_id": "diff-peer-1@1.0.0", + "_nodeVersion": "23.10.0", + "_npmVersion": "10.9.2", + "dist": { + "integrity": "sha512-a9nTh3aUOE6VDmn23Q9v6JUqBGnsnSBGcZ7P5Qff+5YuJ3KhWd0rbY/+DLDpwO7zAsTzKP1Bs9KtWDwQHzocVA==", + "shasum": "2a72f1f0e12b5a7790c26cce6b0e018b47e06c90", + "tarball": "http://localhost:4873/diff-peer-1/-/diff-peer-1-1.0.0.tgz" + }, + "contributors": [] + } + }, + "time": { + "modified": "2025-06-08T19:48:23.111Z", + "created": "2025-06-08T19:48:23.111Z", + "1.0.0": "2025-06-08T19:48:23.111Z" + }, + "users": {}, + "dist-tags": { + "latest": "1.0.0" + }, + "_uplinks": {}, + "_distfiles": {}, + "_attachments": { + "diff-peer-1-1.0.0.tgz": { + "shasum": "2a72f1f0e12b5a7790c26cce6b0e018b47e06c90", + "version": "1.0.0" + } + }, + "_rev": "", + "_id": "diff-peer-1", + "readme": "ERROR: No README data found!" +} \ No newline at end of file diff --git a/test/cli/install/registry/packages/diff-peer-2/diff-peer-2-1.0.0.tgz b/test/cli/install/registry/packages/diff-peer-2/diff-peer-2-1.0.0.tgz new file mode 100644 index 0000000000..20e7f30b3f Binary files /dev/null and b/test/cli/install/registry/packages/diff-peer-2/diff-peer-2-1.0.0.tgz differ diff --git a/test/cli/install/registry/packages/diff-peer-2/package.json b/test/cli/install/registry/packages/diff-peer-2/package.json new file mode 100644 index 0000000000..73646097ba --- /dev/null +++ b/test/cli/install/registry/packages/diff-peer-2/package.json @@ -0,0 +1,42 @@ +{ + "name": "diff-peer-2", + "versions": { + "1.0.0": { + "name": "diff-peer-2", + "version": "1.0.0", + "dependencies": { + "has-peer": "1.0.0", + "peer-no-deps": "1.0.1" + }, + "_id": "diff-peer-2@1.0.0", + "_nodeVersion": "23.10.0", + "_npmVersion": "10.9.2", + "dist": { + "integrity": "sha512-SPuo1oUuIxLXS9SJa35qU74g3rhBuK5mbdI1HGdRKQJXByDrF+msNAitd0v1g+tDknVHP9otSZllp7XelLQorQ==", + "shasum": "4d0819fe19cb838ed81b346c1f07b823158a541f", + "tarball": "http://localhost:4873/diff-peer-2/-/diff-peer-2-1.0.0.tgz" + }, + "contributors": [] + } + }, + "time": { + "modified": "2025-06-08T19:48:32.766Z", + "created": "2025-06-08T19:48:32.766Z", + "1.0.0": "2025-06-08T19:48:32.766Z" + }, + "users": {}, + "dist-tags": { + "latest": "1.0.0" + }, + "_uplinks": {}, + "_distfiles": {}, + "_attachments": { + "diff-peer-2-1.0.0.tgz": { + "shasum": "4d0819fe19cb838ed81b346c1f07b823158a541f", + "version": "1.0.0" + } + }, + "_rev": "", + "_id": "diff-peer-2", + "readme": "ERROR: No README data found!" 
+} \ No newline at end of file diff --git a/test/cli/install/registry/packages/has-peer/has-peer-1.0.0.tgz b/test/cli/install/registry/packages/has-peer/has-peer-1.0.0.tgz new file mode 100644 index 0000000000..e5796727d9 Binary files /dev/null and b/test/cli/install/registry/packages/has-peer/has-peer-1.0.0.tgz differ diff --git a/test/cli/install/registry/packages/has-peer/package.json b/test/cli/install/registry/packages/has-peer/package.json new file mode 100644 index 0000000000..bd11b78261 --- /dev/null +++ b/test/cli/install/registry/packages/has-peer/package.json @@ -0,0 +1,41 @@ +{ + "name": "has-peer", + "versions": { + "1.0.0": { + "name": "has-peer", + "version": "1.0.0", + "peerDependencies": { + "peer-no-deps": "^1.0.0" + }, + "_id": "has-peer@1.0.0", + "_nodeVersion": "23.10.0", + "_npmVersion": "10.9.2", + "dist": { + "integrity": "sha512-Q7Sg8KeLCUYEurarnoM/c31svn1IvmwYtkZ7DQdzJg4qzONeXs5u/q32iguDmzGS330ch/GnTiwnUVdhIuB8cQ==", + "shasum": "e0a4f8b2812eec8eada2aef68b71cdf572236702", + "tarball": "http://localhost:4873/has-peer/-/has-peer-1.0.0.tgz" + }, + "contributors": [] + } + }, + "time": { + "modified": "2025-06-08T19:49:59.426Z", + "created": "2025-06-08T19:49:59.426Z", + "1.0.0": "2025-06-08T19:49:59.426Z" + }, + "users": {}, + "dist-tags": { + "latest": "1.0.0" + }, + "_uplinks": {}, + "_distfiles": {}, + "_attachments": { + "has-peer-1.0.0.tgz": { + "shasum": "e0a4f8b2812eec8eada2aef68b71cdf572236702", + "version": "1.0.0" + } + }, + "_rev": "", + "_id": "has-peer", + "readme": "ERROR: No README data found!" +} \ No newline at end of file diff --git a/test/cli/install/registry/packages/peer-no-deps/package.json b/test/cli/install/registry/packages/peer-no-deps/package.json new file mode 100644 index 0000000000..844368ab96 --- /dev/null +++ b/test/cli/install/registry/packages/peer-no-deps/package.json @@ -0,0 +1,74 @@ +{ + "name": "peer-no-deps", + "versions": { + "1.0.0": { + "name": "peer-no-deps", + "version": "1.0.0", + "_id": "peer-no-deps@1.0.0", + "_nodeVersion": "23.10.0", + "_npmVersion": "10.9.2", + "dist": { + "integrity": "sha512-SfaNgbuAdCAj30SPPmdUNQLMFYoQcBD2dS7cxyv+dutkDyCY/ZzxGwK2syEkzN7QuZNdXouiNRx43mdxC/YpfA==", + "shasum": "508a718b20f2e452919a86fc2add84c008f120d2", + "tarball": "http://localhost:4873/peer-no-deps/-/peer-no-deps-1.0.0.tgz" + }, + "contributors": [] + }, + "1.0.1": { + "name": "peer-no-deps", + "version": "1.0.1", + "_id": "peer-no-deps@1.0.1", + "_nodeVersion": "23.10.0", + "_npmVersion": "10.9.2", + "dist": { + "integrity": "sha512-V/R/oUJEjX8GWwGs6Ayye+6alHjRj0eKkpDJPzywgUjTt0iQIaTDSRCgieMfHLgB1JSFs2ogyppAXX5cwQ7lWw==", + "shasum": "7f21c80e4f2ec05c453a73aa78f995e21d8008d1", + "tarball": "http://localhost:4873/peer-no-deps/-/peer-no-deps-1.0.1.tgz" + }, + "contributors": [] + }, + "2.0.0": { + "name": "peer-no-deps", + "version": "2.0.0", + "_id": "peer-no-deps@2.0.0", + "_nodeVersion": "23.10.0", + "_npmVersion": "10.9.2", + "dist": { + "integrity": "sha512-CR+AY66qH9+QUbKt7dxuH4iw36/mFIkpk1I8Lf+2DfucwGRcc0qwYswXQy+70jtz7ylHkmUMbhhgcMsIdsfK+w==", + "shasum": "5ae71b940adc2f9a1b346897183e7042591735c0", + "tarball": "http://localhost:4873/peer-no-deps/-/peer-no-deps-2.0.0.tgz" + }, + "contributors": [] + } + }, + "time": { + "modified": "2025-06-08T22:04:06.599Z", + "created": "2025-06-08T19:50:19.891Z", + "1.0.0": "2025-06-08T19:50:19.891Z", + "1.0.1": "2025-06-08T19:50:23.698Z", + "2.0.0": "2025-06-08T22:04:06.599Z" + }, + "users": {}, + "dist-tags": { + "latest": "2.0.0" + }, + "_uplinks": {}, + "_distfiles": {}, + 
"_attachments": { + "peer-no-deps-1.0.0.tgz": { + "shasum": "508a718b20f2e452919a86fc2add84c008f120d2", + "version": "1.0.0" + }, + "peer-no-deps-1.0.1.tgz": { + "shasum": "7f21c80e4f2ec05c453a73aa78f995e21d8008d1", + "version": "1.0.1" + }, + "peer-no-deps-2.0.0.tgz": { + "shasum": "5ae71b940adc2f9a1b346897183e7042591735c0", + "version": "2.0.0" + } + }, + "_rev": "", + "_id": "peer-no-deps", + "readme": "ERROR: No README data found!" +} \ No newline at end of file diff --git a/test/cli/install/registry/packages/peer-no-deps/peer-no-deps-1.0.0.tgz b/test/cli/install/registry/packages/peer-no-deps/peer-no-deps-1.0.0.tgz new file mode 100644 index 0000000000..5d240567bb Binary files /dev/null and b/test/cli/install/registry/packages/peer-no-deps/peer-no-deps-1.0.0.tgz differ diff --git a/test/cli/install/registry/packages/peer-no-deps/peer-no-deps-1.0.1.tgz b/test/cli/install/registry/packages/peer-no-deps/peer-no-deps-1.0.1.tgz new file mode 100644 index 0000000000..f512bfe1db Binary files /dev/null and b/test/cli/install/registry/packages/peer-no-deps/peer-no-deps-1.0.1.tgz differ diff --git a/test/cli/install/registry/packages/peer-no-deps/peer-no-deps-2.0.0.tgz b/test/cli/install/registry/packages/peer-no-deps/peer-no-deps-2.0.0.tgz new file mode 100644 index 0000000000..a610fdfec1 Binary files /dev/null and b/test/cli/install/registry/packages/peer-no-deps/peer-no-deps-2.0.0.tgz differ diff --git a/test/harness.ts b/test/harness.ts index 001d6b39e0..b6a0ad0a83 100644 --- a/test/harness.ts +++ b/test/harness.ts @@ -1125,7 +1125,7 @@ export function tmpdirSync(pattern: string = "bun.test."): string { } export async function runBunInstall( - env: NodeJS.ProcessEnv, + env: NodeJS.Dict, cwd: string, options?: { allowWarnings?: boolean; @@ -1213,7 +1213,7 @@ export async function runBunUpdate( return { out: out.replace(/\s*\[[0-9\.]+m?s\]\s*$/, "").split(/\r?\n/), err, exitCode }; } -export async function pack(cwd: string, env: NodeJS.ProcessEnv, ...args: string[]) { +export async function pack(cwd: string, env: NodeJS.Dict, ...args: string[]) { const { stdout, stderr, exited } = Bun.spawn({ cmd: [bunExe(), "pm", "pack", ...args], cwd, @@ -1647,7 +1647,7 @@ export class VerdaccioRegistry { async writeBunfig(dir: string, opts: BunfigOpts = {}) { let bunfig = ` [install] - cache = "${join(dir, ".bun-cache")}" + cache = "${join(dir, ".bun-cache").replaceAll("\\", "\\\\")}" `; if ("saveTextLockfile" in opts) { bunfig += `saveTextLockfile = ${opts.saveTextLockfile} diff --git a/test/internal/ban-words.test.ts b/test/internal/ban-words.test.ts index 1a78466ae7..9ef078e799 100644 --- a/test/internal/ban-words.test.ts +++ b/test/internal/ban-words.test.ts @@ -32,15 +32,15 @@ const words: Record "== alloc.ptr": { reason: "The std.mem.Allocator context pointer can be undefined, which makes this comparison undefined behavior" }, "!= alloc.ptr": { reason: "The std.mem.Allocator context pointer can be undefined, which makes this comparison undefined behavior" }, - [String.raw`: [a-zA-Z0-9_\.\*\?\[\]\(\)]+ = undefined,`]: { reason: "Do not default a struct field to undefined", limit: 243, regex: true }, + [String.raw`: [a-zA-Z0-9_\.\*\?\[\]\(\)]+ = undefined,`]: { reason: "Do not default a struct field to undefined", limit: 242, regex: true }, "usingnamespace": { reason: "Zig 0.15 will remove `usingnamespace`" }, "catch unreachable": { reason: "For out-of-memory, prefer 'catch bun.outOfMemory()'", limit: 1866 }, - "std.fs.Dir": { reason: "Prefer bun.sys + bun.FD instead of std.fs", limit: 179 }, - 
"std.fs.cwd": { reason: "Prefer bun.FD.cwd()", limit: 103 }, + "std.fs.Dir": { reason: "Prefer bun.sys + bun.FD instead of std.fs", limit: 170 }, + "std.fs.cwd": { reason: "Prefer bun.FD.cwd()", limit: 102 }, "std.fs.File": { reason: "Prefer bun.sys + bun.FD instead of std.fs", limit: 62 }, ".stdFile()": { reason: "Prefer bun.sys + bun.FD instead of std.fs.File. Zig hides 'errno' when Bun wants to match libuv", limit: 18 }, - ".stdDir()": { reason: "Prefer bun.sys + bun.FD instead of std.fs.File. Zig hides 'errno' when Bun wants to match libuv", limit: 49 }, + ".stdDir()": { reason: "Prefer bun.sys + bun.FD instead of std.fs.File. Zig hides 'errno' when Bun wants to match libuv", limit: 40 }, ".arguments_old(": { reason: "Please migrate to .argumentsAsArray() or another argument API", limit: 280 }, "// autofix": { reason: "Evaluate if this variable should be deleted entirely or explicitly discarded.", limit: 173 }, diff --git a/test/no-validate-exceptions.txt b/test/no-validate-exceptions.txt index e1a5bffacb..6275da6dc6 100644 --- a/test/no-validate-exceptions.txt +++ b/test/no-validate-exceptions.txt @@ -34,6 +34,7 @@ test/bundler/esbuild/css.test.ts test/bundler/esbuild/dce.test.ts test/bundler/esbuild/extra.test.ts test/bundler/esbuild/importstar.test.ts +test/cli/install/isolated-install.test.ts test/bundler/esbuild/importstar_ts.test.ts test/bundler/esbuild/loader.test.ts test/bundler/esbuild/lower.test.ts