From c85dd4e3bff14e66ae362d5178c357d7cb507b6f Mon Sep 17 00:00:00 2001
From: Zack Radisic <56137411+zackradisic@users.noreply.github.com>
Date: Thu, 6 Jun 2024 17:48:05 -0700
Subject: [PATCH] feat: `bun patch` (#11470)

Co-authored-by: Dylan Conway
Co-authored-by: zackradisic
Co-authored-by: Jarred Sumner
---
 CMakeLists.txt                             |    5 +
 build.zig                                  |    5 +
 docs/cli/patch-commit.md                   |    7 +
 docs/cli/patch.md                          |   10 +
 package.json                               |    1 +
 src/bun.js/bindings/bindings.zig           |    4 +
 src/bun.js/module_loader.zig               |    4 +-
 src/bun.zig                                |   19 +
 src/cli.zig                                |   44 +-
 src/cli/patch_command.zig                  |    8 +
 src/cli/patch_commit_command.zig           |    8 +
 src/env.zig                                |    1 +
 src/install/extract_tarball.zig            |    6 +-
 src/install/install.zig                    | 4135 +++++++++++++-------
 src/install/lockfile.zig                   |  139 +-
 src/install/patch_install.zig              |  571 +++
 src/install/repository.zig                 |    2 +-
 src/js/internal-for-testing.ts             |    5 +
 src/js_ast.zig                             |   19 +
 src/logger.zig                             |    4 +
 src/output.zig                             |   12 +-
 src/patch.zig                              | 1365 +++++++
 src/resolver/resolver.zig                  |    1 +
 src/shell/interpreter.zig                  |   12 +-
 src/string_immutable.zig                   |   14 +
 src/sys.zig                                |   42 +-
 test/cli/install/bun-install-patch.test.ts |  382 ++
 test/cli/install/bun-install.test.ts       |    1 +
 test/harness.ts                            |    2 +-
 test/js/bun/patch/patch.test.ts            |  863 ++++
 test/js/bun/shell/bunshell.test.ts         |    2 +-
 31 files changed, 6273 insertions(+), 1420 deletions(-)
 create mode 100644 docs/cli/patch-commit.md
 create mode 100644 docs/cli/patch.md
 create mode 100644 src/cli/patch_command.zig
 create mode 100644 src/cli/patch_commit_command.zig
 create mode 100644 src/install/patch_install.zig
 create mode 100644 src/patch.zig
 create mode 100644 test/cli/install/bun-install-patch.test.ts
 create mode 100644 test/js/bun/patch/patch.test.ts

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 97ce2ea14f..d7116359f3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -340,6 +340,10 @@ if(NOT CANARY)
   set(CANARY 0)
 endif()
 
+if(NOT ENABLE_LOGS)
+  set(ENABLE_LOGS false)
+endif()
+
 if(NOT ZIG_OPTIMIZE)
   set(ZIG_OPTIMIZE ${DEFAULT_ZIG_OPTIMIZE})
 endif()
@@ -874,6 +878,7 @@ if(NOT BUN_LINK_ONLY AND NOT BUN_CPP_ONLY)
       "-Doptimize=${ZIG_OPTIMIZE}"
       "-Dcpu=${CPU_TARGET}"
       "-Dtarget=${ZIG_TARGET}"
+      "-Denable_logs=${ENABLE_LOGS}"
     DEPENDS
       "${CMAKE_CURRENT_SOURCE_DIR}/build.zig"
       "${ZIG_FILES}"
diff --git a/build.zig b/build.zig
index 17a80e983c..ac400df2e0 100644
--- a/build.zig
+++ b/build.zig
@@ -65,6 +65,7 @@ fn addInternalPackages(b: *Build, step: *CompileStep, _: std.mem.Allocator, _: [
 }
 
 const BunBuildOptions = struct {
+    enable_logs: bool = false,
     is_canary: bool = false,
     canary_revision: u32 = 0,
     sha: [:0]const u8 = "",
@@ -111,6 +112,7 @@ const BunBuildOptions = struct {
 
     pub fn step(this: BunBuildOptions, b: anytype) *std.build.OptionsStep {
         var opts = b.addOptions();
+        opts.addOption(@TypeOf(this.enable_logs), "enable_logs", this.enable_logs);
         opts.addOption(@TypeOf(this.is_canary), "is_canary", this.is_canary);
         opts.addOption(@TypeOf(this.canary_revision), "canary_revision", this.canary_revision);
         opts.addOption(
@@ -313,6 +315,8 @@ pub fn build_(b: *Build) !void {
         }
     }
 
+    const enable_logs = if (b.option(bool, "enable_logs", "Enable logs in release")) |l| l else false;
+
     const is_canary, const canary_revision = if (b.option(u32, "canary", "Treat this as a canary build")) |rev|
         if (rev == 0)
             .{ false, 0 }
@@ -321,6 +325,7 @@
     else
         .{ false, 0 };
     break :brk .{
+        .enable_logs = enable_logs,
         .is_canary = is_canary,
         .canary_revision = canary_revision,
         .version = b.option([]const u8, "version", "Value of `Bun.version`") orelse "0.0.0",
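The new `ENABLE_LOGS` option threads through three layers: CMake defaults it to `false` and forwards it to the Zig build as `-Denable_logs`, `BunBuildOptions.step` bakes it into `BuildOptions`, and `src/env.zig` below surfaces it as `Environment.allow_logs`. A minimal sketch of turning it on for a release build, mirroring the `build:release:with_logs` script this patch adds to package.json:

```bash
# Configure a release build with logging compiled in, then build it.
# Same shape as the build:release:with_logs script added below.
cmake . -DCMAKE_BUILD_TYPE=Release -DENABLE_LOGS=true -GNinja -Bbuild-release
ninja -Cbuild-release
```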
diff --git a/docs/cli/patch-commit.md b/docs/cli/patch-commit.md
new file mode 100644
index 0000000000..159022d08b
--- /dev/null
+++ b/docs/cli/patch-commit.md
@@ -0,0 +1,7 @@
+After preparing a package for patching with [`bun patch`](/docs/cli/patch), you can install the patched package with `bun patch-commit <path>`, where `<path>` is the directory `bun patch` prepared for you.
+
+### `--patches-dir`
+
+By default, `bun patch-commit` writes the generated patch file to the `patches` directory in the root of your project.
+
+You can specify a different directory with the `--patches-dir` flag.
diff --git a/docs/cli/patch.md b/docs/cli/patch.md
new file mode 100644
index 0000000000..922446543d
--- /dev/null
+++ b/docs/cli/patch.md
@@ -0,0 +1,10 @@
+If you need to modify the contents of a package, call `bun patch <pkg>` with the package's name (and optionally a version),
+for example:
+
+```bash
+$ bun patch react
+```
+
+This will copy the package to a temporary directory, where you can make changes to the package's contents.
+
+Once you're done making changes, run `bun patch-commit <path>` with the path to that directory to have Bun install the patched package.
diff --git a/package.json b/package.json
index 27103eb743..efac87afd9 100644
--- a/package.json
+++ b/package.json
@@ -34,6 +34,7 @@
     "build:tidy": "BUN_SILENT=1 cmake --log-level=WARNING . -DZIG_OPTIMIZE=Debug -DUSE_DEBUG_JSC=ON -DBUN_TIDY_ONLY=ON -DCMAKE_BUILD_TYPE=Debug -GNinja -Bbuild-tidy >> ${GITHUB_STEP_SUMMARY:-/dev/stdout} && BUN_SILENT=1 ninja -Cbuild-tidy >> ${GITHUB_STEP_SUMMARY:-/dev/stdout}",
     "build:tidy-extra": "cmake . -DZIG_OPTIMIZE=Debug -DUSE_DEBUG_JSC=ON -DBUN_TIDY_ONLY_EXTRA=ON -DCMAKE_BUILD_TYPE=Debug -GNinja -Bbuild-tidy && ninja -Cbuild-tidy",
     "build:release": "cmake . -DCMAKE_BUILD_TYPE=Release -GNinja -Bbuild-release && ninja -Cbuild-release",
+    "build:release:with_logs": "cmake . -DCMAKE_BUILD_TYPE=Release -DENABLE_LOGS=true -GNinja -Bbuild-release && ninja -Cbuild-release",
     "build:debug-zig-release": "cmake . -DCMAKE_BUILD_TYPE=Release -DZIG_OPTIMIZE=Debug -GNinja -Bbuild-debug-zig-release && ninja -Cbuild-debug-zig-release",
     "build:safe": "cmake . -DZIG_OPTIMIZE=ReleaseSafe -DUSE_DEBUG_JSC=ON -DCMAKE_BUILD_TYPE=Release -GNinja -Bbuild-safe && ninja -Cbuild-safe",
     "build:windows": "cmake -B build -S . -G Ninja -DCMAKE_BUILD_TYPE=Debug && ninja -Cbuild",
diff --git a/src/bun.js/bindings/bindings.zig b/src/bun.js/bindings/bindings.zig
index 19a6a28150..479ec60da7 100644
--- a/src/bun.js/bindings/bindings.zig
+++ b/src/bun.js/bindings/bindings.zig
@@ -1616,6 +1616,10 @@ pub const SystemError = extern struct {
         return @enumFromInt(this.errno * -1);
     }
 
+    pub fn toAnyhowError(this: SystemError) bun.anyhow.Error {
+        return bun.anyhow.Error.newSys(this);
+    }
+
     pub fn deref(this: *const SystemError) void {
         this.path.deref();
         this.code.deref();
diff --git a/src/bun.js/module_loader.zig b/src/bun.js/module_loader.zig
index fec8fa23ab..e839bca157 100644
--- a/src/bun.js/module_loader.zig
+++ b/src/bun.js/module_loader.zig
@@ -1069,7 +1069,9 @@ pub const ModuleLoader = struct {
                 const package = pm.lockfile.packages.get(package_id);
                 bun.assert(package.resolution.tag != .root);
 
-                switch (pm.determinePreinstallState(package, pm.lockfile)) {
+                var name_and_version_hash: ?u64 = null;
+                var patchfile_hash: ?u64 = null;
+                switch (pm.determinePreinstallState(package, pm.lockfile, &name_and_version_hash, &patchfile_hash)) {
                     .done => {
                         // we are only truly done if all the dependencies are done.
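Taken together, the two docs pages above describe a three-step workflow. A sketch of a full round trip (the package name, the edit, and the edited path are illustrative; `bun patch` prints the actual directory to use):

```bash
# 1. Prepare the package for patching. Bun prints the directory to edit.
bun patch react

# 2. Edit the prepared copy (use the path `bun patch` printed).
echo "// my fix" >> node_modules/react/index.js

# 3. Generate the .patch file and install the patched package.
bun patch-commit node_modules/react

# Optionally write the patch file somewhere other than the default directory:
bun patch-commit --patches-dir=mypatches node_modules/react
```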
const current_tasks = pm.total_tasks; diff --git a/src/bun.zig b/src/bun.zig index 6e6b1e9f26..8b3a30e00b 100644 --- a/src/bun.zig +++ b/src/bun.zig @@ -47,6 +47,8 @@ pub const PackageJSON = @import("./resolver/package_json.zig").PackageJSON; pub const fmt = @import("./fmt.zig"); pub const allocators = @import("./allocators.zig"); +pub const patch = @import("./patch.zig"); + pub const glob = @import("./glob.zig"); pub const shell = struct { @@ -140,6 +142,23 @@ pub const FileDescriptor = enum(FileDescriptorInt) { return toFD(std.fs.cwd().fd); } + pub fn eq(this: FileDescriptor, that: FileDescriptor) bool { + if (Environment.isPosix) return this.int() == that.int(); + + const this_ = FDImpl.decode(this); + const that_ = FDImpl.decode(that); + return switch (this_.kind) { + .system => switch (that_.kind) { + .system => this_.value.as_system == that_.value.as_system, + .uv => false, + }, + .uv => switch (that_.kind) { + .system => false, + .uv => this_.value.as_uv == that_.value.as_uv, + }, + }; + } + pub fn isStdio(fd: FileDescriptor) bool { // fd.assertValid(); const decoded = FDImpl.decode(fd); diff --git a/src/cli.zig b/src/cli.zig index 55752698ec..6f7b39726e 100644 --- a/src/cli.zig +++ b/src/cli.zig @@ -100,6 +100,8 @@ pub const UpdateCommand = @import("./cli/update_command.zig").UpdateCommand; pub const UpgradeCommand = @import("./cli/upgrade_command.zig").UpgradeCommand; pub const BunxCommand = @import("./cli/bunx_command.zig").BunxCommand; pub const ExecCommand = @import("./cli/exec_command.zig").ExecCommand; +pub const PatchCommand = @import("./cli/patch_command.zig").PatchCommand; +pub const PatchCommitCommand = @import("./cli/patch_commit_command.zig").PatchCommitCommand; pub const Arguments = struct { pub fn loader_resolver(in: string) !Api.Loader { @@ -1341,6 +1343,8 @@ pub const Command = struct { RootCommandMatcher.case("add"), RootCommandMatcher.case("a") => .AddCommand, RootCommandMatcher.case("update") => .UpdateCommand, + RootCommandMatcher.case("patch") => .PatchCommand, + RootCommandMatcher.case("patch-commit") => .PatchCommitCommand, RootCommandMatcher.case("r"), RootCommandMatcher.case("remove"), @@ -1475,6 +1479,20 @@ pub const Command = struct { try UpdateCommand.exec(ctx); return; }, + .PatchCommand => { + if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .PatchCommand) unreachable; + const ctx = try Command.init(allocator, log, .PatchCommand); + + try PatchCommand.exec(ctx); + return; + }, + .PatchCommitCommand => { + if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .PatchCommitCommand) unreachable; + const ctx = try Command.init(allocator, log, .PatchCommitCommand); + + try PatchCommitCommand.exec(ctx); + return; + }, .BunxCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .BunxCommand) unreachable; const ctx = try Command.init(allocator, log, .BunxCommand); @@ -2040,10 +2058,12 @@ pub const Command = struct { ReplCommand, ReservedCommand, ExecCommand, + PatchCommand, + PatchCommitCommand, /// Used by crash reports. 
/// - /// This must be kept in sync with + /// This must be kept in sync with https://github.com/oven-sh/bun.report/blob/62601d8aafb9c0d29554dfc3f8854044ec04d367/backend/remap.ts#L10 pub fn char(this: Tag) u8 { return switch (this) { .AddCommand => 'I', @@ -2069,6 +2089,8 @@ pub const Command = struct { .ReplCommand => 'G', .ReservedCommand => 'w', .ExecCommand => 'e', + .PatchCommand => 'x', + .PatchCommitCommand => 'z', }; } @@ -2294,14 +2316,24 @@ pub const Command = struct { pub fn readGlobalConfig(this: Tag) bool { return switch (this) { - .BunxCommand, .PackageManagerCommand, .InstallCommand, .AddCommand, .RemoveCommand, .UpdateCommand => true, + .BunxCommand, .PackageManagerCommand, .InstallCommand, .AddCommand, .RemoveCommand, .UpdateCommand, .PatchCommand, .PatchCommitCommand => true, else => false, }; } pub fn isNPMRelated(this: Tag) bool { return switch (this) { - .BunxCommand, .LinkCommand, .UnlinkCommand, .PackageManagerCommand, .InstallCommand, .AddCommand, .RemoveCommand, .UpdateCommand => true, + .BunxCommand, + .LinkCommand, + .UnlinkCommand, + .PackageManagerCommand, + .InstallCommand, + .AddCommand, + .RemoveCommand, + .UpdateCommand, + .PatchCommand, + .PatchCommitCommand, + => true, else => false, }; } @@ -2313,6 +2345,8 @@ pub const Command = struct { .AddCommand = true, .RemoveCommand = true, .UpdateCommand = true, + .PatchCommand = true, + .PatchCommitCommand = true, .PackageManagerCommand = true, .BunxCommand = true, .AutoCommand = true, @@ -2327,6 +2361,8 @@ pub const Command = struct { .AddCommand = true, .RemoveCommand = true, .UpdateCommand = true, + .PatchCommand = true, + .PatchCommitCommand = true, .PackageManagerCommand = true, .BunxCommand = true, }); @@ -2337,6 +2373,8 @@ pub const Command = struct { .AddCommand = false, .RemoveCommand = false, .UpdateCommand = false, + .PatchCommand = false, + .PatchCommitCommand = false, .PackageManagerCommand = false, .LinkCommand = false, .UnlinkCommand = false, diff --git a/src/cli/patch_command.zig b/src/cli/patch_command.zig new file mode 100644 index 0000000000..bc211fef24 --- /dev/null +++ b/src/cli/patch_command.zig @@ -0,0 +1,8 @@ +const Command = @import("../cli.zig").Command; +const PackageManager = @import("../install/install.zig").PackageManager; + +pub const PatchCommand = struct { + pub fn exec(ctx: Command.Context) !void { + try PackageManager.patch(ctx); + } +}; diff --git a/src/cli/patch_commit_command.zig b/src/cli/patch_commit_command.zig new file mode 100644 index 0000000000..fd8490e65c --- /dev/null +++ b/src/cli/patch_commit_command.zig @@ -0,0 +1,8 @@ +const Command = @import("../cli.zig").Command; +const PackageManager = @import("../install/install.zig").PackageManager; + +pub const PatchCommitCommand = struct { + pub fn exec(ctx: Command.Context) !void { + try PackageManager.patchCommit(ctx); + } +}; diff --git a/src/env.zig b/src/env.zig index b7b654da1c..1a9ed55e88 100644 --- a/src/env.zig +++ b/src/env.zig @@ -44,6 +44,7 @@ pub const is_canary = BuildOptions.is_canary; pub const canary_revision = if (is_canary) BuildOptions.canary_revision else ""; pub const dump_source = isDebug and !isTest; pub const base_path = BuildOptions.base_path ++ "/"; +pub const allow_logs = BuildOptions.enable_logs; pub const version: std.SemanticVersion = BuildOptions.version; pub const version_string = std.fmt.comptimePrint("{d}.{d}.{d}", .{ version.major, version.minor, version.patch }); diff --git a/src/install/extract_tarball.zig b/src/install/extract_tarball.zig index 44cecb272e..2635fe9e8d 100644 --- 
a/src/install/extract_tarball.zig +++ b/src/install/extract_tarball.zig @@ -279,9 +279,9 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD } } const folder_name = switch (this.resolution.tag) { - .npm => this.package_manager.cachedNPMPackageFolderNamePrint(&folder_name_buf, name, this.resolution.value.npm.version), - .github => PackageManager.cachedGitHubFolderNamePrint(&folder_name_buf, resolved), - .local_tarball, .remote_tarball => PackageManager.cachedTarballFolderNamePrint(&folder_name_buf, this.url.slice()), + .npm => this.package_manager.cachedNPMPackageFolderNamePrint(&folder_name_buf, name, this.resolution.value.npm.version, null), + .github => PackageManager.cachedGitHubFolderNamePrint(&folder_name_buf, resolved, null), + .local_tarball, .remote_tarball => PackageManager.cachedTarballFolderNamePrint(&folder_name_buf, this.url.slice(), null), else => unreachable, }; if (folder_name.len == 0 or (folder_name.len == 1 and folder_name[0] == '/')) @panic("Tried to delete root and stopped it"); diff --git a/src/install/install.zig b/src/install/install.zig index 0bf53875c4..34c8ae93ff 100644 --- a/src/install/install.zig +++ b/src/install/install.zig @@ -56,8 +56,22 @@ threadlocal var initialized_store = false; const Futex = @import("../futex.zig"); pub const Lockfile = @import("./lockfile.zig"); +pub const PatchedDep = Lockfile.PatchedDep; const Walker = @import("../walker_skippable.zig"); +const anyhow = bun.anyhow; + +pub const bun_hash_tag = ".bun-tag-"; +pub const max_hex_hash_len: comptime_int = brk: { + var buf: [128]u8 = undefined; + break :brk (std.fmt.bufPrint(buf[0..], "{x}", .{std.math.maxInt(u64)}) catch @panic("Buf wasn't big enough.")).len; +}; +pub const max_buntag_hash_buf_len: comptime_int = max_hex_hash_len + bun_hash_tag.len + 1; +pub const BuntagHashBuf = [max_buntag_hash_buf_len]u8; + +pub const patch = @import("./patch_install.zig"); +pub const PatchTask = patch.PatchTask; + // these bytes are skipped // so we just make it repeat bun bun bun bun bun bun bun bun bun // because why not @@ -111,6 +125,7 @@ pub fn initializeMiniStore() void { const IdentityContext = @import("../identity_context.zig").IdentityContext; const ArrayIdentityContext = @import("../identity_context.zig").ArrayIdentityContext; const NetworkQueue = std.fifo.LinearFifo(*NetworkTask, .{ .Static = 32 }); +const PatchTaskFifo = std.fifo.LinearFifo(*PatchTask, .{ .Static = 32 }); const Semver = @import("./semver.zig"); const ExternalString = Semver.ExternalString; const String = Semver.String; @@ -189,6 +204,7 @@ pub const ExternalStringMap = extern struct { value: ExternalStringList = .{}, }; +pub const PackageNameAndVersionHash = u64; pub const PackageNameHash = u64; // Use String.Builder.stringHash to compute this pub const TruncatedPackageNameHash = u32; // @truncate String.Builder.stringHash to compute this @@ -226,6 +242,8 @@ const NetworkTask = struct { git_checkout: void, local_tarball: void, }, + /// Key in patchedDependencies in package.json + apply_patch_task: ?*PatchTask = null, next: ?*NetworkTask = null, pub const DedupeMap = std.HashMap(u64, void, IdentityContext(u64), 80); @@ -532,6 +550,7 @@ pub const Features = struct { peer_dependencies: bool = true, trusted_dependencies: bool = false, workspaces: bool = false, + patched_dependencies: bool = false, check_for_duplicate_dependencies: bool = false, @@ -551,6 +570,7 @@ pub const Features = struct { .is_main = true, .optional_dependencies = true, .trusted_dependencies = true, + .patched_dependencies = 
true, .workspaces = true, }; @@ -581,16 +601,20 @@ pub const Features = struct { }; }; -pub const PreinstallState = enum(u2) { +pub const PreinstallState = enum(u4) { unknown = 0, - done = 1, - extract = 2, - extracting = 3, + done, + extract, + extracting, + calc_patch_hash, + calcing_patch_hash, + apply_patch, + applying_patch, }; /// Schedule long-running callbacks for a task /// Slow stuff is broken into tasks, each can run independently without locks -const Task = struct { +pub const Task = struct { tag: Tag, request: Request, data: Data, @@ -600,11 +624,13 @@ const Task = struct { id: u64, err: ?anyerror = null, package_manager: *PackageManager, + apply_patch_task: ?*PatchTask = null, next: ?*Task = null, /// An ID that lets us register a callback without keeping the same pointer around pub fn NewID(comptime Hasher: type, comptime IDType: type) type { return struct { + pub const Type = IDType; pub fn forNPMPackage(package_name: string, package_version: Semver.Version) IDType { var hasher = Hasher.init(0); hasher.update("npm-package:"); @@ -661,6 +687,17 @@ const Task = struct { var this = @fieldParentPtr(Task, "threadpool_task", task); const manager = this.package_manager; defer { + if (this.status == .success) { + if (this.apply_patch_task) |pt| { + defer pt.deinit(); + pt.apply() catch bun.outOfMemory(); + if (pt.callback.apply.logger.errors > 0) { + defer pt.callback.apply.logger.deinit(); + // this.log.addErrorFmt(null, logger.Loc.Empty, bun.default_allocator, "failed to apply patch: {}", .{e}) catch unreachable; + pt.callback.apply.logger.printForLogLevel(Output.writer()) catch {}; + } + } + } manager.resolve_tasks.push(this); manager.wake(); } @@ -875,1429 +912,1570 @@ pub const ExtractData = struct { } = null, }; -pub const PackageInstall = struct { - cache_dir: std.fs.Dir, - cache_dir_subpath: stringZ = "", - destination_dir_subpath: stringZ = "", - destination_dir_subpath_buf: []u8, +const PkgInstallKind = enum { + regular, + patch, +}; +pub const PackageInstall = NewPackageInstall(.regular); +pub const PreparePatchPackageInstall = NewPackageInstall(.patch); +pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { + const do_progress = kind != .patch; + const ProgressT = if (do_progress) *Progress else struct {}; + return struct { + cache_dir: std.fs.Dir, + cache_dir_subpath: stringZ = "", + destination_dir_subpath: stringZ = "", + destination_dir_subpath_buf: []u8, - allocator: std.mem.Allocator, + allocator: std.mem.Allocator, - progress: *Progress, + progress: ProgressT, - package_name: string, - package_version: string, - file_count: u32 = 0, - node_modules: *const PackageManager.NodeModulesFolder, + package_name: string, + package_version: string, + patch: Patch = .{}, + file_count: u32 = 0, + node_modules: *const PackageManager.NodeModulesFolder, - const debug = Output.scoped(.install, true); + const ThisPackageInstall = @This(); - pub const Summary = struct { - fail: u32 = 0, - success: u32 = 0, - skipped: u32 = 0, + const Patch = switch (kind) { + .regular => struct { + root_project_dir: ?[]const u8 = null, + patch_path: string = undefined, + patch_contents_hash: u64 = 0, - // bitset of dependency ids - successfully_installed: ?Bitset = null, + pub const NULL = Patch{}; - /// Package name hash -> number of scripts skipped. 
- /// Multiple versions of the same package might add to the count, and each version - /// might have a different number of scripts - packages_with_blocked_scripts: std.AutoArrayHashMapUnmanaged(TruncatedPackageNameHash, usize) = .{}, - }; - - pub const Method = enum { - clonefile, - - /// Slower than clonefile - clonefile_each_dir, - - /// On macOS, slow. - /// On Linux, fast. - hardlink, - - /// Slowest if single-threaded - /// Note that copyfile does technically support recursion - /// But I suspect it is slower in practice than manually doing it because: - /// - it adds syscalls - /// - it runs in userspace - /// - it reads each dir twice incase the first pass modifies it - copyfile, - - /// Used for file: when file: points to a parent directory - /// example: "file:../" - symlink, - - const BackendSupport = std.EnumArray(Method, bool); - pub const map = std.ComptimeStringMap(Method, .{ - .{ "clonefile", Method.clonefile }, - .{ "clonefile_each_dir", Method.clonefile_each_dir }, - .{ "hardlink", Method.hardlink }, - .{ "copyfile", Method.copyfile }, - .{ "symlink", Method.symlink }, - }); - - pub const macOS = BackendSupport.initDefault(false, .{ - .clonefile = true, - .clonefile_each_dir = true, - .hardlink = true, - .copyfile = true, - .symlink = true, - }); - - pub const linux = BackendSupport.initDefault(false, .{ - .hardlink = true, - .copyfile = true, - .symlink = true, - }); - - pub const windows = BackendSupport.initDefault(false, .{ - .hardlink = true, - .copyfile = true, - }); - - pub inline fn isSupported(this: Method) bool { - if (comptime Environment.isMac) return macOS.get(this); - if (comptime Environment.isLinux) return linux.get(this); - if (comptime Environment.isWindows) return windows.get(this); - - return false; - } - }; - - // 1. verify that .bun-tag exists (was it installed from bun?) - // 2. check .bun-tag against the resolved version - fn verifyGitResolution( - this: *PackageInstall, - repo: *const Repository, - buf: []const u8, - root_node_modules_dir: std.fs.Dir, - ) bool { - bun.copy(u8, this.destination_dir_subpath_buf[this.destination_dir_subpath.len..], std.fs.path.sep_str ++ ".bun-tag"); - this.destination_dir_subpath_buf[this.destination_dir_subpath.len + std.fs.path.sep_str.len + ".bun-tag".len] = 0; - const bun_tag_path: [:0]u8 = this.destination_dir_subpath_buf[0 .. 
this.destination_dir_subpath.len + std.fs.path.sep_str.len + ".bun-tag".len :0]; - defer this.destination_dir_subpath_buf[this.destination_dir_subpath.len] = 0; - var git_tag_stack_fallback = std.heap.stackFallback(2048, bun.default_allocator); - const allocator = git_tag_stack_fallback.get(); - - var destination_dir = this.node_modules.openDir(root_node_modules_dir) catch return false; - defer { - if (std.fs.cwd().fd != destination_dir.fd) destination_dir.close(); - } - - const bun_tag_file = File.readFrom( - destination_dir, - bun_tag_path, - allocator, - ).unwrap() catch return false; - defer allocator.free(bun_tag_file); - - return strings.eqlLong(repo.resolved.slice(buf), bun_tag_file, true); - } - - pub fn verify( - this: *PackageInstall, - resolution: *const Resolution, - buf: []const u8, - root_node_modules_dir: std.fs.Dir, - ) bool { - return switch (resolution.tag) { - .git => this.verifyGitResolution(&resolution.value.git, buf, root_node_modules_dir), - .github => this.verifyGitResolution(&resolution.value.github, buf, root_node_modules_dir), - else => this.verifyPackageJSONNameAndVersion(root_node_modules_dir, resolution.tag), + pub fn isNull(this: Patch) bool { + return this.root_project_dir == null; + } + }, + .patch => struct {}, }; - } - fn verifyPackageJSONNameAndVersion(this: *PackageInstall, root_node_modules_dir: std.fs.Dir, resolution_tag: Resolution.Tag) bool { - const allocator = this.allocator; - var total: usize = 0; - var read: usize = 0; + const debug = Output.scoped(.install, true); - var body_pool = Npm.Registry.BodyPool.get(allocator); - var mutable: MutableString = body_pool.data; - defer { - body_pool.data = mutable; - Npm.Registry.BodyPool.release(body_pool); + pub const Summary = struct { + fail: u32 = 0, + success: u32 = 0, + skipped: u32 = 0, + successfully_installed: ?Bitset = null, + + /// Package name hash -> number of scripts skipped. + /// Multiple versions of the same package might add to the count, and each version + /// might have a different number of scripts + packages_with_blocked_scripts: std.AutoArrayHashMapUnmanaged(TruncatedPackageNameHash, usize) = .{}, + }; + + pub const Method = enum { + clonefile, + + /// Slower than clonefile + clonefile_each_dir, + + /// On macOS, slow. + /// On Linux, fast. 
+            hardlink,
+
+            /// Slowest if single-threaded
+            /// Note that copyfile does technically support recursion
+            /// But I suspect it is slower in practice than manually doing it because:
+            /// - it adds syscalls
+            /// - it runs in userspace
+            /// - it reads each dir twice in case the first pass modifies it
+            copyfile,
+
+            /// Used for file: when file: points to a parent directory
+            /// example: "file:../"
+            symlink,
+
+            const BackendSupport = std.EnumArray(Method, bool);
+            pub const map = std.ComptimeStringMap(Method, .{
+                .{ "clonefile", Method.clonefile },
+                .{ "clonefile_each_dir", Method.clonefile_each_dir },
+                .{ "hardlink", Method.hardlink },
+                .{ "copyfile", Method.copyfile },
+                .{ "symlink", Method.symlink },
+            });
+
+            pub const macOS = BackendSupport.initDefault(false, .{
+                .clonefile = true,
+                .clonefile_each_dir = true,
+                .hardlink = true,
+                .copyfile = true,
+                .symlink = true,
+            });
+
+            pub const linux = BackendSupport.initDefault(false, .{
+                .hardlink = true,
+                .copyfile = true,
+                .symlink = true,
+            });
+
+            pub const windows = BackendSupport.initDefault(false, .{
+                .hardlink = true,
+                .copyfile = true,
+            });
+
+            pub inline fn isSupported(this: Method) bool {
+                if (comptime Environment.isMac) return macOS.get(this);
+                if (comptime Environment.isLinux) return linux.get(this);
+                if (comptime Environment.isWindows) return windows.get(this);
+
+                return false;
+            }
+        };
+
+        ///
+        fn verifyPatchHash(
+            this: *@This(),
+            root_node_modules_dir: std.fs.Dir,
+        ) bool {
+            bun.debugAssert(!this.patch.isNull());
+
+            // hash from the .patch file, to be checked against bun tag
+            const patchfile_contents_hash = this.patch.patch_contents_hash;
+            var buf: BuntagHashBuf = undefined;
+            @memcpy(buf[0..bun_hash_tag.len], bun_hash_tag);
+            const digits = std.fmt.bufPrint(buf[bun_hash_tag.len..], "{x}", .{patchfile_contents_hash}) catch bun.outOfMemory();
+            const bunhashtag = buf[0 .. bun_hash_tag.len + digits.len];
+
+            const patch_tag_path = bun.path.joinZ(&[_][]const u8{
+                this.destination_dir_subpath,
+                bunhashtag,
+            }, .posix);
+
+            if (comptime bun.Environment.isPosix) {
+                _ = bun.sys.fstatat(bun.toFD(root_node_modules_dir.fd), patch_tag_path).unwrap() catch return false;
+            } else {
+                switch (bun.sys.openat(bun.toFD(root_node_modules_dir.fd), patch_tag_path, std.os.O.RDONLY, 0)) {
+                    .err => return false,
+                    .result => |fd| _ = bun.sys.close(fd),
+                }
+            }
+            return true;
+        }
+
+        // 1. verify that .bun-tag exists (was it installed from bun?)
+        // 2.
check .bun-tag against the resolved version + fn verifyGitResolution( + this: *@This(), + repo: *const Repository, + buf: []const u8, + root_node_modules_dir: std.fs.Dir, + ) bool { + bun.copy(u8, this.destination_dir_subpath_buf[this.destination_dir_subpath.len..], std.fs.path.sep_str ++ ".bun-tag"); + this.destination_dir_subpath_buf[this.destination_dir_subpath.len + std.fs.path.sep_str.len + ".bun-tag".len] = 0; + const bun_tag_path: [:0]u8 = this.destination_dir_subpath_buf[0 .. this.destination_dir_subpath.len + std.fs.path.sep_str.len + ".bun-tag".len :0]; defer this.destination_dir_subpath_buf[this.destination_dir_subpath.len] = 0; + var git_tag_stack_fallback = std.heap.stackFallback(2048, bun.default_allocator); + const allocator = git_tag_stack_fallback.get(); var destination_dir = this.node_modules.openDir(root_node_modules_dir) catch return false; defer { if (std.fs.cwd().fd != destination_dir.fd) destination_dir.close(); } - var package_json_file = File.openat(destination_dir, package_json_path, std.os.O.RDONLY, 0).unwrap() catch return false; - defer package_json_file.close(); + const bun_tag_file = File.readFrom( + destination_dir, + bun_tag_path, + allocator, + ).unwrap() catch return false; + defer allocator.free(bun_tag_file); - // Heuristic: most package.jsons will be less than 2048 bytes. - read = package_json_file.read(mutable.list.items[total..]).unwrap() catch return false; - var remain = mutable.list.items[@min(total, read)..]; - if (read > 0 and remain.len < 1024) { - mutable.growBy(4096) catch return false; - mutable.list.expandToCapacity(); + return strings.eqlLong(repo.resolved.slice(buf), bun_tag_file, true); + } + + // TODO: patched dependencies + pub fn verify( + this: *@This(), + resolution: *const Resolution, + buf: []const u8, + root_node_modules_dir: std.fs.Dir, + ) bool { + const verified = + switch (resolution.tag) { + .git => this.verifyGitResolution(&resolution.value.git, buf, root_node_modules_dir), + .github => this.verifyGitResolution(&resolution.value.github, buf, root_node_modules_dir), + else => this.verifyPackageJSONNameAndVersion(root_node_modules_dir, resolution.tag), + }; + if (comptime kind == .patch) return verified; + if (this.patch.isNull()) return verified; + if (!verified) return false; + return this.verifyPatchHash(root_node_modules_dir); + } + + fn verifyPackageJSONNameAndVersion(this: *PackageInstall, root_node_modules_dir: std.fs.Dir, resolution_tag: Resolution.Tag) bool { + const allocator = this.allocator; + var total: usize = 0; + var read: usize = 0; + + var body_pool = Npm.Registry.BodyPool.get(allocator); + var mutable: MutableString = body_pool.data; + defer { + body_pool.data = mutable; + Npm.Registry.BodyPool.release(body_pool); } - while (read > 0) : (read = package_json_file.read(remain).unwrap() catch return false) { - total += read; - + // Read the file + // Return false on any error. + // Don't keep it open while we're parsing the JSON. + // The longer the file stays open, the more likely it causes issues for + // other processes on Windows. + const source = brk: { + mutable.reset(); mutable.list.expandToCapacity(); - remain = mutable.list.items[total..]; + bun.copy(u8, this.destination_dir_subpath_buf[this.destination_dir_subpath.len..], std.fs.path.sep_str ++ "package.json"); + this.destination_dir_subpath_buf[this.destination_dir_subpath.len + std.fs.path.sep_str.len + "package.json".len] = 0; + const package_json_path: [:0]u8 = this.destination_dir_subpath_buf[0 .. 
this.destination_dir_subpath.len + std.fs.path.sep_str.len + "package.json".len :0]; + defer this.destination_dir_subpath_buf[this.destination_dir_subpath.len] = 0; - if (remain.len < 1024) { + var destination_dir = this.node_modules.openDir(root_node_modules_dir) catch return false; + defer { + if (std.fs.cwd().fd != destination_dir.fd) destination_dir.close(); + } + + var package_json_file = File.openat(destination_dir, package_json_path, std.os.O.RDONLY, 0).unwrap() catch return false; + defer package_json_file.close(); + + // Heuristic: most package.jsons will be less than 2048 bytes. + read = package_json_file.read(mutable.list.items[total..]).unwrap() catch return false; + var remain = mutable.list.items[@min(total, read)..]; + if (read > 0 and remain.len < 1024) { mutable.growBy(4096) catch return false; + mutable.list.expandToCapacity(); } - mutable.list.expandToCapacity(); - remain = mutable.list.items[total..]; - } - // If it's not long enough to have {"name": "foo", "version": "1.2.0"}, there's no way it's valid - const minimum = if (resolution_tag == .workspace and this.package_version.len == 0) - // workspaces aren't required to have a version - "{\"name\":\"\"}".len + this.package_name.len - else - "{\"name\":\"\",\"version\":\"\"}".len + this.package_name.len + this.package_version.len; + while (read > 0) : (read = package_json_file.read(remain).unwrap() catch return false) { + total += read; - if (total < minimum) return false; + mutable.list.expandToCapacity(); + remain = mutable.list.items[total..]; - break :brk logger.Source.initPathString(bun.span(package_json_path), mutable.list.items[0..total]); - }; - - var log = logger.Log.init(allocator); - defer log.deinit(); - - initializeStore(); - - var package_json_checker = json_parser.PackageJSONVersionChecker.init(allocator, &source, &log) catch return false; - _ = package_json_checker.parseExpr() catch return false; - if (log.errors > 0 or !package_json_checker.has_found_name) return false; - // workspaces aren't required to have a version - if (!package_json_checker.has_found_version and resolution_tag != .workspace) return false; - - const found_version = package_json_checker.found_version; - // Check if the version matches - if (!strings.eql(found_version, this.package_version)) { - const offset = brk: { - // ASCII only. - for (0..found_version.len) |c| { - switch (found_version[c]) { - // newlines & whitespace - ' ', - '\t', - '\n', - '\r', - std.ascii.control_code.vt, - std.ascii.control_code.ff, - - // version separators - 'v', - '=', - => {}, - else => { - break :brk c; - }, + if (remain.len < 1024) { + mutable.growBy(4096) catch return false; } + mutable.list.expandToCapacity(); + remain = mutable.list.items[total..]; } - // If we didn't find any of these characters, there's no point in checking the version again. - // it will never match. 
- return false; + + // If it's not long enough to have {"name": "foo", "version": "1.2.0"}, there's no way it's valid + const minimum = if (resolution_tag == .workspace and this.package_version.len == 0) + // workspaces aren't required to have a version + "{\"name\":\"\"}".len + this.package_name.len + else + "{\"name\":\"\",\"version\":\"\"}".len + this.package_name.len + this.package_version.len; + + if (total < minimum) return false; + + break :brk logger.Source.initPathString(bun.span(package_json_path), mutable.list.items[0..total]); }; - if (!strings.eql(found_version[offset..], this.package_version)) return false; - } + var log = logger.Log.init(allocator); + defer log.deinit(); - // lastly, check the name. - return strings.eql(package_json_checker.found_name, this.package_name); - } + initializeStore(); - pub const Result = union(Tag) { - success: void, - fail: struct { - err: anyerror, - step: Step, + var package_json_checker = json_parser.PackageJSONVersionChecker.init(allocator, &source, &log) catch return false; + _ = package_json_checker.parseExpr() catch return false; + if (log.errors > 0 or !package_json_checker.has_found_name) return false; + // workspaces aren't required to have a version + if (!package_json_checker.has_found_version and resolution_tag != .workspace) return false; - pub inline fn isPackageMissingFromCache(this: @This()) bool { - return (this.err == error.FileNotFound or this.err == error.ENOENT) and this.step == .opening_cache_dir; + const found_version = package_json_checker.found_version; + // Check if the version matches + if (!strings.eql(found_version, this.package_version)) { + const offset = brk: { + // ASCII only. + for (0..found_version.len) |c| { + switch (found_version[c]) { + // newlines & whitespace + ' ', + '\t', + '\n', + '\r', + std.ascii.control_code.vt, + std.ascii.control_code.ff, + + // version separators + 'v', + '=', + => {}, + else => { + break :brk c; + }, + } + } + // If we didn't find any of these characters, there's no point in checking the version again. + // it will never match. + return false; + }; + + if (!strings.eql(found_version[offset..], this.package_version)) return false; } - }, - pub inline fn success() Result { - return .{ .success = {} }; + // lastly, check the name. 
+ return strings.eql(package_json_checker.found_name, this.package_name); } - pub fn fail(err: anyerror, step: Step) Result { - return .{ - .fail = .{ - .err = err, - .step = step, + pub const Result = union(Tag) { + success: void, + fail: struct { + err: anyerror, + step: Step, + + pub inline fn isPackageMissingFromCache(this: @This()) bool { + return (this.err == error.FileNotFound or this.err == error.ENOENT) and this.step == .opening_cache_dir; + } + }, + + pub inline fn success() Result { + return .{ .success = {} }; + } + + pub fn fail(err: anyerror, step: Step) Result { + return .{ + .fail = .{ + .err = err, + .step = step, + }, + }; + } + + pub fn isFail(this: @This()) bool { + return switch (this) { + .success => false, + .fail => true, + }; + } + + pub const Tag = enum { + success, + fail, + }; + }; + + pub const Step = enum { + copyfile, + opening_cache_dir, + opening_dest_dir, + copying_files, + linking, + linking_dependency, + patching, + + pub fn name(this: Step) []const u8 { + return switch (this) { + .copyfile, .copying_files => "copying files from cache to destination", + .opening_cache_dir => "opening cache/package/version dir", + .opening_dest_dir => "opening node_modules/package dir", + .linking => "linking bins", + .linking_dependency => "linking dependency/workspace to node_modules", + .patching => "patching dependency", + }; + } + }; + + var supported_method: Method = if (Environment.isMac) + Method.clonefile + else + Method.hardlink; + + fn installWithClonefileEachDir(this: *@This(), destination_dir: std.fs.Dir) !Result { + var cached_package_dir = bun.openDir(this.cache_dir, this.cache_dir_subpath) catch |err| return Result{ + .fail = .{ .err = err, .step = .opening_cache_dir }, + }; + defer cached_package_dir.close(); + var walker_ = Walker.walk( + cached_package_dir, + this.allocator, + &[_]bun.OSPathSlice{}, + &[_]bun.OSPathSlice{}, + ) catch |err| return Result{ + .fail = .{ .err = err, .step = .opening_cache_dir }, + }; + defer walker_.deinit(); + + const FileCopier = struct { + pub fn copy( + destination_dir_: std.fs.Dir, + walker: *Walker, + ) !u32 { + var real_file_count: u32 = 0; + var stackpath: [bun.MAX_PATH_BYTES]u8 = undefined; + while (try walker.next()) |entry| { + switch (entry.kind) { + .directory => { + _ = bun.sys.mkdirat(bun.toFD(destination_dir_.fd), entry.path, 0o755); + }, + .file => { + bun.copy(u8, &stackpath, entry.path); + stackpath[entry.path.len] = 0; + const path: [:0]u8 = stackpath[0..entry.path.len :0]; + const basename: [:0]u8 = stackpath[entry.path.len - entry.basename.len .. 
entry.path.len :0]; + switch (C.clonefileat( + entry.dir.fd, + basename, + destination_dir_.fd, + path, + 0, + )) { + 0 => {}, + else => |errno| switch (std.os.errno(errno)) { + .XDEV => return error.NotSupported, // not same file system + .OPNOTSUPP => return error.NotSupported, + .NOENT => return error.FileNotFound, + // sometimes the downlowded npm package has already node_modules with it, so just ignore exist error here + .EXIST => {}, + .ACCES => return error.AccessDenied, + else => return error.Unexpected, + }, + } + + real_file_count += 1; + }, + else => {}, + } + } + + return real_file_count; + } + }; + + var subdir = destination_dir.makeOpenPath(bun.span(this.destination_dir_subpath), .{}) catch |err| return Result{ + .fail = .{ .err = err, .step = .opening_dest_dir }, + }; + + defer subdir.close(); + + this.file_count = FileCopier.copy( + subdir, + &walker_, + ) catch |err| return Result{ + .fail = .{ .err = err, .step = .copying_files }, + }; + + return Result{ + .success = {}, + }; + } + + // https://www.unix.com/man-page/mojave/2/fclonefileat/ + fn installWithClonefile(this: *@This(), destination_dir: std.fs.Dir) !Result { + if (comptime !Environment.isMac) @compileError("clonefileat() is macOS only."); + + if (this.destination_dir_subpath[0] == '@') { + if (strings.indexOfCharZ(this.destination_dir_subpath, std.fs.path.sep)) |slash| { + this.destination_dir_subpath_buf[slash] = 0; + const subdir = this.destination_dir_subpath_buf[0..slash :0]; + destination_dir.makeDirZ(subdir) catch {}; + this.destination_dir_subpath_buf[slash] = std.fs.path.sep; + } + } + + return switch (C.clonefileat( + this.cache_dir.fd, + this.cache_dir_subpath, + destination_dir.fd, + this.destination_dir_subpath, + 0, + )) { + 0 => .{ .success = {} }, + else => |errno| switch (std.os.errno(errno)) { + .XDEV => error.NotSupported, // not same file system + .OPNOTSUPP => error.NotSupported, + .NOENT => error.FileNotFound, + // We first try to delete the directory + // But, this can happen if this package contains a node_modules folder + // We want to continue installing as many packages as we can, so we shouldn't block while downloading + // We use the slow path in this case + .EXIST => try this.installWithClonefileEachDir(destination_dir), + .ACCES => return error.AccessDenied, + else => error.Unexpected, }, }; } - pub fn isFail(this: @This()) bool { - return switch (this) { - .success => false, - .fail => true, - }; - } + const InstallDirState = struct { + cached_package_dir: std.fs.Dir = undefined, + walker: Walker = undefined, + subdir: std.fs.Dir = if (Environment.isWindows) std.fs.Dir{ .fd = std.os.windows.INVALID_HANDLE_VALUE } else undefined, + buf: bun.windows.WPathBuffer = if (Environment.isWindows) undefined else {}, + buf2: bun.windows.WPathBuffer = if (Environment.isWindows) undefined else {}, + to_copy_buf: if (Environment.isWindows) []u16 else void = if (Environment.isWindows) undefined else {}, + to_copy_buf2: if (Environment.isWindows) []u16 else void = if (Environment.isWindows) undefined else {}, - pub const Tag = enum { - success, - fail, - }; - }; - - pub const Step = enum { - copyfile, - opening_cache_dir, - opening_dest_dir, - copying_files, - linking, - linking_dependency, - - pub fn name(this: Step) []const u8 { - return switch (this) { - .copyfile, .copying_files => "copying files from cache to destination", - .opening_cache_dir => "opening cache/package/version dir", - .opening_dest_dir => "opening node_modules/package dir", - .linking => "linking bins", - .linking_dependency 
=> "linking dependency/workspace to node_modules", - }; - } - }; - - var supported_method: Method = if (Environment.isMac) - Method.clonefile - else - Method.hardlink; - - fn installWithClonefileEachDir(this: *PackageInstall, destination_dir: std.fs.Dir) !Result { - var cached_package_dir = bun.openDir(this.cache_dir, this.cache_dir_subpath) catch |err| return Result{ - .fail = .{ .err = err, .step = .opening_cache_dir }, - }; - defer cached_package_dir.close(); - var walker_ = Walker.walk( - cached_package_dir, - this.allocator, - &[_]bun.OSPathSlice{}, - &[_]bun.OSPathSlice{}, - ) catch |err| return Result{ - .fail = .{ .err = err, .step = .opening_cache_dir }, - }; - defer walker_.deinit(); - - const FileCopier = struct { - pub fn copy( - destination_dir_: std.fs.Dir, - walker: *Walker, - ) !u32 { - var real_file_count: u32 = 0; - var stackpath: bun.PathBuffer = undefined; - while (try walker.next()) |entry| { - switch (entry.kind) { - .directory => { - _ = bun.sys.mkdirat(bun.toFD(destination_dir_.fd), entry.path, 0o755); - }, - .file => { - bun.copy(u8, &stackpath, entry.path); - stackpath[entry.path.len] = 0; - const path: [:0]u8 = stackpath[0..entry.path.len :0]; - const basename: [:0]u8 = stackpath[entry.path.len - entry.basename.len .. entry.path.len :0]; - switch (C.clonefileat( - entry.dir.fd, - basename, - destination_dir_.fd, - path, - 0, - )) { - 0 => {}, - else => |errno| switch (std.os.errno(errno)) { - .XDEV => return error.NotSupported, // not same file system - .OPNOTSUPP => return error.NotSupported, - .NOENT => return error.FileNotFound, - // sometimes the downlowded npm package has already node_modules with it, so just ignore exist error here - .EXIST => {}, - .ACCES => return error.AccessDenied, - else => return error.Unexpected, - }, - } - - real_file_count += 1; - }, - else => {}, - } + pub fn deinit(this: *@This()) void { + if (!Environment.isWindows) { + this.subdir.close(); } - - return real_file_count; + defer this.walker.deinit(); + defer this.cached_package_dir.close(); } }; - var subdir = destination_dir.makeOpenPath(bun.span(this.destination_dir_subpath), .{}) catch |err| return Result{ - .fail = .{ .err = err, .step = .opening_dest_dir }, - }; + threadlocal var node_fs_for_package_installer: bun.JSC.Node.NodeFS = .{}; - defer subdir.close(); + fn initInstallDir(this: *@This(), state: *InstallDirState, destination_dir: std.fs.Dir) Result { + const destbase = destination_dir; + const destpath = this.destination_dir_subpath; - this.file_count = FileCopier.copy( - subdir, - &walker_, - ) catch |err| return Result{ - .fail = .{ .err = err, .step = .copying_files }, - }; + state.cached_package_dir = bun.openDir(this.cache_dir, this.cache_dir_subpath) catch |err| return Result{ + .fail = .{ .err = err, .step = .opening_cache_dir }, + }; + state.walker = Walker.walk( + state.cached_package_dir, + this.allocator, + &[_]bun.OSPathSlice{}, + &[_]bun.OSPathSlice{}, + ) catch bun.outOfMemory(); - return Result{ - .success = {}, - }; - } - - // https://www.unix.com/man-page/mojave/2/fclonefileat/ - fn installWithClonefile(this: *PackageInstall, destination_dir: std.fs.Dir) !Result { - if (comptime !Environment.isMac) @compileError("clonefileat() is macOS only."); - - if (this.destination_dir_subpath[0] == '@') { - if (strings.indexOfCharZ(this.destination_dir_subpath, std.fs.path.sep)) |slash| { - this.destination_dir_subpath_buf[slash] = 0; - const subdir = this.destination_dir_subpath_buf[0..slash :0]; - destination_dir.makeDirZ(subdir) catch {}; - 
this.destination_dir_subpath_buf[slash] = std.fs.path.sep; - } - } - - return switch (C.clonefileat( - this.cache_dir.fd, - this.cache_dir_subpath, - destination_dir.fd, - this.destination_dir_subpath, - 0, - )) { - 0 => .{ .success = {} }, - else => |errno| switch (std.os.errno(errno)) { - .XDEV => error.NotSupported, // not same file system - .OPNOTSUPP => error.NotSupported, - .NOENT => error.FileNotFound, - // We first try to delete the directory - // But, this can happen if this package contains a node_modules folder - // We want to continue installing as many packages as we can, so we shouldn't block while downloading - // We use the slow path in this case - .EXIST => try this.installWithClonefileEachDir(destination_dir), - .ACCES => return error.AccessDenied, - else => error.Unexpected, - }, - }; - } - - const InstallDirState = struct { - cached_package_dir: std.fs.Dir = undefined, - walker: Walker = undefined, - subdir: std.fs.Dir = if (Environment.isWindows) std.fs.Dir{ .fd = std.os.windows.INVALID_HANDLE_VALUE } else undefined, - buf: bun.windows.WPathBuffer = if (Environment.isWindows) undefined else {}, - buf2: bun.windows.WPathBuffer = if (Environment.isWindows) undefined else {}, - to_copy_buf: if (Environment.isWindows) []u16 else void = if (Environment.isWindows) undefined else {}, - to_copy_buf2: if (Environment.isWindows) []u16 else void = if (Environment.isWindows) undefined else {}, - - pub fn deinit(this: *@This()) void { if (!Environment.isWindows) { - this.subdir.close(); + state.subdir = destbase.makeOpenPath(bun.span(destpath), .{ + .iterate = true, + .access_sub_paths = true, + }) catch |err| { + state.cached_package_dir.close(); + state.walker.deinit(); + return Result.fail(err, .opening_dest_dir); + }; + return Result.success(); } - defer this.walker.deinit(); - defer this.cached_package_dir.close(); - } - }; - threadlocal var node_fs_for_package_installer: bun.JSC.Node.NodeFS = .{}; - - fn initInstallDir(this: *PackageInstall, state: *InstallDirState, destination_dir: std.fs.Dir) Result { - const destbase = destination_dir; - const destpath = this.destination_dir_subpath; - - state.cached_package_dir = bun.openDir(this.cache_dir, this.cache_dir_subpath) catch |err| return Result{ - .fail = .{ .err = err, .step = .opening_cache_dir }, - }; - state.walker = Walker.walk( - state.cached_package_dir, - this.allocator, - &[_]bun.OSPathSlice{}, - &[_]bun.OSPathSlice{}, - ) catch bun.outOfMemory(); - - if (!Environment.isWindows) { - state.subdir = destbase.makeOpenPath(bun.span(destpath), .{ - .iterate = true, - .access_sub_paths = true, - }) catch |err| { + const dest_path_length = bun.windows.kernel32.GetFinalPathNameByHandleW(destbase.fd, &state.buf, state.buf.len, 0); + if (dest_path_length == 0) { + const e = bun.windows.Win32Error.get(); + const err = if (e.toSystemErrno()) |sys_err| bun.errnoToZigErr(sys_err) else error.Unexpected; state.cached_package_dir.close(); state.walker.deinit(); return Result.fail(err, .opening_dest_dir); + } + + var i: usize = dest_path_length; + if (state.buf[i] != '\\') { + state.buf[i] = '\\'; + i += 1; + } + + i += bun.strings.toWPathNormalized(state.buf[i..], destpath).len; + state.buf[i] = std.fs.path.sep_windows; + i += 1; + state.buf[i] = 0; + const fullpath = state.buf[0..i :0]; + + _ = node_fs_for_package_installer.mkdirRecursiveOSPathImpl(void, {}, fullpath, 0, false).unwrap() catch |err| { + state.cached_package_dir.close(); + state.walker.deinit(); + return Result.fail(err, .copying_files); }; + state.to_copy_buf = 
state.buf[fullpath.len..]; + + const cache_path_length = bun.windows.kernel32.GetFinalPathNameByHandleW(state.cached_package_dir.fd, &state.buf2, state.buf2.len, 0); + if (cache_path_length == 0) { + const e = bun.windows.Win32Error.get(); + const err = if (e.toSystemErrno()) |sys_err| bun.errnoToZigErr(sys_err) else error.Unexpected; + state.cached_package_dir.close(); + state.walker.deinit(); + return Result.fail(err, .copying_files); + } + const cache_path = state.buf2[0..cache_path_length]; + var to_copy_buf2: []u16 = undefined; + if (state.buf2[cache_path.len - 1] != '\\') { + state.buf2[cache_path.len] = '\\'; + to_copy_buf2 = state.buf2[cache_path.len + 1 ..]; + } else { + to_copy_buf2 = state.buf2[cache_path.len..]; + } + + state.to_copy_buf2 = to_copy_buf2; return Result.success(); } - const dest_path_length = bun.windows.kernel32.GetFinalPathNameByHandleW(destbase.fd, &state.buf, state.buf.len, 0); - if (dest_path_length == 0) { - const e = bun.windows.Win32Error.get(); - const err = if (e.toSystemErrno()) |sys_err| bun.errnoToZigErr(sys_err) else error.Unexpected; - state.cached_package_dir.close(); - state.walker.deinit(); - return Result.fail(err, .opening_dest_dir); - } + fn installWithCopyfile(this: *@This(), destination_dir: std.fs.Dir) Result { + var state = InstallDirState{}; + const res = this.initInstallDir(&state, destination_dir); + if (res.isFail()) return res; + defer state.deinit(); - var i: usize = dest_path_length; - if (state.buf[i] != '\\') { - state.buf[i] = '\\'; - i += 1; - } + const FileCopier = struct { + pub fn copy( + destination_dir_: std.fs.Dir, + walker: *Walker, + progress_: ProgressT, + to_copy_into1: if (Environment.isWindows) []u16 else void, + head1: if (Environment.isWindows) []u16 else void, + to_copy_into2: if (Environment.isWindows) []u16 else void, + head2: if (Environment.isWindows) []u16 else void, + ) !u32 { + var real_file_count: u32 = 0; - i += bun.strings.toWPathNormalized(state.buf[i..], destpath).len; - state.buf[i] = std.fs.path.sep_windows; - i += 1; - state.buf[i] = 0; - const fullpath = state.buf[0..i :0]; + var copy_file_state: bun.CopyFileState = .{}; - _ = node_fs_for_package_installer.mkdirRecursiveOSPathImpl(void, {}, fullpath, 0, false).unwrap() catch |err| { - state.cached_package_dir.close(); - state.walker.deinit(); - return Result.fail(err, .copying_files); - }; - state.to_copy_buf = state.buf[fullpath.len..]; - - const cache_path_length = bun.windows.kernel32.GetFinalPathNameByHandleW(state.cached_package_dir.fd, &state.buf2, state.buf2.len, 0); - if (cache_path_length == 0) { - const e = bun.windows.Win32Error.get(); - const err = if (e.toSystemErrno()) |sys_err| bun.errnoToZigErr(sys_err) else error.Unexpected; - state.cached_package_dir.close(); - state.walker.deinit(); - return Result.fail(err, .copying_files); - } - const cache_path = state.buf2[0..cache_path_length]; - var to_copy_buf2: []u16 = undefined; - if (state.buf2[cache_path.len - 1] != '\\') { - state.buf2[cache_path.len] = '\\'; - to_copy_buf2 = state.buf2[cache_path.len + 1 ..]; - } else { - to_copy_buf2 = state.buf2[cache_path.len..]; - } - - state.to_copy_buf2 = to_copy_buf2; - return Result.success(); - } - - fn installWithCopyfile(this: *PackageInstall, destination_dir: std.fs.Dir) Result { - var state = InstallDirState{}; - const res = this.initInstallDir(&state, destination_dir); - if (res.isFail()) return res; - defer state.deinit(); - - const FileCopier = struct { - pub fn copy( - destination_dir_: std.fs.Dir, - walker: *Walker, - progress_: 
*Progress, - to_copy_into1: if (Environment.isWindows) []u16 else void, - head1: if (Environment.isWindows) []u16 else void, - to_copy_into2: if (Environment.isWindows) []u16 else void, - head2: if (Environment.isWindows) []u16 else void, - ) !u32 { - var real_file_count: u32 = 0; - - var copy_file_state: bun.CopyFileState = .{}; - - while (try walker.next()) |entry| { - if (comptime Environment.isWindows) { - switch (entry.kind) { - .directory, .file => {}, - else => continue, - } - - if (entry.path.len > to_copy_into1.len or entry.path.len > to_copy_into2.len) { - return error.NameTooLong; - } - - @memcpy(to_copy_into1[0..entry.path.len], entry.path); - head1[entry.path.len + (head1.len - to_copy_into1.len)] = 0; - const dest: [:0]u16 = head1[0 .. entry.path.len + head1.len - to_copy_into1.len :0]; - - @memcpy(to_copy_into2[0..entry.path.len], entry.path); - head2[entry.path.len + (head1.len - to_copy_into2.len)] = 0; - const src: [:0]u16 = head2[0 .. entry.path.len + head2.len - to_copy_into2.len :0]; - - switch (entry.kind) { - .directory => { - if (bun.windows.CreateDirectoryExW(src.ptr, dest.ptr, null) == 0) { - bun.MakePath.makePath(u16, destination_dir_, entry.path) catch {}; - } - }, - .file => { - if (bun.windows.CopyFileW(src.ptr, dest.ptr, 0) == 0) { - if (bun.Dirname.dirname(u16, entry.path)) |entry_dirname| { - bun.MakePath.makePath(u16, destination_dir_, entry_dirname) catch {}; - if (bun.windows.CopyFileW(src.ptr, dest.ptr, 0) != 0) { - continue; - } - } - - progress_.root.end(); - progress_.refresh(); - - if (bun.windows.Win32Error.get().toSystemErrno()) |err| { - Output.prettyError("{s}: copying file {}", .{ @tagName(err), bun.fmt.fmtOSPath(entry.path, .{}) }); - } else { - Output.prettyError("error copying file {}", .{bun.fmt.fmtOSPath(entry.path, .{})}); - } - - Global.crash(); - } - }, - else => unreachable, // handled above - } - } else { - if (entry.kind != .file) continue; - real_file_count += 1; - const openFile = std.fs.Dir.openFile; - const createFile = std.fs.Dir.createFile; - - var in_file = try openFile(entry.dir, entry.basename, .{ .mode = .read_only }); - defer in_file.close(); - - var outfile = createFile(destination_dir_, entry.path, .{}) catch brk: { - if (bun.Dirname.dirname(bun.OSPathChar, entry.path)) |entry_dirname| { - bun.MakePath.makePath(bun.OSPathChar, destination_dir_, entry_dirname) catch {}; + while (try walker.next()) |entry| { + if (comptime Environment.isWindows) { + switch (entry.kind) { + .directory, .file => {}, + else => continue, } - break :brk createFile(destination_dir_, entry.path, .{}) catch |err| { - progress_.root.end(); - progress_.refresh(); + if (entry.path.len > to_copy_into1.len or entry.path.len > to_copy_into2.len) { + return error.NameTooLong; + } - Output.prettyErrorln("{s}: copying file {}", .{ @errorName(err), bun.fmt.fmtOSPath(entry.path, .{}) }); - Global.crash(); - }; - }; - defer outfile.close(); + @memcpy(to_copy_into1[0..entry.path.len], entry.path); + head1[entry.path.len + (head1.len - to_copy_into1.len)] = 0; + const dest: [:0]u16 = head1[0 .. entry.path.len + head1.len - to_copy_into1.len :0]; - if (comptime Environment.isPosix) { - const stat = in_file.stat() catch continue; - _ = C.fchmod(outfile.handle, @intCast(stat.mode)); - } + @memcpy(to_copy_into2[0..entry.path.len], entry.path); + head2[entry.path.len + (head1.len - to_copy_into2.len)] = 0; + const src: [:0]u16 = head2[0 .. 
entry.path.len + head2.len - to_copy_into2.len :0]; - bun.copyFileWithState(in_file.handle, outfile.handle, ©_file_state) catch |err| { - progress_.root.end(); - - progress_.refresh(); - - Output.prettyError("{s}: copying file {}", .{ @errorName(err), bun.fmt.fmtOSPath(entry.path, .{}) }); - Global.crash(); - }; - } - } - - return real_file_count; - } - }; - - this.file_count = FileCopier.copy( - state.subdir, - &state.walker, - this.progress, - if (Environment.isWindows) state.to_copy_buf else void{}, - if (Environment.isWindows) &state.buf else void{}, - if (Environment.isWindows) state.to_copy_buf2 else void{}, - if (Environment.isWindows) &state.buf2 else void{}, - ) catch |err| return Result{ - .fail = .{ .err = err, .step = .copying_files }, - }; - - return Result{ - .success = {}, - }; - } - - fn NewTaskQueue(comptime TaskType: type) type { - return struct { - remaining: std.atomic.Value(u32) = std.atomic.Value(u32).init(0), - errored_task: ?*TaskType = null, - thread_pool: *ThreadPool, - wake_value: std.atomic.Value(u32) = std.atomic.Value(u32).init(0), - - pub fn completeOne(this: *@This()) void { - @fence(.Release); - if (this.remaining.fetchSub(1, .Monotonic) == 1) { - _ = this.wake_value.fetchAdd(1, .Monotonic); - bun.Futex.wake(&this.wake_value, std.math.maxInt(u32)); - } - } - - pub fn push(this: *@This(), task: *TaskType) void { - _ = this.remaining.fetchAdd(1, .Monotonic); - this.thread_pool.schedule(bun.ThreadPool.Batch.from(&task.task)); - } - - pub fn wait(this: *@This()) void { - @fence(.Acquire); - this.wake_value.store(0, .Monotonic); - while (this.remaining.load(.Monotonic) > 0) { - bun.Futex.wait(&this.wake_value, 0, std.time.ns_per_ms * 5) catch {}; - } - } - }; - } - - const HardLinkWindowsInstallTask = struct { - bytes: []u16, - src: [:0]bun.OSPathChar, - dest: [:0]bun.OSPathChar, - basename: u16, - task: bun.JSC.WorkPoolTask = .{ .callback = &runFromThreadPool }, - err: ?anyerror = null, - - pub const Queue = NewTaskQueue(@This()); - var queue: Queue = undefined; - pub fn getQueue() *Queue { - queue = Queue{ - .thread_pool = &PackageManager.instance.thread_pool, - }; - return &queue; - } - - pub fn init(src: []const bun.OSPathChar, dest: []const bun.OSPathChar, basename: []const bun.OSPathChar) *@This() { - const allocation_size = - (src.len) + 1 + (dest.len) + 1; - - const combined = bun.default_allocator.alloc(u16, allocation_size) catch bun.outOfMemory(); - var remaining = combined; - @memcpy(remaining[0..src.len], src); - remaining[src.len] = 0; - const src_ = remaining[0..src.len :0]; - remaining = remaining[src.len + 1 ..]; - - @memcpy(remaining[0..dest.len], dest); - remaining[dest.len] = 0; - const dest_ = remaining[0..dest.len :0]; - remaining = remaining[dest.len + 1 ..]; - - return @This().new(.{ - .bytes = combined, - .src = src_, - .dest = dest_, - .basename = @truncate(basename.len), - }); - } - - pub fn runFromThreadPool(task: *bun.JSC.WorkPoolTask) void { - var iter = @fieldParentPtr(@This(), "task", task); - defer queue.completeOne(); - if (iter.run()) |err| { - iter.err = err; - queue.errored_task = iter; - return; - } - iter.deinit(); - } - - pub fn deinit(task: *@This()) void { - bun.default_allocator.free(task.bytes); - task.destroy(); - } - - pub usingnamespace bun.New(@This()); - - pub fn run(task: *@This()) ?anyerror { - const src = task.src; - const dest = task.dest; - - if (bun.windows.CreateHardLinkW(dest.ptr, src.ptr, null) != 0) { - return null; - } - - switch (bun.windows.GetLastError()) { - .ALREADY_EXISTS, .FILE_EXISTS, 
.CANNOT_MAKE => { - // Race condition: this shouldn't happen - if (comptime Environment.isDebug) - debug( - "CreateHardLinkW returned EEXIST, this shouldn't happen: {}", - .{bun.fmt.fmtPath(u16, dest, .{})}, - ); - _ = bun.windows.DeleteFileW(dest.ptr); - if (bun.windows.CreateHardLinkW(dest.ptr, src.ptr, null) != 0) { - return null; - } - }, - else => {}, - } - - dest[dest.len - task.basename - 1] = 0; - const dirpath = dest[0 .. dest.len - task.basename - 1 :0]; - _ = node_fs_for_package_installer.mkdirRecursiveOSPathImpl(void, {}, dirpath, 0, false).unwrap() catch {}; - dest[dest.len - task.basename - 1] = std.fs.path.sep; - - if (bun.windows.CreateHardLinkW(dest.ptr, src.ptr, null) != 0) { - return null; - } - - if (PackageManager.verbose_install) { - const once_log = struct { - var once = false; - - pub fn get() bool { - const prev = once; - once = true; - return !prev; - } - }.get(); - - if (once_log) { - Output.warn("CreateHardLinkW failed, falling back to CopyFileW: {} -> {}\n", .{ - bun.fmt.fmtOSPath(src, .{}), - bun.fmt.fmtOSPath(dest, .{}), - }); - } - } - - if (bun.windows.CopyFileW(src.ptr, dest.ptr, 0) != 0) { - return null; - } - - return bun.windows.getLastError(); - } - }; - - fn installWithHardlink(this: *PackageInstall, dest_dir: std.fs.Dir) !Result { - var state = InstallDirState{}; - const res = this.initInstallDir(&state, dest_dir); - if (res.isFail()) return res; - defer state.deinit(); - - const FileCopier = struct { - pub fn copy( - destination_dir: std.fs.Dir, - walker: *Walker, - to_copy_into1: if (Environment.isWindows) []u16 else void, - head1: if (Environment.isWindows) []u16 else void, - to_copy_into2: if (Environment.isWindows) []u16 else void, - head2: if (Environment.isWindows) []u16 else void, - ) !u32 { - var real_file_count: u32 = 0; - var queue = if (Environment.isWindows) HardLinkWindowsInstallTask.getQueue() else {}; - - while (try walker.next()) |entry| { - if (comptime Environment.isPosix) { - switch (entry.kind) { - .directory => { - bun.MakePath.makePath(std.meta.Elem(@TypeOf(entry.path)), destination_dir, entry.path) catch {}; - }, - .file => { - std.os.linkat(entry.dir.fd, entry.basename, destination_dir.fd, entry.path, 0) catch |err| { - if (err != error.PathAlreadyExists) { - return err; + switch (entry.kind) { + .directory => { + if (bun.windows.CreateDirectoryExW(src.ptr, dest.ptr, null) == 0) { + bun.MakePath.makePath(u16, destination_dir_, entry.path) catch {}; } - - std.os.unlinkat(destination_dir.fd, entry.path, 0) catch {}; - try std.os.linkat(entry.dir.fd, entry.basename, destination_dir.fd, entry.path, 0); - }; - - real_file_count += 1; - }, - else => {}, - } - } else { - switch (entry.kind) { - .file => {}, - else => continue, - } - - if (entry.path.len > to_copy_into1.len or entry.path.len > to_copy_into2.len) { - return error.NameTooLong; - } - - @memcpy(to_copy_into1[0..entry.path.len], entry.path); - head1[entry.path.len + (head1.len - to_copy_into1.len)] = 0; - const dest: [:0]u16 = head1[0 .. entry.path.len + head1.len - to_copy_into1.len :0]; - - @memcpy(to_copy_into2[0..entry.path.len], entry.path); - head2[entry.path.len + (head1.len - to_copy_into2.len)] = 0; - const src: [:0]u16 = head2[0 .. 
entry.path.len + head2.len - to_copy_into2.len :0]; - - queue.push(HardLinkWindowsInstallTask.init(src, dest, entry.basename)); - real_file_count += 1; - } - } - - if (comptime Environment.isWindows) { - queue.wait(); - - if (queue.errored_task) |task| { - if (task.err) |err| { - return err; - } - } - } - - return real_file_count; - } - }; - - this.file_count = FileCopier.copy( - state.subdir, - &state.walker, - state.to_copy_buf, - if (Environment.isWindows) &state.buf else void{}, - state.to_copy_buf2, - if (Environment.isWindows) &state.buf2 else void{}, - ) catch |err| { - bun.handleErrorReturnTrace(err, @errorReturnTrace()); - - if (comptime Environment.isWindows) { - if (err == error.FailedToCopyFile) { - return Result.fail(err, .copying_files); - } - } else if (err == error.NotSameFileSystem or err == error.ENXIO) { - return err; - } - return Result.fail(err, .copying_files); - }; - - return Result{ - .success = {}, - }; - } - - fn installWithSymlink(this: *PackageInstall, dest_dir: std.fs.Dir) !Result { - var state = InstallDirState{}; - const res = this.initInstallDir(&state, dest_dir); - if (res.isFail()) return res; - defer state.deinit(); - - var buf2: bun.PathBuffer = undefined; - var to_copy_buf2: []u8 = undefined; - if (Environment.isPosix) { - const cache_dir_path = try bun.getFdPath(state.cached_package_dir.fd, &buf2); - if (cache_dir_path.len > 0 and cache_dir_path[cache_dir_path.len - 1] != std.fs.path.sep) { - buf2[cache_dir_path.len] = std.fs.path.sep; - to_copy_buf2 = buf2[cache_dir_path.len + 1 ..]; - } else { - to_copy_buf2 = buf2[cache_dir_path.len..]; - } - } - - const FileCopier = struct { - pub fn copy( - destination_dir: std.fs.Dir, - walker: *Walker, - to_copy_into1: if (Environment.isWindows) []u16 else void, - head1: if (Environment.isWindows) []u16 else void, - to_copy_into2: []if (Environment.isWindows) u16 else u8, - head2: []if (Environment.isWindows) u16 else u8, - ) !u32 { - var real_file_count: u32 = 0; - while (try walker.next()) |entry| { - if (comptime Environment.isPosix) { - switch (entry.kind) { - .directory => { - bun.MakePath.makePath(std.meta.Elem(@TypeOf(entry.path)), destination_dir, entry.path) catch {}; - }, - .file => { - @memcpy(to_copy_into2[0..entry.path.len], entry.path); - head2[entry.path.len + (head2.len - to_copy_into2.len)] = 0; - const target: [:0]u8 = head2[0 .. entry.path.len + head2.len - to_copy_into2.len :0]; - - std.os.symlinkat(target, destination_dir.fd, entry.path) catch |err| { - if (err != error.PathAlreadyExists) { - return err; - } - - std.os.unlinkat(destination_dir.fd, entry.path, 0) catch {}; - try std.os.symlinkat(entry.basename, destination_dir.fd, entry.path); - }; - - real_file_count += 1; - }, - else => {}, - } - } else { - switch (entry.kind) { - .directory, .file => {}, - else => continue, - } - - if (entry.path.len > to_copy_into1.len or entry.path.len > to_copy_into2.len) { - return error.NameTooLong; - } - - @memcpy(to_copy_into1[0..entry.path.len], entry.path); - head1[entry.path.len + (head1.len - to_copy_into1.len)] = 0; - const dest: [:0]u16 = head1[0 .. entry.path.len + head1.len - to_copy_into1.len :0]; - - @memcpy(to_copy_into2[0..entry.path.len], entry.path); - head2[entry.path.len + (head1.len - to_copy_into2.len)] = 0; - const src: [:0]u16 = head2[0 .. 
entry.path.len + head2.len - to_copy_into2.len :0]; - - switch (entry.kind) { - .directory => { - if (bun.windows.CreateDirectoryExW(src.ptr, dest.ptr, null) == 0) { - bun.MakePath.makePath(u16, destination_dir, entry.path) catch {}; - } - }, - .file => { - switch (bun.sys.symlinkW(dest, src, .{})) { - .err => |err| { + }, + .file => { + if (bun.windows.CopyFileW(src.ptr, dest.ptr, 0) == 0) { if (bun.Dirname.dirname(u16, entry.path)) |entry_dirname| { - bun.MakePath.makePath(u16, destination_dir, entry_dirname) catch {}; - if (bun.sys.symlinkW(dest, src, .{}) == .result) { + bun.MakePath.makePath(u16, destination_dir_, entry_dirname) catch {}; + if (bun.windows.CopyFileW(src.ptr, dest.ptr, 0) != 0) { continue; } } - if (PackageManager.verbose_install) { - const once_log = struct { - var once = false; - - pub fn get() bool { - const prev = once; - once = true; - return !prev; - } - }.get(); - - if (once_log) { - Output.warn("CreateHardLinkW failed, falling back to CopyFileW: {} -> {}\n", .{ - bun.fmt.fmtOSPath(src, .{}), - bun.fmt.fmtOSPath(dest, .{}), - }); - } + if (comptime do_progress) { + progress_.root.end(); + progress_.refresh(); } - return bun.errnoToZigErr(err.errno); - }, - .result => {}, + if (bun.windows.Win32Error.get().toSystemErrno()) |err| { + Output.prettyError("{s}: copying file {}", .{ @tagName(err), bun.fmt.fmtOSPath(entry.path, .{}) }); + } else { + Output.prettyError("error copying file {}", .{bun.fmt.fmtOSPath(entry.path, .{})}); + } + + Global.crash(); + } + }, + else => unreachable, // handled above + } + } else { + if (entry.kind != .file) continue; + real_file_count += 1; + const openFile = std.fs.Dir.openFile; + const createFile = std.fs.Dir.createFile; + + var in_file = try openFile(entry.dir, entry.basename, .{ .mode = .read_only }); + defer in_file.close(); + + debug("createFile {} {s}\n", .{ destination_dir_.fd, entry.path }); + var outfile = createFile(destination_dir_, entry.path, .{}) catch brk: { + if (bun.Dirname.dirname(bun.OSPathChar, entry.path)) |entry_dirname| { + bun.MakePath.makePath(bun.OSPathChar, destination_dir_, entry_dirname) catch {}; } - }, - else => unreachable, // handled above + break :brk createFile(destination_dir_, entry.path, .{}) catch |err| { + if (do_progress) { + progress_.root.end(); + progress_.refresh(); + } + + Output.prettyErrorln("{s}: copying file {}", .{ @errorName(err), bun.fmt.fmtOSPath(entry.path, .{}) }); + Global.crash(); + }; + }; + defer outfile.close(); + + if (comptime Environment.isPosix) { + const stat = in_file.stat() catch continue; + _ = C.fchmod(outfile.handle, @intCast(stat.mode)); + } + + bun.copyFileWithState(in_file.handle, outfile.handle, ©_file_state) catch |err| { + if (do_progress) { + progress_.root.end(); + progress_.refresh(); + } + + Output.prettyError("{s}: copying file {}", .{ @errorName(err), bun.fmt.fmtOSPath(entry.path, .{}) }); + Global.crash(); + }; } } + + return real_file_count; + } + }; + + this.file_count = FileCopier.copy( + state.subdir, + &state.walker, + this.progress, + if (Environment.isWindows) state.to_copy_buf else void{}, + if (Environment.isWindows) &state.buf else void{}, + if (Environment.isWindows) state.to_copy_buf2 else void{}, + if (Environment.isWindows) &state.buf2 else void{}, + ) catch |err| return Result{ + .fail = .{ .err = err, .step = .copying_files }, + }; + + return Result{ + .success = {}, + }; + } + + fn NewTaskQueue(comptime TaskType: type) type { + return struct { + remaining: std.atomic.Value(u32) = std.atomic.Value(u32).init(0), + errored_task: 
?*TaskType = null, + thread_pool: *ThreadPool, + wake_value: std.atomic.Value(u32) = std.atomic.Value(u32).init(0), + + pub fn completeOne(this: *@This()) void { + @fence(.Release); + if (this.remaining.fetchSub(1, .Monotonic) == 1) { + _ = this.wake_value.fetchAdd(1, .Monotonic); + bun.Futex.wake(&this.wake_value, std.math.maxInt(u32)); + } } - return real_file_count; - } - }; - - this.file_count = FileCopier.copy( - state.subdir, - &state.walker, - if (Environment.isWindows) state.to_copy_buf else void{}, - if (Environment.isWindows) &state.buf else void{}, - if (Environment.isWindows) state.to_copy_buf2 else to_copy_buf2, - if (Environment.isWindows) &state.buf2 else &buf2, - ) catch |err| { - if (comptime Environment.isWindows) { - if (err == error.FailedToCopyFile) { - return Result.fail(err, .copying_files); + pub fn push(this: *@This(), task: *TaskType) void { + _ = this.remaining.fetchAdd(1, .Monotonic); + this.thread_pool.schedule(bun.ThreadPool.Batch.from(&task.task)); } - } else if (err == error.NotSameFileSystem or err == error.ENXIO) { - return err; - } - return Result.fail(err, .copying_files); - }; - return Result{ - .success = {}, - }; - } - - pub fn uninstall(this: *PackageInstall, destination_dir: std.fs.Dir) void { - destination_dir.deleteTree(bun.span(this.destination_dir_subpath)) catch {}; - } - - pub fn uninstallBeforeInstall(this: *PackageInstall, destination_dir: std.fs.Dir) void { - var rand_path_buf: [48]u8 = undefined; - const temp_path = std.fmt.bufPrintZ(&rand_path_buf, ".old-{}", .{std.fmt.fmtSliceHexUpper(std.mem.asBytes(&bun.fastRandom()))}) catch unreachable; - switch (bun.sys.renameat(bun.toFD(destination_dir), this.destination_dir_subpath, bun.toFD(destination_dir), temp_path)) { - .err => { - // if it fails, that means the directory doesn't exist or was inaccessible - }, - .result => { - // Uninstall can sometimes take awhile in a large directory - // tree. Since we're renaming the directory to a randomly - // generated name, we can delete it in another thread without - // worrying about race conditions or blocking the main thread. - // - // This should be a slight improvement to CI environments. 
- // - // on macOS ARM64 in a project with Gatsby, @mui/icons-material, and Next.js: - // - // ❯ hyperfine "bun install --ignore-scripts" "bun-1.1.2 install --ignore-scripts" --prepare="rm -rf node_modules/**/package.json" --warmup=2 - // Benchmark 1: bun install --ignore-scripts - // Time (mean ± σ): 2.281 s ± 0.027 s [User: 0.041 s, System: 6.851 s] - // Range (min … max): 2.231 s … 2.312 s 10 runs - // - // Benchmark 2: bun-1.1.2 install --ignore-scripts - // Time (mean ± σ): 3.315 s ± 0.033 s [User: 0.029 s, System: 2.237 s] - // Range (min … max): 3.279 s … 3.356 s 10 runs - // - // Summary - // bun install --ignore-scripts ran - // 1.45 ± 0.02 times faster than bun-1.1.2 install --ignore-scripts - // - - const UninstallTask = struct { - absolute_path: []const u8, - task: JSC.WorkPoolTask = .{ .callback = &run }, - pub fn run(task: *JSC.WorkPoolTask) void { - var unintall_task = @fieldParentPtr(@This(), "task", task); - var debug_timer = bun.Output.DebugTimer.start(); - defer { - _ = PackageManager.instance.decrementPendingTasks(); - PackageManager.instance.wake(); - } - - defer unintall_task.deinit(); - const dirname = std.fs.path.dirname(unintall_task.absolute_path) orelse { - Output.debugWarn("Unexpectedly failed to get dirname of {s}", .{unintall_task.absolute_path}); - return; - }; - const basename = std.fs.path.basename(unintall_task.absolute_path); - - var dir = bun.openDirA(std.fs.cwd(), dirname) catch |err| { - if (comptime Environment.isDebug) { - Output.debugWarn("Failed to delete {s}: {s}", .{ unintall_task.absolute_path, @errorName(err) }); - } - return; - }; - defer _ = bun.sys.close(bun.toFD(dir.fd)); - - dir.deleteTree(basename) catch |err| { - if (comptime Environment.isDebug) { - Output.debugWarn("Failed to delete {s} in {s}: {s}", .{ basename, dirname, @errorName(err) }); - } - }; - - if (Environment.isDebug) { - _ = &debug_timer; - debug("deleteTree({s}, {s}) = {}", .{ basename, dirname, debug_timer }); - } + pub fn wait(this: *@This()) void { + @fence(.Acquire); + this.wake_value.store(0, .Monotonic); + while (this.remaining.load(.Monotonic) > 0) { + bun.Futex.wait(&this.wake_value, 0, std.time.ns_per_ms * 5) catch {}; } + } + }; + } - pub fn deinit(uninstall_task: *@This()) void { - bun.default_allocator.free(uninstall_task.absolute_path); - uninstall_task.destroy(); - } + const HardLinkWindowsInstallTask = struct { + bytes: []u16, + src: [:0]bun.OSPathChar, + dest: [:0]bun.OSPathChar, + basename: u16, + task: bun.JSC.WorkPoolTask = .{ .callback = &runFromThreadPool }, + err: ?anyerror = null, - pub usingnamespace bun.New(@This()); + pub const Queue = NewTaskQueue(@This()); + var queue: Queue = undefined; + pub fn getQueue() *Queue { + queue = Queue{ + .thread_pool = &PackageManager.instance.thread_pool, }; - var task = UninstallTask.new(.{ - .absolute_path = bun.default_allocator.dupeZ(u8, bun.path.joinAbsString(FileSystem.instance.top_level_dir, &.{ this.node_modules.path.items, temp_path }, .auto)) catch bun.outOfMemory(), + return &queue; + } + + pub fn init(src: []const bun.OSPathChar, dest: []const bun.OSPathChar, basename: []const bun.OSPathChar) *@This() { + const allocation_size = + (src.len) + 1 + (dest.len) + 1; + + const combined = bun.default_allocator.alloc(u16, allocation_size) catch bun.outOfMemory(); + var remaining = combined; + @memcpy(remaining[0..src.len], src); + remaining[src.len] = 0; + const src_ = remaining[0..src.len :0]; + remaining = remaining[src.len + 1 ..]; + + @memcpy(remaining[0..dest.len], dest); + remaining[dest.len] = 0; + 
const dest_ = remaining[0..dest.len :0]; + remaining = remaining[dest.len + 1 ..]; + + return @This().new(.{ + .bytes = combined, + .src = src_, + .dest = dest_, + .basename = @truncate(basename.len), }); - PackageManager.instance.thread_pool.schedule(bun.ThreadPool.Batch.from(&task.task)); - _ = PackageManager.instance.incrementPendingTasks(1); - }, - } - } + } - pub fn isDanglingSymlink(path: [:0]const u8) bool { - if (comptime Environment.isLinux) { - const rc = Syscall.system.open(path, @as(u32, std.os.O.PATH | 0), @as(u32, 0)); - switch (Syscall.getErrno(rc)) { - .SUCCESS => { - _ = Syscall.system.close(@intCast(rc)); - return false; - }, - else => return true, + pub fn runFromThreadPool(task: *bun.JSC.WorkPoolTask) void { + var iter = @fieldParentPtr(@This(), "task", task); + defer queue.completeOne(); + if (iter.run()) |err| { + iter.err = err; + queue.errored_task = iter; + return; + } + iter.deinit(); } - } else if (comptime Environment.isWindows) { - switch (bun.sys.sys_uv.open(path, 0, 0)) { - .err => { - return true; - }, - .result => |fd| { - _ = bun.sys.close(fd); - return false; - }, - } - } else { - const rc = Syscall.system.open(path, @as(u32, 0), @as(u32, 0)); - switch (Syscall.getErrno(rc)) { - .SUCCESS => { - _ = Syscall.system.close(rc); - return false; - }, - else => return true, - } - } - } - pub fn isDanglingWindowsBinLink(node_mod_fd: bun.FileDescriptor, path: []const u16, temp_buffer: []u8) bool { - const WinBinLinkingShim = @import("./windows-shim/BinLinkingShim.zig"); - const bin_path = bin_path: { - const fd = bun.sys.openatWindows(node_mod_fd, path, std.os.O.RDONLY).unwrap() catch return true; - defer _ = bun.sys.close(fd); - const size = fd.asFile().readAll(temp_buffer) catch return true; - const decoded = WinBinLinkingShim.looseDecode(temp_buffer[0..size]) orelse return true; - bun.assert(decoded.flags.isValid()); // looseDecode ensures valid flags - break :bin_path decoded.bin_path; + pub fn deinit(task: *@This()) void { + bun.default_allocator.free(task.bytes); + task.destroy(); + } + + pub usingnamespace bun.New(@This()); + + pub fn run(task: *@This()) ?anyerror { + const src = task.src; + const dest = task.dest; + + if (bun.windows.CreateHardLinkW(dest.ptr, src.ptr, null) != 0) { + return null; + } + + switch (bun.windows.GetLastError()) { + .ALREADY_EXISTS, .FILE_EXISTS, .CANNOT_MAKE => { + // Race condition: this shouldn't happen + if (comptime Environment.isDebug) + debug( + "CreateHardLinkW returned EEXIST, this shouldn't happen: {}", + .{bun.fmt.fmtPath(u16, dest, .{})}, + ); + _ = bun.windows.DeleteFileW(dest.ptr); + if (bun.windows.CreateHardLinkW(dest.ptr, src.ptr, null) != 0) { + return null; + } + }, + else => {}, + } + + dest[dest.len - task.basename - 1] = 0; + const dirpath = dest[0 .. 
dest.len - task.basename - 1 :0]; + _ = node_fs_for_package_installer.mkdirRecursiveOSPathImpl(void, {}, dirpath, 0, false).unwrap() catch {}; + dest[dest.len - task.basename - 1] = std.fs.path.sep; + + if (bun.windows.CreateHardLinkW(dest.ptr, src.ptr, null) != 0) { + return null; + } + + if (PackageManager.verbose_install) { + const once_log = struct { + var once = false; + + pub fn get() bool { + const prev = once; + once = true; + return !prev; + } + }.get(); + + if (once_log) { + Output.warn("CreateHardLinkW failed, falling back to CopyFileW: {} -> {}\n", .{ + bun.fmt.fmtOSPath(src, .{}), + bun.fmt.fmtOSPath(dest, .{}), + }); + } + } + + if (bun.windows.CopyFileW(src.ptr, dest.ptr, 0) != 0) { + return null; + } + + return bun.windows.getLastError(); + } }; - { - const fd = bun.sys.openatWindows(node_mod_fd, bin_path, std.os.O.RDONLY).unwrap() catch return true; - _ = bun.sys.close(fd); - } + fn installWithHardlink(this: *@This(), dest_dir: std.fs.Dir) !Result { + var state = InstallDirState{}; + const res = this.initInstallDir(&state, dest_dir); + if (res.isFail()) return res; + defer state.deinit(); - return false; - } + const FileCopier = struct { + pub fn copy( + destination_dir: std.fs.Dir, + walker: *Walker, + to_copy_into1: if (Environment.isWindows) []u16 else void, + head1: if (Environment.isWindows) []u16 else void, + to_copy_into2: if (Environment.isWindows) []u16 else void, + head2: if (Environment.isWindows) []u16 else void, + ) !u32 { + var real_file_count: u32 = 0; + var queue = if (Environment.isWindows) HardLinkWindowsInstallTask.getQueue() else {}; - pub fn installFromLink(this: *PackageInstall, skip_delete: bool, destination_dir: std.fs.Dir) Result { - const dest_path = this.destination_dir_subpath; - // If this fails, we don't care. 
- // we'll catch it the next error - if (!skip_delete and !strings.eqlComptime(dest_path, ".")) this.uninstallBeforeInstall(destination_dir); + while (try walker.next()) |entry| { + if (comptime Environment.isPosix) { + switch (entry.kind) { + .directory => { + bun.MakePath.makePath(std.meta.Elem(@TypeOf(entry.path)), destination_dir, entry.path) catch {}; + }, + .file => { + std.os.linkat(entry.dir.fd, entry.basename, destination_dir.fd, entry.path, 0) catch |err| { + if (err != error.PathAlreadyExists) { + return err; + } - const subdir = std.fs.path.dirname(dest_path); + std.os.unlinkat(destination_dir.fd, entry.path, 0) catch {}; + try std.os.linkat(entry.dir.fd, entry.basename, destination_dir.fd, entry.path, 0); + }; - var dest_buf: bun.PathBuffer = undefined; - // cache_dir_subpath in here is actually the full path to the symlink pointing to the linked package - const symlinked_path = this.cache_dir_subpath; - var to_buf: bun.PathBuffer = undefined; - const to_path = this.cache_dir.realpath(symlinked_path, &to_buf) catch |err| return Result{ - .fail = .{ - .err = err, - .step = .linking_dependency, - }, - }; + real_file_count += 1; + }, + else => {}, + } + } else { + switch (entry.kind) { + .file => {}, + else => continue, + } - const dest = std.fs.path.basename(dest_path); - // When we're linking on Windows, we want to avoid keeping the source directory handle open - if (comptime Environment.isWindows) { - var wbuf: bun.WPathBuffer = undefined; - const dest_path_length = bun.windows.kernel32.GetFinalPathNameByHandleW(destination_dir.fd, &wbuf, dest_buf.len, 0); - if (dest_path_length == 0) { - const e = bun.windows.Win32Error.get(); - const err = if (e.toSystemErrno()) |sys_err| bun.errnoToZigErr(sys_err) else error.Unexpected; - return Result.fail(err, .linking_dependency); - } + if (entry.path.len > to_copy_into1.len or entry.path.len > to_copy_into2.len) { + return error.NameTooLong; + } - var i: usize = dest_path_length; - if (wbuf[i] != '\\') { - wbuf[i] = '\\'; - i += 1; - } + @memcpy(to_copy_into1[0..entry.path.len], entry.path); + head1[entry.path.len + (head1.len - to_copy_into1.len)] = 0; + const dest: [:0]u16 = head1[0 .. entry.path.len + head1.len - to_copy_into1.len :0]; - if (subdir) |dir| { - i += bun.strings.toWPathNormalized(wbuf[i..], dir).len; - wbuf[i] = std.fs.path.sep_windows; - i += 1; - wbuf[i] = 0; - const fullpath = wbuf[0..i :0]; + @memcpy(to_copy_into2[0..entry.path.len], entry.path); + head2[entry.path.len + (head1.len - to_copy_into2.len)] = 0; + const src: [:0]u16 = head2[0 .. entry.path.len + head2.len - to_copy_into2.len :0]; - _ = node_fs_for_package_installer.mkdirRecursiveOSPathImpl(void, {}, fullpath, 0, false).unwrap() catch |err| { - return Result.fail(err, .linking_dependency); - }; - } - - const res = strings.copyUTF16IntoUTF8(dest_buf[0..], []const u16, wbuf[0..i], true); - var offset: usize = res.written; - if (dest_buf[offset - 1] != std.fs.path.sep_windows) { - dest_buf[offset] = std.fs.path.sep_windows; - offset += 1; - } - @memcpy(dest_buf[offset .. 
offset + dest.len], dest); - offset += dest.len; - dest_buf[offset] = 0; - - const dest_z = dest_buf[0..offset :0]; - - to_buf[to_path.len] = 0; - const target_z = to_buf[0..to_path.len :0]; - - // https://github.com/npm/cli/blob/162c82e845d410ede643466f9f8af78a312296cc/workspaces/arborist/lib/arborist/reify.js#L738 - // https://github.com/npm/cli/commit/0e58e6f6b8f0cd62294642a502c17561aaf46553 - switch (bun.sys.symlinkOrJunctionOnWindows(dest_z, target_z)) { - .err => |err_| brk: { - var err = err_; - if (err.getErrno() == .EXIST) { - _ = bun.sys.rmdirat(bun.toFD(destination_dir), this.destination_dir_subpath); - switch (bun.sys.symlinkOrJunctionOnWindows(dest_z, target_z)) { - .err => |e| err = e, - .result => break :brk, + queue.push(HardLinkWindowsInstallTask.init(src, dest, entry.basename)); + real_file_count += 1; } } - return Result{ + if (comptime Environment.isWindows) { + queue.wait(); + + if (queue.errored_task) |task| { + if (task.err) |err| { + return err; + } + } + } + + return real_file_count; + } + }; + + this.file_count = FileCopier.copy( + state.subdir, + &state.walker, + state.to_copy_buf, + if (Environment.isWindows) &state.buf else void{}, + state.to_copy_buf2, + if (Environment.isWindows) &state.buf2 else void{}, + ) catch |err| { + bun.handleErrorReturnTrace(err, @errorReturnTrace()); + + if (comptime Environment.isWindows) { + if (err == error.FailedToCopyFile) { + return Result.fail(err, .copying_files); + } + } else if (err == error.NotSameFileSystem or err == error.ENXIO) { + return err; + } + return Result.fail(err, .copying_files); + }; + + return Result{ + .success = {}, + }; + } + + fn installWithSymlink(this: *@This(), dest_dir: std.fs.Dir) !Result { + var state = InstallDirState{}; + const res = this.initInstallDir(&state, dest_dir); + if (res.isFail()) return res; + defer state.deinit(); + + var buf2: bun.PathBuffer = undefined; + var to_copy_buf2: []u8 = undefined; + if (Environment.isPosix) { + const cache_dir_path = try bun.getFdPath(state.cached_package_dir.fd, &buf2); + if (cache_dir_path.len > 0 and cache_dir_path[cache_dir_path.len - 1] != std.fs.path.sep) { + buf2[cache_dir_path.len] = std.fs.path.sep; + to_copy_buf2 = buf2[cache_dir_path.len + 1 ..]; + } else { + to_copy_buf2 = buf2[cache_dir_path.len..]; + } + } + + const FileCopier = struct { + pub fn copy( + destination_dir: std.fs.Dir, + walker: *Walker, + to_copy_into1: if (Environment.isWindows) []u16 else void, + head1: if (Environment.isWindows) []u16 else void, + to_copy_into2: []if (Environment.isWindows) u16 else u8, + head2: []if (Environment.isWindows) u16 else u8, + ) !u32 { + var real_file_count: u32 = 0; + while (try walker.next()) |entry| { + if (comptime Environment.isPosix) { + switch (entry.kind) { + .directory => { + bun.MakePath.makePath(std.meta.Elem(@TypeOf(entry.path)), destination_dir, entry.path) catch {}; + }, + .file => { + @memcpy(to_copy_into2[0..entry.path.len], entry.path); + head2[entry.path.len + (head2.len - to_copy_into2.len)] = 0; + const target: [:0]u8 = head2[0 .. 
entry.path.len + head2.len - to_copy_into2.len :0];
+
+ std.os.symlinkat(target, destination_dir.fd, entry.path) catch |err| {
+ if (err != error.PathAlreadyExists) {
+ return err;
+ }
+
+ std.os.unlinkat(destination_dir.fd, entry.path, 0) catch {};
+ try std.os.symlinkat(entry.basename, destination_dir.fd, entry.path);
+ };
+
+ real_file_count += 1;
+ },
+ else => {},
+ }
+ } else {
+ switch (entry.kind) {
+ .directory, .file => {},
+ else => continue,
+ }
+
+ if (entry.path.len > to_copy_into1.len or entry.path.len > to_copy_into2.len) {
+ return error.NameTooLong;
+ }
+
+ @memcpy(to_copy_into1[0..entry.path.len], entry.path);
+ head1[entry.path.len + (head1.len - to_copy_into1.len)] = 0;
+ const dest: [:0]u16 = head1[0 .. entry.path.len + head1.len - to_copy_into1.len :0];
+
+ @memcpy(to_copy_into2[0..entry.path.len], entry.path);
+ head2[entry.path.len + (head1.len - to_copy_into2.len)] = 0;
+ const src: [:0]u16 = head2[0 .. entry.path.len + head2.len - to_copy_into2.len :0];
+
+ switch (entry.kind) {
+ .directory => {
+ if (bun.windows.CreateDirectoryExW(src.ptr, dest.ptr, null) == 0) {
+ bun.MakePath.makePath(u16, destination_dir, entry.path) catch {};
+ }
+ },
+ .file => {
+ switch (bun.sys.symlinkW(dest, src, .{})) {
+ .err => |err| {
+ if (bun.Dirname.dirname(u16, entry.path)) |entry_dirname| {
+ bun.MakePath.makePath(u16, destination_dir, entry_dirname) catch {};
+ if (bun.sys.symlinkW(dest, src, .{}) == .result) {
+ continue;
+ }
+ }
+
+ if (PackageManager.verbose_install) {
+ const once_log = struct {
+ var once = false;
+
+ pub fn get() bool {
+ const prev = once;
+ once = true;
+ return !prev;
+ }
+ }.get();
+
+ if (once_log) {
+ Output.warn("Failed to create symlink: {} -> {}\n", .{
+ bun.fmt.fmtOSPath(src, .{}),
+ bun.fmt.fmtOSPath(dest, .{}),
+ });
+ }
+ }
+
+ return bun.errnoToZigErr(err.errno);
+ },
+ .result => {},
+ }
+ },
+ else => unreachable, // handled above
+ }
+ }
+ }
+
+ return real_file_count;
+ }
+ };
+
+ this.file_count = FileCopier.copy(
+ state.subdir,
+ &state.walker,
+ if (Environment.isWindows) state.to_copy_buf else void{},
+ if (Environment.isWindows) &state.buf else void{},
+ if (Environment.isWindows) state.to_copy_buf2 else to_copy_buf2,
+ if (Environment.isWindows) &state.buf2 else &buf2,
+ ) catch |err| {
+ if (comptime Environment.isWindows) {
+ if (err == error.FailedToCopyFile) {
+ return Result.fail(err, .copying_files);
+ }
+ } else if (err == error.NotSameFileSystem or err == error.ENXIO) {
+ return err;
+ }
+ return Result.fail(err, .copying_files);
+ };
+
+ return Result{
+ .success = {},
+ };
+ }
+
+ pub fn uninstall(this: *@This(), destination_dir: std.fs.Dir) void {
+ destination_dir.deleteTree(bun.span(this.destination_dir_subpath)) catch {};
+ }
+
+ pub fn uninstallBeforeInstall(this: *@This(), destination_dir: std.fs.Dir) void {
+ var rand_path_buf: [48]u8 = undefined;
+ const temp_path = std.fmt.bufPrintZ(&rand_path_buf, ".old-{}", .{std.fmt.fmtSliceHexUpper(std.mem.asBytes(&bun.fastRandom()))}) catch unreachable;
+ switch (bun.sys.renameat(bun.toFD(destination_dir), this.destination_dir_subpath, bun.toFD(destination_dir), temp_path)) {
+ .err => {
+ // if it fails, that means the directory doesn't exist or was inaccessible
+ },
+ .result => {
+ // Uninstall can sometimes take a while in a large directory
+ // tree. Since we're renaming the directory to a randomly
+ // generated name, we can delete it in another thread without
+ // worrying about race conditions or blocking the main thread.
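+ // Note: the renameat above already completed synchronously, so the
+ // install path is free immediately; only the recursive delete is deferred.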
+ //
+ // This should be a slight improvement to CI environments.
+ //
+ // on macOS ARM64 in a project with Gatsby, @mui/icons-material, and Next.js:
+ //
+ // ❯ hyperfine "bun install --ignore-scripts" "bun-1.1.2 install --ignore-scripts" --prepare="rm -rf node_modules/**/package.json" --warmup=2
+ // Benchmark 1: bun install --ignore-scripts
+ // Time (mean ± σ): 2.281 s ± 0.027 s [User: 0.041 s, System: 6.851 s]
+ // Range (min … max): 2.231 s … 2.312 s 10 runs
+ //
+ // Benchmark 2: bun-1.1.2 install --ignore-scripts
+ // Time (mean ± σ): 3.315 s ± 0.033 s [User: 0.029 s, System: 2.237 s]
+ // Range (min … max): 3.279 s … 3.356 s 10 runs
+ //
+ // Summary
+ // bun install --ignore-scripts ran
+ // 1.45 ± 0.02 times faster than bun-1.1.2 install --ignore-scripts
+ //
+
+ const UninstallTask = struct {
+ absolute_path: []const u8,
+ task: JSC.WorkPoolTask = .{ .callback = &run },
+ pub fn run(task: *JSC.WorkPoolTask) void {
+ var uninstall_task = @fieldParentPtr(@This(), "task", task);
+ var debug_timer = bun.Output.DebugTimer.start();
+ defer {
+ _ = PackageManager.instance.decrementPendingTasks();
+ PackageManager.instance.wake();
+ }
+
+ defer uninstall_task.deinit();
+ const dirname = std.fs.path.dirname(uninstall_task.absolute_path) orelse {
+ Output.debugWarn("Unexpectedly failed to get dirname of {s}", .{uninstall_task.absolute_path});
+ return;
+ };
+ const basename = std.fs.path.basename(uninstall_task.absolute_path);
+
+ var dir = bun.openDirA(std.fs.cwd(), dirname) catch |err| {
+ if (comptime Environment.isDebug) {
+ Output.debugWarn("Failed to delete {s}: {s}", .{ uninstall_task.absolute_path, @errorName(err) });
+ }
+ return;
+ };
+ defer _ = bun.sys.close(bun.toFD(dir.fd));
+
+ dir.deleteTree(basename) catch |err| {
+ if (comptime Environment.isDebug) {
+ Output.debugWarn("Failed to delete {s} in {s}: {s}", .{ basename, dirname, @errorName(err) });
+ }
+ };
+
+ if (Environment.isDebug) {
+ _ = &debug_timer;
+ debug("deleteTree({s}, {s}) = {}", .{ basename, dirname, debug_timer });
+ }
+ }
+
+ pub fn deinit(uninstall_task: *@This()) void {
+ bun.default_allocator.free(uninstall_task.absolute_path);
+ uninstall_task.destroy();
+ }
+
+ pub usingnamespace bun.New(@This());
+ };
+ var task = UninstallTask.new(.{
+ .absolute_path = bun.default_allocator.dupeZ(u8, bun.path.joinAbsString(FileSystem.instance.top_level_dir, &.{ this.node_modules.path.items, temp_path }, .auto)) catch bun.outOfMemory(),
+ });
+ PackageManager.instance.thread_pool.schedule(bun.ThreadPool.Batch.from(&task.task));
+ _ = PackageManager.instance.incrementPendingTasks(1);
+ },
+ }
+ }
+
+ pub fn isDanglingSymlink(path: [:0]const u8) bool {
+ if (comptime Environment.isLinux) {
+ const rc = Syscall.system.open(path, @as(u32, std.os.O.PATH | 0), @as(u32, 0));
+ switch (Syscall.getErrno(rc)) {
+ .SUCCESS => {
+ _ = Syscall.system.close(@intCast(rc));
+ return false;
+ },
+ else => return true,
+ }
+ } else if (comptime Environment.isWindows) {
+ switch (bun.sys.sys_uv.open(path, 0, 0)) {
+ .err => {
+ return true;
+ },
+ .result => |fd| {
+ _ = bun.sys.close(fd);
+ return false;
+ },
+ }
+ } else {
+ const rc = Syscall.system.open(path, @as(u32, 0), @as(u32, 0));
+ switch (Syscall.getErrno(rc)) {
+ .SUCCESS => {
+ _ = Syscall.system.close(rc);
+ return false;
+ },
+ else => return true,
+ }
+ }
+ }
+
+ pub fn isDanglingWindowsBinLink(node_mod_fd: bun.FileDescriptor, path: []const u16, temp_buffer: []u8) bool {
+ const WinBinLinkingShim = @import("./windows-shim/BinLinkingShim.zig");
+ const bin_path = bin_path: {
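+ // Read the shim file and loosely decode it; any failure below treats
+ // the bin link as dangling.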
+ const fd = bun.sys.openatWindows(node_mod_fd, path, std.os.O.RDONLY).unwrap() catch return true; + defer _ = bun.sys.close(fd); + const size = fd.asFile().readAll(temp_buffer) catch return true; + const decoded = WinBinLinkingShim.looseDecode(temp_buffer[0..size]) orelse return true; + bun.assert(decoded.flags.isValid()); // looseDecode ensures valid flags + break :bin_path decoded.bin_path; + }; + + { + const fd = bun.sys.openatWindows(node_mod_fd, bin_path, std.os.O.RDONLY).unwrap() catch return true; + _ = bun.sys.close(fd); + } + + return false; + } + + pub fn installFromLink(this: *@This(), skip_delete: bool, destination_dir: std.fs.Dir) Result { + const dest_path = this.destination_dir_subpath; + // If this fails, we don't care. + // we'll catch it the next error + if (!skip_delete and !strings.eqlComptime(dest_path, ".")) this.uninstallBeforeInstall(destination_dir); + + const subdir = std.fs.path.dirname(dest_path); + + var dest_buf: bun.PathBuffer = undefined; + // cache_dir_subpath in here is actually the full path to the symlink pointing to the linked package + const symlinked_path = this.cache_dir_subpath; + var to_buf: [bun.MAX_PATH_BYTES]u8 = undefined; + const to_path = this.cache_dir.realpath(symlinked_path, &to_buf) catch |err| return Result{ + .fail = .{ + .err = err, + .step = .linking_dependency, + }, + }; + + const dest = std.fs.path.basename(dest_path); + // When we're linking on Windows, we want to avoid keeping the source directory handle open + if (comptime Environment.isWindows) { + var wbuf: bun.WPathBuffer = undefined; + const dest_path_length = bun.windows.kernel32.GetFinalPathNameByHandleW(destination_dir.fd, &wbuf, dest_buf.len, 0); + if (dest_path_length == 0) { + const e = bun.windows.Win32Error.get(); + const err = if (e.toSystemErrno()) |sys_err| bun.errnoToZigErr(sys_err) else error.Unexpected; + return Result.fail(err, .linking_dependency); + } + + var i: usize = dest_path_length; + if (wbuf[i] != '\\') { + wbuf[i] = '\\'; + i += 1; + } + + if (subdir) |dir| { + i += bun.strings.toWPathNormalized(wbuf[i..], dir).len; + wbuf[i] = std.fs.path.sep_windows; + i += 1; + wbuf[i] = 0; + const fullpath = wbuf[0..i :0]; + + _ = node_fs_for_package_installer.mkdirRecursiveOSPathImpl(void, {}, fullpath, 0, false).unwrap() catch |err| { + return Result.fail(err, .linking_dependency); + }; + } + + const res = strings.copyUTF16IntoUTF8(dest_buf[0..], []const u16, wbuf[0..i], true); + var offset: usize = res.written; + if (dest_buf[offset - 1] != std.fs.path.sep_windows) { + dest_buf[offset] = std.fs.path.sep_windows; + offset += 1; + } + @memcpy(dest_buf[offset .. 
offset + dest.len], dest); + offset += dest.len; + dest_buf[offset] = 0; + + const dest_z = dest_buf[0..offset :0]; + + to_buf[to_path.len] = 0; + const target_z = to_buf[0..to_path.len :0]; + + // https://github.com/npm/cli/blob/162c82e845d410ede643466f9f8af78a312296cc/workspaces/arborist/lib/arborist/reify.js#L738 + // https://github.com/npm/cli/commit/0e58e6f6b8f0cd62294642a502c17561aaf46553 + switch (bun.sys.symlinkOrJunctionOnWindows(dest_z, target_z)) { + .err => |err_| brk: { + var err = err_; + if (err.getErrno() == .EXIST) { + _ = bun.sys.rmdirat(bun.toFD(destination_dir), this.destination_dir_subpath); + switch (bun.sys.symlinkOrJunctionOnWindows(dest_z, target_z)) { + .err => |e| err = e, + .result => break :brk, + } + } + + return Result{ + .fail = .{ + .err = bun.errnoToZigErr(err.errno), + .step = .linking_dependency, + }, + }; + }, + .result => {}, + } + } else { + var dest_dir = if (subdir) |dir| brk: { + break :brk bun.MakePath.makeOpenPath(destination_dir, dir, .{}) catch |err| return Result{ .fail = .{ - .err = bun.errnoToZigErr(err.errno), + .err = err, .step = .linking_dependency, }, }; - }, - .result => {}, - } - } else { - var dest_dir = if (subdir) |dir| brk: { - break :brk bun.MakePath.makeOpenPath(destination_dir, dir, .{}) catch |err| return Result{ + } else destination_dir; + defer { + if (subdir != null) dest_dir.close(); + } + + const dest_dir_path = bun.getFdPath(dest_dir.fd, &dest_buf) catch |err| return Result{ + .fail = .{ + .err = err, + .step = .linking_dependency, + }, + }; + + const target = Path.relative(dest_dir_path, to_path); + std.os.symlinkat(target, dest_dir.fd, dest) catch |err| return Result{ .fail = .{ .err = err, .step = .linking_dependency, }, }; - } else destination_dir; - defer { - if (subdir != null) dest_dir.close(); } - const dest_dir_path = bun.getFdPath(dest_dir.fd, &dest_buf) catch |err| return Result{ + if (isDanglingSymlink(symlinked_path)) return Result{ .fail = .{ - .err = err, + .err = error.DanglingSymlink, .step = .linking_dependency, }, }; - const target = Path.relative(dest_dir_path, to_path); - std.os.symlinkat(target, dest_dir.fd, dest) catch |err| return Result{ - .fail = .{ - .err = err, - .step = .linking_dependency, + return Result{ + .success = {}, + }; + } + + pub fn getInstallMethod(this: *const @This()) Method { + return if (strings.eqlComptime(this.cache_dir_subpath, ".") or strings.hasPrefixComptime(this.cache_dir_subpath, "..")) + Method.symlink + else + supported_method; + } + + pub fn packageMissingFromCache(this: *@This(), manager: *PackageManager, package_id: PackageID) bool { + const state = manager.getPreinstallState(package_id); + return switch (state) { + .done => false, + else => brk: { + if (this.patch.isNull()) { + const exists = Syscall.directoryExistsAt(this.cache_dir.fd, this.cache_dir_subpath).unwrap() catch false; + if (exists) manager.setPreinstallState(package_id, manager.lockfile, .done); + break :brk !exists; + } + const cache_dir_subpath_without_patch_hash = this.cache_dir_subpath[0 .. std.mem.lastIndexOf(u8, this.cache_dir_subpath, "_patch_hash=") orelse @panic("Patched dependency cache dir subpath does not have the \"_patch_hash=HASH\" suffix. 
This is a bug, please file a GitHub issue.")]; + @memcpy(bun.path.join_buf[0..cache_dir_subpath_without_patch_hash.len], cache_dir_subpath_without_patch_hash); + bun.path.join_buf[cache_dir_subpath_without_patch_hash.len] = 0; + const exists = Syscall.directoryExistsAt(this.cache_dir.fd, bun.path.join_buf[0..cache_dir_subpath_without_patch_hash.len :0]).unwrap() catch false; + if (exists) manager.setPreinstallState(package_id, manager.lockfile, .done); + break :brk !exists; }, }; } - if (isDanglingSymlink(symlinked_path)) return Result{ - .fail = .{ - .err = error.DanglingSymlink, - .step = .linking_dependency, - }, - }; + fn patchedPackageMissingFromCache(this: *@This(), manager: *PackageManager, package_id: PackageID, patchfile_hash: u64) bool { + _ = patchfile_hash; // autofix - return Result{ - .success = {}, - }; - } - - pub fn getInstallMethod(this: *const PackageInstall) Method { - return if (strings.eqlComptime(this.cache_dir_subpath, ".") or strings.hasPrefixComptime(this.cache_dir_subpath, "..")) - Method.symlink - else - supported_method; - } - - pub fn packageMissingFromCache(this: *PackageInstall, manager: *PackageManager, package_id: PackageID) bool { - return switch (manager.getPreinstallState(package_id)) { - .done => false, - else => brk: { - const exists = Syscall.directoryExistsAt(this.cache_dir.fd, this.cache_dir_subpath).unwrap() catch false; - if (exists) manager.setPreinstallState(package_id, manager.lockfile, .done); - break :brk !exists; - }, - }; - } - - pub fn install(this: *PackageInstall, skip_delete: bool, destination_dir: std.fs.Dir) Result { - // If this fails, we don't care. - // we'll catch it the next error - if (!skip_delete and !strings.eqlComptime(this.destination_dir_subpath, ".")) this.uninstallBeforeInstall(destination_dir); - - var supported_method_to_use = this.getInstallMethod(); - - switch (supported_method_to_use) { - .clonefile => { - if (comptime Environment.isMac) { - - // First, attempt to use clonefile - // if that fails due to ENOTSUP, mark it as unsupported and then fall back to copyfile - if (this.installWithClonefile(destination_dir)) |result| { - return result; - } else |err| { - switch (err) { - error.NotSupported => { - supported_method = .copyfile; - supported_method_to_use = .copyfile; - }, - error.FileNotFound => return Result{ - .fail = .{ .err = error.FileNotFound, .step = .opening_cache_dir }, - }, - else => return Result{ - .fail = .{ .err = err, .step = .copying_files }, - }, - } - } - } - }, - .clonefile_each_dir => { - if (comptime Environment.isMac) { - if (this.installWithClonefileEachDir(destination_dir)) |result| { - return result; - } else |err| { - switch (err) { - error.NotSupported => { - supported_method = .copyfile; - supported_method_to_use = .copyfile; - }, - error.FileNotFound => return Result{ - .fail = .{ .err = error.FileNotFound, .step = .opening_cache_dir }, - }, - else => return Result{ - .fail = .{ .err = err, .step = .copying_files }, - }, - } - } - } - }, - .hardlink => { - if (this.installWithHardlink(destination_dir)) |result| { - return result; - } else |err| outer: { - if (comptime !Environment.isWindows) { - if (err == error.NotSameFileSystem) { - supported_method = .copyfile; - supported_method_to_use = .copyfile; - break :outer; - } - } - - switch (err) { - error.FileNotFound => return Result{ - .fail = .{ .err = error.FileNotFound, .step = .opening_cache_dir }, - }, - else => return Result{ - .fail = .{ .err = err, .step = .copying_files }, - }, - } - } - }, - .symlink => { - if (comptime 
Environment.isWindows) {
- supported_method_to_use = .copyfile;
- } else {
- if (this.installWithSymlink(destination_dir)) |result| {
- return result;
- } else |err| {
- switch (err) {
- error.FileNotFound => return Result{
- .fail = .{ .err = error.FileNotFound, .step = .opening_cache_dir },
- },
- else => return Result{
- .fail = .{ .err = err, .step = .copying_files },
- },
- }
- }
- }
- },
- else => {},
+ // const patch_hash_prefix = "_patch_hash=";
+ // var patch_hash_part_buf: [patch_hash_prefix.len + max_buntag_hash_buf_len + 1]u8 = undefined;
+ // @memcpy(patch_hash_part_buf[0..patch_hash_prefix.len], patch_hash_prefix);
+ // const hash_str = std.fmt.bufPrint(patch_hash_part_buf[patch_hash_prefix.len..], "{x}", .{patchfile_hash}) catch unreachable;
+ // const patch_hash_part = patch_hash_part_buf[0 .. patch_hash_prefix.len + hash_str.len];
+ // @memcpy(bun.path.join_buf[0..this.cache_dir_subpath.len], this.cache_dir_subpath);
+ // @memcpy(bun.path.join_buf[this.cache_dir_subpath.len .. this.cache_dir_subpath.len + patch_hash_part.len], patch_hash_part);
+ // bun.path.join_buf[this.cache_dir_subpath.len + patch_hash_part.len] = 0;
+ // const patch_cache_dir_subpath = bun.path.join_buf[0 .. this.cache_dir_subpath.len + patch_hash_part.len :0];
+ const exists = Syscall.directoryExistsAt(this.cache_dir.fd, this.cache_dir_subpath).unwrap() catch false;
+ if (exists) manager.setPreinstallState(package_id, manager.lockfile, .done);
+ return !exists;
 }
 
- if (supported_method_to_use != .copyfile) return Result{
- .success = {},
- };
+ pub fn install(this: *@This(), skip_delete: bool, destination_dir: std.fs.Dir) Result {
+ // If this fails, we don't care.
+ // we'll catch it on the next error
+ if (!skip_delete and !strings.eqlComptime(this.destination_dir_subpath, ".")) this.uninstallBeforeInstall(destination_dir);
-
- // TODO: linux io_uring
- return this.installWithCopyfile(destination_dir);
- }
-};
+ if (comptime kind == .regular) return this.installImpl(skip_delete, destination_dir, this.getInstallMethod());
+
+ const result = this.installImpl(skip_delete, destination_dir, this.getInstallMethod());
+ if (result == .fail) return result;
+ const fd = bun.toFD(destination_dir.fd);
+ const subpath = bun.path.joinZ(&[_][]const u8{ this.destination_dir_subpath, ".bun-patch-tag" }, .auto);
+ const tag_fd = switch (bun.sys.openat(fd, subpath, std.os.O.CREAT | std.os.O.WRONLY | std.os.O.TRUNC, 0o666)) {
+ .err => |e| return .{ .fail = .{ .err = bun.errnoToZigErr(e.getErrno()), .step = Step.patching } },
+ .result => |f| f,
+ };
+ defer _ = bun.sys.close(tag_fd);
+
+ if (bun.sys.File.writeAll(.{ .handle = tag_fd }, this.package_version).asErr()) |e| return .{ .fail = .{ .err = bun.errnoToZigErr(e.getErrno()), .step = Step.patching } };
+ return result;
+ }
+
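For the `patch` kind, `install` above writes `this.package_version` into a `.bun-patch-tag` file inside the freshly installed package, so a later install can cheaply tell whether the patched copy in `node_modules` is still current. A minimal sketch of that kind of check, using only the Zig standard library (`isPatchedInstallCurrent` is a hypothetical name, not a helper defined by this patch):

```zig
const std = @import("std");

/// Hypothetical helper: returns true when `pkg_dir` contains a
/// `.bun-patch-tag` file whose contents equal `expected_version`.
fn isPatchedInstallCurrent(pkg_dir: std.fs.Dir, expected_version: []const u8) bool {
    var buf: [512]u8 = undefined;
    // `readFile` fills `buf` and returns the slice it wrote; a missing
    // tag file surfaces as an error and counts as "not current".
    const tag = pkg_dir.readFile(".bun-patch-tag", &buf) catch return false;
    return std.mem.eql(u8, tag, expected_version);
}
```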
+ pub fn installWithMethod(this: *@This(), skip_delete: bool, destination_dir: std.fs.Dir, method: Method) Result {
+ // If this fails, we don't care.
+ // we'll catch it on the next error
+ if (!skip_delete and !strings.eqlComptime(this.destination_dir_subpath, ".")) this.uninstallBeforeInstall(destination_dir);
+
+ if (comptime kind == .regular) return this.installImpl(skip_delete, destination_dir, method);
+
+ const result = this.installImpl(skip_delete, destination_dir, method);
+ if (result == .fail) return result;
+ const fd = bun.toFD(destination_dir.fd);
+ const subpath = bun.path.joinZ(&[_][]const u8{ this.destination_dir_subpath, ".bun-patch-tag" }, .auto);
+ const tag_fd = switch (bun.sys.openat(fd, subpath, std.os.O.CREAT | std.os.O.WRONLY | std.os.O.TRUNC, 0o666)) {
+ .err => |e| return .{ .fail = .{ .err = bun.errnoToZigErr(e.getErrno()), .step = Step.patching } },
+ .result => |f| f,
+ };
+ defer _ = bun.sys.close(tag_fd);
+ if (bun.sys.File.writeAll(.{ .handle = tag_fd }, this.package_version).asErr()) |e| return .{ .fail = .{ .err = bun.errnoToZigErr(e.getErrno()), .step = Step.patching } };
+ return result;
+ }
+
+ pub fn installImpl(this: *@This(), skip_delete: bool, destination_dir: std.fs.Dir, method_: Method) Result {
+ // If this fails, we don't care.
+ // we'll catch it on the next error
+ if (!skip_delete and !strings.eqlComptime(this.destination_dir_subpath, ".")) this.uninstallBeforeInstall(destination_dir);
+ defer {
+ if (kind == .patch) {
+ const fd = bun.toFD(destination_dir.fd);
+ _ = fd; // autofix
+ }
+ }
+
+ var supported_method_to_use = method_;
+
+ switch (supported_method_to_use) {
+ .clonefile => {
+ if (comptime Environment.isMac) {
+
+ // First, attempt to use clonefile
+ // if that fails due to ENOTSUP, mark it as unsupported and then fall back to copyfile
+ if (this.installWithClonefile(destination_dir)) |result| {
+ return result;
+ } else |err| {
+ switch (err) {
+ error.NotSupported => {
+ supported_method = .copyfile;
+ supported_method_to_use = .copyfile;
+ },
+ error.FileNotFound => return Result{
+ .fail = .{ .err = error.FileNotFound, .step = .opening_cache_dir },
+ },
+ else => return Result{
+ .fail = .{ .err = err, .step = .copying_files },
+ },
+ }
+ }
+ }
+ },
+ .clonefile_each_dir => {
+ if (comptime Environment.isMac) {
+ if (this.installWithClonefileEachDir(destination_dir)) |result| {
+ return result;
+ } else |err| {
+ switch (err) {
+ error.NotSupported => {
+ supported_method = .copyfile;
+ supported_method_to_use = .copyfile;
+ },
+ error.FileNotFound => return Result{
+ .fail = .{ .err = error.FileNotFound, .step = .opening_cache_dir },
+ },
+ else => return Result{
+ .fail = .{ .err = err, .step = .copying_files },
+ },
+ }
+ }
+ }
+ },
+ .hardlink => {
+ if (this.installWithHardlink(destination_dir)) |result| {
+ return result;
+ } else |err| outer: {
+ if (comptime !Environment.isWindows) {
+ if (err == error.NotSameFileSystem) {
+ supported_method = .copyfile;
+ supported_method_to_use = .copyfile;
+ break :outer;
+ }
+ }
+
+ switch (err) {
+ error.FileNotFound => return Result{
+ .fail = .{ .err = error.FileNotFound, .step = .opening_cache_dir },
+ },
+ else => return Result{
+ .fail = .{ .err = err, .step = .copying_files },
+ },
+ }
+ }
+ },
+ .symlink => {
+ if (comptime Environment.isWindows) {
+ supported_method_to_use = .copyfile;
+ } else {
+ if (this.installWithSymlink(destination_dir)) |result| {
+ return result;
+ } else |err| {
+ switch (err) {
+ error.FileNotFound => return Result{
+ .fail = .{ .err = error.FileNotFound, .step = .opening_cache_dir },
+ },
+ else => return Result{
+ .fail = .{ .err = err, .step = .copying_files },
+ },
+ }
+ }
+ }
+ },
+ else => {},
+ }
+
+ if (supported_method_to_use != .copyfile) return Result{
+ .success = {},
+ };
+
+ // TODO: linux io_uring
+ return this.installWithCopyfile(destination_dir);
+ }
+ };
+}
 
 pub const Resolution = @import("./resolution.zig").Resolution;
 const Progress = std.Progress;
 
@@ -2473,6 +2651,10 @@ pub const PackageManager = struct {
 network_tarball_batch: ThreadPool.Batch = .{},
 network_resolve_batch: ThreadPool.Batch = .{},
 network_task_fifo: NetworkQueue = undefined,
+ patch_apply_batch: ThreadPool.Batch = .{},
+ patch_calc_hash_batch: ThreadPool.Batch = .{},
+ patch_task_fifo: PatchTaskFifo = PatchTaskFifo.init(),
+ patch_task_queue: PatchTaskQueue = .{},
 pending_tasks: std.atomic.Value(u32) = std.atomic.Value(u32).init(0),
 total_tasks: u32 = 0,
 preallocated_network_tasks: PreallocatedNetworkTasks = PreallocatedNetworkTasks.init(bun.default_allocator),
@@ -2668,6 +2850,7 @@ pub const PackageManager = struct {
 
 pub var verbose_install = false;
 
+ pub const PatchTaskQueue = bun.UnboundedQueue(PatchTask, .next);
 pub const AsyncNetworkTaskQueue = bun.UnboundedQueue(NetworkTask, .next);
 
 pub const ScriptRunEnvironment = struct {
@@ -3071,7 +3254,14 @@ pub const PackageManager = struct {
 }
 return this.preinstall_state.items[package_id];
 }
- pub fn determinePreinstallState(manager: *PackageManager, this: Package, lockfile: *Lockfile) PreinstallState {
+
+ pub fn determinePreinstallState(
+ manager: *PackageManager,
+ this: Package,
+ lockfile: *Lockfile,
+ out_name_and_version_hash: *?u64,
+ out_patchfile_hash: *?u64,
+ ) PreinstallState {
 switch (manager.getPreinstallState(this.meta.id)) {
 .unknown => {
@@ -3082,12 +3272,34 @@ pub const PackageManager = struct {
 return .done;
 }
 
+ const patch_hash: ?u64 = brk: {
+ if (manager.lockfile.patched_dependencies.entries.len == 0) break :brk null;
+ var sfb = std.heap.stackFallback(1024, manager.lockfile.allocator);
+ const name_and_version = std.fmt.allocPrint(
+ sfb.get(),
+ "{s}@{}",
+ .{
+ this.name.slice(manager.lockfile.buffers.string_bytes.items),
+ this.resolution.fmt(manager.lockfile.buffers.string_bytes.items, .posix),
+ },
+ ) catch unreachable;
+ const name_and_version_hash = String.Builder.stringHash(name_and_version);
+ const patched_dep = manager.lockfile.patched_dependencies.get(name_and_version_hash) orelse break :brk null;
+ defer out_name_and_version_hash.* = name_and_version_hash;
+ if (patched_dep.patchfile_hash_is_null) {
+ manager.setPreinstallState(this.meta.id, manager.lockfile, .calc_patch_hash);
+ return .calc_patch_hash;
+ }
+ out_patchfile_hash.* = patched_dep.patchfileHash().?;
+ break :brk patched_dep.patchfileHash().?;
+ };
+
 const folder_path = switch (this.resolution.tag) {
- .git => manager.cachedGitFolderNamePrintAuto(&this.resolution.value.git),
- .github => manager.cachedGitHubFolderNamePrintAuto(&this.resolution.value.github),
- .npm => manager.cachedNPMPackageFolderName(lockfile.str(&this.name), this.resolution.value.npm.version),
- .local_tarball => manager.cachedTarballFolderName(this.resolution.value.local_tarball),
- .remote_tarball => manager.cachedTarballFolderName(this.resolution.value.remote_tarball),
+ .git => manager.cachedGitFolderNamePrintAuto(&this.resolution.value.git, patch_hash),
+ .github => manager.cachedGitHubFolderNamePrintAuto(&this.resolution.value.github, patch_hash),
+ .npm => manager.cachedNPMPackageFolderName(lockfile.str(&this.name), this.resolution.value.npm.version, patch_hash),
+ .local_tarball => manager.cachedTarballFolderName(this.resolution.value.local_tarball, patch_hash),
+ .remote_tarball => manager.cachedTarballFolderName(this.resolution.value.remote_tarball, patch_hash),
 else => "",
 };
@@ -3101,6 +3313,28 @@ pub const PackageManager = struct {
 return .done;
 }
 
+ // If the package is patched, then `folder_path` looks like:
+ // is-even@1.0.0_patch_hash=abc8s6dedhsddfkahaldfjhlj
+ //
+ // If that's not in the cache, we need to put it there:
+ // 1. extract the non-patched pkg in the cache
+ // 2. copy non-patched pkg into temp dir
+ // 3. apply patch to temp dir
+ // 4. rename temp dir to `folder_path`
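The convention described above keeps a patched copy alongside the pristine one in the cache, distinguished only by a `_patch_hash=` suffix on the folder name. A minimal sketch of that key derivation (`patchedCacheFolderName` is illustrative, not one of Bun's helpers; the real code routes the suffix through `PatchHashFmt`, shown later in this diff):

```zig
const std = @import("std");

// Illustrative only: append `_patch_hash={x}` exactly when a patch
// hash is present, mirroring the folder-name convention in this hunk.
fn patchedCacheFolderName(
    buf: []u8,
    name: []const u8,
    version: []const u8,
    patch_hash: ?u64,
) ![]const u8 {
    if (patch_hash) |hash| {
        return std.fmt.bufPrint(buf, "{s}@{s}_patch_hash={x}", .{ name, version, hash });
    }
    return std.fmt.bufPrint(buf, "{s}@{s}", .{ name, version });
}

test "patchedCacheFolderName" {
    var buf: [256]u8 = undefined;
    const got = try patchedCacheFolderName(&buf, "is-even", "1.0.0", 0xabc123);
    try std.testing.expectEqualStrings("is-even@1.0.0_patch_hash=abc123", got);
}
```

Note that steps 1-4 are not executed inline below: the code only classifies the package as `.apply_patch` or `.extract`, and the installer's preinstall state machine performs the remaining steps.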
+ if (patch_hash != null) {
+ const non_patched_path_ = folder_path[0 .. std.mem.indexOf(u8, folder_path, "_patch_hash=") orelse @panic("Expected the cache folder path of a patched package to contain \"_patch_hash=\". This is a bug, please file a GitHub issue.")];
+ const non_patched_path = manager.lockfile.allocator.dupeZ(u8, non_patched_path_) catch bun.outOfMemory();
+ defer manager.lockfile.allocator.free(non_patched_path);
+ if (manager.isFolderInCache(non_patched_path)) {
+ manager.setPreinstallState(this.meta.id, manager.lockfile, .apply_patch);
+ // yay step 1 is already done for us
+ return .apply_patch;
+ }
+ // we need to extract non-patched pkg into the cache
+ manager.setPreinstallState(this.meta.id, lockfile, .extract);
+ return .extract;
+ }
+
 manager.setPreinstallState(this.meta.id, lockfile, .extract);
 return .extract;
 },
@@ -3392,70 +3626,80 @@ pub const PackageManager = struct {
 ) catch unreachable;
 }
 
- pub fn cachedGitFolderNamePrint(buf: []u8, resolved: string) stringZ {
- return std.fmt.bufPrintZ(buf, "@G@{s}", .{resolved}) catch unreachable;
+ pub fn cachedGitFolderNamePrint(buf: []u8, resolved: string, patch_hash: ?u64) stringZ {
+ return std.fmt.bufPrintZ(buf, "@G@{s}{}", .{ resolved, PatchHashFmt{ .hash = patch_hash } }) catch unreachable;
 }
 
- pub fn cachedGitFolderName(this: *const PackageManager, repository: *const Repository) stringZ {
- return cachedGitFolderNamePrint(&cached_package_folder_name_buf, this.lockfile.str(&repository.resolved));
+ pub fn cachedGitFolderName(this: *const PackageManager, repository: *const Repository, patch_hash: ?u64) stringZ {
+ return cachedGitFolderNamePrint(&cached_package_folder_name_buf, this.lockfile.str(&repository.resolved), patch_hash);
 }
 
- pub fn cachedGitFolderNamePrintAuto(this: *const PackageManager, repository: *const Repository) stringZ {
+ pub const PatchHashFmt = struct {
+ hash: ?u64 = null,
+
+ pub fn format(this: *const PatchHashFmt, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
+ if (this.hash) |h| {
+ try writer.print("_patch_hash={x}", .{h});
+ }
+ }
+ };
+
+ pub fn cachedGitFolderNamePrintAuto(this: *const PackageManager, repository: *const Repository, patch_hash: ?u64) stringZ {
 if (!repository.resolved.isEmpty()) {
- return this.cachedGitFolderName(repository);
+ return this.cachedGitFolderName(repository, patch_hash);
 }
 
 if (!repository.repo.isEmpty() and !repository.committish.isEmpty()) {
 const string_buf = this.lockfile.buffers.string_bytes.items;
 return std.fmt.bufPrintZ(
 &cached_package_folder_name_buf,
- "@G@{any}",
- .{repository.committish.fmt(string_buf)},
+ "@G@{any}{}",
+ .{
+ repository.committish.fmt(string_buf),
+ PatchHashFmt{ .hash = patch_hash },
+ },
 ) catch unreachable;
 }
 
 return "";
 }
 
- pub fn cachedGitHubFolderNamePrint(buf: []u8, resolved: string) stringZ {
- return std.fmt.bufPrintZ(buf, "@GH@{s}", .{resolved}) catch unreachable;
+ pub fn cachedGitHubFolderNamePrint(buf: []u8, resolved: string, patch_hash: ?u64) stringZ {
+ return std.fmt.bufPrintZ(buf, "@GH@{s}{}", .{ resolved, PatchHashFmt{ .hash = patch_hash } }) catch
unreachable; } - pub fn cachedGitHubFolderName(this: *const PackageManager, repository: *const Repository) stringZ { - return cachedGitHubFolderNamePrint(&cached_package_folder_name_buf, this.lockfile.str(&repository.resolved)); + pub fn cachedGitHubFolderName(this: *const PackageManager, repository: *const Repository, patch_hash: ?u64) stringZ { + return cachedGitHubFolderNamePrint(&cached_package_folder_name_buf, this.lockfile.str(&repository.resolved), patch_hash); } - fn cachedGitHubFolderNamePrintGuess(buf: []u8, string_buf: []const u8, repository: *const Repository) stringZ { + fn cachedGitHubFolderNamePrintGuess(buf: []u8, string_buf: []const u8, repository: *const Repository, patch_hash: ?u64) stringZ { return std.fmt.bufPrintZ( buf, - "@GH@{any}-{any}-{any}", - .{ - repository.owner.fmt(string_buf), - repository.repo.fmt(string_buf), - repository.committish.fmt(string_buf), - }, + "@GH@{any}-{any}-{any}{}", + .{ repository.owner.fmt(string_buf), repository.repo.fmt(string_buf), repository.committish.fmt(string_buf), PatchHashFmt{ .hash = patch_hash } }, ) catch unreachable; } - pub fn cachedGitHubFolderNamePrintAuto(this: *const PackageManager, repository: *const Repository) stringZ { + pub fn cachedGitHubFolderNamePrintAuto(this: *const PackageManager, repository: *const Repository, patch_hash: ?u64) stringZ { if (!repository.resolved.isEmpty()) { - return this.cachedGitHubFolderName(repository); + return this.cachedGitHubFolderName(repository, patch_hash); } if (!repository.owner.isEmpty() and !repository.repo.isEmpty() and !repository.committish.isEmpty()) { - return cachedGitHubFolderNamePrintGuess(&cached_package_folder_name_buf, this.lockfile.buffers.string_bytes.items, repository); + return cachedGitHubFolderNamePrintGuess(&cached_package_folder_name_buf, this.lockfile.buffers.string_bytes.items, repository, patch_hash); } return ""; } // TODO: normalize to alphanumeric - pub fn cachedNPMPackageFolderNamePrint(this: *const PackageManager, buf: []u8, name: string, version: Semver.Version) stringZ { + pub fn cachedNPMPackageFolderNamePrint(this: *const PackageManager, buf: []u8, name: string, version: Semver.Version, patch_hash: ?u64) stringZ { const scope = this.scopeForPackageName(name); - const basename = cachedNPMPackageFolderPrintBasename(buf, name, version); + const basename = cachedNPMPackageFolderPrintBasename(buf, name, version, null); if (scope.name.len == 0 and !this.options.did_override_default_scope) { + if (patch_hash != null) return cachedNPMPackageFolderPrintBasename(buf, name, version, patch_hash); return basename; } @@ -3464,9 +3708,9 @@ pub const PackageManager = struct { var end: []u8 = undefined; if (scope.url.hostname.len > 32 or available.len < 64) { const visible_hostname = scope.url.hostname[0..@min(scope.url.hostname.len, 12)]; - end = std.fmt.bufPrint(available, "@@{s}__{any}", .{ visible_hostname, bun.fmt.hexIntLower(String.Builder.stringHash(scope.url.href)) }) catch unreachable; + end = std.fmt.bufPrint(available, "@@{s}__{any}{}", .{ visible_hostname, bun.fmt.hexIntLower(String.Builder.stringHash(scope.url.href)), PatchHashFmt{ .hash = patch_hash } }) catch unreachable; } else { - end = std.fmt.bufPrint(available, "@@{s}", .{scope.url.hostname}) catch unreachable; + end = std.fmt.bufPrint(available, "@@{s}{}", .{ scope.url.hostname, PatchHashFmt{ .hash = patch_hash } }) catch unreachable; } buf[spanned.len + end.len] = 0; @@ -3478,17 +3722,17 @@ pub const PackageManager = struct { return 
cachedNPMPackageFolderPrintBasename(&cached_package_folder_name_buf, name, version); } - pub fn cachedNPMPackageFolderName(this: *const PackageManager, name: string, version: Semver.Version) stringZ { - return this.cachedNPMPackageFolderNamePrint(&cached_package_folder_name_buf, name, version); + pub fn cachedNPMPackageFolderName(this: *const PackageManager, name: string, version: Semver.Version, patch_hash: ?u64) stringZ { + return this.cachedNPMPackageFolderNamePrint(&cached_package_folder_name_buf, name, version, patch_hash); } // TODO: normalize to alphanumeric - pub fn cachedNPMPackageFolderPrintBasename(buf: []u8, name: string, version: Semver.Version) stringZ { + pub fn cachedNPMPackageFolderPrintBasename(buf: []u8, name: string, version: Semver.Version, patch_hash: ?u64) stringZ { if (version.tag.hasPre()) { if (version.tag.hasBuild()) { return std.fmt.bufPrintZ( buf, - "{s}@{d}.{d}.{d}-{any}+{any}", + "{s}@{d}.{d}.{d}-{any}+{any}{}", .{ name, version.major, @@ -3496,48 +3740,52 @@ pub const PackageManager = struct { version.patch, bun.fmt.hexIntLower(version.tag.pre.hash), bun.fmt.hexIntUpper(version.tag.build.hash), + PatchHashFmt{ .hash = patch_hash }, }, ) catch unreachable; } return std.fmt.bufPrintZ( buf, - "{s}@{d}.{d}.{d}-{any}", + "{s}@{d}.{d}.{d}-{any}{}", .{ name, version.major, version.minor, version.patch, bun.fmt.hexIntLower(version.tag.pre.hash), + PatchHashFmt{ .hash = patch_hash }, }, ) catch unreachable; } if (version.tag.hasBuild()) { return std.fmt.bufPrintZ( buf, - "{s}@{d}.{d}.{d}+{any}", + "{s}@{d}.{d}.{d}+{any}{}", .{ name, version.major, version.minor, version.patch, bun.fmt.hexIntUpper(version.tag.build.hash), + PatchHashFmt{ .hash = patch_hash }, }, ) catch unreachable; } - return std.fmt.bufPrintZ(buf, "{s}@{d}.{d}.{d}", .{ + return std.fmt.bufPrintZ(buf, "{s}@{d}.{d}.{d}{}", .{ name, version.major, version.minor, version.patch, + PatchHashFmt{ .hash = patch_hash }, }) catch unreachable; } - pub fn cachedTarballFolderNamePrint(buf: []u8, url: string) stringZ { - return std.fmt.bufPrintZ(buf, "@T@{any}", .{bun.fmt.hexIntLower(String.Builder.stringHash(url))}) catch unreachable; + pub fn cachedTarballFolderNamePrint(buf: []u8, url: string, patch_hash: ?u64) stringZ { + return std.fmt.bufPrintZ(buf, "@T@{any}{}", .{ bun.fmt.hexIntLower(String.Builder.stringHash(url)), PatchHashFmt{ .hash = patch_hash } }) catch unreachable; } - pub fn cachedTarballFolderName(this: *const PackageManager, url: String) stringZ { - return cachedTarballFolderNamePrint(&cached_package_folder_name_buf, this.lockfile.str(&url)); + pub fn cachedTarballFolderName(this: *const PackageManager, url: String, patch_hash: ?u64) stringZ { + return cachedTarballFolderNamePrint(&cached_package_folder_name_buf, this.lockfile.str(&url), patch_hash); } pub fn isFolderInCache(this: *PackageManager, folder_path: stringZ) bool { @@ -3590,6 +3838,139 @@ pub const PackageManager = struct { } } + /// this is copy pasted from `installPackageWithNameAndResolution()` + /// it's not great to do this + pub fn computeCacheDirAndSubpath( + manager: *PackageManager, + pkg_name: string, + resolution: *const Resolution, + patch_hash: ?u64, + ) struct { cache_dir: std.fs.Dir, cache_dir_subpath: stringZ } { + const name = pkg_name; + const buf = manager.lockfile.buffers.string_bytes.items; + _ = buf; // autofix + var cache_dir = std.fs.cwd(); + var cache_dir_subpath: stringZ = ""; + + switch (resolution.tag) { + .npm => { + cache_dir_subpath = manager.cachedNPMPackageFolderName(name, resolution.value.npm.version, 
patch_hash); + cache_dir = manager.getCacheDirectory(); + }, + .git => { + cache_dir_subpath = manager.cachedGitFolderName( + &resolution.value.git, + patch_hash, + ); + cache_dir = manager.getCacheDirectory(); + }, + .github => { + cache_dir_subpath = manager.cachedGitHubFolderName(&resolution.value.github, patch_hash); + cache_dir = manager.getCacheDirectory(); + }, + .folder => { + @panic("TODO @zack fix"); + // const folder = resolution.value.folder.slice(buf); + // // Handle when a package depends on itself via file: + // // example: + // // "mineflayer": "file:." + // if (folder.len == 0 or (folder.len == 1 and folder[0] == '.')) { + // cache_dir_subpath = "."; + // } else { + // @memcpy(manager.folder_path_buf[0..folder.len], folder); + // this.folder_path_buf[folder.len] = 0; + // cache_dir_subpath = this.folder_path_buf[0..folder.len :0]; + // } + // cache_dir = std.fs.cwd(); + }, + .local_tarball => { + cache_dir_subpath = manager.cachedTarballFolderName(resolution.value.local_tarball, patch_hash); + cache_dir = manager.getCacheDirectory(); + }, + .remote_tarball => { + cache_dir_subpath = manager.cachedTarballFolderName(resolution.value.remote_tarball, patch_hash); + cache_dir = manager.getCacheDirectory(); + }, + .workspace => { + @panic("TODO @zack fix"); + // const folder = resolution.value.workspace.slice(buf); + // // Handle when a package depends on itself + // if (folder.len == 0 or (folder.len == 1 and folder[0] == '.')) { + // cache_dir_subpath = "."; + // } else { + // @memcpy(this.folder_path_buf[0..folder.len], folder); + // this.folder_path_buf[folder.len] = 0; + // cache_dir_subpath = this.folder_path_buf[0..folder.len :0]; + // } + // cache_dir = std.fs.cwd(); + }, + .symlink => { + @panic("TODO @zack fix"); + // const directory = manager.globalLinkDir() catch |err| { + // if (comptime log_level != .silent) { + // const fmt = "\nerror: unable to access global directory while installing {s}: {s}\n"; + // const args = .{ name, @errorName(err) }; + + // if (comptime log_level.showProgress()) { + // switch (Output.enable_ansi_colors) { + // inline else => |enable_ansi_colors| { + // this.progress.log(comptime Output.prettyFmt(fmt, enable_ansi_colors), args); + // }, + // } + // } else { + // Output.prettyErrorln(fmt, args); + // } + // } + + // if (manager.options.enable.fail_early) { + // Global.exit(1); + // } + + // Output.flush(); + // this.summary.fail += 1; + // this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); + // return; + // }; + + // const folder = resolution.value.symlink.slice(buf); + + // if (folder.len == 0 or (folder.len == 1 and folder[0] == '.')) { + // cache_dir_subpath = "."; + // cache_dir = std.fs.cwd(); + // } else { + // const global_link_dir = manager.globalLinkDirPath() catch unreachable; + // var ptr = &this.folder_path_buf; + // var remain: []u8 = this.folder_path_buf[0..]; + // @memcpy(ptr[0..global_link_dir.len], global_link_dir); + // remain = remain[global_link_dir.len..]; + // if (global_link_dir[global_link_dir.len - 1] != std.fs.path.sep) { + // remain[0] = std.fs.path.sep; + // remain = remain[1..]; + // } + // @memcpy(remain[0..folder.len], folder); + // remain = remain[folder.len..]; + // remain[0] = 0; + // const len = @intFromPtr(remain.ptr) - @intFromPtr(ptr); + // cache_dir_subpath = this.folder_path_buf[0..len :0]; + // cache_dir = directory; + // } + }, + else => { + @panic("TODO @zack fix"); + // if (comptime Environment.allow_assert) { + // @panic("Internal assertion failure: 
unexpected resolution tag"); + // } + // this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); + // return; + }, + } + + return .{ + .cache_dir = cache_dir, + .cache_dir_subpath = cache_dir_subpath, + }; + } + pub fn getInstalledVersionsFromDiskCache(this: *PackageManager, tags_buf: *std.ArrayList(u8), package_name: []const u8, allocator: std.mem.Allocator) !std.ArrayList(Semver.Version) { var list = std.ArrayList(Semver.Version).init(allocator); var dir = this.getCacheDirectory().openDir(package_name, .{ @@ -3692,8 +4073,13 @@ pub const PackageManager = struct { /// Is this the first time we've seen this package? is_first_time: bool = false, - /// Pending network task to schedule - network_task: ?*NetworkTask = null, + task: ?union(enum) { + /// Pending network task to schedule + network_task: *NetworkTask, + + /// Apply patch task or calc patch hash task + patch_task: *PatchTask, + } = null, }; fn getOrPutResolvedPackageWithFindResult( @@ -3754,7 +4140,16 @@ pub const PackageManager = struct { if (comptime Environment.allow_assert) bun.assert(package.meta.id != invalid_package_id); defer successFn(this, dependency_id, package.meta.id); - return switch (this.determinePreinstallState(package, this.lockfile)) { + // non-null if the package is in "patchedDependencies" + var name_and_version_hash: ?u64 = null; + var patchfile_hash: ?u64 = null; + + return switch (this.determinePreinstallState( + package, + this.lockfile, + &name_and_version_hash, + &patchfile_hash, + )) { // Is this package already in the cache? // We don't need to download the tarball, but we should enqueue dependencies .done => .{ .package = package, .is_first_time = true }, @@ -3762,15 +4157,45 @@ pub const PackageManager = struct { .extract => .{ .package = package, .is_first_time = true, - .network_task = try this.generateNetworkTaskForTarball( - Task.Id.forNPMPackage( - this.lockfile.str(&name), - package.resolution.value.npm.version, + .task = .{ + .network_task = try this.generateNetworkTaskForTarball( + Task.Id.forNPMPackage( + this.lockfile.str(&name), + package.resolution.value.npm.version, + ), + manifest.str(&find_result.package.tarball_url), + dependency_id, + package, + name_and_version_hash, + ) orelse unreachable, + }, + }, + .calc_patch_hash => .{ + .package = package, + .is_first_time = true, + .task = .{ + .patch_task = PatchTask.newCalcPatchHash( + this, + name_and_version_hash.?, + .{ + .pkg_id = package.meta.id, + .dependency_id = dependency_id, + .url = this.allocator.dupe(u8, manifest.str(&find_result.package.tarball_url)) catch bun.outOfMemory(), + }, ), - manifest.str(&find_result.package.tarball_url), - dependency_id, - package, - ) orelse unreachable, + }, + }, + .apply_patch => .{ + .package = package, + .is_first_time = true, + .task = .{ + .patch_task = PatchTask.newApplyPatchHash( + this, + package.meta.id, + patchfile_hash.?, + name_and_version_hash.?, + ), + }, }, else => unreachable, }; @@ -3787,6 +4212,8 @@ pub const PackageManager = struct { url: string, dependency_id: DependencyID, package: Lockfile.Package, + /// if patched then we need to do apply step after network task is done + patch_name_and_version_hash: ?u64, ) !?*NetworkTask { if (this.hasCreatedNetworkTask(task_id)) { return null; @@ -3799,6 +4226,12 @@ pub const PackageManager = struct { .callback = undefined, .allocator = this.allocator, .package_manager = this, + .apply_patch_task = if (patch_name_and_version_hash) |h| brk: { + const patch_hash = 
this.lockfile.patched_dependencies.get(h).?.patchfileHash().?; + const task = PatchTask.newApplyPatchHash(this, package.meta.id, patch_hash, h); + task.callback.apply.task_id = task_id; + break :brk task; + } else null, }; const scope = this.scopeForPackageName(this.lockfile.str(&package.name)); @@ -3829,7 +4262,7 @@ pub const PackageManager = struct { return network_task; } - fn enqueueNetworkTask(this: *PackageManager, task: *NetworkTask) void { + pub fn enqueueNetworkTask(this: *PackageManager, task: *NetworkTask) void { if (this.network_task_fifo.writableLength() == 0) { this.flushNetworkQueue(); } @@ -3837,6 +4270,15 @@ pub const PackageManager = struct { this.network_task_fifo.writeItemAssumeCapacity(task); } + pub fn enqueuePatchTask(this: *PackageManager, task: *PatchTask) void { + debug("Enqueue patch task: 0x{x} {s}", .{ @intFromPtr(task), @tagName(task.callback) }); + if (this.patch_task_fifo.writableLength() == 0) { + this.flushPatchTaskQueue(); + } + + this.patch_task_fifo.writeItemAssumeCapacity(task); + } + const SuccessFn = *const fn (*PackageManager, DependencyID, PackageID) void; const FailFn = *const fn (*PackageManager, *const Dependency, PackageID, anyerror) void; fn assignResolution(this: *PackageManager, dependency_id: DependencyID, package_id: PackageID) void { @@ -4199,6 +4641,9 @@ pub const PackageManager = struct { task_id: u64, name: string, repository: *const Repository, + dependency: *const Dependency, + /// if patched then we need to do apply step after network task is done + patch_name_and_version_hash: ?u64, ) *ThreadPool.Task { var task = this.preallocated_resolve_tasks.get(); task.* = Task{ @@ -4220,6 +4665,17 @@ pub const PackageManager = struct { }, }, .id = task_id, + .apply_patch_task = if (patch_name_and_version_hash) |h| brk: { + const dep = dependency; + const pkg_id = switch (this.lockfile.package_index.get(dep.name_hash) orelse @panic("Package not found")) { + .PackageID => |p| p, + .PackageIDMultiple => |ps| ps.items[0], // TODO is this correct + }; + const patch_hash = this.lockfile.patched_dependencies.get(h).?.patchfileHash().?; + const pt = PatchTask.newApplyPatchHash(this, pkg_id, patch_hash, h); + pt.callback.apply.task_id = task_id; + break :brk pt; + } else null, .data = undefined, }; return &task.threadpool_task; @@ -4233,6 +4689,8 @@ pub const PackageManager = struct { name: string, resolution: Resolution, resolved: string, + /// if patched then we need to do apply step after network task is done + patch_name_and_version_hash: ?u64, ) *ThreadPool.Task { var task = this.preallocated_resolve_tasks.get(); task.* = Task{ @@ -4261,6 +4719,17 @@ pub const PackageManager = struct { ) catch unreachable, }, }, + .apply_patch_task = if (patch_name_and_version_hash) |h| brk: { + const dep = this.lockfile.buffers.dependencies.items[dependency_id]; + const pkg_id = switch (this.lockfile.package_index.get(dep.name_hash) orelse @panic("Package not found")) { + .PackageID => |p| p, + .PackageIDMultiple => |ps| ps.items[0], // TODO is this correct + }; + const patch_hash = this.lockfile.patched_dependencies.get(h).?.patchfileHash().?; + const pt = PatchTask.newApplyPatchHash(this, pkg_id, patch_hash, h); + pt.callback.apply.task_id = task_id; + break :brk pt; + } else null, .id = task_id, .data = undefined, }; @@ -4559,10 +5028,23 @@ pub const PackageManager = struct { } } - if (result.network_task) |network_task| { - if (this.getPreinstallState(result.package.meta.id) == .extract) { - this.setPreinstallState(result.package.meta.id, this.lockfile, 
.extracting); - this.enqueueNetworkTask(network_task); + if (result.task != null) { + switch (result.task.?) { + .network_task => |network_task| { + if (this.getPreinstallState(result.package.meta.id) == .extract) { + this.setPreinstallState(result.package.meta.id, this.lockfile, .extracting); + this.enqueueNetworkTask(network_task); + } + }, + .patch_task => |patch_task| { + if (patch_task.callback == .calc_hash and this.getPreinstallState(result.package.meta.id) == .calc_patch_hash) { + this.setPreinstallState(result.package.meta.id, this.lockfile, .calcing_patch_hash); + this.enqueuePatchTask(patch_task); + } else if (patch_task.callback == .apply and this.getPreinstallState(result.package.meta.id) == .apply_patch) { + this.setPreinstallState(result.package.meta.id, this.lockfile, .applying_patch); + this.enqueuePatchTask(patch_task); + } + }, } } @@ -4744,6 +5226,7 @@ pub const PackageManager = struct { alias, res, resolved, + null, ))); } else { var entry = this.task_queue.getOrPutContext(this.allocator, clone_id, .{}) catch unreachable; @@ -4754,7 +5237,7 @@ pub const PackageManager = struct { if (this.hasCreatedNetworkTask(clone_id)) return; - this.task_batch.push(ThreadPool.Batch.from(this.enqueueGitClone(clone_id, alias, dep))); + this.task_batch.push(ThreadPool.Batch.from(this.enqueueGitClone(clone_id, alias, dep, dependency, null))); } }, .github => { @@ -4804,11 +5287,17 @@ pub const PackageManager = struct { } } - if (try this.generateNetworkTaskForTarball(task_id, url, id, .{ - .name = dependency.name, - .name_hash = dependency.name_hash, - .resolution = res, - })) |network_task| { + if (try this.generateNetworkTaskForTarball( + task_id, + url, + id, + .{ + .name = dependency.name, + .name_hash = dependency.name_hash, + .resolution = res, + }, + null, + )) |network_task| { this.enqueueNetworkTask(network_task); } }, @@ -4868,7 +5357,7 @@ pub const PackageManager = struct { } // should not trigger a network call - if (comptime Environment.allow_assert) bun.assert(result.network_task == null); + if (comptime Environment.allow_assert) bun.assert(result.task == null); if (comptime Environment.allow_assert) debug( @@ -4998,11 +5487,17 @@ pub const PackageManager = struct { ))); }, .remote => { - if (try this.generateNetworkTaskForTarball(task_id, url, id, .{ - .name = dependency.name, - .name_hash = dependency.name_hash, - .resolution = res, - })) |network_task| { + if (try this.generateNetworkTaskForTarball( + task_id, + url, + id, + .{ + .name = dependency.name, + .name_hash = dependency.name_hash, + .resolution = res, + }, + null, + )) |network_task| { this.enqueueNetworkTask(network_task); } }, @@ -5020,6 +5515,14 @@ pub const PackageManager = struct { } } + fn flushPatchTaskQueue(this: *PackageManager) void { + var patch_task_fifo = &this.patch_task_fifo; + + while (patch_task_fifo.readItem()) |patch_task| { + patch_task.schedule(if (patch_task.callback == .apply) &this.patch_apply_batch else &this.patch_calc_hash_batch); + } + } + fn doFlushDependencyQueue(this: *PackageManager) void { var lockfile = this.lockfile; var dependency_queue = &lockfile.scratch.dependency_list_queue; @@ -5046,21 +5549,27 @@ pub const PackageManager = struct { this.flushNetworkQueue(); this.doFlushDependencyQueue(); this.flushNetworkQueue(); + this.flushPatchTaskQueue(); if (this.total_tasks == last_count) break; } } pub fn scheduleTasks(manager: *PackageManager) usize { - const count = manager.task_batch.len + manager.network_resolve_batch.len + manager.network_tarball_batch.len; + const count = 
manager.task_batch.len + manager.network_resolve_batch.len + manager.network_tarball_batch.len + manager.patch_apply_batch.len + manager.patch_calc_hash_batch.len; _ = manager.incrementPendingTasks(@truncate(count)); + manager.thread_pool.schedule(manager.patch_apply_batch); + manager.thread_pool.schedule(manager.patch_calc_hash_batch); manager.thread_pool.schedule(manager.task_batch); manager.network_resolve_batch.push(manager.network_tarball_batch); HTTP.http_thread.schedule(manager.network_resolve_batch); manager.task_batch = .{}; manager.network_tarball_batch = .{}; manager.network_resolve_batch = .{}; + manager.patch_apply_batch = .{}; + manager.patch_calc_hash_batch = .{}; + // TODO probably have to put patch tasks here return count; } @@ -5485,6 +5994,48 @@ pub const PackageManager = struct { var timestamp_this_tick: ?u32 = null; + var patch_tasks_batch = manager.patch_task_queue.popBatch(); + var patch_tasks_iter = patch_tasks_batch.iterator(); + while (patch_tasks_iter.next()) |ptask| { + if (comptime Environment.allow_assert) bun.assert(manager.pendingTaskCount() > 0); + _ = manager.decrementPendingTasks(); + defer ptask.deinit(); + try ptask.runFromMainThread(manager, log_level); + if (ptask.callback == .apply) { + if (comptime @TypeOf(callbacks.onExtract) != void) { + if (ptask.callback.apply.task_id) |task_id| { + _ = task_id; // autofix + + // const name = manager.lockfile.packages.items(.name)[ptask.callback.apply.pkg_id].slice(manager.lockfile.buffers.string_bytes.items); + // if (!callbacks.onPatch(extract_ctx, name, task_id, log_level)) { + // if (comptime Environment.allow_assert) { + // Output.panic("Ran callback to install enqueued packages, but there was no task associated with it.", .{}); + // } + // } + } else if (ExtractCompletionContext == *PackageInstaller) { + if (ptask.callback.apply.install_context) |*ctx| { + var installer: *PackageInstaller = extract_ctx; + const path = ctx.path; + ctx.path = std.ArrayList(u8).init(bun.default_allocator); + installer.node_modules.path = path; + installer.current_tree_id = ctx.tree_id; + const pkg_id = ptask.callback.apply.pkg_id; + + installer.installPackageWithNameAndResolution( + ctx.dependency_id, + pkg_id, + log_level, + ptask.callback.apply.pkgname, + ptask.callback.apply.resolution, + false, + false, + ); + } + } + } + } + } + var network_tasks_batch = manager.async_network_task_queue.popBatch(); var network_tasks_iter = network_tasks_batch.iterator(); while (network_tasks_iter.next()) |task| { @@ -5983,6 +6534,9 @@ pub const PackageManager = struct { manager.setPreinstallState(package_id, manager.lockfile, .done); + // if (task.tag == .extract and task.request.extract.network.apply_patch_task != null) { + // manager.enqueuePatchTask(task.request.extract.network.apply_patch_task.?); + // } else if (comptime @TypeOf(callbacks.onExtract) != void) { if (ExtractCompletionContext == *PackageInstaller) { extract_ctx.fixCachedLockfilePackageSlices(); @@ -6172,6 +6726,12 @@ pub const PackageManager = struct { .dev_dependencies = true, .workspaces = true, }, + patch_features: union(enum) { + patch: struct {}, + commit: struct { + patches_dir: string, + }, + } = .{ .patch = .{} }, // The idea here is: // 1. package has a platform-specific binary to install // 2. 
To prevent downloading & installing incompatible versions, they stick the "real" one in optionalDependencies @@ -6632,6 +7192,16 @@ pub const PackageManager = struct { this.update.development = cli.development; if (!this.update.development) this.update.optional = cli.optional; + + if (subcommand == .patch) { + // TODO args + } else if (subcommand == .patch_commit) { + this.patch_features = .{ + .commit = .{ + .patches_dir = cli.patch_commit.patches_dir, + }, + }; + } } else { this.log_level = if (default_disable_progress_bar) LogLevel.default_no_progress else LogLevel.default; PackageManager.verbose_install = false; @@ -6743,6 +7313,43 @@ pub const PackageManager = struct { before_install: bool = false, }; + pub fn editPatchedDependencies( + manager: *PackageManager, + package_json: *Expr, + patch_key: []const u8, + patchfile_path: []const u8, + ) !void { + + // const pkg_to_patch = manager. + var patched_dependencies = brk: { + if (package_json.asProperty("patchedDependencies")) |query| { + if (query.expr.data == .e_object) + break :brk query.expr.data.e_object.*; + } + break :brk E.Object{}; + }; + + const patchfile_expr = try Expr.init( + E.String, + E.String{ + .data = patchfile_path, + }, + logger.Loc.Empty, + ).clone(manager.allocator); + + try patched_dependencies.put( + manager.allocator, + patch_key, + patchfile_expr, + ); + + try package_json.data.e_object.put( + manager.allocator, + "patchedDependencies", + try Expr.init(E.Object, patched_dependencies, logger.Loc.Empty).clone(manager.allocator), + ); + } + pub fn editTrustedDependencies(allocator: std.mem.Allocator, package_json: *Expr, names_to_add: []string) !void { var len = names_to_add.len; @@ -7537,6 +8144,8 @@ pub const PackageManager = struct { remove, link, unlink, + patch, + patch_commit, }; pub fn init(ctx: Command.Context, comptime subcommand: Subcommand) !*PackageManager { @@ -7801,6 +8410,7 @@ pub const PackageManager = struct { manager.* = PackageManager{ .options = options, .network_task_fifo = NetworkQueue.init(), + .patch_task_fifo = PatchTaskFifo.init(), .allocator = ctx.allocator, .log = ctx.log, .root_dir = entries_option.entries, @@ -8016,6 +8626,17 @@ pub const PackageManager = struct { file.close(); } + // parse the package to patch from the positional arg (may include a version, e.g. name@version) + // get the precise version from the lockfile (there may be multiple) + // copy the contents into a temp folder + pub inline fn patch(ctx: Command.Context) !void { + try updatePackageJSONAndInstallCatchError(ctx, .patch); + } + + pub inline fn patchCommit(ctx: Command.Context) !void { + try updatePackageJSONAndInstallCatchError(ctx, .patch_commit); + } + pub inline fn update(ctx: Command.Context) !void { try updatePackageJSONAndInstallCatchError(ctx, .update); } @@ -8397,6 +9018,15 @@ pub const PackageManager = struct { clap.parseParam("<POS> ... \"name\" uninstall package as a link") catch unreachable, }; + const patch_params = install_params_ ++ [_]ParamType{ + clap.parseParam("<POS> ... \"name\" of the package to patch") catch unreachable, + }; + + const patch_commit_params = install_params_ ++ [_]ParamType{ + clap.parseParam("<POS> ... 
\"dir\" containing changes to a package") catch unreachable, + clap.parseParam("--patches-dir The directory to put the patch file") catch unreachable, + }; + pub const CommandLineArguments = struct { registry: string = "", cache_dir: string = "", @@ -8437,6 +9067,18 @@ pub const PackageManager = struct { concurrent_scripts: ?usize = null, + patch: PatchOpts = .{}, + patch_commit: PatchCommitOpts = .{}, + + const PatchOpts = struct { + edit_dir: ?[]const u8 = null, + ignore_existing: bool = false, + }; + + const PatchCommitOpts = struct { + patches_dir: []const u8 = "patches", + }; + const Omit = struct { dev: bool = false, optional: bool = true, @@ -8505,6 +9147,47 @@ pub const PackageManager = struct { Output.pretty("\n\n" ++ outro_text ++ "\n", .{}); Output.flush(); }, + Subcommand.patch => { + const intro_text = + \\Usage: bun patch \@\ + \\ + \\Prepare a package for patching. + \\ + ; + + Output.pretty("\n" ++ intro_text, .{}); + Output.flush(); + Output.pretty("\nFlags:", .{}); + Output.flush(); + clap.simpleHelp(&PackageManager.patch_params); + // Output.pretty("\n\n" ++ outro_text ++ "\n", .{}); + Output.flush(); + }, + Subcommand.patch_commit => { + const intro_text = + \\Usage: bun patch-commit \ + \\ + \\Generate a patc out of a directory and save it. + \\ + \\Options: + \\ --patches-dir The directory to save the patch file + \\ + ; + // const outro_text = + // \\Options: + // \\ --edit-dir + // \\ bun update + // \\ + // \\Full documentation is available at https://bun.sh/docs/cli/update + // ; + Output.pretty("\n" ++ intro_text, .{}); + Output.flush(); + Output.pretty("\nFlags:", .{}); + Output.flush(); + clap.simpleHelp(&PackageManager.patch_params); + // Output.pretty("\n\n" ++ outro_text ++ "\n", .{}); + Output.flush(); + }, Subcommand.pm => { PackageManagerCommand.printHelp(); }, @@ -8616,6 +9299,8 @@ pub const PackageManager = struct { .remove => remove_params, .link => link_params, .unlink => unlink_params, + .patch => patch_params, + .patch_commit => patch_commit_params, }; var diag = clap.Diagnostic{}; @@ -8653,12 +9338,23 @@ pub const PackageManager = struct { // link and unlink default to not saving, all others default to // saving. 
+ // TODO: I think `bun patch` command goes here if (comptime subcommand == .link or subcommand == .unlink) { cli.no_save = !args.flag("--save"); } else { cli.no_save = args.flag("--no-save"); } + if (comptime subcommand == .patch) { + cli.patch = .{}; + } + + if (comptime subcommand == .patch_commit) { + cli.patch_commit = .{ + .patches_dir = args.option("--patches-dir") orelse "patches", + }; + } + if (args.option("--config")) |opt| { cli.config = opt; } @@ -8729,6 +9425,16 @@ pub const PackageManager = struct { cli.positionals = args.positionals(); + if (subcommand == .patch and cli.positionals.len < 2) { + Output.errGeneric("Missing pkg to patch\n", .{}); + Global.crash(); + } + + if (subcommand == .patch_commit and cli.positionals.len < 2) { + Output.errGeneric("Missing pkg folder to patch\n", .{}); + Global.crash(); + } + if (cli.production and cli.trusted) { Output.errGeneric("The '--production' and '--trust' flags together are not supported because the --trust flag potentially modifies the lockfile after installing packages\n", .{}); Global.crash(); @@ -8899,6 +9605,10 @@ pub const PackageManager = struct { Output.prettyErrorln("No package.json, so nothing to remove\n", .{}); Global.crash(); }, + .patch => { + Output.prettyErrorln("No package.json, so nothing to patch\n", .{}); + Global.crash(); + }, else => { try attemptToCreatePackageJSON(); break :brk try PackageManager.init(ctx, subcommand); @@ -8918,6 +9628,10 @@ pub const PackageManager = struct { inline else => |log_level| try manager.updatePackageJSONAndInstallWithManager(ctx, log_level), } + if (comptime subcommand == .patch) { + try manager.preparePatch(); + } + if (manager.any_failed_to_install) { Global.exit(1); } @@ -9088,6 +9802,23 @@ pub const PackageManager = struct { } } }, + .patch_commit => { + _ = manager.lockfile.loadFromDisk( + manager, + manager.allocator, + manager.log, + manager.options.lockfile_path, + true, + ); + var pathbuf: bun.PathBuffer = undefined; + const stuff = try manager.doPatchCommit(&pathbuf, log_level); + try PackageJSONEditor.editPatchedDependencies( + manager, + ¤t_package_json.root, + stuff.patch_key, + stuff.patchfile_path, + ); + }, .link, .add, .update => { // `bun update ` is basically the same as `bun add `, except // update will not exceed the current dependency range if it exists @@ -9299,6 +10030,498 @@ pub const PackageManager = struct { } } + /// - Arg is name and possibly version (e.g. 
"is-even" or "is-even@1.0.0") + /// - Find package that satisfies name and version + /// - Copy contents of package into temp dir + /// - Give that to user + fn preparePatch(manager: *PackageManager) !void { + const @"pkg + maybe version to patch" = manager.options.positionals[1]; + const name: []const u8, const version: ?[]const u8 = brk: { + if (std.mem.indexOfScalar(u8, @"pkg + maybe version to patch", '@')) |version_delimiter| { + break :brk .{ + @"pkg + maybe version to patch"[0..version_delimiter], + @"pkg + maybe version to patch"[version_delimiter + 1 ..], + }; + } + break :brk .{ + @"pkg + maybe version to patch", + null, + }; + }; + + const name_hash = String.Builder.stringHash(name); + + const strbuf = manager.lockfile.buffers.string_bytes.items; + + const pkg_id: u64 = brk: { + var buf: [1024]u8 = undefined; + var i: usize = 0; + + const pkg_hashes = manager.lockfile.packages.items(.name_hash); + var matches_count: u32 = 0; + var first_match: ?u64 = null; + while (i < manager.lockfile.packages.len) { + if (std.mem.indexOfScalar(u64, pkg_hashes[i..], name_hash)) |idx| { + defer i += idx + 1; + const pkg_id = i + idx; + const pkg = manager.lockfile.packages.get(pkg_id); + const pkg_name = pkg.name.slice(strbuf); + if (!std.mem.eql(u8, pkg_name, name)) continue; + matches_count += 1; + + // if they supplied a version it needs to match it, + // otherwise we'll just pick the first one we see, if there are multiple we throw error + if (version) |v| { + const label = std.fmt.bufPrint(buf[0..], "{}", .{pkg.resolution.fmt(strbuf, .posix)}) catch @panic("Resolution name too long"); + if (std.mem.eql(u8, label, v)) break :brk pkg_id; + } else { + first_match = pkg_id; + } + } else break; + } + if (first_match) |id| { + if (matches_count > 1) { + Output.prettyErrorln( + "\nerror: please specify a precise version:", + .{}, + ); + i = 0; + while (i < manager.lockfile.packages.len) { + if (std.mem.indexOfScalar(u64, pkg_hashes[i..], name_hash)) |idx| { + defer i += idx + 1; + const pkg_id = i + idx; + const pkg = manager.lockfile.packages.get(pkg_id); + if (!std.mem.eql(u8, pkg.name.slice(strbuf), name)) continue; + + Output.prettyError(" {s}@{}\n", .{ pkg.name.slice(strbuf), pkg.resolution.fmt(strbuf, .posix) }); + } else break; + } + Output.flush(); + Global.crash(); + return; + } + break :brk id; + } + Output.prettyErrorln( + "\nerror: could not find package: {s}\n", + .{@"pkg + maybe version to patch"}, + ); + Output.flush(); + return; + }; + + const pkg = manager.lockfile.packages.get(pkg_id); + + const resolution: *const Resolution = &manager.lockfile.packages.items(.resolution)[pkg_id]; + const stuff = manager.computeCacheDirAndSubpath(name, resolution, null); + const cache_dir_subpath: [:0]const u8 = stuff.cache_dir_subpath; + const cache_dir: std.fs.Dir = stuff.cache_dir; + + // copy the contents into a tempdir + var tmpname_buf: [1024]u8 = undefined; + const tempdir_name = bun.span(try bun.fs.FileSystem.instance.tmpname("tmp", &tmpname_buf, bun.fastRandom())); + const tmpdir = try bun.fs.FileSystem.instance.tmpdir(); + var destination_dir = try tmpdir.makeOpenPath(tempdir_name, .{}); + defer destination_dir.close(); + + var resolution_buf: [512]u8 = undefined; + const resolution_label = std.fmt.bufPrint(&resolution_buf, "{}", .{pkg.resolution.fmt(strbuf, .posix)}) catch unreachable; + const dummy_node_modules = .{ + .path = std.ArrayList(u8).init(manager.allocator), + .tree_id = 0, + }; + var pkg_install = PreparePatchPackageInstall{ + .allocator = manager.allocator, + .cache_dir = 
cache_dir, + .cache_dir_subpath = cache_dir_subpath, + .destination_dir_subpath = tempdir_name, + .destination_dir_subpath_buf = tmpname_buf[0..], + .progress = .{}, + .package_name = name, + .package_version = resolution_label, + // dummy value + .node_modules = &dummy_node_modules, + }; + + switch (pkg_install.installWithMethod(true, tmpdir, .copyfile)) { + .success => {}, + .fail => |reason| { + Output.prettyErrorln( + "\nerror: failed to copy package to temp directory: {s}, during step: {s}\n", + .{ + @errorName(reason.err), + reason.step.name(), + }, + ); + Output.flush(); + return; + }, + } + + var pathbuf: bun.PathBuffer = undefined; + const pkg_to_patch_dir = switch (bun.sys.getFdPath(bun.toFD(destination_dir.fd), &pathbuf)) { + .result => |fd| fd, + .err => |e| { + Output.prettyErrorln( + "\nerror: {}\n", + .{ + e.toSystemError(), + }, + ); + Output.flush(); + return; + }, + }; + + Output.pretty("\nTo patch {s}, edit the following folder:\n\n {s}\n", .{ name, pkg_to_patch_dir }); + Output.pretty("\nOnce you're done with your changes, run:\n\n bun patch-commit '{s}'\n", .{pkg_to_patch_dir}); + + return; + } + + const PatchCommitResult = struct { + patch_key: []const u8, + patchfile_path: []const u8, + }; + + /// - Arg is the tempdir containing the package with changes + /// - Get the patch file contents by running git diff on the temp dir and the original package dir + /// - Write the patch file to $PATCHES_DIR/$PKG_NAME_AND_VERSION.patch + /// - Update "patchedDependencies" in package.json + /// - Run install to install newly patched pkg + fn doPatchCommit( + manager: *PackageManager, + pathbuf: *bun.PathBuffer, + comptime log_level: Options.LogLevel, + ) !PatchCommitResult { + var lockfile: *Lockfile = try manager.allocator.create(Lockfile); + defer lockfile.deinit(); + switch (lockfile.loadFromDisk(manager, manager.allocator, manager.log, manager.options.lockfile_path, true)) { + .not_found => { + Output.panic("Lockfile not found", .{}); + }, + .err => |cause| { + if (log_level != .silent) { + switch (cause.step) { + .open_file => Output.prettyError("error opening lockfile: {s}\n", .{ + @errorName(cause.value), + }), + .parse_file => Output.prettyError("error parsing lockfile: {s}\n", .{ + @errorName(cause.value), + }), + .read_file => Output.prettyError("error reading lockfile: {s}\n", .{ + @errorName(cause.value), + }), + .migrating => Output.prettyError("error migrating lockfile: {s}\n", .{ + @errorName(cause.value), + }), + } + + if (manager.options.enable.fail_early) { + Output.prettyError("failed to load lockfile\n", .{}); + } else { + Output.prettyError("ignoring lockfile\n", .{}); + } + + Output.flush(); + } + Global.crash(); + }, + .ok => {}, + } + + const patched_pkg_folder = manager.options.positionals[1]; + if (patched_pkg_folder.len >= bun.MAX_PATH_BYTES) { + Output.prettyError("error: argument provided is too long\n", .{}); + Output.flush(); + Global.crash(); + } + @memcpy(pathbuf[0..patched_pkg_folder.len], patched_pkg_folder); + pathbuf[patched_pkg_folder.len] = 0; + + var versionbuf: [1024]u8 = undefined; + const version = switch (patchCommitGetVersion( + &versionbuf, + bun.path.joinZ(&[_][]const u8{ patched_pkg_folder, ".bun-patch-tag" }, .auto), + )) { + .result => |v| v, + .err => |e| { + Output.prettyError("error: failed to get bun patch tag: {}\n", .{e.toSystemError()}); + Output.flush(); + Global.crash(); + }, + }; + + const package_json_source: logger.Source = brk: { + const patched_pkg_folderZ = pathbuf[0..patched_pkg_folder.len :0]; + const pkgjsonpath 
= bun.path.joinZ(&[_][]const u8{ + patched_pkg_folderZ, + "package.json", + }, .auto); + + switch (bun.sys.File.toSource(pkgjsonpath, manager.allocator)) { + .result => |s| break :brk s, + .err => |e| { + Output.prettyError( + "error: failed to read package.json: {}\n", + .{e.withPath(pkgjsonpath).toSystemError()}, + ); + Output.flush(); + Global.crash(); + }, + } + }; + defer manager.allocator.free(package_json_source.contents); + + var package = Lockfile.Package{}; + try package.parse(lockfile, manager.allocator, manager.log, package_json_source, void, {}, Features.folder); + const name = lockfile.str(&package.name); + var resolution_buf: [1024]u8 = undefined; + const actual_package = switch (lockfile.package_index.get(package.name_hash) orelse { + Output.prettyError( + "error: failed to find package in lockfile package index, this is a bug in Bun. Please file a GitHub issue.\n", + .{}, + ); + Output.flush(); + Global.crash(); + }) { + .PackageID => |id| lockfile.packages.get(id), + .PackageIDMultiple => |ids| brk: { + for (ids.items) |id| { + const pkg = lockfile.packages.get(id); + const resolution_label = std.fmt.bufPrint(&resolution_buf, "{}", .{pkg.resolution.fmt(lockfile.buffers.string_bytes.items, .posix)}) catch unreachable; + if (std.mem.eql(u8, resolution_label, version)) { + break :brk pkg; + } + } + Output.prettyError("error: could not find package with name: {s}\n", .{ + package.name.slice(lockfile.buffers.string_bytes.items), + }); + Output.flush(); + Global.crash(); + }, + }; + const resolution_label = std.fmt.bufPrint(&resolution_buf, "{s}@{}", .{ name, actual_package.resolution.fmt(lockfile.buffers.string_bytes.items, .posix) }) catch unreachable; + const stuff = manager.computeCacheDirAndSubpath(name, &actual_package.resolution, null); + + const patchfile_contents = brk: { + const new_folder = patched_pkg_folder; + var buf2: bun.PathBuffer = undefined; + const old_folder = old_folder: { + const cache_dir_path = switch (bun.sys.getFdPath(bun.toFD(stuff.cache_dir.fd), &buf2)) { + .result => |s| s, + .err => |e| { + Output.prettyError( + "error: failed to read from cache {}\n", + .{e.toSystemError()}, + ); + Output.flush(); + Global.crash(); + }, + }; + break :old_folder bun.path.join(&[_][]const u8{ + cache_dir_path, + stuff.cache_dir_subpath, + }, .posix); + }; + break :brk switch (bun.patch.gitDiff(manager.allocator, old_folder, new_folder) catch |e| { + Output.prettyError( + "error: failed to make diff {s}\n", + .{@errorName(e)}, + ); + Output.flush(); + Global.crash(); + }) { + .result => |stdout| stdout, + .err => |stderr| { + defer stderr.deinit(); + const Truncate = struct { + stderr: std.ArrayList(u8), + + pub fn format( + this: *const @This(), + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) !void { + const truncate_stderr = this.stderr.items.len > 256; + if (truncate_stderr) { + try writer.print("{s}... 
({d} more bytes)", .{ this.stderr.items[0..256], this.stderr.items.len - 256 }); + } else try writer.print("{s}", .{this.stderr.items[0..]}); + } + }; + Output.prettyError( + "error: failed to make diff {}\n", + .{ + Truncate{ .stderr = stderr }, + }, + ); + Output.flush(); + Global.crash(); + }, + }; + }; + defer patchfile_contents.deinit(); + + // write the patch contents to temp file then rename + var tmpname_buf: [1024]u8 = undefined; + const tempfile_name = bun.span(try bun.fs.FileSystem.instance.tmpname("tmp", &tmpname_buf, bun.fastRandom())); + const tmpdir = try bun.fs.FileSystem.instance.tmpdir(); + const tmpfd = switch (bun.sys.openat( + bun.toFD(tmpdir.fd), + tempfile_name, + std.os.O.RDWR | std.os.O.CREAT, + 0o666, + )) { + .result => |fd| fd, + .err => |e| { + Output.prettyError( + "error: failed to open temp file {}\n", + .{e.toSystemError()}, + ); + Output.flush(); + Global.crash(); + }, + }; + defer _ = bun.sys.close(tmpfd); + + if (bun.sys.File.writeAll(.{ .handle = tmpfd }, patchfile_contents.items).asErr()) |e| { + Output.prettyError( + "error: failed to write patch to temp file {}\n", + .{e.toSystemError()}, + ); + Output.flush(); + Global.crash(); + } + + @memcpy(resolution_buf[resolution_label.len .. resolution_label.len + ".patch".len], ".patch"); + var patch_filename: []const u8 = resolution_buf[0 .. resolution_label.len + ".patch".len]; + var deinit = false; + if (escapePatchFilename(manager.allocator, patch_filename)) |escaped| { + deinit = true; + patch_filename = escaped; + } + defer if (deinit) manager.allocator.free(patch_filename); + + const path_in_patches_dir = bun.path.joinZ( + &[_][]const u8{ + manager.options.patch_features.commit.patches_dir, + patch_filename, + }, + .posix, + ); + + var nodefs = bun.JSC.Node.NodeFS{}; + const args = bun.JSC.Node.Arguments.Mkdir{ + .path = .{ .string = bun.PathString.init(manager.options.patch_features.commit.patches_dir) }, + }; + if (nodefs.mkdirRecursive(args, .sync).asErr()) |e| { + Output.prettyError( + "error: failed to make patches dir {}\n", + .{e.toSystemError()}, + ); + Output.flush(); + Global.crash(); + } + + // rename to patches dir + if (bun.sys.renameat2( + bun.toFD(tmpdir.fd), + tempfile_name, + bun.FD.cwd(), + path_in_patches_dir, + .{ .exclude = true }, + ).asErr()) |e| { + Output.prettyError( + "error: failed to renaming patch file to patches dir {}\n", + .{e.toSystemError()}, + ); + Output.flush(); + Global.crash(); + } + + const patch_key = std.fmt.allocPrint(manager.allocator, "{s}", .{resolution_label}) catch bun.outOfMemory(); + const patchfile_path = manager.allocator.dupe(u8, path_in_patches_dir) catch bun.outOfMemory(); + _ = bun.sys.unlink(bun.path.joinZ(&[_][]const u8{ patched_pkg_folder, ".bun-patch-tag" }, .auto)); + + return .{ + .patch_key = patch_key, + .patchfile_path = patchfile_path, + }; + } + + fn patchCommitGetVersion( + buf: *[1024]u8, + patch_tag_path: [:0]const u8, + ) bun.sys.Maybe(string) { + const patch_tag_fd = switch (bun.sys.open(patch_tag_path, std.os.O.RDONLY, 0)) { + .result => |fd| fd, + .err => |e| return .{ .err = e }, + }; + defer { + _ = bun.sys.close(patch_tag_fd); + // we actually need to delete this + _ = bun.sys.unlink(patch_tag_path); + } + + const version = switch (bun.sys.File.readFillBuf(.{ .handle = patch_tag_fd }, buf[0..])) { + .result => |v| v, + .err => |e| return .{ .err = e }, + }; + + // maybe if someone opens it in their editor and hits save a newline will be inserted, + // so trim that off + return .{ .result = std.mem.trimRight(u8, version, " 
\n\r\t") }; + } + + fn escapePatchFilename(allocator: std.mem.Allocator, name: []const u8) ?[]const u8 { + const EscapeVal = enum { + @"/", + @"\\", + @" ", + @"\n", + @"\r", + @"\t", + // @".", + other, + + pub fn escaped(this: @This()) ?[]const u8 { + return switch (this) { + .@"/" => "%2F", + .@"\\" => "%5c", + .@" " => "%20", + .@"\n" => "%0A", + .@"\r" => "%0D", + .@"\t" => "%09", + // .@"." => "%2E", + .other => null, + }; + } + }; + const ESCAPE_TABLE: [256]EscapeVal = comptime brk: { + var table: [256]EscapeVal = [_]EscapeVal{.other} ** 256; + const ty = @typeInfo(EscapeVal); + for (ty.Enum.fields) |field| { + if (field.name.len == 1) { + const c = field.name[0]; + table[c] = @enumFromInt(field.value); + } + } + break :brk table; + }; + var count: usize = 0; + for (name) |c| count += if (ESCAPE_TABLE[c].escaped()) |e| e.len else 1; + if (count == name.len) return null; + var buf = allocator.alloc(u8, count) catch bun.outOfMemory(); + var i: usize = 0; + for (name) |c| { + const e = ESCAPE_TABLE[c].escaped() orelse &[_]u8{c}; + @memcpy(buf[i..][0..e.len], e); + i += e.len; + } + return buf; + } + var cwd_buf: bun.PathBuffer = undefined; var package_json_cwd_buf: bun.PathBuffer = undefined; pub var package_json_cwd: string = ""; @@ -9637,7 +10860,7 @@ pub const PackageManager = struct { } /// Install versions of a package which are waiting on a network request - pub fn installEnqueuedPackages( + pub fn installEnqueuedPackagesAfterExtraction( this: *PackageInstaller, dependency_id: DependencyID, data: *const ExtractData, @@ -9654,6 +10877,20 @@ pub const PackageManager = struct { .npm => Task.Id.forNPMPackage(name, resolution.value.npm.version), else => unreachable, }; + + if (!this.installEnqueuedPackagesImpl(name, task_id, log_level)) { + if (comptime Environment.allow_assert) { + Output.panic("Ran callback to install enqueued packages, but there was no task associated with it. {d} {any}", .{ dependency_id, data.* }); + } + } + } + + pub fn installEnqueuedPackagesImpl( + this: *PackageInstaller, + name: []const u8, + task_id: Task.Id.Type, + comptime log_level: Options.LogLevel, + ) bool { if (this.manager.task_queue.fetchRemove(task_id)) |removed| { var callbacks = removed.value; defer callbacks.deinit(this.manager.allocator); @@ -9665,7 +10902,7 @@ pub const PackageManager = struct { if (callbacks.items.len == 0) { debug("Unexpected state: no callbacks for async task.", .{}); - return; + return true; } for (callbacks.items) |*cb| { @@ -9691,11 +10928,9 @@ pub const PackageManager = struct { ); this.node_modules.deinit(); } - } else { - if (comptime Environment.allow_assert) { - Output.panic("Ran callback to install enqueued packages, but there was no task associated with it. 
{d} {any}", .{ dependency_id, data.* }); - } + return true; } + return false; } fn getInstalledPackageScriptsCount( @@ -9780,6 +11015,10 @@ pub const PackageManager = struct { return count; } + fn getPatchfileHash(patchfile_path: []const u8) ?u64 { + _ = patchfile_path; // autofix + } + fn installPackageWithNameAndResolution( this: *PackageInstaller, dependency_id: DependencyID, @@ -9817,6 +11056,25 @@ pub const PackageManager = struct { break :brk ""; } else std.fmt.bufPrint(&resolution_buf, "{}", .{resolution.fmt(buf, .posix)}) catch unreachable; + const patch_patch, const patch_contents_hash, const patch_name_and_version_hash = brk: { + if (this.manager.lockfile.patched_dependencies.entries.len == 0) break :brk .{ null, null, null }; + var sfb = std.heap.stackFallback(1024, this.lockfile.allocator); + const name_and_version = std.fmt.allocPrint(sfb.get(), "{s}@{s}", .{ name, package_version }) catch unreachable; + defer sfb.get().free(name_and_version); + const name_and_version_hash = String.Builder.stringHash(name_and_version); + + const patchdep = this.lockfile.patched_dependencies.get(name_and_version_hash) orelse break :brk .{ null, null, null }; + bun.assert(!patchdep.patchfile_hash_is_null); + // if (!patchdep.patchfile_hash_is_null) { + // this.manager.enqueuePatchTask(PatchTask.newCalcPatchHash(this, package_id, name_and_version_hash, dependency_id, url: string)) + // } + break :brk .{ + patchdep.path.slice(this.lockfile.buffers.string_bytes.items), + patchdep.patchfileHash().?, + name_and_version_hash, + }; + }; + var installer = PackageInstall{ .progress = this.progress, .cache_dir = undefined, @@ -9825,22 +11083,28 @@ pub const PackageManager = struct { .destination_dir_subpath_buf = &this.destination_dir_subpath_buf, .allocator = this.lockfile.allocator, .package_name = name, + .patch = if (patch_patch) |p| PackageInstall.Patch{ + .patch_contents_hash = patch_contents_hash.?, + .patch_path = p, + .root_project_dir = FileSystem.instance.top_level_dir, + } else PackageInstall.Patch.NULL, .package_version = package_version, .node_modules = &this.node_modules, }; debug("Installing {s}@{s}", .{ name, resolution.fmt(buf, .posix) }); + const pkg_has_patch = !installer.patch.isNull(); switch (resolution.tag) { .npm => { - installer.cache_dir_subpath = this.manager.cachedNPMPackageFolderName(name, resolution.value.npm.version); + installer.cache_dir_subpath = this.manager.cachedNPMPackageFolderName(name, resolution.value.npm.version, patch_contents_hash); installer.cache_dir = this.manager.getCacheDirectory(); }, .git => { - installer.cache_dir_subpath = this.manager.cachedGitFolderName(&resolution.value.git); + installer.cache_dir_subpath = this.manager.cachedGitFolderName(&resolution.value.git, patch_contents_hash); installer.cache_dir = this.manager.getCacheDirectory(); }, .github => { - installer.cache_dir_subpath = this.manager.cachedGitHubFolderName(&resolution.value.github); + installer.cache_dir_subpath = this.manager.cachedGitHubFolderName(&resolution.value.github, patch_contents_hash); installer.cache_dir = this.manager.getCacheDirectory(); }, .folder => { @@ -9858,11 +11122,11 @@ pub const PackageManager = struct { installer.cache_dir = std.fs.cwd(); }, .local_tarball => { - installer.cache_dir_subpath = this.manager.cachedTarballFolderName(resolution.value.local_tarball); + installer.cache_dir_subpath = this.manager.cachedTarballFolderName(resolution.value.local_tarball, patch_contents_hash); installer.cache_dir = this.manager.getCacheDirectory(); }, .remote_tarball => { 
- installer.cache_dir_subpath = this.manager.cachedTarballFolderName(resolution.value.remote_tarball); + installer.cache_dir_subpath = this.manager.cachedTarballFolderName(resolution.value.remote_tarball, patch_contents_hash); installer.cache_dir = this.manager.getCacheDirectory(); }, .workspace => { @@ -9900,7 +11164,7 @@ pub const PackageManager = struct { Output.flush(); this.summary.fail += 1; - this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); + if (!pkg_has_patch) this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); return; }; @@ -9931,7 +11195,7 @@ pub const PackageManager = struct { if (comptime Environment.allow_assert) { @panic("Internal assertion failure: unexpected resolution tag"); } - this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); + if (!pkg_has_patch) this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); return; }, } @@ -9963,6 +11227,7 @@ pub const PackageManager = struct { alias, resolution, context, + patch_name_and_version_hash, ); }, .github => { @@ -9973,6 +11238,7 @@ pub const PackageManager = struct { package_id, url, context, + patch_name_and_version_hash, ); }, .local_tarball => { @@ -9989,6 +11255,7 @@ pub const PackageManager = struct { package_id, resolution.value.remote_tarball.slice(buf), context, + patch_name_and_version_hash, ); }, .npm => { @@ -10007,13 +11274,14 @@ pub const PackageManager = struct { resolution.value.npm.version, resolution.value.npm.url.slice(buf), context, + patch_name_and_version_hash, ); }, else => { if (comptime Environment.allow_assert) { @panic("unreachable, handled above"); } - this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); + if (!pkg_has_patch) this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); this.summary.fail += 1; }, } @@ -10021,6 +11289,26 @@ pub const PackageManager = struct { return; } + // the checks above cover the unpatched package in the cache; if the patched copy is + // missing, apply the patch in a temp directory, copy it into the cache, then install into node_modules + if (!installer.patch.isNull()) { + if (installer.patchedPackageMissingFromCache(this.manager, package_id, installer.patch.patch_contents_hash)) { + const task = PatchTask.newApplyPatchHash( + this.manager, + package_id, + installer.patch.patch_contents_hash, + patch_name_and_version_hash.?, + ); + task.callback.apply.install_context = .{ + .dependency_id = dependency_id, + .tree_id = this.current_tree_id, + .path = this.node_modules.path.clone() catch bun.outOfMemory(), + }; + this.manager.enqueuePatchTask(task); + return; + } + } + if (!is_pending_package_install and !this.canInstallPackageForTree(this.lockfile.buffers.trees.items, this.current_tree_id)) { this.pending_installs_to_tree_id[this.current_tree_id].append(this.manager.allocator, .{ .dependency_id = dependency_id, @@ -10039,7 +11327,7 @@ pub const PackageManager = struct { }); } this.summary.fail += 1; - this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); + if (!pkg_has_patch) this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); return; }; @@ -10151,7 +11439,7 @@ pub const PackageManager = struct { } } - this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); + if (!pkg_has_patch) 
this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); }, .fail => |cause| { if (comptime Environment.allow_assert) { @@ -10160,7 +11448,7 @@ pub const PackageManager = struct { // even if the package failed to install, we still need to increment the install // counter for this tree - this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); + if (!pkg_has_patch) this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); if (cause.err == error.DanglingSymlink) { Output.prettyErrorln( @@ -10219,7 +11507,7 @@ pub const PackageManager = struct { }, } } else { - defer this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); + defer if (!pkg_has_patch) this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); var destination_dir = this.node_modules.makeAndOpenDir(this.root_node_modules_folder) catch |err| { if (log_level != .silent) { @@ -10342,6 +11630,15 @@ pub const PackageManager = struct { this: *PackageInstaller, dependency_id: DependencyID, comptime log_level: Options.LogLevel, + ) void { + this.installPackageImpl(dependency_id, log_level, true); + } + + pub fn installPackageImpl( + this: *PackageInstaller, + dependency_id: DependencyID, + comptime log_level: Options.LogLevel, + comptime increment_tree_count: bool, ) void { const package_id = this.lockfile.buffers.resolutions.items[dependency_id]; const meta = &this.metas[package_id]; @@ -10351,7 +11648,7 @@ pub const PackageManager = struct { if (comptime log_level.showProgress()) { this.node.completeOne(); } - this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); + if (comptime increment_tree_count) this.incrementTreeInstallCount(this.current_tree_id, !is_pending_package_install, log_level); return; } @@ -10377,6 +11674,7 @@ pub const PackageManager = struct { alias: string, resolution: *const Resolution, task_context: TaskCallbackContext, + patch_name_and_version_hash: ?u64, ) void { const repository = &resolution.value.git; const url = this.lockfile.str(&repository.repo); @@ -10396,14 +11694,7 @@ pub const PackageManager = struct { if (checkout_queue.found_existing) return; if (this.git_repositories.get(clone_id)) |repo_fd| { - this.task_batch.push(ThreadPool.Batch.from(this.enqueueGitCheckout( - checkout_id, - repo_fd, - dependency_id, - alias, - resolution.*, - resolved, - ))); + this.task_batch.push(ThreadPool.Batch.from(this.enqueueGitCheckout(checkout_id, repo_fd, dependency_id, alias, resolution.*, resolved, patch_name_and_version_hash))); } else { var clone_queue = this.task_queue.getOrPut(this.allocator, clone_id) catch unreachable; if (!clone_queue.found_existing) { @@ -10417,7 +11708,7 @@ pub const PackageManager = struct { if (clone_queue.found_existing) return; - this.task_batch.push(ThreadPool.Batch.from(this.enqueueGitClone(clone_id, alias, repository))); + this.task_batch.push(ThreadPool.Batch.from(this.enqueueGitClone(clone_id, alias, repository, &this.lockfile.buffers.dependencies.items[dependency_id], null))); } } @@ -10429,6 +11720,7 @@ pub const PackageManager = struct { version: Semver.Version, url: []const u8, task_context: TaskCallbackContext, + patch_name_and_version_hash: ?u64, ) void { const task_id = Task.Id.forNPMPackage(name, version); var task_queue = this.task_queue.getOrPut(this.allocator, task_id) catch unreachable; @@ -10448,6 +11740,7 @@ pub const PackageManager = struct { url, 
dependency_id, this.lockfile.packages.get(package_id), + patch_name_and_version_hash, ) catch unreachable) |task| { task.schedule(&this.network_tarball_batch); if (this.network_tarball_batch.len > 0) { @@ -10462,6 +11755,7 @@ pub const PackageManager = struct { package_id: PackageID, url: string, task_context: TaskCallbackContext, + patch_name_and_version_hash: ?u64, ) void { const task_id = Task.Id.forTarball(url); var task_queue = this.task_queue.getOrPut(this.allocator, task_id) catch unreachable; @@ -10481,6 +11775,7 @@ pub const PackageManager = struct { url, dependency_id, this.lockfile.packages.get(package_id), + patch_name_and_version_hash, ) catch unreachable) |task| { task.schedule(&this.network_tarball_batch); if (this.network_tarball_batch.len > 0) { @@ -10795,7 +12090,8 @@ pub const PackageManager = struct { *PackageInstaller, &installer, .{ - .onExtract = PackageInstaller.installEnqueuedPackages, + .onExtract = PackageInstaller.installEnqueuedPackagesAfterExtraction, + .onPatch = PackageInstaller.installEnqueuedPackagesImpl, .onResolve = {}, .onPackageManifestError = {}, .onPackageDownloadError = {}, @@ -10817,7 +12113,8 @@ pub const PackageManager = struct { *PackageInstaller, &installer, .{ - .onExtract = PackageInstaller.installEnqueuedPackages, + .onExtract = PackageInstaller.installEnqueuedPackagesAfterExtraction, + .onPatch = PackageInstaller.installEnqueuedPackagesImpl, .onResolve = {}, .onPackageManifestError = {}, .onPackageDownloadError = {}, @@ -10841,7 +12138,8 @@ pub const PackageManager = struct { *PackageInstaller, closure.installer, .{ - .onExtract = PackageInstaller.installEnqueuedPackages, + .onExtract = PackageInstaller.installEnqueuedPackagesAfterExtraction, + .onPatch = PackageInstaller.installEnqueuedPackagesImpl, .onResolve = {}, .onPackageManifestError = {}, .onPackageDownloadError = {}, @@ -11173,6 +12471,7 @@ pub const PackageManager = struct { for (lockfile.workspace_paths.values()) |path| builder.count(path.slice(lockfile.buffers.string_bytes.items)); for (lockfile.workspace_versions.values()) |version| version.count(lockfile.buffers.string_bytes.items, *Lockfile.StringBuilder, builder); + for (lockfile.patched_dependencies.values()) |patch_dep| builder.count(patch_dep.path.slice(lockfile.buffers.string_bytes.items)); lockfile.overrides.count(&lockfile, builder); maybe_root.scripts.count(lockfile.buffers.string_bytes.items, *Lockfile.StringBuilder, builder); @@ -11266,6 +12565,30 @@ pub const PackageManager = struct { } } + // Update patched dependencies + { + var iter = lockfile.patched_dependencies.iterator(); + // TODO: if one key is present in manager.lockfile and not present in lockfile we should get rid of it + while (iter.next()) |entry| { + const pkg_name_and_version_hash = entry.key_ptr.*; + bun.debugAssert(entry.value_ptr.patchfile_hash_is_null); + const gop = try manager.lockfile.patched_dependencies.getOrPut(manager.lockfile.allocator, pkg_name_and_version_hash); + if (!gop.found_existing) { + gop.value_ptr.* = .{ + .path = builder.append(String, entry.value_ptr.*.path.slice(lockfile.buffers.string_bytes.items)), + }; + gop.value_ptr.setPatchfileHash(null); + // gop.value_ptr.path = gop.value_ptr.path; + } else if (!bun.strings.eql( + gop.value_ptr.path.slice(manager.lockfile.buffers.string_bytes.items), + entry.value_ptr.path.slice(lockfile.buffers.string_bytes.items), + )) { + gop.value_ptr.path = builder.append(String, entry.value_ptr.*.path.slice(lockfile.buffers.string_bytes.items)); + gop.value_ptr.setPatchfileHash(null); + } + } 
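For context on the merge loop above: the keys of `patched_dependencies` are `PackageNameAndVersionHash` values, 64-bit hashes of the `"name@version"` keys of the `patchedDependencies` object in package.json. A minimal sketch of deriving such a key, using plain `std.hash.Wyhash` as a stand-in for Bun's `String.Builder.stringHash` (the exact hash function is an assumption here):

```zig
const std = @import("std");

// Hypothetical stand-in for Bun's `String.Builder.stringHash`; shown only to
// illustrate how a "name@version" string becomes a PackageNameAndVersionHash.
fn nameAndVersionHash(name_and_version: []const u8) u64 {
    return std.hash.Wyhash.hash(0, name_and_version);
}

pub fn main() void {
    // package.json: "patchedDependencies": { "is-even@1.0.0": "patches/is-even@1.0.0.patch" }
    const key = nameAndVersionHash("is-even@1.0.0");
    std.debug.print("PackageNameAndVersionHash: {x}\n", .{key});
}
```

Keying the map by a fixed-size hash keeps lockfile entries fixed-width; the patch path string itself is stored once in the lockfile's string buffer.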
+ } + builder.clamp(); if (manager.summary.overrides_changed and all_name_hashes.len > 0) { @@ -11339,7 +12662,15 @@ pub const PackageManager = struct { _ = manager.getTemporaryDirectory(); } manager.enqueueDependencyList(root.dependencies); + { + var iter = manager.lockfile.patched_dependencies.iterator(); + while (iter.next()) |entry| if (entry.value_ptr.patchfile_hash_is_null) manager.enqueuePatchTask(PatchTask.newCalcPatchHash(manager, entry.key_ptr.*, null)); + } } else { + { + var iter = manager.lockfile.patched_dependencies.iterator(); + while (iter.next()) |entry| if (entry.value_ptr.patchfile_hash_is_null) manager.enqueuePatchTask(PatchTask.newCalcPatchHash(manager, entry.key_ptr.*, null)); + } // Anything that needs to be downloaded from an update needs to be scheduled here manager.drainDependencyList(); } @@ -11377,6 +12708,7 @@ pub const PackageManager = struct { this, .{ .onExtract = {}, + .onPatch = {}, .onResolve = {}, .onPackageManifestError = {}, .onPackageDownloadError = {}, @@ -11451,6 +12783,7 @@ pub const PackageManager = struct { manager.options.enable.exact_versions, log_level, ); + if (manager.lockfile.packages.len > 0) { root = manager.lockfile.packages.get(0); } diff --git a/src/install/lockfile.zig b/src/install/lockfile.zig index 7f1a46d6d9..4a48b3e03d 100644 --- a/src/install/lockfile.zig +++ b/src/install/lockfile.zig @@ -79,6 +79,7 @@ const Origin = Install.Origin; const PackageID = Install.PackageID; const PackageInstall = Install.PackageInstall; const PackageNameHash = Install.PackageNameHash; +const PackageNameAndVersionHash = Install.PackageNameAndVersionHash; const TruncatedPackageNameHash = Install.TruncatedPackageNameHash; const Resolution = @import("./resolution.zig").Resolution; const Crypto = @import("../sha.zig").Hashers; @@ -90,6 +91,23 @@ const zero_hash = std.mem.zeroes(MetaHash); pub const NameHashMap = std.ArrayHashMapUnmanaged(PackageNameHash, String, ArrayIdentityContext.U64, false); pub const TrustedDependenciesSet = std.ArrayHashMapUnmanaged(TruncatedPackageNameHash, void, ArrayIdentityContext, false); pub const VersionHashMap = std.ArrayHashMapUnmanaged(PackageNameHash, Semver.Version, ArrayIdentityContext.U64, false); +pub const PatchedDependenciesMap = std.ArrayHashMapUnmanaged(PackageNameAndVersionHash, PatchedDep, ArrayIdentityContext.U64, false); +pub const PatchedDep = extern struct { + /// e.g. 
"patches/is-even@1.0.0.patch" + path: String, + _padding: [7]u8 = [_]u8{0} ** 7, + patchfile_hash_is_null: bool = true, + /// the hash of the patch file contents + __patchfile_hash: u64 = 0, + + pub fn setPatchfileHash(this: *PatchedDep, val: ?u64) void { + this.patchfile_hash_is_null = val == null; + this.__patchfile_hash = if (val) |v| v else 0; + } + pub fn patchfileHash(this: *const PatchedDep) ?u64 { + return if (this.patchfile_hash_is_null) null else this.__patchfile_hash; + } +}; const File = bun.sys.File; const assertNoUninitializedPadding = @import("./padding_checker.zig").assertNoUninitializedPadding; @@ -130,6 +148,7 @@ workspace_versions: VersionHashMap = .{}, /// Optional because `trustedDependencies` in package.json might be an /// empty list or it might not exist trusted_dependencies: ?TrustedDependenciesSet = null, +patched_dependencies: PatchedDependenciesMap = .{}, overrides: OverrideMap = .{}, const Stream = std.io.FixedBufferStream([]u8); @@ -253,6 +272,7 @@ pub fn loadFromBytes(this: *Lockfile, buf: []u8, allocator: Allocator, log: *log this.workspace_paths = .{}; this.workspace_versions = .{}; this.overrides = .{}; + this.patched_dependencies = .{}; const load_result = Lockfile.Serializer.load(this, &stream, allocator, log) catch |err| { return LoadFromDiskResult{ .err = .{ .step = .parse_file, .value = err } }; @@ -844,6 +864,7 @@ pub fn cleanWithLogger( try new.package_index.ensureTotalCapacity(old.package_index.capacity()); try new.packages.ensureTotalCapacity(old.allocator, old.packages.len); try new.buffers.preallocate(old.buffers, old.allocator); + try new.patched_dependencies.ensureTotalCapacity(old.allocator, old.patched_dependencies.entries.len); old.scratch.dependency_list_queue.head = 0; @@ -945,6 +966,18 @@ pub fn cleanWithLogger( new.scripts = old_scripts; new.meta_hash = old.meta_hash; + { + var builder = new.stringBuilder(); + for (old.patched_dependencies.values()) |patched_dep| builder.count(patched_dep.path.slice(old.buffers.string_bytes.items)); + try builder.allocate(); + for (old.patched_dependencies.keys(), old.patched_dependencies.values()) |k, v| { + bun.assert(!v.patchfile_hash_is_null); + var patchdep = v; + patchdep.path = builder.append(String, patchdep.path.slice(old.buffers.string_bytes.items)); + try new.patched_dependencies.put(new.allocator, k, patchdep); + } + } + // Don't allow invalid memory to happen if (updates.len > 0) { const string_buf = new.buffers.string_bytes.items; @@ -3151,6 +3184,7 @@ pub const Package = extern struct { this.resolution.count(old_string_buf, *Lockfile.StringBuilder, builder); this.meta.count(old_string_buf, *Lockfile.StringBuilder, builder); this.scripts.count(old_string_buf, *Lockfile.StringBuilder, builder); + for (old.patched_dependencies.values()) |patched_dep| builder.count(patched_dep.path.slice(old.buffers.string_bytes.items)); const new_extern_string_count = this.bin.count(old_string_buf, old_extern_string_buf, *Lockfile.StringBuilder, builder); const old_dependencies: []const Dependency = this.dependencies.get(old.buffers.dependencies.items); const old_resolutions: []const PackageID = this.resolutions.get(old.buffers.resolutions.items); @@ -3573,6 +3607,8 @@ pub const Package = extern struct { added_trusted_dependencies: std.ArrayHashMapUnmanaged(TruncatedPackageNameHash, bool, ArrayIdentityContext, false) = .{}, removed_trusted_dependencies: TrustedDependenciesSet = .{}, + patched_dependencies_changed: bool = false, + pub inline fn sum(this: *Summary, that: Summary) void { this.add += that.add; 
this.remove += that.remove; @@ -3582,7 +3618,8 @@ pub const Package = extern struct { pub inline fn hasDiffs(this: Summary) bool { return this.add > 0 or this.remove > 0 or this.update > 0 or this.overrides_changed or this.added_trusted_dependencies.count() > 0 or - this.removed_trusted_dependencies.count() > 0; + this.removed_trusted_dependencies.count() > 0 or + this.patched_dependencies_changed; } }; @@ -3726,6 +3763,21 @@ pub const Package = extern struct { } } + summary.patched_dependencies_changed = patched_dependencies_changed: { + if (from_lockfile.patched_dependencies.entries.len != to_lockfile.patched_dependencies.entries.len) break :patched_dependencies_changed true; + var iter = to_lockfile.patched_dependencies.iterator(); + while (iter.next()) |entry| { + if (from_lockfile.patched_dependencies.get(entry.key_ptr.*)) |val| { + if (!std.mem.eql( + u8, + val.path.slice(from_lockfile.buffers.string_bytes.items), + entry.value_ptr.path.slice(to_lockfile.buffers.string_bytes.items), + )) break :patched_dependencies_changed true; + } else break :patched_dependencies_changed true; + } + break :patched_dependencies_changed false; + }; + for (from_deps, 0..) |*from_dep, i| { found: { const prev_i = to_i; @@ -4578,6 +4630,17 @@ pub const Package = extern struct { } } + if (json.asProperty("patchedDependencies")) |patched_deps| { + const obj = patched_deps.expr.data.e_object; + for (obj.properties.slice()) |prop| { + const key = prop.key.?; + const value = prop.value.?; + if (key.isString() and value.isString()) { + string_builder.count(value.asString(allocator).?); + } + } + } + if (comptime !features.is_main) { if (json.asProperty("version")) |version_q| { if (version_q.expr.asString(allocator)) |version_str| { @@ -4937,6 +5000,21 @@ pub const Package = extern struct { } } + if (json.asProperty("patchedDependencies")) |patched_deps| { + const obj = patched_deps.expr.data.e_object; + lockfile.patched_dependencies.ensureTotalCapacity(allocator, obj.properties.len) catch unreachable; + for (obj.properties.slice()) |prop| { + const key = prop.key.?; + const value = prop.value.?; + if (key.isString() and value.isString()) { + var sfb = std.heap.stackFallback(1024, allocator); + const keyhash = key.asStringHash(sfb.get(), String.Builder.stringHash) orelse unreachable; + const patch_path = string_builder.append(String, value.asString(allocator).?); + lockfile.patched_dependencies.put(allocator, keyhash, .{ .path = patch_path }) catch unreachable; + } + } + } + if (json.asProperty("directories")) |dirs| { // https://docs.npmjs.com/cli/v8/configuring-npm/package-json#directoriesbin // Because of the way the bin directive works, @@ -5405,6 +5483,7 @@ pub fn deinit(this: *Lockfile) void { if (this.trusted_dependencies) |*trusted_dependencies| { trusted_dependencies.deinit(this.allocator); } + this.patched_dependencies.deinit(this.allocator); this.workspace_paths.deinit(this.allocator); this.workspace_versions.deinit(this.allocator); this.overrides.deinit(this.allocator); @@ -5752,6 +5831,7 @@ pub const Serializer = struct { pub const version = "bun-lockfile-format-v0\n"; const header_bytes: string = "#!/usr/bin/env bun\n" ++ version; + const has_patched_dependencies_tag: u64 = @bitCast(@as([8]u8, "pAtChEdD".*)); const has_workspace_package_ids_tag: u64 = @bitCast(@as([8]u8, "wOrKsPaC".*)); const has_trusted_dependencies_tag: u64 = @bitCast(@as([8]u8, "tRuStEDd".*)); const has_empty_trusted_dependencies_tag: u64 = @bitCast(@as([8]u8, "eMpTrUsT".*)); @@ -5902,6 +5982,30 @@ pub const Serializer = 
struct { ); } + if (this.patched_dependencies.entries.len > 0) { + for (this.patched_dependencies.values()) |patched_dep| bun.assert(!patched_dep.patchfile_hash_is_null); + + try writer.writeAll(std.mem.asBytes(&has_patched_dependencies_tag)); + + try Lockfile.Buffers.writeArray( + StreamType, + stream, + @TypeOf(writer), + writer, + []PackageNameAndVersionHash, + this.patched_dependencies.keys(), + ); + + try Lockfile.Buffers.writeArray( + StreamType, + stream, + @TypeOf(writer), + writer, + []PatchedDep, + this.patched_dependencies.values(), + ); + } + total_size.* = try stream.getPos(); try writer.writeAll(&alignment_bytes_to_repeat_buffer); @@ -6085,6 +6189,39 @@ pub const Serializer = struct { } } + { + const remaining_in_buffer = total_buffer_size -| stream.pos; + + if (remaining_in_buffer > 8 and total_buffer_size <= stream.buffer.len) { + const next_num = try reader.readInt(u64, .little); + if (next_num == has_patched_dependencies_tag) { + var patched_dependencies_name_and_version_hashes = + try Lockfile.Buffers.readArray( + stream, + allocator, + std.ArrayListUnmanaged(PackageNameAndVersionHash), + ); + defer patched_dependencies_name_and_version_hashes.deinit(allocator); + + var map = lockfile.patched_dependencies; + defer lockfile.patched_dependencies = map; + + try map.ensureTotalCapacity(allocator, patched_dependencies_name_and_version_hashes.items.len); + const patched_dependencies_paths = try Lockfile.Buffers.readArray( + stream, + allocator, + std.ArrayListUnmanaged(PatchedDep), + ); + + for (patched_dependencies_name_and_version_hashes.items, patched_dependencies_paths.items) |name_hash, patch_path| { + map.putAssumeCapacity(name_hash, patch_path); + } + } else { + stream.pos -= 8; + } + } + } + lockfile.scratch = Lockfile.Scratch.init(allocator); lockfile.package_index = PackageIndex.Map.initContext(allocator, .{}); lockfile.string_pool = StringPool.initContext(allocator, .{}); diff --git a/src/install/patch_install.zig b/src/install/patch_install.zig new file mode 100644 index 0000000000..bbdd7e8e0f --- /dev/null +++ b/src/install/patch_install.zig @@ -0,0 +1,571 @@ +const bun = @import("root").bun; +const std = @import("std"); + +const string = bun.string; +const stringZ = bun.stringZ; +const Output = bun.Output; +const Global = bun.Global; +const Environment = bun.Environment; +const strings = bun.strings; +const MutableString = bun.MutableString; + +const logger = bun.logger; + +const PackageManager = bun.PackageManager; +pub const PackageID = bun.install.PackageID; +pub const DependencyID = bun.install.DependencyID; + +const Task = bun.install.Task; +pub const Lockfile = @import("./lockfile.zig"); +pub const PatchedDep = Lockfile.PatchedDep; + +const ThreadPool = bun.ThreadPool; + +pub const Resolution = @import("./resolution.zig").Resolution; +const Progress = std.Progress; + +pub const PackageInstall = bun.install.PackageInstall; +pub const PreparePatchPackageInstall = bun.install.PreparePatchPackageInstall; + +const Fs = @import("../fs.zig"); +const FileSystem = Fs.FileSystem; + +pub const bun_hash_tag = bun.install.bun_hash_tag; +pub const max_hex_hash_len: comptime_int = brk: { + var buf: [128]u8 = undefined; + break :brk (std.fmt.bufPrint(buf[0..], "{x}", .{std.math.maxInt(u64)}) catch @panic("Buf wasn't big enough.")).len; +}; +pub const max_buntag_hash_buf_len: comptime_int = max_hex_hash_len + bun_hash_tag.len + 1; +pub const BuntagHashBuf = [max_buntag_hash_buf_len]u8; + +pub const PatchTask = struct { + manager: *PackageManager, + project_dir: []const u8, 
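A note on the serializer changes above: each optional trailing lockfile section is guarded by an 8-byte ASCII tag reinterpreted as a `u64` (here `"pAtChEdD"`). The reader peeks the next `u64` and rewinds 8 bytes when the tag does not match, which is how lockfiles written before this feature stay loadable. A minimal sketch of that peek-and-rewind, assuming a `FixedBufferStream` in place of the real lockfile stream:

```zig
const std = @import("std");

const has_patched_dependencies_tag: u64 = @bitCast(@as([8]u8, "pAtChEdD".*));

pub fn main() !void {
    var buf: [8]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    try fbs.writer().writeInt(u64, has_patched_dependencies_tag, .little);

    fbs.reset();
    const next = try fbs.reader().readInt(u64, .little);
    if (next == has_patched_dependencies_tag) {
        std.debug.print("found patched-dependencies section\n", .{});
        // ... read the keys array and values array here ...
    } else {
        fbs.pos -= 8; // not this section's tag: rewind and let another reader try
    }
}
```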
+    callback: union(enum) {
+        calc_hash: CalcPatchHash,
+        apply: ApplyPatch,
+    },
+    task: ThreadPool.Task = .{
+        .callback = runFromThreadPool,
+    },
+    next: ?*PatchTask = null,
+
+    const debug = bun.Output.scoped(.InstallPatch, false);
+
+    fn errDupePath(e: bun.sys.Error) bun.sys.Error {
+        if (e.path.len > 0) return e.withPath(bun.default_allocator.dupe(u8, e.path) catch bun.outOfMemory());
+        return e;
+    }
+
+    const Maybe = bun.sys.Maybe;
+
+    const CalcPatchHash = struct {
+        patchfile_path: []const u8,
+        name_and_version_hash: u64,
+
+        state: ?EnqueueAfterState = null,
+
+        result: ?Maybe(u64) = null,
+
+        const EnqueueAfterState = struct {
+            pkg_id: PackageID,
+            dependency_id: DependencyID,
+            url: string,
+        };
+    };
+
+    const ApplyPatch = struct {
+        pkg_id: PackageID,
+        patch_hash: u64,
+        name_and_version_hash: u64,
+        resolution: *const Resolution,
+        patchfilepath: []const u8,
+        pkgname: []const u8,
+
+        cache_dir: std.fs.Dir,
+        cache_dir_subpath: stringZ,
+        cache_dir_subpath_without_patch_hash: stringZ,
+
+        /// Non-null if this patch task runs before another `Task`, for example extraction
+        task_id: ?Task.Id.Type = null,
+        install_context: ?struct {
+            dependency_id: DependencyID,
+            tree_id: Lockfile.Tree.Id,
+            path: std.ArrayList(u8),
+        } = null,
+        // dependency_id: ?struct = null,
+
+        logger: logger.Log,
+    };
+
+    pub fn deinit(this: *PatchTask) void {
+        switch (this.callback) {
+            .apply => {
+                this.manager.allocator.free(this.callback.apply.patchfilepath);
+                this.manager.allocator.free(this.callback.apply.cache_dir_subpath);
+                this.manager.allocator.free(this.callback.apply.pkgname);
+                if (this.callback.apply.install_context) |ictx| ictx.path.deinit();
+            },
+            .calc_hash => {
+                // TODO: how to deinit `this.callback.calc_hash.network_task`
+                if (this.callback.calc_hash.state) |state| this.manager.allocator.free(state.url);
+                if (this.callback.calc_hash.result) |r| {
+                    if (r.asErr()) |e| {
+                        if (e.path.len > 0) bun.default_allocator.free(e.path);
+                    }
+                }
+                this.manager.allocator.free(this.callback.calc_hash.patchfile_path);
+            },
+        }
+        bun.destroy(this);
+    }
+
+    pub fn runFromThreadPool(task: *ThreadPool.Task) void {
+        var patch_task: *PatchTask = @fieldParentPtr(PatchTask, "task", task);
+        patch_task.runFromThreadPoolImpl();
+    }
+
+    pub fn runFromThreadPoolImpl(this: *PatchTask) void {
+        debug("runFromThreadPoolImpl {s}", .{@tagName(this.callback)});
+        defer {
+            defer this.manager.wake();
+            this.manager.patch_task_queue.push(this);
+        }
+        switch (this.callback) {
+            .calc_hash => {
+                this.callback.calc_hash.result = this.calcHash();
+            },
+            .apply => {
+                this.apply() catch bun.outOfMemory();
+            },
+        }
+    }
+
+    pub fn runFromMainThread(
+        this: *PatchTask,
+        manager: *PackageManager,
+        comptime log_level: PackageManager.Options.LogLevel,
+    ) !void {
+        debug("runFromMainThread {s}", .{@tagName(this.callback)});
+        switch (this.callback) {
+            .calc_hash => try this.runFromMainThreadCalcHash(manager, log_level),
+            .apply => this.runFromMainThreadApply(manager),
+        }
+    }
+
+    pub fn runFromMainThreadApply(this: *PatchTask, manager: *PackageManager) void {
+        _ = manager; // autofix
+        if (this.callback.apply.logger.errors > 0) {
+            defer this.callback.apply.logger.deinit();
+            // this.log.addErrorFmt(null, logger.Loc.Empty, bun.default_allocator, "failed to apply patch: {}", .{e}) catch unreachable;
+            this.callback.apply.logger.printForLogLevel(Output.writer()) catch {};
+        }
+    }
+
+    fn runFromMainThreadCalcHash(
+        this: *PatchTask,
+        manager: *PackageManager,
+        comptime log_level:
PackageManager.Options.LogLevel, + ) !void { + // TODO only works for npm package + // need to switch on version.tag and handle each case appropriately + const calc_hash = &this.callback.calc_hash; + const hash = switch (calc_hash.result orelse @panic("Calc hash didn't run, this is a bug in Bun.")) { + .result => |h| h, + .err => |e| { + if (e.getErrno() == bun.C.E.NOENT) { + const fmt = "\n\nerror: could not find patch file {s}\n\nPlease make sure it exists.\n\nTo create a new patch file run:\n\n bun patch {s}\n"; + const args = .{ + this.callback.calc_hash.patchfile_path, + manager.lockfile.patched_dependencies.get(calc_hash.name_and_version_hash).?.path.slice(manager.lockfile.buffers.string_bytes.items), + }; + if (comptime log_level.showProgress()) { + Output.prettyWithPrinterFn(fmt, args, Progress.log, &manager.progress); + } else { + Output.prettyErrorln( + fmt, + args, + ); + Output.flush(); + } + Global.crash(); + } + + const fmt = "\n\nerror: {s}{s} while calculating hash for patchfile: {s}\n"; + const args = .{ @tagName(e.getErrno()), e.path, this.callback.calc_hash.patchfile_path }; + if (comptime log_level.showProgress()) { + Output.prettyWithPrinterFn(fmt, args, Progress.log, &manager.progress); + } else { + Output.prettyErrorln( + fmt, + args, + ); + Output.flush(); + } + Global.crash(); + + return; + }, + }; + + var gop = manager.lockfile.patched_dependencies.getOrPut(manager.allocator, calc_hash.name_and_version_hash) catch bun.outOfMemory(); + if (gop.found_existing) { + gop.value_ptr.setPatchfileHash(hash); + } else @panic("No entry for patched dependency, this is a bug in Bun."); + + if (calc_hash.state) |state| { + const url = state.url; + const pkg_id = state.pkg_id; + const dep_id = state.dependency_id; + + const pkg = manager.lockfile.packages.get(pkg_id); + + var out_name_and_version_hash: ?u64 = null; + var out_patchfile_hash: ?u64 = null; + manager.setPreinstallState(pkg.meta.id, manager.lockfile, .unknown); + switch (manager.determinePreinstallState(pkg, manager.lockfile, &out_name_and_version_hash, &out_patchfile_hash)) { + .done => { + // patched pkg in folder path, should now be handled by PackageInstall.install() + debug("pkg: {s} done", .{pkg.name.slice(manager.lockfile.buffers.string_bytes.items)}); + }, + .extract => { + debug("pkg: {s} extract", .{pkg.name.slice(manager.lockfile.buffers.string_bytes.items)}); + const network_task = try manager.generateNetworkTaskForTarball( + // TODO: not just npm package + Task.Id.forNPMPackage( + manager.lockfile.str(&pkg.name), + pkg.resolution.value.npm.version, + ), + url, + dep_id, + pkg, + this.callback.calc_hash.name_and_version_hash, + ) orelse unreachable; + if (manager.getPreinstallState(pkg.meta.id) == .extract) { + manager.setPreinstallState(pkg.meta.id, manager.lockfile, .extracting); + manager.enqueueNetworkTask(network_task); + } + }, + .apply_patch => { + debug("pkg: {s} apply patch", .{pkg.name.slice(manager.lockfile.buffers.string_bytes.items)}); + const patch_task = PatchTask.newApplyPatchHash( + manager, + pkg.meta.id, + hash, + this.callback.calc_hash.name_and_version_hash, + ); + if (manager.getPreinstallState(pkg.meta.id) == .apply_patch) { + manager.setPreinstallState(pkg.meta.id, manager.lockfile, .applying_patch); + manager.enqueuePatchTask(patch_task); + } + }, + else => {}, + } + } + } + + // 1. Parse patch file + // 2. Create temp dir to do all the modifications + // 3. Copy un-patched pkg into temp dir + // 4. Apply patches to pkg in temp dir + // 5. Add bun tag for patch hash + // 6. 
rename() newly patched pkg to cache
+    pub fn apply(this: *PatchTask) !void {
+        var log = this.callback.apply.logger;
+        debug("apply patch task", .{});
+        bun.assert(this.callback == .apply);
+
+        const strbuf: []const u8 = this.manager.lockfile.buffers.string_bytes.items;
+
+        const patch: *const ApplyPatch = &this.callback.apply;
+        const dir = this.project_dir;
+        const patchfile_path = patch.patchfilepath;
+
+        // 1. Parse the patch file
+        const absolute_patchfile_path = bun.path.joinZ(&[_][]const u8{
+            dir,
+            patchfile_path,
+        }, .auto);
+        // TODO: can the patch file be anything other than utf-8?
+
+        const patchfile_txt = switch (bun.sys.File.readFrom(
+            bun.FD.cwd(),
+            absolute_patchfile_path,
+            this.manager.allocator,
+        )) {
+            .result => |txt| txt,
+            .err => |e| {
+                try log.addErrorFmtNoLoc(
+                    this.manager.allocator,
+                    "failed to read patchfile: {}",
+                    .{e.toSystemError()},
+                );
+                return;
+            },
+        };
+        defer this.manager.allocator.free(patchfile_txt);
+        var patchfile = bun.patch.parsePatchFile(patchfile_txt) catch |e| {
+            try log.addErrorFmtNoLoc(
+                this.manager.allocator,
+                "failed to parse patchfile: {s}",
+                .{@errorName(e)},
+            );
+            return;
+        };
+        defer patchfile.deinit(bun.default_allocator);
+
+        // 2. Create temp dir to do all the modifications
+        var tmpname_buf: [1024]u8 = undefined;
+        const tempdir_name = bun.span(bun.fs.FileSystem.instance.tmpname("tmp", &tmpname_buf, bun.fastRandom()) catch bun.outOfMemory());
+        const system_tmpdir = bun.fs.FileSystem.instance.tmpdir() catch |e| {
+            try log.addErrorFmtNoLoc(
+                this.manager.allocator,
+                "failed to create temp dir: {s}",
+                .{@errorName(e)},
+            );
+            return;
+        };
+
+        const pkg_name = this.callback.apply.pkgname;
+
+        var resolution_buf: [512]u8 = undefined;
+        const resolution_label = std.fmt.bufPrint(&resolution_buf, "{}", .{this.callback.apply.resolution.fmt(strbuf, .posix)}) catch unreachable;
+
+        const dummy_node_modules = .{
+            .path = std.ArrayList(u8).init(this.manager.allocator),
+            .tree_id = 0,
+        };
+
+        // 3. copy the unpatched files into temp dir
+        var pkg_install = PreparePatchPackageInstall{
+            .allocator = bun.default_allocator,
+            .cache_dir = this.callback.apply.cache_dir,
+            .cache_dir_subpath = this.callback.apply.cache_dir_subpath_without_patch_hash,
+            .destination_dir_subpath = tempdir_name,
+            .destination_dir_subpath_buf = tmpname_buf[0..],
+            .progress = .{},
+            .package_name = pkg_name,
+            .package_version = resolution_label,
+            // dummy value
+            .node_modules = &dummy_node_modules,
+        };
+
+        switch (pkg_install.installImpl(true, system_tmpdir, .copyfile)) {
+            .success => {},
+            .fail => |reason| {
+                return try log.addErrorFmtNoLoc(
+                    this.manager.allocator,
+                    "{s} while executing step: {s}",
+                    .{ @errorName(reason.err), reason.step.name() },
+                );
+            },
+        }
+
+        var patch_pkg_dir = system_tmpdir.openDir(tempdir_name, .{}) catch |e| return try log.addErrorFmtNoLoc(
+            this.manager.allocator,
+            "failed to open temporary dir to apply patch to package: {s}",
+            .{@errorName(e)},
+        );
+        defer patch_pkg_dir.close();
+
+        // 4. apply patch
+        if (patchfile.apply(this.manager.allocator, bun.toFD(patch_pkg_dir.fd))) |e| {
+            return try log.addErrorFmtNoLoc(
+                this.manager.allocator,
+                "failed applying patch file: {}",
+                .{e},
+            );
+        }
+
+        // 5.
Add bun tag + const bun_tag_prefix = bun_hash_tag; + var buntagbuf: BuntagHashBuf = undefined; + @memcpy(buntagbuf[0..bun_tag_prefix.len], bun_tag_prefix); + const hashlen = (std.fmt.bufPrint(buntagbuf[bun_tag_prefix.len..], "{x}", .{this.callback.apply.patch_hash}) catch unreachable).len; + buntagbuf[bun_tag_prefix.len + hashlen] = 0; + const buntagfd = switch (bun.sys.openat(bun.toFD(patch_pkg_dir.fd), buntagbuf[0 .. bun_tag_prefix.len + hashlen :0], std.os.O.RDWR | std.os.O.CREAT, 0o666)) { + .result => |fd| fd, + .err => |e| { + return try log.addErrorFmtNoLoc(this.manager.allocator, "{}", .{e}); + }, + }; + _ = bun.sys.close(buntagfd); + + // 6. rename to cache dir + const path_in_tmpdir = bun.path.joinZ( + &[_][]const u8{ + tempdir_name, + // tempdir_name, + }, + .auto, + ); + // var allocated = false; + // const package_name_z = brk: { + // if (this.package_name.len < tmpname_buf.len) { + // @memcpy(tmpname_buf[0..this.package_name.len], this.package_name); + // tmpname_buf[this.package_name.len] = 0; + // break :brk tmpname_buf[0..this.package_name.len :0]; + // } + // allocated = true; + // break :brk this.manager.allocator.dupeZ(u8, this.package_name) catch bun.outOfMemory(); + // }; + // defer if (allocated) this.manager.allocator.free(package_name_z); + + worked: { + if (bun.sys.renameat2( + bun.toFD(system_tmpdir.fd), + path_in_tmpdir, + bun.toFD(this.callback.apply.cache_dir.fd), + this.callback.apply.cache_dir_subpath, + .{ + .exclude = true, + }, + ).asErr()) |e_| { + var e = e_; + + if (if (comptime bun.Environment.isWindows) switch (e.getErrno()) { + bun.C.E.NOTEMPTY, bun.C.E.EXIST => true, + else => false, + } else switch (e.getErrno()) { + bun.C.E.NOTEMPTY, bun.C.E.EXIST, bun.C.E.OPNOTSUPP => true, + else => false, + }) { + switch (bun.sys.renameat2( + bun.toFD(system_tmpdir.fd), + path_in_tmpdir, + bun.toFD(this.callback.apply.cache_dir.fd), + this.callback.apply.cache_dir_subpath, + .{ + .exchange = true, + }, + )) { + .err => |ee| e = ee, + .result => break :worked, + } + } + return try log.addErrorFmtNoLoc(this.manager.allocator, "{}", .{e}); + } + } + } + + pub fn calcHash(this: *PatchTask) Maybe(u64) { + bun.assert(this.callback == .calc_hash); + + const dir = this.project_dir; + const patchfile_path = this.callback.calc_hash.patchfile_path; + + // parse the patch file + const absolute_patchfile_path = bun.path.joinZ(&[_][]const u8{ + dir, + patchfile_path, + }, .auto); + + const stat: bun.Stat = switch (bun.sys.stat(absolute_patchfile_path)) { + .err => |e| return .{ .err = errDupePath(e) }, + .result => |s| s, + }; + const size: u64 = @intCast(stat.size); + + const fd = switch (bun.sys.open(absolute_patchfile_path, std.os.O.RDONLY, 0)) { + .err => |e| return .{ .err = errDupePath(e) }, + .result => |fd| fd, + }; + defer _ = bun.sys.close(fd); + + var hasher = bun.Wyhash11.init(0); + + // what's a good number for this? 
page size i guess + const STACK_SIZE = 16384; + + var stack: [STACK_SIZE]u8 = undefined; + var read: usize = 0; + while (read < size) { + var i: usize = 0; + while (i < STACK_SIZE and i < size) { + switch (bun.sys.read(fd, stack[i..])) { + .result => |w| i += w, + .err => |e| return .{ .err = errDupePath(e) }, + } + } + read += i; + hasher.update(stack[0..i]); + } + + return .{ .result = hasher.final() }; + } + + pub fn notify(this: *PatchTask) void { + defer this.manager.wake(); + this.manager.patch_task_queue.push(this); + } + + pub fn schedule(this: *PatchTask, batch: *ThreadPool.Batch) void { + batch.push(ThreadPool.Batch.from(&this.task)); + } + + pub fn newCalcPatchHash( + manager: *PackageManager, + name_and_version_hash: u64, + state: ?CalcPatchHash.EnqueueAfterState, + ) *PatchTask { + const patchdep = manager.lockfile.patched_dependencies.get(name_and_version_hash) orelse @panic("This is a bug"); + bun.debugAssert(patchdep.patchfile_hash_is_null); + const patchfile_path = manager.allocator.dupeZ(u8, patchdep.path.slice(manager.lockfile.buffers.string_bytes.items)) catch bun.outOfMemory(); + + const pt = bun.new(PatchTask, .{ + .callback = .{ + .calc_hash = .{ + .state = state, + .patchfile_path = patchfile_path, + .name_and_version_hash = name_and_version_hash, + }, + }, + .manager = manager, + .project_dir = FileSystem.instance.top_level_dir, + }); + + return pt; + } + + pub fn newApplyPatchHash( + pkg_manager: *PackageManager, + pkg_id: PackageID, + patch_hash: u64, + name_and_version_hash: u64, + ) *PatchTask { + const pkg_name = pkg_manager.lockfile.packages.items(.name)[pkg_id]; + const resolution: *const Resolution = &pkg_manager.lockfile.packages.items(.resolution)[pkg_id]; + + const stuff = pkg_manager.computeCacheDirAndSubpath( + pkg_name.slice(pkg_manager.lockfile.buffers.string_bytes.items), + resolution, + patch_hash, + ); + + const patchfilepath = pkg_manager.allocator.dupe(u8, pkg_manager.lockfile.patched_dependencies.get(name_and_version_hash).?.path.slice(pkg_manager.lockfile.buffers.string_bytes.items)) catch bun.outOfMemory(); + + const pt = bun.new(PatchTask, .{ + .callback = .{ + .apply = .{ + .pkg_id = pkg_id, + .resolution = resolution, + .patch_hash = patch_hash, + .name_and_version_hash = name_and_version_hash, + .cache_dir = stuff.cache_dir, + .patchfilepath = patchfilepath, + .pkgname = pkg_manager.allocator.dupe(u8, pkg_name.slice(pkg_manager.lockfile.buffers.string_bytes.items)) catch bun.outOfMemory(), + .logger = logger.Log.init(pkg_manager.allocator), + // need to dupe this as it's calculated using + // `PackageManager.cached_package_folder_name_buf` which may be + // modified + .cache_dir_subpath = pkg_manager.allocator.dupeZ(u8, stuff.cache_dir_subpath) catch bun.outOfMemory(), + .cache_dir_subpath_without_patch_hash = pkg_manager.allocator.dupeZ(u8, stuff.cache_dir_subpath[0 .. 
std.mem.indexOf(u8, stuff.cache_dir_subpath, "_patch_hash=") orelse @panic("This is a bug in Bun.")]) catch bun.outOfMemory(), + }, + }, + .manager = pkg_manager, + .project_dir = FileSystem.instance.top_level_dir, + }); + + return pt; + } +}; diff --git a/src/install/repository.zig b/src/install/repository.zig index 19fcfd2830..6842d55552 100644 --- a/src/install/repository.zig +++ b/src/install/repository.zig @@ -309,7 +309,7 @@ pub const Repository = extern struct { resolved: string, ) !ExtractData { bun.Analytics.Features.git_dependencies += 1; - const folder_name = PackageManager.cachedGitFolderNamePrint(&folder_name_buf, resolved); + const folder_name = PackageManager.cachedGitFolderNamePrint(&folder_name_buf, resolved, null); var package_dir = bun.openDir(cache_dir, folder_name) catch |not_found| brk: { if (not_found != error.ENOENT) return not_found; diff --git a/src/js/internal-for-testing.ts b/src/js/internal-for-testing.ts index cb09bdda77..89e6c8758b 100644 --- a/src/js/internal-for-testing.ts +++ b/src/js/internal-for-testing.ts @@ -17,6 +17,11 @@ export const TLSBinding = $cpp("NodeTLS.cpp", "createNodeTLSBinding"); export const SQL = $cpp("JSSQLStatement.cpp", "createJSSQLStatementConstructor"); +export const patchInternals = { + parse: $newZigFunction("patch.zig", "TestingAPIs.parse", 1), + apply: $newZigFunction("patch.zig", "TestingAPIs.apply", 2), +}; + export const shellInternals = { lex: $newZigFunction("shell.zig", "TestingAPIs.shellLex", 1), parse: $newZigFunction("shell.zig", "TestingAPIs.shellParse", 1), diff --git a/src/js_ast.zig b/src/js_ast.zig index 974fd5084b..2f75b137e6 100644 --- a/src/js_ast.zig +++ b/src/js_ast.zig @@ -3365,6 +3365,13 @@ pub const Expr = struct { return expr.data.e_string.string(allocator) catch null; } + pub inline fn isString(expr: *const Expr) bool { + return switch (expr.data) { + .e_string, .e_utf8_string => true, + else => false, + }; + } + pub inline fn asString(expr: *const Expr, allocator: std.mem.Allocator) ?string { switch (expr.data) { .e_string => |str| return str.string(allocator) catch bun.outOfMemory(), @@ -3372,6 +3379,18 @@ pub const Expr = struct { else => return null, } } + pub inline fn asStringHash(expr: *const Expr, allocator: std.mem.Allocator, comptime hash_fn: *const fn (buf: []const u8) callconv(.Inline) u64) ?u64 { + switch (expr.data) { + .e_string => |str| { + if (str.isUTF8()) return hash_fn(str.data); + const utf8_str = str.string(allocator) catch return null; + defer allocator.free(utf8_str); + return hash_fn(utf8_str); + }, + .e_utf8_string => |str| return hash_fn(str.data), + else => return null, + } + } pub inline fn asStringCloned(expr: *const Expr, allocator: std.mem.Allocator) ?string { switch (expr.data) { diff --git a/src/logger.zig b/src/logger.zig index 9002e3b099..23c74879f1 100644 --- a/src/logger.zig +++ b/src/logger.zig @@ -1004,6 +1004,10 @@ pub const Log = struct { }); } + pub fn addErrorFmtNoLoc(log: *Log, allocator: std.mem.Allocator, comptime text: string, args: anytype) !void { + try log.addErrorFmt(null, Loc.Empty, allocator, text, args); + } + pub fn addErrorFmt(log: *Log, source: ?*const Source, l: Loc, allocator: std.mem.Allocator, comptime text: string, args: anytype) !void { @setCold(true); log.errors += 1; diff --git a/src/output.zig b/src/output.zig index 570b89fd0a..936f76577b 100644 --- a/src/output.zig +++ b/src/output.zig @@ -241,7 +241,7 @@ pub const Source = struct { Output.Source.init(stdout, stderr) .set(); - if (comptime Environment.isDebug) { + if (comptime 
Environment.isDebug or Environment.allow_logs) { initScopedDebugWriterAtStartup(); } } @@ -569,7 +569,7 @@ pub fn Scoped(comptime tag: anytype, comptime disabled: bool) type { else => tag, }; - if (comptime !Environment.isDebug) { + if (comptime !Environment.isDebug and !Environment.allow_logs) { return struct { pub fn isVisible() bool { return false; @@ -619,6 +619,12 @@ pub fn Scoped(comptime tag: anytype, comptime disabled: bool) type { return; } + if (Environment.allow_logs) ScopedDebugWriter.disable_inside_log += 1; + defer { + if (Environment.allow_logs) + ScopedDebugWriter.disable_inside_log -= 1; + } + if (!isVisible()) return; @@ -1000,7 +1006,7 @@ pub fn initScopedDebugWriterAtStartup() void { ScopedDebugWriter.scoped_file_writer = source.stream.quietWriter(); } fn scopedWriter() File.QuietWriter { - if (comptime !Environment.isDebug) { + if (comptime !Environment.isDebug and !Environment.allow_logs) { @compileError("scopedWriter() should only be called in debug mode"); } diff --git a/src/patch.zig b/src/patch.zig new file mode 100644 index 0000000000..3c7e0f08f6 --- /dev/null +++ b/src/patch.zig @@ -0,0 +1,1365 @@ +const std = @import("std"); +const bun = @import("root").bun; +const JSC = bun.JSC; +const Allocator = std.mem.Allocator; +const List = std.ArrayListUnmanaged; + +const WHITESPACE: []const u8 = " \t\n\r"; + +// TODO: calculate this for different systems +const PAGE_SIZE = 16384; + +/// All strings point to the original patch file text +pub const PatchFilePart = union(enum) { + file_patch: *FilePatch, + file_deletion: *FileDeletion, + file_creation: *FileCreation, + file_rename: *FileRename, + file_mode_change: *FileModeChange, + + pub fn deinit(this: *PatchFilePart, allocator: Allocator) void { + switch (this.*) { + .file_patch => this.file_patch.deinit(allocator), + .file_deletion => this.file_deletion.deinit(allocator), + .file_creation => this.file_creation.deinit(allocator), + .file_rename => this.file_rename.deinit(allocator), + .file_mode_change => this.file_mode_change.deinit(allocator), + } + } +}; + +pub const PatchFile = struct { + parts: List(PatchFilePart) = .{}, + + const ScratchBuffer = struct { + buf: std.ArrayList(u8), + + fn deinit(scratch: *@This()) void { + scratch.buf.deinit(); + } + + fn clear(scratch: *@This()) void { + scratch.buf.clearRetainingCapacity(); + } + + fn dupeZ(scratch: *@This(), path: []const u8) [:0]const u8 { + const start = scratch.buf.items.len; + scratch.buf.appendSlice(path) catch unreachable; + scratch.buf.append(0) catch unreachable; + return scratch.buf.items[start .. start + path.len :0]; + } + }; + + pub fn deinit(this: *PatchFile, allocator: Allocator) void { + for (this.parts.items) |*part| part.deinit(allocator); + this.parts.deinit(allocator); + } + + const ApplyState = struct { + pathbuf: bun.PathBuffer = undefined, + patch_dir_abs_path: ?[:0]const u8 = null, + + fn patchDirAbsPath(state: *@This(), fd: bun.FileDescriptor) JSC.Maybe([:0]const u8) { + if (state.patch_dir_abs_path) |p| return .{ .result = p }; + return switch (bun.sys.getFdPath(fd, &state.pathbuf)) { + .result => |p| { + state.patch_dir_abs_path = state.pathbuf[0..p.len :0]; + return .{ .result = state.patch_dir_abs_path.? 
}; + }, + .err => |e| return .{ .err = e.withFd(fd) }, + }; + } + }; + + pub fn apply(this: *const PatchFile, allocator: Allocator, patch_dir: bun.FileDescriptor) ?JSC.SystemError { + var state: ApplyState = .{}; + var sfb = std.heap.stackFallback(1024, allocator); + var arena = bun.ArenaAllocator.init(sfb.get()); + + for (this.parts.items) |*part| { + defer _ = arena.reset(.retain_capacity); + switch (part.*) { + .file_deletion => { + const pathz = arena.allocator().dupeZ(u8, part.file_deletion.path) catch bun.outOfMemory(); + + if (bun.sys.unlinkat(patch_dir, pathz).asErr()) |e| { + return e.withPath(pathz).toSystemError(); + } + }, + .file_rename => { + const from_path = arena.allocator().dupeZ(u8, part.file_rename.from_path) catch bun.outOfMemory(); + const to_path = arena.allocator().dupeZ(u8, part.file_rename.to_path) catch bun.outOfMemory(); + + if (std.fs.path.dirname(to_path)) |todir| { + const abs_patch_dir = switch (state.patchDirAbsPath(patch_dir)) { + .result => |p| p, + .err => |e| return e.toSystemError(), + }; + const path_to_make = bun.path.joinZ(&[_][]const u8{ + abs_patch_dir, + todir, + }, .auto); + var nodefs = bun.JSC.Node.NodeFS{}; + if (nodefs.mkdirRecursive(.{ + .path = .{ .string = bun.PathString.init(path_to_make) }, + .recursive = true, + .mode = 0o755, + }, .sync).asErr()) |e| return e.toSystemError(); + } + + if (bun.sys.renameat(patch_dir, from_path, patch_dir, to_path).asErr()) |e| { + return e.toSystemError(); + } + }, + .file_creation => { + const filepath = bun.PathString.init(arena.allocator().dupeZ(u8, part.file_creation.path) catch bun.outOfMemory()); + const filedir = bun.path.dirname(filepath.slice(), .auto); + const mode = part.file_creation.mode; + + var nodefs = bun.JSC.Node.NodeFS{}; + if (filedir.len > 0) { + if (nodefs.mkdirRecursive(.{ + .path = .{ .string = bun.PathString.init(filedir) }, + .recursive = true, + .mode = @intCast(@intFromEnum(mode)), + }, .sync).asErr()) |e| return e.toSystemError(); + } + + const newfile_fd = switch (bun.sys.openat( + patch_dir, + filepath.sliceAssumeZ(), + std.os.O.CREAT | std.os.O.WRONLY | std.os.O.TRUNC, + mode.toBunMode(), + )) { + .result => |fd| fd, + .err => |e| return e.withPath(filepath.slice()).toSystemError(), + }; + defer _ = bun.sys.close(newfile_fd); + + const hunk = part.file_creation.hunk orelse { + continue; + }; + + const last_line = hunk.parts.items[0].lines.items.len -| 1; + + const no_newline_at_end_of_file = hunk.parts.items[0].no_newline_at_end_of_file; + + const count = count: { + var total: usize = 0; + for (hunk.parts.items[0].lines.items, 0..) |line, i| { + total += line.len; + total += @intFromBool(i < last_line); + } + total += @intFromBool(!no_newline_at_end_of_file); + break :count total; + }; + + const file_alloc = if (count <= PAGE_SIZE) arena.allocator() else bun.default_allocator; + + // TODO: this additional allocation is probably not necessary in all cases and should be avoided or use stack buffer + const file_contents = brk: { + var contents = file_alloc.alloc(u8, count) catch bun.outOfMemory(); + var i: usize = 0; + for (hunk.parts.items[0].lines.items, 0..) |line, idx| { + @memcpy(contents[i .. 
i + line.len], line); + i += line.len; + if (idx < last_line or !no_newline_at_end_of_file) { + contents[i] = '\n'; + i += 1; + } + } + break :brk contents; + }; + defer file_alloc.free(file_contents); + + var written: usize = 0; + while (written < file_contents.len) { + switch (bun.sys.write(newfile_fd, file_contents[written..])) { + .result => |bytes| written += bytes, + .err => |e| return e.withPath(filepath.slice()).toSystemError(), + } + } + }, + .file_patch => { + // TODO: should we compute the hash of the original file and check it against the on in the patch? + if (applyPatch(part.file_patch, &arena, patch_dir, &state).asErr()) |e| { + return e.toSystemError(); + } + }, + .file_mode_change => { + const newmode = part.file_mode_change.new_mode; + const filepath = arena.allocator().dupeZ(u8, part.file_mode_change.path) catch bun.outOfMemory(); + if (comptime bun.Environment.isPosix) { + if (bun.sys.fchmodat(patch_dir, filepath, newmode.toBunMode(), 0).asErr()) |e| { + return e.toSystemError(); + } + } + + if (comptime bun.Environment.isWindows) { + const absfilepath = switch (state.patchDirAbsPath(patch_dir)) { + .result => |p| p, + .err => |e| return e.toSystemError(), + }; + const fd = switch (bun.sys.open(bun.path.joinZ(&[_][]const u8{ absfilepath, filepath }, .auto), std.os.O.RDWR, 0)) { + .err => |e| return e.toSystemError(), + .result => |f| f, + }; + defer _ = bun.sys.close(fd); + if (bun.sys.fchmod(fd, newmode.toBunMode()).asErr()) |e| { + return e.toSystemError(); + } + } + }, + } + } + + return null; + } + + /// Invariants: + /// - Hunk parts are ordered by first to last in file + /// - The original starting line and the patched starting line are equal in the first hunk part + /// + /// TODO: this is a very naive and slow implementation which works by creating a list of lines + /// we can speed it up by: + /// - If file size <= PAGE_SIZE, read the whole file into memory. 
memcpy/memmove the file contents around will be fast + /// - If file size > PAGE_SIZE, rather than making a list of lines, make a list of chunks + fn applyPatch( + patch: *const FilePatch, + arena: *bun.ArenaAllocator, + patch_dir: bun.FileDescriptor, + state: *ApplyState, + ) JSC.Maybe(void) { + const file_path: [:0]const u8 = arena.allocator().dupeZ(u8, patch.path) catch bun.outOfMemory(); + + // Need to get the mode of the original file + // And also get the size to read file into memory + const stat = switch (if (bun.Environment.isPosix) + bun.sys.fstatat(patch_dir, file_path) + else + bun.sys.stat( + switch (state.patchDirAbsPath(patch_dir)) { + .result => |p| bun.path.joinZ(&[_][]const u8{ p, file_path }, .auto), + .err => |e| return .{ .err = e }, + }, + )) { + .err => |e| return .{ .err = e.withPath(file_path) }, + .result => |stat| stat, + }; + + // Purposefully use `bun.default_allocator` here because if the file size is big like + // 1gb we don't want to have 1gb hanging around in memory until arena is cleared + // + // But if the file size is small, like less than a single page, it's probably ok + // to use the arena + const use_arena: bool = stat.size <= PAGE_SIZE; + const file_alloc = if (use_arena) arena.allocator() else bun.default_allocator; + const filebuf = patch_dir.asDir().readFileAlloc(file_alloc, file_path, 1024 * 1024 * 1024 * 4) catch return .{ .err = bun.sys.Error.fromCode(.INVAL, .read).withPath(file_path) }; + defer file_alloc.free(filebuf); + + var file_line_count: usize = 0; + const lines_count = brk: { + var count: usize = 0; + var iter = std.mem.splitScalar(u8, filebuf, '\n'); + while (iter.next()) |_| : (count += 1) {} + file_line_count = count; + + // Adjust to account for the changes + for (patch.hunks.items) |*hunk| { + count = @intCast(@as(i64, @intCast(count)) + @as(i64, @intCast(hunk.header.patched.len)) - @as(i64, @intCast(hunk.header.original.len))); + for (hunk.parts.items) |*part_| { + const part: *PatchMutationPart = part_; + switch (part.type) { + .deletion => { + // deleting the no newline pragma so we are actually adding a line + count += if (part.no_newline_at_end_of_file) 1 else 0; + }, + .insertion => { + count -= if (part.no_newline_at_end_of_file) 1 else 0; + }, + .context => {}, + } + } + } + + break :brk count; + }; + + // TODO: i hate this + var lines = std.ArrayListUnmanaged([]const u8).initCapacity(bun.default_allocator, lines_count) catch bun.outOfMemory(); + defer lines.deinit(bun.default_allocator); + { + var iter = std.mem.splitScalar(u8, filebuf, '\n'); + var i: usize = 0; + while (iter.next()) |line| : (i += 1) { + lines.append(bun.default_allocator, line) catch bun.outOfMemory(); + } + bun.debugAssert(i == file_line_count); + } + + for (patch.hunks.items) |*hunk| { + var line_cursor = hunk.header.patched.start - 1; + for (hunk.parts.items) |*part_| { + const part: *PatchMutationPart = part_; + switch (part.type) { + .context => { + // TODO: check if the lines match in the original file? + line_cursor += @intCast(part.lines.items.len); + }, + .insertion => { + const lines_to_insert = lines.addManyAt(bun.default_allocator, line_cursor, part.lines.items.len) catch bun.outOfMemory(); + @memcpy(lines_to_insert, part.lines.items); + line_cursor += @intCast(part.lines.items.len); + if (part.no_newline_at_end_of_file) { + _ = lines.pop(); + } + }, + .deletion => { + // TODO: check if the lines match in the original file? 
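To make the splice that follows concrete: insertion parts grow the line list at the cursor and then advance it past the new lines, while deletion parts remove lines at the cursor without advancing it, because later lines shift up into its place. A toy run over a hypothetical three-line file:

```zig
const std = @import("std");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();

    var lines = std.ArrayListUnmanaged([]const u8){};
    defer lines.deinit(allocator);
    try lines.appendSlice(allocator, &.{ "a", "b", "c" });

    var line_cursor: usize = 1;

    // insertion hunk part: add one line, then move the cursor past it
    const inserted = try lines.addManyAt(allocator, line_cursor, 1);
    inserted[0] = "b2";
    line_cursor += 1;

    // deletion hunk part: remove one line at the cursor; no cursor advance
    try lines.replaceRange(allocator, line_cursor, 1, &.{});

    for (lines.items) |line| std.debug.print("{s}\n", .{line}); // a, b2, c
}
```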
+ lines.replaceRange(bun.default_allocator, line_cursor, part.lines.items.len, &.{}) catch bun.outOfMemory(); + if (part.no_newline_at_end_of_file) { + lines.append(bun.default_allocator, "") catch bun.outOfMemory(); + } + // line_cursor -= part.lines.items.len; + }, + } + } + } + + const file_fd = switch (bun.sys.openat( + patch_dir, + file_path, + std.os.O.CREAT | std.os.O.WRONLY | std.os.O.TRUNC, + @intCast(stat.mode), + )) { + .err => |e| return .{ .err = e.withPath(file_path) }, + .result => |fd| fd, + }; + defer { + _ = bun.sys.close(file_fd); + } + + const contents = std.mem.join(bun.default_allocator, "\n", lines.items) catch bun.outOfMemory(); + defer bun.default_allocator.free(contents); + + var written: usize = 0; + while (written < contents.len) { + written += switch (bun.sys.write(file_fd, contents[written..])) { + .result => |w| w, + .err => |e| return .{ .err = e.withPath(file_path) }, + }; + } + + return JSC.Maybe(void).success; + } +}; + +const FileDeets = struct { + diff_line_from_path: ?[]const u8 = null, + diff_line_to_path: ?[]const u8 = null, + old_mode: ?[]const u8 = null, + new_mode: ?[]const u8 = null, + deleted_file_mode: ?[]const u8 = null, + new_file_mode: ?[]const u8 = null, + rename_from: ?[]const u8 = null, + rename_to: ?[]const u8 = null, + before_hash: ?[]const u8 = null, + after_hash: ?[]const u8 = null, + from_path: ?[]const u8 = null, + to_path: ?[]const u8 = null, + hunks: List(Hunk) = .{}, + + fn takeHunks(this: *FileDeets) List(Hunk) { + const hunks = this.hunks; + this.hunks = .{}; + return hunks; + } + + fn deinit(this: *FileDeets, allocator: Allocator) void { + for (this.hunks.items) |*hunk| { + hunk.deinit(allocator); + } + this.hunks.deinit(allocator); + } + + fn nullifyEmptyStrings(this: *FileDeets) void { + const fields: []const std.builtin.Type.StructField = std.meta.fields(FileDeets); + + inline for (fields) |field| { + if (field.type == ?[]const u8) { + const value = @field(this, field.name); + if (value != null and value.?.len == 0) { + @field(this, field.name) = null; + } + } + } + } +}; + +pub const PatchMutationPart = struct { + type: PartType, + lines: List([]const u8) = .{}, + /// This technically can only be on the last part of a hunk + no_newline_at_end_of_file: bool = false, + + /// Ensure context, insertion, deletion values are in sync with HunkLineType enum + pub const PartType = enum(u2) { context = 0, insertion, deletion }; + + pub fn deinit(this: *PatchMutationPart, allocator: Allocator) void { + this.lines.deinit(allocator); + } +}; + +pub const Hunk = struct { + header: Header, + parts: List(PatchMutationPart) = .{}, + + pub const Header = struct { + original: struct { + start: u32, + len: u32, + }, + patched: struct { + start: u32, + len: u32, + }, + + pub const zeroes = std.mem.zeroes(Header); + }; + + pub fn deinit(this: *Hunk, allocator: Allocator) void { + for (this.parts.items) |*part| { + part.deinit(allocator); + } + this.parts.deinit(allocator); + } + + pub fn verifyIntegrity(this: *const Hunk) bool { + var original_length: usize = 0; + var patched_length: usize = 0; + + for (this.parts.items) |part| { + switch (part.type) { + .context => { + patched_length += part.lines.items.len; + original_length += part.lines.items.len; + }, + .insertion => patched_length += part.lines.items.len, + .deletion => original_length += part.lines.items.len, + } + } + + if (original_length != this.header.original.len or patched_length != this.header.patched.len) return false; + return true; + } +}; + +pub const FileMode = enum(u32) { + 
non_executable = 0o644, + executable = 0o755, + + pub fn toBunMode(this: FileMode) bun.Mode { + return @intCast(@intFromEnum(this)); + } + + pub fn fromU32(mode: u32) ?FileMode { + switch (mode) { + 0o644 => return .non_executable, + 0o755 => return .executable, + else => return null, + } + } +}; + +pub const FileRename = struct { + from_path: []const u8, + to_path: []const u8, + + /// Does not allocate + pub fn deinit(_: *FileRename, _: Allocator) void {} +}; + +pub const FileModeChange = struct { + path: []const u8, + old_mode: FileMode, + new_mode: FileMode, + + /// Does not allocate + pub fn deinit(_: *FileModeChange, _: Allocator) void {} +}; + +pub const FilePatch = struct { + path: []const u8, + hunks: List(Hunk), + before_hash: ?[]const u8, + after_hash: ?[]const u8, + + pub fn deinit(this: *FilePatch, allocator: Allocator) void { + for (this.hunks.items) |*hunk| hunk.deinit(allocator); + this.hunks.deinit(allocator); + bun.destroy(this); + } +}; + +pub const FileDeletion = struct { + path: []const u8, + mode: FileMode, + hunk: ?*Hunk, + hash: ?[]const u8, + + pub fn deinit(this: *FileDeletion, allocator: Allocator) void { + if (this.hunk) |hunk| hunk.deinit(allocator); + bun.destroy(this); + } +}; + +pub const FileCreation = struct { + path: []const u8, + mode: FileMode, + hunk: ?*Hunk, + hash: ?[]const u8, + + pub fn deinit(this: *FileCreation, allocator: Allocator) void { + if (this.hunk) |hunk| hunk.deinit(allocator); + bun.destroy(this); + } +}; + +pub const PatchFilePartKind = enum { + file_patch, + file_deletion, + file_creation, + file_rename, + file_mode_change, +}; + +const ParseErr = error{ + empty_patchfile, + unrecognized_pragma, + no_newline_at_eof_pragma_encountered_without_context, + hunk_lines_encountered_before_hunk_header, + hunk_header_integrity_check_failed, + bad_diff_line, + bad_header_line, + rename_from_and_to_not_give, + no_path_given_for_file_deletion, + no_path_given_for_file_creation, + bad_file_mode, +}; + +/// NOTE: the returned `PatchFile` struct will contain pointers to original file text so make sure to not deallocate `file` +pub fn parsePatchFile(file: []const u8) ParseErr!PatchFile { + var lines_parser = PatchLinesParser{}; + defer lines_parser.deinit(bun.default_allocator, false); + + lines_parser.parse(file, .{}) catch |err| brk: { + // TODO: the parser can be refactored to remove this as it is a hacky workaround, like detecting while parsing if legacy diffs are used + if (err == ParseErr.hunk_header_integrity_check_failed) { + lines_parser.reset(bun.default_allocator); + break :brk try lines_parser.parse(file, .{ .support_legacy_diffs = true }); + } + return err; + }; + + const files = lines_parser.result.items; + return try patchFileSecondPass(files); +} + +fn patchFileSecondPass(files: []FileDeets) ParseErr!PatchFile { + var result: PatchFile = .{}; + + for (files) |*file| { + const ty: PatchFilePartKind = if (file.rename_from != null and file.rename_from.?.len > 0) + .file_rename + else if (file.deleted_file_mode != null and file.deleted_file_mode.?.len > 0) + .file_deletion + else if (file.new_file_mode != null and file.new_file_mode.?.len > 0) + .file_creation + else if (file.hunks.items.len > 0) + .file_patch + else + .file_mode_change; + + var destination_file_path: ?[]const u8 = null; + + switch (ty) { + .file_rename => { + if (file.rename_from == null or file.rename_to == null) return ParseErr.rename_from_and_to_not_give; + + result.parts.append( + bun.default_allocator, + .{ + .file_rename = bun.new( + FileRename, + FileRename{ + 
.from_path = file.rename_from.?, + .to_path = file.rename_to.?, + }, + ), + }, + ) catch unreachable; + + destination_file_path = file.rename_to; + }, + .file_deletion => { + const path = file.diff_line_from_path orelse file.from_path orelse { + return ParseErr.no_path_given_for_file_deletion; + }; + result.parts.append(bun.default_allocator, .{ + .file_deletion = bun.new(FileDeletion, FileDeletion{ + .hunk = if (file.hunks.items.len > 0) brk: { + var value = file.hunks.items[0]; + file.hunks.items[0] = .{ + .header = Hunk.Header.zeroes, + }; + break :brk bun.dupe(Hunk, &value); + } else null, + .path = path, + .mode = parseFileMode(file.deleted_file_mode.?) orelse { + return ParseErr.bad_file_mode; + }, + .hash = file.before_hash, + }), + }) catch unreachable; + }, + .file_creation => { + const path = file.diff_line_to_path orelse file.to_path orelse { + return ParseErr.no_path_given_for_file_creation; + }; + result.parts.append(bun.default_allocator, .{ + .file_creation = bun.new(FileCreation, FileCreation{ + .hunk = if (file.hunks.items.len > 0) brk: { + var value = file.hunks.items[0]; + file.hunks.items[0] = .{ + .header = Hunk.Header.zeroes, + }; + break :brk bun.dupe(Hunk, &value); + } else null, + .path = path, + .mode = parseFileMode(file.new_file_mode.?) orelse { + return ParseErr.bad_file_mode; + }, + .hash = file.after_hash, + }), + }) catch unreachable; + }, + .file_patch, .file_mode_change => { + destination_file_path = file.to_path orelse file.diff_line_to_path; + }, + } + + if (destination_file_path != null and file.old_mode != null and file.new_mode != null and !std.mem.eql(u8, file.old_mode.?, file.new_mode.?)) { + result.parts.append(bun.default_allocator, .{ + .file_mode_change = bun.new(FileModeChange, FileModeChange{ + .path = destination_file_path.?, + .old_mode = parseFileMode(file.old_mode.?) orelse { + return ParseErr.bad_file_mode; + }, + .new_mode = parseFileMode(file.new_mode.?) 
orelse { + return ParseErr.bad_file_mode; + }, + }), + }) catch unreachable; + } + + if (destination_file_path != null and file.hunks.items.len > 0) { + result.parts.append(bun.default_allocator, .{ + .file_patch = bun.new(FilePatch, FilePatch{ + .path = destination_file_path.?, + .hunks = file.takeHunks(), + .before_hash = file.before_hash, + .after_hash = file.after_hash, + }), + }) catch unreachable; + } + } + + return result; +} + +fn parseFileMode(mode: []const u8) ?FileMode { + const parsed_mode = (std.fmt.parseInt(u32, mode, 8) catch return null) & 0o777; + return FileMode.fromU32(parsed_mode); +} + +const LookbackIterator = struct { + inner: std.mem.SplitIterator(u8, .scalar), + prev_index: usize = 0, + + pub fn fromInner(inner: std.mem.SplitIterator(u8, .scalar)) LookbackIterator { + return LookbackIterator{ .inner = inner }; + } + + pub fn next(this: *LookbackIterator) ?[]const u8 { + this.prev_index = this.inner.index orelse this.prev_index; + return this.inner.next(); + } + + pub fn back(this: *LookbackIterator) void { + this.inner.index = this.prev_index; + } +}; + +const PatchLinesParser = struct { + result: List(FileDeets) = .{}, + current_file_patch: FileDeets = .{}, + state: State = .parsing_header, + current_hunk: ?Hunk = null, + current_hunk_mutation_part: ?PatchMutationPart = null, + + const State = enum { parsing_header, parsing_hunks }; + + const HunkLineType = enum(u3) { + /// Additional context + context = 0, + + /// Example: + /// + sjfskdjfsdf + insertion, + + /// Example: + /// - sjfskdjfsdf + deletion, + + /// Example: + /// @@ -1,3 +1,3 @@ + header, + + /// Example: + /// \ No newline at end of file + pragma, + }; + + fn deinit(this: *PatchLinesParser, allocator: Allocator, comptime clear_result_retaining_capacity: bool) void { + this.current_file_patch.deinit(allocator); + if (this.current_hunk) |*hunk| hunk.deinit(allocator); + if (this.current_hunk_mutation_part) |*part| part.deinit(allocator); + for (this.result.items) |*file_deet| file_deet.deinit(allocator); + if (comptime clear_result_retaining_capacity) { + this.result.clearRetainingCapacity(); + } else { + this.result.deinit(allocator); + } + } + + fn reset(this: *PatchLinesParser, allocator: Allocator) void { + this.deinit(allocator, true); + this.result.clearRetainingCapacity(); + this.* = .{ + .result = this.result, + }; + } + + pub fn parse( + this: *PatchLinesParser, + file_: []const u8, + opts: struct { support_legacy_diffs: bool = false }, + ) ParseErr!void { + if (file_.len == 0) return ParseErr.empty_patchfile; + const end = brk: { + var iter = std.mem.splitBackwardsScalar(u8, file_, '\n'); + var prev: usize = file_.len; + if (iter.next()) |last_line| { + if (last_line.len == 0) { + prev = iter.index.?; + } + } + break :brk prev; + }; + if (end == 0 or end > file_.len) return; + const file = file_[0..end]; + var lines = LookbackIterator.fromInner(std.mem.splitScalar(u8, file, '\n')); + + while (lines.next()) |line| { + switch (this.state) { + .parsing_header => { + if (bun.strings.hasPrefix(line, "@@")) { + this.state = .parsing_hunks; + this.current_file_patch.hunks = .{}; + lines.back(); + } else if (bun.strings.hasPrefix(line, "diff --git ")) { + if (this.current_file_patch.diff_line_from_path != null) { + this.commitFilePatch(); + } + // Equivalent to: + // const match = line.match(/^diff --git a\/(.*?) 
b\/(.*?)\s*$/) + // currentFilePatch.diffLineFromPath = match[1] + // currentFilePatch.diffLineToPath = match[2] + const match = parseDiffLinePaths(line) orelse { + // TODO: store line somewhere + return ParseErr.bad_diff_line; + }; + this.current_file_patch.diff_line_from_path = match[0]; + this.current_file_patch.diff_line_to_path = match[1]; + } else if (bun.strings.hasPrefix(line, "old mode ")) { + this.current_file_patch.old_mode = std.mem.trim(u8, line["old mode ".len..], WHITESPACE); + } else if (bun.strings.hasPrefix(line, "new mode ")) { + this.current_file_patch.new_mode = std.mem.trim(u8, line["new mode ".len..], WHITESPACE); + } else if (bun.strings.hasPrefix(line, "deleted file mode ")) { + this.current_file_patch.deleted_file_mode = std.mem.trim(u8, line["deleted file mode ".len..], WHITESPACE); + } else if (bun.strings.hasPrefix(line, "new file mode ")) { + this.current_file_patch.new_file_mode = std.mem.trim(u8, line["new file mode ".len..], WHITESPACE); + } else if (bun.strings.hasPrefix(line, "rename from ")) { + this.current_file_patch.rename_from = std.mem.trim(u8, line["rename from ".len..], WHITESPACE); + } else if (bun.strings.hasPrefix(line, "rename to ")) { + this.current_file_patch.rename_to = std.mem.trim(u8, line["rename to ".len..], WHITESPACE); + } else if (bun.strings.hasPrefix(line, "index ")) { + const hashes = parseDiffHashes(line["index ".len..]) orelse continue; + this.current_file_patch.before_hash = hashes[0]; + this.current_file_patch.after_hash = hashes[1]; + } else if (bun.strings.hasPrefix(line, "--- ")) { + this.current_file_patch.from_path = std.mem.trim(u8, line["--- a/".len..], WHITESPACE); + } else if (bun.strings.hasPrefix(line, "+++ ")) { + this.current_file_patch.to_path = std.mem.trim(u8, line["+++ b/".len..], WHITESPACE); + } + }, + .parsing_hunks => { + if (opts.support_legacy_diffs and bun.strings.hasPrefix(line, "--- a/")) { + this.state = .parsing_header; + this.commitFilePatch(); + lines.back(); + continue; + } + // parsing hunks + const hunk_line_type: HunkLineType = brk: { + if (line.len == 0) + // treat blank lines as context + break :brk .context; + + break :brk switch (line[0]) { + '@' => @as(HunkLineType, .header), + '-' => @as(HunkLineType, .deletion), + '+' => @as(HunkLineType, .insertion), + ' ' => @as(HunkLineType, .context), + '\\' => @as(HunkLineType, .pragma), + '\r' => @as(HunkLineType, .context), + else => null, + } orelse { + // unrecognized, bail out + this.state = .parsing_header; + this.commitFilePatch(); + lines.back(); + continue; + }; + }; + + switch (hunk_line_type) { + .header => { + this.commitHunk(); + this.current_hunk = try parseHunkHeaderLine(line); + }, + .pragma => { + if (!bun.strings.hasPrefix(line, "\\ No newline at end of file")) { + // TODO: store line + return ParseErr.unrecognized_pragma; + } + if (this.current_hunk_mutation_part == null) { + return ParseErr.no_newline_at_eof_pragma_encountered_without_context; + } + this.current_hunk_mutation_part.?.no_newline_at_end_of_file = true; + }, + .insertion, .deletion, .context => { + if (this.current_hunk == null) { + return ParseErr.hunk_lines_encountered_before_hunk_header; + } + if (this.current_hunk_mutation_part != null and @intFromEnum(this.current_hunk_mutation_part.?.type) != @intFromEnum(hunk_line_type)) { + this.current_hunk.?.parts.append(bun.default_allocator, this.current_hunk_mutation_part.?) 
catch unreachable; + this.current_hunk_mutation_part = null; + } + + if (this.current_hunk_mutation_part == null) { + this.current_hunk_mutation_part = .{ + .type = @enumFromInt(@intFromEnum(hunk_line_type)), + }; + } + + this.current_hunk_mutation_part.?.lines.append(bun.default_allocator, line[@min(1, line.len)..]) catch unreachable; + }, + } + }, + } + } + + this.commitFilePatch(); + + for (this.result.items) |file_deet| { + for (file_deet.hunks.items) |hunk| { + if (!hunk.verifyIntegrity()) { + return ParseErr.hunk_header_integrity_check_failed; + } + } + } + } + + fn commitHunk(this: *PatchLinesParser) void { + if (this.current_hunk) |*hunk| { + if (this.current_hunk_mutation_part) |mutation_part| { + hunk.parts.append(bun.default_allocator, mutation_part) catch unreachable; + this.current_hunk_mutation_part = null; + } + this.current_file_patch.hunks.append(bun.default_allocator, hunk.*) catch unreachable; + this.current_hunk = null; + } + } + + fn commitFilePatch(this: *PatchLinesParser) void { + this.commitHunk(); + this.current_file_patch.nullifyEmptyStrings(); + this.result.append(bun.default_allocator, this.current_file_patch) catch unreachable; + this.current_file_patch = .{}; + } + + fn parseHunkHeaderLineImpl(text_: []const u8) ParseErr!struct { line_nr: u32, line_count: u32, rest: []const u8 } { + var text = text_; + const DIGITS = brk: { + var set = std.bit_set.IntegerBitSet(256).initEmpty(); + for ('0'..'9' + 1) |c| set.set(c); + break :brk set; + }; + + // @@ -100,32 +100,32 @@ + // ^ + const line_nr_start: usize = 0; + var line_nr_end: usize = 0; + var saw_comma: bool = false; + var saw_whitespace: bool = false; + while (line_nr_end < text.len) { + if (text[line_nr_end] == ',') { + saw_comma = true; + break; + } else if (text[line_nr_end] == ' ') { + saw_whitespace = true; + break; + } + if (!DIGITS.isSet(text[line_nr_end])) return ParseErr.bad_header_line; + line_nr_end += 1; + } + if (!saw_comma and !saw_whitespace) return ParseErr.bad_header_line; + const line_nr = text[line_nr_start..line_nr_end]; + var line_nr_count: []const u8 = "1"; + if (line_nr_end + 1 >= text.len) return ParseErr.bad_header_line; + + text = text[line_nr_end..]; + if (text.len == 0) return ParseErr.bad_header_line; + + // @@ -100,32 +100,32 @@ + // ^ + // but the comma can be optional + if (saw_comma) { + text = text[1..]; + saw_whitespace = false; + const first_col_start = 0; + var first_col_end: usize = 0; + while (first_col_end < text.len) { + if (text[first_col_end] == ' ') { + saw_whitespace = true; + break; + } + if (!DIGITS.isSet(text[first_col_end])) return ParseErr.bad_header_line; + first_col_end += 1; + } + if (!saw_whitespace) return ParseErr.bad_header_line; + line_nr_count = text[first_col_start..first_col_end]; + text = text[first_col_end..]; + } + + return .{ + .line_nr = @max(1, std.fmt.parseInt(u32, line_nr, 10) catch return ParseErr.bad_header_line), + .line_count = std.fmt.parseInt(u32, line_nr_count, 10) catch return ParseErr.bad_header_line, + .rest = text, + }; + } + + fn parseHunkHeaderLine(line_: []const u8) ParseErr!Hunk { + // const match = headerLine.trim() + // .match(/^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? 
@@.*/) + + var line = std.mem.trim(u8, line_, WHITESPACE); + // @@ -100,32 +100,32 @@ + // ^^^^ + // this part + if (!(line.len >= 4 and line[0] == '@' and line[1] == '@' and line[2] == ' ' and line[3] == '-')) + // TODO: store line + return ParseErr.bad_header_line; + + if (line.len <= 4) return ParseErr.bad_header_line; + + // @@ -100,32 +100,32 @@ + // ^ + line = line[4..]; + + const first_result = try parseHunkHeaderLineImpl(line); + // @@ -100,32 +100,32 @@ + // ^ + line = first_result.rest; + if (line.len < 2 or line[1] != '+') return ParseErr.bad_header_line; + line = line[2..]; + + const second_result = try parseHunkHeaderLineImpl(line); + // @@ -100,32 +100,32 @@ + // ^ + line = second_result.rest; + + if (line.len >= 3 and line[0] == ' ' and line[1] == '@' and line[2] == '@') { + return Hunk{ + .header = .{ + .original = .{ .start = first_result.line_nr, .len = first_result.line_count }, + .patched = .{ .start = second_result.line_nr, .len = second_result.line_count }, + }, + }; + } + + return ParseErr.bad_header_line; + } + + fn parseDiffHashes(line: []const u8) ?struct { []const u8, []const u8 } { + // index 2de83dd..842652c 100644 + // ^ + // we expect that we are here + bun.debugAssert(!bun.strings.hasPrefix(line, "index ")); + + // From @pnpm/patch-package the regex is this: + // const match = line.match(/(\w+)\.\.(\w+)/) + + const delimiter_start = std.mem.indexOf(u8, line, "..") orelse return null; + + const VALID_CHARS: std.bit_set.IntegerBitSet(256) = comptime brk: { + var bitset = std.bit_set.IntegerBitSet(256).initEmpty(); + // TODO: the regex uses \w which is [a-zA-Z0-9_] + for ('0'..'9' + 1) |c| bitset.set(c); + for ('a'..'z' + 1) |c| bitset.set(c); + for ('A'..'Z' + 1) |c| bitset.set(c); + bitset.set('_'); + break :brk bitset; + }; + + const a_part = line[0..delimiter_start]; + for (a_part) |c| if (!VALID_CHARS.isSet(c)) return null; + + const b_part_start = delimiter_start + 2; + if (b_part_start >= line.len) return null; + const lmao_bro = line[b_part_start..]; + std.mem.doNotOptimizeAway(lmao_bro); + const b_part_end = if (std.mem.indexOfAny(u8, line[b_part_start..], " \n\r\t")) |pos| pos + b_part_start else line.len; + + const b_part = line[b_part_start..b_part_end]; + for (a_part) |c| if (!VALID_CHARS.isSet(c)) return null; + for (b_part) |c| if (!VALID_CHARS.isSet(c)) return null; + + return .{ a_part, b_part }; + } + + fn parseDiffLinePaths(line: []const u8) ?struct { []const u8, []const u8 } { + // From @pnpm/patch-package the regex is this: + // const match = line.match(/^diff --git a\/(.*?) 
b\/(.*?)\s*$/) + + const prefix = "diff --git a/"; + if (!bun.strings.hasPrefix(line, prefix)) return null; + // diff --git a/banana.ts b/banana.ts + // ^ + var rest = line[prefix.len..]; + if (rest.len == 0) return null; + + const a_path_start_index = 0; + var a_path_end_index: usize = 0; + var b_path_start_index: usize = 0; + + var i: usize = 0; + while (true) { + const start_of_b_part = std.mem.indexOfScalar(u8, rest[i..], 'b') orelse return null; + i += start_of_b_part; + if (i > 0 and rest[i - 1] == ' ' and i + 1 < rest.len and rest[i + 1] == '/') { + // diff --git a/banana.ts b/banana.ts + // ^ ^ + // | | + // a_path_end_index + | + // b_path_start_index + + a_path_end_index = i - 1; + b_path_start_index = i + 2; + break; + } + i += 1; + } + + const a_path = rest[a_path_start_index..a_path_end_index]; + const b_path = std.mem.trimRight(u8, rest[b_path_start_index..], " \n\r\t"); + return .{ a_path, b_path }; + } +}; + +pub const TestingAPIs = struct { + const ApplyArgs = struct { + patchfile_txt: JSC.ZigString.Slice, + patchfile: PatchFile, + dirfd: bun.FileDescriptor, + + pub fn deinit(this: *ApplyArgs) void { + this.patchfile_txt.deinit(); + this.patchfile.deinit(bun.default_allocator); + if (bun.FileDescriptor.cwd().eq(this.dirfd)) { + _ = bun.sys.close(this.dirfd); + } + } + }; + pub fn apply(globalThis: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) callconv(.C) JSC.JSValue { + var args = switch (parseApplyArgs(globalThis, callframe)) { + .err => |e| return e, + .result => |a| a, + }; + defer args.deinit(); + + if (args.patchfile.apply(bun.default_allocator, args.dirfd)) |err| { + globalThis.throwValue(err.toErrorInstance(globalThis)); + return .undefined; + } + + return .true; + } + /// Used in JS tests, see `internal-for-testing.ts` and patch tests. + pub fn parse(globalThis: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) callconv(.C) JSC.JSValue { + const arguments_ = callframe.arguments(2); + var arguments = JSC.Node.ArgumentsSlice.init(globalThis.bunVM(), arguments_.slice()); + + const patchfile_src_js = arguments.nextEat() orelse { + globalThis.throw("TestingAPIs.parse: expected at least 1 argument, got 0", .{}); + return .undefined; + }; + const patchfile_src_bunstr = patchfile_src_js.toBunString(globalThis); + const patchfile_src = patchfile_src_bunstr.toUTF8(bun.default_allocator); + + var patchfile = parsePatchFile(patchfile_src.slice()) catch |e| { + if (e == error.hunk_header_integrity_check_failed) { + globalThis.throwError(e, "this indicates either that the supplied patch file was incorrect, or there is a bug in Bun. 
Please check your .patch file, or open a GitHub issue :)"); + } else globalThis.throwError(e, "failed to parse patch file"); + + return .undefined; + }; + defer patchfile.deinit(bun.default_allocator); + + const str = std.json.stringifyAlloc(bun.default_allocator, patchfile, .{}) catch { + globalThis.throwOutOfMemory(); + return .undefined; + }; + const outstr = bun.String.fromUTF8(str); + return outstr.toJS(globalThis); + } + + pub fn parseApplyArgs(globalThis: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) bun.JSC.Node.Maybe(ApplyArgs, JSC.JSValue) { + const arguments_ = callframe.arguments(2); + var arguments = JSC.Node.ArgumentsSlice.init(globalThis.bunVM(), arguments_.slice()); + + const patchfile_js = arguments.nextEat() orelse { + globalThis.throw("apply: expected at least 1 argument, got 0", .{}); + return .{ .err = .undefined }; + }; + + const dir_fd = if (arguments.nextEat()) |dir_js| brk: { + var bunstr = dir_js.toBunString(globalThis); + defer bunstr.deref(); + const path = bunstr.toOwnedSliceZ(bun.default_allocator) catch unreachable; + defer bun.default_allocator.free(path); + + break :brk switch (bun.sys.open(path, std.os.O.DIRECTORY | std.os.O.RDONLY, 0)) { + .err => |e| { + globalThis.throwValue(e.withPath(path).toJSC(globalThis)); + return .{ .err = .undefined }; + }, + .result => |fd| fd, + }; + } else bun.FileDescriptor.cwd(); + + const patchfile_bunstr = patchfile_js.toBunString(globalThis); + defer patchfile_bunstr.deref(); + const patchfile_src = patchfile_bunstr.toUTF8(bun.default_allocator); + + const patch_file = parsePatchFile(patchfile_src.slice()) catch |e| { + if (bun.FileDescriptor.cwd().eq(dir_fd)) { + _ = bun.sys.close(dir_fd); + } + + patchfile_src.deinit(); + globalThis.throwError(e, "failed to parse patchfile"); + return .{ .err = .undefined }; + }; + + return .{ + .result = ApplyArgs{ + .dirfd = dir_fd, + .patchfile = patch_file, + .patchfile_txt = patchfile_src, + }, + }; + } +}; + +pub fn gitDiff( + allocator: std.mem.Allocator, + old_folder_: []const u8, + new_folder_: []const u8, +) !bun.JSC.Node.Maybe(std.ArrayList(u8), std.ArrayList(u8)) { + const old_folder: []const u8 = if (comptime bun.Environment.isWindows) brk: { + // backslash in the path fucks everything up + const cpy = allocator.alloc(u8, old_folder_.len) catch bun.outOfMemory(); + @memcpy(cpy, old_folder_); + std.mem.replaceScalar(u8, cpy, '\\', '/'); + break :brk cpy; + } else old_folder_; + const new_folder = if (comptime bun.Environment.isWindows) brk: { + const cpy = allocator.alloc(u8, new_folder_.len) catch bun.outOfMemory(); + @memcpy(cpy, new_folder_); + std.mem.replaceScalar(u8, cpy, '\\', '/'); + break :brk cpy; + } else new_folder_; + + defer if (comptime bun.Environment.isWindows) { + allocator.free(old_folder); + allocator.free(new_folder); + }; + + var child_proc = std.ChildProcess.init( + &[_][]const u8{ + "git", + "-c", + "core.safecrlf=false", + "diff", + "--src-prefix=a/", + "--dst-prefix=b/", + "--ignore-cr-at-eol", + "--irreversible-delete", + "--full-index", + "--no-index", + old_folder, + new_folder, + }, + allocator, + ); + // unfortunately, git diff returns non-zero exit codes even when it succeeds. 
+    // we have to check that stderr was not empty to know if it failed
+    child_proc.stdout_behavior = .Pipe;
+    child_proc.stderr_behavior = .Pipe;
+    var map = std.process.EnvMap.init(allocator);
+    defer map.deinit();
+    if (bun.getenvZ("PATH")) |v| try map.put("PATH", v);
+    try map.put("GIT_CONFIG_NOSYSTEM", "1");
+    try map.put("HOME", "");
+    try map.put("XDG_CONFIG_HOME", "");
+    try map.put("USERPROFILE", "");
+
+    child_proc.env_map = &map;
+    var stdout = std.ArrayList(u8).init(allocator);
+    var stderr = std.ArrayList(u8).init(allocator);
+    var deinit_stdout = true;
+    var deinit_stderr = true;
+    defer {
+        if (deinit_stdout) stdout.deinit();
+        if (deinit_stderr) stderr.deinit();
+    }
+    try child_proc.spawn();
+    try child_proc.collectOutput(&stdout, &stderr, 1024 * 1024 * 4);
+    _ = try child_proc.wait();
+    if (stderr.items.len > 0) {
+        deinit_stderr = false;
+        return .{ .err = stderr };
+    }
+
+    try gitDiffPostprocess(&stdout, old_folder, new_folder);
+    deinit_stdout = false;
+    return .{ .result = stdout };
+}
+
+/// Now we need to do the equivalent of these regex substitutions.
+///
+/// Assume that:
+///   aFolder = old_folder = "the_old_folder"
+///   bFolder = new_folder = "the_new_folder"
+///
+/// We use the --src-prefix=a/ and --dst-prefix=b/ options with git diff,
+/// so the paths end up looking like so:
+///
+/// - a/the_old_folder/package.json
+/// - b/the_new_folder/package.json
+/// - a/the_old_folder/src/index.js
+/// - b/the_new_folder/src/index.js
+///
+/// We need to strip out all references to "the_old_folder" and "the_new_folder":
+/// - a/package.json
+/// - b/package.json
+/// - a/src/index.js
+/// - b/src/index.js
+///
+/// The operations look roughly like the following sequence of substitutions and regexes:
+/// .replace(new RegExp(`(a|b)(${escapeStringRegexp(`/${removeTrailingAndLeadingSlash(aFolder)}/`)})`, "g"), "$1/")
+/// .replace(new RegExp(`(a|b)${escapeStringRegexp(`/${removeTrailingAndLeadingSlash(bFolder)}/`)}`, "g"), "$1/")
+fn gitDiffPostprocess(stdout: *std.ArrayList(u8), old_folder: []const u8, new_folder: []const u8) !void {
+    const old_folder_trimmed = std.mem.trim(u8, old_folder, "/");
+    const new_folder_trimmed = std.mem.trim(u8, new_folder, "/");
+
+    var old_buf: bun.PathBuffer = undefined;
+    var new_buf: bun.PathBuffer = undefined;
+
+    const @"a/$old_folder/", const @"b/$new_folder/" = brk: {
+        old_buf[0] = 'a';
+        old_buf[1] = '/';
+        @memcpy(old_buf[2..][0..old_folder_trimmed.len], old_folder_trimmed);
+        old_buf[2 + old_folder_trimmed.len] = '/';
+
+        new_buf[0] = 'b';
+        new_buf[1] = '/';
+        @memcpy(new_buf[2..][0..new_folder_trimmed.len], new_folder_trimmed);
+        new_buf[2 + new_folder_trimmed.len] = '/';
+
+        break :brk .{ old_buf[0 .. 2 + old_folder_trimmed.len + 1], new_buf[0 .. 2 + new_folder_trimmed.len + 1] };
+    };
+
+    var line_iter = std.mem.splitScalar(u8, stdout.items, '\n');
+    while (line_iter.next()) |line| {
+        if (shouldSkipLine(line)) continue;
+        if (std.mem.indexOf(u8, line, @"a/$old_folder/")) |idx| {
+            const @"$old_folder/ start" = idx + 2;
+            const line_start = line_iter.index.? - 1 - line.len;
+            line_iter.index.? -= 1 + line.len;
+            try stdout.replaceRange(line_start + @"$old_folder/ start", old_folder_trimmed.len + 1, "");
+            continue;
+        }
+        if (std.mem.indexOf(u8, line, @"b/$new_folder/")) |idx| {
+            const @"$new_folder/ start" = idx + 2;
+            const line_start = line_iter.index.? - 1 - line.len;
+            try stdout.replaceRange(line_start + @"$new_folder/ start", new_folder_trimmed.len + 1, "");
+            line_iter.index.? -= new_folder_trimmed.len + 1;
+        }
+    }
+}
+
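The doc comment above is the regex form of this transformation as the JS patch tooling performs it. As a reference point, here is a minimal TypeScript sketch of those two substitutions; `escapeStringRegexp` and `removeTrailingAndLeadingSlash` mirror the helpers of the same names defined in `test/js/bun/patch/patch.test.ts` later in this patch, and this is an illustration of the transformation, not the Zig implementation:

```ts
// Sketch of the two substitutions described in the doc comment above.
const escapeStringRegexp = (s: string) => s.replace(/[|\\{}()[\]^$+*?.]/g, "\\$&").replace(/-/g, "\\x2d");
const removeTrailingAndLeadingSlash = (p: string) => p.replace(/^\/|\/$/g, "");

function stripFolderPrefixes(patch: string, aFolder: string, bFolder: string): string {
  // "a/the_old_folder/package.json" -> "a/package.json", and likewise for b/
  return patch
    .replace(new RegExp(`(a|b)(${escapeStringRegexp(`/${removeTrailingAndLeadingSlash(aFolder)}/`)})`, "g"), "$1/")
    .replace(new RegExp(`(a|b)${escapeStringRegexp(`/${removeTrailingAndLeadingSlash(bFolder)}/`)}`, "g"), "$1/");
}

console.log(stripFolderPrefixes("diff --git a/old_dir/index.js b/new_dir/index.js", "old_dir", "new_dir"));
// => "diff --git a/index.js b/index.js"
```

The Zig version edits the buffer in place instead of reallocating, and additionally skips patch-content lines via `shouldSkipLine` below, so file contents that merely mention those paths are left untouched.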
+/// We need to remove occurrences of "a/" and "b/" and "$old_folder/" and
+/// "$new_folder/" but we don't want to remove them from the actual patch
+/// content (maybe someone had a/$old_folder/foo.txt in the changed files).
+///
+/// To do that we have to skip the lines in the patch file that correspond
+/// to changes.
+///
+/// ```patch
+///
+/// diff --git a/numbers.txt b/banana.txt
+/// old mode 100644
+/// new mode 100755
+/// similarity index 96%
+/// rename from numbers.txt
+/// rename to banana.txt
+/// index fbf1785..92d2c5f
+/// --- a/numbers.txt
+/// +++ b/banana.txt
+/// @@ -1,4 +1,4 @@
+/// -one
+/// +ne
+///
+///  two
+/// ```
+fn shouldSkipLine(line: []const u8) bool {
+    return line.len == 0 or
+        (switch (line[0]) {
+            ' ', '-', '+' => true,
+            else => false,
+        } and
+            // don't skip lines like "--- a/numbers.txt" or "+++ b/numbers.txt"
+            (!(line.len >= 4 and (std.mem.eql(u8, line[0..4], "--- ") or std.mem.eql(u8, line[0..4], "+++ ")))));
+}
diff --git a/src/resolver/resolver.zig b/src/resolver/resolver.zig
index b8627673ae..0a2ce3ccd4 100644
--- a/src/resolver/resolver.zig
+++ b/src/resolver/resolver.zig
@@ -1967,6 +1967,7 @@ pub const Resolver = struct {
             .{
                 .root_request_id = 0,
             },
+            null,
         );
 
         return .{
diff --git a/src/shell/interpreter.zig b/src/shell/interpreter.zig
index 929abdcdb5..7d41887a85 100644
--- a/src/shell/interpreter.zig
+++ b/src/shell/interpreter.zig
@@ -6863,16 +6863,22 @@ pub const Interpreter = struct {
         pub fn start(this: *Echo) Maybe(void) {
             const args = this.bltn.argsSlice();
 
+            var has_trailing_newline: bool = false;
             const args_len = args.len;
             for (args, 0..) |arg, i| {
-                const len = std.mem.len(arg);
-                this.output.appendSlice(arg[0..len]) catch bun.outOfMemory();
+                const thearg = std.mem.span(arg);
                 if (i < args_len - 1) {
+                    this.output.appendSlice(thearg) catch bun.outOfMemory();
                     this.output.append(' ') catch bun.outOfMemory();
+                } else {
+                    if (thearg.len > 0 and thearg[thearg.len - 1] == '\n') {
+                        has_trailing_newline = true;
+                    }
+                    this.output.appendSlice(bun.strings.trimSubsequentLeadingChars(thearg, '\n')) catch bun.outOfMemory();
+                }
             }
-            this.output.append('\n') catch bun.outOfMemory();
+            if (!has_trailing_newline) this.output.append('\n') catch bun.outOfMemory();
 
             if (!this.bltn.stdout.needsIO()) {
                 _ = this.bltn.writeNoIO(.stdout, this.output.items[0..]);
diff --git a/src/string_immutable.zig b/src/string_immutable.zig
index 56889d11bd..e36a9a0e32 100644
--- a/src/string_immutable.zig
+++ b/src/string_immutable.zig
@@ -4037,6 +4037,20 @@ pub fn encodeBytesToHex(destination: []u8, source: []const u8) usize {
     return to_read * 2;
 }
 
+/// Collapse a run of repeated trailing chars down to a single one
+/// ```zig
+/// trimSubsequentLeadingChars("foo\n\n\n\n", '\n') -> "foo\n"
+/// ```
+pub fn trimSubsequentLeadingChars(slice: []const u8, char: u8) []const u8 {
+    if (slice.len == 0) return slice;
+    var end = slice.len - 1;
+    var endend = slice.len;
+    while (end > 0 and slice[end] == char) : (end -= 1) {
+        endend = end + 1;
+    }
+    return slice[0..endend];
+}
+
 pub fn trimLeadingChar(slice: []const u8, char: u8) []const u8 {
     if (indexOfNotChar(slice, char)) |i| {
         return slice[i..];
diff --git a/src/sys.zig b/src/sys.zig
index 824ae3f6f8..d9a4ce3bc4 100644
--- a/src/sys.zig
+++ b/src/sys.zig
@@ -75,6 +75,7 @@ pub const Tag = enum(u8) {
     copy_file_range,
     copyfile,
     fchmod,
+    fchmodat,
     fchown,
     fcntl,
     fdatasync,
@@ -147,6 +148,7 @@ pub const Tag = enum(u8) {
     uv_spawn,
     uv_pipe,
     uv_tty_set_mode,
+    uv_open_osfhandle,
     // Below this line are Windows API 
calls only. @@ -359,6 +361,13 @@ pub fn fchmod(fd: bun.FileDescriptor, mode: bun.Mode) Maybe(void) { Maybe(void).success; } +pub fn fchmodat(fd: bun.FileDescriptor, path: [:0]const u8, mode: bun.Mode, flags: i32) Maybe(void) { + if (comptime Environment.isWindows) @compileError("Use fchmod instead"); + + return Maybe(void).errnoSys(C.fchmodat(fd.cast(), path.ptr, mode, flags), .fchmodat) orelse + Maybe(void).success; +} + pub fn chdirOSPath(destination: bun.OSPathSliceZ) Maybe(void) { assertIsValidWindowsPath(bun.OSPathChar, destination); @@ -461,7 +470,14 @@ pub fn lstat(path: [:0]const u8) Maybe(bun.Stat) { } pub fn fstat(fd: bun.FileDescriptor) Maybe(bun.Stat) { - if (Environment.isWindows) return sys_uv.fstat(fd); + if (Environment.isWindows) { + const dec = bun.FDImpl.decode(fd); + if (dec.kind == .system) { + const uvfd = bun.toLibUVOwnedFD(fd) catch return .{ .err = Error.fromCode(.MFILE, .uv_open_osfhandle) }; + defer _ = bun.sys.close(uvfd); + return sys_uv.fstat(fd); + } else return sys_uv.fstat(fd); + } var stat_ = mem.zeroes(bun.Stat); @@ -511,7 +527,7 @@ pub fn mkdiratW(dir_fd: bun.FileDescriptor, file_path: []const u16, _: i32) Mayb } pub fn fstatat(fd: bun.FileDescriptor, path: [:0]const u8) Maybe(bun.Stat) { - if (Environment.isWindows) @compileError("TODO"); + if (Environment.isWindows) @compileError("Use fstat on Windows"); var stat_ = mem.zeroes(bun.Stat); if (Maybe(bun.Stat).errnoSys(sys.fstatat(fd.int(), path, &stat_, 0), .fstatat)) |err| { log("fstatat({}, {s}) = {s}", .{ fd, path, @tagName(err.getErrno()) }); @@ -2868,6 +2884,28 @@ pub const File = struct { return self.bytes.items; } }; + pub fn readFillBuf(this: File, buf: []u8) Maybe([]u8) { + var read_amount: usize = 0; + while (read_amount < buf.len) { + switch (if (comptime Environment.isPosix) + bun.sys.pread(this.handle, buf[read_amount..], @intCast(read_amount)) + else + bun.sys.read(this.handle, buf[read_amount..])) { + .err => |err| { + return .{ .err = err }; + }, + .result => |bytes_read| { + if (bytes_read == 0) { + break; + } + + read_amount += bytes_read; + }, + } + } + + return .{ .result = buf[0..read_amount] }; + } pub fn readToEndWithArrayList(this: File, list: *std.ArrayList(u8)) Maybe(usize) { const size = switch (this.getEndPos()) { .err => |err| { diff --git a/test/cli/install/bun-install-patch.test.ts b/test/cli/install/bun-install-patch.test.ts new file mode 100644 index 0000000000..da972c13b7 --- /dev/null +++ b/test/cli/install/bun-install-patch.test.ts @@ -0,0 +1,382 @@ +import { $ } from "bun"; +import { bunExe, bunEnv as env, toBeValidBin, toHaveBins, toBeWorkspaceLink, tempDirWithFiles, bunEnv } from "harness"; +import { afterAll, afterEach, beforeAll, beforeEach, expect, it, describe, test, setDefaultTimeout } from "bun:test"; + +describe("patch", async () => { + const is_even_patch = /* patch */ `diff --git a/index.js b/index.js +index 832d92223a9ec491364ee10dcbe3ad495446ab80..bc652e496c165a7415880ef4520c0ab302bf0765 100644 +--- a/index.js ++++ b/index.js +@@ -10,5 +10,6 @@ + var isOdd = require('is-odd'); + + module.exports = function isEven(i) { ++ console.log("HI"); + return !isOdd(i); + }; +`; + const is_even_patch2 = /* patch */ `diff --git a/index.js b/index.js +index 832d92223a9ec491364ee10dcbe3ad495446ab80..217353bf51861fe4fdba68cb98bc5f361c7730e1 100644 +--- a/index.js ++++ b/index.js +@@ -5,10 +5,11 @@ + * Released under the MIT License. 
+ */ + +-'use strict'; ++"use strict"; + +-var isOdd = require('is-odd'); ++var isOdd = require("is-odd"); + + module.exports = function isEven(i) { ++ console.log("lmao"); + return !isOdd(i); + }; +`; + + const is_odd_patch = /* patch */ `diff --git a/index.js b/index.js +index c8950c17b265104bcf27f8c345df1a1b13a78950..084439e9692a1e94a759d1a34a47282a1d145a30 100644 +--- a/index.js ++++ b/index.js +@@ -5,16 +5,17 @@ + * Released under the MIT License. + */ + +-'use strict'; ++"use strict"; + +-var isNumber = require('is-number'); ++var isNumber = require("is-number"); + + module.exports = function isOdd(i) { ++ console.log("Hi from isOdd!"); + if (!isNumber(i)) { +- throw new TypeError('is-odd expects a number.'); ++ throw new TypeError("is-odd expects a number."); + } + if (Number(i) !== Math.floor(i)) { +- throw new RangeError('is-odd expects an integer.'); ++ throw new RangeError("is-odd expects an integer."); + } + return !!(~~i & 1); + }; +`; + + const is_odd_patch2 = /* patch */ `diff --git a/index.js b/index.js +index c8950c17b265104bcf27f8c345df1a1b13a78950..7ce57ab96400ab0ff4fac7e06f6e02c2a5825852 100644 +--- a/index.js ++++ b/index.js +@@ -5,16 +5,17 @@ + * Released under the MIT License. + */ + +-'use strict'; ++"use strict"; + +-var isNumber = require('is-number'); ++var isNumber = require("is-number"); + + module.exports = function isOdd(i) { ++ console.log("lmao"); + if (!isNumber(i)) { +- throw new TypeError('is-odd expects a number.'); ++ throw new TypeError("is-odd expects a number."); + } + if (Number(i) !== Math.floor(i)) { +- throw new RangeError('is-odd expects an integer.'); ++ throw new RangeError("is-odd expects an integer."); + } + return !!(~~i & 1); + }; +`; + + const filepathEscape: (x: string) => string = + process.platform === "win32" + ? (s: string) => { + const charsToEscape = new Set(["/", ":"]); + return s + .split("") + .map(c => (charsToEscape.has(c) ? "_" : c)) + .join(""); + } + : (x: string) => x; + + const versions: [version: string, patchVersion?: string][] = [ + ["1.0.0"], + ["github:i-voted-for-trump/is-even", "github:i-voted-for-trump/is-even#585f800"], + [ + "git@github.com:i-voted-for-trump/is-even.git", + "git+ssh://git@github.com:i-voted-for-trump/is-even.git#585f8002bb16f7bec723a47349b67df451f1b25d", + ], + ]; + + describe("should patch a dependency when its dependencies are not hoisted", async () => { + // is-even depends on is-odd ^0.1.2 and we add is-odd 3.0.1, which should be hoisted + for (const [version, patchVersion_] of versions) { + const patchFilename = filepathEscape(`is-even@${version}.patch`); + const patchVersion = patchVersion_ ?? 
version; + test(version, async () => { + const filedir = tempDirWithFiles("patch1", { + "package.json": JSON.stringify({ + "name": "bun-patch-test", + "module": "index.ts", + "type": "module", + "patchedDependencies": { + [`is-even@${patchVersion}`]: `patches/${patchFilename}`, + }, + "dependencies": { + "is-even": version, + "is-odd": "3.0.1", + }, + }), + patches: { + [patchFilename]: is_even_patch, + }, + "index.ts": /* ts */ `import isEven from 'is-even'; isEven(2); console.log('lol')`, + }); + console.log("TEMP:", filedir); + await $`${bunExe()} i`.env(bunEnv).cwd(filedir); + const { stdout, stderr } = await $`${bunExe()} run index.ts`.env(bunEnv).cwd(filedir); + expect(stderr.toString()).toBe(""); + expect(stdout.toString()).toContain("HI\n"); + }); + } + }); + + test("should patch a non-hoisted dependency", async () => { + const filedir = tempDirWithFiles("patch1", { + "package.json": JSON.stringify({ + "name": "bun-patch-test", + "module": "index.ts", + "type": "module", + "patchedDependencies": { + [`is-odd@0.1.2`]: `patches/is-odd@0.1.2.patch`, + }, + "dependencies": { + "is-even": "1.0.0", + "is-odd": "3.0.1", + }, + }), + patches: { + "is-odd@0.1.2.patch": is_odd_patch, + }, + "index.ts": /* ts */ `import isEven from 'is-even'; isEven(2); console.log('lol')`, + }); + console.log("TEMP:", filedir); + await $`${bunExe()} i`.env(bunEnv).cwd(filedir); + const { stdout, stderr } = await $`${bunExe()} run index.ts`.env(bunEnv).cwd(filedir); + expect(stderr.toString()).toBe(""); + expect(stdout.toString()).toContain("Hi from isOdd!\n"); + }); + + describe("should patch a dependency", async () => { + for (const [version, patchVersion_] of versions) { + const patchFilename = filepathEscape(`is-even@${version}.patch`); + const patchVersion = patchVersion_ ?? 
version; + test(version, async () => { + const filedir = tempDirWithFiles("patch1", { + "package.json": JSON.stringify({ + "name": "bun-patch-test", + "module": "index.ts", + "type": "module", + "patchedDependencies": { + [`is-even@${patchVersion}`]: `patches/${patchFilename}`, + }, + "dependencies": { + "is-even": version, + }, + }), + patches: { + [patchFilename]: is_even_patch, + }, + "index.ts": /* ts */ `import isEven from 'is-even'; isEven(2); console.log('lol')`, + }); + console.log("TEMP:", filedir); + await $`${bunExe()} i`.env(bunEnv).cwd(filedir); + const { stdout, stderr } = await $`${bunExe()} run index.ts`.env(bunEnv).cwd(filedir); + expect(stderr.toString()).toBe(""); + expect(stdout.toString()).toContain("HI\n"); + }); + } + }); + + test("should patch a transitive dependency", async () => { + const version = "0.1.2"; + const patchFilename = filepathEscape(`is-odd@${version}.patch`); + const filedir = tempDirWithFiles("patch1", { + "package.json": JSON.stringify({ + "name": "bun-patch-test", + "module": "index.ts", + "type": "module", + "patchedDependencies": { + [`is-odd@${version}`]: `patches/${patchFilename}`, + }, + "dependencies": { + "is-even": "1.0.0", + }, + }), + patches: { + [patchFilename]: is_odd_patch, + }, + "index.ts": /* ts */ `import isEven from 'is-even'; isEven(2); console.log('lol')`, + }); + + await $`${bunExe()} i`.env(bunEnv).cwd(filedir); + const { stdout, stderr } = await $`${bunExe()} run index.ts`.env(bunEnv).cwd(filedir); + expect(stderr.toString()).toBe(""); + expect(stdout.toString()).toContain("Hi from isOdd!\n"); + }); + + describe("should patch a dependency after it was already installed", async () => { + for (const [version, patchVersion_] of versions) { + const patchfileName = filepathEscape(`is-even@${version}.patch`); + const patchVersion = patchVersion_ ?? 
version; + test(version, async () => { + const filedir = tempDirWithFiles("patch1", { + "package.json": JSON.stringify({ + "name": "bun-patch-test", + "module": "index.ts", + "type": "module", + "dependencies": { + "is-even": version, + }, + }), + patches: { + [patchfileName]: is_even_patch, + }, + "index.ts": /* ts */ `import isEven from 'is-even'; isEven(2); console.log('lol')`, + }); + + console.log("File", filedir); + + await $`${bunExe()} i`.env(bunEnv).cwd(filedir); + + await $`echo ${JSON.stringify({ + "name": "bun-patch-test", + "module": "index.ts", + "type": "module", + "patchedDependencies": { + [`is-even@${patchVersion}`]: `patches/${patchfileName}`, + }, + "dependencies": { + "is-even": version, + }, + })} > package.json` + .env(bunEnv) + .cwd(filedir); + + await $`${bunExe()} i`.env(bunEnv).cwd(filedir); + + const { stdout, stderr } = await $`${bunExe()} run index.ts`.env(bunEnv).cwd(filedir); + expect(stderr.toString()).toBe(""); + expect(stdout.toString()).toContain("HI\n"); + }); + } + }); + + it("should patch a transitive dependency after it was already installed", async () => { + const filedir = tempDirWithFiles("patch1", { + "package.json": JSON.stringify({ + "name": "bun-patch-test", + "module": "index.ts", + "type": "module", + "dependencies": { + "is-even": "1.0.0", + }, + }), + patches: { + "is-odd@0.1.2.patch": is_odd_patch, + }, + "index.ts": /* ts */ `import isEven from 'is-even'; isEven(2); console.log('lol')`, + }); + + console.log("File", filedir); + + await $`${bunExe()} i`.env(bunEnv).cwd(filedir); + + await $`echo ${JSON.stringify({ + "name": "bun-patch-test", + "module": "index.ts", + "type": "module", + "patchedDependencies": { + "is-odd@0.1.2": "patches/is-odd@0.1.2.patch", + }, + "dependencies": { + "is-even": "1.0.0", + }, + })} > package.json` + .env(bunEnv) + .cwd(filedir); + + await $`${bunExe()} i`.env(bunEnv).cwd(filedir); + + const { stdout, stderr } = await $`${bunExe()} run index.ts`.env(bunEnv).cwd(filedir); + expect(stderr.toString()).toBe(""); + expect(stdout.toString()).toContain("Hi from isOdd!\n"); + }); + + describe("should update a dependency when the patchfile changes", async () => { + $.throws(true); + for (const [version, patchVersion_] of versions) { + const patchFilename = filepathEscape(`is-even@${version}.patch`); + const patchVersion = patchVersion_ ?? 
version; + test(version, async () => { + const filedir = tempDirWithFiles("patch1", { + "package.json": JSON.stringify({ + "name": "bun-patch-test", + "module": "index.ts", + "type": "module", + "patchedDependencies": { + [`is-even@${patchVersion}`]: `patches/${patchFilename}`, + }, + "dependencies": { + "is-even": version, + }, + }), + patches: { + [patchFilename]: is_even_patch2, + }, + "index.ts": /* ts */ `import isEven from 'is-even'; isEven(2); console.log('lol')`, + }); + + await $`${bunExe()} i`.env(bunEnv).cwd(filedir); + + await $`echo ${is_even_patch2} > patches/is-even@${version}.patch; ${bunExe()} i`.env(bunEnv).cwd(filedir); + + const { stdout, stderr } = await $`${bunExe()} run index.ts`.env(bunEnv).cwd(filedir); + expect(stderr.toString()).toBe(""); + expect(stdout.toString()).toContain("lmao\n"); + }); + } + }); + + it("should update a transitive dependency when the patchfile changes", async () => { + $.throws(true); + const filedir = tempDirWithFiles("patch1", { + "package.json": JSON.stringify({ + "name": "bun-patch-test", + "module": "index.ts", + "type": "module", + "patchedDependencies": { + "is-odd@0.1.2": "patches/is-odd@0.1.2.patch", + }, + "dependencies": { + "is-even": "1.0.0", + }, + }), + patches: { + ["is-odd@0.1.2.patch"]: is_odd_patch2, + }, + "index.ts": /* ts */ `import isEven from 'is-even'; isEven(2); console.log('lol')`, + }); + + await $`${bunExe()} i`.env(bunEnv).cwd(filedir); + + await $`echo ${is_odd_patch2} > patches/is-odd@0.1.2.patch; ${bunExe()} i`.env(bunEnv).cwd(filedir); + + const { stdout, stderr } = await $`${bunExe()} run index.ts`.env(bunEnv).cwd(filedir); + expect(stderr.toString()).toBe(""); + expect(stdout.toString()).toContain("lmao\n"); + }); +}); diff --git a/test/cli/install/bun-install.test.ts b/test/cli/install/bun-install.test.ts index 2639cd8a0c..a4bef985d3 100644 --- a/test/cli/install/bun-install.test.ts +++ b/test/cli/install/bun-install.test.ts @@ -1,3 +1,4 @@ +import { $ } from "bun"; import { file, listen, Socket, spawn } from "bun"; import { afterAll, afterEach, beforeAll, beforeEach, expect, it, describe, test, setDefaultTimeout } from "bun:test"; import { bunExe, bunEnv as env, toBeValidBin, toHaveBins, toBeWorkspaceLink, tempDirWithFiles, bunEnv } from "harness"; diff --git a/test/harness.ts b/test/harness.ts index 1726d67ccc..48d752e5b1 100644 --- a/test/harness.ts +++ b/test/harness.ts @@ -416,7 +416,7 @@ export async function toMatchNodeModulesAt(lockfile: any, root: string) { return { pass: false, message: () => ` -Expected at ${join(path, treeDep.name)}: ${JSON.stringify({ name: treePkg.name, version: treePkg.resolution.value })} +Expected at ${join(path, treeDep.name)}: ${JSON.stringify({ name: treePkg.name, version: treePkg.resolution.value })} Received ${JSON.stringify({ name: onDisk.name, version: onDisk.version })}`, }; } diff --git a/test/js/bun/patch/patch.test.ts b/test/js/bun/patch/patch.test.ts new file mode 100644 index 0000000000..e3352f212e --- /dev/null +++ b/test/js/bun/patch/patch.test.ts @@ -0,0 +1,863 @@ +import { $ } from "bun"; +import { describe, test, expect, it } from "bun:test"; +import { patchInternals } from "bun:internal-for-testing"; +import { tempDirWithFiles as __tempDirWithFiles } from "harness"; +import { join as __join } from "node:path"; +import fs from "fs/promises"; +const { parse, apply } = patchInternals; + +const makeDiff = async (aFolder: string, bFolder: string, cwd: string): Promise => { + const { stdout, stderr } = + await $`git -c core.safecrlf=false diff 
--src-prefix=a/ --dst-prefix=b/ --ignore-cr-at-eol --irreversible-delete --full-index --no-index ${aFolder} ${bFolder}`
+      .env(
+        // https://github.com/pnpm/pnpm/blob/45f4262f0369cadf41cea3b823e8932eae157c4b/patching/plugin-commands-patching/src/patchCommit.ts#L117
+        {
+          ...process.env,
+          // #region Predictable output
+          // These variables aim to ignore the global git config so we get predictable output
+          // https://git-scm.com/docs/git#Documentation/git.txt-codeGITCONFIGNOSYSTEMcode
+          GIT_CONFIG_NOSYSTEM: "1",
+          HOME: "",
+          XDG_CONFIG_HOME: "",
+          USERPROFILE: "",
+        },
+      )
+      .quiet()
+      .cwd(cwd)
+      // For some reason git diff returns exit code 1 even when it is not an error,
+      // so we must check that there is no stderr output instead of the exit code
+      // to determine if the command was successful
+      .throws(false);
+
+  if (stderr.length > 0) throw new Error(stderr.toString());
+
+  const patch = stdout.toString();
+
+  return patch
+    .replace(new RegExp(`(a|b)(${escapeStringRegexp(`/${removeTrailingAndLeadingSlash(aFolder)}/`)})`, "g"), "$1/")
+    .replace(new RegExp(`(a|b)${escapeStringRegexp(`/${removeTrailingAndLeadingSlash(bFolder)}/`)}`, "g"), "$1/")
+    .replace(new RegExp(escapeStringRegexp(`${aFolder}/`), "g"), "")
+    .replace(new RegExp(escapeStringRegexp(`${bFolder}/`), "g"), "");
+  // .replace(/\n\\ No newline at end of file\n$/, "\n");
+};
+
+const tempDirWithFiles: typeof __tempDirWithFiles =
+  process.platform === "win32"
+    ? (a, b) => __tempDirWithFiles(a.replaceAll("\\", "/"), b).replaceAll("\\", "/")
+    : __tempDirWithFiles;
+const join =
+  process.platform === "win32"
+    ? (...strings: string[]): string => __join(...strings.map(s => s.replaceAll("\\", "/"))).replaceAll("\\", "/")
+    : __join;
+
+describe("apply", () => {
+  describe("deletion", () => {
+    test("simple", async () => {
+      const files = {
+        "a/hey.txt": "hello!",
+        "a/byebye.txt": "goodbye :(",
+        "b/hey.txt": "hello!",
+      };
+      const tempdir = tempDirWithFiles("patch-test", files);
+
+      const afolder = join(tempdir, "a");
+      const bfolder = join(tempdir, "b");
+
+      console.log("makeDiff args", afolder, bfolder);
+      const patchfile = await makeDiff(afolder, bfolder, tempdir);
+
+      console.log("PATCHFILE", patchfile);
+      console.log("afolder", afolder);
+      await apply(patchfile, afolder);
+
+      expect(await $`cat ${join(afolder, "hey.txt")}`.cwd(tempdir).text()).toBe(files["b/hey.txt"]);
+      expect(
+        await $`if ls -d ${join(afolder, "byebye.txt")}; then echo oops; else echo okay!; fi;`.cwd(tempdir).text(),
+      ).toBe("okay!\n");
+    });
+  });
+
+  describe("creation", () => {
+    test("simple", async () => {
+      const files = {
+        "a": {},
+        "b/newfile.txt": "hey im new here!",
+      };
+      const tempdir = tempDirWithFiles("patch-test", files);
+
+      const afolder = join(tempdir, "a");
+      const bfolder = join(tempdir, "b");
+
+      const patchfile = await makeDiff(afolder, bfolder, tempdir);
+
+      await apply(patchfile, afolder);
+
+      expect(await $`cat ${join(afolder, "newfile.txt")}`.cwd(tempdir).text()).toBe(files["b/newfile.txt"]);
+    });
+
+    test("multi-line", async () => {
+      const files = {
+        "a": {},
+        "b/newfile.txt": "hey im new here!\nhello",
+      };
+      const tempdir = tempDirWithFiles("patch-test", files);
+
+      const afolder = join(tempdir, "a");
+      const bfolder = join(tempdir, "b");
+
+      const patchfile = await makeDiff(afolder, bfolder, tempdir);
+
+      await apply(patchfile, afolder);
+
+      expect(await $`cat ${join(afolder, "newfile.txt")}`.cwd(tempdir).text()).toBe(files["b/newfile.txt"]);
+    });
+  });
+
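For readers unfamiliar with the patchfile shape the creation tests above exercise, here is a hand-written sketch; the `index` hash is illustrative only, and the shape matches the `crlfLineBreaks` fixture used by the parser tests further down. It assumes the harness's `tempDirWithFiles` accepts an empty file map:

```ts
import { patchInternals } from "bun:internal-for-testing";
import { tempDirWithFiles } from "harness";

// A file-creation patch: no "--- a/" source, the whole file arrives as one
// insertion hunk. "@@ -0,0 +1 @@" reads: zero lines on the old side, one line
// starting at line 1 on the new side (parseHunkHeaderLineImpl in src/patch.zig
// defaults a missing count to 1).
const creationPatch = `diff --git a/newfile.txt b/newfile.txt
new file mode 100644
index 0000000..1f7391f
--- /dev/null
+++ b/newfile.txt
@@ -0,0 +1 @@
+hey im new here!
`;

const dir = tempDirWithFiles("patch-demo", {});
await patchInternals.apply(creationPatch, dir); // newfile.txt now exists in dir
```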
+  describe("rename", () => {
+    test("files", async () => {
+      const files = {
+        "a/hey.txt": "hello!",
+        "b/heynow.txt": "hello!",
+      };
+      const tempdir = tempDirWithFiles("patch-test", files);
+
+      const afolder = join(tempdir, "a");
+      const bfolder = join(tempdir, "b");
+
+      const patchfile = await makeDiff(afolder, bfolder, tempdir);
+
+      await apply(patchfile, afolder);
+
+      expect(await $`cat ${join(afolder, "heynow.txt")}`.cwd(tempdir).text()).toBe(files["b/heynow.txt"]);
+      expect(
+        await $`if ls -d ${join(afolder, "hey.txt")}; then echo oops; else echo okay!; fi;`.cwd(tempdir).text(),
+      ).toBe("okay!\n");
+    });
+
+    test("folders", async () => {
+      const files = {
+        "a/foo/hey.txt": "hello!",
+        "a/foo/hi.txt": "hello!",
+        "a/foo/lmao.txt": "lmao!",
+        "b/foo": {},
+        "b/bar/hey.txt": "hello!",
+        "b/bar/hi.txt": "hello!",
+        "b/bar/lmao.txt": "lmao!",
+      };
+      const tempdir = tempDirWithFiles("patch-test", files);
+
+      const afolder = join(tempdir, "a");
+      const bfolder = join(tempdir, "b");
+
+      const patchfile = await makeDiff(afolder, bfolder, tempdir);
+
+      await apply(patchfile, afolder);
+
+      // Should we remove the folder if it's empty? Technically running `git apply` does this,
+      // but git does not track empty directories so it's not really a problem
+      // expect(
+      //   await $`if ls -d ${join(afolder, "foo")}; then echo should not exist!; else echo okay!; fi;`
+      //     .cwd(tempdir)
+      //     .text(),
+      // ).toBe("okay!\n");
+
+      expect(await $`cat ${join(afolder, "bar", "hey.txt")}`.cwd(tempdir).text()).toBe(files["b/bar/hey.txt"]);
+      expect(await $`cat ${join(afolder, "bar", "hi.txt")}`.cwd(tempdir).text()).toBe(files["b/bar/hi.txt"]);
+      expect(await $`cat ${join(afolder, "bar", "lmao.txt")}`.cwd(tempdir).text()).toBe(files["b/bar/lmao.txt"]);
+      expect(
+        await $`ls ${join(afolder, "bar")}`
+          .cwd(tempdir)
+          .text()
+          .then((out: string) =>
+            out
+              .split("\n")
+              .filter(x => x !== "")
+              .sort(),
+          ),
+      ).toEqual(["hey.txt", "hi.txt", "lmao.txt"].sort());
+    });
+  });
+
+  describe("mode change", () => {
+    // chmod doesn't do anything on windows so skip
+    test.if(process.platform !== "win32")("simple", async () => {
+      const files = {
+        "a/hi.txt": "hello!",
+        "b/hi.txt": "hi!",
+      };
+
+      const tempdir = tempDirWithFiles("patch-test", files);
+
+      const afolder = join(tempdir, "a");
+      const bfolder = join(tempdir, "b");
+
+      await fs.chmod(join(bfolder, "hi.txt"), 0o755);
+
+      const patchfile = await makeDiff(afolder, bfolder, tempdir);
+
+      await apply(patchfile, afolder);
+
+      expect(await $`cat ${join(afolder, "hi.txt")}`.cwd(tempdir).text()).toBe(files["b/hi.txt"]);
+      const stat = await fs.stat(join(afolder, "hi.txt"));
+      expect((stat.mode & parseInt("777", 8)).toString(8)).toBe("755");
+    });
+  });
+
+  describe("patch", () => {
+    test("simple insertion", async () => {
+      const afile = `hello!\n`;
+      const bfile = `hello!\nwassup?\n`;
+
+      const tempdir = tempDirWithFiles("patch-test", {
+        "a/hello.txt": afile,
+        "b/hello.txt": bfile,
+      });
+
+      const afolder = join(tempdir, "a");
+      const bfolder = join(tempdir, "b");
+
+      const patchfile = await makeDiff(afolder, bfolder, tempdir);
+
+      await apply(patchfile, afolder);
+
+      expect(await $`cat ${join(afolder, "hello.txt")}`.cwd(tempdir).text()).toBe(bfile);
+    });
+
+    test("simple deletion", async () => {
+      const afile = `hello!\nwassup?\n`;
+      const bfile = `hello!\n`;
+
+      const tempdir = tempDirWithFiles("patch-test", {
+        "a/hello.txt": afile,
+        "b/hello.txt": bfile,
+      });
+
+      const afolder = join(tempdir, "a");
+      const bfolder = join(tempdir, "b");
+
+      const patchfile = await makeDiff(afolder, bfolder, tempdir);
+
+
await apply(patchfile, afolder); + + expect(await $`cat ${join(afolder, "hello.txt")}`.cwd(tempdir).text()).toBe(bfile); + }); + + test("multi insertion", async () => { + const afile = `hello!\n`; + const bfile = `lol\nhello!\nwassup?\n`; + + const tempdir = tempDirWithFiles("patch-test", { + "a/hello.txt": afile, + "b/hello.txt": bfile, + }); + + const afolder = join(tempdir, "a"); + const bfolder = join(tempdir, "b"); + + const patchfile = await makeDiff(afolder, bfolder, tempdir); + + await apply(patchfile, afolder); + + expect(await $`cat ${join(afolder, "hello.txt")}`.cwd(tempdir).text()).toBe(bfile); + }); + + test("multi deletion", async () => { + const afile = `hello!\nwassup?\nlmao\n`; + const bfile = `wassup?\n`; + + const tempdir = tempDirWithFiles("patch-test", { + "a/hello.txt": afile, + "b/hello.txt": bfile, + }); + + const afolder = join(tempdir, "a"); + const bfolder = join(tempdir, "b"); + + const patchfile = await makeDiff(afolder, bfolder, tempdir); + + await apply(patchfile, afolder); + + expect(await $`cat ${join(afolder, "hello.txt")}`.cwd(tempdir).text()).toBe(bfile); + }); + + test("multi-hunk insertion", async () => { + const afile = `0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20`; + const bfile = `0 +0.5 hi +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +19.5 lol hi +20`; + + const tempdir = tempDirWithFiles("patch-test", { + "a/hello.txt": afile, + "b/hello.txt": bfile, + }); + + const afolder = join(tempdir, "a"); + const bfolder = join(tempdir, "b"); + + const patchfile = await makeDiff(afolder, bfolder, tempdir); + + await apply(patchfile, afolder); + + expect(await $`cat ${join(afolder, "hello.txt")}`.cwd(tempdir).text()).toBe(bfile); + }); + + test("multi-hunk deletion", async () => { + const bfile = `0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20`; + const afile = `0 +0.5 hi +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +19.5 lol hi +20`; + + const tempdir = tempDirWithFiles("patch-test", { + "a/hello.txt": afile, + "b/hello.txt": bfile, + }); + + const afolder = join(tempdir, "a"); + const bfolder = join(tempdir, "b"); + + const patchfile = await makeDiff(afolder, bfolder, tempdir); + + await apply(patchfile, afolder); + + expect(await $`cat ${join(afolder, "hello.txt")}`.cwd(tempdir).text()).toBe(bfile); + }); + }); + + describe("No newline at end of file", () => { + // TODO: simple, multiline, multiple hunks + }); +}); + +describe("parse", () => { + test("works for a simple case", () => { + expect(JSON.parse(parse(patch))).toEqual({ + "parts": { + "items": [ + { + "file_patch": { + "path": "banana.ts", + "hunks": { + "items": [ + { + "header": { "original": { "start": 1, "len": 5 }, "patched": { "start": 1, "len": 5 } }, + "parts": { + "items": [ + { + "type": "context", + "lines": { "items": ["this", "is", ""], "capacity": 8 }, + "no_newline_at_end_of_file": false, + }, + { + "type": "deletion", + "lines": { "items": ["a"], "capacity": 8 }, + "no_newline_at_end_of_file": false, + }, + { + "type": "insertion", + "lines": { "items": [""], "capacity": 8 }, + "no_newline_at_end_of_file": false, + }, + { + "type": "context", + "lines": { "items": ["file"], "capacity": 8 }, + "no_newline_at_end_of_file": false, + }, + ], + "capacity": 8, + }, + }, + ], + "capacity": 8, + }, + "before_hash": "2de83dd", + "after_hash": "842652c", + }, + }, + ], + "capacity": 8, + }, + }); + }); + + test("fails when the patch file has invalid headers", () => { + expect(() => 
parse(invalidHeaders1)).toThrow(); + expect(() => parse(invalidHeaders2)).toThrow(); + expect(() => parse(invalidHeaders3)).toThrow(); + expect(() => parse(invalidHeaders4)).toThrow(); + expect(() => parse(invalidHeaders5)).toThrow(); + }); + + test("is OK when blank lines are accidentally created", () => { + expect(parse(accidentalBlankLine)).toEqual(parse(patch)); + }); + + test(`can handle files with CRLF line breaks`, () => { + expect(JSON.parse(parse(crlfLineBreaks))).toEqual({ + "parts": { + "items": [ + { + "file_creation": { + "path": "banana.ts", + "mode": "non_executable", + "hunk": { + "header": { "original": { "start": 1, "len": 0 }, "patched": { "start": 1, "len": 1 } }, + "parts": { + "items": [ + { + "type": "insertion", + "lines": { "items": ["this is a new file\r"], "capacity": 8 }, + "no_newline_at_end_of_file": false, + }, + ], + "capacity": 8, + }, + }, + "hash": "3e1267f", + }, + }, + ], + "capacity": 8, + }, + }); + }); + + test("works", () => { + expect(JSON.parse(parse(modeChangeAndModifyAndRename))).toEqual({ + "parts": { + "items": [ + { "file_rename": { "from_path": "numbers.txt", "to_path": "banana.txt" } }, + { "file_mode_change": { "path": "banana.txt", "old_mode": "non_executable", "new_mode": "executable" } }, + { + "file_patch": { + "path": "banana.txt", + "hunks": { + "items": [ + { + "header": { "original": { "start": 1, "len": 4 }, "patched": { "start": 1, "len": 4 } }, + "parts": { + "items": [ + { + "type": "deletion", + "lines": { "items": ["one"], "capacity": 8 }, + "no_newline_at_end_of_file": false, + }, + { + "type": "insertion", + "lines": { "items": ["ne"], "capacity": 8 }, + "no_newline_at_end_of_file": false, + }, + { + "type": "context", + "lines": { "items": ["", "two", ""], "capacity": 8 }, + "no_newline_at_end_of_file": false, + }, + ], + "capacity": 8, + }, + }, + ], + "capacity": 8, + }, + "before_hash": "fbf1785", + "after_hash": "92d2c5f", + }, + }, + ], + "capacity": 8, + }, + }); + }); + + test("parses old-style patches", () => { + expect(JSON.parse(parse(oldStylePatch))).toEqual({ + "parts": { + "items": [ + { + "file_patch": { + "path": "node_modules/graphql/utilities/assertValidName.js", + "hunks": { + "items": [ + { + "header": { "original": { "start": 41, "len": 10 }, "patched": { "start": 41, "len": 11 } }, + "parts": { + "items": [ + { + "type": "context", + "lines": { + "items": [ + " */", + "function isValidNameError(name, node) {", + " !(typeof name === 'string') ? 
(0, _invariant2.default)(0, 'Expected string') : void 0;", + ], + "capacity": 8, + }, + "no_newline_at_end_of_file": false, + }, + { + "type": "deletion", + "lines": { + "items": [ + " if (name.length > 1 && name[0] === '_' && name[1] === '_') {", + " return new _GraphQLError.GraphQLError('Name \"' + name + '\" must not begin with \"__\", which is reserved by ' + 'GraphQL introspection.', node);", + " }", + ], + "capacity": 8, + }, + "no_newline_at_end_of_file": false, + }, + { + "type": "insertion", + "lines": { + "items": [ + " // if (name.length > 1 && name[0] === '_' && name[1] === '_') {", + " // return new _GraphQLError.GraphQLError('Name \"' + name + '\" must not begin with \"__\", which is reserved by ' + 'GraphQL introspection.', node);", + " // }", + ], + "capacity": 8, + }, + "no_newline_at_end_of_file": false, + }, + { + "type": "context", + "lines": { + "items": [ + " if (!NAME_RX.test(name)) {", + " return new _GraphQLError.GraphQLError('Names must match /^[_a-zA-Z][_a-zA-Z0-9]*$/ but \"' + name + '\" does not.', node);", + " }", + ], + "capacity": 8, + }, + "no_newline_at_end_of_file": false, + }, + { + "type": "insertion", + "lines": { "items": [""], "capacity": 8 }, + "no_newline_at_end_of_file": false, + }, + { + "type": "context", + "lines": { "items": ["}"], "capacity": 8 }, + "no_newline_at_end_of_file": true, + }, + ], + "capacity": 8, + }, + }, + ], + "capacity": 8, + }, + "before_hash": null, + "after_hash": null, + }, + }, + { + "file_patch": { + "path": "node_modules/graphql/utilities/assertValidName.mjs", + "hunks": { + "items": [ + { + "header": { "original": { "start": 29, "len": 9 }, "patched": { "start": 29, "len": 9 } }, + "parts": { + "items": [ + { + "type": "context", + "lines": { + "items": [ + " */", + "export function isValidNameError(name, node) {", + " !(typeof name === 'string') ? 
invariant(0, 'Expected string') : void 0;", + ], + "capacity": 8, + }, + "no_newline_at_end_of_file": false, + }, + { + "type": "deletion", + "lines": { + "items": [ + " if (name.length > 1 && name[0] === '_' && name[1] === '_') {", + " return new GraphQLError('Name \"' + name + '\" must not begin with \"__\", which is reserved by ' + 'GraphQL introspection.', node);", + " }", + ], + "capacity": 8, + }, + "no_newline_at_end_of_file": false, + }, + { + "type": "insertion", + "lines": { + "items": [ + " // if (name.length > 1 && name[0] === '_' && name[1] === '_') {", + " // return new GraphQLError('Name \"' + name + '\" must not begin with \"__\", which is reserved by ' + 'GraphQL introspection.', node);", + " // }", + ], + "capacity": 8, + }, + "no_newline_at_end_of_file": false, + }, + { + "type": "context", + "lines": { + "items": [ + " if (!NAME_RX.test(name)) {", + " return new GraphQLError('Names must match /^[_a-zA-Z][_a-zA-Z0-9]*$/ but \"' + name + '\" does not.', node);", + " }", + ], + "capacity": 8, + }, + "no_newline_at_end_of_file": false, + }, + ], + "capacity": 8, + }, + }, + ], + "capacity": 8, + }, + "before_hash": null, + "after_hash": null, + }, + }, + ], + "capacity": 8, + }, + }); + }); +}); + +const patch = `diff --git a/banana.ts b/banana.ts\nindex 2de83dd..842652c 100644\n--- a/banana.ts\n+++ b/banana.ts\n@@ -1,5 +1,5 @@\n this\n is\n \n-a\n+\n file\n`; + +const invalidHeaders1 = /* diff */ `diff --git a/banana.ts b/banana.ts +index 2de83dd..842652c 100644 +--- a/banana.ts ++++ b/banana.ts +@@ -1,5 +1,4 @@ + this + is + +-a ++ + file +`; + +const invalidHeaders2 = /* diff */ `diff --git a/banana.ts b/banana.ts +index 2de83dd..842652c 100644 +--- a/banana.ts ++++ b/banana.ts +@@ -1,4 +1,5 @@ + this + is + +-a ++ + file +`; + +const invalidHeaders3 = /* diff */ `diff --git a/banana.ts b/banana.ts +index 2de83dd..842652c 100644 +--- a/banana.ts ++++ b/banana.ts +@@ -1,0 +1,5 @@ + this + is + +-a ++ + file +`; +const invalidHeaders4 = /* diff */ `diff --git a/banana.ts b/banana.ts +index 2de83dd..842652c 100644 +--- a/banana.ts ++++ b/banana.ts +@@ -1,5 +1,0 @@ + this + is + +-a ++ + file +`; + +const invalidHeaders5 = /* diff */ `diff --git a/banana.ts b/banana.ts +index 2de83dd..842652c 100644 +--- a/banana.ts ++++ b/banana.ts +@@ -1,5 +1,5@@ + this + is + +-a ++ + file +`; + +const accidentalBlankLine = /* diff */ `diff --git a/banana.ts b/banana.ts +index 2de83dd..842652c 100644 +--- a/banana.ts ++++ b/banana.ts +@@ -1,5 +1,5 @@ + this + is + +-a ++ + file +`; + +const crlfLineBreaks = /* diff */ `diff --git a/banana.ts b/banana.ts +new file mode 100644 +index 0000000..3e1267f +--- /dev/null ++++ b/banana.ts +@@ -0,0 +1 @@ ++this is a new file +`.replace(/\n/g, "\r\n"); + +const modeChangeAndModifyAndRename = /* diff */ `diff --git a/numbers.txt b/banana.txt +old mode 100644 +new mode 100755 +similarity index 96% +rename from numbers.txt +rename to banana.txt +index fbf1785..92d2c5f +--- a/numbers.txt ++++ b/banana.txt +@@ -1,4 +1,4 @@ +-one ++ne + + two + +`; + +const oldStylePatch = /* diff */ `patch-package +--- a/node_modules/graphql/utilities/assertValidName.js ++++ b/node_modules/graphql/utilities/assertValidName.js +@@ -41,10 +41,11 @@ function assertValidName(name) { + */ + function isValidNameError(name, node) { + !(typeof name === 'string') ? 
(0, _invariant2.default)(0, 'Expected string') : void 0; +- if (name.length > 1 && name[0] === '_' && name[1] === '_') { +- return new _GraphQLError.GraphQLError('Name "' + name + '" must not begin with "__", which is reserved by ' + 'GraphQL introspection.', node); +- } ++ // if (name.length > 1 && name[0] === '_' && name[1] === '_') { ++ // return new _GraphQLError.GraphQLError('Name "' + name + '" must not begin with "__", which is reserved by ' + 'GraphQL introspection.', node); ++ // } + if (!NAME_RX.test(name)) { + return new _GraphQLError.GraphQLError('Names must match /^[_a-zA-Z][_a-zA-Z0-9]*$/ but "' + name + '" does not.', node); + } ++ + } +\\ No newline at end of file +--- a/node_modules/graphql/utilities/assertValidName.mjs ++++ b/node_modules/graphql/utilities/assertValidName.mjs +@@ -29,9 +29,9 @@ export function assertValidName(name) { + */ + export function isValidNameError(name, node) { + !(typeof name === 'string') ? invariant(0, 'Expected string') : void 0; +- if (name.length > 1 && name[0] === '_' && name[1] === '_') { +- return new GraphQLError('Name "' + name + '" must not begin with "__", which is reserved by ' + 'GraphQL introspection.', node); +- } ++ // if (name.length > 1 && name[0] === '_' && name[1] === '_') { ++ // return new GraphQLError('Name "' + name + '" must not begin with "__", which is reserved by ' + 'GraphQL introspection.', node); ++ // } + if (!NAME_RX.test(name)) { + return new GraphQLError('Names must match /^[_a-zA-Z][_a-zA-Z0-9]*$/ but "' + name + '" does not.', node); + } +`; +function escapeStringRegexp(string: string) { + if (typeof string !== "string") { + throw new TypeError("Expected a string"); + } + + // Escape characters with special meaning either inside or outside character sets. + // Use a simple backslash escape when it’s always valid, and a `\xnn` escape when the simpler form would be disallowed by Unicode patterns’ stricter grammar. + return string.replace(/[|\\{}()[\]^$+*?.]/g, "\\$&").replace(/-/g, "\\x2d"); +} + +function removeTrailingAndLeadingSlash(p: string): string { + if (p[0] === "/" || p.endsWith("/")) { + return p.replace(/^\/|\/$/g, ""); + } + return p; +} diff --git a/test/js/bun/shell/bunshell.test.ts b/test/js/bun/shell/bunshell.test.ts index 939a45567d..1f6eb848b0 100644 --- a/test/js/bun/shell/bunshell.test.ts +++ b/test/js/bun/shell/bunshell.test.ts @@ -801,7 +801,7 @@ ${temp_dir}` TestBuilder.command`echo ${"|"}`.stdout("|\n").runAsTest("pipe"); TestBuilder.command`echo ${"="}`.stdout("=\n").runAsTest("equals"); TestBuilder.command`echo ${";"}`.stdout(";\n").runAsTest("semicolon"); - TestBuilder.command`echo ${"\n"}`.stdout("\n\n").runAsTest("newline"); + TestBuilder.command`echo ${"\n"}`.stdout("\n").runAsTest("newline"); TestBuilder.command`echo ${"{"}`.stdout("{\n").runAsTest("left_brace"); TestBuilder.command`echo ${"}"}`.stdout("}\n").runAsTest("right_brace"); TestBuilder.command`echo ${","}`.stdout(",\n").runAsTest("comma");
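The one-line test change above tracks the new behavior of the Echo builtin from src/shell/interpreter.zig in this patch: a run of trailing newlines in the final argument is collapsed to one instead of having another newline appended. A quick way to observe it, assuming a build that includes this patch:

```ts
import { $ } from "bun";

// Previously `echo` always appended '\n', so an argument already ending in a
// newline produced "\n\n". The builtin now collapses the trailing run of
// newlines to a single one and skips the extra append.
const out = await $`echo ${"\n"}`.text();
console.log(JSON.stringify(out)); // "\n"
```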