diff --git a/docs/install/workspaces.md b/docs/install/workspaces.md
index dc3219679c..64d2445132 100644
--- a/docs/install/workspaces.md
+++ b/docs/install/workspaces.md
@@ -41,7 +41,7 @@ In the root `package.json`, the `"workspaces"` key is used to indicate which sub
 **Glob support** — Bun supports full glob syntax in `"workspaces"` (see [here](https://bun.sh/docs/api/glob#supported-glob-patterns) for a comprehensive list of supported syntax), _except_ for exclusions (e.g. `!**/excluded/**`), which are not implemented yet.
 {% /callout %}
 
-Each workspace has it's own `package.json` When referencing other packages in the monorepo, use `"workspace:*"` as the version field in your `package.json`.
+Each workspace has its own `package.json`. When referencing other packages in the monorepo, either a semver range or the workspace protocol (e.g. `workspace:*`) can be used as the version field in your `package.json`.
 
 ```json
 {
@@ -53,10 +53,6 @@ Each workspace has it's own `package.json` When referencing other packages in th
 }
 ```
 
-{% callout %}
-**Version support** — Bun supports simple `workspace:*` versions in `"dependencies"`. Full version syntax (e.g. `workspace:^*`) is not yet supported.
-{% /callout %}
-
 Workspaces have a couple major benefits.
 
 - **Code can be split into logical parts.** If one package relies on another, you can simply add it as a dependency in `package.json`. If package `b` depends on `a`, `bun install` will install your local `packages/a` directory into `node_modules` instead of downloading it from the npm registry.
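To illustrate the updated doc wording (an editor's sketch, not part of the patch; the package names `a` and `b` echo the bullet above): a workspace package can now reference a sibling either with the workspace protocol or with a plain semver range.

```json
{
  "name": "b",
  "version": "1.0.0",
  "dependencies": {
    "a": "workspace:^"
  }
}
```

With `workspace:^`, `bun install` links the local `packages/a`, and — per the `editRootPackageJSON` change later in this diff — `bun pack` rewrites the field to a caret range of the workspace's current version when packing.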
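The libarchive patch that follows makes the gzip header's OS byte configurable: it defaults to 3 ("Unix") and maps the option value "Unknown" to 255, which `bun pack` sets later in this diff (together with `gzip:!timestamp`), evidently so tarball bytes — and therefore shasums — do not vary by build platform. A minimal sketch of how one could inspect that byte (the tarball name is hypothetical; not part of the patch):

```zig
const std = @import("std");

pub fn main() !void {
    // Assumed filename for illustration; any gzip-compressed tarball works.
    var file = try std.fs.cwd().openFile("package.tgz", .{});
    defer file.close();

    // RFC 1952 gzip member header: magic (2), method (1), flags (1),
    // mtime (4), extra flags (1), OS (1) -- ten bytes in total.
    var header: [10]u8 = undefined;
    _ = try file.readAll(&header);
    std.debug.print("gzip OS byte: {d} (3 = Unix, 255 = Unknown)\n", .{header[9]});
}
```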
diff --git a/patches/libarchive/archive_write_add_filter_gzip.c.patch b/patches/libarchive/archive_write_add_filter_gzip.c.patch
new file mode 100644
index 0000000000..bdbdf68f21
--- /dev/null
+++ b/patches/libarchive/archive_write_add_filter_gzip.c.patch
@@ -0,0 +1,58 @@
+--- a/libarchive/archive_write_add_filter_gzip.c
++++ b/libarchive/archive_write_add_filter_gzip.c
+@@ -58,6 +58,7 @@ archive_write_set_compression_gzip(struct archive *a)
+ struct private_data {
+ 	int compression_level;
+ 	int timestamp;
++	unsigned char os;
+ #ifdef HAVE_ZLIB_H
+ 	z_stream stream;
+ 	int64_t total_in;
+@@ -106,6 +107,7 @@ archive_write_add_filter_gzip(struct archive *_a)
+ 		archive_set_error(&a->archive, ENOMEM, "Out of memory");
+ 		return (ARCHIVE_FATAL);
+ 	}
++	data->os = 3; /* default Unix */
+ 	f->data = data;
+ 	f->open = &archive_compressor_gzip_open;
+ 	f->options = &archive_compressor_gzip_options;
+@@ -166,6 +168,30 @@ archive_compressor_gzip_options(struct archive_write_filter *f, const char *key,
+ 		return (ARCHIVE_OK);
+ 	}
+
++	if (strcmp(key, "os") == 0) {
++		if (value == NULL)
++			return (ARCHIVE_WARN);
++
++		if (strcmp(value, "FAT") == 0) data->os = 0;
++		else if (strcmp(value, "Amiga") == 0) data->os = 1;
++		else if (strcmp(value, "VMS") == 0 || strcmp(value, "OpenVMS") == 0) data->os = 2;
++		else if (strcmp(value, "Unix") == 0) data->os = 3;
++		else if (strcmp(value, "VM") == 0 || strcmp(value, "VM/CMS") == 0) data->os = 4;
++		else if (strcmp(value, "Atari TOS") == 0) data->os = 5;
++		else if (strcmp(value, "HPFS") == 0) data->os = 6;
++		else if (strcmp(value, "Macintosh") == 0) data->os = 7;
++		else if (strcmp(value, "Z-System") == 0) data->os = 8;
++		else if (strcmp(value, "CP/M") == 0) data->os = 9;
++		else if (strcmp(value, "TOPS-20") == 0) data->os = 10;
++		else if (strcmp(value, "NTFS") == 0) data->os = 11;
++		else if (strcmp(value, "QDOS") == 0) data->os = 12;
++		else if (strcmp(value, "Acorn RISCOS") == 0) data->os = 13;
++		else if (strcmp(value, "Unknown") == 0) data->os = 255;
++		else return (ARCHIVE_WARN);
++
++		return (ARCHIVE_OK);
++	}
++
+ 	/* Note: The "warn" return is just to inform the options
+ 	 * supervisor that we didn't handle it. It will generate
+ 	 * a suitable error if no one used this option. */
+@@ -226,7 +252,7 @@ archive_compressor_gzip_open(struct archive_write_filter *f)
+ 		data->compressed[8] = 4;
+ 	else
+ 		data->compressed[8] = 0;
+-	data->compressed[9] = 3; /* OS=Unix */
++	data->compressed[9] = data->os;
+ 	data->stream.next_out += 10;
+ 	data->stream.avail_out -= 10;
+
diff --git a/src/StandaloneModuleGraph.zig b/src/StandaloneModuleGraph.zig
index 00be32eda7..a58989280f 100644
--- a/src/StandaloneModuleGraph.zig
+++ b/src/StandaloneModuleGraph.zig
@@ -1039,7 +1039,7 @@ pub const StandaloneModuleGraph = struct {
             bun.JSAst.Expr.Data.Store.reset();
             bun.JSAst.Stmt.Data.Store.reset();
         }
-        var json = bun.JSON.ParseJSON(&json_src, &log, arena, false) catch
+        var json = bun.JSON.parse(&json_src, &log, arena, false) catch
            return error.InvalidSourceMap;

        const mappings_str = json.get("mappings") orelse
diff --git a/src/bun.js/ConsoleObject.zig b/src/bun.js/ConsoleObject.zig
index 58141dec83..b58cee7c92 100644
--- a/src/bun.js/ConsoleObject.zig
+++ b/src/bun.js/ConsoleObject.zig
@@ -1778,7 +1778,7 @@ pub const Formatter = struct {
                 writer.print(
                     comptime Output.prettyFmt("{s}: ", enable_ansi_colors),
-                    .{JSPrinter.formatJSONString(key.slice())},
+                    .{bun.fmt.formatJSONString(key.slice())},
                 );
             }
         } else if (Environment.isDebug and is_private_symbol) {
diff --git a/src/bun.js/javascript.zig b/src/bun.js/javascript.zig
index dab8a72585..b5d4c5bea6 100644
--- a/src/bun.js/javascript.zig
+++ b/src/bun.js/javascript.zig
@@ -2666,7 +2666,7 @@ pub const VirtualMachine = struct {
                 "{s} resolving preload {}",
                 .{
                     @errorName(e),
-                    js_printer.formatJSONString(preload),
+                    bun.fmt.formatJSONString(preload),
                 },
             ) catch unreachable;
             return e;
@@ -2678,7 +2678,7 @@ pub const VirtualMachine = struct {
                 this.allocator,
                 "preload not found {}",
                 .{
-                    js_printer.formatJSONString(preload),
+                    bun.fmt.formatJSONString(preload),
                 },
             ) catch unreachable;
             return error.ModuleNotFound;
diff --git a/src/bun.js/module_loader.zig b/src/bun.js/module_loader.zig
index 1d9d176bc6..8e0d014928 100644
--- a/src/bun.js/module_loader.zig
+++ b/src/bun.js/module_loader.zig
@@ -184,9 +184,9 @@ fn dumpSourceStringFailiable(vm: *VirtualMachine, specifier: string, written: []
         \\  "mappings": "{}"
         \\}}
     , .{
-        js_printer.formatJSONStringUTF8(std.fs.path.basename(specifier)),
-        js_printer.formatJSONStringUTF8(specifier),
-        js_printer.formatJSONStringUTF8(source_file),
+        bun.fmt.formatJSONStringUTF8(std.fs.path.basename(specifier)),
+        bun.fmt.formatJSONStringUTF8(specifier),
+        bun.fmt.formatJSONStringUTF8(source_file),
         mappings.formatVLQs(),
     });
     try bufw.flush();
diff --git a/src/bun.js/test/pretty_format.zig b/src/bun.js/test/pretty_format.zig
index 3b2720b995..501709db4f 100644
--- a/src/bun.js/test/pretty_format.zig
+++ b/src/bun.js/test/pretty_format.zig
@@ -861,7 +861,7 @@ pub const JestPrettyFormat = struct {
                 writer.print(
                     comptime Output.prettyFmt("{s}: ", enable_ansi_colors),
-                    .{JSPrinter.formatJSONString(key.slice())},
+                    .{bun.fmt.formatJSONString(key.slice())},
                 );
             }
         } else {
diff --git a/src/bun.zig b/src/bun.zig
index 8f979c5196..2cda4881b5 100644
--- a/src/bun.zig
+++ b/src/bun.zig
@@ -96,6 +96,8 @@ pub const JSError = error{
     JSError,
 };

+pub const detectCI = @import("./ci_info.zig").detectCI;
+
 pub const C = @import("root").C;
 pub const sha = @import("./sha.zig");
 pub const FeatureFlags = 
@import("feature_flags.zig"); @@ -299,6 +301,8 @@ pub fn platformIOVecToSlice(iovec: PlatformIOVec) []u8 { return iovec.base[0..iovec.len]; } +pub const libarchive = @import("./libarchive/libarchive.zig"); + pub const StringTypes = @import("string_types.zig"); pub const stringZ = StringTypes.stringZ; pub const string = StringTypes.string; @@ -2231,9 +2235,12 @@ pub const Stat = if (Environment.isWindows) windows.libuv.uv_stat_t else std.pos pub var argv: [][:0]const u8 = &[_][:0]const u8{}; pub fn initArgv(allocator: std.mem.Allocator) !void { - if (comptime !Environment.isWindows) { - argv = try std.process.argsAlloc(allocator); - } else { + if (comptime Environment.isPosix) { + argv = try allocator.alloc([:0]const u8, std.os.argv.len); + for (0..argv.len) |i| { + argv[i] = std.mem.sliceTo(std.os.argv[i], 0); + } + } else if (comptime Environment.isWindows) { // Zig's implementation of `std.process.argsAlloc()`on Windows platforms // is not reliable, specifically the way it splits the command line string. // @@ -2283,6 +2290,8 @@ pub fn initArgv(allocator: std.mem.Allocator) !void { } argv = out_argv; + } else { + argv = try std.process.argsAlloc(allocator); } } diff --git a/src/bundler.zig b/src/bundler.zig index a729e3d19d..dc9b55abe8 100644 --- a/src/bundler.zig +++ b/src/bundler.zig @@ -1455,9 +1455,9 @@ pub const Bundler = struct { // We allow importing tsconfig.*.json or jsconfig.*.json with comments // These files implicitly become JSONC files, which aligns with the behavior of text editors. if (source.path.isJSONCFile()) - json_parser.ParseTSConfig(&source, bundler.log, allocator, false) catch return null + json_parser.parseTSConfig(&source, bundler.log, allocator, false) catch return null else - json_parser.ParseJSON(&source, bundler.log, allocator, false) catch return null + json_parser.parse(&source, bundler.log, allocator, false) catch return null else if (kind == .toml) TOML.parse(&source, bundler.log, allocator) catch return null else diff --git a/src/bunfig.zig b/src/bunfig.zig index 6ad52eac40..f141edcd2f 100644 --- a/src/bunfig.zig +++ b/src/bunfig.zig @@ -785,7 +785,7 @@ pub const Bunfig = struct { ctx.log.addErrorFmt(&source, logger.Loc.Empty, allocator, "Failed to parse", .{}) catch unreachable; } return err; - } else JSONParser.ParseTSConfig(&source, ctx.log, allocator, true) catch |err| { + } else JSONParser.parseTSConfig(&source, ctx.log, allocator, true) catch |err| { if (ctx.log.errors + ctx.log.warnings == log_count) { ctx.log.addErrorFmt(&source, logger.Loc.Empty, allocator, "Failed to parse", .{}) catch unreachable; } diff --git a/src/cache.zig b/src/cache.zig index 6934a8aa6e..4ea8616cac 100644 --- a/src/cache.zig +++ b/src/cache.zig @@ -308,17 +308,17 @@ pub const Json = struct { // They are JSON files with comments and trailing commas. // Sometimes tooling expects this to work. 
if (source.path.isJSONCFile()) { - return try parse(cache, log, source, allocator, json_parser.ParseTSConfig, true); + return try parse(cache, log, source, allocator, json_parser.parseTSConfig, true); } - return try parse(cache, log, source, allocator, json_parser.ParseJSON, false); + return try parse(cache, log, source, allocator, json_parser.parse, false); } pub fn parsePackageJSON(cache: *@This(), log: *logger.Log, source: logger.Source, allocator: std.mem.Allocator, comptime force_utf8: bool) anyerror!?js_ast.Expr { - return try parse(cache, log, source, allocator, json_parser.ParseTSConfig, force_utf8); + return try parse(cache, log, source, allocator, json_parser.parseTSConfig, force_utf8); } pub fn parseTSConfig(cache: *@This(), log: *logger.Log, source: logger.Source, allocator: std.mem.Allocator) anyerror!?js_ast.Expr { - return try parse(cache, log, source, allocator, json_parser.ParseTSConfig, true); + return try parse(cache, log, source, allocator, json_parser.parseTSConfig, true); } }; diff --git a/src/ci_info.zig b/src/ci_info.zig new file mode 100644 index 0000000000..ed88efc291 --- /dev/null +++ b/src/ci_info.zig @@ -0,0 +1,421 @@ +// A modified port of ci-info@4.0.0 (https://github.com/watson/ci-info) +// Only gets the CI name, `isPR` is not implemented. + +// Names are changed to match what `npm publish` uses +// https://github.com/npm/cli/blob/63d6a732c3c0e9c19fd4d147eaa5cc27c29b168d/workspaces/config/lib/definitions/definitions.js#L2129 +// `name.toLowerCase().split(' ').join('-')` + +const std = @import("std"); +const bun = @import("root").bun; +const strings = bun.strings; + +var ci_name: ?[]const u8 = null; + +pub fn detectCI() ?[]const u8 { + const ci = ci_name orelse ci_name: { + CI.once.call(); + break :ci_name ci_name.?; + }; + + return if (ci.len == 0) null else ci; +} + +const CI = enum { + @"agola-ci", + appcircle, + appveyor, + @"aws-codebuild", + @"azure-pipelines", + bamboo, + @"bitbucket-pipelines", + bitrise, + buddy, + buildkite, + circleci, + @"cirrus-ci", + codefresh, + codemagic, + codeship, + drone, + dsari, + earthly, + @"expo-application-services", + gerrit, + @"gitea-actions", + @"github-actions", + @"gitlab-ci", + gocd, + @"google-cloud-build", + @"harness-ci", + // heroku, + hudson, + jenkins, + layerci, + @"magnum-ci", + @"netlify-ci", + nevercode, + prow, + releasehub, + render, + @"sail-ci", + screwdriver, + semaphore, + sourcehut, + @"strider-cd", + taskcluster, + teamcity, + @"travis-ci", + vela, + vercel, + @"visual-studio-app-center", + woodpecker, + @"xcode-cloud", + @"xcode-server", + + pub var once = std.once(struct { + pub fn once() void { + var name: []const u8 = ""; + defer ci_name = name; + + if (bun.getenvZ("CI")) |ci| { + if (strings.eqlComptime(ci, "false")) { + return; + } + } + + // Special case Heroku + if (bun.getenvZ("NODE")) |node| { + if (strings.containsComptime(node, "/app/.heroku/node/bin/node")) { + name = "heroku"; + return; + } + } + + ci: for (CI.array.values, 0..) 
|item, i| { + const any, const pairs = item; + + pairs: for (pairs) |pair| { + const key, const value = pair; + + if (bun.getenvZ(key)) |env| { + if (value.len == 0 or bun.strings.eqlLong(env, value, true)) { + if (!any) continue :pairs; + + name = @tagName(Array.Indexer.keyForIndex(i)); + return; + } + } + + if (!any) continue :ci; + } + + if (!any) { + name = @tagName(Array.Indexer.keyForIndex(i)); + return; + } + } + } + }.once); + + pub const Array = std.EnumArray(CI, struct { bool, []const [2][:0]const u8 }); + + pub const array = Array.init(.{ + .@"agola-ci" = .{ + false, + &.{ + .{ "AGOLA_GIT_REF", "" }, + }, + }, + .appcircle = .{ + false, + &.{ + .{ "AC_APPCIRCLE", "" }, + }, + }, + .appveyor = .{ + false, + &.{ + .{ "APPVEYOR", "" }, + }, + }, + .@"aws-codebuild" = .{ + false, + &.{ + .{ "CODEBUILD_BUILD_ARN", "" }, + }, + }, + .@"azure-pipelines" = .{ + false, + &.{ + .{ "TF_BUILD", "" }, + }, + }, + .bamboo = .{ + false, + &.{ + .{ "bamboo_planKey", "" }, + }, + }, + .@"bitbucket-pipelines" = .{ + false, + &.{ + .{ "BITBUCKET_COMMIT", "" }, + }, + }, + .bitrise = .{ + false, + &.{ + .{ "BITRISE_IO", "" }, + }, + }, + .buddy = .{ + false, + &.{ + .{ "BUDDY_WORKSPACE_ID", "" }, + }, + }, + .buildkite = .{ + false, + &.{ + .{ "BUILDKITE", "" }, + }, + }, + .circleci = .{ + false, + &.{ + .{ "CIRCLECI", "" }, + }, + }, + .@"cirrus-ci" = .{ + false, + &.{ + .{ "CIRRUS_CI", "" }, + }, + }, + .codefresh = .{ + false, + &.{ + .{ "CF_BUILD_ID", "" }, + }, + }, + .codemagic = .{ + false, + &.{ + .{ "CM_BUILD_ID", "" }, + }, + }, + .codeship = .{ + false, + &.{ + .{ "CI_NAME", "codeship" }, + }, + }, + .drone = .{ + false, + &.{ + .{ "DRONE", "" }, + }, + }, + .dsari = .{ + false, + &.{ + .{ "DSARI", "" }, + }, + }, + .earthly = .{ + false, + &.{ + .{ "EARTHLY_CI", "" }, + }, + }, + .@"expo-application-services" = .{ + false, + &.{ + .{ "EAS_BUILD", "" }, + }, + }, + .gerrit = .{ + false, + &.{ + .{ "GERRIT_PROJECT", "" }, + }, + }, + .@"gitea-actions" = .{ + false, + &.{ + .{ "GITEA_ACTIONS", "" }, + }, + }, + .@"github-actions" = .{ + false, + &.{ + .{ "GITHUB_ACTIONS", "" }, + }, + }, + .@"gitlab-ci" = .{ + false, + &.{ + .{ "GITLAB_CI", "" }, + }, + }, + .gocd = .{ + false, + &.{ + .{ "GO_PIPELINE_LABEL", "" }, + }, + }, + .@"google-cloud-build" = .{ + false, + &.{ + .{ "BUILDER_OUTPUT", "" }, + }, + }, + .@"harness-ci" = .{ + false, + &.{ + .{ "HARNESS_BUILD_ID", "" }, + }, + }, + .hudson = .{ + false, + &.{ + .{ "HUDSON_URL", "" }, + }, + }, + .jenkins = .{ + false, + &.{ + .{ "JENKINS_URL", "" }, + .{ "BUILD_ID", "" }, + }, + }, + .layerci = .{ + false, + &.{ + .{ "LAYERCI", "" }, + }, + }, + .@"magnum-ci" = .{ + false, + &.{ + .{ "MAGNUM", "" }, + }, + }, + .@"netlify-ci" = .{ + false, + &.{ + .{ "NETLIFY", "" }, + }, + }, + .nevercode = .{ + false, + &.{ + .{ "NEVERCODE", "" }, + }, + }, + .prow = .{ + false, + &.{ + .{ "PROW_JOB_ID", "" }, + }, + }, + .releasehub = .{ + false, + &.{ + .{ "RELEASE_BUILD_ID", "" }, + }, + }, + .render = .{ + false, + &.{ + .{ "RENDER", "" }, + }, + }, + .@"sail-ci" = .{ + false, + &.{ + .{ "SAILCI", "" }, + }, + }, + .screwdriver = .{ + false, + &.{ + .{ "SCREWDRIVER", "" }, + }, + }, + .semaphore = .{ + false, + &.{ + .{ "SEMAPHORE", "" }, + }, + }, + .sourcehut = .{ + false, + &.{ + .{ "CI_NAME", "sourcehut" }, + }, + }, + .@"strider-cd" = .{ + false, + &.{ + .{ "STRIDER", "" }, + }, + }, + .taskcluster = .{ + false, + &.{ + .{ "TASK_ID", "" }, + .{ "RUN_ID", "" }, + }, + }, + .teamcity = .{ + false, + &.{ + .{ "TEAMCITY_VERSION", "" }, + }, + 
}, + .@"travis-ci" = .{ + false, + &.{ + .{ "TRAVIS", "" }, + }, + }, + .vela = .{ + false, + &.{ + .{ "VELA", "" }, + }, + }, + .vercel = .{ + true, + &.{ + .{ "NOW_BUILDER", "" }, + .{ "VERCEL", "" }, + }, + }, + .@"visual-studio-app-center" = .{ + false, + &.{ + .{ "APPCENTER_BUILD_ID", "" }, + }, + }, + .woodpecker = .{ + false, + &.{ + .{ "CI", "woodpecker" }, + }, + }, + .@"xcode-cloud" = .{ + false, + &.{ + .{ "CI_XCODE_PROJECT", "" }, + }, + }, + .@"xcode-server" = .{ + false, + &.{ + .{ "XCS", "" }, + }, + }, + }); +}; diff --git a/src/cli.zig b/src/cli.zig index 6e30b2b95c..a3395f7ca1 100644 --- a/src/cli.zig +++ b/src/cli.zig @@ -44,6 +44,7 @@ const MacroMap = @import("./resolver/package_json.zig").MacroMap; const TestCommand = @import("./cli/test_command.zig").TestCommand; pub var start_time: i128 = undefined; const Bunfig = @import("./bunfig.zig").Bunfig; +const OOM = bun.OOM; pub const Cli = struct { pub const CompileTarget = @import("./compile_target.zig"); @@ -116,6 +117,9 @@ pub const ExecCommand = @import("./cli/exec_command.zig").ExecCommand; pub const PatchCommand = @import("./cli/patch_command.zig").PatchCommand; pub const PatchCommitCommand = @import("./cli/patch_commit_command.zig").PatchCommitCommand; pub const OutdatedCommand = @import("./cli/outdated_command.zig").OutdatedCommand; +pub const PublishCommand = @import("./cli/publish_command.zig").PublishCommand; +pub const PackCommand = @import("./cli/pack_command.zig").PackCommand; +pub const InitCommand = @import("./cli/init_command.zig").InitCommand; pub const Arguments = struct { pub fn loader_resolver(in: string) !Api.Loader { @@ -327,14 +331,25 @@ pub const Arguments = struct { return null; } - pub fn loadConfig(allocator: std.mem.Allocator, user_config_path_: ?string, ctx: Command.Context, comptime cmd: Command.Tag) !void { + pub fn loadConfig(allocator: std.mem.Allocator, user_config_path_: ?string, ctx: Command.Context, comptime cmd: Command.Tag) OOM!void { var config_buf: bun.PathBuffer = undefined; if (comptime cmd.readGlobalConfig()) { if (!ctx.has_loaded_global_config) { ctx.has_loaded_global_config = true; if (getHomeConfigPath(&config_buf)) |path| { - try loadConfigPath(allocator, true, path, ctx, comptime cmd); + loadConfigPath(allocator, true, path, ctx, comptime cmd) catch |err| { + if (ctx.log.hasAny()) { + switch (Output.enable_ansi_colors) { + inline else => |enable_ansi_colors| { + ctx.log.printForLogLevelWithEnableAnsiColors(Output.errorWriter(), enable_ansi_colors) catch {}; + }, + } + } + if (ctx.log.hasAny()) Output.printError("\n", .{}); + Output.err(err, "failed to load bunfig", .{}); + Global.crash(); + }; } } } @@ -382,7 +397,18 @@ pub const Arguments = struct { config_path = config_buf[0..config_path_.len :0]; } - try loadConfigPath(allocator, auto_loaded, config_path, ctx, comptime cmd); + loadConfigPath(allocator, auto_loaded, config_path, ctx, comptime cmd) catch |err| { + if (ctx.log.hasAny()) { + switch (Output.enable_ansi_colors) { + inline else => |enable_ansi_colors| { + ctx.log.printForLogLevelWithEnableAnsiColors(Output.errorWriter(), enable_ansi_colors) catch {}; + }, + } + } + if (ctx.log.hasAny()) Output.printError("\n", .{}); + Output.err(err, "failed to load bunfig", .{}); + Global.crash(); + }; } pub fn loadConfigWithCmdArgs( @@ -390,7 +416,7 @@ pub const Arguments = struct { allocator: std.mem.Allocator, args: clap.Args(clap.Help, cmd.params()), ctx: Command.Context, - ) !void { + ) OOM!void { return try loadConfig(allocator, args.option("--config"), ctx, comptime 
cmd); } @@ -1068,7 +1094,6 @@ const AutoCommand = struct { try HelpCommand.execWithReason(allocator, .invalid_command); } }; -const InitCommand = @import("./cli/init_command.zig").InitCommand; pub const HelpCommand = struct { pub fn exec(allocator: std.mem.Allocator) !void { @@ -1214,9 +1239,9 @@ pub const HelpCommand = struct { printWithReason(reason, false); if (reason == .invalid_command) { - std.process.exit(1); + Global.exit(1); } - std.process.exit(0); + Global.exit(0); } }; @@ -1393,7 +1418,7 @@ pub const Command = struct { // std.process.args allocates! const ArgsIterator = struct { - buf: [][:0]const u8 = undefined, + buf: [][:0]const u8, i: u32 = 0, pub fn next(this: *ArgsIterator) ?[]const u8 { @@ -1456,7 +1481,7 @@ pub const Command = struct { } const first_arg_name = next_arg; - const RootCommandMatcher = strings.ExactSizeMatcher(16); + const RootCommandMatcher = strings.ExactSizeMatcher(12); return switch (RootCommandMatcher.match(first_arg_name)) { RootCommandMatcher.case("init") => .InitCommand, @@ -1505,6 +1530,7 @@ pub const Command = struct { RootCommandMatcher.case("exec") => .ExecCommand, RootCommandMatcher.case("outdated") => .OutdatedCommand, + RootCommandMatcher.case("publish") => .PublishCommand, // These are reserved for future use by Bun, so that someone // doing `bun deploy` to run a script doesn't accidentally break @@ -1518,7 +1544,6 @@ pub const Command = struct { RootCommandMatcher.case("login") => .ReservedCommand, RootCommandMatcher.case("logout") => .ReservedCommand, RootCommandMatcher.case("whoami") => .ReservedCommand, - RootCommandMatcher.case("publish") => .ReservedCommand, RootCommandMatcher.case("prune") => .ReservedCommand, RootCommandMatcher.case("list") => .ReservedCommand, RootCommandMatcher.case("why") => .ReservedCommand, @@ -1648,6 +1673,13 @@ pub const Command = struct { try OutdatedCommand.exec(ctx); return; }, + .PublishCommand => { + if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .PublishCommand) unreachable; + const ctx = try Command.init(allocator, log, .PublishCommand); + + try PublishCommand.exec(ctx); + return; + }, .BunxCommand => { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .BunxCommand) unreachable; const ctx = try Command.init(allocator, log, .BunxCommand); @@ -1690,6 +1722,20 @@ pub const Command = struct { if (comptime bun.fast_debug_build_mode and bun.fast_debug_build_cmd != .PackageManagerCommand) unreachable; const ctx = try Command.init(allocator, log, .PackageManagerCommand); + // const maybe_subcommand, const maybe_arg = PackageManagerCommand.which(command_index); + // if (maybe_subcommand) |subcommand| { + // return switch (subcommand) { + // inline else => |tag| try PackageManagerCommand.exec(ctx, tag), + // }; + // } + + // PackageManagerCommand.printHelp(); + + // if (maybe_arg) |arg| { + // Output.errGeneric("\"{s}\" unknown command", .{arg}); + // Global.crash(); + // } + try PackageManagerCommand.exec(ctx); return; }, @@ -2216,6 +2262,7 @@ pub const Command = struct { PatchCommand, PatchCommitCommand, OutdatedCommand, + PublishCommand, /// Used by crash reports. 
/// @@ -2248,6 +2295,7 @@ pub const Command = struct { .PatchCommand => 'x', .PatchCommitCommand => 'z', .OutdatedCommand => 'o', + .PublishCommand => 'k', }; } @@ -2471,9 +2519,10 @@ pub const Command = struct { , .{}); Output.flush(); }, - .OutdatedCommand => { + .OutdatedCommand, .PublishCommand => { Install.PackageManager.CommandLineArguments.printHelp(switch (cmd) { .OutdatedCommand => .outdated, + .PublishCommand => .publish, }); }, else => { @@ -2493,6 +2542,7 @@ pub const Command = struct { .PatchCommand, .PatchCommitCommand, .OutdatedCommand, + .PublishCommand, => true, else => false, }; @@ -2511,6 +2561,7 @@ pub const Command = struct { .PatchCommand, .PatchCommitCommand, .OutdatedCommand, + .PublishCommand, => true, else => false, }; @@ -2531,6 +2582,7 @@ pub const Command = struct { .RunCommand = true, .RunAsNodeCommand = true, .OutdatedCommand = true, + .PublishCommand = true, }); pub const always_loads_config: std.EnumArray(Tag, bool) = std.EnumArray(Tag, bool).initDefault(false, .{ @@ -2545,6 +2597,7 @@ pub const Command = struct { .PackageManagerCommand = true, .BunxCommand = true, .OutdatedCommand = true, + .PublishCommand = true, }); pub const uses_global_options: std.EnumArray(Tag, bool) = std.EnumArray(Tag, bool).initDefault(true, .{ @@ -2560,6 +2613,7 @@ pub const Command = struct { .UnlinkCommand = false, .BunxCommand = false, .OutdatedCommand = false, + .PublishCommand = false, }); }; }; diff --git a/src/cli/bunx_command.zig b/src/cli/bunx_command.zig index c0bb2fa7f7..21ce662274 100644 --- a/src/cli/bunx_command.zig +++ b/src/cli/bunx_command.zig @@ -82,7 +82,7 @@ pub const BunxCommand = struct { bun.JSAst.Expr.Data.Store.create(); bun.JSAst.Stmt.Data.Store.create(); - const expr = try bun.JSON.ParsePackageJSONUTF8(&source, bundler.log, bundler.allocator); + const expr = try bun.JSON.parsePackageJSONUTF8(&source, bundler.log, bundler.allocator); // choose the first package that fits if (expr.get("bin")) |bin_expr| { diff --git a/src/cli/create_command.zig b/src/cli/create_command.zig index 7b7bf50c7c..d45a8b0408 100644 --- a/src/cli/create_command.zig +++ b/src/cli/create_command.zig @@ -31,8 +31,8 @@ const fs = @import("../fs.zig"); const URL = @import("../url.zig").URL; const HTTP = bun.http; -const ParseJSON = @import("../json_parser.zig").ParseJSONUTF8; -const Archive = @import("../libarchive/libarchive.zig").Archive; +const JSON = bun.JSON; +const Archiver = bun.libarchive.Archiver; const Zlib = @import("../zlib.zig"); const JSPrinter = bun.js_printer; const DotEnv = @import("../env_loader.zig"); @@ -377,19 +377,19 @@ pub const CreateCommand = struct { progress.refresh(); - var pluckers: [1]Archive.Plucker = if (!create_options.skip_package_json) - [1]Archive.Plucker{try Archive.Plucker.init(comptime strings.literal(bun.OSPathChar, "package.json"), 2048, ctx.allocator)} + var pluckers: [1]Archiver.Plucker = if (!create_options.skip_package_json) + [1]Archiver.Plucker{try Archiver.Plucker.init(comptime strings.literal(bun.OSPathChar, "package.json"), 2048, ctx.allocator)} else - [1]Archive.Plucker{undefined}; + [1]Archiver.Plucker{undefined}; - var archive_context = Archive.Context{ + var archive_context = Archiver.Context{ .pluckers = pluckers[0..@as(usize, @intCast(@intFromBool(!create_options.skip_package_json)))], .all_files = undefined, .overwrite_list = bun.StringArrayHashMap(void).init(ctx.allocator), }; if (!create_options.overwrite) { - try Archive.getOverwritingFileList( + try Archiver.getOverwritingFileList( tarball_buf_list.items, destination, 
&archive_context, @@ -427,7 +427,7 @@ pub const CreateCommand = struct { } } - _ = try Archive.extractToDisk( + _ = try Archiver.extractToDisk( tarball_buf_list.items, destination, &archive_context, @@ -701,7 +701,7 @@ pub const CreateCommand = struct { var source = logger.Source.initPathString("package.json", package_json_contents.list.items); - var package_json_expr = ParseJSON(&source, ctx.log, ctx.allocator) catch { + var package_json_expr = JSON.parseUTF8(&source, ctx.log, ctx.allocator) catch { package_json_file = null; break :process_package_json; }; @@ -1983,7 +1983,7 @@ pub const Example = struct { async_http.client.progress_node = progress; async_http.client.flags.reject_unauthorized = env_loader.getTLSRejectUnauthorized(); - const response = try async_http.sendSync(true); + const response = try async_http.sendSync(); switch (response.status_code) { 404 => return error.GitHubRepositoryNotFound, @@ -2060,7 +2060,7 @@ pub const Example = struct { async_http.client.progress_node = progress; async_http.client.flags.reject_unauthorized = env_loader.getTLSRejectUnauthorized(); - var response = try async_http.sendSync(true); + var response = try async_http.sendSync(); switch (response.status_code) { 404 => return error.ExampleNotFound, @@ -2075,7 +2075,7 @@ pub const Example = struct { refresher.refresh(); initializeStore(); var source = logger.Source.initPathString("package.json", mutable.list.items); - var expr = ParseJSON(&source, ctx.log, ctx.allocator) catch |err| { + var expr = JSON.parseUTF8(&source, ctx.log, ctx.allocator) catch |err| { progress.end(); refresher.refresh(); @@ -2151,7 +2151,7 @@ pub const Example = struct { refresher.maybeRefresh(); - response = try async_http.sendSync(true); + response = try async_http.sendSync(); refresher.maybeRefresh(); @@ -2194,7 +2194,7 @@ pub const Example = struct { async_http.client.progress_node = progress_node; } - const response = async_http.sendSync(true) catch |err| { + const response = async_http.sendSync() catch |err| { switch (err) { error.WouldBlock => { Output.prettyErrorln("Request timed out while trying to fetch examples list. 
Please try again", .{}); @@ -2214,7 +2214,7 @@ pub const Example = struct { initializeStore(); var source = logger.Source.initPathString("examples.json", mutable.list.items); - const examples_object = ParseJSON(&source, ctx.log, ctx.allocator) catch |err| { + const examples_object = JSON.parseUTF8(&source, ctx.log, ctx.allocator) catch |err| { if (ctx.log.errors > 0) { if (Output.enable_ansi_colors) { try ctx.log.printForLogLevelWithEnableAnsiColors(Output.errorWriter(), true); diff --git a/src/cli/filter_arg.zig b/src/cli/filter_arg.zig index 297233df3a..99031058d5 100644 --- a/src/cli/filter_arg.zig +++ b/src/cli/filter_arg.zig @@ -5,7 +5,7 @@ const string = bun.string; const Output = bun.Output; const Global = bun.Global; const strings = bun.strings; -const json_parser = bun.JSON; +const JSON = bun.JSON; const Glob = @import("../glob.zig"); const Package = @import("../install/lockfile.zig").Package; @@ -65,7 +65,7 @@ pub fn getCandidatePackagePatterns(allocator: std.mem.Allocator, log: *bun.logge }; defer allocator.free(json_source.contents); - const json = try json_parser.ParsePackageJSONUTF8(&json_source, log, allocator); + const json = try JSON.parsePackageJSONUTF8(&json_source, log, allocator); const prop = json.asProperty("workspaces") orelse continue; diff --git a/src/cli/init_command.zig b/src/cli/init_command.zig index 35d22af2fe..16d4d407a7 100644 --- a/src/cli/init_command.zig +++ b/src/cli/init_command.zig @@ -12,7 +12,7 @@ const std = @import("std"); const open = @import("../open.zig"); const CLI = @import("../cli.zig"); const Fs = @import("../fs.zig"); -const ParseJSON = @import("../json_parser.zig").ParsePackageJSONUTF8; +const JSON = bun.JSON; const js_parser = bun.js_parser; const js_ast = bun.JSAst; const linker = @import("../linker.zig"); @@ -26,11 +26,10 @@ fn exists(path: anytype) bool { return bun.sys.exists(path); } pub const InitCommand = struct { - fn prompt( + pub fn prompt( alloc: std.mem.Allocator, comptime label: string, default: []const u8, - _: bool, ) ![]const u8 { Output.pretty(label, .{}); if (default.len > 0) { @@ -171,7 +170,7 @@ pub const InitCommand = struct { process_package_json: { var source = logger.Source.initPathString("package.json", package_json_contents.list.items); var log = logger.Log.init(alloc); - var package_json_expr = ParseJSON(&source, &log, alloc) catch { + var package_json_expr = JSON.parsePackageJSONUTF8(&source, &log, alloc) catch { package_json_file = null; break :process_package_json; }; @@ -248,7 +247,6 @@ pub const InitCommand = struct { alloc, "package name ", fields.name, - Output.enable_ansi_colors_stdout, ) catch |err| { if (err == error.EndOfStream) return; return err; @@ -260,7 +258,6 @@ pub const InitCommand = struct { alloc, "entry point ", fields.entry_point, - Output.enable_ansi_colors_stdout, ) catch |err| { if (err == error.EndOfStream) return; return err; @@ -439,7 +436,7 @@ pub const InitCommand = struct { " \"'", fields.entry_point, )) { - Output.prettyln(" bun run {any}", .{JSPrinter.formatJSONString(fields.entry_point)}); + Output.prettyln(" bun run {any}", .{bun.fmt.formatJSONString(fields.entry_point)}); } else { Output.prettyln(" bun run {s}", .{fields.entry_point}); } diff --git a/src/cli/pack_command.zig b/src/cli/pack_command.zig index c36df036b6..4d4bc36b40 100644 --- a/src/cli/pack_command.zig +++ b/src/cli/pack_command.zig @@ -34,6 +34,8 @@ const BoringSSL = bun.BoringSSL; const sha = bun.sha; const LogLevel = PackageManager.Options.LogLevel; const FileDescriptor = bun.FileDescriptor; +const Publish 
= bun.CLI.PublishCommand; +const Dependency = Install.Dependency; pub const PackCommand = struct { pub const Context = struct { @@ -49,49 +51,30 @@ pub const PackCommand = struct { bundled_deps: std.ArrayListUnmanaged(BundledDep) = .{}, - stats: struct { + stats: Stats = .{}, + + const Stats = struct { unpacked_size: usize = 0, total_files: usize = 0, ignored_files: usize = 0, ignored_directories: usize = 0, packed_size: usize = 0, bundled_deps: usize = 0, - } = .{}, - - pub const BundledDep = struct { - name: string, - was_packed: bool = false, - from_root_package_json: bool, }; - const IntegrityFormatter = struct { - bytes: [sha.SHA512.digest]u8, - - pub fn format(this: IntegrityFormatter, comptime _: string, _: std.fmt.FormatOptions, writer: anytype) !void { - var buf: [std.base64.standard.Encoder.calcSize(sha.SHA512.digest)]u8 = undefined; - const count = bun.simdutf.base64.encode(this.bytes[0..sha.SHA512.digest], &buf, false); - - const encoded = buf[0..count]; - - try writer.print("sha512-{s}[...]{s}", .{ encoded[0..13], encoded[encoded.len - 15 ..] }); - } - }; - - fn fmtIntegrity(bytes: [sha.SHA512.digest]u8) IntegrityFormatter { - return .{ - .bytes = bytes, - }; - } - - pub fn printSummary(this: *const Context, sha1_digest: ?[sha.SHA1.digest]u8, sha512_digest: ?[sha.SHA512.digest]u8, comptime log_level: LogLevel) void { - if (comptime log_level != .silent) { - const stats = this.stats; + pub fn printSummary( + stats: Stats, + maybe_shasum: ?[sha.SHA1.digest]u8, + maybe_integrity: ?[sha.SHA512.digest]u8, + log_level: LogLevel, + ) void { + if (log_level != .silent) { Output.prettyln("\nTotal files: {d}", .{stats.total_files}); - if (sha1_digest) |sha1| { - Output.prettyln("Shasum: {s}", .{bun.fmt.bytesToHex(sha1, .lower)}); + if (maybe_shasum) |shasum| { + Output.prettyln("Shasum: {s}", .{bun.fmt.bytesToHex(shasum, .lower)}); } - if (sha512_digest) |sha512| { - Output.prettyln("Integrity: {}", .{fmtIntegrity(sha512)}); + if (maybe_integrity) |integrity| { + Output.prettyln("Integrity: {}", .{bun.fmt.integrity(integrity, .short)}); } Output.prettyln("Unpacked size: {}", .{ bun.fmt.size(stats.unpacked_size, .{ .space_between_number_and_unit = false }), @@ -108,6 +91,12 @@ pub const PackCommand = struct { } }; + pub const BundledDep = struct { + name: string, + was_packed: bool = false, + from_root_package_json: bool, + }; + pub fn execWithManager(ctx: Command.Context, manager: *PackageManager) !void { Output.prettyln("bun pack v" ++ Global.package_json_version_with_sha ++ "", .{}); Output.flush(); @@ -146,7 +135,7 @@ pub const PackCommand = struct { }), } - if (ctx.log.hasErrors()) { + if (manager.log.hasErrors()) { switch (Output.enable_ansi_colors) { inline else => |enable_ansi_colors| try manager.log.printForLogLevelWithEnableAnsiColors( Output.errorWriter(), @@ -173,7 +162,7 @@ pub const PackCommand = struct { // } // just pack the current workspace - pack(&pack_ctx, manager.original_package_json_path, log_level) catch |err| { + pack(&pack_ctx, manager.original_package_json_path, log_level, false) catch |err| { switch (err) { error.OutOfMemory => bun.outOfMemory(), error.MissingPackageName, error.MissingPackageVersion => { @@ -219,13 +208,19 @@ pub const PackCommand = struct { return execWithManager(ctx, manager); } - const PackError = OOM || error{ - MissingPackageName, - InvalidPackageName, - MissingPackageVersion, - InvalidPackageVersion, - MissingPackageJSON, - }; + pub fn PackError(comptime for_publish: bool) type { + return OOM || error{ + MissingPackageName, + 
InvalidPackageName, + MissingPackageVersion, + InvalidPackageVersion, + MissingPackageJSON, + } || + if (for_publish) error{ + RestrictedUnscopedPackage, + PrivatePackage, + } else error{}; + } const package_prefix = "package/"; @@ -280,25 +275,25 @@ pub const PackCommand = struct { }; fn iterateIncludedProjectTree( - ctx: *Context, + allocator: std.mem.Allocator, includes: []const Pattern, root_dir: std.fs.Dir, comptime log_level: LogLevel, ) OOM!PackQueue { - var pack_queue = PackQueue.init(ctx.allocator, {}); + var pack_queue = PackQueue.init(allocator, {}); var ignores: std.ArrayListUnmanaged(IgnorePatterns) = .{}; - defer ignores.deinit(ctx.allocator); + defer ignores.deinit(allocator); var dirs: std.ArrayListUnmanaged(DirInfo) = .{}; - defer dirs.deinit(ctx.allocator); + defer dirs.deinit(allocator); - try dirs.append(ctx.allocator, .{ root_dir, "", 1 }); + try dirs.append(allocator, .{ root_dir, "", 1 }); var included_dirs: std.ArrayListUnmanaged(DirInfo) = .{}; - defer included_dirs.deinit(ctx.allocator); + defer included_dirs.deinit(allocator); - var subpath_dedupe = bun.StringHashMap(void).init(ctx.allocator); + var subpath_dedupe = bun.StringHashMap(void).init(allocator); defer subpath_dedupe.deinit(); // first find included dirs and files @@ -315,7 +310,7 @@ pub const PackCommand = struct { if (entry.kind != .file and entry.kind != .directory) continue; const entry_name = entry.name.slice(); - const entry_subpath = try entrySubpath(ctx, dir_subpath, entry_name); + const entry_subpath = try entrySubpath(allocator, dir_subpath, entry_name); var included = false; @@ -358,7 +353,7 @@ pub const PackCommand = struct { if (!included) { if (entry.kind == .directory) { const subdir = openSubdir(dir, entry_name, entry_subpath); - try dirs.append(ctx.allocator, .{ subdir, entry_subpath, dir_depth + 1 }); + try dirs.append(allocator, .{ subdir, entry_subpath, dir_depth + 1 }); } continue; @@ -367,7 +362,7 @@ pub const PackCommand = struct { switch (entry.kind) { .directory => { const subdir = openSubdir(dir, entry_name, entry_subpath); - try included_dirs.append(ctx.allocator, .{ subdir, entry_subpath, dir_depth + 1 }); + try included_dirs.append(allocator, .{ subdir, entry_subpath, dir_depth + 1 }); }, .file => { const dedupe_entry = try subpath_dedupe.getOrPut(entry_subpath); @@ -383,7 +378,7 @@ pub const PackCommand = struct { // for each included dir, traverse it's entries, exclude any with `negate_no_match`. 
for (included_dirs.items) |included_dir_info| { - try addEntireTree(ctx, included_dir_info, &pack_queue, &subpath_dedupe, log_level); + try addEntireTree(allocator, included_dir_info, &pack_queue, &subpath_dedupe, log_level); } return pack_queue; @@ -391,19 +386,19 @@ pub const PackCommand = struct { /// Adds all files in a directory tree to `pack_list` (default ignores still apply) fn addEntireTree( - ctx: *Context, + allocator: std.mem.Allocator, root_dir_info: DirInfo, pack_queue: *PackQueue, maybe_dedupe: ?*bun.StringHashMap(void), comptime log_level: LogLevel, ) OOM!void { var dirs: std.ArrayListUnmanaged(DirInfo) = .{}; - defer dirs.deinit(ctx.allocator); + defer dirs.deinit(allocator); - try dirs.append(ctx.allocator, root_dir_info); + try dirs.append(allocator, root_dir_info); var ignores: std.ArrayListUnmanaged(IgnorePatterns) = .{}; - defer ignores.deinit(ctx.allocator); + defer ignores.deinit(allocator); while (dirs.popOrNull()) |dir_info| { var dir, const dir_subpath, const dir_depth = dir_info; @@ -412,12 +407,12 @@ pub const PackCommand = struct { while (ignores.getLastOrNull()) |last| { if (last.depth < dir_depth) break; - last.deinit(ctx.allocator); + last.deinit(allocator); ignores.items.len -= 1; } - if (try IgnorePatterns.readFromDisk(ctx, dir, dir_depth)) |patterns| { - try ignores.append(ctx.allocator, patterns); + if (try IgnorePatterns.readFromDisk(allocator, dir, dir_depth)) |patterns| { + try ignores.append(allocator, patterns); } if (comptime Environment.isDebug) { @@ -434,7 +429,7 @@ pub const PackCommand = struct { if (entry.kind != .file and entry.kind != .directory) continue; const entry_name = entry.name.slice(); - const entry_subpath = try entrySubpath(ctx, dir_subpath, entry_name); + const entry_subpath = try entrySubpath(allocator, dir_subpath, entry_name); if (dir_depth == root_dir_info[2]) { if (entry.kind == .directory and strings.eqlComptime(entry_name, "node_modules")) continue; @@ -465,7 +460,7 @@ pub const PackCommand = struct { .directory => { const subdir = openSubdir(dir, entry_name, entry_subpath); - try dirs.append(ctx.allocator, .{ + try dirs.append(allocator, .{ subdir, entry_subpath, dir_depth + 1, @@ -492,11 +487,11 @@ pub const PackCommand = struct { } fn entrySubpath( - ctx: *Context, + allocator: std.mem.Allocator, dir_subpath: string, entry_name: string, ) OOM!stringZ { - return std.fmt.allocPrintZ(ctx.allocator, "{s}{s}{s}", .{ + return std.fmt.allocPrintZ(allocator, "{s}{s}{s}", .{ dir_subpath, if (dir_subpath.len == 0) "" else "/", entry_name, @@ -552,7 +547,7 @@ pub const PackCommand = struct { bun.assertWithLocation(dep.from_root_package_json, @src()); if (!strings.eqlLong(entry_name, dep.name, true)) continue; - const entry_subpath = try entrySubpath(ctx, "node_modules", entry_name); + const entry_subpath = try entrySubpath(ctx.allocator, "node_modules", entry_name); const dedupe_entry = try dedupe.getOrPut(entry_subpath); if (dedupe_entry.found_existing) { @@ -628,7 +623,7 @@ pub const PackCommand = struct { if (entry.kind != .file and entry.kind != .directory) continue; const entry_name = entry.name.slice(); - const entry_subpath = try entrySubpath(ctx, dir_subpath, entry_name); + const entry_subpath = try entrySubpath(ctx.allocator, dir_subpath, entry_name); if (dir_depth == bundled_dir_info[2]) root_depth: { if (strings.eqlComptime(entry_name, "package.json")) { @@ -639,7 +634,7 @@ pub const PackCommand = struct { Global.crash(); }; - const json = JSON.ParsePackageJSONUTF8(&source, ctx.manager.log, ctx.allocator) catch + const 
json = JSON.parsePackageJSONUTF8(&source, ctx.manager.log, ctx.allocator) catch break :root_depth; // for each dependency in `dependencies` find the closest node_modules folder @@ -738,21 +733,21 @@ pub const PackCommand = struct { /// Returns a list of files to pack and another list of files from bundled dependencies fn iterateProjectTree( - ctx: *Context, + allocator: std.mem.Allocator, root_dir: std.fs.Dir, comptime log_level: LogLevel, ) OOM!PackQueue { - var pack_queue = PackQueue.init(ctx.allocator, {}); + var pack_queue = PackQueue.init(allocator, {}); var ignores: std.ArrayListUnmanaged(IgnorePatterns) = .{}; - defer ignores.deinit(ctx.allocator); + defer ignores.deinit(allocator); // Stacks and depth-first traversal. Doing so means we can push and pop from // ignore patterns without needing to clone the entire list for future use. var dirs: std.ArrayListUnmanaged(DirInfo) = .{}; - defer dirs.deinit(ctx.allocator); + defer dirs.deinit(allocator); - try dirs.append(ctx.allocator, .{ root_dir, "", 1 }); + try dirs.append(allocator, .{ root_dir, "", 1 }); while (dirs.popOrNull()) |dir_info| { var dir, const dir_subpath, const dir_depth = dir_info; @@ -766,12 +761,12 @@ pub const PackCommand = struct { if (last.depth < dir_depth) break; // pop patterns from files greater than or equal to the current depth. - last.deinit(ctx.allocator); + last.deinit(allocator); ignores.items.len -= 1; } - if (try IgnorePatterns.readFromDisk(ctx, dir, dir_depth)) |patterns| { - try ignores.append(ctx.allocator, patterns); + if (try IgnorePatterns.readFromDisk(allocator, dir, dir_depth)) |patterns| { + try ignores.append(allocator, patterns); } if (comptime Environment.isDebug) { @@ -788,7 +783,7 @@ pub const PackCommand = struct { if (entry.kind != .file and entry.kind != .directory) continue; const entry_name = entry.name.slice(); - const entry_subpath = try entrySubpath(ctx, dir_subpath, entry_name); + const entry_subpath = try entrySubpath(allocator, dir_subpath, entry_name); if (dir_depth == 1) { // Special case root package.json. 
It is always included @@ -823,7 +818,7 @@ pub const PackCommand = struct { .directory => { const subdir = openSubdir(dir, entry_name, entry_subpath); - try dirs.append(ctx.allocator, .{ + try dirs.append(allocator, .{ subdir, entry_subpath, dir_depth + 1, @@ -838,11 +833,11 @@ pub const PackCommand = struct { } fn getBundledDeps( - ctx: *Context, + allocator: std.mem.Allocator, json: Expr, comptime field: string, - ) OOM!?std.ArrayListUnmanaged(Context.BundledDep) { - var deps: std.ArrayListUnmanaged(Context.BundledDep) = .{}; + ) OOM!?std.ArrayListUnmanaged(BundledDep) { + var deps: std.ArrayListUnmanaged(BundledDep) = .{}; const bundled_deps = json.get(field) orelse return null; invalid_field: { @@ -851,8 +846,8 @@ pub const PackCommand = struct { else => break :invalid_field, }; while (iter.next()) |bundled_dep_item| { - const bundled_dep = bundled_dep_item.asStringCloned(ctx.allocator) orelse break :invalid_field; - try deps.append(ctx.allocator, .{ + const bundled_dep = try bundled_dep_item.asStringCloned(allocator) orelse break :invalid_field; + try deps.append(allocator, .{ .name = bundled_dep, .from_root_package_json = true, }); @@ -876,7 +871,7 @@ pub const PackCommand = struct { }; fn getPackageBins( - ctx: *Context, + allocator: std.mem.Allocator, json: Expr, ) OOM![]const BinInfo { var bins: std.ArrayListUnmanaged(BinInfo) = .{}; @@ -884,10 +879,10 @@ pub const PackCommand = struct { var path_buf: PathBuffer = undefined; if (json.asProperty("bin")) |bin| { - if (bin.expr.asString(ctx.allocator)) |bin_str| { + if (bin.expr.asString(allocator)) |bin_str| { const normalized = bun.path.normalizeBuf(bin_str, &path_buf, .posix); - try bins.append(ctx.allocator, .{ - .path = try ctx.allocator.dupe(u8, normalized), + try bins.append(allocator, .{ + .path = try allocator.dupe(u8, normalized), .type = .file, }); return bins.items; @@ -899,10 +894,10 @@ pub const PackCommand = struct { for (bin_obj.properties.slice()) |bin_prop| { if (bin_prop.value) |bin_prop_value| { - if (bin_prop_value.asString(ctx.allocator)) |bin_str| { + if (bin_prop_value.asString(allocator)) |bin_str| { const normalized = bun.path.normalizeBuf(bin_str, &path_buf, .posix); - try bins.append(ctx.allocator, .{ - .path = try ctx.allocator.dupe(u8, normalized), + try bins.append(allocator, .{ + .path = try allocator.dupe(u8, normalized), .type = .file, }); } @@ -919,10 +914,10 @@ pub const PackCommand = struct { switch (directories.expr.data) { .e_object => |directories_obj| { if (directories_obj.asProperty("bin")) |bin| { - if (bin.expr.asString(ctx.allocator)) |bin_str| { + if (bin.expr.asString(allocator)) |bin_str| { const normalized = bun.path.normalizeBuf(bin_str, &path_buf, .posix); - try bins.append(ctx.allocator, .{ - .path = try ctx.allocator.dupe(u8, normalized), + try bins.append(allocator, .{ + .path = try allocator.dupe(u8, normalized), .type = .dir, }); } @@ -1079,11 +1074,12 @@ pub const PackCommand = struct { const BufferedFileReader = std.io.BufferedReader(1024 * 512, File.Reader); - fn pack( + pub fn pack( ctx: *Context, abs_package_json_path: stringZ, comptime log_level: LogLevel, - ) PackError!void { + comptime for_publish: bool, + ) PackError(for_publish)!if (for_publish) Publish.Context(true) else void { const manager = ctx.manager; const json = switch (manager.workspace_package_json_cache.getWithPath(manager.allocator, manager.log, abs_package_json_path, .{ .guess_indentation = true, @@ -1104,16 +1100,54 @@ pub const PackCommand = struct { .entry => |entry| entry, }; + if (comptime for_publish) 
{ + if (json.root.get("publishConfig")) |config| { + if (manager.options.publish_config.tag.len == 0) { + if (try config.getStringCloned(ctx.allocator, "tag")) |tag| { + manager.options.publish_config.tag = tag; + } + } + if (manager.options.publish_config.access == null) { + if (try config.getString(ctx.allocator, "access")) |access| { + manager.options.publish_config.access = PackageManager.Options.Access.fromStr(access[0]) orelse { + Output.errGeneric("invalid `access` value: '{s}'", .{access[0]}); + Global.crash(); + }; + } + } + } + + // maybe otp + } + const package_name_expr: Expr = json.root.get("name") orelse return error.MissingPackageName; - const package_name = package_name_expr.asStringCloned(ctx.allocator) orelse return error.InvalidPackageName; - defer ctx.allocator.free(package_name); + const package_name = try package_name_expr.asStringCloned(ctx.allocator) orelse return error.InvalidPackageName; + if (comptime for_publish) { + const is_scoped = try Dependency.isScopedPackageName(package_name); + if (manager.options.publish_config.access) |access| { + if (access == .restricted and !is_scoped) { + return error.RestrictedUnscopedPackage; + } + } + } + defer if (comptime !for_publish) ctx.allocator.free(package_name); if (package_name.len == 0) return error.InvalidPackageName; const package_version_expr: Expr = json.root.get("version") orelse return error.MissingPackageVersion; - const package_version = package_version_expr.asStringCloned(ctx.allocator) orelse return error.InvalidPackageVersion; - defer ctx.allocator.free(package_version); + const package_version = try package_version_expr.asStringCloned(ctx.allocator) orelse return error.InvalidPackageVersion; + defer if (comptime !for_publish) ctx.allocator.free(package_version); if (package_version.len == 0) return error.InvalidPackageVersion; + if (comptime for_publish) { + if (json.root.get("private")) |private| { + if (private.asBool()) |is_private| { + if (is_private) { + return error.PrivatePackage; + } + } + } + } + var this_bundler: bun.bundler.Bundler = undefined; _ = RunCommand.configureEnvForRun( @@ -1134,12 +1168,38 @@ pub const PackCommand = struct { const abs_workspace_path: string = strings.withoutTrailingSlash(strings.withoutSuffixComptime(abs_package_json_path, "package.json")); - const postpack_script: ?string = postpack_script: { + const postpack_script, const publish_script: ?[]const u8, const postpublish_script: ?[]const u8 = post_scripts: { // --ignore-scripts - if (!manager.options.do.run_scripts) break :postpack_script null; + if (!manager.options.do.run_scripts) break :post_scripts .{ null, null, null }; - const scripts = json.root.asProperty("scripts") orelse break :postpack_script null; - if (scripts.expr.data != .e_object) break :postpack_script null; + const scripts = json.root.asProperty("scripts") orelse break :post_scripts .{ null, null, null }; + if (scripts.expr.data != .e_object) break :post_scripts .{ null, null, null }; + + if (comptime for_publish) { + if (scripts.expr.get("prepublishOnly")) |prepublish_only_script_str| { + if (prepublish_only_script_str.asString(ctx.allocator)) |prepublish_only| { + _ = RunCommand.runPackageScriptForeground( + ctx.command_ctx, + ctx.allocator, + prepublish_only, + "prepublishOnly", + abs_workspace_path, + this_bundler.env, + &.{}, + manager.options.log_level == .silent, + ctx.command_ctx.debug.use_system_shell, + ) catch |err| { + switch (err) { + error.MissingShell => { + Output.errGeneric("failed to find shell executable to run prepublishOnly 
script", .{}); + Global.crash(); + }, + error.OutOfMemory => |oom| return oom, + } + }; + } + } + } if (scripts.expr.get("prepack")) |prepack_script| { if (prepack_script.asString(ctx.allocator)) |prepack_script_str| { @@ -1189,13 +1249,25 @@ pub const PackCommand = struct { } } + var postpack_script: ?[]const u8 = null; if (scripts.expr.get("postpack")) |postpack| { - if (postpack.asString(ctx.allocator)) |postpack_str| { - break :postpack_script postpack_str; - } + postpack_script = postpack.asString(ctx.allocator); } - break :postpack_script null; + if (comptime for_publish) { + var publish_script: ?[]const u8 = null; + var postpublish_script: ?[]const u8 = null; + if (scripts.expr.get("publish")) |publish| { + publish_script = try publish.asStringCloned(ctx.allocator); + } + if (scripts.expr.get("postpublish")) |postpublish| { + postpublish_script = try postpublish.asStringCloned(ctx.allocator); + } + + break :post_scripts .{ postpack_script, publish_script, postpublish_script }; + } + + break :post_scripts .{ postpack_script, null, null }; }; var root_dir = root_dir: { @@ -1211,8 +1283,8 @@ pub const PackCommand = struct { }; defer root_dir.close(); - ctx.bundled_deps = try getBundledDeps(ctx, json.root, "bundledDependencies") orelse - try getBundledDeps(ctx, json.root, "bundleDependencies") orelse + ctx.bundled_deps = try getBundledDeps(ctx.allocator, json.root, "bundledDependencies") orelse + try getBundledDeps(ctx.allocator, json.root, "bundleDependencies") orelse .{}; var pack_queue = pack_queue: { @@ -1225,7 +1297,7 @@ pub const PackCommand = struct { var files_array = _files_array; while (files_array.next()) |files_entry| { if (files_entry.asString(ctx.allocator)) |file_entry_str| { - const parsed = try Pattern.fromUTF8(ctx, file_entry_str) orelse continue; + const parsed = try Pattern.fromUTF8(ctx.allocator, file_entry_str) orelse continue; try includes.append(ctx.allocator, parsed); continue; } @@ -1234,7 +1306,7 @@ pub const PackCommand = struct { } break :pack_queue try iterateIncludedProjectTree( - ctx, + ctx.allocator, includes.items, root_dir, log_level, @@ -1248,7 +1320,7 @@ pub const PackCommand = struct { // pack from project root break :pack_queue try iterateProjectTree( - ctx, + ctx.allocator, root_dir, log_level, ); @@ -1266,15 +1338,23 @@ pub const PackCommand = struct { printArchivedFilesAndPackages(ctx, root_dir, true, &pack_queue, 0); - if (manager.options.pack_destination.len == 0) { - Output.pretty("\n{}\n", .{fmtTarballFilename(package_name, package_version)}); - } else { - var dest_buf: PathBuffer = undefined; - const abs_tarball_dest, _ = absTarballDestination(ctx, abs_workspace_path, package_name, package_version, &dest_buf); - Output.pretty("\n{s}\n", .{abs_tarball_dest}); + if (comptime !for_publish) { + if (manager.options.pack_destination.len == 0) { + Output.pretty("\n{}\n", .{fmtTarballFilename(package_name, package_version)}); + } else { + var dest_buf: PathBuffer = undefined; + const abs_tarball_dest, _ = absTarballDestination( + ctx.manager.options.pack_destination, + abs_workspace_path, + package_name, + package_version, + &dest_buf, + ); + Output.pretty("\n{s}\n", .{abs_tarball_dest}); + } } - ctx.printSummary(null, null, log_level); + Context.printSummary(ctx.stats, null, null, log_level); if (postpack_script) |postpack_script_str| { _ = RunCommand.runPackageScriptForeground( @@ -1297,10 +1377,37 @@ pub const PackCommand = struct { } }; } + + if (comptime for_publish) { + var dest_buf: bun.PathBuffer = undefined; + const abs_tarball_dest, _ = 
absTarballDestination( + ctx.manager.options.pack_destination, + abs_workspace_path, + package_name, + package_version, + &dest_buf, + ); + return .{ + .allocator = ctx.allocator, + .command_ctx = ctx.command_ctx, + .manager = manager, + .package_name = package_name, + .package_version = package_version, + .abs_tarball_path = try ctx.allocator.dupeZ(u8, abs_tarball_dest), + .tarball_bytes = "", + .shasum = undefined, + .integrity = undefined, + .uses_workspaces = false, + .publish_script = publish_script, + .postpublish_script = postpublish_script, + .script_env = this_bundler.env, + }; + } + return; } - const bins = try getPackageBins(ctx, json.root); + const bins = try getPackageBins(ctx.allocator, json.root); defer for (bins) |bin| ctx.allocator.free(bin.path); var print_buf = std.ArrayList(u8).init(ctx.allocator); @@ -1316,7 +1423,7 @@ pub const PackCommand = struct { }, else => {}, } - switch (archive.writeSetCompressionGzip()) { + switch (archive.writeAddFilterGzip()) { .failed, .fatal, .warn => { Output.errGeneric("failed to set archive compression to gzip: {s}", .{archive.errorString()}); Global.crash(); @@ -1337,6 +1444,14 @@ pub const PackCommand = struct { } print_buf.clearRetainingCapacity(); + switch (archive.writeSetFilterOption(null, "os", "Unknown")) { + .failed, .fatal, .warn => { + Output.errGeneric("failed to set os to `Unknown`: {s}", .{archive.errorString()}); + Global.crash(); + }, + else => {}, + } + switch (archive.writeSetOptions("gzip:!timestamp")) { .failed, .fatal, .warn => { Output.errGeneric("failed to unset gzip timestamp option: {s}", .{archive.errorString()}); @@ -1347,7 +1462,7 @@ pub const PackCommand = struct { var dest_buf: PathBuffer = undefined; const abs_tarball_dest, const abs_tarball_dest_dir_end = absTarballDestination( - ctx, + ctx.manager.options.pack_destination, abs_workspace_path, package_name, package_version, @@ -1385,7 +1500,7 @@ pub const PackCommand = struct { var entry = Archive.Entry.new2(archive); - const package_json_size = archive_with_progress: { + const package_json = archive_with_progress: { var progress: if (log_level == .silent) void else Progress = if (comptime log_level == .silent) {} else .{}; var node = if (comptime log_level == .silent) {} else node: { progress.supports_ansi_escape_codes = Output.enable_ansi_colors; @@ -1395,7 +1510,7 @@ pub const PackCommand = struct { }; defer if (comptime log_level != .silent) node.end(); - entry, const edited_package_json_size = try editAndArchivePackageJSON(ctx, archive, entry, root_dir, json); + entry, const edited_package_json = try editAndArchivePackageJSON(ctx, archive, entry, root_dir, json); if (comptime log_level != .silent) node.completeOne(); while (pack_queue.removeOrNull()) |pathname| { @@ -1461,7 +1576,7 @@ pub const PackCommand = struct { ); } - break :archive_with_progress edited_package_json_size; + break :archive_with_progress edited_package_json; }; entry.free(); @@ -1482,10 +1597,10 @@ pub const PackCommand = struct { else => {}, } - var sha1_digest: sha.SHA1.Digest = undefined; - var sha512_digest: sha.SHA512.Digest = undefined; + var shasum: sha.SHA1.Digest = undefined; + var integrity: sha.SHA512.Digest = undefined; - { + const tarball_bytes = tarball_bytes: { const tarball_file = File.open(abs_tarball_dest, bun.O.RDONLY, 0).unwrap() catch |err| { Output.err(err, "failed to open tarball at: \"{s}\"", .{abs_tarball_dest}); Global.crash(); @@ -1498,6 +1613,23 @@ pub const PackCommand = struct { var sha512 = sha.SHA512.init(); defer sha512.deinit(); + if (comptime 
for_publish) { + const tarball_bytes = tarball_file.readToEnd(ctx.allocator).unwrap() catch |err| { + Output.err(err, "failed to read tarball: \"{s}\"", .{abs_tarball_dest}); + Global.crash(); + }; + + sha1.update(tarball_bytes); + sha512.update(tarball_bytes); + + sha1.final(&shasum); + sha512.final(&integrity); + + ctx.stats.packed_size = tarball_bytes.len; + + break :tarball_bytes tarball_bytes; + } + file_reader.* = .{ .unbuffered_reader = tarball_file.reader(), }; @@ -1517,29 +1649,36 @@ pub const PackCommand = struct { }; } - sha1.final(&sha1_digest); - sha512.final(&sha512_digest); + sha1.final(&shasum); + sha512.final(&integrity); ctx.stats.packed_size = size; - } + }; printArchivedFilesAndPackages( ctx, root_dir, false, pack_list, - package_json_size, + package_json.len, ); - if (manager.options.pack_destination.len == 0) { - Output.pretty("\n{}\n", .{fmtTarballFilename(package_name, package_version)}); - } else { - Output.pretty("\n{s}\n", .{abs_tarball_dest}); + if (comptime !for_publish) { + if (manager.options.pack_destination.len == 0) { + Output.pretty("\n{}\n", .{fmtTarballFilename(package_name, package_version)}); + } else { + Output.pretty("\n{s}\n", .{abs_tarball_dest}); + } } - ctx.printSummary(sha1_digest, sha512_digest, log_level); + Context.printSummary(ctx.stats, shasum, integrity, log_level); + + if (comptime for_publish) { + Output.flush(); + } if (postpack_script) |postpack_script_str| { + Output.pretty("\n", .{}); _ = RunCommand.runPackageScriptForeground( ctx.command_ctx, ctx.allocator, @@ -1560,10 +1699,28 @@ pub const PackCommand = struct { } }; } + + if (comptime for_publish) { + return .{ + .allocator = ctx.allocator, + .command_ctx = ctx.command_ctx, + .manager = manager, + .package_name = package_name, + .package_version = package_version, + .abs_tarball_path = try ctx.allocator.dupeZ(u8, abs_tarball_dest), + .tarball_bytes = tarball_bytes, + .shasum = shasum, + .integrity = integrity, + .uses_workspaces = false, + .publish_script = publish_script, + .postpublish_script = postpublish_script, + .script_env = this_bundler.env, + }; + } } fn absTarballDestination( - ctx: *Context, + pack_destination: string, abs_workspace_path: string, package_name: string, package_version: string, @@ -1572,7 +1729,7 @@ pub const PackCommand = struct { const tarball_destination_dir = bun.path.joinAbsStringBuf( abs_workspace_path, dest_buf, - &.{ctx.manager.options.pack_destination}, + &.{pack_destination}, .auto, ); @@ -1634,8 +1791,8 @@ pub const PackCommand = struct { entry: *Archive.Entry, root_dir: std.fs.Dir, json: *PackageManager.WorkspacePackageJSONCache.MapEntry, - ) OOM!struct { *Archive.Entry, usize } { - const edited_package_json = try editRootPackageJSON(ctx, json); + ) OOM!struct { *Archive.Entry, string } { + const edited_package_json = try editRootPackageJSON(ctx.allocator, ctx.lockfile, json); const stat = bun.sys.fstatat(bun.toFD(root_dir), "package.json").unwrap() catch |err| { Output.err(err, "failed to stat package.json", .{}); @@ -1661,7 +1818,7 @@ pub const PackCommand = struct { ctx.stats.unpacked_size += @intCast(archive.writeData(edited_package_json)); - return .{ entry.clear(), edited_package_json.len }; + return .{ entry.clear(), edited_package_json }; } fn addArchiveEntry( @@ -1693,11 +1850,7 @@ pub const PackCommand = struct { var perm: bun.Mode = @intCast(stat.mode); // https://github.com/npm/cli/blob/ec105f400281a5bfd17885de1ea3d54d0c231b27/node_modules/pacote/lib/util/tar-create-options.js#L20 - if (comptime !Environment.isWindows) { - // on 
windows we create a shim executable. the bin file permissions - // do not need to change - if (isPackageBin(bins, filename)) perm |= 0o111; - } + if (isPackageBin(bins, filename)) perm |= 0o111; entry.setPerm(@intCast(perm)); // '1985-10-26T08:15:00.000Z' @@ -1734,7 +1887,8 @@ pub const PackCommand = struct { /// Strip workspace protocols from dependency versions then /// returns the printed json fn editRootPackageJSON( - ctx: *Context, + allocator: std.mem.Allocator, + maybe_lockfile: ?*Lockfile, json: *PackageManager.WorkspacePackageJSONCache.MapEntry, ) OOM!string { for ([_]string{ @@ -1750,7 +1904,7 @@ pub const PackCommand = struct { if (dependency.key == null) continue; if (dependency.value == null) continue; - const package_spec = dependency.value.?.asString(ctx.allocator) orelse continue; + const package_spec = dependency.value.?.asString(allocator) orelse continue; if (strings.withoutPrefixIfPossibleComptime(package_spec, "workspace:")) |without_workspace_protocol| { // TODO: make semver parsing more strict. `^`, `~` are not valid @@ -1771,7 +1925,7 @@ pub const PackCommand = struct { // TODO: this might be too strict const c = without_workspace_protocol[0]; if (c == '^' or c == '~' or c == '*') { - const dependency_name = dependency.key.?.asString(ctx.allocator) orelse { + const dependency_name = dependency.key.?.asString(allocator) orelse { Output.errGeneric("expected string value for dependency name in \"{s}\"", .{ dependency_group, }); @@ -1780,15 +1934,15 @@ pub const PackCommand = struct { failed_to_resolve: { // find the current workspace version and append to package spec without `workspace:` - const lockfile = ctx.lockfile orelse break :failed_to_resolve; + const lockfile = maybe_lockfile orelse break :failed_to_resolve; const workspace_version = lockfile.workspace_versions.get(Semver.String.Builder.stringHash(dependency_name)) orelse break :failed_to_resolve; dependency.value = Expr.allocate( - ctx.manager.allocator, + allocator, E.String, .{ - .data = try std.fmt.allocPrint(ctx.allocator, "{s}{}", .{ + .data = try std.fmt.allocPrint(allocator, "{s}{}", .{ switch (c) { '^' => "^", '~' => "~", @@ -1814,10 +1968,10 @@ pub const PackCommand = struct { } dependency.value = Expr.allocate( - ctx.manager.allocator, + allocator, E.String, .{ - .data = try ctx.allocator.dupe(u8, without_workspace_protocol), + .data = try allocator.dupe(u8, without_workspace_protocol), }, .{}, ); @@ -1830,8 +1984,8 @@ pub const PackCommand = struct { } const has_trailing_newline = json.source.contents.len > 0 and json.source.contents[json.source.contents.len - 1] == '\n'; - var buffer_writer = try js_printer.BufferWriter.init(ctx.allocator); - try buffer_writer.buffer.list.ensureTotalCapacity(ctx.allocator, json.source.contents.len + 1); + var buffer_writer = try js_printer.BufferWriter.init(allocator); + try buffer_writer.buffer.list.ensureTotalCapacity(allocator, json.source.contents.len + 1); buffer_writer.append_newline = has_trailing_newline; var package_json_writer = js_printer.BufferPrinter.init(buffer_writer); @@ -1872,7 +2026,7 @@ pub const PackCommand = struct { @"leading **/": bool, - pub fn fromUTF8(ctx: *Context, pattern: string) OOM!?Pattern { + pub fn fromUTF8(allocator: std.mem.Allocator, pattern: string) OOM!?Pattern { var remain = pattern; var @"has leading **/, (could start with '!')" = false; const has_leading_or_middle_slash, const has_trailing_slash, const add_negate = check_slashes: { @@ -1915,10 +2069,10 @@ pub const PackCommand = struct { }; const length = 
bun.simdutf.length.utf32.from.utf8.le(remain) + @intFromBool(add_negate); - const buf = try ctx.allocator.alloc(u32, length); + const buf = try allocator.alloc(u32, length); const result = bun.simdutf.convert.utf8.to.utf32.with_errors.le(remain, buf[@intFromBool(add_negate)..]); if (!result.isSuccessful()) { - ctx.allocator.free(buf); + allocator.free(buf); return null; } @@ -1982,9 +2136,9 @@ pub const PackCommand = struct { } // ignore files are always ignored, don't need to worry about opening or reading twice - pub fn readFromDisk(ctx: *Context, dir: std.fs.Dir, dir_depth: usize) OOM!?IgnorePatterns { + pub fn readFromDisk(allocator: std.mem.Allocator, dir: std.fs.Dir, dir_depth: usize) OOM!?IgnorePatterns { var patterns: std.ArrayListUnmanaged(Pattern) = .{}; - errdefer patterns.deinit(ctx.allocator); + errdefer patterns.deinit(allocator); var ignore_kind: Kind = .@".npmignore"; @@ -2005,10 +2159,10 @@ pub const PackCommand = struct { }; defer ignore_file.close(); - const contents = File.from(ignore_file).readToEnd(ctx.allocator).unwrap() catch |err| { + const contents = File.from(ignore_file).readToEnd(allocator).unwrap() catch |err| { ignoreFileFail(dir, ignore_kind, .read, err); }; - defer ctx.allocator.free(contents); + defer allocator.free(contents); var has_rel_path = false; @@ -2030,8 +2184,8 @@ pub const PackCommand = struct { if (trimmed.len == 0) continue; - const parsed = try Pattern.fromUTF8(ctx, trimmed) orelse continue; - try patterns.append(ctx.allocator, parsed); + const parsed = try Pattern.fromUTF8(allocator, trimmed) orelse continue; + try patterns.append(allocator, parsed); has_rel_path = has_rel_path or parsed.rel_path; } @@ -2190,39 +2344,64 @@ pub const bindings = struct { var entries_info = std.ArrayList(EntryInfo).init(bun.default_allocator); defer entries_info.deinit(); - const archive = libarchive.archive_read_new(); - defer { - _ = libarchive.archive_read_close(archive); - _ = libarchive.archive_read_free(archive); + const archive = Archive.readNew(); + + switch (archive.readSupportFormatTar()) { + .failed, .fatal, .warn => { + global.throw("failed to support tar: {s}", .{archive.errorString()}); + return .zero; + }, + else => {}, + } + switch (archive.readSupportFormatGnutar()) { + .failed, .fatal, .warn => { + global.throw("failed to support gnutar: {s}", .{archive.errorString()}); + return .zero; + }, + else => {}, + } + switch (archive.readSupportFilterGzip()) { + .failed, .fatal, .warn => { + global.throw("failed to support gzip compression: {s}", .{archive.errorString()}); + return .zero; + }, + else => {}, } - _ = libarchive.archive_read_support_format_tar(archive); - _ = libarchive.archive_read_support_format_gnutar(archive); - _ = libarchive.archive_read_support_compression_gzip(archive); + switch (archive.readSetOptions("read_concatenated_archives")) { + .failed, .fatal, .warn => { + global.throw("failed to set read_concatenated_archives option: {s}", .{archive.errorString()}); + return .zero; + }, + else => {}, + } - _ = libarchive.archive_read_set_options(archive, "read_concatenated_archives"); + switch (archive.readOpenMemory(tarball)) { + .failed, .fatal, .warn => { + global.throw("failed to open archive in memory: {s}", .{archive.errorString()}); + return .zero; + }, + else => {}, + } - _ = libarchive.archive_read_open_memory(archive, tarball.ptr, tarball.len); - - var archive_entry: *libarchive.archive_entry = undefined; - - var header_status: Archive.Result = @enumFromInt(libarchive.archive_read_next_header(archive, &archive_entry)); + var 
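archive_entry: *Archive.Entry = undefined; + var header_status = archive.readNextHeader(&archive_entry); + // note: in the loop below, `.retry` re-polls readNextHeader via the loop continuation, and `.warn` falls through to `else`, so the entry is still processed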
var read_buf = std.ArrayList(u8).init(bun.default_allocator); defer read_buf.deinit(); - while (header_status != .eof) : (header_status = @enumFromInt(libarchive.archive_read_next_header(archive, &archive_entry))) { + while (header_status != .eof) : (header_status = archive.readNextHeader(&archive_entry)) { switch (header_status) { .eof => unreachable, .retry => continue, .failed, .fatal => { - global.throw("failed to read next archive header: {s}", .{Archive.errorString(@ptrCast(archive))}); + global.throw("failed to read archive header: {s}", .{Archive.errorString(@ptrCast(archive))}); return .zero; }, else => { - const pathname = std.mem.sliceTo(libarchive.archive_entry_pathname(archive_entry), 0); - const kind = bun.C.kindFromMode(libarchive.archive_entry_filetype(archive_entry)); - const perm = libarchive.archive_entry_perm(archive_entry); + const pathname = archive_entry.pathname(); + const kind = bun.C.kindFromMode(archive_entry.filetype()); + const perm = archive_entry.perm(); var entry_info: EntryInfo = .{ .pathname = String.createUTF8(pathname), @@ -2231,11 +2410,11 @@ pub const bindings = struct { }; if (kind == .file) { - const size: usize = @intCast(libarchive.archive_entry_size(archive_entry)); - read_buf.ensureTotalCapacity(size) catch bun.outOfMemory(); + const size: usize = @intCast(archive_entry.size()); + read_buf.resize(size) catch bun.outOfMemory(); defer read_buf.clearRetainingCapacity(); - const read = libarchive.archive_read_data(archive, read_buf.items.ptr, size); + const read = archive.readData(read_buf.items); if (read < 0) { global.throw("failed to read archive entry \"{}\": {s}", .{ bun.fmt.fmtPath(u8, pathname, .{}), @@ -2252,6 +2431,21 @@ pub const bindings = struct { } } + switch (archive.readClose()) { + .failed, .fatal, .warn => { + global.throw("failed to close read archive: {s}", .{archive.errorString()}); + return .zero; + }, + else => {}, + } + switch (archive.readFree()) { + .failed, .fatal, .warn => { + global.throw("failed to free read archive: {s}", .{archive.errorString()}); + return .zero; + }, + else => {}, + } + const entries = JSArray.createEmpty(global, entries_info.items.len); for (entries_info.items, 0..)
|entry, i| { diff --git a/src/cli/package_manager_command.zig b/src/cli/package_manager_command.zig index 90c2cb11d3..f1ce6f4ad8 100644 --- a/src/cli/package_manager_command.zig +++ b/src/cli/package_manager_command.zig @@ -24,7 +24,7 @@ const UntrustedCommand = @import("./pm_trusted_command.zig").UntrustedCommand; const TrustCommand = @import("./pm_trusted_command.zig").TrustCommand; const DefaultTrustedCommand = @import("./pm_trusted_command.zig").DefaultTrustedCommand; const Environment = bun.Environment; -const PackCommand = @import("./pack_command.zig").PackCommand; +pub const PackCommand = @import("./pack_command.zig").PackCommand; const ByName = struct { dependencies: []const Dependency, diff --git a/src/cli/pm_trusted_command.zig b/src/cli/pm_trusted_command.zig index 159aad49b8..ae9a57a2d1 100644 --- a/src/cli/pm_trusted_command.zig +++ b/src/cli/pm_trusted_command.zig @@ -370,7 +370,7 @@ pub const TrustCommand = struct { const package_json_source = logger.Source.initPathString(PackageManager.package_json_cwd, package_json_contents); - var package_json = bun.JSON.ParseJSONUTF8(&package_json_source, ctx.log, ctx.allocator) catch |err| { + var package_json = bun.JSON.parseUTF8(&package_json_source, ctx.log, ctx.allocator) catch |err| { switch (Output.enable_ansi_colors) { inline else => |enable_ansi_colors| ctx.log.printForLogLevelWithEnableAnsiColors(Output.errorWriter(), enable_ansi_colors) catch {}, } diff --git a/src/cli/publish_command.zig b/src/cli/publish_command.zig new file mode 100644 index 0000000000..3ffe09ed41 --- /dev/null +++ b/src/cli/publish_command.zig @@ -0,0 +1,1074 @@ +const std = @import("std"); +const bun = @import("root").bun; +const Command = bun.CLI.Command; +const Output = bun.Output; +const Global = bun.Global; +const http = bun.http; +const OOM = bun.OOM; +const Headers = http.Headers; +const HeaderBuilder = http.HeaderBuilder; +const MutableString = bun.MutableString; +const URL = bun.URL; +const install = bun.install; +const PackageManager = install.PackageManager; +const strings = bun.strings; +const string = bun.string; +const stringZ = bun.stringZ; +const File = bun.sys.File; +const JSON = bun.JSON; +const sha = bun.sha; +const path = bun.path; +const FileSystem = bun.fs.FileSystem; +const Environment = bun.Environment; +const Archive = bun.libarchive.lib.Archive; +const logger = bun.logger; +const Dependency = install.Dependency; +const Pack = bun.CLI.PackCommand; +const Lockfile = install.Lockfile; +const MimeType = http.MimeType; +const Expr = bun.js_parser.Expr; +const prompt = bun.CLI.InitCommand.prompt; +const Npm = install.Npm; +const Run = bun.CLI.RunCommand; +const DotEnv = bun.DotEnv; +const Open = @import("../open.zig"); + +pub const PublishCommand = struct { + pub fn Context(comptime directory_publish: bool) type { + return struct { + manager: *PackageManager, + allocator: std.mem.Allocator, + command_ctx: Command.Context, + + package_name: string, + package_version: string, + abs_tarball_path: stringZ, + tarball_bytes: string, + shasum: sha.SHA1.Digest, + integrity: sha.SHA512.Digest, + uses_workspaces: bool, + + publish_script: if (directory_publish) ?[]const u8 else void = if (directory_publish) null else {}, + postpublish_script: if (directory_publish) ?[]const u8 else void = if (directory_publish) null else {}, + script_env: if (directory_publish) *DotEnv.Loader else void, + + const FromTarballError = OOM || error{ + MissingPackageJSON, + InvalidPackageJSON, + MissingPackageName, + MissingPackageVersion, + InvalidPackageName, + 
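// the remaining cases mirror npm's refusals: an empty `version` string, "private": true, and restricted access on an unscoped name +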
InvalidPackageVersion, + PrivatePackage, + RestrictedUnscopedPackage, + }; + + /// Retrieve information for publishing from a tarball path, `bun publish path/to/tarball.tgz` + pub fn fromTarballPath( + ctx: Command.Context, + manager: *PackageManager, + tarball_path: string, + ) FromTarballError!Context(directory_publish) { + var abs_buf: bun.PathBuffer = undefined; + const abs_tarball_path = path.joinAbsStringBufZ( + FileSystem.instance.top_level_dir, + &abs_buf, + &[_]string{tarball_path}, + .auto, + ); + + const tarball_bytes = File.readFrom(bun.invalid_fd, abs_tarball_path, ctx.allocator).unwrap() catch |err| { + Output.err(err, "failed to read tarball: '{s}'", .{tarball_path}); + Global.crash(); + }; + + var maybe_package_json_contents: ?[]const u8 = null; + + var iter = switch (Archive.Iterator.init(tarball_bytes)) { + .err => |err| { + Output.errGeneric("{s}: {s}", .{ + err.message, + err.archive.errorString(), + }); + + Global.crash(); + }, + .result => |res| res, + }; + + var unpacked_size: usize = 0; + var total_files: usize = 0; + + Output.print("\n", .{}); + + while (switch (iter.next()) { + .err => |err| { + Output.errGeneric("{s}: {s}", .{ err.message, err.archive.errorString() }); + Global.crash(); + }, + .result => |res| res, + }) |next| { + const pathname = if (comptime Environment.isWindows) + next.entry.pathnameW() + else + next.entry.pathname(); + + const size = next.entry.size(); + + unpacked_size += @intCast(@max(0, size)); + total_files += @intFromBool(next.kind == .file); + + // this is option `strip: 1` (npm expects a `package/` prefix for all paths) + if (strings.indexOfAnyT(bun.OSPathChar, pathname, "/\\")) |slash| { + const stripped = pathname[slash + 1 ..]; + if (stripped.len == 0) continue; + + Output.pretty("packed {} {}\n", .{ + bun.fmt.size(size, .{ .space_between_number_and_unit = false }), + bun.fmt.fmtOSPath(stripped, .{}), + }); + + if (next.kind != .file) continue; + + if (strings.indexOfAnyT(bun.OSPathChar, stripped, "/\\") == null) { + + // check for package.json, readme.md, ... 
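+ // note: only entries sitting directly under the stripped `package/` prefix reach this block; nested paths fail the slash check above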
+ const filename = pathname[slash + 1 ..]; + + if (maybe_package_json_contents == null and strings.eqlCaseInsensitiveT(bun.OSPathChar, filename, "package.json")) { + maybe_package_json_contents = switch (try next.readEntryData(ctx.allocator, iter.archive)) { + .err => |err| { + Output.errGeneric("{s}: {s}", .{ err.message, err.archive.errorString() }); + Global.crash(); + }, + .result => |bytes| bytes, + }; + } + } + } else { + Output.pretty("packed {} {}\n", .{ + bun.fmt.size(size, .{ .space_between_number_and_unit = false }), + bun.fmt.fmtOSPath(pathname, .{}), + }); + } + } + + switch (iter.deinit()) { + .err => |err| { + Output.errGeneric("{s}: {s}", .{ err.message, err.archive.errorString() }); + Global.crash(); + }, + .result => {}, + } + + const package_json_contents = maybe_package_json_contents orelse return error.MissingPackageJSON; + + const package_name, const package_version = package_info: { + defer ctx.allocator.free(package_json_contents); + + const source = logger.Source.initPathString("package.json", package_json_contents); + const json = JSON.parsePackageJSONUTF8(&source, manager.log, ctx.allocator) catch |err| { + return switch (err) { + error.OutOfMemory => |oom| return oom, + else => error.InvalidPackageJSON, + }; + }; + + if (json.get("private")) |private| { + if (private.asBool()) |is_private| { + if (is_private) { + return error.PrivatePackage; + } + } + } + + if (json.get("publishConfig")) |config| { + if (manager.options.publish_config.tag.len == 0) { + if (try config.getStringCloned(ctx.allocator, "tag")) |tag| { + manager.options.publish_config.tag = tag; + } + } + + if (manager.options.publish_config.access == null) { + if (try config.getString(ctx.allocator, "access")) |access| { + manager.options.publish_config.access = PackageManager.Options.Access.fromStr(access[0]) orelse { + Output.errGeneric("invalid `access` value: '{s}'", .{access[0]}); + Global.crash(); + }; + } + } + + // maybe otp + } + + const name = try json.getStringCloned(ctx.allocator, "name") orelse return error.MissingPackageName; + const is_scoped = try Dependency.isScopedPackageName(name); + + if (manager.options.publish_config.access) |access| { + if (access == .restricted and !is_scoped) { + return error.RestrictedUnscopedPackage; + } + } + + const version = try json.getStringCloned(ctx.allocator, "version") orelse return error.MissingPackageVersion; + if (version.len == 0) return error.InvalidPackageVersion; + + break :package_info .{ name, version }; + }; + + var shasum: sha.SHA1.Digest = undefined; + var sha1 = sha.SHA1.init(); + defer sha1.deinit(); + + sha1.update(tarball_bytes); + sha1.final(&shasum); + + var integrity: sha.SHA512.Digest = undefined; + var sha512 = sha.SHA512.init(); + defer sha512.deinit(); + + sha512.update(tarball_bytes); + sha512.final(&integrity); + + Pack.Context.printSummary( + .{ + .total_files = total_files, + .unpacked_size = unpacked_size, + .packed_size = tarball_bytes.len, + }, + shasum, + integrity, + manager.options.log_level, + ); + + return .{ + .manager = manager, + .allocator = ctx.allocator, + .package_name = package_name, + .package_version = package_version, + .abs_tarball_path = try ctx.allocator.dupeZ(u8, abs_tarball_path), + .tarball_bytes = tarball_bytes, + .shasum = shasum, + .integrity = integrity, + .uses_workspaces = false, + .command_ctx = ctx, + .script_env = {}, + }; + } + + const FromWorkspaceError = Pack.PackError(true); + + /// `bun publish` without a tarball path. 
Automatically pack the current workspace and get + /// information required for publishing + pub fn fromWorkspace( + ctx: Command.Context, + manager: *PackageManager, + ) FromWorkspaceError!Context(directory_publish) { + var lockfile: Lockfile = undefined; + const load_from_disk_result = lockfile.loadFromDisk( + manager, + manager.allocator, + manager.log, + manager.options.lockfile_path, + false, + ); + + var pack_ctx: Pack.Context = .{ + .allocator = ctx.allocator, + .manager = manager, + .command_ctx = ctx, + .lockfile = switch (load_from_disk_result) { + .ok => |ok| ok.lockfile, + .not_found => null, + .err => |cause| err: { + switch (cause.step) { + .open_file => { + if (cause.value == error.ENOENT) break :err null; + Output.errGeneric("failed to open lockfile: {s}", .{@errorName(cause.value)}); + }, + .parse_file => { + Output.errGeneric("failed to parse lockfile: {s}", .{@errorName(cause.value)}); + }, + .read_file => { + Output.errGeneric("failed to read lockfile: {s}", .{@errorName(cause.value)}); + }, + .migrating => { + Output.errGeneric("failed to migrate lockfile: {s}", .{@errorName(cause.value)}); + }, + } + + if (manager.log.hasErrors()) { + switch (Output.enable_ansi_colors) { + inline else => |enable_ansi_colors| { + manager.log.printForLogLevelWithEnableAnsiColors(Output.errorWriter(), enable_ansi_colors) catch {}; + }, + } + } + + Global.crash(); + }, + }, + }; + + return switch (manager.options.log_level) { + inline else => |log_level| Pack.pack(&pack_ctx, manager.original_package_json_path, log_level, true), + }; + } + }; + } + + pub fn exec(ctx: Command.Context) !void { + Output.prettyln("bun publish v" ++ Global.package_json_version_with_sha ++ "", .{}); + Output.flush(); + + const cli = try PackageManager.CommandLineArguments.parse(ctx.allocator, .publish); + + const manager, const original_cwd = PackageManager.init(ctx, cli, .publish) catch |err| { + if (!cli.silent) { + if (err == error.MissingPackageJSON) { + Output.errGeneric("missing package.json, nothing to publish", .{}); + } + Output.errGeneric("failed to initialize bun install: {s}", .{@errorName(err)}); + } + Global.crash(); + }; + defer ctx.allocator.free(original_cwd); + + if (cli.positionals.len > 1) { + const context = Context(false).fromTarballPath(ctx, manager, cli.positionals[1]) catch |err| { + switch (err) { + error.OutOfMemory => bun.outOfMemory(), + error.MissingPackageName => { + Output.errGeneric("missing `name` string in package.json", .{}); + }, + error.MissingPackageVersion => { + Output.errGeneric("missing `version` string in package.json", .{}); + }, + error.InvalidPackageName, error.InvalidPackageVersion => { + Output.errGeneric("package.json `name` and `version` fields must be non-empty strings", .{}); + }, + error.MissingPackageJSON => { + Output.errGeneric("failed to find package.json in tarball '{s}'", .{cli.positionals[1]}); + }, + error.InvalidPackageJSON => { + switch (Output.enable_ansi_colors) { + inline else => |enable_ansi_colors| { + manager.log.printForLogLevelWithEnableAnsiColors(Output.errorWriter(), enable_ansi_colors) catch {}; + }, + } + Output.errGeneric("failed to parse tarball package.json", .{}); + }, + error.PrivatePackage => { + Output.errGeneric("attempted to publish a private package", .{}); + }, + error.RestrictedUnscopedPackage => { + Output.errGeneric("unable to restrict access to unscoped package", .{}); + }, + } + Global.crash(); + }; + + publish(false, &context) catch |err| { + switch (err) { + error.OutOfMemory => bun.outOfMemory(), + error.NeedAuth => { 
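+ // `publish()` returns `NeedAuth` when the registry scope has neither a token nor complete basic-auth credentials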
+ Output.errGeneric("missing authentication (run `bunx npm login`)", .{}); + Global.crash(); + }, + } + }; + + Output.prettyln("\n + {s}@{s}{s}", .{ + context.package_name, + Dependency.withoutBuildTag(context.package_version), + if (manager.options.dry_run) " (dry-run)" else "", + }); + + return; + } + + const context = Context(true).fromWorkspace(ctx, manager) catch |err| { + switch (err) { + error.OutOfMemory => bun.outOfMemory(), + error.MissingPackageName => { + Output.errGeneric("missing `name` string in package.json", .{}); + }, + error.MissingPackageVersion => { + Output.errGeneric("missing `version` string in package.json", .{}); + }, + error.InvalidPackageName, error.InvalidPackageVersion => { + Output.errGeneric("package.json `name` and `version` fields must be non-empty strings", .{}); + }, + error.MissingPackageJSON => { + Output.errGeneric("failed to find package.json from: '{s}'", .{FileSystem.instance.top_level_dir}); + }, + error.RestrictedUnscopedPackage => { + Output.errGeneric("unable to restrict access to unscoped package", .{}); + }, + error.PrivatePackage => { + Output.errGeneric("attempted to publish a private package", .{}); + }, + } + Global.crash(); + }; + + // TODO: read this into memory + _ = bun.sys.unlink(context.abs_tarball_path); + + publish(true, &context) catch |err| { + switch (err) { + error.OutOfMemory => bun.outOfMemory(), + error.NeedAuth => { + Output.errGeneric("missing authentication (run `bunx npm login`)", .{}); + Global.crash(); + }, + } + }; + + Output.prettyln("\n + {s}@{s}{s}", .{ + context.package_name, + Dependency.withoutBuildTag(context.package_version), + if (manager.options.dry_run) " (dry-run)" else "", + }); + + if (manager.options.do.run_scripts) { + const abs_workspace_path: string = strings.withoutTrailingSlash(strings.withoutSuffixComptime(manager.original_package_json_path, "package.json")); + if (context.publish_script) |publish_script| { + _ = Run.runPackageScriptForeground( + context.command_ctx, + context.allocator, + publish_script, + "publish", + abs_workspace_path, + context.script_env, + &.{}, + context.manager.options.log_level == .silent, + context.command_ctx.debug.use_system_shell, + ) catch |err| { + switch (err) { + error.MissingShell => { + Output.errGeneric("failed to find shell executable to run publish script", .{}); + Global.crash(); + }, + error.OutOfMemory => |oom| return oom, + } + }; + } + + if (context.postpublish_script) |postpublish_script| { + _ = Run.runPackageScriptForeground( + context.command_ctx, + context.allocator, + postpublish_script, + "postpublish", + abs_workspace_path, + context.script_env, + &.{}, + context.manager.options.log_level == .silent, + context.command_ctx.debug.use_system_shell, + ) catch |err| { + switch (err) { + error.MissingShell => { + Output.errGeneric("failed to find shell executable to run postpublish script", .{}); + Global.crash(); + }, + error.OutOfMemory => |oom| return oom, + } + }; + } + } + } + + const PublishError = OOM || error{ + NeedAuth, + }; + + pub fn publish( + comptime directory_publish: bool, + ctx: *const Context(directory_publish), + ) PublishError!void { + const registry = ctx.manager.scopeForPackageName(ctx.package_name); + + if (registry.token.len == 0 and (registry.url.password.len == 0 or registry.url.username.len == 0)) { + return error.NeedAuth; + } + + // continues from `printSummary` + Output.pretty( + \\Tag: {s} + \\Access: {s} + \\Registry: {s} + \\ + , .{ + if (ctx.manager.options.publish_config.tag.len > 0) 
ctx.manager.options.publish_config.tag else "latest", + if (ctx.manager.options.publish_config.access) |access| @tagName(access) else "default", + registry.url.href, + }); + + // dry-run stops here + if (ctx.manager.options.dry_run) return; + + const publish_req_body = try constructPublishRequestBody(directory_publish, ctx, registry); + + var print_buf: std.ArrayListUnmanaged(u8) = .{}; + defer print_buf.deinit(ctx.allocator); + var print_writer = print_buf.writer(ctx.allocator); + + const publish_headers = try constructPublishHeaders( + ctx.allocator, + &print_buf, + registry, + publish_req_body.len, + if (ctx.manager.options.publish_config.otp.len > 0) ctx.manager.options.publish_config.otp else null, + ctx.uses_workspaces, + ctx.manager.options.publish_config.auth_type, + ); + + var response_buf = try MutableString.init(ctx.allocator, 1024); + + try print_writer.print("{s}/{s}", .{ + strings.withoutTrailingSlash(registry.url.href), + bun.fmt.dependencyUrl(ctx.package_name), + }); + const publish_url = URL.parse(try ctx.allocator.dupe(u8, print_buf.items)); + print_buf.clearRetainingCapacity(); + + var req = http.AsyncHTTP.initSync( + ctx.allocator, + .PUT, + publish_url, + publish_headers.entries, + publish_headers.content.ptr.?[0..publish_headers.content.len], + &response_buf, + publish_req_body, + null, + null, + .follow, + ); + + const res = req.sendSync() catch |err| { + switch (err) { + error.OutOfMemory => |oom| return oom, + else => { + Output.err(err, "failed to publish package", .{}); + Global.crash(); + }, + } + }; + + switch (res.status_code) { + 400...std.math.maxInt(@TypeOf(res.status_code)) => { + const prompt_for_otp = prompt_for_otp: { + if (res.status_code != 401) break :prompt_for_otp false; + + if (authenticate: { + for (res.headers) |header| { + if (strings.eqlCaseInsensitiveASCII(header.name, "www-authenticate", true)) { + break :authenticate header.value; + } + } + break :authenticate null; + }) |@"www-authenticate"| { + var iter = strings.split(@"www-authenticate", ","); + while (iter.next()) |part| { + const trimmed = strings.trim(part, &strings.whitespace_chars); + if (strings.eqlCaseInsensitiveASCII(trimmed, "ipaddress", true)) { + Output.errGeneric("login is not allowed from your IP address", .{}); + Global.crash(); + } else if (strings.eqlCaseInsensitiveASCII(trimmed, "otp", true)) { + break :prompt_for_otp true; + } + } + + Output.errGeneric("unable to authenticate, need: {s}", .{@"www-authenticate"}); + Global.crash(); + } else if (strings.containsComptime(response_buf.list.items, "one-time pass")) { + // missing www-authenticate header but one-time pass is still included + break :prompt_for_otp true; + } + + break :prompt_for_otp false; + }; + + if (!prompt_for_otp) { + // general error + return handleResponseErrors(directory_publish, ctx, &req, &res, &response_buf, true); + } + + const otp = try getOTP(directory_publish, ctx, registry, &response_buf, &print_buf); + + const otp_headers = try constructPublishHeaders( + ctx.allocator, + &print_buf, + registry, + publish_req_body.len, + otp, + ctx.uses_workspaces, + ctx.manager.options.publish_config.auth_type, + ); + + response_buf.reset(); + + var otp_req = http.AsyncHTTP.initSync( + ctx.allocator, + .PUT, + publish_url, + otp_headers.entries, + otp_headers.content.ptr.?[0..otp_headers.content.len], + &response_buf, + publish_req_body, + null, + null, + .follow, + ); + + const otp_res = otp_req.sendSync() catch |err| { + switch (err) { + error.OutOfMemory => |oom| return oom, + else => { + Output.err(err, 
"failed to publish package", .{}); + Global.crash(); + }, + } + }; + + switch (otp_res.status_code) { + 400...std.math.maxInt(@TypeOf(otp_res.status_code)) => { + return handleResponseErrors(directory_publish, ctx, &otp_req, &otp_res, &response_buf, true); + }, + else => {}, + } + }, + else => {}, + } + } + + fn handleResponseErrors( + comptime directory_publish: bool, + ctx: *const Context(directory_publish), + req: *const http.AsyncHTTP, + res: *const bun.picohttp.Response, + response_body: *MutableString, + comptime check_for_success: bool, + ) OOM!void { + const message = message: { + const source = logger.Source.initPathString("???", response_body.list.items); + const json = JSON.parseUTF8(&source, ctx.manager.log, ctx.allocator) catch |err| { + switch (err) { + error.OutOfMemory => |oom| return oom, + else => break :message null, + } + }; + + if (comptime check_for_success) { + if (json.get("success")) |success_expr| { + if (success_expr.asBool()) |successful| { + if (successful) { + // possible to hit this with otp responses + return; + } + } + } + } + + const @"error", _ = try json.getString(ctx.allocator, "error") orelse break :message null; + break :message @"error"; + }; + + Output.prettyErrorln("\n{d}{s}{s}: {s}\n{s}{s}", .{ + res.status_code, + if (res.status.len > 0) " " else "", + res.status, + bun.fmt.redactedNpmUrl(req.url.href), + if (message != null) "\n - " else "", + message orelse "", + }); + Global.crash(); + } + + const GetOTPError = OOM || error{}; + + fn pressEnterToOpenInBrowser(auth_url: stringZ) void { + // unset `ENABLE_VIRTUAL_TERMINAL_INPUT` on windows. This prevents backspace from + // deleting the entire line + const original_mode: if (Environment.isWindows) ?bun.windows.DWORD else void = if (comptime Environment.isWindows) + bun.win32.unsetStdioModeFlags(0, bun.windows.ENABLE_VIRTUAL_TERMINAL_INPUT) catch null + else {}; + + defer if (comptime Environment.isWindows) { + if (original_mode) |mode| { + _ = bun.windows.SetConsoleMode(bun.win32.STDIN_FD.cast(), mode); + } + }; + + while ('\n' != Output.buffered_stdin.reader().readByte() catch return) {} + + var child = std.process.Child.init(&.{ Open.opener, auth_url }, bun.default_allocator); + _ = child.spawnAndWait() catch return; + } + + fn getOTP( + comptime directory_publish: bool, + ctx: *const Context(directory_publish), + registry: *const Npm.Registry.Scope, + response_buf: *MutableString, + print_buf: *std.ArrayListUnmanaged(u8), + ) GetOTPError![]const u8 { + const res_source = logger.Source.initPathString("???", response_buf.list.items); + + if (JSON.parseUTF8(&res_source, ctx.manager.log, ctx.allocator) catch |err| res_json: { + switch (err) { + error.OutOfMemory => |oom| return oom, + + // https://github.com/npm/cli/blob/63d6a732c3c0e9c19fd4d147eaa5cc27c29b168d/node_modules/npm-registry-fetch/lib/check-response.js#L65 + // invalid json is ignored + else => break :res_json null, + } + }) |json| try_web: { + const auth_url_str = try json.getStringClonedZ(ctx.allocator, "authUrl") orelse break :try_web; + + // important to clone because it belongs to `response_buf`, and `response_buf` will be + // reused with the following requests + const done_url_str = try json.getStringCloned(ctx.allocator, "doneUrl") orelse break :try_web; + const done_url = URL.parse(done_url_str); + + Output.prettyln("\nAuthenticate your account at (press ENTER to open in browser):\n", .{}); + + const offset = 0; + const padding = 1; + + const horizontal = if (Output.enable_ansi_colors) "─" else "-"; + const vertical = if 
(Output.enable_ansi_colors) "│" else "|"; + const top_left = if (Output.enable_ansi_colors) "┌" else "|"; + const top_right = if (Output.enable_ansi_colors) "┐" else "|"; + const bottom_left = if (Output.enable_ansi_colors) "└" else "|"; + const bottom_right = if (Output.enable_ansi_colors) "┘" else "|"; + + const width = (padding * 2) + auth_url_str.len; + + for (0..offset) |_| Output.print(" ", .{}); + Output.print("{s}", .{top_left}); + for (0..width) |_| Output.print("{s}", .{horizontal}); + Output.println("{s}", .{top_right}); + + for (0..offset) |_| Output.print(" ", .{}); + Output.print("{s}", .{vertical}); + for (0..padding) |_| Output.print(" ", .{}); + Output.pretty("{s}", .{auth_url_str}); + for (0..padding) |_| Output.print(" ", .{}); + Output.println("{s}", .{vertical}); + + for (0..offset) |_| Output.print(" ", .{}); + Output.print("{s}", .{bottom_left}); + for (0..width) |_| Output.print("{s}", .{horizontal}); + Output.println("{s}", .{bottom_right}); + Output.flush(); + + // on another thread because pressing enter is not required + (std.Thread.spawn(.{}, pressEnterToOpenInBrowser, .{auth_url_str}) catch |err| { + Output.err(err, "failed to spawn thread for opening auth url", .{}); + Global.crash(); + }).detach(); + + var auth_headers = try constructPublishHeaders( + ctx.allocator, + print_buf, + registry, + null, + null, + ctx.uses_workspaces, + ctx.manager.options.publish_config.auth_type, + ); + + while (true) { + response_buf.reset(); + + var req = http.AsyncHTTP.initSync( + ctx.allocator, + .GET, + done_url, + auth_headers.entries, + auth_headers.content.ptr.?[0..auth_headers.content.len], + response_buf, + "", + null, + null, + .follow, + ); + + const res = req.sendSync() catch |err| { + switch (err) { + error.OutOfMemory => |oom| return oom, + else => { + Output.err(err, "failed to send OTP request", .{}); + Global.crash(); + }, + } + }; + + switch (res.status_code) { + 202 => { + // retry + const nanoseconds = nanoseconds: { + default: for (res.headers) |header| { + if (strings.eqlCaseInsensitiveASCII(header.name, "retry-after", true)) { + const trimmed = strings.trim(header.value, &strings.whitespace_chars); + const seconds = bun.fmt.parseInt(u32, trimmed, 10) catch break :default; + break :nanoseconds seconds * std.time.ns_per_s; + } + } + + break :nanoseconds 500 * std.time.ns_per_ms; + }; + + std.time.sleep(nanoseconds); + continue; + }, + 200 => { + // login successful + const otp_done_source = logger.Source.initPathString("???", response_buf.list.items); + const otp_done_json = JSON.parseUTF8(&otp_done_source, ctx.manager.log, ctx.allocator) catch |err| { + switch (err) { + error.OutOfMemory => |oom| return oom, + else => { + Output.err("WebLogin", "failed to parse response json", .{}); + Global.crash(); + }, + } + }; + + return try otp_done_json.getStringCloned(ctx.allocator, "token") orelse { + Output.err("WebLogin", "missing `token` field in response json", .{}); + Global.crash(); + }; + }, + else => { + try handleResponseErrors(directory_publish, ctx, &req, &res, response_buf, false); + }, + } + } + } + + // classic + return prompt(ctx.allocator, "\nThis operation requires a one-time password.\nEnter OTP: ", "") catch |err| { + switch (err) { + error.OutOfMemory => |oom| return oom, + else => { + Output.err(err, "failed to read OTP input", .{}); + Global.crash(); + }, + } + }; + } + + fn constructPublishHeaders( + allocator: std.mem.Allocator, + print_buf: *std.ArrayListUnmanaged(u8), + registry: *const Npm.Registry.Scope, + maybe_json_len: ?usize, +
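// note: a non-null OTP here switches `npm-auth-type` from "web" (or the configured auth type) to "legacy" below +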
maybe_otp: ?[]const u8, + uses_workspaces: bool, + auth_type: ?PackageManager.Options.AuthType, + ) OOM!http.HeaderBuilder { + var print_writer = print_buf.writer(allocator); + var headers: http.HeaderBuilder = .{}; + const npm_auth_type = if (maybe_otp == null) + if (auth_type) |auth| @tagName(auth) else "web" + else + "legacy"; + const ci_name = bun.detectCI(); + + { + headers.count("accept", "*/*"); + headers.count("accept-encoding", "gzip,deflate"); + + if (registry.token.len > 0) { + try print_writer.print("Bearer {s}", .{registry.token}); + headers.count("authorization", print_buf.items); + print_buf.clearRetainingCapacity(); + } else if (registry.auth.len > 0) { + try print_writer.print("Basic {s}", .{registry.auth}); + headers.count("authorization", print_buf.items); + print_buf.clearRetainingCapacity(); + } + + if (maybe_json_len != null) { + // not using `MimeType.json.value`, verdaccio will fail if it's anything other than `application/json` + headers.count("content-type", "application/json"); + } + + headers.count("npm-auth-type", npm_auth_type); + if (maybe_otp) |otp| { + headers.count("npm-otp", otp); + } + headers.count("npm-command", "publish"); + + try print_writer.print("{s} {s} {s} workspaces/{}{s}{s}", .{ + Global.user_agent, + Global.os_name, + Global.arch_name, + uses_workspaces, + if (ci_name != null) " ci/" else "", + ci_name orelse "", + }); + // headers.count("user-agent", "npm/10.8.3 node/v22.6.0 darwin arm64 workspaces/false"); + headers.count("user-agent", print_buf.items); + print_buf.clearRetainingCapacity(); + + headers.count("Connection", "keep-alive"); + headers.count("Host", registry.url.host); + + if (maybe_json_len) |json_len| { + try print_writer.print("{d}", .{json_len}); + headers.count("Content-Length", print_buf.items); + print_buf.clearRetainingCapacity(); + } + } + + try headers.allocate(allocator); + + { + headers.append("accept", "*/*"); + headers.append("accept-encoding", "gzip,deflate"); + + if (registry.token.len > 0) { + try print_writer.print("Bearer {s}", .{registry.token}); + headers.append("authorization", print_buf.items); + print_buf.clearRetainingCapacity(); + } else if (registry.auth.len > 0) { + try print_writer.print("Basic {s}", .{registry.auth}); + headers.append("authorization", print_buf.items); + print_buf.clearRetainingCapacity(); + } + + if (maybe_json_len != null) { + // not using `MimeType.json.value`, verdaccio will fail if it's anything other than `application/json` + headers.append("content-type", "application/json"); + } + + headers.append("npm-auth-type", npm_auth_type); + if (maybe_otp) |otp| { + headers.append("npm-otp", otp); + } + headers.append("npm-command", "publish"); + + try print_writer.print("{s} {s} {s} workspaces/{}{s}{s}", .{ + Global.user_agent, + Global.os_name, + Global.arch_name, + uses_workspaces, + if (ci_name != null) " ci/" else "", + ci_name orelse "", + }); + // headers.append("user-agent", "npm/10.8.3 node/v22.6.0 darwin arm64 workspaces/false"); + headers.append("user-agent", print_buf.items); + print_buf.clearRetainingCapacity(); + + headers.append("Connection", "keep-alive"); + headers.append("Host", registry.url.host); + + if (maybe_json_len) |json_len| { + try print_writer.print("{d}", .{json_len}); + headers.append("Content-Length", print_buf.items); + print_buf.clearRetainingCapacity(); + } + } + + return headers; + } + + fn constructPublishRequestBody( + comptime directory_publish: bool, + ctx: *const Context(directory_publish), + registry: *const Npm.Registry.Scope, + ) OOM![]const 
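u8 { + // rough shape of the emitted body, matching the prints below (illustrative values): + // {"_id":"<name>","name":"<name>","dist-tags":{"<tag>":"<version>"}, + //  "versions":{"<version>":{"name":...,"version":...,"_id":"<name>@<version>","_integrity":...,"_nodeVersion":...,"_npmVersion":...,"dist":{"integrity":"sha512-...","shasum":"<hex>","tarball":"http://<registry>/<name>/-/<tarball>.tgz"}}}, + //  "access":"public"|"restricted"|null,"_attachments":{"<tarball>.tgz":{"content_type":"application/octet-stream","data":"<base64>","length":<tarball byte count>}}}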
+ const tag = if (ctx.manager.options.publish_config.tag.len > 0) + ctx.manager.options.publish_config.tag + else + "latest"; + + const encoded_tarball_len = std.base64.standard.Encoder.calcSize(ctx.tarball_bytes.len); + const version_without_build_tag = Dependency.withoutBuildTag(ctx.package_version); + + var buf = try std.ArrayListUnmanaged(u8).initCapacity( + ctx.allocator, + ctx.package_name.len * 5 + + version_without_build_tag.len * 4 + + ctx.abs_tarball_path.len + + encoded_tarball_len, + ); + var writer = buf.writer(ctx.allocator); + + try writer.print("{{\"_id\":\"{s}\",\"name\":\"{s}\"", .{ + ctx.package_name, + ctx.package_name, + }); + + try writer.print(",\"dist-tags\":{{\"{s}\":\"{s}\"}}", .{ + tag, + version_without_build_tag, + }); + + // "versions" + { + try writer.print(",\"versions\":{{\"{s}\":{{\"name\":\"{s}\",\"version\":\"{s}\"", .{ + version_without_build_tag, + ctx.package_name, + version_without_build_tag, + }); + + try writer.print(",\"_id\": \"{s}@{s}\"", .{ + ctx.package_name, + version_without_build_tag, + }); + + try writer.print(",\"_integrity\":\"{}\"", .{ + bun.fmt.integrity(ctx.integrity, .full), + }); + + try writer.print(",\"_nodeVersion\":\"{s}\",\"_npmVersion\":\"{s}\"", .{ + Environment.reported_nodejs_version, + // TODO: npm version + "10.8.3", + }); + + try writer.print(",\"dist\":{{\"integrity\":\"{}\",\"shasum\":\"{s}\"", .{ + bun.fmt.integrity(ctx.integrity, .full), + bun.fmt.bytesToHex(ctx.shasum, .lower), + }); + + // https://github.com/npm/cli/blob/63d6a732c3c0e9c19fd4d147eaa5cc27c29b168d/workspaces/libnpmpublish/lib/publish.js#L118 + // https:// -> http:// + try writer.print(",\"tarball\":\"http://{s}/{s}/-/{s}\"}}}}}}", .{ + strings.withoutTrailingSlash(registry.url.href), + ctx.package_name, + std.fs.path.basename(ctx.abs_tarball_path), + }); + } + + if (ctx.manager.options.publish_config.access) |access| { + try writer.print(",\"access\":\"{s}\"", .{@tagName(access)}); + } else { + try writer.writeAll(",\"access\":null"); + } + + // "_attachments" + { + try writer.print(",\"_attachments\":{{\"{s}\":{{\"content_type\":\"{s}\",\"data\":\"", .{ + std.fs.path.basename(ctx.abs_tarball_path), + "application/octet-stream", + }); + + try buf.ensureUnusedCapacity(ctx.allocator, encoded_tarball_len); + buf.items.len += encoded_tarball_len; + const count = bun.simdutf.base64.encode(ctx.tarball_bytes, buf.items[buf.items.len - encoded_tarball_len ..], false); + bun.assertWithLocation(count == encoded_tarball_len, @src()); + + try writer.print("\",\"length\":{d}}}}}}}", .{ + ctx.tarball_bytes.len, + }); + } + + return buf.items; + } +}; diff --git a/src/cli/upgrade_command.zig b/src/cli/upgrade_command.zig index c5fca9ef54..c75452a0fd 100644 --- a/src/cli/upgrade_command.zig +++ b/src/cli/upgrade_command.zig @@ -30,7 +30,7 @@ const bundler = bun.bundler; const fs = @import("../fs.zig"); const URL = @import("../url.zig").URL; const HTTP = bun.http; -const ParseJSON = @import("../json_parser.zig").ParseJSONUTF8; +const JSON = bun.JSON; const Archive = @import("../libarchive/libarchive.zig").Archive; const Zlib = @import("../zlib.zig"); const JSPrinter = bun.js_printer; @@ -251,7 +251,7 @@ pub const UpgradeCommand = struct { async_http.client.flags.reject_unauthorized = env_loader.getTLSRejectUnauthorized(); if (!silent) async_http.client.progress_node = progress.?; - const response = try async_http.sendSync(true); + const response = try async_http.sendSync(); switch (response.status_code) { 404 => return error.HTTP404, @@ -266,7 +266,7 @@ pub const
UpgradeCommand = struct { defer if (comptime silent) log.deinit(); var source = logger.Source.initPathString("releases.json", metadata_body.list.items); initializeStore(); - var expr = ParseJSON(&source, &log, allocator) catch |err| { + var expr = JSON.parseUTF8(&source, &log, allocator) catch |err| { if (!silent) { progress.?.end(); refresher.?.refresh(); @@ -533,7 +533,7 @@ pub const UpgradeCommand = struct { async_http.client.progress_node = progress; async_http.client.flags.reject_unauthorized = env_loader.getTLSRejectUnauthorized(); - const response = try async_http.sendSync(true); + const response = try async_http.sendSync(); switch (response.status_code) { 404 => { diff --git a/src/compile_target.zig b/src/compile_target.zig index 67d0c0aab1..a6ec5f076c 100644 --- a/src/compile_target.zig +++ b/src/compile_target.zig @@ -170,7 +170,7 @@ pub fn downloadToPath(this: *const CompileTarget, env: *bun.DotEnv.Loader, alloc async_http.client.progress_node = progress; async_http.client.flags.reject_unauthorized = env.getTLSRejectUnauthorized(); - const response = try async_http.sendSync(true); + const response = try async_http.sendSync(); switch (response.status_code) { 404 => { @@ -254,13 +254,13 @@ pub fn downloadToPath(this: *const CompileTarget, env: *bun.DotEnv.Loader, alloc var node = refresher.start("Extracting", 0); defer node.end(); - const libarchive = @import("./libarchive//libarchive.zig"); + const libarchive = bun.libarchive; var tmpname_buf: [1024]u8 = undefined; const tempdir_name = bun.span(try bun.fs.FileSystem.instance.tmpname("tmp", &tmpname_buf, bun.fastRandom())); var tmpdir = try std.fs.cwd().makeOpenPath(tempdir_name, .{}); defer tmpdir.close(); defer std.fs.cwd().deleteTree(tempdir_name) catch {}; - _ = libarchive.Archive.extractToDir( + _ = libarchive.Archiver.extractToDir( tarball_bytes.items, tmpdir, null, diff --git a/src/defines.zig b/src/defines.zig index 504585e39e..39495728af 100644 --- a/src/defines.zig +++ b/src/defines.zig @@ -127,7 +127,7 @@ pub const DefineData = struct { .path = defines_path, .key_path = fs.Path.initWithNamespace("defines", "internal"), }; - const expr = try json_parser.ParseEnvJSON(&source, _log, allocator); + const expr = try json_parser.parseEnvJSON(&source, _log, allocator); const cloned = try expr.data.deepClone(allocator); user_defines.putAssumeCapacity(entry.key_ptr.*, DefineData{ .value = cloned, diff --git a/src/deps/picohttp.zig b/src/deps/picohttp.zig index 0f44ed7e72..08770fcde2 100644 --- a/src/deps/picohttp.zig +++ b/src/deps/picohttp.zig @@ -214,7 +214,7 @@ const StatusCodeFormatter = struct { pub const Response = struct { minor_version: usize = 0, - status_code: usize = 0, + status_code: u32 = 0, status: []const u8 = "", headers: []Header = &.{}, bytes_read: c_int = 0, @@ -295,7 +295,7 @@ pub const Response = struct { }, else => Response{ .minor_version = @as(usize, @intCast(minor_version)), - .status_code = @as(usize, @intCast(status_code)), + .status_code = @as(u32, @intCast(status_code)), .status = status, .headers = src[0..@min(num_headers, src.len)], .bytes_read = rc, diff --git a/src/exact_size_matcher.zig b/src/exact_size_matcher.zig index 1b780f54eb..f3a7d75c2c 100644 --- a/src/exact_size_matcher.zig +++ b/src/exact_size_matcher.zig @@ -5,7 +5,7 @@ pub fn ExactSizeMatcher(comptime max_bytes: usize) type { switch (max_bytes) { 1, 2, 4, 8, 12, 16 => {}, else => { - @compileError("max_bytes must be 1, 2, 4, 8, or 12."); + @compileError("max_bytes must be 1, 2, 4, 8, 12, or 16."); }, } diff --git a/src/fmt.zig 
b/src/fmt.zig index f2b9ec6f63..cf3094a518 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -7,6 +7,7 @@ const js_lexer = bun.js_lexer; const ComptimeStringMap = bun.ComptimeStringMap; const fmt = std.fmt; const Environment = bun.Environment; +const sha = bun.sha; pub usingnamespace std.fmt; @@ -106,6 +107,112 @@ pub fn Table( }; } +pub const RedactedNpmUrlFormatter = struct { + url: string, + + pub fn format(this: @This(), comptime _: string, _: std.fmt.FormatOptions, writer: anytype) !void { + var i: usize = 0; + while (i < this.url.len) { + if (strings.startsWithUUID(this.url[i..])) { + try writer.writeAll("***"); + i += 36; + continue; + } + + const npm_secret_len = strings.startsWithNpmSecret(this.url[i..]); + if (npm_secret_len > 0) { + try writer.writeAll("***"); + i += npm_secret_len; + continue; + } + + // TODO: redact password from `https://username:password@registry.com/` + + try writer.writeByte(this.url[i]); + i += 1; + } + } +}; + +pub fn redactedNpmUrl(str: string) RedactedNpmUrlFormatter { + return .{ + .url = str, + }; +} + +// https://github.com/npm/cli/blob/63d6a732c3c0e9c19fd4d147eaa5cc27c29b168d/node_modules/npm-package-arg/lib/npa.js#L163 +pub const DependencyUrlFormatter = struct { + url: string, + + pub fn format(this: @This(), comptime _: string, _: std.fmt.FormatOptions, writer: anytype) !void { + var remain = this.url; + while (strings.indexOfChar(remain, '/')) |slash| { + try writer.writeAll(remain[0..slash]); + try writer.writeAll("%2f"); + remain = remain[slash + 1 ..]; + } + try writer.writeAll(remain); + } +}; + +pub fn dependencyUrl(url: string) DependencyUrlFormatter { + return .{ + .url = url, + }; +} + +const IntegrityFormatStyle = enum { + short, + full, +}; + +pub fn IntegrityFormatter(comptime style: IntegrityFormatStyle) type { + return struct { + bytes: [sha.SHA512.digest]u8, + + pub fn format(this: @This(), comptime _: string, _: std.fmt.FormatOptions, writer: anytype) !void { + var buf: [std.base64.standard.Encoder.calcSize(sha.SHA512.digest)]u8 = undefined; + const count = bun.simdutf.base64.encode(this.bytes[0..sha.SHA512.digest], &buf, false); + + const encoded = buf[0..count]; + + if (comptime style == .short) + try writer.print("sha512-{s}[...]{s}", .{ encoded[0..13], encoded[encoded.len - 15 ..] 
}) + else + try writer.print("sha512-{s}", .{encoded}); + } + }; +} + +pub fn integrity(bytes: [sha.SHA512.digest]u8, comptime style: IntegrityFormatStyle) IntegrityFormatter(style) { + return .{ .bytes = bytes }; +} + +const JSONFormatter = struct { + input: []const u8, + + pub fn format(self: JSONFormatter, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void { + try bun.js_printer.writeJSONString(self.input, @TypeOf(writer), writer, .latin1); + } +}; + +const JSONFormatterUTF8 = struct { + input: []const u8, + + pub fn format(self: JSONFormatterUTF8, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void { + try bun.js_printer.writeJSONString(self.input, @TypeOf(writer), writer, .utf8); + } +}; + +/// Expects latin1 +pub fn formatJSONString(text: []const u8) JSONFormatter { + return .{ .input = text }; +} + +pub fn formatJSONStringUTF8(text: []const u8) JSONFormatterUTF8 { + return .{ .input = text }; +} + const SharedTempBuffer = [32 * 1024]u8; fn getSharedBuffer() []u8 { return std.mem.asBytes(shared_temp_buffer_ptr orelse brk: { diff --git a/src/http.zig b/src/http.zig index 1cfb72daee..f3f3783be7 100644 --- a/src/http.zig +++ b/src/http.zig @@ -2372,11 +2372,33 @@ pub const AsyncHTTP = struct { return this; } - pub fn initSync(allocator: std.mem.Allocator, method: Method, url: URL, headers: Headers.Entries, headers_buf: string, response_buffer: *MutableString, request_body: []const u8, http_proxy: ?URL, hostname: ?[]u8, redirect_type: FetchRedirect) AsyncHTTP { - return @This().init(allocator, method, url, headers, headers_buf, response_buffer, request_body, undefined, redirect_type, .{ - .http_proxy = http_proxy, - .hostname = hostname, - }); + pub fn initSync( + allocator: std.mem.Allocator, + method: Method, + url: URL, + headers: Headers.Entries, + headers_buf: string, + response_buffer: *MutableString, + request_body: []const u8, + http_proxy: ?URL, + hostname: ?[]u8, + redirect_type: FetchRedirect, + ) AsyncHTTP { + return @This().init( + allocator, + method, + url, + headers, + headers_buf, + response_buffer, + request_body, + undefined, + redirect_type, + .{ + .http_proxy = http_proxy, + .hostname = hostname, + }, + ); } fn reset(this: *AsyncHTTP) !void { @@ -2456,7 +2478,7 @@ pub const AsyncHTTP = struct { this.channel.writeItem(result) catch unreachable; } - pub fn sendSync(this: *AsyncHTTP, comptime _: bool) anyerror!picohttp.Response { + pub fn sendSync(this: *AsyncHTTP) anyerror!picohttp.Response { HTTPThread.init(); var ctx = try bun.default_allocator.create(SingleHTTPChannel); @@ -2469,14 +2491,13 @@ pub const AsyncHTTP = struct { var batch = bun.ThreadPool.Batch{}; this.schedule(bun.default_allocator, &batch); http_thread.schedule(batch); - while (true) { - const result: HTTPClientResult = ctx.channel.readItem() catch unreachable; - if (result.fail) |e| return e; - assert(result.metadata != null); - return result.metadata.?.response; - } - unreachable; + const result = ctx.channel.readItem() catch unreachable; + if (result.fail) |err| { + return err; + } + assert(result.metadata != null); + return result.metadata.?.response; } pub fn onAsyncHTTPCallback(this: *AsyncHTTP, async_http: *AsyncHTTP, result: HTTPClientResult) void { diff --git a/src/ini.zig b/src/ini.zig index 96229711f4..73a2c86cc6 100644 --- a/src/ini.zig +++ b/src/ini.zig @@ -242,7 +242,7 @@ pub const Parser = struct { var log = bun.logger.Log.init(arena_allocator); defer log.deinit(); // Try to parse it and it if fails will just treat it as a string - const 
json_val: Expr = bun.JSON.ParseJSONUTF8Impl(&src, &log, arena_allocator, true) catch { + const json_val: Expr = bun.JSON.parseUTF8Impl(&src, &log, arena_allocator, true) catch { break :out; }; @@ -882,28 +882,11 @@ pub fn loadNpmrcFromFile( ) void { var log = bun.logger.Log.init(allocator); defer log.deinit(); - const npmrc_file = switch (bun.sys.openat(bun.FD.cwd(), npmrc_path, bun.O.RDONLY, 0)) { - .result => |fd| fd, - .err => |err| { - if (auto_loaded) return; - Output.prettyErrorln("{}\nwhile opening .npmrc \"{s}\"", .{ - err, - npmrc_path, - }); - Global.exit(1); - }, - }; - defer _ = bun.sys.close(npmrc_file); - const source = switch (bun.sys.File.toSource(npmrc_path, allocator)) { - .result => |s| s, - .err => |e| { - Output.prettyErrorln("{}\nwhile reading .npmrc \"{s}\"", .{ - e, - npmrc_path, - }); - Global.exit(1); - }, + const source = bun.sys.File.toSource(npmrc_path, allocator).unwrap() catch |err| { + if (auto_loaded) return; + Output.err(err, "failed to read .npmrc: \"{s}\"", .{npmrc_path}); + Global.crash(); }; defer allocator.free(source.contents); diff --git a/src/install/dependency.zig b/src/install/dependency.zig index d21b8be319..6a1bd3a961 100644 --- a/src/install/dependency.zig +++ b/src/install/dependency.zig @@ -284,6 +284,25 @@ pub fn unscopedPackageName(name: []const u8) []const u8 { return name_[(strings.indexOfChar(name_, '/') orelse return name) + 1 ..]; } +pub fn isScopedPackageName(name: string) error{InvalidPackageName}!bool { + if (name.len == 0) return error.InvalidPackageName; + + if (name[0] != '@') return false; + + if (strings.indexOfChar(name, '/')) |slash| { + if (slash != 1 and slash != name.len - 1) { + return true; + } + } + + return error.InvalidPackageName; +} + +/// assumes version is valid +pub fn withoutBuildTag(version: string) string { + if (strings.indexOfChar(version, '+')) |plus| return version[0..plus] else return version; +} + pub const Version = struct { tag: Tag = .uninitialized, literal: String = .{}, diff --git a/src/install/extract_tarball.zig b/src/install/extract_tarball.zig index da159b0d56..d3bf173b93 100644 --- a/src/install/extract_tarball.zig +++ b/src/install/extract_tarball.zig @@ -200,7 +200,7 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD defer extract_destination.close(); - const Archive = @import("../libarchive/libarchive.zig").Archive; + const Archiver = bun.libarchive.Archiver; const Zlib = @import("../zlib.zig"); var zlib_pool = Npm.Registry.BodyPool.get(default_allocator); zlib_pool.data.reset(); @@ -278,7 +278,7 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD var dirname_reader = DirnameReader{ .outdirname = &resolved }; switch (PackageManager.verbose_install) { - inline else => |log| _ = try Archive.extractToDir( + inline else => |log| _ = try Archiver.extractToDir( zlib_pool.data.list.items, extract_destination, null, @@ -304,7 +304,7 @@ fn extract(this: *const ExtractTarball, tgz_bytes: []const u8) !Install.ExtractD } }, else => switch (PackageManager.verbose_install) { - inline else => |log| _ = try Archive.extractToDir( + inline else => |log| _ = try Archiver.extractToDir( zlib_pool.data.list.items, extract_destination, null, diff --git a/src/install/install.zig b/src/install/install.zig index dd27ed84b0..17b8edceee 100644 --- a/src/install/install.zig +++ b/src/install/install.zig @@ -18,7 +18,7 @@ const JSLexer = bun.js_lexer; const logger = bun.logger; const js_parser = bun.js_parser; -const json_parser = bun.JSON; +const JSON = 
bun.JSON; const JSPrinter = bun.js_printer; const linker = @import("../linker.zig"); @@ -1236,7 +1236,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { initializeStore(); - var package_json_checker = json_parser.PackageJSONVersionChecker.init(allocator, &source, &log) catch return false; + var package_json_checker = JSON.PackageJSONVersionChecker.init(allocator, &source, &log) catch return false; _ = package_json_checker.parseExpr() catch return false; if (log.errors > 0 or !package_json_checker.has_found_name) return false; // workspaces aren't required to have a version @@ -2828,7 +2828,7 @@ pub const PackageManager = struct { if (comptime opts.init_reset_store) initializeStore(); - const json = json_parser.ParsePackageJSONUTF8WithOpts( + const json = JSON.parsePackageJSONUTF8WithOpts( &source, log, allocator, @@ -2882,7 +2882,7 @@ pub const PackageManager = struct { if (comptime opts.init_reset_store) initializeStore(); - const json_result = json_parser.ParsePackageJSONUTF8WithOpts( + const json_result = JSON.parsePackageJSONUTF8WithOpts( &source, log, allocator, @@ -6087,7 +6087,7 @@ pub const PackageManager = struct { json.buf, ); initializeStore(); - const json_root = json_parser.ParsePackageJSONUTF8( + const json_root = JSON.parsePackageJSONUTF8( &package_json_source, manager.log, manager.allocator, @@ -6941,6 +6941,37 @@ pub const PackageManager = struct { max_concurrent_lifecycle_scripts: usize, + publish_config: PublishConfig = .{}, + + pub const PublishConfig = struct { + access: ?Access = null, + tag: string = "", + otp: string = "", + auth_type: ?AuthType = null, + }; + + pub const Access = enum { + public, + restricted, + + const map = bun.ComptimeEnumMap(Access); + + pub fn fromStr(str: string) ?Access { + return map.get(str); + } + }; + + pub const AuthType = enum { + legacy, + web, + + const map = bun.ComptimeEnumMap(AuthType); + + pub fn fromStr(str: string) ?AuthType { + return map.get(str); + } + }; + pub fn shouldPrintCommandName(this: *const Options) bool { return this.log_level != .silent and this.do.summary; } @@ -7046,7 +7077,7 @@ pub const PackageManager = struct { allocator: std.mem.Allocator, log: *logger.Log, env: *DotEnv.Loader, - cli_: ?CommandLineArguments, + maybe_cli: ?CommandLineArguments, bun_install_: ?*Api.BunInstall, subcommand: Subcommand, ) !void { @@ -7200,20 +7231,6 @@ pub const PackageManager = struct { } } - if (cli_) |cli| { - if (cli.registry.len > 0) { - this.scope.url = URL.parse(cli.registry); - } - - if (cli.exact) { - this.enable.exact_versions = true; - } - - if (cli.token.len > 0) { - this.scope.token = cli.token; - } - } - if (env.get("BUN_CONFIG_YARN_LOCKFILE") != null) { this.do.save_yarn_lock = true; } @@ -7246,7 +7263,19 @@ pub const PackageManager = struct { this.enable.manifest_cache_control = false; } - if (cli_) |cli| { + if (maybe_cli) |cli| { + if (cli.registry.len > 0) { + this.scope.url = URL.parse(cli.registry); + } + + if (cli.exact) { + this.enable.exact_versions = true; + } + + if (cli.token.len > 0) { + this.scope.token = cli.token; + } + if (cli.no_save) { this.do.save_lockfile = false; this.do.write_package_json = false; @@ -7350,6 +7379,19 @@ pub const PackageManager = struct { }; }, } + + if (cli.publish_config.access) |cli_access| { + this.publish_config.access = cli_access; + } + if (cli.publish_config.tag.len > 0) { + this.publish_config.tag = cli.publish_config.tag; + } + if (cli.publish_config.otp.len > 0) { + this.publish_config.otp = cli.publish_config.otp; + } + if 
(cli.publish_config.auth_type) |auth_type| { + this.publish_config.auth_type = auth_type; + } } else { this.log_level = if (default_disable_progress_bar) LogLevel.default_no_progress else LogLevel.default; PackageManager.verbose_install = false; } @@ -7664,7 +7706,7 @@ pub const PackageManager = struct { const value = dep.value orelse continue; if (value.data != .e_string) continue; - const version_literal = value.asStringCloned(allocator) orelse bun.outOfMemory(); + const version_literal = try value.asStringCloned(allocator) orelse bun.outOfMemory(); var tag = Dependency.Version.Tag.infer(version_literal); // only updating dependencies with npm versions, and dist-tags if `--latest`. @@ -7681,7 +7723,7 @@ pub const PackageManager = struct { } } - const key_str = key.asStringCloned(allocator) orelse unreachable; + const key_str = try key.asStringCloned(allocator) orelse unreachable; const entry = manager.updating_packages.getOrPut(allocator, key_str) catch bun.outOfMemory(); // If a dependency is present in more than one dependency group, only one of its versions @@ -7865,7 +7907,7 @@ pub const PackageManager = struct { replacing += 1; } else { if (manager.subcommand == .update and options.before_install) add_packages_to_update: { - const version_literal = value.expr.asStringCloned(allocator) orelse break :add_packages_to_update; + const version_literal = try value.expr.asStringCloned(allocator) orelse break :add_packages_to_update; var tag = Dependency.Version.Tag.infer(version_literal); if (tag != .npm and tag != .dist_tag) break :add_packages_to_update; @@ -8250,6 +8292,18 @@ pub const PackageManager = struct { @"patch-commit", outdated, pack, + publish, + + // bin, + // hash, + // @"hash-print", + // @"hash-string", + // cache, + // @"default-trusted", + // untrusted, + // trust, + // ls, + // migrate, pub fn canGloballyInstallPackages(this: Subcommand) bool { return switch (this) { @@ -8265,6 +8319,14 @@ pub const PackageManager = struct { else => false, }; } + + // TODO: make all subcommands find root and chdir + pub fn shouldChdirToRoot(this: Subcommand) bool { + return switch (this) { + .link => false, + else => true, + }; + } }; pub fn init( @@ -8389,7 +8451,7 @@ pub const PackageManager = struct { // Check if this is a workspace; if so, use root package var found = false; - if (subcommand != .link) { + if (subcommand.shouldChdirToRoot()) { if (!created_package_json) { while (std.fs.path.dirname(this_cwd)) |parent| : (this_cwd = parent) { const parent_without_trailing_slash = strings.withoutTrailingSlash(parent); @@ -8412,7 +8474,7 @@ pub const PackageManager = struct { const json_path = try bun.getFdPath(json_file.handle, &package_json_cwd_buf); const json_source = logger.Source.initPathString(json_path, json_buf[0..json_len]); initializeStore(); - const json = try json_parser.ParsePackageJSONUTF8(&json_source, ctx.log, ctx.allocator); + const json = try JSON.parsePackageJSONUTF8(&json_source, ctx.log, ctx.allocator); if (json.asProperty("workspaces")) |prop| { const json_array = switch (prop.expr.data) { .e_array => |arr| arr, @@ -9139,7 +9201,7 @@ pub const PackageManager = struct { else "Possible values: \"hardlink\" (default), \"symlink\", \"copyfile\""; - const install_params_ = [_]ParamType{ + const shared_params = [_]ParamType{ clap.parseParam("-c, --config ?
Specify path to config file (bunfig.toml)") catch unreachable, clap.parseParam("-y, --yarn Write a yarn.lock file (yarn v1)") catch unreachable, clap.parseParam("-p, --production Don't install devDependencies") catch unreachable, @@ -9165,7 +9227,7 @@ pub const PackageManager = struct { clap.parseParam("-h, --help Print this help menu") catch unreachable, }; - pub const install_params: []const ParamType = &(install_params_ ++ [_]ParamType{ + pub const install_params: []const ParamType = &(shared_params ++ [_]ParamType{ clap.parseParam("-d, --dev Add dependency to \"devDependencies\"") catch unreachable, clap.parseParam("-D, --development") catch unreachable, clap.parseParam("--optional Add dependency to \"optionalDependencies\"") catch unreachable, @@ -9173,12 +9235,12 @@ pub const PackageManager = struct { clap.parseParam(" ... ") catch unreachable, }); - pub const update_params: []const ParamType = &(install_params_ ++ [_]ParamType{ + pub const update_params: []const ParamType = &(shared_params ++ [_]ParamType{ clap.parseParam("--latest Update packages to their latest versions") catch unreachable, clap.parseParam(" ... \"name\" of packages to update") catch unreachable, }); - pub const pm_params: []const ParamType = &(install_params_ ++ [_]ParamType{ + pub const pm_params: []const ParamType = &(shared_params ++ [_]ParamType{ clap.parseParam("-a, --all") catch unreachable, // clap.parseParam("--filter ... Pack each matching workspace") catch unreachable, clap.parseParam("--destination The directory the tarball will be saved in") catch unreachable, @@ -9186,7 +9248,7 @@ pub const PackageManager = struct { clap.parseParam(" ... ") catch unreachable, }); - pub const add_params: []const ParamType = &(install_params_ ++ [_]ParamType{ + pub const add_params: []const ParamType = &(shared_params ++ [_]ParamType{ clap.parseParam("-d, --dev Add dependency to \"devDependencies\"") catch unreachable, clap.parseParam("-D, --development") catch unreachable, clap.parseParam("--optional Add dependency to \"optionalDependencies\"") catch unreachable, @@ -9194,42 +9256,51 @@ pub const PackageManager = struct { clap.parseParam(" ... \"name\" or \"name@version\" of package(s) to install") catch unreachable, }); - pub const remove_params: []const ParamType = &(install_params_ ++ [_]ParamType{ + pub const remove_params: []const ParamType = &(shared_params ++ [_]ParamType{ clap.parseParam(" ... \"name\" of package(s) to remove from package.json") catch unreachable, }); - pub const link_params: []const ParamType = &(install_params_ ++ [_]ParamType{ + pub const link_params: []const ParamType = &(shared_params ++ [_]ParamType{ clap.parseParam(" ... \"name\" install package as a link") catch unreachable, }); - pub const unlink_params: []const ParamType = &(install_params_ ++ [_]ParamType{ + pub const unlink_params: []const ParamType = &(shared_params ++ [_]ParamType{ clap.parseParam(" ... \"name\" uninstall package as a link") catch unreachable, }); - const patch_params: []const ParamType = &(install_params_ ++ [_]ParamType{ + const patch_params: []const ParamType = &(shared_params ++ [_]ParamType{ clap.parseParam(" ... 
\"name\" of the package to patch") catch unreachable, clap.parseParam("--commit Install a package containing modifications in `dir`") catch unreachable, clap.parseParam("--patches-dir The directory to put the patch file in (only if --commit is used)") catch unreachable, }); - const patch_commit_params: []const ParamType = &(install_params_ ++ [_]ParamType{ + const patch_commit_params: []const ParamType = &(shared_params ++ [_]ParamType{ clap.parseParam(" ... \"dir\" containing changes to a package") catch unreachable, clap.parseParam("--patches-dir The directory to put the patch file") catch unreachable, }); - const outdated_params: []const ParamType = &(install_params_ ++ [_]ParamType{ + const outdated_params: []const ParamType = &(shared_params ++ [_]ParamType{ // clap.parseParam("--json Output outdated information in JSON format") catch unreachable, clap.parseParam("--filter ... Display outdated dependencies for each matching workspace") catch unreachable, clap.parseParam(" ... Package patterns to filter by") catch unreachable, }); - const pack_params: []const ParamType = &(install_params_ ++ [_]ParamType{ + const pack_params: []const ParamType = &(shared_params ++ [_]ParamType{ // clap.parseParam("--filter ... Pack each matching workspace") catch unreachable, clap.parseParam("--destination The directory the tarball will be saved in") catch unreachable, clap.parseParam("--gzip-level Specify a custom compression level for gzip. Default is 9.") catch unreachable, clap.parseParam(" ... ") catch unreachable, }); + const publish_params: []const ParamType = &(shared_params ++ [_]ParamType{ + clap.parseParam(" ... Package tarball to publish") catch unreachable, + clap.parseParam("--access Set access level for scoped packages") catch unreachable, + clap.parseParam("--tag Tag the release. Default is \"latest\"") catch unreachable, + clap.parseParam("--otp Provide a one-time password for authentication") catch unreachable, + clap.parseParam("--auth-type Specify the type of one-time password authentication (default is 'web')") catch unreachable, + clap.parseParam("--gzip-level Specify a custom compression level for gzip. Default is 9.") catch unreachable, + }); + pub const CommandLineArguments = struct { cache_dir: string = "", lockfile: string = "", @@ -9276,6 +9347,8 @@ pub const PackageManager = struct { registry: string = "", + publish_config: Options.PublishConfig = .{}, + const PatchOpts = union(enum) { nothing: struct {}, patch: struct {}, @@ -9537,6 +9610,31 @@ pub const PackageManager = struct { Output.pretty("\n\n" ++ outro_text ++ "\n", .{}); Output.flush(); }, + .publish => { + const intro_text = + \\Usage: bun publish [flags] + ; + + const outro_text = + \\Examples: + \\ Publish the package in the current working directory with public access. + \\ bun publish --access public + \\ + \\ Publish a pre-existing package tarball. + \\ bun publish ./path/to/tarball.tgz + \\ + \\ Publish with tag 'next'. 
+ \\ bun publish --tag next + \\ + ; + + Output.pretty("\n" ++ intro_text ++ "\n", .{}); + Output.flush(); + Output.pretty("\nFlags:", .{}); + clap.simpleHelp(PackageManager.publish_params); + Output.pretty("\n\n" ++ outro_text ++ "\n", .{}); + Output.flush(); + }, } } @@ -9555,6 +9653,7 @@ pub const PackageManager = struct { .@"patch-commit" => patch_commit_params, .outdated => outdated_params, .pack => pack_params, + .publish => publish_params, }; var diag = clap.Diagnostic{}; @@ -9601,9 +9700,11 @@ pub const PackageManager = struct { // cli.json_output = args.flag("--json"); } - if (comptime subcommand == .pack or subcommand == .pm) { - if (args.option("--destination")) |dest| { - cli.pack_destination = dest; + if (comptime subcommand == .pack or subcommand == .pm or subcommand == .publish) { + if (comptime subcommand != .publish) { + if (args.option("--destination")) |dest| { + cli.pack_destination = dest; + } } if (args.option("--gzip-level")) |level| { @@ -9611,6 +9712,30 @@ pub const PackageManager = struct { } } + if (comptime subcommand == .publish) { + if (args.option("--tag")) |tag| { + cli.publish_config.tag = tag; + } + + if (args.option("--access")) |access| { + cli.publish_config.access = Options.Access.fromStr(access) orelse { + Output.errGeneric("invalid `access` value: '{s}'", .{access}); + Global.crash(); + }; + } + + if (args.option("--otp")) |otp| { + cli.publish_config.otp = otp; + } + + if (args.option("--auth-type")) |auth_type| { + cli.publish_config.auth_type = Options.AuthType.fromStr(auth_type) orelse { + Output.errGeneric("invalid `auth-type` value: '{s}'", .{auth_type}); + Global.crash(); + }; + } + } + // link and unlink default to not saving, all others default to // saving. if (comptime subcommand == .link or subcommand == .unlink) { @@ -10463,7 +10588,7 @@ pub const PackageManager = struct { // Now, we _re_ parse our in-memory edited package.json // so we can commit the version we changed from the lockfile - var new_package_json = json_parser.ParsePackageJSONUTF8(&source, manager.log, manager.allocator) catch |err| { + var new_package_json = JSON.parsePackageJSONUTF8(&source, manager.log, manager.allocator) catch |err| { Output.prettyErrorln("package.json failed to parse due to error {s}", .{@errorName(err)}); Global.crash(); }; @@ -10829,7 +10954,7 @@ pub const PackageManager = struct { defer manager.allocator.free(package_json_source.contents); initializeStore(); - const json = json_parser.ParsePackageJSONUTF8AlwaysDecode(&package_json_source, manager.log, manager.allocator) catch |err| { + const json = JSON.parsePackageJSONUTF8AlwaysDecode(&package_json_source, manager.log, manager.allocator) catch |err| { switch (Output.enable_ansi_colors) { inline else => |enable_ansi_colors| { manager.log.printForLogLevelWithEnableAnsiColors(Output.errorWriter(), enable_ansi_colors) catch {}; @@ -11246,7 +11371,7 @@ pub const PackageManager = struct { defer manager.allocator.free(package_json_source.contents); initializeStore(); - const json = json_parser.ParsePackageJSONUTF8AlwaysDecode(&package_json_source, manager.log, manager.allocator) catch |err| { + const json = JSON.parsePackageJSONUTF8AlwaysDecode(&package_json_source, manager.log, manager.allocator) catch |err| { switch (Output.enable_ansi_colors) { inline else => |enable_ansi_colors| { manager.log.printForLogLevelWithEnableAnsiColors(Output.errorWriter(), enable_ansi_colors) catch {}; diff --git a/src/install/lockfile.zig b/src/install/lockfile.zig index 3e89da50b7..fa7e39a01c 100644 --- 
a/src/install/lockfile.zig +++ b/src/install/lockfile.zig @@ -18,7 +18,7 @@ const logger = bun.logger; const js_parser = bun.js_parser; const Expr = @import("../js_ast.zig").Expr; -const json_parser = bun.JSON; +const JSON = bun.JSON; const JSPrinter = bun.js_printer; const linker = @import("../linker.zig"); @@ -3098,7 +3098,7 @@ pub const Package = extern struct { }; initializeStore(); - break :brk try json_parser.ParsePackageJSONUTF8( + break :brk try JSON.parsePackageJSONUTF8( &json_src, log, allocator, @@ -3945,7 +3945,7 @@ pub const Package = extern struct { comptime features: Features, ) !void { initializeStore(); - const json = json_parser.ParsePackageJSONUTF8AlwaysDecode(&source, log, allocator) catch |err| { + const json = JSON.parsePackageJSONUTF8AlwaysDecode(&source, log, allocator) catch |err| { switch (Output.enable_ansi_colors) { inline else => |enable_ansi_colors| { log.printForLogLevelWithEnableAnsiColors(Output.errorWriter(), enable_ansi_colors) catch {}; @@ -4338,7 +4338,7 @@ pub const Package = extern struct { }).unwrap(); const name_expr = workspace_json.root.get("name") orelse return error.MissingPackageName; - const name = name_expr.asStringCloned(allocator) orelse return error.MissingPackageName; + const name = try name_expr.asStringCloned(allocator) orelse return error.MissingPackageName; var entry = WorkspaceEntry{ .name = name, @@ -4346,7 +4346,7 @@ pub const Package = extern struct { }; debug("processWorkspaceName({s}) = {s}", .{ abs_package_json_path, entry.name }); if (workspace_json.root.get("version")) |version_expr| { - if (version_expr.asStringCloned(allocator)) |version| { + if (try version_expr.asStringCloned(allocator)) |version| { entry.version = version; } } @@ -4376,7 +4376,7 @@ pub const Package = extern struct { for (arr.slice()) |item| { // TODO: when does this get deallocated? 
- const input_path = item.asStringZ(allocator) orelse { + const input_path = try item.asStringZ(allocator) orelse { log.addErrorFmt(source, item.loc, allocator, \\Workspaces expects an array of strings, like: \\ "workspaces": [ @@ -5050,7 +5050,7 @@ pub const Package = extern struct { const value = prop.value.?; if (key.isString() and value.isString()) { var sfb = std.heap.stackFallback(1024, allocator); - const keyhash = key.asStringHash(sfb.get(), String.Builder.stringHash) orelse unreachable; + const keyhash = try key.asStringHash(sfb.get(), String.Builder.stringHash) orelse unreachable; const patch_path = string_builder.append(String, value.asString(allocator).?); lockfile.patched_dependencies.put(allocator, keyhash, .{ .path = patch_path }) catch unreachable; } diff --git a/src/install/migration.zig b/src/install/migration.zig index aa8043126e..6c44a9e59a 100644 --- a/src/install/migration.zig +++ b/src/install/migration.zig @@ -137,7 +137,7 @@ pub fn migrateNPMLockfile( Install.initializeStore(); const json_src = logger.Source.initPathString(abs_path, data); - const json = bun.JSON.ParseJSONUTF8(&json_src, log, allocator) catch return error.InvalidNPMLockfile; + const json = bun.JSON.parseUTF8(&json_src, log, allocator) catch return error.InvalidNPMLockfile; if (json.data != .e_object) { return error.InvalidNPMLockfile; diff --git a/src/install/npm.zig b/src/install/npm.zig index 77b7f55aa2..8352923ad6 100644 --- a/src/install/npm.zig +++ b/src/install/npm.zig @@ -1280,7 +1280,7 @@ pub const PackageManifest = struct { defer bun.JSAst.Stmt.Data.Store.memory_allocator.?.pop(); var arena = bun.ArenaAllocator.init(allocator); defer arena.deinit(); - const json = json_parser.ParseJSONUTF8( + const json = json_parser.parseUTF8( &source, log, arena.allocator(), diff --git a/src/js_ast.zig b/src/js_ast.zig index ffc73d4398..9a3651f87c 100644 --- a/src/js_ast.zig +++ b/src/js_ast.zig @@ -28,6 +28,7 @@ const js_lexer = @import("./js_lexer.zig"); const TypeScript = @import("./js_parser.zig").TypeScript; const ThreadlocalArena = @import("./mimalloc_arena.zig").Arena; const MimeType = bun.http.MimeType; +const OOM = bun.OOM; /// This is the index to the automatically-generated part containing code that /// calls "__export(exports, { ... getters ... })". 
This is used to generate @@ -1908,7 +1909,6 @@ pub const E = struct { pub const Rope = struct { head: Expr, next: ?*Rope = null, - const OOM = error{OutOfMemory}; pub fn append(this: *Rope, expr: Expr, allocator: std.mem.Allocator) OOM!*Rope { if (this.next) |next| { return try next.append(expr, allocator); @@ -2497,7 +2497,7 @@ pub const E = struct { strings.eqlComptimeUTF16(s.slice16()[0..value.len], value); } - pub fn string(s: *const String, allocator: std.mem.Allocator) !bun.string { + pub fn string(s: *const String, allocator: std.mem.Allocator) OOM!bun.string { if (s.isUTF8()) { return s.data; } else { @@ -2505,7 +2505,7 @@ pub const E = struct { } } - pub fn stringZ(s: *const String, allocator: std.mem.Allocator) !bun.stringZ { + pub fn stringZ(s: *const String, allocator: std.mem.Allocator) OOM!bun.stringZ { if (s.isUTF8()) { return allocator.dupeZ(u8, s.data); } else { @@ -2513,9 +2513,9 @@ pub const E = struct { } } - pub fn stringCloned(s: *const String, allocator: std.mem.Allocator) !bun.string { + pub fn stringCloned(s: *const String, allocator: std.mem.Allocator) OOM!bun.string { if (s.isUTF8()) { - return try allocator.dupe(u8, s.data); + return allocator.dupe(u8, s.data); } else { return strings.toUTF8Alloc(allocator, s.slice16()); } @@ -3349,7 +3349,7 @@ pub const Expr = struct { if (mime_type.category == .json) { var source = logger.Source.initPathString("fetch.json", bytes); - var out_expr = JSONParser.ParseJSONForMacro(&source, log, allocator) catch { + var out_expr = JSONParser.parseForMacro(&source, log, allocator) catch { return error.MacroFailed; }; out_expr.loc = loc; @@ -3424,10 +3424,54 @@ pub const Expr = struct { return this.data.toJS(allocator, globalObject, opts); } + pub inline fn isArray(this: *const Expr) bool { + return this.data == .e_array; + } + + pub inline fn isObject(this: *const Expr) bool { + return this.data == .e_object; + } + pub fn get(expr: *const Expr, name: string) ?Expr { return if (asProperty(expr, name)) |query| query.expr else null; } + pub fn getString(expr: *const Expr, allocator: std.mem.Allocator, name: string) OOM!?struct { string, logger.Loc } { + if (asProperty(expr, name)) |q| { + if (q.expr.asString(allocator)) |str| { + return .{ + str, + q.expr.loc, + }; + } + } + return null; + } + + pub fn getNumber(expr: *const Expr, name: string) ?struct { f64, logger.Loc } { + if (asProperty(expr, name)) |q| { + if (q.expr.asNumber()) |num| { + return .{ + num, + q.expr.loc, + }; + } + } + return null; + } + + pub fn getStringCloned(expr: *const Expr, allocator: std.mem.Allocator, name: string) OOM!?string { + return if (asProperty(expr, name)) |q| q.expr.asStringCloned(allocator) else null; + } + + pub fn getStringClonedZ(expr: *const Expr, allocator: std.mem.Allocator, name: string) OOM!?stringZ { + return if (asProperty(expr, name)) |q| q.expr.asStringZ(allocator) else null; + } + + pub fn getArray(expr: *const Expr, name: string) ?ArrayIterator { + return if (asProperty(expr, name)) |q| q.expr.asArray() else null; + } + pub fn getRope(self: *const Expr, rope: *const E.Object.Rope) ?E.Object.RopeQuery { if (self.get(rope.head.data.e_string.data)) |existing| { switch (existing.data) { @@ -3522,11 +3566,11 @@ pub const Expr = struct { else => return null, } } - pub inline fn asStringHash(expr: *const Expr, allocator: std.mem.Allocator, comptime hash_fn: *const fn (buf: []const u8) callconv(.Inline) u64) ?u64 { + pub inline fn asStringHash(expr: *const Expr, allocator: std.mem.Allocator, comptime hash_fn: *const fn (buf: []const u8) 
callconv(.Inline) u64) OOM!?u64 { switch (expr.data) { .e_string => |str| { if (str.isUTF8()) return hash_fn(str.data); - const utf8_str = str.string(allocator) catch return null; + const utf8_str = try str.string(allocator); defer allocator.free(utf8_str); return hash_fn(utf8_str); }, @@ -3535,18 +3579,18 @@ pub const Expr = struct { } } - pub inline fn asStringCloned(expr: *const Expr, allocator: std.mem.Allocator) ?string { + pub inline fn asStringCloned(expr: *const Expr, allocator: std.mem.Allocator) OOM!?string { switch (expr.data) { - .e_string => |str| return str.stringCloned(allocator) catch bun.outOfMemory(), - .e_utf8_string => |str| return allocator.dupe(u8, str.data) catch bun.outOfMemory(), + .e_string => |str| return try str.stringCloned(allocator), + .e_utf8_string => |str| return try allocator.dupe(u8, str.data), else => return null, } } - pub inline fn asStringZ(expr: *const Expr, allocator: std.mem.Allocator) ?stringZ { + pub inline fn asStringZ(expr: *const Expr, allocator: std.mem.Allocator) OOM!?stringZ { switch (expr.data) { - .e_string => |str| return str.stringZ(allocator) catch bun.outOfMemory(), - .e_utf8_string => |str| return allocator.dupeZ(u8, str.data) catch bun.outOfMemory(), + .e_string => |str| return try str.stringZ(allocator), + .e_utf8_string => |str| return try allocator.dupeZ(u8, str.data), else => return null, } } @@ -3559,6 +3603,12 @@ pub const Expr = struct { return expr.data.e_boolean.value; } + pub fn asNumber(expr: *const Expr) ?f64 { + if (expr.data != .e_number) return null; + + return expr.data.e_number.value; + } + pub const EFlags = enum { none, ts_decorator }; const Serializable = struct { diff --git a/src/js_printer.zig b/src/js_printer.zig index 026e8b30e9..5bc363937f 100644 --- a/src/js_printer.zig +++ b/src/js_printer.zig @@ -336,31 +336,6 @@ pub fn quoteForJSONBuffer(text: []const u8, bytes: *MutableString, comptime asci bytes.appendChar('"') catch unreachable; } -const JSONFormatter = struct { - input: []const u8, - - pub fn format(self: JSONFormatter, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void { - try writeJSONString(self.input, @TypeOf(writer), writer, .latin1); - } -}; - -const JSONFormatterUTF8 = struct { - input: []const u8, - - pub fn format(self: JSONFormatterUTF8, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void { - try writeJSONString(self.input, @TypeOf(writer), writer, .utf8); - } -}; - -/// Expects latin1 -pub fn formatJSONString(text: []const u8) JSONFormatter { - return .{ .input = text }; -} - -pub fn formatJSONStringUTF8(text: []const u8) JSONFormatterUTF8 { - return .{ .input = text }; -} - pub fn writeJSONString(input: []const u8, comptime Writer: type, writer: Writer, comptime encoding: strings.Encoding) !void { try writer.writeAll("\""); var text = input; diff --git a/src/json_parser.zig b/src/json_parser.zig index 56a5e6ac88..4998d86f0e 100644 --- a/src/json_parser.zig +++ b/src/json_parser.zig @@ -710,7 +710,7 @@ var empty_array_data = Expr.Data{ .e_array = &empty_array }; /// Parse JSON /// This leaves UTF-16 strings as UTF-16 strings /// The JavaScript Printer will handle escaping strings if necessary -pub fn ParseJSON( +pub fn parse( source: *const logger.Source, log: *logger.Log, allocator: std.mem.Allocator, @@ -743,7 +743,7 @@ pub fn ParseJSON( /// This eagerly transcodes UTF-16 strings into UTF-8 strings /// Use this when the text may need to be reprinted to disk as JSON (and not as JavaScript) /// Eagerly converting UTF-8 to UTF-16 can cause a 
performance issue -pub fn ParsePackageJSONUTF8( +pub fn parsePackageJSONUTF8( source: *const logger.Source, log: *logger.Log, allocator: std.mem.Allocator, @@ -779,7 +779,7 @@ pub fn ParsePackageJSONUTF8( return try parser.parseExpr(false, true); } -pub fn ParsePackageJSONUTF8AlwaysDecode( +pub fn parsePackageJSONUTF8AlwaysDecode( source: *const logger.Source, log: *logger.Log, allocator: std.mem.Allocator, @@ -820,7 +820,7 @@ const JsonResult = struct { indentation: Indentation = .{}, }; -pub fn ParsePackageJSONUTF8WithOpts( +pub fn parsePackageJSONUTF8WithOpts( source: *const logger.Source, log: *logger.Log, allocator: std.mem.Allocator, @@ -864,15 +864,15 @@ pub fn ParsePackageJSONUTF8WithOpts( /// This eagerly transcodes UTF-16 strings into UTF-8 strings /// Use this when the text may need to be reprinted to disk as JSON (and not as JavaScript) /// Eagerly converting UTF-8 to UTF-16 can cause a performance issue -pub fn ParseJSONUTF8( +pub fn parseUTF8( source: *const logger.Source, log: *logger.Log, allocator: std.mem.Allocator, ) !Expr { - return try ParseJSONUTF8Impl(source, log, allocator, false); + return try parseUTF8Impl(source, log, allocator, false); } -pub fn ParseJSONUTF8Impl( +pub fn parseUTF8Impl( source: *const logger.Source, log: *logger.Log, allocator: std.mem.Allocator, @@ -909,7 +909,7 @@ pub fn ParseJSONUTF8Impl( } return result; } -pub fn ParseJSONForMacro(source: *const logger.Source, log: *logger.Log, allocator: std.mem.Allocator) !Expr { +pub fn parseForMacro(source: *const logger.Source, log: *logger.Log, allocator: std.mem.Allocator) !Expr { switch (source.contents.len) { // This is to be consistent with how disabled JS files are handled 0 => { @@ -944,7 +944,7 @@ pub const JSONParseResult = struct { }; }; -pub fn ParseJSONForBundling(source: *const logger.Source, log: *logger.Log, allocator: std.mem.Allocator) !JSONParseResult { +pub fn parseForBundling(source: *const logger.Source, log: *logger.Log, allocator: std.mem.Allocator) !JSONParseResult { switch (source.contents.len) { // This is to be consistent with how disabled JS files are handled 0 => { @@ -973,7 +973,7 @@ pub fn ParseJSONForBundling(source: *const logger.Source, log: *logger.Log, allo // threadlocal var env_json_auto_quote_buffer: MutableString = undefined; // threadlocal var env_json_auto_quote_buffer_loaded: bool = false; -pub fn ParseEnvJSON(source: *const logger.Source, log: *logger.Log, allocator: std.mem.Allocator) !Expr { +pub fn parseEnvJSON(source: *const logger.Source, log: *logger.Log, allocator: std.mem.Allocator) !Expr { switch (source.contents.len) { // This is to be consistent with how disabled JS files are handled 0 => { @@ -1024,7 +1024,7 @@ pub fn ParseEnvJSON(source: *const logger.Source, log: *logger.Log, allocator: s } } -pub fn ParseTSConfig(source: *const logger.Source, log: *logger.Log, allocator: std.mem.Allocator, comptime force_utf8: bool) !Expr { +pub fn parseTSConfig(source: *const logger.Source, log: *logger.Log, allocator: std.mem.Allocator, comptime force_utf8: bool) !Expr { switch (source.contents.len) { // This is to be consistent with how disabled JS files are handled 0 => { @@ -1073,7 +1073,7 @@ fn expectPrintedJSON(_contents: string, expected: string) !void { "source.json", contents, ); - const expr = try ParseJSON(&source, &log, default_allocator); + const expr = try parse(&source, &log, default_allocator); if (log.msgs.items.len > 0) { Output.panic("--FAIL--\nExpr {s}\nLog: {s}\n--FAIL--", .{ expr, log.msgs.items[0].data.text }); diff --git
a/src/libarchive/libarchive-bindings.zig b/src/libarchive/libarchive-bindings.zig index 924cd7b031..d9bd69630a 100644 --- a/src/libarchive/libarchive-bindings.zig +++ b/src/libarchive/libarchive-bindings.zig @@ -1,14 +1,16 @@ +const std = @import("std"); const bun = @import("root").bun; -pub const wchar_t = u16; -pub const la_int64_t = i64; -pub const la_ssize_t = isize; -pub const struct_archive = opaque {}; -pub const struct_archive_entry = opaque {}; -pub const archive_entry = struct_archive_entry; +const wchar_t = u16; +const la_int64_t = i64; +const la_ssize_t = isize; +const struct_archive = opaque {}; +const struct_archive_entry = opaque {}; +const archive_entry = struct_archive_entry; const mode_t = bun.Mode; const FILE = @import("std").c.FILE; // const time_t = @import("std").c.time_t; const dev_t = @import("std").c.dev_t; +const OOM = bun.OOM; pub const FileType = enum(mode_t) { regular = 0o100000, @@ -25,111 +27,267 @@ pub const SymlinkType = enum(c_int) { file = 1, directory = 2, }; -pub const time_t = isize; -pub const ARCHIVE_VERSION_ONLY_STRING = "3.5.3dev"; -pub const ARCHIVE_VERSION_STRING = "libarchive " ++ ARCHIVE_VERSION_ONLY_STRING; -pub const ARCHIVE_EOF = @as(c_int, 1); -pub const ARCHIVE_OK = @as(c_int, 0); -pub const ARCHIVE_RETRY = -@as(c_int, 10); -pub const ARCHIVE_WARN = -@as(c_int, 20); -pub const ARCHIVE_FAILED = -@as(c_int, 25); -pub const ARCHIVE_FATAL = -@as(c_int, 30); -pub const ARCHIVE_FILTER_NONE = @as(c_int, 0); -pub const ARCHIVE_FILTER_GZIP = @as(c_int, 1); -pub const ARCHIVE_FILTER_BZIP2 = @as(c_int, 2); -pub const ARCHIVE_FILTER_COMPRESS = @as(c_int, 3); -pub const ARCHIVE_FILTER_PROGRAM = @as(c_int, 4); -pub const ARCHIVE_FILTER_LZMA = @as(c_int, 5); -pub const ARCHIVE_FILTER_XZ = @as(c_int, 6); -pub const ARCHIVE_FILTER_UU = @as(c_int, 7); -pub const ARCHIVE_FILTER_RPM = @as(c_int, 8); -pub const ARCHIVE_FILTER_LZIP = @as(c_int, 9); -pub const ARCHIVE_FILTER_LRZIP = @as(c_int, 10); -pub const ARCHIVE_FILTER_LZOP = @as(c_int, 11); -pub const ARCHIVE_FILTER_GRZIP = @as(c_int, 12); -pub const ARCHIVE_FILTER_LZ4 = @as(c_int, 13); -pub const ARCHIVE_FILTER_ZSTD = @as(c_int, 14); -pub const ARCHIVE_COMPRESSION_NONE = ARCHIVE_FILTER_NONE; -pub const ARCHIVE_COMPRESSION_GZIP = ARCHIVE_FILTER_GZIP; -pub const ARCHIVE_COMPRESSION_BZIP2 = ARCHIVE_FILTER_BZIP2; -pub const ARCHIVE_COMPRESSION_COMPRESS = ARCHIVE_FILTER_COMPRESS; -pub const ARCHIVE_COMPRESSION_PROGRAM = ARCHIVE_FILTER_PROGRAM; -pub const ARCHIVE_COMPRESSION_LZMA = ARCHIVE_FILTER_LZMA; -pub const ARCHIVE_COMPRESSION_XZ = ARCHIVE_FILTER_XZ; -pub const ARCHIVE_COMPRESSION_UU = ARCHIVE_FILTER_UU; -pub const ARCHIVE_COMPRESSION_RPM = ARCHIVE_FILTER_RPM; -pub const ARCHIVE_COMPRESSION_LZIP = ARCHIVE_FILTER_LZIP; -pub const ARCHIVE_COMPRESSION_LRZIP = ARCHIVE_FILTER_LRZIP; -pub const ARCHIVE_FORMAT_BASE_MASK = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xff0000, .hexadecimal); -pub const ARCHIVE_FORMAT_CPIO = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x10000, .hexadecimal); -pub const ARCHIVE_FORMAT_CPIO_POSIX = ARCHIVE_FORMAT_CPIO | @as(c_int, 1); -pub const ARCHIVE_FORMAT_CPIO_BIN_LE = ARCHIVE_FORMAT_CPIO | @as(c_int, 2); -pub const ARCHIVE_FORMAT_CPIO_BIN_BE = ARCHIVE_FORMAT_CPIO | @as(c_int, 3); -pub const ARCHIVE_FORMAT_CPIO_SVR4_NOCRC = ARCHIVE_FORMAT_CPIO | @as(c_int, 4); -pub const ARCHIVE_FORMAT_CPIO_SVR4_CRC = ARCHIVE_FORMAT_CPIO | @as(c_int, 5); -pub const ARCHIVE_FORMAT_CPIO_AFIO_LARGE = ARCHIVE_FORMAT_CPIO | @as(c_int, 6); -pub const ARCHIVE_FORMAT_CPIO_PWB 
= ARCHIVE_FORMAT_CPIO | @as(c_int, 7); -pub const ARCHIVE_FORMAT_SHAR = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x20000, .hexadecimal); -pub const ARCHIVE_FORMAT_SHAR_BASE = ARCHIVE_FORMAT_SHAR | @as(c_int, 1); -pub const ARCHIVE_FORMAT_SHAR_DUMP = ARCHIVE_FORMAT_SHAR | @as(c_int, 2); -pub const ARCHIVE_FORMAT_TAR = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x30000, .hexadecimal); -pub const ARCHIVE_FORMAT_TAR_USTAR = ARCHIVE_FORMAT_TAR | @as(c_int, 1); -pub const ARCHIVE_FORMAT_TAR_PAX_INTERCHANGE = ARCHIVE_FORMAT_TAR | @as(c_int, 2); -pub const ARCHIVE_FORMAT_TAR_PAX_RESTRICTED = ARCHIVE_FORMAT_TAR | @as(c_int, 3); -pub const ARCHIVE_FORMAT_TAR_GNUTAR = ARCHIVE_FORMAT_TAR | @as(c_int, 4); -pub const ARCHIVE_FORMAT_ISO9660 = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x40000, .hexadecimal); -pub const ARCHIVE_FORMAT_ISO9660_ROCKRIDGE = ARCHIVE_FORMAT_ISO9660 | @as(c_int, 1); -pub const ARCHIVE_FORMAT_ZIP = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x50000, .hexadecimal); -pub const ARCHIVE_FORMAT_EMPTY = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x60000, .hexadecimal); -pub const ARCHIVE_FORMAT_AR = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x70000, .hexadecimal); -pub const ARCHIVE_FORMAT_AR_GNU = ARCHIVE_FORMAT_AR | @as(c_int, 1); -pub const ARCHIVE_FORMAT_AR_BSD = ARCHIVE_FORMAT_AR | @as(c_int, 2); -pub const ARCHIVE_FORMAT_MTREE = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x80000, .hexadecimal); -pub const ARCHIVE_FORMAT_RAW = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x90000, .hexadecimal); -pub const ARCHIVE_FORMAT_XAR = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xA0000, .hexadecimal); -pub const ARCHIVE_FORMAT_LHA = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xB0000, .hexadecimal); -pub const ARCHIVE_FORMAT_CAB = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xC0000, .hexadecimal); -pub const ARCHIVE_FORMAT_RAR = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xD0000, .hexadecimal); -pub const ARCHIVE_FORMAT_7ZIP = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xE0000, .hexadecimal); -pub const ARCHIVE_FORMAT_WARC = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xF0000, .hexadecimal); -pub const ARCHIVE_FORMAT_RAR_V5 = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x100000, .hexadecimal); -pub const ARCHIVE_READ_FORMAT_CAPS_NONE = @as(c_int, 0); -pub const ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_DATA = @as(c_int, 1) << @as(c_int, 0); -pub const ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_METADATA = @as(c_int, 1) << @as(c_int, 1); -pub const ARCHIVE_READ_FORMAT_ENCRYPTION_UNSUPPORTED = -@as(c_int, 2); -pub const ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW = -@as(c_int, 1); -pub const ARCHIVE_EXTRACT_OWNER = @as(c_int, 0x0001); -pub const ARCHIVE_EXTRACT_PERM = @as(c_int, 0x0002); -pub const ARCHIVE_EXTRACT_TIME = @as(c_int, 0x0004); -pub const ARCHIVE_EXTRACT_NO_OVERWRITE = @as(c_int, 0x0008); -pub const ARCHIVE_EXTRACT_UNLINK = @as(c_int, 0x0010); -pub const ARCHIVE_EXTRACT_ACL = @as(c_int, 0x0020); -pub const ARCHIVE_EXTRACT_FFLAGS = @as(c_int, 0x0040); -pub const ARCHIVE_EXTRACT_XATTR = @as(c_int, 0x0080); -pub const ARCHIVE_EXTRACT_SECURE_SYMLINKS = @as(c_int, 0x0100); -pub const ARCHIVE_EXTRACT_SECURE_NODOTDOT = @as(c_int, 0x0200); -pub const ARCHIVE_EXTRACT_NO_AUTODIR = @as(c_int, 0x0400); -pub const ARCHIVE_EXTRACT_NO_OVERWRITE_NEWER = @as(c_int, 0x0800); -pub const 
ARCHIVE_EXTRACT_SPARSE = @as(c_int, 0x1000); -pub const ARCHIVE_EXTRACT_MAC_METADATA = @as(c_int, 0x2000); -pub const ARCHIVE_EXTRACT_NO_HFS_COMPRESSION = @as(c_int, 0x4000); -pub const ARCHIVE_EXTRACT_HFS_COMPRESSION_FORCED = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x8000, .hexadecimal); -pub const ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x10000, .hexadecimal); -pub const ARCHIVE_EXTRACT_CLEAR_NOCHANGE_FFLAGS = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x20000, .hexadecimal); -pub const ARCHIVE_EXTRACT_SAFE_WRITES = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x40000, .hexadecimal); -pub const ARCHIVE_READDISK_RESTORE_ATIME = @as(c_int, 0x0001); -pub const ARCHIVE_READDISK_HONOR_NODUMP = @as(c_int, 0x0002); -pub const ARCHIVE_READDISK_MAC_COPYFILE = @as(c_int, 0x0004); -pub const ARCHIVE_READDISK_NO_TRAVERSE_MOUNTS = @as(c_int, 0x0008); -pub const ARCHIVE_READDISK_NO_XATTR = @as(c_int, 0x0010); -pub const ARCHIVE_READDISK_NO_ACL = @as(c_int, 0x0020); -pub const ARCHIVE_READDISK_NO_FFLAGS = @as(c_int, 0x0040); -pub const ARCHIVE_MATCH_MTIME = @as(c_int, 0x0100); -pub const ARCHIVE_MATCH_CTIME = @as(c_int, 0x0200); -pub const ARCHIVE_MATCH_NEWER = @as(c_int, 0x0001); -pub const ARCHIVE_MATCH_OLDER = @as(c_int, 0x0002); -pub const ARCHIVE_MATCH_EQUAL = @as(c_int, 0x0010); +const time_t = isize; + +pub const Flags = struct { + pub const Extract = enum(c_int) { + owner = ARCHIVE_EXTRACT_OWNER, + perm = ARCHIVE_EXTRACT_PERM, + time = ARCHIVE_EXTRACT_TIME, + no_overwrite = ARCHIVE_EXTRACT_NO_OVERWRITE, + unlink = ARCHIVE_EXTRACT_UNLINK, + acl = ARCHIVE_EXTRACT_ACL, + fflags = ARCHIVE_EXTRACT_FFLAGS, + xattr = ARCHIVE_EXTRACT_XATTR, + secure_symlinks = ARCHIVE_EXTRACT_SECURE_SYMLINKS, + secure_nodotdot = ARCHIVE_EXTRACT_SECURE_NODOTDOT, + no_autodir = ARCHIVE_EXTRACT_NO_AUTODIR, + no_overwrite_newer = ARCHIVE_EXTRACT_NO_OVERWRITE_NEWER, + sparse = ARCHIVE_EXTRACT_SPARSE, + mac_metadata = ARCHIVE_EXTRACT_MAC_METADATA, + no_hfs_compression = ARCHIVE_EXTRACT_NO_HFS_COMPRESSION, + hfs_compression_forced = ARCHIVE_EXTRACT_HFS_COMPRESSION_FORCED, + secure_noabsolutepaths = ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS, + clear_nochange_fflags = ARCHIVE_EXTRACT_CLEAR_NOCHANGE_FFLAGS, + safe_writes = ARCHIVE_EXTRACT_SAFE_WRITES, + }; + + // Deprecated + // pub const Compression = enum(c_int) { + // none = ARCHIVE_COMPRESSION_NONE, + // gzip = ARCHIVE_COMPRESSION_GZIP, + // bzip2 = ARCHIVE_COMPRESSION_BZIP2, + // compress = ARCHIVE_COMPRESSION_COMPRESS, + // program = ARCHIVE_COMPRESSION_PROGRAM, + // lzma = ARCHIVE_COMPRESSION_LZMA, + // xz = ARCHIVE_COMPRESSION_XZ, + // uu = ARCHIVE_COMPRESSION_UU, + // rpm = ARCHIVE_COMPRESSION_RPM, + // lzip = ARCHIVE_COMPRESSION_LZIP, + // lrzip = ARCHIVE_COMPRESSION_LRZIP, + // }; + + pub const Format = enum(c_int) { + base_mask = ARCHIVE_FORMAT_BASE_MASK, + cpio = ARCHIVE_FORMAT_CPIO, + cpio_posix = ARCHIVE_FORMAT_CPIO_POSIX, + cpio_bin_le = ARCHIVE_FORMAT_CPIO_BIN_LE, + cpio_bin_be = ARCHIVE_FORMAT_CPIO_BIN_BE, + cpio_svr4_nocrc = ARCHIVE_FORMAT_CPIO_SVR4_NOCRC, + cpio_svr4_crc = ARCHIVE_FORMAT_CPIO_SVR4_CRC, + cpio_afio_large = ARCHIVE_FORMAT_CPIO_AFIO_LARGE, + cpio_pwb = ARCHIVE_FORMAT_CPIO_PWB, + shar = ARCHIVE_FORMAT_SHAR, + shar_base = ARCHIVE_FORMAT_SHAR_BASE, + shar_dump = ARCHIVE_FORMAT_SHAR_DUMP, + tar = ARCHIVE_FORMAT_TAR, + tar_ustar = ARCHIVE_FORMAT_TAR_USTAR, + tar_pax_interchange = ARCHIVE_FORMAT_TAR_PAX_INTERCHANGE, + tar_pax_restricted = 
ARCHIVE_FORMAT_TAR_PAX_RESTRICTED, + tar_gnutar = ARCHIVE_FORMAT_TAR_GNUTAR, + iso9660 = ARCHIVE_FORMAT_ISO9660, + iso9660_rockridge = ARCHIVE_FORMAT_ISO9660_ROCKRIDGE, + zip = ARCHIVE_FORMAT_ZIP, + empty = ARCHIVE_FORMAT_EMPTY, + ar = ARCHIVE_FORMAT_AR, + ar_gnu = ARCHIVE_FORMAT_AR_GNU, + ar_bsd = ARCHIVE_FORMAT_AR_BSD, + mtree = ARCHIVE_FORMAT_MTREE, + raw = ARCHIVE_FORMAT_RAW, + xar = ARCHIVE_FORMAT_XAR, + lha = ARCHIVE_FORMAT_LHA, + cab = ARCHIVE_FORMAT_CAB, + rar = ARCHIVE_FORMAT_RAR, + @"7zip" = ARCHIVE_FORMAT_7ZIP, + warc = ARCHIVE_FORMAT_WARC, + rar_v5 = ARCHIVE_FORMAT_RAR_V5, + }; + + pub const Filter = enum(c_int) { + none = ARCHIVE_FILTER_NONE, + gzip = ARCHIVE_FILTER_GZIP, + bzip2 = ARCHIVE_FILTER_BZIP2, + compress = ARCHIVE_FILTER_COMPRESS, + program = ARCHIVE_FILTER_PROGRAM, + lzma = ARCHIVE_FILTER_LZMA, + xz = ARCHIVE_FILTER_XZ, + uu = ARCHIVE_FILTER_UU, + rpm = ARCHIVE_FILTER_RPM, + lzip = ARCHIVE_FILTER_LZIP, + lrzip = ARCHIVE_FILTER_LRZIP, + lzop = ARCHIVE_FILTER_LZOP, + grzip = ARCHIVE_FILTER_GRZIP, + lz4 = ARCHIVE_FILTER_LZ4, + zstd = ARCHIVE_FILTER_ZSTD, + }; + + pub const EntryDigest = enum(c_int) { + md5 = ARCHIVE_ENTRY_DIGEST_MD5, + rmd160 = ARCHIVE_ENTRY_DIGEST_RMD160, + sha1 = ARCHIVE_ENTRY_DIGEST_SHA1, + sha256 = ARCHIVE_ENTRY_DIGEST_SHA256, + sha384 = ARCHIVE_ENTRY_DIGEST_SHA384, + sha512 = ARCHIVE_ENTRY_DIGEST_SHA512, + }; + + pub const EntryACL = enum(c_int) { + entry_acl_execute = ARCHIVE_ENTRY_ACL_EXECUTE, + write = ARCHIVE_ENTRY_ACL_WRITE, + read = ARCHIVE_ENTRY_ACL_READ, + read_data = ARCHIVE_ENTRY_ACL_READ_DATA, + list_directory = ARCHIVE_ENTRY_ACL_LIST_DIRECTORY, + write_data = ARCHIVE_ENTRY_ACL_WRITE_DATA, + add_file = ARCHIVE_ENTRY_ACL_ADD_FILE, + append_data = ARCHIVE_ENTRY_ACL_APPEND_DATA, + add_subdirectory = ARCHIVE_ENTRY_ACL_ADD_SUBDIRECTORY, + read_named_attrs = ARCHIVE_ENTRY_ACL_READ_NAMED_ATTRS, + write_named_attrs = ARCHIVE_ENTRY_ACL_WRITE_NAMED_ATTRS, + delete_child = ARCHIVE_ENTRY_ACL_DELETE_CHILD, + read_attributes = ARCHIVE_ENTRY_ACL_READ_ATTRIBUTES, + write_attributes = ARCHIVE_ENTRY_ACL_WRITE_ATTRIBUTES, + delete = ARCHIVE_ENTRY_ACL_DELETE, + read_acl = ARCHIVE_ENTRY_ACL_READ_ACL, + write_acl = ARCHIVE_ENTRY_ACL_WRITE_ACL, + write_owner = ARCHIVE_ENTRY_ACL_WRITE_OWNER, + synchronize = ARCHIVE_ENTRY_ACL_SYNCHRONIZE, + perms_posix1_e = ARCHIVE_ENTRY_ACL_PERMS_POSIX1E, + perms_nfs4 = ARCHIVE_ENTRY_ACL_PERMS_NFS4, + entry_inherited = ARCHIVE_ENTRY_ACL_ENTRY_INHERITED, + entry_file_inherit = ARCHIVE_ENTRY_ACL_ENTRY_FILE_INHERIT, + entry_directory_inherit = ARCHIVE_ENTRY_ACL_ENTRY_DIRECTORY_INHERIT, + entry_no_propagate_inherit = ARCHIVE_ENTRY_ACL_ENTRY_NO_PROPAGATE_INHERIT, + entry_inherit_only = ARCHIVE_ENTRY_ACL_ENTRY_INHERIT_ONLY, + entry_successful_access = ARCHIVE_ENTRY_ACL_ENTRY_SUCCESSFUL_ACCESS, + entry_failed_access = ARCHIVE_ENTRY_ACL_ENTRY_FAILED_ACCESS, + inheritance_nfs4 = ARCHIVE_ENTRY_ACL_INHERITANCE_NFS4, + type_access = ARCHIVE_ENTRY_ACL_TYPE_ACCESS, + type_default = ARCHIVE_ENTRY_ACL_TYPE_DEFAULT, + type_allow = ARCHIVE_ENTRY_ACL_TYPE_ALLOW, + type_deny = ARCHIVE_ENTRY_ACL_TYPE_DENY, + type_audit = ARCHIVE_ENTRY_ACL_TYPE_AUDIT, + type_alarm = ARCHIVE_ENTRY_ACL_TYPE_ALARM, + type_posix1_e = ARCHIVE_ENTRY_ACL_TYPE_POSIX1E, + type_nfs4 = ARCHIVE_ENTRY_ACL_TYPE_NFS4, + user = ARCHIVE_ENTRY_ACL_USER, + user_obj = ARCHIVE_ENTRY_ACL_USER_OBJ, + group = ARCHIVE_ENTRY_ACL_GROUP, + group_obj = ARCHIVE_ENTRY_ACL_GROUP_OBJ, + mask = ARCHIVE_ENTRY_ACL_MASK, + other = ARCHIVE_ENTRY_ACL_OTHER, + everyone = ARCHIVE_ENTRY_ACL_EVERYONE, + 
style_extra_id = ARCHIVE_ENTRY_ACL_STYLE_EXTRA_ID, + style_mark_default = ARCHIVE_ENTRY_ACL_STYLE_MARK_DEFAULT, + style_solaris = ARCHIVE_ENTRY_ACL_STYLE_SOLARIS, + style_separator_comma = ARCHIVE_ENTRY_ACL_STYLE_SEPARATOR_COMMA, + style_compact = ARCHIVE_ENTRY_ACL_STYLE_COMPACT, + }; +}; + +const ARCHIVE_VERSION_ONLY_STRING = "3.5.3dev"; +const ARCHIVE_VERSION_STRING = "libarchive " ++ ARCHIVE_VERSION_ONLY_STRING; +const ARCHIVE_EOF = @as(c_int, 1); +const ARCHIVE_OK = @as(c_int, 0); +const ARCHIVE_RETRY = -@as(c_int, 10); +const ARCHIVE_WARN = -@as(c_int, 20); +const ARCHIVE_FAILED = -@as(c_int, 25); +const ARCHIVE_FATAL = -@as(c_int, 30); +const ARCHIVE_FILTER_NONE = @as(c_int, 0); +const ARCHIVE_FILTER_GZIP = @as(c_int, 1); +const ARCHIVE_FILTER_BZIP2 = @as(c_int, 2); +const ARCHIVE_FILTER_COMPRESS = @as(c_int, 3); +const ARCHIVE_FILTER_PROGRAM = @as(c_int, 4); +const ARCHIVE_FILTER_LZMA = @as(c_int, 5); +const ARCHIVE_FILTER_XZ = @as(c_int, 6); +const ARCHIVE_FILTER_UU = @as(c_int, 7); +const ARCHIVE_FILTER_RPM = @as(c_int, 8); +const ARCHIVE_FILTER_LZIP = @as(c_int, 9); +const ARCHIVE_FILTER_LRZIP = @as(c_int, 10); +const ARCHIVE_FILTER_LZOP = @as(c_int, 11); +const ARCHIVE_FILTER_GRZIP = @as(c_int, 12); +const ARCHIVE_FILTER_LZ4 = @as(c_int, 13); +const ARCHIVE_FILTER_ZSTD = @as(c_int, 14); +// Deprecated +// pub const ARCHIVE_COMPRESSION_NONE = ARCHIVE_FILTER_NONE; +// pub const ARCHIVE_COMPRESSION_GZIP = ARCHIVE_FILTER_GZIP; +// pub const ARCHIVE_COMPRESSION_BZIP2 = ARCHIVE_FILTER_BZIP2; +// pub const ARCHIVE_COMPRESSION_COMPRESS = ARCHIVE_FILTER_COMPRESS; +// pub const ARCHIVE_COMPRESSION_PROGRAM = ARCHIVE_FILTER_PROGRAM; +// pub const ARCHIVE_COMPRESSION_LZMA = ARCHIVE_FILTER_LZMA; +// pub const ARCHIVE_COMPRESSION_XZ = ARCHIVE_FILTER_XZ; +// pub const ARCHIVE_COMPRESSION_UU = ARCHIVE_FILTER_UU; +// pub const ARCHIVE_COMPRESSION_RPM = ARCHIVE_FILTER_RPM; +// pub const ARCHIVE_COMPRESSION_LZIP = ARCHIVE_FILTER_LZIP; +// pub const ARCHIVE_COMPRESSION_LRZIP = ARCHIVE_FILTER_LRZIP; +const ARCHIVE_FORMAT_BASE_MASK = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xff0000, .hexadecimal); +const ARCHIVE_FORMAT_CPIO = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x10000, .hexadecimal); +const ARCHIVE_FORMAT_CPIO_POSIX = ARCHIVE_FORMAT_CPIO | @as(c_int, 1); +const ARCHIVE_FORMAT_CPIO_BIN_LE = ARCHIVE_FORMAT_CPIO | @as(c_int, 2); +const ARCHIVE_FORMAT_CPIO_BIN_BE = ARCHIVE_FORMAT_CPIO | @as(c_int, 3); +const ARCHIVE_FORMAT_CPIO_SVR4_NOCRC = ARCHIVE_FORMAT_CPIO | @as(c_int, 4); +const ARCHIVE_FORMAT_CPIO_SVR4_CRC = ARCHIVE_FORMAT_CPIO | @as(c_int, 5); +const ARCHIVE_FORMAT_CPIO_AFIO_LARGE = ARCHIVE_FORMAT_CPIO | @as(c_int, 6); +const ARCHIVE_FORMAT_CPIO_PWB = ARCHIVE_FORMAT_CPIO | @as(c_int, 7); +const ARCHIVE_FORMAT_SHAR = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x20000, .hexadecimal); +const ARCHIVE_FORMAT_SHAR_BASE = ARCHIVE_FORMAT_SHAR | @as(c_int, 1); +const ARCHIVE_FORMAT_SHAR_DUMP = ARCHIVE_FORMAT_SHAR | @as(c_int, 2); +const ARCHIVE_FORMAT_TAR = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x30000, .hexadecimal); +const ARCHIVE_FORMAT_TAR_USTAR = ARCHIVE_FORMAT_TAR | @as(c_int, 1); +const ARCHIVE_FORMAT_TAR_PAX_INTERCHANGE = ARCHIVE_FORMAT_TAR | @as(c_int, 2); +const ARCHIVE_FORMAT_TAR_PAX_RESTRICTED = ARCHIVE_FORMAT_TAR | @as(c_int, 3); +const ARCHIVE_FORMAT_TAR_GNUTAR = ARCHIVE_FORMAT_TAR | @as(c_int, 4); +const ARCHIVE_FORMAT_ISO9660 = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x40000, .hexadecimal); +const 
ARCHIVE_FORMAT_ISO9660_ROCKRIDGE = ARCHIVE_FORMAT_ISO9660 | @as(c_int, 1); +const ARCHIVE_FORMAT_ZIP = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x50000, .hexadecimal); +const ARCHIVE_FORMAT_EMPTY = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x60000, .hexadecimal); +const ARCHIVE_FORMAT_AR = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x70000, .hexadecimal); +const ARCHIVE_FORMAT_AR_GNU = ARCHIVE_FORMAT_AR | @as(c_int, 1); +const ARCHIVE_FORMAT_AR_BSD = ARCHIVE_FORMAT_AR | @as(c_int, 2); +const ARCHIVE_FORMAT_MTREE = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x80000, .hexadecimal); +const ARCHIVE_FORMAT_RAW = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x90000, .hexadecimal); +const ARCHIVE_FORMAT_XAR = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xA0000, .hexadecimal); +const ARCHIVE_FORMAT_LHA = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xB0000, .hexadecimal); +const ARCHIVE_FORMAT_CAB = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xC0000, .hexadecimal); +const ARCHIVE_FORMAT_RAR = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xD0000, .hexadecimal); +const ARCHIVE_FORMAT_7ZIP = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xE0000, .hexadecimal); +const ARCHIVE_FORMAT_WARC = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xF0000, .hexadecimal); +const ARCHIVE_FORMAT_RAR_V5 = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x100000, .hexadecimal); +const ARCHIVE_READ_FORMAT_CAPS_NONE = @as(c_int, 0); +const ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_DATA = @as(c_int, 1) << @as(c_int, 0); +const ARCHIVE_READ_FORMAT_CAPS_ENCRYPT_METADATA = @as(c_int, 1) << @as(c_int, 1); +const ARCHIVE_READ_FORMAT_ENCRYPTION_UNSUPPORTED = -@as(c_int, 2); +const ARCHIVE_READ_FORMAT_ENCRYPTION_DONT_KNOW = -@as(c_int, 1); +const ARCHIVE_EXTRACT_OWNER = @as(c_int, 0x0001); +const ARCHIVE_EXTRACT_PERM = @as(c_int, 0x0002); +const ARCHIVE_EXTRACT_TIME = @as(c_int, 0x0004); +const ARCHIVE_EXTRACT_NO_OVERWRITE = @as(c_int, 0x0008); +const ARCHIVE_EXTRACT_UNLINK = @as(c_int, 0x0010); +const ARCHIVE_EXTRACT_ACL = @as(c_int, 0x0020); +const ARCHIVE_EXTRACT_FFLAGS = @as(c_int, 0x0040); +const ARCHIVE_EXTRACT_XATTR = @as(c_int, 0x0080); +const ARCHIVE_EXTRACT_SECURE_SYMLINKS = @as(c_int, 0x0100); +const ARCHIVE_EXTRACT_SECURE_NODOTDOT = @as(c_int, 0x0200); +const ARCHIVE_EXTRACT_NO_AUTODIR = @as(c_int, 0x0400); +const ARCHIVE_EXTRACT_NO_OVERWRITE_NEWER = @as(c_int, 0x0800); +const ARCHIVE_EXTRACT_SPARSE = @as(c_int, 0x1000); +const ARCHIVE_EXTRACT_MAC_METADATA = @as(c_int, 0x2000); +const ARCHIVE_EXTRACT_NO_HFS_COMPRESSION = @as(c_int, 0x4000); +const ARCHIVE_EXTRACT_HFS_COMPRESSION_FORCED = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x8000, .hexadecimal); +const ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x10000, .hexadecimal); +const ARCHIVE_EXTRACT_CLEAR_NOCHANGE_FFLAGS = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x20000, .hexadecimal); +const ARCHIVE_EXTRACT_SAFE_WRITES = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x40000, .hexadecimal); +const ARCHIVE_READDISK_RESTORE_ATIME = @as(c_int, 0x0001); +const ARCHIVE_READDISK_HONOR_NODUMP = @as(c_int, 0x0002); +const ARCHIVE_READDISK_MAC_COPYFILE = @as(c_int, 0x0004); +const ARCHIVE_READDISK_NO_TRAVERSE_MOUNTS = @as(c_int, 0x0008); +const ARCHIVE_READDISK_NO_XATTR = @as(c_int, 0x0010); +const ARCHIVE_READDISK_NO_ACL = 
@as(c_int, 0x0020); +const ARCHIVE_READDISK_NO_FFLAGS = @as(c_int, 0x0040); +const ARCHIVE_MATCH_MTIME = @as(c_int, 0x0100); +const ARCHIVE_MATCH_CTIME = @as(c_int, 0x0200); +const ARCHIVE_MATCH_NEWER = @as(c_int, 0x0001); +const ARCHIVE_MATCH_OLDER = @as(c_int, 0x0002); +const ARCHIVE_MATCH_EQUAL = @as(c_int, 0x0010); pub const Archive = opaque { pub const Result = enum(i32) { @@ -176,7 +334,9 @@ pub const Archive = opaque { extern fn archive_error_string(*Archive) [*c]const u8; pub fn errorString(archive: *Archive) []const u8 { - return bun.sliceTo(archive_error_string(archive), 0); + const err_str = archive_error_string(archive); + if (err_str == null) return ""; + return bun.sliceTo(err_str, 0); } extern fn archive_write_new() *Archive; @@ -189,7 +349,7 @@ pub const Archive = opaque { return archive_write_close(archive); } - pub extern fn archive_write_finish(*Archive) Result; + extern fn archive_write_finish(*Archive) Result; pub fn writeFinish(archive: *Archive) Result { return archive_write_finish(archive); } @@ -199,7 +359,7 @@ pub const Archive = opaque { return archive_free(archive); } - pub extern fn archive_write_set_options(_a: *Archive, opts: [*c]const u8) Result; + extern fn archive_write_set_options(_a: *Archive, opts: [*c]const u8) Result; pub fn writeSetOptions(archive: *Archive, opts: [:0]const u8) Result { return archive_write_set_options(archive, opts); } @@ -209,46 +369,114 @@ pub const Archive = opaque { return archive_write_set_format_pax_restricted(archive); } - pub extern fn archive_write_set_format_gnutar(*Archive) Result; + extern fn archive_write_set_format_gnutar(*Archive) Result; pub fn writeSetFormatGnutar(archive: *Archive) Result { return archive_write_set_format_gnutar(archive); } - pub extern fn archive_write_set_format_7zip(*Archive) Result; + extern fn archive_write_set_format_7zip(*Archive) Result; pub fn writeSetFormat7zip(archive: *Archive) Result { return archive_write_set_format_7zip(archive); } - pub extern fn archive_write_set_format_pax(*Archive) Result; + extern fn archive_write_set_format_pax(*Archive) Result; pub fn writeSetFormatPax(archive: *Archive) Result { return archive_write_set_format_pax(archive); } - pub extern fn archive_write_set_format_ustar(*Archive) Result; + extern fn archive_write_set_format_ustar(*Archive) Result; pub fn writeSetFormatUstar(archive: *Archive) Result { return archive_write_set_format_ustar(archive); } - pub extern fn archive_write_set_format_zip(*Archive) Result; + extern fn archive_write_set_format_zip(*Archive) Result; pub fn writeSetFormatZip(archive: *Archive) Result { return archive_write_set_format_zip(archive); } - pub extern fn archive_write_set_format_shar(*Archive) Result; + extern fn archive_write_set_format_shar(*Archive) Result; pub fn writeSetFormatShar(archive: *Archive) Result { return archive_write_set_format_shar(archive); } - extern fn archive_write_set_compression_gzip(*Archive) Result; - pub fn writeSetCompressionGzip(archive: *Archive) Result { - return archive_write_set_compression_gzip(archive); + extern fn archive_write_set_format(*struct_archive, format_code: i32) Result; + pub fn writeSetFormat(archive: *Archive, format: Flags.Format) Result { + return archive_write_set_format(archive, @intFromEnum(format)); } + // deprecated + // + // extern fn archive_write_set_compression_gzip(*Archive) Result; + // pub fn writeSetCompressionGzip(archive: *Archive) Result { + // return archive_write_set_compression_gzip(archive); + // } + extern fn archive_write_add_filter_gzip(*Archive) Result; 
pub fn writeAddFilterGzip(archive: *Archive) Result { return archive_write_add_filter_gzip(archive); } + extern fn archive_write_add_filter(*Archive, filter_code: i32) Result; + pub fn writeAddFilter(archive: *Archive, filter: Flags.Filter) Result { + return archive_write_add_filter(archive, @intFromEnum(filter)); + } + extern fn archive_write_add_filter_by_name(*Archive, name: [*c]const u8) Result; + pub fn writeAddFilterByName(archive: *Archive, name: [:0]const u8) Result { + return archive_write_add_filter_by_name(archive, name.ptr); + } + extern fn archive_write_add_filter_b64encode(*Archive) Result; + pub fn writeAddFilterB64encode(archive: *Archive) Result { + return archive_write_add_filter_b64encode(archive); + } + // extern fn archive_write_add_filter_bzip2(*Archive) Result; + // pub fn writeAddFilterBzip2(archive: *Archive) Result { + // return archive_write_add_filter_bzip2(archive); + // } + extern fn archive_write_add_filter_compress(*Archive) Result; + pub fn writeAddFilterCompress(archive: *Archive) Result { + return archive_write_add_filter_compress(archive); + } + extern fn archive_write_add_filter_grzip(*Archive) Result; + pub fn writeAddFilterGrzip(archive: *Archive) Result { + return archive_write_add_filter_grzip(archive); + } + extern fn archive_write_add_filter_lrzip(*Archive) Result; + pub fn writeAddFilterLrzip(archive: *Archive) Result { + return archive_write_add_filter_lrzip(archive); + } + extern fn archive_write_add_filter_lz4(*Archive) Result; + pub fn writeAddFilterLz4(archive: *Archive) Result { + return archive_write_add_filter_lz4(archive); + } + extern fn archive_write_add_filter_lzip(*Archive) Result; + pub fn writeAddFilterLzip(archive: *Archive) Result { + return archive_write_add_filter_lzip(archive); + } + extern fn archive_write_add_filter_lzma(*Archive) Result; + pub fn writeAddFilterLzma(archive: *Archive) Result { + return archive_write_add_filter_lzma(archive); + } + extern fn archive_write_add_filter_lzop(*Archive) Result; + pub fn writeAddFilterLzop(archive: *Archive) Result { + return archive_write_add_filter_lzop(archive); + } + extern fn archive_write_add_filter_none(*Archive) Result; + pub fn writeAddFilterNone(archive: *Archive) Result { + return archive_write_add_filter_none(archive); + } + extern fn archive_write_add_filter_uuencode(*Archive) Result; + pub fn writeAddFilterUuencode(archive: *Archive) Result { + return archive_write_add_filter_uuencode(archive); + } + extern fn archive_write_add_filter_xz(*Archive) Result; + pub fn writeAddFilterXz(archive: *Archive) Result { + return archive_write_add_filter_xz(archive); + } + extern fn archive_write_add_filter_zstd(*Archive) Result; + pub fn writeAddFilterZstd(archive: *Archive) Result { + return archive_write_add_filter_zstd(archive); + } + extern fn archive_write_set_filter_option(*Archive, [*c]const u8, [*c]const u8, [*c]const u8) Result; pub fn writeSetFilterOption(archive: *Archive, m: ?[:0]const u8, o: [:0]const u8, v: [:0]const u8) Result { return archive_write_set_filter_option(archive, m orelse null, o, v); @@ -259,12 +487,12 @@ pub const Archive = opaque { return archive_write_open_filename(archive, filename); } - pub extern fn archive_write_open_fd(*Archive, _fd: c_int) Result; + extern fn archive_write_open_fd(*Archive, _fd: c_int) Result; pub fn writeOpenFd(archive: *Archive, fd: bun.FileDescriptor) Result { return archive_write_open_fd(archive, fd.cast()); } - pub extern fn archive_write_open_memory(*Archive, _buffer: ?*anyopaque, _buffSize: usize, _used: [*c]usize) 
Result; + extern fn archive_write_open_memory(*Archive, _buffer: ?*anyopaque, _buffSize: usize, _used: [*c]usize) Result; pub fn writeOpenMemory(archive: *Archive, buf: ?*anyopaque, buf_size: usize, used: *usize) Result { return archive_write_open_memory(archive, buf, buf_size, used); } @@ -279,23 +507,260 @@ pub const Archive = opaque { return archive_write_data(archive, data.ptr, data.len); } - pub extern fn archive_write_finish_entry(*Archive) Result; + extern fn archive_write_finish_entry(*Archive) Result; pub fn writeFinishEntry(archive: *Archive) Result { return archive_write_finish_entry(archive); } - pub extern fn archive_write_free(*Archive) Result; + extern fn archive_write_free(*Archive) Result; pub fn writeFree(archive: *Archive) Result { return archive_write_free(archive); } + extern fn archive_read_new() *Archive; + pub fn readNew() *Archive { + return archive_read_new(); + } + + extern fn archive_read_close(*Archive) Result; + pub fn readClose(archive: *Archive) Result { + return archive_read_close(archive); + } + + pub extern fn archive_read_free(*Archive) Result; + pub fn readFree(archive: *Archive) Result { + return archive_read_free(archive); + } + + pub extern fn archive_read_finish(*Archive) Result; + pub fn readFinish(archive: *Archive) Result { + return archive_read_finish(archive); + } + + // these are deprecated + // + // extern fn archive_read_support_compression_all(*Archive) Result; + // pub fn readSupportCompressionAll(archive: *Archive) Result { + // return archive_read_support_compression_all(archive); + // } + // extern fn archive_read_support_compression_bzip2(*Archive) Result; + // pub fn readSupportCompressionBzip2(archive: *Archive) Result { + // return archive_read_support_compression_bzip2(archive); + // } + // extern fn archive_read_support_compression_compress(*Archive) Result; + // pub fn readSupportCompressionCompress(archive: *Archive) Result { + // return archive_read_support_compression_compress(archive); + // } + // extern fn archive_read_support_compression_gzip(*Archive) Result; + // pub fn readSupportCompressionGzip(archive: *Archive) Result { + // return archive_read_support_compression_gzip(archive); + // } + // extern fn archive_read_support_compression_lzip(*Archive) Result; + // pub fn readSupportCompressionLzip(archive: *Archive) Result { + // return archive_read_support_compression_lzip(archive); + // } + // extern fn archive_read_support_compression_lzma(*Archive) Result; + // pub fn readSupportCompressionLzma(archive: *Archive) Result { + // return archive_read_support_compression_lzma(archive); + // } + // extern fn archive_read_support_compression_none(*Archive) Result; + // pub fn readSupportCompressionNone(archive: *Archive) Result { + // return archive_read_support_compression_none(archive); + // } + // extern fn archive_read_support_compression_rpm(*Archive) Result; + // pub fn readSupportCompressionRpm(archive: *Archive) Result { + // return archive_read_support_compression_rpm(archive); + // } + // extern fn archive_read_support_compression_uu(*Archive) Result; + // pub fn readSupportCompressionUu(archive: *Archive) Result { + // return archive_read_support_compression_uu(archive); + // } + // extern fn archive_read_support_compression_xz(*Archive) Result; + // pub fn readSupportCompressionXz(archive: *Archive) Result { + // return archive_read_support_compression_xz(archive); + // } + + extern fn archive_read_support_format_7zip(*Archive) Result; + pub fn readSupportFormat7zip(archive: *Archive) Result { + return 
archive_read_support_format_7zip(archive); + } + extern fn archive_read_support_format_all(*Archive) Result; + pub fn readSupportFormatAll(archive: *Archive) Result { + return archive_read_support_format_all(archive); + } + extern fn archive_read_support_format_ar(*Archive) Result; + pub fn readSupportFormatAr(archive: *Archive) Result { + return archive_read_support_format_ar(archive); + } + extern fn archive_read_support_format_by_code(*Archive, c_int) Result; + pub fn readSupportFormatByCode(archive: *Archive, code: i32) Result { + return archive_read_support_format_by_code(archive, code); + } + extern fn archive_read_support_format_cab(*Archive) Result; + pub fn readSupportFormatCab(archive: *Archive) Result { + return archive_read_support_format_cab(archive); + } + extern fn archive_read_support_format_cpio(*Archive) Result; + pub fn readSupportFormatCpio(archive: *Archive) Result { + return archive_read_support_format_cpio(archive); + } + extern fn archive_read_support_format_empty(*Archive) Result; + pub fn readSupportFormatEmpty(archive: *Archive) Result { + return archive_read_support_format_empty(archive); + } + extern fn archive_read_support_format_gnutar(*Archive) Result; + pub fn readSupportFormatGnutar(archive: *Archive) Result { + return archive_read_support_format_gnutar(archive); + } + extern fn archive_read_support_format_iso9660(*Archive) Result; + pub fn readSupportFormatIso9660(archive: *Archive) Result { + return archive_read_support_format_iso9660(archive); + } + extern fn archive_read_support_format_lha(*Archive) Result; + pub fn readSupportFormatLha(archive: *Archive) Result { + return archive_read_support_format_lha(archive); + } + extern fn archive_read_support_format_mtree(*Archive) Result; + pub fn readSupportFormatMtree(archive: *Archive) Result { + return archive_read_support_format_mtree(archive); + } + extern fn archive_read_support_format_rar(*Archive) Result; + pub fn readSupportFormatRar(archive: *Archive) Result { + return archive_read_support_format_rar(archive); + } + extern fn archive_read_support_format_rar5(*Archive) Result; + pub fn readSupportFormatRar5(archive: *Archive) Result { + return archive_read_support_format_rar5(archive); + } + extern fn archive_read_support_format_raw(*Archive) Result; + pub fn readSupportFormatRaw(archive: *Archive) Result { + return archive_read_support_format_raw(archive); + } + extern fn archive_read_support_format_tar(*Archive) Result; + pub fn readSupportFormatTar(archive: *Archive) Result { + return archive_read_support_format_tar(archive); + } + extern fn archive_read_support_format_warc(*Archive) Result; + pub fn readSupportFormatWarc(archive: *Archive) Result { + return archive_read_support_format_warc(archive); + } + extern fn archive_read_support_format_xar(*Archive) Result; + pub fn readSupportFormatXar(archive: *Archive) Result { + return archive_read_support_format_xar(archive); + } + extern fn archive_read_support_format_zip(*Archive) Result; + pub fn readSupportFormatZip(archive: *Archive) Result { + return archive_read_support_format_zip(archive); + } + extern fn archive_read_support_format_zip_streamable(*Archive) Result; + pub fn readSupportFormatZipStreamable(archive: *Archive) Result { + return archive_read_support_format_zip_streamable(archive); + } + extern fn archive_read_support_format_zip_seekable(*Archive) Result; + pub fn readSupportFormatZipSeekable(archive: *Archive) Result { + return archive_read_support_format_zip_seekable(archive); + } + + extern fn archive_read_set_options(*Archive, 
[*c]const u8) Result; + pub fn readSetOptions(archive: *Archive, opts: [:0]const u8) Result { + return archive_read_set_options(archive, opts.ptr); + } + + extern fn archive_read_open_memory(*Archive, ?*const anyopaque, usize) Result; + pub fn readOpenMemory(archive: *Archive, buf: []const u8) Result { + return archive_read_open_memory(archive, buf.ptr, buf.len); + } + + extern fn archive_read_next_header(*Archive, **Entry) Result; + pub fn readNextHeader(archive: *Archive, entry: **Entry) Result { + return archive_read_next_header(archive, entry); + } + extern fn archive_read_next_header2(*Archive, *Entry) Result; + pub fn readNextHeader2(archive: *Archive, entry: *Entry) Result { + return archive_read_next_header2(archive, entry); + } + + extern fn archive_read_data(*Archive, ?*anyopaque, usize) isize; + pub fn readData(archive: *Archive, buf: []u8) isize { + return archive_read_data(archive, buf.ptr, buf.len); + } + extern fn archive_read_data_into_fd(*Archive, fd: c_int) Result; + pub fn readDataIntoFd(archive: *Archive, fd: c_int) Result { + return archive_read_data_into_fd(archive, fd); + } + + extern fn archive_read_support_filter_all(*Archive) Result; + pub fn readSupportFilterAll(archive: *Archive) Result { + return archive_read_support_filter_all(archive); + } + extern fn archive_read_support_filter_by_code(*Archive, c_int) Result; + pub fn readSupportFilterByCode(archive: *Archive, code: i32) Result { + return archive_read_support_filter_by_code(archive, code); + } + // extern fn archive_read_support_filter_bzip2(*Archive) Result; + // pub fn readSupportFilterbZip2(archive: *Archive) Result { + // return archive_read_support_filter_bzip2(archive); + // } + extern fn archive_read_support_filter_compress(*Archive) Result; + pub fn readSupportFilterCompress(archive: *Archive) Result { + return archive_read_support_filter_compress(archive); + } + extern fn archive_read_support_filter_gzip(*Archive) Result; + pub fn readSupportFilterGzip(archive: *Archive) Result { + return archive_read_support_filter_gzip(archive); + } + extern fn archive_read_support_filter_grzip(*Archive) Result; + pub fn readSupportFilterGrzip(archive: *Archive) Result { + return archive_read_support_filter_grzip(archive); + } + extern fn archive_read_support_filter_lrzip(*Archive) Result; + pub fn readSupportFilterLrzip(archive: *Archive) Result { + return archive_read_support_filter_lrzip(archive); + } + extern fn archive_read_support_filter_lz4(*Archive) Result; + pub fn readSupportFilterLz4(archive: *Archive) Result { + return archive_read_support_filter_lz4(archive); + } + extern fn archive_read_support_filter_lzip(*Archive) Result; + pub fn readSupportFilterLzip(archive: *Archive) Result { + return archive_read_support_filter_lzip(archive); + } + extern fn archive_read_support_filter_lzma(*Archive) Result; + pub fn readSupportFilterLzma(archive: *Archive) Result { + return archive_read_support_filter_lzma(archive); + } + extern fn archive_read_support_filter_lzop(*Archive) Result; + pub fn readSupportFilterLzop(archive: *Archive) Result { + return archive_read_support_filter_lzop(archive); + } + extern fn archive_read_support_filter_none(*Archive) Result; + pub fn readSupportFilterNone(archive: *Archive) Result { + return archive_read_support_filter_none(archive); + } + extern fn archive_read_support_filter_rpm(*Archive) Result; + pub fn readSupportFilterRpm(archive: *Archive) Result { + return archive_read_support_filter_rpm(archive); + } + extern fn archive_read_support_filter_uu(*Archive) Result; + pub 
fn readSupportFilterUu(archive: *Archive) Result { + return archive_read_support_filter_uu(archive); + } + extern fn archive_read_support_filter_xz(*Archive) Result; + pub fn readSupportFilterXz(archive: *Archive) Result { + return archive_read_support_filter_xz(archive); + } + extern fn archive_read_support_filter_zstd(*Archive) Result; + pub fn readSupportFilterZstd(archive: *Archive) Result { + return archive_read_support_filter_zstd(archive); + } + pub const Entry = opaque { extern fn archive_entry_new() *Entry; pub fn new() *Entry { return archive_entry_new(); } - pub extern fn archive_entry_new2(*Archive) *Entry; + extern fn archive_entry_new2(*Archive) *Entry; pub fn new2(archive: *Archive) *Entry { return archive_entry_new2(archive); } @@ -306,41 +771,41 @@ pub const Archive = opaque { } extern fn archive_entry_set_pathname(*Entry, [*c]const u8) void; - pub fn setPathname(entry: *Entry, pathname: [:0]const u8) void { - archive_entry_set_pathname(entry, pathname); + pub fn setPathname(entry: *Entry, name: [:0]const u8) void { + archive_entry_set_pathname(entry, name); } extern fn archive_entry_set_pathname_utf8(*Entry, [*c]const u8) void; - pub fn setPathnameUtf8(entry: *Entry, pathname: [:0]const u8) void { - archive_entry_set_pathname_utf8(entry, pathname); + pub fn setPathnameUtf8(entry: *Entry, name: [:0]const u8) void { + archive_entry_set_pathname_utf8(entry, name); } extern fn archive_entry_copy_pathname(*Entry, [*c]const u8) void; - pub fn copyPathname(entry: *Entry, pathname: [:0]const u8) void { - return archive_entry_copy_pathname(entry, pathname); + pub fn copyPathname(entry: *Entry, name: [:0]const u8) void { + return archive_entry_copy_pathname(entry, name); } - pub extern fn archive_entry_copy_pathname_w(*Entry, [*c]const u16) void; - pub fn copyPathnameW(entry: *Entry, pathname: [:0]const u16) void { - return archive_entry_copy_pathname_w(entry, pathname); + extern fn archive_entry_copy_pathname_w(*Entry, [*c]const u16) void; + pub fn copyPathnameW(entry: *Entry, name: [:0]const u16) void { + return archive_entry_copy_pathname_w(entry, name); } extern fn archive_entry_set_size(*Entry, i64) void; - pub fn setSize(entry: *Entry, size: i64) void { - archive_entry_set_size(entry, size); + pub fn setSize(entry: *Entry, s: i64) void { + archive_entry_set_size(entry, s); } extern fn archive_entry_set_filetype(*Entry, c_uint) void; - pub fn setFiletype(entry: *Entry, filetype: u32) void { - archive_entry_set_filetype(entry, filetype); + pub fn setFiletype(entry: *Entry, @"type": u32) void { + archive_entry_set_filetype(entry, @"type"); } extern fn archive_entry_set_perm(*Entry, bun.Mode) void; - pub fn setPerm(entry: *Entry, perm: bun.Mode) void { - archive_entry_set_perm(entry, perm); + pub fn setPerm(entry: *Entry, p: bun.Mode) void { + archive_entry_set_perm(entry, p); } - pub extern fn archive_entry_set_mode(*Entry, bun.Mode) void; + extern fn archive_entry_set_mode(*Entry, bun.Mode) void; pub fn setMode(entry: *Entry, mode: bun.Mode) void { archive_entry_set_mode(entry, mode); } @@ -354,6 +819,175 @@ pub const Archive = opaque { pub fn clear(entry: *Entry) *Entry { return archive_entry_clear(entry); } + + extern fn archive_entry_pathname(*Entry) [*c]const u8; + pub fn pathname(entry: *Entry) [:0]const u8 { + return bun.sliceTo(archive_entry_pathname(entry), 0); + } + extern fn archive_entry_pathname_utf8(*Entry) [*c]const u8; + pub fn pathnameUtf8(entry: *Entry) [:0]const u8 { + return bun.sliceTo(archive_entry_pathname_utf8(entry), 0); + } + extern fn 
archive_entry_pathname_w(*Entry) [*c]const u16; + pub fn pathnameW(entry: *Entry) [:0]const u16 { + return bun.sliceTo(archive_entry_pathname_w(entry), 0); + } + extern fn archive_entry_filetype(*Entry) bun.Mode; + pub fn filetype(entry: *Entry) bun.Mode { + return archive_entry_filetype(entry); + } + extern fn archive_entry_perm(*Entry) bun.Mode; + pub fn perm(entry: *Entry) bun.Mode { + return archive_entry_perm(entry); + } + extern fn archive_entry_size(*Entry) i64; + pub fn size(entry: *Entry) i64 { + return archive_entry_size(entry); + } + extern fn archive_entry_symlink(*Entry) [*c]const u8; + pub fn symlink(entry: *Entry) [:0]const u8 { + return bun.sliceTo(archive_entry_symlink(entry), 0); + } + pub extern fn archive_entry_symlink_utf8(*Entry) [*c]const u8; + pub fn symlinkUtf8(entry: *Entry) [:0]const u8 { + return bun.sliceTo(archive_entry_symlink_utf8(entry), 0); + } + pub extern fn archive_entry_symlink_type(*Entry) SymlinkType; + pub fn symlinkType(entry: *Entry) SymlinkType { + return archive_entry_symlink_type(entry); + } + pub extern fn archive_entry_symlink_w(*Entry) [*c]const u16; + pub fn symlinkW(entry: *Entry) [:0]const u16 { + return bun.sliceTo(archive_entry_symlink_w(entry), 0); + } + }; + + pub const Iterator = struct { + archive: *Archive, + filter: std.EnumSet(std.fs.File.Kind), + + fn Result(comptime T: type) type { + return union(enum) { + err: struct { + archive: *Archive, + message: []const u8, + }, + result: T, + + pub fn err(arch: *Archive, msg: []const u8) @This() { + return .{ .err = .{ .message = msg, .archive = arch } }; + } + + pub fn res(value: T) @This() { + return .{ .result = value }; + } + }; + } + + pub fn init(tarball_bytes: []const u8) Iterator.Result(@This()) { + const Return = Iterator.Result(@This()); + + const archive = Archive.readNew(); + + switch (archive.readSupportFormatTar()) { + .failed, .fatal, .warn => { + return Return.err(archive, "failed to enable tar format support"); + }, + else => {}, + } + switch (archive.readSupportFormatGnutar()) { + .failed, .fatal, .warn => { + return Return.err(archive, "failed to enable gnutar format support"); + }, + else => {}, + } + switch (archive.readSupportFilterGzip()) { + .failed, .fatal, .warn => { + return Return.err(archive, "failed to enable support for gzip compression"); + }, + else => {}, + } + + switch (archive.readSetOptions("read_concatenated_archives")) { + .failed, .fatal, .warn => { + return Return.err(archive, "failed to set option `read_concatenated_archives`"); + }, + else => {}, + } + + switch (archive.readOpenMemory(tarball_bytes)) { + .failed, .fatal, .warn => { + return Return.err(archive, "failed to read tarball"); + }, + else => {}, + } + + return Return.res(.{ + .archive = archive, + .filter = std.EnumSet(std.fs.File.Kind).initEmpty(), + }); + } + + const NextEntry = struct { + entry: *Archive.Entry, + kind: std.fs.File.Kind, + + pub fn readEntryData(this: *const @This(), allocator: std.mem.Allocator, archive: *Archive) OOM!Iterator.Result([]const u8) { + const Return = Iterator.Result([]const u8); + const size = this.entry.size(); + if (size < 0) return Return.err(archive, "invalid archive entry size"); + + const buf = try allocator.alloc(u8, @intCast(size)); + + const read = archive.readData(buf); + if (read < 0) { + return Return.err(archive, "failed to read archive data"); + } + return Return.res(buf[0..@intCast(read)]); + } + }; + + pub fn next(this: *@This()) Iterator.Result(?NextEntry) { + const Return = Iterator.Result(?NextEntry); + + var entry: *Archive.Entry = 
undefined; + while (true) { + return switch (this.archive.readNextHeader(&entry)) { + .retry => continue, + .eof => Return.res(null), + .ok => { + const kind = bun.C.kindFromMode(entry.filetype()); + + if (this.filter.contains(kind)) continue; + + return Return.res(.{ + .entry = entry, + .kind = kind, + }); + }, + else => Return.err(this.archive, "failed to read archive header"), + }; + } + } + + pub fn deinit(this: *@This()) Iterator.Result(void) { + const Return = Iterator.Result(void); + + switch (this.archive.readClose()) { + .failed, .fatal, .warn => { + return Return.err(this.archive, "failed to close archive read"); + }, + else => {}, + } + switch (this.archive.readFree()) { + .failed, .fatal, .warn => { + return Return.err(this.archive, "failed to free archive read"); + }, + else => {}, + } + + return Return.res({}); + } }; }; @@ -366,57 +1000,10 @@ pub const archive_close_callback = *const fn (*struct_archive, *anyopaque) callc pub const archive_free_callback = *const fn (*struct_archive, *anyopaque) callconv(.C) c_int; pub const archive_switch_callback = *const fn (*struct_archive, *anyopaque, ?*anyopaque) callconv(.C) c_int; pub const archive_passphrase_callback = *const fn (*struct_archive, *anyopaque) callconv(.C) [*c]const u8; -pub extern fn archive_read_new() *struct_archive; -pub extern fn archive_read_support_compression_all(*struct_archive) c_int; -pub extern fn archive_read_support_compression_bzip2(*struct_archive) c_int; -pub extern fn archive_read_support_compression_compress(*struct_archive) c_int; -pub extern fn archive_read_support_compression_gzip(*struct_archive) c_int; -pub extern fn archive_read_support_compression_lzip(*struct_archive) c_int; -pub extern fn archive_read_support_compression_lzma(*struct_archive) c_int; -pub extern fn archive_read_support_compression_none(*struct_archive) c_int; pub extern fn archive_read_support_compression_program(*struct_archive, command: [*c]const u8) c_int; pub extern fn archive_read_support_compression_program_signature(*struct_archive, [*c]const u8, ?*const anyopaque, usize) c_int; -pub extern fn archive_read_support_compression_rpm(*struct_archive) c_int; -pub extern fn archive_read_support_compression_uu(*struct_archive) c_int; -pub extern fn archive_read_support_compression_xz(*struct_archive) c_int; -pub extern fn archive_read_support_filter_all(*struct_archive) c_int; -pub extern fn archive_read_support_filter_by_code(*struct_archive, c_int) c_int; -pub extern fn archive_read_support_filter_bzip2(*struct_archive) c_int; -pub extern fn archive_read_support_filter_compress(*struct_archive) c_int; -pub extern fn archive_read_support_filter_gzip(*struct_archive) c_int; -pub extern fn archive_read_support_filter_grzip(*struct_archive) c_int; -pub extern fn archive_read_support_filter_lrzip(*struct_archive) c_int; -pub extern fn archive_read_support_filter_lz4(*struct_archive) c_int; -pub extern fn archive_read_support_filter_lzip(*struct_archive) c_int; -pub extern fn archive_read_support_filter_lzma(*struct_archive) c_int; -pub extern fn archive_read_support_filter_lzop(*struct_archive) c_int; -pub extern fn archive_read_support_filter_none(*struct_archive) c_int; pub extern fn archive_read_support_filter_program(*struct_archive, command: [*c]const u8) c_int; pub extern fn archive_read_support_filter_program_signature(*struct_archive, [*c]const u8, ?*const anyopaque, usize) c_int; -pub extern fn archive_read_support_filter_rpm(*struct_archive) c_int; -pub extern fn archive_read_support_filter_uu(*struct_archive) c_int; 
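As a usage sketch (not part of this diff), the `Iterator` defined above could be consumed like so; only declarations visible in this diff are used, with `std.debug.print` standing in for real logging:

```zig
// A sketch of consuming Archive.Iterator; errors are collapsed into a
// single error.Fail for brevity.
fn listTarball(allocator: std.mem.Allocator, tarball: []const u8) !void {
    var iter = switch (Archive.Iterator.init(tarball)) {
        .err => |e| {
            std.debug.print("{s}: {s}\n", .{ e.message, e.archive.errorString() });
            return error.Fail;
        },
        .result => |it| it,
    };
    defer _ = iter.deinit();

    // `filter` holds the kinds to skip, so this drops directories and symlinks.
    iter.filter.insert(.directory);
    iter.filter.insert(.sym_link);

    while (true) {
        const item = switch (iter.next()) {
            .err => return error.Fail,
            .result => |r| r orelse break, // null signals EOF
        };
        const data = switch (try item.readEntryData(allocator, iter.archive)) {
            .err => return error.Fail,
            .result => |bytes| bytes,
        };
        defer allocator.free(data);
        std.debug.print("{s}: {d} bytes\n", .{ item.entry.pathname(), data.len });
    }
}
```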
-pub extern fn archive_read_support_filter_xz(*struct_archive) c_int; -pub extern fn archive_read_support_filter_zstd(*struct_archive) c_int; -pub extern fn archive_read_support_format_7zip(*struct_archive) c_int; -pub extern fn archive_read_support_format_all(*struct_archive) c_int; -pub extern fn archive_read_support_format_ar(*struct_archive) c_int; -pub extern fn archive_read_support_format_by_code(*struct_archive, c_int) c_int; -pub extern fn archive_read_support_format_cab(*struct_archive) c_int; -pub extern fn archive_read_support_format_cpio(*struct_archive) c_int; -pub extern fn archive_read_support_format_empty(*struct_archive) c_int; -pub extern fn archive_read_support_format_gnutar(*struct_archive) c_int; -pub extern fn archive_read_support_format_iso9660(*struct_archive) c_int; -pub extern fn archive_read_support_format_lha(*struct_archive) c_int; -pub extern fn archive_read_support_format_mtree(*struct_archive) c_int; -pub extern fn archive_read_support_format_rar(*struct_archive) c_int; -pub extern fn archive_read_support_format_rar5(*struct_archive) c_int; -pub extern fn archive_read_support_format_raw(*struct_archive) c_int; -pub extern fn archive_read_support_format_tar(*struct_archive) c_int; -pub extern fn archive_read_support_format_warc(*struct_archive) c_int; -pub extern fn archive_read_support_format_xar(*struct_archive) c_int; -pub extern fn archive_read_support_format_zip(*struct_archive) c_int; -pub extern fn archive_read_support_format_zip_streamable(*struct_archive) c_int; -pub extern fn archive_read_support_format_zip_seekable(*struct_archive) c_int; pub extern fn archive_read_set_format(*struct_archive, c_int) c_int; pub extern fn archive_read_append_filter(*struct_archive, c_int) c_int; pub extern fn archive_read_append_filter_program(*struct_archive, [*c]const u8) c_int; @@ -439,62 +1026,36 @@ pub extern fn archive_read_open_filename(*struct_archive, _filename: [*c]const u pub extern fn archive_read_open_filenames(*struct_archive, _filenames: [*c][*c]const u8, _block_size: usize) c_int; pub extern fn archive_read_open_filename_w(*struct_archive, _filename: [*c]const wchar_t, _block_size: usize) c_int; pub extern fn archive_read_open_file(*struct_archive, _filename: [*c]const u8, _block_size: usize) c_int; -pub extern fn archive_read_open_memory(*struct_archive, buff: ?*const anyopaque, size: usize) c_int; pub extern fn archive_read_open_memory2(a: *struct_archive, buff: ?*const anyopaque, size: usize, read_size: usize) c_int; pub extern fn archive_read_open_fd(*struct_archive, _fd: c_int, _block_size: usize) c_int; pub extern fn archive_read_open_FILE(*struct_archive, _file: [*c]FILE) c_int; -pub extern fn archive_read_next_header(*struct_archive, [*c]*struct_archive_entry) c_int; -pub extern fn archive_read_next_header2(*struct_archive, *struct_archive_entry) c_int; pub extern fn archive_read_header_position(*struct_archive) la_int64_t; pub extern fn archive_read_has_encrypted_entries(*struct_archive) c_int; pub extern fn archive_read_format_capabilities(*struct_archive) c_int; -pub extern fn archive_read_data(*struct_archive, ?*anyopaque, usize) la_ssize_t; pub extern fn archive_seek_data(*struct_archive, la_int64_t, c_int) la_int64_t; pub extern fn archive_read_data_block(a: *struct_archive, buff: [*c]*const anyopaque, size: [*c]usize, offset: [*c]la_int64_t) c_int; pub extern fn archive_read_data_skip(*struct_archive) c_int; -pub extern fn archive_read_data_into_fd(*struct_archive, fd: c_int) c_int; pub extern fn archive_read_set_format_option(_a: 
*struct_archive, m: [*c]const u8, o: [*c]const u8, v: [*c]const u8) c_int; pub extern fn archive_read_set_filter_option(_a: *struct_archive, m: [*c]const u8, o: [*c]const u8, v: [*c]const u8) c_int; -pub extern fn archive_read_set_option(_a: *struct_archive, m: [*c]const u8, o: [*c]const u8, v: [*c]const u8) c_int; -pub extern fn archive_read_set_options(_a: *struct_archive, opts: [*c]const u8) c_int; pub extern fn archive_read_add_passphrase(*struct_archive, [*c]const u8) c_int; pub extern fn archive_read_set_passphrase_callback(*struct_archive, client_data: ?*anyopaque, ?archive_passphrase_callback) c_int; pub extern fn archive_read_extract(*struct_archive, *struct_archive_entry, flags: c_int) c_int; pub extern fn archive_read_extract2(*struct_archive, *struct_archive_entry, *struct_archive) c_int; pub extern fn archive_read_extract_set_progress_callback(*struct_archive, _progress_func: ?*const fn (?*anyopaque) callconv(.C) void, _user_data: ?*anyopaque) void; pub extern fn archive_read_extract_set_skip_file(*struct_archive, la_int64_t, la_int64_t) void; -pub extern fn archive_read_close(*struct_archive) c_int; -pub extern fn archive_read_free(*struct_archive) c_int; -pub extern fn archive_read_finish(*struct_archive) c_int; pub extern fn archive_write_set_bytes_per_block(*struct_archive, bytes_per_block: c_int) c_int; pub extern fn archive_write_get_bytes_per_block(*struct_archive) c_int; pub extern fn archive_write_set_bytes_in_last_block(*struct_archive, bytes_in_last_block: c_int) c_int; pub extern fn archive_write_get_bytes_in_last_block(*struct_archive) c_int; pub extern fn archive_write_set_skip_file(*struct_archive, la_int64_t, la_int64_t) c_int; -pub extern fn archive_write_set_compression_bzip2(*struct_archive) c_int; -pub extern fn archive_write_set_compression_compress(*struct_archive) c_int; -pub extern fn archive_write_set_compression_lzip(*struct_archive) c_int; -pub extern fn archive_write_set_compression_lzma(*struct_archive) c_int; -pub extern fn archive_write_set_compression_none(*struct_archive) c_int; -pub extern fn archive_write_set_compression_program(*struct_archive, cmd: [*c]const u8) c_int; -pub extern fn archive_write_set_compression_xz(*struct_archive) c_int; -pub extern fn archive_write_add_filter(*struct_archive, filter_code: c_int) c_int; -pub extern fn archive_write_add_filter_by_name(*struct_archive, name: [*c]const u8) c_int; -pub extern fn archive_write_add_filter_b64encode(*struct_archive) c_int; -pub extern fn archive_write_add_filter_bzip2(*struct_archive) c_int; -pub extern fn archive_write_add_filter_compress(*struct_archive) c_int; -pub extern fn archive_write_add_filter_grzip(*struct_archive) c_int; -pub extern fn archive_write_add_filter_lrzip(*struct_archive) c_int; -pub extern fn archive_write_add_filter_lz4(*struct_archive) c_int; -pub extern fn archive_write_add_filter_lzip(*struct_archive) c_int; -pub extern fn archive_write_add_filter_lzma(*struct_archive) c_int; -pub extern fn archive_write_add_filter_lzop(*struct_archive) c_int; -pub extern fn archive_write_add_filter_none(*struct_archive) c_int; -pub extern fn archive_write_add_filter_program(*struct_archive, cmd: [*c]const u8) c_int; -pub extern fn archive_write_add_filter_uuencode(*struct_archive) c_int; -pub extern fn archive_write_add_filter_xz(*struct_archive) c_int; -pub extern fn archive_write_add_filter_zstd(*struct_archive) c_int; -pub extern fn archive_write_set_format(*struct_archive, format_code: c_int) c_int; +// Deprecated +// pub extern fn 
archive_write_set_compression_bzip2(*struct_archive) c_int; +// pub extern fn archive_write_set_compression_compress(*struct_archive) c_int; +// pub extern fn archive_write_set_compression_lzip(*struct_archive) c_int; +// pub extern fn archive_write_set_compression_lzma(*struct_archive) c_int; +// pub extern fn archive_write_set_compression_none(*struct_archive) c_int; +// pub extern fn archive_write_set_compression_program(*struct_archive, cmd: [*c]const u8) c_int; +// pub extern fn archive_write_set_compression_xz(*struct_archive) c_int; pub extern fn archive_write_set_format_by_name(*struct_archive, name: [*c]const u8) c_int; pub extern fn archive_write_set_format_ar_bsd(*struct_archive) c_int; pub extern fn archive_write_set_format_ar_svr4(*struct_archive) c_int; @@ -616,7 +1177,6 @@ pub extern fn archive_entry_dev(*struct_archive_entry) dev_t; pub extern fn archive_entry_dev_is_set(*struct_archive_entry) c_int; pub extern fn archive_entry_devmajor(*struct_archive_entry) dev_t; pub extern fn archive_entry_devminor(*struct_archive_entry) dev_t; -pub extern fn archive_entry_filetype(*struct_archive_entry) mode_t; pub extern fn archive_entry_fflags(*struct_archive_entry, [*c]u64, [*c]u64) void; pub extern fn archive_entry_fflags_text(*struct_archive_entry) [*c]const u8; pub extern fn archive_entry_gid(*struct_archive_entry) la_int64_t; @@ -634,22 +1194,13 @@ pub extern fn archive_entry_mtime(*struct_archive_entry) time_t; pub extern fn archive_entry_mtime_nsec(*struct_archive_entry) c_long; pub extern fn archive_entry_mtime_is_set(*struct_archive_entry) c_int; pub extern fn archive_entry_nlink(*struct_archive_entry) c_uint; -pub extern fn archive_entry_pathname(*struct_archive_entry) [*c]const u8; -pub extern fn archive_entry_pathname_utf8(*struct_archive_entry) [*c]const u8; -pub extern fn archive_entry_pathname_w(*struct_archive_entry) [*c]const wchar_t; -pub extern fn archive_entry_perm(*struct_archive_entry) mode_t; pub extern fn archive_entry_rdev(*struct_archive_entry) dev_t; pub extern fn archive_entry_rdevmajor(*struct_archive_entry) dev_t; pub extern fn archive_entry_rdevminor(*struct_archive_entry) dev_t; pub extern fn archive_entry_sourcepath(*struct_archive_entry) [*c]const u8; pub extern fn archive_entry_sourcepath_w(*struct_archive_entry) [*c]const wchar_t; -pub extern fn archive_entry_size(*struct_archive_entry) la_int64_t; pub extern fn archive_entry_size_is_set(*struct_archive_entry) c_int; pub extern fn archive_entry_strmode(*struct_archive_entry) [*c]const u8; -pub extern fn archive_entry_symlink(*struct_archive_entry) [*c]const u8; -pub extern fn archive_entry_symlink_utf8(*struct_archive_entry) [*c]const u8; -pub extern fn archive_entry_symlink_type(*struct_archive_entry) SymlinkType; -pub extern fn archive_entry_symlink_w(*struct_archive_entry) [*c]const wchar_t; pub extern fn archive_entry_uid(*struct_archive_entry) la_int64_t; pub extern fn archive_entry_uname(*struct_archive_entry) [*c]const u8; pub extern fn archive_entry_uname_utf8(*struct_archive_entry) [*c]const u8; diff --git a/src/libarchive/libarchive.zig b/src/libarchive/libarchive.zig index 2cc794de95..7765ab4e46 100644 --- a/src/libarchive/libarchive.zig +++ b/src/libarchive/libarchive.zig @@ -13,7 +13,7 @@ const stringZ = bun.stringZ; const default_allocator = bun.default_allocator; const C = bun.C; const std = @import("std"); -const struct_archive = lib.struct_archive; +const Archive = lib.Archive; const JSC = bun.JSC; pub const Seek = enum(c_int) { set = std.posix.SEEK_SET, @@ -21,168 +21,6 @@ pub 
const Seek = enum(c_int) { end = std.posix.SEEK_END, }; -pub const Flags = struct { - pub const Extract = enum(c_int) { - owner = lib.ARCHIVE_EXTRACT_OWNER, - perm = lib.ARCHIVE_EXTRACT_PERM, - time = lib.ARCHIVE_EXTRACT_TIME, - no_overwrite = lib.ARCHIVE_EXTRACT_NO_OVERWRITE, - unlink = lib.ARCHIVE_EXTRACT_UNLINK, - acl = lib.ARCHIVE_EXTRACT_ACL, - fflags = lib.ARCHIVE_EXTRACT_FFLAGS, - xattr = lib.ARCHIVE_EXTRACT_XATTR, - secure_symlinks = lib.ARCHIVE_EXTRACT_SECURE_SYMLINKS, - secure_nodotdot = lib.ARCHIVE_EXTRACT_SECURE_NODOTDOT, - no_autodir = lib.ARCHIVE_EXTRACT_NO_AUTODIR, - no_overwrite_newer = lib.ARCHIVE_EXTRACT_NO_OVERWRITE_NEWER, - sparse = lib.ARCHIVE_EXTRACT_SPARSE, - mac_metadata = lib.ARCHIVE_EXTRACT_MAC_METADATA, - no_hfs_compression = lib.ARCHIVE_EXTRACT_NO_HFS_COMPRESSION, - hfs_compression_forced = lib.ARCHIVE_EXTRACT_HFS_COMPRESSION_FORCED, - secure_noabsolutepaths = lib.ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS, - clear_nochange_fflags = lib.ARCHIVE_EXTRACT_CLEAR_NOCHANGE_FFLAGS, - safe_writes = lib.ARCHIVE_EXTRACT_SAFE_WRITES, - }; - - pub const Compression = enum(c_int) { - none = lib.ARCHIVE_COMPRESSION_NONE, - gzip = lib.ARCHIVE_COMPRESSION_GZIP, - bzip2 = lib.ARCHIVE_COMPRESSION_BZIP2, - compress = lib.ARCHIVE_COMPRESSION_COMPRESS, - program = lib.ARCHIVE_COMPRESSION_PROGRAM, - lzma = lib.ARCHIVE_COMPRESSION_LZMA, - xz = lib.ARCHIVE_COMPRESSION_XZ, - uu = lib.ARCHIVE_COMPRESSION_UU, - rpm = lib.ARCHIVE_COMPRESSION_RPM, - lzip = lib.ARCHIVE_COMPRESSION_LZIP, - lrzip = lib.ARCHIVE_COMPRESSION_LRZIP, - }; - - pub const Format = enum(c_int) { - base_mask = lib.ARCHIVE_FORMAT_BASE_MASK, - cpio = lib.ARCHIVE_FORMAT_CPIO, - cpio_posix = lib.ARCHIVE_FORMAT_CPIO_POSIX, - cpio_bin_le = lib.ARCHIVE_FORMAT_CPIO_BIN_LE, - cpio_bin_be = lib.ARCHIVE_FORMAT_CPIO_BIN_BE, - cpio_svr4_nocrc = lib.ARCHIVE_FORMAT_CPIO_SVR4_NOCRC, - cpio_svr4_crc = lib.ARCHIVE_FORMAT_CPIO_SVR4_CRC, - cpio_afio_large = lib.ARCHIVE_FORMAT_CPIO_AFIO_LARGE, - cpio_pwb = lib.ARCHIVE_FORMAT_CPIO_PWB, - shar = lib.ARCHIVE_FORMAT_SHAR, - shar_base = lib.ARCHIVE_FORMAT_SHAR_BASE, - shar_dump = lib.ARCHIVE_FORMAT_SHAR_DUMP, - tar = lib.ARCHIVE_FORMAT_TAR, - tar_ustar = lib.ARCHIVE_FORMAT_TAR_USTAR, - tar_pax_interchange = lib.ARCHIVE_FORMAT_TAR_PAX_INTERCHANGE, - tar_pax_restricted = lib.ARCHIVE_FORMAT_TAR_PAX_RESTRICTED, - tar_gnutar = lib.ARCHIVE_FORMAT_TAR_GNUTAR, - iso9660 = lib.ARCHIVE_FORMAT_ISO9660, - iso9660_rockridge = lib.ARCHIVE_FORMAT_ISO9660_ROCKRIDGE, - zip = lib.ARCHIVE_FORMAT_ZIP, - empty = lib.ARCHIVE_FORMAT_EMPTY, - ar = lib.ARCHIVE_FORMAT_AR, - ar_gnu = lib.ARCHIVE_FORMAT_AR_GNU, - ar_bsd = lib.ARCHIVE_FORMAT_AR_BSD, - mtree = lib.ARCHIVE_FORMAT_MTREE, - raw = lib.ARCHIVE_FORMAT_RAW, - xar = lib.ARCHIVE_FORMAT_XAR, - lha = lib.ARCHIVE_FORMAT_LHA, - cab = lib.ARCHIVE_FORMAT_CAB, - rar = lib.ARCHIVE_FORMAT_RAR, - @"7zip" = lib.ARCHIVE_FORMAT_7ZIP, - warc = lib.ARCHIVE_FORMAT_WARC, - rar_v5 = lib.ARCHIVE_FORMAT_RAR_V5, - }; - - pub const Filter = enum(c_int) { - none = lib.ARCHIVE_FILTER_NONE, - gzip = lib.ARCHIVE_FILTER_GZIP, - bzip2 = lib.ARCHIVE_FILTER_BZIP2, - compress = lib.ARCHIVE_FILTER_COMPRESS, - program = lib.ARCHIVE_FILTER_PROGRAM, - lzma = lib.ARCHIVE_FILTER_LZMA, - xz = lib.ARCHIVE_FILTER_XZ, - uu = lib.ARCHIVE_FILTER_UU, - rpm = lib.ARCHIVE_FILTER_RPM, - lzip = lib.ARCHIVE_FILTER_LZIP, - lrzip = lib.ARCHIVE_FILTER_LRZIP, - lzop = lib.ARCHIVE_FILTER_LZOP, - grzip = lib.ARCHIVE_FILTER_GRZIP, - lz4 = lib.ARCHIVE_FILTER_LZ4, - zstd = lib.ARCHIVE_FILTER_ZSTD, - }; - - pub const EntryDigest 
= enum(c_int) { - md5 = lib.ARCHIVE_ENTRY_DIGEST_MD5, - rmd160 = lib.ARCHIVE_ENTRY_DIGEST_RMD160, - sha1 = lib.ARCHIVE_ENTRY_DIGEST_SHA1, - sha256 = lib.ARCHIVE_ENTRY_DIGEST_SHA256, - sha384 = lib.ARCHIVE_ENTRY_DIGEST_SHA384, - sha512 = lib.ARCHIVE_ENTRY_DIGEST_SHA512, - }; - - pub const EntryACL = enum(c_int) { - entry_acl_execute = lib.ARCHIVE_ENTRY_ACL_EXECUTE, - write = lib.ARCHIVE_ENTRY_ACL_WRITE, - read = lib.ARCHIVE_ENTRY_ACL_READ, - read_data = lib.ARCHIVE_ENTRY_ACL_READ_DATA, - list_directory = lib.ARCHIVE_ENTRY_ACL_LIST_DIRECTORY, - write_data = lib.ARCHIVE_ENTRY_ACL_WRITE_DATA, - add_file = lib.ARCHIVE_ENTRY_ACL_ADD_FILE, - append_data = lib.ARCHIVE_ENTRY_ACL_APPEND_DATA, - add_subdirectory = lib.ARCHIVE_ENTRY_ACL_ADD_SUBDIRECTORY, - read_named_attrs = lib.ARCHIVE_ENTRY_ACL_READ_NAMED_ATTRS, - write_named_attrs = lib.ARCHIVE_ENTRY_ACL_WRITE_NAMED_ATTRS, - delete_child = lib.ARCHIVE_ENTRY_ACL_DELETE_CHILD, - read_attributes = lib.ARCHIVE_ENTRY_ACL_READ_ATTRIBUTES, - write_attributes = lib.ARCHIVE_ENTRY_ACL_WRITE_ATTRIBUTES, - delete = lib.ARCHIVE_ENTRY_ACL_DELETE, - read_acl = lib.ARCHIVE_ENTRY_ACL_READ_ACL, - write_acl = lib.ARCHIVE_ENTRY_ACL_WRITE_ACL, - write_owner = lib.ARCHIVE_ENTRY_ACL_WRITE_OWNER, - synchronize = lib.ARCHIVE_ENTRY_ACL_SYNCHRONIZE, - perms_posix1_e = lib.ARCHIVE_ENTRY_ACL_PERMS_POSIX1E, - perms_nfs4 = lib.ARCHIVE_ENTRY_ACL_PERMS_NFS4, - entry_inherited = lib.ARCHIVE_ENTRY_ACL_ENTRY_INHERITED, - entry_file_inherit = lib.ARCHIVE_ENTRY_ACL_ENTRY_FILE_INHERIT, - entry_directory_inherit = lib.ARCHIVE_ENTRY_ACL_ENTRY_DIRECTORY_INHERIT, - entry_no_propagate_inherit = lib.ARCHIVE_ENTRY_ACL_ENTRY_NO_PROPAGATE_INHERIT, - entry_inherit_only = lib.ARCHIVE_ENTRY_ACL_ENTRY_INHERIT_ONLY, - entry_successful_access = lib.ARCHIVE_ENTRY_ACL_ENTRY_SUCCESSFUL_ACCESS, - entry_failed_access = lib.ARCHIVE_ENTRY_ACL_ENTRY_FAILED_ACCESS, - inheritance_nfs4 = lib.ARCHIVE_ENTRY_ACL_INHERITANCE_NFS4, - type_access = lib.ARCHIVE_ENTRY_ACL_TYPE_ACCESS, - type_default = lib.ARCHIVE_ENTRY_ACL_TYPE_DEFAULT, - type_allow = lib.ARCHIVE_ENTRY_ACL_TYPE_ALLOW, - type_deny = lib.ARCHIVE_ENTRY_ACL_TYPE_DENY, - type_audit = lib.ARCHIVE_ENTRY_ACL_TYPE_AUDIT, - type_alarm = lib.ARCHIVE_ENTRY_ACL_TYPE_ALARM, - type_posix1_e = lib.ARCHIVE_ENTRY_ACL_TYPE_POSIX1E, - type_nfs4 = lib.ARCHIVE_ENTRY_ACL_TYPE_NFS4, - user = lib.ARCHIVE_ENTRY_ACL_USER, - user_obj = lib.ARCHIVE_ENTRY_ACL_USER_OBJ, - group = lib.ARCHIVE_ENTRY_ACL_GROUP, - group_obj = lib.ARCHIVE_ENTRY_ACL_GROUP_OBJ, - mask = lib.ARCHIVE_ENTRY_ACL_MASK, - other = lib.ARCHIVE_ENTRY_ACL_OTHER, - everyone = lib.ARCHIVE_ENTRY_ACL_EVERYONE, - style_extra_id = lib.ARCHIVE_ENTRY_ACL_STYLE_EXTRA_ID, - style_mark_default = lib.ARCHIVE_ENTRY_ACL_STYLE_MARK_DEFAULT, - style_solaris = lib.ARCHIVE_ENTRY_ACL_STYLE_SOLARIS, - style_separator_comma = lib.ARCHIVE_ENTRY_ACL_STYLE_SEPARATOR_COMMA, - style_compact = lib.ARCHIVE_ENTRY_ACL_STYLE_COMPACT, - }; -}; - -pub const Status = enum(c_int) { - eof = lib.ARCHIVE_EOF, - ok = lib.ARCHIVE_OK, - retry = lib.ARCHIVE_RETRY, - warn = lib.ARCHIVE_WARN, - failed = lib.ARCHIVE_FAILED, - fatal = lib.ARCHIVE_FATAL, -}; - pub const BufferReadStream = struct { const Stream = @This(); buf: []const u8, @@ -190,27 +28,27 @@ pub const BufferReadStream = struct { block_size: usize = 16384, - archive: *struct_archive, + archive: *Archive, reading: bool = false, pub fn init(this: *BufferReadStream, buf: []const u8) void { this.* = BufferReadStream{ .buf = buf, .pos = 0, - .archive = lib.archive_read_new(), + .archive = 
Archive.readNew(), .reading = false, }; } pub fn deinit(this: *BufferReadStream) void { - _ = lib.archive_read_close(this.archive); + _ = this.archive.readClose(); // don't free it if we never actually read it // if (this.reading) { // _ = lib.archive_read_free(this.archive); // } } - pub fn openRead(this: *BufferReadStream) c_int { + pub fn openRead(this: *BufferReadStream) Archive.Result { // lib.archive_read_set_open_callback(this.archive, this.); // _ = lib.archive_read_set_read_callback(this.archive, archive_read_callback); // _ = lib.archive_read_set_seek_callback(this.archive, archive_seek_callback); @@ -219,21 +57,21 @@ pub const BufferReadStream = struct { // // lib.archive_read_set_switch_callback(this.archive, this.archive_s); // _ = lib.archive_read_set_callback_data(this.archive, this); - _ = lib.archive_read_support_format_tar(this.archive); - _ = lib.archive_read_support_format_gnutar(this.archive); - _ = lib.archive_read_support_compression_gzip(this.archive); + _ = this.archive.readSupportFormatTar(); + _ = this.archive.readSupportFormatGnutar(); + _ = this.archive.readSupportFilterGzip(); // Ignore zeroed blocks in the archive, which occurs when multiple tar archives // have been concatenated together. // Without this option, only the contents of // the first concatenated archive would be read. - _ = lib.archive_read_set_options(this.archive, "read_concatenated_archives"); + _ = this.archive.readSetOptions("read_concatenated_archives"); // _ = lib.archive_read_support_filter_none(this.archive); - const rc = lib.archive_read_open_memory(this.archive, this.buf.ptr, this.buf.len); + const rc = this.archive.readOpenMemory(this.buf); - this.reading = rc > -1; + this.reading = @intFromEnum(rc) > -1; // _ = lib.archive_read_support_compression_all(this.archive); @@ -249,14 +87,14 @@ pub const BufferReadStream = struct { } pub fn archive_close_callback( - _: *struct_archive, + _: *Archive, _: *anyopaque, ) callconv(.C) c_int { return 0; } pub fn archive_read_callback( - _: *struct_archive, + _: *Archive, ctx_: *anyopaque, buffer: [*c]*const anyopaque, ) callconv(.C) lib.la_ssize_t { @@ -271,7 +109,7 @@ pub const BufferReadStream = struct { } pub fn archive_skip_callback( - _: *struct_archive, + _: *Archive, ctx_: *anyopaque, offset: lib.la_int64_t, ) callconv(.C) lib.la_int64_t { @@ -287,7 +125,7 @@ pub const BufferReadStream = struct { } pub fn archive_seek_callback( - _: *struct_archive, + _: *Archive, ctx_: *anyopaque, offset: lib.la_int64_t, whence: c_int, @@ -317,7 +155,7 @@ pub const BufferReadStream = struct { } // pub fn archive_write_callback( - // archive: *struct_archive, + // archive: *Archive, // ctx_: *anyopaque, // buffer: *const anyopaque, // len: usize, @@ -326,20 +164,20 @@ pub const BufferReadStream = struct { // } // pub fn archive_close_callback( - // archive: *struct_archive, + // archive: *Archive, // ctx_: *anyopaque, // ) callconv(.C) c_int { // var this = fromCtx(ctx_); // } // pub fn archive_free_callback( - // archive: *struct_archive, + // archive: *Archive, // ctx_: *anyopaque, // ) callconv(.C) c_int { // var this = fromCtx(ctx_); // } // pub fn archive_switch_callback( - // archive: *struct_archive, + // archive: *Archive, // ctx1: *anyopaque, // ctx2: *anyopaque, // ) callconv(.C) c_int { @@ -350,7 +188,7 @@ pub const BufferReadStream = struct { const Kind = std.fs.File.Kind; -pub const Archive = struct { +pub const Archiver = struct { // impl: *lib.archive = undefined, // buf: []const u8 = undefined, // dir: FileDescriptorType = 0, @@ -389,12 
+227,12 @@ pub const Archive = struct { pub fn getOverwritingFileList( file_buffer: []const u8, root: []const u8, - ctx: *Archive.Context, + ctx: *Archiver.Context, comptime FilePathAppender: type, appender: FilePathAppender, comptime depth_to_skip: usize, ) !void { - var entry: *lib.archive_entry = undefined; + var entry: *Archive.Entry = undefined; var stream: BufferReadStream = undefined; stream.init(file_buffer); @@ -413,17 +251,17 @@ pub const Archive = struct { }; loop: while (true) { - const r = @as(Status, @enumFromInt(lib.archive_read_next_header(archive, &entry))); + const r = archive.readNextHeader(&entry); switch (r) { - Status.eof => break :loop, - Status.retry => continue :loop, - Status.failed, Status.fatal => return error.Fail, + .eof => break :loop, + .retry => continue :loop, + .failed, .fatal => return error.Fail, else => { // do not use the utf8 name there // it will require us to pull in libiconv // though we should probably validate the utf8 here nonetheless - var pathname: [:0]const u8 = std.mem.sliceTo(lib.archive_entry_pathname(entry).?, 0); + var pathname = entry.pathname(); var tokenizer = std.mem.tokenize(u8, bun.asByteSlice(pathname), std.fs.path.sep_str); comptime var depth_i: usize = 0; inline while (depth_i < depth_to_skip) : (depth_i += 1) { @@ -434,7 +272,7 @@ pub const Archive = struct { pathname = std.mem.sliceTo(pathname_.ptr[0..pathname_.len :0], 0); const dirname = std.mem.trim(u8, std.fs.path.dirname(bun.asByteSlice(pathname)) orelse "", std.fs.path.sep_str); - const size = @as(usize, @intCast(@max(lib.archive_entry_size(entry), 0))); + const size: usize = @intCast(@max(entry.size(), 0)); if (size > 0) { var opened = dir.openFileZ(pathname, .{ .mode = .write_only }) catch continue :loop; defer opened.close(); @@ -479,12 +317,12 @@ pub const Archive = struct { pub fn extractToDir( file_buffer: []const u8, dir: std.fs.Dir, - ctx: ?*Archive.Context, + ctx: ?*Archiver.Context, comptime ContextType: type, appender: ContextType, options: ExtractOptions, ) !u32 { - var entry: *lib.archive_entry = undefined; + var entry: *Archive.Entry = undefined; var stream: BufferReadStream = undefined; stream.init(file_buffer); @@ -497,12 +335,12 @@ pub const Archive = struct { var normalized_buf: bun.OSPathBuffer = undefined; loop: while (true) { - const r: Status = @enumFromInt(lib.archive_read_next_header(archive, &entry)); + const r = archive.readNextHeader(&entry); switch (r) { - Status.eof => break :loop, - Status.retry => continue :loop, - Status.failed, Status.fatal => return error.Fail, + .eof => break :loop, + .retry => continue :loop, + .failed, .fatal => return error.Fail, else => { // TODO: // Due to path separator replacement and other copies that happen internally, libarchive changes the @@ -513,9 +351,9 @@ pub const Archive = struct { // Ideally, we find a way to tell libarchive to not convert the strings to wide characters and also to not // replace path separators. We can do both of these with our own normalization and utf8/utf16 string conversion code. 
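                    // (Concretely: the Windows branch below reads the UTF-16 copy via
                    // entry.pathnameW() to avoid a lossy narrow conversion, while the
                    // POSIX branch takes the raw bytes from entry.pathname() unchanged.)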
var pathname: bun.OSPathSliceZ = if (comptime Environment.isWindows) - std.mem.sliceTo(lib.archive_entry_pathname_w(entry), 0) + entry.pathnameW() else - std.mem.sliceTo(lib.archive_entry_pathname(entry), 0); + entry.pathname(); if (comptime ContextType != void and @hasDecl(std.meta.Child(ContextType), "onFirstDirectoryName")) { if (appender.needs_first_dirname) { @@ -531,7 +369,7 @@ pub const Archive = struct { } } - const kind = C.kindFromMode(lib.archive_entry_filetype(entry)); + const kind = C.kindFromMode(entry.filetype()); if (options.npm) { // - ignore entries other than files (`true` can only be returned if type is file) @@ -584,8 +422,8 @@ pub const Archive = struct { count += 1; switch (kind) { - Kind.directory => { - var mode = @as(i32, @intCast(lib.archive_entry_perm(entry))); + .directory => { + var mode = @as(i32, @intCast(entry.perm())); // if dirs are readable, then they should be listable // https://github.com/npm/node-tar/blob/main/lib/mode-fix.js @@ -609,24 +447,22 @@ pub const Archive = struct { }; } }, - Kind.sym_link => { - const link_target = lib.archive_entry_symlink(entry).?; + .sym_link => { + const link_target = entry.symlink(); if (Environment.isPosix) { - std.posix.symlinkatZ(link_target, dir_fd, path) catch |err| brk: { + bun.sys.symlinkat(link_target, bun.toFD(dir_fd), path).unwrap() catch |err| brk: { switch (err) { - error.AccessDenied, error.FileNotFound => { + error.EPERM, error.ENOENT => { dir.makePath(std.fs.path.dirname(path_slice) orelse return err) catch {}; - break :brk try std.posix.symlinkatZ(link_target, dir_fd, path); - }, - else => { - return err; + break :brk try bun.sys.symlinkat(link_target, bun.toFD(dir_fd), path).unwrap(); }, + else => return err, } }; } }, - Kind.file => { - const mode: bun.Mode = if (comptime Environment.isWindows) 0 else @intCast(lib.archive_entry_perm(entry)); + .file => { + const mode: bun.Mode = if (comptime Environment.isWindows) 0 else @intCast(entry.perm()); const file_handle_native = brk: { if (Environment.isWindows) { @@ -683,8 +519,7 @@ pub const Archive = struct { _ = bun.sys.close(file_handle); }; - const entry_size = @max(lib.archive_entry_size(entry), 0); - const size = @as(usize, @intCast(entry_size)); + const size: usize = @intCast(@max(entry.size(), 0)); if (size > 0) { if (ctx) |ctx_| { const hash: u64 = if (ctx_.pluckers.len > 0) @@ -703,7 +538,7 @@ pub const Archive = struct { if (plucker_.filename_hash == hash) { try plucker_.contents.inflate(size); plucker_.contents.list.expandToCapacity(); - const read = lib.archive_read_data(archive, plucker_.contents.list.items.ptr, size); + const read = archive.readData(plucker_.contents.list.items); try plucker_.contents.inflate(@as(usize, @intCast(read))); plucker_.found = read > 0; plucker_.fd = file_handle; @@ -719,17 +554,17 @@ pub const Archive = struct { C.preallocate_file( file_handle.cast(), 0, - entry_size, + @intCast(size), ) catch {}; } } var retries_remaining: u8 = 5; possibly_retry: while (retries_remaining != 0) : (retries_remaining -= 1) { - switch (lib.archive_read_data_into_fd(archive, bun.uvfdcast(file_handle))) { - lib.ARCHIVE_EOF => break :loop, - lib.ARCHIVE_OK => break :possibly_retry, - lib.ARCHIVE_RETRY => { + switch (archive.readDataIntoFd(bun.uvfdcast(file_handle))) { + .eof => break :loop, + .ok => break :possibly_retry, + .retry => { if (options.log) { Output.err("libarchive error", "extracting {}, retry {d} / {d}", .{ bun.fmt.fmtOSPath(path_slice, .{}), @@ -764,7 +599,7 @@ pub const Archive = struct { pub fn extractToDisk( 
    file_buffer: []const u8,
    root: []const u8,
-   ctx: ?*Archive.Context,
+   ctx: ?*Archiver.Context,
    comptime FilePathAppender: type,
    appender: FilePathAppender,
    comptime options: ExtractOptions,
diff --git a/src/open.zig b/src/open.zig
index bb83d9b60d..c14c2895c9 100644
--- a/src/open.zig
+++ b/src/open.zig
@@ -11,7 +11,7 @@ const C = bun.C;
 const std = @import("std");
 const DotEnv = @import("env_loader.zig");

-const opener = switch (@import("builtin").target.os.tag) {
+pub const opener = switch (@import("builtin").target.os.tag) {
     .macos => "/usr/bin/open",
     .windows => "start",
     else => "xdg-open",
diff --git a/src/sourcemap/sourcemap.zig b/src/sourcemap/sourcemap.zig
index 87cdd0558e..dc5cbdb8ac 100644
--- a/src/sourcemap/sourcemap.zig
+++ b/src/sourcemap/sourcemap.zig
@@ -132,7 +132,7 @@ pub fn parseJSON(
         bun.JSAst.Stmt.Data.Store.reset();
     }
     debug("parse (JSON, {d} bytes)", .{source.len});
-    var json = bun.JSON.ParseJSON(&json_src, &log, arena, false) catch {
+    var json = bun.JSON.parse(&json_src, &log, arena, false) catch {
         return error.InvalidJSON;
     };
diff --git a/src/sql/postgres.zig b/src/sql/postgres.zig
index dd90a550e1..93168b63e5 100644
--- a/src/sql/postgres.zig
+++ b/src/sql/postgres.zig
@@ -1917,7 +1917,7 @@ pub const types = struct {
             defer value.deinit();
             var str = bun.String.fromUTF8(value.slice());
             defer str.deref();
-            const parse_result = JSValue.parseJSON(str.toJS(globalObject), globalObject);
+            const parse_result = JSValue.parse(str.toJS(globalObject), globalObject);
             if (parse_result.isAnyError()) {
                 globalObject.throwValue(parse_result);
                 return error.JSError;
diff --git a/src/string_immutable.zig b/src/string_immutable.zig
index 191d5b97c7..da92209a5e 100644
--- a/src/string_immutable.zig
+++ b/src/string_immutable.zig
@@ -216,6 +216,83 @@ pub fn isNPMPackageName(target: string) bool {
     return !scoped or slash_index > 0 and slash_index + 1 < target.len;
 }
+pub fn startsWithUUID(str: string) bool {
+    const uuid_len = 36;
+    if (str.len < uuid_len) return false;
+    for (0..8) |i| {
+        switch (str[i]) {
+            '0'...'9', 'a'...'f', 'A'...'F' => {},
+            else => return false,
+        }
+    }
+    if (str[8] != '-') return false;
+    for (9..13) |i| {
+        switch (str[i]) {
+            '0'...'9', 'a'...'f', 'A'...'F' => {},
+            else => return false,
+        }
+    }
+    if (str[13] != '-') return false;
+    for (14..18) |i| {
+        switch (str[i]) {
+            '0'...'9', 'a'...'f', 'A'...'F' => {},
+            else => return false,
+        }
+    }
+    if (str[18] != '-') return false;
+    for (19..23) |i| {
+        switch (str[i]) {
+            '0'...'9', 'a'...'f', 'A'...'F' => {},
+            else => return false,
+        }
+    }
+    if (str[23] != '-') return false;
+    for (24..36) |i| {
+        switch (str[i]) {
+            '0'...'9', 'a'...'f', 'A'...'F' => {},
+            else => return false,
+        }
+    }
+    return true;
+}
+
+/// https://github.com/npm/cli/blob/63d6a732c3c0e9c19fd4d147eaa5cc27c29b168d/node_modules/%40npmcli/redact/lib/matchers.js#L7
+/// /\b(npms?_)[a-zA-Z0-9]{36,48}\b/gi
+/// Returns the length of the secret if one exists.
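(Hypothetically, per the rule documented above: `startsWithNpmSecret("npm_" ++ ("a" ** 36))` would return 40, the full token length, while any candidate shorter than the 36-character minimum after its prefix returns 0.)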
+pub fn startsWithNpmSecret(str: string) u8 { + if (str.len < "npm_".len + 36) return 0; + + if (!strings.hasPrefixCaseInsensitive(str, "npm")) return 0; + + var i: u8 = "npm".len; + + if (str[i] == '_') { + i += 1; + } else if (str[i] == 's' or str[i] == 'S') { + i += 1; + if (str[i] != '_') return 0; + i += 1; + } else { + return 0; + } + + const min_len = i + 36; + const max_len = i + 48; + + while (i < max_len) : (i += 1) { + if (i == str.len) { + return if (i >= min_len) i else 0; + } + + switch (str[i]) { + '0'...'9', 'a'...'z', 'A'...'Z' => {}, + else => return if (i >= min_len) i else 0, + } + } + + return i; +} + pub fn indexAnyComptime(target: string, comptime chars: string) ?usize { for (target, 0..) |parent, i| { inline for (chars) |char| { @@ -919,6 +996,31 @@ pub fn eqlCaseInsensitiveASCII(a: string, b: string, comptime check_len: bool) b return bun.C.strncasecmp(a.ptr, b.ptr, a.len) == 0; } +pub fn eqlCaseInsensitiveT(comptime T: type, a: []const T, b: []const u8) bool { + if (a.len != b.len or a.len == 0) return false; + if (comptime T == u8) return eqlCaseInsensitiveASCIIIgnoreLength(a, b); + + for (a, b) |c, d| { + switch (c) { + 'a'...'z' => if (c != d and c & 0b11011111 != d) return false, + 'A'...'Z' => if (c != d and c | 0b00100000 != d) return false, + else => if (c != d) return false, + } + } + + return true; +} + +pub fn hasPrefixCaseInsensitiveT(comptime T: type, str: []const T, prefix: []const u8) bool { + if (str.len < prefix.len) return false; + + return eqlCaseInsensitiveT(T, str[0..prefix.len], prefix); +} + +pub fn hasPrefixCaseInsensitive(str: []const u8, prefix: []const u8) bool { + return hasPrefixCaseInsensitiveT(u8, str, prefix); +} + pub fn eqlLong(a_str: string, b_str: string, comptime check_len: bool) bool { const len = b_str.len; diff --git a/src/string_mutable.zig b/src/string_mutable.zig index c894eaf2e9..41d7ca0813 100644 --- a/src/string_mutable.zig +++ b/src/string_mutable.zig @@ -9,6 +9,7 @@ const js_lexer = bun.js_lexer; const string = bun.string; const stringZ = bun.stringZ; const CodePoint = bun.CodePoint; +const OOM = bun.OOM; pub const MutableString = struct { allocator: std.mem.Allocator, @@ -18,7 +19,7 @@ pub const MutableString = struct { return MutableString.init(allocator, 2048); } - pub const Writer = std.io.Writer(*@This(), anyerror, MutableString.writeAll); + pub const Writer = std.io.Writer(*@This(), OOM, MutableString.writeAll); pub fn writer(self: *MutableString) Writer { return Writer{ .context = self, @@ -40,11 +41,11 @@ pub const MutableString = struct { return bun.isSliceInBuffer(slice, this.list.items.ptr[0..this.list.capacity]); } - pub fn growIfNeeded(self: *MutableString, amount: usize) !void { + pub fn growIfNeeded(self: *MutableString, amount: usize) OOM!void { try self.list.ensureUnusedCapacity(self.allocator, amount); } - pub fn write(self: *MutableString, bytes: anytype) !usize { + pub fn write(self: *MutableString, bytes: anytype) OOM!usize { bun.debugAssert(bytes.len == 0 or !bun.isSliceInBuffer(bytes, self.list.allocatedSlice())); try self.list.appendSlice(self.allocator, bytes); return bytes.len; @@ -54,7 +55,7 @@ pub const MutableString = struct { return BufferedWriter{ .context = self }; } - pub fn init(allocator: std.mem.Allocator, capacity: usize) std.mem.Allocator.Error!MutableString { + pub fn init(allocator: std.mem.Allocator, capacity: usize) OOM!MutableString { return MutableString{ .allocator = allocator, .list = if (capacity > 0) try std.ArrayListUnmanaged(u8).initCapacity(allocator, capacity) else 
@@ -67,7 +68,7 @@ pub const MutableString = struct { pub const ensureUnusedCapacity = growIfNeeded; - pub fn initCopy(allocator: std.mem.Allocator, str: anytype) !MutableString { + pub fn initCopy(allocator: std.mem.Allocator, str: anytype) OOM!MutableString { var mutable = try MutableString.init(allocator, str.len); try mutable.copy(str); return mutable; @@ -443,7 +444,7 @@ pub const MutableString = struct { } }; - pub fn writeAll(self: *MutableString, bytes: string) std.mem.Allocator.Error!usize { + pub fn writeAll(self: *MutableString, bytes: string) OOM!usize { try self.list.appendSlice(self.allocator, bytes); return bytes.len; } diff --git a/test/bun.lockb b/test/bun.lockb index 30b73a146f..c6449d11b3 100755 Binary files a/test/bun.lockb and b/test/bun.lockb differ diff --git a/test/cli/install/bun-pack.test.ts b/test/cli/install/bun-pack.test.ts index f9aa414179..4f82725bc3 100644 --- a/test/cli/install/bun-pack.test.ts +++ b/test/cli/install/bun-pack.test.ts @@ -2,7 +2,7 @@ import { file, spawn, write } from "bun"; import { readTarball } from "bun:internal-for-testing"; import { beforeEach, describe, expect, test } from "bun:test"; import { exists, mkdir, rm } from "fs/promises"; -import { bunEnv, bunExe, isWindows, runBunInstall, tmpdirSync } from "harness"; +import { bunEnv, bunExe, runBunInstall, tmpdirSync, pack } from "harness"; import { join } from "path"; var packageDir: string; @@ -11,30 +11,6 @@ beforeEach(() => { packageDir = tmpdirSync(); }); -async function pack(cwd: string, env: NodeJS.ProcessEnv, ...args: string[]) { - const { stdout, stderr, exited } = spawn({ - cmd: [bunExe(), "pm", "pack", ...args], - cwd, - stdout: "pipe", - stderr: "pipe", - stdin: "ignore", - env, - }); - - const err = await Bun.readableStreamToText(stderr); - expect(err).not.toContain("error:"); - expect(err).not.toContain("warning:"); - expect(err).not.toContain("failed"); - expect(err).not.toContain("panic:"); - - const out = await Bun.readableStreamToText(stdout); - - const exitCode = await exited; - expect(exitCode).toBe(0); - - return { out, err }; -} - async function packExpectError(cwd: string, env: NodeJS.ProcessEnv, ...args: string[]) { const { stdout, stderr, exited } = spawn({ cmd: [bunExe(), "pm", "pack", ...args], @@ -974,11 +950,7 @@ describe("bins", () => { ]); expect(tarball.entries[0].perm & 0o644).toBe(0o644); - if (isWindows) { - expect(tarball.entries[1].perm & 0o111).toBe(0); - } else { - expect(tarball.entries[1].perm & (0o644 | 0o111)).toBe(0o644 | 0o111); - } + expect(tarball.entries[1].perm & (0o644 | 0o111)).toBe(0o644 | 0o111); }); test("directory", async () => { @@ -1013,13 +985,8 @@ describe("bins", () => { ]); expect(tarball.entries[0].perm & 0o644).toBe(0o644); - if (isWindows) { - expect(tarball.entries[1].perm & 0o111).toBe(0); - expect(tarball.entries[2].perm & 0o111).toBe(0); - } else { - expect(tarball.entries[1].perm & (0o644 | 0o111)).toBe(0o644 | 0o111); - expect(tarball.entries[2].perm & (0o644 | 0o111)).toBe(0o644 | 0o111); - } + expect(tarball.entries[1].perm & (0o644 | 0o111)).toBe(0o644 | 0o111); + expect(tarball.entries[2].perm & (0o644 | 0o111)).toBe(0o644 | 0o111); }); }); diff --git a/test/cli/install/registry/bun-install-registry.test.ts b/test/cli/install/registry/bun-install-registry.test.ts index 8966e19116..756c168b57 100644 --- a/test/cli/install/registry/bun-install-registry.test.ts +++ b/test/cli/install/registry/bun-install-registry.test.ts @@ -14,6 +14,7 @@ import { randomPort, runBunInstall, runBunUpdate, + pack, tempDirWithFiles, 
   tmpdirSync,
   toBeValidBin,
@@ -108,7 +109,6 @@ function registryUrl() {
  * Returns auth token
  */
 async function generateRegistryUser(username: string, password: string): Promise<string> {
-  console.log("GENERATE REGISTRY USER");
   if (users[username]) {
     throw new Error("that user already exists");
   } else users[username] = password;
@@ -130,7 +130,6 @@ async function generateRegistryUser(username: string, password: string): Promise<string> {
   if (response.ok) {
     const data = await response.json();
-    console.log(`Token: ${data.token}`);
     return data.token;
   } else {
     throw new Error("Failed to create user:", response.statusText);
@@ -514,6 +513,446 @@ ${Object.keys(opts)
   );
 });
 
+export async function publish(
+  env: any,
+  cwd: string,
+  ...args: string[]
+): Promise<{ out: string; err: string; exitCode: number }> {
+  const { stdout, stderr, exited } = spawn({
+    cmd: [bunExe(), "publish", ...args],
+    cwd,
+    stdout: "pipe",
+    stderr: "pipe",
+    env,
+  });
+
+  const out = await Bun.readableStreamToText(stdout);
+  const err = await Bun.readableStreamToText(stderr);
+  const exitCode = await exited;
+  return { out, err, exitCode };
+}
+
+async function authBunfig(user: string) {
+  const authToken = await generateRegistryUser(user, user);
+  return `
+  [install]
+  cache = false
+  registry = { url = "http://localhost:${port}/", token = "${authToken}" }
+  `;
+}
+
+describe("publish", async () => {
+  describe("otp", async () => {
+    for (const setAuthHeader of [true, false]) {
+      test("mock web login" + (setAuthHeader ? "" : " (without auth header)"), async () => {
+        using mockRegistry = Bun.serve({
+          port: 0,
+          async fetch(req) {
+            if (req.url.endsWith("otp-pkg-1")) {
+              if (req.headers.get("npm-otp") === authToken) {
+                return new Response("OK", { status: 200 });
+              } else {
+                const headers = new Headers();
+                if (setAuthHeader) headers.set("www-authenticate", "OTP");
+                return new Response(
+                  JSON.stringify({
+                    // this isn't accurate, but we just want to check that finding this string works
+                    mock: setAuthHeader ? "" : "one-time password",
+
+                    authUrl: `http://localhost:${this.port}/auth`,
+                    doneUrl: `http://localhost:${this.port}/done`,
+                  }),
+                  {
+                    status: 401,
+                    headers,
+                  },
+                );
+              }
+            } else if (req.url.endsWith("auth")) {
+              expect.unreachable("url given to user, bun publish should not request");
+            } else if (req.url.endsWith("done")) {
+              // send a fake response saying the user has authenticated successfully with the auth url
+              return new Response(JSON.stringify({ token: authToken }), { status: 200 });
+            }
+
+            expect.unreachable("unexpected url");
+          },
+        });
+
+        const authToken = await generateRegistryUser("otp" + (setAuthHeader ? "" : "noheader"), "otp");
"" : "noheader"), "otp"); + const bunfig = ` + [install] + cache = false + registry = { url = "http://localhost:${mockRegistry.port}", token = "${authToken}" }`; + await Promise.all([ + rm(join(import.meta.dir, "packages", "otp-pkg-1"), { recursive: true, force: true }), + write(join(packageDir, "bunfig.toml"), bunfig), + write( + join(packageDir, "package.json"), + JSON.stringify({ + name: "otp-pkg-1", + version: "2.2.2", + dependencies: { + "otp-pkg-1": "2.2.2", + }, + }), + ), + ]); + + const { out, err, exitCode } = await publish(env, packageDir); + expect(exitCode).toBe(0); + }); + } + }); + test("can publish a package then install it", async () => { + const bunfig = await authBunfig("basic"); + await Promise.all([ + rm(join(import.meta.dir, "packages", "publish-pkg-1"), { recursive: true, force: true }), + write( + join(packageDir, "package.json"), + JSON.stringify({ + name: "publish-pkg-1", + version: "1.1.1", + dependencies: { + "publish-pkg-1": "1.1.1", + }, + }), + ), + write(join(packageDir, "bunfig.toml"), bunfig), + ]); + + const { out, err, exitCode } = await publish(env, packageDir); + expect(err).not.toContain("error:"); + expect(err).not.toContain("warn:"); + expect(exitCode).toBe(0); + + await runBunInstall(env, packageDir); + expect(await exists(join(packageDir, "node_modules", "publish-pkg-1", "package.json"))).toBeTrue(); + }); + test("can publish from a tarball", async () => { + const bunfig = await authBunfig("tarball"); + const json = { + name: "publish-pkg-2", + version: "2.2.2", + dependencies: { + "publish-pkg-2": "2.2.2", + }, + }; + await Promise.all([ + rm(join(import.meta.dir, "packages", "publish-pkg-2"), { recursive: true, force: true }), + write(join(packageDir, "package.json"), JSON.stringify(json)), + write(join(packageDir, "bunfig.toml"), bunfig), + ]); + + await pack(packageDir, env); + + let { out, err, exitCode } = await publish(env, packageDir, "./publish-pkg-2-2.2.2.tgz"); + expect(err).not.toContain("error:"); + expect(err).not.toContain("warn:"); + expect(exitCode).toBe(0); + + await runBunInstall(env, packageDir); + expect(await exists(join(packageDir, "node_modules", "publish-pkg-2", "package.json"))).toBeTrue(); + + await Promise.all([ + rm(join(import.meta.dir, "packages", "publish-pkg-2"), { recursive: true, force: true }), + rm(join(packageDir, "bun.lockb"), { recursive: true, force: true }), + rm(join(packageDir, "node_modules"), { recursive: true, force: true }), + ]); + + // now with an absoute path + ({ out, err, exitCode } = await publish(env, packageDir, join(packageDir, "publish-pkg-2-2.2.2.tgz"))); + expect(err).not.toContain("error:"); + expect(err).not.toContain("warn:"); + expect(exitCode).toBe(0); + + await runBunInstall(env, packageDir); + expect(await file(join(packageDir, "node_modules", "publish-pkg-2", "package.json")).json()).toEqual(json); + }); + + test("can publish workspace package", async () => { + const bunfig = await authBunfig("workspace"); + const pkgJson = { + name: "publish-pkg-3", + version: "3.3.3", + dependencies: { + "publish-pkg-3": "3.3.3", + }, + }; + await Promise.all([ + rm(join(import.meta.dir, "packages", "publish-pkg-3"), { recursive: true, force: true }), + write(join(packageDir, "bunfig.toml"), bunfig), + write( + join(packageDir, "package.json"), + JSON.stringify({ + name: "root", + workspaces: ["packages/*"], + }), + ), + write(join(packageDir, "packages", "publish-pkg-3", "package.json"), JSON.stringify(pkgJson)), + ]); + + await publish(env, join(packageDir, "packages", "publish-pkg-3")); + + 
await write( + join(packageDir, "package.json"), + JSON.stringify({ name: "root", "dependencies": { "publish-pkg-3": "3.3.3" } }), + ); + + await runBunInstall(env, packageDir); + + expect(await file(join(packageDir, "node_modules", "publish-pkg-3", "package.json")).json()).toEqual(pkgJson); + }); + + describe("--dry-run", async () => { + test("does not publish", async () => { + const bunfig = await authBunfig("dryrun"); + await Promise.all([ + rm(join(import.meta.dir, "packages", "dry-run-1"), { recursive: true, force: true }), + write(join(packageDir, "bunfig.toml"), bunfig), + write( + join(packageDir, "package.json"), + JSON.stringify({ + name: "dry-run-1", + version: "1.1.1", + dependencies: { + "dry-run-1": "1.1.1", + }, + }), + ), + ]); + + const { out, err, exitCode } = await publish(env, packageDir, "--dry-run"); + expect(exitCode).toBe(0); + + expect(await exists(join(import.meta.dir, "packages", "dry-run-1"))).toBeFalse(); + }); + test("does not publish from tarball path", async () => { + const bunfig = await authBunfig("dryruntarball"); + await Promise.all([ + rm(join(import.meta.dir, "packages", "dry-run-2"), { recursive: true, force: true }), + write(join(packageDir, "bunfig.toml"), bunfig), + write( + join(packageDir, "package.json"), + JSON.stringify({ + name: "dry-run-2", + version: "2.2.2", + dependencies: { + "dry-run-2": "2.2.2", + }, + }), + ), + ]); + + await pack(packageDir, env); + + const { out, err, exitCode } = await publish(env, packageDir, "./dry-run-2-2.2.2.tgz", "--dry-run"); + expect(exitCode).toBe(0); + + expect(await exists(join(import.meta.dir, "packages", "dry-run-2"))).toBeFalse(); + }); + }); + + describe("lifecycle scripts", async () => { + const script = `const fs = require("fs"); + fs.writeFileSync(process.argv[2] + ".txt", \` +prepublishOnly: \${fs.existsSync("prepublishOnly.txt")} +publish: \${fs.existsSync("publish.txt")} +postpublish: \${fs.existsSync("postpublish.txt")} +prepack: \${fs.existsSync("prepack.txt")} +prepare: \${fs.existsSync("prepare.txt")} +postpack: \${fs.existsSync("postpack.txt")}\`)`; + const json = { + name: "publish-pkg-4", + version: "4.4.4", + scripts: { + // should happen in this order + "prepublishOnly": `${bunExe()} script.js prepublishOnly`, + "prepack": `${bunExe()} script.js prepack`, + "prepare": `${bunExe()} script.js prepare`, + "postpack": `${bunExe()} script.js postpack`, + "publish": `${bunExe()} script.js publish`, + "postpublish": `${bunExe()} script.js postpublish`, + }, + dependencies: { + "publish-pkg-4": "4.4.4", + }, + }; + + for (const arg of ["", "--dry-run"]) { + test(`should run in order${arg ? " (--dry-run)" : ""}`, async () => { + const bunfig = await authBunfig("lifecycle" + (arg ? 
"dry" : "")); + await Promise.all([ + rm(join(import.meta.dir, "packages", "publish-pkg-4"), { recursive: true, force: true }), + write(join(packageDir, "package.json"), JSON.stringify(json)), + write(join(packageDir, "script.js"), script), + write(join(packageDir, "bunfig.toml"), bunfig), + ]); + + const { out, err, exitCode } = await publish(env, packageDir, arg); + expect(exitCode).toBe(0); + + const results = await Promise.all([ + file(join(packageDir, "prepublishOnly.txt")).text(), + file(join(packageDir, "prepack.txt")).text(), + file(join(packageDir, "prepare.txt")).text(), + file(join(packageDir, "postpack.txt")).text(), + file(join(packageDir, "publish.txt")).text(), + file(join(packageDir, "postpublish.txt")).text(), + ]); + + expect(results).toEqual([ + "\nprepublishOnly: false\npublish: false\npostpublish: false\nprepack: false\nprepare: false\npostpack: false", + "\nprepublishOnly: true\npublish: false\npostpublish: false\nprepack: false\nprepare: false\npostpack: false", + "\nprepublishOnly: true\npublish: false\npostpublish: false\nprepack: true\nprepare: false\npostpack: false", + "\nprepublishOnly: true\npublish: false\npostpublish: false\nprepack: true\nprepare: true\npostpack: false", + "\nprepublishOnly: true\npublish: false\npostpublish: false\nprepack: true\nprepare: true\npostpack: true", + "\nprepublishOnly: true\npublish: true\npostpublish: false\nprepack: true\nprepare: true\npostpack: true", + ]); + }); + } + + test("--ignore-scripts", async () => { + const bunfig = await authBunfig("ignorescripts"); + await Promise.all([ + rm(join(import.meta.dir, "packages", "publish-pkg-5"), { recursive: true, force: true }), + write(join(packageDir, "package.json"), JSON.stringify(json)), + write(join(packageDir, "script.js"), script), + write(join(packageDir, "bunfig.toml"), bunfig), + ]); + + const { out, err, exitCode } = await publish(env, packageDir, "--ignore-scripts"); + expect(exitCode).toBe(0); + + const results = await Promise.all([ + exists(join(packageDir, "prepublishOnly.txt")), + exists(join(packageDir, "prepack.txt")), + exists(join(packageDir, "prepare.txt")), + exists(join(packageDir, "postpack.txt")), + exists(join(packageDir, "publish.txt")), + exists(join(packageDir, "postpublish.txt")), + ]); + + expect(results).toEqual([false, false, false, false, false, false]); + }); + }); + + test("attempting to publish a private package should fail", async () => { + const bunfig = await authBunfig("privatepackage"); + await Promise.all([ + rm(join(import.meta.dir, "packages", "publish-pkg-6"), { recursive: true, force: true }), + write( + join(packageDir, "package.json"), + JSON.stringify({ + name: "publish-pkg-6", + version: "6.6.6", + private: true, + dependencies: { + "publish-pkg-6": "6.6.6", + }, + }), + ), + write(join(packageDir, "bunfig.toml"), bunfig), + ]); + + // should fail + let { out, err, exitCode } = await publish(env, packageDir); + expect(exitCode).toBe(1); + expect(err).toContain("error: attempted to publish a private package"); + expect(await exists(join(import.meta.dir, "packages", "publish-pkg-6-6.6.6.tgz"))).toBeFalse(); + + // try tarball + await pack(packageDir, env); + ({ out, err, exitCode } = await publish(env, packageDir, "./publish-pkg-6-6.6.6.tgz")); + expect(exitCode).toBe(1); + expect(err).toContain("error: attempted to publish a private package"); + expect(await exists(join(packageDir, "publish-pkg-6-6.6.6.tgz"))).toBeTrue(); + }); + + describe("access", async () => { + test("--access", async () => { + const bunfig = await 
authBunfig("accessflag"); + await Promise.all([ + rm(join(import.meta.dir, "packages", "publish-pkg-7"), { recursive: true, force: true }), + write(join(packageDir, "bunfig.toml"), bunfig), + write( + join(packageDir, "package.json"), + JSON.stringify({ + name: "publish-pkg-7", + version: "7.7.7", + }), + ), + ]); + + // should fail + let { out, err, exitCode } = await publish(env, packageDir, "--access", "restricted"); + expect(exitCode).toBe(1); + expect(err).toContain("error: unable to restrict access to unscoped package"); + + ({ out, err, exitCode } = await publish(env, packageDir, "--access", "public")); + expect(exitCode).toBe(0); + + expect(await exists(join(import.meta.dir, "packages", "publish-pkg-7"))).toBeTrue(); + }); + + for (const access of ["restricted", "public"]) { + test(`access ${access}`, async () => { + const bunfig = await authBunfig("access" + access); + + const pkgJson = { + name: "@secret/publish-pkg-8", + version: "8.8.8", + dependencies: { + "@secret/publish-pkg-8": "8.8.8", + }, + publishConfig: { + access, + }, + }; + + await Promise.all([ + rm(join(import.meta.dir, "packages", "@secret", "publish-pkg-8"), { recursive: true, force: true }), + write(join(packageDir, "bunfig.toml"), bunfig), + write(join(packageDir, "package.json"), JSON.stringify(pkgJson)), + ]); + + let { out, err, exitCode } = await publish(env, packageDir); + expect(exitCode).toBe(0); + + await runBunInstall(env, packageDir); + + expect(await file(join(packageDir, "node_modules", "@secret", "publish-pkg-8", "package.json")).json()).toEqual( + pkgJson, + ); + }); + } + }); + + describe("tag", async () => { + test("can publish with a tag", async () => { + const bunfig = await authBunfig("simpletag"); + const pkgJson = { + name: "publish-pkg-9", + version: "9.9.9", + dependencies: { + "publish-pkg-9": "simpletag", + }, + }; + await Promise.all([ + rm(join(import.meta.dir, "packages", "publish-pkg-9"), { recursive: true, force: true }), + write(join(packageDir, "bunfig.toml"), bunfig), + write(join(packageDir, "package.json"), JSON.stringify(pkgJson)), + ]); + + let { out, err, exitCode } = await publish(env, packageDir, "--tag", "simpletag"); + expect(exitCode).toBe(0); + + await runBunInstall(env, packageDir); + expect(await file(join(packageDir, "node_modules", "publish-pkg-9", "package.json")).json()).toEqual(pkgJson); + }); + }); +}); + describe("package.json indentation", async () => { test("works for root and workspace packages", async () => { await Promise.all([ diff --git a/test/cli/install/registry/verdaccio.yaml b/test/cli/install/registry/verdaccio.yaml index e46176c27a..6d12f17444 100644 --- a/test/cli/install/registry/verdaccio.yaml +++ b/test/cli/install/registry/verdaccio.yaml @@ -75,6 +75,11 @@ packages: publish: $authenticated unpublish: $authenticated + "@secret/*": + access: $authenticated + publish: $authenticated + unpublish: $authenticated + "@*/*": # scoped packages access: $all @@ -92,7 +97,7 @@ packages: # allow all known users to publish/publish packages # (anyone can register by default, remember?) 
- publish: $authenticated + publish: $all unpublish: $all # if package is not available locally, proxy requests to 'npmjs' registry diff --git a/test/harness.ts b/test/harness.ts index 8abf61302e..fd49ed84ca 100644 --- a/test/harness.ts +++ b/test/harness.ts @@ -1048,6 +1048,30 @@ export async function runBunUpdate( return { out: out.replace(/\s*\[[0-9\.]+m?s\]\s*$/, "").split(/\r?\n/), err, exitCode }; } +export async function pack(cwd: string, env: NodeJS.ProcessEnv, ...args: string[]) { + const { stdout, stderr, exited } = Bun.spawn({ + cmd: [bunExe(), "pm", "pack", ...args], + cwd, + stdout: "pipe", + stderr: "pipe", + stdin: "ignore", + env, + }); + + const err = await Bun.readableStreamToText(stderr); + expect(err).not.toContain("error:"); + expect(err).not.toContain("warning:"); + expect(err).not.toContain("failed"); + expect(err).not.toContain("panic:"); + + const out = await Bun.readableStreamToText(stdout); + + const exitCode = await exited; + expect(exitCode).toBe(0); + + return { out, err }; +} + // If you need to modify, clone it export const expiredTls = Object.freeze({ cert: "-----BEGIN CERTIFICATE-----\nMIIDXTCCAkWgAwIBAgIJAKLdQVPy90jjMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\nBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\naWRnaXRzIFB0eSBMdGQwHhcNMTkwMjAzMTQ0OTM1WhcNMjAwMjAzMTQ0OTM1WjBF\nMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\nZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB\nCgKCAQEA7i7IIEdICTiSTVx+ma6xHxOtcbd6wGW3nkxlCkJ1UuV8NmY5ovMsGnGD\nhJJtUQ2j5ig5BcJUf3tezqCNW4tKnSOgSISfEAKvpn2BPvaFq3yx2Yjz0ruvcGKp\nDMZBXmB/AAtGyN/UFXzkrcfppmLHJTaBYGG6KnmU43gPkSDy4iw46CJFUOupc51A\nFIz7RsE7mbT1plCM8e75gfqaZSn2k+Wmy+8n1HGyYHhVISRVvPqkS7gVLSVEdTea\nUtKP1Vx/818/HDWk3oIvDVWI9CFH73elNxBkMH5zArSNIBTehdnehyAevjY4RaC/\nkK8rslO3e4EtJ9SnA4swOjCiqAIQEwIDAQABo1AwTjAdBgNVHQ4EFgQUv5rc9Smm\n9c4YnNf3hR49t4rH4yswHwYDVR0jBBgwFoAUv5rc9Smm9c4YnNf3hR49t4rH4ysw\nDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEATcL9CAAXg0u//eYUAlQa\nL+l8yKHS1rsq1sdmx7pvsmfZ2g8ONQGfSF3TkzkI2OOnCBokeqAYuyT8awfdNUtE\nEHOihv4ZzhK2YZVuy0fHX2d4cCFeQpdxno7aN6B37qtsLIRZxkD8PU60Dfu9ea5F\nDDynnD0TUabna6a0iGn77yD8GPhjaJMOz3gMYjQFqsKL252isDVHEDbpVxIzxPmN\nw1+WK8zRNdunAcHikeoKCuAPvlZ83gDQHp07dYdbuZvHwGj0nfxBLc9qt90XsBtC\n4IYR7c/bcLMmKXYf0qoQ4OzngsnPI5M+v9QEHvYWaKVwFY4CTcSNJEwfXw+BAeO5\nOA==\n-----END CERTIFICATE-----", diff --git a/test/package.json b/test/package.json index 08096cce5d..7406bf4486 100644 --- a/test/package.json +++ b/test/package.json @@ -59,7 +59,7 @@ "svelte": "3.55.1", "typescript": "5.0.2", "undici": "5.20.0", - "verdaccio": "5.27.0", + "verdaccio": "6.0.0", "vitest": "0.32.2", "webpack": "5.88.0", "webpack-cli": "4.7.2",
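With the suites above in place, the shared `pack` helper in `test/harness.ts` asserts a clean run for its callers (exit code 0; no `error:`, `warning:`, `failed`, or `panic:` in stderr) before returning stdout and stderr, so individual tests only assert what they care about. A minimal usage sketch (the package name and assertion are illustrative, not from this patch):

```ts
import { write } from "bun";
import { expect, test } from "bun:test";
import { exists } from "fs/promises";
import { join } from "path";
import { bunEnv, pack, tmpdirSync } from "harness";

test("pack produces a tarball (illustrative)", async () => {
  const dir = tmpdirSync();
  await write(join(dir, "package.json"), JSON.stringify({ name: "demo-pkg", version: "0.0.1" }));

  // pack() has already asserted a zero exit code and clean stderr here
  await pack(dir, bunEnv);

  // `bun pm pack` writes <name>-<version>.tgz into the working directory,
  // which publish(...) from bun-install-registry.test.ts could then upload
  expect(await exists(join(dir, "demo-pkg-0.0.1.tgz"))).toBeTrue();
});
```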