diff --git a/.coderabbit.yaml b/.coderabbit.yaml new file mode 100644 index 0000000000..5a0c7d0aad --- /dev/null +++ b/.coderabbit.yaml @@ -0,0 +1,143 @@ +language: en-US + +reviews: + profile: assertive + request_changes_workflow: false + high_level_summary: false + high_level_summary_placeholder: "@coderabbitai summary" + high_level_summary_in_walkthrough: true + auto_title_placeholder: "@coderabbitai" + review_status: false + commit_status: false + fail_commit_status: false + collapse_walkthrough: false + changed_files_summary: true + sequence_diagrams: false + estimate_code_review_effort: false + assess_linked_issues: true + related_issues: true + related_prs: true + suggested_labels: false + suggested_reviewers: true + in_progress_fortune: false + poem: false + abort_on_close: true + + path_filters: + - "!test/js/node/test/" + + auto_review: + enabled: true + auto_incremental_review: true + drafts: false + + finishing_touches: + docstrings: + enabled: false + unit_tests: + enabled: false + + pre_merge_checks: + docstrings: + mode: off + title: + mode: warning + description: + mode: warning + issue_assessment: + mode: warning + + tools: + shellcheck: + enabled: true + ruff: + enabled: true + markdownlint: + enabled: true + github-checks: + enabled: true + timeout_ms: 90000 + languagetool: + enabled: true + enabled_only: false + level: default + biome: + enabled: true + hadolint: + enabled: true + swiftlint: + enabled: true + phpstan: + enabled: true + level: default + phpmd: + enabled: true + phpcs: + enabled: true + golangci-lint: + enabled: true + yamllint: + enabled: true + gitleaks: + enabled: true + checkov: + enabled: true + detekt: + enabled: true + eslint: + enabled: true + flake8: + enabled: true + rubocop: + enabled: true + buf: + enabled: true + regal: + enabled: true + actionlint: + enabled: true + pmd: + enabled: true + clang: + enabled: true + cppcheck: + enabled: true + semgrep: + enabled: true + circleci: + enabled: true + clippy: + enabled: 
true + sqlfluff: + enabled: true + prismaLint: + enabled: true + pylint: + enabled: true + oxc: + enabled: true + shopifyThemeCheck: + enabled: true + luacheck: + enabled: true + brakeman: + enabled: true + dotenvLint: + enabled: true + htmlhint: + enabled: true + checkmake: + enabled: true + osvScanner: + enabled: true + +chat: + auto_reply: true + +knowledge_base: + opt_out: false + code_guidelines: + enabled: true + filePatterns: + - "**/.cursor/rules/*.mdc" + - "**/CLAUDE.md" diff --git a/cmake/CompilerFlags.cmake b/cmake/CompilerFlags.cmake index b9ae804f06..cff32fb166 100644 --- a/cmake/CompilerFlags.cmake +++ b/cmake/CompilerFlags.cmake @@ -215,46 +215,6 @@ if(ENABLE_ASSERTIONS) DESCRIPTION "Do not eliminate null-pointer checks" -fno-delete-null-pointer-checks ) - - register_compiler_definitions( - DESCRIPTION "Enable libc++ assertions" - _LIBCPP_ENABLE_ASSERTIONS=1 - _LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_EXTENSIVE ${RELEASE} - _LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_DEBUG ${DEBUG} - ) - - # Nix glibc already sets _FORTIFY_SOURCE, don't override it - if(NOT DEFINED ENV{NIX_CC}) - register_compiler_definitions( - DESCRIPTION "Enable fortified sources (Release only)" - _FORTIFY_SOURCE=3 ${RELEASE} - ) - endif() - - if(LINUX) - register_compiler_definitions( - DESCRIPTION "Enable glibc++ assertions" - _GLIBCXX_ASSERTIONS=1 - ) - endif() -else() - register_compiler_definitions( - DESCRIPTION "Disable debug assertions" - NDEBUG=1 - ) - - register_compiler_definitions( - DESCRIPTION "Disable libc++ assertions" - _LIBCPP_ENABLE_ASSERTIONS=0 - _LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_NONE - ) - - if(LINUX) - register_compiler_definitions( - DESCRIPTION "Disable glibc++ assertions" - _GLIBCXX_ASSERTIONS=0 - ) - endif() endif() # --- Diagnostics --- @@ -305,14 +265,6 @@ if(UNIX AND CI) ) endif() -# --- Features --- - -# Valgrind cannot handle SSE4.2 instructions -# This is needed for picohttpparser -if(ENABLE_VALGRIND AND ARCH STREQUAL "x64") - 
register_compiler_definitions(__SSE4_2__=0) -endif() - # --- Other --- # Workaround for CMake and clang-cl bug. diff --git a/cmake/Globals.cmake b/cmake/Globals.cmake index 941e98b421..a5484a8b08 100644 --- a/cmake/Globals.cmake +++ b/cmake/Globals.cmake @@ -136,13 +136,6 @@ else() set(WARNING WARNING) endif() -# TODO: This causes flaky zig builds in CI, so temporarily disable it. -# if(CI) -# set(DEFAULT_VENDOR_PATH ${CACHE_PATH}/vendor) -# else() -# set(DEFAULT_VENDOR_PATH ${CWD}/vendor) -# endif() - optionx(VENDOR_PATH FILEPATH "The path to the vendor directory" DEFAULT ${CWD}/vendor) optionx(TMP_PATH FILEPATH "The path to the temporary directory" DEFAULT ${BUILD_PATH}/tmp) @@ -917,10 +910,6 @@ function(register_compiler_flags) endforeach() endfunction() -function(register_compiler_definitions) - -endfunction() - # register_linker_flags() # Description: # Registers a linker flag, similar to `add_link_options()`. diff --git a/cmake/Options.cmake b/cmake/Options.cmake index 93a3698563..ac6ce10c74 100644 --- a/cmake/Options.cmake +++ b/cmake/Options.cmake @@ -140,11 +140,6 @@ if(ENABLE_ASAN AND ENABLE_LTO) setx(ENABLE_LTO OFF) endif() -if(USE_VALGRIND AND NOT USE_BASELINE) - message(WARNING "If valgrind is enabled, baseline must also be enabled") - setx(USE_BASELINE ON) -endif() - if(BUILDKITE_COMMIT) set(DEFAULT_REVISION ${BUILDKITE_COMMIT}) else() diff --git a/cmake/analysis/RunCppCheck.cmake b/cmake/analysis/RunCppCheck.cmake deleted file mode 100644 index a384a44863..0000000000 --- a/cmake/analysis/RunCppCheck.cmake +++ /dev/null @@ -1,33 +0,0 @@ -# https://cppcheck.sourceforge.io/ - -find_command( - VARIABLE - CPPCHECK_EXECUTABLE - COMMAND - cppcheck - REQUIRED - OFF -) - -set(CPPCHECK_COMMAND ${CPPCHECK_EXECUTABLE} - --cppcheck-build-dir=${BUILD_PATH}/cppcheck - --project=${BUILD_PATH}/compile_commands.json - --clang=${CMAKE_CXX_COMPILER} - --std=c++${CMAKE_CXX_STANDARD} - --report-progress - --showtime=summary -) - -register_command( - TARGET - cppcheck - 
COMMENT - "Running cppcheck" - COMMAND - ${CMAKE_COMMAND} -E make_directory cppcheck - && ${CPPCHECK_COMMAND} - CWD - ${BUILD_PATH} - TARGETS - ${bun} -) diff --git a/cmake/analysis/RunCppLint.cmake b/cmake/analysis/RunCppLint.cmake deleted file mode 100644 index 5b9264ecf5..0000000000 --- a/cmake/analysis/RunCppLint.cmake +++ /dev/null @@ -1,22 +0,0 @@ -find_command( - VARIABLE - CPPLINT_PROGRAM - COMMAND - cpplint - REQUIRED - OFF -) - -register_command( - TARGET - cpplint - COMMENT - "Running cpplint" - COMMAND - ${CPPLINT_PROGRAM} - ${BUN_CPP_SOURCES} - CWD - ${BUILD_PATH} - TARGETS - ${bun} -) diff --git a/cmake/analysis/RunIWYU.cmake b/cmake/analysis/RunIWYU.cmake deleted file mode 100644 index 0ea555f2f5..0000000000 --- a/cmake/analysis/RunIWYU.cmake +++ /dev/null @@ -1,67 +0,0 @@ -# IWYU = "Include What You Use" -# https://include-what-you-use.org/ - -setx(IWYU_SOURCE_PATH ${CACHE_PATH}/iwyu-${LLVM_VERSION}) -setx(IWYU_BUILD_PATH ${IWYU_SOURCE_PATH}/build) -setx(IWYU_PROGRAM ${IWYU_BUILD_PATH}/bin/include-what-you-use) - -register_repository( - NAME - iwyu - REPOSITORY - include-what-you-use/include-what-you-use - BRANCH - clang_${LLVM_VERSION} - PATH - ${IWYU_SOURCE_PATH} -) - -register_command( - TARGET - build-iwyu - COMMENT - "Building iwyu" - COMMAND - ${CMAKE_COMMAND} - -B${IWYU_BUILD_PATH} - -G${CMAKE_GENERATOR} - -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_CXX_COMPILER_LAUNCHER=${CMAKE_CXX_COMPILER_LAUNCHER} - -DIWYU_LLVM_ROOT_PATH=${LLVM_PREFIX} - && ${CMAKE_COMMAND} - --build ${IWYU_BUILD_PATH} - CWD - ${IWYU_SOURCE_PATH} - TARGETS - clone-iwyu -) - -find_command( - VARIABLE - PYTHON_EXECUTABLE - COMMAND - python3 - python - VERSION - >=3.0.0 - REQUIRED - OFF -) - -register_command( - TARGET - iwyu - COMMENT - "Running iwyu" - COMMAND - ${CMAKE_COMMAND} - -E env IWYU_BINARY=${IWYU_PROGRAM} - ${PYTHON_EXECUTABLE} - ${IWYU_SOURCE_PATH}/iwyu_tool.py - -p ${BUILD_PATH} - CWD - ${BUILD_PATH} - TARGETS - build-iwyu - ${bun} -) diff --git 
a/cmake/targets/BuildBun.cmake b/cmake/targets/BuildBun.cmake index b5adbc4d43..6155f10c7b 100644 --- a/cmake/targets/BuildBun.cmake +++ b/cmake/targets/BuildBun.cmake @@ -45,12 +45,6 @@ else() endif() set(LLVM_ZIG_CODEGEN_THREADS 0) -# This makes the build slower, so we turn it off for now. -# if (DEBUG) -# include(ProcessorCount) -# ProcessorCount(CPU_COUNT) -# set(LLVM_ZIG_CODEGEN_THREADS ${CPU_COUNT}) -# endif() # --- Dependencies --- @@ -71,9 +65,6 @@ set(BUN_DEPENDENCIES ) include(CloneZstd) -# foreach(dependency ${BUN_DEPENDENCIES}) -# include(Clone${dependency}) -# endforeach() # --- Codegen --- @@ -1261,15 +1252,9 @@ if(LINUX) target_link_libraries(${bun} PUBLIC libatomic.so) endif() - if(USE_SYSTEM_ICU) - target_link_libraries(${bun} PRIVATE libicudata.a) - target_link_libraries(${bun} PRIVATE libicui18n.a) - target_link_libraries(${bun} PRIVATE libicuuc.a) - else() - target_link_libraries(${bun} PRIVATE ${WEBKIT_LIB_PATH}/libicudata.a) - target_link_libraries(${bun} PRIVATE ${WEBKIT_LIB_PATH}/libicui18n.a) - target_link_libraries(${bun} PRIVATE ${WEBKIT_LIB_PATH}/libicuuc.a) - endif() + target_link_libraries(${bun} PRIVATE ${WEBKIT_LIB_PATH}/libicudata.a) + target_link_libraries(${bun} PRIVATE ${WEBKIT_LIB_PATH}/libicui18n.a) + target_link_libraries(${bun} PRIVATE ${WEBKIT_LIB_PATH}/libicuuc.a) endif() if(WIN32) @@ -1322,32 +1307,32 @@ if(NOT BUN_CPP_ONLY) OUTPUTS ${BUILD_PATH}/${bunStripExe} ) - + # Then sign both executables on Windows if(WIN32 AND ENABLE_WINDOWS_CODESIGNING) set(SIGN_SCRIPT "${CMAKE_SOURCE_DIR}/.buildkite/scripts/sign-windows.ps1") - + # Verify signing script exists if(NOT EXISTS "${SIGN_SCRIPT}") message(FATAL_ERROR "Windows signing script not found: ${SIGN_SCRIPT}") endif() - + # Use PowerShell for Windows code signing (native Windows, no path issues) - find_program(POWERSHELL_EXECUTABLE + find_program(POWERSHELL_EXECUTABLE NAMES pwsh.exe powershell.exe - PATHS + PATHS "C:/Program Files/PowerShell/7" "C:/Program Files 
(x86)/PowerShell/7" "C:/Windows/System32/WindowsPowerShell/v1.0" DOC "Path to PowerShell executable" ) - + if(NOT POWERSHELL_EXECUTABLE) set(POWERSHELL_EXECUTABLE "powershell.exe") endif() - + message(STATUS "Using PowerShell executable: ${POWERSHELL_EXECUTABLE}") - + # Sign both bun-profile.exe and bun.exe after stripping register_command( TARGET diff --git a/cmake/targets/BuildLibDeflate.cmake b/cmake/targets/BuildLibDeflate.cmake index 66e89fa4b8..3f6eba7304 100644 --- a/cmake/targets/BuildLibDeflate.cmake +++ b/cmake/targets/BuildLibDeflate.cmake @@ -4,7 +4,7 @@ register_repository( REPOSITORY ebiggers/libdeflate COMMIT - 96836d7d9d10e3e0d53e6edb54eb908514e336c4 + c8c56a20f8f621e6a966b716b31f1dedab6a41e3 ) register_cmake_command( diff --git a/packages/bun-types/serve.d.ts b/packages/bun-types/serve.d.ts index ee45723bcd..c465af924c 100644 --- a/packages/bun-types/serve.d.ts +++ b/packages/bun-types/serve.d.ts @@ -202,6 +202,16 @@ declare module "bun" { */ isSubscribed(topic: string): boolean; + /** + * Returns an array of all topics the client is currently subscribed to. + * + * @example + * ws.subscribe("chat"); + * ws.subscribe("notifications"); + * console.log(ws.subscriptions); // ["chat", "notifications"] + */ + readonly subscriptions: string[]; + /** * Batches `send()` and `publish()` operations, which makes it faster to send data. 
* diff --git a/packages/bun-uws/src/HttpContext.h b/packages/bun-uws/src/HttpContext.h index 8f001bf0da..2d1e7b79b9 100644 --- a/packages/bun-uws/src/HttpContext.h +++ b/packages/bun-uws/src/HttpContext.h @@ -412,7 +412,6 @@ private: /* Timeout on uncork failure */ auto [written, failed] = ((AsyncSocket *) returnedData)->uncork(); if (written > 0 || failed) { - httpResponseData->isIdle = true; /* All Http sockets timeout by this, and this behavior match the one in HttpResponse::cork */ ((HttpResponse *) s)->resetTimeout(); } diff --git a/scripts/auto-close-duplicates.ts b/scripts/auto-close-duplicates.ts index d0c33575d7..a6a35d5158 100644 --- a/scripts/auto-close-duplicates.ts +++ b/scripts/auto-close-duplicates.ts @@ -26,6 +26,11 @@ interface GitHubReaction { content: string; } +interface GitHubEvent { + event: string; + created_at: string; +} + async function sleep(ms: number): Promise { return new Promise(resolve => setTimeout(resolve, ms)); } @@ -153,6 +158,13 @@ async function fetchAllReactions( return allReactions; } +async function wasIssueReopened(owner: string, repo: string, issueNumber: number, token: string): Promise { + const events: GitHubEvent[] = await githubRequest(`/repos/${owner}/${repo}/issues/${issueNumber}/events`, token); + + // Check if there's a "reopened" event in the issue's timeline + return events.some(event => event.event === "reopened"); +} + function escapeRegExp(str: string): string { return str.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); } @@ -185,15 +197,15 @@ async function closeIssueAsDuplicate( duplicateOfNumber: number, token: string, ): Promise { - // Close the issue as duplicate and add the duplicate label await githubRequest(`/repos/${owner}/${repo}/issues/${issueNumber}`, token, "PATCH", { state: "closed", state_reason: "duplicate", - labels: ["duplicate"], }); - + // Close the issue as duplicate await githubRequest(`/repos/${owner}/${repo}/issues/${issueNumber}/comments`, token, "POST", { - body: `This issue has been 
automatically closed as a duplicate of #${duplicateOfNumber}. + body: `Duplicate of #${duplicateOfNumber}. + +This issue has been automatically closed as a duplicate. If this is incorrect, please re-open this issue or create a new one. @@ -305,16 +317,23 @@ async function autoCloseDuplicates(): Promise { } console.log(`[DEBUG] Issue #${issue.number} - checking reactions on duplicate comment...`); - const reactions = await fetchAllReactions(owner, repo, lastDupeComment.id, token, issue.user.id); + const reactions = await fetchAllReactions(owner, repo, lastDupeComment.id, token); console.log(`[DEBUG] Issue #${issue.number} - duplicate comment has ${reactions.length} reactions`); - const authorThumbsDown = reactions.some( - reaction => reaction.user.id === issue.user.id && reaction.content === "-1", - ); - console.log(`[DEBUG] Issue #${issue.number} - author thumbs down reaction: ${authorThumbsDown}`); + const hasThumbsDown = reactions.some(reaction => reaction.content === "-1"); + console.log(`[DEBUG] Issue #${issue.number} - has thumbs down reaction: ${hasThumbsDown}`); - if (authorThumbsDown) { - console.log(`[DEBUG] Issue #${issue.number} - author disagreed with duplicate detection, skipping`); + if (hasThumbsDown) { + console.log(`[DEBUG] Issue #${issue.number} - someone disagreed with duplicate detection, skipping`); + continue; + } + + console.log(`[DEBUG] Issue #${issue.number} - checking if issue was reopened...`); + const wasReopened = await wasIssueReopened(owner, repo, issue.number, token); + console.log(`[DEBUG] Issue #${issue.number} - was reopened: ${wasReopened}`); + + if (wasReopened) { + console.log(`[DEBUG] Issue #${issue.number} - issue was previously reopened, skipping auto-close`); continue; } diff --git a/scripts/bootstrap.sh b/scripts/bootstrap.sh index 62cd622cc6..eff79ab94c 100755 --- a/scripts/bootstrap.sh +++ b/scripts/bootstrap.sh @@ -986,6 +986,7 @@ install_build_essentials() { xz-utils \ pkg-config \ golang + install_packages 
apache2-utils ;; dnf | yum) install_packages \ @@ -1013,6 +1014,7 @@ install_build_essentials() { ninja \ go \ xz + install_packages apache2-utils ;; esac diff --git a/src/allocators.zig b/src/allocators.zig index e0547f79cb..c9ae3146cd 100644 --- a/src/allocators.zig +++ b/src/allocators.zig @@ -10,7 +10,6 @@ pub const AllocationScopeIn = allocation_scope.AllocationScopeIn; pub const NullableAllocator = @import("./allocators/NullableAllocator.zig"); pub const MaxHeapAllocator = @import("./allocators/MaxHeapAllocator.zig"); -pub const MemoryReportingAllocator = @import("./allocators/MemoryReportingAllocator.zig"); pub const LinuxMemFdAllocator = @import("./allocators/LinuxMemFdAllocator.zig"); pub const MaybeOwned = @import("./allocators/maybe_owned.zig").MaybeOwned; diff --git a/src/allocators/MemoryReportingAllocator.zig b/src/allocators/MemoryReportingAllocator.zig deleted file mode 100644 index 4859adc26e..0000000000 --- a/src/allocators/MemoryReportingAllocator.zig +++ /dev/null @@ -1,96 +0,0 @@ -const MemoryReportingAllocator = @This(); - -const log = bun.Output.scoped(.MEM, .visible); - -child_allocator: std.mem.Allocator, -memory_cost: std.atomic.Value(usize) = std.atomic.Value(usize).init(0), - -fn alloc(context: *anyopaque, n: usize, alignment: std.mem.Alignment, return_address: usize) ?[*]u8 { - const this: *MemoryReportingAllocator = @alignCast(@ptrCast(context)); - const result = this.child_allocator.rawAlloc(n, alignment, return_address) orelse return null; - _ = this.memory_cost.fetchAdd(n, .monotonic); - if (comptime Environment.allow_assert) - log("malloc({d}) = {d}", .{ n, this.memory_cost.raw }); - return result; -} - -pub fn discard(this: *MemoryReportingAllocator, buf: []const u8) void { - _ = this.memory_cost.fetchSub(buf.len, .monotonic); - if (comptime Environment.allow_assert) - log("discard({d}) = {d}", .{ buf.len, this.memory_cost.raw }); -} - -fn resize(context: *anyopaque, buf: []u8, alignment: std.mem.Alignment, new_len: usize, 
ret_addr: usize) bool { - const this: *MemoryReportingAllocator = @alignCast(@ptrCast(context)); - if (this.child_allocator.rawResize(buf, alignment, new_len, ret_addr)) { - _ = this.memory_cost.fetchAdd(new_len -| buf.len, .monotonic); - if (comptime Environment.allow_assert) - log("resize() = {d}", .{this.memory_cost.raw}); - return true; - } else { - return false; - } -} - -fn free(context: *anyopaque, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void { - const this: *MemoryReportingAllocator = @alignCast(@ptrCast(context)); - this.child_allocator.rawFree(buf, alignment, ret_addr); - - if (comptime Environment.allow_assert) { - _ = this.memory_cost.fetchSub(buf.len, .monotonic); - log("free({d}) = {d}", .{ buf.len, this.memory_cost.raw }); - } -} - -pub fn wrap(this: *MemoryReportingAllocator, allocator_: std.mem.Allocator) std.mem.Allocator { - this.* = .{ - .child_allocator = allocator_, - }; - - return this.allocator(); -} - -pub fn allocator(this: *MemoryReportingAllocator) std.mem.Allocator { - return std.mem.Allocator{ - .ptr = this, - .vtable = &MemoryReportingAllocator.VTable, - }; -} - -pub fn report(this: *MemoryReportingAllocator, vm: *jsc.VM) void { - const mem = this.memory_cost.load(.monotonic); - if (mem > 0) { - vm.reportExtraMemory(mem); - if (comptime Environment.allow_assert) - log("report({d})", .{mem}); - } -} - -pub inline fn assert(this: *const MemoryReportingAllocator) void { - if (comptime !Environment.allow_assert) { - return; - } - - const memory_cost = this.memory_cost.load(.monotonic); - if (memory_cost > 0) { - Output.panic("MemoryReportingAllocator still has {d} bytes allocated", .{memory_cost}); - } -} - -pub fn isInstance(allocator_: std.mem.Allocator) bool { - return allocator_.vtable == &VTable; -} - -pub const VTable = std.mem.Allocator.VTable{ - .alloc = &alloc, - .resize = &resize, - .remap = &std.mem.Allocator.noRemap, - .free = &free, -}; - -const std = @import("std"); - -const bun = @import("bun"); -const 
Environment = bun.Environment; -const Output = bun.Output; -const jsc = bun.jsc; diff --git a/src/bun.js.zig b/src/bun.js.zig index 1a4fafbbd4..9adb3e267a 100644 --- a/src/bun.js.zig +++ b/src/bun.js.zig @@ -264,6 +264,17 @@ pub const Run = struct { vm.hot_reload = this.ctx.debug.hot_reload; vm.onUnhandledRejection = &onUnhandledRejectionBeforeClose; + // Start CPU profiler if enabled + if (this.ctx.runtime_options.cpu_prof.enabled) { + const cpu_prof_opts = this.ctx.runtime_options.cpu_prof; + + vm.cpu_profiler_config = CPUProfiler.CPUProfilerConfig{ + .name = cpu_prof_opts.name, + .dir = cpu_prof_opts.dir, + }; + CPUProfiler.startCPUProfiler(vm.jsc_vm); + } + this.addConditionalGlobals(); do_redis_preconnect: { // This must happen within the API lock, which is why it's not in the "doPreconnect" function @@ -529,6 +540,7 @@ const VirtualMachine = jsc.VirtualMachine; const string = []const u8; +const CPUProfiler = @import("./bun.js/bindings/BunCPUProfiler.zig"); const options = @import("./options.zig"); const std = @import("std"); const Command = @import("./cli.zig").Command; diff --git a/src/bun.js/VirtualMachine.zig b/src/bun.js/VirtualMachine.zig index b5fe09398e..0e8bba0807 100644 --- a/src/bun.js/VirtualMachine.zig +++ b/src/bun.js/VirtualMachine.zig @@ -48,6 +48,7 @@ unhandled_pending_rejection_to_capture: ?*JSValue = null, standalone_module_graph: ?*bun.StandaloneModuleGraph = null, smol: bool = false, dns_result_order: DNSResolver.Order = .verbatim, +cpu_profiler_config: ?CPUProfilerConfig = null, counters: Counters = .{}, hot_reload: bun.cli.Command.HotReload = .none, @@ -721,7 +722,7 @@ pub inline fn autoGarbageCollect(this: *const VirtualMachine) void { pub fn reload(this: *VirtualMachine, _: *HotReloader.Task) void { Output.debug("Reloading...", .{}); - const should_clear_terminal = !this.transpiler.env.hasSetNoClearTerminalOnReload(!Output.enable_ansi_colors); + const should_clear_terminal = 
!this.transpiler.env.hasSetNoClearTerminalOnReload(!Output.enable_ansi_colors_stdout); if (this.hot_reload == .watch) { Output.flush(); bun.reloadProcess( @@ -832,6 +833,15 @@ pub fn setEntryPointEvalResultCJS(this: *VirtualMachine, value: JSValue) callcon } pub fn onExit(this: *VirtualMachine) void { + // Write CPU profile if profiling was enabled - do this FIRST before any shutdown begins + // Grab the config and null it out to make this idempotent + if (this.cpu_profiler_config) |config| { + this.cpu_profiler_config = null; + CPUProfiler.stopAndWriteProfile(this.jsc_vm, config) catch |err| { + Output.err(err, "Failed to write CPU profile", .{}); + }; + } + this.exit_handler.dispatchOnExit(); this.is_shutting_down = true; @@ -855,6 +865,7 @@ pub fn globalExit(this: *VirtualMachine) noreturn { // FIXME: we should be doing this, but we're not, but unfortunately doing it // causes like 50+ tests to break // this.eventLoop().tick(); + if (this.shouldDestructMainThreadOnExit()) { if (this.eventLoop().forever_timer) |t| t.deinit(true); Zig__GlobalObject__destructOnExit(this.global); @@ -1967,7 +1978,7 @@ pub fn printException( .stack_check = bun.StackCheck.init(), }; defer formatter.deinit(); - if (Output.enable_ansi_colors) { + if (Output.enable_ansi_colors_stderr) { this.printErrorlikeObject(exception.value(), exception, exception_list, &formatter, Writer, writer, true, allow_side_effects); } else { this.printErrorlikeObject(exception.value(), exception, exception_list, &formatter, Writer, writer, false, allow_side_effects); @@ -2006,7 +2017,7 @@ pub noinline fn runErrorHandler(this: *VirtualMachine, result: JSValue, exceptio .error_display_level = .full, }; defer formatter.deinit(); - switch (Output.enable_ansi_colors) { + switch (Output.enable_ansi_colors_stderr) { inline else => |enable_colors| this.printErrorlikeObject(result, null, exception_list, &formatter, @TypeOf(writer), writer, enable_colors, true), } } @@ -3703,6 +3714,9 @@ const PackageManager = 
@import("../install/install.zig").PackageManager; const URL = @import("../url.zig").URL; const Allocator = std.mem.Allocator; +const CPUProfiler = @import("./bindings/BunCPUProfiler.zig"); +const CPUProfilerConfig = CPUProfiler.CPUProfilerConfig; + const bun = @import("bun"); const Async = bun.Async; const DotEnv = bun.DotEnv; diff --git a/src/bun.js/api/BunObject.zig b/src/bun.js/api/BunObject.zig index 60a13a1e83..596aa7c18f 100644 --- a/src/bun.js/api/BunObject.zig +++ b/src/bun.js/api/BunObject.zig @@ -561,7 +561,7 @@ pub fn getOrigin(globalThis: *jsc.JSGlobalObject, _: *jsc.JSObject) jsc.JSValue pub fn enableANSIColors(globalThis: *jsc.JSGlobalObject, _: *jsc.JSObject) jsc.JSValue { _ = globalThis; - return JSValue.jsBoolean(Output.enable_ansi_colors); + return JSValue.jsBoolean(Output.enable_ansi_colors_stdout or Output.enable_ansi_colors_stderr); } fn getMain(globalThis: *jsc.JSGlobalObject) callconv(jsc.conv) jsc.JSValue { diff --git a/src/bun.js/api/server.classes.ts b/src/bun.js/api/server.classes.ts index ccbd36e8fe..6eb83cff93 100644 --- a/src/bun.js/api/server.classes.ts +++ b/src/bun.js/api/server.classes.ts @@ -312,6 +312,9 @@ export default [ fn: "isSubscribed", length: 1, }, + subscriptions: { + getter: "getSubscriptions", + }, remoteAddress: { getter: "getRemoteAddress", cache: true, diff --git a/src/bun.js/api/server/ServerWebSocket.zig b/src/bun.js/api/server/ServerWebSocket.zig index a47a4795ce..6bdac151e2 100644 --- a/src/bun.js/api/server/ServerWebSocket.zig +++ b/src/bun.js/api/server/ServerWebSocket.zig @@ -1232,6 +1232,18 @@ pub fn isSubscribed( return JSValue.jsBoolean(this.websocket().isSubscribed(topic.slice())); } +pub fn getSubscriptions( + this: *ServerWebSocket, + globalThis: *jsc.JSGlobalObject, +) bun.JSError!JSValue { + if (this.isClosed()) { + return try JSValue.createEmptyArray(globalThis, 0); + } + + // Get the JSValue directly from C++ + return this.websocket().getTopicsAsJSArray(globalThis); +} + pub fn getRemoteAddress( 
this: *ServerWebSocket, globalThis: *jsc.JSGlobalObject, diff --git a/src/bun.js/bindings/BunCPUProfiler.cpp b/src/bun.js/bindings/BunCPUProfiler.cpp new file mode 100644 index 0000000000..e31dccaae6 --- /dev/null +++ b/src/bun.js/bindings/BunCPUProfiler.cpp @@ -0,0 +1,335 @@ +#include "root.h" +#include "BunCPUProfiler.h" +#include "ZigGlobalObject.h" +#include "helpers.h" +#include "BunString.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern "C" void Bun__startCPUProfiler(JSC::VM* vm); +extern "C" BunString Bun__stopCPUProfilerAndGetJSON(JSC::VM* vm); + +namespace Bun { + +// Store the profiling start time in microseconds since Unix epoch +static double s_profilingStartTime = 0.0; + +void startCPUProfiler(JSC::VM& vm) +{ + // Capture the wall clock time when profiling starts (before creating stopwatch) + // This will be used as the profile's startTime + s_profilingStartTime = MonotonicTime::now().approximateWallTime().secondsSinceEpoch().value() * 1000000.0; + + // Create a stopwatch and start it + auto stopwatch = WTF::Stopwatch::create(); + stopwatch->start(); + + JSC::SamplingProfiler& samplingProfiler = vm.ensureSamplingProfiler(WTFMove(stopwatch)); + + // Set sampling interval to 1ms (1000 microseconds) to match Node.js + samplingProfiler.setTimingInterval(WTF::Seconds::fromMicroseconds(1000)); + + samplingProfiler.noticeCurrentThreadAsJSCExecutionThread(); + samplingProfiler.start(); +} + +struct ProfileNode { + int id; + WTF::String functionName; + WTF::String url; + int scriptId; + int lineNumber; + int columnNumber; + int hitCount; + WTF::Vector children; +}; + +WTF::String stopCPUProfilerAndGetJSON(JSC::VM& vm) +{ + JSC::SamplingProfiler* profiler = vm.samplingProfiler(); + if (!profiler) + return WTF::String(); + + // Shut down the profiler thread first - this is critical! 
+ profiler->shutdown(); + + // Need to hold the VM lock to safely access stack traces + JSC::JSLockHolder locker(vm); + + // Defer GC while we're working with stack traces + JSC::DeferGC deferGC(vm); + + auto& lock = profiler->getLock(); + WTF::Locker profilerLocker { lock }; + + // releaseStackTraces() calls processUnverifiedStackTraces() internally + auto stackTraces = profiler->releaseStackTraces(); + + if (stackTraces.isEmpty()) + return WTF::String(); + + // Build Chrome CPU Profiler format + // Map from stack frame signature to node ID + WTF::HashMap nodeMap; + WTF::Vector nodes; + + // Create root node + ProfileNode rootNode; + rootNode.id = 1; + rootNode.functionName = "(root)"_s; + rootNode.url = ""_s; + rootNode.scriptId = 0; + rootNode.lineNumber = -1; + rootNode.columnNumber = -1; + rootNode.hitCount = 0; + nodes.append(WTFMove(rootNode)); + + int nextNodeId = 2; + WTF::Vector samples; + WTF::Vector timeDeltas; + + // Create an index array to process stack traces in chronological order + // We can't sort stackTraces directly because StackTrace has deleted copy assignment + WTF::Vector sortedIndices; + sortedIndices.reserveInitialCapacity(stackTraces.size()); + for (size_t i = 0; i < stackTraces.size(); i++) { + sortedIndices.append(i); + } + + // Sort indices by monotonic timestamp to ensure chronological order + // Use timestamp instead of stopwatchTimestamp for better resolution + // This is critical for calculating correct timeDeltas between samples + std::sort(sortedIndices.begin(), sortedIndices.end(), [&stackTraces](size_t a, size_t b) { + return stackTraces[a].timestamp < stackTraces[b].timestamp; + }); + + // Use the profiling start time that was captured when profiling began + // This ensures the first timeDelta represents the time from profiling start to first sample + double startTime = s_profilingStartTime; + double lastTime = s_profilingStartTime; + + // Process each stack trace in chronological order + for (size_t idx : sortedIndices) { + 
auto& stackTrace = stackTraces[idx]; + if (stackTrace.frames.isEmpty()) { + samples.append(1); // Root node + // Use monotonic timestamp converted to wall clock time + double currentTime = stackTrace.timestamp.approximateWallTime().secondsSinceEpoch().value() * 1000000.0; + double delta = std::max(0.0, currentTime - lastTime); + timeDeltas.append(static_cast(delta)); + lastTime = currentTime; + continue; + } + + int currentParentId = 1; // Start from root + + // Process frames from bottom to top (reverse order for Chrome format) + for (int i = stackTrace.frames.size() - 1; i >= 0; i--) { + auto& frame = stackTrace.frames[i]; + + WTF::String functionName; + WTF::String url; + int scriptId = 0; + int lineNumber = -1; + int columnNumber = -1; + + // Get function name - displayName works for all frame types + functionName = frame.displayName(vm); + + if (frame.frameType == JSC::SamplingProfiler::FrameType::Executable && frame.executable) { + auto sourceProviderAndID = frame.sourceProviderAndID(); + auto* provider = std::get<0>(sourceProviderAndID); + if (provider) { + url = provider->sourceURL(); + scriptId = static_cast(provider->asID()); + + // Convert absolute paths to file:// URLs + // Check for: + // - Unix absolute path: /path/to/file + // - Windows drive letter: C:\path or C:/path + // - Windows UNC path: \\server\share + bool isAbsolutePath = false; + if (!url.isEmpty()) { + if (url[0] == '/') { + // Unix absolute path + isAbsolutePath = true; + } else if (url.length() >= 2 && url[1] == ':') { + // Windows drive letter (e.g., C:\) + char firstChar = url[0]; + if ((firstChar >= 'A' && firstChar <= 'Z') || (firstChar >= 'a' && firstChar <= 'z')) { + isAbsolutePath = true; + } + } else if (url.length() >= 2 && url[0] == '\\' && url[1] == '\\') { + // Windows UNC path (e.g., \\server\share) + isAbsolutePath = true; + } + } + + if (isAbsolutePath) { + url = WTF::URL::fileURLWithFileSystemPath(url).string(); + } + } + + if (frame.hasExpressionInfo()) { + // Apply 
sourcemap if available + JSC::LineColumn sourceMappedLineColumn = frame.semanticLocation.lineColumn; + if (provider) { +#if USE(BUN_JSC_ADDITIONS) + auto& fn = vm.computeLineColumnWithSourcemap(); + if (fn) { + fn(vm, provider, sourceMappedLineColumn); + } +#endif + } + lineNumber = static_cast(sourceMappedLineColumn.line); + columnNumber = static_cast(sourceMappedLineColumn.column); + } + } + + // Create a unique key for this frame based on parent + callFrame + // This creates separate nodes for the same function in different call paths + WTF::StringBuilder keyBuilder; + keyBuilder.append(currentParentId); + keyBuilder.append(':'); + keyBuilder.append(functionName); + keyBuilder.append(':'); + keyBuilder.append(url); + keyBuilder.append(':'); + keyBuilder.append(scriptId); + keyBuilder.append(':'); + keyBuilder.append(lineNumber); + keyBuilder.append(':'); + keyBuilder.append(columnNumber); + + WTF::String key = keyBuilder.toString(); + + int nodeId; + auto it = nodeMap.find(key); + if (it == nodeMap.end()) { + // Create new node + nodeId = nextNodeId++; + nodeMap.add(key, nodeId); + + ProfileNode node; + node.id = nodeId; + node.functionName = functionName; + node.url = url; + node.scriptId = scriptId; + node.lineNumber = lineNumber; + node.columnNumber = columnNumber; + node.hitCount = 0; + + nodes.append(WTFMove(node)); + + // Add this node as child of parent + if (currentParentId > 0) { + nodes[currentParentId - 1].children.append(nodeId); + } + } else { + // Node already exists with this parent+callFrame combination + nodeId = it->value; + } + + currentParentId = nodeId; + + // If this is the top frame, increment hit count + if (i == 0) { + nodes[nodeId - 1].hitCount++; + } + } + + // Add sample pointing to the top frame + samples.append(currentParentId); + + // Add time delta + // Use monotonic timestamp converted to wall clock time + double currentTime = stackTrace.timestamp.approximateWallTime().secondsSinceEpoch().value() * 1000000.0; + double delta = 
std::max(0.0, currentTime - lastTime); + timeDeltas.append(static_cast(delta)); + lastTime = currentTime; + } + + // endTime is the wall clock time of the last sample + double endTime = lastTime; + + // Build JSON using WTF::JSON + using namespace WTF; + auto json = JSON::Object::create(); + + // Add nodes array + auto nodesArray = JSON::Array::create(); + for (const auto& node : nodes) { + auto nodeObj = JSON::Object::create(); + nodeObj->setInteger("id"_s, node.id); + + auto callFrame = JSON::Object::create(); + callFrame->setString("functionName"_s, node.functionName); + callFrame->setString("scriptId"_s, WTF::String::number(node.scriptId)); + callFrame->setString("url"_s, node.url); + callFrame->setInteger("lineNumber"_s, node.lineNumber); + callFrame->setInteger("columnNumber"_s, node.columnNumber); + + nodeObj->setValue("callFrame"_s, callFrame); + nodeObj->setInteger("hitCount"_s, node.hitCount); + + if (!node.children.isEmpty()) { + auto childrenArray = JSON::Array::create(); + WTF::HashSet seenChildren; + for (int childId : node.children) { + if (seenChildren.add(childId).isNewEntry) { + childrenArray->pushInteger(childId); + } + } + nodeObj->setValue("children"_s, childrenArray); + } + + nodesArray->pushValue(nodeObj); + } + json->setValue("nodes"_s, nodesArray); + + // Add timing info in microseconds + // Note: Using setDouble() instead of setInteger() because setInteger() has precision + // issues with large values (> 2^31). Chrome DevTools expects microseconds since Unix epoch, + // which are typically 16-digit numbers. JSON numbers can represent these precisely. 
+ json->setDouble("startTime"_s, startTime); + json->setDouble("endTime"_s, endTime); + + // Add samples array + auto samplesArray = JSON::Array::create(); + for (int sample : samples) { + samplesArray->pushInteger(sample); + } + json->setValue("samples"_s, samplesArray); + + // Add timeDeltas array + auto timeDeltasArray = JSON::Array::create(); + for (long long delta : timeDeltas) { + timeDeltasArray->pushInteger(delta); + } + json->setValue("timeDeltas"_s, timeDeltasArray); + + return json->toJSONString(); +} + +} // namespace Bun + +extern "C" void Bun__startCPUProfiler(JSC::VM* vm) +{ + Bun::startCPUProfiler(*vm); +} + +extern "C" BunString Bun__stopCPUProfilerAndGetJSON(JSC::VM* vm) +{ + WTF::String result = Bun::stopCPUProfilerAndGetJSON(*vm); + return Bun::toStringRef(result); +} diff --git a/src/bun.js/bindings/BunCPUProfiler.h b/src/bun.js/bindings/BunCPUProfiler.h new file mode 100644 index 0000000000..4a7b9c739e --- /dev/null +++ b/src/bun.js/bindings/BunCPUProfiler.h @@ -0,0 +1,20 @@ +#pragma once + +#include "root.h" +#include + +namespace JSC { +class JSGlobalObject; +class VM; +} + +namespace Bun { + +// Start the CPU profiler +void startCPUProfiler(JSC::VM& vm); + +// Stop the CPU profiler and convert to Chrome CPU profiler JSON format +// Returns JSON string, or empty string on failure +WTF::String stopCPUProfilerAndGetJSON(JSC::VM& vm); + +} // namespace Bun diff --git a/src/bun.js/bindings/BunCPUProfiler.zig b/src/bun.js/bindings/BunCPUProfiler.zig new file mode 100644 index 0000000000..e99b445238 --- /dev/null +++ b/src/bun.js/bindings/BunCPUProfiler.zig @@ -0,0 +1,98 @@ +pub const CPUProfilerConfig = struct { + name: []const u8, + dir: []const u8, +}; + +// C++ function declarations +extern fn Bun__startCPUProfiler(vm: *jsc.VM) void; +extern fn Bun__stopCPUProfilerAndGetJSON(vm: *jsc.VM) bun.String; + +pub fn startCPUProfiler(vm: *jsc.VM) void { + Bun__startCPUProfiler(vm); +} + +pub fn stopAndWriteProfile(vm: *jsc.VM, config: 
CPUProfilerConfig) !void { + const json_string = Bun__stopCPUProfilerAndGetJSON(vm); + defer json_string.deref(); + + if (json_string.isEmpty()) { + // No profile data or profiler wasn't started + return; + } + + const json_slice = json_string.toUTF8(bun.default_allocator); + defer json_slice.deinit(); + + // Determine the output path using AutoAbsPath + var path_buf: bun.AutoAbsPath = .initTopLevelDir(); + defer path_buf.deinit(); + + try buildOutputPath(&path_buf, config); + + // Convert to OS-specific path (UTF-16 on Windows, UTF-8 elsewhere) + var path_buf_os: bun.OSPathBuffer = undefined; + const output_path_os: bun.OSPathSliceZ = if (bun.Environment.isWindows) + bun.strings.convertUTF8toUTF16InBufferZ(&path_buf_os, path_buf.sliceZ()) + else + path_buf.sliceZ(); + + // Write the profile to disk using bun.sys.File.writeFile + const result = bun.sys.File.writeFile(bun.FD.cwd(), output_path_os, json_slice.slice()); + if (result.asErr()) |err| { + // If we got ENOENT, PERM, or ACCES, try creating the directory and retry + const errno = err.getErrno(); + if (errno == .NOENT or errno == .PERM or errno == .ACCES) { + if (config.dir.len > 0) { + bun.makePath(bun.FD.cwd().stdDir(), config.dir) catch {}; + // Retry write + const retry_result = bun.sys.File.writeFile(bun.FD.cwd(), output_path_os, json_slice.slice()); + if (retry_result.asErr()) |_| { + return error.WriteFailed; + } + } else { + return error.WriteFailed; + } + } else { + return error.WriteFailed; + } + } +} + +fn buildOutputPath(path: *bun.AutoAbsPath, config: CPUProfilerConfig) !void { + // Generate filename + var filename_buf: bun.PathBuffer = undefined; + const filename = if (config.name.len > 0) + config.name + else + try generateDefaultFilename(&filename_buf); + + // Append directory if specified + if (config.dir.len > 0) { + path.append(config.dir); + } + + // Append filename + path.append(filename); +} + +fn generateDefaultFilename(buf: *bun.PathBuffer) ![]const u8 { + // Generate filename like: 
CPU.{timestamp}.{pid}.cpuprofile + // Use microsecond timestamp for uniqueness + const timespec = bun.timespec.now(); + const pid = if (bun.Environment.isWindows) + std.os.windows.GetCurrentProcessId() + else + std.c.getpid(); + + const epoch_microseconds: u64 = @intCast(timespec.sec *% 1_000_000 +% @divTrunc(timespec.nsec, 1000)); + + return try std.fmt.bufPrint(buf, "CPU.{d}.{d}.cpuprofile", .{ + epoch_microseconds, + pid, + }); +} + +const std = @import("std"); + +const bun = @import("bun"); +const jsc = bun.jsc; diff --git a/src/bun.js/bindings/BunProcess.cpp b/src/bun.js/bindings/BunProcess.cpp index 81bd8c979f..671711baf5 100644 --- a/src/bun.js/bindings/BunProcess.cpp +++ b/src/bun.js/bindings/BunProcess.cpp @@ -3512,6 +3512,24 @@ extern "C" void Bun__Process__queueNextTick2(GlobalObject* globalObject, Encoded process->queueNextTick<2>(globalObject, function, { JSValue::decode(arg1), JSValue::decode(arg2) }); } +// This does the equivalent of +// return require.cache.get(Bun.main) +static JSValue constructMainModuleProperty(VM& vm, JSObject* processObject) +{ + auto scope = DECLARE_THROW_SCOPE(vm); + auto* globalObject = defaultGlobalObject(processObject->globalObject()); + auto* bun = globalObject->bunObject(); + RETURN_IF_EXCEPTION(scope, {}); + auto& builtinNames = Bun::builtinNames(vm); + JSValue mainValue = bun->get(globalObject, builtinNames.mainPublicName()); + RETURN_IF_EXCEPTION(scope, {}); + auto* requireMap = globalObject->requireMap(); + RETURN_IF_EXCEPTION(scope, {}); + JSValue mainModule = requireMap->get(globalObject, mainValue); + RETURN_IF_EXCEPTION(scope, {}); + return mainModule; +} + JSValue Process::constructNextTickFn(JSC::VM& vm, Zig::GlobalObject* globalObject) { JSNextTickQueue* nextTickQueueObject; @@ -3908,7 +3926,7 @@ extern "C" void Process__emitErrorEvent(Zig::GlobalObject* global, EncodedJSValu hrtime constructProcessHrtimeObject PropertyCallback isBun constructIsBun PropertyCallback kill Process_functionKill Function 2 - 
mainModule processObjectInternalsMainModuleCodeGenerator Builtin|Accessor + mainModule constructMainModuleProperty PropertyCallback memoryUsage constructMemoryUsage PropertyCallback moduleLoadList Process_stubEmptyArray PropertyCallback nextTick constructProcessNextTickFn PropertyCallback diff --git a/src/bun.js/bindings/BunString.cpp b/src/bun.js/bindings/BunString.cpp index ce4bbd8f51..6c90ec612e 100644 --- a/src/bun.js/bindings/BunString.cpp +++ b/src/bun.js/bindings/BunString.cpp @@ -624,6 +624,22 @@ extern "C" BunString URL__getHrefJoin(BunString* baseStr, BunString* relativeStr return Bun::toStringRef(url.string()); } +extern "C" BunString URL__hash(WTF::URL* url) +{ + const auto& fragment = url->fragmentIdentifier().isEmpty() + ? emptyString() + : url->fragmentIdentifierWithLeadingNumberSign().toStringWithoutCopying(); + return Bun::toStringRef(fragment); +} + +extern "C" BunString URL__fragmentIdentifier(WTF::URL* url) +{ + const auto& fragment = url->fragmentIdentifier().isEmpty() + ? 
emptyString() + : url->fragmentIdentifier().toStringWithoutCopying(); + return Bun::toStringRef(fragment); +} + extern "C" WTF::URL* URL__fromString(BunString* input) { auto&& str = input->toWTFString(); diff --git a/src/bun.js/bindings/FormatStackTraceForJS.cpp b/src/bun.js/bindings/FormatStackTraceForJS.cpp index 9869f17c68..be5241027a 100644 --- a/src/bun.js/bindings/FormatStackTraceForJS.cpp +++ b/src/bun.js/bindings/FormatStackTraceForJS.cpp @@ -541,6 +541,29 @@ WTF::String computeErrorInfoWrapperToString(JSC::VM& vm, Vector& sta return result; } +void computeLineColumnWithSourcemap(JSC::VM& vm, JSC::SourceProvider* _Nonnull sourceProvider, JSC::LineColumn& lineColumn) +{ + auto sourceURL = sourceProvider->sourceURL(); + if (sourceURL.isEmpty()) { + return; + } + + OrdinalNumber line = OrdinalNumber::fromOneBasedInt(lineColumn.line); + OrdinalNumber column = OrdinalNumber::fromOneBasedInt(lineColumn.column); + + ZigStackFrame frame = {}; + frame.position.line_zero_based = line.zeroBasedInt(); + frame.position.column_zero_based = column.zeroBasedInt(); + frame.source_url = Bun::toStringRef(sourceURL); + + Bun__remapStackFramePositions(Bun::vm(vm), &frame, 1); + + if (frame.remapped) { + lineColumn.line = frame.position.line().oneBasedInt(); + lineColumn.column = frame.position.column().oneBasedInt(); + } +} + JSC::JSValue computeErrorInfoWrapperToJSValue(JSC::VM& vm, Vector& stackTrace, unsigned int& line_in, unsigned int& column_in, String& sourceURL, JSObject* errorInstance, void* bunErrorData) { OrdinalNumber line = OrdinalNumber::fromOneBasedInt(line_in); diff --git a/src/bun.js/bindings/FormatStackTraceForJS.h b/src/bun.js/bindings/FormatStackTraceForJS.h index 38acdcb5c1..2de3fcd52c 100644 --- a/src/bun.js/bindings/FormatStackTraceForJS.h +++ b/src/bun.js/bindings/FormatStackTraceForJS.h @@ -82,7 +82,7 @@ JSC_DECLARE_CUSTOM_SETTER(errorInstanceLazyStackCustomSetter); // Internal wrapper functions for JSC error info callbacks WTF::String 
computeErrorInfoWrapperToString(JSC::VM& vm, WTF::Vector& stackTrace, unsigned int& line_in, unsigned int& column_in, WTF::String& sourceURL, void* bunErrorData); JSC::JSValue computeErrorInfoWrapperToJSValue(JSC::VM& vm, WTF::Vector& stackTrace, unsigned int& line_in, unsigned int& column_in, WTF::String& sourceURL, JSC::JSObject* errorInstance, void* bunErrorData); - +void computeLineColumnWithSourcemap(JSC::VM& vm, JSC::SourceProvider* _Nonnull sourceProvider, JSC::LineColumn& lineColumn); } // namespace Bun namespace Zig { diff --git a/src/bun.js/bindings/JSGlobalObject.zig b/src/bun.js/bindings/JSGlobalObject.zig index 478558a99a..3ef7062c74 100644 --- a/src/bun.js/bindings/JSGlobalObject.zig +++ b/src/bun.js/bindings/JSGlobalObject.zig @@ -376,7 +376,7 @@ pub const JSGlobalObject = opaque { } pub fn throwPretty(this: *JSGlobalObject, comptime fmt: [:0]const u8, args: anytype) bun.JSError { - const instance = switch (Output.enable_ansi_colors) { + const instance = switch (Output.enable_ansi_colors_stderr) { inline else => |enabled| this.createErrorInstance(Output.prettyFmt(fmt, enabled), args), }; bun.assert(instance != .zero); diff --git a/src/bun.js/bindings/JSValue.zig b/src/bun.js/bindings/JSValue.zig index 9a208b2d4b..15157d95f9 100644 --- a/src/bun.js/bindings/JSValue.zig +++ b/src/bun.js/bindings/JSValue.zig @@ -727,7 +727,7 @@ pub const JSValue = enum(i64) { defer buf.deinit(); var writer = buf.writer(); - switch (Output.enable_ansi_colors) { + switch (Output.enable_ansi_colors_stderr) { inline else => |enabled| try writer.print(Output.prettyFmt(fmt, enabled), args), } return String.init(buf.slice()).toJS(globalThis); @@ -1136,9 +1136,6 @@ pub const JSValue = enum(i64) { pub fn asArrayBuffer(this: JSValue, global: *JSGlobalObject) ?ArrayBuffer { var out: ArrayBuffer = undefined; - // `ptr` might not get set if the ArrayBuffer is empty, so make sure it starts out with a - // defined value. 
- out.ptr = &.{}; if (JSC__JSValue__asArrayBuffer(this, global, &out)) { return out; } diff --git a/src/bun.js/bindings/URL.zig b/src/bun.js/bindings/URL.zig index 2d75fe6301..42691e2646 100644 --- a/src/bun.js/bindings/URL.zig +++ b/src/bun.js/bindings/URL.zig @@ -16,6 +16,20 @@ pub const URL = opaque { extern fn URL__getFileURLString(*String) String; extern fn URL__getHrefJoin(*String, *String) String; extern fn URL__pathFromFileURL(*String) String; + extern fn URL__hash(*URL) String; + extern fn URL__fragmentIdentifier(*URL) String; + + /// Includes the leading '#'. + pub fn hash(url: *URL) String { + jsc.markBinding(@src()); + return URL__hash(url); + } + + /// Exactly the same as hash, excluding the leading '#'. + pub fn fragmentIdentifier(url: *URL) String { + jsc.markBinding(@src()); + return URL__fragmentIdentifier(url); + } pub fn hrefFromString(str: bun.String) String { jsc.markBinding(@src()); diff --git a/src/bun.js/bindings/ZigGlobalObject.cpp b/src/bun.js/bindings/ZigGlobalObject.cpp index a2f8b35b4c..d53dce02e3 100644 --- a/src/bun.js/bindings/ZigGlobalObject.cpp +++ b/src/bun.js/bindings/ZigGlobalObject.cpp @@ -503,6 +503,7 @@ extern "C" JSC::JSGlobalObject* Zig__GlobalObject__create(void* console_client, vm.setOnComputeErrorInfo(computeErrorInfoWrapperToString); vm.setOnComputeErrorInfoJSValue(computeErrorInfoWrapperToJSValue); + vm.setComputeLineColumnWithSourcemap(computeLineColumnWithSourcemap); vm.setOnEachMicrotaskTick([](JSC::VM& vm) -> void { // if you process.nextTick on a microtask we need this auto* globalObject = defaultGlobalObject(); diff --git a/src/bun.js/bindings/bindings.cpp b/src/bun.js/bindings/bindings.cpp index a3153d67e1..a0a514865d 100644 --- a/src/bun.js/bindings/bindings.cpp +++ b/src/bun.js/bindings/bindings.cpp @@ -3086,11 +3086,7 @@ bool JSC__JSValue__asArrayBuffer( } } out->_value = JSValue::encode(value); - if (data) { - // Avoid setting `ptr` to null; the corresponding Zig field is a non-optional pointer. 
- // The caller should have already set `ptr` to a zero-length array. - out->ptr = static_cast(data); - } + out->ptr = static_cast(data); return true; } @@ -6071,10 +6067,7 @@ extern "C" void JSC__ArrayBuffer__deref(JSC::ArrayBuffer* self) { self->deref(); extern "C" void JSC__ArrayBuffer__asBunArrayBuffer(JSC::ArrayBuffer* self, Bun__ArrayBuffer* out) { const std::size_t byteLength = self->byteLength(); - if (void* data = self->data()) { - // Avoid setting `ptr` to null; it's a non-optional pointer in Zig. - out->ptr = static_cast(data); - } + out->ptr = static_cast(self->data()); out->len = byteLength; out->byte_len = byteLength; out->_value = 0; diff --git a/src/bun.js/bindings/napi.cpp b/src/bun.js/bindings/napi.cpp index cc8f63465f..63027dd403 100644 --- a/src/bun.js/bindings/napi.cpp +++ b/src/bun.js/bindings/napi.cpp @@ -1990,16 +1990,40 @@ extern "C" napi_status napi_create_external_buffer(napi_env env, size_t length, NAPI_CHECK_ARG(env, result); Zig::GlobalObject* globalObject = toJS(env); - - auto arrayBuffer = ArrayBuffer::createFromBytes({ reinterpret_cast(data), length }, createSharedTask([env = WTF::Ref(*env), finalize_hint, finalize_cb](void* p) { - NAPI_LOG("external buffer finalizer"); - env->doFinalizer(finalize_cb, p, finalize_hint); - })); + JSC::VM& vm = JSC::getVM(globalObject); auto* subclassStructure = globalObject->JSBufferSubclassStructure(); + if (data == nullptr || length == 0) { + + // TODO: is there a way to create a detached uint8 array? 
+ auto arrayBuffer = JSC::ArrayBuffer::createUninitialized(0, 1); + auto* buffer = JSC::JSUint8Array::create(globalObject, subclassStructure, WTFMove(arrayBuffer), 0, 0); + NAPI_RETURN_IF_EXCEPTION(env); + buffer->existingBuffer()->detach(vm); + + vm.heap.addFinalizer(buffer, [env = WTF::Ref(*env), finalize_cb, data, finalize_hint](JSCell* cell) -> void { + NAPI_LOG("external buffer finalizer (empty buffer)"); + env->doFinalizer(finalize_cb, data, finalize_hint); + }); + + *result = toNapi(buffer, globalObject); + NAPI_RETURN_SUCCESS(env); + } + + auto arrayBuffer = ArrayBuffer::createFromBytes({ reinterpret_cast(data), length }, createSharedTask([](void*) { + // do nothing + })); + auto* buffer = JSC::JSUint8Array::create(globalObject, subclassStructure, WTFMove(arrayBuffer), 0, length); NAPI_RETURN_IF_EXCEPTION(env); + // setup finalizer after creating the array. if it throws callers of napi_create_external_buffer are expected + // to free input + vm.heap.addFinalizer(buffer, [env = WTF::Ref(*env), finalize_cb, data, finalize_hint](JSCell* cell) -> void { + NAPI_LOG("external buffer finalizer"); + env->doFinalizer(finalize_cb, data, finalize_hint); + }); + *result = toNapi(buffer, globalObject); NAPI_RETURN_SUCCESS(env); } diff --git a/src/bun.js/bindings/uws_bindings.cpp b/src/bun.js/bindings/uws_bindings.cpp new file mode 100644 index 0000000000..838f9f0aa7 --- /dev/null +++ b/src/bun.js/bindings/uws_bindings.cpp @@ -0,0 +1,47 @@ +// clang-format off +#include "root.h" + +#include "JavaScriptCore/JSGlobalObject.h" +#include "JavaScriptCore/JSArray.h" +#include "JavaScriptCore/ObjectConstructor.h" +#include "wtf/text/WTFString.h" +#include +#include +#include + +typedef void uws_websocket_t; + +using TLSWebSocket = uWS::WebSocket; +using TCPWebSocket = uWS::WebSocket; + +// Template helpers (must be outside extern "C") +template +static JSC::EncodedJSValue uws_ws_get_topics_as_js_array_impl(uws_websocket_t *ws, void* globalObject) { + JSC::JSGlobalObject* global 
= reinterpret_cast(globalObject); + JSC::VM& vm = global->vm(); + + using WebSocketType = typename std::conditional::type; + WebSocketType *uws = reinterpret_cast(ws); + + JSC::MarkedArgumentBuffer args; + { + // Scope ensures the iterator lock is released before constructArray + uws->iterateTopics([&](std::string_view topic) { + auto str = WTF::String::fromUTF8ReplacingInvalidSequences(std::span { + reinterpret_cast(topic.data()), + topic.length() + }); + args.append(JSC::jsString(vm, str)); + }); + } + + return JSC::JSValue::encode(JSC::constructArray(global, static_cast(nullptr), args)); +} + +extern "C" JSC::EncodedJSValue uws_ws_get_topics_as_js_array(int ssl, uws_websocket_t *ws, void* globalObject) { + if (ssl) { + return uws_ws_get_topics_as_js_array_impl(ws, globalObject); + } else { + return uws_ws_get_topics_as_js_array_impl(ws, globalObject); + } +} diff --git a/src/bun.js/event_loop/README.md b/src/bun.js/event_loop/README.md new file mode 100644 index 0000000000..78c5ee0228 --- /dev/null +++ b/src/bun.js/event_loop/README.md @@ -0,0 +1,360 @@ +# Bun Event Loop Architecture + +This document explains how Bun's event loop works, including task draining, microtasks, process.nextTick, setTimeout ordering, and I/O polling integration. + +## Overview + +Bun's event loop is built on top of **uSockets** (a cross-platform event loop based on epoll/kqueue) and integrates with **JavaScriptCore's** microtask queue and a custom **process.nextTick** queue. The event loop processes tasks in a specific order to ensure correct JavaScript semantics while maximizing performance. + +## Core Components + +### 1. Task Queue (`src/bun.js/event_loop/Task.zig`) + +A tagged pointer union containing various async task types (file I/O, network requests, timers, etc.). Tasks are queued by various subsystems and drained by the main event loop. + +### 2. 
Immediate Tasks (`event_loop.zig:14-15`) + +Two separate queues for `setImmediate()`: + +- **`immediate_tasks`**: Tasks to run on the current tick +- **`next_immediate_tasks`**: Tasks to run on the next tick + +This prevents infinite loops when `setImmediate` is called within a `setImmediate` callback. + +### 3. Concurrent Task Queue (`event_loop.zig:17`) + +Thread-safe queue for tasks enqueued from worker threads or async operations. These are moved to the main task queue before processing. + +### 4. Deferred Task Queue (`src/bun.js/event_loop/DeferredTaskQueue.zig`) + +For operations that should be batched and deferred until after microtasks drain (e.g., buffered HTTP response writes, file sink flushes). This avoids excessive system calls while maintaining responsiveness. + +### 5. Process.nextTick Queue (`src/bun.js/bindings/JSNextTickQueue.cpp`) + +Node.js-compatible implementation of `process.nextTick()`, which runs before microtasks but after each task. + +### 6. Microtask Queue (JavaScriptCore VM) + +Built-in JSC microtask queue for promises and queueMicrotask. + +## Event Loop Flow + +### Main Tick Flow (`event_loop.zig:477-513`) + +``` +┌─────────────────────────────────────┐ +│ 1. Tick concurrent tasks │ ← Move tasks from concurrent queue +└──────────────┬──────────────────────┘ + │ + ▼ +┌─────────────────────────────────────┐ +│ 2. Process GC timer │ +└──────────────┬──────────────────────┘ + │ + ▼ +┌─────────────────────────────────────┐ +│ 3. Drain regular task queue │ ← tickQueueWithCount() +│ For each task: │ +│ - Run task │ +│ - Release weak refs │ +│ - Drain microtasks │ +│ (See detailed flow below) │ +└──────────────┬──────────────────────┘ + │ + ▼ +┌─────────────────────────────────────┐ +│ 4. Handle rejected promises │ +└─────────────────────────────────────┘ +``` + +### autoTick Flow (`event_loop.zig:349-401`) + +This is called when the event loop is active and needs to wait for I/O: + +``` +┌─────────────────────────────────────┐ +│ 1. 
Tick immediate tasks │ ← setImmediate() callbacks +└──────────────┬──────────────────────┘ + │ + ▼ +┌─────────────────────────────────────┐ +│ 2. Update date header timer │ +└──────────────┬──────────────────────┘ + │ + ▼ +┌─────────────────────────────────────┐ +│ 3. Process GC timer │ +└──────────────┬──────────────────────┘ + │ + ▼ +┌─────────────────────────────────────┐ +│ 4. Poll I/O via uSockets │ ← epoll_wait/kevent with timeout +│ (epoll_kqueue.c:251-320) │ +│ - Dispatch ready polls │ +│ - Each I/O event treated as task│ +└──────────────┬──────────────────────┘ + │ + ▼ +┌─────────────────────────────────────┐ +│ 5. Drain timers (POSIX) │ ← setTimeout/setInterval callbacks +└──────────────┬──────────────────────┘ + │ + ▼ +┌─────────────────────────────────────┐ +│ 6. Call VM.onAfterEventLoop() │ +└──────────────┬──────────────────────┘ + │ + ▼ +┌─────────────────────────────────────┐ +│ 7. Handle rejected promises │ +└─────────────────────────────────────┘ +``` + +## Task Draining Algorithm + +### For Regular Tasks (`Task.zig:97-512`) + +For each task dequeued from the task queue: + +``` +┌─────────────────────────────────────────────────────────────┐ +│ FOR EACH TASK in task queue: │ +│ │ +│ 1. RUN THE TASK (Task.zig:135-506) │ +│ └─> Execute task.runFromJSThread() or equivalent │ +│ │ +│ 2. 
DRAIN MICROTASKS (Task.zig:508) │ +│ └─> drainMicrotasksWithGlobal() │ +│ │ │ +│ ├─> RELEASE WEAK REFS (event_loop.zig:129) │ +│ │ └─> VM.releaseWeakRefs() │ +│ │ │ +│ ├─> CALL JSC__JSGlobalObject__drainMicrotasks() │ +│ │ (ZigGlobalObject.cpp:2793-2840) │ +│ │ │ │ +│ │ ├─> IF nextTick queue exists and not empty: │ +│ │ │ └─> Call processTicksAndRejections() │ +│ │ │ (ProcessObjectInternals.ts:295-335) │ +│ │ │ │ │ +│ │ │ └─> DO-WHILE loop: │ +│ │ │ ├─> Process ALL nextTick callbacks │ +│ │ │ │ (with try/catch & async ctx) │ +│ │ │ │ │ +│ │ │ └─> drainMicrotasks() │ +│ │ │ (promises, queueMicrotask) │ +│ │ │ WHILE queue not empty │ +│ │ │ │ +│ │ └─> ALWAYS call vm.drainMicrotasks() again │ +│ │ (safety net for any remaining microtasks) │ +│ │ │ +│ └─> RUN DEFERRED TASK QUEUE (event_loop.zig:136-138)│ +│ └─> deferred_tasks.run() │ +│ (buffered writes, file sink flushes, etc.) │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +### Key Points + +#### Process.nextTick Ordering (`ZigGlobalObject.cpp:2818-2829`) + +The process.nextTick queue is special: + +- It runs **before** microtasks +- After processing **all** nextTick callbacks in the current batch, microtasks are drained +- This creates batched processing with interleaving between nextTick generations and promises: + +```javascript +Promise.resolve().then(() => console.log("promise 1")); +process.nextTick(() => { + console.log("nextTick 1"); + Promise.resolve().then(() => console.log("promise 2")); +}); +process.nextTick(() => console.log("nextTick 2")); + +// Output: +// nextTick 1 +// nextTick 2 +// promise 1 +// promise 2 +``` + +If a nextTick callback schedules another nextTick, it goes to the next batch: + +```javascript +process.nextTick(() => { + console.log("nextTick 1"); + process.nextTick(() => console.log("nextTick 3")); + Promise.resolve().then(() => console.log("promise 2")); +}); +process.nextTick(() => console.log("nextTick 2")); +Promise.resolve().then(() => 
console.log("promise 1")); + +// Output: +// nextTick 1 +// nextTick 2 +// promise 1 +// promise 2 +// nextTick 3 +``` + +The implementation (`ProcessObjectInternals.ts:295-335`): + +```typescript +function processTicksAndRejections() { + var tock; + do { + while ((tock = queue.shift()) !== null) { + // Run the callback with async context + try { + callback(...args); + } catch (e) { + reportUncaughtException(e); + } + } + + drainMicrotasks(); // ← Drain promises after each batch + } while (!queue.isEmpty()); +} +``` + +#### Deferred Task Queue (`DeferredTaskQueue.zig:44-61`) + +Runs after microtasks to batch operations: + +- Used for buffered HTTP writes, file sink flushes +- Prevents re-entrancy issues +- Balances latency vs. throughput + +The queue maintains a map of `(pointer, task_fn)` pairs and runs each task. If a task returns `true`, it remains in the queue for the next drain; if `false`, it's removed. + +## I/O Polling Integration + +### uSockets Event Loop (`epoll_kqueue.c:251-320`) + +The I/O poll is integrated into the event loop via `us_loop_run_bun_tick()`: + +``` +┌─────────────────────────────────────────────────────────────┐ +│ us_loop_run_bun_tick(): │ +│ │ +│ 1. EMIT PRE-CALLBACK (us_internal_loop_pre) │ +│ │ +│ 2. CALL Bun__JSC_onBeforeWait(jsc_vm) │ +│ └─> Notify VM we're about to block │ +│ │ +│ 3. POLL I/O │ +│ ├─> epoll_pwait2() [Linux] │ +│ └─> kevent64() [macOS/BSD] │ +│ └─> Block with timeout until I/O ready │ +│ │ +│ 4. FOR EACH READY POLL: │ +│ │ │ +│ ├─> Check events & errors │ +│ │ │ +│ └─> us_internal_dispatch_ready_poll() │ +│ │ │ +│ └─> This enqueues tasks or callbacks that will: │ +│ - Add tasks to the concurrent task queue │ +│ - Eventually trigger drainMicrotasks │ +│ │ +│ 5. EMIT POST-CALLBACK (us_internal_loop_post) │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +### I/O Events Handling + +When I/O becomes ready (socket readable/writable, file descriptor ready): + +1. 
The poll is dispatched via `us_internal_dispatch_ready_poll()` or `Bun__internal_dispatch_ready_poll()` +2. This triggers the appropriate callback **synchronously during the I/O poll phase** +3. The callback may: + - Directly execute JavaScript (must use `EventLoop.enter()/exit()`) + - Enqueue a task to the concurrent task queue for later processing + - Update internal state and return (e.g., `FilePoll.onUpdate()`) +4. If JavaScript is called via `enter()/exit()`, microtasks are drained when `entered_event_loop_count` reaches 0 + +**Important**: I/O callbacks don't automatically get the microtask draining behavior - they must explicitly wrap JS calls in `enter()/exit()` or use `runCallback()` to ensure proper microtask handling. This is why some I/O operations enqueue tasks to the concurrent queue instead of running JavaScript directly. + +## setTimeout and setInterval Ordering + +Timers are handled differently based on platform: + +### POSIX (`event_loop.zig:396`) + +```zig +ctx.timer.drainTimers(ctx); +``` + +Timers are drained after I/O polling. Each timer callback: + +1. Is wrapped in `enter()`/`exit()` +2. Triggers microtask draining after execution +3. Can enqueue new tasks + +### Windows + +Uses the uv_timer_t mechanism integrated into the uSockets loop. + +### Timer vs. 
setImmediate Ordering + +```javascript +setTimeout(() => console.log("timeout"), 0); +setImmediate(() => console.log("immediate")); + +// Output is typically: +// immediate +// timeout +``` + +This is because: + +- `setImmediate` runs in `tickImmediateTasks()` before I/O polling +- `setTimeout` fires after I/O polling (even with 0ms) +- However, this can vary based on timing and event loop state + +## Enter/Exit Mechanism + +The event loop uses a counter to track when to drain microtasks: + +```zig +pub fn enter(this: *EventLoop) void { + this.entered_event_loop_count += 1; +} + +pub fn exit(this: *EventLoop) void { + const count = this.entered_event_loop_count; + if (count == 1 and !this.virtual_machine.is_inside_deferred_task_queue) { + this.drainMicrotasksWithGlobal(this.global, this.virtual_machine.jsc_vm) catch {}; + } + this.entered_event_loop_count -= 1; +} +``` + +This ensures microtasks are only drained once per top-level event loop task, even if JavaScript calls into native code which calls back into JavaScript multiple times. + +## Summary + +The Bun event loop processes work in this order: + +1. **Immediate tasks** (setImmediate) +2. **I/O polling** (epoll/kqueue) +3. **Timer callbacks** (setTimeout/setInterval) +4. **Regular tasks** from the task queue + - For each task: + - Run the task + - Release weak references + - Check for nextTick queue + - If active: Run nextTick callbacks, drain microtasks after each + - If not: Just drain microtasks + - Drain deferred task queue +5. **Handle rejected promises** + +This architecture ensures: + +- ✅ Correct Node.js semantics for process.nextTick vs. 
promises +- ✅ Efficient batching of I/O operations +- ✅ Minimal microtask latency +- ✅ Prevention of infinite loops from self-enqueueing tasks +- ✅ Proper async context propagation diff --git a/src/bun.js/hot_reloader.zig b/src/bun.js/hot_reloader.zig index 2f00201003..ec49bcf0f9 100644 --- a/src/bun.js/hot_reloader.zig +++ b/src/bun.js/hot_reloader.zig @@ -291,7 +291,7 @@ pub fn NewHotReloader(comptime Ctx: type, comptime EventLoopType: type, comptime this.transpiler.resolver.watcher = bun.resolver.ResolveWatcher(*Watcher, Watcher.onMaybeWatchDirectory).init(this.bun_watcher.?); } - clear_screen = !this.transpiler.env.hasSetNoClearTerminalOnReload(!Output.enable_ansi_colors); + clear_screen = !this.transpiler.env.hasSetNoClearTerminalOnReload(!Output.enable_ansi_colors_stdout); reloader.getContext().start() catch @panic("Failed to start File Watcher"); } diff --git a/src/bun.js/jsc/array_buffer.zig b/src/bun.js/jsc/array_buffer.zig index 19b8cde91e..add592e206 100644 --- a/src/bun.js/jsc/array_buffer.zig +++ b/src/bun.js/jsc/array_buffer.zig @@ -1,11 +1,15 @@ pub const ArrayBuffer = extern struct { - ptr: [*]u8 = &[0]u8{}, + ptr: ?[*]u8 = null, len: usize = 0, byte_len: usize = 0, value: jsc.JSValue = jsc.JSValue.zero, typed_array_type: jsc.JSValue.JSType = .Cell, shared: bool = false, + pub fn isDetached(this: *const ArrayBuffer) bool { + return this.ptr == null; + } + // require('buffer').kMaxLength. 
// keep in sync with Bun::Buffer::kMaxLength pub const max_size = std.math.maxInt(c_uint); @@ -315,7 +319,10 @@ pub const ArrayBuffer = extern struct { /// new ArrayBuffer(view.buffer, view.byteOffset, view.byteLength) /// ``` pub inline fn byteSlice(this: *const @This()) []u8 { - return this.ptr[0..this.byte_len]; + if (this.isDetached()) { + return &.{}; + } + return this.ptr.?[0..this.byte_len]; } /// The equivalent of @@ -330,7 +337,10 @@ pub const ArrayBuffer = extern struct { } pub inline fn asU16Unaligned(this: *const @This()) []align(1) u16 { - return @ptrCast(this.ptr[0 .. this.byte_len / @sizeOf(u16) * @sizeOf(u16)]); + if (this.isDetached()) { + return &.{}; + } + return @ptrCast(this.ptr.?[0 .. this.byte_len / @sizeOf(u16) * @sizeOf(u16)]); } pub inline fn asU32(this: *const @This()) []u32 { @@ -338,7 +348,10 @@ pub const ArrayBuffer = extern struct { } pub inline fn asU32Unaligned(this: *const @This()) []align(1) u32 { - return @ptrCast(this.ptr[0 .. this.byte_len / @sizeOf(u32) * @sizeOf(u32)]); + if (this.isDetached()) { + return &.{}; + } + return @ptrCast(this.ptr.?[0 .. 
this.byte_len / @sizeOf(u32) * @sizeOf(u32)]); } pub const BinaryType = enum(u4) { @@ -668,7 +681,6 @@ pub const JSCArrayBuffer = opaque { pub fn asArrayBuffer(self: *Self) ArrayBuffer { var out: ArrayBuffer = undefined; - out.ptr = &.{}; // `ptr` might not get set if the ArrayBuffer is empty JSC__ArrayBuffer__asBunArrayBuffer(self, &out); return out; } diff --git a/src/bun.js/test/diff_format.zig b/src/bun.js/test/diff_format.zig index ee78b91e84..d4d60f7d05 100644 --- a/src/bun.js/test/diff_format.zig +++ b/src/bun.js/test/diff_format.zig @@ -11,7 +11,7 @@ pub const DiffFormatter = struct { // defer scope.deinit(); // TODO: fix leaks const allocator = scope.allocator(); - const diff_config: DiffConfig = .default(Output.isAIAgent(), Output.enable_ansi_colors); + const diff_config: DiffConfig = .default(Output.isAIAgent(), Output.enable_ansi_colors_stderr); if (this.expected_string != null and this.received_string != null) { const received = this.received_string.?; diff --git a/src/bun.js/test/expect.zig b/src/bun.js/test/expect.zig index 8a1326a11c..441970f034 100644 --- a/src/bun.js/test/expect.zig +++ b/src/bun.js/test/expect.zig @@ -109,7 +109,7 @@ pub const Expect = struct { } pub fn throwPrettyMatcherError(globalThis: *JSGlobalObject, custom_label: bun.String, matcher_name: anytype, matcher_params: anytype, flags: Flags, comptime message_fmt: string, message_args: anytype) bun.JSError { - switch (Output.enable_ansi_colors) { + switch (Output.enable_ansi_colors_stderr) { inline else => |colors| { const chain = switch (flags.promise) { .resolves => if (flags.not) Output.prettyFmt("resolves.not.", colors) else Output.prettyFmt("resolves.", colors), @@ -164,7 +164,7 @@ pub const Expect = struct { }; value.ensureStillAlive(); - const matcher_params = switch (Output.enable_ansi_colors) { + const matcher_params = switch (Output.enable_ansi_colors_stderr) { inline else => |colors| comptime Output.prettyFmt(matcher_params_fmt, colors), }; return 
processPromise(this.custom_label, this.flags, globalThis, value, matcher_name, matcher_params, false); @@ -747,7 +747,8 @@ pub const Expect = struct { if (bun.detectCI()) |_| { if (!update) { const signature = comptime getSignature(fn_name, "", false); - return this.throw(globalThis, signature, "\n\nMatcher error: Updating inline snapshots is disabled in CI environments unless --update-snapshots is used.\nTo override, set the environment variable CI=false.", .{}); + // Only creating new snapshots can reach here (updating with mismatches errors earlier with diff) + return this.throw(globalThis, signature, "\n\nMatcher error: Inline snapshot creation is disabled in CI environments unless --update-snapshots is used.\nTo override, set the environment variable CI=false.\n\nReceived: {s}", .{pretty_value.slice()}); } } var buntest_strong = this.bunTest() orelse { @@ -827,21 +828,35 @@ pub const Expect = struct { try this.matchAndFmtSnapshot(globalThis, value, property_matchers, &pretty_value, fn_name); const existing_value = Jest.runner.?.snapshots.getOrPut(this, pretty_value.slice(), hint) catch |err| { - var formatter = jsc.ConsoleObject.Formatter{ .globalThis = globalThis }; - defer formatter.deinit(); var buntest_strong = this.bunTest() orelse return globalThis.throw("Snapshot matchers cannot be used outside of a test", .{}); defer buntest_strong.deinit(); const buntest = buntest_strong.get(); const test_file_path = Jest.runner.?.files.get(buntest.file_id).source.path.text; + const runner = Jest.runner.?; return switch (err) { error.FailedToOpenSnapshotFile => globalThis.throw("Failed to open snapshot file for test file: {s}", .{test_file_path}), error.FailedToMakeSnapshotDirectory => globalThis.throw("Failed to make snapshot directory for test file: {s}", .{test_file_path}), error.FailedToWriteSnapshotFile => globalThis.throw("Failed write to snapshot file: {s}", .{test_file_path}), error.SyntaxError, error.ParseError => globalThis.throw("Failed to parse snapshot 
file for: {s}", .{test_file_path}), - error.SnapshotCreationNotAllowedInCI => globalThis.throw("Snapshot creation is not allowed in CI environments unless --update-snapshots is used\nIf this is not a CI environment, set the environment variable CI=false to force allow.\n\nReceived: {any}", .{value.toFmt(&formatter)}), + error.SnapshotCreationNotAllowedInCI => blk: { + const snapshot_name = runner.snapshots.last_error_snapshot_name; + defer if (snapshot_name) |name| { + runner.snapshots.allocator.free(name); + runner.snapshots.last_error_snapshot_name = null; + }; + if (snapshot_name) |name| { + break :blk globalThis.throw("Snapshot creation is disabled in CI environments unless --update-snapshots is used\nTo override, set the environment variable CI=false.\n\nSnapshot name: \"{s}\"\nReceived: {s}", .{ name, pretty_value.slice() }); + } else { + break :blk globalThis.throw("Snapshot creation is disabled in CI environments unless --update-snapshots is used\nTo override, set the environment variable CI=false.\n\nReceived: {s}", .{pretty_value.slice()}); + } + }, error.SnapshotInConcurrentGroup => globalThis.throw("Snapshot matchers are not supported in concurrent tests", .{}), error.TestNotActive => globalThis.throw("Snapshot matchers are not supported after the test has finished executing", .{}), - else => globalThis.throw("Failed to snapshot value: {any}", .{value.toFmt(&formatter)}), + else => blk: { + var formatter = jsc.ConsoleObject.Formatter{ .globalThis = globalThis }; + defer formatter.deinit(); + break :blk globalThis.throw("Failed to snapshot value: {any}", .{value.toFmt(&formatter)}); + }, }; }; @@ -1011,7 +1026,7 @@ pub const Expect = struct { "Matcher functions should return an object in the following format:\n" ++ " {{message?: string | function, pass: boolean}}\n" ++ "'{any}' was returned"; - const err = switch (Output.enable_ansi_colors) { + const err = switch (Output.enable_ansi_colors_stderr) { inline else => |colors| 
globalThis.createErrorInstance(Output.prettyFmt(fmt, colors), .{ matcher_name, result.toFmt(&formatter) }), }; err.put(globalThis, ZigString.static("name"), bun.String.static("InvalidMatcherError").toJS(globalThis)); @@ -1097,7 +1112,7 @@ pub const Expect = struct { } const matcher_params = CustomMatcherParamsFormatter{ - .colors = Output.enable_ansi_colors, + .colors = Output.enable_ansi_colors_stderr, .globalThis = globalThis, .matcher_fn = matcher_fn, }; @@ -1131,7 +1146,7 @@ pub const Expect = struct { const matcher_name = try matcher_fn.getName(globalThis); const matcher_params = CustomMatcherParamsFormatter{ - .colors = Output.enable_ansi_colors, + .colors = Output.enable_ansi_colors_stderr, .globalThis = globalThis, .matcher_fn = matcher_fn, }; @@ -1840,7 +1855,7 @@ pub const ExpectMatcherUtils = struct { var writer = buffered_writer.writer(); if (comptime color_or_null) |color| { - if (Output.enable_ansi_colors) { + if (Output.enable_ansi_colors_stderr) { try writer.writeAll(Output.prettyFmt(color, true)); } } @@ -1850,7 +1865,7 @@ pub const ExpectMatcherUtils = struct { try writer.print("{}", .{value.toFmt(&formatter)}); if (comptime color_or_null) |_| { - if (Output.enable_ansi_colors) { + if (Output.enable_ansi_colors_stderr) { try writer.writeAll(Output.prettyFmt("", true)); } } diff --git a/src/bun.js/test/expect/toHaveBeenCalledWith.zig b/src/bun.js/test/expect/toHaveBeenCalledWith.zig index 9ca450da6f..2ff75bd71c 100644 --- a/src/bun.js/test/expect/toHaveBeenCalledWith.zig +++ b/src/bun.js/test/expect/toHaveBeenCalledWith.zig @@ -101,7 +101,7 @@ pub fn toHaveBeenCalledWith(this: *Expect, globalThis: *JSGlobalObject, callfram \\ Number of calls: {d} ; - switch (Output.enable_ansi_colors) { + switch (Output.enable_ansi_colors_stderr) { inline else => |colors| { return this.throw(globalThis, signature, Output.prettyFmt("\n\n" ++ fmt ++ "\n", colors), .{ expected_args_js_array.toFmt(&formatter), diff --git a/src/bun.js/test/expect/toHaveReturnedWith.zig 
b/src/bun.js/test/expect/toHaveReturnedWith.zig index a7c5de26d6..3de3197ada 100644 --- a/src/bun.js/test/expect/toHaveReturnedWith.zig +++ b/src/bun.js/test/expect/toHaveReturnedWith.zig @@ -106,7 +106,7 @@ pub fn toHaveReturnedWith(this: *Expect, globalThis: *JSGlobalObject, callframe: \\ Number of calls: {d} ; - switch (Output.enable_ansi_colors) { + switch (Output.enable_ansi_colors_stderr) { inline else => |colors| { return this.throw(globalThis, signature, Output.prettyFmt("\n\n" ++ fmt ++ "\n", colors), .{ expected.toFmt(&formatter), @@ -131,7 +131,7 @@ pub fn toHaveReturnedWith(this: *Expect, globalThis: *JSGlobalObject, callframe: \\ Number of returns: {d} ; - switch (Output.enable_ansi_colors) { + switch (Output.enable_ansi_colors_stderr) { inline else => |colors| { return this.throw(globalThis, signature, Output.prettyFmt("\n\n" ++ fmt ++ "\n", colors), .{ expected.toFmt(&formatter), diff --git a/src/bun.js/test/jest.zig b/src/bun.js/test/jest.zig index 6639cd5e5a..6ceb285a08 100644 --- a/src/bun.js/test/jest.zig +++ b/src/bun.js/test/jest.zig @@ -486,7 +486,7 @@ pub fn captureTestLineNumber(callframe: *jsc.CallFrame, globalThis: *JSGlobalObj pub fn errorInCI(globalObject: *jsc.JSGlobalObject, message: []const u8) bun.JSError!void { if (bun.detectCI()) |_| { - return globalObject.throwPretty("{s}\nIf this is not a CI environment, set the environment variable CI=false to force allow.", .{message}); + return globalObject.throwPretty("{s}\nTo override, set the environment variable CI=false.", .{message}); } } diff --git a/src/bun.js/test/snapshot.zig b/src/bun.js/test/snapshot.zig index e3dac38abf..b1939bd5c6 100644 --- a/src/bun.js/test/snapshot.zig +++ b/src/bun.js/test/snapshot.zig @@ -16,6 +16,7 @@ pub const Snapshots = struct { _current_file: ?File = null, snapshot_dir_path: ?string = null, inline_snapshots_to_write: *std.AutoArrayHashMap(TestRunner.File.ID, std.ArrayList(InlineSnapshotToWrite)), + last_error_snapshot_name: ?[]const u8 = null, pub 
const InlineSnapshotToWrite = struct { line: c_ulong, @@ -87,6 +88,12 @@ pub const Snapshots = struct { // Prevent snapshot creation in CI environments unless --update-snapshots is used if (bun.detectCI()) |_| { if (!this.update_snapshots) { + // Store the snapshot name for error reporting + if (this.last_error_snapshot_name) |old_name| { + this.allocator.free(old_name); + this.last_error_snapshot_name = null; + } + this.last_error_snapshot_name = try this.allocator.dupe(u8, name_with_counter); return error.SnapshotCreationNotAllowedInCI; } } diff --git a/src/bun.js/webcore/TextEncoder.zig b/src/bun.js/webcore/TextEncoder.zig index 351c671381..e9394ee676 100644 --- a/src/bun.js/webcore/TextEncoder.zig +++ b/src/bun.js/webcore/TextEncoder.zig @@ -206,7 +206,7 @@ pub export fn TextEncoder__encodeRopeString( if (array == .zero) { array = jsc.JSValue.createUninitializedUint8Array(globalThis, length) catch return .zero; array.ensureStillAlive(); - @memcpy(array.asArrayBuffer(globalThis).?.ptr[0..length], buf_to_use[0..length]); + @memcpy(array.asArrayBuffer(globalThis).?.byteSlice(), buf_to_use[0..length]); } return array; diff --git a/src/bun.js/webcore/fetch.zig b/src/bun.js/webcore/fetch.zig index 21e3783906..7f4e8be9ab 100644 --- a/src/bun.js/webcore/fetch.zig +++ b/src/bun.js/webcore/fetch.zig @@ -58,1351 +58,7 @@ pub const fetch_type_error_strings: JSTypeErrorEnum = brk: { break :brk errors; }; -pub const FetchTasklet = struct { - pub const ResumableSink = jsc.WebCore.ResumableFetchSink; - - const log = Output.scoped(.FetchTasklet, .visible); - sink: ?*ResumableSink = null, - http: ?*http.AsyncHTTP = null, - result: http.HTTPClientResult = .{}, - metadata: ?http.HTTPResponseMetadata = null, - javascript_vm: *VirtualMachine = undefined, - global_this: *JSGlobalObject = undefined, - request_body: HTTPRequestBody = undefined, - request_body_streaming_buffer: ?*http.ThreadSafeStreamBuffer = null, - - /// buffer being used by AsyncHTTP - response_buffer: MutableString 
= undefined, - /// buffer used to stream response to JS - scheduled_response_buffer: MutableString = undefined, - /// response weak ref we need this to track the response JS lifetime - response: jsc.Weak(FetchTasklet) = .{}, - /// native response ref if we still need it when JS is discarted - native_response: ?*Response = null, - ignore_data: bool = false, - /// stream strong ref if any is available - readable_stream_ref: jsc.WebCore.ReadableStream.Strong = .{}, - request_headers: Headers = Headers{ .allocator = undefined }, - promise: jsc.JSPromise.Strong, - concurrent_task: jsc.ConcurrentTask = .{}, - poll_ref: Async.KeepAlive = .{}, - memory_reporter: *bun.MemoryReportingAllocator, - /// For Http Client requests - /// when Content-Length is provided this represents the whole size of the request - /// If chunked encoded this will represent the total received size (ignoring the chunk headers) - /// If is not chunked encoded and Content-Length is not provided this will be unknown - body_size: http.HTTPClientResult.BodySize = .unknown, - - /// This is url + proxy memory buffer and is owned by FetchTasklet - /// We always clone url and proxy (if informed) - url_proxy_buffer: []const u8 = "", - - signal: ?*jsc.WebCore.AbortSignal = null, - signals: http.Signals = .{}, - signal_store: http.Signals.Store = .{}, - has_schedule_callback: std.atomic.Value(bool) = std.atomic.Value(bool).init(false), - - // must be stored because AbortSignal stores reason weakly - abort_reason: jsc.Strong.Optional = .empty, - - // custom checkServerIdentity - check_server_identity: jsc.Strong.Optional = .empty, - reject_unauthorized: bool = true, - upgraded_connection: bool = false, - // Custom Hostname - hostname: ?[]u8 = null, - is_waiting_body: bool = false, - is_waiting_abort: bool = false, - is_waiting_request_stream_start: bool = false, - mutex: Mutex, - - tracker: jsc.Debugger.AsyncTaskTracker, - - ref_count: std.atomic.Value(u32) = std.atomic.Value(u32).init(1), - - pub fn ref(this: 
*FetchTasklet) void { - const count = this.ref_count.fetchAdd(1, .monotonic); - bun.debugAssert(count > 0); - } - - pub fn deref(this: *FetchTasklet) void { - const count = this.ref_count.fetchSub(1, .monotonic); - bun.debugAssert(count > 0); - - if (count == 1) { - this.deinit() catch |err| switch (err) {}; - } - } - - pub fn derefFromThread(this: *FetchTasklet) void { - const count = this.ref_count.fetchSub(1, .monotonic); - bun.debugAssert(count > 0); - - if (count == 1) { - // this is really unlikely to happen, but can happen - // lets make sure that we always call deinit from main thread - - this.javascript_vm.eventLoop().enqueueTaskConcurrent(jsc.ConcurrentTask.fromCallback(this, FetchTasklet.deinit)); - } - } - - pub const HTTPRequestBody = union(enum) { - AnyBlob: AnyBlob, - Sendfile: http.SendFile, - ReadableStream: jsc.WebCore.ReadableStream.Strong, - - pub const Empty: HTTPRequestBody = .{ .AnyBlob = .{ .Blob = .{} } }; - - pub fn store(this: *HTTPRequestBody) ?*Blob.Store { - return switch (this.*) { - .AnyBlob => this.AnyBlob.store(), - else => null, - }; - } - - pub fn slice(this: *const HTTPRequestBody) []const u8 { - return switch (this.*) { - .AnyBlob => this.AnyBlob.slice(), - else => "", - }; - } - - pub fn detach(this: *HTTPRequestBody) void { - switch (this.*) { - .AnyBlob => this.AnyBlob.detach(), - .ReadableStream => |*stream| { - stream.deinit(); - }, - .Sendfile => { - if (@max(this.Sendfile.offset, this.Sendfile.remain) > 0) - this.Sendfile.fd.close(); - this.Sendfile.offset = 0; - this.Sendfile.remain = 0; - }, - } - } - - pub fn fromJS(globalThis: *JSGlobalObject, value: JSValue) bun.JSError!HTTPRequestBody { - var body_value = try Body.Value.fromJS(globalThis, value); - if (body_value == .Used or (body_value == .Locked and (body_value.Locked.action != .none or body_value.Locked.isDisturbed2(globalThis)))) { - return globalThis.ERR(.BODY_ALREADY_USED, "body already used", .{}).throw(); - } - if (body_value == .Locked) { - if 
(body_value.Locked.readable.has()) { - // just grab the ref - return FetchTasklet.HTTPRequestBody{ .ReadableStream = body_value.Locked.readable }; - } - const readable = try body_value.toReadableStream(globalThis); - if (!readable.isEmptyOrUndefinedOrNull() and body_value == .Locked and body_value.Locked.readable.has()) { - return FetchTasklet.HTTPRequestBody{ .ReadableStream = body_value.Locked.readable }; - } - } - return FetchTasklet.HTTPRequestBody{ .AnyBlob = body_value.useAsAnyBlob() }; - } - - pub fn needsToReadFile(this: *HTTPRequestBody) bool { - return switch (this.*) { - .AnyBlob => |blob| blob.needsToReadFile(), - else => false, - }; - } - - pub fn isS3(this: *const HTTPRequestBody) bool { - return switch (this.*) { - .AnyBlob => |*blob| blob.isS3(), - else => false, - }; - } - - pub fn hasContentTypeFromUser(this: *HTTPRequestBody) bool { - return switch (this.*) { - .AnyBlob => |blob| blob.hasContentTypeFromUser(), - else => false, - }; - } - - pub fn getAnyBlob(this: *HTTPRequestBody) ?*AnyBlob { - return switch (this.*) { - .AnyBlob => &this.AnyBlob, - else => null, - }; - } - - pub fn hasBody(this: *HTTPRequestBody) bool { - return switch (this.*) { - .AnyBlob => |blob| blob.size() > 0, - .ReadableStream => |*stream| stream.has(), - .Sendfile => true, - }; - } - }; - - pub fn init(_: std.mem.Allocator) anyerror!FetchTasklet { - return FetchTasklet{}; - } - - fn clearSink(this: *FetchTasklet) void { - if (this.sink) |sink| { - this.sink = null; - sink.deref(); - } - if (this.request_body_streaming_buffer) |buffer| { - this.request_body_streaming_buffer = null; - buffer.clearDrainCallback(); - buffer.deref(); - } - } - - fn clearData(this: *FetchTasklet) void { - log("clearData ", .{}); - const allocator = this.memory_reporter.allocator(); - if (this.url_proxy_buffer.len > 0) { - allocator.free(this.url_proxy_buffer); - this.url_proxy_buffer.len = 0; - } - - if (this.hostname) |hostname| { - allocator.free(hostname); - this.hostname = null; - } - - 
if (this.result.certificate_info) |*certificate| { - certificate.deinit(bun.default_allocator); - this.result.certificate_info = null; - } - - this.request_headers.entries.deinit(allocator); - this.request_headers.buf.deinit(allocator); - this.request_headers = Headers{ .allocator = undefined }; - - if (this.http) |http_| { - http_.clearData(); - } - - if (this.metadata != null) { - this.metadata.?.deinit(allocator); - this.metadata = null; - } - - this.response_buffer.deinit(); - this.response.deinit(); - if (this.native_response) |response| { - this.native_response = null; - - response.unref(); - } - - this.readable_stream_ref.deinit(); - - this.scheduled_response_buffer.deinit(); - if (this.request_body != .ReadableStream or this.is_waiting_request_stream_start) { - this.request_body.detach(); - } - - this.abort_reason.deinit(); - this.check_server_identity.deinit(); - this.clearAbortSignal(); - // Clear the sink only after the requested ended otherwise we would potentialy lose the last chunk - this.clearSink(); - } - - // XXX: 'fn (*FetchTasklet) error{}!void' coerces to 'fn (*FetchTasklet) bun.JSError!void' but 'fn (*FetchTasklet) void' does not - pub fn deinit(this: *FetchTasklet) error{}!void { - log("deinit", .{}); - - bun.assert(this.ref_count.load(.monotonic) == 0); - - this.clearData(); - - var reporter = this.memory_reporter; - const allocator = reporter.allocator(); - - if (this.http) |http_| { - this.http = null; - allocator.destroy(http_); - } - allocator.destroy(this); - // reporter.assert(); - bun.default_allocator.destroy(reporter); - } - - fn getCurrentResponse(this: *FetchTasklet) ?*Response { - // we need a body to resolve the promise when buffering - if (this.native_response) |response| { - return response; - } - - // if we did not have a direct reference we check if the Weak ref is still alive - if (this.response.get()) |response_js| { - if (response_js.as(Response)) |response| { - return response; - } - } - - return null; - } - - pub fn 
startRequestStream(this: *FetchTasklet) void { - this.is_waiting_request_stream_start = false; - bun.assert(this.request_body == .ReadableStream); - if (this.request_body.ReadableStream.get(this.global_this)) |stream| { - if (this.signal) |signal| { - if (signal.aborted()) { - stream.abort(this.global_this); - return; - } - } - - const globalThis = this.global_this; - this.ref(); // lets only unref when sink is done - // +1 because the task refs the sink - const sink = ResumableSink.initExactRefs(globalThis, stream, this, 2); - this.sink = sink; - } - } - - pub fn onBodyReceived(this: *FetchTasklet) bun.JSTerminated!void { - const success = this.result.isSuccess(); - const globalThis = this.global_this; - // reset the buffer if we are streaming or if we are not waiting for bufferig anymore - var buffer_reset = true; - log("onBodyReceived success={} has_more={}", .{ success, this.result.has_more }); - defer { - if (buffer_reset) { - this.scheduled_response_buffer.reset(); - } - } - - if (!success) { - var err = this.onReject(); - var need_deinit = true; - defer if (need_deinit) err.deinit(); - var js_err = JSValue.zero; - // if we are streaming update with error - if (this.readable_stream_ref.get(globalThis)) |readable| { - if (readable.ptr == .Bytes) { - js_err = err.toJS(globalThis); - js_err.ensureStillAlive(); - try readable.ptr.Bytes.onData( - .{ - .err = .{ .JSValue = js_err }, - }, - bun.default_allocator, - ); - } - } - if (this.sink) |sink| { - if (js_err == .zero) { - js_err = err.toJS(globalThis); - js_err.ensureStillAlive(); - } - sink.cancel(js_err); - return; - } - // if we are buffering resolve the promise - if (this.getCurrentResponse()) |response| { - need_deinit = false; // body value now owns the error - const body = response.getBodyValue(); - try body.toErrorInstance(err, globalThis); - } - return; - } - - if (this.readable_stream_ref.get(globalThis)) |readable| { - log("onBodyReceived readable_stream_ref", .{}); - if (readable.ptr == .Bytes) { - 
readable.ptr.Bytes.size_hint = this.getSizeHint(); - // body can be marked as used but we still need to pipe the data - const scheduled_response_buffer = &this.scheduled_response_buffer.list; - - const chunk = scheduled_response_buffer.items; - - if (this.result.has_more) { - try readable.ptr.Bytes.onData( - .{ - .temporary = bun.ByteList.fromBorrowedSliceDangerous(chunk), - }, - bun.default_allocator, - ); - } else { - var prev = this.readable_stream_ref; - this.readable_stream_ref = .{}; - defer prev.deinit(); - buffer_reset = false; - - try readable.ptr.Bytes.onData( - .{ - .temporary_and_done = bun.ByteList.fromBorrowedSliceDangerous(chunk), - }, - bun.default_allocator, - ); - } - return; - } - } - - if (this.getCurrentResponse()) |response| { - log("onBodyReceived Current Response", .{}); - const sizeHint = this.getSizeHint(); - response.setSizeHint(sizeHint); - if (response.getBodyReadableStream(globalThis)) |readable| { - log("onBodyReceived CurrentResponse BodyReadableStream", .{}); - if (readable.ptr == .Bytes) { - const scheduled_response_buffer = this.scheduled_response_buffer.list; - - const chunk = scheduled_response_buffer.items; - - if (this.result.has_more) { - try readable.ptr.Bytes.onData( - .{ - .temporary = bun.ByteList.fromBorrowedSliceDangerous(chunk), - }, - bun.default_allocator, - ); - } else { - readable.value.ensureStillAlive(); - response.detachReadableStream(globalThis); - try readable.ptr.Bytes.onData( - .{ - .temporary_and_done = bun.ByteList.fromBorrowedSliceDangerous(chunk), - }, - bun.default_allocator, - ); - } - - return; - } - } - - // we will reach here when not streaming, this is also the only case we dont wanna to reset the buffer - buffer_reset = false; - if (!this.result.has_more) { - var scheduled_response_buffer = this.scheduled_response_buffer.list; - this.memory_reporter.discard(scheduled_response_buffer.allocatedSlice()); - const body = response.getBodyValue(); - // done resolve body - var old = body.*; - const 
body_value = Body.Value{ - .InternalBlob = .{ - .bytes = scheduled_response_buffer.toManaged(bun.default_allocator), - }, - }; - body.* = body_value; - log("onBodyReceived body_value length={}", .{body_value.InternalBlob.bytes.items.len}); - - this.scheduled_response_buffer = .{ - .allocator = this.memory_reporter.allocator(), - .list = .{ - .items = &.{}, - .capacity = 0, - }, - }; - - if (old == .Locked) { - log("onBodyReceived old.resolve", .{}); - try old.resolve(body, this.global_this, response.getFetchHeaders()); - } - } - } - } - - pub fn onProgressUpdate(this: *FetchTasklet) bun.JSTerminated!void { - jsc.markBinding(@src()); - log("onProgressUpdate", .{}); - this.mutex.lock(); - this.has_schedule_callback.store(false, .monotonic); - const is_done = !this.result.has_more; - - const vm = this.javascript_vm; - // vm is shutting down we cannot touch JS - if (vm.isShuttingDown()) { - this.mutex.unlock(); - if (is_done) { - this.deref(); - } - return; - } - - const globalThis = this.global_this; - defer { - this.mutex.unlock(); - // if we are not done we wait until the next call - if (is_done) { - var poll_ref = this.poll_ref; - this.poll_ref = .{}; - poll_ref.unref(vm); - this.deref(); - } - } - if (this.is_waiting_request_stream_start and this.result.can_stream) { - // start streaming - this.startRequestStream(); - } - // if we already respond the metadata and still need to process the body - if (this.is_waiting_body) { - try this.onBodyReceived(); - return; - } - if (this.metadata == null and this.result.isSuccess()) return; - - // if we abort because of cert error - // we wait the Http Client because we already have the response - // we just need to deinit - if (this.is_waiting_abort) { - return; - } - const promise_value = this.promise.valueOrEmpty(); - - if (promise_value.isEmptyOrUndefinedOrNull()) { - log("onProgressUpdate: promise_value is null", .{}); - this.promise.deinit(); - return; - } - - if (this.result.certificate_info) |certificate_info| { - 
this.result.certificate_info = null; - defer certificate_info.deinit(bun.default_allocator); - - // we receive some error - if (this.reject_unauthorized and !this.checkServerIdentity(certificate_info)) { - log("onProgressUpdate: aborted due certError", .{}); - // we need to abort the request - const promise = promise_value.asAnyPromise().?; - const tracker = this.tracker; - var result = this.onReject(); - defer result.deinit(); - - promise_value.ensureStillAlive(); - try promise.reject(globalThis, result.toJS(globalThis)); - - tracker.didDispatch(globalThis); - this.promise.deinit(); - return; - } - // everything ok - if (this.metadata == null) { - log("onProgressUpdate: metadata is null", .{}); - return; - } - } - - const tracker = this.tracker; - tracker.willDispatch(globalThis); - defer { - log("onProgressUpdate: promise_value is not null", .{}); - tracker.didDispatch(globalThis); - this.promise.deinit(); - } - const success = this.result.isSuccess(); - const result = switch (success) { - true => jsc.Strong.Optional.create(this.onResolve(), globalThis), - false => brk: { - // in this case we wanna a jsc.Strong.Optional so we just convert it - var value = this.onReject(); - const err = value.toJS(globalThis); - if (this.sink) |sink| { - sink.cancel(err); - } - break :brk value.JSValue; - }, - }; - - promise_value.ensureStillAlive(); - const Holder = struct { - held: jsc.Strong.Optional, - promise: jsc.Strong.Optional, - globalObject: *jsc.JSGlobalObject, - task: jsc.AnyTask, - - pub fn resolve(self: *@This()) bun.JSTerminated!void { - // cleanup - defer bun.default_allocator.destroy(self); - defer self.held.deinit(); - defer self.promise.deinit(); - // resolve the promise - var prom = self.promise.swap().asAnyPromise().?; - const res = self.held.swap(); - res.ensureStillAlive(); - try prom.resolve(self.globalObject, res); - } - - pub fn reject(self: *@This()) bun.JSTerminated!void { - // cleanup - defer bun.default_allocator.destroy(self); - defer 
self.held.deinit(); - defer self.promise.deinit(); - - // reject the promise - var prom = self.promise.swap().asAnyPromise().?; - const res = self.held.swap(); - res.ensureStillAlive(); - try prom.reject(self.globalObject, res); - } - }; - var holder = bun.handleOom(bun.default_allocator.create(Holder)); - holder.* = .{ - .held = result, - // we need the promise to be alive until the task is done - .promise = this.promise.strong, - .globalObject = globalThis, - .task = undefined, - }; - this.promise.strong = .empty; - holder.task = switch (success) { - true => jsc.AnyTask.New(Holder, Holder.resolve).init(holder), - false => jsc.AnyTask.New(Holder, Holder.reject).init(holder), - }; - - vm.enqueueTask(jsc.Task.init(&holder.task)); - } - - pub fn checkServerIdentity(this: *FetchTasklet, certificate_info: http.CertificateInfo) bool { - if (this.check_server_identity.get()) |check_server_identity| { - check_server_identity.ensureStillAlive(); - if (certificate_info.cert.len > 0) { - const cert = certificate_info.cert; - var cert_ptr = cert.ptr; - if (BoringSSL.d2i_X509(null, &cert_ptr, @intCast(cert.len))) |x509| { - const globalObject = this.global_this; - defer x509.free(); - const js_cert = X509.toJS(x509, globalObject) catch |err| { - switch (err) { - error.JSError => {}, - error.OutOfMemory => globalObject.throwOutOfMemory() catch {}, - error.JSTerminated => {}, - } - const check_result = globalObject.tryTakeException().?; - // mark to wait until deinit - this.is_waiting_abort = this.result.has_more; - this.abort_reason.set(globalObject, check_result); - this.signal_store.aborted.store(true, .monotonic); - this.tracker.didCancel(this.global_this); - // we need to abort the request - if (this.http) |http_| http.http_thread.scheduleShutdown(http_); - this.result.fail = error.ERR_TLS_CERT_ALTNAME_INVALID; - return false; - }; - var hostname: bun.String = bun.String.cloneUTF8(certificate_info.hostname); - defer hostname.deref(); - const js_hostname = 
hostname.toJS(globalObject); - js_hostname.ensureStillAlive(); - js_cert.ensureStillAlive(); - const check_result = check_server_identity.call(globalObject, .js_undefined, &.{ js_hostname, js_cert }) catch |err| globalObject.takeException(err); - - // > Returns object [...] on failure - if (check_result.isAnyError()) { - // mark to wait until deinit - this.is_waiting_abort = this.result.has_more; - this.abort_reason.set(globalObject, check_result); - this.signal_store.aborted.store(true, .monotonic); - this.tracker.didCancel(this.global_this); - - // we need to abort the request - if (this.http) |http_| { - http.http_thread.scheduleShutdown(http_); - } - this.result.fail = error.ERR_TLS_CERT_ALTNAME_INVALID; - return false; - } - - // > On success, returns - // We treat any non-error value as a success. - return true; - } - } - } - this.result.fail = error.ERR_TLS_CERT_ALTNAME_INVALID; - return false; - } - - fn getAbortError(this: *FetchTasklet) ?Body.Value.ValueError { - if (this.abort_reason.has()) { - defer this.clearAbortSignal(); - const out = this.abort_reason; - - this.abort_reason = .empty; - return Body.Value.ValueError{ .JSValue = out }; - } - - if (this.signal) |signal| { - if (signal.reasonIfAborted(this.global_this)) |reason| { - defer this.clearAbortSignal(); - return reason.toBodyValueError(this.global_this); - } - } - - return null; - } - - fn clearAbortSignal(this: *FetchTasklet) void { - const signal = this.signal orelse return; - this.signal = null; - defer { - signal.pendingActivityUnref(); - signal.unref(); - } - - signal.cleanNativeBindings(this); - } - - pub fn onReject(this: *FetchTasklet) Body.Value.ValueError { - bun.assert(this.result.fail != null); - log("onReject", .{}); - - if (this.getAbortError()) |err| { - return err; - } - - if (this.result.abortReason()) |reason| { - return .{ .AbortReason = reason }; - } - - // some times we don't have metadata so we also check http.url - const path = if (this.metadata) |metadata| - 
bun.String.cloneUTF8(metadata.url) - else if (this.http) |http_| - bun.String.cloneUTF8(http_.url.href) - else - bun.String.empty; - - const fetch_error = jsc.SystemError{ - .code = bun.String.static(switch (this.result.fail.?) { - error.ConnectionClosed => "ECONNRESET", - else => |e| @errorName(e), - }), - .message = switch (this.result.fail.?) { - error.ConnectionClosed => bun.String.static("The socket connection was closed unexpectedly. For more information, pass `verbose: true` in the second argument to fetch()"), - error.FailedToOpenSocket => bun.String.static("Was there a typo in the url or port?"), - error.TooManyRedirects => bun.String.static("The response redirected too many times. For more information, pass `verbose: true` in the second argument to fetch()"), - error.ConnectionRefused => bun.String.static("Unable to connect. Is the computer able to access the url?"), - error.RedirectURLInvalid => bun.String.static("Redirect URL in Location header is invalid."), - - error.UNABLE_TO_GET_ISSUER_CERT => bun.String.static("unable to get issuer certificate"), - error.UNABLE_TO_GET_CRL => bun.String.static("unable to get certificate CRL"), - error.UNABLE_TO_DECRYPT_CERT_SIGNATURE => bun.String.static("unable to decrypt certificate's signature"), - error.UNABLE_TO_DECRYPT_CRL_SIGNATURE => bun.String.static("unable to decrypt CRL's signature"), - error.UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY => bun.String.static("unable to decode issuer public key"), - error.CERT_SIGNATURE_FAILURE => bun.String.static("certificate signature failure"), - error.CRL_SIGNATURE_FAILURE => bun.String.static("CRL signature failure"), - error.CERT_NOT_YET_VALID => bun.String.static("certificate is not yet valid"), - error.CRL_NOT_YET_VALID => bun.String.static("CRL is not yet valid"), - error.CERT_HAS_EXPIRED => bun.String.static("certificate has expired"), - error.CRL_HAS_EXPIRED => bun.String.static("CRL has expired"), - error.ERROR_IN_CERT_NOT_BEFORE_FIELD => bun.String.static("format error 
in certificate's notBefore field"), - error.ERROR_IN_CERT_NOT_AFTER_FIELD => bun.String.static("format error in certificate's notAfter field"), - error.ERROR_IN_CRL_LAST_UPDATE_FIELD => bun.String.static("format error in CRL's lastUpdate field"), - error.ERROR_IN_CRL_NEXT_UPDATE_FIELD => bun.String.static("format error in CRL's nextUpdate field"), - error.OUT_OF_MEM => bun.String.static("out of memory"), - error.DEPTH_ZERO_SELF_SIGNED_CERT => bun.String.static("self signed certificate"), - error.SELF_SIGNED_CERT_IN_CHAIN => bun.String.static("self signed certificate in certificate chain"), - error.UNABLE_TO_GET_ISSUER_CERT_LOCALLY => bun.String.static("unable to get local issuer certificate"), - error.UNABLE_TO_VERIFY_LEAF_SIGNATURE => bun.String.static("unable to verify the first certificate"), - error.CERT_CHAIN_TOO_LONG => bun.String.static("certificate chain too long"), - error.CERT_REVOKED => bun.String.static("certificate revoked"), - error.INVALID_CA => bun.String.static("invalid CA certificate"), - error.INVALID_NON_CA => bun.String.static("invalid non-CA certificate (has CA markings)"), - error.PATH_LENGTH_EXCEEDED => bun.String.static("path length constraint exceeded"), - error.PROXY_PATH_LENGTH_EXCEEDED => bun.String.static("proxy path length constraint exceeded"), - error.PROXY_CERTIFICATES_NOT_ALLOWED => bun.String.static("proxy certificates not allowed, please set the appropriate flag"), - error.INVALID_PURPOSE => bun.String.static("unsupported certificate purpose"), - error.CERT_UNTRUSTED => bun.String.static("certificate not trusted"), - error.CERT_REJECTED => bun.String.static("certificate rejected"), - error.APPLICATION_VERIFICATION => bun.String.static("application verification failure"), - error.SUBJECT_ISSUER_MISMATCH => bun.String.static("subject issuer mismatch"), - error.AKID_SKID_MISMATCH => bun.String.static("authority and subject key identifier mismatch"), - error.AKID_ISSUER_SERIAL_MISMATCH => bun.String.static("authority and issuer 
serial number mismatch"), - error.KEYUSAGE_NO_CERTSIGN => bun.String.static("key usage does not include certificate signing"), - error.UNABLE_TO_GET_CRL_ISSUER => bun.String.static("unable to get CRL issuer certificate"), - error.UNHANDLED_CRITICAL_EXTENSION => bun.String.static("unhandled critical extension"), - error.KEYUSAGE_NO_CRL_SIGN => bun.String.static("key usage does not include CRL signing"), - error.KEYUSAGE_NO_DIGITAL_SIGNATURE => bun.String.static("key usage does not include digital signature"), - error.UNHANDLED_CRITICAL_CRL_EXTENSION => bun.String.static("unhandled critical CRL extension"), - error.INVALID_EXTENSION => bun.String.static("invalid or inconsistent certificate extension"), - error.INVALID_POLICY_EXTENSION => bun.String.static("invalid or inconsistent certificate policy extension"), - error.NO_EXPLICIT_POLICY => bun.String.static("no explicit policy"), - error.DIFFERENT_CRL_SCOPE => bun.String.static("Different CRL scope"), - error.UNSUPPORTED_EXTENSION_FEATURE => bun.String.static("Unsupported extension feature"), - error.UNNESTED_RESOURCE => bun.String.static("RFC 3779 resource not subset of parent's resources"), - error.PERMITTED_VIOLATION => bun.String.static("permitted subtree violation"), - error.EXCLUDED_VIOLATION => bun.String.static("excluded subtree violation"), - error.SUBTREE_MINMAX => bun.String.static("name constraints minimum and maximum not supported"), - error.UNSUPPORTED_CONSTRAINT_TYPE => bun.String.static("unsupported name constraint type"), - error.UNSUPPORTED_CONSTRAINT_SYNTAX => bun.String.static("unsupported or invalid name constraint syntax"), - error.UNSUPPORTED_NAME_SYNTAX => bun.String.static("unsupported or invalid name syntax"), - error.CRL_PATH_VALIDATION_ERROR => bun.String.static("CRL path validation error"), - error.SUITE_B_INVALID_VERSION => bun.String.static("Suite B: certificate version invalid"), - error.SUITE_B_INVALID_ALGORITHM => bun.String.static("Suite B: invalid public key algorithm"), - 
error.SUITE_B_INVALID_CURVE => bun.String.static("Suite B: invalid ECC curve"), - error.SUITE_B_INVALID_SIGNATURE_ALGORITHM => bun.String.static("Suite B: invalid signature algorithm"), - error.SUITE_B_LOS_NOT_ALLOWED => bun.String.static("Suite B: curve not allowed for this LOS"), - error.SUITE_B_CANNOT_SIGN_P_384_WITH_P_256 => bun.String.static("Suite B: cannot sign P-384 with P-256"), - error.HOSTNAME_MISMATCH => bun.String.static("Hostname mismatch"), - error.EMAIL_MISMATCH => bun.String.static("Email address mismatch"), - error.IP_ADDRESS_MISMATCH => bun.String.static("IP address mismatch"), - error.INVALID_CALL => bun.String.static("Invalid certificate verification context"), - error.STORE_LOOKUP => bun.String.static("Issuer certificate lookup error"), - error.NAME_CONSTRAINTS_WITHOUT_SANS => bun.String.static("Issuer has name constraints but leaf has no SANs"), - error.UNKNOWN_CERTIFICATE_VERIFICATION_ERROR => bun.String.static("unknown certificate verification error"), - - else => |e| bun.String.createFormat("{s} fetching \"{}\". 
For more information, pass `verbose: true` in the second argument to fetch()", .{ - @errorName(e), - path, - }) catch |err| bun.handleOom(err), - }, - .path = path, - }; - - return .{ .SystemError = fetch_error }; - } - - pub fn onReadableStreamAvailable(ctx: *anyopaque, globalThis: *jsc.JSGlobalObject, readable: jsc.WebCore.ReadableStream) void { - const this = bun.cast(*FetchTasklet, ctx); - this.readable_stream_ref = jsc.WebCore.ReadableStream.Strong.init(readable, globalThis); - } - - pub fn onStartStreamingHTTPResponseBodyCallback(ctx: *anyopaque) jsc.WebCore.DrainResult { - const this = bun.cast(*FetchTasklet, ctx); - if (this.signal_store.aborted.load(.monotonic)) { - return jsc.WebCore.DrainResult{ - .aborted = {}, - }; - } - - if (this.http) |http_| { - http_.enableResponseBodyStreaming(); - - // If the server sent the headers and the response body in two separate socket writes - // and if the server doesn't close the connection by itself - // and doesn't send any follow-up data - // then we must make sure the HTTP thread flushes. 
- bun.http.http_thread.scheduleResponseBodyDrain(http_.async_http_id); - } - - this.mutex.lock(); - defer this.mutex.unlock(); - const size_hint = this.getSizeHint(); - - var scheduled_response_buffer = this.scheduled_response_buffer.list; - // This means we have received part of the body but not the whole thing - if (scheduled_response_buffer.items.len > 0) { - this.memory_reporter.discard(scheduled_response_buffer.allocatedSlice()); - this.scheduled_response_buffer = .{ - .allocator = this.memory_reporter.allocator(), - .list = .{ - .items = &.{}, - .capacity = 0, - }, - }; - - return .{ - .owned = .{ - .list = scheduled_response_buffer.toManaged(bun.default_allocator), - .size_hint = size_hint, - }, - }; - } - - return .{ - .estimated_size = size_hint, - }; - } - - fn getSizeHint(this: *FetchTasklet) Blob.SizeType { - return switch (this.body_size) { - .content_length => @truncate(this.body_size.content_length), - .total_received => @truncate(this.body_size.total_received), - .unknown => 0, - }; - } - - fn toBodyValue(this: *FetchTasklet) Body.Value { - if (this.getAbortError()) |err| { - return .{ .Error = err }; - } - if (this.is_waiting_body) { - const response = Body.Value{ - .Locked = .{ - .size_hint = this.getSizeHint(), - .task = this, - .global = this.global_this, - .onStartStreaming = FetchTasklet.onStartStreamingHTTPResponseBodyCallback, - .onReadableStreamAvailable = FetchTasklet.onReadableStreamAvailable, - }, - }; - return response; - } - - var scheduled_response_buffer = this.scheduled_response_buffer.list; - this.memory_reporter.discard(scheduled_response_buffer.allocatedSlice()); - const response = Body.Value{ - .InternalBlob = .{ - .bytes = scheduled_response_buffer.toManaged(bun.default_allocator), - }, - }; - this.scheduled_response_buffer = .{ - .allocator = this.memory_reporter.allocator(), - .list = .{ - .items = &.{}, - .capacity = 0, - }, - }; - - return response; - } - - fn toResponse(this: *FetchTasklet) Response { - log("toResponse", 
.{}); - bun.assert(this.metadata != null); - // at this point we always should have metadata - const metadata = this.metadata.?; - const http_response = metadata.response; - this.is_waiting_body = this.result.has_more; - return Response.init( - .{ - .headers = FetchHeaders.createFromPicoHeaders(http_response.headers), - .status_code = @as(u16, @truncate(http_response.status_code)), - .status_text = bun.String.createAtomIfPossible(http_response.status), - }, - Body{ - .value = this.toBodyValue(), - }, - bun.String.createAtomIfPossible(metadata.url), - this.result.redirected, - ); - } - - fn ignoreRemainingResponseBody(this: *FetchTasklet) void { - log("ignoreRemainingResponseBody", .{}); - // enabling streaming will make the http thread to drain into the main thread (aka stop buffering) - // without a stream ref, response body or response instance alive it will just ignore the result - if (this.http) |http_| { - http_.enableResponseBodyStreaming(); - } - // we should not keep the process alive if we are ignoring the body - const vm = this.javascript_vm; - this.poll_ref.unref(vm); - // clean any remaining refereces - this.readable_stream_ref.deinit(); - this.response.deinit(); - - if (this.native_response) |response| { - response.unref(); - this.native_response = null; - } - - this.ignore_data = true; - } - - export fn Bun__FetchResponse_finalize(this: *FetchTasklet) callconv(.C) void { - log("onResponseFinalize", .{}); - if (this.native_response) |response| { - const body = response.getBodyValue(); - // Three scenarios: - // - // 1. We are streaming, in which case we should not ignore the body. - // 2. We were buffering, in which case - // 2a. if we have no promise, we should ignore the body. - // 2b. if we have a promise, we should keep loading the body. - // 3. We never started buffering, in which case we should ignore the body. - // - // Note: We cannot call .get() on the ReadableStreamRef. This is called inside a finalizer. 
- if (body.* != .Locked or this.readable_stream_ref.held.has()) { - // Scenario 1 or 3. - return; - } - - if (body.Locked.promise) |promise| { - if (promise.isEmptyOrUndefinedOrNull()) { - // Scenario 2b. - this.ignoreRemainingResponseBody(); - } - } else { - // Scenario 3. - this.ignoreRemainingResponseBody(); - } - } - } - comptime { - _ = Bun__FetchResponse_finalize; - } - - pub fn onResolve(this: *FetchTasklet) JSValue { - log("onResolve", .{}); - const response = bun.new(Response, this.toResponse()); - const response_js = Response.makeMaybePooled(@as(*jsc.JSGlobalObject, this.global_this), response); - response_js.ensureStillAlive(); - this.response = jsc.Weak(FetchTasklet).create(response_js, this.global_this, .FetchResponse, this); - this.native_response = response.ref(); - return response_js; - } - - pub fn get( - allocator: std.mem.Allocator, - globalThis: *jsc.JSGlobalObject, - fetch_options: *const FetchOptions, - promise: jsc.JSPromise.Strong, - ) !*FetchTasklet { - var jsc_vm = globalThis.bunVM(); - var fetch_tasklet = try allocator.create(FetchTasklet); - - fetch_tasklet.* = .{ - .mutex = .{}, - .scheduled_response_buffer = .{ - .allocator = fetch_options.memory_reporter.allocator(), - .list = .{ - .items = &.{}, - .capacity = 0, - }, - }, - .response_buffer = MutableString{ - .allocator = fetch_options.memory_reporter.allocator(), - .list = .{ - .items = &.{}, - .capacity = 0, - }, - }, - .http = try allocator.create(http.AsyncHTTP), - .javascript_vm = jsc_vm, - .request_body = fetch_options.body, - .global_this = globalThis, - .promise = promise, - .request_headers = fetch_options.headers, - .url_proxy_buffer = fetch_options.url_proxy_buffer, - .signal = fetch_options.signal, - .hostname = fetch_options.hostname, - .tracker = jsc.Debugger.AsyncTaskTracker.init(jsc_vm), - .memory_reporter = fetch_options.memory_reporter, - .check_server_identity = fetch_options.check_server_identity, - .reject_unauthorized = fetch_options.reject_unauthorized, - 
.upgraded_connection = fetch_options.upgraded_connection, - }; - - fetch_tasklet.signals = fetch_tasklet.signal_store.to(); - - fetch_tasklet.tracker.didSchedule(globalThis); - - if (fetch_tasklet.request_body.store()) |store| { - store.ref(); - } - - var proxy: ?ZigURL = null; - if (fetch_options.proxy) |proxy_opt| { - if (!proxy_opt.isEmpty()) { //if is empty just ignore proxy - proxy = fetch_options.proxy orelse jsc_vm.transpiler.env.getHttpProxyFor(fetch_options.url); - } - } else { - proxy = jsc_vm.transpiler.env.getHttpProxyFor(fetch_options.url); - } - - if (fetch_tasklet.check_server_identity.has() and fetch_tasklet.reject_unauthorized) { - fetch_tasklet.signal_store.cert_errors.store(true, .monotonic); - } else { - fetch_tasklet.signals.cert_errors = null; - } - - // This task gets queued on the HTTP thread. - fetch_tasklet.http.?.* = http.AsyncHTTP.init( - fetch_options.memory_reporter.allocator(), - fetch_options.method, - fetch_options.url, - fetch_options.headers.entries, - fetch_options.headers.buf.items, - &fetch_tasklet.response_buffer, - fetch_tasklet.request_body.slice(), - http.HTTPClientResult.Callback.New( - *FetchTasklet, - // handles response events (on headers, on body, etc.) 
- FetchTasklet.callback, - ).init(fetch_tasklet), - fetch_options.redirect_type, - .{ - .http_proxy = proxy, - .hostname = fetch_options.hostname, - .signals = fetch_tasklet.signals, - .unix_socket_path = fetch_options.unix_socket_path, - .disable_timeout = fetch_options.disable_timeout, - .disable_keepalive = fetch_options.disable_keepalive, - .disable_decompression = fetch_options.disable_decompression, - .reject_unauthorized = fetch_options.reject_unauthorized, - .verbose = fetch_options.verbose, - .tls_props = fetch_options.ssl_config, - }, - ); - // enable streaming the write side - const isStream = fetch_tasklet.request_body == .ReadableStream; - fetch_tasklet.http.?.client.flags.is_streaming_request_body = isStream; - fetch_tasklet.is_waiting_request_stream_start = isStream; - if (isStream) { - const buffer = http.ThreadSafeStreamBuffer.new(.{}); - buffer.setDrainCallback(FetchTasklet, FetchTasklet.onWriteRequestDataDrain, fetch_tasklet); - fetch_tasklet.request_body_streaming_buffer = buffer; - fetch_tasklet.http.?.request_body = .{ - .stream = .{ - .buffer = buffer, - .ended = false, - }, - }; - } - // TODO is this necessary? 
the http client already sets the redirect type, - // so manually setting it here seems redundant - if (fetch_options.redirect_type != FetchRedirect.follow) { - fetch_tasklet.http.?.client.remaining_redirect_count = 0; - } - - // we want to return after headers are received - fetch_tasklet.signal_store.header_progress.store(true, .monotonic); - - if (fetch_tasklet.request_body == .Sendfile) { - bun.assert(fetch_options.url.isHTTP()); - bun.assert(fetch_options.proxy == null); - fetch_tasklet.http.?.request_body = .{ .sendfile = fetch_tasklet.request_body.Sendfile }; - } - - if (fetch_tasklet.signal) |signal| { - signal.pendingActivityRef(); - fetch_tasklet.signal = signal.listen(FetchTasklet, fetch_tasklet, FetchTasklet.abortListener); - } - return fetch_tasklet; - } - - pub fn abortListener(this: *FetchTasklet, reason: JSValue) void { - log("abortListener", .{}); - reason.ensureStillAlive(); - this.abort_reason.set(this.global_this, reason); - this.abortTask(); - if (this.sink) |sink| { - sink.cancel(reason); - return; - } - } - - /// This is ALWAYS called from the http thread and we cannot touch the buffer here because is locked - pub fn onWriteRequestDataDrain(this: *FetchTasklet) void { - // ref until the main thread callback is called - this.ref(); - this.javascript_vm.eventLoop().enqueueTaskConcurrent(jsc.ConcurrentTask.fromCallback(this, FetchTasklet.resumeRequestDataStream)); - } - - /// This is ALWAYS called from the main thread - // XXX: 'fn (*FetchTasklet) error{}!void' coerces to 'fn (*FetchTasklet) bun.JSError!void' but 'fn (*FetchTasklet) void' does not - pub fn resumeRequestDataStream(this: *FetchTasklet) error{}!void { - // deref when done because we ref inside onWriteRequestDataDrain - defer this.deref(); - log("resumeRequestDataStream", .{}); - if (this.sink) |sink| { - if (this.signal) |signal| { - if (signal.aborted()) { - // already aborted; nothing to drain - return; - } - } - sink.drain(); - } - } - - pub fn writeRequestData(this: 
*FetchTasklet, data: []const u8) ResumableSinkBackpressure { - log("writeRequestData {}", .{data.len}); - if (this.signal) |signal| { - if (signal.aborted()) { - return .done; - } - } - const thread_safe_stream_buffer = this.request_body_streaming_buffer orelse return .done; - const stream_buffer = thread_safe_stream_buffer.acquire(); - defer thread_safe_stream_buffer.release(); - const highWaterMark = if (this.sink) |sink| sink.highWaterMark else 16384; - - var needs_schedule = false; - defer if (needs_schedule) { - // wakeup the http thread to write the data - http.http_thread.scheduleRequestWrite(this.http.?, .data); - }; - - // dont have backpressure so we will schedule the data to be written - // if we have backpressure the onWritable will drain the buffer - needs_schedule = stream_buffer.isEmpty(); - if (this.upgraded_connection) { - bun.handleOom(stream_buffer.write(data)); - } else { - //16 is the max size of a hex number size that represents 64 bits + 2 for the \r\n - var formated_size_buffer: [18]u8 = undefined; - const formated_size = std.fmt.bufPrint( - formated_size_buffer[0..], - "{x}\r\n", - .{data.len}, - ) catch |err| switch (err) { - error.NoSpaceLeft => unreachable, - }; - bun.handleOom(stream_buffer.ensureUnusedCapacity(formated_size.len + data.len + 2)); - stream_buffer.writeAssumeCapacity(formated_size); - stream_buffer.writeAssumeCapacity(data); - stream_buffer.writeAssumeCapacity("\r\n"); - } - - // pause the stream if we hit the high water mark - return if (stream_buffer.size() >= highWaterMark) .backpressure else .want_more; - } - - pub fn writeEndRequest(this: *FetchTasklet, err: ?jsc.JSValue) void { - log("writeEndRequest hasError? 
{}", .{err != null}); - defer this.deref(); - if (err) |jsError| { - if (this.signal_store.aborted.load(.monotonic) or this.abort_reason.has()) { - return; - } - if (!jsError.isUndefinedOrNull()) { - this.abort_reason.set(this.global_this, jsError); - } - this.abortTask(); - } else { - if (!this.upgraded_connection) { - // If is not upgraded we need to send the terminating chunk - const thread_safe_stream_buffer = this.request_body_streaming_buffer orelse return; - const stream_buffer = thread_safe_stream_buffer.acquire(); - defer thread_safe_stream_buffer.release(); - bun.handleOom(stream_buffer.write(http.end_of_chunked_http1_1_encoding_response_body)); - } - if (this.http) |http_| { - // just tell to write the end of the chunked encoding aka 0\r\n\r\n - http.http_thread.scheduleRequestWrite(http_, .end); - } - } - } - - pub fn abortTask(this: *FetchTasklet) void { - this.signal_store.aborted.store(true, .monotonic); - this.tracker.didCancel(this.global_this); - - if (this.http) |http_| { - http.http_thread.scheduleShutdown(http_); - } - } - - const FetchOptions = struct { - method: Method, - headers: Headers, - body: HTTPRequestBody, - disable_timeout: bool, - disable_keepalive: bool, - disable_decompression: bool, - reject_unauthorized: bool, - url: ZigURL, - verbose: http.HTTPVerboseLevel = .none, - redirect_type: FetchRedirect = FetchRedirect.follow, - proxy: ?ZigURL = null, - url_proxy_buffer: []const u8 = "", - signal: ?*jsc.WebCore.AbortSignal = null, - globalThis: ?*JSGlobalObject, - // Custom Hostname - hostname: ?[]u8 = null, - memory_reporter: *bun.MemoryReportingAllocator, - check_server_identity: jsc.Strong.Optional = .empty, - unix_socket_path: ZigString.Slice, - ssl_config: ?*SSLConfig = null, - upgraded_connection: bool = false, - }; - - pub fn queue( - allocator: std.mem.Allocator, - global: *JSGlobalObject, - fetch_options: *const FetchOptions, - promise: jsc.JSPromise.Strong, - ) !*FetchTasklet { - http.HTTPThread.init(&.{}); - var node = try 
get( - allocator, - global, - fetch_options, - promise, - ); - - var batch = bun.ThreadPool.Batch{}; - node.http.?.schedule(allocator, &batch); - node.poll_ref.ref(global.bunVM()); - - // increment ref so we can keep it alive until the http client is done - node.ref(); - http.http_thread.schedule(batch); - - return node; - } - - /// Called from HTTP thread. Handles HTTP events received from socket. - pub fn callback(task: *FetchTasklet, async_http: *http.AsyncHTTP, result: http.HTTPClientResult) void { - // at this point only this thread is accessing result to is no race condition - const is_done = !result.has_more; - // we are done with the http client so we can deref our side - // this is a atomic operation and will enqueue a task to deinit on the main thread - defer if (is_done) task.derefFromThread(); - - task.mutex.lock(); - // we need to unlock before task.deref(); - defer task.mutex.unlock(); - task.http.?.* = async_http.*; - task.http.?.response_buffer = async_http.response_buffer; - - log("callback success={} ignore_data={} has_more={} bytes={}", .{ result.isSuccess(), task.ignore_data, result.has_more, result.body.?.list.items.len }); - - const prev_metadata = task.result.metadata; - const prev_cert_info = task.result.certificate_info; - task.result = result; - - // Preserve pending certificate info if it was preovided in the previous update. 
- if (task.result.certificate_info == null) { - if (prev_cert_info) |cert_info| { - task.result.certificate_info = cert_info; - } - } - - // metadata should be provided only once - if (result.metadata orelse prev_metadata) |metadata| { - log("added callback metadata", .{}); - if (task.metadata == null) { - task.metadata = metadata; - } - - task.result.metadata = null; - } - - task.body_size = result.body_size; - - const success = result.isSuccess(); - task.response_buffer = result.body.?.*; - - if (task.ignore_data) { - task.response_buffer.reset(); - - if (task.scheduled_response_buffer.list.capacity > 0) { - task.scheduled_response_buffer.deinit(); - task.scheduled_response_buffer = .{ - .allocator = task.memory_reporter.allocator(), - .list = .{ - .items = &.{}, - .capacity = 0, - }, - }; - } - if (success and result.has_more) { - // we are ignoring the body so we should not receive more data, so will only signal when result.has_more = true - return; - } - } else { - if (success) { - _ = bun.handleOom(task.scheduled_response_buffer.write(task.response_buffer.list.items)); - } - // reset for reuse - task.response_buffer.reset(); - } - - if (task.has_schedule_callback.cmpxchgStrong(false, true, .acquire, .monotonic)) |has_schedule_callback| { - if (has_schedule_callback) { - return; - } - } - - task.javascript_vm.eventLoop().enqueueTaskConcurrent(task.concurrent_task.from(task, .manual_deinit)); - } -}; +pub const FetchTasklet = @import("./fetch/FetchTasklet.zig").FetchTasklet; fn dataURLResponse( _data_url: DataURL, @@ -1516,17 +172,10 @@ pub fn Bun__fetch_( bun.analytics.Features.fetch += 1; const vm = jsc.VirtualMachine.get(); - var memory_reporter = bun.handleOom(bun.default_allocator.create(bun.MemoryReportingAllocator)); // used to clean up dynamically allocated memory on error (a poor man's errdefer) var is_error = false; var upgraded_connection = false; - var allocator = memory_reporter.wrap(bun.default_allocator); - errdefer 
bun.default_allocator.destroy(memory_reporter); - defer { - memory_reporter.report(globalThis.vm()); - - if (is_error) bun.default_allocator.destroy(memory_reporter); - } + var allocator = bun.default_allocator; if (arguments.len == 0) { const err = ctx.toTypeError(.MISSING_ARGS, fetch_error_no_args, .{}); @@ -2696,7 +1345,6 @@ pub fn Bun__fetch_( .globalThis = globalThis, .ssl_config = ssl_config, .hostname = hostname, - .memory_reporter = memory_reporter, .upgraded_connection = upgraded_connection, .check_server_identity = if (check_server_identity.isEmptyOrUndefinedOrNull()) .empty else .create(check_server_identity, globalThis), .unix_socket_path = unix_socket_path, @@ -2745,21 +1393,16 @@ fn setHeaders(headers: *?Headers, new_headers: []const picohttp.Header, allocato const string = []const u8; -const X509 = @import("../api/bun/x509.zig"); const std = @import("std"); const DataURL = @import("../../resolver/data_url.zig").DataURL; const Method = @import("../../http/Method.zig").Method; const ZigURL = @import("../../url.zig").URL; const bun = @import("bun"); -const Async = bun.Async; const Environment = bun.Environment; -const MutableString = bun.MutableString; -const Mutex = bun.Mutex; const Output = bun.Output; const picohttp = bun.picohttp; const s3 = bun.S3; -const BoringSSL = bun.BoringSSL.c; const FetchHeaders = bun.webcore.FetchHeaders; const PosixToWinNormalizer = bun.path.PosixToWinNormalizer; const SSLConfig = bun.api.server.ServerConfig.SSLConfig; @@ -2779,7 +1422,6 @@ const JSType = jsc.C.JSType; const Body = jsc.WebCore.Body; const Request = jsc.WebCore.Request; const Response = jsc.WebCore.Response; -const ResumableSinkBackpressure = jsc.WebCore.ResumableSinkBackpressure; const Blob = jsc.WebCore.Blob; const AnyBlob = jsc.WebCore.Blob.Any; diff --git a/src/bun.js/webcore/fetch/FetchTasklet.zig b/src/bun.js/webcore/fetch/FetchTasklet.zig new file mode 100644 index 0000000000..b4f79058a4 --- /dev/null +++ b/src/bun.js/webcore/fetch/FetchTasklet.zig 
@@ -0,0 +1,1368 @@ +pub const FetchTasklet = struct { + pub const ResumableSink = jsc.WebCore.ResumableFetchSink; + + const log = Output.scoped(.FetchTasklet, .visible); + sink: ?*ResumableSink = null, + http: ?*http.AsyncHTTP = null, + result: http.HTTPClientResult = .{}, + metadata: ?http.HTTPResponseMetadata = null, + javascript_vm: *VirtualMachine = undefined, + global_this: *JSGlobalObject = undefined, + request_body: HTTPRequestBody = undefined, + request_body_streaming_buffer: ?*http.ThreadSafeStreamBuffer = null, + + /// buffer being used by AsyncHTTP + response_buffer: MutableString = undefined, + /// buffer used to stream response to JS + scheduled_response_buffer: MutableString = undefined, + /// response weak ref we need this to track the response JS lifetime + response: jsc.Weak(FetchTasklet) = .{}, + /// native response ref if we still need it when JS is discarted + native_response: ?*Response = null, + ignore_data: bool = false, + /// stream strong ref if any is available + readable_stream_ref: jsc.WebCore.ReadableStream.Strong = .{}, + request_headers: Headers = Headers{ .allocator = undefined }, + promise: jsc.JSPromise.Strong, + concurrent_task: jsc.ConcurrentTask = .{}, + poll_ref: Async.KeepAlive = .{}, + /// For Http Client requests + /// when Content-Length is provided this represents the whole size of the request + /// If chunked encoded this will represent the total received size (ignoring the chunk headers) + /// If is not chunked encoded and Content-Length is not provided this will be unknown + body_size: http.HTTPClientResult.BodySize = .unknown, + + /// This is url + proxy memory buffer and is owned by FetchTasklet + /// We always clone url and proxy (if informed) + url_proxy_buffer: []const u8 = "", + + signal: ?*jsc.WebCore.AbortSignal = null, + signals: http.Signals = .{}, + signal_store: http.Signals.Store = .{}, + has_schedule_callback: std.atomic.Value(bool) = std.atomic.Value(bool).init(false), + + // must be stored because 
AbortSignal stores reason weakly + abort_reason: jsc.Strong.Optional = .empty, + + // custom checkServerIdentity + check_server_identity: jsc.Strong.Optional = .empty, + reject_unauthorized: bool = true, + upgraded_connection: bool = false, + // Custom Hostname + hostname: ?[]u8 = null, + is_waiting_body: bool = false, + is_waiting_abort: bool = false, + is_waiting_request_stream_start: bool = false, + mutex: Mutex, + + tracker: jsc.Debugger.AsyncTaskTracker, + + ref_count: std.atomic.Value(u32) = std.atomic.Value(u32).init(1), + + pub fn ref(this: *FetchTasklet) void { + const count = this.ref_count.fetchAdd(1, .monotonic); + bun.debugAssert(count > 0); + } + + pub fn deref(this: *FetchTasklet) void { + const count = this.ref_count.fetchSub(1, .monotonic); + bun.debugAssert(count > 0); + + if (count == 1) { + this.deinit() catch |err| switch (err) {}; + } + } + + pub fn derefFromThread(this: *FetchTasklet) void { + const count = this.ref_count.fetchSub(1, .monotonic); + bun.debugAssert(count > 0); + + if (count == 1) { + // this is really unlikely to happen, but can happen + // lets make sure that we always call deinit from main thread + + this.javascript_vm.eventLoop().enqueueTaskConcurrent(jsc.ConcurrentTask.fromCallback(this, FetchTasklet.deinit)); + } + } + + pub const HTTPRequestBody = union(enum) { + AnyBlob: AnyBlob, + Sendfile: http.SendFile, + ReadableStream: jsc.WebCore.ReadableStream.Strong, + + pub const Empty: HTTPRequestBody = .{ .AnyBlob = .{ .Blob = .{} } }; + + pub fn store(this: *HTTPRequestBody) ?*Blob.Store { + return switch (this.*) { + .AnyBlob => this.AnyBlob.store(), + else => null, + }; + } + + pub fn slice(this: *const HTTPRequestBody) []const u8 { + return switch (this.*) { + .AnyBlob => this.AnyBlob.slice(), + else => "", + }; + } + + pub fn detach(this: *HTTPRequestBody) void { + switch (this.*) { + .AnyBlob => this.AnyBlob.detach(), + .ReadableStream => |*stream| { + stream.deinit(); + }, + .Sendfile => { + if 
(@max(this.Sendfile.offset, this.Sendfile.remain) > 0) + this.Sendfile.fd.close(); + this.Sendfile.offset = 0; + this.Sendfile.remain = 0; + }, + } + } + + pub fn fromJS(globalThis: *JSGlobalObject, value: JSValue) bun.JSError!HTTPRequestBody { + var body_value = try Body.Value.fromJS(globalThis, value); + if (body_value == .Used or (body_value == .Locked and (body_value.Locked.action != .none or body_value.Locked.isDisturbed2(globalThis)))) { + return globalThis.ERR(.BODY_ALREADY_USED, "body already used", .{}).throw(); + } + if (body_value == .Locked) { + if (body_value.Locked.readable.has()) { + // just grab the ref + return FetchTasklet.HTTPRequestBody{ .ReadableStream = body_value.Locked.readable }; + } + const readable = try body_value.toReadableStream(globalThis); + if (!readable.isEmptyOrUndefinedOrNull() and body_value == .Locked and body_value.Locked.readable.has()) { + return FetchTasklet.HTTPRequestBody{ .ReadableStream = body_value.Locked.readable }; + } + } + return FetchTasklet.HTTPRequestBody{ .AnyBlob = body_value.useAsAnyBlob() }; + } + + pub fn needsToReadFile(this: *HTTPRequestBody) bool { + return switch (this.*) { + .AnyBlob => |blob| blob.needsToReadFile(), + else => false, + }; + } + + pub fn isS3(this: *const HTTPRequestBody) bool { + return switch (this.*) { + .AnyBlob => |*blob| blob.isS3(), + else => false, + }; + } + + pub fn hasContentTypeFromUser(this: *HTTPRequestBody) bool { + return switch (this.*) { + .AnyBlob => |blob| blob.hasContentTypeFromUser(), + else => false, + }; + } + + pub fn getAnyBlob(this: *HTTPRequestBody) ?*AnyBlob { + return switch (this.*) { + .AnyBlob => &this.AnyBlob, + else => null, + }; + } + + pub fn hasBody(this: *HTTPRequestBody) bool { + return switch (this.*) { + .AnyBlob => |blob| blob.size() > 0, + .ReadableStream => |*stream| stream.has(), + .Sendfile => true, + }; + } + }; + + pub fn init(_: std.mem.Allocator) anyerror!FetchTasklet { + return FetchTasklet{}; + } + + fn clearSink(this: *FetchTasklet) 
void { + if (this.sink) |sink| { + this.sink = null; + sink.deref(); + } + if (this.request_body_streaming_buffer) |buffer| { + this.request_body_streaming_buffer = null; + buffer.clearDrainCallback(); + buffer.deref(); + } + } + + fn clearData(this: *FetchTasklet) void { + log("clearData ", .{}); + const allocator = bun.default_allocator; + if (this.url_proxy_buffer.len > 0) { + allocator.free(this.url_proxy_buffer); + this.url_proxy_buffer.len = 0; + } + + if (this.hostname) |hostname| { + allocator.free(hostname); + this.hostname = null; + } + + if (this.result.certificate_info) |*certificate| { + certificate.deinit(bun.default_allocator); + this.result.certificate_info = null; + } + + this.request_headers.entries.deinit(allocator); + this.request_headers.buf.deinit(allocator); + this.request_headers = Headers{ .allocator = undefined }; + + if (this.http) |http_| { + http_.clearData(); + } + + if (this.metadata != null) { + this.metadata.?.deinit(allocator); + this.metadata = null; + } + + this.response_buffer.deinit(); + this.response.deinit(); + if (this.native_response) |response| { + this.native_response = null; + + response.unref(); + } + + this.readable_stream_ref.deinit(); + + this.scheduled_response_buffer.deinit(); + if (this.request_body != .ReadableStream or this.is_waiting_request_stream_start) { + this.request_body.detach(); + } + + this.abort_reason.deinit(); + this.check_server_identity.deinit(); + this.clearAbortSignal(); + // Clear the sink only after the requested ended otherwise we would potentialy lose the last chunk + this.clearSink(); + } + + // XXX: 'fn (*FetchTasklet) error{}!void' coerces to 'fn (*FetchTasklet) bun.JSError!void' but 'fn (*FetchTasklet) void' does not + pub fn deinit(this: *FetchTasklet) error{}!void { + log("deinit", .{}); + + bun.assert(this.ref_count.load(.monotonic) == 0); + + this.clearData(); + + const allocator = bun.default_allocator; + + if (this.http) |http_| { + this.http = null; + allocator.destroy(http_); + } 
+ allocator.destroy(this); + } + + fn getCurrentResponse(this: *FetchTasklet) ?*Response { + // we need a body to resolve the promise when buffering + if (this.native_response) |response| { + return response; + } + + // if we did not have a direct reference we check if the Weak ref is still alive + if (this.response.get()) |response_js| { + if (response_js.as(Response)) |response| { + return response; + } + } + + return null; + } + + pub fn startRequestStream(this: *FetchTasklet) void { + this.is_waiting_request_stream_start = false; + bun.assert(this.request_body == .ReadableStream); + if (this.request_body.ReadableStream.get(this.global_this)) |stream| { + if (this.signal) |signal| { + if (signal.aborted()) { + stream.abort(this.global_this); + return; + } + } + + const globalThis = this.global_this; + this.ref(); // lets only unref when sink is done + // +1 because the task refs the sink + const sink = ResumableSink.initExactRefs(globalThis, stream, this, 2); + this.sink = sink; + } + } + + pub fn onBodyReceived(this: *FetchTasklet) bun.JSTerminated!void { + const success = this.result.isSuccess(); + const globalThis = this.global_this; + // reset the buffer if we are streaming or if we are not waiting for bufferig anymore + var buffer_reset = true; + log("onBodyReceived success={} has_more={}", .{ success, this.result.has_more }); + defer { + if (buffer_reset) { + this.scheduled_response_buffer.reset(); + } + } + + if (!success) { + var err = this.onReject(); + var need_deinit = true; + defer if (need_deinit) err.deinit(); + var js_err = JSValue.zero; + // if we are streaming update with error + if (this.readable_stream_ref.get(globalThis)) |readable| { + if (readable.ptr == .Bytes) { + js_err = err.toJS(globalThis); + js_err.ensureStillAlive(); + try readable.ptr.Bytes.onData( + .{ + .err = .{ .JSValue = js_err }, + }, + bun.default_allocator, + ); + } + } + if (this.sink) |sink| { + if (js_err == .zero) { + js_err = err.toJS(globalThis); + 
js_err.ensureStillAlive(); + } + sink.cancel(js_err); + return; + } + // if we are buffering resolve the promise + if (this.getCurrentResponse()) |response| { + need_deinit = false; // body value now owns the error + const body = response.getBodyValue(); + try body.toErrorInstance(err, globalThis); + } + return; + } + + if (this.readable_stream_ref.get(globalThis)) |readable| { + log("onBodyReceived readable_stream_ref", .{}); + if (readable.ptr == .Bytes) { + readable.ptr.Bytes.size_hint = this.getSizeHint(); + // body can be marked as used but we still need to pipe the data + const scheduled_response_buffer = &this.scheduled_response_buffer.list; + + const chunk = scheduled_response_buffer.items; + + if (this.result.has_more) { + try readable.ptr.Bytes.onData( + .{ + .temporary = bun.ByteList.fromBorrowedSliceDangerous(chunk), + }, + bun.default_allocator, + ); + } else { + var prev = this.readable_stream_ref; + this.readable_stream_ref = .{}; + defer prev.deinit(); + buffer_reset = false; + + try readable.ptr.Bytes.onData( + .{ + .temporary_and_done = bun.ByteList.fromBorrowedSliceDangerous(chunk), + }, + bun.default_allocator, + ); + } + return; + } + } + + if (this.getCurrentResponse()) |response| { + log("onBodyReceived Current Response", .{}); + const sizeHint = this.getSizeHint(); + response.setSizeHint(sizeHint); + if (response.getBodyReadableStream(globalThis)) |readable| { + log("onBodyReceived CurrentResponse BodyReadableStream", .{}); + if (readable.ptr == .Bytes) { + const scheduled_response_buffer = this.scheduled_response_buffer.list; + + const chunk = scheduled_response_buffer.items; + + if (this.result.has_more) { + try readable.ptr.Bytes.onData( + .{ + .temporary = bun.ByteList.fromBorrowedSliceDangerous(chunk), + }, + bun.default_allocator, + ); + } else { + readable.value.ensureStillAlive(); + response.detachReadableStream(globalThis); + try readable.ptr.Bytes.onData( + .{ + .temporary_and_done = bun.ByteList.fromBorrowedSliceDangerous(chunk), 
+ }, + bun.default_allocator, + ); + } + + return; + } + } + + // we will reach here when not streaming, this is also the only case we dont wanna to reset the buffer + buffer_reset = false; + if (!this.result.has_more) { + var scheduled_response_buffer = this.scheduled_response_buffer.list; + const body = response.getBodyValue(); + // done resolve body + var old = body.*; + const body_value = Body.Value{ + .InternalBlob = .{ + .bytes = scheduled_response_buffer.toManaged(bun.default_allocator), + }, + }; + body.* = body_value; + log("onBodyReceived body_value length={}", .{body_value.InternalBlob.bytes.items.len}); + + this.scheduled_response_buffer = .{ + .allocator = bun.default_allocator, + .list = .{ + .items = &.{}, + .capacity = 0, + }, + }; + + if (old == .Locked) { + log("onBodyReceived old.resolve", .{}); + try old.resolve(body, this.global_this, response.getFetchHeaders()); + } + } + } + } + + pub fn onProgressUpdate(this: *FetchTasklet) bun.JSTerminated!void { + jsc.markBinding(@src()); + log("onProgressUpdate", .{}); + this.mutex.lock(); + this.has_schedule_callback.store(false, .monotonic); + const is_done = !this.result.has_more; + + const vm = this.javascript_vm; + // vm is shutting down we cannot touch JS + if (vm.isShuttingDown()) { + this.mutex.unlock(); + if (is_done) { + this.deref(); + } + return; + } + + const globalThis = this.global_this; + defer { + this.mutex.unlock(); + // if we are not done we wait until the next call + if (is_done) { + var poll_ref = this.poll_ref; + this.poll_ref = .{}; + poll_ref.unref(vm); + this.deref(); + } + } + if (this.is_waiting_request_stream_start and this.result.can_stream) { + // start streaming + this.startRequestStream(); + } + // if we already respond the metadata and still need to process the body + if (this.is_waiting_body) { + try this.onBodyReceived(); + return; + } + if (this.metadata == null and this.result.isSuccess()) return; + + // if we abort because of cert error + // we wait the Http Client 
because we already have the response + // we just need to deinit + if (this.is_waiting_abort) { + return; + } + const promise_value = this.promise.valueOrEmpty(); + + if (promise_value.isEmptyOrUndefinedOrNull()) { + log("onProgressUpdate: promise_value is null", .{}); + this.promise.deinit(); + return; + } + + if (this.result.certificate_info) |certificate_info| { + this.result.certificate_info = null; + defer certificate_info.deinit(bun.default_allocator); + + // we receive some error + if (this.reject_unauthorized and !this.checkServerIdentity(certificate_info)) { + log("onProgressUpdate: aborted due certError", .{}); + // we need to abort the request + const promise = promise_value.asAnyPromise().?; + const tracker = this.tracker; + var result = this.onReject(); + defer result.deinit(); + + promise_value.ensureStillAlive(); + try promise.reject(globalThis, result.toJS(globalThis)); + + tracker.didDispatch(globalThis); + this.promise.deinit(); + return; + } + // everything ok + if (this.metadata == null) { + log("onProgressUpdate: metadata is null", .{}); + return; + } + } + + const tracker = this.tracker; + tracker.willDispatch(globalThis); + defer { + log("onProgressUpdate: promise_value is not null", .{}); + tracker.didDispatch(globalThis); + this.promise.deinit(); + } + const success = this.result.isSuccess(); + const result = switch (success) { + true => jsc.Strong.Optional.create(this.onResolve(), globalThis), + false => brk: { + // in this case we wanna a jsc.Strong.Optional so we just convert it + var value = this.onReject(); + const err = value.toJS(globalThis); + if (this.sink) |sink| { + sink.cancel(err); + } + break :brk value.JSValue; + }, + }; + + promise_value.ensureStillAlive(); + const Holder = struct { + held: jsc.Strong.Optional, + promise: jsc.Strong.Optional, + globalObject: *jsc.JSGlobalObject, + task: jsc.AnyTask, + + pub fn resolve(self: *@This()) bun.JSTerminated!void { + // cleanup + defer bun.default_allocator.destroy(self); + defer 
self.held.deinit(); + defer self.promise.deinit(); + // resolve the promise + var prom = self.promise.swap().asAnyPromise().?; + const res = self.held.swap(); + res.ensureStillAlive(); + try prom.resolve(self.globalObject, res); + } + + pub fn reject(self: *@This()) bun.JSTerminated!void { + // cleanup + defer bun.default_allocator.destroy(self); + defer self.held.deinit(); + defer self.promise.deinit(); + + // reject the promise + var prom = self.promise.swap().asAnyPromise().?; + const res = self.held.swap(); + res.ensureStillAlive(); + try prom.reject(self.globalObject, res); + } + }; + var holder = bun.handleOom(bun.default_allocator.create(Holder)); + holder.* = .{ + .held = result, + // we need the promise to be alive until the task is done + .promise = this.promise.strong, + .globalObject = globalThis, + .task = undefined, + }; + this.promise.strong = .empty; + holder.task = switch (success) { + true => jsc.AnyTask.New(Holder, Holder.resolve).init(holder), + false => jsc.AnyTask.New(Holder, Holder.reject).init(holder), + }; + + vm.enqueueTask(jsc.Task.init(&holder.task)); + } + + pub fn checkServerIdentity(this: *FetchTasklet, certificate_info: http.CertificateInfo) bool { + if (this.check_server_identity.get()) |check_server_identity| { + check_server_identity.ensureStillAlive(); + if (certificate_info.cert.len > 0) { + const cert = certificate_info.cert; + var cert_ptr = cert.ptr; + if (BoringSSL.d2i_X509(null, &cert_ptr, @intCast(cert.len))) |x509| { + const globalObject = this.global_this; + defer x509.free(); + const js_cert = X509.toJS(x509, globalObject) catch |err| { + switch (err) { + error.JSError => {}, + error.OutOfMemory => globalObject.throwOutOfMemory() catch {}, + error.JSTerminated => {}, + } + const check_result = globalObject.tryTakeException().?; + // mark to wait until deinit + this.is_waiting_abort = this.result.has_more; + this.abort_reason.set(globalObject, check_result); + this.signal_store.aborted.store(true, .monotonic); + 
this.tracker.didCancel(this.global_this); + // we need to abort the request + if (this.http) |http_| http.http_thread.scheduleShutdown(http_); + this.result.fail = error.ERR_TLS_CERT_ALTNAME_INVALID; + return false; + }; + var hostname: bun.String = bun.String.cloneUTF8(certificate_info.hostname); + defer hostname.deref(); + const js_hostname = hostname.toJS(globalObject); + js_hostname.ensureStillAlive(); + js_cert.ensureStillAlive(); + const check_result = check_server_identity.call(globalObject, .js_undefined, &.{ js_hostname, js_cert }) catch |err| globalObject.takeException(err); + + // > Returns object [...] on failure + if (check_result.isAnyError()) { + // mark to wait until deinit + this.is_waiting_abort = this.result.has_more; + this.abort_reason.set(globalObject, check_result); + this.signal_store.aborted.store(true, .monotonic); + this.tracker.didCancel(this.global_this); + + // we need to abort the request + if (this.http) |http_| { + http.http_thread.scheduleShutdown(http_); + } + this.result.fail = error.ERR_TLS_CERT_ALTNAME_INVALID; + return false; + } + + // > On success, returns + // We treat any non-error value as a success. 
+ return true; + } + } + } + this.result.fail = error.ERR_TLS_CERT_ALTNAME_INVALID; + return false; + } + + fn getAbortError(this: *FetchTasklet) ?Body.Value.ValueError { + if (this.abort_reason.has()) { + defer this.clearAbortSignal(); + const out = this.abort_reason; + + this.abort_reason = .empty; + return Body.Value.ValueError{ .JSValue = out }; + } + + if (this.signal) |signal| { + if (signal.reasonIfAborted(this.global_this)) |reason| { + defer this.clearAbortSignal(); + return reason.toBodyValueError(this.global_this); + } + } + + return null; + } + + fn clearAbortSignal(this: *FetchTasklet) void { + const signal = this.signal orelse return; + this.signal = null; + defer { + signal.pendingActivityUnref(); + signal.unref(); + } + + signal.cleanNativeBindings(this); + } + + pub fn onReject(this: *FetchTasklet) Body.Value.ValueError { + bun.assert(this.result.fail != null); + log("onReject", .{}); + + if (this.getAbortError()) |err| { + return err; + } + + if (this.result.abortReason()) |reason| { + return .{ .AbortReason = reason }; + } + + // some times we don't have metadata so we also check http.url + const path = if (this.metadata) |metadata| + bun.String.cloneUTF8(metadata.url) + else if (this.http) |http_| + bun.String.cloneUTF8(http_.url.href) + else + bun.String.empty; + + const fetch_error = jsc.SystemError{ + .code = bun.String.static(switch (this.result.fail.?) { + error.ConnectionClosed => "ECONNRESET", + else => |e| @errorName(e), + }), + .message = switch (this.result.fail.?) { + error.ConnectionClosed => bun.String.static("The socket connection was closed unexpectedly. For more information, pass `verbose: true` in the second argument to fetch()"), + error.FailedToOpenSocket => bun.String.static("Was there a typo in the url or port?"), + error.TooManyRedirects => bun.String.static("The response redirected too many times. 
For more information, pass `verbose: true` in the second argument to fetch()"), + error.ConnectionRefused => bun.String.static("Unable to connect. Is the computer able to access the url?"), + error.RedirectURLInvalid => bun.String.static("Redirect URL in Location header is invalid."), + + error.UNABLE_TO_GET_ISSUER_CERT => bun.String.static("unable to get issuer certificate"), + error.UNABLE_TO_GET_CRL => bun.String.static("unable to get certificate CRL"), + error.UNABLE_TO_DECRYPT_CERT_SIGNATURE => bun.String.static("unable to decrypt certificate's signature"), + error.UNABLE_TO_DECRYPT_CRL_SIGNATURE => bun.String.static("unable to decrypt CRL's signature"), + error.UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY => bun.String.static("unable to decode issuer public key"), + error.CERT_SIGNATURE_FAILURE => bun.String.static("certificate signature failure"), + error.CRL_SIGNATURE_FAILURE => bun.String.static("CRL signature failure"), + error.CERT_NOT_YET_VALID => bun.String.static("certificate is not yet valid"), + error.CRL_NOT_YET_VALID => bun.String.static("CRL is not yet valid"), + error.CERT_HAS_EXPIRED => bun.String.static("certificate has expired"), + error.CRL_HAS_EXPIRED => bun.String.static("CRL has expired"), + error.ERROR_IN_CERT_NOT_BEFORE_FIELD => bun.String.static("format error in certificate's notBefore field"), + error.ERROR_IN_CERT_NOT_AFTER_FIELD => bun.String.static("format error in certificate's notAfter field"), + error.ERROR_IN_CRL_LAST_UPDATE_FIELD => bun.String.static("format error in CRL's lastUpdate field"), + error.ERROR_IN_CRL_NEXT_UPDATE_FIELD => bun.String.static("format error in CRL's nextUpdate field"), + error.OUT_OF_MEM => bun.String.static("out of memory"), + error.DEPTH_ZERO_SELF_SIGNED_CERT => bun.String.static("self signed certificate"), + error.SELF_SIGNED_CERT_IN_CHAIN => bun.String.static("self signed certificate in certificate chain"), + error.UNABLE_TO_GET_ISSUER_CERT_LOCALLY => bun.String.static("unable to get local issuer 
certificate"), + error.UNABLE_TO_VERIFY_LEAF_SIGNATURE => bun.String.static("unable to verify the first certificate"), + error.CERT_CHAIN_TOO_LONG => bun.String.static("certificate chain too long"), + error.CERT_REVOKED => bun.String.static("certificate revoked"), + error.INVALID_CA => bun.String.static("invalid CA certificate"), + error.INVALID_NON_CA => bun.String.static("invalid non-CA certificate (has CA markings)"), + error.PATH_LENGTH_EXCEEDED => bun.String.static("path length constraint exceeded"), + error.PROXY_PATH_LENGTH_EXCEEDED => bun.String.static("proxy path length constraint exceeded"), + error.PROXY_CERTIFICATES_NOT_ALLOWED => bun.String.static("proxy certificates not allowed, please set the appropriate flag"), + error.INVALID_PURPOSE => bun.String.static("unsupported certificate purpose"), + error.CERT_UNTRUSTED => bun.String.static("certificate not trusted"), + error.CERT_REJECTED => bun.String.static("certificate rejected"), + error.APPLICATION_VERIFICATION => bun.String.static("application verification failure"), + error.SUBJECT_ISSUER_MISMATCH => bun.String.static("subject issuer mismatch"), + error.AKID_SKID_MISMATCH => bun.String.static("authority and subject key identifier mismatch"), + error.AKID_ISSUER_SERIAL_MISMATCH => bun.String.static("authority and issuer serial number mismatch"), + error.KEYUSAGE_NO_CERTSIGN => bun.String.static("key usage does not include certificate signing"), + error.UNABLE_TO_GET_CRL_ISSUER => bun.String.static("unable to get CRL issuer certificate"), + error.UNHANDLED_CRITICAL_EXTENSION => bun.String.static("unhandled critical extension"), + error.KEYUSAGE_NO_CRL_SIGN => bun.String.static("key usage does not include CRL signing"), + error.KEYUSAGE_NO_DIGITAL_SIGNATURE => bun.String.static("key usage does not include digital signature"), + error.UNHANDLED_CRITICAL_CRL_EXTENSION => bun.String.static("unhandled critical CRL extension"), + error.INVALID_EXTENSION => bun.String.static("invalid or inconsistent 
certificate extension"), + error.INVALID_POLICY_EXTENSION => bun.String.static("invalid or inconsistent certificate policy extension"), + error.NO_EXPLICIT_POLICY => bun.String.static("no explicit policy"), + error.DIFFERENT_CRL_SCOPE => bun.String.static("Different CRL scope"), + error.UNSUPPORTED_EXTENSION_FEATURE => bun.String.static("Unsupported extension feature"), + error.UNNESTED_RESOURCE => bun.String.static("RFC 3779 resource not subset of parent's resources"), + error.PERMITTED_VIOLATION => bun.String.static("permitted subtree violation"), + error.EXCLUDED_VIOLATION => bun.String.static("excluded subtree violation"), + error.SUBTREE_MINMAX => bun.String.static("name constraints minimum and maximum not supported"), + error.UNSUPPORTED_CONSTRAINT_TYPE => bun.String.static("unsupported name constraint type"), + error.UNSUPPORTED_CONSTRAINT_SYNTAX => bun.String.static("unsupported or invalid name constraint syntax"), + error.UNSUPPORTED_NAME_SYNTAX => bun.String.static("unsupported or invalid name syntax"), + error.CRL_PATH_VALIDATION_ERROR => bun.String.static("CRL path validation error"), + error.SUITE_B_INVALID_VERSION => bun.String.static("Suite B: certificate version invalid"), + error.SUITE_B_INVALID_ALGORITHM => bun.String.static("Suite B: invalid public key algorithm"), + error.SUITE_B_INVALID_CURVE => bun.String.static("Suite B: invalid ECC curve"), + error.SUITE_B_INVALID_SIGNATURE_ALGORITHM => bun.String.static("Suite B: invalid signature algorithm"), + error.SUITE_B_LOS_NOT_ALLOWED => bun.String.static("Suite B: curve not allowed for this LOS"), + error.SUITE_B_CANNOT_SIGN_P_384_WITH_P_256 => bun.String.static("Suite B: cannot sign P-384 with P-256"), + error.HOSTNAME_MISMATCH => bun.String.static("Hostname mismatch"), + error.EMAIL_MISMATCH => bun.String.static("Email address mismatch"), + error.IP_ADDRESS_MISMATCH => bun.String.static("IP address mismatch"), + error.INVALID_CALL => bun.String.static("Invalid certificate verification context"), + 
error.STORE_LOOKUP => bun.String.static("Issuer certificate lookup error"), + error.NAME_CONSTRAINTS_WITHOUT_SANS => bun.String.static("Issuer has name constraints but leaf has no SANs"), + error.UNKNOWN_CERTIFICATE_VERIFICATION_ERROR => bun.String.static("unknown certificate verification error"), + + else => |e| bun.String.createFormat("{s} fetching \"{}\". For more information, pass `verbose: true` in the second argument to fetch()", .{ + @errorName(e), + path, + }) catch |err| bun.handleOom(err), + }, + .path = path, + }; + + return .{ .SystemError = fetch_error }; + } + + pub fn onReadableStreamAvailable(ctx: *anyopaque, globalThis: *jsc.JSGlobalObject, readable: jsc.WebCore.ReadableStream) void { + const this = bun.cast(*FetchTasklet, ctx); + this.readable_stream_ref = jsc.WebCore.ReadableStream.Strong.init(readable, globalThis); + } + + pub fn onStartStreamingHTTPResponseBodyCallback(ctx: *anyopaque) jsc.WebCore.DrainResult { + const this = bun.cast(*FetchTasklet, ctx); + if (this.signal_store.aborted.load(.monotonic)) { + return jsc.WebCore.DrainResult{ + .aborted = {}, + }; + } + + if (this.http) |http_| { + http_.enableResponseBodyStreaming(); + + // If the server sent the headers and the response body in two separate socket writes + // and if the server doesn't close the connection by itself + // and doesn't send any follow-up data + // then we must make sure the HTTP thread flushes. 
+ bun.http.http_thread.scheduleResponseBodyDrain(http_.async_http_id); + } + + this.mutex.lock(); + defer this.mutex.unlock(); + const size_hint = this.getSizeHint(); + + var scheduled_response_buffer = this.scheduled_response_buffer.list; + // This means we have received part of the body but not the whole thing + if (scheduled_response_buffer.items.len > 0) { + this.scheduled_response_buffer = .{ + .allocator = bun.default_allocator, + .list = .{ + .items = &.{}, + .capacity = 0, + }, + }; + + return .{ + .owned = .{ + .list = scheduled_response_buffer.toManaged(bun.default_allocator), + .size_hint = size_hint, + }, + }; + } + + return .{ + .estimated_size = size_hint, + }; + } + + fn getSizeHint(this: *FetchTasklet) Blob.SizeType { + return switch (this.body_size) { + .content_length => @truncate(this.body_size.content_length), + .total_received => @truncate(this.body_size.total_received), + .unknown => 0, + }; + } + + fn toBodyValue(this: *FetchTasklet) Body.Value { + if (this.getAbortError()) |err| { + return .{ .Error = err }; + } + if (this.is_waiting_body) { + const response = Body.Value{ + .Locked = .{ + .size_hint = this.getSizeHint(), + .task = this, + .global = this.global_this, + .onStartStreaming = FetchTasklet.onStartStreamingHTTPResponseBodyCallback, + .onReadableStreamAvailable = FetchTasklet.onReadableStreamAvailable, + }, + }; + return response; + } + + var scheduled_response_buffer = this.scheduled_response_buffer.list; + const response = Body.Value{ + .InternalBlob = .{ + .bytes = scheduled_response_buffer.toManaged(bun.default_allocator), + }, + }; + this.scheduled_response_buffer = .{ + .allocator = bun.default_allocator, + .list = .{ + .items = &.{}, + .capacity = 0, + }, + }; + + return response; + } + + fn toResponse(this: *FetchTasklet) Response { + log("toResponse", .{}); + bun.assert(this.metadata != null); + // at this point we always should have metadata + const metadata = this.metadata.?; + const http_response = metadata.response; + 
this.is_waiting_body = this.result.has_more; + return Response.init( + .{ + .headers = FetchHeaders.createFromPicoHeaders(http_response.headers), + .status_code = @as(u16, @truncate(http_response.status_code)), + .status_text = bun.String.createAtomIfPossible(http_response.status), + }, + Body{ + .value = this.toBodyValue(), + }, + bun.String.createAtomIfPossible(metadata.url), + this.result.redirected, + ); + } + + fn ignoreRemainingResponseBody(this: *FetchTasklet) void { + log("ignoreRemainingResponseBody", .{}); + // enabling streaming will make the http thread to drain into the main thread (aka stop buffering) + // without a stream ref, response body or response instance alive it will just ignore the result + if (this.http) |http_| { + http_.enableResponseBodyStreaming(); + } + // we should not keep the process alive if we are ignoring the body + const vm = this.javascript_vm; + this.poll_ref.unref(vm); + // clean any remaining refereces + this.readable_stream_ref.deinit(); + this.response.deinit(); + + if (this.native_response) |response| { + response.unref(); + this.native_response = null; + } + + this.ignore_data = true; + } + + export fn Bun__FetchResponse_finalize(this: *FetchTasklet) callconv(.C) void { + log("onResponseFinalize", .{}); + if (this.native_response) |response| { + const body = response.getBodyValue(); + // Three scenarios: + // + // 1. We are streaming, in which case we should not ignore the body. + // 2. We were buffering, in which case + // 2a. if we have no promise, we should ignore the body. + // 2b. if we have a promise, we should keep loading the body. + // 3. We never started buffering, in which case we should ignore the body. + // + // Note: We cannot call .get() on the ReadableStreamRef. This is called inside a finalizer. + if (body.* != .Locked or this.readable_stream_ref.held.has()) { + // Scenario 1 or 3. + return; + } + + if (body.Locked.promise) |promise| { + if (promise.isEmptyOrUndefinedOrNull()) { + // Scenario 2b. 
+ this.ignoreRemainingResponseBody(); + } + } else { + // Scenario 3. + this.ignoreRemainingResponseBody(); + } + } + } + comptime { + _ = Bun__FetchResponse_finalize; + } + + pub fn onResolve(this: *FetchTasklet) JSValue { + log("onResolve", .{}); + const response = bun.new(Response, this.toResponse()); + const response_js = Response.makeMaybePooled(@as(*jsc.JSGlobalObject, this.global_this), response); + response_js.ensureStillAlive(); + this.response = jsc.Weak(FetchTasklet).create(response_js, this.global_this, .FetchResponse, this); + this.native_response = response.ref(); + return response_js; + } + + pub fn get( + allocator: std.mem.Allocator, + globalThis: *jsc.JSGlobalObject, + fetch_options: *const FetchOptions, + promise: jsc.JSPromise.Strong, + ) !*FetchTasklet { + var jsc_vm = globalThis.bunVM(); + var fetch_tasklet = try allocator.create(FetchTasklet); + + fetch_tasklet.* = .{ + .mutex = .{}, + .scheduled_response_buffer = .{ + .allocator = bun.default_allocator, + .list = .{ + .items = &.{}, + .capacity = 0, + }, + }, + .response_buffer = MutableString{ + .allocator = bun.default_allocator, + .list = .{ + .items = &.{}, + .capacity = 0, + }, + }, + .http = try allocator.create(http.AsyncHTTP), + .javascript_vm = jsc_vm, + .request_body = fetch_options.body, + .global_this = globalThis, + .promise = promise, + .request_headers = fetch_options.headers, + .url_proxy_buffer = fetch_options.url_proxy_buffer, + .signal = fetch_options.signal, + .hostname = fetch_options.hostname, + .tracker = jsc.Debugger.AsyncTaskTracker.init(jsc_vm), + .check_server_identity = fetch_options.check_server_identity, + .reject_unauthorized = fetch_options.reject_unauthorized, + .upgraded_connection = fetch_options.upgraded_connection, + }; + + fetch_tasklet.signals = fetch_tasklet.signal_store.to(); + + fetch_tasklet.tracker.didSchedule(globalThis); + + if (fetch_tasklet.request_body.store()) |store| { + store.ref(); + } + + var proxy: ?ZigURL = null; + if 
(fetch_options.proxy) |proxy_opt| { + if (!proxy_opt.isEmpty()) { //if is empty just ignore proxy + proxy = fetch_options.proxy orelse jsc_vm.transpiler.env.getHttpProxyFor(fetch_options.url); + } + } else { + proxy = jsc_vm.transpiler.env.getHttpProxyFor(fetch_options.url); + } + + if (fetch_tasklet.check_server_identity.has() and fetch_tasklet.reject_unauthorized) { + fetch_tasklet.signal_store.cert_errors.store(true, .monotonic); + } else { + fetch_tasklet.signals.cert_errors = null; + } + + // This task gets queued on the HTTP thread. + fetch_tasklet.http.?.* = http.AsyncHTTP.init( + bun.default_allocator, + fetch_options.method, + fetch_options.url, + fetch_options.headers.entries, + fetch_options.headers.buf.items, + &fetch_tasklet.response_buffer, + fetch_tasklet.request_body.slice(), + http.HTTPClientResult.Callback.New( + *FetchTasklet, + // handles response events (on headers, on body, etc.) + FetchTasklet.callback, + ).init(fetch_tasklet), + fetch_options.redirect_type, + .{ + .http_proxy = proxy, + .hostname = fetch_options.hostname, + .signals = fetch_tasklet.signals, + .unix_socket_path = fetch_options.unix_socket_path, + .disable_timeout = fetch_options.disable_timeout, + .disable_keepalive = fetch_options.disable_keepalive, + .disable_decompression = fetch_options.disable_decompression, + .reject_unauthorized = fetch_options.reject_unauthorized, + .verbose = fetch_options.verbose, + .tls_props = fetch_options.ssl_config, + }, + ); + // enable streaming the write side + const isStream = fetch_tasklet.request_body == .ReadableStream; + fetch_tasklet.http.?.client.flags.is_streaming_request_body = isStream; + fetch_tasklet.is_waiting_request_stream_start = isStream; + if (isStream) { + const buffer = http.ThreadSafeStreamBuffer.new(.{}); + buffer.setDrainCallback(FetchTasklet, FetchTasklet.onWriteRequestDataDrain, fetch_tasklet); + fetch_tasklet.request_body_streaming_buffer = buffer; + fetch_tasklet.http.?.request_body = .{ + .stream = .{ + .buffer = 
buffer, + .ended = false, + }, + }; + } + // TODO is this necessary? the http client already sets the redirect type, + // so manually setting it here seems redundant + if (fetch_options.redirect_type != FetchRedirect.follow) { + fetch_tasklet.http.?.client.remaining_redirect_count = 0; + } + + // we want to return after headers are received + fetch_tasklet.signal_store.header_progress.store(true, .monotonic); + + if (fetch_tasklet.request_body == .Sendfile) { + bun.assert(fetch_options.url.isHTTP()); + bun.assert(fetch_options.proxy == null); + fetch_tasklet.http.?.request_body = .{ .sendfile = fetch_tasklet.request_body.Sendfile }; + } + + if (fetch_tasklet.signal) |signal| { + signal.pendingActivityRef(); + fetch_tasklet.signal = signal.listen(FetchTasklet, fetch_tasklet, FetchTasklet.abortListener); + } + return fetch_tasklet; + } + + pub fn abortListener(this: *FetchTasklet, reason: JSValue) void { + log("abortListener", .{}); + reason.ensureStillAlive(); + this.abort_reason.set(this.global_this, reason); + this.abortTask(); + if (this.sink) |sink| { + sink.cancel(reason); + return; + } + } + + /// This is ALWAYS called from the http thread and we cannot touch the buffer here because is locked + pub fn onWriteRequestDataDrain(this: *FetchTasklet) void { + // ref until the main thread callback is called + this.ref(); + this.javascript_vm.eventLoop().enqueueTaskConcurrent(jsc.ConcurrentTask.fromCallback(this, FetchTasklet.resumeRequestDataStream)); + } + + /// This is ALWAYS called from the main thread + // XXX: 'fn (*FetchTasklet) error{}!void' coerces to 'fn (*FetchTasklet) bun.JSError!void' but 'fn (*FetchTasklet) void' does not + pub fn resumeRequestDataStream(this: *FetchTasklet) error{}!void { + // deref when done because we ref inside onWriteRequestDataDrain + defer this.deref(); + log("resumeRequestDataStream", .{}); + if (this.sink) |sink| { + if (this.signal) |signal| { + if (signal.aborted()) { + // already aborted; nothing to drain + return; + } + } + 
sink.drain(); + } + } + + pub fn writeRequestData(this: *FetchTasklet, data: []const u8) ResumableSinkBackpressure { + log("writeRequestData {}", .{data.len}); + if (this.signal) |signal| { + if (signal.aborted()) { + return .done; + } + } + const thread_safe_stream_buffer = this.request_body_streaming_buffer orelse return .done; + const stream_buffer = thread_safe_stream_buffer.acquire(); + defer thread_safe_stream_buffer.release(); + const highWaterMark = if (this.sink) |sink| sink.highWaterMark else 16384; + + var needs_schedule = false; + defer if (needs_schedule) { + // wakeup the http thread to write the data + http.http_thread.scheduleRequestWrite(this.http.?, .data); + }; + + // dont have backpressure so we will schedule the data to be written + // if we have backpressure the onWritable will drain the buffer + needs_schedule = stream_buffer.isEmpty(); + if (this.upgraded_connection) { + bun.handleOom(stream_buffer.write(data)); + } else { + //16 is the max size of a hex number size that represents 64 bits + 2 for the \r\n + var formated_size_buffer: [18]u8 = undefined; + const formated_size = std.fmt.bufPrint( + formated_size_buffer[0..], + "{x}\r\n", + .{data.len}, + ) catch |err| switch (err) { + error.NoSpaceLeft => unreachable, + }; + bun.handleOom(stream_buffer.ensureUnusedCapacity(formated_size.len + data.len + 2)); + stream_buffer.writeAssumeCapacity(formated_size); + stream_buffer.writeAssumeCapacity(data); + stream_buffer.writeAssumeCapacity("\r\n"); + } + + // pause the stream if we hit the high water mark + return if (stream_buffer.size() >= highWaterMark) .backpressure else .want_more; + } + + pub fn writeEndRequest(this: *FetchTasklet, err: ?jsc.JSValue) void { + log("writeEndRequest hasError? 
{}", .{err != null}); + defer this.deref(); + if (err) |jsError| { + if (this.signal_store.aborted.load(.monotonic) or this.abort_reason.has()) { + return; + } + if (!jsError.isUndefinedOrNull()) { + this.abort_reason.set(this.global_this, jsError); + } + this.abortTask(); + } else { + if (!this.upgraded_connection) { + // If it is not upgraded we need to send the terminating chunk + const thread_safe_stream_buffer = this.request_body_streaming_buffer orelse return; + const stream_buffer = thread_safe_stream_buffer.acquire(); + defer thread_safe_stream_buffer.release(); + bun.handleOom(stream_buffer.write(http.end_of_chunked_http1_1_encoding_response_body)); + } + if (this.http) |http_| { + // just tell it to write the end of the chunked encoding aka 0\r\n\r\n + http.http_thread.scheduleRequestWrite(http_, .end); + } + } + } + + pub fn abortTask(this: *FetchTasklet) void { + this.signal_store.aborted.store(true, .monotonic); + this.tracker.didCancel(this.global_this); + + if (this.http) |http_| { + http.http_thread.scheduleShutdown(http_); + } + } + + const FetchOptions = struct { + method: Method, + headers: Headers, + body: HTTPRequestBody, + disable_timeout: bool, + disable_keepalive: bool, + disable_decompression: bool, + reject_unauthorized: bool, + url: ZigURL, + verbose: http.HTTPVerboseLevel = .none, + redirect_type: FetchRedirect = FetchRedirect.follow, + proxy: ?ZigURL = null, + url_proxy_buffer: []const u8 = "", + signal: ?*jsc.WebCore.AbortSignal = null, + globalThis: ?*JSGlobalObject, + // Custom Hostname + hostname: ?[]u8 = null, + check_server_identity: jsc.Strong.Optional = .empty, + unix_socket_path: ZigString.Slice, + ssl_config: ?*SSLConfig = null, + upgraded_connection: bool = false, + }; + + pub fn queue( + allocator: std.mem.Allocator, + global: *JSGlobalObject, + fetch_options: *const FetchOptions, + promise: jsc.JSPromise.Strong, + ) !*FetchTasklet { + http.HTTPThread.init(&.{}); + var node = try get( + allocator, + global, + fetch_options, + 
promise, + ); + + var batch = bun.ThreadPool.Batch{}; + node.http.?.schedule(allocator, &batch); + node.poll_ref.ref(global.bunVM()); + + // increment ref so we can keep it alive until the http client is done + node.ref(); + http.http_thread.schedule(batch); + + return node; + } + + /// Called from HTTP thread. Handles HTTP events received from socket. + pub fn callback(task: *FetchTasklet, async_http: *http.AsyncHTTP, result: http.HTTPClientResult) void { + // at this point only this thread is accessing result so there is no race condition + const is_done = !result.has_more; + // we are done with the http client so we can deref our side + // this is an atomic operation and will enqueue a task to deinit on the main thread + defer if (is_done) task.derefFromThread(); + + task.mutex.lock(); + // we need to unlock before task.deref(); + defer task.mutex.unlock(); + task.http.?.* = async_http.*; + task.http.?.response_buffer = async_http.response_buffer; + + log("callback success={} ignore_data={} has_more={} bytes={}", .{ result.isSuccess(), task.ignore_data, result.has_more, result.body.?.list.items.len }); + + const prev_metadata = task.result.metadata; + const prev_cert_info = task.result.certificate_info; + task.result = result; + + // Preserve pending certificate info if it was provided in the previous update. 
+ if (task.result.certificate_info == null) { + if (prev_cert_info) |cert_info| { + task.result.certificate_info = cert_info; + } + } + + // metadata should be provided only once + if (result.metadata orelse prev_metadata) |metadata| { + log("added callback metadata", .{}); + if (task.metadata == null) { + task.metadata = metadata; + } + + task.result.metadata = null; + } + + task.body_size = result.body_size; + + const success = result.isSuccess(); + task.response_buffer = result.body.?.*; + + if (task.ignore_data) { + task.response_buffer.reset(); + + if (task.scheduled_response_buffer.list.capacity > 0) { + task.scheduled_response_buffer.deinit(); + task.scheduled_response_buffer = .{ + .allocator = bun.default_allocator, + .list = .{ + .items = &.{}, + .capacity = 0, + }, + }; + } + if (success and result.has_more) { + // we are ignoring the body so we should not receive more data, so will only signal when result.has_more = true + return; + } + } else { + if (success) { + _ = bun.handleOom(task.scheduled_response_buffer.write(task.response_buffer.list.items)); + } + // reset for reuse + task.response_buffer.reset(); + } + + if (task.has_schedule_callback.cmpxchgStrong(false, true, .acquire, .monotonic)) |has_schedule_callback| { + if (has_schedule_callback) { + return; + } + } + + task.javascript_vm.eventLoop().enqueueTaskConcurrent(task.concurrent_task.from(task, .manual_deinit)); + } +}; + +const X509 = @import("../../api/bun/x509.zig"); +const std = @import("std"); +const Method = @import("../../../http/Method.zig").Method; +const ZigURL = @import("../../../url.zig").URL; + +const bun = @import("bun"); +const Async = bun.Async; +const MutableString = bun.MutableString; +const Mutex = bun.Mutex; +const Output = bun.Output; +const BoringSSL = bun.BoringSSL.c; +const FetchHeaders = bun.webcore.FetchHeaders; +const SSLConfig = bun.api.server.ServerConfig.SSLConfig; + +const http = bun.http; +const FetchRedirect = http.FetchRedirect; +const Headers = 
bun.http.Headers; + +const jsc = bun.jsc; +const JSGlobalObject = jsc.JSGlobalObject; +const JSPromise = jsc.JSPromise; +const JSValue = jsc.JSValue; +const VirtualMachine = jsc.VirtualMachine; +const ZigString = jsc.ZigString; + +const Body = jsc.WebCore.Body; +const Response = jsc.WebCore.Response; +const ResumableSinkBackpressure = jsc.WebCore.ResumableSinkBackpressure; + +const Blob = jsc.WebCore.Blob; +const AnyBlob = jsc.WebCore.Blob.Any; diff --git a/src/bun.zig b/src/bun.zig index fb4b98ba84..31382d0cd7 100644 --- a/src/bun.zig +++ b/src/bun.zig @@ -683,7 +683,6 @@ pub const MimallocArena = allocators.MimallocArena; pub const AllocationScope = allocators.AllocationScope; pub const NullableAllocator = allocators.NullableAllocator; pub const MaxHeapAllocator = allocators.MaxHeapAllocator; -pub const MemoryReportingAllocator = allocators.MemoryReportingAllocator; pub const isSliceInBuffer = allocators.isSliceInBuffer; pub const isSliceInBufferT = allocators.isSliceInBufferT; diff --git a/src/cli.zig b/src/cli.zig index 981269f3cc..71617062a2 100644 --- a/src/cli.zig +++ b/src/cli.zig @@ -387,6 +387,11 @@ pub const Command = struct { expose_gc: bool = false, preserve_symlinks_main: bool = false, console_depth: ?u16 = null, + cpu_prof: struct { + enabled: bool = false, + name: []const u8 = "", + dir: []const u8 = "", + } = .{}, }; var global_cli_ctx: Context = undefined; @@ -617,7 +622,7 @@ pub const Command = struct { RootCommandMatcher.case("logout") => .ReservedCommand, RootCommandMatcher.case("whoami") => .PackageManagerCommand, RootCommandMatcher.case("prune") => .ReservedCommand, - RootCommandMatcher.case("list") => .ReservedCommand, + RootCommandMatcher.case("list") => .PackageManagerCommand, RootCommandMatcher.case("why") => .WhyCommand, RootCommandMatcher.case("-e") => .AutoCommand, diff --git a/src/cli/Arguments.zig b/src/cli/Arguments.zig index 2c55ffb5d1..3de212fc83 100644 --- a/src/cli/Arguments.zig +++ b/src/cli/Arguments.zig @@ -85,6 +85,9 @@ pub 
const runtime_params_ = [_]ParamType{ clap.parseParam("--inspect ? Activate Bun's debugger") catch unreachable, clap.parseParam("--inspect-wait ? Activate Bun's debugger, wait for a connection before executing") catch unreachable, clap.parseParam("--inspect-brk ? Activate Bun's debugger, set breakpoint on first line of code and wait") catch unreachable, + clap.parseParam("--cpu-prof Start CPU profiler and write profile to disk on exit") catch unreachable, + clap.parseParam("--cpu-prof-name Specify the name of the CPU profile file") catch unreachable, + clap.parseParam("--cpu-prof-dir Specify the directory where the CPU profile will be saved") catch unreachable, clap.parseParam("--if-present Exit without an error if the entrypoint does not exist") catch unreachable, clap.parseParam("--no-install Disable auto install in the Bun runtime") catch unreachable, clap.parseParam("--install Configure auto-install behavior. One of \"auto\" (default, auto-installs when no node_modules), \"fallback\" (missing packages only), \"force\" (always).") catch unreachable, @@ -214,7 +217,34 @@ pub const test_only_params = [_]ParamType{ }; pub const test_params = test_only_params ++ runtime_params_ ++ transpiler_params_ ++ base_params_; +fn loadGlobalBunfig(allocator: std.mem.Allocator, ctx: Command.Context, comptime cmd: Command.Tag) !void { + if (ctx.has_loaded_global_config) return; + + ctx.has_loaded_global_config = true; + + var config_buf: bun.PathBuffer = undefined; + if (getHomeConfigPath(&config_buf)) |path| { + try loadBunfig(allocator, true, path, ctx, comptime cmd); + } +} + pub fn loadConfigPath(allocator: std.mem.Allocator, auto_loaded: bool, config_path: [:0]const u8, ctx: Command.Context, comptime cmd: Command.Tag) !void { + if (comptime cmd.readGlobalConfig()) { + loadGlobalBunfig(allocator, ctx, cmd) catch |err| { + if (auto_loaded) return; + + Output.prettyErrorln("{}\nreading global config \"{s}\"", .{ + err, + config_path, + }); + Global.exit(1); + }; + } + + try 
loadBunfig(allocator, auto_loaded, config_path, ctx, cmd); +} + +fn loadBunfig(allocator: std.mem.Allocator, auto_loaded: bool, config_path: [:0]const u8, ctx: Command.Context, comptime cmd: Command.Tag) !void { const source = switch (bun.sys.File.toSource(config_path, allocator, .{ .convert_bom = true })) { .result => |s| s, .err => |err| { @@ -226,7 +256,6 @@ pub fn loadConfigPath(allocator: std.mem.Allocator, auto_loaded: bool, config_pa Global.exit(1); }, }; - js_ast.Stmt.Data.Store.create(); js_ast.Expr.Data.Store.create(); defer { @@ -238,6 +267,7 @@ pub fn loadConfigPath(allocator: std.mem.Allocator, auto_loaded: bool, config_pa ctx.log.level = original_level; } ctx.log.level = logger.Log.Level.warn; + ctx.debug.loaded_bunfig = true; try Bunfig.parse(allocator, &source, ctx, cmd); } @@ -291,7 +321,6 @@ pub fn loadConfig(allocator: std.mem.Allocator, user_config_path_: ?string, ctx: if (config_path_.len == 0) { return; } - defer ctx.debug.loaded_bunfig = true; var config_path: [:0]u8 = undefined; if (config_path_[0] == '/') { @memcpy(config_buf[0..config_path_.len], config_path_); @@ -778,6 +807,24 @@ pub fn parse(allocator: std.mem.Allocator, ctx: Command.Context, comptime cmd: C bun.jsc.RuntimeTranspilerCache.is_disabled = true; } + if (args.flag("--cpu-prof")) { + ctx.runtime_options.cpu_prof.enabled = true; + if (args.option("--cpu-prof-name")) |name| { + ctx.runtime_options.cpu_prof.name = name; + } + if (args.option("--cpu-prof-dir")) |dir| { + ctx.runtime_options.cpu_prof.dir = dir; + } + } else { + // Warn if --cpu-prof-name or --cpu-prof-dir is used without --cpu-prof + if (args.option("--cpu-prof-name")) |_| { + Output.warn("--cpu-prof-name requires --cpu-prof to be enabled", .{}); + } + if (args.option("--cpu-prof-dir")) |_| { + Output.warn("--cpu-prof-dir requires --cpu-prof to be enabled", .{}); + } + } + if (args.flag("--no-deprecation")) { Bun__Node__ProcessNoDeprecation = true; } diff --git a/src/cli/create_command.zig 
b/src/cli/create_command.zig index 7edff915f2..412c4eab6d 100644 --- a/src/cli/create_command.zig +++ b/src/cli/create_command.zig @@ -131,7 +131,7 @@ pub const ProgressBuf = struct { } pub fn pretty(comptime fmt: string, args: anytype) !string { - if (Output.enable_ansi_colors) { + if (Output.enable_ansi_colors_stdout) { return ProgressBuf.print(comptime Output.prettyFmt(fmt, true), args); } else { return ProgressBuf.print(comptime Output.prettyFmt(fmt, false), args); @@ -2212,7 +2212,7 @@ pub const Example = struct { ); async_http.client.flags.reject_unauthorized = env_loader.getTLSRejectUnauthorized(); - if (Output.enable_ansi_colors) { + if (Output.enable_ansi_colors_stdout) { async_http.client.progress_node = progress_node; } diff --git a/src/cli/init_command.zig b/src/cli/init_command.zig index e2ed22ead6..4f88e56bbf 100644 --- a/src/cli/init_command.zig +++ b/src/cli/init_command.zig @@ -42,7 +42,7 @@ pub const InitCommand = struct { extern fn Bun__ttySetMode(fd: i32, mode: i32) i32; fn processRadioButton(label: string, comptime Choices: type) !Choices { - const colors = Output.enable_ansi_colors; + const colors = Output.enable_ansi_colors_stdout; const choices = switch (colors) { inline else => |colors_comptime| comptime choices: { const choices_fields = bun.meta.EnumFields(Choices); diff --git a/src/cli/outdated_command.zig b/src/cli/outdated_command.zig index c3b742568d..df8162d0a3 100644 --- a/src/cli/outdated_command.zig +++ b/src/cli/outdated_command.zig @@ -69,7 +69,7 @@ pub const OutdatedCommand = struct { .ok => |ok| ok.lockfile, }; - switch (Output.enable_ansi_colors) { + switch (Output.enable_ansi_colors_stdout) { inline else => |enable_ansi_colors| { if (manager.options.filter_patterns.len > 0) { const filters = manager.options.filter_patterns; diff --git a/src/cli/pack_command.zig b/src/cli/pack_command.zig index f527bfc64b..a8564ea9fb 100644 --- a/src/cli/pack_command.zig +++ b/src/cli/pack_command.zig @@ -1635,7 +1635,7 @@ pub const 
PackCommand = struct { var node: *Progress.Node = undefined; if (log_level.showProgress()) { progress = .{}; - progress.supports_ansi_escape_codes = Output.enable_ansi_colors; + progress.supports_ansi_escape_codes = Output.enable_ansi_colors_stderr; node = progress.start("", pack_queue.count() + bundled_pack_queue.count() + 1); node.unit = " files"; } diff --git a/src/cli/package_manager_command.zig b/src/cli/package_manager_command.zig index cf296288c2..e50dcbfa33 100644 --- a/src/cli/package_manager_command.zig +++ b/src/cli/package_manager_command.zig @@ -103,7 +103,7 @@ pub const PackageManagerCommand = struct { \\ --quiet only output the tarball filename \\ bun pm bin print the path to bin folder \\ -g print the global path to bin folder - \\ bun pm ls list the dependency tree according to the current lockfile + \\ bun list list the dependency tree according to the current lockfile \\ --all list the entire dependency tree according to the current lockfile \\ bun pm why \ show dependency tree explaining why a package is installed \\ bun pm whoami print the current npm username @@ -158,7 +158,13 @@ pub const PackageManagerCommand = struct { }; defer ctx.allocator.free(cwd); - const subcommand = if (is_direct_whoami) "whoami" else getSubcommand(&pm.options.positionals); + var subcommand = if (is_direct_whoami) "whoami" else getSubcommand(&pm.options.positionals); + + // Normalize "list" to "ls" (handles both "bun list" and "bun pm list") + if (strings.eqlComptime(subcommand, "list")) { + subcommand = "ls"; + } + if (pm.options.global) { try pm.setupGlobalDir(ctx); } diff --git a/src/cli/publish_command.zig b/src/cli/publish_command.zig index 8951d05654..08cd1aada0 100644 --- a/src/cli/publish_command.zig +++ b/src/cli/publish_command.zig @@ -779,12 +779,12 @@ pub const PublishCommand = struct { const offset = 0; const padding = 1; - const horizontal = if (Output.enable_ansi_colors) "─" else "-"; - const vertical = if (Output.enable_ansi_colors) "│" else "|"; - 
const top_left = if (Output.enable_ansi_colors) "┌" else "|"; - const top_right = if (Output.enable_ansi_colors) "┐" else "|"; - const bottom_left = if (Output.enable_ansi_colors) "└" else "|"; - const bottom_right = if (Output.enable_ansi_colors) "┘" else "|"; + const horizontal = if (Output.enable_ansi_colors_stdout) "─" else "-"; + const vertical = if (Output.enable_ansi_colors_stdout) "│" else "|"; + const top_left = if (Output.enable_ansi_colors_stdout) "┌" else "|"; + const top_right = if (Output.enable_ansi_colors_stdout) "┐" else "|"; + const bottom_left = if (Output.enable_ansi_colors_stdout) "└" else "|"; + const bottom_right = if (Output.enable_ansi_colors_stdout) "┘" else "|"; const width = (padding * 2) + auth_url_str.len; diff --git a/src/cli/update_interactive_command.zig b/src/cli/update_interactive_command.zig index 7380fce640..a06308e400 100644 --- a/src/cli/update_interactive_command.zig +++ b/src/cli/update_interactive_command.zig @@ -96,19 +96,24 @@ pub const UpdateInteractiveCommand = struct { }; const new_package_json_source = try manager.allocator.dupe(u8, package_json_writer.ctx.writtenWithoutTrailingZero()); - defer manager.allocator.free(new_package_json_source); // Write the updated package.json const write_file = std.fs.cwd().createFile(package_json_path, .{}) catch |err| { + manager.allocator.free(new_package_json_source); Output.errGeneric("Failed to write package.json at {s}: {s}", .{ package_json_path, @errorName(err) }); return err; }; defer write_file.close(); write_file.writeAll(new_package_json_source) catch |err| { + manager.allocator.free(new_package_json_source); Output.errGeneric("Failed to write package.json at {s}: {s}", .{ package_json_path, @errorName(err) }); return err; }; + + // Update the cache so installWithManager sees the new package.json + // This is critical - without this, installWithManager will use the cached old version + package_json.*.source.contents = new_package_json_source; } pub fn exec(ctx: 
Command.Context) !void { @@ -1162,7 +1167,7 @@ pub const UpdateInteractiveCommand = struct { } fn processMultiSelect(state: *MultiSelectState, initial_terminal_size: TerminalSize) ![]bool { - const colors = Output.enable_ansi_colors; + const colors = Output.enable_ansi_colors_stdout; // Clear any previous progress output Output.print("\r\x1B[2K", .{}); // Clear entire line @@ -1427,7 +1432,7 @@ pub const UpdateInteractiveCommand = struct { const uses_default_registry = pkg.manager.options.scope.url_hash == Install.Npm.Registry.default_url_hash and pkg.manager.scopeForPackageName(pkg.name).url_hash == Install.Npm.Registry.default_url_hash; - const package_url = if (Output.enable_ansi_colors and uses_default_registry) + const package_url = if (Output.enable_ansi_colors_stdout and uses_default_registry) try std.fmt.allocPrint(bun.default_allocator, "https://npmjs.org/package/{s}/v/{s}", .{ pkg.name, brk: { if (selected) { if (pkg.use_latest) { @@ -1976,11 +1981,40 @@ fn updateNamedCatalog( } fn preserveVersionPrefix(original_version: string, new_version: string, allocator: std.mem.Allocator) !string { - if (original_version.len > 0) { - const first_char = original_version[0]; + if (original_version.len > 1) { + var orig_version = original_version; + var alias: ?string = null; + + // Preserve npm: prefix + if (strings.withoutPrefixIfPossibleComptime(original_version, "npm:")) |after_npm| { + if (strings.lastIndexOfChar(after_npm, '@')) |i| { + alias = after_npm[0..i]; + if (i + 2 < after_npm.len) { + orig_version = after_npm[i + 1 ..]; + } + } else { + alias = after_npm; + } + } + + // Preserve other version prefixes + const first_char = orig_version[0]; if (first_char == '^' or first_char == '~' or first_char == '>' or first_char == '<' or first_char == '=') { + const second_char = orig_version[1]; + if ((first_char == '>' or first_char == '<') and second_char == '=') { + if (alias) |a| { + return try std.fmt.allocPrint(allocator, "npm:{s}@{c}={s}", .{ a, first_char, 
new_version }); + } + return try std.fmt.allocPrint(allocator, "{c}={s}", .{ first_char, new_version }); + } + if (alias) |a| { + return try std.fmt.allocPrint(allocator, "npm:{s}@{c}{s}", .{ a, first_char, new_version }); + } return try std.fmt.allocPrint(allocator, "{c}{s}", .{ first_char, new_version }); } + if (alias) |a| { + return try std.fmt.allocPrint(allocator, "npm:{s}@{s}", .{ a, new_version }); + } } return try allocator.dupe(u8, new_version); } diff --git a/src/comptime_string_map.zig b/src/comptime_string_map.zig index 4d65cf870f..771e70782c 100644 --- a/src/comptime_string_map.zig +++ b/src/comptime_string_map.zig @@ -299,6 +299,17 @@ pub fn ComptimeStringMapWithKeyType(comptime KeyType: type, comptime V: type, co return null; } + + /// Lookup the first-defined string key for a given value. + /// + /// Linear search. + pub fn getKey(value: V) ?[]const KeyType { + inline for (kvs) |kv| { + if (kv.value == value) return kv.key; + } + + return null; + } }; } diff --git a/src/crash_handler.zig b/src/crash_handler.zig index d1d98ff074..4a8bfdae92 100644 --- a/src/crash_handler.zig +++ b/src/crash_handler.zig @@ -255,11 +255,11 @@ pub fn crashHandler( has_printed_message = true; } } else { - if (Output.enable_ansi_colors) { + if (Output.enable_ansi_colors_stderr) { writer.writeAll(Output.prettyFmt("", true)) catch std.posix.abort(); } writer.writeAll("oh no") catch std.posix.abort(); - if (Output.enable_ansi_colors) { + if (Output.enable_ansi_colors_stderr) { writer.writeAll(Output.prettyFmt(": multiple threads are crashing\n", true)) catch std.posix.abort(); } else { writer.writeAll(Output.prettyFmt(": multiple threads are crashing\n", true)) catch std.posix.abort(); @@ -267,13 +267,13 @@ pub fn crashHandler( } if (reason != .out_of_memory or debug_trace) { - if (Output.enable_ansi_colors) { + if (Output.enable_ansi_colors_stderr) { writer.writeAll(Output.prettyFmt("", true)) catch std.posix.abort(); } writer.writeAll("panic") catch std.posix.abort(); - 
if (Output.enable_ansi_colors) { + if (Output.enable_ansi_colors_stderr) { writer.writeAll(Output.prettyFmt("", true)) catch std.posix.abort(); } @@ -294,7 +294,7 @@ pub fn crashHandler( } writer.writeAll(": ") catch std.posix.abort(); - if (Output.enable_ansi_colors) { + if (Output.enable_ansi_colors_stderr) { writer.writeAll(Output.prettyFmt("", true)) catch std.posix.abort(); } writer.print("{}\n", .{reason}) catch std.posix.abort(); @@ -385,7 +385,7 @@ pub fn crashHandler( if (!has_printed_message) { has_printed_message = true; writer.writeAll("oh no") catch std.posix.abort(); - if (Output.enable_ansi_colors) { + if (Output.enable_ansi_colors_stderr) { writer.writeAll(Output.prettyFmt(": ", true)) catch std.posix.abort(); } else { writer.writeAll(Output.prettyFmt(": ", true)) catch std.posix.abort(); @@ -435,7 +435,7 @@ pub fn crashHandler( } } - if (Output.enable_ansi_colors) { + if (Output.enable_ansi_colors_stderr) { writer.print(Output.prettyFmt("", true), .{}) catch std.posix.abort(); } @@ -452,7 +452,7 @@ pub fn crashHandler( writer.writeAll("\n") catch std.posix.abort(); } - if (Output.enable_ansi_colors) { + if (Output.enable_ansi_colors_stderr) { writer.writeAll(Output.prettyFmt("\n", true)) catch std.posix.abort(); } else { writer.writeAll("\n") catch std.posix.abort(); @@ -957,7 +957,7 @@ pub fn printMetadata(writer: anytype) !void { } } - if (Output.enable_ansi_colors) { + if (Output.enable_ansi_colors_stderr) { try writer.writeAll(Output.prettyFmt("", true)); } @@ -1045,7 +1045,7 @@ pub fn printMetadata(writer: anytype) !void { try writer.writeAll("\n"); } - if (Output.enable_ansi_colors) { + if (Output.enable_ansi_colors_stderr) { try writer.writeAll(Output.prettyFmt("", true)); } try writer.writeAll("\n"); diff --git a/src/deps/uws/WebSocket.zig b/src/deps/uws/WebSocket.zig index 31ea914e75..f2256393f6 100644 --- a/src/deps/uws/WebSocket.zig +++ b/src/deps/uws/WebSocket.zig @@ -49,6 +49,9 @@ pub fn NewWebSocket(comptime ssl_flag: c_int) type { 
pub fn isSubscribed(this: *WebSocket, topic: []const u8) bool { return c.uws_ws_is_subscribed(ssl_flag, this.raw(), topic.ptr, topic.len); } + pub fn getTopicsAsJSArray(this: *WebSocket, globalObject: *JSGlobalObject) JSValue { + return c.uws_ws_get_topics_as_js_array(ssl_flag, this.raw(), globalObject); + } pub fn publish(this: *WebSocket, topic: []const u8, message: []const u8) bool { return c.uws_ws_publish(ssl_flag, this.raw(), topic.ptr, topic.len, message.ptr, message.len); @@ -162,6 +165,12 @@ pub const AnyWebSocket = union(enum) { .tcp => c.uws_ws_is_subscribed(0, this.raw(), topic.ptr, topic.len), }; } + pub fn getTopicsAsJSArray(this: AnyWebSocket, globalObject: *JSGlobalObject) JSValue { + return switch (this) { + .ssl => c.uws_ws_get_topics_as_js_array(1, this.raw(), globalObject), + .tcp => c.uws_ws_get_topics_as_js_array(0, this.raw(), globalObject), + }; + } // pub fn iterateTopics(this: AnyWebSocket) { // return uws_ws_iterate_topics(ssl_flag, this.raw(), callback: ?*const fn ([*c]const u8, usize, ?*anyopaque) callconv(.C) void, user_data: ?*anyopaque) void; // } @@ -338,6 +347,7 @@ pub const c = struct { pub extern fn uws_ws_unsubscribe(ssl: i32, ws: ?*RawWebSocket, topic: [*c]const u8, length: usize) bool; pub extern fn uws_ws_is_subscribed(ssl: i32, ws: ?*RawWebSocket, topic: [*c]const u8, length: usize) bool; pub extern fn uws_ws_iterate_topics(ssl: i32, ws: ?*RawWebSocket, callback: ?*const fn ([*c]const u8, usize, ?*anyopaque) callconv(.C) void, user_data: ?*anyopaque) void; + pub extern fn uws_ws_get_topics_as_js_array(ssl: i32, ws: *RawWebSocket, globalObject: *JSGlobalObject) JSValue; pub extern fn uws_ws_publish(ssl: i32, ws: ?*RawWebSocket, topic: [*]const u8, topic_length: usize, message: [*]const u8, message_length: usize) bool; pub extern fn uws_ws_publish_with_options(ssl: i32, ws: ?*RawWebSocket, topic: [*c]const u8, topic_length: usize, message: [*c]const u8, message_length: usize, opcode: Opcode, compress: bool) bool; pub extern fn 
uws_ws_get_buffered_amount(ssl: i32, ws: ?*RawWebSocket) usize; @@ -351,6 +361,9 @@ const bun = @import("bun"); const std = @import("std"); const uws_app_t = @import("./App.zig").uws_app_t; +const JSGlobalObject = bun.jsc.JSGlobalObject; +const JSValue = bun.jsc.JSValue; + const uws = bun.uws; const NewApp = uws.NewApp; const Opcode = uws.Opcode; diff --git a/src/fmt.zig b/src/fmt.zig index a3faac9a6d..df5266eeaa 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -707,12 +707,6 @@ pub const QuickAndDirtyJavaScriptSyntaxHighlighter = struct { check_for_unhighlighted_write: bool = true, redact_sensitive_information: bool = false, - - pub const default: Options = .{ - .enable_colors = Output.enable_ansi_colors, - .check_for_no_highlighting = true, - .redact_sensitive_information = false, - }; }; const ColorCode = enum { diff --git a/src/install/PackageInstaller.zig b/src/install/PackageInstaller.zig index fa615f787b..3e5e633bbf 100644 --- a/src/install/PackageInstaller.zig +++ b/src/install/PackageInstaller.zig @@ -369,7 +369,7 @@ pub const PackageInstaller = struct { const args = .{ name, @errorName(err) }; if (log_level.showProgress()) { - switch (Output.enable_ansi_colors) { + switch (Output.enable_ansi_colors_stderr) { inline else => |enable_ansi_colors| { this.progress.log(comptime Output.prettyFmt(fmt, enable_ansi_colors), args); }, @@ -452,7 +452,7 @@ pub const PackageInstaller = struct { const args = .{ package_name, @errorName(err) }; if (log_level.showProgress()) { - switch (Output.enable_ansi_colors) { + switch (Output.enable_ansi_colors_stderr) { inline else => |enable_ansi_colors| { this.progress.log(comptime Output.prettyFmt(fmt, enable_ansi_colors), args); }, @@ -1328,7 +1328,7 @@ pub const PackageInstaller = struct { const args = .{ folder_name, @errorName(err) }; if (log_level.showProgress()) { - switch (Output.enable_ansi_colors) { + switch (Output.enable_ansi_colors_stderr) { inline else => |enable_ansi_colors| { this.progress.log(comptime 
Output.prettyFmt(fmt, enable_ansi_colors), args); }, diff --git a/src/install/PackageManager/ProgressStrings.zig b/src/install/PackageManager/ProgressStrings.zig index 308ae7f5bc..eabe5e40fd 100644 --- a/src/install/PackageManager/ProgressStrings.zig +++ b/src/install/PackageManager/ProgressStrings.zig @@ -25,23 +25,23 @@ pub const ProgressStrings = struct { pub const script_emoji: string = " ⚙️ "; pub inline fn download() string { - return if (Output.isEmojiEnabled()) download_with_emoji else download_no_emoji; + return if (Output.enable_ansi_colors_stderr) download_with_emoji else download_no_emoji; } pub inline fn save() string { - return if (Output.isEmojiEnabled()) save_with_emoji else save_no_emoji; + return if (Output.enable_ansi_colors_stderr) save_with_emoji else save_no_emoji; } pub inline fn extract() string { - return if (Output.isEmojiEnabled()) extract_with_emoji else extract_no_emoji; + return if (Output.enable_ansi_colors_stderr) extract_with_emoji else extract_no_emoji; } pub inline fn install() string { - return if (Output.isEmojiEnabled()) install_with_emoji else install_no_emoji; + return if (Output.enable_ansi_colors_stderr) install_with_emoji else install_no_emoji; } pub inline fn script() string { - return if (Output.isEmojiEnabled()) script_with_emoji else script_no_emoji; + return if (Output.enable_ansi_colors_stderr) script_with_emoji else script_no_emoji; } }; @@ -52,7 +52,7 @@ pub fn setNodeName( emoji: string, comptime is_first: bool, ) void { - if (Output.isEmojiEnabled()) { + if (Output.enable_ansi_colors_stderr) { if (is_first) { @memcpy(this.progress_name_buf[0..emoji.len], emoji); @memcpy(this.progress_name_buf[emoji.len..][0..name.len], name); diff --git a/src/install/PackageManager/install_with_manager.zig b/src/install/PackageManager/install_with_manager.zig index cb2406913c..62615573ae 100644 --- a/src/install/PackageManager/install_with_manager.zig +++ b/src/install/PackageManager/install_with_manager.zig @@ -947,7 +947,7 @@ 
fn printInstallSummary( // We deliberately do not disable it after this. Output.enableBuffering(); const writer = Output.writerBuffered(); - switch (Output.enable_ansi_colors) { + switch (Output.enable_ansi_colors_stdout) { inline else => |enable_ansi_colors| { try Lockfile.Printer.Tree.print(&printer, this, @TypeOf(writer), writer, enable_ansi_colors, log_level); }, diff --git a/src/install/PackageManager/security_scanner.zig b/src/install/PackageManager/security_scanner.zig index f1ad798378..ad1f9e0f7a 100644 --- a/src/install/PackageManager/security_scanner.zig +++ b/src/install/PackageManager/security_scanner.zig @@ -982,7 +982,7 @@ pub const SecurityScanSubprocess = struct { }, } } else if (this.manager.options.log_level != .silent and duration >= 1000) { - const maybeHourglass = if (Output.isEmojiEnabled()) "⏳" else ""; + const maybeHourglass = if (Output.enable_ansi_colors_stderr) "⏳" else ""; if (packages_scanned == 1) { Output.prettyErrorln("{s}[{s}] Scanning 1 package took {d}ms", .{ maybeHourglass, security_scanner, duration }); } else { diff --git a/src/install/dependency.zig b/src/install/dependency.zig index e92f49f051..6e3ce8e255 100644 --- a/src/install/dependency.zig +++ b/src/install/dependency.zig @@ -173,45 +173,6 @@ pub inline fn isSCPLikePath(dependency: string) bool { return false; } -/// `isGitHubShorthand` from npm -/// https://github.com/npm/cli/blob/22731831e22011e32fa0ca12178e242c2ee2b33d/node_modules/hosted-git-info/lib/from-url.js#L6 -pub inline fn isGitHubRepoPath(dependency: string) bool { - // Shortest valid expression: u/r - if (dependency.len < 3) return false; - - var hash_index: usize = 0; - - // the branch could have slashes - // - oven-sh/bun#brach/name - var first_slash_index: usize = 0; - - for (dependency, 0..) 
|c, i| { - switch (c) { - '/' => { - if (i == 0) return false; - if (first_slash_index == 0) { - first_slash_index = i; - } - }, - '#' => { - if (i == 0) return false; - if (hash_index > 0) return false; - if (first_slash_index == 0) return false; - hash_index = i; - }, - // Not allowed in username - '.', '_' => { - if (first_slash_index == 0) return false; - }, - // Must be alphanumeric - '-', 'a'...'z', 'A'...'Z', '0'...'9' => {}, - else => return false, - } - } - - return hash_index != dependency.len - 1 and first_slash_index > 0 and first_slash_index != dependency.len - 1; -} - /// Github allows for the following format of URL: /// https://github.com///tarball/ /// This is a legacy (but still supported) method of retrieving a tarball of an @@ -533,6 +494,10 @@ pub const Version = struct { return .folder; } + // Allocator necessary for slow paths. + var stackFallback = std.heap.stackFallback(1024, bun.default_allocator); + const allocator = stackFallback.get(); + switch (dependency[0]) { // =1 // >1.2 @@ -599,11 +564,20 @@ pub const Version = struct { if (url.len > 2) { switch (url[0]) { ':' => { + // TODO(markovejnovic): This check for testing whether the URL + // is a Git URL shall be moved to npm_package_arg.zig when that + // is implemented. 
if (strings.hasPrefixComptime(url, "://")) { url = url["://".len..]; if (strings.hasPrefixComptime(url, "github.com/")) { - if (isGitHubRepoPath(url["github.com/".len..])) return .github; + if (hosted_git_info.isGitHubShorthand(url["github.com/".len..])) return .github; } + + if (hosted_git_info.HostedGitInfo.fromUrl(allocator, dependency) catch null) |info| { + defer info.deinit(); + return hgiToTag(info); + } + return .git; } }, @@ -633,15 +607,21 @@ pub const Version = struct { else => false, }) { if (strings.hasPrefixComptime(url, "github.com/")) { - if (isGitHubRepoPath(url["github.com/".len..])) return .github; + if (hosted_git_info.isGitHubShorthand(url["github.com/".len..])) return .github; } + + if (hosted_git_info.HostedGitInfo.fromUrl(allocator, dependency) catch null) |info| { + defer info.deinit(); + return hgiToTag(info); + } + return .git; } } }, 'h' => { if (strings.hasPrefixComptime(url, "hub:")) { - if (isGitHubRepoPath(url["hub:".len..])) return .github; + if (hosted_git_info.isGitHubShorthand(url["hub:".len..])) return .github; } }, else => {}, @@ -673,11 +653,12 @@ pub const Version = struct { if (strings.hasPrefixComptime(url, "github.com/")) { const path = url["github.com/".len..]; if (isGitHubTarballPath(path)) return .tarball; - if (isGitHubRepoPath(path)) return .github; + if (hosted_git_info.isGitHubShorthand(path)) return .github; } - if (strings.indexOfChar(url, '.')) |dot| { - if (Repository.Hosts.has(url[0..dot])) return .git; + if (hosted_git_info.HostedGitInfo.fromUrl(allocator, dependency) catch null) |info| { + defer info.deinit(); + return hgiToTag(info); } return .tarball; @@ -698,9 +679,11 @@ pub const Version = struct { url = url["git@".len..]; } - if (strings.indexOfChar(url, '.')) |dot| { - if (Repository.Hosts.has(url[0..dot])) return .git; + if (hosted_git_info.HostedGitInfo.fromUrl(allocator, dependency) catch null) |info| { + defer info.deinit(); + return hgiToTag(info); } + return .git; } } }, @@ -732,7 +715,7 @@ pub 
const Version = struct { // virt@example.com:repo.git 'v' => { if (isTarball(dependency)) return .tarball; - if (isGitHubRepoPath(dependency)) return .github; + if (hosted_git_info.isGitHubShorthand(dependency)) return .github; if (isSCPLikePath(dependency)) return .git; if (dependency.len == 1) return .dist_tag; return switch (dependency[1]) { @@ -765,11 +748,20 @@ pub const Version = struct { // foo.tgz // bar.tar.gz if (isTarball(dependency)) return .tarball; + // user/repo // user/repo#main - if (isGitHubRepoPath(dependency)) return .github; + if (hosted_git_info.isGitHubShorthand(dependency)) return .github; + // git@example.com:path/to/repo.git - if (isSCPLikePath(dependency)) return .git; + if (isSCPLikePath(dependency)) { + if (hosted_git_info.HostedGitInfo.fromUrl(allocator, dependency) catch null) |info| { + defer info.deinit(); + return hgiToTag(info); + } + return .git; + } + // beta if (!strings.containsChar(dependency, '|')) { @@ -785,7 +777,14 @@ pub const Version = struct { return .js_undefined; } - const tag = try Tag.fromJS(globalObject, arguments[0]) orelse return .js_undefined; + // Convert JSValue to string slice + const dependency_str = try arguments[0].toBunString(globalObject); + defer dependency_str.deref(); + var as_utf8 = dependency_str.toUTF8(bun.default_allocator); + defer as_utf8.deinit(); + + // Infer the tag from the dependency string + const tag = Tag.infer(as_utf8.slice()); var str = bun.String.init(@tagName(tag)); return str.transferToJS(globalObject); } @@ -1041,70 +1040,51 @@ pub fn parseWithTag( }; }, .github => { - var from_url = false; - var input = dependency; - if (strings.hasPrefixComptime(input, "github:")) { - input = input["github:".len..]; - } else if (strings.hasPrefixComptime(input, "git://github.com/")) { - input = input["git://github.com/".len..]; - from_url = true; - } else { - if (strings.hasPrefixComptime(input, "git+")) { - input = input["git+".len..]; - } - if (strings.hasPrefixComptime(input, "http")) { - var 
url = input["http".len..]; - if (url.len > 2) { - switch (url[0]) { - ':' => { - if (strings.hasPrefixComptime(url, "://")) { - url = url["://".len..]; - } - }, - 's' => { - if (strings.hasPrefixComptime(url, "s://")) { - url = url["s://".len..]; - } - }, - else => {}, - } - if (strings.hasPrefixComptime(url, "github.com/")) { - input = url["github.com/".len..]; - from_url = true; - } - } - } - } + const info = bun.handleOom( + hosted_git_info.HostedGitInfo.fromUrl(allocator, dependency), + ) catch { + return null; + } orelse { + return null; + }; + defer info.deinit(); - if (comptime Environment.allow_assert) bun.assert(isGitHubRepoPath(input)); + // Now we have parsed info, we need to find these substrings in the original dependency + // to create String objects that point to the original buffer + const owner_str = info.user orelse ""; + const repo_str = info.project; + const committish_str = info.committish orelse ""; - var hash_index: usize = 0; - var slash_index: usize = 0; - for (input, 0..) |c, i| { - switch (c) { - '/' => { - slash_index = i; - }, - '#' => { - hash_index = i; - break; - }, - else => {}, - } - } + // Find owner in dependency string + const owner_idx = strings.indexOf(dependency, owner_str); + const owner = if (owner_idx) |idx| + sliced.sub(dependency[idx .. idx + owner_str.len]).value() + else + String.from(""); - var repo = if (hash_index == 0) input[slash_index + 1 ..] else input[slash_index + 1 .. hash_index]; - if (from_url and strings.endsWithComptime(repo, ".git")) { - repo = repo[0 .. repo.len - ".git".len]; - } + // Find repo in dependency string + const repo_idx = strings.indexOf(dependency, repo_str); + const repo = if (repo_idx) |idx| + sliced.sub(dependency[idx .. 
idx + repo_str.len]).value() + else + String.from(""); + + // Find committish in dependency string + const committish = if (committish_str.len > 0) blk: { + const committish_idx = strings.indexOf(dependency, committish_str); + break :blk if (committish_idx) |idx| + sliced.sub(dependency[idx .. idx + committish_str.len]).value() + else + String.from(""); + } else String.from(""); return .{ .literal = sliced.value(), .value = .{ .github = .{ - .owner = sliced.sub(input[0..slash_index]).value(), - .repo = sliced.sub(repo).value(), - .committish = if (hash_index == 0) String.from("") else sliced.sub(input[hash_index + 1 ..]).value(), + .owner = owner, + .repo = repo, + .committish = committish, }, }, .tag = .github, @@ -1454,9 +1434,17 @@ pub const Behavior = packed struct(u8) { } }; +fn hgiToTag(info: hosted_git_info.HostedGitInfo) Version.Tag { + return switch (info.host_provider) { + .github => if (info.default_representation == .shortcut) .github else .git, + .bitbucket, .gitlab, .gist, .sourcehut => .git, + }; +} + const string = []const u8; const Environment = @import("../env.zig"); +const hosted_git_info = @import("./hosted_git_info.zig"); const std = @import("std"); const Repository = @import("./repository.zig").Repository; diff --git a/src/install/hosted_git_info.zig b/src/install/hosted_git_info.zig new file mode 100644 index 0000000000..93cb88cef4 --- /dev/null +++ b/src/install/hosted_git_info.zig @@ -0,0 +1,1750 @@ +//! Resolves Git URLs and metadata. +//! +//! This library mimics https://www.npmjs.com/package/hosted-git-info. At the time of writing, the +//! latest version is 9.0.0. Although @markovejnovic believes there are bugs in the original +//! library, this library aims to be bug-for-bug compatible with the original. +//! +//! One thing that's really notable is that hosted-git-info supports extensions and we currently +//! offer no support for extensions. This could be added in the future if necessary. +//! +//! # Core Concepts +//! +//! 
The goal of this library is to transform a Git URL or a "shortcut" (which is a shorthand for a +//! longer URL) into a structured representation of the relevant Git repository. +//! +//! ## Shortcuts +//! +//! A shortcut is a shorthand for a longer URL. For example, `github:user/repo` is a shortcut which +//! resolves to a full Github URL. `gitlab:user/repo` is another example of a shortcut. +//! +//! # Types +//! +//! This library revolves around a couple core types which are briefly described here. +//! +//! ## `HostedGitInfo` +//! +//! This is the main API point of this library. It encapsulates information about a Git repository. +//! To parse URLs into this structure, use the `fromUrl` member function. +//! +//! ## `HostProvider` +//! +//! This enumeration defines all the known Git host providers. Each provider has slightly different +//! properties which need to be accounted for. Further details are provided in its documentation. +//! +//! ## `UrlProtocol` +//! +//! This is a type that encapsulates the different types of protocols that a URL may have. This +//! includes three different cases: +//! +//! - `well_defined`: A protocol which is directly supported by this library. +//! - `custom`: A protocol which is not known by this library, but is specified in the URL. +//! TODO(markovejnovic): How is this handled? +//! - `unknown`: A protocol which is not specified in the URL. +//! +//! ## `WellDefinedProtocol` +//! +//! This type represents the set of known protocols by this library. Each protocol has slightly +//! different properties which need to be accounted for. +//! +//! It's noteworthy that `WellDefinedProtocol` doesn't refer to "true" protocols, but includes fake +//! tags like `github:` which are handled as "shortcuts" by this library. + +/// Represents how a URL should be reported when formatting it as a string. +/// +/// Input strings may be given in any format and they may be formatted in any format. 
If you wish +/// to format a URL in a specific format, you can use its `format*` methods. However, each input +/// string has a "default" representation which is used when calling `toString()`. Depending on the +/// input, the default representation may be different. +const Representation = enum { + /// foo/bar + shortcut, + /// git+ssh://git@domain/user/project.git#committish + sshurl, + /// ssh://domain/user/project.git#committish + ssh, + /// https://domain/user/project.git#committish + https, + /// git://domain/user/project.git#committish + git, + /// http://domain/user/project.git#committish + http, +}; + +pub const HostedGitInfo = struct { + const Self = @This(); + + committish: ?[]const u8, + project: []const u8, + user: ?[]const u8, + host_provider: HostProvider, + default_representation: Representation, + + _memory_buffer: []const u8, + _allocator: std.mem.Allocator, + + /// Helper function to decode a percent-encoded string and append it to a StringBuilder. + /// Returns the decoded slice and updates the StringBuilder's length. + /// + /// The reason we need to do this is because we get URLs like github:user%20name/repo and we + /// need to decode them to 'user name/repo'. It would be nice if we could get all the + /// functionality of jsc.URL WITHOUT the percent-encoding, but alas, we cannot. And we need the + /// jsc.URL functionality for parsing, validating and punycode-decoding the URL. + /// + /// Therefore, we use this function to first take a URL string, encode it into a *jsc.URL and + /// then decode it back to a normal string. Kind of a lot of work, but it works. 
+ fn decodeAndAppend( + sb: *bun.StringBuilder, + input: []const u8, + ) error{ OutOfMemory, InvalidURL }![]const u8 { + const writable = sb.writable(); + var stream = std.io.fixedBufferStream(writable); + const decoded_len = PercentEncoding.decode( + @TypeOf(stream.writer()), + stream.writer(), + input, + ) catch { + return error.InvalidURL; + }; + sb.len += decoded_len; + return writable[0..decoded_len]; + } + + fn copyFrom( + committish: ?[]const u8, + project: []const u8, + user: ?[]const u8, + host_provider: HostProvider, + default_representation: Representation, + allocator: std.mem.Allocator, + ) error{ OutOfMemory, InvalidURL }!Self { + var sb = bun.StringBuilder{}; + + if (user) |u| sb.count(u); + sb.count(project); + if (committish) |c| sb.count(c); + + sb.allocate(allocator) catch return error.OutOfMemory; + + // Decode user, project, committish while copying + const user_part = if (user) |u| try decodeAndAppend(&sb, u) else null; + const project_part = try decodeAndAppend(&sb, project); + const committish_part = if (committish) |c| try decodeAndAppend(&sb, c) else null; + + const owned_buffer = sb.allocatedSlice(); + + return .{ + .committish = committish_part, + .project = project_part, + .user = user_part, + .host_provider = host_provider, + .default_representation = default_representation, + ._memory_buffer = owned_buffer, + ._allocator = allocator, + }; + } + + /// Initialize a HostedGitInfo from an extracted structure. + /// Takes ownership of the extracted structure. 
+ fn moveFromExtracted( + extracted: *HostProvider.Config.formatters.extract.Result, + host_provider: HostProvider, + default_representation: Representation, + ) Self { + const moved = extracted.move(); + return .{ + .committish = extracted.committish, + .project = extracted.project, + .user = extracted.user, + .host_provider = host_provider, + .default_representation = default_representation, + ._memory_buffer = moved.buffer, + ._allocator = moved.allocator, + }; + } + + /// Clean up owned memory + pub fn deinit(self: *const Self) void { + self._allocator.free(self._memory_buffer); + } + + /// Convert this HostedGitInfo to a JavaScript object + pub fn toJS(self: *const Self, go: *jsc.JSGlobalObject) jsc.JSValue { + const obj = jsc.JSValue.createEmptyObject(go, 6); + obj.put( + go, + jsc.ZigString.static("type"), + bun.String.fromBytes(self.host_provider.typeStr()).toJS(go), + ); + obj.put( + go, + jsc.ZigString.static("domain"), + bun.String.fromBytes(self.host_provider.domain()).toJS(go), + ); + obj.put( + go, + jsc.ZigString.static("project"), + bun.String.fromBytes(self.project).toJS(go), + ); + obj.put( + go, + jsc.ZigString.static("user"), + if (self.user) |user| bun.String.fromBytes(user).toJS(go) else .null, + ); + obj.put( + go, + jsc.ZigString.static("committish"), + if (self.committish) |committish| + bun.String.fromBytes(committish).toJS(go) + else + .null, + ); + obj.put( + go, + jsc.ZigString.static("default"), + bun.String.fromBytes(@tagName(self.default_representation)).toJS(go), + ); + + return obj; + } + + pub const StringPair = struct { + save_spec: []const u8, + fetch_spec: ?[]const u8, + }; + + /// Given a URL-like (including shortcuts) string, parses it into a HostedGitInfo structure. + /// The HostedGitInfo is valid only for as long as `git_url` is valid. 
+ pub fn fromUrl( + allocator: std.mem.Allocator, + git_url: []const u8, + ) error{ OutOfMemory, InvalidURL }!?Self { + // git_url_mut may carry two ownership semantics: + // - It aliases `git_url`, in which case it must not be freed. + // - It actually points to a new allocation, in which case it must be freed. + var git_url_mut = git_url; + defer if (git_url.ptr != git_url_mut.ptr) allocator.free(git_url_mut); + + if (isGitHubShorthand(git_url)) { + // In this case we have to prefix the url with `github:`. + // + // NOTE(markovejnovic): I don't exactly understand why this is treated specially. + // + // TODO(markovejnovic): Perhaps we can avoid this allocation... + // This one seems quite easy to get rid of. + git_url_mut = bun.handleOom(bun.strings.concat(allocator, &.{ "github:", git_url })); + } + + const parsed = parseUrl(allocator, git_url_mut) catch { + return null; + }; + defer parsed.url.deinit(); + + const host_provider = switch (parsed.proto) { + .well_formed => |p| p.hostProvider() orelse HostProvider.fromUrlDomain(parsed.url), + .unknown => HostProvider.fromUrlDomain(parsed.url), + .custom => HostProvider.fromUrl(parsed.url), + } orelse return null; + + const is_shortcut = parsed.proto == .well_formed and parsed.proto.well_formed.isShortcut(); + if (!is_shortcut) { + var extracted = try host_provider.extract(allocator, parsed.url) orelse return null; + return HostedGitInfo.moveFromExtracted( + &extracted, + host_provider, + parsed.proto.defaultRepresentation(), + ); + } + + // Shortcut path: github:user/repo, gitlab:user/repo, etc. 
(from-url.js line 68-96) + const pathname_owned = try parsed.url.pathname().toOwnedSlice(allocator); + defer allocator.free(pathname_owned); + + // Strip leading / (from-url.js line 69) + var pathname = bun.strings.trimPrefixComptime(u8, pathname_owned, "/"); + + // Strip auth (from-url.js line 70-74) + if (bun.strings.indexOfChar(pathname, '@')) |first_at| { + pathname = pathname[first_at + 1 ..]; + } + + // extract user and project from pathname (from-url.js line 76-86) + var user_part: ?[]const u8 = null; + const project_part: []const u8 = blk: { + if (bun.strings.lastIndexOfChar(pathname, '/')) |last_slash| { + const user_str = pathname[0..last_slash]; + // We want nulls only, never empty strings (from-url.js line 79-82) + if (user_str.len > 0) { + user_part = user_str; + } + break :blk pathname[last_slash + 1 ..]; + } else { + break :blk pathname; + } + }; + + // Strip .git suffix (from-url.js line 88-90) + const project_trimmed = bun.strings.trimSuffixComptime(project_part, ".git"); + + // Get committish from URL fragment (from-url.js line 92-94) + const fragment = try parsed.url.fragmentIdentifier().toOwnedSlice(allocator); + defer allocator.free(fragment); + const committish: ?[]const u8 = if (fragment.len > 0) fragment else null; + + // copyFrom will URL-decode user, project, and committish + return try HostedGitInfo.copyFrom( + committish, + project_trimmed, + user_part, + host_provider, + .shortcut, // Shortcuts always use shortcut representation + allocator, + ); + } +}; + +/// Handles input like git:github.com:user/repo and inserting the // after the first : if necessary +/// +/// May error with `error.InvalidGitUrl` if the URL is not valid. +/// +/// Note that this may or may not allocate but it manages its own memory. 
+fn parseUrl(allocator: std.mem.Allocator, npa_str: []const u8) error{ InvalidGitUrl, OutOfMemory }!struct { + url: *jsc.URL, + proto: UrlProtocol, +} { + // Certain users can provide values like user:password@github.com:foo/bar and we want to + // "correct" the protocol to be git+ssh://user:password@github.com:foo/bar + var proto_pair = normalizeProtocol(npa_str); + defer proto_pair.deinit(); + + // TODO(markovejnovic): We might be able to avoid this allocation if we rework how jsc.URL + // accepts strings. + const maybe_url = proto_pair.toUrl(allocator); + if (maybe_url) |url| return .{ .url = url, .proto = proto_pair.protocol }; + + // Now that may fail, if the URL is not nicely formatted. In that case, we try to correct the + // URL and parse it. + var corrected = try correctUrl(&proto_pair, allocator); + defer corrected.deinit(); + const corrected_url = corrected.toUrl(allocator); + if (corrected_url) |url| return .{ .url = url, .proto = corrected.protocol }; + + // Otherwise, we complain. + return error.InvalidGitUrl; +} + +/// Enumeration of possible URL protocols. +pub const WellDefinedProtocol = enum { + const Self = @This(); + + git, + git_plus_file, + git_plus_ftp, + git_plus_http, + git_plus_https, + git_plus_rsync, + git_plus_ssh, + http, + https, + ssh, + + // Non-standard protocols. + github, + bitbucket, + gitlab, + gist, + sourcehut, + + /// Mapping from protocol string (without colon) to WellDefinedProtocol. 
+ pub const strings = bun.ComptimeStringMap(Self, .{ + .{ "bitbucket", .bitbucket }, + .{ "gist", .gist }, + .{ "git+file", .git_plus_file }, + .{ "git+ftp", .git_plus_ftp }, + .{ "git+http", .git_plus_http }, + .{ "git+https", .git_plus_https }, + .{ "git+rsync", .git_plus_rsync }, + .{ "git+ssh", .git_plus_ssh }, + .{ "git", .git }, + .{ "github", .github }, + .{ "gitlab", .gitlab }, + .{ "http", .http }, + .{ "https", .https }, + .{ "sourcehut", .sourcehut }, + .{ "ssh", .ssh }, + }); + + /// Look up a protocol from a string that includes the trailing colon (e.g., "https:"). + /// This method strips the colon before looking up in the strings map. + pub fn fromStringWithColon(protocol_with_colon: []const u8) ?Self { + return if (protocol_with_colon.len == 0) + return null + else + strings.get(bun.strings.trimSuffixComptime(protocol_with_colon, ":")); + } + + /// Maximum length of any protocol string in the strings map (computed at compile time). + pub const max_protocol_length: comptime_int = blk: { + var max: usize = 0; + for (strings.kvs) |kv| { + if (kv.key.len > max) { + max = kv.key.len; + } + } + break :blk max; + }; + + /// Buffer type for holding a protocol string with colon (e.g., "git+rsync:"). + /// Sized to hold the longest protocol name plus one character for the colon. + pub const StringWithColonBuffer = [max_protocol_length + 1]u8; + + /// Get the protocol string with colon (e.g., "https:") for a given protocol enum. + /// Takes a buffer pointer to hold the result. + /// Returns a slice into that buffer containing the protocol string with colon. + pub fn toStringWithColon(self: Self, buf: *StringWithColonBuffer) []const u8 { + // Look up the protocol string (without colon) from the map + const protocol_str = strings.getKey(self).?; + + // Copy to buffer and append colon + @memcpy(buf[0..protocol_str.len], protocol_str); + buf[protocol_str.len] = ':'; + return buf[0 .. 
protocol_str.len + 1]; + } + + /// The set of characters that must appear between . + /// For example, in `git+ssh://user@host:repo`, the `//` is the magic string. Some protocols + /// don't support this, for example `github:user/repo` is valid. + /// + /// Kind of arbitrary and implemented to match hosted-git-info's behavior. + fn protocolResourceIdentifierConcatenationToken(self: Self) []const u8 { + return switch (self) { + .git, + .git_plus_file, + .git_plus_ftp, + .git_plus_http, + .git_plus_https, + .git_plus_rsync, + .git_plus_ssh, + .http, + .https, + .ssh, + => "//", + .github, .bitbucket, .gitlab, .gist, .sourcehut => "", + }; + } + + /// Determine the default representation for this protocol. + /// Mirrors the logic in from-url.js line 110. + fn defaultRepresentation(self: Self) Representation { + return switch (self) { + .git_plus_ssh, .ssh, .git_plus_http => .sshurl, + .git_plus_https => .https, + .git_plus_file, .git_plus_ftp, .git_plus_rsync, .git => .git, + .http => .http, + .https => .https, + .github, .bitbucket, .gitlab, .gist, .sourcehut => .shortcut, + }; + } + + /// Certain protocols will have associated host providers. This method returns the associated + /// host provider, if one exists. + fn hostProvider(self: Self) ?HostProvider { + return switch (self) { + .github => .github, + .bitbucket => .bitbucket, + .gitlab => .gitlab, + .gist => .gist, + .sourcehut => .sourcehut, + else => null, + }; + } + + fn isShortcut(self: Self) bool { + return switch (self) { + .github, .bitbucket, .gitlab, .gist, .sourcehut => true, + else => false, + }; + } +}; + +/// Test whether the given node-package-arg string is a GitHub shorthand. +/// +/// This mirrors the implementation of hosted-git-info, though it is significantly faster. +pub fn isGitHubShorthand(npa_str: []const u8) bool { + // The implementation in hosted-git-info is a multi-pass algorithm. We've opted to implement a + // single-pass algorithm for better performance. 
+ // + // This could be even faster with SIMD but this is probably good enough for now. + if (npa_str.len < 1) { + return false; + } + + // Implements doesNotStartWithDot + if (npa_str[0] == '.' or npa_str[0] == '/') { + return false; + } + + var pound_idx: ?usize = null; + var seen_slash = false; + + for (npa_str, 0..) |c, i| { + switch (c) { + // Implement atOnlyAfterHash and colonOnlyAfterHash + ':', '@' => { + if (pound_idx == null) { + return false; + } + }, + + '#' => { + pound_idx = i; + }, + '/' => { + // Implements secondSlashOnlyAfterHash + if (seen_slash and pound_idx == null) { + return false; + } + + seen_slash = true; + }, + else => { + // Implement spaceOnlyAfterHash + if (std.ascii.isWhitespace(c) and pound_idx == null) { + return false; + } + }, + } + } + + // Implements doesNotEndWithSlash + const does_not_end_with_slash = + if (pound_idx) |pi| + npa_str[pi - 1] != '/' + else + npa_str.len >= 1 and npa_str[npa_str.len - 1] != '/'; + + // Implement hasSlash + return seen_slash and does_not_end_with_slash; +} + +const UrlProtocol = union(enum) { + well_formed: WellDefinedProtocol, + + // A protocol which is not known by the library. Includes the : character, but not the + // double-slash, so `foo://bar` would yield `foo:`. + custom: []const u8, + + // Either no protocol was specified or the library couldn't figure it out. + unknown, + + /// Deduces the default representation for this protocol. 
+ pub fn defaultRepresentation(self: UrlProtocol) Representation { + return switch (self) { + .well_formed => self.well_formed.defaultRepresentation(), + else => .sshurl, // Unknown/custom protocols default to sshurl + }; + } +}; + +const UrlProtocolPair = struct { + const Self = @This(); + + url: union(enum) { + managed: struct { + buf: []const u8, + allocator: std.mem.Allocator, + }, + unmanaged: []const u8, + }, + protocol: UrlProtocol, + + pub fn urlSlice(self: *const Self) []const u8 { + return switch (self.url) { + .managed => |s| s.buf, + .unmanaged => |s| s, + }; + } + + pub fn deinit(self: *Self) void { + switch (self.url) { + .managed => |*u| { + u.allocator.free(u.buf); + }, + .unmanaged => |_| {}, + } + } + + /// Given a protocol pair, create a jsc.URL if possible. May allocate, but owns its memory. + fn toUrl(self: *const UrlProtocolPair, allocator: std.mem.Allocator) ?*jsc.URL { + // Ehhh.. Old IE's max path length was 2K so let's just use that. I searched for a + // statistical distribution of URL lengths and found nothing. + const long_url_thresh = 2048; + + var alloc = std.heap.stackFallback(long_url_thresh, allocator); + + var protocol_buf: WellDefinedProtocol.StringWithColonBuffer = undefined; + + return concatPartsToUrl( + alloc.get(), + switch (self.protocol) { + // If we have no protocol, we can assume it is git+ssh. + .unknown => &.{ "git+ssh://", self.urlSlice() }, + .custom => |proto_str| &.{ proto_str, "//", self.urlSlice() }, + // This feels counter-intuitive but is correct. It's not github://foo/bar, it's + // github:foo/bar. + .well_formed => |proto_tag| &.{ + proto_tag.toStringWithColon(&protocol_buf), + // Wordy name for a double-slash or empty string. github:foo/bar is valid, but + // git+ssh://foo/bar is also valid. 
+ proto_tag.protocolResourceIdentifierConcatenationToken(), + self.urlSlice(), + }, + }, + ); + } + + fn concatPartsToUrl(allocator: std.mem.Allocator, parts: []const []const u8) ?*jsc.URL { + // TODO(markovejnovic): There is a sad unnecessary allocation here that I don't know how to + // get rid of -- in theory, URL.zig could allocate once. + const new_str = bun.handleOom(bun.strings.concat(allocator, parts)); + defer allocator.free(new_str); + return jsc.URL.fromString(bun.String.init(new_str)); + } +}; + +/// Given a loose string that may or may not be a valid URL, attempt to normalize it. +/// +/// Returns a struct containing the URL string with the `protocol://` part removed and a tagged +/// enumeration. If the protocol is known, it is returned as a WellDefinedProtocol. If the protocol +/// is specified in the URL, it is given as a slice and if it is not specified, the `unknown` field +/// is returned. The result is a view into `npa_str` which must, consequently, remain stable. +/// +/// This mirrors the `correctProtocol` function in `hosted-git-info/parse-url.js`. +fn normalizeProtocol(npa_str: []const u8) UrlProtocolPair { + var first_colon_idx: i32 = -1; + if (bun.strings.indexOfChar(npa_str, ':')) |idx| { + first_colon_idx = @intCast(idx); + } + + // The cast here is safe -- first_colon_idx is guaranteed to be [-1, infty) + const proto_slice = npa_str[0..@intCast(first_colon_idx + 1)]; + + if (WellDefinedProtocol.fromStringWithColon(proto_slice)) |url_protocol| { + // We need to slice off the protocol from the string. Note there are two very annoying + // cases -- one where the protocol string is foo://bar and one where it is foo:bar. 
+ var post_colon = bun.strings.substring(npa_str, @intCast(first_colon_idx + 1), null); + + return .{ + .url = .{ + .unmanaged = if (bun.strings.hasPrefixComptime(post_colon, "//")) + post_colon[2..post_colon.len] + else + post_colon, + }, + .protocol = .{ .well_formed = url_protocol }, + }; + } + + // Now we search for the @ character to see if we have a user@host:path GIT+SSH style URL. + const first_at_idx = bun.strings.indexOfChar(npa_str, '@'); + if (first_at_idx) |at_idx| { + // We have an @ in the string + if (first_colon_idx != -1) { + // We have a : in the string. + if (at_idx > first_colon_idx) { + // The @ is after the :, so we have something like user:pass@host which is a valid + // URL. and should be promoted to git_plus_ssh. It's guaranteed that the issue is + // not that we have proto://user@host:path because we would've caught that above. + return .{ + .url = .{ .unmanaged = npa_str }, + .protocol = .{ .well_formed = .git_plus_ssh }, + }; + } else { + // Otherwise we have something like user@host:path which is also a valid URL. + // Things are, however, different, since we don't really know what the protocol is. + // Remember, we would've hit the proto://user@host:path above. + + // NOTE(markovejnovic): I don't, at this moment, understand how exactly + // hosted-git-info and npm-package-arg handle this "unknown" protocol as of now. + // We can't really guess either -- there's no :// which comes before @ + return .{ .url = .{ .unmanaged = npa_str }, .protocol = .unknown }; + } + } else { + // Something like user@host which is also a valid URL. Since no :, that means that the + // URL is as good as it gets. No need to slice. + return .{ + .url = .{ .unmanaged = npa_str }, + .protocol = .{ .well_formed = .git_plus_ssh }, + }; + } + } + + // The next thing we can try is to search for the double slash and treat this protocol as a + // custom one. + // + // NOTE(markovejnovic): I also think this is wrong in parse-url.js. + // They: + // 1. 
Test the protocol against known protocols (which is fine) + // 2. Then, if not found, they go through that hoop of checking for @ and : guessing if it is a + // git+ssh URL or not + // 3. And finally, they search for ://. + // + // The last two steps feel like they should happen in reverse order: + // + // If I have a foobar://user:host@path URL (and foobar is not given as a known protocol), their + // implementation will not report this as a foobar protocol, but rather as + // git+ssh://foobar://user:host@path which, I think, is wrong. + // + // I even tested it: https://tinyurl.com/5y4e6zrw + // + // Our goal is to be bug-for-bug compatible, at least for now, so this is how I re-implemented + // it. + const maybe_dup_slash_idx = bun.strings.indexOf(npa_str, "//"); + if (maybe_dup_slash_idx) |dup_slash_idx| { + if (dup_slash_idx == first_colon_idx + 1) { + return .{ + .url = .{ .unmanaged = bun.strings.substring(npa_str, dup_slash_idx + 2, null) }, + .protocol = .{ .custom = npa_str[0..dup_slash_idx] }, + }; + } + } + + // Well, otherwise we have to split the original URL into two pieces, + // right at the colon. + if (first_colon_idx != -1) { + return .{ + .url = .{ + .unmanaged = bun.strings.substring(npa_str, @intCast(first_colon_idx + 1), null), + }, + .protocol = .{ .custom = npa_str[0..@intCast(first_colon_idx + 1)] }, + }; + } + + // Well we couldn't figure out anything. + return .{ .url = .{ .unmanaged = npa_str }, .protocol = .unknown }; +} + +/// Attempt to correct an scp-style URL into a proper URL, parsable with jsc.URL. Potentially +/// mutates the original input. +/// +/// This function assumes that the input is an scp-style URL. 
+fn correctUrl( + url_proto_pair: *const UrlProtocolPair, + allocator: std.mem.Allocator, +) error{OutOfMemory}!UrlProtocolPair { + const at_idx: isize = if (bun.strings.lastIndexBeforeChar( + url_proto_pair.urlSlice(), + '@', + '#', + )) |idx| + @intCast(idx) + else + -1; + + const col_idx: isize = if (bun.strings.lastIndexBeforeChar( + url_proto_pair.urlSlice(), + ':', + '#', + )) |idx| + @intCast(idx) + else + -1; + + if (col_idx > at_idx) { + var duped = try allocator.dupe(u8, url_proto_pair.urlSlice()); + duped[@intCast(col_idx)] = '/'; + + return .{ + .url = .{ + .managed = .{ + .buf = duped, + .allocator = allocator, + }, + }, + .protocol = .{ .well_formed = .git_plus_ssh }, + }; + } + + if (col_idx == -1 and url_proto_pair.protocol == .unknown) { + return .{ + .url = url_proto_pair.url, + .protocol = .{ .well_formed = .git_plus_ssh }, + }; + } + + return .{ .url = url_proto_pair.url, .protocol = url_proto_pair.protocol }; +} + +/// This enumeration encapsulates all known host providers and their configurations. +/// +/// Providers each have different configuration fields and, on top of that, have different +/// mechanisms for formatting URLs. For example, GitHub will format SSH URLs as +/// `git+ssh://git@${domain}/${user}/${project}.git${maybeJoin('#', committish)}`, while `gist` +/// will format URLs as `git+ssh://git@${domain}/${project}.git${maybeJoin('#', committish)}`. This +/// structure encapsulates the differences between providers and how they handle all of that. +/// +/// Effectively, this enumeration acts as a registry of all known providers and a vtable for +/// jumping between different behavior for different providers. 
+const HostProvider = enum { + const Self = @This(); + + bitbucket, + gist, + github, + gitlab, + sourcehut, + + fn formatSsh( + self: Self, + allocator: std.mem.Allocator, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8 { + return configs.get(self).format_ssh(self, allocator, user, project, committish); + } + + fn formatSshUrl( + self: Self, + allocator: std.mem.Allocator, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8 { + return configs.get(self).format_sshurl(self, allocator, user, project, committish); + } + + fn formatHttps( + self: Self, + allocator: std.mem.Allocator, + auth: ?[]const u8, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8 { + return configs.get(self).format_https(self, allocator, auth, user, project, committish); + } + + fn formatShortcut( + self: Self, + allocator: std.mem.Allocator, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8 { + return configs.get(self).format_shortcut(self, allocator, user, project, committish); + } + + fn extract( + self: Self, + allocator: std.mem.Allocator, + url: *jsc.URL, + ) error{ OutOfMemory, InvalidURL }!?Config.formatters.extract.Result { + return configs.get(self).format_extract(allocator, url); + } + + const Config = struct { + protocols: []const WellDefinedProtocol, + domain: []const u8, + shortcut: []const u8, + tree_path: ?[]const u8, + blob_path: ?[]const u8, + edit_path: ?[]const u8, + + format_ssh: formatters.ssh.Type = Self.Config.formatters.ssh.default, + format_sshurl: formatters.ssh_url.Type = Self.Config.formatters.ssh_url.default, + format_https: formatters.https.Type = Self.Config.formatters.https.default, + format_shortcut: formatters.shortcut.Type = Self.Config.formatters.shortcut.default, + format_git: formatters.git.Type = Self.Config.formatters.git.default, + 
format_extract: formatters.extract.Type, + + /// Encapsulates all the various foramtters that different hosts may have. Usually this has + /// to do with URLs, but could be other things. + const formatters = struct { + fn requiresUser(user: ?[]const u8) void { + if (user == null) { + @panic("Attempted to format a default SSH URL without a user. This is an " ++ + "irrecoverable programming bug in Bun. Please report this issue " ++ + "on GitHub."); + } + } + + /// Mirrors hosts.js's sshtemplate + const ssh = struct { + const Type = *const fn ( + self: Self, + allocator: std.mem.Allocator, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8; + + fn default( + self: Self, + alloc: std.mem.Allocator, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8 { + requiresUser(user); + const cmsh: []const u8 = if (committish) |c| c else ""; + const cmsh_sep = if (cmsh.len > 0) "#" else ""; + + return std.fmt.allocPrint( + alloc, + "git@{s}:{s}/{s}.git{s}{s}", + .{ self.domain(), user.?, project, cmsh_sep, cmsh }, + ); + } + + fn gist( + self: Self, + allocator: std.mem.Allocator, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8 { + _ = user; + const cmsh: []const u8 = if (committish) |c| c else ""; + const cmsh_sep = if (cmsh.len > 0) "#" else ""; + + return std.fmt.allocPrint( + allocator, + "git@{s}:{s}.git{s}{s}", + .{ self.domain(), project, cmsh_sep, cmsh }, + ); + } + }; + + /// Mirrors hosts.js's sshurltemplate + const ssh_url = struct { + const Type = *const fn ( + self: Self, + allocator: std.mem.Allocator, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8; + + fn default( + self: Self, + alloc: std.mem.Allocator, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8 { + requiresUser(user); + const cmsh: 
[]const u8 = if (committish) |c| c else ""; + const cmsh_sep = if (cmsh.len > 0) "#" else ""; + + return std.fmt.allocPrint( + alloc, + "git+ssh://git@{s}/{s}/{s}.git{s}{s}", + .{ self.domain(), user.?, project, cmsh_sep, cmsh }, + ); + } + + fn gist( + self: Self, + allocator: std.mem.Allocator, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8 { + _ = user; + const cmsh: []const u8 = if (committish) |c| c else ""; + const cmsh_sep = if (cmsh.len > 0) "#" else ""; + + return std.fmt.allocPrint( + allocator, + "git+ssh://git@{s}/{s}.git{s}{s}", + .{ self.domain(), project, cmsh_sep, cmsh }, + ); + } + }; + + /// Mirrors hosts.js's httpstemplate + const https = struct { + const Type = *const fn ( + self: Self, + allocator: std.mem.Allocator, + auth: ?[]const u8, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8; + + fn default( + self: Self, + alloc: std.mem.Allocator, + auth: ?[]const u8, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8 { + requiresUser(user); + + const auth_str = if (auth) |a| a else ""; + const auth_sep = if (auth_str.len > 0) "@" else ""; + const cmsh: []const u8 = if (committish) |c| c else ""; + const cmsh_sep = if (cmsh.len > 0) "#" else ""; + + return std.fmt.allocPrint( + alloc, + "git+https://{s}{s}{s}/{s}/{s}.git{s}{s}", + .{ auth_str, auth_sep, self.domain(), user.?, project, cmsh_sep, cmsh }, + ); + } + + fn gist( + self: Self, + alloc: std.mem.Allocator, + auth: ?[]const u8, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8 { + _ = auth; + _ = user; + + const cmsh: []const u8 = if (committish) |c| c else ""; + const cmsh_sep = if (cmsh.len > 0) "#" else ""; + + return std.fmt.allocPrint( + alloc, + "git+https://{s}/{s}.git{s}{s}", + .{ self.domain(), project, cmsh_sep, cmsh }, + ); + } + + fn sourcehut( + self: Self, + 
alloc: std.mem.Allocator, + auth: ?[]const u8, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8 { + requiresUser(user); + _ = auth; + + const cmsh: []const u8 = if (committish) |c| c else ""; + const cmsh_sep = if (cmsh.len > 0) "#" else ""; + + return std.fmt.allocPrint( + alloc, + "https://{s}/{s}/{s}.git{s}{s}", + .{ self.domain(), user.?, project, cmsh_sep, cmsh }, + ); + } + }; + + /// Mirrors hosts.js's shortcuttemplate + const shortcut = struct { + const Type = *const fn ( + self: Self, + allocator: std.mem.Allocator, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8; + + fn default( + self: Self, + alloc: std.mem.Allocator, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8 { + requiresUser(user); + + const cmsh: []const u8 = if (committish) |c| c else ""; + const cmsh_sep = if (cmsh.len > 0) "#" else ""; + + return std.fmt.allocPrint( + alloc, + "{s}{s}/{s}{s}{s}", + .{ self.shortcut(), user.?, project, cmsh_sep, cmsh }, + ); + } + + fn gist( + self: Self, + alloc: std.mem.Allocator, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8 { + _ = user; + + const cmsh: []const u8 = if (committish) |c| c else ""; + const cmsh_sep = if (cmsh.len > 0) "#" else ""; + + return std.fmt.allocPrint( + alloc, + "{s}{s}{s}{s}", + .{ self.shortcut(), project, cmsh_sep, cmsh }, + ); + } + }; + + /// Mirrors hosts.js's extract function + const extract = struct { + const Result = struct { + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + _owned_buffer: ?[]const u8, + _allocator: std.mem.Allocator, + + fn deinit(self: *Result) void { + if (self._owned_buffer) |buf| { + self._allocator.free(buf); + } + } + + /// Return the buffer which owns this Result and the allocator responsible for + /// freeing it. 
+ /// + /// Same semantics as C++ STL. Safe-to-deinit Result after this, not safe to + /// use it. + fn move(self: *Result) struct { + buffer: []const u8, + allocator: std.mem.Allocator, + } { + if (self._owned_buffer == null) { + @panic("Cannot move an empty Result. This is a bug in Bun. Please " ++ + "report this issue on GitHub."); + } + + const buffer = self._owned_buffer.?; + const allocator = self._allocator; + + self._owned_buffer = null; + + return .{ + .buffer = buffer, + .allocator = allocator, + }; + } + }; + + const Type = *const fn ( + allocator: std.mem.Allocator, + url: *jsc.URL, + ) error{ OutOfMemory, InvalidURL }!?Result; + + fn github( + allocator: std.mem.Allocator, + url: *jsc.URL, + ) error{ OutOfMemory, InvalidURL }!?Result { + const pathname_owned = try url.pathname().toOwnedSlice(allocator); + defer allocator.free(pathname_owned); + const pathname = bun.strings.trimPrefixComptime(u8, pathname_owned, "/"); + + var iter = std.mem.splitScalar(u8, pathname, '/'); + const user_part = iter.next() orelse return null; + const project_part = iter.next() orelse return null; + const type_part = iter.next(); + const committish_part = iter.next(); + + const project = bun.strings.trimSuffixComptime(project_part, ".git"); + + if (user_part.len == 0 or project.len == 0) { + return null; + } + + // If the type part says something other than "tree", we're not looking at a + // github URL that we understand. 
+ if (type_part) |tp| { + if (!std.mem.eql(u8, tp, "tree")) { + return null; + } + } + + var committish: ?[]const u8 = null; + if (type_part == null) { + const fragment_str = url.fragmentIdentifier(); + defer fragment_str.deref(); + const fragment_utf8 = fragment_str.toUTF8(allocator); + defer fragment_utf8.deinit(); + const fragment = fragment_utf8.slice(); + if (fragment.len > 0) { + committish = fragment; + } + } else { + committish = committish_part; + } + + var sb = bun.StringBuilder{}; + sb.count(user_part); + sb.count(project); + if (committish) |c| sb.count(c); + + try sb.allocate(allocator); + + const user_slice = try HostedGitInfo.decodeAndAppend(&sb, user_part); + const project_slice = try HostedGitInfo.decodeAndAppend(&sb, project); + const committish_slice = + if (committish) |c| + try HostedGitInfo.decodeAndAppend(&sb, c) + else + null; + + return .{ + .user = user_slice, + .project = project_slice, + .committish = committish_slice, + ._owned_buffer = sb.allocatedSlice(), + ._allocator = allocator, + }; + } + + fn bitbucket( + allocator: std.mem.Allocator, + url: *jsc.URL, + ) error{ InvalidURL, OutOfMemory }!?Result { + const pathname_owned = try url.pathname().toOwnedSlice(allocator); + defer allocator.free(pathname_owned); + const pathname = bun.strings.trimPrefixComptime(u8, pathname_owned, "/"); + + var iter = std.mem.splitScalar(u8, pathname, '/'); + const user_part = iter.next() orelse return null; + const project_part = iter.next() orelse return null; + const aux = iter.next(); + + if (aux) |a| { + if (std.mem.eql(u8, a, "get")) { + return null; + } + } + + const project = bun.strings.trimSuffixComptime(project_part, ".git"); + + if (user_part.len == 0 or project.len == 0) { + return null; + } + + const fragment_str = url.fragmentIdentifier(); + defer fragment_str.deref(); + const fragment_utf8 = fragment_str.toUTF8(allocator); + defer fragment_utf8.deinit(); + const fragment = fragment_utf8.slice(); + const committish = if (fragment.len > 0) 
fragment else null; + + var sb = bun.StringBuilder{}; + sb.count(user_part); + sb.count(project); + if (committish) |c| sb.count(c); + + try sb.allocate(allocator); + + const user_slice = try HostedGitInfo.decodeAndAppend(&sb, user_part); + const project_slice = try HostedGitInfo.decodeAndAppend(&sb, project); + const committish_slice = + if (committish) |c| + try HostedGitInfo.decodeAndAppend(&sb, c) + else + null; + + return .{ + .user = user_slice, + .project = project_slice, + .committish = committish_slice, + ._owned_buffer = sb.allocatedSlice(), + ._allocator = allocator, + }; + } + + fn gitlab( + allocator: std.mem.Allocator, + url: *jsc.URL, + ) error{ OutOfMemory, InvalidURL }!?Result { + const pathname_owned = try url.pathname().toOwnedSlice(allocator); + defer allocator.free(pathname_owned); + const pathname = bun.strings.trimPrefixComptime(u8, pathname_owned, "/"); + + if (bun.strings.contains(pathname, "/-/") or + bun.strings.contains(pathname, "/archive.tar.gz")) + { + return null; + } + + const end_slash = bun.strings.lastIndexOfChar(pathname, '/') orelse return null; + const project_part = pathname[end_slash + 1 ..]; + const user_part = pathname[0..end_slash]; + + const project = bun.strings.trimSuffixComptime(project_part, ".git"); + + if (user_part.len == 0 or project.len == 0) { + return null; + } + + const fragment_str = url.fragmentIdentifier(); + defer fragment_str.deref(); + const fragment_utf8 = fragment_str.toUTF8(allocator); + defer fragment_utf8.deinit(); + const committish = fragment_utf8.slice(); + + var sb = bun.StringBuilder{}; + sb.count(user_part); + sb.count(project); + if (committish.len > 0) sb.count(committish); + + try sb.allocate(allocator); + + const user_slice = try HostedGitInfo.decodeAndAppend(&sb, user_part); + const project_slice = try HostedGitInfo.decodeAndAppend(&sb, project); + const committish_slice = + if (committish.len > 0) + HostedGitInfo.decodeAndAppend(&sb, committish) catch return null + else + null; + + 
return .{ + .user = user_slice, + .project = project_slice, + .committish = committish_slice, + ._owned_buffer = sb.allocatedSlice(), + ._allocator = allocator, + }; + } + + fn gist( + allocator: std.mem.Allocator, + url: *jsc.URL, + ) error{ OutOfMemory, InvalidURL }!?Result { + const pathname_owned = try url.pathname().toOwnedSlice(allocator); + defer allocator.free(pathname_owned); + const pathname = bun.strings.trimPrefixComptime(u8, pathname_owned, "/"); + + var iter = std.mem.splitScalar(u8, pathname, '/'); + var user_part = iter.next() orelse return null; + var project_part = iter.next(); + const aux = iter.next(); + + if (aux) |a| { + if (std.mem.eql(u8, a, "raw")) { + return null; + } + } + + if (project_part == null or project_part.?.len == 0) { + project_part = user_part; + user_part = ""; + } + + const project = bun.strings.trimSuffixComptime(project_part.?, ".git"); + const user = if (user_part.len > 0) user_part else null; + + if (project.len == 0) { + return null; + } + + const fragment_str = url.fragmentIdentifier(); + defer fragment_str.deref(); + const fragment_utf8 = fragment_str.toUTF8(allocator); + defer fragment_utf8.deinit(); + const fragment = fragment_utf8.slice(); + const committish = if (fragment.len > 0) fragment else null; + + var sb = bun.StringBuilder{}; + if (user) |u| sb.count(u); + sb.count(project); + if (committish) |c| sb.count(c); + + sb.allocate(allocator) catch return null; + + const user_slice = + if (user) |u| + HostedGitInfo.decodeAndAppend(&sb, u) catch return null + else + null; + const project_slice = + HostedGitInfo.decodeAndAppend(&sb, project) catch return null; + const committish_slice = + if (committish) |c| + HostedGitInfo.decodeAndAppend(&sb, c) catch return null + else + null; + + return .{ + .user = user_slice, + .project = project_slice, + .committish = committish_slice, + ._owned_buffer = sb.allocatedSlice(), + ._allocator = allocator, + }; + } + + fn sourcehut( + allocator: std.mem.Allocator, + url: 
*jsc.URL, + ) error{ InvalidURL, OutOfMemory }!?Result { + const pathname_owned = try url.pathname().toOwnedSlice(allocator); + defer allocator.free(pathname_owned); + const pathname = bun.strings.trimPrefixComptime(u8, pathname_owned, "/"); + + var iter = std.mem.splitScalar(u8, pathname, '/'); + const user_part = iter.next() orelse return null; + const project_part = iter.next() orelse return null; + const aux = iter.next(); + + if (aux) |a| { + if (std.mem.eql(u8, a, "archive")) { + return null; + } + } + + const project = bun.strings.trimSuffixComptime(project_part, ".git"); + + if (user_part.len == 0 or project.len == 0) { + return null; + } + + const fragment_str = url.fragmentIdentifier(); + defer fragment_str.deref(); + const fragment_utf8 = fragment_str.toUTF8(allocator); + defer fragment_utf8.deinit(); + const fragment = fragment_utf8.slice(); + const committish = if (fragment.len > 0) fragment else null; + + var sb = bun.StringBuilder{}; + sb.count(user_part); + sb.count(project); + if (committish) |c| sb.count(c); + + sb.allocate(allocator) catch return null; + + const user_slice = blk: { + const writable = sb.writable(); + var stream = std.io.fixedBufferStream(writable); + const decoded_len = PercentEncoding.decode( + @TypeOf(stream.writer()), + stream.writer(), + user_part, + ) catch return null; + sb.len += decoded_len; + break :blk writable[0..decoded_len]; + }; + const project_slice = blk: { + const writable = sb.writable(); + var stream = std.io.fixedBufferStream(writable); + const decoded_len = PercentEncoding.decode( + @TypeOf(stream.writer()), + stream.writer(), + project, + ) catch return null; + sb.len += decoded_len; + break :blk writable[0..decoded_len]; + }; + const committish_slice = if (committish) |c| blk: { + const writable = sb.writable(); + var stream = std.io.fixedBufferStream(writable); + const decoded_len = PercentEncoding.decode( + @TypeOf(stream.writer()), + stream.writer(), + c, + ) catch return null; + sb.len += decoded_len; + 
break :blk writable[0..decoded_len]; + } else null; + + return .{ + .user = user_slice, + .project = project_slice, + .committish = committish_slice, + ._owned_buffer = sb.allocatedSlice(), + ._allocator = allocator, + }; + } + }; + + /// Mirrors hosts.js's gittemplate + const git = struct { + const Type = ?*const fn ( + self: Self, + allocator: std.mem.Allocator, + auth: ?[]const u8, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8; + + const default: Type = null; + + fn github( + self: Self, + allocator: std.mem.Allocator, + auth: ?[]const u8, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8 { + requiresUser(user); + + const auth_str = if (auth) |a| a else ""; + const auth_sep = if (auth_str.len > 0) "@" else ""; + const cmsh: []const u8 = if (committish) |c| c else ""; + const cmsh_sep = if (cmsh.len > 0) "#" else ""; + + return std.fmt.allocPrint( + allocator, + "git://{s}{s}{s}/{s}/{s}.git{s}{s}", + .{ auth_str, auth_sep, self.domain(), user.?, project, cmsh_sep, cmsh }, + ); + } + + fn gist( + self: Self, + allocator: std.mem.Allocator, + auth: ?[]const u8, + user: ?[]const u8, + project: []const u8, + committish: ?[]const u8, + ) error{OutOfMemory}![]const u8 { + _ = auth; + _ = user; + + const cmsh: []const u8 = if (committish) |c| c else ""; + const cmsh_sep = if (cmsh.len > 0) "#" else ""; + + return std.fmt.allocPrint( + allocator, + "git://{s}/{s}.git{s}{s}", + .{ self.domain(), project, cmsh_sep, cmsh }, + ); + } + }; + }; + }; + + const configs = std.enums.EnumArray(Self, Config).init(.{ + .bitbucket = .{ + .protocols = &.{ .git_plus_http, .git_plus_https, .ssh, .https }, + .domain = "bitbucket.org", + .shortcut = "bitbucket:", + .tree_path = "src", + .blob_path = "src", + .edit_path = "?mode=edit", + .format_extract = Self.Config.formatters.extract.bitbucket, + }, + .gist = .{ + .protocols = &.{ .git, .git_plus_ssh, .git_plus_https, .ssh, 
.https }, + .domain = "gist.github.com", + .shortcut = "gist:", + .tree_path = null, + .blob_path = null, + .edit_path = "edit", + .format_ssh = Self.Config.formatters.ssh.gist, + .format_sshurl = Self.Config.formatters.ssh_url.gist, + .format_https = Self.Config.formatters.https.gist, + .format_shortcut = Self.Config.formatters.shortcut.gist, + .format_git = Self.Config.formatters.git.gist, + .format_extract = Self.Config.formatters.extract.gist, + }, + .github = .{ + .protocols = &.{ .git, .http, .git_plus_ssh, .git_plus_https, .ssh, .https }, + .domain = "github.com", + .shortcut = "github:", + .tree_path = "tree", + .blob_path = "blob", + .edit_path = "edit", + .format_git = Self.Config.formatters.git.github, + .format_extract = Self.Config.formatters.extract.github, + }, + .gitlab = .{ + .protocols = &.{ .git_plus_ssh, .git_plus_https, .ssh, .https }, + .domain = "gitlab.com", + .shortcut = "gitlab:", + .tree_path = "tree", + .blob_path = "tree", + .edit_path = "-/edit", + .format_extract = Self.Config.formatters.extract.gitlab, + }, + .sourcehut = .{ + .protocols = &.{ .git_plus_ssh, .https }, + .domain = "git.sr.ht", + .shortcut = "sourcehut:", + .tree_path = "tree", + .blob_path = "tree", + .edit_path = null, + .format_https = Self.Config.formatters.https.sourcehut, + .format_extract = Self.Config.formatters.extract.sourcehut, + }, + }); + + /// Return the string representation of the provider. + fn typeStr(self: Self) []const u8 { + return @tagName(self); + } + + fn shortcut(self: Self) []const u8 { + return configs.get(self).shortcut; + } + + fn domain(self: Self) []const u8 { + return configs.get(self).domain; + } + + fn protocols(self: Self) []const WellDefinedProtocol { + return configs.get(self).protocols; + } + + fn shortcutWithoutColon(self: Self) []const u8 { + const shct = self.shortcut(); + return shct[0 .. 
shct.len - 1]; + } + + fn treePath(self: Self) ?[]const u8 { + return configs.get(self).tree_path; + } + + fn blobPath(self: Self) ?[]const u8 { + return configs.get(self).blob_path; + } + + fn editPath(self: Self) ?[]const u8 { + return configs.get(self).edit_path; + } + + /// Find the appropriate host provider by its shortcut (e.g. "github:"). + /// + /// The second parameter allows you to declare whether the given string includes the protocol: + /// colon or not. + fn fromShortcut( + shortcut_str: []const u8, + comptime with_colon: enum { with_colon, without_colon }, + ) ?HostProvider { + inline for (std.meta.fields(Self)) |field| { + const provider: HostProvider = @enumFromInt(field.value); + + const shortcut_matches = std.mem.eql( + u8, + switch (with_colon) { + .with_colon => provider.shortcut(), + .without_colon => provider.shortcutWithoutColon(), + }, + shortcut_str, + ); + + if (shortcut_matches) { + return provider; + } + } + + return null; + } + + /// Find the appropriate host provider by its domain (e.g. "github.com"). + fn fromDomain(domain_str: []const u8) ?HostProvider { + inline for (std.meta.fields(Self)) |field| { + const provider: HostProvider = @enumFromInt(field.value); + + if (std.mem.eql(u8, provider.domain(), domain_str)) { + return provider; + } + } + + return null; + } + + /// Parse a URL and return the appropriate host provider, if any. + fn fromUrl(url: *jsc.URL) ?HostProvider { + const proto_str = url.protocol(); + defer proto_str.deref(); + + // Try shortcut first (github:, gitlab:, etc.) + if (HostProvider.fromShortcut(proto_str.byteSlice(), .without_colon)) |provider| { + return provider; + } + + return HostProvider.fromUrlDomain(url); + } + + // Given a URL, use the domain in the URL to find the appropriate host provider. 
+ fn fromUrlDomain(url: *jsc.URL) ?HostProvider { + const max_hostname_len: comptime_int = 253; + + const hostname_str = url.hostname(); + defer hostname_str.deref(); + + var fba_mem: [max_hostname_len]u8 = undefined; + var fba = std.heap.FixedBufferAllocator.init(&fba_mem); + const hostname_utf8 = hostname_str.toUTF8(fba.allocator()); + defer hostname_utf8.deinit(); + const hostname = bun.strings.withoutPrefixComptime(hostname_utf8.slice(), "www."); + + return HostProvider.fromDomain(hostname); + } +}; + +pub const TestingAPIs = struct { + pub fn jsParseUrl(go: *jsc.JSGlobalObject, callframe: *jsc.CallFrame) bun.JSError!jsc.JSValue { + const allocator = bun.default_allocator; + + if (callframe.argumentsCount() != 1) { + return go.throw("hostedGitInfo.prototype.parseUrl takes exactly 1 argument", .{}); + } + + const arg0 = callframe.argument(0); + if (!arg0.isString()) { + return go.throw( + "hostedGitInfo.prototype.parseUrl takes a string as its " ++ + "first argument", + .{}, + ); + } + + // TODO(markovejnovic): This feels like there's too much going on all + // to give us a slice. Maybe there's a better way to code this up. + const npa_str = try arg0.toBunString(go); + defer npa_str.deref(); + var as_utf8 = npa_str.toUTF8(allocator); + defer as_utf8.deinit(); + const parsed = parseUrl(allocator, as_utf8.mut()) catch |err| { + return go.throw("Invalid Git URL: {}", .{err}); + }; + defer parsed.url.deinit(); + + return parsed.url.href().toJS(go); + } + + pub fn jsFromUrl(go: *jsc.JSGlobalObject, callframe: *jsc.CallFrame) bun.JSError!jsc.JSValue { + const allocator = bun.default_allocator; + + // TODO(markovejnovic): The original hosted-git-info actually takes another argument that + // allows you to inject options. Seems untested so we didn't implement + // it. 
+ if (callframe.argumentsCount() != 1) { + return go.throw("hostedGitInfo.prototype.fromUrl takes exactly 1 argument", .{}); + } + + const arg0 = callframe.argument(0); + if (!arg0.isString()) { + return go.throw( + "hostedGitInfo.prototype.fromUrl takes a string as its first argument", + .{}, + ); + } + + // TODO(markovejnovic): This feels like there's too much going on all to give us a slice. + // Maybe there's a better way to code this up. + const npa_str = try arg0.toBunString(go); + defer npa_str.deref(); + var as_utf8 = npa_str.toUTF8(allocator); + defer as_utf8.deinit(); + const parsed = HostedGitInfo.fromUrl(allocator, as_utf8.mut()) catch |err| { + return go.throw("Invalid Git URL: {}", .{err}); + } orelse { + return .null; + }; + + return parsed.toJS(go); + } +}; + +const std = @import("std"); +const PercentEncoding = @import("../url.zig").PercentEncoding; + +const bun = @import("bun"); +const jsc = bun.jsc; diff --git a/src/install/isolated_install.zig b/src/install/isolated_install.zig index 48e85bd5aa..e4a44b501c 100644 --- a/src/install/isolated_install.zig +++ b/src/install/isolated_install.zig @@ -250,7 +250,7 @@ pub fn installIsolatedPackages( } } - next_peer: for (peer_dep_ids.items) |peer_dep_id| { + for (peer_dep_ids.items) |peer_dep_id| { const resolved_pkg_id, const auto_installed = resolved_pkg_id: { // Go through the peers parents looking for a package with the same name. @@ -316,13 +316,12 @@ pub fn installIsolatedPackages( // version. Only mark all parents if resolution is // different from this transitive peer. 
- if (peer_dep.behavior.isOptionalPeer()) { - // exclude it - continue :next_peer; - } - const best_version = resolutions[peer_dep_id]; + if (best_version == invalid_package_id) { + break :resolved_pkg_id .{ invalid_package_id, true }; + } + if (best_version == ids.pkg_id) { break :resolved_pkg_id .{ ids.pkg_id, true }; } @@ -344,16 +343,15 @@ pub fn installIsolatedPackages( curr_id = node_parent_ids[curr_id.get()]; } - if (peer_dep.behavior.isOptionalPeer()) { - // exclude it - continue; - } - // choose the current best version break :resolved_pkg_id .{ resolutions[peer_dep_id], true }; }; - bun.debugAssert(resolved_pkg_id != invalid_package_id); + if (resolved_pkg_id == invalid_package_id) { + // these are optional peers that failed to find any dependency with a matching + // name. they are completely excluded + continue; + } for (visited_parent_node_ids.items) |visited_parent_id| { const ctx: Store.Node.TransitivePeer.OrderedArraySetCtx = .{ diff --git a/src/install/lockfile.zig b/src/install/lockfile.zig index 38679c7c3e..adc005856c 100644 --- a/src/install/lockfile.zig +++ b/src/install/lockfile.zig @@ -914,7 +914,6 @@ pub fn hoist( var slice = lockfile.packages.slice(); var builder = Tree.Builder(method){ - .name_hashes = slice.items(.name_hash), .queue = .init(allocator), .resolution_lists = slice.items(.resolutions), .resolutions = lockfile.buffers.resolutions.items, @@ -926,6 +925,7 @@ pub fn hoist( .install_root_dependencies = install_root_dependencies, .workspace_filters = workspace_filters, .packages_to_install = packages_to_install, + .pending_optional_peers = .init(bun.default_allocator), }; try (Tree{}).processSubtree( diff --git a/src/install/lockfile/Tree.zig b/src/install/lockfile/Tree.zig index a75feae070..471876ae39 100644 --- a/src/install/lockfile/Tree.zig +++ b/src/install/lockfile/Tree.zig @@ -52,14 +52,20 @@ pub const invalid_id: Id = std.math.maxInt(Id); pub const HoistDependencyResult = union(enum) { dependency_loop, hoisted, - placement: 
struct { + resolve: PackageID, + resolve_replace: ResolveReplace, + resolve_later, + placement: Placement, + + const ResolveReplace = struct { + id: Id, + dep_id: DependencyID, + }; + + const Placement = struct { id: Id, bundled: bool = false, - }, - // replace: struct { - // dest_id: Id, - // dep_id: DependencyID, - // }, + }; }; pub const SubtreeError = OOM || error{DependencyLoop}; @@ -234,14 +240,16 @@ pub const BuilderMethod = enum { pub fn Builder(comptime method: BuilderMethod) type { return struct { allocator: Allocator, - name_hashes: []const PackageNameHash, list: bun.MultiArrayList(Entry) = .{}, - resolutions: []const PackageID, + resolutions: []PackageID, dependencies: []const Dependency, resolution_lists: []const Lockfile.DependencyIDSlice, queue: TreeFiller, log: *logger.Log, lockfile: *const Lockfile, + // unresolved optional peers that might resolve later. if they do we will want to assign + // builder.resolutions[peer.dep_id] to the resolved pkg_id. + pending_optional_peers: std.AutoHashMap(PackageNameHash, bun.collections.ArrayListDefault(DependencyID)), manager: if (method == .filter) *const PackageManager else void, sort_buf: std.ArrayListUnmanaged(DependencyID) = .{}, workspace_filters: if (method == .filter) []const WorkspaceFilter else void = if (method == .filter) &.{}, @@ -302,6 +310,7 @@ pub fn Builder(comptime method: BuilderMethod) type { } this.queue.deinit(); this.sort_buf.deinit(this.allocator); + this.pending_optional_peers.deinit(); // take over the `builder.list` pointer for only trees if (@intFromPtr(trees.ptr) != @intFromPtr(list_ptr)) { @@ -328,6 +337,10 @@ pub fn isFilteredDependencyOrWorkspace( ) bool { const pkg_id = lockfile.buffers.resolutions.items[dep_id]; if (pkg_id >= lockfile.packages.len) { + const dep = lockfile.buffers.dependencies.items[dep_id]; + if (dep.behavior.isOptionalPeer()) { + return false; + } return true; } @@ -454,8 +467,6 @@ pub fn processSubtree( const trees = list_slice.items(.tree); const 
dependency_lists = list_slice.items(.dependencies); const next: *Tree = &trees[builder.list.len - 1]; - const name_hashes: []const PackageNameHash = builder.name_hashes; - const max_package_id = @as(PackageID, @truncate(name_hashes.len)); const pkgs = builder.lockfile.packages.slice(); const pkg_resolutions = pkgs.items(.resolution); @@ -478,8 +489,6 @@ pub fn processSubtree( for (builder.sort_buf.items) |dep_id| { const pkg_id = builder.resolutions[dep_id]; - // Skip unresolved packages, e.g. "peerDependencies" - if (pkg_id >= max_package_id) continue; // filter out disabled dependencies if (comptime method == .filter) { @@ -494,6 +503,12 @@ pub fn processSubtree( continue; } + // unresolved packages are skipped when filtering. they already had + // their chance to resolve. + if (pkg_id == invalid_package_id) { + continue; + } + if (builder.packages_to_install) |packages_to_install| { if (parent_pkg_id == 0) { var found = false; @@ -511,14 +526,33 @@ pub fn processSubtree( } } + const dependency = builder.dependencies[dep_id]; + const hoisted: HoistDependencyResult = hoisted: { - const dependency = builder.dependencies[dep_id]; // don't hoist if it's a folder dependency or a bundled dependency. 
if (dependency.behavior.isBundled()) { break :hoisted .{ .placement = .{ .id = next.id, .bundled = true } }; } + if (pkg_id == invalid_package_id) { + if (dependency.behavior.isOptionalPeer()) { + break :hoisted try next.hoistDependency( + true, + hoist_root_id, + pkg_id, + &dependency, + dependency_lists, + trees, + method, + builder, + ); + } + + // skip unresolvable dependencies + continue; + } + if (pkg_resolutions[pkg_id].tag == .folder) { break :hoisted .{ .placement = .{ .id = next.id } }; } @@ -537,10 +571,61 @@ pub fn processSubtree( switch (hoisted) { .dependency_loop, .hoisted => continue, + + .resolve => |res_id| { + bun.assertWithLocation(pkg_id == invalid_package_id, @src()); + bun.assertWithLocation(res_id != invalid_package_id, @src()); + builder.resolutions[dep_id] = res_id; + if (comptime Environment.allow_assert) { + bun.assertWithLocation(!builder.pending_optional_peers.contains(dependency.name_hash), @src()); + } + if (builder.pending_optional_peers.fetchRemove(dependency.name_hash)) |entry| { + var peers = entry.value; + defer peers.deinit(); + for (peers.items()) |unresolved_dep_id| { + bun.assertWithLocation(builder.resolutions[unresolved_dep_id] == invalid_package_id, @src()); + builder.resolutions[unresolved_dep_id] = res_id; + } + } + }, + .resolve_replace => |replace| { + bun.assertWithLocation(pkg_id != invalid_package_id, @src()); + builder.resolutions[replace.dep_id] = pkg_id; + if (builder.pending_optional_peers.fetchRemove(dependency.name_hash)) |entry| { + var peers = entry.value; + defer peers.deinit(); + for (peers.items()) |unresolved_dep_id| { + bun.assertWithLocation(builder.resolutions[unresolved_dep_id] == invalid_package_id, @src()); + builder.resolutions[unresolved_dep_id] = pkg_id; + } + } + for (dependency_lists[replace.id].items) |*placed_dep_id| { + if (placed_dep_id.* == replace.dep_id) { + placed_dep_id.* = dep_id; + } + } + if (pkg_id != invalid_package_id and builder.resolution_lists[pkg_id].len > 0) { + try 
builder.queue.writeItem(.{ + .tree_id = replace.id, + .dependency_id = dep_id, + .hoist_root_id = hoist_root_id, + }); + } + }, + .resolve_later => { + // `dep_id` is an unresolved optional peer. while hoisting it deduplicated + // with another unresolved optional peer. save it so we remember to resolve it + // later if it's possible to resolve it. + const entry = try builder.pending_optional_peers.getOrPut(dependency.name_hash); + if (!entry.found_existing) { + entry.value_ptr.* = .init(); + } + try entry.value_ptr.append(dep_id); + }, .placement => |dest| { bun.handleOom(dependency_lists[dest.id].append(builder.allocator, dep_id)); trees[dest.id].dependencies.len += 1; - if (builder.resolution_lists[pkg_id].len > 0) { + if (pkg_id != invalid_package_id and builder.resolution_lists[pkg_id].len > 0) { try builder.queue.writeItem(.{ .tree_id = dest.id, .dependency_id = dep_id, @@ -580,7 +665,29 @@ fn hoistDependency( const dep = builder.dependencies[dep_id]; if (dep.name_hash != dependency.name_hash) continue; - if (builder.resolutions[dep_id] == package_id) { + const res_id = builder.resolutions[dep_id]; + + if (res_id == invalid_package_id and package_id == invalid_package_id) { + bun.assertWithLocation(dep.behavior.isOptionalPeer(), @src()); + bun.assertWithLocation(dependency.behavior.isOptionalPeer(), @src()); + // both optional peers will need to be resolved if they can resolve later. 
+ // remember input package_id and dependency for later + return .resolve_later; + } + + if (res_id == invalid_package_id) { + bun.assertWithLocation(dep.behavior.isOptionalPeer(), @src()); + return .{ .resolve_replace = .{ .id = this.id, .dep_id = dep_id } }; + } + + if (package_id == invalid_package_id) { + bun.assertWithLocation(dependency.behavior.isOptionalPeer(), @src()); + bun.assertWithLocation(res_id != invalid_package_id, @src()); + // resolve optional peer to `builder.resolutions[dep_id]` + return .{ .resolve = res_id }; // 1 + } + + if (res_id == package_id) { // this dependency is the same package as the other, hoist return .hoisted; // 1 } @@ -599,7 +706,7 @@ fn hoistDependency( if (dependency.behavior.isPeer()) { if (dependency.version.tag == .npm) { - const resolution: Resolution = builder.lockfile.packages.items(.resolution)[builder.resolutions[dep_id]]; + const resolution: Resolution = builder.lockfile.packages.items(.resolution)[res_id]; const version = dependency.version.value.npm.version; if (resolution.tag == .npm and version.satisfies(resolution.value.npm.version, builder.buf(), builder.buf())) { return .hoisted; // 1 @@ -618,8 +725,8 @@ fn hoistDependency( builder.maybeReportError("Package \"{}@{}\" has a dependency loop\n Resolution: \"{}@{}\"\n Dependency: \"{}@{}\"", .{ builder.packageName(package_id), builder.packageVersion(package_id), - builder.packageName(builder.resolutions[dep_id]), - builder.packageVersion(builder.resolutions[dep_id]), + builder.packageName(res_id), + builder.packageVersion(res_id), dependency.name.fmt(builder.buf()), dependency.version.literal.fmt(builder.buf()), }); diff --git a/src/js/builtins/ProcessObjectInternals.ts b/src/js/builtins/ProcessObjectInternals.ts index e8025f8452..67156abc56 100644 --- a/src/js/builtins/ProcessObjectInternals.ts +++ b/src/js/builtins/ProcessObjectInternals.ts @@ -361,23 +361,6 @@ export function initializeNextTickQueue( return nextTick; } -$getter; -export function mainModule() { 
- var existing = $getByIdDirectPrivate(this, "main"); - // note: this doesn't handle "process.mainModule = undefined" - if (typeof existing !== "undefined") { - return existing; - } - - return $requireMap.$get(Bun.main); -} - -$overriddenName = "set mainModule"; -export function setMainModule(value) { - $putByIdDirectPrivate(this, "main", value); - return true; -} - type InternalEnvMap = Record; type EditWindowsEnvVarCb = (key: string, value: null | string) => void; diff --git a/src/js/internal-for-testing.ts b/src/js/internal-for-testing.ts index 1f3d147594..89b22f5841 100644 --- a/src/js/internal-for-testing.ts +++ b/src/js/internal-for-testing.ts @@ -210,3 +210,8 @@ export const structuredCloneAdvanced: ( ) => any = $newCppFunction("StructuredClone.cpp", "jsFunctionStructuredCloneAdvanced", 5); export const lsanDoLeakCheck = $newCppFunction("InternalForTesting.cpp", "jsFunction_lsanDoLeakCheck", 1); + +export const hostedGitInfo = { + parseUrl: $newZigFunction("hosted_git_info.zig", "TestingAPIs.jsParseUrl", 1), + fromUrl: $newZigFunction("hosted_git_info.zig", "TestingAPIs.jsFromUrl", 1), +}; diff --git a/src/logger.zig b/src/logger.zig index ab70c6b1c5..73239b6411 100644 --- a/src/logger.zig +++ b/src/logger.zig @@ -851,7 +851,7 @@ pub const Log = struct { } pub inline fn allocPrint(allocator: std.mem.Allocator, comptime fmt: string, args: anytype) OOM!string { - return switch (Output.enable_ansi_colors) { + return switch (Output.enable_ansi_colors_stderr) { inline else => |enable_ansi_colors| std.fmt.allocPrint(allocator, Output.prettyFmt(fmt, enable_ansi_colors), args), }; } @@ -1283,7 +1283,7 @@ pub const Log = struct { } pub fn print(self: *const Log, to: anytype) !void { - return switch (Output.enable_ansi_colors) { + return switch (Output.enable_ansi_colors_stderr) { inline else => |enable_ansi_colors| self.printWithEnableAnsiColors(to, enable_ansi_colors), }; } diff --git a/src/napi/napi.zig b/src/napi/napi.zig index b51de870e1..d778cadd9b 100644 --- 
a/src/napi/napi.zig +++ b/src/napi/napi.zig @@ -813,7 +813,7 @@ pub extern fn napi_create_arraybuffer(env: napi_env, byte_length: usize, data: [ pub extern fn napi_create_external_arraybuffer(env: napi_env, external_data: ?*anyopaque, byte_length: usize, finalize_cb: napi_finalize, finalize_hint: ?*anyopaque, result: *napi_value) napi_status; -pub export fn napi_get_arraybuffer_info(env_: napi_env, arraybuffer_: napi_value, data: ?*[*]u8, byte_length: ?*usize) napi_status { +pub export fn napi_get_arraybuffer_info(env_: napi_env, arraybuffer_: napi_value, data: ?*?[*]u8, byte_length: ?*usize) napi_status { log("napi_get_arraybuffer_info", .{}); const env = env_ orelse { return envIsNull(); @@ -825,11 +825,10 @@ pub export fn napi_get_arraybuffer_info(env_: napi_env, arraybuffer_: napi_value return env.setLastError(.invalid_arg); } - const slice = array_buffer.slice(); if (data) |dat| - dat.* = slice.ptr; + dat.* = array_buffer.ptr; if (byte_length) |len| - len.* = slice.len; + len.* = array_buffer.byte_len; return env.ok(); } @@ -840,7 +839,7 @@ pub export fn napi_get_typedarray_info( typedarray_: napi_value, maybe_type: ?*napi_typedarray_type, maybe_length: ?*usize, - maybe_data: ?*[*]u8, + maybe_data: ?*?[*]u8, maybe_arraybuffer: ?*napi_value, maybe_byte_offset: ?*usize, // note: this is always 0 ) napi_status { @@ -892,7 +891,7 @@ pub export fn napi_get_dataview_info( env_: napi_env, dataview_: napi_value, maybe_bytelength: ?*usize, - maybe_data: ?*[*]u8, + maybe_data: ?*?[*]u8, maybe_arraybuffer: ?*napi_value, maybe_byte_offset: ?*usize, // note: this is always 0 ) napi_status { @@ -1223,7 +1222,7 @@ pub export fn napi_create_buffer_copy(env_: napi_env, length: usize, data: [*]u8 return env.ok(); } extern fn napi_is_buffer(napi_env, napi_value, *bool) napi_status; -pub export fn napi_get_buffer_info(env_: napi_env, value_: napi_value, data: ?*[*]u8, length: ?*usize) napi_status { +pub export fn napi_get_buffer_info(env_: napi_env, value_: napi_value, data: 
?*?[*]u8, length: ?*usize) napi_status { log("napi_get_buffer_info", .{}); const env = env_ orelse { return envIsNull(); diff --git a/src/output.zig b/src/output.zig index e5dc464b2b..21ac5729d1 100644 --- a/src/output.zig +++ b/src/output.zig @@ -391,7 +391,6 @@ pub const Source = struct { enable_ansi_colors_stdout = enable_color orelse is_stdout_tty; enable_ansi_colors_stderr = enable_color orelse is_stderr_tty; - enable_ansi_colors = enable_ansi_colors_stdout or enable_ansi_colors_stderr; } stdout_stream = new_source.stream; @@ -407,7 +406,7 @@ pub const OutputStreamDescriptor = enum { terminal, }; -pub var enable_ansi_colors = Environment.isNative; +pub const enable_ansi_colors = @compileError("Deprecated to prevent accidentally using the wrong one. Use enable_ansi_colors_stdout or enable_ansi_colors_stderr instead."); pub var enable_ansi_colors_stderr = Environment.isNative; pub var enable_ansi_colors_stdout = Environment.isNative; pub var enable_buffering = Environment.isNative; @@ -429,10 +428,6 @@ pub inline fn isStdinTTY() bool { return bun_stdio_tty[0] != 0; } -pub inline fn isEmojiEnabled() bool { - return enable_ansi_colors; -} - pub fn isGithubAction() bool { if (bun.env_var.GITHUB_ACTIONS.get()) { // Do not print github annotations for AI agents because that wastes the context window. 
@@ -525,7 +520,7 @@ pub fn disableBuffering() void { pub fn panic(comptime fmt: string, args: anytype) noreturn { @branchHint(.cold); - if (isEmojiEnabled()) { + if (enable_ansi_colors_stderr) { std.debug.panic(comptime prettyFmt(fmt, true), args); } else { std.debug.panic(comptime prettyFmt(fmt, false), args); @@ -562,7 +557,7 @@ pub fn writerBuffered() Source.BufferedStream.Writer { } pub fn resetTerminal() void { - if (!enable_ansi_colors) { + if (!enable_ansi_colors_stderr and !enable_ansi_colors_stdout) { return; } @@ -1016,14 +1011,6 @@ pub noinline fn prettyWithPrinter(comptime fmt: string, args: anytype, comptime } } -pub noinline fn prettyWithPrinterFn(comptime fmt: string, args: anytype, comptime printFn: anytype, ctx: anytype) void { - if (enable_ansi_colors) { - printFn(ctx, comptime prettyFmt(fmt, true), args); - } else { - printFn(ctx, comptime prettyFmt(fmt, false), args); - } -} - pub noinline fn pretty(comptime fmt: string, args: anytype) void { prettyWithPrinter(fmt, args, print, .stdout); } diff --git a/src/resolver/resolve_path.zig b/src/resolver/resolve_path.zig index 941b73daa3..9c7a023fe2 100644 --- a/src/resolver/resolve_path.zig +++ b/src/resolver/resolve_path.zig @@ -37,6 +37,16 @@ inline fn nqlAtIndexCaseInsensitive(comptime string_count: comptime_int, index: return false; } +/// The given string contains separators that match the platform's path separator style. 
+pub fn hasPlatformPathSeparators(input_path: []const u8) bool { + if (bun.Environment.isWindows) { + // Windows accepts both forward and backward slashes as path separators + return bun.strings.indexOfAny(input_path, "\\/") != null; + } else { + return bun.strings.containsChar(input_path, '/'); + } +} + const IsSeparatorFunc = fn (char: u8) bool; const IsSeparatorFuncT = fn (comptime T: type, char: anytype) bool; const LastSeparatorFunction = fn (slice: []const u8) ?usize; diff --git a/src/safety/alloc.zig b/src/safety/alloc.zig index 6a0c6eec48..c6096df3da 100644 --- a/src/safety/alloc.zig +++ b/src/safety/alloc.zig @@ -25,7 +25,6 @@ const arena_vtable = blk: { fn hasPtr(alloc: Allocator) bool { return alloc.vtable == arena_vtable or bun.allocators.allocation_scope.isInstance(alloc) or - bun.MemoryReportingAllocator.isInstance(alloc) or ((comptime bun.Environment.isLinux) and LinuxMemFdAllocator.isInstance(alloc)) or bun.MaxHeapAllocator.isInstance(alloc) or alloc.vtable == bun.allocators.c_allocator.vtable or diff --git a/src/semver/Version.zig b/src/semver/Version.zig index ed18ba2910..c1e49881ca 100644 --- a/src/semver/Version.zig +++ b/src/semver/Version.zig @@ -91,7 +91,7 @@ pub fn VersionType(comptime IntType: type) type { other_buf: string, pub fn format(this: DiffFormatter, comptime fmt_: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { - if (!Output.enable_ansi_colors) { + if (!Output.enable_ansi_colors_stdout) { // print normally if no colors const formatter: Formatter = .{ .version = this.version, .input = this.buf }; return Formatter.format(formatter, fmt_, options, writer); diff --git a/src/string/immutable.zig b/src/string/immutable.zig index ce36729315..211f5484a0 100644 --- a/src/string/immutable.zig +++ b/src/string/immutable.zig @@ -414,6 +414,12 @@ pub fn indexOfSigned(self: string, str: string) i32 { return @as(i32, @intCast(i)); } +/// Returns last index of `char` before a character `before`. 
+pub fn lastIndexBeforeChar(in: []const u8, char: u8, before: u8) ?usize { + const before_pos = indexOfChar(in, before) orelse in.len; + return lastIndexOfChar(in[0..before_pos], char); +} + pub fn lastIndexOfChar(self: []const u8, char: u8) callconv(bun.callconv_inline) ?usize { if (comptime Environment.isLinux) { if (@inComptime()) { @@ -1132,6 +1138,15 @@ pub fn index(self: string, str: string) i32 { } } +/// Returns a substring starting at `start` up to the end of the string. +/// If `start` is greater than the string's length, returns an empty string. +pub fn substring(self: anytype, start: ?usize, stop: ?usize) @TypeOf(self) { + const sta = start orelse 0; + const sto = stop orelse self.len; + + return self[@min(sta, self.len)..@min(sto, self.len)]; +} + pub const ascii_vector_size = if (Environment.isWasm) 8 else 16; pub const ascii_u16_vector_size = if (Environment.isWasm) 4 else 8; pub const AsciiVectorInt = std.meta.Int(.unsigned, ascii_vector_size); @@ -1553,6 +1568,13 @@ pub fn trimPrefixComptime(comptime T: type, buffer: []const T, comptime prefix: buffer; } +pub fn trimSuffixComptime(buffer: []const u8, comptime suffix: anytype) []const u8 { + return if (hasSuffixComptime(buffer, suffix)) + buffer[0 .. 
buffer.len - suffix.len] + else + buffer; +} + /// Get the line number and the byte offsets of `line_range_count` above the desired line number /// The final element is the end index of the desired line const LineRange = struct { @@ -1759,6 +1781,10 @@ pub fn trim(slice: anytype, comptime values_to_strip: []const u8) @TypeOf(slice) return slice[begin..end]; } +pub fn trimSpaces(slice: anytype) @TypeOf(slice) { + return trim(slice, &whitespace_chars); +} + pub fn isAllWhitespace(slice: []const u8) bool { var begin: usize = 0; while (begin < slice.len and std.mem.indexOfScalar(u8, &whitespace_chars, slice[begin]) != null) : (begin += 1) {} @@ -2020,7 +2046,7 @@ pub fn concatWithLength( allocator: std.mem.Allocator, args: []const string, length: usize, -) ![]u8 { +) bun.OOM![]u8 { const out = try allocator.alloc(u8, length); var remain = out; for (args) |arg| { @@ -2034,7 +2060,7 @@ pub fn concatWithLength( pub fn concat( allocator: std.mem.Allocator, args: []const string, -) ![]u8 { +) bun.OOM![]u8 { var length: usize = 0; for (args) |arg| { length += arg.len; @@ -2342,7 +2368,6 @@ pub const toNTPath16 = paths_.toNTPath16; pub const toPath = paths_.toPath; pub const toPathMaybeDir = paths_.toPathMaybeDir; pub const toPathNormalized = paths_.toPathNormalized; -pub const toWDirNormalized = paths_.toWDirNormalized; pub const toWDirPath = paths_.toWDirPath; pub const toWPath = paths_.toWPath; pub const toWPathMaybeDir = paths_.toWPathMaybeDir; diff --git a/src/string/immutable/paths.zig b/src/string/immutable/paths.zig index 8cd11483b7..a541460f39 100644 --- a/src/string/immutable/paths.zig +++ b/src/string/immutable/paths.zig @@ -233,26 +233,6 @@ pub fn normalizeSlashesOnly(buf: []u8, utf8: []const u8, comptime desired_slash: return normalizeSlashesOnlyT(u8, buf, utf8, desired_slash, false); } -pub fn toWDirNormalized(wbuf: []u16, utf8: []const u8) [:0]const u16 { - var renormalized: ?*bun.PathBuffer = null; - defer if (renormalized) |r| bun.path_buffer_pool.put(r); - - 
var path_to_use = utf8; - - if (bun.strings.containsChar(utf8, '/')) { - renormalized = bun.path_buffer_pool.get(); - @memcpy(renormalized.?[0..utf8.len], utf8); - for (renormalized.?[0..utf8.len]) |*c| { - if (c.* == '/') { - c.* = '\\'; - } - } - path_to_use = renormalized.?[0..utf8.len]; - } - - return toWDirPath(wbuf, path_to_use); -} - pub fn toWPath(wbuf: []u16, utf8: []const u8) [:0]u16 { return toWPathMaybeDir(wbuf, utf8, false); } diff --git a/test/cli/install/__snapshots__/bun-install-dep.test.ts.snap b/test/cli/install/__snapshots__/bun-install-dep.test.ts.snap deleted file mode 100644 index 96abf0eb3e..0000000000 --- a/test/cli/install/__snapshots__/bun-install-dep.test.ts.snap +++ /dev/null @@ -1,397 +0,0 @@ -// Bun Snapshot v1, https://bun.sh/docs/test/snapshots - -exports[`npa @scoped/package 1`] = ` -{ - "name": "@scoped/package", - "version": { - "name": "@scoped/package", - "tag": "latest", - "type": "dist_tag", - }, -} -`; - -exports[`npa @scoped/package@1.0.0 1`] = ` -{ - "name": "@scoped/package", - "version": { - "alias": false, - "name": "@scoped/package", - "type": "npm", - "version": "==1.0.0", - }, -} -`; - -exports[`npa @scoped/package@1.0.0-beta.1 1`] = ` -{ - "name": "@scoped/package", - "version": { - "alias": false, - "name": "@scoped/package", - "type": "npm", - "version": "==1.0.0-beta.1", - }, -} -`; - -exports[`npa @scoped/package@1.0.0-beta.1+build.123 1`] = ` -{ - "name": "@scoped/package", - "version": { - "alias": false, - "name": "@scoped/package", - "type": "npm", - "version": "==1.0.0-beta.1+build.123", - }, -} -`; - -exports[`npa package 1`] = ` -{ - "name": "package", - "version": { - "name": "package", - "tag": "latest", - "type": "dist_tag", - }, -} -`; - -exports[`npa package@1.0.0 1`] = ` -{ - "name": "package", - "version": { - "alias": false, - "name": "package", - "type": "npm", - "version": "==1.0.0", - }, -} -`; - -exports[`npa package@1.0.0-beta.1 1`] = ` -{ - "name": "package", - "version": { - "alias": false, 
- "name": "package", - "type": "npm", - "version": "==1.0.0-beta.1", - }, -} -`; - -exports[`npa package@1.0.0-beta.1+build.123 1`] = ` -{ - "name": "package", - "version": { - "alias": false, - "name": "package", - "type": "npm", - "version": "==1.0.0-beta.1+build.123", - }, -} -`; - -exports[`npa bitbucket:dylan-conway/public-install-test 1`] = ` -{ - "name": "", - "version": { - "owner": "", - "ref": "", - "repo": "bitbucket:dylan-conway/public-install-test", - "type": "git", - }, -} -`; - -exports[`npa bitbucket.org:dylan-conway/public-install-test 1`] = ` -{ - "name": "", - "version": { - "owner": "", - "ref": "", - "repo": "bitbucket.org:dylan-conway/public-install-test", - "type": "git", - }, -} -`; - -exports[`npa bitbucket.com:dylan-conway/public-install-test 1`] = ` -{ - "name": "", - "version": { - "owner": "", - "ref": "", - "repo": "bitbucket.com:dylan-conway/public-install-test", - "type": "git", - }, -} -`; - -exports[`npa git@bitbucket.org:dylan-conway/public-install-test 1`] = ` -{ - "name": "", - "version": { - "owner": "", - "ref": "", - "repo": "git@bitbucket.org:dylan-conway/public-install-test", - "type": "git", - }, -} -`; - -exports[`npa foo/bar 1`] = ` -{ - "name": "", - "version": { - "owner": "foo", - "ref": "", - "repo": "bar", - "type": "github", - }, -} -`; - -exports[`npa gitlab:dylan-conway/public-install-test 1`] = ` -{ - "name": "", - "version": { - "owner": "", - "ref": "", - "repo": "gitlab:dylan-conway/public-install-test", - "type": "git", - }, -} -`; - -exports[`npa gitlab.com:dylan-conway/public-install-test 1`] = ` -{ - "name": "", - "version": { - "owner": "", - "ref": "", - "repo": "gitlab.com:dylan-conway/public-install-test", - "type": "git", - }, -} -`; - -exports[`npa http://localhost:5000/no-deps/-/no-deps-2.0.0.tgz 1`] = ` -{ - "name": "", - "version": { - "name": "", - "type": "tarball", - "url": "http://localhost:5000/no-deps/-/no-deps-2.0.0.tgz", - }, -} -`; - -exports[`npa 
https://registry.npmjs.org/no-deps/-/no-deps-2.0.0.tgz 1`] = ` -{ - "name": "", - "version": { - "name": "", - "type": "tarball", - "url": "https://registry.npmjs.org/no-deps/-/no-deps-2.0.0.tgz", - }, -} -`; - -exports[`npa file:./path/to/tarball.tgz 1`] = ` -{ - "name": "", - "version": { - "name": "", - "path": "./path/to/tarball.tgz", - "type": "tarball", - }, -} -`; - -exports[`npa ./path/to/tarball.tgz 1`] = ` -{ - "name": "", - "version": { - "name": "", - "path": "./path/to/tarball.tgz", - "type": "tarball", - }, -} -`; - -exports[`npa foo/bar 2`] = ` -{ - "name": "", - "version": { - "owner": "foo", - "ref": "", - "repo": "bar", - "type": "github", - }, -} -`; - -exports[`npa github:dylan-conway/public-install-test 1`] = ` -{ - "name": "", - "version": { - "owner": "dylan-conway", - "ref": "", - "repo": "public-install-test", - "type": "github", - }, -} -`; - -exports[`npa git@github.com:dylan-conway/public-install-test 1`] = ` -{ - "name": "", - "version": { - "owner": "", - "ref": "", - "repo": "git@github.com:dylan-conway/public-install-test", - "type": "git", - }, -} -`; - -exports[`npa https://github.com/dylan-conway/public-install-test 1`] = ` -{ - "name": "", - "version": { - "owner": "dylan-conway", - "ref": "", - "repo": "public-install-test", - "type": "github", - }, -} -`; - -exports[`npa https://github.com/dylan-conway/public-install-test.git 1`] = ` -{ - "name": "", - "version": { - "owner": "dylan-conway", - "ref": "", - "repo": "public-install-test", - "type": "github", - }, -} -`; - -exports[`npa https://github.com/dylan-conway/public-install-test.git#semver:^1.0.0 1`] = ` -{ - "name": "", - "version": { - "owner": "", - "ref": "semver:^1.0.0", - "repo": "https://github.com/dylan-conway/public-install-test.git", - "type": "git", - }, -} -`; - -exports[`dependencies: {"foo": "1.2.3"} 1`] = ` -{ - "alias": false, - "name": "foo", - "type": "npm", - "version": "==1.2.3-foo", -} -`; - -exports[`dependencies: {"foo": "latest"} 1`] = ` -{ - 
"name": "foo", - "tag": "latest", - "type": "dist_tag", -} -`; - -exports[`dependencies: {"foo": "workspace:*"} 1`] = ` -{ - "name": "*foo", - "type": "workspace", -} -`; - -exports[`dependencies: {"foo": "workspace:^1.0.0"} 1`] = ` -{ - "name": "^1.0.0foo", - "type": "workspace", -} -`; - -exports[`dependencies: {"foo": "workspace:1.0.0"} 1`] = ` -{ - "name": "1.0.0foo", - "type": "workspace", -} -`; - -exports[`dependencies: {"foo": "workspace:1.0.0-beta.1"} 1`] = ` -{ - "name": "1.0.0-beta.1foo", - "type": "workspace", -} -`; - -exports[`dependencies: {"foo": "workspace:1.0.0-beta.1+build.123"} 1`] = ` -{ - "name": "1.0.0-beta.1+build.123foo", - "type": "workspace", -} -`; - -exports[`dependencies: {"foo": "workspace:1.0.0-beta.1+build.123"} 2`] = ` -{ - "name": "1.0.0-beta.1+build.123foo", - "type": "workspace", -} -`; - -exports[`dependencies: {"foo": "workspace:1.0.0-beta.1+build.123"} 3`] = ` -{ - "name": "1.0.0-beta.1+build.123foo", - "type": "workspace", -} -`; - -exports[`dependencies: {"bar": "^1.0.0"} 1`] = ` -{ - "alias": false, - "name": "bar", - "type": "npm", - "version": ">=1.0.0-bar <2.0.0", -} -`; - -exports[`dependencies: {"bar": "~1.0.0"} 1`] = ` -{ - "alias": false, - "name": "bar", - "type": "npm", - "version": ">=1.0.0-bar <1.1.0", -} -`; - -exports[`dependencies: {"bar": "> 1.0.0 < 2.0.0"} 1`] = ` -{ - "alias": false, - "name": "bar", - "type": "npm", - "version": ">1.0.0 && <2.0.0-bar", -} -`; - -exports[`dependencies: {"bar": "1.0.0 - 2.0.0"} 1`] = ` -{ - "alias": false, - "name": "bar", - "type": "npm", - "version": ">=1.0.0 <=2.0.0-bar", -} -`; diff --git a/test/cli/install/__snapshots__/migrate-bun-lockb-v2.test.ts.snap b/test/cli/install/__snapshots__/migrate-bun-lockb-v2.test.ts.snap new file mode 100644 index 0000000000..27d3a4899d --- /dev/null +++ b/test/cli/install/__snapshots__/migrate-bun-lockb-v2.test.ts.snap @@ -0,0 +1,2087 @@ +// Bun Snapshot v1, https://bun.sh/docs/test/snapshots + +exports[`migrate migrate-bun-lockb-v2 1`] 
= ` +{ + "dependencies": [ + { + "behavior": { + "prod": true, + }, + "id": 0, + "literal": "^1.0.0", + "name": "is-even", + "npm": { + "name": "is-even", + "version": ">=1.0.0 <2.0.0", + }, + "package_id": 2, + }, + { + "behavior": { + "prod": true, + }, + "id": 1, + "literal": "~3.7.1", + "name": "jquery", + "npm": { + "name": "jquery", + "version": ">=3.7.1 <3.8.0", + }, + "package_id": 1, + }, + { + "behavior": { + "prod": true, + }, + "id": 2, + "literal": "^0.1.2", + "name": "is-odd", + "npm": { + "name": "is-odd", + "version": ">=0.1.2 <0.2.0", + }, + "package_id": 3, + }, + { + "behavior": { + "prod": true, + }, + "id": 3, + "literal": "^3.0.0", + "name": "is-number", + "npm": { + "name": "is-number", + "version": ">=3.0.0 <4.0.0", + }, + "package_id": 4, + }, + { + "behavior": { + "prod": true, + }, + "id": 4, + "literal": "^3.0.2", + "name": "kind-of", + "npm": { + "name": "kind-of", + "version": ">=3.0.2 <4.0.0", + }, + "package_id": 5, + }, + { + "behavior": { + "prod": true, + }, + "id": 5, + "literal": "^1.1.5", + "name": "is-buffer", + "npm": { + "name": "is-buffer", + "version": ">=1.1.5 <2.0.0", + }, + "package_id": 6, + }, + ], + "format": "v3", + "meta_hash": "b9927bfb7908a620d68ada06aeac0e7bf460658dbcef45edbfb50f09003dd7df", + "package_index": { + "is-buffer": 6, + "is-even": 2, + "is-number": 4, + "is-odd": 3, + "jquery": 1, + "kind-of": 5, + "migrate-bun-lockb-v2": 0, + }, + "packages": [ + { + "bin": null, + "dependencies": [ + 0, + 1, + ], + "id": 0, + "integrity": null, + "man_dir": "", + "name": "migrate-bun-lockb-v2", + "name_hash": "4533838613693112248", + "origin": "local", + "resolution": { + "resolved": "", + "tag": "root", + "value": "", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [], + "id": 1, + "integrity": "sha512-m4avr8yL8kmFN8psrbFFFmB/If14iN5o9nw/NgnnM+kybDJpRsAynV2BsfpTYrTRysYUdADVD7CkUUizgkpLfg==", + "man_dir": "", + "name": "jquery", + "name_hash": "265418099762006574", + "origin": "npm", + "resolution": 
{ + "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.7.1.tgz", + "tag": "npm", + "value": "3.7.1", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [ + 2, + ], + "id": 2, + "integrity": "sha512-LEhnkAdJqic4Dbqn58A0y52IXoHWlsueqQkKfMfdEnIYG8A1sm/GHidKkS6yvXlMoRrkM34csHnXQtOqcb+Jzg==", + "man_dir": "", + "name": "is-even", + "name_hash": "7770159972461911348", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/is-even/-/is-even-1.0.0.tgz", + "tag": "npm", + "value": "1.0.0", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [ + 3, + ], + "id": 3, + "integrity": "sha512-Ri7C2K7o5IrUU9UEI8losXJCCD/UtsaIrkR5sxIcFg4xQ9cRJXlWA5DQvTE0yDc0krvSNLsRGXN11UPS6KyfBw==", + "man_dir": "", + "name": "is-odd", + "name_hash": "1288785352440382779", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/is-odd/-/is-odd-0.1.2.tgz", + "tag": "npm", + "value": "0.1.2", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [ + 4, + ], + "id": 4, + "integrity": "sha512-4cboCqIpliH+mAvFNegjZQ4kgKc3ZUhQVr3HvWbSh5q3WH2v82ct+T2Y1hdU5Gdtorx/cLifQjqCbL7bpznLTg==", + "man_dir": "", + "name": "is-number", + "name_hash": "17443668381655379754", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", + "tag": "npm", + "value": "3.0.0", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [ + 5, + ], + "id": 5, + "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "man_dir": "", + "name": "kind-of", + "name_hash": "13897574220744325158", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "tag": "npm", + "value": "3.2.2", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [], + "id": 6, + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + 
"man_dir": "", + "name": "is-buffer", + "name_hash": "3876533396633871001", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "tag": "npm", + "value": "1.1.6", + }, + "scripts": {}, + }, + ], + "trees": [ + { + "dependencies": { + "is-buffer": { + "id": 5, + "package_id": 6, + }, + "is-even": { + "id": 0, + "package_id": 2, + }, + "is-number": { + "id": 3, + "package_id": 4, + }, + "is-odd": { + "id": 2, + "package_id": 3, + }, + "jquery": { + "id": 1, + "package_id": 1, + }, + "kind-of": { + "id": 4, + "package_id": 5, + }, + }, + "depth": 0, + "id": 0, + "path": "node_modules", + }, + ], + "workspace_paths": {}, + "workspace_versions": {}, +} +`; + +exports[`migrate migrate-bun-lockb-v2-most-features 1`] = ` +{ + "dependencies": [ + { + "behavior": { + "workspace": true, + }, + "id": 0, + "literal": "packages/pkg1", + "name": "pkg-wat", + "package_id": 44, + "workspace": "packages/pkg1", + }, + { + "behavior": { + "workspace": true, + }, + "id": 1, + "literal": "packages/pkg2", + "name": "pkg-wat-2", + "package_id": 42, + "workspace": "packages/pkg2", + }, + { + "behavior": { + "dev": true, + }, + "id": 2, + "literal": "0.25.10", + "name": "esbuild", + "npm": { + "name": "esbuild", + "version": "==0.25.10", + }, + "package_id": 15, + }, + { + "behavior": { + "dev": true, + }, + "catalog": { + "name": "react", + "version": "catalog:", + }, + "id": 3, + "literal": "catalog:", + "name": "react", + "package_id": 14, + }, + { + "behavior": { + "dev": true, + }, + "id": 4, + "literal": "^3.22.4", + "name": "zod", + "npm": { + "name": "zod", + "version": ">=3.22.4 <4.0.0", + }, + "package_id": 13, + }, + { + "behavior": { + "optional": true, + }, + "id": 5, + "literal": "^7.0.0", + "name": "is-number", + "npm": { + "name": "is-number", + "version": ">=7.0.0 <8.0.0", + }, + "package_id": 12, + }, + { + "behavior": { + "prod": true, + }, + "id": 6, + "literal": "^0.0.4", + "name": "false", + "npm": { + 
"name": "false", + "version": ">=0.0.4 <0.0.5", + }, + "package_id": 11, + }, + { + "behavior": { + "prod": true, + }, + "id": 7, + "literal": "~3.7.1", + "name": "jquery", + "npm": { + "name": "jquery", + "version": ">=3.7.1 <3.8.0", + }, + "package_id": 10, + }, + { + "behavior": { + "prod": true, + }, + "id": 8, + "literal": "^0.23.0", + "name": "scheduler", + "npm": { + "name": "scheduler", + "version": ">=0.23.0 <0.24.0", + }, + "package_id": 6, + }, + { + "behavior": { + "peer": true, + }, + "id": 9, + "literal": "^1.0.0", + "name": "is-even", + "npm": { + "name": "is-even", + "version": ">=1.0.0 <2.0.0", + }, + "package_id": 1, + }, + { + "behavior": { + "optional": true, + "peer": true, + }, + "id": 10, + "literal": "^4.17.21", + "name": "lodash", + "npm": { + "name": "lodash", + "version": ">=4.17.21 <5.0.0", + }, + "package_id": null, + }, + { + "behavior": { + "prod": true, + }, + "id": 11, + "literal": "^0.1.2", + "name": "is-odd", + "npm": { + "name": "is-odd", + "version": ">=0.1.2 <0.2.0", + }, + "package_id": 2, + }, + { + "behavior": { + "prod": true, + }, + "id": 12, + "literal": "^3.0.0", + "name": "is-number", + "npm": { + "name": "is-number", + "version": ">=3.0.0 <4.0.0", + }, + "package_id": 3, + }, + { + "behavior": { + "prod": true, + }, + "id": 13, + "literal": "^3.0.2", + "name": "kind-of", + "npm": { + "name": "kind-of", + "version": ">=3.0.2 <4.0.0", + }, + "package_id": 4, + }, + { + "behavior": { + "prod": true, + }, + "id": 14, + "literal": "^1.1.5", + "name": "is-buffer", + "npm": { + "name": "is-buffer", + "version": ">=1.1.5 <2.0.0", + }, + "package_id": 5, + }, + { + "behavior": { + "prod": true, + }, + "id": 15, + "literal": "^1.1.0", + "name": "loose-envify", + "npm": { + "name": "loose-envify", + "version": ">=1.1.0 <2.0.0", + }, + "package_id": 8, + }, + { + "behavior": { + "prod": true, + }, + "id": 16, + "literal": "^4.1.1", + "name": "object-assign", + "npm": { + "name": "object-assign", + "version": ">=4.1.1 <5.0.0", + }, 
+ "package_id": 7, + }, + { + "behavior": { + "prod": true, + }, + "id": 17, + "literal": "^3.0.0 || ^4.0.0", + "name": "js-tokens", + "npm": { + "name": "js-tokens", + "version": ">=3.0.0 <4.0.0 || >=4.0.0 <5.0.0 && >=4.0.0 <5.0.0", + }, + "package_id": 9, + }, + { + "behavior": { + "optional": true, + }, + "id": 18, + "literal": "0.25.10", + "name": "@esbuild/aix-ppc64", + "npm": { + "name": "@esbuild/aix-ppc64", + "version": "==0.25.10", + }, + "package_id": 41, + }, + { + "behavior": { + "optional": true, + }, + "id": 19, + "literal": "0.25.10", + "name": "@esbuild/android-arm", + "npm": { + "name": "@esbuild/android-arm", + "version": "==0.25.10", + }, + "package_id": 40, + }, + { + "behavior": { + "optional": true, + }, + "id": 20, + "literal": "0.25.10", + "name": "@esbuild/android-arm64", + "npm": { + "name": "@esbuild/android-arm64", + "version": "==0.25.10", + }, + "package_id": 39, + }, + { + "behavior": { + "optional": true, + }, + "id": 21, + "literal": "0.25.10", + "name": "@esbuild/android-x64", + "npm": { + "name": "@esbuild/android-x64", + "version": "==0.25.10", + }, + "package_id": 38, + }, + { + "behavior": { + "optional": true, + }, + "id": 22, + "literal": "0.25.10", + "name": "@esbuild/darwin-arm64", + "npm": { + "name": "@esbuild/darwin-arm64", + "version": "==0.25.10", + }, + "package_id": 37, + }, + { + "behavior": { + "optional": true, + }, + "id": 23, + "literal": "0.25.10", + "name": "@esbuild/darwin-x64", + "npm": { + "name": "@esbuild/darwin-x64", + "version": "==0.25.10", + }, + "package_id": 36, + }, + { + "behavior": { + "optional": true, + }, + "id": 24, + "literal": "0.25.10", + "name": "@esbuild/freebsd-arm64", + "npm": { + "name": "@esbuild/freebsd-arm64", + "version": "==0.25.10", + }, + "package_id": 35, + }, + { + "behavior": { + "optional": true, + }, + "id": 25, + "literal": "0.25.10", + "name": "@esbuild/freebsd-x64", + "npm": { + "name": "@esbuild/freebsd-x64", + "version": "==0.25.10", + }, + "package_id": 34, + }, + { 
+ "behavior": { + "optional": true, + }, + "id": 26, + "literal": "0.25.10", + "name": "@esbuild/linux-arm", + "npm": { + "name": "@esbuild/linux-arm", + "version": "==0.25.10", + }, + "package_id": 33, + }, + { + "behavior": { + "optional": true, + }, + "id": 27, + "literal": "0.25.10", + "name": "@esbuild/linux-arm64", + "npm": { + "name": "@esbuild/linux-arm64", + "version": "==0.25.10", + }, + "package_id": 32, + }, + { + "behavior": { + "optional": true, + }, + "id": 28, + "literal": "0.25.10", + "name": "@esbuild/linux-ia32", + "npm": { + "name": "@esbuild/linux-ia32", + "version": "==0.25.10", + }, + "package_id": 31, + }, + { + "behavior": { + "optional": true, + }, + "id": 29, + "literal": "0.25.10", + "name": "@esbuild/linux-loong64", + "npm": { + "name": "@esbuild/linux-loong64", + "version": "==0.25.10", + }, + "package_id": 30, + }, + { + "behavior": { + "optional": true, + }, + "id": 30, + "literal": "0.25.10", + "name": "@esbuild/linux-mips64el", + "npm": { + "name": "@esbuild/linux-mips64el", + "version": "==0.25.10", + }, + "package_id": 29, + }, + { + "behavior": { + "optional": true, + }, + "id": 31, + "literal": "0.25.10", + "name": "@esbuild/linux-ppc64", + "npm": { + "name": "@esbuild/linux-ppc64", + "version": "==0.25.10", + }, + "package_id": 28, + }, + { + "behavior": { + "optional": true, + }, + "id": 32, + "literal": "0.25.10", + "name": "@esbuild/linux-riscv64", + "npm": { + "name": "@esbuild/linux-riscv64", + "version": "==0.25.10", + }, + "package_id": 27, + }, + { + "behavior": { + "optional": true, + }, + "id": 33, + "literal": "0.25.10", + "name": "@esbuild/linux-s390x", + "npm": { + "name": "@esbuild/linux-s390x", + "version": "==0.25.10", + }, + "package_id": 26, + }, + { + "behavior": { + "optional": true, + }, + "id": 34, + "literal": "0.25.10", + "name": "@esbuild/linux-x64", + "npm": { + "name": "@esbuild/linux-x64", + "version": "==0.25.10", + }, + "package_id": 25, + }, + { + "behavior": { + "optional": true, + }, + "id": 
35, + "literal": "0.25.10", + "name": "@esbuild/netbsd-arm64", + "npm": { + "name": "@esbuild/netbsd-arm64", + "version": "==0.25.10", + }, + "package_id": 24, + }, + { + "behavior": { + "optional": true, + }, + "id": 36, + "literal": "0.25.10", + "name": "@esbuild/netbsd-x64", + "npm": { + "name": "@esbuild/netbsd-x64", + "version": "==0.25.10", + }, + "package_id": 23, + }, + { + "behavior": { + "optional": true, + }, + "id": 37, + "literal": "0.25.10", + "name": "@esbuild/openbsd-arm64", + "npm": { + "name": "@esbuild/openbsd-arm64", + "version": "==0.25.10", + }, + "package_id": 22, + }, + { + "behavior": { + "optional": true, + }, + "id": 38, + "literal": "0.25.10", + "name": "@esbuild/openbsd-x64", + "npm": { + "name": "@esbuild/openbsd-x64", + "version": "==0.25.10", + }, + "package_id": 21, + }, + { + "behavior": { + "optional": true, + }, + "id": 39, + "literal": "0.25.10", + "name": "@esbuild/openharmony-arm64", + "npm": { + "name": "@esbuild/openharmony-arm64", + "version": "==0.25.10", + }, + "package_id": 20, + }, + { + "behavior": { + "optional": true, + }, + "id": 40, + "literal": "0.25.10", + "name": "@esbuild/sunos-x64", + "npm": { + "name": "@esbuild/sunos-x64", + "version": "==0.25.10", + }, + "package_id": 19, + }, + { + "behavior": { + "optional": true, + }, + "id": 41, + "literal": "0.25.10", + "name": "@esbuild/win32-arm64", + "npm": { + "name": "@esbuild/win32-arm64", + "version": "==0.25.10", + }, + "package_id": 18, + }, + { + "behavior": { + "optional": true, + }, + "id": 42, + "literal": "0.25.10", + "name": "@esbuild/win32-ia32", + "npm": { + "name": "@esbuild/win32-ia32", + "version": "==0.25.10", + }, + "package_id": 17, + }, + { + "behavior": { + "optional": true, + }, + "id": 43, + "literal": "0.25.10", + "name": "@esbuild/win32-x64", + "npm": { + "name": "@esbuild/win32-x64", + "version": "==0.25.10", + }, + "package_id": 16, + }, + { + "behavior": { + "prod": true, + }, + "id": 44, + "literal": "6.0.3", + "name": "kind-of", + 
"npm": { + "name": "kind-of", + "version": "==6.0.3", + }, + "package_id": 43, + }, + { + "behavior": { + "prod": true, + }, + "id": 45, + "literal": "3.7.0", + "name": "jquery", + "npm": { + "name": "jquery", + "version": "==3.7.0", + }, + "package_id": 45, + }, + { + "behavior": { + "prod": true, + }, + "id": 46, + "literal": "workspace:", + "name": "pkg-wat-2", + "package_id": 42, + "workspace": "", + }, + ], + "format": "v3", + "meta_hash": "570d874a26ea3cde290c102a78c1a6b51ff33d19f52c50b0dbc0b125bd4d8233", + "package_index": { + "@esbuild/aix-ppc64": 41, + "@esbuild/android-arm": 40, + "@esbuild/android-arm64": 39, + "@esbuild/android-x64": 38, + "@esbuild/darwin-arm64": 37, + "@esbuild/darwin-x64": 36, + "@esbuild/freebsd-arm64": 35, + "@esbuild/freebsd-x64": 34, + "@esbuild/linux-arm": 33, + "@esbuild/linux-arm64": 32, + "@esbuild/linux-ia32": 31, + "@esbuild/linux-loong64": 30, + "@esbuild/linux-mips64el": 29, + "@esbuild/linux-ppc64": 28, + "@esbuild/linux-riscv64": 27, + "@esbuild/linux-s390x": 26, + "@esbuild/linux-x64": 25, + "@esbuild/netbsd-arm64": 24, + "@esbuild/netbsd-x64": 23, + "@esbuild/openbsd-arm64": 22, + "@esbuild/openbsd-x64": 21, + "@esbuild/openharmony-arm64": 20, + "@esbuild/sunos-x64": 19, + "@esbuild/win32-arm64": 18, + "@esbuild/win32-ia32": 17, + "@esbuild/win32-x64": 16, + "esbuild": 15, + "false": 11, + "is-buffer": 5, + "is-even": 1, + "is-number": [ + 12, + 3, + ], + "is-odd": 2, + "jquery": [ + 10, + 45, + ], + "js-tokens": 9, + "kind-of": [ + 43, + 4, + ], + "loose-envify": 8, + "migrate-everything": 0, + "object-assign": 7, + "pkg-wat": 44, + "pkg-wat-2": 42, + "react": 14, + "scheduler": 6, + "zod": 13, + }, + "packages": [ + { + "bin": null, + "dependencies": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + ], + "id": 0, + "integrity": null, + "man_dir": "", + "name": "migrate-everything", + "name_hash": "11252688678730182607", + "origin": "local", + "resolution": { + "resolved": "", + "tag": "root", + "value": 
"", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [ + 11, + ], + "id": 1, + "integrity": "sha512-LEhnkAdJqic4Dbqn58A0y52IXoHWlsueqQkKfMfdEnIYG8A1sm/GHidKkS6yvXlMoRrkM34csHnXQtOqcb+Jzg==", + "man_dir": "", + "name": "is-even", + "name_hash": "7770159972461911348", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/is-even/-/is-even-1.0.0.tgz", + "tag": "npm", + "value": "1.0.0", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [ + 12, + ], + "id": 2, + "integrity": "sha512-Ri7C2K7o5IrUU9UEI8losXJCCD/UtsaIrkR5sxIcFg4xQ9cRJXlWA5DQvTE0yDc0krvSNLsRGXN11UPS6KyfBw==", + "man_dir": "", + "name": "is-odd", + "name_hash": "1288785352440382779", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/is-odd/-/is-odd-0.1.2.tgz", + "tag": "npm", + "value": "0.1.2", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [ + 13, + ], + "id": 3, + "integrity": "sha512-4cboCqIpliH+mAvFNegjZQ4kgKc3ZUhQVr3HvWbSh5q3WH2v82ct+T2Y1hdU5Gdtorx/cLifQjqCbL7bpznLTg==", + "man_dir": "", + "name": "is-number", + "name_hash": "17443668381655379754", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", + "tag": "npm", + "value": "3.0.0", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [ + 14, + ], + "id": 4, + "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", + "man_dir": "", + "name": "kind-of", + "name_hash": "13897574220744325158", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "tag": "npm", + "value": "3.2.2", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [], + "id": 5, + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "man_dir": "", + "name": "is-buffer", + "name_hash": "3876533396633871001", + "origin": "npm", + 
"resolution": { + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "tag": "npm", + "value": "1.1.6", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [ + 15, + 16, + ], + "id": 6, + "integrity": "sha512-XegIgta1bIaz2LdaL6eg1GEcE42g0BY9qFXCqlZ/+s2MuEKfigFCW6DEGBlZzeVFlwDmVusrWEyFtBo4sbkkdA==", + "man_dir": "", + "name": "scheduler", + "name_hash": "6246319597786948434", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.20.0.tgz", + "tag": "npm", + "value": "0.20.0", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [], + "id": 7, + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "man_dir": "", + "name": "object-assign", + "name_hash": "741501977461426514", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "tag": "npm", + "value": "4.1.1", + }, + "scripts": {}, + }, + { + "bin": { + "file": "cli.js", + "name": "loose-envify", + }, + "dependencies": [ + 17, + ], + "id": 8, + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "man_dir": "", + "name": "loose-envify", + "name_hash": "3112622411417245442", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "tag": "npm", + "value": "1.4.0", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [], + "id": 9, + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "man_dir": "", + "name": "js-tokens", + "name_hash": "8072375596980283624", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "tag": "npm", + "value": "4.0.0", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [], + "id": 10, + "integrity": 
"sha512-m4avr8yL8kmFN8psrbFFFmB/If14iN5o9nw/NgnnM+kybDJpRsAynV2BsfpTYrTRysYUdADVD7CkUUizgkpLfg==", + "man_dir": "", + "name": "jquery", + "name_hash": "265418099762006574", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.7.1.tgz", + "tag": "npm", + "value": "3.7.1", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [], + "id": 11, + "integrity": "sha512-CPAG3ud9Cz8NvLKlsTLr/F1aoJL6tPyhNTzaDFzS5CX6id21hVuS5sBK8wlTLuvFklpvH1idvPbW8Lbri3G7qA==", + "man_dir": "", + "name": "false", + "name_hash": "14246137032213060424", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/false/-/false-0.0.4.tgz", + "tag": "npm", + "value": "0.0.4", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [], + "id": 12, + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "man_dir": "", + "name": "is-number", + "name_hash": "17443668381655379754", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "tag": "npm", + "value": "7.0.0", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [], + "id": 13, + "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + "man_dir": "", + "name": "zod", + "name_hash": "13942938047053248045", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "tag": "npm", + "value": "3.25.76", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [], + "id": 14, + "integrity": "sha512-w8nqGImo45dmMIfljjMwOGtbmC/mk4CMYhWIicdSflH91J9TyCyczcPFXJzrZ/ZXcgGRFeP6BU0BEJTw6tZdfQ==", + "man_dir": "", + "name": "react", + "name_hash": "10719851453835962256", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/react/-/react-19.1.1.tgz", + "tag": "npm", + "value": "19.1.1", + }, + "scripts": {}, + }, + { 
+ "bin": { + "file": "bin/esbuild", + "name": "esbuild", + }, + "dependencies": [ + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + ], + "id": 15, + "integrity": "sha512-9RiGKvCwaqxO2owP61uQ4BgNborAQskMR6QusfWzQqv7AZOg5oGehdY2pRJMTKuwxd1IDBP4rSbI5lHzU7SMsQ==", + "man_dir": "", + "name": "esbuild", + "name_hash": "13254946337092084496", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "x64", + ], + "bin": null, + "dependencies": [], + "id": 16, + "integrity": "sha512-9KpxSVFCu0iK1owoez6aC/s/EdUQLDN3adTxGCqxMVhrPDj6bt5dbrHDXUuq+Bs2vATFBBrQS5vdQ/Ed2P+nbw==", + "man_dir": "", + "name": "@esbuild/win32-x64", + "name_hash": "8841853191689358390", + "origin": "npm", + "os": [ + "win32", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "ia32", + ], + "bin": null, + "dependencies": [], + "id": 17, + "integrity": "sha512-QHPDbKkrGO8/cz9LKVnJU22HOi4pxZnZhhA2HYHez5Pz4JeffhDjf85E57Oyco163GnzNCVkZK0b/n4Y0UHcSw==", + "man_dir": "", + "name": "@esbuild/win32-ia32", + "name_hash": "14741409370801574152", + "origin": "npm", + "os": [ + "win32", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "arm64", + ], + "bin": null, + "dependencies": [], + "id": 18, + "integrity": "sha512-ah+9b59KDTSfpaCg6VdJoOQvKjI33nTaQr4UluQwW7aEwZQsbMCfTmfEO4VyewOxx4RaDT/xCy9ra2GPWmO7Kw==", + "man_dir": "", + "name": "@esbuild/win32-arm64", + "name_hash": "10616572549607257800", + "origin": "npm", + "os": [ + "win32", + ], + "resolution": { + "resolved": 
"https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "x64", + ], + "bin": null, + "dependencies": [], + "id": 19, + "integrity": "sha512-fswk3XT0Uf2pGJmOpDB7yknqhVkJQkAQOcW/ccVOtfx05LkbWOaRAtn5SaqXypeKQra1QaEa841PgrSL9ubSPQ==", + "man_dir": "", + "name": "@esbuild/sunos-x64", + "name_hash": "17818235823914608065", + "origin": "npm", + "os": [ + "sunos", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "arm64", + ], + "bin": null, + "dependencies": [], + "id": 20, + "integrity": "sha512-AVTSBhTX8Y/Fz6OmIVBip9tJzZEUcY8WLh7I59+upa5/GPhh2/aM6bvOMQySspnCCHvFi79kMtdJS1w0DXAeag==", + "man_dir": "", + "name": "@esbuild/openharmony-arm64", + "name_hash": "14507817562898589114", + "origin": "npm", + "os": [], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "x64", + ], + "bin": null, + "dependencies": [], + "id": 21, + "integrity": "sha512-XkA4frq1TLj4bEMB+2HnI0+4RnjbuGZfet2gs/LNs5Hc7D89ZQBHQ0gL2ND6Lzu1+QVkjp3x1gIcPKzRNP8bXw==", + "man_dir": "", + "name": "@esbuild/openbsd-x64", + "name_hash": "9843335422580374419", + "origin": "npm", + "os": [ + "openbsd", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "arm64", + ], + "bin": null, + "dependencies": [], + "id": 22, + "integrity": "sha512-5Se0VM9Wtq797YFn+dLimf2Zx6McttsH2olUBsDml+lm0GOCRVebRWUvDtkY4BWYv/3NgzS8b/UM3jQNh5hYyw==", + "man_dir": "", + "name": "@esbuild/openbsd-arm64", + "name_hash": "7084930640788656434", + "origin": "npm", + "os": [ + "openbsd", + ], + "resolution": { 
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "x64", + ], + "bin": null, + "dependencies": [], + "id": 23, + "integrity": "sha512-7RTytDPGU6fek/hWuN9qQpeGPBZFfB4zZgcz2VK2Z5VpdUxEI8JKYsg3JfO0n/Z1E/6l05n0unDCNc4HnhQGig==", + "man_dir": "", + "name": "@esbuild/netbsd-x64", + "name_hash": "8663255934676239496", + "origin": "npm", + "os": [], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "arm64", + ], + "bin": null, + "dependencies": [], + "id": 24, + "integrity": "sha512-AKQM3gfYfSW8XRk8DdMCzaLUFB15dTrZfnX8WXQoOUpUBQ+NaAFCP1kPS/ykbbGYz7rxn0WS48/81l9hFl3u4A==", + "man_dir": "", + "name": "@esbuild/netbsd-arm64", + "name_hash": "5074083378749729580", + "origin": "npm", + "os": [], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "x64", + ], + "bin": null, + "dependencies": [], + "id": 25, + "integrity": "sha512-QSX81KhFoZGwenVyPoberggdW1nrQZSvfVDAIUXr3WqLRZGZqWk/P4T8p2SP+de2Sr5HPcvjhcJzEiulKgnxtA==", + "man_dir": "", + "name": "@esbuild/linux-x64", + "name_hash": "13544082894153357031", + "origin": "npm", + "os": [ + "linux", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "s390x", + ], + "bin": null, + "dependencies": [], + "id": 26, + "integrity": "sha512-3BBSbgzuB9ajLoVZk0mGu+EHlBwkusRmeNYdqmznmMc9zGASFjSsxgkNsqmXugpPk00gJ0JNKh/97nxmjctdew==", + "man_dir": "", + "name": "@esbuild/linux-s390x", + "name_hash": "17731831782316971618", + "origin": "npm", + "os": [ + "linux", + ], + "resolution": { + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [], + "bin": null, + "dependencies": [], + "id": 27, + "integrity": "sha512-FE557XdZDrtX8NMIeA8LBJX3dC2M8VGXwfrQWU7LB5SLOajfJIxmSdyL/gU1m64Zs9CBKvm4UAuBp5aJ8OgnrA==", + "man_dir": "", + "name": "@esbuild/linux-riscv64", + "name_hash": "4735976835754647660", + "origin": "npm", + "os": [ + "linux", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "ppc64", + ], + "bin": null, + "dependencies": [], + "id": 28, + "integrity": "sha512-NLinzzOgZQsGpsTkEbdJTCanwA5/wozN9dSgEl12haXJBzMTpssebuXR42bthOF3z7zXFWH1AmvWunUCkBE4EA==", + "man_dir": "", + "name": "@esbuild/linux-ppc64", + "name_hash": "11665320297089208874", + "origin": "npm", + "os": [ + "linux", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [], + "bin": null, + "dependencies": [], + "id": 29, + "integrity": "sha512-ab6eiuCwoMmYDyTnyptoKkVS3k8fy/1Uvq7Dj5czXI6DF2GqD2ToInBI0SHOp5/X1BdZ26RKc5+qjQNGRBelRA==", + "man_dir": "", + "name": "@esbuild/linux-mips64el", + "name_hash": "5758178421336220113", + "origin": "npm", + "os": [ + "linux", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [], + "bin": null, + "dependencies": [], + "id": 30, + "integrity": "sha512-xoSphrd4AZda8+rUDDfD9J6FUMjrkTz8itpTITM4/xgerAZZcFW7Dv+sun7333IfKxGG8gAq+3NbfEMJfiY+Eg==", + "man_dir": "", + "name": "@esbuild/linux-loong64", + "name_hash": "11252031954434475691", + "origin": "npm", + "os": [ + "linux", + ], + "resolution": { + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "ia32", + ], + "bin": null, + "dependencies": [], + "id": 31, + "integrity": "sha512-NrSCx2Kim3EnnWgS4Txn0QGt0Xipoumb6z6sUtl5bOEZIVKhzfyp/Lyw4C1DIYvzeW/5mWYPBFJU3a/8Yr75DQ==", + "man_dir": "", + "name": "@esbuild/linux-ia32", + "name_hash": "15142519142185085346", + "origin": "npm", + "os": [ + "linux", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "arm64", + ], + "bin": null, + "dependencies": [], + "id": 32, + "integrity": "sha512-5luJWN6YKBsawd5f9i4+c+geYiVEw20FVW5x0v1kEMWNq8UctFjDiMATBxLvmmHA4bf7F6hTRaJgtghFr9iziQ==", + "man_dir": "", + "name": "@esbuild/linux-arm64", + "name_hash": "13968642332621702107", + "origin": "npm", + "os": [ + "linux", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "arm", + ], + "bin": null, + "dependencies": [], + "id": 33, + "integrity": "sha512-oR31GtBTFYCqEBALI9r6WxoU/ZofZl962pouZRTEYECvNF/dtXKku8YXcJkhgK/beU+zedXfIzHijSRapJY3vg==", + "man_dir": "", + "name": "@esbuild/linux-arm", + "name_hash": "16603934224415504745", + "origin": "npm", + "os": [ + "linux", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "x64", + ], + "bin": null, + "dependencies": [], + "id": 34, + "integrity": "sha512-LLgJfHJk014Aa4anGDbh8bmI5Lk+QidDmGzuC2D+vP7mv/GeSN+H39zOf7pN5N8p059FcOfs2bVlrRr4SK9WxA==", + "man_dir": "", + "name": "@esbuild/freebsd-x64", + "name_hash": "4612505319005618370", + "origin": "npm", + "os": [ + "freebsd", + ], + "resolution": { + 
"resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "arm64", + ], + "bin": null, + "dependencies": [], + "id": 35, + "integrity": "sha512-3ZioSQSg1HT2N05YxeJWYR+Libe3bREVSdWhEEgExWaDtyFbbXWb49QgPvFH8u03vUPX10JhJPcz7s9t9+boWg==", + "man_dir": "", + "name": "@esbuild/freebsd-arm64", + "name_hash": "14372867258341136141", + "origin": "npm", + "os": [ + "freebsd", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "x64", + ], + "bin": null, + "dependencies": [], + "id": 36, + "integrity": "sha512-tguWg1olF6DGqzws97pKZ8G2L7Ig1vjDmGTwcTuYHbuU6TTjJe5FXbgs5C1BBzHbJ2bo1m3WkQDbWO2PvamRcg==", + "man_dir": "", + "name": "@esbuild/darwin-x64", + "name_hash": "148344697379242544", + "origin": "npm", + "os": [ + "darwin", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "arm64", + ], + "bin": null, + "dependencies": [], + "id": 37, + "integrity": "sha512-JC74bdXcQEpW9KkV326WpZZjLguSZ3DfS8wrrvPMHgQOIEIG/sPXEN/V8IssoJhbefLRcRqw6RQH2NnpdprtMA==", + "man_dir": "", + "name": "@esbuild/darwin-arm64", + "name_hash": "6513306581678955422", + "origin": "npm", + "os": [ + "darwin", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "x64", + ], + "bin": null, + "dependencies": [], + "id": 38, + "integrity": "sha512-MiC9CWdPrfhibcXwr39p9ha1x0lZJ9KaVfvzA0Wxwz9ETX4v5CHfF09bx935nHlhi+MxhA63dKRRQLiVgSUtEg==", + "man_dir": "", + "name": "@esbuild/android-x64", + "name_hash": "7293239100947757592", + "origin": "npm", + "os": [ + "android", + ], 
+ "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "arm64", + ], + "bin": null, + "dependencies": [], + "id": 39, + "integrity": "sha512-LSQa7eDahypv/VO6WKohZGPSJDq5OVOo3UoFR1E4t4Gj1W7zEQMUhI+lo81H+DtB+kP+tDgBp+M4oNCwp6kffg==", + "man_dir": "", + "name": "@esbuild/android-arm64", + "name_hash": "12038435285769403986", + "origin": "npm", + "os": [ + "android", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "arm", + ], + "bin": null, + "dependencies": [], + "id": 40, + "integrity": "sha512-dQAxF1dW1C3zpeCDc5KqIYuZ1tgAdRXNoZP7vkBIRtKZPYe2xVr/d3SkirklCHudW1B45tGiUlz2pUWDfbDD4w==", + "man_dir": "", + "name": "@esbuild/android-arm", + "name_hash": "14010109405703440328", + "origin": "npm", + "os": [ + "android", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "arch": [ + "ppc64", + ], + "bin": null, + "dependencies": [], + "id": 41, + "integrity": "sha512-0NFWnA+7l41irNuaSVlLfgNT12caWJVLzp5eAVhZ0z1qpxbockccEt3s+149rE64VUI3Ml2zt8Nv5JVc4QXTsw==", + "man_dir": "", + "name": "@esbuild/aix-ppc64", + "name_hash": "4160537343427600094", + "origin": "npm", + "os": [ + "aix", + ], + "resolution": { + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.10.tgz", + "tag": "npm", + "value": "0.25.10", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [ + 44, + ], + "id": 42, + "integrity": null, + "man_dir": "", + "name": "pkg-wat-2", + "name_hash": "5824839127353763395", + "origin": "npm", + "resolution": { + "resolved": "workspace:packages/pkg2", + "tag": "workspace", + "value": "workspace:packages/pkg2", + }, + 
"scripts": {}, + }, + { + "bin": null, + "dependencies": [], + "id": 43, + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "man_dir": "", + "name": "kind-of", + "name_hash": "13897574220744325158", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "tag": "npm", + "value": "6.0.3", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [ + 45, + 46, + ], + "id": 44, + "integrity": null, + "man_dir": "", + "name": "pkg-wat", + "name_hash": "8896020699701999556", + "origin": "npm", + "resolution": { + "resolved": "workspace:packages/pkg1", + "tag": "workspace", + "value": "workspace:packages/pkg1", + }, + "scripts": {}, + }, + { + "bin": null, + "dependencies": [], + "id": 45, + "integrity": "sha512-umpJ0/k8X0MvD1ds0P9SfowREz2LenHsQaxSohMZ5OMNEU2r0tf8pdeEFTHMFxWVxKNyU9rTtK3CWzUCTKJUeQ==", + "man_dir": "", + "name": "jquery", + "name_hash": "265418099762006574", + "origin": "npm", + "resolution": { + "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.7.0.tgz", + "tag": "npm", + "value": "3.7.0", + }, + "scripts": {}, + }, + ], + "trees": [ + { + "dependencies": { + "@esbuild/aix-ppc64": { + "id": 18, + "package_id": 41, + }, + "@esbuild/android-arm": { + "id": 19, + "package_id": 40, + }, + "@esbuild/android-arm64": { + "id": 20, + "package_id": 39, + }, + "@esbuild/android-x64": { + "id": 21, + "package_id": 38, + }, + "@esbuild/darwin-arm64": { + "id": 22, + "package_id": 37, + }, + "@esbuild/darwin-x64": { + "id": 23, + "package_id": 36, + }, + "@esbuild/freebsd-arm64": { + "id": 24, + "package_id": 35, + }, + "@esbuild/freebsd-x64": { + "id": 25, + "package_id": 34, + }, + "@esbuild/linux-arm": { + "id": 26, + "package_id": 33, + }, + "@esbuild/linux-arm64": { + "id": 27, + "package_id": 32, + }, + "@esbuild/linux-ia32": { + "id": 28, + "package_id": 31, + }, + "@esbuild/linux-loong64": { + "id": 29, + "package_id": 30, + }, 
+ "@esbuild/linux-mips64el": { + "id": 30, + "package_id": 29, + }, + "@esbuild/linux-ppc64": { + "id": 31, + "package_id": 28, + }, + "@esbuild/linux-riscv64": { + "id": 32, + "package_id": 27, + }, + "@esbuild/linux-s390x": { + "id": 33, + "package_id": 26, + }, + "@esbuild/linux-x64": { + "id": 34, + "package_id": 25, + }, + "@esbuild/netbsd-arm64": { + "id": 35, + "package_id": 24, + }, + "@esbuild/netbsd-x64": { + "id": 36, + "package_id": 23, + }, + "@esbuild/openbsd-arm64": { + "id": 37, + "package_id": 22, + }, + "@esbuild/openbsd-x64": { + "id": 38, + "package_id": 21, + }, + "@esbuild/openharmony-arm64": { + "id": 39, + "package_id": 20, + }, + "@esbuild/sunos-x64": { + "id": 40, + "package_id": 19, + }, + "@esbuild/win32-arm64": { + "id": 41, + "package_id": 18, + }, + "@esbuild/win32-ia32": { + "id": 42, + "package_id": 17, + }, + "@esbuild/win32-x64": { + "id": 43, + "package_id": 16, + }, + "esbuild": { + "id": 2, + "package_id": 15, + }, + "false": { + "id": 6, + "package_id": 11, + }, + "is-buffer": { + "id": 14, + "package_id": 5, + }, + "is-even": { + "id": 9, + "package_id": 1, + }, + "is-number": { + "id": 5, + "package_id": 12, + }, + "is-odd": { + "id": 11, + "package_id": 2, + }, + "jquery": { + "id": 7, + "package_id": 10, + }, + "js-tokens": { + "id": 17, + "package_id": 9, + }, + "kind-of": { + "id": 44, + "package_id": 43, + }, + "lodash": { + "id": 10, + "package_id": 4294967295, + }, + "loose-envify": { + "id": 15, + "package_id": 8, + }, + "object-assign": { + "id": 16, + "package_id": 7, + }, + "pkg-wat": { + "id": 0, + "package_id": 44, + }, + "pkg-wat-2": { + "id": 1, + "package_id": 42, + }, + "react": { + "id": 3, + "package_id": 14, + }, + "scheduler": { + "id": 8, + "package_id": 6, + }, + "zod": { + "id": 4, + "package_id": 13, + }, + }, + "depth": 0, + "id": 0, + "path": "node_modules", + }, + { + "dependencies": { + "jquery": { + "id": 45, + "package_id": 45, + }, + }, + "depth": 1, + "id": 1, + "path": 
"node_modules/pkg-wat/node_modules", + }, + { + "dependencies": { + "is-number": { + "id": 12, + "package_id": 3, + }, + }, + "depth": 1, + "id": 2, + "path": "node_modules/is-odd/node_modules", + }, + { + "dependencies": { + "kind-of": { + "id": 13, + "package_id": 4, + }, + }, + "depth": 2, + "id": 3, + "path": "node_modules/is-odd/node_modules/is-number/node_modules", + }, + ], + "workspace_paths": { + "5824839127353763395": "packages/pkg2", + "8896020699701999556": "packages/pkg1", + }, + "workspace_versions": {}, +} +`; diff --git a/test/cli/install/bun-install-dep.test.ts b/test/cli/install/bun-install-dep.test.ts deleted file mode 100644 index 03321aa867..0000000000 --- a/test/cli/install/bun-install-dep.test.ts +++ /dev/null @@ -1,70 +0,0 @@ -import { npa } from "bun:internal-for-testing"; -import { expect, test } from "bun:test"; - -const bitbucket = [ - "bitbucket:dylan-conway/public-install-test", - "bitbucket.org:dylan-conway/public-install-test", - "bitbucket.com:dylan-conway/public-install-test", - "git@bitbucket.org:dylan-conway/public-install-test", -]; - -const tarball_remote = [ - "http://localhost:5000/no-deps/-/no-deps-2.0.0.tgz", - "https://registry.npmjs.org/no-deps/-/no-deps-2.0.0.tgz", -]; - -const local_tarball = ["file:./path/to/tarball.tgz", "./path/to/tarball.tgz"]; -const github = ["foo/bar"]; -const folder = ["file:./path/to/folder"]; - -const gitlab = ["gitlab:dylan-conway/public-install-test", "gitlab.com:dylan-conway/public-install-test"]; - -const all = [ - "@scoped/package", - "@scoped/package@1.0.0", - "@scoped/package@1.0.0-beta.1", - "@scoped/package@1.0.0-beta.1+build.123", - "package", - "package@1.0.0", - "package@1.0.0-beta.1", - "package@1.0.0-beta.1+build.123", - ...bitbucket, - ...github, - ...gitlab, - ...tarball_remote, - ...local_tarball, - ...github, - "github:dylan-conway/public-install-test", - "git@github.com:dylan-conway/public-install-test", - "https://github.com/dylan-conway/public-install-test", - 
"https://github.com/dylan-conway/public-install-test.git", - "https://github.com/dylan-conway/public-install-test.git#semver:^1.0.0", -]; - -test.each(all)("npa %s", dep => { - expect(npa(dep)).toMatchSnapshot(); -}); - -const pkgJsonLike = [ - ["foo", "1.2.3"], - ["foo", "latest"], - ["foo", "workspace:*"], - ["foo", "workspace:^1.0.0"], - ["foo", "workspace:1.0.0"], - ["foo", "workspace:1.0.0-beta.1"], - ["foo", "workspace:1.0.0-beta.1+build.123"], - ["foo", "workspace:1.0.0-beta.1+build.123"], - ["foo", "workspace:1.0.0-beta.1+build.123"], - ["bar", "^1.0.0"], - ["bar", "~1.0.0"], - ["bar", "> 1.0.0 < 2.0.0"], - ["bar", "1.0.0 - 2.0.0"], -]; - -test.each(pkgJsonLike)('dependencies: {"%s": "%s"}', (name, version) => { - expect(npa(name, version)).toMatchSnapshot(); -}); - -test("bad", () => { - expect(() => npa("-123!}{P}{!P#$s")).toThrow(); -}); diff --git a/test/cli/install/bun-pm.test.ts b/test/cli/install/bun-pm.test.ts index 4e99091ded..a1437ca3e3 100644 --- a/test/cli/install/bun-pm.test.ts +++ b/test/cli/install/bun-pm.test.ts @@ -444,3 +444,145 @@ test("bun pm whoami still works", async () => { // Exit code will be non-zero due to missing auth expect(exitCode).toBe(1); }); + +test.each([ + { + name: "bun list executes pm ls", + cmd: ["list"], + packageName: "test-list", + dependencies: { bar: "latest" }, + expectedOutput: (dir: string) => `${dir} node_modules (1)\n└── bar@0.0.2\n`, + checkReservationMessage: true, + }, + { + name: "bun pm list works as alias for bun pm ls", + cmd: ["pm", "list"], + packageName: "test-pm-list", + dependencies: { bar: "latest" }, + expectedOutput: (dir: string) => `${dir} node_modules (1)\n└── bar@0.0.2\n`, + checkReservationMessage: false, + }, + { + name: "bun pm ls still works", + cmd: ["pm", "ls"], + packageName: "test-pm-ls", + dependencies: { bar: "latest" }, + expectedOutput: (dir: string) => `${dir} node_modules (1)\n└── bar@0.0.2\n`, + checkReservationMessage: false, + }, +])("$name", async ({ cmd, packageName, 
dependencies, expectedOutput, checkReservationMessage }) => { + const urls: string[] = []; + setHandler(dummyRegistry(urls)); + await writeFile( + join(package_dir, "package.json"), + JSON.stringify({ + name: packageName, + version: "1.0.0", + dependencies, + }), + ); + + // Install dependencies first + { + const { stderr, exited } = spawn({ + cmd: [bunExe(), "install"], + cwd: package_dir, + stdout: "pipe", + stdin: "pipe", + stderr: "pipe", + env, + }); + const err = await stderr.text(); + expect(err).not.toContain("error:"); + expect(err).toContain("Saved lockfile"); + expect(await exited).toBe(0); + } + + // Test the command + const { stdout, stderr, exited } = spawn({ + cmd: [bunExe(), ...cmd], + cwd: package_dir, + stdout: "pipe", + stdin: "pipe", + stderr: "pipe", + env, + }); + + const [stderrText, stdoutText, exitCode] = await Promise.all([ + new Response(stderr).text(), + new Response(stdout).text(), + exited, + ]); + + expect(stderrText).toBe(""); + if (checkReservationMessage) { + expect(stdoutText).not.toContain("reserved for future use"); + } + expect(stdoutText).toBe(expectedOutput(package_dir)); + expect(exitCode).toBe(0); +}); + +test("bun list --all shows full dependency tree", async () => { + const urls: string[] = []; + setHandler(dummyRegistry(urls)); + await writeFile( + join(package_dir, "package.json"), + JSON.stringify({ + name: "test-list-all", + version: "1.0.0", + dependencies: { + moo: "./moo", + }, + }), + ); + await mkdir(join(package_dir, "moo")); + await writeFile( + join(package_dir, "moo", "package.json"), + JSON.stringify({ + name: "moo", + version: "0.1.0", + dependencies: { + bar: "latest", + }, + }), + ); + + // Install dependencies first + { + const { stderr, exited } = spawn({ + cmd: [bunExe(), "install"], + cwd: package_dir, + stdout: "pipe", + stdin: "pipe", + stderr: "pipe", + env, + }); + const err = await stderr.text(); + expect(err).not.toContain("error:"); + expect(err).toContain("Saved lockfile"); + expect(await 
exited).toBe(0); + } + + // Test "bun list --all" + const { stdout, stderr, exited } = spawn({ + cmd: [bunExe(), "list", "--all"], + cwd: package_dir, + stdout: "pipe", + stdin: "pipe", + stderr: "pipe", + env, + }); + + const [stderrText, stdoutText, exitCode] = await Promise.all([ + new Response(stderr).text(), + new Response(stdout).text(), + exited, + ]); + + expect(stderrText).toBe(""); + expect(stdoutText).toBe(`${package_dir} node_modules +├── bar@0.0.2 +└── moo@moo +`); + expect(exitCode).toBe(0); +}); diff --git a/test/cli/install/hosted-git-info/cases.ts b/test/cli/install/hosted-git-info/cases.ts new file mode 100644 index 0000000000..7f8a296d97 --- /dev/null +++ b/test/cli/install/hosted-git-info/cases.ts @@ -0,0 +1,2472 @@ +/** + * Contains all the possible test cases that hosted-git-archive.test.ts tests against. + * + * These are organized according to the structure in https://github.com/npm/hosted-git-info/blob/main/test/ at the time + * of writing. + * + * TODO(markovejnovic): This does not include the following set of tests: + * - https://github.com/npm/hosted-git-info/blob/main/test/file.js + * - https://github.com/npm/hosted-git-info/blob/main/test/parse-url.js + */ +// This is a valid git branch name that contains other occurrences of the characters we check +// for to determine the committish in order to test that we parse those correctly +const committishDefaults = { committish: "lk/br@nch.t#st:^1.0.0-pre.4" }; + +type Provider = "bitbucket" | "gist" | "github" | "gitlab" | "sourcehut" | "misc"; + +const defaults = { + bitbucket: { type: "bitbucket", user: "foo", project: "bar" }, + gist: { type: "gist", user: null, project: "feedbeef" }, + github: { type: "github", user: "foo", project: "bar" }, + gitlab: { type: "gitlab", user: "foo", project: "bar" }, + gitlabSubgroup: { type: "gitlab", user: "foo/bar", project: "baz" }, + sourcehut: { type: "sourcehut", user: "~foo", project: "bar" }, +}; + +export const validGitUrls: { [K in Provider]: {
[K in string]: object } } = { + bitbucket: { + // shortcuts + // + // NOTE auth is accepted but ignored + "bitbucket:foo/bar": { ...defaults.bitbucket, default: "shortcut" }, + "bitbucket:foo/bar#branch": { ...defaults.bitbucket, default: "shortcut", committish: "branch" }, + "bitbucket:user@foo/bar": { ...defaults.bitbucket, default: "shortcut", auth: null }, + "bitbucket:user@foo/bar#branch": { ...defaults.bitbucket, default: "shortcut", auth: null, committish: "branch" }, + "bitbucket:user:password@foo/bar": { ...defaults.bitbucket, default: "shortcut", auth: null }, + "bitbucket:user:password@foo/bar#branch": { + ...defaults.bitbucket, + default: "shortcut", + auth: null, + committish: "branch", + }, + "bitbucket::password@foo/bar": { ...defaults.bitbucket, default: "shortcut", auth: null }, + "bitbucket::password@foo/bar#branch": { + ...defaults.bitbucket, + default: "shortcut", + auth: null, + committish: "branch", + }, + + "bitbucket:foo/bar.git": { ...defaults.bitbucket, default: "shortcut" }, + "bitbucket:foo/bar.git#branch": { ...defaults.bitbucket, default: "shortcut", committish: "branch" }, + "bitbucket:user@foo/bar.git": { ...defaults.bitbucket, default: "shortcut", auth: null }, + "bitbucket:user@foo/bar.git#branch": { + ...defaults.bitbucket, + default: "shortcut", + auth: null, + committish: "branch", + }, + "bitbucket:user:password@foo/bar.git": { ...defaults.bitbucket, default: "shortcut", auth: null }, + "bitbucket:user:password@foo/bar.git#branch": { + ...defaults.bitbucket, + default: "shortcut", + auth: null, + committish: "branch", + }, + "bitbucket::password@foo/bar.git": { ...defaults.bitbucket, default: "shortcut", auth: null }, + "bitbucket::password@foo/bar.git#branch": { + ...defaults.bitbucket, + default: "shortcut", + auth: null, + committish: "branch", + }, + + // no-protocol git+ssh + // + // NOTE auth is accepted but ignored + "git@bitbucket.org:foo/bar": { ...defaults.bitbucket, default: "sshurl", auth: null }, + 
"git@bitbucket.org:foo/bar#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + "user@bitbucket.org:foo/bar": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "user@bitbucket.org:foo/bar#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + "user:password@bitbucket.org:foo/bar": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "user:password@bitbucket.org:foo/bar#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + ":password@bitbucket.org:foo/bar": { ...defaults.bitbucket, default: "sshurl", auth: null }, + ":password@bitbucket.org:foo/bar#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "git@bitbucket.org:foo/bar.git": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "git@bitbucket.org:foo/bar.git#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + "user@bitbucket.org:foo/bar.git": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "user@bitbucket.org:foo/bar.git#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + "user:password@bitbucket.org:foo/bar.git": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "user:password@bitbucket.org:foo/bar.git#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + ":password@bitbucket.org:foo/bar.git": { ...defaults.bitbucket, default: "sshurl", auth: null }, + ":password@bitbucket.org:foo/bar.git#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + + // git+ssh urls + // + // NOTE auth is accepted but ignored + "git+ssh://bitbucket.org:foo/bar": { ...defaults.bitbucket, default: "sshurl" }, + "git+ssh://bitbucket.org:foo/bar#branch": { ...defaults.bitbucket, default: "sshurl", 
committish: "branch" }, + "git+ssh://user@bitbucket.org:foo/bar": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "git+ssh://user@bitbucket.org:foo/bar#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://user:password@bitbucket.org:foo/bar": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "git+ssh://user:password@bitbucket.org:foo/bar#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://:password@bitbucket.org:foo/bar": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "git+ssh://:password@bitbucket.org:foo/bar#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "git+ssh://bitbucket.org:foo/bar.git": { ...defaults.bitbucket, default: "sshurl" }, + "git+ssh://bitbucket.org:foo/bar.git#branch": { ...defaults.bitbucket, default: "sshurl", committish: "branch" }, + "git+ssh://user@bitbucket.org:foo/bar.git": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "git+ssh://user@bitbucket.org:foo/bar.git#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://user:password@bitbucket.org:foo/bar.git": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "git+ssh://user:password@bitbucket.org:foo/bar.git#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://:password@bitbucket.org:foo/bar.git": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "git+ssh://:password@bitbucket.org:foo/bar.git#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + + // ssh urls + // + // NOTE auth is accepted but ignored + "ssh://bitbucket.org:foo/bar": { ...defaults.bitbucket, default: "sshurl" }, + "ssh://bitbucket.org:foo/bar#branch": { ...defaults.bitbucket, default: "sshurl", committish: "branch" }, + 
"ssh://user@bitbucket.org:foo/bar": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "ssh://user@bitbucket.org:foo/bar#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://user:password@bitbucket.org:foo/bar": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "ssh://user:password@bitbucket.org:foo/bar#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://:password@bitbucket.org:foo/bar": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "ssh://:password@bitbucket.org:foo/bar#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "ssh://bitbucket.org:foo/bar.git": { ...defaults.bitbucket, default: "sshurl" }, + "ssh://bitbucket.org:foo/bar.git#branch": { ...defaults.bitbucket, default: "sshurl", committish: "branch" }, + "ssh://user@bitbucket.org:foo/bar.git": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "ssh://user@bitbucket.org:foo/bar.git#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://user:password@bitbucket.org:foo/bar.git": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "ssh://user:password@bitbucket.org:foo/bar.git#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://:password@bitbucket.org:foo/bar.git": { ...defaults.bitbucket, default: "sshurl", auth: null }, + "ssh://:password@bitbucket.org:foo/bar.git#branch": { + ...defaults.bitbucket, + default: "sshurl", + auth: null, + committish: "branch", + }, + + // git+https urls + // + // NOTE auth is accepted and respected + "git+https://bitbucket.org/foo/bar": { ...defaults.bitbucket, default: "https" }, + "git+https://bitbucket.org/foo/bar#branch": { ...defaults.bitbucket, default: "https", committish: "branch" }, + "git+https://user@bitbucket.org/foo/bar": { 
...defaults.bitbucket, default: "https", auth: "user" }, + "git+https://user@bitbucket.org/foo/bar#branch": { + ...defaults.bitbucket, + default: "https", + auth: "user", + committish: "branch", + }, + "git+https://user:password@bitbucket.org/foo/bar": { + ...defaults.bitbucket, + default: "https", + auth: "user:password", + }, + "git+https://user:password@bitbucket.org/foo/bar#branch": { + ...defaults.bitbucket, + default: "https", + auth: "user:password", + committish: "branch", + }, + "git+https://:password@bitbucket.org/foo/bar": { ...defaults.bitbucket, default: "https", auth: ":password" }, + "git+https://:password@bitbucket.org/foo/bar#branch": { + ...defaults.bitbucket, + default: "https", + auth: ":password", + committish: "branch", + }, + + "git+https://bitbucket.org/foo/bar.git": { ...defaults.bitbucket, default: "https" }, + "git+https://bitbucket.org/foo/bar.git#branch": { ...defaults.bitbucket, default: "https", committish: "branch" }, + "git+https://user@bitbucket.org/foo/bar.git": { ...defaults.bitbucket, default: "https", auth: "user" }, + "git+https://user@bitbucket.org/foo/bar.git#branch": { + ...defaults.bitbucket, + default: "https", + auth: "user", + committish: "branch", + }, + "git+https://user:password@bitbucket.org/foo/bar.git": { + ...defaults.bitbucket, + default: "https", + auth: "user:password", + }, + "git+https://user:password@bitbucket.org/foo/bar.git#branch": { + ...defaults.bitbucket, + default: "https", + auth: "user:password", + committish: "branch", + }, + "git+https://:password@bitbucket.org/foo/bar.git": { ...defaults.bitbucket, default: "https", auth: ":password" }, + "git+https://:password@bitbucket.org/foo/bar.git#branch": { + ...defaults.bitbucket, + default: "https", + auth: ":password", + committish: "branch", + }, + + // https urls + // + // NOTE auth is accepted and respected + "https://bitbucket.org/foo/bar": { ...defaults.bitbucket, default: "https" }, + "https://bitbucket.org/foo/bar#branch": { 
...defaults.bitbucket, default: "https", committish: "branch" }, + "https://user@bitbucket.org/foo/bar": { ...defaults.bitbucket, default: "https", auth: "user" }, + "https://user@bitbucket.org/foo/bar#branch": { + ...defaults.bitbucket, + default: "https", + auth: "user", + committish: "branch", + }, + "https://user:password@bitbucket.org/foo/bar": { ...defaults.bitbucket, default: "https", auth: "user:password" }, + "https://user:password@bitbucket.org/foo/bar#branch": { + ...defaults.bitbucket, + default: "https", + auth: "user:password", + committish: "branch", + }, + "https://:password@bitbucket.org/foo/bar": { ...defaults.bitbucket, default: "https", auth: ":password" }, + "https://:password@bitbucket.org/foo/bar#branch": { + ...defaults.bitbucket, + default: "https", + auth: ":password", + committish: "branch", + }, + + "https://bitbucket.org/foo/bar.git": { ...defaults.bitbucket, default: "https" }, + "https://bitbucket.org/foo/bar.git#branch": { ...defaults.bitbucket, default: "https", committish: "branch" }, + "https://user@bitbucket.org/foo/bar.git": { ...defaults.bitbucket, default: "https", auth: "user" }, + "https://user@bitbucket.org/foo/bar.git#branch": { + ...defaults.bitbucket, + default: "https", + auth: "user", + committish: "branch", + }, + "https://user:password@bitbucket.org/foo/bar.git": { + ...defaults.bitbucket, + default: "https", + auth: "user:password", + }, + "https://user:password@bitbucket.org/foo/bar.git#branch": { + ...defaults.bitbucket, + default: "https", + auth: "user:password", + committish: "branch", + }, + "https://:password@bitbucket.org/foo/bar.git": { ...defaults.bitbucket, default: "https", auth: ":password" }, + "https://:password@bitbucket.org/foo/bar.git#branch": { + ...defaults.bitbucket, + default: "https", + auth: ":password", + committish: "branch", + }, + }, + gist: { + // shortcuts + // + // NOTE auth is accepted but ignored + "gist:feedbeef": { ...defaults.gist, default: "shortcut" }, + "gist:feedbeef#branch": 
{ ...defaults.gist, default: "shortcut", committish: "branch" }, + "gist:user@feedbeef": { ...defaults.gist, default: "shortcut", auth: null }, + "gist:user@feedbeef#branch": { ...defaults.gist, default: "shortcut", auth: null, committish: "branch" }, + "gist:user:password@feedbeef": { ...defaults.gist, default: "shortcut", auth: null }, + "gist:user:password@feedbeef#branch": { ...defaults.gist, default: "shortcut", auth: null, committish: "branch" }, + "gist::password@feedbeef": { ...defaults.gist, default: "shortcut", auth: null }, + "gist::password@feedbeef#branch": { ...defaults.gist, default: "shortcut", auth: null, committish: "branch" }, + + "gist:feedbeef.git": { ...defaults.gist, default: "shortcut" }, + "gist:feedbeef.git#branch": { ...defaults.gist, default: "shortcut", committish: "branch" }, + "gist:user@feedbeef.git": { ...defaults.gist, default: "shortcut", auth: null }, + "gist:user@feedbeef.git#branch": { ...defaults.gist, default: "shortcut", auth: null, committish: "branch" }, + "gist:user:password@feedbeef.git": { ...defaults.gist, default: "shortcut", auth: null }, + "gist:user:password@feedbeef.git#branch": { + ...defaults.gist, + default: "shortcut", + auth: null, + committish: "branch", + }, + "gist::password@feedbeef.git": { ...defaults.gist, default: "shortcut", auth: null }, + "gist::password@feedbeef.git#branch": { ...defaults.gist, default: "shortcut", auth: null, committish: "branch" }, + + "gist:/feedbeef": { ...defaults.gist, default: "shortcut" }, + "gist:/feedbeef#branch": { ...defaults.gist, default: "shortcut", committish: "branch" }, + "gist:user@/feedbeef": { ...defaults.gist, default: "shortcut", auth: null }, + "gist:user@/feedbeef#branch": { ...defaults.gist, default: "shortcut", auth: null, committish: "branch" }, + "gist:user:password@/feedbeef": { ...defaults.gist, default: "shortcut", auth: null }, + "gist:user:password@/feedbeef#branch": { + ...defaults.gist, + default: "shortcut", + auth: null, + committish: "branch", 
+ }, + "gist::password@/feedbeef": { ...defaults.gist, default: "shortcut", auth: null }, + "gist::password@/feedbeef#branch": { ...defaults.gist, default: "shortcut", auth: null, committish: "branch" }, + + "gist:/feedbeef.git": { ...defaults.gist, default: "shortcut" }, + "gist:/feedbeef.git#branch": { ...defaults.gist, default: "shortcut", committish: "branch" }, + "gist:user@/feedbeef.git": { ...defaults.gist, default: "shortcut", auth: null }, + "gist:user@/feedbeef.git#branch": { ...defaults.gist, default: "shortcut", auth: null, committish: "branch" }, + "gist:user:password@/feedbeef.git": { ...defaults.gist, default: "shortcut", auth: null }, + "gist:user:password@/feedbeef.git#branch": { + ...defaults.gist, + default: "shortcut", + auth: null, + committish: "branch", + }, + "gist::password@/feedbeef.git": { ...defaults.gist, default: "shortcut", auth: null }, + "gist::password@/feedbeef.git#branch": { + ...defaults.gist, + default: "shortcut", + auth: null, + committish: "branch", + }, + + "gist:foo/feedbeef": { ...defaults.gist, default: "shortcut", user: "foo" }, + "gist:foo/feedbeef#branch": { ...defaults.gist, default: "shortcut", user: "foo", committish: "branch" }, + "gist:user@foo/feedbeef": { ...defaults.gist, default: "shortcut", user: "foo", auth: null }, + "gist:user@foo/feedbeef#branch": { + ...defaults.gist, + default: "shortcut", + user: "foo", + auth: null, + committish: "branch", + }, + "gist:user:password@foo/feedbeef": { ...defaults.gist, default: "shortcut", user: "foo", auth: null }, + "gist:user:password@foo/feedbeef#branch": { + ...defaults.gist, + default: "shortcut", + user: "foo", + auth: null, + committish: "branch", + }, + "gist::password@foo/feedbeef": { ...defaults.gist, default: "shortcut", user: "foo", auth: null }, + "gist::password@foo/feedbeef#branch": { + ...defaults.gist, + default: "shortcut", + user: "foo", + auth: null, + committish: "branch", + }, + + "gist:foo/feedbeef.git": { ...defaults.gist, default: "shortcut", 
user: "foo" }, + "gist:foo/feedbeef.git#branch": { ...defaults.gist, default: "shortcut", user: "foo", committish: "branch" }, + "gist:user@foo/feedbeef.git": { ...defaults.gist, default: "shortcut", user: "foo", auth: null }, + "gist:user@foo/feedbeef.git#branch": { + ...defaults.gist, + default: "shortcut", + user: "foo", + auth: null, + committish: "branch", + }, + "gist:user:password@foo/feedbeef.git": { ...defaults.gist, default: "shortcut", user: "foo", auth: null }, + "gist:user:password@foo/feedbeef.git#branch": { + ...defaults.gist, + default: "shortcut", + user: "foo", + auth: null, + committish: "branch", + }, + "gist::password@foo/feedbeef.git": { ...defaults.gist, default: "shortcut", user: "foo", auth: null }, + "gist::password@foo/feedbeef.git#branch": { + ...defaults.gist, + default: "shortcut", + user: "foo", + auth: null, + committish: "branch", + }, + + // git urls + // + // NOTE auth is accepted and respected + "git://gist.github.com/feedbeef": { ...defaults.gist, default: "git" }, + "git://gist.github.com/feedbeef#branch": { ...defaults.gist, default: "git", committish: "branch" }, + "git://user@gist.github.com/feedbeef": { ...defaults.gist, default: "git", auth: "user" }, + "git://user@gist.github.com/feedbeef#branch": { + ...defaults.gist, + default: "git", + auth: "user", + committish: "branch", + }, + "git://user:password@gist.github.com/feedbeef": { ...defaults.gist, default: "git", auth: "user:password" }, + "git://user:password@gist.github.com/feedbeef#branch": { + ...defaults.gist, + default: "git", + auth: "user:password", + committish: "branch", + }, + "git://:password@gist.github.com/feedbeef": { ...defaults.gist, default: "git", auth: ":password" }, + "git://:password@gist.github.com/feedbeef#branch": { + ...defaults.gist, + default: "git", + auth: ":password", + committish: "branch", + }, + + "git://gist.github.com/feedbeef.git": { ...defaults.gist, default: "git" }, + "git://gist.github.com/feedbeef.git#branch": { 
...defaults.gist, default: "git", committish: "branch" }, + "git://user@gist.github.com/feedbeef.git": { ...defaults.gist, default: "git", auth: "user" }, + "git://user@gist.github.com/feedbeef.git#branch": { + ...defaults.gist, + default: "git", + auth: "user", + committish: "branch", + }, + "git://user:password@gist.github.com/feedbeef.git": { ...defaults.gist, default: "git", auth: "user:password" }, + "git://user:password@gist.github.com/feedbeef.git#branch": { + ...defaults.gist, + default: "git", + auth: "user:password", + committish: "branch", + }, + "git://:password@gist.github.com/feedbeef.git": { ...defaults.gist, default: "git", auth: ":password" }, + "git://:password@gist.github.com/feedbeef.git#branch": { + ...defaults.gist, + default: "git", + auth: ":password", + committish: "branch", + }, + + "git://gist.github.com/foo/feedbeef": { ...defaults.gist, default: "git", user: "foo" }, + "git://gist.github.com/foo/feedbeef#branch": { + ...defaults.gist, + default: "git", + user: "foo", + committish: "branch", + }, + "git://user@gist.github.com/foo/feedbeef": { ...defaults.gist, default: "git", user: "foo", auth: "user" }, + "git://user@gist.github.com/foo/feedbeef#branch": { + ...defaults.gist, + default: "git", + user: "foo", + auth: "user", + committish: "branch", + }, + "git://user:password@gist.github.com/foo/feedbeef": { + ...defaults.gist, + default: "git", + user: "foo", + auth: "user:password", + }, + "git://user:password@gist.github.com/foo/feedbeef#branch": { + ...defaults.gist, + default: "git", + user: "foo", + auth: "user:password", + committish: "branch", + }, + "git://:password@gist.github.com/foo/feedbeef": { + ...defaults.gist, + default: "git", + user: "foo", + auth: ":password", + }, + "git://:password@gist.github.com/foo/feedbeef#branch": { + ...defaults.gist, + default: "git", + user: "foo", + auth: ":password", + committish: "branch", + }, + + "git://gist.github.com/foo/feedbeef.git": { ...defaults.gist, default: "git", user: "foo" 
}, + "git://gist.github.com/foo/feedbeef.git#branch": { + ...defaults.gist, + default: "git", + user: "foo", + committish: "branch", + }, + "git://user@gist.github.com/foo/feedbeef.git": { ...defaults.gist, default: "git", user: "foo", auth: "user" }, + "git://user@gist.github.com/foo/feedbeef.git#branch": { + ...defaults.gist, + default: "git", + user: "foo", + auth: "user", + committish: "branch", + }, + "git://user:password@gist.github.com/foo/feedbeef.git": { + ...defaults.gist, + default: "git", + user: "foo", + auth: "user:password", + }, + "git://user:password@gist.github.com/foo/feedbeef.git#branch": { + ...defaults.gist, + default: "git", + user: "foo", + auth: "user:password", + committish: "branch", + }, + "git://:password@gist.github.com/foo/feedbeef.git": { + ...defaults.gist, + default: "git", + user: "foo", + auth: ":password", + }, + "git://:password@gist.github.com/foo/feedbeef.git#branch": { + ...defaults.gist, + default: "git", + user: "foo", + auth: ":password", + committish: "branch", + }, + + // no-protocol git+ssh + // + // NOTE auth is accepted and ignored + "git@gist.github.com:feedbeef": { ...defaults.gist, default: "sshurl", auth: null }, + "git@gist.github.com:feedbeef#branch": { ...defaults.gist, default: "sshurl", auth: null, committish: "branch" }, + "user@gist.github.com:feedbeef": { ...defaults.gist, default: "sshurl", auth: null }, + "user@gist.github.com:feedbeef#branch": { ...defaults.gist, default: "sshurl", auth: null, committish: "branch" }, + "user:password@gist.github.com:feedbeef": { ...defaults.gist, default: "sshurl", auth: null }, + "user:password@gist.github.com:feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + ":password@gist.github.com:feedbeef": { ...defaults.gist, default: "sshurl", auth: null }, + ":password@gist.github.com:feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + + 
"git@gist.github.com:feedbeef.git": { ...defaults.gist, default: "sshurl", auth: null }, + "git@gist.github.com:feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + committish: "branch", + auth: null, + }, + "user@gist.github.com:feedbeef.git": { ...defaults.gist, default: "sshurl", auth: null }, + "user@gist.github.com:feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + "user:password@gist.github.com:feedbeef.git": { ...defaults.gist, default: "sshurl", auth: null }, + "user:password@gist.github.com:feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + ":password@gist.github.com:feedbeef.git": { ...defaults.gist, default: "sshurl", auth: null }, + ":password@gist.github.com:feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "git@gist.github.com:foo/feedbeef": { ...defaults.gist, default: "sshurl", auth: null, user: "foo" }, + "git@gist.github.com:foo/feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + "user@gist.github.com:foo/feedbeef": { ...defaults.gist, default: "sshurl", auth: null, user: "foo" }, + "user@gist.github.com:foo/feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + "user:password@gist.github.com:foo/feedbeef": { ...defaults.gist, default: "sshurl", auth: null, user: "foo" }, + "user:password@gist.github.com:foo/feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + ":password@gist.github.com:foo/feedbeef": { ...defaults.gist, default: "sshurl", auth: null, user: "foo" }, + ":password@gist.github.com:foo/feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + + "git@gist.github.com:foo/feedbeef.git": 
{ ...defaults.gist, default: "sshurl", auth: null, user: "foo" }, + "git@gist.github.com:foo/feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + "user@gist.github.com:foo/feedbeef.git": { ...defaults.gist, default: "sshurl", auth: null, user: "foo" }, + "user@gist.github.com:foo/feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + "user:password@gist.github.com:foo/feedbeef.git": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + }, + "user:password@gist.github.com:foo/feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + ":password@gist.github.com:foo/feedbeef.git": { ...defaults.gist, default: "sshurl", auth: null, user: "foo" }, + ":password@gist.github.com:foo/feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + + // git+ssh urls + // + // NOTE auth is accepted but ignored + // NOTE see TODO at list of invalids, some inputs fail and shouldn't + "git+ssh://gist.github.com:feedbeef": { ...defaults.gist, default: "sshurl", auth: null }, + "git+ssh://gist.github.com:feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://user@gist.github.com:feedbeef": { ...defaults.gist, default: "sshurl", auth: null }, + "git+ssh://user@gist.github.com:feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://user:password@gist.github.com:feedbeef": { ...defaults.gist, default: "sshurl", auth: null }, + "git+ssh://user:password@gist.github.com:feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://:password@gist.github.com:feedbeef": { ...defaults.gist, default: "sshurl", auth: null }, + 
"git+ssh://:password@gist.github.com:feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "git+ssh://gist.github.com:feedbeef.git": { ...defaults.gist, default: "sshurl", auth: null }, + "git+ssh://gist.github.com:feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://user@gist.github.com:feedbeef.git": { ...defaults.gist, default: "sshurl", auth: null }, + "git+ssh://user@gist.github.com:feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://user:password@gist.github.com:feedbeef.git": { ...defaults.gist, default: "sshurl", auth: null }, + "git+ssh://user:password@gist.github.com:feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://:password@gist.github.com:feedbeef.git": { ...defaults.gist, default: "sshurl", auth: null }, + "git+ssh://:password@gist.github.com:feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "git+ssh://gist.github.com:foo/feedbeef": { ...defaults.gist, default: "sshurl", user: "foo" }, + "git+ssh://gist.github.com:foo/feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + user: "foo", + committish: "branch", + }, + "git+ssh://user@gist.github.com:foo/feedbeef": { ...defaults.gist, default: "sshurl", auth: null, user: "foo" }, + "git+ssh://user@gist.github.com:foo/feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + "git+ssh://user:password@gist.github.com:foo/feedbeef": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + }, + "git+ssh://user:password@gist.github.com:foo/feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + 
"git+ssh://:password@gist.github.com:foo/feedbeef": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + }, + "git+ssh://:password@gist.github.com:foo/feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + + "git+ssh://gist.github.com:foo/feedbeef.git": { ...defaults.gist, default: "sshurl", user: "foo" }, + "git+ssh://gist.github.com:foo/feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + user: "foo", + committish: "branch", + }, + "git+ssh://user@gist.github.com:foo/feedbeef.git": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + }, + "git+ssh://user@gist.github.com:foo/feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + "git+ssh://user:password@gist.github.com:foo/feedbeef.git": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + }, + "git+ssh://user:password@gist.github.com:foo/feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + "git+ssh://:password@gist.github.com:foo/feedbeef.git": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + }, + "git+ssh://:password@gist.github.com:foo/feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + + // ssh urls + // + // NOTE auth is accepted but ignored + "ssh://gist.github.com:feedbeef": { ...defaults.gist, default: "sshurl", auth: null }, + "ssh://gist.github.com:feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://user@gist.github.com:feedbeef": { ...defaults.gist, default: "sshurl", auth: null }, + "ssh://user@gist.github.com:feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://user:password@gist.github.com:feedbeef": { 
...defaults.gist, default: "sshurl", auth: null }, + "ssh://user:password@gist.github.com:feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://:password@gist.github.com:feedbeef": { ...defaults.gist, default: "sshurl", auth: null }, + "ssh://:password@gist.github.com:feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "ssh://gist.github.com:feedbeef.git": { ...defaults.gist, default: "sshurl", auth: null }, + "ssh://gist.github.com:feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://user@gist.github.com:feedbeef.git": { ...defaults.gist, default: "sshurl", auth: null }, + "ssh://user@gist.github.com:feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://user:password@gist.github.com:feedbeef.git": { ...defaults.gist, default: "sshurl", auth: null }, + "ssh://user:password@gist.github.com:feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://:password@gist.github.com:feedbeef.git": { ...defaults.gist, default: "sshurl", auth: null }, + "ssh://:password@gist.github.com:feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "ssh://gist.github.com:foo/feedbeef": { ...defaults.gist, default: "sshurl", user: "foo" }, + "ssh://gist.github.com:foo/feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + user: "foo", + committish: "branch", + }, + "ssh://user@gist.github.com:foo/feedbeef": { ...defaults.gist, default: "sshurl", auth: null, user: "foo" }, + "ssh://user@gist.github.com:foo/feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + "ssh://user:password@gist.github.com:foo/feedbeef": { + ...defaults.gist, + default: "sshurl", + auth: 
null, + user: "foo", + }, + "ssh://user:password@gist.github.com:foo/feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + "ssh://:password@gist.github.com:foo/feedbeef": { ...defaults.gist, default: "sshurl", auth: null, user: "foo" }, + "ssh://:password@gist.github.com:foo/feedbeef#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + + "ssh://gist.github.com:foo/feedbeef.git": { ...defaults.gist, default: "sshurl", user: "foo" }, + "ssh://gist.github.com:foo/feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + user: "foo", + committish: "branch", + }, + "ssh://user@gist.github.com:foo/feedbeef.git": { ...defaults.gist, default: "sshurl", auth: null, user: "foo" }, + "ssh://user@gist.github.com:foo/feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + "ssh://user:password@gist.github.com:foo/feedbeef.git": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + }, + "ssh://user:password@gist.github.com:foo/feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + "ssh://:password@gist.github.com:foo/feedbeef.git": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + }, + "ssh://:password@gist.github.com:foo/feedbeef.git#branch": { + ...defaults.gist, + default: "sshurl", + auth: null, + user: "foo", + committish: "branch", + }, + + // git+https urls + // + // NOTE auth is accepted and respected + "git+https://gist.github.com/feedbeef": { ...defaults.gist, default: "https" }, + "git+https://gist.github.com/feedbeef#branch": { ...defaults.gist, default: "https", committish: "branch" }, + "git+https://user@gist.github.com/feedbeef": { ...defaults.gist, default: "https", auth: "user" }, + "git+https://user@gist.github.com/feedbeef#branch": { + 
...defaults.gist, + default: "https", + auth: "user", + committish: "branch", + }, + "git+https://user:password@gist.github.com/feedbeef": { + ...defaults.gist, + default: "https", + auth: "user:password", + }, + "git+https://user:password@gist.github.com/feedbeef#branch": { + ...defaults.gist, + default: "https", + auth: "user:password", + committish: "branch", + }, + "git+https://:password@gist.github.com/feedbeef": { ...defaults.gist, default: "https", auth: ":password" }, + "git+https://:password@gist.github.com/feedbeef#branch": { + ...defaults.gist, + default: "https", + auth: ":password", + committish: "branch", + }, + + "git+https://gist.github.com/feedbeef.git": { ...defaults.gist, default: "https" }, + "git+https://gist.github.com/feedbeef.git#branch": { ...defaults.gist, default: "https", committish: "branch" }, + "git+https://user@gist.github.com/feedbeef.git": { ...defaults.gist, default: "https", auth: "user" }, + "git+https://user@gist.github.com/feedbeef.git#branch": { + ...defaults.gist, + default: "https", + auth: "user", + committish: "branch", + }, + "git+https://user:password@gist.github.com/feedbeef.git": { + ...defaults.gist, + default: "https", + auth: "user:password", + }, + "git+https://user:password@gist.github.com/feedbeef.git#branch": { + ...defaults.gist, + default: "https", + auth: "user:password", + committish: "branch", + }, + "git+https://:password@gist.github.com/feedbeef.git": { ...defaults.gist, default: "https", auth: ":password" }, + "git+https://:password@gist.github.com/feedbeef.git#branch": { + ...defaults.gist, + default: "https", + auth: ":password", + committish: "branch", + }, + + "git+https://gist.github.com/foo/feedbeef": { ...defaults.gist, default: "https", user: "foo" }, + "git+https://gist.github.com/foo/feedbeef#branch": { + ...defaults.gist, + default: "https", + user: "foo", + committish: "branch", + }, + "git+https://user@gist.github.com/foo/feedbeef": { + ...defaults.gist, + default: "https", + auth: "user", 
+ user: "foo", + }, + "git+https://user@gist.github.com/foo/feedbeef#branch": { + ...defaults.gist, + default: "https", + auth: "user", + user: "foo", + committish: "branch", + }, + "git+https://user:password@gist.github.com/foo/feedbeef": { + ...defaults.gist, + default: "https", + auth: "user:password", + user: "foo", + }, + "git+https://user:password@gist.github.com/foo/feedbeef#branch": { + ...defaults.gist, + default: "https", + auth: "user:password", + user: "foo", + committish: "branch", + }, + "git+https://:password@gist.github.com/foo/feedbeef": { + ...defaults.gist, + default: "https", + auth: ":password", + user: "foo", + }, + "git+https://:password@gist.github.com/foo/feedbeef#branch": { + ...defaults.gist, + default: "https", + auth: ":password", + user: "foo", + committish: "branch", + }, + + "git+https://gist.github.com/foo/feedbeef.git": { ...defaults.gist, default: "https", user: "foo" }, + "git+https://gist.github.com/foo/feedbeef.git#branch": { + ...defaults.gist, + default: "https", + user: "foo", + committish: "branch", + }, + "git+https://user@gist.github.com/foo/feedbeef.git": { + ...defaults.gist, + default: "https", + auth: "user", + user: "foo", + }, + "git+https://user@gist.github.com/foo/feedbeef.git#branch": { + ...defaults.gist, + default: "https", + auth: "user", + user: "foo", + committish: "branch", + }, + "git+https://user:password@gist.github.com/foo/feedbeef.git": { + ...defaults.gist, + default: "https", + auth: "user:password", + user: "foo", + }, + "git+https://user:password@gist.github.com/foo/feedbeef.git#branch": { + ...defaults.gist, + default: "https", + auth: "user:password", + user: "foo", + committish: "branch", + }, + "git+https://:password@gist.github.com/foo/feedbeef.git": { + ...defaults.gist, + default: "https", + auth: ":password", + user: "foo", + }, + "git+https://:password@gist.github.com/foo/feedbeef.git#branch": { + ...defaults.gist, + default: "https", + auth: ":password", + user: "foo", + committish: 
"branch", + }, + + // https urls + // + // NOTE auth is accepted and respected + "https://gist.github.com/feedbeef": { ...defaults.gist, default: "https" }, + "https://gist.github.com/feedbeef#branch": { ...defaults.gist, default: "https", committish: "branch" }, + "https://user@gist.github.com/feedbeef": { ...defaults.gist, default: "https", auth: "user" }, + "https://user@gist.github.com/feedbeef#branch": { + ...defaults.gist, + default: "https", + auth: "user", + committish: "branch", + }, + "https://user:password@gist.github.com/feedbeef": { ...defaults.gist, default: "https", auth: "user:password" }, + "https://user:password@gist.github.com/feedbeef#branch": { + ...defaults.gist, + default: "https", + auth: "user:password", + committish: "branch", + }, + "https://:password@gist.github.com/feedbeef": { ...defaults.gist, default: "https", auth: ":password" }, + "https://:password@gist.github.com/feedbeef#branch": { + ...defaults.gist, + default: "https", + auth: ":password", + committish: "branch", + }, + + "https://gist.github.com/feedbeef.git": { ...defaults.gist, default: "https" }, + "https://gist.github.com/feedbeef.git#branch": { ...defaults.gist, default: "https", committish: "branch" }, + "https://user@gist.github.com/feedbeef.git": { ...defaults.gist, default: "https", auth: "user" }, + "https://user@gist.github.com/feedbeef.git#branch": { + ...defaults.gist, + default: "https", + auth: "user", + committish: "branch", + }, + "https://user:password@gist.github.com/feedbeef.git": { + ...defaults.gist, + default: "https", + auth: "user:password", + }, + "https://user:password@gist.github.com/feedbeef.git#branch": { + ...defaults.gist, + default: "https", + auth: "user:password", + committish: "branch", + }, + "https://:password@gist.github.com/feedbeef.git": { ...defaults.gist, default: "https", auth: ":password" }, + "https://:password@gist.github.com/feedbeef.git#branch": { + ...defaults.gist, + default: "https", + auth: ":password", + committish: 
"branch", + }, + + "https://gist.github.com/foo/feedbeef": { ...defaults.gist, default: "https", user: "foo" }, + "https://gist.github.com/foo/feedbeef#branch": { + ...defaults.gist, + default: "https", + user: "foo", + committish: "branch", + }, + "https://user@gist.github.com/foo/feedbeef": { ...defaults.gist, default: "https", auth: "user", user: "foo" }, + "https://user@gist.github.com/foo/feedbeef#branch": { + ...defaults.gist, + default: "https", + auth: "user", + user: "foo", + committish: "branch", + }, + "https://user:password@gist.github.com/foo/feedbeef": { + ...defaults.gist, + default: "https", + auth: "user:password", + user: "foo", + }, + "https://user:password@gist.github.com/foo/feedbeef#branch": { + ...defaults.gist, + default: "https", + auth: "user:password", + user: "foo", + committish: "branch", + }, + "https://:password@gist.github.com/foo/feedbeef": { + ...defaults.gist, + default: "https", + auth: ":password", + user: "foo", + }, + "https://:password@gist.github.com/foo/feedbeef#branch": { + ...defaults.gist, + default: "https", + auth: ":password", + user: "foo", + committish: "branch", + }, + + "https://gist.github.com/foo/feedbeef.git": { ...defaults.gist, default: "https", user: "foo" }, + "https://gist.github.com/foo/feedbeef.git#branch": { + ...defaults.gist, + default: "https", + user: "foo", + committish: "branch", + }, + "https://user@gist.github.com/foo/feedbeef.git": { + ...defaults.gist, + default: "https", + auth: "user", + user: "foo", + }, + "https://user@gist.github.com/foo/feedbeef.git#branch": { + ...defaults.gist, + default: "https", + auth: "user", + user: "foo", + committish: "branch", + }, + "https://user:password@gist.github.com/foo/feedbeef.git": { + ...defaults.gist, + default: "https", + auth: "user:password", + user: "foo", + }, + "https://user:password@gist.github.com/foo/feedbeef.git#branch": { + ...defaults.gist, + default: "https", + auth: "user:password", + user: "foo", + committish: "branch", + }, + 
"https://:password@gist.github.com/foo/feedbeef.git": { + ...defaults.gist, + default: "https", + auth: ":password", + user: "foo", + }, + "https://:password@gist.github.com/foo/feedbeef.git#branch": { + ...defaults.gist, + default: "https", + auth: ":password", + user: "foo", + committish: "branch", + }, + }, + github: { + // shortcuts + // + // NOTE auth is accepted but ignored + "github:foo/bar": { ...defaults.github, default: "shortcut" }, + [`github:foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "shortcut", + ...committishDefaults, + }, + "github:user@foo/bar": { ...defaults.github, default: "shortcut", auth: null }, + [`github:user@foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "shortcut", + auth: null, + ...committishDefaults, + }, + "github:user:password@foo/bar": { ...defaults.github, default: "shortcut", auth: null }, + [`github:user:password@foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "shortcut", + auth: null, + ...committishDefaults, + }, + "github::password@foo/bar": { ...defaults.github, default: "shortcut", auth: null }, + [`github::password@foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "shortcut", + auth: null, + ...committishDefaults, + }, + + "github:foo/bar.git": { ...defaults.github, default: "shortcut" }, + [`github:foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "shortcut", + ...committishDefaults, + }, + "github:user@foo/bar.git": { ...defaults.github, default: "shortcut", auth: null }, + [`github:user@foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "shortcut", + auth: null, + ...committishDefaults, + }, + "github:user:password@foo/bar.git": { ...defaults.github, default: "shortcut", auth: null }, + [`github:user:password@foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "shortcut", + auth: null, + 
...committishDefaults, + }, + "github::password@foo/bar.git": { ...defaults.github, default: "shortcut", auth: null }, + [`github::password@foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "shortcut", + auth: null, + ...committishDefaults, + }, + + // git urls + // + // NOTE auth is accepted and respected + "git://github.com/foo/bar": { ...defaults.github, default: "git" }, + [`git://github.com/foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "git", + ...committishDefaults, + }, + "git://user@github.com/foo/bar": { ...defaults.github, default: "git", auth: "user" }, + [`git://user@github.com/foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "git", + auth: "user", + ...committishDefaults, + }, + "git://user:password@github.com/foo/bar": { ...defaults.github, default: "git", auth: "user:password" }, + [`git://user:password@github.com/foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "git", + auth: "user:password", + ...committishDefaults, + }, + "git://:password@github.com/foo/bar": { ...defaults.github, default: "git", auth: ":password" }, + [`git://:password@github.com/foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "git", + auth: ":password", + ...committishDefaults, + }, + + "git://github.com/foo/bar.git": { ...defaults.github, default: "git" }, + [`git://github.com/foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "git", + ...committishDefaults, + }, + "git://git@github.com/foo/bar.git": { ...defaults.github, default: "git", auth: "git" }, + [`git://git@github.com/foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "git", + auth: "git", + ...committishDefaults, + }, + "git://user:password@github.com/foo/bar.git": { ...defaults.github, default: "git", auth: "user:password" }, + 
[`git://user:password@github.com/foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "git", + auth: "user:password", + ...committishDefaults, + }, + "git://:password@github.com/foo/bar.git": { ...defaults.github, default: "git", auth: ":password" }, + [`git://:password@github.com/foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "git", + auth: ":password", + ...committishDefaults, + }, + + // no-protocol git+ssh + // + // NOTE auth is _required_ (see invalid list) but ignored + "user@github.com:foo/bar": { ...defaults.github, default: "sshurl", auth: null }, + [`user@github.com:foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + "user:password@github.com:foo/bar": { ...defaults.github, default: "sshurl", auth: null }, + [`user:password@github.com:foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + ":password@github.com:foo/bar": { ...defaults.github, default: "sshurl", auth: null }, + [`:password@github.com:foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + + "user@github.com:foo/bar.git": { ...defaults.github, default: "sshurl", auth: null }, + [`user@github.com:foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + "user:password@github.com:foo/bar.git": { ...defaults.github, default: "sshurl", auth: null }, + [`user:password@github.com:foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + ":password@github.com:foo/bar.git": { ...defaults.github, default: "sshurl", auth: null }, + [`:password@github.com:foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: 
"sshurl", + auth: null, + ...committishDefaults, + }, + + // git+ssh urls + // + // NOTE auth is accepted but ignored + "git+ssh://github.com:foo/bar": { ...defaults.github, default: "sshurl" }, + [`git+ssh://github.com:foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + ...committishDefaults, + }, + "git+ssh://user@github.com:foo/bar": { ...defaults.github, default: "sshurl", auth: null }, + [`git+ssh://user@github.com:foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + "git+ssh://user:password@github.com:foo/bar": { ...defaults.github, default: "sshurl", auth: null }, + [`git+ssh://user:password@github.com:foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + "git+ssh://:password@github.com:foo/bar": { ...defaults.github, default: "sshurl", auth: null }, + [`git+ssh://:password@github.com:foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + + "git+ssh://github.com:foo/bar.git": { ...defaults.github, default: "sshurl" }, + [`git+ssh://github.com:foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + ...committishDefaults, + }, + "git+ssh://user@github.com:foo/bar.git": { ...defaults.github, default: "sshurl", auth: null }, + [`git+ssh://user@github.com:foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + "git+ssh://user:password@github.com:foo/bar.git": { ...defaults.github, default: "sshurl", auth: null }, + [`git+ssh://user:password@github.com:foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + "git+ssh://:password@github.com:foo/bar.git": { ...defaults.github, default: 
"sshurl", auth: null }, + [`git+ssh://:password@github.com:foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + + // ssh urls + // + // NOTE auth is accepted but ignored + "ssh://github.com:foo/bar": { ...defaults.github, default: "sshurl" }, + [`ssh://github.com:foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + ...committishDefaults, + }, + "ssh://user@github.com:foo/bar": { ...defaults.github, default: "sshurl", auth: null }, + [`ssh://user@github.com:foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + "ssh://user:password@github.com:foo/bar": { ...defaults.github, default: "sshurl", auth: null }, + [`ssh://user:password@github.com:foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + "ssh://:password@github.com:foo/bar": { ...defaults.github, default: "sshurl", auth: null }, + [`ssh://:password@github.com:foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + + "ssh://github.com:foo/bar.git": { ...defaults.github, default: "sshurl" }, + [`ssh://github.com:foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + ...committishDefaults, + }, + "ssh://user@github.com:foo/bar.git": { ...defaults.github, default: "sshurl", auth: null }, + [`ssh://user@github.com:foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + "ssh://user:password@github.com:foo/bar.git": { ...defaults.github, default: "sshurl", auth: null }, + [`ssh://user:password@github.com:foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, 
+ "ssh://:password@github.com:foo/bar.git": { ...defaults.github, default: "sshurl", auth: null }, + [`ssh://:password@github.com:foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "sshurl", + auth: null, + ...committishDefaults, + }, + + // git+https urls + // + // NOTE auth is accepted and respected + "git+https://github.com/foo/bar": { ...defaults.github, default: "https" }, + [`git+https://github.com/foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + ...committishDefaults, + }, + "git+https://user@github.com/foo/bar": { ...defaults.github, default: "https", auth: "user" }, + [`git+https://user@github.com/foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + auth: "user", + ...committishDefaults, + }, + "git+https://user:password@github.com/foo/bar": { ...defaults.github, default: "https", auth: "user:password" }, + [`git+https://user:password@github.com/foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + auth: "user:password", + ...committishDefaults, + }, + "git+https://:password@github.com/foo/bar": { ...defaults.github, default: "https", auth: ":password" }, + [`git+https://:password@github.com/foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + auth: ":password", + ...committishDefaults, + }, + + "git+https://github.com/foo/bar.git": { ...defaults.github, default: "https" }, + [`git+https://github.com/foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + ...committishDefaults, + }, + "git+https://user@github.com/foo/bar.git": { ...defaults.github, default: "https", auth: "user" }, + [`git+https://user@github.com/foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + auth: "user", + ...committishDefaults, + }, + "git+https://user:password@github.com/foo/bar.git": { + ...defaults.github, + default: 
"https", + auth: "user:password", + }, + [`git+https://user:password@github.com/foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + auth: "user:password", + ...committishDefaults, + }, + "git+https://:password@github.com/foo/bar.git": { ...defaults.github, default: "https", auth: ":password" }, + [`git+https://:password@github.com/foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + auth: ":password", + ...committishDefaults, + }, + + // https urls + // + // NOTE auth is accepted and respected + "https://github.com/foo/bar": { ...defaults.github, default: "https" }, + [`https://github.com/foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + ...committishDefaults, + }, + "https://user@github.com/foo/bar": { ...defaults.github, default: "https", auth: "user" }, + [`https://user@github.com/foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + auth: "user", + ...committishDefaults, + }, + "https://user:password@github.com/foo/bar": { ...defaults.github, default: "https", auth: "user:password" }, + [`https://user:password@github.com/foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + auth: "user:password", + ...committishDefaults, + }, + "https://:password@github.com/foo/bar": { ...defaults.github, default: "https", auth: ":password" }, + [`https://:password@github.com/foo/bar#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + auth: ":password", + ...committishDefaults, + }, + + "https://github.com/foo/bar.git": { ...defaults.github, default: "https" }, + [`https://github.com/foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + ...committishDefaults, + }, + "https://user@github.com/foo/bar.git": { ...defaults.github, default: "https", auth: "user" }, + 
[`https://user@github.com/foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + auth: "user", + ...committishDefaults, + }, + "https://user:password@github.com/foo/bar.git": { ...defaults.github, default: "https", auth: "user:password" }, + [`https://user:password@github.com/foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + auth: "user:password", + ...committishDefaults, + }, + "https://:password@github.com/foo/bar.git": { ...defaults.github, default: "https", auth: ":password" }, + [`https://:password@github.com/foo/bar.git#${committishDefaults.committish}`]: { + ...defaults.github, + default: "https", + auth: ":password", + ...committishDefaults, + }, + + // inputs that are not quite proper but we accept anyway + "https://www.github.com/foo/bar": { ...defaults.github, default: "https" }, + "foo/bar#branch with space": { ...defaults.github, default: "shortcut", committish: "branch with space" }, + "foo/bar#branch:with:colons": { ...defaults.github, default: "shortcut", committish: "branch:with:colons" }, + "https://github.com/foo/bar/tree/branch": { ...defaults.github, default: "https", committish: "branch" }, + "user..blerg--/..foo-js# . . . . . some . tags / / /": { + ...defaults.github, + default: "shortcut", + user: "user..blerg--", + project: "..foo-js", + committish: " . . . . . some . 
tags / / /", + }, + }, + gitlab: { + // shortcuts + // + // NOTE auth is accepted but ignored + // NOTE gitlabSubgroups are respected, but the gitlabSubgroup is treated as the project and the real project is lost + "gitlab:foo/bar": { ...defaults.gitlab, default: "shortcut" }, + "gitlab:foo/bar#branch": { ...defaults.gitlab, default: "shortcut", committish: "branch" }, + "gitlab:user@foo/bar": { ...defaults.gitlab, default: "shortcut", auth: null }, + "gitlab:user@foo/bar#branch": { ...defaults.gitlab, default: "shortcut", auth: null, committish: "branch" }, + "gitlab:user:password@foo/bar": { ...defaults.gitlab, default: "shortcut", auth: null }, + "gitlab:user:password@foo/bar#branch": { + ...defaults.gitlab, + default: "shortcut", + auth: null, + committish: "branch", + }, + "gitlab::password@foo/bar": { ...defaults.gitlab, default: "shortcut", auth: null }, + "gitlab::password@foo/bar#branch": { ...defaults.gitlab, default: "shortcut", auth: null, committish: "branch" }, + + "gitlab:foo/bar.git": { ...defaults.gitlab, default: "shortcut" }, + "gitlab:foo/bar.git#branch": { ...defaults.gitlab, default: "shortcut", committish: "branch" }, + "gitlab:user@foo/bar.git": { ...defaults.gitlab, default: "shortcut", auth: null }, + "gitlab:user@foo/bar.git#branch": { ...defaults.gitlab, default: "shortcut", auth: null, committish: "branch" }, + "gitlab:user:password@foo/bar.git": { ...defaults.gitlab, default: "shortcut", auth: null }, + "gitlab:user:password@foo/bar.git#branch": { + ...defaults.gitlab, + default: "shortcut", + auth: null, + committish: "branch", + }, + "gitlab::password@foo/bar.git": { ...defaults.gitlab, default: "shortcut", auth: null }, + "gitlab::password@foo/bar.git#branch": { + ...defaults.gitlab, + default: "shortcut", + auth: null, + committish: "branch", + }, + + "gitlab:foo/bar/baz": { ...defaults.gitlabSubgroup, default: "shortcut" }, + "gitlab:foo/bar/baz#branch": { ...defaults.gitlabSubgroup, default: "shortcut", committish: "branch" }, + 
"gitlab:user@foo/bar/baz": { ...defaults.gitlabSubgroup, default: "shortcut", auth: null }, + "gitlab:user@foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "shortcut", + auth: null, + committish: "branch", + }, + "gitlab:user:password@foo/bar/baz": { ...defaults.gitlabSubgroup, default: "shortcut", auth: null }, + "gitlab:user:password@foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "shortcut", + auth: null, + committish: "branch", + }, + "gitlab::password@foo/bar/baz": { ...defaults.gitlabSubgroup, default: "shortcut", auth: null }, + "gitlab::password@foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "shortcut", + auth: null, + committish: "branch", + }, + + "gitlab:foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "shortcut" }, + "gitlab:foo/bar/baz.git#branch": { ...defaults.gitlabSubgroup, default: "shortcut", committish: "branch" }, + "gitlab:user@foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "shortcut", auth: null }, + "gitlab:user@foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "shortcut", + auth: null, + committish: "branch", + }, + "gitlab:user:password@foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "shortcut", auth: null }, + "gitlab:user:password@foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "shortcut", + auth: null, + committish: "branch", + }, + "gitlab::password@foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "shortcut", auth: null }, + "gitlab::password@foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "shortcut", + auth: null, + committish: "branch", + }, + + // no-protocol git+ssh + // + // NOTE auth is _required_ (see invalid list) but ignored + "user@gitlab.com:foo/bar": { ...defaults.gitlab, default: "sshurl", auth: null }, + "user@gitlab.com:foo/bar#branch": { ...defaults.gitlab, default: "sshurl", auth: null, committish: "branch" }, + "user:password@gitlab.com:foo/bar": { ...defaults.gitlab, default: 
"sshurl", auth: null }, + "user:password@gitlab.com:foo/bar#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + committish: "branch", + }, + ":password@gitlab.com:foo/bar": { ...defaults.gitlab, default: "sshurl", auth: null }, + ":password@gitlab.com:foo/bar#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "user@gitlab.com:foo/bar.git": { ...defaults.gitlab, default: "sshurl", auth: null }, + "user@gitlab.com:foo/bar.git#branch": { ...defaults.gitlab, default: "sshurl", auth: null, committish: "branch" }, + "user:password@gitlab.com:foo/bar.git": { ...defaults.gitlab, default: "sshurl", auth: null }, + "user:password@gitlab.com:foo/bar.git#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + committish: "branch", + }, + ":password@gitlab.com:foo/bar.git": { ...defaults.gitlab, default: "sshurl", auth: null }, + ":password@gitlab.com:foo/bar.git#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "user@gitlab.com:foo/bar/baz": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + "user@gitlab.com:foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + "user:password@gitlab.com:foo/bar/baz": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + "user:password@gitlab.com:foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + ":password@gitlab.com:foo/bar/baz": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + ":password@gitlab.com:foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "user@gitlab.com:foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + "user@gitlab.com:foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: 
"branch", + }, + "user:password@gitlab.com:foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + "user:password@gitlab.com:foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + ":password@gitlab.com:foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + ":password@gitlab.com:foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + + // git+ssh urls + // + // NOTE auth is accepted but ignored + // NOTE subprojects are accepted, but the subproject is treated as the project and the real project is lost + "git+ssh://gitlab.com:foo/bar": { ...defaults.gitlab, default: "sshurl" }, + "git+ssh://gitlab.com:foo/bar#branch": { ...defaults.gitlab, default: "sshurl", committish: "branch" }, + "git+ssh://user@gitlab.com:foo/bar": { ...defaults.gitlab, default: "sshurl", auth: null }, + "git+ssh://user@gitlab.com:foo/bar#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://user:password@gitlab.com:foo/bar": { ...defaults.gitlab, default: "sshurl", auth: null }, + "git+ssh://user:password@gitlab.com:foo/bar#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://:password@gitlab.com:foo/bar": { ...defaults.gitlab, default: "sshurl", auth: null }, + "git+ssh://:password@gitlab.com:foo/bar#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "git+ssh://gitlab.com:foo/bar.git": { ...defaults.gitlab, default: "sshurl" }, + "git+ssh://gitlab.com:foo/bar.git#branch": { ...defaults.gitlab, default: "sshurl", committish: "branch" }, + "git+ssh://user@gitlab.com:foo/bar.git": { ...defaults.gitlab, default: "sshurl", auth: null }, + "git+ssh://user@gitlab.com:foo/bar.git#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + 
committish: "branch", + }, + "git+ssh://user:password@gitlab.com:foo/bar.git": { ...defaults.gitlab, default: "sshurl", auth: null }, + "git+ssh://user:password@gitlab.com:foo/bar.git#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://:password@gitlab.com:foo/bar.git": { ...defaults.gitlab, default: "sshurl", auth: null }, + "git+ssh://:password@gitlab.com:foo/bar.git#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "git+ssh://gitlab.com:foo/bar/baz": { ...defaults.gitlabSubgroup, default: "sshurl" }, + "git+ssh://gitlab.com:foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + committish: "branch", + }, + "git+ssh://user@gitlab.com:foo/bar/baz": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + "git+ssh://user@gitlab.com:foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://user:password@gitlab.com:foo/bar/baz": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + "git+ssh://user:password@gitlab.com:foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://:password@gitlab.com:foo/bar/baz": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + "git+ssh://:password@gitlab.com:foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "git+ssh://gitlab.com:foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "sshurl" }, + "git+ssh://gitlab.com:foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + committish: "branch", + }, + "git+ssh://user@gitlab.com:foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + "git+ssh://user@gitlab.com:foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: 
"branch", + }, + "git+ssh://user:password@gitlab.com:foo/bar/baz.git": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + }, + "git+ssh://user:password@gitlab.com:foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + "git+ssh://:password@gitlab.com:foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + "git+ssh://:password@gitlab.com:foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + + // ssh urls + // + // NOTE auth is accepted but ignored + // NOTE subprojects are accepted, but the subproject is treated as the project and the real project is lost + "ssh://gitlab.com:foo/bar": { ...defaults.gitlab, default: "sshurl" }, + "ssh://gitlab.com:foo/bar#branch": { ...defaults.gitlab, default: "sshurl", committish: "branch" }, + "ssh://user@gitlab.com:foo/bar": { ...defaults.gitlab, default: "sshurl", auth: null }, + "ssh://user@gitlab.com:foo/bar#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://user:password@gitlab.com:foo/bar": { ...defaults.gitlab, default: "sshurl", auth: null }, + "ssh://user:password@gitlab.com:foo/bar#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://:password@gitlab.com:foo/bar": { ...defaults.gitlab, default: "sshurl", auth: null }, + "ssh://:password@gitlab.com:foo/bar#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "ssh://gitlab.com:foo/bar.git": { ...defaults.gitlab, default: "sshurl" }, + "ssh://gitlab.com:foo/bar.git#branch": { ...defaults.gitlab, default: "sshurl", committish: "branch" }, + "ssh://user@gitlab.com:foo/bar.git": { ...defaults.gitlab, default: "sshurl", auth: null }, + "ssh://user@gitlab.com:foo/bar.git#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + 
committish: "branch", + }, + "ssh://user:password@gitlab.com:foo/bar.git": { ...defaults.gitlab, default: "sshurl", auth: null }, + "ssh://user:password@gitlab.com:foo/bar.git#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://:password@gitlab.com:foo/bar.git": { ...defaults.gitlab, default: "sshurl", auth: null }, + "ssh://:password@gitlab.com:foo/bar.git#branch": { + ...defaults.gitlab, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "ssh://gitlab.com:foo/bar/baz": { ...defaults.gitlabSubgroup, default: "sshurl" }, + "ssh://gitlab.com:foo/bar/baz#branch": { ...defaults.gitlabSubgroup, default: "sshurl", committish: "branch" }, + "ssh://user@gitlab.com:foo/bar/baz": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + "ssh://user@gitlab.com:foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://user:password@gitlab.com:foo/bar/baz": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + "ssh://user:password@gitlab.com:foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://:password@gitlab.com:foo/bar/baz": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + "ssh://:password@gitlab.com:foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + + "ssh://gitlab.com:foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "sshurl" }, + "ssh://gitlab.com:foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + committish: "branch", + }, + "ssh://user@gitlab.com:foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + "ssh://user@gitlab.com:foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://user:password@gitlab.com:foo/bar/baz.git": { 
...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + "ssh://user:password@gitlab.com:foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + "ssh://:password@gitlab.com:foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "sshurl", auth: null }, + "ssh://:password@gitlab.com:foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "sshurl", + auth: null, + committish: "branch", + }, + + // git+https urls + // + // NOTE auth is accepted and respected + // NOTE subprojects are accepted, but the subproject is treated as the project and the real project is lost + "git+https://gitlab.com/foo/bar": { ...defaults.gitlab, default: "https" }, + "git+https://gitlab.com/foo/bar#branch": { ...defaults.gitlab, default: "https", committish: "branch" }, + "git+https://user@gitlab.com/foo/bar": { ...defaults.gitlab, default: "https", auth: "user" }, + "git+https://user@gitlab.com/foo/bar#branch": { + ...defaults.gitlab, + default: "https", + auth: "user", + committish: "branch", + }, + "git+https://user:password@gitlab.com/foo/bar": { ...defaults.gitlab, default: "https", auth: "user:password" }, + "git+https://user:password@gitlab.com/foo/bar#branch": { + ...defaults.gitlab, + default: "https", + auth: "user:password", + committish: "branch", + }, + "git+https://:password@gitlab.com/foo/bar": { ...defaults.gitlab, default: "https", auth: ":password" }, + "git+https://:password@gitlab.com/foo/bar#branch": { + ...defaults.gitlab, + default: "https", + auth: ":password", + committish: "branch", + }, + + "git+https://gitlab.com/foo/bar.git": { ...defaults.gitlab, default: "https" }, + "git+https://gitlab.com/foo/bar.git#branch": { ...defaults.gitlab, default: "https", committish: "branch" }, + "git+https://user@gitlab.com/foo/bar.git": { ...defaults.gitlab, default: "https", auth: "user" }, + "git+https://user@gitlab.com/foo/bar.git#branch": { + ...defaults.gitlab, + default: "https", + auth: 
"user", + committish: "branch", + }, + "git+https://user:password@gitlab.com/foo/bar.git": { + ...defaults.gitlab, + default: "https", + auth: "user:password", + }, + "git+https://user:password@gitlab.com/foo/bar.git#branch": { + ...defaults.gitlab, + default: "https", + auth: "user:password", + committish: "branch", + }, + "git+https://:password@gitlab.com/foo/bar.git": { ...defaults.gitlab, default: "https", auth: ":password" }, + "git+https://:password@gitlab.com/foo/bar.git#branch": { + ...defaults.gitlab, + default: "https", + auth: ":password", + committish: "branch", + }, + + "git+https://gitlab.com/foo/bar/baz": { ...defaults.gitlabSubgroup, default: "https" }, + "git+https://gitlab.com/foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "https", + committish: "branch", + }, + "git+https://user@gitlab.com/foo/bar/baz": { ...defaults.gitlabSubgroup, default: "https", auth: "user" }, + "git+https://user@gitlab.com/foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "https", + auth: "user", + committish: "branch", + }, + "git+https://user:password@gitlab.com/foo/bar/baz": { + ...defaults.gitlabSubgroup, + default: "https", + auth: "user:password", + }, + "git+https://user:password@gitlab.com/foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "https", + auth: "user:password", + committish: "branch", + }, + "git+https://:password@gitlab.com/foo/bar/baz": { + ...defaults.gitlabSubgroup, + default: "https", + auth: ":password", + }, + "git+https://:password@gitlab.com/foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "https", + auth: ":password", + committish: "branch", + }, + + "git+https://gitlab.com/foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "https" }, + "git+https://gitlab.com/foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "https", + committish: "branch", + }, + "git+https://user@gitlab.com/foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "https", auth: "user" }, + 
"git+https://user@gitlab.com/foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "https", + auth: "user", + committish: "branch", + }, + "git+https://user:password@gitlab.com/foo/bar/baz.git": { + ...defaults.gitlabSubgroup, + default: "https", + auth: "user:password", + }, + "git+https://user:password@gitlab.com/foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "https", + auth: "user:password", + committish: "branch", + }, + "git+https://:password@gitlab.com/foo/bar/baz.git": { + ...defaults.gitlabSubgroup, + default: "https", + auth: ":password", + }, + "git+https://:password@gitlab.com/foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "https", + auth: ":password", + committish: "branch", + }, + + // https urls + // + // NOTE auth is accepted and respected + // NOTE subprojects are accepted, but the subproject is treated as the project and the real project is lost + "https://gitlab.com/foo/bar": { ...defaults.gitlab, default: "https" }, + "https://gitlab.com/foo/bar#branch": { ...defaults.gitlab, default: "https", committish: "branch" }, + "https://user@gitlab.com/foo/bar": { ...defaults.gitlab, default: "https", auth: "user" }, + "https://user@gitlab.com/foo/bar#branch": { + ...defaults.gitlab, + default: "https", + auth: "user", + committish: "branch", + }, + "https://user:password@gitlab.com/foo/bar": { ...defaults.gitlab, default: "https", auth: "user:password" }, + "https://user:password@gitlab.com/foo/bar#branch": { + ...defaults.gitlab, + default: "https", + auth: "user:password", + committish: "branch", + }, + "https://:password@gitlab.com/foo/bar": { ...defaults.gitlab, default: "https", auth: ":password" }, + "https://:password@gitlab.com/foo/bar#branch": { + ...defaults.gitlab, + default: "https", + auth: ":password", + committish: "branch", + }, + + "https://gitlab.com/foo/bar.git": { ...defaults.gitlab, default: "https" }, + "https://gitlab.com/foo/bar.git#branch": { ...defaults.gitlab, default: 
"https", committish: "branch" }, + "https://user@gitlab.com/foo/bar.git": { ...defaults.gitlab, default: "https", auth: "user" }, + "https://user@gitlab.com/foo/bar.git#branch": { + ...defaults.gitlab, + default: "https", + auth: "user", + committish: "branch", + }, + "https://user:password@gitlab.com/foo/bar.git": { ...defaults.gitlab, default: "https", auth: "user:password" }, + "https://user:password@gitlab.com/foo/bar.git#branch": { + ...defaults.gitlab, + default: "https", + auth: "user:password", + committish: "branch", + }, + "https://:password@gitlab.com/foo/bar.git": { ...defaults.gitlab, default: "https", auth: ":password" }, + "https://:password@gitlab.com/foo/bar.git#branch": { + ...defaults.gitlab, + default: "https", + auth: ":password", + committish: "branch", + }, + + "https://gitlab.com/foo/bar/baz": { ...defaults.gitlabSubgroup, default: "https" }, + "https://gitlab.com/foo/bar/baz#branch": { ...defaults.gitlabSubgroup, default: "https", committish: "branch" }, + "https://user@gitlab.com/foo/bar/baz": { ...defaults.gitlabSubgroup, default: "https", auth: "user" }, + "https://user@gitlab.com/foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "https", + auth: "user", + committish: "branch", + }, + "https://user:password@gitlab.com/foo/bar/baz": { + ...defaults.gitlabSubgroup, + default: "https", + auth: "user:password", + }, + "https://user:password@gitlab.com/foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "https", + auth: "user:password", + committish: "branch", + }, + "https://:password@gitlab.com/foo/bar/baz": { ...defaults.gitlabSubgroup, default: "https", auth: ":password" }, + "https://:password@gitlab.com/foo/bar/baz#branch": { + ...defaults.gitlabSubgroup, + default: "https", + auth: ":password", + committish: "branch", + }, + + "https://gitlab.com/foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "https" }, + "https://gitlab.com/foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "https", 
+ committish: "branch", + }, + "https://user@gitlab.com/foo/bar/baz.git": { ...defaults.gitlabSubgroup, default: "https", auth: "user" }, + "https://user@gitlab.com/foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "https", + auth: "user", + committish: "branch", + }, + "https://user:password@gitlab.com/foo/bar/baz.git": { + ...defaults.gitlabSubgroup, + default: "https", + auth: "user:password", + }, + "https://user:password@gitlab.com/foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "https", + auth: "user:password", + committish: "branch", + }, + "https://:password@gitlab.com/foo/bar/baz.git": { + ...defaults.gitlabSubgroup, + default: "https", + auth: ":password", + }, + "https://:password@gitlab.com/foo/bar/baz.git#branch": { + ...defaults.gitlabSubgroup, + default: "https", + auth: ":password", + committish: "branch", + }, + }, + misc: {}, + sourcehut: { + // shortucts + "sourcehut:~foo/bar": { ...defaults.sourcehut, default: "shortcut" }, + "sourcehut:~foo/bar#branch": { ...defaults.sourcehut, default: "shortcut", committish: "branch" }, + + // shortcuts (.git) + "sourcehut:~foo/bar.git": { ...defaults.sourcehut, default: "shortcut" }, + "sourcehut:~foo/bar.git#branch": { ...defaults.sourcehut, default: "shortcut", committish: "branch" }, + + // no-protocol git+ssh + "git@git.sr.ht:~foo/bar": { ...defaults.sourcehut, default: "sshurl", auth: null }, + "git@git.sr.ht:~foo/bar#branch": { + ...defaults.sourcehut, + default: "sshurl", + auth: null, + committish: "branch", + }, + + // no-protocol git+ssh (.git) + "git@git.sr.ht:~foo/bar.git": { ...defaults.sourcehut, default: "sshurl", auth: null }, + "git@git.sr.ht:~foo/bar.git#branch": { + ...defaults.sourcehut, + default: "sshurl", + auth: null, + committish: "branch", + }, + + // git+ssh urls + "git+ssh://git@git.sr.ht:~foo/bar": { ...defaults.sourcehut, default: "sshurl" }, + "git+ssh://git@git.sr.ht:~foo/bar#branch": { + ...defaults.sourcehut, + default: "sshurl", + 
committish: "branch", + }, + + // git+ssh urls (.git) + "git+ssh://git@git.sr.ht:~foo/bar.git": { ...defaults.sourcehut, default: "sshurl" }, + "git+ssh://git@git.sr.ht:~foo/bar.git#branch": { + ...defaults.sourcehut, + default: "sshurl", + committish: "branch", + }, + + // https urls + "https://git.sr.ht/~foo/bar": { ...defaults.sourcehut, default: "https" }, + "https://git.sr.ht/~foo/bar#branch": { ...defaults.sourcehut, default: "https", committish: "branch" }, + + "https://git.sr.ht/~foo/bar.git": { ...defaults.sourcehut, default: "https" }, + "https://git.sr.ht/~foo/bar.git#branch": { ...defaults.sourcehut, default: "https", committish: "branch" }, + }, +}; + +export const invalidGitUrls = { + bitbucket: [ + // invalid protocol + "git://bitbucket.org/foo/bar", + // url to get a tarball + "https://bitbucket.org/foo/bar/get/archive.tar.gz", + // missing project + "https://bitbucket.org/foo", + ], + gist: [ + // raw urls that are wrong anyway but for some reason are in the wild + "https://gist.github.com/foo/feedbeef/raw/fix%2Fbug/", + // missing both user and project + "https://gist.github.com/", + ], + github: [ + // foo/bar shorthand but specifying auth + "user@foo/bar", + "user:password@foo/bar", + ":password@foo/bar", + // foo/bar shorthand but with a space in it + "foo/ bar", + // string that ends with a slash, probably a directory + "foo/bar/", + // git@github.com style, but omitting the username + "github.com:foo/bar", + "github.com/foo/bar", + // invalid URI encoding + "github:foo%0N/bar", + // missing path + "git+ssh://git@github.com:", + // a deep url to something we don't know + "https://github.com/foo/bar/issues", + ], + gitlab: [ + // gitlab urls can contain a /-/ segment, make sure we ignore those + "https://gitlab.com/foo/-/something", + // missing project + "https://gitlab.com/foo", + // tarball, this should not parse so that it can be used for pacote's remote fetcher + "https://gitlab.com/foo/bar/repository/archive.tar.gz", + 
"https://gitlab.com/foo/bar/repository/archive.tar.gz?ref=49b393e2ded775f2df36ef2ffcb61b0359c194c9", + ], + misc: [ + "https://google.com", + "git+ssh://git@nothosted.com/abc/def", + "git://nothosted.com", + "git+file:///foo/bar", + "git+ssh://git@git.unlucky.com:RND/electron-tools/some-tool#2.0.1", + "::", + "", + null, + undefined, + ], + sourcehut: [ + // missing project + "https://git.sr.ht/~foo", + // invalid protocols + "git://git@git.sr.ht:~foo/bar", + "ssh://git.sr.ht:~foo/bar", + // tarball url + "https://git.sr.ht/~foo/bar/archive/HEAD.tar.gz", + ], +}; diff --git a/test/cli/install/hosted-git-info/from-url.test.ts b/test/cli/install/hosted-git-info/from-url.test.ts new file mode 100644 index 0000000000..e6cb2ab12c --- /dev/null +++ b/test/cli/install/hosted-git-info/from-url.test.ts @@ -0,0 +1,31 @@ +import { hostedGitInfo } from "bun:internal-for-testing"; +import { describe, expect, it } from "bun:test"; +import { invalidGitUrls, validGitUrls } from "./cases"; + +describe("fromUrl", () => { + describe("valid urls", () => { + describe.each(Object.entries(validGitUrls))("%s", (_, urlset: object) => { + it.each(Object.entries(urlset))("parses %s", (url, expected) => { + expect(hostedGitInfo.fromUrl(url)).toMatchObject({ + ...(expected.type && { type: expected.type }), + ...(expected.domain && { domain: expected.domain }), + ...(expected.user && { user: expected.user }), + ...(expected.project && { project: expected.project }), + ...(expected.committish && { committish: expected.committish }), + ...(expected.default && { default: expected.default }), + }); + }); + }); + }); + + // TODO(markovejnovic): Unskip these tests. 
+ describe.skip("invalid urls", () => { + describe.each(Object.entries(invalidGitUrls))("%s", (_, urls: (string | null | undefined)[]) => { + it.each(urls)("does not permit %s", url => { + expect(() => { + hostedGitInfo.fromUrl(url); + }).toThrow(); + }); + }); + }); +}); diff --git a/test/cli/install/hosted-git-info/parse-url.test.ts b/test/cli/install/hosted-git-info/parse-url.test.ts new file mode 100644 index 0000000000..c88019b66a --- /dev/null +++ b/test/cli/install/hosted-git-info/parse-url.test.ts @@ -0,0 +1,21 @@ +/** + * Mimics https://github.com/npm/hosted-git-info/blob/main/test/parse-url.js + */ +import { hostedGitInfo } from "bun:internal-for-testing"; +import { describe, expect, it } from "bun:test"; + +const okCases = [ + // These come straight out of the hosted-git-info tests + "git+ssh://git@abc:frontend/utils.git#6d45447e0c5eb6cd2e3edf05a8c5a9bb81950c79", + // These are custom cases added for Bun + "ssh://:password@bitbucket.org:foo/bar.git", + "git@bitbucket.org:foo/bar", + "gist:user:password@/feedbeef#branch", + "github:foo/bar#branch with space", +]; + +describe("parseUrl", () => { + it.each(okCases)("parses %s", url => { + expect(hostedGitInfo.parseUrl(url)).not.toBeNull(); + }); +}); diff --git a/test/cli/install/isolated-install.test.ts b/test/cli/install/isolated-install.test.ts index ffa3812f0a..a39d0ff2a1 100644 --- a/test/cli/install/isolated-install.test.ts +++ b/test/cli/install/isolated-install.test.ts @@ -467,6 +467,138 @@ describe("isolated workspaces", () => { }); }); +describe("optional peers", () => { + const tests = [ + // non-optional versions + { + name: "non-optional transitive only", + deps: [{ "one-optional-peer-dep": "1.0.1" }, { "one-optional-peer-dep": "1.0.1" }], + expected: ["no-deps@1.1.0", "node_modules", "one-optional-peer-dep@1.0.1+7ff199101204a65d"], + }, + { + name: "non-optional direct pkg1", + deps: [{ "one-optional-peer-dep": "1.0.1", "no-deps": "1.0.1" }, { "one-optional-peer-dep": "1.0.1" }], + expected: 
["no-deps@1.0.1", "node_modules", "one-optional-peer-dep@1.0.1+f8a822eca018d0a1"], + }, + { + name: "non-optional direct pkg2", + deps: [{ "one-optional-peer-dep": "1.0.1" }, { "one-optional-peer-dep": "1.0.1", "no-deps": "1.0.1" }], + expected: ["no-deps@1.0.1", "node_modules", "one-optional-peer-dep@1.0.1+f8a822eca018d0a1"], + }, + // optional versions + { + name: "optional transitive only", + deps: [{ "one-optional-peer-dep": "1.0.2" }, { "one-optional-peer-dep": "1.0.2" }], + expected: ["node_modules", "one-optional-peer-dep@1.0.2"], + }, + { + name: "optional direct pkg1", + deps: [{ "one-optional-peer-dep": "1.0.2", "no-deps": "1.0.1" }, { "one-optional-peer-dep": "1.0.2" }], + expected: ["no-deps@1.0.1", "node_modules", "one-optional-peer-dep@1.0.2+f8a822eca018d0a1"], + }, + { + name: "optional direct pkg2", + deps: [{ "one-optional-peer-dep": "1.0.2" }, { "one-optional-peer-dep": "1.0.2", "no-deps": "1.0.1" }], + expected: ["no-deps@1.0.1", "node_modules", "one-optional-peer-dep@1.0.2+f8a822eca018d0a1"], + }, + ]; + + for (const { deps, expected, name } of tests) { + test(`will resolve if available through another importer (${name})`, async () => { + const { packageDir } = await registry.createTestDir({ + bunfigOpts: { isolated: true }, + files: { + "package.json": JSON.stringify({ + name: "optional-peers", + workspaces: ["packages/*"], + }), + "packages/pkg1/package.json": JSON.stringify({ + name: "pkg1", + dependencies: deps[0], + }), + "packages/pkg2/package.json": JSON.stringify({ + name: "pkg2", + dependencies: deps[1], + }), + }, + }); + + async function checkInstall() { + const { exited } = spawn({ + cmd: [bunExe(), "install"], + cwd: packageDir, + env: bunEnv, + stdout: "ignore", + stderr: "ignore", + }); + + expect(await exited).toBe(0); + expect(await readdirSorted(join(packageDir, "node_modules/.bun"))).toEqual(expected); + } + + // without lockfile + // without node_modules + await checkInstall(); + + // with lockfile + // without node_modules + 
await rm(join(packageDir, "node_modules"), { recursive: true, force: true }); + await checkInstall(); + + // without lockfile + // with node_modules + await rm(join(packageDir, "bun.lock"), { force: true }); + await checkInstall(); + + // with lockfile + // with node_modules + await checkInstall(); + }); + } + + test("successfully resolves optional peer with nested package", async () => { + const { packageDir } = await registry.createTestDir({ + bunfigOpts: { isolated: true }, + files: { + "package.json": JSON.stringify({ + name: "optional-peer-nested-resolve", + dependencies: { + "one-one-dep": "1.0.0", + }, + peerDependencies: { + "one-dep": "1.0.0", + }, + peerDependenciesMeta: { + "one-dep": { + optional: true, + }, + }, + }), + }, + }); + + async function checkInstall() { + let { exited } = spawn({ + cmd: [bunExe(), "install"], + cwd: packageDir, + env: bunEnv, + }); + expect(await exited).toBe(0); + + expect(await readdirSorted(join(packageDir, "node_modules"))).toEqual([".bun", "one-dep", "one-one-dep"]); + expect(await readdirSorted(join(packageDir, "node_modules/.bun"))).toEqual([ + "no-deps@1.0.1", + "node_modules", + "one-dep@1.0.0", + "one-one-dep@1.0.0", + ]); + } + + await checkInstall(); + await checkInstall(); + }); +}); + for (const backend of ["clonefile", "hardlink", "copyfile"]) { test(`isolated install with backend: ${backend}`, async () => { const { packageJson, packageDir } = await registry.createTestDir({ bunfigOpts: { isolated: true } }); diff --git a/test/cli/install/migrate-bun-lockb-v2.test.ts b/test/cli/install/migrate-bun-lockb-v2.test.ts index 671a938f56..3c4a4d1415 100644 --- a/test/cli/install/migrate-bun-lockb-v2.test.ts +++ b/test/cli/install/migrate-bun-lockb-v2.test.ts @@ -85,8 +85,6 @@ for (const testInfo of tests) { await cp(join(import.meta.dir, "fixtures", testInfo.lockfile), join(testDir, "bun.lockb")); - const oldLockfile = parseLockfile(testDir); - let { stderr, exited } = spawn({ cmd: [bunExe(), "install"], cwd: testDir, 
@@ -106,7 +104,7 @@ for (const testInfo of tests) { // contents should be different due to semver numbers changing size expect(newLockfileContents).not.toEqual(oldLockfileContents); // but parse result should be the same - expect(newLockfile).toEqual(oldLockfile); + expect(newLockfile).toMatchSnapshot(); // another install should not change the lockfile ({ stderr, exited } = spawn({ diff --git a/test/cli/install/registry/packages/one-one-dep/one-one-dep-1.0.0.tgz b/test/cli/install/registry/packages/one-one-dep/one-one-dep-1.0.0.tgz new file mode 100644 index 0000000000..7ffb493935 Binary files /dev/null and b/test/cli/install/registry/packages/one-one-dep/one-one-dep-1.0.0.tgz differ diff --git a/test/cli/install/registry/packages/one-one-dep/package.json b/test/cli/install/registry/packages/one-one-dep/package.json new file mode 100644 index 0000000000..c8636a04e3 --- /dev/null +++ b/test/cli/install/registry/packages/one-one-dep/package.json @@ -0,0 +1,44 @@ +{ + "name": "one-one-dep", + "versions": { + "1.0.0": { + "name": "one-one-dep", + "version": "1.0.0", + "dependencies": { + "one-dep": "1.0.0" + }, + "_id": "one-one-dep@1.0.0", + "_integrity": "sha512-S6Bn6e9/9cZITQE2DiS8P3gd+Zwb6DF8LyLPcNemmE0nLh5kfjnxnGypmjzaRnSRnfqjEfqRFhjJPomjyNphuQ==", + "_nodeVersion": "24.3.0", + "_npmVersion": "10.8.3", + "integrity": "sha512-S6Bn6e9/9cZITQE2DiS8P3gd+Zwb6DF8LyLPcNemmE0nLh5kfjnxnGypmjzaRnSRnfqjEfqRFhjJPomjyNphuQ==", + "shasum": "52fab2c318e3e5b13fc6a4fb52da3791d79bba84", + "dist": { + "integrity": "sha512-S6Bn6e9/9cZITQE2DiS8P3gd+Zwb6DF8LyLPcNemmE0nLh5kfjnxnGypmjzaRnSRnfqjEfqRFhjJPomjyNphuQ==", + "shasum": "52fab2c318e3e5b13fc6a4fb52da3791d79bba84", + "tarball": "http://http://localhost:4873/one-one-dep/-/one-one-dep-1.0.0.tgz" + }, + "contributors": [] + } + }, + "time": { + "modified": "2025-11-01T03:49:53.670Z", + "created": "2025-11-01T03:49:53.670Z", + "1.0.0": "2025-11-01T03:49:53.670Z" + }, + "users": {}, + "dist-tags": { + "latest": "1.0.0" + }, + 
"_uplinks": {}, + "_distfiles": {}, + "_attachments": { + "one-one-dep-1.0.0.tgz": { + "shasum": "52fab2c318e3e5b13fc6a4fb52da3791d79bba84", + "version": "1.0.0" + } + }, + "_rev": "", + "_id": "one-one-dep", + "readme": "" +} \ No newline at end of file diff --git a/test/cli/install/registry/packages/one-optional-peer-dep/one-optional-peer-dep-1.0.1.tgz b/test/cli/install/registry/packages/one-optional-peer-dep/one-optional-peer-dep-1.0.1.tgz new file mode 100644 index 0000000000..aee036cfc2 Binary files /dev/null and b/test/cli/install/registry/packages/one-optional-peer-dep/one-optional-peer-dep-1.0.1.tgz differ diff --git a/test/cli/install/registry/packages/one-optional-peer-dep/one-optional-peer-dep-1.0.2.tgz b/test/cli/install/registry/packages/one-optional-peer-dep/one-optional-peer-dep-1.0.2.tgz new file mode 100644 index 0000000000..107138e6e6 Binary files /dev/null and b/test/cli/install/registry/packages/one-optional-peer-dep/one-optional-peer-dep-1.0.2.tgz differ diff --git a/test/cli/install/registry/packages/one-optional-peer-dep/package.json b/test/cli/install/registry/packages/one-optional-peer-dep/package.json new file mode 100644 index 0000000000..de8eeff651 --- /dev/null +++ b/test/cli/install/registry/packages/one-optional-peer-dep/package.json @@ -0,0 +1,73 @@ +{ + "name": "one-optional-peer-dep", + "versions": { + "1.0.1": { + "name": "one-optional-peer-dep", + "version": "1.0.1", + "peerDependencies": { + "no-deps": "^1.0.0" + }, + "_id": "one-optional-peer-dep@1.0.1", + "_integrity": "sha512-tqKLkZX69BL7dsv8ldPBeNqypOS2QxPEyqdB48kbcf0oyPGzLK+tOuDFEK7U63H+PoGErF+ro7reQUztRyL/pA==", + "_nodeVersion": "24.3.0", + "_npmVersion": "10.8.3", + "integrity": "sha512-tqKLkZX69BL7dsv8ldPBeNqypOS2QxPEyqdB48kbcf0oyPGzLK+tOuDFEK7U63H+PoGErF+ro7reQUztRyL/pA==", + "shasum": "70ab2e73a16d86c1ed2547889025df30b8534c71", + "dist": { + "integrity": "sha512-tqKLkZX69BL7dsv8ldPBeNqypOS2QxPEyqdB48kbcf0oyPGzLK+tOuDFEK7U63H+PoGErF+ro7reQUztRyL/pA==", + "shasum": 
"70ab2e73a16d86c1ed2547889025df30b8534c71", + "tarball": "http://http://localhost:4873/one-optional-peer-dep/-/one-optional-peer-dep-1.0.1.tgz" + }, + "contributors": [] + }, + "1.0.2": { + "name": "one-optional-peer-dep", + "version": "1.0.2", + "peerDependencies": { + "no-deps": "^1.0.0" + }, + "peerDependenciesMeta": { + "no-deps": { + "optional": true + } + }, + "_id": "one-optional-peer-dep@1.0.2", + "_integrity": "sha512-S25U8/QXGIKfn/AWtsce1aVMnDjDL+ykFtAufpsuKGad32NlsCpi9TDuXvzoTQ+MdaZpGV3c4xghUZUsNeMp4A==", + "_nodeVersion": "24.3.0", + "_npmVersion": "10.8.3", + "integrity": "sha512-S25U8/QXGIKfn/AWtsce1aVMnDjDL+ykFtAufpsuKGad32NlsCpi9TDuXvzoTQ+MdaZpGV3c4xghUZUsNeMp4A==", + "shasum": "0fbf084260c3decd365aaaf2d72526ec2d01fe1d", + "dist": { + "integrity": "sha512-S25U8/QXGIKfn/AWtsce1aVMnDjDL+ykFtAufpsuKGad32NlsCpi9TDuXvzoTQ+MdaZpGV3c4xghUZUsNeMp4A==", + "shasum": "0fbf084260c3decd365aaaf2d72526ec2d01fe1d", + "tarball": "http://http://localhost:4873/one-optional-peer-dep/-/one-optional-peer-dep-1.0.2.tgz" + }, + "contributors": [] + } + }, + "time": { + "modified": "2025-10-31T21:56:37.922Z", + "created": "2025-10-31T21:48:54.585Z", + "1.0.1": "2025-10-31T21:48:54.585Z", + "1.0.2": "2025-10-31T21:56:37.922Z" + }, + "users": {}, + "dist-tags": { + "latest": "1.0.2" + }, + "_uplinks": {}, + "_distfiles": {}, + "_attachments": { + "one-optional-peer-dep-1.0.1.tgz": { + "shasum": "70ab2e73a16d86c1ed2547889025df30b8534c71", + "version": "1.0.1" + }, + "one-optional-peer-dep-1.0.2.tgz": { + "shasum": "0fbf084260c3decd365aaaf2d72526ec2d01fe1d", + "version": "1.0.2" + } + }, + "_rev": "", + "_id": "one-optional-peer-dep", + "readme": "" +} \ No newline at end of file diff --git a/test/cli/run/cpu-prof.test.ts b/test/cli/run/cpu-prof.test.ts new file mode 100644 index 0000000000..a2172ab704 --- /dev/null +++ b/test/cli/run/cpu-prof.test.ts @@ -0,0 +1,190 @@ +import { describe, expect, test } from "bun:test"; +import { readdirSync, readFileSync } from "fs"; +import { 
bunEnv, bunExe, tempDir } from "harness"; +import { join } from "path"; + +describe.concurrent("--cpu-prof", () => { + test("generates CPU profile with default name", async () => { + using dir = tempDir("cpu-prof", { + "test.js": ` + // CPU-intensive task + function fibonacci(n) { + if (n <= 1) return n; + return fibonacci(n - 1) + fibonacci(n - 2); + } + + const now = performance.now(); + while (now + 50 > performance.now()) { + Bun.inspect(fibonacci(20)); + } + `, + }); + + await using proc = Bun.spawn({ + cmd: [bunExe(), "--cpu-prof", "test.js"], + cwd: String(dir), + env: bunEnv, + stdout: "inherit", + stderr: "inherit", + }); + + const exitCode = await proc.exited; + + // Check that a .cpuprofile file was created + const files = readdirSync(String(dir)); + const profileFiles = files.filter(f => f.endsWith(".cpuprofile")); + + expect(profileFiles.length).toBeGreaterThan(0); + expect(exitCode).toBe(0); + + // Read and validate the profile + const profilePath = join(String(dir), profileFiles[0]); + const profileContent = readFileSync(profilePath, "utf-8"); + const profile = JSON.parse(profileContent); + + // Validate Chrome CPU Profiler format + expect(profile).toHaveProperty("nodes"); + expect(profile).toHaveProperty("startTime"); + expect(profile).toHaveProperty("endTime"); + expect(profile).toHaveProperty("samples"); + expect(profile).toHaveProperty("timeDeltas"); + + expect(Array.isArray(profile.nodes)).toBe(true); + expect(Array.isArray(profile.samples)).toBe(true); + expect(Array.isArray(profile.timeDeltas)).toBe(true); + + // Validate root node + expect(profile.nodes.length).toBeGreaterThan(0); + const rootNode = profile.nodes[0]; + expect(rootNode.id).toBe(1); + expect(rootNode.callFrame.functionName).toBe("(root)"); + + // Validate node structure + profile.nodes.forEach((node: any) => { + expect(node).toHaveProperty("id"); + expect(node).toHaveProperty("callFrame"); + expect(node).toHaveProperty("hitCount"); + 
expect(node.callFrame).toHaveProperty("functionName"); + expect(node.callFrame).toHaveProperty("scriptId"); + expect(node.callFrame).toHaveProperty("url"); + expect(node.callFrame).toHaveProperty("lineNumber"); + expect(node.callFrame).toHaveProperty("columnNumber"); + }); + + // Validate samples point to valid nodes + const nodeIds = new Set(profile.nodes.map((n: any) => n.id)); + profile.samples.forEach((sample: number) => { + expect(nodeIds.has(sample)).toBe(true); + }); + + // Validate time deltas + expect(profile.timeDeltas.length).toBe(profile.samples.length); + // For very fast programs, start and end times might be equal or very close + expect(profile.startTime).toBeLessThanOrEqual(profile.endTime); + + // CRITICAL: Validate timestamps are positive and in microseconds + // Chrome DevTools requires timestamps in microseconds since Unix epoch + // A valid timestamp should be > 1000000000000000 (around year 2001) + // and < 3000000000000000 (around year 2065) + expect(profile.startTime).toBeGreaterThan(1000000000000000); + expect(profile.startTime).toBeLessThan(3000000000000000); + expect(profile.endTime).toBeGreaterThan(1000000000000000); + expect(profile.endTime).toBeLessThan(3000000000000000); + }); + + test("--cpu-prof-name sets custom filename", async () => { + using dir = tempDir("cpu-prof-name", { + "test.js": ` + function loop() { + const end = Date.now() + 32; + while (Date.now() < end) {} + } + loop(); + `, + }); + + const customName = "my-profile.cpuprofile"; + + await using proc = Bun.spawn({ + cmd: [bunExe(), "--cpu-prof", "--cpu-prof-name", customName, "test.js"], + cwd: String(dir), + env: bunEnv, + stdout: "inherit", + stderr: "inherit", + }); + + const exitCode = await proc.exited; + + const files = readdirSync(String(dir)); + expect(files).toContain(customName); + expect(exitCode).toBe(0); + }); + + test("--cpu-prof-dir sets custom directory", async () => { + using dir = tempDir("cpu-prof-dir", { + "test.js": ` + function loop() { + const end 
= Date.now() + 32; + while (Date.now() < end) {} + } + loop(); + `, + "profiles": {}, + }); + + await using proc = Bun.spawn({ + cmd: [bunExe(), "--cpu-prof", "--cpu-prof-dir", "profiles", "test.js"], + cwd: String(dir), + env: bunEnv, + stdout: "inherit", + stderr: "inherit", + }); + + const exitCode = await proc.exited; + + const profilesDir = join(String(dir), "profiles"); + const files = readdirSync(profilesDir); + const profileFiles = files.filter(f => f.endsWith(".cpuprofile")); + + expect(profileFiles.length).toBeGreaterThan(0); + expect(exitCode).toBe(0); + }); + + test("profile captures function names", async () => { + using dir = tempDir("cpu-prof-functions", { + "test.js": ` + function myFunction() { + let sum = 0; + for (let i = 0; i < 1000000; i++) { + sum += i; + } + return sum; + } + + myFunction(); + `, + }); + + await using proc = Bun.spawn({ + cmd: [bunExe(), "--cpu-prof", "test.js"], + cwd: String(dir), + env: bunEnv, + stdout: "inherit", + stderr: "inherit", + }); + + const exitCode = await proc.exited; + + const files = readdirSync(String(dir)); + const profileFiles = files.filter(f => f.endsWith(".cpuprofile")); + expect(profileFiles.length).toBeGreaterThan(0); + + const profilePath = join(String(dir), profileFiles[0]); + const profile = JSON.parse(readFileSync(profilePath, "utf-8")); + + // Check that we captured some meaningful function names + const functionNames = profile.nodes.map((n: any) => n.callFrame.functionName); + expect(functionNames.some((name: string) => name !== "(root)" && name !== "(program)")).toBe(true); + expect(exitCode).toBe(0); + }); +}); diff --git a/test/cli/run/require-cache.test.ts b/test/cli/run/require-cache.test.ts index b3087a9178..da072f9572 100644 --- a/test/cli/run/require-cache.test.ts +++ b/test/cli/run/require-cache.test.ts @@ -1,5 +1,5 @@ import { describe, expect, test } from "bun:test"; -import { bunEnv, bunExe, isBroken, isIntelMacOS, isWindows, tempDirWithFiles } from "harness"; +import { bunEnv, 
bunExe, isBroken, isCI, isIntelMacOS, isMacOS, isWindows, tempDirWithFiles } from "harness"; import { join } from "path"; test("require.cache is not an empty object literal when inspected", () => { @@ -195,18 +195,23 @@ describe.skipIf(isBroken && isIntelMacOS)("files transpiled and loaded don't lea expect(exitCode).toBe(0); }, 60000); - test("via require() with a lot of function calls", () => { - let text = "function i() { return 1; }\n"; - for (let i = 0; i < 20000; i++) { - text += `i();\n`; - } - text += "exports.forceCommonJS = true;\n"; + test.todoIf( + // Flaky specifically on macOS CI. + isBroken && isMacOS && isCI, + )( + "via require() with a lot of function calls", + () => { + let text = "function i() { return 1; }\n"; + for (let i = 0; i < 20000; i++) { + text += `i();\n`; + } + text += "exports.forceCommonJS = true;\n"; - console.log("Text length:", text.length); + console.log("Text length:", text.length); - const dir = tempDirWithFiles("require-cache-bug-leak-2", { - "index.js": text, - "require-cache-bug-leak-fixture.js": ` + const dir = tempDirWithFiles("require-cache-bug-leak-2", { + "index.js": text, + "require-cache-bug-leak-fixture.js": ` const path = require.resolve("./index.js"); const gc = global.gc || globalThis?.Bun?.gc || (() => {}); function bust() { @@ -242,16 +247,18 @@ describe.skipIf(isBroken && isIntelMacOS)("files transpiled and loaded don't lea exports.abc = 123; `, - }); - const { exitCode, resourceUsage } = Bun.spawnSync({ - cmd: [bunExe(), "run", "--smol", join(dir, "require-cache-bug-leak-fixture.js")], - env: bunEnv, - stdio: ["inherit", "inherit", "inherit"], - }); + }); + const { exitCode, resourceUsage } = Bun.spawnSync({ + cmd: [bunExe(), "run", "--smol", join(dir, "require-cache-bug-leak-fixture.js")], + env: bunEnv, + stdio: ["inherit", "inherit", "inherit"], + }); - console.log(resourceUsage); - expect(exitCode).toBe(0); - }, 60000); // takes 4s on an M1 in release build + console.log(resourceUsage); + 
expect(exitCode).toBe(0); + }, + 60000, + ); // takes 4s on an M1 in release build }); describe("files transpiled and loaded don't leak the AST", () => { diff --git a/test/cli/update_interactive_formatting.test.ts b/test/cli/update_interactive_formatting.test.ts index 8e54570781..cb97d2df09 100644 --- a/test/cli/update_interactive_formatting.test.ts +++ b/test/cli/update_interactive_formatting.test.ts @@ -1861,6 +1861,51 @@ registry = "${registryUrl}" expect(packageJson.dependencies["dep-with-tags"]).toBe("1.0.0"); }); + it("should preserve npm: alias prefix when updating packages", async () => { + const dir = tempDirWithFiles("update-interactive-npm-alias", { + "bunfig.toml": `[install] +cache = false +registry = "${registryUrl}" +`, + "package.json": JSON.stringify({ + name: "test-project", + version: "1.0.0", + dependencies: { + "my-alias": "npm:no-deps@1.0.0", + "@my/alias": "npm:@types/no-deps@^1.0.0", + }, + }), + }); + + await using install = Bun.spawn({ + cmd: [bunExe(), "install"], + cwd: dir, + env: bunEnv, + stdout: "pipe", + stderr: "pipe", + }); + expect(await install.exited).toBe(0); + + await using update = Bun.spawn({ + cmd: [bunExe(), "update", "-i", "--latest"], + cwd: dir, + env: bunEnv, + stdin: "pipe", + stdout: "pipe", + stderr: "pipe", + }); + + update.stdin.write("a\n"); + update.stdin.end(); + + const exitCode = await update.exited; + expect(exitCode).toBe(0); + + const packageJson = await Bun.file(join(dir, "package.json")).json(); + expect(packageJson.dependencies["my-alias"]).toBe("npm:no-deps@2.0.0"); + expect(packageJson.dependencies["@my/alias"]).toBe("npm:@types/no-deps@^2.0.0"); + }); + it("interactive update with mixed dependency types", async () => { const dir = tempDirWithFiles("update-interactive-mixed", { "bunfig.toml": `[install] @@ -1891,7 +1936,7 @@ registry = "${registryUrl}" name: "@test/workspace1", dependencies: { "a-dep": "catalog:", - "@test/workspace2": "workspace:*", // Workspace dependency + "@test/workspace2": 
"workspace:*", }, devDependencies: { "no-deps": "^1.0.0", diff --git a/test/cli/update_interactive_install.test.ts b/test/cli/update_interactive_install.test.ts new file mode 100644 index 0000000000..919cf2e6f2 --- /dev/null +++ b/test/cli/update_interactive_install.test.ts @@ -0,0 +1,182 @@ +import { describe, expect, test } from "bun:test"; +import { existsSync, readFileSync } from "fs"; +import { bunEnv, bunExe, tempDir } from "harness"; +import { join } from "path"; + +describe.concurrent("bun update --interactive actually installs packages", () => { + test("should update package.json AND install packages", async () => { + using dir = tempDir("update-interactive-install", { + "package.json": JSON.stringify({ + name: "test-project", + version: "1.0.0", + dependencies: { + // Use a very old version that definitely has updates available + "is-even": "0.1.0", + }, + }), + }); + + // First, run bun install to create initial node_modules + await using installProc = Bun.spawn({ + cmd: [bunExe(), "install"], + cwd: String(dir), + env: bunEnv, + stdout: "pipe", + stderr: "pipe", + }); + + const installExitCode = await installProc.exited; + expect(installExitCode).toBe(0); + + // Verify initial installation + const initialPackageJson = JSON.parse(readFileSync(join(String(dir), "package.json"), "utf8")); + expect(initialPackageJson.dependencies["is-even"]).toBe("0.1.0"); + + // Check that node_modules was created + expect(existsSync(join(String(dir), "node_modules"))).toBe(true); + expect(existsSync(join(String(dir), "node_modules", "is-even"))).toBe(true); + + // Read the initial installed version from package.json in node_modules + const initialInstalledPkgJson = JSON.parse( + readFileSync(join(String(dir), "node_modules", "is-even", "package.json"), "utf8"), + ); + const initialVersion = initialInstalledPkgJson.version; + expect(initialVersion).toBe("0.1.0"); + + // Now run update --interactive with automatic selection + await using updateProc = Bun.spawn({ + cmd: 
[bunExe(), "update", "--interactive"], + cwd: String(dir), + env: bunEnv, + stdin: "pipe", + stdout: "pipe", + stderr: "pipe", + }); + + try { + // Select first package and confirm + updateProc.stdin.write(" "); // space to select + updateProc.stdin.write("\r"); // enter to confirm + updateProc.stdin.end(); + + const [stdout, stderr, exitCode] = await Promise.all([ + updateProc.stdout.text(), + updateProc.stderr.text(), + updateProc.exited, + ]); + + // Debug output if test fails + if (exitCode !== 0) { + console.log("STDOUT:", stdout); + console.log("STDERR:", stderr); + } + + expect(exitCode).toBe(0); + + // Check that package.json was updated + const updatedPackageJson = JSON.parse(readFileSync(join(String(dir), "package.json"), "utf8")); + const updatedVersion = updatedPackageJson.dependencies["is-even"]; + + // The version should have changed from "0.1.0" + expect(updatedVersion).not.toBe("0.1.0"); + + // Most importantly: verify that node_modules was actually updated! + // This is the bug - previously only package.json changed but not node_modules + const installedPkgJson = JSON.parse( + readFileSync(join(String(dir), "node_modules", "is-even", "package.json"), "utf8"), + ); + const installedVersion = installedPkgJson.version; + + // The installed version should match what's in package.json + // Extract version number from potentially semver-prefixed string (e.g., "^1.1.0" -> "1.1.0") + const expectedVersion = updatedVersion.replace(/^[\^~]/, ""); + + // The installed version should NOT be the old version + expect(installedVersion).not.toBe("0.1.0"); + expect(Bun.semver.satisfies(installedVersion, ">0.1.0")).toBe(true); + + // And ideally should match the expected version (or at least be compatible) + // We check that it starts with the expected major.minor + const [expectedMajor, expectedMinor] = expectedVersion.split("."); + expect(installedVersion).toContain(`${expectedMajor}.${expectedMinor}`); + } catch (err) { + // Ensure cleanup on failure + 
updateProc.stdin.end(); + updateProc.kill(); + throw err; + } + }); + + test("should work with --latest flag", async () => { + using dir = tempDir("update-interactive-latest", { + "package.json": JSON.stringify({ + name: "test-project", + version: "1.0.0", + dependencies: { + "is-odd": "0.1.0", // Use old version of is-odd + }, + }), + }); + + // Initial install + await using installProc = Bun.spawn({ + cmd: [bunExe(), "install"], + cwd: String(dir), + env: bunEnv, + stdout: "pipe", + stderr: "pipe", + }); + + await installProc.exited; + + // Verify initial version + const initialPkgJson = JSON.parse( + readFileSync(join(String(dir), "node_modules", "is-odd", "package.json"), "utf8"), + ); + expect(initialPkgJson.version).toBe("0.1.0"); + + // Run update --interactive with 'l' to toggle latest, then select and confirm + await using updateProc = Bun.spawn({ + cmd: [bunExe(), "update", "--interactive"], + cwd: String(dir), + env: bunEnv, + stdin: "pipe", + stdout: "pipe", + stderr: "pipe", + }); + + try { + updateProc.stdin.write("l"); // toggle latest + updateProc.stdin.write(" "); // select + updateProc.stdin.write("\r"); // confirm + updateProc.stdin.end(); + + const [stdout, stderr, exitCode] = await Promise.all([ + updateProc.stdout.text(), + updateProc.stderr.text(), + updateProc.exited, + ]); + + if (exitCode !== 0) { + console.log("STDOUT:", stdout); + console.log("STDERR:", stderr); + } + + expect(exitCode).toBe(0); + + // Verify node_modules was updated + const updatedPkgJson = JSON.parse( + readFileSync(join(String(dir), "node_modules", "is-odd", "package.json"), "utf8"), + ); + + // Should be newer than 0.1.0 + expect(updatedPkgJson.version).not.toBe("0.1.0"); + expect(Bun.semver.satisfies(updatedPkgJson.version, ">0.1.0")).toBe(true); + } catch (err) { + // Ensure cleanup on failure + updateProc.stdin.end(); + updateProc.kill(); + throw err; + } + }); +}); diff --git a/test/integration/next-pages/test/__snapshots__/dev-server-ssr-100.test.ts.snap 
b/test/integration/next-pages/test/__snapshots__/dev-server-ssr-100.test.ts.snap index 89959331b6..8b3ef2323b 100644 --- a/test/integration/next-pages/test/__snapshots__/dev-server-ssr-100.test.ts.snap +++ b/test/integration/next-pages/test/__snapshots__/dev-server-ssr-100.test.ts.snap @@ -23958,10 +23958,18 @@ exports[`ssr works for 100-ish requests 1`] = ` "id": 407, "package_id": 61, }, + "@opentelemetry/api": { + "id": 633, + "package_id": 4294967295, + }, "@pkgjs/parseargs": { "id": 599, "package_id": 62, }, + "@playwright/test": { + "id": 634, + "package_id": 4294967295, + }, "@puppeteer/browsers": { "id": 719, "package_id": 63, @@ -24238,10 +24246,18 @@ exports[`ssr works for 100-ish requests 1`] = ` "id": 905, "package_id": 131, }, + "babel-plugin-react-compiler": { + "id": 635, + "package_id": 4294967295, + }, "balanced-match": { "id": 202, "package_id": 132, }, + "bare-buffer": { + "id": 197, + "package_id": 4294967295, + }, "bare-events": { "id": 194, "package_id": 133, @@ -24286,6 +24302,10 @@ exports[`ssr works for 100-ish requests 1`] = ` "id": 1014, "package_id": 143, }, + "bufferutil": { + "id": 1005, + "package_id": 4294967295, + }, "bun-types": { "id": 4, "package_id": 144, @@ -24530,6 +24550,10 @@ exports[`ssr works for 100-ish requests 1`] = ` "id": 398, "package_id": 204, }, + "eslint-plugin-import-x": { + "id": 416, + "package_id": 4294967295, + }, "eslint-plugin-jsx-a11y": { "id": 399, "package_id": 205, @@ -25294,6 +25318,10 @@ exports[`ssr works for 100-ish requests 1`] = ` "id": 451, "package_id": 395, }, + "sass": { + "id": 638, + "package_id": 4294967295, + }, "scheduler": { "id": 731, "package_id": 396, @@ -25490,6 +25518,10 @@ exports[`ssr works for 100-ish requests 1`] = ` "id": 877, "package_id": 442, }, + "ts-node": { + "id": 701, + "package_id": 4294967295, + }, "tsconfig-paths": { "id": 436, "package_id": 443, @@ -25546,6 +25578,10 @@ exports[`ssr works for 100-ish requests 1`] = ` "id": 138, "package_id": 456, }, + 
"utf-8-validate": { + "id": 1006, + "package_id": 4294967295, + }, "util-deprecate": { "id": 705, "package_id": 457, diff --git a/test/integration/next-pages/test/__snapshots__/dev-server.test.ts.snap b/test/integration/next-pages/test/__snapshots__/dev-server.test.ts.snap index 321cb36341..be4a0b6e7d 100644 --- a/test/integration/next-pages/test/__snapshots__/dev-server.test.ts.snap +++ b/test/integration/next-pages/test/__snapshots__/dev-server.test.ts.snap @@ -23958,10 +23958,18 @@ exports[`hot reloading works on the client (+ tailwind hmr) 1`] = ` "id": 407, "package_id": 61, }, + "@opentelemetry/api": { + "id": 633, + "package_id": 4294967295, + }, "@pkgjs/parseargs": { "id": 599, "package_id": 62, }, + "@playwright/test": { + "id": 634, + "package_id": 4294967295, + }, "@puppeteer/browsers": { "id": 719, "package_id": 63, @@ -24238,10 +24246,18 @@ exports[`hot reloading works on the client (+ tailwind hmr) 1`] = ` "id": 905, "package_id": 131, }, + "babel-plugin-react-compiler": { + "id": 635, + "package_id": 4294967295, + }, "balanced-match": { "id": 202, "package_id": 132, }, + "bare-buffer": { + "id": 197, + "package_id": 4294967295, + }, "bare-events": { "id": 194, "package_id": 133, @@ -24286,6 +24302,10 @@ exports[`hot reloading works on the client (+ tailwind hmr) 1`] = ` "id": 1014, "package_id": 143, }, + "bufferutil": { + "id": 1005, + "package_id": 4294967295, + }, "bun-types": { "id": 4, "package_id": 144, @@ -24530,6 +24550,10 @@ exports[`hot reloading works on the client (+ tailwind hmr) 1`] = ` "id": 398, "package_id": 204, }, + "eslint-plugin-import-x": { + "id": 416, + "package_id": 4294967295, + }, "eslint-plugin-jsx-a11y": { "id": 399, "package_id": 205, @@ -25294,6 +25318,10 @@ exports[`hot reloading works on the client (+ tailwind hmr) 1`] = ` "id": 451, "package_id": 395, }, + "sass": { + "id": 638, + "package_id": 4294967295, + }, "scheduler": { "id": 731, "package_id": 396, @@ -25490,6 +25518,10 @@ exports[`hot reloading works on the 
client (+ tailwind hmr) 1`] = ` "id": 877, "package_id": 442, }, + "ts-node": { + "id": 701, + "package_id": 4294967295, + }, "tsconfig-paths": { "id": 436, "package_id": 443, @@ -25546,6 +25578,10 @@ exports[`hot reloading works on the client (+ tailwind hmr) 1`] = ` "id": 138, "package_id": 456, }, + "utf-8-validate": { + "id": 1006, + "package_id": 4294967295, + }, "util-deprecate": { "id": 705, "package_id": 457, diff --git a/test/integration/next-pages/test/__snapshots__/next-build.test.ts.snap b/test/integration/next-pages/test/__snapshots__/next-build.test.ts.snap index 05004d7e99..b484a46fa6 100644 --- a/test/integration/next-pages/test/__snapshots__/next-build.test.ts.snap +++ b/test/integration/next-pages/test/__snapshots__/next-build.test.ts.snap @@ -23958,10 +23958,18 @@ exports[`next build works: bun 1`] = ` "id": 407, "package_id": 61, }, + "@opentelemetry/api": { + "id": 633, + "package_id": 4294967295, + }, "@pkgjs/parseargs": { "id": 599, "package_id": 62, }, + "@playwright/test": { + "id": 634, + "package_id": 4294967295, + }, "@puppeteer/browsers": { "id": 719, "package_id": 63, @@ -24238,10 +24246,18 @@ exports[`next build works: bun 1`] = ` "id": 905, "package_id": 131, }, + "babel-plugin-react-compiler": { + "id": 635, + "package_id": 4294967295, + }, "balanced-match": { "id": 202, "package_id": 132, }, + "bare-buffer": { + "id": 197, + "package_id": 4294967295, + }, "bare-events": { "id": 194, "package_id": 133, @@ -24286,6 +24302,10 @@ exports[`next build works: bun 1`] = ` "id": 1014, "package_id": 143, }, + "bufferutil": { + "id": 1005, + "package_id": 4294967295, + }, "bun-types": { "id": 4, "package_id": 144, @@ -24530,6 +24550,10 @@ exports[`next build works: bun 1`] = ` "id": 398, "package_id": 204, }, + "eslint-plugin-import-x": { + "id": 416, + "package_id": 4294967295, + }, "eslint-plugin-jsx-a11y": { "id": 399, "package_id": 205, @@ -25294,6 +25318,10 @@ exports[`next build works: bun 1`] = ` "id": 451, "package_id": 395, }, + 
"sass": { + "id": 638, + "package_id": 4294967295, + }, "scheduler": { "id": 731, "package_id": 396, @@ -25490,6 +25518,10 @@ exports[`next build works: bun 1`] = ` "id": 877, "package_id": 442, }, + "ts-node": { + "id": 701, + "package_id": 4294967295, + }, "tsconfig-paths": { "id": 436, "package_id": 443, @@ -25546,6 +25578,10 @@ exports[`next build works: bun 1`] = ` "id": 138, "package_id": 456, }, + "utf-8-validate": { + "id": 1006, + "package_id": 4294967295, + }, "util-deprecate": { "id": 705, "package_id": 457, @@ -49896,10 +49932,18 @@ exports[`next build works: node 1`] = ` "id": 407, "package_id": 61, }, + "@opentelemetry/api": { + "id": 633, + "package_id": 4294967295, + }, "@pkgjs/parseargs": { "id": 599, "package_id": 62, }, + "@playwright/test": { + "id": 634, + "package_id": 4294967295, + }, "@puppeteer/browsers": { "id": 719, "package_id": 63, @@ -50176,10 +50220,18 @@ exports[`next build works: node 1`] = ` "id": 905, "package_id": 131, }, + "babel-plugin-react-compiler": { + "id": 635, + "package_id": 4294967295, + }, "balanced-match": { "id": 202, "package_id": 132, }, + "bare-buffer": { + "id": 197, + "package_id": 4294967295, + }, "bare-events": { "id": 194, "package_id": 133, @@ -50224,6 +50276,10 @@ exports[`next build works: node 1`] = ` "id": 1014, "package_id": 143, }, + "bufferutil": { + "id": 1005, + "package_id": 4294967295, + }, "bun-types": { "id": 4, "package_id": 144, @@ -50468,6 +50524,10 @@ exports[`next build works: node 1`] = ` "id": 398, "package_id": 204, }, + "eslint-plugin-import-x": { + "id": 416, + "package_id": 4294967295, + }, "eslint-plugin-jsx-a11y": { "id": 399, "package_id": 205, @@ -51232,6 +51292,10 @@ exports[`next build works: node 1`] = ` "id": 451, "package_id": 395, }, + "sass": { + "id": 638, + "package_id": 4294967295, + }, "scheduler": { "id": 731, "package_id": 396, @@ -51428,6 +51492,10 @@ exports[`next build works: node 1`] = ` "id": 877, "package_id": 442, }, + "ts-node": { + "id": 701, + "package_id": 
4294967295, + }, "tsconfig-paths": { "id": 436, "package_id": 443, @@ -51484,6 +51552,10 @@ exports[`next build works: node 1`] = ` "id": 138, "package_id": 456, }, + "utf-8-validate": { + "id": 1006, + "package_id": 4294967295, + }, "util-deprecate": { "id": 705, "package_id": 457, diff --git a/test/internal/ban-limits.json b/test/internal/ban-limits.json index b77728edff..905a6bf35a 100644 --- a/test/internal/ban-limits.json +++ b/test/internal/ban-limits.json @@ -7,7 +7,7 @@ ".arguments_old(": 265, ".jsBoolean(false)": 0, ".jsBoolean(true)": 0, - ".stdDir()": 41, + ".stdDir()": 42, ".stdFile()": 16, "// autofix": 164, ": [^=]+= undefined,$": 255, diff --git a/test/js/bun/test/ci-restrictions.test.ts b/test/js/bun/test/ci-restrictions.test.ts index 70725a3d41..d7c3232f3d 100644 --- a/test/js/bun/test/ci-restrictions.test.ts +++ b/test/js/bun/test/ci-restrictions.test.ts @@ -145,7 +145,9 @@ exports[\`existing snapshot 1\`] = \`"hello world"\`; const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]); expect(exitCode).toBe(1); - expect(stderr).toContain("Snapshot creation is not allowed in CI environments"); + expect(stderr).toContain("Snapshot creation is disabled in CI environments"); + expect(stderr).toContain('Snapshot name: "new snapshot 1"'); + expect(stderr).toContain('Received: "this is new"'); }); test("toMatchSnapshot should fail for new snapshots when GITHUB_ACTIONS=1", async () => { @@ -170,7 +172,9 @@ test("new snapshot", () => { const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]); expect(exitCode).toBe(1); - expect(stderr).toContain("Snapshot creation is not allowed in CI environments"); + expect(stderr).toContain("Snapshot creation is disabled in CI environments"); + expect(stderr).toContain('Snapshot name: "new snapshot 1"'); + expect(stderr).toContain('Received: "this is new"'); }); test("toMatchSnapshot should work for new snapshots when 
CI=false", async () => { @@ -246,7 +250,8 @@ test("new inline snapshot", () => { const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]); expect(exitCode).toBe(1); - expect(stderr).toContain("Updating inline snapshots is disabled in CI environments"); + expect(stderr).toContain("Inline snapshot creation is disabled in CI environments"); + expect(stderr).toContain('Received: "this is new"'); }); test("toMatchInlineSnapshot should work for new inline snapshots when CI=false", async () => { diff --git a/test/js/bun/websocket/websocket-server.test.ts b/test/js/bun/websocket/websocket-server.test.ts index 23022030c2..89a8cdb511 100644 --- a/test/js/bun/websocket/websocket-server.test.ts +++ b/test/js/bun/websocket/websocket-server.test.ts @@ -168,6 +168,225 @@ describe("Server", () => { }, })); + it("subscriptions - basic usage", async () => { + const { promise, resolve } = Promise.withResolvers(); + const { promise: onClosePromise, resolve: onClose } = Promise.withResolvers(); + + using server = serve({ + port: 0, + fetch(req, server) { + if (server.upgrade(req)) { + return; + } + return new Response("Not a websocket"); + }, + websocket: { + open(ws) { + // Initially no subscriptions + const initialSubs = ws.subscriptions; + expect(Array.isArray(initialSubs)).toBeTrue(); + expect(initialSubs.length).toBe(0); + + // Subscribe to multiple topics + ws.subscribe("topic1"); + ws.subscribe("topic2"); + ws.subscribe("topic3"); + const threeSubs = ws.subscriptions; + expect(threeSubs.length).toBe(3); + expect(threeSubs).toContain("topic1"); + expect(threeSubs).toContain("topic2"); + expect(threeSubs).toContain("topic3"); + + // Unsubscribe from one + ws.unsubscribe("topic2"); + const finalSubs = ws.subscriptions; + + resolve(finalSubs); + ws.close(); + }, + close() { + onClose(); + }, + }, + }); + + const ws = new WebSocket(`ws://localhost:${server.port}`); + ws.onclose = () => onClose(); + + const [subscriptions] = await 
Promise.all([promise, onClosePromise]); + expect(subscriptions.length).toBe(2); + expect(subscriptions).toContain("topic1"); + expect(subscriptions).toContain("topic3"); + expect(subscriptions).not.toContain("topic2"); + }); + + it("subscriptions - all unsubscribed", async () => { + const { promise, resolve } = Promise.withResolvers(); + const { promise: onClosePromise, resolve: onClose } = Promise.withResolvers(); + + using server = serve({ + port: 0, + fetch(req, server) { + if (server.upgrade(req)) { + return; + } + return new Response("Not a websocket"); + }, + websocket: { + open(ws) { + // Subscribe to topics + ws.subscribe("topic1"); + ws.subscribe("topic2"); + ws.subscribe("topic3"); + expect(ws.subscriptions.length).toBe(3); + + // Unsubscribe from all + ws.unsubscribe("topic1"); + ws.unsubscribe("topic2"); + ws.unsubscribe("topic3"); + const finalSubs = ws.subscriptions; + + resolve(finalSubs); + ws.close(); + }, + close() { + onClose(); + }, + }, + }); + + const ws = new WebSocket(`ws://localhost:${server.port}`); + ws.onclose = () => onClose(); + + const [subscriptions] = await Promise.all([promise, onClosePromise]); + expect(subscriptions).toEqual([]); + expect(subscriptions.length).toBe(0); + }); + + it("subscriptions - after close", async () => { + const { promise, resolve } = Promise.withResolvers(); + const { promise: onClosePromise, resolve: onClose } = Promise.withResolvers(); + + using server = serve({ + port: 0, + fetch(req, server) { + if (server.upgrade(req)) { + return; + } + return new Response("Not a websocket"); + }, + websocket: { + open(ws) { + ws.subscribe("topic1"); + ws.subscribe("topic2"); + expect(ws.subscriptions.length).toBe(2); + ws.close(); + }, + close(ws) { + // After close, should return empty array + const subsAfterClose = ws.subscriptions; + resolve(subsAfterClose); + onClose(); + }, + }, + }); + + const ws = new WebSocket(`ws://localhost:${server.port}`); + ws.onclose = () => onClose(); + + const [subscriptions] = await 
Promise.all([promise, onClosePromise]); + expect(subscriptions).toStrictEqual([]); + }); + + it("subscriptions - duplicate subscriptions", async () => { + const { promise, resolve } = Promise.withResolvers(); + const { promise: onClosePromise, resolve: onClose } = Promise.withResolvers(); + + using server = serve({ + port: 0, + fetch(req, server) { + if (server.upgrade(req)) { + return; + } + return new Response("Not a websocket"); + }, + websocket: { + open(ws) { + // Subscribe to same topic multiple times + ws.subscribe("topic1"); + ws.subscribe("topic1"); + ws.subscribe("topic1"); + const subs = ws.subscriptions; + + resolve(subs); + ws.close(); + }, + close() { + onClose(); + }, + }, + }); + + const ws = new WebSocket(`ws://localhost:${server.port}`); + ws.onclose = () => onClose(); + + const [subscriptions] = await Promise.all([promise, onClosePromise]); + // Should only have one instance of topic1 + expect(subscriptions.length).toBe(1); + expect(subscriptions).toContain("topic1"); + }); + + it("subscriptions - multiple cycles", async () => { + const { promise, resolve } = Promise.withResolvers(); + const { promise: onClosePromise, resolve: onClose } = Promise.withResolvers(); + + using server = serve({ + port: 0, + fetch(req, server) { + if (server.upgrade(req)) { + return; + } + return new Response("Not a websocket"); + }, + websocket: { + open(ws) { + // First cycle + ws.subscribe("topic1"); + expect(ws.subscriptions).toEqual(["topic1"]); + + ws.unsubscribe("topic1"); + expect(ws.subscriptions.length).toBe(0); + + // Second cycle with different topics + ws.subscribe("topic2"); + ws.subscribe("topic3"); + expect(ws.subscriptions.length).toBe(2); + + ws.unsubscribe("topic2"); + expect(ws.subscriptions).toEqual(["topic3"]); + + // Third cycle - resubscribe to topic1 + ws.subscribe("topic1"); + const finalSubs = ws.subscriptions; + + resolve(finalSubs); + ws.close(); + }, + close() { + onClose(); + }, + }, + }); + + const ws = new 
WebSocket(`ws://localhost:${server.port}`); + ws.onclose = () => onClose(); + + const [subscriptions] = await Promise.all([promise, onClosePromise]); + expect(subscriptions.length).toBe(2); + expect(subscriptions).toContain("topic1"); + expect(subscriptions).toContain("topic3"); + }); + describe("websocket", () => { test("open", done => ({ open(ws) { diff --git a/test/js/node/process/process-mainModule-fixture.esm.mjs b/test/js/node/process/process-mainModule-fixture.esm.mjs new file mode 100644 index 0000000000..2d934153a9 --- /dev/null +++ b/test/js/node/process/process-mainModule-fixture.esm.mjs @@ -0,0 +1 @@ +process.mainModule = 123; diff --git a/test/js/node/process/process-mainModule-fixture.js b/test/js/node/process/process-mainModule-fixture.js new file mode 100644 index 0000000000..ee2cbe63e5 --- /dev/null +++ b/test/js/node/process/process-mainModule-fixture.js @@ -0,0 +1,19 @@ +process.mainModule = process.mainModule; + +module.exports = {}; + +if (module.exports !== process.mainModule.exports) { + throw new Error("module.exports !== process.mainModule"); +} + +if (require.main !== process.mainModule) { + throw new Error("require.main !== process.mainModule"); +} + +process.mainModule = { abc: 123 }; + +if (require.main === process.mainModule) { + throw new Error("require.main === process.mainModule"); +} + +process.exit(0); diff --git a/test/js/node/process/process.test.js b/test/js/node/process/process.test.js index 8f48ca0f76..c82dfd4dc6 100644 --- a/test/js/node/process/process.test.js +++ b/test/js/node/process/process.test.js @@ -1,55 +1,35 @@ import { spawnSync, which } from "bun"; import { describe, expect, it } from "bun:test"; import { familySync } from "detect-libc"; -import { writeFileSync } from "fs"; -import { bunEnv, bunExe, isMacOS, isWindows, tmpdirSync } from "harness"; +import { bunEnv, bunExe, isMacOS, isWindows, tempDir, tmpdirSync } from "harness"; import { basename, join, resolve } from "path"; -expect.extend({ - 
toRunInlineFixture(input) { - const script = input[0]; - const optionalStdout = input[1]; - const expectedCode = input[2]; - const x = tmpdirSync(); - const path = join(x, "index.js"); - writeFileSync(path, script); +const process_sleep = resolve(import.meta.dir, "process-sleep.js"); - // return expect([path]).toRun(optionalStdout, expectedCode); - const cmds = [path]; - const result = Bun.spawnSync({ - cmd: [bunExe(), ...cmds], - env: bunEnv, - stdio: ["inherit", "pipe", "pipe"], - }); +/** + * Helper function to run inline fixture code and return stdout and exit code + */ +async function runInlineFixture(script, expectedStdout = null, expectedCode = 0) { + using dir = tempDir("process-test", { + "index.js": script, + }); - if (result.exitCode !== expectedCode) { - return { - pass: false, - message: () => - `Command ${cmds.join(" ")} failed: ${result.exitCode} != ${expectedCode}:` + - "\n" + - result.stdout.toString("utf-8") + - "\n" + - result.stderr.toString("utf-8"), - }; - } + await using proc = Bun.spawn({ + cmd: [bunExe(), join(String(dir), "index.js")], + env: bunEnv, + stdout: "pipe", + stderr: "pipe", + }); - if (optionalStdout != null) { - return { - pass: result.stdout.toString("utf-8") === optionalStdout, - message: () => - `Expected ${cmds.join(" ")} to output ${optionalStdout} but got ${result.stdout.toString("utf-8")}`, - }; - } + const [stdout, exitCode] = await Promise.all([proc.stdout.text(), proc.exited]); - return { - pass: true, - message: () => `Expected ${cmds.join(" ")} to fail`, - }; - }, -}); + if (expectedStdout !== null) { + expect(stdout).toBe(expectedStdout); + } + expect(exitCode).toBe(expectedCode); -const process_sleep = join(import.meta.dir, "process-sleep.js"); + return { stdout, exitCode }; +} it("process", () => { // this property isn't implemented yet but it should at least return a string @@ -434,357 +414,412 @@ it("process.exit", () => { expect(stdout.toString().trim()).toBe("PASS"); }); -describe("process.onBeforeExit", () 
=> { - it("emitted", () => { - const { exitCode, stdout } = spawnSync({ - cmd: [bunExe(), join(import.meta.dir, "process-onBeforeExit-fixture.js")], +describe.concurrent(() => { + it.todoIf(isMacOS)("should be the node version on the host that we expect", async () => { + const subprocess = Bun.spawn({ + cmd: ["node", "--version"], + stdout: "pipe", + stdin: "inherit", + stderr: "pipe", env: bunEnv, }); - expect(exitCode).toBe(0); - expect(stdout.toString().trim()).toBe("beforeExit\nexit"); + + let [out, exited] = await Promise.all([new Response(subprocess.stdout).text(), subprocess.exited]); + expect(out.trim()).toEqual("v24.3.0"); + expect(exited).toBe(0); }); - it("works with explicit process.exit", () => { - const { exitCode, stdout } = spawnSync({ - cmd: [bunExe(), join(import.meta.dir, "process-onBeforeExit-keepAlive.js")], - env: bunEnv, - }); - expect(exitCode).toBe(0); - expect(stdout.toString().trim()).toBe("beforeExit: 0\nbeforeExit: 1\nexit: 2"); - }); - - it("throwing inside preserves exit code", async () => { - const proc = Bun.spawnSync({ - cmd: [bunExe(), "-e", `process.on("beforeExit", () => {throw new Error("boom")});`], - env: bunEnv, - stdio: ["inherit", "pipe", "pipe"], - }); - expect(proc.exitCode).toBe(1); - expect(proc.stderr.toString("utf8")).toInclude("error: boom"); - expect(proc.stdout.toString("utf8")).toBeEmpty(); - }); -}); - -describe("process.onExit", () => { - it("throwing inside preserves exit code", async () => { - const proc = Bun.spawnSync({ - cmd: [bunExe(), "-e", `process.on("exit", () => {throw new Error("boom")});`], - env: bunEnv, - stdio: ["inherit", "pipe", "pipe"], - }); - expect(proc.exitCode).toBe(1); - expect(proc.stderr.toString("utf8")).toInclude("error: boom"); - expect(proc.stdout.toString("utf8")).toBeEmpty(); - }); -}); - -it("process.memoryUsage", () => { - expect(process.memoryUsage()).toEqual({ - rss: expect.any(Number), - heapTotal: expect.any(Number), - heapUsed: expect.any(Number), - external: 
expect.any(Number), - arrayBuffers: expect.any(Number), - }); -}); - -it("process.memoryUsage.rss", () => { - expect(process.memoryUsage.rss()).toEqual(expect.any(Number)); -}); - -describe("process.cpuUsage", () => { - it("works", () => { - expect(process.cpuUsage()).toEqual({ - user: expect.any(Number), - system: expect.any(Number), - }); - }); - - it("throws for negative input", () => { - expect(() => - process.cpuUsage({ - user: -1, - system: 100, - }), - ).toThrow("The property 'prevValue.user' is invalid. Received -1"); - expect(() => - process.cpuUsage({ - user: 100, - system: -1, - }), - ).toThrow("The property 'prevValue.system' is invalid. Received -1"); - }); - - // Skipped on Windows because it seems UV returns { user: 15000, system: 0 } constantly - it.skipIf(process.platform === "win32")("works with diff", () => { - const init = process.cpuUsage(); - init.system = 0; - init.user = 0; - const delta = process.cpuUsage(init); - expect(delta.user).toBeGreaterThan(0); - expect(delta.system).toBeGreaterThanOrEqual(0); - }); - - it.skipIf(process.platform === "win32")("works with diff of different structure", () => { - const init = { - system: 0, - user: 0, - }; - const delta = process.cpuUsage(init); - expect(delta.user).toBeGreaterThan(0); - expect(delta.system).toBeGreaterThanOrEqual(0); - }); - - it("throws on invalid property", () => { - const fixtures = [ - {}, - { user: null }, - { user: {} }, - { user: "potato" }, - - { user: 123 }, - { user: 123, system: null }, - { user: 123, system: "potato" }, - ]; - for (const fixture of fixtures) { - expect(() => process.cpuUsage(fixture)).toThrow(); - } - }); - - // Skipped on Linux/Windows because it seems to not change as often as on macOS - it.skipIf(process.platform !== "darwin")("increases monotonically", () => { - const init = process.cpuUsage(); - let start = performance.now(); - while (performance.now() - start < 10) {} - const another = process.cpuUsage(); - 
expect(another.user).toBeGreaterThan(init.user); - expect(another.system).toBeGreaterThan(init.system); - }); -}); - -if (process.platform !== "win32") { - it("process.getegid", () => { - expect(typeof process.getegid()).toBe("number"); - }); - it("process.geteuid", () => { - expect(typeof process.geteuid()).toBe("number"); - }); - it("process.getgid", () => { - expect(typeof process.getgid()).toBe("number"); - }); - it("process.getgroups", () => { - expect(process.getgroups()).toBeInstanceOf(Array); - expect(process.getgroups().length).toBeGreaterThan(0); - }); - it("process.getuid", () => { - expect(typeof process.getuid()).toBe("number"); - }); - it("process.getuid", () => { - expect(typeof process.getuid()).toBe("number"); - }); -} else { - it("process.getegid, process.geteuid, process.getgid, process.getgroups, process.getuid, process.getuid are not implemented on Windows", () => { - expect(process.getegid).toBeUndefined(); - expect(process.geteuid).toBeUndefined(); - expect(process.getgid).toBeUndefined(); - expect(process.getgroups).toBeUndefined(); - expect(process.getuid).toBeUndefined(); - expect(process.getuid).toBeUndefined(); - }); -} - -describe("signal", () => { - const fixture = join(import.meta.dir, "./process-signal-handler.fixture.js"); - it.skipIf(isWindows)("simple case works", async () => { - const child = Bun.spawn({ - cmd: [bunExe(), fixture, "SIGUSR1"], + it("process.mainModule (CJS)", async () => { + await using proc = Bun.spawn({ + cmd: [bunExe(), join(import.meta.dir, "process-mainModule-fixture.js")], env: bunEnv, + stdout: "inherit", stderr: "inherit", + stdin: "inherit", }); - expect(await child.exited).toBe(0); - expect(await new Response(child.stdout).text()).toBe("PASS\n"); + expect(await proc.exited).toBe(0); }); - it.skipIf(isWindows)("process.emit will call signal events", async () => { - const child = Bun.spawn({ - cmd: [bunExe(), fixture, "SIGUSR2"], + + it("process.mainModule (ESM)", async () => { + await using proc = 
Bun.spawn({ + cmd: [bunExe(), join(import.meta.dir, "process-mainModule-fixture.esm.mjs")], env: bunEnv, + stdout: "inherit", + stderr: "inherit", + stdin: "inherit", }); - expect(await child.exited).toBe(0); - expect(await new Response(child.stdout).text()).toBe("PASS\n"); + expect(await proc.exited).toBe(0); }); - it("process.kill(2) works", async () => { - const child = Bun.spawn({ - cmd: [bunExe(), process_sleep, "1000000"], - stdout: "pipe", - env: bunEnv, + describe("process.onBeforeExit", () => { + it("emitted", async () => { + await using proc = Bun.spawn({ + cmd: [bunExe(), join(import.meta.dir, "process-onBeforeExit-fixture.js")], + env: bunEnv, + stdout: "pipe", + stderr: "pipe", + }); + const [stdout, exitCode] = await Promise.all([proc.stdout.text(), proc.exited]); + expect(exitCode).toBe(0); + expect(stdout.trim()).toBe("beforeExit\nexit"); }); - const prom = child.exited; - const ret = process.kill(child.pid, "SIGTERM"); - expect(ret).toBe(true); - await prom; - if (process.platform === "win32") { - expect(child.exitCode).toBe(1); - } else { - expect(child.signalCode).toBe("SIGTERM"); - } - }); - it("process._kill(2) works", async () => { - const child = Bun.spawn({ - cmd: [bunExe(), process_sleep, "1000000"], - stdout: "pipe", - env: bunEnv, + it("works with explicit process.exit", async () => { + await using proc = Bun.spawn({ + cmd: [bunExe(), join(import.meta.dir, "process-onBeforeExit-keepAlive.js")], + env: bunEnv, + stdout: "pipe", + stderr: "pipe", + }); + const [stdout, exitCode] = await Promise.all([proc.stdout.text(), proc.exited]); + expect(exitCode).toBe(0); + expect(stdout.trim()).toBe("beforeExit: 0\nbeforeExit: 1\nexit: 2"); }); - const prom = child.exited; - // SIGKILL as a number - const SIGKILL = 9; - process._kill(child.pid, SIGKILL); - await prom; - if (process.platform === "win32") { - expect(child.exitCode).toBe(1); - } else { - expect(child.signalCode).toBe("SIGKILL"); - } + it("throwing inside preserves exit code", async () 
=> { + await using proc = Bun.spawn({ + cmd: [bunExe(), "-e", `process.on("beforeExit", () => {throw new Error("boom")});`], + env: bunEnv, + stdio: ["inherit", "pipe", "pipe"], + }); + const [stderr, stdout, exitCode] = await Promise.all([proc.stderr.text(), proc.stdout.text(), proc.exited]); + expect(exitCode).toBe(1); + expect(stderr).toInclude("error: boom"); + expect(stdout).toBeEmpty(); + }); }); - it("process.kill(2) throws on invalid input", async () => { - expect(() => process.kill(2147483640, "SIGPOOP")).toThrow(); - expect(() => process.kill(2147483640, 456)).toThrow(); + describe("process.onExit", () => { + it("throwing inside preserves exit code", async () => { + await using proc = Bun.spawn({ + cmd: [bunExe(), "-e", `process.on("exit", () => {throw new Error("boom")});`], + env: bunEnv, + stdio: ["inherit", "pipe", "pipe"], + }); + const [stderr, stdout, exitCode] = await Promise.all([proc.stderr.text(), proc.stdout.text(), proc.exited]); + expect(exitCode).toBe(1); + expect(stderr).toInclude("error: boom"); + expect(stdout).toBeEmpty(); + }); }); -}); -const undefinedStubs = [ - "_debugEnd", - "_debugProcess", - "_fatalException", - "_linkedBinding", - "_rawDebug", - "_startProfilerIdleNotifier", - "_stopProfilerIdleNotifier", - "_tickCallback", -]; - -for (const stub of undefinedStubs) { - it(`process.${stub}`, () => { - expect(process[stub]()).toBeUndefined(); + it("process.memoryUsage", () => { + expect(process.memoryUsage()).toEqual({ + rss: expect.any(Number), + heapTotal: expect.any(Number), + heapUsed: expect.any(Number), + external: expect.any(Number), + arrayBuffers: expect.any(Number), + }); }); -} -const arrayStubs = ["getActiveResourcesInfo", "_getActiveRequests", "_getActiveHandles"]; - -for (const stub of arrayStubs) { - it(`process.${stub}`, () => { - expect(process[stub]()).toBeInstanceOf(Array); + it("process.memoryUsage.rss", () => { + expect(process.memoryUsage.rss()).toEqual(expect.any(Number)); }); -} -const emptyObjectStubs = 
[]; -const emptySetStubs = ["allowedNodeEnvironmentFlags"]; -const emptyArrayStubs = ["moduleLoadList", "_preload_modules"]; + describe("process.cpuUsage", () => { + it("works", () => { + expect(process.cpuUsage()).toEqual({ + user: expect.any(Number), + system: expect.any(Number), + }); + }); -for (const stub of emptyObjectStubs) { - it(`process.${stub}`, () => { - expect(process[stub]).toEqual({}); + it("throws for negative input", () => { + expect(() => + process.cpuUsage({ + user: -1, + system: 100, + }), + ).toThrow("The property 'prevValue.user' is invalid. Received -1"); + expect(() => + process.cpuUsage({ + user: 100, + system: -1, + }), + ).toThrow("The property 'prevValue.system' is invalid. Received -1"); + }); + + // Skipped on Windows because it seems UV returns { user: 15000, system: 0 } constantly + it.skipIf(process.platform === "win32")("works with diff", () => { + const init = process.cpuUsage(); + init.system = 0; + init.user = 0; + const delta = process.cpuUsage(init); + expect(delta.user).toBeGreaterThan(0); + expect(delta.system).toBeGreaterThanOrEqual(0); + }); + + it.skipIf(process.platform === "win32")("works with diff of different structure", () => { + const init = { + system: 0, + user: 0, + }; + const delta = process.cpuUsage(init); + expect(delta.user).toBeGreaterThan(0); + expect(delta.system).toBeGreaterThanOrEqual(0); + }); + + it("throws on invalid property", () => { + const fixtures = [ + {}, + { user: null }, + { user: {} }, + { user: "potato" }, + + { user: 123 }, + { user: 123, system: null }, + { user: 123, system: "potato" }, + ]; + for (const fixture of fixtures) { + expect(() => process.cpuUsage(fixture)).toThrow(); + } + }); + + // Skipped on Linux/Windows because it seems to not change as often as on macOS + it.skipIf(process.platform !== "darwin")("increases monotonically", () => { + const init = process.cpuUsage(); + let start = performance.now(); + while (performance.now() - start < 10) {} + const another = 
process.cpuUsage(); + expect(another.user).toBeGreaterThan(init.user); + expect(another.system).toBeGreaterThan(init.system); + }); }); -} -for (const stub of emptySetStubs) { - it(`process.${stub}`, () => { - expect(process[stub]).toBeInstanceOf(Set); - expect(process[stub].size).toBe(0); - }); -} - -for (const stub of emptyArrayStubs) { - it(`process.${stub}`, () => { - expect(process[stub]).toBeInstanceOf(Array); - expect(process[stub]).toHaveLength(0); - }); -} - -it("dlopen args parsing", () => { - const notFound = join(tmpdirSync(), "not-found.so"); - expect(() => process.dlopen({ module: "42" }, notFound)).toThrow(); - expect(() => process.dlopen({ module: 42 }, notFound)).toThrow(); - expect(() => process.dlopen({ module: { exports: "42" } }, notFound)).toThrow(); - expect(() => process.dlopen({ module: { exports: 42 } }, notFound)).toThrow(); - expect(() => process.dlopen({ module: Symbol() }, notFound)).toThrow(); - expect(() => process.dlopen({ module: { exports: Symbol("123") } }, notFound)).toThrow(); - expect(() => process.dlopen({ module: { exports: Symbol("123") } }, Symbol("badddd"))).toThrow(); -}); - -it("dlopen accepts file: URLs", () => { - const mod = { exports: {} }; - try { - process.dlopen(mod, import.meta.url); - throw "Expected error"; - } catch (e) { - expect(e.message).not.toContain("file:"); + if (process.platform !== "win32") { + it("process.getegid", () => { + expect(typeof process.getegid()).toBe("number"); + }); + it("process.geteuid", () => { + expect(typeof process.geteuid()).toBe("number"); + }); + it("process.getgid", () => { + expect(typeof process.getgid()).toBe("number"); + }); + it("process.getgroups", () => { + expect(process.getgroups()).toBeInstanceOf(Array); + expect(process.getgroups().length).toBeGreaterThan(0); + }); + it("process.getuid", () => { + expect(typeof process.getuid()).toBe("number"); + }); + } else { + it("process.getegid, process.geteuid, process.getgid, process.getgroups, process.getuid, process.getuid 
are not implemented on Windows", () => { + expect(process.getegid).toBeUndefined(); + expect(process.geteuid).toBeUndefined(); + expect(process.getgid).toBeUndefined(); + expect(process.getgroups).toBeUndefined(); + expect(process.getuid).toBeUndefined(); + expect(process.getuid).toBeUndefined(); + }); } - expect(() => process.dlopen(mod, "file://asd[kasd[po@[p1o23]1po!-10923-095-@$@8123=-9123=-0==][pc;!")).toThrow( - "invalid file: URL passed to dlopen", - ); -}); + describe("signal", () => { + const fixture = join(import.meta.dir, "./process-signal-handler.fixture.js"); + it.skipIf(isWindows)("simple case works", async () => { + await using child = Bun.spawn({ + cmd: [bunExe(), fixture, "SIGUSR1"], + env: bunEnv, + stderr: "inherit", + }); -it("process.constrainedMemory()", () => { - expect(process.constrainedMemory() >= 0).toBe(true); -}); + expect(await child.exited).toBe(0); + expect(await new Response(child.stdout).text()).toBe("PASS\n"); + }); + it.skipIf(isWindows)("process.emit will call signal events", async () => { + await using child = Bun.spawn({ + cmd: [bunExe(), fixture, "SIGUSR2"], + env: bunEnv, + }); -it("process.report", () => { - // TODO: write better tests - JSON.stringify(process.report.getReport(), null, 2); -}); + expect(await child.exited).toBe(0); + expect(await new Response(child.stdout).text()).toBe("PASS\n"); + }); -it("process.exit with jsDoubleNumber that is an integer", () => { - expect([join(import.meta.dir, "./process-exit-decimal-fixture.js")]).toRun(); -}); + it.serial("process.kill(2) works", async () => { + await using child = Bun.spawn({ + cmd: [bunExe(), process_sleep, "1000000"], + stdout: "pipe", + cwd: import.meta.dir, + env: bunEnv, + stderr: "inherit", + }); + const prom = child.exited; + const ret = process.kill(child.pid, "SIGTERM"); + expect(ret).toBe(true); + await prom; + if (process.platform === "win32") { + expect(child.exitCode).toBe(1); + } else { + expect(child.signalCode).toBe("SIGTERM"); + } + }); -if 
(isWindows) { - it("ownKeys trap windows process.env", () => { - expect(() => Object.keys(process.env)).not.toThrow(); - expect(() => Object.getOwnPropertyDescriptors(process.env)).not.toThrow(); + it.serial("process._kill(2) works", async () => { + await using child = Bun.spawn({ + cmd: [bunExe(), process_sleep, "1000000"], + stdout: "pipe", + env: bunEnv, + }); + const prom = child.exited; + // SIGKILL as a number + const SIGKILL = 9; + process._kill(child.pid, SIGKILL); + await prom; + + if (process.platform === "win32") { + expect(child.exitCode).toBe(1); + } else { + expect(child.signalCode).toBe("SIGKILL"); + } + }); + + it("process.kill(2) throws on invalid input", async () => { + expect(() => process.kill(2147483640, "SIGPOOP")).toThrow(); + expect(() => process.kill(2147483640, 456)).toThrow(); + }); }); -} -it("catches exceptions with process.setUncaughtExceptionCaptureCallback", async () => { - const proc = Bun.spawn([bunExe(), join(import.meta.dir, "process-uncaughtExceptionCaptureCallback.js")]); - expect(await proc.exited).toBe(42); -}); + const undefinedStubs = [ + "_debugEnd", + "_debugProcess", + "_fatalException", + "_linkedBinding", + "_rawDebug", + "_startProfilerIdleNotifier", + "_stopProfilerIdleNotifier", + "_tickCallback", + ]; -it("catches exceptions with process.on('uncaughtException', fn)", async () => { - const proc = Bun.spawn([bunExe(), join(import.meta.dir, "process-onUncaughtException.js")]); - expect(await proc.exited).toBe(42); -}); + for (const stub of undefinedStubs) { + it(`process.${stub}`, () => { + expect(process[stub]()).toBeUndefined(); + }); + } -it("catches exceptions with process.on('uncaughtException', fn) from setTimeout", async () => { - const proc = Bun.spawn([bunExe(), join(import.meta.dir, "process-onUncaughtExceptionSetTimeout.js")]); - expect(await proc.exited).toBe(42); -}); + const arrayStubs = ["getActiveResourcesInfo", "_getActiveRequests", "_getActiveHandles"]; -it("catches exceptions with 
process.on('unhandledRejection', fn)", async () => { - const proc = Bun.spawn([bunExe(), join(import.meta.dir, "process-onUnhandledRejection.js")]); - expect(await proc.exited).toBe(42); -}); + for (const stub of arrayStubs) { + it(`process.${stub}`, () => { + expect(process[stub]()).toBeInstanceOf(Array); + }); + } -it("aborts when the uncaughtException handler throws", async () => { - const proc = Bun.spawn([bunExe(), join(import.meta.dir, "process-onUncaughtExceptionAbort.js")], { - stderr: "pipe", + const emptyObjectStubs = []; + const emptySetStubs = ["allowedNodeEnvironmentFlags"]; + const emptyArrayStubs = ["moduleLoadList", "_preload_modules"]; + + for (const stub of emptyObjectStubs) { + it(`process.${stub}`, () => { + expect(process[stub]).toEqual({}); + }); + } + + for (const stub of emptySetStubs) { + it(`process.${stub}`, () => { + expect(process[stub]).toBeInstanceOf(Set); + expect(process[stub].size).toBe(0); + }); + } + + for (const stub of emptyArrayStubs) { + it(`process.${stub}`, () => { + expect(process[stub]).toBeInstanceOf(Array); + expect(process[stub]).toHaveLength(0); + }); + } + + it("dlopen args parsing", () => { + const notFound = join(tmpdirSync(), "not-found.so"); + expect(() => process.dlopen({ module: "42" }, notFound)).toThrow(); + expect(() => process.dlopen({ module: 42 }, notFound)).toThrow(); + expect(() => process.dlopen({ module: { exports: "42" } }, notFound)).toThrow(); + expect(() => process.dlopen({ module: { exports: 42 } }, notFound)).toThrow(); + expect(() => process.dlopen({ module: Symbol() }, notFound)).toThrow(); + expect(() => process.dlopen({ module: { exports: Symbol("123") } }, notFound)).toThrow(); + expect(() => process.dlopen({ module: { exports: Symbol("123") } }, Symbol("badddd"))).toThrow(); }); - expect(await proc.exited).toBe(7); - expect(await proc.stderr.text()).toContain("bar"); -}); -it("aborts when the uncaughtExceptionCaptureCallback throws", async () => { - const proc = Bun.spawn([bunExe(), 
join(import.meta.dir, "process-uncaughtExceptionCaptureCallbackAbort.js")], { - stderr: "pipe", + it("dlopen accepts file: URLs", () => { + const mod = { exports: {} }; + try { + process.dlopen(mod, import.meta.url); + throw "Expected error"; + } catch (e) { + expect(e.message).not.toContain("file:"); + } + + expect(() => process.dlopen(mod, "file://asd[kasd[po@[p1o23]1po!-10923-095-@$@8123=-9123=-0==][pc;!")).toThrow( + "invalid file: URL passed to dlopen", + ); + }); + + it("process.constrainedMemory()", () => { + expect(process.constrainedMemory() >= 0).toBe(true); + }); + + it("process.report", () => { + // TODO: write better tests + JSON.stringify(process.report.getReport(), null, 2); + }); + + it("process.exit with jsDoubleNumber that is an integer", async () => { + await using proc = Bun.spawn({ + cmd: [bunExe(), join(import.meta.dir, "./process-exit-decimal-fixture.js")], + env: bunEnv, + stdout: "pipe", + stderr: "pipe", + }); + + const exitCode = await proc.exited; + expect(exitCode).toBe(0); + }); + + if (isWindows) { + it("ownKeys trap windows process.env", () => { + expect(() => Object.keys(process.env)).not.toThrow(); + expect(() => Object.getOwnPropertyDescriptors(process.env)).not.toThrow(); + }); + } + + it("catches exceptions with process.setUncaughtExceptionCaptureCallback", async () => { + const proc = Bun.spawn([bunExe(), join(import.meta.dir, "process-uncaughtExceptionCaptureCallback.js")]); + expect(await proc.exited).toBe(42); + }); + + it("catches exceptions with process.on('uncaughtException', fn)", async () => { + const proc = Bun.spawn([bunExe(), join(import.meta.dir, "process-onUncaughtException.js")]); + expect(await proc.exited).toBe(42); + }); + + it("catches exceptions with process.on('uncaughtException', fn) from setTimeout", async () => { + const proc = Bun.spawn([bunExe(), join(import.meta.dir, "process-onUncaughtExceptionSetTimeout.js")]); + expect(await proc.exited).toBe(42); + }); + + it("catches exceptions with 
process.on('unhandledRejection', fn)", async () => { + const proc = Bun.spawn([bunExe(), join(import.meta.dir, "process-onUnhandledRejection.js")]); + expect(await proc.exited).toBe(42); + }); + + it("aborts when the uncaughtException handler throws", async () => { + const proc = Bun.spawn([bunExe(), join(import.meta.dir, "process-onUncaughtExceptionAbort.js")], { + stderr: "pipe", + }); + expect(await proc.exited).toBe(7); + expect(await proc.stderr.text()).toContain("bar"); + }); + + it("aborts when the uncaughtExceptionCaptureCallback throws", async () => { + const proc = Bun.spawn([bunExe(), join(import.meta.dir, "process-uncaughtExceptionCaptureCallbackAbort.js")], { + stderr: "pipe", + }); + expect(await proc.exited).toBe(1); + expect(await proc.stderr.text()).toContain("bar"); }); - expect(await proc.exited).toBe(1); - expect(await proc.stderr.text()).toContain("bar"); }); it("process.hasUncaughtExceptionCaptureCallback", () => { @@ -810,19 +845,19 @@ it("process.execArgv", async () => { }); describe("process.exitCode", () => { - it("normal", () => { - expect([ + it("normal", async () => { + await runInlineFixture( ` process.on("exit", (code) => console.log("exit", code, process.exitCode)); process.on("beforeExit", (code) => console.log("beforeExit", code, process.exitCode)); `, "beforeExit 0 undefined\nexit 0 undefined\n", 0, - ]).toRunInlineFixture(); + ); }); - it("setter", () => { - expect([ + it("setter", async () => { + await runInlineFixture( ` process.on("exit", (code) => console.log("exit", code, process.exitCode)); process.on("beforeExit", (code) => console.log("beforeExit", code, process.exitCode)); @@ -831,11 +866,11 @@ describe("process.exitCode", () => { `, "beforeExit 0 0\nexit 0 0\n", 0, - ]).toRunInlineFixture(); + ); }); - it("setter non-zero", () => { - expect([ + it("setter non-zero", async () => { + await runInlineFixture( ` process.on("exit", (code) => console.log("exit", code, process.exitCode)); process.on("beforeExit", (code) => 
console.log("beforeExit", code, process.exitCode)); @@ -844,11 +879,11 @@ describe("process.exitCode", () => { `, "beforeExit 3 3\nexit 3 3\n", 3, - ]).toRunInlineFixture(); + ); }); - it("exit", () => { - expect([ + it("exit", async () => { + await runInlineFixture( ` process.on("exit", (code) => console.log("exit", code, process.exitCode)); process.on("beforeExit", (code) => console.log("beforeExit", code, process.exitCode)); @@ -857,11 +892,11 @@ describe("process.exitCode", () => { `, "exit 0 0\n", 0, - ]).toRunInlineFixture(); + ); }); - it("exit non-zero", () => { - expect([ + it("exit non-zero", async () => { + await runInlineFixture( ` process.on("exit", (code) => console.log("exit", code, process.exitCode)); process.on("beforeExit", (code) => console.log("beforeExit", code, process.exitCode)); @@ -870,11 +905,11 @@ describe("process.exitCode", () => { `, "exit 3 3\n", 3, - ]).toRunInlineFixture(); + ); }); - it("property access on undefined", () => { - expect([ + it("property access on undefined", async () => { + await runInlineFixture( ` process.on("exit", (code) => console.log("exit", code, process.exitCode)); process.on("beforeExit", (code) => console.log("beforeExit", code, process.exitCode)); @@ -884,11 +919,11 @@ describe("process.exitCode", () => { `, "exit 1 1\n", 1, - ]).toRunInlineFixture(); + ); }); - it("thrown Error", () => { - expect([ + it("thrown Error", async () => { + await runInlineFixture( ` process.on("exit", (code) => console.log("exit", code, process.exitCode)); process.on("beforeExit", (code) => console.log("beforeExit", code, process.exitCode)); @@ -897,11 +932,11 @@ describe("process.exitCode", () => { `, "exit 1 1\n", 1, - ]).toRunInlineFixture(); + ); }); - it("unhandled rejected promise", () => { - expect([ + it("unhandled rejected promise", async () => { + await runInlineFixture( ` process.on("exit", (code) => console.log("exit", code, process.exitCode)); process.on("beforeExit", (code) => console.log("beforeExit", code, 
process.exitCode)); @@ -910,11 +945,11 @@ describe("process.exitCode", () => { `, "exit 1 1\n", 1, - ]).toRunInlineFixture(); + ); }); - it("exitsOnExitCodeSet", () => { - expect([ + it("exitsOnExitCodeSet", async () => { + await runInlineFixture( ` const assert = require('assert'); process.exitCode = 42; @@ -925,11 +960,11 @@ describe("process.exitCode", () => { `, "", 42, - ]).toRunInlineFixture(); + ); }); - it("changesCodeViaExit", () => { - expect([ + it("changesCodeViaExit", async () => { + await runInlineFixture( ` const assert = require('assert'); process.exitCode = 99; @@ -941,11 +976,11 @@ describe("process.exitCode", () => { `, "", 42, - ]).toRunInlineFixture(); + ); }); - it("changesCodeZeroExit", () => { - expect([ + it("changesCodeZeroExit", async () => { + await runInlineFixture( ` const assert = require('assert'); process.exitCode = 99; @@ -957,11 +992,11 @@ describe("process.exitCode", () => { `, "", 0, - ]).toRunInlineFixture(); + ); }); - it("exitWithOneOnUncaught", () => { - expect([ + it("exitWithOneOnUncaught", async () => { + await runInlineFixture( ` process.exitCode = 99; process.on('exit', (code) => { @@ -975,11 +1010,11 @@ describe("process.exitCode", () => { `, "", 1, - ]).toRunInlineFixture(); + ); }); - it("changeCodeInsideExit", () => { - expect([ + it("changeCodeInsideExit", async () => { + await runInlineFixture( ` const assert = require('assert'); process.exitCode = 95; @@ -991,11 +1026,11 @@ describe("process.exitCode", () => { `, "", 99, - ]).toRunInlineFixture(); + ); }); - it.todoIf(isWindows)("zeroExitWithUncaughtHandler", () => { - expect([ + it.todoIf(isWindows)("zeroExitWithUncaughtHandler", async () => { + await runInlineFixture( ` process.on('exit', (code) => { if (code !== 0) { @@ -1012,11 +1047,11 @@ describe("process.exitCode", () => { `, "", 0, - ]).toRunInlineFixture(); + ); }); - it.todoIf(isWindows)("changeCodeInUncaughtHandler", () => { - expect([ + it.todoIf(isWindows)("changeCodeInUncaughtHandler", async () => { 
+ await runInlineFixture( ` process.on('exit', (code) => { if (code !== 97) { @@ -1035,11 +1070,11 @@ describe("process.exitCode", () => { `, "", 97, - ]).toRunInlineFixture(); + ); }); - it("changeCodeInExitWithUncaught", () => { - expect([ + it("changeCodeInExitWithUncaught", async () => { + await runInlineFixture( ` const assert = require('assert'); process.on('exit', (code) => { @@ -1051,11 +1086,11 @@ describe("process.exitCode", () => { `, "", 98, - ]).toRunInlineFixture(); + ); }); - it("exitWithZeroInExitWithUncaught", () => { - expect([ + it("exitWithZeroInExitWithUncaught", async () => { + await runInlineFixture( ` const assert = require('assert'); process.on('exit', (code) => { @@ -1067,11 +1102,11 @@ describe("process.exitCode", () => { `, "", 0, - ]).toRunInlineFixture(); + ); }); - it("exitWithThrowInUncaughtHandler", () => { - expect([ + it("exitWithThrowInUncaughtHandler", async () => { + await runInlineFixture( ` process.on('uncaughtException', () => { throw new Error('ok') @@ -1080,18 +1115,18 @@ describe("process.exitCode", () => { `, "", 7, - ]).toRunInlineFixture(); + ); }); - it.todo("exitWithUndefinedFatalException", () => { - expect([ + it.todo("exitWithUndefinedFatalException", async () => { + await runInlineFixture( ` process._fatalException = undefined; throw new Error('ok'); `, "", 6, - ]).toRunInlineFixture(); + ); }); }); @@ -1121,8 +1156,8 @@ it("should handle user assigned `default` properties", async () => { await promise; }); -it.each(["stdin", "stdout", "stderr"])("%s stream accessor should handle exceptions without crashing", stream => { - expect([ +it.each(["stdin", "stdout", "stderr"])("%s stream accessor should handle exceptions without crashing", async stream => { + await runInlineFixture( /* js */ ` const old = process; process = null; @@ -1135,7 +1170,7 @@ it.each(["stdin", "stdout", "stderr"])("%s stream accessor should handle excepti `, "", 1, - ]).toRunInlineFixture(); + ); }); it("process.versions", () => { @@ -1144,17 
+1179,3 @@ it("process.versions", () => { expect(process.versions.napi).toEqual("10"); expect(process.versions.modules).toEqual("137"); }); - -it.todoIf(isMacOS)("should be the node version on the host that we expect", async () => { - const subprocess = Bun.spawn({ - cmd: ["node", "--version"], - stdout: "pipe", - stdin: "inherit", - stderr: "pipe", - env: bunEnv, - }); - - let [out, exited] = await Promise.all([new Response(subprocess.stdout).text(), subprocess.exited]); - expect(out.trim()).toEqual("v24.3.0"); - expect(exited).toBe(0); -}); diff --git a/test/js/node/test/parallel/test-http-full-response.js b/test/js/node/test/parallel/test-http-full-response.js index 0332f91c03..d54743b691 100644 --- a/test/js/node/test/parallel/test-http-full-response.js +++ b/test/js/node/test/parallel/test-http-full-response.js @@ -21,6 +21,7 @@ 'use strict'; const common = require('../common'); +if (common.isWindows) return; // TODO: BUN no 'ab' installed const assert = require('assert'); // This test requires the program 'ab' const http = require('http'); @@ -30,10 +31,6 @@ const bodyLength = 12345; const body = 'c'.repeat(bodyLength); -if (typeof Bun !== "undefined" && !Bun.which("ab")) { - common.skip("ab not found"); -} - const server = http.createServer(function(req, res) { res.writeHead(200, { 'Content-Length': bodyLength, diff --git a/test/js/node/test/parallel/test-runner-typechecking.js b/test/js/node/test/parallel/test-runner-typechecking.js index 9cde6d290f..f270fb062d 100644 --- a/test/js/node/test/parallel/test-runner-typechecking.js +++ b/test/js/node/test/parallel/test-runner-typechecking.js @@ -7,10 +7,10 @@ require('../common'); const assert = require('assert'); const { test, describe, it } = require('node:test'); -const testOnly = typeof Bun === 'undefined' ? test('only test', { only: true }) : undefined; // disabled in bun because test.only is not allowed in CI environments and it will skip the describe/it +const testOnly = typeof Bun === 'undefined' ? 
test('only test', { only: true }) : undefined; // disabled in bun because test.only is disabled in CI environments and it will skip the describe/it const testTodo = test('todo test', { todo: true }); const testSkip = test('skip test', { skip: true }); -const testOnlyShorthand = typeof Bun === 'undefined' ? test.only('only test shorthand') : undefined; // disabled in bun because test.only is not allowed in CI environments and it will skip the describe/it +const testOnlyShorthand = typeof Bun === 'undefined' ? test.only('only test shorthand') : undefined; // disabled in bun because test.only is disabled in CI environments and it will skip the describe/it const testTodoShorthand = test.todo('todo test shorthand'); const testSkipShorthand = test.skip('skip test shorthand'); diff --git a/test/napi/napi-app/standalone_tests.cpp b/test/napi/napi-app/standalone_tests.cpp index c712ef81cd..8655d68da6 100644 --- a/test/napi/napi-app/standalone_tests.cpp +++ b/test/napi/napi-app/standalone_tests.cpp @@ -1239,6 +1239,212 @@ test_napi_freeze_seal_indexed(const Napi::CallbackInfo &info) { return ok(env); } +// Test for napi_create_external_buffer with empty/null data +static void empty_buffer_finalizer(napi_env env, void *data, void *hint) { + // No-op finalizer for empty buffers +} + +static napi_value +test_napi_create_external_buffer_empty(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + + // Test 1: nullptr data with zero length + { + napi_value buffer; + napi_status status = napi_create_external_buffer( + env, 0, nullptr, empty_buffer_finalizer, nullptr, &buffer); + + if (status != napi_ok) { + printf("FAIL: napi_create_external_buffer with nullptr and zero length " + "failed with status %d\n", + status); + return env.Undefined(); + } + + // Verify it's a buffer + bool is_buffer; + NODE_API_CALL(env, napi_is_buffer(env, buffer, &is_buffer)); + if (!is_buffer) { + printf("FAIL: Created value is not a buffer\n"); + return env.Undefined(); + } + + // Verify 
length is 0 + size_t length; + void *data; + NODE_API_CALL(env, napi_get_buffer_info(env, buffer, &data, &length)); + if (length != 0) { + printf("FAIL: Buffer length is %zu instead of 0\n", length); + return env.Undefined(); + } + + printf("PASS: napi_create_external_buffer with nullptr and zero length\n"); + } + + // Test 2: non-null data with zero length + { + char dummy = 0; + napi_value buffer; + napi_status status = napi_create_external_buffer( + env, 0, &dummy, empty_buffer_finalizer, nullptr, &buffer); + + if (status != napi_ok) { + printf("FAIL: napi_create_external_buffer with non-null data and zero " + "length failed with status %d\n", + status); + return env.Undefined(); + } + + // Verify it's a buffer + bool is_buffer; + NODE_API_CALL(env, napi_is_buffer(env, buffer, &is_buffer)); + if (!is_buffer) { + printf("FAIL: Created value is not a buffer\n"); + return env.Undefined(); + } + + // Verify length is 0 + size_t length; + void *data; + NODE_API_CALL(env, napi_get_buffer_info(env, buffer, &data, &length)); + if (length != 0) { + printf("FAIL: Buffer length is %zu instead of 0\n", length); + return env.Undefined(); + } + + printf("PASS: napi_create_external_buffer with non-null data and zero " + "length\n"); + } + + // Test 3: nullptr finalizer + { + char dummy = 0; + napi_value buffer; + napi_status status = + napi_create_external_buffer(env, 0, &dummy, nullptr, nullptr, &buffer); + + if (status != napi_ok) { + printf("FAIL: napi_create_external_buffer with nullptr finalizer failed " + "with status %d\n", + status); + return env.Undefined(); + } + + printf("PASS: napi_create_external_buffer with nullptr finalizer\n"); + } + + return ok(env); +} + +static napi_value test_napi_empty_buffer_info(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + + // Test: Create an empty external buffer and verify napi_get_buffer_info and + // napi_get_typedarray_info + { + napi_value buffer; + napi_status status = + napi_create_external_buffer(env, 0, 
nullptr, nullptr, nullptr, &buffer); + + if (status != napi_ok) { + printf("FAIL: napi_create_external_buffer with nullptr and zero length " + "failed with status %d\n", + status); + return env.Undefined(); + } + + // Test napi_get_buffer_info + void *buffer_data = reinterpret_cast( + 0xDEADBEEF); // Initialize to non-null to ensure it's set to null + size_t buffer_length = + 999; // Initialize to non-zero to ensure it's set to 0 + + status = napi_get_buffer_info(env, buffer, &buffer_data, &buffer_length); + if (status != napi_ok) { + printf("FAIL: napi_get_buffer_info failed with status %d\n", status); + return env.Undefined(); + } + + if (buffer_data != nullptr) { + printf("FAIL: napi_get_buffer_info returned non-null data pointer: %p\n", + buffer_data); + return env.Undefined(); + } + + if (buffer_length != 0) { + printf("FAIL: napi_get_buffer_info returned non-zero length: %zu\n", + buffer_length); + return env.Undefined(); + } + + printf("PASS: napi_get_buffer_info returns null pointer and 0 length for " + "empty buffer\n"); + + // Test napi_get_typedarray_info + napi_typedarray_type type; + size_t typedarray_length = 999; // Initialize to non-zero + void *typedarray_data = + reinterpret_cast(0xDEADBEEF); // Initialize to non-null + napi_value arraybuffer; + size_t byte_offset; + + status = + napi_get_typedarray_info(env, buffer, &type, &typedarray_length, + &typedarray_data, &arraybuffer, &byte_offset); + if (status != napi_ok) { + printf("FAIL: napi_get_typedarray_info failed with status %d\n", status); + return env.Undefined(); + } + + if (typedarray_data != nullptr) { + printf( + "FAIL: napi_get_typedarray_info returned non-null data pointer: %p\n", + typedarray_data); + return env.Undefined(); + } + + if (typedarray_length != 0) { + printf("FAIL: napi_get_typedarray_info returned non-zero length: %zu\n", + typedarray_length); + return env.Undefined(); + } + + printf("PASS: napi_get_typedarray_info returns null pointer and 0 length " + "for empty 
buffer\n"); + + // Test napi_is_detached_arraybuffer + // First get the underlying arraybuffer from the buffer + napi_value arraybuffer_from_buffer; + status = napi_get_typedarray_info(env, buffer, nullptr, nullptr, nullptr, + &arraybuffer_from_buffer, nullptr); + if (status != napi_ok) { + printf("FAIL: Could not get arraybuffer from buffer, status %d\n", + status); + return env.Undefined(); + } + + bool is_detached = false; + status = napi_is_detached_arraybuffer(env, arraybuffer_from_buffer, + &is_detached); + if (status != napi_ok) { + printf("FAIL: napi_is_detached_arraybuffer failed with status %d\n", + status); + return env.Undefined(); + } + + if (!is_detached) { + printf("FAIL: napi_is_detached_arraybuffer returned false for empty " + "buffer's arraybuffer, expected true\n"); + return env.Undefined(); + } + + printf("PASS: napi_is_detached_arraybuffer returns true for empty buffer's " + "arraybuffer\n"); + } + + return ok(env); +} + void register_standalone_tests(Napi::Env env, Napi::Object exports) { REGISTER_FUNCTION(env, exports, test_issue_7685); REGISTER_FUNCTION(env, exports, test_issue_11949); @@ -1267,6 +1473,8 @@ void register_standalone_tests(Napi::Env env, Napi::Object exports) { REGISTER_FUNCTION(env, exports, test_napi_dataview_bounds_errors); REGISTER_FUNCTION(env, exports, test_napi_typeof_empty_value); REGISTER_FUNCTION(env, exports, test_napi_freeze_seal_indexed); + REGISTER_FUNCTION(env, exports, test_napi_create_external_buffer_empty); + REGISTER_FUNCTION(env, exports, test_napi_empty_buffer_info); } } // namespace napitests diff --git a/test/napi/napi.test.ts b/test/napi/napi.test.ts index 9854ec0261..8b74682e97 100644 --- a/test/napi/napi.test.ts +++ b/test/napi/napi.test.ts @@ -266,6 +266,24 @@ describe.concurrent("napi", () => { }); }); + describe("napi_create_external_buffer", () => { + it("handles empty/null data without throwing", async () => { + const result = await checkSameOutput("test_napi_create_external_buffer_empty", []); + 
expect(result).toContain("PASS: napi_create_external_buffer with nullptr and zero length"); + expect(result).toContain("PASS: napi_create_external_buffer with non-null data and zero length"); + expect(result).toContain("PASS: napi_create_external_buffer with nullptr finalizer"); + expect(result).not.toContain("FAIL"); + }); + + it("empty buffer returns null pointer and 0 length from napi_get_buffer_info and napi_get_typedarray_info", async () => { + const result = await checkSameOutput("test_napi_empty_buffer_info", []); + expect(result).toContain("PASS: napi_get_buffer_info returns null pointer and 0 length for empty buffer"); + expect(result).toContain("PASS: napi_get_typedarray_info returns null pointer and 0 length for empty buffer"); + expect(result).toContain("PASS: napi_is_detached_arraybuffer returns true for empty buffer's arraybuffer"); + expect(result).not.toContain("FAIL"); + }); + }); + describe("napi_async_work", () => { it("null checks execute callbacks", async () => { const output = await checkSameOutput("test_napi_async_work_execute_null_check", []);