From b76376f8a6d70a9ae83ee634e854c077cbc10366 Mon Sep 17 00:00:00 2001 From: dave caruso Date: Thu, 20 Jun 2024 13:48:39 -0700 Subject: [PATCH] chore: upgrade zig to 0.13.0 (#9965) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: Jarred Sumner Co-authored-by: Grigory Co-authored-by: Dylan Conway <35280289+dylan-conway@users.noreply.github.com> Co-authored-by: Meghan Denny Co-authored-by: Kenta Iwasaki <63115601+lithdew@users.noreply.github.com> Co-authored-by: John-David Dalton Co-authored-by: Dale Seo <5466341+DaleSeo@users.noreply.github.com> Co-authored-by: Zack Radisic <56137411+zackradisic@users.noreply.github.com> Co-authored-by: paperdave Co-authored-by: Georgijs Vilums Co-authored-by: Dylan Conway --- .github/workflows/build-darwin.yml | 2 +- .github/workflows/build-windows.yml | 2 +- .github/workflows/ci.yml | 2 +- .gitignore | 1 + .gitmodules | 1 - .vscode/settings.json | 5 +- CMakeLists.txt | 14 +- Dockerfile | 13 +- build.zig | 930 ++++++--------- .../project/internals/build-process-for-ci.md | 12 +- misctools/compression.zig | 4 +- misctools/http_bench.zig | 2 +- misctools/machbench.zig | 6 +- misctools/readlink-getfd.zig | 6 +- misctools/readlink-realpath.zig | 2 +- package.json | 4 +- .../src/protocol/v8/protocol.json | 130 ++- scripts/download-zig.ps1 | 5 +- scripts/download-zig.sh | 6 +- scripts/download-zls.ps1 | 2 +- src/Global.zig | 16 +- src/Progress.zig | 455 ++++++++ src/StandaloneModuleGraph.zig | 20 +- src/StaticHashMap.zig | 10 +- src/allocators.zig | 2 +- src/analytics/analytics_thread.zig | 6 +- src/async/posix_event_loop.zig | 52 +- src/async/windows_event_loop.zig | 2 +- src/boringssl.zig | 6 +- src/bun.js/ConsoleObject.zig | 8 +- src/bun.js/RuntimeTranspilerCache.zig | 11 +- src/bun.js/WebKit | 2 +- src/bun.js/api/BunObject.zig | 45 +- src/bun.js/api/Timer.zig | 19 +- src/bun.js/api/brotli.zig | 44 +- src/bun.js/api/bun/dns_resolver.zig | 40 +- src/bun.js/api/bun/process.zig | 128 
+-- src/bun.js/api/bun/socket.zig | 21 +- src/bun.js/api/bun/spawn.zig | 23 +- src/bun.js/api/bun/spawn/stdio.zig | 6 +- src/bun.js/api/bun/subprocess.zig | 14 +- src/bun.js/api/bun/udp_socket.zig | 34 +- src/bun.js/api/ffi.zig | 13 +- src/bun.js/api/glob.zig | 12 +- src/bun.js/api/server.zig | 24 +- src/bun.js/base.zig | 28 +- src/bun.js/bindings/bindings.zig | 4 +- src/bun.js/bindings/exports.zig | 2 +- src/bun.js/bindings/shimmer.zig | 2 +- src/bun.js/event_loop.zig | 14 +- src/bun.js/ipc.zig | 2 +- src/bun.js/javascript.zig | 15 +- src/bun.js/module_loader.zig | 20 +- src/bun.js/node/dir_iterator.zig | 58 +- src/bun.js/node/node_fs.zig | 159 +-- src/bun.js/node/node_fs_binding.zig | 2 +- src/bun.js/node/node_fs_constant.zig | 78 +- src/bun.js/node/node_fs_stat_watcher.zig | 26 +- src/bun.js/node/node_fs_watcher.zig | 23 +- src/bun.js/node/node_os.zig | 43 +- src/bun.js/node/os/constants.zig | 14 +- src/bun.js/node/path_watcher.zig | 45 +- src/bun.js/node/types.zig | 67 +- src/bun.js/node/win_watcher.zig | 2 +- src/bun.js/rare_data.zig | 2 +- src/bun.js/test/jest.zig | 2 +- src/bun.js/test/pretty_format.zig | 2 +- src/bun.js/test/snapshot.zig | 4 +- src/bun.js/unbounded_queue.zig | 62 +- src/bun.js/web_worker.zig | 23 +- src/bun.js/webcore/blob.zig | 71 +- src/bun.js/webcore/blob/ReadFile.zig | 14 +- src/bun.js/webcore/blob/WriteFile.zig | 18 +- src/bun.js/webcore/response.zig | 24 +- src/bun.js/webcore/streams.zig | 32 +- src/bun.zig | 203 ++-- src/bundler/bundle_v2.zig | 56 +- src/c.zig | 52 +- src/cache.zig | 2 +- src/cli.zig | 6 +- src/cli/add_completions.zig | 4 +- src/cli/build_command.zig | 4 +- src/cli/bunx_command.zig | 10 +- src/cli/create_command.zig | 46 +- src/cli/filter_run.zig | 19 +- src/cli/init_command.zig | 4 +- src/cli/install_completions_command.zig | 8 +- src/cli/list-of-yarn-commands.zig | 3 +- src/cli/pm_trusted_command.zig | 10 +- src/cli/run_command.zig | 10 +- src/cli/test_command.zig | 10 +- src/cli/upgrade_command.zig | 19 +- 
src/compile_target.zig | 2 +- src/comptime_string_map.zig | 3 +- src/copy_file.zig | 54 +- src/crash_handler.zig | 137 +-- src/darwin_c.zig | 36 +- src/deps/c_ares.zig | 26 +- src/deps/libuv.zig | 27 +- src/deps/uws.zig | 22 +- src/deps/zig | 2 +- src/dns.zig | 24 +- src/enums.zig | 1002 ----------------- src/env.zig | 15 +- src/env_loader.zig | 4 + src/fd.zig | 19 +- src/fmt.zig | 15 +- src/fs.zig | 46 +- src/futex.zig | 25 +- src/generated_versions_list.zig | 2 +- src/glob.zig | 4 +- src/heap_breakdown.zig | 6 +- src/http.zig | 72 +- src/http/websocket.zig | 4 +- src/http/zlib.zig | 2 +- src/install/bin.zig | 20 +- src/install/install.zig | 147 +-- src/install/lifecycle_script_runner.zig | 16 +- src/install/lockfile.zig | 28 +- src/install/migration.zig | 7 +- src/install/npm.zig | 22 +- src/install/patch_install.zig | 15 +- src/install/resolvers/folder_resolver.zig | 7 +- src/install/windows-shim/BinLinkingShim.zig | 4 +- src/install/windows-shim/bun_shim_impl.exe | Bin 12800 -> 0 bytes src/install/windows-shim/bun_shim_impl.zig | 20 +- src/io/io.zig | 58 +- src/io/io_darwin.zig | 28 +- src/io/io_linux.zig | 8 +- src/io/time.zig | 12 +- src/js_ast.zig | 14 +- src/js_lexer/identifier_data.zig | 6 +- src/js_lexer_tables.zig | 233 ++-- src/js_parser.zig | 5 +- src/js_printer.zig | 77 +- src/jsc.zig | 2 +- src/libarchive/libarchive.zig | 16 +- src/linux_c.zig | 63 +- src/linux_memfd_allocator.zig | 21 +- src/lock.zig | 24 +- src/main.zig | 4 +- src/memory_allocator.zig | 8 +- src/meta.zig | 23 +- src/multi_array_list.zig | 2 +- src/napi/napi.zig | 12 +- src/node_fallbacks.zig | 49 +- src/open.zig | 40 +- src/options.zig | 233 ++-- src/output.zig | 40 +- src/patch.zig | 10 +- src/pool.zig | 2 +- src/renamer.zig | 2 +- src/resolver/data_url.zig | 1 - src/resolver/resolve_path.zig | 14 +- src/resolver/resolver.zig | 9 +- src/resolver/tsconfig_json.zig | 9 +- src/runtime.zig | 11 +- src/sha.zig | 6 +- src/shell/delete_tree.zig | 41 - src/shell/interpreter.zig | 175 +-- 
src/shell/shell.zig | 10 +- src/shell/subproc.zig | 6 +- src/shell/util.zig | 4 +- src/sourcemap/sourcemap.zig | 4 +- src/string_immutable.zig | 86 +- src/string_mutable.zig | 4 +- src/symbols.def | 569 ++++++++++ src/sync.zig | 106 +- src/sys.zig | 297 +++-- src/sys_uv.zig | 6 +- src/thread_pool.zig | 156 +-- src/tmp.zig | 2 +- src/tracy.zig | 2 +- src/trait.zig | 4 +- src/watcher.zig | 50 +- src/windows.zig | 7 +- src/windows_c.zig | 6 +- src/work_pool.zig | 2 +- test/js/bun/util/inspect.test.js | 22 +- 179 files changed, 3993 insertions(+), 3929 deletions(-) create mode 100644 src/Progress.zig delete mode 100644 src/enums.zig delete mode 100755 src/install/windows-shim/bun_shim_impl.exe delete mode 100644 src/shell/delete_tree.zig create mode 100644 src/symbols.def diff --git a/.github/workflows/build-darwin.yml b/.github/workflows/build-darwin.yml index 4b9845b139..28de5ceb59 100644 --- a/.github/workflows/build-darwin.yml +++ b/.github/workflows/build-darwin.yml @@ -265,7 +265,7 @@ jobs: -DCMAKE_BUILD_TYPE=Release \ -DUSE_LTO=ON \ -DBUN_LINK_ONLY=1 \ - -DBUN_ZIG_OBJ="${{ runner.temp }}/release/bun-zig.o" \ + -DBUN_ZIG_OBJ_DIR="${{ runner.temp }}/release" \ -DBUN_CPP_ARCHIVE="${{ runner.temp }}/bun-cpp-obj/bun-cpp-objects.a" \ -DBUN_DEPS_OUT_DIR="${{ runner.temp }}/bun-deps" \ -DNO_CONFIGURE_DEPENDS=1 diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml index 7ddab4f345..3ea173ea0f 100644 --- a/.github/workflows/build-windows.yml +++ b/.github/workflows/build-windows.yml @@ -288,7 +288,7 @@ jobs: -DBUN_LINK_ONLY=1 ` "-DBUN_DEPS_OUT_DIR=$(Resolve-Path ../bun-deps)" ` "-DBUN_CPP_ARCHIVE=$(Resolve-Path ../bun-cpp/bun-cpp-objects.a)" ` - "-DBUN_ZIG_OBJ=$(Resolve-Path ../bun-zig/bun-zig.o)" ` + "-DBUN_ZIG_OBJ_DIR=$(Resolve-Path ../bun-zig)" ` ${{ contains(inputs.tag, '-baseline') && '-DUSE_BASELINE_BUILD=1' || '' }} if ($LASTEXITCODE -ne 0) { throw "CMake configuration failed" } ninja -v diff --git a/.github/workflows/ci.yml 
b/.github/workflows/ci.yml index 102ae36cd8..e7acf37682 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -34,7 +34,7 @@ jobs: uses: ./.github/workflows/run-format.yml secrets: inherit with: - zig-version: 0.12.0-dev.1828+225fe6ddb + zig-version: 0.13.0 permissions: contents: write lint: diff --git a/.gitignore b/.gitignore index 9a7f7c3922..849c532b2b 100644 --- a/.gitignore +++ b/.gitignore @@ -15,6 +15,7 @@ .vs .vscode/clang* .vscode/cpp* +.zig-cache *.a *.bc *.big diff --git a/.gitmodules b/.gitmodules index cc5f77d136..98845d5097 100644 --- a/.gitmodules +++ b/.gitmodules @@ -79,7 +79,6 @@ fetchRecurseSubmodules = false [submodule "zig"] path = src/deps/zig url = https://github.com/oven-sh/zig -branch = bun depth = 1 shallow = true fetchRecurseSubmodules = false diff --git a/.vscode/settings.json b/.vscode/settings.json index 11bbed6f28..7f1ebe7541 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -26,8 +26,11 @@ // Zig "zig.initialSetupDone": true, - "zig.buildOnSave": false, "zig.buildOption": "build", + "zig.buildArgs": ["-Dgenerated-code=./build/codegen"], + "zig.zls.buildOnSaveStep": "check", + // "zig.zls.enableBuildOnSave": true, + // "zig.buildOnSave": true, "zig.buildFilePath": "${workspaceFolder}/build.zig", "zig.path": "${workspaceFolder}/.cache/zig/zig.exe", "zig.formattingProvider": "zls", diff --git a/CMakeLists.txt b/CMakeLists.txt index d79dfb0d3d..a2ac2e9b49 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -851,11 +851,13 @@ file(GLOB ZIG_FILES "${BUN_SRC}/*/*/*/*/*.zig" ) -if(NOT BUN_ZIG_OBJ) - set(BUN_ZIG_OBJ "${BUN_WORKDIR}/CMakeFiles/bun-zig.o") +if(NOT BUN_ZIG_OBJ_DIR) + set(BUN_ZIG_OBJ_DIR "${BUN_WORKDIR}/CMakeFiles") endif() -get_filename_component(BUN_ZIG_OBJ "${BUN_ZIG_OBJ}" REALPATH BASE_DIR "${CMAKE_BINARY_DIR}") +get_filename_component(BUN_ZIG_OBJ_DIR "${BUN_ZIG_OBJ_DIR}" REALPATH BASE_DIR "${CMAKE_BINARY_DIR}") + +set(BUN_ZIG_OBJ "${BUN_ZIG_OBJ_DIR}/bun-zig.o") set(USES_TERMINAL_NOT_IN_CI 
"") @@ -869,7 +871,7 @@ if(NOT BUN_LINK_ONLY AND NOT BUN_CPP_ONLY) COMMAND "${ZIG_COMPILER}" "build" "obj" "--zig-lib-dir" "${ZIG_LIB_DIR}" - "-Doutput-file=${BUN_ZIG_OBJ}" + "--prefix" "${BUN_ZIG_OBJ_DIR}" "-Dgenerated-code=${BUN_WORKDIR}/codegen" "-freference-trace=10" "-Dversion=${Bun_VERSION}" @@ -1074,7 +1076,7 @@ elseif(CMAKE_BUILD_TYPE STREQUAL "Release") list(APPEND LTO_LINK_FLAG "/LTCG") endif() - target_compile_options(${bun} PUBLIC /O2 ${LTO_FLAG} /DEBUG:FULL) + target_compile_options(${bun} PUBLIC /O2 ${LTO_FLAG}) target_link_options(${bun} PUBLIC ${LTO_LINK_FLAG} /DEBUG:FULL) endif() endif() @@ -1110,7 +1112,7 @@ if(WIN32) set_property(TARGET ${bun} PROPERTY MSVC_RUNTIME_LIBRARY "MultiThreadedDLL") target_compile_options(${bun} PUBLIC "/EHsc" "/GR-") - target_link_options(${bun} PUBLIC "/STACK:0x1200000,0x100000") + target_link_options(${bun} PUBLIC "/STACK:0x1200000,0x100000" "/DEF:${BUN_SRC}/symbols.def") else() target_compile_options(${bun} PUBLIC -fPIC diff --git a/Dockerfile b/Dockerfile index 0f7063db25..410cf01482 100644 --- a/Dockerfile +++ b/Dockerfile @@ -25,8 +25,9 @@ ARG CMAKE_BUILD_TYPE=Release ARG NODE_VERSION="20" ARG LLVM_VERSION="16" -ARG ZIG_VERSION="0.12.0-dev.1828+225fe6ddb" -ARG ZIG_VERSION_SHORT="0.12.0-dev.1828" + +ARG ZIG_VERSION="0.13.0" +ARG ZIG_VERSION_SHORT="0.13.0" ARG SCCACHE_BUCKET ARG SCCACHE_REGION @@ -144,7 +145,7 @@ ARG ZIG_VERSION_SHORT ARG BUILD_MACHINE_ARCH ARG ZIG_FOLDERNAME=zig-linux-${BUILD_MACHINE_ARCH}-${ZIG_VERSION} ARG ZIG_FILENAME=${ZIG_FOLDERNAME}.tar.xz -ARG ZIG_URL=https://github.com/oven-sh/zig/releases/download/${ZIG_VERSION_SHORT}/zig-linux-${BUILD_MACHINE_ARCH}-${ZIG_VERSION}.tar.xz +ARG ZIG_URL="https://ziglang.org/builds/${ZIG_FILENAME}" ARG ZIG_LOCAL_CACHE_DIR=/zig-cache ENV ZIG_LOCAL_CACHE_DIR=${ZIG_LOCAL_CACHE_DIR} @@ -459,7 +460,7 @@ RUN --mount=type=cache,target=${CCACHE_DIR} \ -DWEBKIT_DIR="omit" \ -DNO_CONFIGURE_DEPENDS=1 \ -DNO_CODEGEN=1 \ - -DBUN_ZIG_OBJ="/tmp/bun-zig.o" \ + 
-DBUN_ZIG_OBJ_DIR="/tmp" \ -DCANARY="${CANARY}" \ -DZIG_COMPILER=system \ -DZIG_LIB_DIR=$BUN_DIR/src/deps/zig/lib \ @@ -515,7 +516,7 @@ RUN --mount=type=cache,target=${CCACHE_DIR} \ -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ -DBUN_LINK_ONLY=1 \ - -DBUN_ZIG_OBJ="${BUN_DIR}/build/bun-zig.o" \ + -DBUN_ZIG_OBJ_DIR="${BUN_DIR}/build" \ -DUSE_LTO=ON \ -DUSE_DEBUG_JSC=${ASSERTIONS} \ -DBUN_CPP_ARCHIVE="${BUN_DIR}/build/bun-cpp-objects.a" \ @@ -577,7 +578,7 @@ RUN --mount=type=cache,target=${CCACHE_DIR} \ -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ -DBUN_LINK_ONLY=1 \ - -DBUN_ZIG_OBJ="${BUN_DIR}/build/bun-zig.o" \ + -DBUN_ZIG_OBJ_DIR="${BUN_DIR}/build" \ -DUSE_DEBUG_JSC=ON \ -DBUN_CPP_ARCHIVE="${BUN_DIR}/build/bun-cpp-objects.a" \ -DWEBKIT_DIR="${BUN_DIR}/bun-webkit" \ diff --git a/build.zig b/build.zig index ac400df2e0..a87737be26 100644 --- a/build.zig +++ b/build.zig @@ -1,306 +1,177 @@ const std = @import("std"); -const pathRel = std.fs.path.relative; const builtin = @import("builtin"); -const Wyhash11 = @import("./src/wyhash.zig").Wyhash11; -const zig_version = builtin.zig_version; +const Build = std.Build; +const Step = Build.Step; +const Compile = Step.Compile; +const LazyPath = Step.LazyPath; +const Target = std.Target; +const ResolvedTarget = std.Build.ResolvedTarget; +const CrossTarget = std.zig.CrossTarget; +const OptimizeMode = std.builtin.OptimizeMode; +const Module = Build.Module; +const fs = std.fs; +const Version = std.SemanticVersion; +const Arch = std.Target.Cpu.Arch; + +const OperatingSystem = @import("src/env.zig").OperatingSystem; + +const pathRel = fs.path.relative; /// Do not rename this constant. It is scanned by some scripts to determine which zig version to install. 
-const recommended_zig_version = "0.12.0-dev.1828+225fe6ddb"; +const recommended_zig_version = "0.13.0"; -var is_debug_build = false; - -fn exists(path: []const u8) bool { - _ = std.fs.openFileAbsolute(path, .{ .mode = .read_only }) catch return false; - return true; +comptime { + if (!std.mem.eql(u8, builtin.zig_version_string, recommended_zig_version)) { + @compileError( + "" ++ + "Bun requires Zig version " ++ recommended_zig_version ++ ". This is " ++ + "automatically configured via Bun's CMake setup. You likely meant to run " ++ + "`bun setup`. If you are trying to upgrade the Zig compiler, " ++ + "run `./scripts/download-zig.sh master` or comment this message out.", + ); + } } -fn addInternalPackages(b: *Build, step: *CompileStep, _: std.mem.Allocator, _: []const u8, target: anytype) !void { - const io: *Module = brk: { - if (target.isDarwin()) { - break :brk b.createModule(.{ - .source_file = FileSource.relative("src/io/io_darwin.zig"), - }); - } else if (target.isLinux()) { - break :brk b.createModule(.{ - .source_file = FileSource.relative("src/io/io_linux.zig"), - }); - } else if (target.isWindows()) { - break :brk b.createModule(.{ - .source_file = FileSource.relative("src/io/io_windows.zig"), - }); - } - - break :brk b.createModule(.{ - .source_file = FileSource.relative("src/io/io_stub.zig"), - }); - }; - - step.addModule("async_io", io); - - step.addModule("zlib-internal", brk: { - if (target.isWindows()) { - break :brk b.createModule(.{ .source_file = FileSource.relative("src/deps/zlib.win32.zig") }); - } - - break :brk b.createModule(.{ .source_file = FileSource.relative("src/deps/zlib.posix.zig") }); - }); - - const async_: *Module = brk: { - if (target.isDarwin() or target.isLinux() or target.isFreeBSD()) { - break :brk b.createModule(.{ - .source_file = FileSource.relative("src/async/posix_event_loop.zig"), - }); - } else if (target.isWindows()) { - break :brk b.createModule(.{ - .source_file =
FileSource.relative("src/async/windows_event_loop.zig"), - }); - } - - break :brk b.createModule(.{ - .source_file = FileSource.relative("src/async/stub_event_loop.zig"), - }); - }; - step.addModule("async", async_); -} +const zero_sha = "0000000000000000000000000000000000000000"; const BunBuildOptions = struct { + target: ResolvedTarget, + optimize: OptimizeMode, + os: OperatingSystem, + arch: Arch, + + version: Version, + canary_revision: ?u32, + sha: []const u8, enable_logs: bool = false, - is_canary: bool = false, - canary_revision: u32 = 0, - sha: [:0]const u8 = "", - version: []const u8 = "", - baseline: bool = false, - bindgen: bool = false, - sizegen: bool = false, - base_path: [:0]const u8 = "", tracy_callstack_depth: u16, - runtime_js_version: u64 = 0, - fallback_html_version: u64 = 0, + generated_code_dir: []const u8, - tinycc: bool = true, - project: [:0]const u8 = "", + cached_options_module: ?*Module = null, + windows_shim: ?WindowsShim = null, - pub fn updateRuntime(this: *BunBuildOptions) anyerror!void { - if (std.fs.cwd().openFile("src/runtime.out.js", .{ .mode = .read_only })) |file| { - defer file.close(); - const runtime_hash = Wyhash11.hash( - 0, - try file.readToEndAlloc(std.heap.page_allocator, try file.getEndPos()), - ); - this.runtime_js_version = runtime_hash; - } else |_| { - if (!is_debug_build) { - @panic("Runtime file was not read successfully. Please run `make setup`"); - } - } - - if (std.fs.cwd().openFile("src/fallback.out.js", .{ .mode = .read_only })) |file| { - defer file.close(); - const fallback_hash = Wyhash11.hash( - 0, - try file.readToEndAlloc(std.heap.page_allocator, try file.getEndPos()), - ); - this.fallback_html_version = fallback_hash; - } else |_| { - if (!is_debug_build) { - @panic("Fallback file was not read successfully. 
Please run `make setup`"); - } - } + pub fn isBaseline(this: *const BunBuildOptions) bool { + return this.arch.isX86() and + !Target.x86.featureSetHas(this.target.result.cpu.features, .avx2); } - pub fn step(this: BunBuildOptions, b: anytype) *std.build.OptionsStep { + pub fn buildOptionsModule(this: *BunBuildOptions, b: *Build) *Module { + if (this.cached_options_module) |mod| { + return mod; + } + var opts = b.addOptions(); - opts.addOption(@TypeOf(this.enable_logs), "enable_logs", this.enable_logs); - opts.addOption(@TypeOf(this.is_canary), "is_canary", this.is_canary); - opts.addOption(@TypeOf(this.canary_revision), "canary_revision", this.canary_revision); - opts.addOption( - std.SemanticVersion, - "version", - std.SemanticVersion.parse(this.version) catch @panic(b.fmt("Invalid version: {s}", .{this.version})), - ); - opts.addOption(@TypeOf(this.sha), "sha", this.sha); - opts.addOption(@TypeOf(this.baseline), "baseline", this.baseline); - opts.addOption(@TypeOf(this.bindgen), "bindgen", this.bindgen); - opts.addOption(@TypeOf(this.sizegen), "sizegen", this.sizegen); - opts.addOption(@TypeOf(this.base_path), "base_path", this.base_path); - opts.addOption(@TypeOf(this.runtime_js_version), "runtime_js_version", this.runtime_js_version); - opts.addOption(@TypeOf(this.fallback_html_version), "fallback_html_version", this.fallback_html_version); - opts.addOption(@TypeOf(this.tinycc), "tinycc", this.tinycc); - return opts; + opts.addOption([]const u8, "base_path", b.pathFromRoot(".")); + opts.addOption(u32, "canary_revision", this.canary_revision orelse 0); + opts.addOption(bool, "is_canary", this.canary_revision != null); + opts.addOption(Version, "version", this.version); + opts.addOption([:0]const u8, "sha", b.allocator.dupeZ(u8, this.sha) catch @panic("OOM")); + opts.addOption(bool, "baseline", this.isBaseline()); + opts.addOption(bool, "enable_logs", this.enable_logs); + + const mod = opts.createModule(); + this.cached_options_module = mod; + return mod; + } + + 
pub fn windowsShim(this: *BunBuildOptions, b: *Build) WindowsShim { + return this.windows_shim orelse { + this.windows_shim = WindowsShim.create(b); + return this.windows_shim.?; + }; } }; -// relative to the prefix -var output_dir: []const u8 = ""; +pub fn getOSVersionMin(os: OperatingSystem) ?Target.Query.OsVersion { + return switch (os) { + // bun needs macOS 12 to work properly due to icucore, but we have been + // compiling everything with 11 as the minimum. + .mac => .{ + .semver = .{ .major = 11, .minor = 0, .patch = 0 }, + }, -var optimize: std.builtin.OptimizeMode = .Debug; + // Windows 10 1809 is the minimum supported version + // One case where this is specifically required is in `deleteOpenedFile` + .windows => .{ + .windows = .win10_rs5, + }, -const Build = std.Build; -const CrossTarget = std.zig.CrossTarget; -const OptimizeMode = std.builtin.OptimizeMode; -const CompileStep = std.build.CompileStep; -const FileSource = std.build.FileSource; -const Module = std.build.Module; -const fs = std.fs; - -pub fn build(b: *Build) !void { - build_(b) catch |err| { - if (@errorReturnTrace()) |trace| { - (std.debug).dumpStackTrace(trace.*); - } - - return err; + else => null, }; } -pub fn build_(b: *Build) !void { - switch (comptime zig_version.order(std.SemanticVersion.parse(recommended_zig_version) catch unreachable)) { - .eq => {}, - .lt => { - @compileError("The minimum version of Zig required to compile Bun is " ++ recommended_zig_version ++ ", found " ++ @import("builtin").zig_version_string ++ ". Please follow the instructions at https://bun.sh/docs/project/contributing. 
You may need to re-run `bun setup`."); - }, - .gt => { - const colors = std.io.getStdErr().supportsAnsiEscapeCodes(); - std.debug.print( - "{s}WARNING:\nBun recommends Zig version '{s}', but found '{s}', build may fail...\nMake sure you are following the instructions at https://bun.sh/docs/project/contributing\n{s}You can update to the right version using 'zigup {s}'\n\n", - .{ - if (colors) "\x1b[1;33m" else "", - recommended_zig_version, - builtin.zig_version_string, - if (colors) "\x1b[0m" else "", - recommended_zig_version, - }, - ); - }, - } +pub fn getOSGlibCVersion(os: OperatingSystem) ?Version { + return switch (os) { + // Compiling with a newer glibc than this will break certain cloud environments. + .linux => .{ .major = 2, .minor = 27, .patch = 0 }, - // Standard target options allows the person running `zig build` to choose - // what target to build for. Here we do not override the defaults, which - // means any target is allowed, and the default is native. Other options - // for restricting supported target set are available. - var target = b.standardTargetOptions(.{}); - // Standard release options allow the person running `zig build` to select - // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. 
- optimize = b.standardOptimizeOption(.{}); + else => null, + }; +} - var generated_code_directory = b.option([]const u8, "generated-code", "Set the generated code directory") orelse ""; +pub fn build(b: *Build) !void { + std.debug.print("zig build v{s}\n", .{builtin.zig_version_string}); - if (generated_code_directory.len == 0) { - generated_code_directory = b.pathFromRoot("build/codegen"); - } + b.zig_lib_dir = b.zig_lib_dir orelse b.path("src/deps/zig/lib"); - var output_dir_buf = std.mem.zeroes([4096]u8); - const bin_label = if (optimize == std.builtin.OptimizeMode.Debug) "packages/debug-bun-" else "packages/bun-"; + var target_query = b.standardTargetOptionsQueryOnly(.{}); + const optimize = b.standardOptimizeOption(.{}); - var triplet_buf: [64]u8 = undefined; + const os, const arch = brk: { + // resolve the target query to pick up what operating system and cpu + // architecture that is desired. this information is used to slightly + // refine the query. + const temp_resolved = b.resolveTargetQuery(target_query); + const arch = temp_resolved.result.cpu.arch; + const os: OperatingSystem = if (arch.isWasm()) + .wasm + else switch (temp_resolved.result.os.tag) { + .macos => .mac, + .linux => .linux, + .windows => .windows, + else => |t| std.debug.panic("Unsupported OS tag {}", .{t}), + }; + break :brk .{ os, arch }; + }; - const arch: std.Target.Cpu.Arch = target.getCpuArch(); + target_query.os_version_min = getOSVersionMin(os); + target_query.glibc_version = getOSGlibCVersion(os); - var os_tagname = @tagName(target.getOs().tag); + const target = b.resolveTargetQuery(target_query); - switch (target.getOs().tag) { - .macos => { - os_tagname = "darwin"; - target.os_version_min = std.zig.CrossTarget.OsVersion{ .semver = .{ .major = 11, .minor = 0, .patch = 0 } }; - }, - .windows => { - target.os_version_min = std.zig.CrossTarget.OsVersion{ - // Windows 1809 - // Minimum version for a syscall related to bun.sys.renameat - // if you update this please update 
install.ps1 - .windows = .win10_rs5, - }; - }, - .linux => { - target.setGnuLibCVersion(2, 27, 0); - }, - else => {}, - } + const generated_code_dir = b.pathFromRoot( + b.option([]const u8, "generated-code", "Set the generated code directory") orelse + "build/codegen", + ); + const bun_version = b.option([]const u8, "version", "Value of `Bun.version`") orelse "0.0.0"; - @memcpy(triplet_buf[0..].ptr, os_tagname); - const osname = triplet_buf[0..os_tagname.len]; - triplet_buf[osname.len] = '-'; + b.reference_trace = ref_trace: { + const trace = b.option(u32, "reference-trace", "Set the reference trace") orelse 16; + break :ref_trace if (trace == 0) null else trace; + }; - @memcpy(triplet_buf[osname.len + 1 ..].ptr, @tagName(target.getCpuArch())); - var cpuArchName = triplet_buf[osname.len + 1 ..][0..@tagName(target.getCpuArch()).len]; - std.mem.replaceScalar(u8, cpuArchName, '_', '-'); - if (std.mem.eql(u8, cpuArchName, "x86-64")) { - @memcpy(cpuArchName.ptr, "x64"); - cpuArchName = cpuArchName[0..3]; - } - - const triplet = triplet_buf[0 .. osname.len + cpuArchName.len + 1]; - - const outfile_maybe = b.option([]const u8, "output-file", "target to install to"); - - if (outfile_maybe) |outfile| { - output_dir = try pathRel(b.allocator, b.install_prefix, std.fs.path.dirname(outfile) orelse ""); - } else { - const output_dir_base = try std.fmt.bufPrint(&output_dir_buf, "{s}{s}", .{ bin_label, triplet }); - output_dir = try pathRel(b.allocator, b.install_prefix, output_dir_base); - } - - is_debug_build = optimize == OptimizeMode.Debug; - const bun_executable_name = if (outfile_maybe) |outfile| std.fs.path.basename(outfile[0 .. 
outfile.len - std.fs.path.extension(outfile).len]) else if (is_debug_build) "bun-debug" else "bun"; - const root_src = if (target.getOsTag() == std.Target.Os.Tag.freestanding) - "root_wasm.zig" - else - "root.zig"; - - const min_version: std.SemanticVersion = if (!(target.isWindows() or target.getOsTag() == .freestanding)) - target.getOsVersionMin().semver - else - .{ .major = 0, .minor = 0, .patch = 0 }; - - const max_version: std.SemanticVersion = if (!(target.isWindows() or target.getOsTag() == .freestanding)) - target.getOsVersionMax().semver - else - .{ .major = 0, .minor = 0, .patch = 0 }; - - var obj_step = b.step("obj", "Build bun as a .o file"); - var obj = b.addObject(.{ - .name = bun_executable_name, - .root_source_file = FileSource.relative(root_src), + var build_options = BunBuildOptions{ .target = target, .optimize = optimize, - .main_mod_path = .{ .cwd_relative = b.pathFromRoot(".") }, - }); - if (!exists(b.pathFromRoot(try std.fs.path.join(b.allocator, &.{ - "src", - "js_lexer", - "id_continue_bitset.blob", - })))) { - const identifier_data = b.pathFromRoot(try std.fs.path.join(b.allocator, &.{ "src", "js_lexer", "identifier_data.zig" })); - var run_step = b.addSystemCommand(&.{ - b.zig_exe, - "run", - identifier_data, - }); - run_step.has_side_effects = true; - obj.step.dependOn(&run_step.step); - } + .os = os, + .arch = arch, - b.reference_trace = if (b.option(u32, "reference-trace", "Set the reference trace")) |trace| - if (trace == 0) - null - else - trace - else - 16; + .generated_code_dir = generated_code_dir, - var default_build_options: BunBuildOptions = brk: { - const is_baseline = arch.isX86() and (target.cpu_model == .baseline or - !std.Target.x86.featureSetHas(target.getCpuFeatures(), .avx2)); + .version = try Version.parse(bun_version), + .canary_revision = canary: { + const rev = b.option(u32, "canary", "Treat this as a canary build") orelse 0; + break :canary if (rev == 0) null else rev; + }, - var git_sha: [:0]const u8 = ""; - if 
(b.env_map.get("GITHUB_SHA") orelse b.env_map.get("GIT_SHA")) |sha| { - git_sha = b.allocator.dupeZ(u8, sha) catch unreachable; - } else { - sha: { - const result = std.ChildProcess.run(.{ + .sha = sha: { + const sha = b.option([]const u8, "sha", "Force the git sha") orelse + b.graph.env_map.get("GITHUB_SHA") orelse + b.graph.env_map.get("GIT_SHA") orelse fetch_sha: { + const result = std.process.Child.run(.{ .allocator = b.allocator, .argv = &.{ "git", @@ -309,328 +180,271 @@ pub fn build_(b: *Build) !void { }, .cwd = b.pathFromRoot("."), .expand_arg0 = .expand, - }) catch break :sha; + }) catch |err| { + std.log.warn("Failed to execute 'git rev-parse HEAD': {s}", .{@errorName(err)}); + std.log.warn("Falling back to zero sha", .{}); + break :sha zero_sha; + }; - git_sha = b.allocator.dupeZ(u8, std.mem.trim(u8, result.stdout, "\n \t")) catch unreachable; + break :fetch_sha b.dupe(std.mem.trim(u8, result.stdout, "\n \t")); + }; + + if (sha.len == 0) { + std.log.warn("No git sha found, falling back to zero sha", .{}); + break :sha zero_sha; + } + if (sha.len != 40) { + std.log.warn("Invalid git sha: {s}", .{sha}); + std.log.warn("Falling back to zero sha", .{}); + break :sha zero_sha; } - } - const enable_logs = if (b.option(bool, "enable_logs", "Enable logs in release")) |l| l else false; + break :sha sha; + }, - const is_canary, const canary_revision = if (b.option(u32, "canary", "Treat this as a canary build")) |rev| - if (rev == 0) - .{ false, 0 } - else - .{ true, rev } - else - .{ false, 0 }; - break :brk .{ - .enable_logs = enable_logs, - .is_canary = is_canary, - .canary_revision = canary_revision, - .version = b.option([]const u8, "version", "Value of `Bun.version`") orelse "0.0.0", - .sha = git_sha, - .baseline = is_baseline, - .bindgen = false, - .base_path = try b.allocator.dupeZ(u8, b.pathFromRoot(".")), - .tracy_callstack_depth = b.option(u16, "tracy_callstack_depth", "") orelse 10, - }; + .tracy_callstack_depth = b.option(u16, "tracy_callstack_depth", 
"") orelse 10, + .enable_logs = b.option(bool, "enable_logs", "Enable logs in release") orelse false, }; + // zig build obj { - try addInternalPackages( - b, - obj, - b.allocator, - b.zig_exe, - target, - ); + var step = b.step("obj", "Build Bun's Zig code as a .o file"); + var bun_obj = addBunObject(b, &build_options); + step.dependOn(&bun_obj.step); + step.dependOn(&b.addInstallFile(bun_obj.getEmittedBin(), "bun-zig.o").step); + } - if (default_build_options.baseline) { - obj.target.cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64_v2 }; - } else if (arch.isX86()) { - obj.target.cpu_model = .{ .explicit = &std.Target.x86.cpu.haswell }; - } else if (arch.isAARCH64()) { - if (target.isDarwin()) { - obj.target.cpu_model = .{ .explicit = &std.Target.aarch64.cpu.apple_m1 }; - } else { - obj.target.cpu_model = .{ .explicit = &std.Target.aarch64.cpu.generic }; + // zig build windows-shim + { + var step = b.step("windows-shim", "Build the Windows shim (bun_shim_impl.exe + bun_shim_debug.exe)"); + var windows_shim = build_options.windowsShim(b); + step.dependOn(&b.addInstallFile(windows_shim.exe.getEmittedBin(), "bun_shim_impl.exe").step); + step.dependOn(&b.addInstallFile(windows_shim.dbg.getEmittedBin(), "bun_shim_debug.exe").step); + } + + // zig build check + { + var step = b.step("check", "Check for semantic analysis errors"); + var bun_check_obj = addBunObject(b, &build_options); + bun_check_obj.generated_bin = null; + step.dependOn(&bun_check_obj.step); + + // The default install step will run zig build check This is so ZLS + // identifies the codebase, as well as performs checking if build on + // save is enabled. 
+ + // For building Bun itself, one should run `bun setup` + b.default_step.dependOn(step); + } + + // zig build check-all + { + var step = b.step("check-all", "Check for semantic analysis errors on all supported platforms"); + inline for (.{ + .{ .os = .windows, .arch = .x86_64 }, + .{ .os = .mac, .arch = .x86_64 }, + .{ .os = .mac, .arch = .aarch64 }, + .{ .os = .linux, .arch = .x86_64 }, + .{ .os = .linux, .arch = .aarch64 }, + }) |check| { + inline for (.{ .Debug, .ReleaseFast }) |mode| { + const check_target = b.resolveTargetQuery(.{ + .os_tag = OperatingSystem.stdOSTag(check.os), + .cpu_arch = check.arch, + .os_version_min = getOSVersionMin(check.os), + .glibc_version = getOSGlibCVersion(check.os), + }); + + var options = BunBuildOptions{ + .target = check_target, + .os = check.os, + .arch = check_target.result.cpu.arch, + .optimize = mode, + + .canary_revision = build_options.canary_revision, + .sha = build_options.sha, + .tracy_callstack_depth = build_options.tracy_callstack_depth, + .version = build_options.version, + .generated_code_dir = build_options.generated_code_dir, + }; + var obj = addBunObject(b, &options); + obj.generated_bin = null; + step.dependOn(&obj.step); } } - - try default_build_options.updateRuntime(); - - // we have to dump to stderr because stdout is read by zls - std.io.getStdErr().writer().print("Build {s} v{} - v{} ({s})\n", .{ - triplet, - min_version, - max_version, - obj.target.getCpuModel().name, - }) catch {}; - std.io.getStdErr().writer().print("Zig v{s}\n", .{builtin.zig_version_string}) catch {}; - - defer obj_step.dependOn(&obj.step); - - var install = b.addInstallFileWithDir( - obj.getEmittedBin(), - .{ .custom = output_dir }, - b.fmt("{s}.o", .{bun_executable_name}), - ); - install.step.dependOn(&obj.step); - obj_step.dependOn(&install.step); - - var actual_build_options = default_build_options; - if (b.option(bool, "generate-sizes", "Generate sizes of things") orelse false) { - actual_build_options.sizegen = true; - } - 
- actual_build_options.project = "bun"; - obj.addOptions("build_options", actual_build_options.step(b)); - - // Generated Code - // TODO: exit with a better error early if these files do not exist. it is an indication someone ran `zig build` directly without the code generators. - obj.addModule("ZigGeneratedClasses", b.createModule(.{ - .source_file = .{ .path = b.pathJoin(&.{ generated_code_directory, "ZigGeneratedClasses.zig" }) }, - })); - obj.addModule("ResolvedSourceTag", b.createModule(.{ - .source_file = .{ .path = b.pathJoin(&.{ generated_code_directory, "ResolvedSourceTag.zig" }) }, - })); - - obj.linkLibC(); - obj.dll_export_fns = true; - obj.strip = false; - obj.omit_frame_pointer = false; - obj.subsystem = .Console; - - // Disable stack probing on x86 so we don't need to include compiler_rt - if (target.getCpuArch().isX86() or target.isWindows()) obj.disable_stack_probing = true; - - if (b.option(bool, "for-editor", "Do not emit bin, just check for errors") orelse false) { - // obj.emit_bin = .no_emit; - obj.generated_bin = null; - } - - if (target.getOsTag() == .linux) { - // obj.want_lto = tar; - obj.link_emit_relocs = true; - obj.link_eh_frame_hdr = true; - obj.link_function_sections = true; - } - } - - { - const headers_step = b.step("headers-obj", "Build JavaScriptCore headers"); - var headers_obj = b.addObject(.{ - .name = "headers", - .root_source_file = FileSource.relative("src/bindgen.zig"), - .target = target, - .optimize = optimize, - .main_mod_path = obj.main_mod_path, - }); - defer headers_step.dependOn(&headers_obj.step); - try configureObjectStep(b, headers_obj, headers_step, @TypeOf(target), target); - var headers_build_options = default_build_options; - headers_build_options.bindgen = true; - headers_obj.addOptions("build_options", default_build_options.step(b)); - headers_obj.linkLibCpp(); - } - - { - const wasm_step = b.step("bun-wasm", "Build WASM"); - var wasm = b.addStaticLibrary(.{ - .name = "bun-wasm", - .root_source_file = 
FileSource.relative("root_wasm.zig"), - .target = target, - .optimize = optimize, - .main_mod_path = obj.main_mod_path, - }); - defer wasm_step.dependOn(&wasm.step); - wasm.strip = false; - // wasm_step.link_function_sections = true; - // wasm_step.link_emit_relocs = true; - // wasm_step.single_threaded = true; - try configureObjectStep(b, wasm, wasm_step, @TypeOf(target), target); - var build_opts = default_build_options; - wasm.addOptions("build_options", build_opts.step(b)); - } - - { - const headers_step = b.step("httpbench-obj", "Build HTTPBench tool (object files)"); - var headers_obj = b.addObject(.{ - .name = "httpbench", - .root_source_file = FileSource.relative("misctools/http_bench.zig"), - .target = target, - .optimize = optimize, - .main_mod_path = obj.main_mod_path, - }); - defer headers_step.dependOn(&headers_obj.step); - try configureObjectStep(b, headers_obj, headers_step, @TypeOf(target), target); - headers_obj.addOptions("build_options", default_build_options.step(b)); - } - - { - const headers_step = b.step("machbench-obj", "Build Machbench tool (object files)"); - var headers_obj = b.addObject(.{ - .name = "machbench", - .root_source_file = FileSource.relative("misctools/machbench.zig"), - .target = target, - .optimize = optimize, - .main_mod_path = obj.main_mod_path, - }); - defer headers_step.dependOn(&headers_obj.step); - try configureObjectStep(b, headers_obj, headers_step, @TypeOf(target), target); - headers_obj.addOptions("build_options", default_build_options.step(b)); - } - - { - const headers_step = b.step("fetch-obj", "Build fetch (object files)"); - var headers_obj = b.addObject(.{ - .name = "fetch", - .root_source_file = FileSource.relative("misctools/fetch.zig"), - .target = target, - .optimize = optimize, - .main_mod_path = obj.main_mod_path, - }); - defer headers_step.dependOn(&headers_obj.step); - try configureObjectStep(b, headers_obj, headers_step, @TypeOf(target), target); - headers_obj.addOptions("build_options", 
default_build_options.step(b)); - } - - { - const headers_step = b.step("string-bench", "Build string bench"); - var headers_obj = b.addExecutable(.{ - .name = "string-bench", - .root_source_file = FileSource.relative("src/bench/string-handling.zig"), - .target = target, - .optimize = optimize, - .main_mod_path = obj.main_mod_path, - }); - defer headers_step.dependOn(&headers_obj.step); - try configureObjectStep(b, headers_obj, headers_step, @TypeOf(target), target); - headers_obj.addOptions("build_options", default_build_options.step(b)); - } - - { - const headers_step = b.step("sha-bench-obj", "Build sha bench"); - var headers_obj = b.addObject(.{ - .name = "sha", - .root_source_file = FileSource.relative("src/sha.zig"), - .target = target, - .optimize = optimize, - .main_mod_path = obj.main_mod_path, - }); - defer headers_step.dependOn(&headers_obj.step); - try configureObjectStep(b, headers_obj, headers_step, @TypeOf(target), target); - headers_obj.addOptions("build_options", default_build_options.step(b)); - } - - { - const headers_step = b.step("vlq-bench", "Build vlq bench"); - var headers_obj: *CompileStep = b.addExecutable(.{ - .name = "vlq-bench", - .root_source_file = FileSource.relative("src/sourcemap/vlq_bench.zig"), - .target = target, - .optimize = optimize, - .main_mod_path = obj.main_mod_path, - }); - defer headers_step.dependOn(&headers_obj.step); - try configureObjectStep(b, headers_obj, headers_step, @TypeOf(target), target); - headers_obj.addOptions("build_options", default_build_options.step(b)); - } - - { - const headers_step = b.step("tgz-obj", "Build tgz (object files)"); - var headers_obj: *CompileStep = b.addObject(.{ - .name = "tgz", - .root_source_file = FileSource.relative("misctools/tgz.zig"), - .target = target, - .optimize = optimize, - .main_mod_path = obj.main_mod_path, - }); - defer headers_step.dependOn(&headers_obj.step); - try configureObjectStep(b, headers_obj, headers_step, @TypeOf(target), target); - 
headers_obj.addOptions("build_options", default_build_options.step(b)); - } - - { - const headers_step = b.step("test", "Build test"); - - const test_file = b.option([]const u8, "test-file", "Input file for test"); - const test_bin_ = b.option([]const u8, "test-bin", "Emit bin to"); - const test_filter = b.option([]const u8, "test-filter", "Filter for test"); - - var headers_obj: *CompileStep = b.addTest(.{ - .root_source_file = FileSource.relative(test_file orelse "src/main.zig"), - .target = target, - .main_mod_path = obj.main_mod_path, - }); - headers_obj.filter = test_filter; - if (test_bin_) |test_bin| { - headers_obj.name = std.fs.path.basename(test_bin); - if (std.fs.path.dirname(test_bin)) |dir| { - var install = b.addInstallFileWithDir( - headers_obj.getEmittedBin(), - .{ .custom = try std.fs.path.relative(b.allocator, output_dir, dir) }, - headers_obj.name, - ); - install.step.dependOn(&headers_obj.step); - headers_step.dependOn(&install.step); - } - } - - try configureObjectStep(b, headers_obj, headers_step, @TypeOf(target), target); - - headers_step.dependOn(&headers_obj.step); - headers_obj.addOptions("build_options", default_build_options.step(b)); } // Running `zig build` with no arguments is almost always a mistake. - const mistake_message = b.addSystemCommand(&.{ - "echo", - \\ - \\error: To build Bun from source, please use `bun run setup` instead of `zig build`" - \\ - \\If you want to build the zig code only, run: - \\ 'zig build obj -Dgenerated-code=./build/codegen [...opts]' - \\ - \\For more info, see https://bun.sh/docs/project/contributing - \\ - }); + // TODO: revive this error. 
cannot right now since ZLS runs zig build without arguments + { + // const mistake_message = b.addSystemCommand(&.{ + // "echo", + // \\ + // \\To build Bun from source, please use `bun run setup` instead of `zig build`" + // \\For more info, see https://bun.sh/docs/project/contributing + // \\ + // \\If you want to build the zig code in isolation, run: + // \\ 'zig build obj -Dgenerated-code=./build/codegen [...opts]' + // \\ + // \\If you want to test a compile without emitting an object: + // \\ 'zig build check' + // \\ 'zig build check-all' (run linux+mac+windows) + // \\ + // }); - b.default_step.dependOn(&mistake_message.step); + // b.default_step.dependOn(&mistake_message.step); + } } -pub var original_make_fn: ?*const fn (step: *std.build.Step) anyerror!void = null; +pub fn addBunObject(b: *Build, opts: *BunBuildOptions) *Compile { + const obj = b.addObject(.{ + .name = if (opts.optimize == .Debug) "bun-debug" else "bun", + .root_source_file = switch (opts.os) { + .wasm => b.path("root_wasm.zig"), + else => b.path("root.zig"), + }, + .target = opts.target, + .optimize = opts.optimize, + .pic = true, + .strip = false, // stripped at the end + }); -pub fn configureObjectStep(b: *std.build.Builder, obj: *CompileStep, obj_step: *std.build.Step, comptime Target: type, target: Target) !void { - // obj.setTarget(target); - try addInternalPackages(b, obj, std.heap.page_allocator, b.zig_exe, target); - - obj.strip = false; - - // obj.setBuildMode(optimize); obj.bundle_compiler_rt = false; - if (obj.emit_directory == null) { - var install = b.addInstallFileWithDir( - obj.getEmittedBin(), - .{ .custom = output_dir }, - b.fmt("{s}.o", .{obj.name}), - ); + obj.formatted_panics = true; + obj.root_module.omit_frame_pointer = false; - install.step.dependOn(&obj.step); - obj_step.dependOn(&install.step); + // Link libc + if (opts.os != .wasm) { + obj.linkLibC(); + obj.linkLibCpp(); } - if (target.getOsTag() != .freestanding) obj.linkLibC(); - if (target.getOsTag() != 
.freestanding) obj.bundle_compiler_rt = false; // Disable stack probing on x86 so we don't need to include compiler_rt - // Needs to be disabled here too so headers object will build without the `__zig_probe_stack` symbol - if (target.getCpuArch().isX86()) obj.disable_stack_probing = true; + if (opts.arch.isX86()) { + obj.root_module.stack_check = false; + obj.root_module.stack_protector = false; + } - if (target.getOsTag() == .linux) { - // obj.want_lto = tar; + if (opts.os == .linux) { obj.link_emit_relocs = true; obj.link_eh_frame_hdr = true; obj.link_function_sections = true; + + if (opts.optimize == .Debug) { + obj.root_module.valgrind = true; + } + } + addInternalPackages(b, obj, opts); + obj.root_module.addImport("build_options", opts.buildOptionsModule(b)); + return obj; +} + +fn exists(path: []const u8) bool { + const file = std.fs.openFileAbsolute(path, .{ .mode = .read_only }) catch return false; + file.close(); + return true; +} + +fn addInternalPackages(b: *Build, obj: *Compile, opts: *BunBuildOptions) void { + const os = opts.os; + + const io_path = switch (os) { + .mac => "src/io/io_darwin.zig", + .linux => "src/io/io_linux.zig", + .windows => "src/io/io_windows.zig", + else => "src/io/io_stub.zig", + }; + obj.root_module.addAnonymousImport("async_io", .{ + .root_source_file = b.path(io_path), + }); + + const zlib_internal_path = switch (os) { + .windows => "src/deps/zlib.win32.zig", + .linux, .mac => "src/deps/zlib.posix.zig", + else => null, + }; + if (zlib_internal_path) |path| { + obj.root_module.addAnonymousImport("zlib-internal", .{ + .root_source_file = b.path(path), + }); + } + + const async_path = switch (os) { + .linux, .mac => "src/async/posix_event_loop.zig", + .windows => "src/async/windows_event_loop.zig", + else => "src/async/stub_event_loop.zig", + }; + obj.root_module.addAnonymousImport("async", .{ + .root_source_file = b.path(async_path), + }); + + const zig_generated_classes_path = b.pathJoin(&.{ opts.generated_code_dir, 
"ZigGeneratedClasses.zig" }); + validateGeneratedPath(zig_generated_classes_path); + obj.root_module.addAnonymousImport("ZigGeneratedClasses", .{ + .root_source_file = .{ .cwd_relative = zig_generated_classes_path }, + }); + + const resolved_source_tag_path = b.pathJoin(&.{ opts.generated_code_dir, "ResolvedSourceTag.zig" }); + validateGeneratedPath(resolved_source_tag_path); + obj.root_module.addAnonymousImport("ResolvedSourceTag", .{ + .root_source_file = .{ .cwd_relative = resolved_source_tag_path }, + }); + + if (os == .windows) { + obj.root_module.addAnonymousImport("bun_shim_impl.exe", .{ + .root_source_file = opts.windowsShim(b).exe.getEmittedBin(), + }); } } + +fn validateGeneratedPath(path: []const u8) void { + if (!exists(path)) { + std.debug.panic("{s} does not exist in generated code directory!", .{std.fs.path.basename(path)}); + } +} + +const WindowsShim = struct { + exe: *Compile, + dbg: *Compile, + + fn create(b: *Build) WindowsShim { + const target = b.resolveTargetQuery(.{ + .cpu_model = .{ .explicit = &std.Target.x86.cpu.nehalem }, + .cpu_arch = .x86_64, + .os_tag = .windows, + .os_version_min = getOSVersionMin(.windows), + }); + + const path = b.path("src/install/windows-shim/bun_shim_impl.zig"); + + const exe = b.addExecutable(.{ + .name = "bun_shim_impl", + .root_source_file = path, + .target = target, + .optimize = .ReleaseFast, + .use_llvm = true, + .use_lld = true, + .unwind_tables = false, + .omit_frame_pointer = true, + .strip = true, + .linkage = .static, + .sanitize_thread = false, + .single_threaded = true, + .link_libc = false, + }); + + const dbg = b.addExecutable(.{ + .name = "bun_shim_debug", + .root_source_file = path, + .target = target, + .optimize = .Debug, + .use_llvm = true, + .use_lld = true, + .linkage = .static, + .single_threaded = true, + .link_libc = false, + }); + + return .{ .exe = exe, .dbg = dbg }; + } +}; diff --git a/docs/project/internals/build-process-for-ci.md b/docs/project/internals/build-process-for-ci.md 
index efc29a68df..51a0157477 100644 --- a/docs/project/internals/build-process-for-ci.md +++ b/docs/project/internals/build-process-for-ci.md @@ -1,7 +1,7 @@ There are four parts to the CI build: - Dependencies: should be cached across builds as much as possible, it depends on git submodule hashes -- Zig Object: depends on \*.zig and potentially src/js +- Zig Object: depends on \*.zig and src/js - C++ Object: depends on \*.cpp and src/js - Linking: depends on the above three @@ -15,7 +15,7 @@ BUN_DEPS_OUT_DIR="/optional/out/dir" bash ./scripts/all-dependencies.sh ## Zig Object -This does not have a dependency on WebKit or any of the dependencies at all. It can be compiled without checking out submodules, but you will need to have bun install run. It can be very easily cross compiled. +This does not have a dependency on WebKit or any of the dependencies at all. It can be compiled without checking out submodules, but you will need to have bun install run. It can be very easily cross compiled. Note that the zig object is always `bun-zig.o`. ```sh BUN_REPO=/path/to/oven-sh/bun @@ -27,9 +27,9 @@ cmake $BUN_REPO \ -DCMAKE_BUILD_TYPE=Release \ -DCPU_TARGET="native" \ -DZIG_TARGET="native" \ - -DBUN_ZIG_OBJ="./bun-zig.o" + -DBUN_ZIG_OBJ_DIR="./build" -ninja ./bun-zig.o +ninja ./build/bun-zig.o # -> bun-zig.o ``` @@ -60,12 +60,12 @@ cmake $BUN_REPO \ -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ -DBUN_LINK_ONLY=1 \ - -DBUN_ZIG_OBJ="/path/to/bun-zig.o" \ + -DBUN_ZIG_OBJ_DIR="/path/to/bun-zig-dir" \ -DBUN_CPP_ARCHIVE="/path/to/bun-cpp-objects.a" ninja -# optiona: +# optional: # -DBUN_DEPS_OUT_DIR=... custom deps dir, use this to cache the built deps between rebuilds # -DWEBKIT_DIR=... 
same thing, but it's probably fast enough to pull from github releases diff --git a/misctools/compression.zig b/misctools/compression.zig index f7aa3853d4..465eb2d023 100644 --- a/misctools/compression.zig +++ b/misctools/compression.zig @@ -3,7 +3,7 @@ const std = @import("std"); const CompressionFramework = struct { var handle: ?*anyopaque = null; pub fn load() !void { - handle = std.os.darwin.dlopen("libcompression.dylib", 1); + handle = std.posix.darwin.dlopen("libcompression.dylib", 1); if (handle == null) return error.@"failed to load Compression.framework"; @@ -247,7 +247,7 @@ pub fn main() anyerror!void { if (algorithm == null or operation == null) { try std.io.getStdErr().writer().print("to compress: {s} ./file ./out.{{br,gz,lz4,lzfse}}\nto decompress: {s} ./out.{{br,gz,lz4,lzfse}} ./out\n", .{ argv0, argv0 }); - std.os.exit(1); + std.posix.exit(1); } var output_file: std.fs.File = undefined; diff --git a/misctools/http_bench.zig b/misctools/http_bench.zig index 021cfa6a11..b736eab9d3 100644 --- a/misctools/http_bench.zig +++ b/misctools/http_bench.zig @@ -198,7 +198,7 @@ pub fn main() anyerror!void { try channel.buffer.ensureTotalCapacity(args.count); try NetworkThread.init(); - if (args.concurrency > 0) HTTP.AsyncHTTP.max_simultaneous_requests.store(args.concurrency, .Monotonic); + if (args.concurrency > 0) HTTP.AsyncHTTP.max_simultaneous_requests.store(args.concurrency, .monotonic); const Group = struct { response_body: MutableString = undefined, context: HTTP.HTTPChannelContext = undefined, diff --git a/misctools/machbench.zig b/misctools/machbench.zig index 0c2419fb11..d664788eab 100644 --- a/misctools/machbench.zig +++ b/misctools/machbench.zig @@ -126,11 +126,11 @@ pub fn main() anyerror!void { Output.prettyErrorln("For {d} messages and {d} threads:", .{ count, thread_count }); Output.flush(); defer Output.flush(); - const runs = if (std.os.getenv("RUNS")) |run_count| try std.fmt.parseInt(usize, run_count, 10) else 1; + const runs = if 
(std.posix.getenv("RUNS")) |run_count| try std.fmt.parseInt(usize, run_count, 10) else 1; - if (std.os.getenv("NO_MACH") == null) + if (std.posix.getenv("NO_MACH") == null) try machMain(runs); - if (std.os.getenv("NO_USER") == null) + if (std.posix.getenv("NO_USER") == null) try userMain(runs); } diff --git a/misctools/readlink-getfd.zig b/misctools/readlink-getfd.zig index 93fb93be31..4522fdc640 100644 --- a/misctools/readlink-getfd.zig +++ b/misctools/readlink-getfd.zig @@ -42,11 +42,11 @@ pub fn main() anyerror!void { .loose, ); joined_buf[joined.len] = 0; - const os = std.os; + const os = std.posix; const joined_z: [:0]const u8 = joined_buf[0..joined.len :0]; - const O_PATH = if (@hasDecl(os.O, "PATH")) os.O.PATH else 0; + const O_PATH = if (@hasDecl(bun.O, "PATH")) bun.O.PATH else 0; - var file = std.os.openZ(joined_z, O_PATH | std.os.O.CLOEXEC, 0) catch |err| { + var file = std.posix.openZ(joined_z, O_PATH | bun.O.CLOEXEC, 0) catch |err| { switch (err) { error.NotDir, error.FileNotFound => { Output.prettyError("404 Not Found: \"{s}\"", .{joined_z}); diff --git a/misctools/readlink-realpath.zig b/misctools/readlink-realpath.zig index 1891e30f0b..827db30dc7 100644 --- a/misctools/readlink-realpath.zig +++ b/misctools/readlink-realpath.zig @@ -32,7 +32,7 @@ pub fn main() anyerror!void { var j: usize = 0; while (j < 1000) : (j += 1) { - path = try std.os.realpathZ(to_resolve, &out_buffer); + path = try std.posix.realpathZ(to_resolve, &out_buffer); } Output.print("{s}", .{path}); diff --git a/package.json b/package.json index efac87afd9..90a4fde74d 100644 --- a/package.json +++ b/package.json @@ -44,6 +44,8 @@ "lint": "eslint './**/*.d.ts' --cache", "lint:fix": "eslint './**/*.d.ts' --cache --fix", "test": "node packages/bun-internal-test/src/runner.node.mjs ./build/bun-debug", - "test:release": "node packages/bun-internal-test/src/runner.node.mjs ./build-release/bun" + "test:release": "node packages/bun-internal-test/src/runner.node.mjs ./build-release/bun", + 
"zig-check": ".cache/zig/zig.exe build check-all --summary new", + "zig": ".cache/zig/zig.exe " } } diff --git a/packages/bun-inspector-protocol/src/protocol/v8/protocol.json b/packages/bun-inspector-protocol/src/protocol/v8/protocol.json index 5f216d661d..d5a7db25e8 100644 --- a/packages/bun-inspector-protocol/src/protocol/v8/protocol.json +++ b/packages/bun-inspector-protocol/src/protocol/v8/protocol.json @@ -1518,7 +1518,10 @@ "id": "EventMetadata", "description": "A key-value pair for additional event information to pass along.", "type": "object", - "properties": [{ "name": "key", "type": "string" }, { "name": "value", "type": "string" }] + "properties": [ + { "name": "key", "type": "string" }, + { "name": "value", "type": "string" } + ] }, { "id": "BackgroundServiceEvent", @@ -1570,7 +1573,10 @@ { "name": "setRecording", "description": "Set the recording state for the service.", - "parameters": [{ "name": "shouldRecord", "type": "boolean" }, { "name": "service", "$ref": "ServiceName" }] + "parameters": [ + { "name": "shouldRecord", "type": "boolean" }, + { "name": "service", "$ref": "ServiceName" } + ] }, { "name": "clearEvents", @@ -1582,7 +1588,10 @@ { "name": "recordingStateChanged", "description": "Called when the recording state for the service has been updated.", - "parameters": [{ "name": "isRecording", "type": "boolean" }, { "name": "service", "$ref": "ServiceName" }] + "parameters": [ + { "name": "isRecording", "type": "boolean" }, + { "name": "service", "$ref": "ServiceName" } + ] }, { "name": "backgroundServiceEventReceived", @@ -2072,7 +2081,10 @@ { "id": "Header", "type": "object", - "properties": [{ "name": "name", "type": "string" }, { "name": "value", "type": "string" }] + "properties": [ + { "name": "name", "type": "string" }, + { "name": "value", "type": "string" } + ] }, { "id": "CachedResponse", @@ -3442,7 +3454,10 @@ { "name": "setStyleSheetText", "description": "Sets the new stylesheet text.", - "parameters": [{ "name": "styleSheetId", 
"$ref": "StyleSheetId" }, { "name": "text", "type": "string" }], + "parameters": [ + { "name": "styleSheetId", "$ref": "StyleSheetId" }, + { "name": "text", "type": "string" } + ], "returns": [ { "name": "sourceMapURL", @@ -3567,7 +3582,10 @@ }, { "name": "executeSQL", - "parameters": [{ "name": "databaseId", "$ref": "DatabaseId" }, { "name": "query", "type": "string" }], + "parameters": [ + { "name": "databaseId", "$ref": "DatabaseId" }, + { "name": "query", "type": "string" } + ], "returns": [ { "name": "columnNames", "optional": true, "type": "array", "items": { "type": "string" } }, { "name": "values", "optional": true, "type": "array", "items": { "type": "any" } }, @@ -3608,7 +3626,10 @@ { "name": "selectPrompt", "description": "Select a device in response to a DeviceAccess.deviceRequestPrompted event.", - "parameters": [{ "name": "id", "$ref": "RequestId" }, { "name": "deviceId", "$ref": "DeviceId" }] + "parameters": [ + { "name": "id", "$ref": "RequestId" }, + { "name": "deviceId", "$ref": "DeviceId" } + ] }, { "name": "cancelPrompt", @@ -5656,7 +5677,10 @@ }, { "name": "removeDOMStorageItem", - "parameters": [{ "name": "storageId", "$ref": "StorageId" }, { "name": "key", "type": "string" }] + "parameters": [ + { "name": "storageId", "$ref": "StorageId" }, + { "name": "key", "type": "string" } + ] }, { "name": "setDOMStorageItem", @@ -5678,7 +5702,10 @@ }, { "name": "domStorageItemRemoved", - "parameters": [{ "name": "storageId", "$ref": "StorageId" }, { "name": "key", "type": "string" }] + "parameters": [ + { "name": "storageId", "$ref": "StorageId" }, + { "name": "key", "type": "string" } + ] }, { "name": "domStorageItemUpdated", @@ -5748,7 +5775,10 @@ { "id": "MediaFeature", "type": "object", - "properties": [{ "name": "name", "type": "string" }, { "name": "value", "type": "string" }] + "properties": [ + { "name": "name", "type": "string" }, + { "name": "value", "type": "string" } + ] }, { "id": "VirtualTimePolicy", @@ -5762,7 +5792,10 @@ "description": 
"Used to specify User Agent Cient Hints to emulate. See https://wicg.github.io/ua-client-hints", "experimental": true, "type": "object", - "properties": [{ "name": "brand", "type": "string" }, { "name": "version", "type": "string" }] + "properties": [ + { "name": "brand", "type": "string" }, + { "name": "version", "type": "string" } + ] }, { "id": "UserAgentMetadata", @@ -6120,7 +6153,10 @@ "name": "setSensorOverrideReadings", "description": "Updates the sensor readings reported by a sensor type previously overriden\nby setSensorOverrideEnabled.", "experimental": true, - "parameters": [{ "name": "type", "$ref": "SensorType" }, { "name": "reading", "$ref": "SensorReading" }] + "parameters": [ + { "name": "type", "$ref": "SensorType" }, + { "name": "reading", "$ref": "SensorReading" } + ] }, { "name": "setIdleOverride", @@ -6405,11 +6441,17 @@ { "name": "disable" }, { "name": "selectAccount", - "parameters": [{ "name": "dialogId", "type": "string" }, { "name": "accountIndex", "type": "integer" }] + "parameters": [ + { "name": "dialogId", "type": "string" }, + { "name": "accountIndex", "type": "integer" } + ] }, { "name": "clickDialogButton", - "parameters": [{ "name": "dialogId", "type": "string" }, { "name": "dialogButton", "$ref": "DialogButton" }] + "parameters": [ + { "name": "dialogId", "type": "string" }, + { "name": "dialogButton", "$ref": "DialogButton" } + ] }, { "name": "dismissDialog", @@ -6464,7 +6506,10 @@ "id": "HeaderEntry", "description": "Response HTTP header entry", "type": "object", - "properties": [{ "name": "name", "type": "string" }, { "name": "value", "type": "string" }] + "properties": [ + { "name": "name", "type": "string" }, + { "name": "value", "type": "string" } + ] }, { "id": "AuthChallenge", @@ -8301,19 +8346,28 @@ "id": "PlayerProperty", "description": "Corresponds to kMediaPropertyChange", "type": "object", - "properties": [{ "name": "name", "type": "string" }, { "name": "value", "type": "string" }] + "properties": [ + { "name": 
"name", "type": "string" }, + { "name": "value", "type": "string" } + ] }, { "id": "PlayerEvent", "description": "Corresponds to kMediaEventTriggered", "type": "object", - "properties": [{ "name": "timestamp", "$ref": "Timestamp" }, { "name": "value", "type": "string" }] + "properties": [ + { "name": "timestamp", "$ref": "Timestamp" }, + { "name": "value", "type": "string" } + ] }, { "id": "PlayerErrorSourceLocation", "description": "Represents logged source line numbers reported in an error.\nNOTE: file and line are from chromium c++ implementation code, not js.", "type": "object", - "properties": [{ "name": "file", "type": "string" }, { "name": "line", "type": "integer" }] + "properties": [ + { "name": "file", "type": "string" }, + { "name": "line", "type": "integer" } + ] }, { "id": "PlayerError", @@ -12357,7 +12411,10 @@ "description": "Pair of issuer origin and number of available (signed, but not used) Trust\nTokens from that issuer.", "experimental": true, "type": "object", - "properties": [{ "name": "issuerOrigin", "type": "string" }, { "name": "count", "type": "number" }] + "properties": [ + { "name": "issuerOrigin", "type": "string" }, + { "name": "count", "type": "number" } + ] }, { "id": "InterestGroupAccessType", @@ -12420,7 +12477,10 @@ "id": "SharedStorageEntry", "description": "Struct for a single key-value pair in an origin's shared storage.", "type": "object", - "properties": [{ "name": "key", "type": "string" }, { "name": "value", "type": "string" }] + "properties": [ + { "name": "key", "type": "string" }, + { "name": "value", "type": "string" } + ] }, { "id": "SharedStorageMetadata", @@ -12436,7 +12496,10 @@ "id": "SharedStorageReportingMetadata", "description": "Pair of reporting metadata details for a candidate URL for `selectURL()`.", "type": "object", - "properties": [{ "name": "eventType", "type": "string" }, { "name": "reportingUrl", "type": "string" }] + "properties": [ + { "name": "eventType", "type": "string" }, + { "name": 
"reportingUrl", "type": "string" } + ] }, { "id": "SharedStorageUrlWithMetadata", @@ -12568,7 +12631,10 @@ "id": "AttributionReportingAggregationKeysEntry", "experimental": true, "type": "object", - "properties": [{ "name": "key", "type": "string" }, { "name": "value", "$ref": "UnsignedInt128AsBase16" }] + "properties": [ + { "name": "key", "type": "string" }, + { "name": "value", "$ref": "UnsignedInt128AsBase16" } + ] }, { "id": "AttributionReportingEventReportWindows", @@ -12943,7 +13009,10 @@ "name": "getInterestGroupDetails", "description": "Gets details for a named interest group.", "experimental": true, - "parameters": [{ "name": "ownerOrigin", "type": "string" }, { "name": "name", "type": "string" }], + "parameters": [ + { "name": "ownerOrigin", "type": "string" }, + { "name": "name", "type": "string" } + ], "returns": [{ "name": "details", "$ref": "InterestGroupDetails" }] }, { @@ -12986,7 +13055,10 @@ "name": "deleteSharedStorageEntry", "description": "Deletes entry for `key` (if it exists) for a given origin's shared storage.", "experimental": true, - "parameters": [{ "name": "ownerOrigin", "type": "string" }, { "name": "key", "type": "string" }] + "parameters": [ + { "name": "ownerOrigin", "type": "string" }, + { "name": "key", "type": "string" } + ] }, { "name": "clearSharedStorageEntries", @@ -13010,7 +13082,10 @@ "name": "setStorageBucketTracking", "description": "Set tracking for a storage key's buckets.", "experimental": true, - "parameters": [{ "name": "storageKey", "type": "string" }, { "name": "enable", "type": "boolean" }] + "parameters": [ + { "name": "storageKey", "type": "string" }, + { "name": "enable", "type": "boolean" } + ] }, { "name": "deleteStorageBucket", @@ -13456,7 +13531,10 @@ "id": "RemoteLocation", "experimental": true, "type": "object", - "properties": [{ "name": "host", "type": "string" }, { "name": "port", "type": "integer" }] + "properties": [ + { "name": "host", "type": "string" }, + { "name": "port", "type": "integer" } + ] 
} ], "commands": [ diff --git a/scripts/download-zig.ps1 b/scripts/download-zig.ps1 index d602bb5f97..dd42ea618c 100644 --- a/scripts/download-zig.ps1 +++ b/scripts/download-zig.ps1 @@ -1,11 +1,10 @@ $ErrorActionPreference = "Stop" -$ZigVersion="0.12.0-dev.1828+225fe6ddb" -$ZigVersionShort="0.12.0-dev.1828" +$ZigVersion="0.13.0" $Target="windows" $Arch="x86_64" -$Url = "https://github.com/oven-sh/zig/releases/download/${ZigVersionShort}/zig-${Target}-${Arch}-${ZigVersion}.zip" +$Url = "https://ziglang.org/builds/zig-${Target}-${Arch}-${ZigVersion}.zip" $CacheDir = (mkdir -Force (Join-Path $PSScriptRoot "../.cache")) $TarPath = Join-Path $CacheDir "zig-${ZigVersion}.zip" $OutDir = Join-Path $CacheDir "zig" diff --git a/scripts/download-zig.sh b/scripts/download-zig.sh index 4921dc0cc0..d0156f635e 100755 --- a/scripts/download-zig.sh +++ b/scripts/download-zig.sh @@ -14,8 +14,6 @@ else zig_version=$(grep 'recommended_zig_version = "' "build.zig" | cut -d'"' -f2) fi -zig_version_short=$(echo "$zig_version" | cut -d'+' -f1) - case $(uname -ms) in 'Darwin x86_64') target='macos' @@ -39,7 +37,7 @@ case $(uname -ms) in ;; esac -url="https://github.com/oven-sh/zig/releases/download/${zig_version_short}/zig-${target}-${arch}-${zig_version}.tar.xz" +url="https://ziglang.org/builds/zig-${target}-${arch}-${zig_version}.tar.xz" dest="$(pwd)/.cache/zig-${zig_version}.tar.xz" extract_at="$(pwd)/.cache/zig" @@ -57,7 +55,7 @@ update_repo_if_needed() { zig_version_previous=$(grep 'recommended_zig_version = "' "build.zig" | cut -d'"' -f2) for file in ${files[@]}; do - sed -i '' 's/'"${zig_version_previous}"'/'"${zig_version}"'/g' "$file" + sed -i 's/'"${zig_version_previous}"'/'"${zig_version}"'/g' "$file" done printf "Zig was updated to ${zig_version}. Please commit new files." 
diff --git a/scripts/download-zls.ps1 b/scripts/download-zls.ps1 index 1daf10381e..18b693ea24 100644 --- a/scripts/download-zls.ps1 +++ b/scripts/download-zls.ps1 @@ -2,6 +2,6 @@ push-location .cache try { git clone https://github.com/zigtools/zls set-location zls - git checkout a6786e1c324d773f9315f44c0ad976ef192d5493 + git checkout a26718049a8657d4da04c331aeced1697bc7652b ..\zig\zig.exe build -Doptimize=ReleaseFast } finally { Pop-Location } diff --git a/src/Global.zig b/src/Global.zig index 0ec22d9b81..56c0dd36cb 100644 --- a/src/Global.zig +++ b/src/Global.zig @@ -76,7 +76,7 @@ extern "kernel32" fn SetThreadDescription(thread: std.os.windows.HANDLE, name: [ pub fn setThreadName(name: [:0]const u8) void { if (Environment.isLinux) { - _ = std.os.prctl(.SET_NAME, .{@intFromPtr(name.ptr)}) catch {}; + _ = std.posix.prctl(.SET_NAME, .{@intFromPtr(name.ptr)}) catch 0; } else if (Environment.isMac) { _ = std.c.pthread_setname_np(name); } else if (Environment.isWindows) { @@ -111,11 +111,11 @@ export fn bun_is_exiting() c_int { return @intFromBool(isExiting()); } pub fn isExiting() bool { - return is_exiting.load(.Monotonic); + return is_exiting.load(.monotonic); } pub fn exitWide(code: u32) noreturn { - is_exiting.store(true, .Monotonic); + is_exiting.store(true, .monotonic); if (comptime Environment.isMac) { std.c.exit(@bitCast(code)); @@ -131,13 +131,13 @@ pub fn raiseIgnoringPanicHandler(sig: anytype) noreturn { Output.flush(); if (!Environment.isWindows) { - if (sig >= 1 and sig != std.os.SIG.STOP and sig != std.os.SIG.KILL) { - const act = std.os.Sigaction{ - .handler = .{ .sigaction = @ptrCast(@alignCast(std.os.SIG.DFL)) }, - .mask = std.os.empty_sigset, + if (sig >= 1 and sig != std.posix.SIG.STOP and sig != std.posix.SIG.KILL) { + const act = std.posix.Sigaction{ + .handler = .{ .sigaction = @ptrCast(@alignCast(std.posix.SIG.DFL)) }, + .mask = std.posix.empty_sigset, .flags = 0, }; - std.os.sigaction(@intCast(sig), &act, null) catch {}; + 
std.posix.sigaction(@intCast(sig), &act, null) catch {}; } } diff --git a/src/Progress.zig b/src/Progress.zig new file mode 100644 index 0000000000..da05df7f24 --- /dev/null +++ b/src/Progress.zig @@ -0,0 +1,455 @@ +//! This is a snapshot of the Zig std.Progress API before it's rewrite in 0.13 +//! We use this API for the progress in Bun install and some other places. +//! +//! TODO: It would be worth considering using our own progress indicator for +//! Bun install, as this bar only shows the most recent action. +//! +//! https://github.com/ziglang/zig/blob/0.12.0/lib/std/Progress.zig +//! +//! This API is non-allocating, non-fallible, and thread-safe. +//! The tradeoff is that users of this API must provide the storage +//! for each `Progress.Node`. +//! +//! Initialize the struct directly, overriding these fields as desired: +//! * `refresh_rate_ms` +//! * `initial_delay_ms` + +const std = @import("std"); +const builtin = @import("builtin"); +const windows = std.os.windows; +const testing = std.testing; +const assert = std.debug.assert; +const Progress = @This(); + +/// `null` if the current node (and its children) should +/// not print on update() +terminal: ?std.fs.File = undefined, + +/// Is this a windows API terminal (note: this is not the same as being run on windows +/// because other terminals exist like MSYS/git-bash) +is_windows_terminal: bool = false, + +/// Whether the terminal supports ANSI escape codes. +supports_ansi_escape_codes: bool = false, + +/// If the terminal is "dumb", don't print output. +/// This can be useful if you don't want to print all +/// the stages of code generation if there are a lot. +/// You should not use it if the user should see output +/// for example showing the user what tests run. +dont_print_on_dumb: bool = false, + +root: Node = undefined, + +/// Keeps track of how much time has passed since the beginning. +/// Used to compare with `initial_delay_ms` and `refresh_rate_ms`. 
+timer: ?std.time.Timer = null, + +/// When the previous refresh was written to the terminal. +/// Used to compare with `refresh_rate_ms`. +prev_refresh_timestamp: u64 = undefined, + +/// This buffer represents the maximum number of bytes written to the terminal +/// with each refresh. +output_buffer: [100]u8 = undefined, + +/// How many nanoseconds between writing updates to the terminal. +refresh_rate_ns: u64 = 50 * std.time.ns_per_ms, + +/// How many nanoseconds to keep the output hidden +initial_delay_ns: u64 = 500 * std.time.ns_per_ms, + +done: bool = true, + +/// Protects the `refresh` function, as well as `node.recently_updated_child`. +/// Without this, callsites would call `Node.end` and then free `Node` memory +/// while it was still being accessed by the `refresh` function. +update_mutex: std.Thread.Mutex = .{}, + +/// Keeps track of how many columns in the terminal have been output, so that +/// we can move the cursor back later. +columns_written: usize = undefined, + +/// Represents one unit of progress. Each node can have children nodes, or +/// one can use integers with `update`. +pub const Node = struct { + context: *Progress, + parent: ?*Node, + name: []const u8, + unit: []const u8 = "", + /// Must be handled atomically to be thread-safe. + recently_updated_child: ?*Node = null, + /// Must be handled atomically to be thread-safe. 0 means null. + unprotected_estimated_total_items: usize, + /// Must be handled atomically to be thread-safe. + unprotected_completed_items: usize, + + /// Create a new child progress node. Thread-safe. + /// Call `Node.end` when done. + /// TODO solve https://github.com/ziglang/zig/issues/2765 and then change this + /// API to set `self.parent.recently_updated_child` with the return value. + /// Until that is fixed you probably want to call `activate` on the return value. + /// Passing 0 for `estimated_total_items` means unknown. 
+ pub fn start(self: *Node, name: []const u8, estimated_total_items: usize) Node { + return Node{ + .context = self.context, + .parent = self, + .name = name, + .unprotected_estimated_total_items = estimated_total_items, + .unprotected_completed_items = 0, + }; + } + + /// This is the same as calling `start` and then `end` on the returned `Node`. Thread-safe. + pub fn completeOne(self: *Node) void { + if (self.parent) |parent| { + @atomicStore(?*Node, &parent.recently_updated_child, self, .release); + } + _ = @atomicRmw(usize, &self.unprotected_completed_items, .Add, 1, .monotonic); + self.context.maybeRefresh(); + } + + /// Finish a started `Node`. Thread-safe. + pub fn end(self: *Node) void { + self.context.maybeRefresh(); + if (self.parent) |parent| { + { + self.context.update_mutex.lock(); + defer self.context.update_mutex.unlock(); + _ = @cmpxchgStrong(?*Node, &parent.recently_updated_child, self, null, .monotonic, .monotonic); + } + parent.completeOne(); + } else { + self.context.update_mutex.lock(); + defer self.context.update_mutex.unlock(); + self.context.done = true; + self.context.refreshWithHeldLock(); + } + } + + /// Tell the parent node that this node is actively being worked on. Thread-safe. + pub fn activate(self: *Node) void { + if (self.parent) |parent| { + @atomicStore(?*Node, &parent.recently_updated_child, self, .release); + self.context.maybeRefresh(); + } + } + + /// Thread-safe. + pub fn setName(self: *Node, name: []const u8) void { + const progress = self.context; + progress.update_mutex.lock(); + defer progress.update_mutex.unlock(); + self.name = name; + if (self.parent) |parent| { + @atomicStore(?*Node, &parent.recently_updated_child, self, .release); + if (parent.parent) |grand_parent| { + @atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .release); + } + if (progress.timer) |*timer| progress.maybeRefreshWithHeldLock(timer); + } + } + + /// Thread-safe. 
+ pub fn setUnit(self: *Node, unit: []const u8) void { + const progress = self.context; + progress.update_mutex.lock(); + defer progress.update_mutex.unlock(); + self.unit = unit; + if (self.parent) |parent| { + @atomicStore(?*Node, &parent.recently_updated_child, self, .release); + if (parent.parent) |grand_parent| { + @atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .release); + } + if (progress.timer) |*timer| progress.maybeRefreshWithHeldLock(timer); + } + } + + /// Thread-safe. 0 means unknown. + pub fn setEstimatedTotalItems(self: *Node, count: usize) void { + @atomicStore(usize, &self.unprotected_estimated_total_items, count, .monotonic); + } + + /// Thread-safe. + pub fn setCompletedItems(self: *Node, completed_items: usize) void { + @atomicStore(usize, &self.unprotected_completed_items, completed_items, .monotonic); + } +}; + +/// Create a new progress node. +/// Call `Node.end` when done. +/// TODO solve https://github.com/ziglang/zig/issues/2765 and then change this +/// API to return Progress rather than accept it as a parameter. +/// `estimated_total_items` value of 0 means unknown. 
+pub fn start(self: *Progress, name: []const u8, estimated_total_items: usize) *Node { + const stderr = std.io.getStdErr(); + self.terminal = null; + if (stderr.supportsAnsiEscapeCodes()) { + self.terminal = stderr; + self.supports_ansi_escape_codes = true; + } else if (builtin.os.tag == .windows and stderr.isTty()) { + self.is_windows_terminal = true; + self.terminal = stderr; + } else if (builtin.os.tag != .windows) { + // we are in a "dumb" terminal like in acme or writing to a file + self.terminal = stderr; + } + self.root = Node{ + .context = self, + .parent = null, + .name = name, + .unprotected_estimated_total_items = estimated_total_items, + .unprotected_completed_items = 0, + }; + self.columns_written = 0; + self.prev_refresh_timestamp = 0; + self.timer = std.time.Timer.start() catch null; + self.done = false; + return &self.root; +} + +/// Updates the terminal if enough time has passed since last update. Thread-safe. +pub fn maybeRefresh(self: *Progress) void { + if (self.timer) |*timer| { + if (!self.update_mutex.tryLock()) return; + defer self.update_mutex.unlock(); + maybeRefreshWithHeldLock(self, timer); + } +} + +fn maybeRefreshWithHeldLock(self: *Progress, timer: *std.time.Timer) void { + const now = timer.read(); + if (now < self.initial_delay_ns) return; + // TODO I have observed this to happen sometimes. I think we need to follow Rust's + // lead and guarantee monotonically increasing times in the std lib itself. + if (now < self.prev_refresh_timestamp) return; + if (now - self.prev_refresh_timestamp < self.refresh_rate_ns) return; + return self.refreshWithHeldLock(); +} + +/// Updates the terminal and resets `self.next_refresh_timestamp`. Thread-safe. 
+pub fn refresh(self: *Progress) void { + if (!self.update_mutex.tryLock()) return; + defer self.update_mutex.unlock(); + + return self.refreshWithHeldLock(); +} + +fn clearWithHeldLock(p: *Progress, end_ptr: *usize) void { + const file = p.terminal orelse return; + var end = end_ptr.*; + if (p.columns_written > 0) { + // restore the cursor position by moving the cursor + // `columns_written` cells to the left, then clear the rest of the + // line + if (p.supports_ansi_escape_codes) { + end += (std.fmt.bufPrint(p.output_buffer[end..], "\x1b[{d}D", .{p.columns_written}) catch unreachable).len; + end += (std.fmt.bufPrint(p.output_buffer[end..], "\x1b[0K", .{}) catch unreachable).len; + } else if (builtin.os.tag == .windows) winapi: { + std.debug.assert(p.is_windows_terminal); + + var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; + if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE) { + // stop trying to write to this file + p.terminal = null; + break :winapi; + } + + var cursor_pos = windows.COORD{ + .X = info.dwCursorPosition.X - @as(windows.SHORT, @intCast(p.columns_written)), + .Y = info.dwCursorPosition.Y, + }; + + if (cursor_pos.X < 0) + cursor_pos.X = 0; + + const fill_chars = @as(windows.DWORD, @intCast(info.dwSize.X - cursor_pos.X)); + + var written: windows.DWORD = undefined; + if (windows.kernel32.FillConsoleOutputAttribute( + file.handle, + info.wAttributes, + fill_chars, + cursor_pos, + &written, + ) != windows.TRUE) { + // stop trying to write to this file + p.terminal = null; + break :winapi; + } + if (windows.kernel32.FillConsoleOutputCharacterW( + file.handle, + ' ', + fill_chars, + cursor_pos, + &written, + ) != windows.TRUE) { + // stop trying to write to this file + p.terminal = null; + break :winapi; + } + if (windows.kernel32.SetConsoleCursorPosition(file.handle, cursor_pos) != windows.TRUE) { + // stop trying to write to this file + p.terminal = null; + break :winapi; + } + } else { + // we are in a 
"dumb" terminal like in acme or writing to a file + p.output_buffer[end] = '\n'; + end += 1; + } + + p.columns_written = 0; + } + end_ptr.* = end; +} + +fn refreshWithHeldLock(self: *Progress) void { + const is_dumb = !self.supports_ansi_escape_codes and !self.is_windows_terminal; + if (is_dumb and self.dont_print_on_dumb) return; + + const file = self.terminal orelse return; + + var end: usize = 0; + clearWithHeldLock(self, &end); + + if (!self.done) { + var need_ellipse = false; + var maybe_node: ?*Node = &self.root; + while (maybe_node) |node| { + if (need_ellipse) { + self.bufWrite(&end, "... ", .{}); + } + need_ellipse = false; + const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .monotonic); + const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .monotonic); + const current_item = completed_items + 1; + if (node.name.len != 0 or eti > 0) { + if (node.name.len != 0) { + self.bufWrite(&end, "{s}", .{node.name}); + need_ellipse = true; + } + if (eti > 0) { + if (need_ellipse) self.bufWrite(&end, " ", .{}); + self.bufWrite(&end, "[{d}/{d}{s}] ", .{ current_item, eti, node.unit }); + need_ellipse = false; + } else if (completed_items != 0) { + if (need_ellipse) self.bufWrite(&end, " ", .{}); + self.bufWrite(&end, "[{d}{s}] ", .{ current_item, node.unit }); + need_ellipse = false; + } + } + maybe_node = @atomicLoad(?*Node, &node.recently_updated_child, .acquire); + } + if (need_ellipse) { + self.bufWrite(&end, "... 
", .{}); + } + } + + _ = file.write(self.output_buffer[0..end]) catch { + // stop trying to write to this file + self.terminal = null; + }; + if (self.timer) |*timer| { + self.prev_refresh_timestamp = timer.read(); + } +} + +pub fn log(self: *Progress, comptime format: []const u8, args: anytype) void { + const file = self.terminal orelse { + std.debug.print(format, args); + return; + }; + self.refresh(); + file.writer().print(format, args) catch { + self.terminal = null; + return; + }; + self.columns_written = 0; +} + +/// Allows the caller to freely write to stderr until unlock_stderr() is called. +/// During the lock, the progress information is cleared from the terminal. +pub fn lock_stderr(p: *Progress) void { + p.update_mutex.lock(); + if (p.terminal) |file| { + var end: usize = 0; + clearWithHeldLock(p, &end); + _ = file.write(p.output_buffer[0..end]) catch { + // stop trying to write to this file + p.terminal = null; + }; + } + std.debug.getStderrMutex().lock(); +} + +pub fn unlock_stderr(p: *Progress) void { + std.debug.getStderrMutex().unlock(); + p.update_mutex.unlock(); +} + +fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: anytype) void { + if (std.fmt.bufPrint(self.output_buffer[end.*..], format, args)) |written| { + const amt = written.len; + end.* += amt; + self.columns_written += amt; + } else |err| switch (err) { + error.NoSpaceLeft => { + self.columns_written += self.output_buffer.len - end.*; + end.* = self.output_buffer.len; + const suffix = "... "; + @memcpy(self.output_buffer[self.output_buffer.len - suffix.len ..], suffix); + }, + } +} + +test "basic functionality" { + var disable = true; + _ = &disable; + if (disable) { + // This test is disabled because it uses time.sleep() and is therefore slow. It also + // prints bogus progress data to stderr. 
+ return error.SkipZigTest; + } + var progress = Progress{}; + const root_node = progress.start("", 100); + defer root_node.end(); + + const speed_factor = std.time.ns_per_ms; + + const sub_task_names = [_][]const u8{ + "reticulating splines", + "adjusting shoes", + "climbing towers", + "pouring juice", + }; + var next_sub_task: usize = 0; + + var i: usize = 0; + while (i < 100) : (i += 1) { + var node = root_node.start(sub_task_names[next_sub_task], 5); + node.activate(); + next_sub_task = (next_sub_task + 1) % sub_task_names.len; + + node.completeOne(); + std.time.sleep(5 * speed_factor); + node.completeOne(); + node.completeOne(); + std.time.sleep(5 * speed_factor); + node.completeOne(); + node.completeOne(); + std.time.sleep(5 * speed_factor); + + node.end(); + + std.time.sleep(5 * speed_factor); + } + { + var node = root_node.start("this is a really long name designed to activate the truncation code. let's find out if it works", 0); + node.activate(); + std.time.sleep(10 * speed_factor); + progress.refresh(); + std.time.sleep(10 * speed_factor); + node.end(); + } +} diff --git a/src/StandaloneModuleGraph.zig b/src/StandaloneModuleGraph.zig index 6debf68af0..a7441cb757 100644 --- a/src/StandaloneModuleGraph.zig +++ b/src/StandaloneModuleGraph.zig @@ -313,7 +313,7 @@ pub const StandaloneModuleGraph = struct { // if we're on a mac, use clonefile() if we can // failure is okay, clonefile is just a fast path. 
if (Syscall.clonefile(self_exe, zname) == .result) { - switch (Syscall.open(zname, std.os.O.RDWR | std.os.O.CLOEXEC, 0)) { + switch (Syscall.open(zname, bun.O.RDWR | bun.O.CLOEXEC, 0)) { .result => |res| break :brk res, .err => {}, } @@ -325,7 +325,7 @@ pub const StandaloneModuleGraph = struct { const fd = brk2: { var tried_changing_abs_dir = false; for (0..3) |retry| { - switch (Syscall.open(zname, std.os.O.CLOEXEC | std.os.O.RDWR | std.os.O.CREAT, 0)) { + switch (Syscall.open(zname, bun.O.CLOEXEC | bun.O.RDWR | bun.O.CREAT, 0)) { .result => |res| break :brk2 res, .err => |err| { if (retry < 2) { @@ -366,7 +366,7 @@ pub const StandaloneModuleGraph = struct { }; const self_fd = brk2: { for (0..3) |retry| { - switch (Syscall.open(self_exe, std.os.O.CLOEXEC | std.os.O.RDONLY, 0)) { + switch (Syscall.open(self_exe, bun.O.CLOEXEC | bun.O.RDONLY, 0)) { .result => |res| break :brk2 res, .err => |err| { if (retry < 2) { @@ -540,7 +540,7 @@ pub const StandaloneModuleGraph = struct { if (comptime Environment.isMac) { if (target.os == .mac) { - var signer = std.ChildProcess.init( + var signer = std.process.Child.init( &.{ "codesign", "--remove-signature", @@ -564,9 +564,9 @@ pub const StandaloneModuleGraph = struct { bun.C.moveFileZWithHandle( fd, bun.FD.cwd(), - bun.sliceTo(&(try std.os.toPosixPath(temp_location)), 0), + bun.sliceTo(&(try std.posix.toPosixPath(temp_location)), 0), bun.toFD(root_dir.fd), - bun.sliceTo(&(try std.os.toPosixPath(std.fs.path.basename(outfile))), 0), + bun.sliceTo(&(try std.posix.toPosixPath(std.fs.path.basename(outfile))), 0), ) catch |err| { if (err == error.IsDir) { Output.prettyErrorln("error: {} is a directory. 
Please choose a different --outfile or delete the directory", .{bun.fmt.quote(outfile)}); @@ -574,7 +574,7 @@ pub const StandaloneModuleGraph = struct { Output.prettyErrorln("error: failed to rename {s} to {s}: {s}", .{ temp_location, outfile, @errorName(err) }); } _ = Syscall.unlink( - &(try std.os.toPosixPath(temp_location)), + &(try std.posix.toPosixPath(temp_location)), ); Global.exit(1); @@ -587,7 +587,7 @@ pub const StandaloneModuleGraph = struct { defer _ = Syscall.close(self_exe); var trailer_bytes: [4096]u8 = undefined; - std.os.lseek_END(self_exe.cast(), -4096) catch return null; + std.posix.lseek_END(self_exe.cast(), -4096) catch return null; var read_amount: usize = 0; while (read_amount < trailer_bytes.len) { @@ -640,7 +640,7 @@ pub const StandaloneModuleGraph = struct { // if you have not a ton of code, we only do a single read() call if (Environment.allow_assert or offsets.byte_count > 1024 * 3) { const offset_from_end = trailer_bytes.len - (@intFromPtr(end) - @intFromPtr(@as([]u8, &trailer_bytes).ptr)); - std.os.lseek_END(self_exe.cast(), -@as(i64, @intCast(offset_from_end + offsets.byte_count))) catch return null; + std.posix.lseek_END(self_exe.cast(), -@as(i64, @intCast(offset_from_end + offsets.byte_count))) catch return null; if (comptime Environment.allow_assert) { // actually we just want to verify this logic is correct in development @@ -750,7 +750,7 @@ pub const StandaloneModuleGraph = struct { }, .windows => { const image_path_unicode_string = std.os.windows.peb().ProcessParameters.ImagePathName; - const image_path = image_path_unicode_string.Buffer[0 .. image_path_unicode_string.Length / 2]; + const image_path = image_path_unicode_string.Buffer.?[0 .. 
image_path_unicode_string.Length / 2]; var nt_path_buf: bun.WPathBuffer = undefined; const nt_path = bun.strings.addNTPathPrefix(&nt_path_buf, image_path); diff --git a/src/StaticHashMap.zig b/src/StaticHashMap.zig index 8cf093de64..2e8dd64949 100644 --- a/src/StaticHashMap.zig +++ b/src/StaticHashMap.zig @@ -235,11 +235,11 @@ fn HashMapMixin( } } - pub fn get(self: *Self, key: K) ?V { + pub fn get(self: *const Self, key: K) ?V { return self.getContext(key, undefined); } - pub fn getContext(self: *Self, key: K, ctx: Context) ?V { + pub fn getContext(self: *const Self, key: K, ctx: Context) ?V { const hash = ctx.hash(key); assert(hash != Self.empty_hash); @@ -254,11 +254,11 @@ fn HashMapMixin( } } - pub fn has(self: *Self, key: K) bool { + pub fn has(self: *const Self, key: K) bool { return self.hasContext(key, undefined); } - pub fn hasWithHash(self: *Self, key_hash: u64) bool { + pub fn hasWithHash(self: *const Self, key_hash: u64) bool { assert(key_hash != Self.empty_hash); for (self.entries[key_hash >> self.shift ..]) |entry| { @@ -270,7 +270,7 @@ fn HashMapMixin( return false; } - pub fn hasContext(self: *Self, key: K, ctx: Context) bool { + pub fn hasContext(self: *const Self, key: K, ctx: Context) bool { const hash = ctx.hash(key); assert(hash != Self.empty_hash); diff --git a/src/allocators.zig b/src/allocators.zig index bc279c27a2..31bc747fa6 100644 --- a/src/allocators.zig +++ b/src/allocators.zig @@ -188,7 +188,7 @@ pub fn BSSList(comptime ValueType: type, comptime _count: anytype) type { prev: ?*OverflowBlock = null, pub fn append(this: *OverflowBlock, item: ValueType) !*ValueType { - const index = this.used.fetchAdd(1, .AcqRel); + const index = this.used.fetchAdd(1, .acq_rel); if (index >= ChunkSize) return error.OutOfMemory; this.data[index] = item; return &this.data[index]; diff --git a/src/analytics/analytics_thread.zig b/src/analytics/analytics_thread.zig index 3d6ad1099d..dc0ec59737 100644 --- a/src/analytics/analytics_thread.zig +++ 
b/src/analytics/analytics_thread.zig @@ -186,7 +186,7 @@ pub fn validateFeatureName(name: []const u8) void { pub const packed_features_list = brk: { const decls = std.meta.declarations(Features); - var names: [decls.len][]const u8 = undefined; + var names: [decls.len][:0]const u8 = undefined; var i = 0; for (decls) |decl| { if (@TypeOf(@field(Features, decl.name)) == usize) { @@ -195,12 +195,12 @@ pub const packed_features_list = brk: { i += 1; } } - break :brk names[0..i]; + break :brk names[0..i].*; }; pub const PackedFeatures = @Type(.{ .Struct = .{ - .layout = .Packed, + .layout = .@"packed", .backing_integer = u64, .fields = brk: { var fields: [64]std.builtin.Type.StructField = undefined; diff --git a/src/async/posix_event_loop.zig b/src/async/posix_event_loop.zig index f531940a69..aa42d1848c 100644 --- a/src/async/posix_event_loop.zig +++ b/src/async/posix_event_loop.zig @@ -85,7 +85,7 @@ pub const KeepAlive = struct { if (this.status != .active) return; this.status = .inactive; - _ = @atomicRmw(@TypeOf(vm.pending_unref_counter), &vm.pending_unref_counter, .Add, 1, .Monotonic); + _ = @atomicRmw(@TypeOf(vm.pending_unref_counter), &vm.pending_unref_counter, .Add, 1, .monotonic); } /// Allow a poll to keep the process alive. 
@@ -225,7 +225,7 @@ pub const FilePoll = struct { return .pipe; } - pub fn onKQueueEvent(poll: *FilePoll, _: *Loop, kqueue_event: *const std.os.system.kevent64_s) void { + pub fn onKQueueEvent(poll: *FilePoll, _: *Loop, kqueue_event: *const std.posix.system.kevent64_s) void { poll.updateFlags(Flags.fromKQueueEvent(kqueue_event.*)); log("onKQueueEvent: {}", .{poll}); @@ -499,21 +499,21 @@ pub const FilePoll = struct { } } - pub fn fromKQueueEvent(kqueue_event: std.os.system.kevent64_s) Flags.Set { + pub fn fromKQueueEvent(kqueue_event: std.posix.system.kevent64_s) Flags.Set { var flags = Flags.Set{}; - if (kqueue_event.filter == std.os.system.EVFILT_READ) { + if (kqueue_event.filter == std.posix.system.EVFILT_READ) { flags.insert(Flags.readable); - if (kqueue_event.flags & std.os.system.EV_EOF != 0) { + if (kqueue_event.flags & std.posix.system.EV_EOF != 0) { flags.insert(Flags.hup); } - } else if (kqueue_event.filter == std.os.system.EVFILT_WRITE) { + } else if (kqueue_event.filter == std.posix.system.EVFILT_WRITE) { flags.insert(Flags.writable); - if (kqueue_event.flags & std.os.system.EV_EOF != 0) { + if (kqueue_event.flags & std.posix.system.EV_EOF != 0) { flags.insert(Flags.hup); } - } else if (kqueue_event.filter == std.os.system.EVFILT_PROC) { + } else if (kqueue_event.filter == std.posix.system.EVFILT_PROC) { flags.insert(Flags.process); - } else if (kqueue_event.filter == std.os.system.EVFILT_MACHPORT) { + } else if (kqueue_event.filter == std.posix.system.EVFILT_MACHPORT) { flags.insert(Flags.machport); } return flags; @@ -783,7 +783,7 @@ pub const FilePoll = struct { @export(onTick, .{ .name = "Bun__internal_dispatch_ready_poll" }); } - const timeout = std.mem.zeroes(std.os.timespec); + const timeout = std.mem.zeroes(std.posix.timespec); const kevent = std.c.kevent; const linux = std.os.linux; @@ -831,7 +831,7 @@ pub const FilePoll = struct { return errno; } } else if (comptime Environment.isMac) { - var changelist = 
std.mem.zeroes([2]std.os.system.kevent64_s); + var changelist = std.mem.zeroes([2]std.posix.system.kevent64_s); const one_shot_flag: u16 = if (!this.flags.contains(.one_shot)) 0 else if (one_shot == .dispatch) @@ -842,7 +842,7 @@ pub const FilePoll = struct { changelist[0] = switch (flag) { .readable => .{ .ident = @intCast(fd.cast()), - .filter = std.os.system.EVFILT_READ, + .filter = std.posix.system.EVFILT_READ, .data = 0, .fflags = 0, .udata = @intFromPtr(Pollable.init(this).ptr()), @@ -851,7 +851,7 @@ pub const FilePoll = struct { }, .writable => .{ .ident = @intCast(fd.cast()), - .filter = std.os.system.EVFILT_WRITE, + .filter = std.posix.system.EVFILT_WRITE, .data = 0, .fflags = 0, .udata = @intFromPtr(Pollable.init(this).ptr()), @@ -860,7 +860,7 @@ pub const FilePoll = struct { }, .process => .{ .ident = @intCast(fd.cast()), - .filter = std.os.system.EVFILT_PROC, + .filter = std.posix.system.EVFILT_PROC, .data = 0, .fflags = std.c.NOTE_EXIT, .udata = @intFromPtr(Pollable.init(this).ptr()), @@ -869,7 +869,7 @@ pub const FilePoll = struct { }, .machport => .{ .ident = @intCast(fd.cast()), - .filter = std.os.system.EVFILT_MACHPORT, + .filter = std.posix.system.EVFILT_MACHPORT, .data = 0, .fflags = 0, .udata = @intFromPtr(Pollable.init(this).ptr()), @@ -887,7 +887,7 @@ pub const FilePoll = struct { // limit expires, then kevent() returns 0. const rc = rc: { while (true) { - const rc = std.os.system.kevent64( + const rc = std.posix.system.kevent64( watcher_fd, &changelist, 1, @@ -900,7 +900,7 @@ pub const FilePoll = struct { &timeout, ); - if (std.c.getErrno(rc) == .INTR) continue; + if (bun.C.getErrno(rc) == .INTR) continue; break :rc rc; } }; @@ -917,7 +917,7 @@ pub const FilePoll = struct { // indicate the error condition. 
} - const errno = std.c.getErrno(rc); + const errno = bun.C.getErrno(rc); if (errno != .SUCCESS) { this.deactivate(loop); @@ -1001,12 +1001,12 @@ pub const FilePoll = struct { return errno; } } else if (comptime Environment.isMac) { - var changelist = std.mem.zeroes([2]std.os.system.kevent64_s); + var changelist = std.mem.zeroes([2]std.posix.system.kevent64_s); changelist[0] = switch (flag) { .readable => .{ .ident = @intCast(fd.cast()), - .filter = std.os.system.EVFILT_READ, + .filter = std.posix.system.EVFILT_READ, .data = 0, .fflags = 0, .udata = @intFromPtr(Pollable.init(this).ptr()), @@ -1015,7 +1015,7 @@ pub const FilePoll = struct { }, .machport => .{ .ident = @intCast(fd.cast()), - .filter = std.os.system.EVFILT_MACHPORT, + .filter = std.posix.system.EVFILT_MACHPORT, .data = 0, .fflags = 0, .udata = @intFromPtr(Pollable.init(this).ptr()), @@ -1024,7 +1024,7 @@ pub const FilePoll = struct { }, .writable => .{ .ident = @intCast(fd.cast()), - .filter = std.os.system.EVFILT_WRITE, + .filter = std.posix.system.EVFILT_WRITE, .data = 0, .fflags = 0, .udata = @intFromPtr(Pollable.init(this).ptr()), @@ -1033,7 +1033,7 @@ pub const FilePoll = struct { }, .process => .{ .ident = @intCast(fd.cast()), - .filter = std.os.system.EVFILT_PROC, + .filter = std.posix.system.EVFILT_PROC, .data = 0, .fflags = std.c.NOTE_EXIT, .udata = @intFromPtr(Pollable.init(this).ptr()), @@ -1049,7 +1049,7 @@ pub const FilePoll = struct { // The kevent() system call returns the number of events placed in // the eventlist, up to the value given by nevents. If the time // limit expires, then kevent() returns 0. - const rc = std.os.system.kevent64( + const rc = std.posix.system.kevent64( watcher_fd, &changelist, 1, @@ -1069,7 +1069,7 @@ pub const FilePoll = struct { // indicate the error condition. 
} - const errno = std.c.getErrno(rc); + const errno = bun.C.getErrno(rc); switch (rc) { std.math.minInt(@TypeOf(rc))...-1 => return JSC.Maybe(void).errnoSys(@intFromEnum(errno), .kevent).?, else => {}, @@ -1109,7 +1109,7 @@ pub const Closer = struct { } fn onClose(task: *JSC.WorkPoolTask) void { - const closer = @fieldParentPtr(Closer, "task", task); + const closer: *Closer = @fieldParentPtr("task", task); defer closer.destroy(); _ = bun.sys.close(closer.fd); } diff --git a/src/async/windows_event_loop.zig b/src/async/windows_event_loop.zig index 09262e2895..24a2d7647c 100644 --- a/src/async/windows_event_loop.zig +++ b/src/async/windows_event_loop.zig @@ -405,7 +405,7 @@ pub const Closer = struct { } fn onClose(req: *uv.fs_t) callconv(.C) void { - var closer = @fieldParentPtr(Closer, "io_request", req); + var closer: *Closer = @fieldParentPtr("io_request", req); bun.assert(closer == @as(*Closer, @alignCast(@ptrCast(req.data.?)))); bun.sys.syslog("uv_fs_close({}) = {}", .{ bun.toFD(req.file.fd), req.result }); diff --git a/src/boringssl.zig b/src/boringssl.zig index 351706bf20..3f3877ef36 100644 --- a/src/boringssl.zig +++ b/src/boringssl.zig @@ -90,10 +90,10 @@ pub fn canonicalizeIP(addr_str: []const u8, outIP: *[INET6_ADDRSTRLEN + 1]u8) ?[ bun.copy(u8, outIP, addr_str); outIP[addr_str.len] = 0; - var af: c_int = std.os.AF.INET; + var af: c_int = std.posix.AF.INET; // get the standard text representation of the IP if (c_ares.ares_inet_pton(af, outIP, &ip_std_text) <= 0) { - af = std.os.AF.INET6; + af = std.posix.AF.INET6; if (c_ares.ares_inet_pton(af, outIP, &ip_std_text) <= 0) { return null; } @@ -110,7 +110,7 @@ pub fn canonicalizeIP(addr_str: []const u8, outIP: *[INET6_ADDRSTRLEN + 1]u8) ?[ /// converts ASN1_OCTET_STRING to canonicalized IP string /// return null when the IP is invalid pub fn ip2String(ip: *boring.ASN1_OCTET_STRING, outIP: *[INET6_ADDRSTRLEN + 1]u8) ?[]const u8 { - const af: c_int = if (ip.length == 4) std.os.AF.INET else std.os.AF.INET6; + 
const af: c_int = if (ip.length == 4) std.posix.AF.INET else std.posix.AF.INET6; if (c_ares.ares_inet_ntop(af, ip.data, outIP, outIP.len) == null) { return null; } diff --git a/src/bun.js/ConsoleObject.zig b/src/bun.js/ConsoleObject.zig index f8febb9b71..81918f916f 100644 --- a/src/bun.js/ConsoleObject.zig +++ b/src/bun.js/ConsoleObject.zig @@ -3097,7 +3097,9 @@ pub const Formatter = struct { else ", ... {d} more"; - writer.print(comptime Output.prettyFmt(fmt_, enable_ansi_colors), .{slice[0]}); + writer.print(comptime Output.prettyFmt(fmt_, enable_ansi_colors), .{ + if (@typeInfo(Number) == .Float) bun.fmt.fmtDouble(@floatCast(slice[0])) else slice[0], + }); var leftover = slice[1..]; const max = 512; leftover = leftover[0..@min(leftover.len, max)]; @@ -3105,7 +3107,9 @@ pub const Formatter = struct { this.printComma(@TypeOf(&writer.ctx), &writer.ctx, enable_ansi_colors) catch return; writer.space(); - writer.print(comptime Output.prettyFmt(fmt_, enable_ansi_colors), .{el}); + writer.print(comptime Output.prettyFmt(fmt_, enable_ansi_colors), .{ + if (@typeInfo(Number) == .Float) bun.fmt.fmtDouble(@floatCast(el)) else el, + }); } if (slice.len > max + 1) { diff --git a/src/bun.js/RuntimeTranspilerCache.zig b/src/bun.js/RuntimeTranspilerCache.zig index 8730027aea..113007648d 100644 --- a/src/bun.js/RuntimeTranspilerCache.zig +++ b/src/bun.js/RuntimeTranspilerCache.zig @@ -228,13 +228,8 @@ pub const RuntimeTranspilerCache = struct { if (bun.Environment.allow_assert) { var total: usize = 0; for (vecs) |v| { - if (comptime bun.Environment.isWindows) { - bun.assert(v.len > 0); - total += v.len; - } else { - bun.assert(v.iov_len > 0); - total += v.iov_len; - } + bun.assert(v.len > 0); + total += v.len; } bun.assert(end_position == total); } @@ -477,7 +472,7 @@ pub const RuntimeTranspilerCache = struct { output_code_allocator: std.mem.Allocator, ) !Entry { var metadata_bytes_buf: [Metadata.size * 2]u8 = undefined; - const cache_fd = try 
bun.sys.open(cache_file_path.sliceAssumeZ(), std.os.O.RDONLY, 0).unwrap(); + const cache_fd = try bun.sys.open(cache_file_path.sliceAssumeZ(), bun.O.RDONLY, 0).unwrap(); defer _ = bun.sys.close(cache_fd); errdefer { // On any error, we delete the cache file diff --git a/src/bun.js/WebKit b/src/bun.js/WebKit index 353aa20567..64d04ec1a6 160000 --- a/src/bun.js/WebKit +++ b/src/bun.js/WebKit @@ -1 +1 @@ -Subproject commit 353aa20567e80a74eb43694a27cdf41f4a56ccef +Subproject commit 64d04ec1a65d91326c5f2298b9c7d05b56125252 diff --git a/src/bun.js/api/BunObject.zig b/src/bun.js/api/BunObject.zig index 80ade0af8b..15e07c086a 100644 --- a/src/bun.js/api/BunObject.zig +++ b/src/bun.js/api/BunObject.zig @@ -854,7 +854,7 @@ pub fn getMain( vm.main, // Open with the minimum permissions necessary for resolving the file path. - if (comptime Environment.isLinux) std.os.O.PATH else std.os.O.RDONLY, + if (comptime Environment.isLinux) bun.O.PATH else bun.O.RDONLY, 0, ).unwrap() catch break :use_resolved_path; @@ -1506,7 +1506,7 @@ pub const Crypto = struct { pub usingnamespace bun.New(@This()); pub fn runTask(task: *JSC.WorkPoolTask) void { - const job = @fieldParentPtr(PBKDF2.Job, "task", task); + const job: *PBKDF2.Job = @fieldParentPtr("task", task); defer job.vm.enqueueTaskConcurrent(JSC.ConcurrentTask.create(job.any_task.task())); job.output = bun.default_allocator.alloc(u8, @as(usize, @intCast(job.pbkdf2.length))) catch { job.err = BoringSSL.EVP_R_MEMORY_LIMIT_EXCEEDED; @@ -2198,7 +2198,7 @@ pub const Crypto = struct { } pub fn run(task: *bun.ThreadPool.Task) void { - var this = @fieldParentPtr(HashJob, "task", task); + var this: *HashJob = @fieldParentPtr("task", task); var result = bun.default_allocator.create(Result) catch bun.outOfMemory(); result.* = Result{ @@ -2440,7 +2440,7 @@ pub const Crypto = struct { } pub fn run(task: *bun.ThreadPool.Task) void { - var this = @fieldParentPtr(VerifyJob, "task", task); + var this: *VerifyJob = @fieldParentPtr("task", task); var 
result = bun.default_allocator.create(Result) catch bun.outOfMemory(); result.* = Result{ @@ -2890,6 +2890,14 @@ pub const Crypto = struct { .{ "shake256", std.crypto.hash.sha3.Shake256 }, }; + inline fn digestLength(Algorithm: type) comptime_int { + return switch (Algorithm) { + std.crypto.hash.sha3.Shake128 => 16, + std.crypto.hash.sha3.Shake256 => 32, + else => Algorithm.digest_length, + }; + } + pub fn hashByName( globalThis: *JSGlobalObject, algorithm: ZigString, @@ -2929,7 +2937,7 @@ pub const Crypto = struct { } var h = Algorithm.init(.{}); - const digest_length_comptime = Algorithm.digest_length; + const digest_length_comptime = digestLength(Algorithm); if (output) |output_buf| { if (output_buf.byteSlice().len < digest_length_comptime) { @@ -2944,7 +2952,7 @@ pub const Crypto = struct { h.final(output_buf.slice()[0..digest_length_comptime]); return output_buf.value; } else { - var out: [Algorithm.digest_length]u8 = undefined; + var out: [digestLength(Algorithm)]u8 = undefined; h.final(&out); // Clone to GC-managed memory return JSC.ArrayBuffer.createBuffer(globalThis, &out); @@ -2956,8 +2964,8 @@ pub const Crypto = struct { if (bun.strings.eqlComptime(algorithm.slice(), item[0])) { return CryptoHasher.new(.{ .zig = .{ .algorithm = @field(EVP.Algorithm, item[0]), - .state = bun.new(item[1], .{}), - .digest_length = item[1].digest_length, + .state = bun.new(item[1], item[1].init(.{})), + .digest_length = digestLength(item[1]), } }); } } @@ -3540,19 +3548,24 @@ pub fn mmapFile( const buf_z: [:0]const u8 = buf[0..path.len :0]; - const sync_flags: u32 = if (@hasDecl(std.os.MAP, "SYNC")) std.os.MAP.SYNC | std.os.MAP.SHARED_VALIDATE else 0; - const file_flags: u32 = if (@hasDecl(std.os.MAP, "FILE")) std.os.MAP.FILE else 0; + var flags: std.c.MAP = .{ .TYPE = .SHARED }; // Conforming applications must specify either MAP_PRIVATE or MAP_SHARED. 
var offset: usize = 0; - var flags = file_flags; var map_size: ?usize = null; if (args.nextEat()) |opts| { - const sync = opts.get(globalThis, "sync") orelse JSC.JSValue.jsBoolean(false); - const shared = opts.get(globalThis, "shared") orelse JSC.JSValue.jsBoolean(true); - flags |= @as(u32, if (sync.toBoolean()) sync_flags else 0); - flags |= @as(u32, if (shared.toBoolean()) std.os.MAP.SHARED else std.os.MAP.PRIVATE); + flags.TYPE = if ((opts.get(globalThis, "shared") orelse JSValue.true).toBoolean()) + .SHARED + else + .PRIVATE; + + if (@hasField(std.c.MAP, "SYNC")) { + if ((opts.get(globalThis, "sync") orelse JSValue.false).toBoolean()) { + flags.TYPE = .SHARED_VALIDATE; + flags.SYNC = true; + } + } if (opts.get(globalThis, "size")) |value| { map_size = @as(usize, @intCast(value.toInt64())); @@ -3562,8 +3575,6 @@ pub fn mmapFile( offset = @as(usize, @intCast(value.toInt64())); offset = std.mem.alignBackwardAnyAlign(offset, std.mem.page_size); } - } else { - flags |= std.os.MAP.SHARED; } const map = switch (bun.sys.mmapFile(buf_z, flags, map_size, offset)) { diff --git a/src/bun.js/api/Timer.zig b/src/bun.js/api/Timer.zig index 83d6768995..5fda636d0b 100644 --- a/src/bun.js/api/Timer.zig +++ b/src/bun.js/api/Timer.zig @@ -54,7 +54,7 @@ pub const All = struct { timer.state = .ACTIVE; if (Environment.isWindows) { - this.ensureUVTimer(@fieldParentPtr(JSC.VirtualMachine, "timer", this)); + this.ensureUVTimer(@alignCast(@fieldParentPtr("timer", this))); } } @@ -91,14 +91,14 @@ pub const All = struct { } pub fn onUVTimer(uv_timer_t: *uv.Timer) callconv(.C) void { - const all = @fieldParentPtr(All, "uv_timer", uv_timer_t); - const vm = @fieldParentPtr(JSC.VirtualMachine, "timer", all); + const all: *All = @fieldParentPtr("uv_timer", uv_timer_t); + const vm: *VirtualMachine = @alignCast(@fieldParentPtr("timer", all)); all.drainTimers(vm); all.ensureUVTimer(vm); } pub fn incrementTimerRef(this: *All, delta: i32) void { - const vm = @fieldParentPtr(JSC.VirtualMachine, 
"timer", this); + const vm: *JSC.VirtualMachine = @alignCast(@fieldParentPtr("timer", this)); const old = this.active_timer_count; const new = old + delta; @@ -298,7 +298,10 @@ pub const All = struct { // It would be a weird situation, security-wise, if we were to let // the user cancel a timer that was of a different type. if (entry.value.tag == .TimerObject) { - break :brk @fieldParentPtr(TimerObject, "event_loop_timer", entry.value); + break :brk @as( + *TimerObject, + @fieldParentPtr("event_loop_timer", entry.value), + ); } } @@ -749,8 +752,8 @@ pub const EventLoopTimer = struct { const order = a.next.order(&b.next); if (order == .eq) { if (a.tag == .TimerObject and b.tag == .TimerObject) { - const a_timer = @fieldParentPtr(TimerObject, "event_loop_timer", a); - const b_timer = @fieldParentPtr(TimerObject, "event_loop_timer", b); + const a_timer: *const TimerObject = @fieldParentPtr("event_loop_timer", a); + const b_timer: *const TimerObject = @fieldParentPtr("event_loop_timer", b); return a_timer.id < b_timer.id; } @@ -774,7 +777,7 @@ pub const EventLoopTimer = struct { pub fn fire(this: *EventLoopTimer, now: *const timespec, vm: *VirtualMachine) Arm { switch (this.tag) { inline else => |t| { - var container: *t.Type() = @fieldParentPtr(t.Type(), "event_loop_timer", this); + var container: *t.Type() = @alignCast(@fieldParentPtr("event_loop_timer", this)); if (comptime t.Type() == TimerObject) { return container.fire(now, vm); } diff --git a/src/bun.js/api/brotli.zig b/src/bun.js/api/brotli.zig index 6512ccc461..9a467a81a1 100644 --- a/src/bun.js/api/brotli.zig +++ b/src/bun.js/api/brotli.zig @@ -32,7 +32,7 @@ pub const BrotliEncoder = struct { poll_ref: bun.Async.KeepAlive = .{}, pub fn hasPendingActivity(this: *BrotliEncoder) callconv(.C) bool { - return this.has_pending_activity.load(.Monotonic) > 0; + return this.has_pending_activity.load(.monotonic) > 0; } pub fn constructor(globalThis: *JSC.JSGlobalObject, _: *JSC.CallFrame) callconv(.C) ?*BrotliEncoder { 
@@ -112,7 +112,7 @@ pub const BrotliEncoder = struct { pub fn runFromJSThread(this: *BrotliEncoder) void { this.poll_ref.unref(this.globalThis.bunVM()); - defer _ = this.has_pending_activity.fetchSub(1, .Monotonic); + defer _ = this.has_pending_activity.fetchSub(1, .monotonic); this.drainFreelist(); const result = this.callback_value.get().?.call(this.globalThis, &.{ @@ -141,19 +141,19 @@ pub const BrotliEncoder = struct { pub usingnamespace bun.New(@This()); pub fn runTask(this: *JSC.WorkPoolTask) void { - var job: *EncodeJob = @fieldParentPtr(EncodeJob, "task", this); + var job: *EncodeJob = @fieldParentPtr("task", this); job.run(); job.destroy(); } pub fn run(this: *EncodeJob) void { defer { - _ = this.encoder.has_pending_activity.fetchSub(1, .Monotonic); + _ = this.encoder.has_pending_activity.fetchSub(1, .monotonic); } var any = false; - if (this.encoder.pending_encode_job_count.fetchAdd(1, .Monotonic) >= 0) { + if (this.encoder.pending_encode_job_count.fetchAdd(1, .monotonic) >= 0) { const is_last = this.encoder.has_called_end; while (true) { this.encoder.input_lock.lock(); @@ -182,7 +182,7 @@ pub const BrotliEncoder = struct { for (pending) |input| { var writer = this.encoder.stream.writer(Writer{ .encoder = this.encoder }); writer.writeAll(input.slice()) catch { - _ = this.encoder.pending_encode_job_count.fetchSub(1, .Monotonic); + _ = this.encoder.pending_encode_job_count.fetchSub(1, .monotonic); this.encoder.write_failed = true; return; }; @@ -190,7 +190,7 @@ pub const BrotliEncoder = struct { any = any or pending.len > 0; - if (this.encoder.pending_encode_job_count.fetchSub(1, .Monotonic) == 0) + if (this.encoder.pending_encode_job_count.fetchSub(1, .monotonic) == 0) break; } @@ -200,11 +200,11 @@ pub const BrotliEncoder = struct { defer this.encoder.output_lock.unlock(); output.appendSlice(bun.default_allocator, this.encoder.stream.end() catch { - _ = this.encoder.pending_encode_job_count.fetchSub(1, .Monotonic); + _ = 
this.encoder.pending_encode_job_count.fetchSub(1, .monotonic); this.encoder.write_failed = true; return; }) catch { - _ = this.encoder.pending_encode_job_count.fetchSub(1, .Monotonic); + _ = this.encoder.pending_encode_job_count.fetchSub(1, .monotonic); this.encoder.write_failed = true; return; }; @@ -213,7 +213,7 @@ pub const BrotliEncoder = struct { if (this.is_async and any) { var vm = this.encoder.globalThis.bunVMConcurrently(); - _ = this.encoder.has_pending_activity.fetchAdd(1, .Monotonic); + _ = this.encoder.has_pending_activity.fetchAdd(1, .monotonic); this.encoder.poll_ref.refConcurrently(vm); vm.enqueueTaskConcurrent(JSC.ConcurrentTask.create(JSC.Task.init(this.encoder))); } @@ -242,7 +242,7 @@ pub const BrotliEncoder = struct { return .zero; }; - _ = this.has_pending_activity.fetchAdd(1, .Monotonic); + _ = this.has_pending_activity.fetchAdd(1, .monotonic); if (is_last) this.has_called_end = true; @@ -284,7 +284,7 @@ pub const BrotliEncoder = struct { return .zero; }; - _ = this.has_pending_activity.fetchAdd(1, .Monotonic); + _ = this.has_pending_activity.fetchAdd(1, .monotonic); if (is_last) this.has_called_end = true; @@ -345,7 +345,7 @@ pub const BrotliDecoder = struct { freelist_write_lock: bun.Lock = bun.Lock.init(), pub fn hasPendingActivity(this: *BrotliDecoder) callconv(.C) bool { - return this.has_pending_activity.load(.Monotonic) > 0; + return this.has_pending_activity.load(.monotonic) > 0; } pub fn deinit(this: *BrotliDecoder) void { @@ -417,7 +417,7 @@ pub const BrotliDecoder = struct { pub fn runFromJSThread(this: *BrotliDecoder) void { this.poll_ref.unref(this.globalThis.bunVM()); - defer _ = this.has_pending_activity.fetchSub(1, .Monotonic); + defer _ = this.has_pending_activity.fetchSub(1, .monotonic); this.drainFreelist(); const result = this.callback_value.get().?.call(this.globalThis, &.{ @@ -466,7 +466,7 @@ pub const BrotliDecoder = struct { return .zero; }; - _ = this.has_pending_activity.fetchAdd(1, .Monotonic); + _ = 
this.has_pending_activity.fetchAdd(1, .monotonic); if (is_last) this.has_called_end = true; @@ -508,7 +508,7 @@ pub const BrotliDecoder = struct { return .zero; }; - _ = this.has_pending_activity.fetchAdd(1, .Monotonic); + _ = this.has_pending_activity.fetchAdd(1, .monotonic); if (is_last) this.has_called_end = true; @@ -539,19 +539,19 @@ pub const BrotliDecoder = struct { pub usingnamespace bun.New(@This()); pub fn runTask(this: *JSC.WorkPoolTask) void { - var job: *DecodeJob = @fieldParentPtr(DecodeJob, "task", this); + var job: *DecodeJob = @fieldParentPtr("task", this); job.run(); job.destroy(); } pub fn run(this: *DecodeJob) void { defer { - _ = this.decoder.has_pending_activity.fetchSub(1, .Monotonic); + _ = this.decoder.has_pending_activity.fetchSub(1, .monotonic); } var any = false; - if (this.decoder.pending_decode_job_count.fetchAdd(1, .Monotonic) >= 0) { + if (this.decoder.pending_decode_job_count.fetchAdd(1, .monotonic) >= 0) { const is_last = this.decoder.has_called_end; while (true) { this.decoder.input_lock.lock(); @@ -581,7 +581,7 @@ pub const BrotliDecoder = struct { const input = if (pending.len <= 1) pending[0].slice() else input_list.items; this.decoder.stream.input = input; this.decoder.stream.readAll(false) catch { - _ = this.decoder.pending_decode_job_count.fetchSub(1, .Monotonic); + _ = this.decoder.pending_decode_job_count.fetchSub(1, .monotonic); this.decoder.write_failed = true; return; }; @@ -589,14 +589,14 @@ pub const BrotliDecoder = struct { any = any or pending.len > 0; - if (this.decoder.pending_decode_job_count.fetchSub(1, .Monotonic) == 0) + if (this.decoder.pending_decode_job_count.fetchSub(1, .monotonic) == 0) break; } } if (this.is_async and any) { var vm = this.decoder.globalThis.bunVMConcurrently(); - _ = this.decoder.has_pending_activity.fetchAdd(1, .Monotonic); + _ = this.decoder.has_pending_activity.fetchAdd(1, .monotonic); this.decoder.poll_ref.refConcurrently(vm); 
vm.enqueueTaskConcurrent(JSC.ConcurrentTask.create(JSC.Task.init(this.decoder))); } diff --git a/src/bun.js/api/bun/dns_resolver.zig b/src/bun.js/api/bun/dns_resolver.zig index c7c86109ab..c4bc06f8aa 100644 --- a/src/bun.js/api/bun/dns_resolver.zig +++ b/src/bun.js/api/bun/dns_resolver.zig @@ -113,12 +113,13 @@ const LibInfo = struct { ); if (errno != 0) { - request.head.promise.rejectTask(globalThis, globalThis.createErrorInstance("getaddrinfo_async_start error: {s}", .{@tagName(std.c.getErrno(errno))})); + request.head.promise.rejectTask(globalThis, globalThis.createErrorInstance("getaddrinfo_async_start error: {s}", .{@tagName(bun.C.getErrno(errno))})); if (request.cache.pending_cache) this.pending_host_cache_native.available.set(request.cache.pos_in_pending); this.vm.allocator.destroy(request); return promise_value; } + bun.assert(request.backend.libinfo.machport != null); var poll = bun.Async.FilePoll.init(this.vm, bun.toFD(std.math.maxInt(i32) - 1), .{}, GetAddrInfoRequest, request); request.backend.libinfo.file_poll = poll; @@ -126,11 +127,9 @@ const LibInfo = struct { this.vm.event_loop_handle.?, .machport, .one_shot, - bun.toFD(@intFromPtr(request.backend.libinfo.machport)), - ); - bun.assert( - rc == .result, + bun.toFD(@as(i32, @intCast(@intFromPtr(request.backend.libinfo.machport)))), ); + bun.assert(rc == .result); poll.enableKeepingProcessAlive(this.vm.eventLoop()); @@ -714,7 +713,7 @@ pub const GetAddrInfoRequest = struct { addr_info: ?*std.c.addrinfo, arg: ?*anyopaque, ) callconv(.C) void { - const this = @as(*GetAddrInfoRequest, @ptrFromInt(@intFromPtr(arg))); + const this = @as(*GetAddrInfoRequest, @ptrCast(@alignCast(arg))); log("getAddrInfoAsyncCallback: status={d}", .{status}); if (this.backend == .libinfo) { @@ -1075,7 +1074,7 @@ pub const DNSLookup = struct { const error_value = brk: { if (err == .ESERVFAIL) { - break :brk bun.sys.Error.fromCode(bun.C.getErrno(-1), .getaddrinfo).toJSC(globalThis); + break :brk 
bun.sys.Error.fromCode(bun.C.getErrno(@as(c_int, -1)), .getaddrinfo).toJSC(globalThis); } const error_value = globalThis.createErrorInstance("DNS lookup failed: {s}", .{err.label()}); error_value.put( @@ -1341,7 +1340,7 @@ pub const InternalDNS = struct { fn isNearlyFull(this: *This) bool { // 80% full (value is kind of arbitrary) - return @atomicLoad(usize, &this.len, .Monotonic) * 5 >= this.cache.len * 4; + return @atomicLoad(usize, &this.len, .monotonic) * 5 >= this.cache.len * 4; } fn deleteEntryAt(this: *This, len: usize, i: usize) ?*Request { @@ -1576,7 +1575,8 @@ pub const InternalDNS = struct { return false; } - var poll = bun.Async.FilePoll.init(loop, bun.toFD(@intFromPtr(machport)), .{}, InternalDNSRequest, req); + const fake_fd: i32 = @intCast(@intFromPtr(machport)); + var poll = bun.Async.FilePoll.init(loop, bun.toFD(fake_fd), .{}, InternalDNSRequest, req); const rc = poll.register(loop.loop(), .machport, true); if (rc == .err) { @@ -1610,12 +1610,12 @@ pub const InternalDNS = struct { pub fn getDNSCacheStats(globalObject: *JSC.JSGlobalObject, _: *JSC.CallFrame) callconv(.C) JSC.JSValue { const object = JSC.JSValue.createEmptyObject(globalObject, 6); - object.put(globalObject, JSC.ZigString.static("cacheHitsCompleted"), JSC.JSValue.jsNumber(@atomicLoad(usize, &dns_cache_hits_completed, .Monotonic))); - object.put(globalObject, JSC.ZigString.static("cacheHitsInflight"), JSC.JSValue.jsNumber(@atomicLoad(usize, &dns_cache_hits_inflight, .Monotonic))); - object.put(globalObject, JSC.ZigString.static("cacheMisses"), JSC.JSValue.jsNumber(@atomicLoad(usize, &dns_cache_misses, .Monotonic))); - object.put(globalObject, JSC.ZigString.static("size"), JSC.JSValue.jsNumber(@atomicLoad(usize, &dns_cache_size, .Monotonic))); - object.put(globalObject, JSC.ZigString.static("errors"), JSC.JSValue.jsNumber(@atomicLoad(usize, &dns_cache_errors, .Monotonic))); - object.put(globalObject, JSC.ZigString.static("totalCount"), JSC.JSValue.jsNumber(@atomicLoad(usize, 
&getaddrinfo_calls, .Monotonic))); + object.put(globalObject, JSC.ZigString.static("cacheHitsCompleted"), JSC.JSValue.jsNumber(@atomicLoad(usize, &dns_cache_hits_completed, .monotonic))); + object.put(globalObject, JSC.ZigString.static("cacheHitsInflight"), JSC.JSValue.jsNumber(@atomicLoad(usize, &dns_cache_hits_inflight, .monotonic))); + object.put(globalObject, JSC.ZigString.static("cacheMisses"), JSC.JSValue.jsNumber(@atomicLoad(usize, &dns_cache_misses, .monotonic))); + object.put(globalObject, JSC.ZigString.static("size"), JSC.JSValue.jsNumber(@atomicLoad(usize, &dns_cache_size, .monotonic))); + object.put(globalObject, JSC.ZigString.static("errors"), JSC.JSValue.jsNumber(@atomicLoad(usize, &dns_cache_errors, .monotonic))); + object.put(globalObject, JSC.ZigString.static("totalCount"), JSC.JSValue.jsNumber(@atomicLoad(usize, &getaddrinfo_calls, .monotonic))); return object; } @@ -2931,7 +2931,7 @@ pub const DNSResolver = struct { var buf: [INET6_ADDRSTRLEN + 2 + 6 + 1]u8 = undefined; const family = current.family; - const ip = if (family == std.os.AF.INET6) blk: { + const ip = if (family == std.posix.AF.INET6) blk: { break :blk c_ares.ares_inet_ntop(family, ¤t.addr.addr6, buf[1..], @sizeOf(@TypeOf(buf)) - 1); } else blk: { break :blk c_ares.ares_inet_ntop(family, ¤t.addr.addr4, buf[1..], @sizeOf(@TypeOf(buf)) - 1); @@ -2956,7 +2956,7 @@ pub const DNSResolver = struct { if (port == IANA_DNS_PORT) { values.putIndex(globalThis, i, JSC.ZigString.init(buf[1..size]).withEncoding().toValueGC(globalThis)); } else { - if (family == std.os.AF.INET6) { + if (family == std.posix.AF.INET6) { buf[0] = '['; buf[size] = ']'; const port_slice = std.fmt.bufPrint(buf[size + 1 ..], ":{d}", .{port}) catch unreachable; @@ -3003,8 +3003,8 @@ pub const DNSResolver = struct { return .zero; }; - var sa: std.os.sockaddr.storage = std.mem.zeroes(std.os.sockaddr.storage); - if (c_ares.getSockaddr(addr_s, port, @as(*std.os.sockaddr, @ptrCast(&sa))) != 0) { + var sa: 
std.posix.sockaddr.storage = std.mem.zeroes(std.posix.sockaddr.storage); + if (c_ares.getSockaddr(addr_s, port, @as(*std.posix.sockaddr, @ptrCast(&sa))) != 0) { globalThis.throwInvalidArgumentType("lookupService", "address", "invalid address"); return .zero; } @@ -3051,7 +3051,7 @@ pub const DNSResolver = struct { const promise = request.tail.promise.value(); channel.getNameInfo( - @as(*std.os.sockaddr, @ptrCast(&sa)), + @as(*std.posix.sockaddr, @ptrCast(&sa)), GetNameInfoRequest, request, GetNameInfoRequest.onCaresComplete, diff --git a/src/bun.js/api/bun/process.zig b/src/bun.js/api/bun/process.zig index 3138bcf7a3..b7c81b3b6e 100644 --- a/src/bun.js/api/bun/process.zig +++ b/src/bun.js/api/bun/process.zig @@ -5,8 +5,8 @@ const Environment = bun.Environment; const JSC = bun.JSC; const Output = bun.Output; const uv = bun.windows.libuv; -const pid_t = if (Environment.isPosix) std.os.pid_t else uv.uv_pid_t; -const fd_t = if (Environment.isPosix) std.os.fd_t else i32; +const pid_t = if (Environment.isPosix) std.posix.pid_t else uv.uv_pid_t; +const fd_t = if (Environment.isPosix) std.posix.fd_t else i32; const Maybe = JSC.Maybe; const win_rusage = struct { @@ -76,7 +76,7 @@ pub fn uv_getrusage(process: *uv.uv_process_t) win_rusage { return usage_info; } -pub const Rusage = if (Environment.isWindows) win_rusage else std.os.rusage; +pub const Rusage = if (Environment.isWindows) win_rusage else std.posix.rusage; const Subprocess = JSC.Subprocess; const LifecycleScriptSubprocess = bun.install.LifecycleScriptSubprocess; @@ -223,7 +223,7 @@ pub const Process = struct { pub fn waitPosix(this: *Process, sync_: bool) void { var rusage = std.mem.zeroes(Rusage); - const waitpid_result = PosixSpawn.wait4(this.pid, if (sync_) 0 else std.os.W.NOHANG, &rusage); + const waitpid_result = PosixSpawn.wait4(this.pid, if (sync_) 0 else std.posix.W.NOHANG, &rusage); this.onWaitPid(&waitpid_result, &rusage); } @@ -377,8 +377,8 @@ pub const Process = struct { } fn onExitUV(process: 
*uv.uv_process_t, exit_status: i64, term_signal: c_int) callconv(.C) void { - const poller = @fieldParentPtr(PollerWindows, "uv", process); - var this = @fieldParentPtr(Process, "poller", poller); + const poller: *PollerWindows = @fieldParentPtr("uv", process); + var this: *Process = @fieldParentPtr("poller", poller); const exit_code: u8 = if (exit_status >= 0) @as(u8, @truncate(@as(u64, @intCast(exit_status)))) else 0; const signal_code: ?bun.SignalCode = if (term_signal > 0 and term_signal < @intFromEnum(bun.SignalCode.SIGSYS)) @enumFromInt(term_signal) else null; const rusage = uv_getrusage(process); @@ -411,8 +411,8 @@ pub const Process = struct { } fn onCloseUV(uv_handle: *uv.uv_process_t) callconv(.C) void { - const poller = @fieldParentPtr(Poller, "uv", uv_handle); - var this = @fieldParentPtr(Process, "poller", poller); + const poller: *Poller = @fieldParentPtr("uv", uv_handle); + var this: *Process = @fieldParentPtr("poller", poller); bun.windows.libuv.log("Process.onClose({d})", .{uv_handle.pid}); if (this.poller == .uv) { @@ -553,13 +553,13 @@ pub const Status = union(enum) { return null; } - if (std.os.W.IFEXITED(result.status)) { - exit_code = std.os.W.EXITSTATUS(result.status); + if (std.posix.W.IFEXITED(result.status)) { + exit_code = std.posix.W.EXITSTATUS(result.status); // True if the process terminated due to receipt of a signal. } - if (std.os.W.IFSIGNALED(result.status)) { - signal = @as(u8, @truncate(std.os.W.TERMSIG(result.status))); + if (std.posix.W.IFSIGNALED(result.status)) { + signal = @as(u8, @truncate(std.posix.W.TERMSIG(result.status))); } // https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/waitpid.2.html @@ -567,8 +567,8 @@ pub const Status = union(enum) { // be restarted. This macro can be true only if the wait call spec-ified specified // ified the WUNTRACED option or if the child process is being // traced (see ptrace(2)). 
- else if (std.os.W.IFSTOPPED(result.status)) { - signal = @as(u8, @truncate(std.os.W.STOPSIG(result.status))); + else if (std.posix.W.IFSTOPPED(result.status)) { + signal = @as(u8, @truncate(std.posix.W.STOPSIG(result.status))); } }, } @@ -820,7 +820,7 @@ const WaiterThreadPosix = struct { } var rusage = std.mem.zeroes(Rusage); - const result = PosixSpawn.wait4(pid, std.os.W.NOHANG, &rusage); + const result = PosixSpawn.wait4(pid, std.posix.W.NOHANG, &rusage); if (result == .err or (result == .result and result.result.pid == pid)) { _ = this.active.orderedRemove(i); queue = this.active.items; @@ -861,11 +861,11 @@ const WaiterThreadPosix = struct { } pub fn setShouldUseWaiterThread() void { - @atomicStore(bool, &should_use_waiter_thread, true, .Monotonic); + @atomicStore(bool, &should_use_waiter_thread, true, .monotonic); } pub fn shouldUseWaiterThread() bool { - return @atomicLoad(bool, &should_use_waiter_thread, .Monotonic); + return @atomicLoad(bool, &should_use_waiter_thread, .monotonic); } pub fn append(process: anytype) void { @@ -878,7 +878,7 @@ const WaiterThreadPosix = struct { if (comptime Environment.isLinux) { const one = @as([8]u8, @bitCast(@as(usize, 1))); - _ = std.os.write(instance.eventfd.cast(), &one) catch @panic("Failed to write to eventfd"); + _ = std.posix.write(instance.eventfd.cast(), &one) catch @panic("Failed to write to eventfd"); } } @@ -889,13 +889,13 @@ const WaiterThreadPosix = struct { pub fn init() !void { bun.assert(should_use_waiter_thread); - if (instance.started.fetchMax(1, .Monotonic) > 0) { + if (instance.started.fetchMax(1, .monotonic) > 0) { return; } if (comptime Environment.isLinux) { const linux = std.os.linux; - instance.eventfd = bun.toFD(try std.os.eventfd(0, linux.EFD.NONBLOCK | linux.EFD.CLOEXEC | 0)); + instance.eventfd = bun.toFD(try std.posix.eventfd(0, linux.EFD.NONBLOCK | linux.EFD.CLOEXEC | 0)); } var thread = try std.Thread.spawn(.{ .stack_size = stack_size }, loop, .{}); @@ -913,14 +913,14 @@ const 
WaiterThreadPosix = struct { } if (comptime Environment.isLinux) { - var current_mask = std.os.empty_sigset; - std.os.linux.sigaddset(¤t_mask, std.os.SIG.CHLD); - const act = std.os.Sigaction{ + var current_mask = std.posix.empty_sigset; + std.os.linux.sigaddset(¤t_mask, std.posix.SIG.CHLD); + const act = std.posix.Sigaction{ .handler = .{ .handler = &wakeup }, .mask = current_mask, - .flags = std.os.SA.NOCLDSTOP, + .flags = std.posix.SA.NOCLDSTOP, }; - std.os.sigaction(std.os.SIG.CHLD, &act, null) catch {}; + std.posix.sigaction(std.posix.SIG.CHLD, &act, null) catch {}; } } @@ -933,10 +933,10 @@ const WaiterThreadPosix = struct { this.js_process.loop(); if (comptime Environment.isLinux) { - var polls = [_]std.os.pollfd{ + var polls = [_]std.posix.pollfd{ .{ .fd = this.eventfd.cast(), - .events = std.os.POLL.IN | std.os.POLL.ERR, + .events = std.posix.POLL.IN | std.posix.POLL.ERR, .revents = 0, }, }; @@ -947,10 +947,10 @@ const WaiterThreadPosix = struct { continue :outer; } - _ = std.os.poll(&polls, std.math.maxInt(i32)) catch 0; + _ = std.posix.poll(&polls, std.math.maxInt(i32)) catch 0; } else { - var mask = std.os.empty_sigset; - var signal: c_int = std.os.SIG.CHLD; + var mask = std.posix.empty_sigset; + var signal: c_int = std.posix.SIG.CHLD; const rc = std.c.sigwait(&mask, &signal); _ = rc; } @@ -1107,7 +1107,7 @@ pub const PosixSpawnResult = struct { // pidfd_nonblock only supported in 5.10+ return if (kernel.orderWithoutTag(.{ .major = 5, .minor = 10, .patch = 0 }).compare(.gte)) - std.os.O.NONBLOCK + bun.O.NONBLOCK else 0; } @@ -1124,7 +1124,7 @@ pub const PosixSpawnResult = struct { pidfd_flags, ); while (true) { - switch (std.os.linux.getErrno(rc)) { + switch (bun.C.getErrno(rc)) { .SUCCESS => return JSC.Maybe(PidFDType){ .result = @intCast(rc) }, .INTR => { rc = std.os.linux.pidfd_open( @@ -1235,8 +1235,8 @@ pub fn spawnProcessPosix( var to_set_cloexec = std.ArrayList(bun.FileDescriptor).init(allocator); defer { for (to_set_cloexec.items) |fd| { - const 
fcntl_flags = bun.sys.fcntl(fd, std.os.F.GETFD, 0).unwrap() catch continue; - _ = bun.sys.fcntl(fd, std.os.F.SETFD, bun.C.FD_CLOEXEC | fcntl_flags); + const fcntl_flags = bun.sys.fcntl(fd, std.posix.F.GETFD, 0).unwrap() catch continue; + _ = bun.sys.fcntl(fd, std.posix.F.SETFD, bun.C.FD_CLOEXEC | fcntl_flags); } to_set_cloexec.clearAndFree(); @@ -1265,8 +1265,8 @@ pub fn spawnProcessPosix( for (0..3) |i| { const stdio = stdios[i]; - const fileno = bun.toFD(i); - const flag = if (i == 0) @as(u32, std.os.O.RDONLY) else @as(u32, std.os.O.WRONLY); + const fileno = bun.toFD(@as(i32, @intCast(i))); + const flag = if (i == 0) @as(u32, bun.O.RDONLY) else @as(u32, bun.O.WRONLY); switch (stdio_options[i]) { .dup2 => |dup2| { @@ -1285,10 +1285,10 @@ pub fn spawnProcessPosix( try actions.inherit(fileno); }, .ignore => { - try actions.openZ(fileno, "/dev/null", flag | std.os.O.CREAT, 0o664); + try actions.openZ(fileno, "/dev/null", flag | bun.O.CREAT, 0o664); }, .path => |path| { - try actions.open(fileno, path, flag | std.os.O.CREAT, 0o664); + try actions.open(fileno, path, flag | bun.O.CREAT, 0o664); }, .buffer => { if (Environment.isLinux) use_memfd: { @@ -1303,11 +1303,11 @@ pub fn spawnProcessPosix( // We use the linux syscall api because the glibc requirement is 2.27, which is a little close for comfort. 
const rc = std.os.linux.memfd_create(label, 0); - if (std.os.linux.getErrno(rc) != .SUCCESS) { + if (bun.C.getErrno(rc) != .SUCCESS) { break :use_memfd; } - const fd = bun.toFD(rc); + const fd = bun.toFD(@as(u32, @intCast(rc))); to_close_on_error.append(fd) catch {}; to_set_cloexec.append(fd) catch {}; try actions.dup2(fd, fileno); @@ -1319,20 +1319,20 @@ pub fn spawnProcessPosix( const fds: [2]bun.FileDescriptor = brk: { var fds_: [2]std.c.fd_t = undefined; - const rc = std.c.socketpair(std.os.AF.UNIX, std.os.SOCK.STREAM, 0, &fds_); + const rc = std.c.socketpair(std.posix.AF.UNIX, std.posix.SOCK.STREAM, 0, &fds_); if (rc != 0) { return error.SystemResources; } { - const before = std.c.fcntl(fds_[if (i == 0) 1 else 0], std.os.F.GETFD); - _ = std.c.fcntl(fds_[if (i == 0) 1 else 0], std.os.F.SETFD, before | std.os.FD_CLOEXEC); + const before = std.c.fcntl(fds_[if (i == 0) 1 else 0], std.posix.F.GETFD); + _ = std.c.fcntl(fds_[if (i == 0) 1 else 0], std.posix.F.SETFD, before | std.posix.FD_CLOEXEC); } if (comptime Environment.isMac) { // SO_NOSIGPIPE const before: i32 = 1; - _ = std.c.setsockopt(fds_[if (i == 0) 1 else 0], std.os.SOL.SOCKET, std.os.SO.NOSIGPIPE, &before, @sizeOf(c_int)); + _ = std.c.setsockopt(fds_[if (i == 0) 1 else 0], std.posix.SOL.SOCKET, std.posix.SO.NOSIGPIPE, &before, @sizeOf(c_int)); } break :brk .{ bun.toFD(fds_[if (i == 0) 1 else 0]), bun.toFD(fds_[if (i == 0) 0 else 1]) }; @@ -1340,10 +1340,10 @@ pub fn spawnProcessPosix( if (i == 0) { // their copy of stdin should be readable - _ = std.c.shutdown(@intCast(fds[1].cast()), std.os.SHUT.WR); + _ = std.c.shutdown(@intCast(fds[1].cast()), std.posix.SHUT.WR); // our copy of stdin should be writable - _ = std.c.shutdown(@intCast(fds[0].cast()), std.os.SHUT.RD); + _ = std.c.shutdown(@intCast(fds[0].cast()), std.posix.SHUT.RD); if (comptime Environment.isMac) { // macOS seems to default to around 8 KB for the buffer size @@ -1351,16 +1351,16 @@ pub fn spawnProcessPosix( // TODO: investigate if this 
should be adjusted on Linux. const so_recvbuf: c_int = 1024 * 512; const so_sendbuf: c_int = 1024 * 512; - _ = std.c.setsockopt(fds[1].cast(), std.os.SOL.SOCKET, std.os.SO.RCVBUF, &so_recvbuf, @sizeOf(c_int)); - _ = std.c.setsockopt(fds[0].cast(), std.os.SOL.SOCKET, std.os.SO.SNDBUF, &so_sendbuf, @sizeOf(c_int)); + _ = std.c.setsockopt(fds[1].cast(), std.posix.SOL.SOCKET, std.posix.SO.RCVBUF, &so_recvbuf, @sizeOf(c_int)); + _ = std.c.setsockopt(fds[0].cast(), std.posix.SOL.SOCKET, std.posix.SO.SNDBUF, &so_sendbuf, @sizeOf(c_int)); } } else { // their copy of stdout or stderr should be writable - _ = std.c.shutdown(@intCast(fds[1].cast()), std.os.SHUT.RD); + _ = std.c.shutdown(@intCast(fds[1].cast()), std.posix.SHUT.RD); // our copy of stdout or stderr should be readable - _ = std.c.shutdown(@intCast(fds[0].cast()), std.os.SHUT.WR); + _ = std.c.shutdown(@intCast(fds[0].cast()), std.posix.SHUT.WR); if (comptime Environment.isMac) { // macOS seems to default to around 8 KB for the buffer size @@ -1368,8 +1368,8 @@ pub fn spawnProcessPosix( // TODO: investigate if this should be adjusted on Linux. const so_recvbuf: c_int = 1024 * 512; const so_sendbuf: c_int = 1024 * 512; - _ = std.c.setsockopt(fds[0].cast(), std.os.SOL.SOCKET, std.os.SO.RCVBUF, &so_recvbuf, @sizeOf(c_int)); - _ = std.c.setsockopt(fds[1].cast(), std.os.SOL.SOCKET, std.os.SO.SNDBUF, &so_sendbuf, @sizeOf(c_int)); + _ = std.c.setsockopt(fds[0].cast(), std.posix.SOL.SOCKET, std.posix.SO.RCVBUF, &so_recvbuf, @sizeOf(c_int)); + _ = std.c.setsockopt(fds[1].cast(), std.posix.SOL.SOCKET, std.posix.SO.SNDBUF, &so_sendbuf, @sizeOf(c_int)); } } @@ -1394,7 +1394,7 @@ pub fn spawnProcessPosix( } for (options.extra_fds, 0..) 
|ipc, i| { - const fileno = bun.toFD(3 + i); + const fileno = bun.toFD(@as(i32, @intCast(3 + i))); switch (ipc) { .dup2 => @panic("TODO dup2 extra fd"), @@ -1402,28 +1402,28 @@ pub fn spawnProcessPosix( try actions.inherit(fileno); }, .ignore => { - try actions.openZ(fileno, "/dev/null", std.os.O.RDWR, 0o664); + try actions.openZ(fileno, "/dev/null", bun.O.RDWR, 0o664); }, .path => |path| { - try actions.open(fileno, path, std.os.O.RDWR | std.os.O.CREAT, 0o664); + try actions.open(fileno, path, bun.O.RDWR | bun.O.CREAT, 0o664); }, .buffer => { const fds: [2]bun.FileDescriptor = brk: { var fds_: [2]std.c.fd_t = undefined; - const rc = std.c.socketpair(std.os.AF.UNIX, std.os.SOCK.STREAM, 0, &fds_); + const rc = std.c.socketpair(std.posix.AF.UNIX, std.posix.SOCK.STREAM, 0, &fds_); if (rc != 0) { return error.SystemResources; } // enable non-block - var before = std.c.fcntl(fds_[0], std.os.F.GETFD); + var before = std.c.fcntl(fds_[0], std.posix.F.GETFD); - _ = std.c.fcntl(fds_[0], std.os.F.SETFD, before | bun.C.FD_CLOEXEC); + _ = std.c.fcntl(fds_[0], std.posix.F.SETFD, before | bun.C.FD_CLOEXEC); if (comptime Environment.isMac) { // SO_NOSIGPIPE - _ = std.c.setsockopt(fds_[if (i == 0) 1 else 0], std.os.SOL.SOCKET, std.os.SO.NOSIGPIPE, &before, @sizeOf(c_int)); + _ = std.c.setsockopt(fds_[if (i == 0) 1 else 0], std.posix.SOL.SOCKET, std.posix.SO.NOSIGPIPE, &before, @sizeOf(c_int)); } break :brk .{ bun.toFD(fds_[0]), bun.toFD(fds_[1]) }; @@ -1593,7 +1593,7 @@ pub fn spawnProcessWindows( .path => |path| { var req = uv.fs_t.uninitialized; defer req.deinit(); - const rc = uv.uv_fs_open(loop, &req, &(try std.os.toPosixPath(path)), flag | uv.O.CREAT, 0o644, null); + const rc = uv.uv_fs_open(loop, &req, &(try std.posix.toPosixPath(path)), flag | uv.O.CREAT, 0o644, null); if (rc.toError(.open)) |err| { failed = true; return .{ .err = err }; @@ -1644,7 +1644,7 @@ pub fn spawnProcessWindows( .path => |path| { var req = uv.fs_t.uninitialized; defer req.deinit(); - const rc = 
uv.uv_fs_open(loop, &req, &(try std.os.toPosixPath(path)), flag | uv.O.CREAT, 0o644, null); + const rc = uv.uv_fs_open(loop, &req, &(try std.posix.toPosixPath(path)), flag | uv.O.CREAT, 0o644, null); if (rc.toError(.open)) |err| { failed = true; return .{ .err = err }; @@ -2106,12 +2106,12 @@ pub const sync = struct { var poll_fds_buf = [_]std.c.pollfd{ .{ .fd = 0, - .events = std.os.POLL.IN | std.os.POLL.ERR | std.os.POLL.HUP, + .events = std.posix.POLL.IN | std.posix.POLL.ERR | std.posix.POLL.HUP, .revents = 0, }, .{ .fd = 0, - .events = std.os.POLL.IN | std.os.POLL.ERR | std.os.POLL.HUP, + .events = std.posix.POLL.IN | std.posix.POLL.ERR | std.posix.POLL.HUP, .revents = 0, }, }; @@ -2133,7 +2133,7 @@ pub const sync = struct { } const rc = std.c.poll(poll_fds.ptr, @intCast(poll_fds.len), -1); - switch (std.c.getErrno(rc)) { + switch (bun.C.getErrno(rc)) { .SUCCESS => {}, .AGAIN, .INTR => continue, else => |err| return .{ .err = bun.sys.Error.fromCode(err, .poll) }, diff --git a/src/bun.js/api/bun/socket.zig b/src/bun.js/api/bun/socket.zig index 95b7db860d..8c6612693a 100644 --- a/src/bun.js/api/bun/socket.zig +++ b/src/bun.js/api/bun/socket.zig @@ -224,7 +224,7 @@ const Handlers = struct { this.active_connections -= 1; if (this.active_connections == 0) { if (this.is_server) { - var listen_socket: *Listener = @fieldParentPtr(Listener, "handlers", this); + var listen_socket: *Listener = @fieldParentPtr("handlers", this); // allow it to be GC'd once the last connection is closed and it's not listening anymore if (listen_socket.listener == null) { listen_socket.strong_self.clear(); @@ -652,7 +652,7 @@ pub const Listener = struct { hostname_or_unix.deinit(); } - const errno = @intFromEnum(std.c.getErrno(-1)); + const errno = @intFromEnum(bun.C.getErrno(@as(c_int, -1))); if (errno != 0) { err.put(globalObject, ZigString.static("errno"), JSValue.jsNumber(errno)); if (bun.C.SystemErrno.init(errno)) |str| { @@ -752,7 +752,7 @@ pub const Listener = struct { 
bun.span(hostname_or_unix.slice()), }, ); - const errno = @intFromEnum(std.c.getErrno(-1)); + const errno = @intFromEnum(bun.C.getErrno(@as(c_int, -1))); if (errno != 0) { err.put(globalObject, ZigString.static("errno"), JSValue.jsNumber(errno)); if (bun.C.SystemErrno.init(errno)) |str| { @@ -1217,9 +1217,9 @@ fn NewSocket(comptime ssl: bool) type { JSC.Codegen.JSTLSSocket; pub fn hasPendingActivity(this: *This) callconv(.C) bool { - @fence(.Acquire); + @fence(.acquire); - return this.has_pending_activity.load(.Acquire); + return this.has_pending_activity.load(.acquire); } pub fn doConnect(this: *This, connection: Listener.UnixOrHost, socket_ctx: *uws.SocketContext) !void { @@ -1369,7 +1369,7 @@ fn NewSocket(comptime ssl: bool) type { var promise = val.asPromise().?; const err_ = err.toErrorInstance(globalObject); promise.rejectOnNextTickAsHandled(globalObject, err_); - this.has_pending_activity.store(false, .Release); + this.has_pending_activity.store(false, .release); } } pub fn onConnectError(this: *This, _: Socket, errno: c_int) void { @@ -1381,7 +1381,7 @@ fn NewSocket(comptime ssl: bool) type { if (!this.is_active) { this.handlers.markActive(); this.is_active = true; - this.has_pending_activity.store(true, .Release); + this.has_pending_activity.store(true, .release); } } @@ -1402,7 +1402,7 @@ fn NewSocket(comptime ssl: bool) type { const vm = this.handlers.vm; this.handlers.markInactive(ssl, this.socket.context(), this.wrapped); this.poll_ref.unref(vm); - this.has_pending_activity.store(false, .Release); + this.has_pending_activity.store(false, .release); } } @@ -1707,7 +1707,8 @@ fn NewSocket(comptime ssl: bool) type { return JSValue.jsUndefined(); } - return @fieldParentPtr(Listener, "handlers", this.handlers).strong_self.get() orelse JSValue.jsUndefined(); + const l: *Listener = @fieldParentPtr("handlers", this.handlers); + return l.strong_self.get() orelse JSValue.jsUndefined(); } pub fn getReadyState( @@ -3124,7 +3125,7 @@ fn NewSocket(comptime ssl: 
bool) type { // the connection can be upgraded inside a handler call so we need to garantee that it will be still alive this.handlers.markInactive(ssl, old_context, this.wrapped); this.poll_ref.unref(vm); - this.has_pending_activity.store(false, .Release); + this.has_pending_activity.store(false, .release); } const array = JSC.JSValue.createEmptyArray(globalObject, 2); diff --git a/src/bun.js/api/bun/spawn.zig b/src/bun.js/api/bun/spawn.zig index 94c5d29f41..0f937974b6 100644 --- a/src/bun.js/api/bun/spawn.zig +++ b/src/bun.js/api/bun/spawn.zig @@ -3,6 +3,7 @@ const bun = @import("root").bun; const string = bun.string; const std = @import("std"); const Output = bun.Output; + fn _getSystem() type { // this is a workaround for a Zig stage1 bug // the "usingnamespace" is evaluating in dead branches @@ -10,12 +11,12 @@ fn _getSystem() type { if (comptime bun.Environment.isLinux) { const Type = bun.C.linux; break :brk struct { - pub usingnamespace std.os.system; + pub usingnamespace std.posix.system; pub usingnamespace Type; }; } - break :brk std.os.system; + break :brk std.posix.system; }; } @@ -24,12 +25,12 @@ const system = _getSystem(); const Maybe = JSC.Maybe; -const fd_t = std.os.fd_t; -const pid_t = std.os.pid_t; -const toPosixPath = std.os.toPosixPath; -const errno = std.os.errno; -const mode_t = std.os.mode_t; -const unexpectedErrno = std.os.unexpectedErrno; +const fd_t = std.posix.fd_t; +const pid_t = std.posix.pid_t; +const toPosixPath = std.posix.toPosixPath; +const errno = std.posix.errno; +const mode_t = std.posix.mode_t; +const unexpectedErrno = std.posix.unexpectedErrno; pub const BunSpawn = struct { pub const Action = extern struct { @@ -387,7 +388,7 @@ pub const PosixSpawn = struct { }); // Unlike most syscalls, posix_spawn returns 0 on success and an errno on failure. - // That is why std.c.getErrno() is not used here, since that checks for -1. + // That is why bun.C.getErrno() is not used here, since that checks for -1. 
if (rc == 0) { return Maybe(pid_t){ .result = pid }; } @@ -403,7 +404,7 @@ pub const PosixSpawn = struct { /// Use this version of the `waitpid` wrapper if you spawned your child process using `posix_spawn` /// or `posix_spawnp` syscalls. - /// See also `std.os.waitpid` for an alternative if your child process was spawned via `fork` and + /// See also `std.posix.waitpid` for an alternative if your child process was spawned via `fork` and /// `execve` method. pub fn waitpid(pid: pid_t, flags: u32) Maybe(WaitPidResult) { const PidStatus = c_int; @@ -425,7 +426,7 @@ pub const PosixSpawn = struct { } /// Same as waitpid, but also returns resource usage information. - pub fn wait4(pid: pid_t, flags: u32, usage: ?*std.os.rusage) Maybe(WaitPidResult) { + pub fn wait4(pid: pid_t, flags: u32, usage: ?*std.posix.rusage) Maybe(WaitPidResult) { const PidStatus = c_int; var status: PidStatus = 0; while (true) { diff --git a/src/bun.js/api/bun/spawn/stdio.zig b/src/bun.js/api/bun/spawn/stdio.zig index edcc07fc4e..e86c2d6a46 100644 --- a/src/bun.js/api/bun/spawn/stdio.zig +++ b/src/bun.js/api/bun/spawn/stdio.zig @@ -8,8 +8,10 @@ const Async = bun.Async; const JSC = bun.JSC; const JSValue = JSC.JSValue; const JSGlobalObject = JSC.JSGlobalObject; +const posix = std.posix; const Output = bun.Output; const os = std.os; + const uv = bun.windows.libuv; pub const Stdio = union(enum) { inherit: void, @@ -110,11 +112,11 @@ pub const Stdio = union(enum) { }; // We use the linux syscall api because the glibc requirement is 2.27, which is a little close for comfort. 
- const rc = std.os.linux.memfd_create(label, 0); + const rc = std.c.memfd_create(label, 0); log("memfd_create({s}) = {d}", .{ label, rc }); - switch (std.os.linux.getErrno(rc)) { + switch (bun.C.getErrno(rc)) { .SUCCESS => {}, else => |errno| { log("Failed to create memfd: {s}", .{@tagName(errno)}); diff --git a/src/bun.js/api/bun/subprocess.zig b/src/bun.js/api/bun/subprocess.zig index 6985ec480d..b525e15f73 100644 --- a/src/bun.js/api/bun/subprocess.zig +++ b/src/bun.js/api/bun/subprocess.zig @@ -279,7 +279,7 @@ pub const Subprocess = struct { } pub fn updateHasPendingActivity(this: *Subprocess) void { - @fence(.SeqCst); + @fence(.seq_cst); if (comptime Environment.isDebug) { log("updateHasPendingActivity() {any} -> {any}", .{ this.has_pending_activity.raw, @@ -288,7 +288,7 @@ pub const Subprocess = struct { } this.has_pending_activity.store( this.hasPendingActivityNonThreadsafe(), - .Monotonic, + .monotonic, ); } @@ -342,8 +342,8 @@ pub const Subprocess = struct { } pub fn hasPendingActivity(this: *Subprocess) callconv(.C) bool { - @fence(.Acquire); - return this.has_pending_activity.load(.Acquire); + @fence(.acquire); + return this.has_pending_activity.load(.acquire); } pub fn ref(this: *Subprocess) void { @@ -1182,7 +1182,7 @@ pub const Subprocess = struct { // When the stream has closed we need to be notified to prevent a use-after-free // We can test for this use-after-free by enabling hot module reloading on a file and then saving it twice pub fn onClose(this: *Writable, _: ?bun.sys.Error) void { - const process = @fieldParentPtr(Subprocess, "stdin", this); + const process: *Subprocess = @fieldParentPtr("stdin", this); if (process.this_jsvalue != .zero) { if (Subprocess.stdinGetCached(process.this_jsvalue)) |existing_value| { @@ -1355,7 +1355,7 @@ pub const Subprocess = struct { } pub fn finalize(this: *Writable) void { - const subprocess = @fieldParentPtr(Subprocess, "stdin", this); + const subprocess: *Subprocess = @fieldParentPtr("stdin", this); if 
(subprocess.this_jsvalue != .zero) { if (JSC.Codegen.JSSubprocess.stdinGetCached(subprocess.this_jsvalue)) |existing_value| { JSC.WebCore.FileSink.JSSink.setDestroyCallback(existing_value, 0); @@ -2254,8 +2254,6 @@ pub const Subprocess = struct { return sync_value; } - const os = std.os; - pub fn handleIPCMessage( this: *Subprocess, message: IPC.DecodedIPCMessage, diff --git a/src/bun.js/api/bun/udp_socket.zig b/src/bun.js/api/bun/udp_socket.zig index de0dbe29bb..3108f81304 100644 --- a/src/bun.js/api/bun/udp_socket.zig +++ b/src/bun.js/api/bun/udp_socket.zig @@ -28,7 +28,7 @@ fn onClose(socket: *uws.udp.Socket) callconv(.C) void { const this: *UDPSocket = bun.cast(*UDPSocket, socket.user().?); this.closed = true; this.poll_ref.unref(this.globalThis.bunVM()); - _ = this.js_refcount.fetchSub(1, .Monotonic); + _ = this.js_refcount.fetchSub(1, .monotonic); } fn onDrain(socket: *uws.udp.Socket) callconv(.C) void { @@ -62,13 +62,13 @@ fn onData(socket: *uws.udp.Socket, buf: *uws.udp.PacketBuffer, packets: c_int) c var port: u16 = 0; switch (peer.family) { - std.os.AF.INET => { - const peer4: *std.os.sockaddr.in = @ptrCast(peer); + std.posix.AF.INET => { + const peer4: *std.posix.sockaddr.in = @ptrCast(peer); hostname = inet_ntop(peer.family, &peer4.addr, &addr_buf, addr_buf.len); port = ntohs(peer4.port); }, - std.os.AF.INET6 => { - const peer6: *std.os.sockaddr.in6 = @ptrCast(peer); + std.posix.AF.INET6 => { + const peer6: *std.posix.sockaddr.in6 = @ptrCast(peer); hostname = inet_ntop(peer.family, &peer6.addr, &addr_buf, addr_buf.len); port = ntohs(peer6.port); }, @@ -84,8 +84,8 @@ fn onData(socket: *uws.udp.Socket, buf: *uws.udp.PacketBuffer, packets: c_int) c const loop = udpSocket.vm.eventLoop(); loop.enter(); defer loop.exit(); - _ = udpSocket.js_refcount.fetchAdd(1, .Monotonic); - defer _ = udpSocket.js_refcount.fetchSub(1, .Monotonic); + _ = udpSocket.js_refcount.fetchAdd(1, .monotonic); + defer _ = udpSocket.js_refcount.fetchSub(1, .monotonic); const result = 
callback.callWithThis(globalThis, udpSocket.thisValue, &[_]JSValue{ udpSocket.thisValue, @@ -286,7 +286,7 @@ pub const UDPSocket = struct { } pub fn hasPendingActivity(this: *This) callconv(.C) bool { - return this.js_refcount.load(.Monotonic) > 0; + return this.js_refcount.load(.monotonic) > 0; } pub usingnamespace bun.New(@This()); @@ -404,7 +404,7 @@ pub const UDPSocket = struct { var payloads = alloc.alloc([*]const u8, len) catch bun.outOfMemory(); var lens = alloc.alloc(usize, len) catch bun.outOfMemory(); var addr_ptrs = alloc.alloc(?*const anyopaque, len) catch bun.outOfMemory(); - var addrs = alloc.alloc(std.os.sockaddr.storage, len) catch bun.outOfMemory(); + var addrs = alloc.alloc(std.posix.sockaddr.storage, len) catch bun.outOfMemory(); var iter = arg.arrayIterator(globalThis); @@ -507,7 +507,7 @@ pub const UDPSocket = struct { }; defer payload.deinit(); - var addr: std.os.sockaddr.storage = std.mem.zeroes(std.os.sockaddr.storage); + var addr: std.posix.sockaddr.storage = std.mem.zeroes(std.posix.sockaddr.storage); const addr_ptr = brk: { if (dst) |dest| { if (!this.parseAddr(globalThis, dest.port, dest.address, &addr)) { @@ -533,7 +533,7 @@ pub const UDPSocket = struct { globalThis: *JSGlobalObject, port_val: JSValue, address_val: JSValue, - storage: *std.os.sockaddr.storage, + storage: *std.posix.sockaddr.storage, ) bool { _ = this; const number = port_val.coerceToInt32(globalThis); @@ -544,15 +544,15 @@ pub const UDPSocket = struct { const address_slice = str.toOwnedSliceZ(default_allocator) catch bun.outOfMemory(); defer default_allocator.free(address_slice); - var addr4: *std.os.sockaddr.in = @ptrCast(storage); - if (inet_pton(std.os.AF.INET, address_slice.ptr, &addr4.addr) == 1) { + var addr4: *std.posix.sockaddr.in = @ptrCast(storage); + if (inet_pton(std.posix.AF.INET, address_slice.ptr, &addr4.addr) == 1) { addr4.port = htons(@truncate(port)); - addr4.family = std.os.AF.INET; + addr4.family = std.posix.AF.INET; } else { - var addr6: 
*std.os.sockaddr.in6 = @ptrCast(storage); - if (inet_pton(std.os.AF.INET6, address_slice.ptr, &addr6.addr) == 1) { + var addr6: *std.posix.sockaddr.in6 = @ptrCast(storage); + if (inet_pton(std.posix.AF.INET6, address_slice.ptr, &addr6.addr) == 1) { addr6.port = htons(@truncate(port)); - addr6.family = std.os.AF.INET6; + addr6.family = std.posix.AF.INET6; } else { return false; } diff --git a/src/bun.js/api/ffi.zig b/src/bun.js/api/ffi.zig index 59413cceaa..3736bdc615 100644 --- a/src/bun.js/api/ffi.zig +++ b/src/bun.js/api/ffi.zig @@ -71,7 +71,6 @@ const Config = @import("../config.zig"); const URL = @import("../../url.zig").URL; const VirtualMachine = JSC.VirtualMachine; const IOTask = JSC.IOTask; -const ComptimeStringMap = @import("../../comptime_string_map.zig").ComptimeStringMap; const TCC = @import("../../tcc.zig"); @@ -959,12 +958,12 @@ pub const FFI = struct { const ffi_wrapper = Bun__createFFICallbackFunction(js_context, js_function); try this.printCallbackSourceCode(js_context, ffi_wrapper, &source_code_writer); - if (comptime Environment.allow_assert and Environment.isPosix) { + if (comptime Environment.isDebug and Environment.isPosix) { debug_write: { - const fd = std.os.open("/tmp/bun-ffi-callback-source.c", std.os.O.WRONLY | std.os.O.CREAT, 0o644) catch break :debug_write; - _ = std.os.write(fd, source_code.items) catch break :debug_write; - std.os.ftruncate(fd, source_code.items.len) catch break :debug_write; - std.os.close(fd); + const fd = std.posix.open("/tmp/bun-ffi-callback-source.c", .{ .CREAT = true, .ACCMODE = .WRONLY }, 0o644) catch break :debug_write; + _ = std.posix.write(fd, source_code.items) catch break :debug_write; + std.posix.ftruncate(fd, source_code.items.len) catch break :debug_write; + std.posix.close(fd); } } @@ -1416,7 +1415,7 @@ pub const FFI = struct { .{ "callback", ABIType.function }, .{ "fn", ABIType.function }, }; - pub const label = ComptimeStringMap(ABIType, map); + pub const label = bun.ComptimeStringMap(ABIType, map); 
const EnumMapFormatter = struct { name: []const u8, entry: ABIType, diff --git a/src/bun.js/api/glob.zig b/src/bun.js/api/glob.zig index 6dd8b75735..ec1d2d58e2 100644 --- a/src/bun.js/api/glob.zig +++ b/src/bun.js/api/glob.zig @@ -370,18 +370,18 @@ pub fn finalize( } pub fn hasPendingActivity(this: *Glob) callconv(.C) bool { - @fence(.SeqCst); - return this.has_pending_activity.load(.SeqCst) > 0; + @fence(.seq_cst); + return this.has_pending_activity.load(.seq_cst) > 0; } fn incrPendingActivityFlag(has_pending_activity: *std.atomic.Value(usize)) void { - @fence(.SeqCst); - _ = has_pending_activity.fetchAdd(1, .SeqCst); + @fence(.seq_cst); + _ = has_pending_activity.fetchAdd(1, .seq_cst); } fn decrPendingActivityFlag(has_pending_activity: *std.atomic.Value(usize)) void { - @fence(.SeqCst); - _ = has_pending_activity.fetchSub(1, .SeqCst); + @fence(.seq_cst); + _ = has_pending_activity.fetchSub(1, .seq_cst); } pub fn __scan(this: *Glob, globalThis: *JSGlobalObject, callframe: *JSC.CallFrame) callconv(.C) JSC.JSValue { diff --git a/src/bun.js/api/server.zig b/src/bun.js/api/server.zig index 1ab7f76494..6d8e50568f 100644 --- a/src/bun.js/api/server.zig +++ b/src/bun.js/api/server.zig @@ -429,7 +429,7 @@ pub const ServerConfig = struct { defer sliced.deinit(); if (sliced.len > 0) { result.key_file_name = bun.default_allocator.dupeZ(u8, sliced.slice()) catch unreachable; - if (std.os.system.access(result.key_file_name, std.os.F_OK) != 0) { + if (std.posix.system.access(result.key_file_name, std.posix.F_OK) != 0) { JSC.throwInvalidArguments("Unable to access keyFile path", .{}, global, exception); result.deinit(); @@ -528,7 +528,7 @@ pub const ServerConfig = struct { defer sliced.deinit(); if (sliced.len > 0) { result.cert_file_name = bun.default_allocator.dupeZ(u8, sliced.slice()) catch unreachable; - if (std.os.system.access(result.cert_file_name, std.os.F_OK) != 0) { + if (std.posix.system.access(result.cert_file_name, std.posix.F_OK) != 0) { 
JSC.throwInvalidArguments("Unable to access certFile path", .{}, global, exception); result.deinit(); return null; @@ -769,7 +769,7 @@ pub const ServerConfig = struct { defer sliced.deinit(); if (sliced.len > 0) { result.ca_file_name = bun.default_allocator.dupeZ(u8, sliced.slice()) catch unreachable; - if (std.os.system.access(result.ca_file_name, std.os.F_OK) != 0) { + if (std.posix.system.access(result.ca_file_name, std.posix.F_OK) != 0) { JSC.throwInvalidArguments("Invalid caFile path", .{}, global, exception); result.deinit(); return null; @@ -801,7 +801,7 @@ pub const ServerConfig = struct { defer sliced.deinit(); if (sliced.len > 0) { result.dh_params_file_name = bun.default_allocator.dupeZ(u8, sliced.slice()) catch unreachable; - if (std.os.system.access(result.dh_params_file_name, std.os.F_OK) != 0) { + if (std.posix.system.access(result.dh_params_file_name, std.posix.F_OK) != 0) { JSC.throwInvalidArguments("Invalid dhParamsFile path", .{}, global, exception); result.deinit(); return null; @@ -2080,7 +2080,7 @@ fn NewRequestContext(comptime ssl_enabled: bool, comptime debug_mode: bool, comp this.finalize(); } const separator: string = "\r\n"; - const separator_iovec = [1]std.os.iovec_const{.{ + const separator_iovec = [1]std.posix.iovec_const{.{ .iov_base = separator.ptr, .iov_len = separator.len, }}; @@ -2102,7 +2102,7 @@ fn NewRequestContext(comptime ssl_enabled: bool, comptime debug_mode: bool, comp const val = linux.sendfile(this.sendfile.socket_fd.cast(), this.sendfile.fd.cast(), &signed_offset, this.sendfile.remain); this.sendfile.offset = @as(Blob.SizeType, @intCast(signed_offset)); - const errcode = linux.getErrno(val); + const errcode = bun.C.getErrno(val); this.sendfile.remain -|= @as(Blob.SizeType, @intCast(this.sendfile.offset -| start)); @@ -2115,9 +2115,9 @@ fn NewRequestContext(comptime ssl_enabled: bool, comptime debug_mode: bool, comp return errcode != .SUCCESS; } } else { - var sbytes: std.os.off_t = adjusted_count; + var sbytes: 
std.posix.off_t = adjusted_count; const signed_offset = @as(i64, @bitCast(@as(u64, this.sendfile.offset))); - const errcode = std.c.getErrno(std.c.sendfile( + const errcode = bun.C.getErrno(std.c.sendfile( this.sendfile.fd.cast(), this.sendfile.socket_fd.cast(), signed_offset, @@ -2214,7 +2214,7 @@ fn NewRequestContext(comptime ssl_enabled: bool, comptime debug_mode: bool, comp const auto_close = file.pathlike != .fd; const fd = if (!auto_close) file.pathlike.fd - else switch (bun.sys.open(file.pathlike.path.sliceZ(&file_buf), std.os.O.RDONLY | std.os.O.NONBLOCK | std.os.O.CLOEXEC, 0)) { + else switch (bun.sys.open(file.pathlike.path.sliceZ(&file_buf), bun.O.RDONLY | bun.O.NONBLOCK | bun.O.CLOEXEC, 0)) { .result => |_fd| _fd, .err => |err| return this.runErrorHandler(err.withPath(file.pathlike.path.slice()).toSystemError().toErrorInstance( this.server.globalThis, @@ -2242,7 +2242,7 @@ fn NewRequestContext(comptime ssl_enabled: bool, comptime debug_mode: bool, comp } var err = bun.sys.Error{ - .errno = @as(bun.sys.Error.Int, @intCast(@intFromEnum(std.os.E.INVAL))), + .errno = @as(bun.sys.Error.Int, @intCast(@intFromEnum(std.posix.E.INVAL))), .syscall = .sendfile, }; var sys = err.withPathLike(file.pathlike).toSystemError(); @@ -2255,13 +2255,13 @@ fn NewRequestContext(comptime ssl_enabled: bool, comptime debug_mode: bool, comp } if (Environment.isLinux) { - if (!(bun.isRegularFile(stat.mode) or std.os.S.ISFIFO(stat.mode) or std.os.S.ISSOCK(stat.mode))) { + if (!(bun.isRegularFile(stat.mode) or std.posix.S.ISFIFO(stat.mode) or std.posix.S.ISSOCK(stat.mode))) { if (auto_close) { _ = bun.sys.close(fd); } var err = bun.sys.Error{ - .errno = @as(bun.sys.Error.Int, @intCast(@intFromEnum(std.os.E.INVAL))), + .errno = @as(bun.sys.Error.Int, @intCast(@intFromEnum(std.posix.E.INVAL))), .syscall = .sendfile, }; var sys = err.withPathLike(file.pathlike).toSystemError(); diff --git a/src/bun.js/base.zig b/src/bun.js/base.zig index 25048b0281..16ab349138 100644 --- 
a/src/bun.js/base.zig +++ b/src/bun.js/base.zig @@ -342,7 +342,14 @@ pub const ArrayBuffer = extern struct { return result; } - const result = bun.sys.mmap(null, @intCast(@max(size, 0)), std.os.PROT.READ | std.os.PROT.WRITE, std.os.MAP.SHARED | 0, fd, 0); + const result = bun.sys.mmap( + null, + @intCast(@max(size, 0)), + std.posix.PROT.READ | std.posix.PROT.WRITE, + .{ .TYPE = .SHARED }, + fd, + 0, + ); _ = bun.sys.close(fd); switch (result) { @@ -1529,7 +1536,7 @@ pub const Strong = @import("./Strong.zig").Strong; pub const Weak = @import("./Weak.zig").Weak; pub const WeakRefType = @import("./Weak.zig").WeakRefType; -pub const BinaryType = enum { +pub const BinaryType = enum(u4) { Buffer, ArrayBuffer, Uint8Array, @@ -1662,21 +1669,21 @@ pub const MemoryReportingAllocator = struct { fn alloc(this: *MemoryReportingAllocator, n: usize, log2_ptr_align: u8, return_address: usize) ?[*]u8 { const result = this.child_allocator.rawAlloc(n, log2_ptr_align, return_address) orelse return null; - _ = this.memory_cost.fetchAdd(n, .Monotonic); + _ = this.memory_cost.fetchAdd(n, .monotonic); if (comptime Environment.allow_assert) log("malloc({d}) = {d}", .{ n, this.memory_cost.raw }); return result; } pub fn discard(this: *MemoryReportingAllocator, buf: []const u8) void { - _ = this.memory_cost.fetchSub(buf.len, .Monotonic); + _ = this.memory_cost.fetchSub(buf.len, .monotonic); if (comptime Environment.allow_assert) log("discard({d}) = {d}", .{ buf.len, this.memory_cost.raw }); } fn resize(this: *MemoryReportingAllocator, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool { if (this.child_allocator.rawResize(buf, buf_align, new_len, ret_addr)) { - _ = this.memory_cost.fetchAdd(new_len -| buf.len, .Monotonic); + _ = this.memory_cost.fetchAdd(new_len -| buf.len, .monotonic); if (comptime Environment.allow_assert) log("resize() = {d}", .{this.memory_cost.raw}); return true; @@ -1688,11 +1695,12 @@ pub const MemoryReportingAllocator = struct { fn free(this: 
*MemoryReportingAllocator, buf: []u8, buf_align: u8, ret_addr: usize) void { this.child_allocator.rawFree(buf, buf_align, ret_addr); - const prev = this.memory_cost.fetchSub(buf.len, .Monotonic); - _ = prev; if (comptime Environment.allow_assert) { // check for overflow, racily - // bun.assert(prev > this.memory_cost.load(.Monotonic)); + const prev = this.memory_cost.fetchSub(buf.len, .monotonic); + _ = prev; + // bun.assert(prev > this.memory_cost.load(.monotonic)); + log("free({d}) = {d}", .{ buf.len, this.memory_cost.raw }); } } @@ -1713,7 +1721,7 @@ pub const MemoryReportingAllocator = struct { } pub fn report(this: *MemoryReportingAllocator, vm: *JSC.VM) void { - const mem = this.memory_cost.load(.Monotonic); + const mem = this.memory_cost.load(.monotonic); if (mem > 0) { vm.reportExtraMemory(mem); if (comptime Environment.allow_assert) @@ -1726,7 +1734,7 @@ pub const MemoryReportingAllocator = struct { return; } - const memory_cost = this.memory_cost.load(.Monotonic); + const memory_cost = this.memory_cost.load(.monotonic); if (memory_cost > 0) { Output.panic("MemoryReportingAllocator still has {d} bytes allocated", .{memory_cost}); } diff --git a/src/bun.js/bindings/bindings.zig b/src/bun.js/bindings/bindings.zig index 31476fa3ac..d42e342ea8 100644 --- a/src/bun.js/bindings/bindings.zig +++ b/src/bun.js/bindings/bindings.zig @@ -10,7 +10,7 @@ const ErrorableZigString = Exports.ErrorableZigString; const ErrorableResolvedSource = Exports.ErrorableResolvedSource; const ZigException = Exports.ZigException; const ZigStackTrace = Exports.ZigStackTrace; -const is_bindgen: bool = std.meta.globalOption("bindgen", bool) orelse false; +const is_bindgen: bool = false; const ArrayBuffer = @import("../base.zig").ArrayBuffer; const JSC = bun.JSC; const Shimmer = JSC.Shimmer; @@ -4700,7 +4700,7 @@ pub const JSValue = enum(JSValueReprInt) { return cppFn("fromUInt64NoTruncate", .{ globalObject, i }); } - /// This always returns a JS BigInt using std.os.timeval from 
std.os.rusage + /// This always returns a JS BigInt using std.posix.timeval from std.posix.rusage pub fn fromTimevalNoTruncate(globalObject: *JSGlobalObject, nsec: i64, sec: i64) JSValue { return cppFn("fromTimevalNoTruncate", .{ globalObject, nsec, sec }); } diff --git a/src/bun.js/bindings/exports.zig b/src/bun.js/bindings/exports.zig index f71e8e32e8..1d3eab9258 100644 --- a/src/bun.js/bindings/exports.zig +++ b/src/bun.js/bindings/exports.zig @@ -12,7 +12,7 @@ const strings = bun.strings; const default_allocator = bun.default_allocator; const NewGlobalObject = JSC.NewGlobalObject; const JSGlobalObject = JSC.JSGlobalObject; -const is_bindgen: bool = std.meta.globalOption("bindgen", bool) orelse false; +const is_bindgen: bool = false; const ZigString = JSC.ZigString; const string = bun.string; const JSValue = JSC.JSValue; diff --git a/src/bun.js/bindings/shimmer.zig b/src/bun.js/bindings/shimmer.zig index ad41828144..1c83268481 100644 --- a/src/bun.js/bindings/shimmer.zig +++ b/src/bun.js/bindings/shimmer.zig @@ -1,7 +1,7 @@ const std = @import("std"); const StaticExport = @import("./static_export.zig"); const Sizes = @import("./sizes.zig"); -pub const is_bindgen: bool = std.meta.globalOption("bindgen", bool) orelse false; +pub const is_bindgen: bool = false; const headers = @import("./headers.zig"); fn isNullableType(comptime Type: type) bool { diff --git a/src/bun.js/event_loop.zig b/src/bun.js/event_loop.zig index fd7db0e2bb..0b51c8a32f 100644 --- a/src/bun.js/event_loop.zig +++ b/src/bun.js/event_loop.zig @@ -59,7 +59,7 @@ pub fn ConcurrentPromiseTask(comptime Context: type) type { } pub fn runFromThreadPool(task: *WorkPoolTask) void { - var this = @fieldParentPtr(This, "task", task); + var this: *This = @fieldParentPtr("task", task); Context.run(this.ctx); this.onFinish(); } @@ -122,7 +122,7 @@ pub fn WorkTask(comptime Context: type) type { pub fn runFromThreadPool(task: *TaskType) void { JSC.markBinding(@src()); - const this = @fieldParentPtr(This, "task", 
task); + const this: *This = @fieldParentPtr("task", task); Context.run(this.ctx, this); } @@ -286,7 +286,7 @@ pub const AnyTaskWithExtraContext = struct { @as(*ContextType, @ptrCast(@alignCast(extra.?))), }, ); - const anytask: *AnyTaskWithExtraContext = @fieldParentPtr(AnyTaskWithExtraContext, "ctx", @as(*?*anyopaque, @ptrCast(@alignCast(this.?)))); + const anytask: *AnyTaskWithExtraContext = @fieldParentPtr("ctx", @as(*?*anyopaque, @ptrCast(@alignCast(this.?)))); bun.default_allocator.destroy(anytask); } }; @@ -569,7 +569,7 @@ pub const GarbageCollectionController = struct { } pub fn bunVM(this: *GarbageCollectionController) *VirtualMachine { - return @fieldParentPtr(VirtualMachine, "gc_controller", this); + return @alignCast(@fieldParentPtr("gc_controller", this)); } pub fn onGCTimer(timer: *uws.Timer) callconv(.C) void { @@ -1259,7 +1259,7 @@ pub const EventLoop = struct { pub fn tickConcurrentWithCount(this: *EventLoop) usize { JSC.markBinding(@src()); - const delta = this.concurrent_ref.swap(0, .Monotonic); + const delta = this.concurrent_ref.swap(0, .monotonic); const loop = this.virtual_machine.event_loop_handle.?; if (comptime Environment.isWindows) { if (delta > 0) { @@ -1601,13 +1601,13 @@ pub const EventLoop = struct { pub fn refConcurrently(this: *EventLoop) void { // TODO maybe this should be AcquireRelease - _ = this.concurrent_ref.fetchAdd(1, .Monotonic); + _ = this.concurrent_ref.fetchAdd(1, .monotonic); this.wakeup(); } pub fn unrefConcurrently(this: *EventLoop) void { // TODO maybe this should be AcquireRelease - _ = this.concurrent_ref.fetchSub(1, .Monotonic); + _ = this.concurrent_ref.fetchSub(1, .monotonic); this.wakeup(); } }; diff --git a/src/bun.js/ipc.zig b/src/bun.js/ipc.zig index b767573c30..1816ddca5f 100644 --- a/src/bun.js/ipc.zig +++ b/src/bun.js/ipc.zig @@ -23,7 +23,7 @@ pub const Mode = enum { /// This must match the behavior of node.js, and supports bun <--> node.js/etc communication. 
json, - const Map = std.ComptimeStringMap(Mode, .{ + const Map = std.StaticStringMap(Mode).initComptime(.{ .{ "advanced", .advanced }, .{ "json", .json }, }); diff --git a/src/bun.js/javascript.zig b/src/bun.js/javascript.zig index 568921f99e..c94fb86d55 100644 --- a/src/bun.js/javascript.zig +++ b/src/bun.js/javascript.zig @@ -523,7 +523,7 @@ pub const ExitHandler = struct { pub fn dispatchOnExit(this: *ExitHandler) void { JSC.markBinding(@src()); - var vm = @fieldParentPtr(VirtualMachine, "exit_handler", this); + const vm: *VirtualMachine = @alignCast(@fieldParentPtr("exit_handler", this)); Process__dispatchOnExit(vm.global, this.exit_code); if (vm.isMainThread()) { Bun__closeAllSQLiteDatabasesForTermination(); @@ -532,7 +532,7 @@ pub const ExitHandler = struct { pub fn dispatchOnBeforeExit(this: *ExitHandler) void { JSC.markBinding(@src()); - const vm = @fieldParentPtr(VirtualMachine, "exit_handler", this); + const vm: *VirtualMachine = @alignCast(@fieldParentPtr("exit_handler", this)); Process__dispatchOnBeforeExit(vm.global, this.exit_code); } }; @@ -1230,7 +1230,9 @@ pub const VirtualMachine = struct { } debug("spin", .{}); - while (futex_atomic.load(.Monotonic) > 0) std.Thread.Futex.wait(&futex_atomic, 1); + while (futex_atomic.load(.monotonic) > 0) { + std.Thread.Futex.wait(&futex_atomic, 1); + } if (comptime Environment.allow_assert) debug("waitForDebugger: {}", .{Output.ElapsedFormatter{ .colors = Output.enable_ansi_colors_stderr, @@ -1302,7 +1304,7 @@ pub const VirtualMachine = struct { } debug("wake", .{}); - futex_atomic.store(0, .Monotonic); + futex_atomic.store(0, .monotonic); std.Thread.Futex.wake(&futex_atomic, 1); this.eventLoop().tick(); @@ -3891,6 +3893,9 @@ pub fn NewHotReloader(comptime Ctx: type, comptime EventLoopType: type, comptime err: bun.sys.Error, ) void { Output.err(@as(bun.C.E, @enumFromInt(err.errno)), "Watcher crashed", .{}); + if (bun.Environment.isDebug) { + @panic("Watcher crash"); + } } pub fn getContext(this: *@This()) 
*@This().Watcher { @@ -3998,7 +4003,7 @@ pub fn NewHotReloader(comptime Ctx: type, comptime EventLoopType: type, comptime if (parent_hash == id) { const affected_path = file_paths[entry_id]; const was_deleted = check: { - std.os.access(affected_path, std.os.F_OK) catch break :check true; + std.posix.access(affected_path, std.posix.F_OK) catch break :check true; break :check false; }; if (!was_deleted) continue; diff --git a/src/bun.js/module_loader.zig b/src/bun.js/module_loader.zig index e839bca157..9de1852925 100644 --- a/src/bun.js/module_loader.zig +++ b/src/bun.js/module_loader.zig @@ -200,7 +200,10 @@ fn dumpSourceStringFailiable(vm: *VirtualMachine, specifier: string, written: [] }; var parent = try dir.makeOpenPath(dir_path[root_len..], .{}); defer parent.close(); - parent.writeFile(std.fs.path.basename(specifier), written) catch |e| { + parent.writeFile(.{ + .sub_path = std.fs.path.basename(specifier), + .data = written, + }) catch |e| { Output.debugWarn("Failed to dump source string: writeFile {}", .{e}); return; }; @@ -237,7 +240,10 @@ fn dumpSourceStringFailiable(vm: *VirtualMachine, specifier: string, written: [] try bufw.flush(); } } else { - dir.writeFile(std.fs.path.basename(specifier), written) catch return; + dir.writeFile(.{ + .sub_path = std.fs.path.basename(specifier), + .data = written, + }) catch return; } } @@ -274,7 +280,7 @@ pub const RuntimeTranspilerStore = struct { } else { return; } - var vm = @fieldParentPtr(JSC.VirtualMachine, "transpiler_store", this); + var vm: *JSC.VirtualMachine = @fieldParentPtr("transpiler_store", this); const event_loop = vm.eventLoop(); const global = vm.global; const jsc_vm = vm.jsc; @@ -419,7 +425,7 @@ pub const RuntimeTranspilerStore = struct { } pub fn runFromWorkerThread(work_task: *JSC.WorkPoolTask) void { - @fieldParentPtr(TranspilerJob, "work_task", work_task).run(); + @as(*TranspilerJob, @fieldParentPtr("work_task", work_task)).run(); } pub fn run(this: *TranspilerJob) void { @@ -428,7 +434,7 @@ pub 
const RuntimeTranspilerStore = struct { const allocator = arena.allocator(); defer this.dispatchToMainThread(); - if (this.generation_number != this.vm.transpiler_store.generation_number.load(.Monotonic)) { + if (this.generation_number != this.vm.transpiler_store.generation_number.load(.monotonic)) { this.parse_error = error.TranspilerJobGenerationMismatch; return; } @@ -1024,7 +1030,7 @@ pub const ModuleLoader = struct { pub fn pollModules(this: *Queue) void { var pm = this.vm().packageManager(); - if (pm.pending_tasks.load(.Monotonic) > 0) return; + if (pm.pending_tasks.load(.monotonic) > 0) return; var modules: []AsyncModule = this.map.items; var i: usize = 0; @@ -1107,7 +1113,7 @@ pub const ModuleLoader = struct { } pub fn vm(this: *Queue) *VirtualMachine { - return @fieldParentPtr(VirtualMachine, "modules", this); + return @alignCast(@fieldParentPtr("modules", this)); } }; diff --git a/src/bun.js/node/dir_iterator.zig b/src/bun.js/node/dir_iterator.zig index 58860ff174..381e7fe952 100644 --- a/src/bun.js/node/dir_iterator.zig +++ b/src/bun.js/node/dir_iterator.zig @@ -7,14 +7,14 @@ const builtin = @import("builtin"); const std = @import("std"); -const os = std.os; +const posix = std.posix; const Dir = std.fs.Dir; const JSC = bun.JSC; const PathString = JSC.PathString; const bun = @import("root").bun; -const IteratorError = error{ AccessDenied, SystemResources } || os.UnexpectedError; +const IteratorError = error{ AccessDenied, SystemResources } || posix.UnexpectedError; const mem = std.mem; const strings = bun.strings; const Maybe = JSC.Maybe; @@ -61,7 +61,7 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type { pub const Error = IteratorError; - fn fd(self: *Self) os.fd_t { + fn fd(self: *Self) posix.fd_t { return self.dir.fd; } @@ -77,7 +77,7 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type { fn nextDarwin(self: *Self) Result { start_over: while (true) { if (self.index >= self.end_index) { - const rc = os.system.__getdirentries64( + 
const rc = posix.system.__getdirentries64( self.dir.fd, &self.buf, self.buf.len, @@ -94,25 +94,25 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type { self.index = 0; self.end_index = @as(usize, @intCast(rc)); } - const darwin_entry = @as(*align(1) os.system.dirent, @ptrCast(&self.buf[self.index])); - const next_index = self.index + darwin_entry.reclen(); + const darwin_entry = @as(*align(1) posix.system.dirent, @ptrCast(&self.buf[self.index])); + const next_index = self.index + darwin_entry.reclen; self.index = next_index; - const name = @as([*]u8, @ptrCast(&darwin_entry.d_name))[0..darwin_entry.d_namlen]; + const name = @as([*]u8, @ptrCast(&darwin_entry.name))[0..darwin_entry.namlen]; - if (strings.eqlComptime(name, ".") or strings.eqlComptime(name, "..") or (darwin_entry.d_ino == 0)) { + if (strings.eqlComptime(name, ".") or strings.eqlComptime(name, "..") or (darwin_entry.ino == 0)) { continue :start_over; } - const entry_kind = switch (darwin_entry.d_type) { - os.DT.BLK => Entry.Kind.block_device, - os.DT.CHR => Entry.Kind.character_device, - os.DT.DIR => Entry.Kind.directory, - os.DT.FIFO => Entry.Kind.named_pipe, - os.DT.LNK => Entry.Kind.sym_link, - os.DT.REG => Entry.Kind.file, - os.DT.SOCK => Entry.Kind.unix_domain_socket, - os.DT.WHT => Entry.Kind.whiteout, + const entry_kind = switch (darwin_entry.type) { + posix.DT.BLK => Entry.Kind.block_device, + posix.DT.CHR => Entry.Kind.character_device, + posix.DT.DIR => Entry.Kind.directory, + posix.DT.FIFO => Entry.Kind.named_pipe, + posix.DT.LNK => Entry.Kind.sym_link, + posix.DT.REG => Entry.Kind.file, + posix.DT.SOCK => Entry.Kind.unix_domain_socket, + posix.DT.WHT => Entry.Kind.whiteout, else => Entry.Kind.unknown, }; return .{ @@ -133,11 +133,11 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type { end_index: usize, const Self = @This(); - const linux = os.linux; + const linux = std.os.linux; pub const Error = IteratorError; - fn fd(self: *Self) os.fd_t { + fn fd(self: *Self) 
posix.fd_t { return self.dir.fd; } @@ -153,17 +153,17 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type { self.end_index = rc; } const linux_entry = @as(*align(1) linux.dirent64, @ptrCast(&self.buf[self.index])); - const next_index = self.index + linux_entry.reclen(); + const next_index = self.index + linux_entry.reclen; self.index = next_index; - const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&linux_entry.d_name)), 0); + const name = mem.sliceTo(@as([*:0]u8, @ptrCast(&linux_entry.name)), 0); // skip . and .. entries if (strings.eqlComptime(name, ".") or strings.eqlComptime(name, "..")) { continue :start_over; } - const entry_kind = switch (linux_entry.d_type) { + const entry_kind = switch (linux_entry.type) { linux.DT.BLK => Entry.Kind.block_device, linux.DT.CHR => Entry.Kind.character_device, linux.DT.DIR => Entry.Kind.directory, @@ -206,7 +206,7 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type { const ResultT = if (use_windows_ospath) ResultW else Result; - fn fd(self: *Self) os.fd_t { + fn fd(self: *Self) posix.fd_t { return self.dir.fd; } @@ -214,7 +214,7 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type { /// with subsequent calls to `next`, as well as when this `Dir` is deinitialized. pub fn next(self: *Self) ResultT { while (true) { - const w = os.windows; + const w = std.os.windows; if (self.index >= self.end_index) { var io: w.IO_STATUS_BLOCK = undefined; if (self.first) { @@ -345,7 +345,7 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type { pub const Error = IteratorError; - fn fd(self: *Self) os.fd_t { + fn fd(self: *Self) posix.fd_t { return self.dir.fd; } @@ -355,7 +355,7 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type { // We intentinally use fd_readdir even when linked with libc, // since its implementation is exactly the same as below, // and we avoid the code complexity here. 
- const w = os.wasi; + const w = posix.wasi; start_over: while (true) { if (self.index >= self.end_index) { var bufused: usize = undefined; @@ -366,7 +366,7 @@ pub fn NewIterator(comptime use_windows_ospath: bool) type { .NOTDIR => unreachable, .INVAL => unreachable, .NOTCAPABLE => return error.AccessDenied, - else => |err| return os.unexpectedErrno(err), + else => |err| return posix.unexpectedErrno(err), } if (bufused == 0) return null; self.index = 0; @@ -419,7 +419,7 @@ pub fn NewWrappedIterator(comptime path_type: PathType) type { return self.iter.next(); } - pub inline fn fd(self: *Self) os.fd_t { + pub inline fn fd(self: *Self) posix.fd_t { return self.iter.fd(); } @@ -458,7 +458,7 @@ pub fn NewWrappedIterator(comptime path_type: PathType) type { }, .wasi => IteratorType{ .dir = dir, - .cookie = os.wasi.DIRCOOKIE_START, + .cookie = posix.wasi.DIRCOOKIE_START, .index = 0, .end_index = 0, .buf = undefined, diff --git a/src/bun.js/node/node_fs.zig b/src/bun.js/node/node_fs.zig index 73ed39ea53..5bcd62a557 100644 --- a/src/bun.js/node/node_fs.zig +++ b/src/bun.js/node/node_fs.zig @@ -11,7 +11,7 @@ const PathString = JSC.PathString; const Environment = bun.Environment; const C = bun.C; const Flavor = JSC.Node.Flavor; -const system = std.os.system; +const system = std.posix.system; const Maybe = JSC.Maybe; const Encoding = JSC.Node.Encoding; const PosixToWinNormalizer = bun.path.PosixToWinNormalizer; @@ -23,9 +23,9 @@ const Syscall = if (Environment.isWindows) bun.sys.sys_uv else bun.sys; const Constants = @import("./node_fs_constant.zig").Constants; const builtin = @import("builtin"); -const os = @import("std").os; -const darwin = os.darwin; -const linux = os.linux; +const posix = std.posix; +const darwin = std.os.darwin; +const linux = std.os.linux; const PathLike = JSC.Node.PathLike; const PathOrFileDescriptor = JSC.Node.PathOrFileDescriptor; const DirIterator = @import("./dir_iterator.zig"); @@ -36,9 +36,8 @@ const TimeLike = JSC.Node.TimeLike; const Mode = 
bun.Mode; const uv = bun.windows.libuv; const E = C.E; -const uid_t = if (Environment.isPosix) std.os.uid_t else bun.windows.libuv.uv_uid_t; -const gid_t = if (Environment.isPosix) std.os.gid_t else bun.windows.libuv.uv_gid_t; -/// u63 to allow one null bit +const uid_t = if (Environment.isPosix) std.posix.uid_t else bun.windows.libuv.uv_uid_t; +const gid_t = if (Environment.isPosix) std.posix.gid_t else bun.windows.libuv.uv_gid_t; const ReadPosition = i64; const Stats = JSC.Node.Stats; @@ -117,7 +116,7 @@ pub const Async = struct { pub usingnamespace bun.New(@This()); pub fn workPoolCallback(task: *JSC.WorkPoolTask) void { - var this: *AsyncMkdirp = @fieldParentPtr(AsyncMkdirp, "task", task); + var this: *AsyncMkdirp = @fieldParentPtr("task", task); var node_fs = NodeFS{}; const result = node_fs.mkdirRecursive( @@ -180,7 +179,7 @@ pub const Async = struct { } fn workPoolCallback(task: *JSC.WorkPoolTask) void { - var this: *Task = @fieldParentPtr(Task, "task", task); + var this: *Task = @alignCast(@fieldParentPtr("task", task)); var node_fs = NodeFS{}; this.result = Function(&node_fs, this.args, .promise); @@ -296,7 +295,7 @@ pub fn NewAsyncCpTask(comptime is_shell: bool) type { } fn workPoolCallback(task: *JSC.WorkPoolTask) void { - var this: *ThisSingleTask = @fieldParentPtr(ThisSingleTask, "task", task); + var this: *ThisSingleTask = @fieldParentPtr("task", task); // TODO: error strings on node_fs will die var node_fs = NodeFS{}; @@ -326,7 +325,7 @@ pub fn NewAsyncCpTask(comptime is_shell: bool) type { } } - const old_count = this.cp_task.subtask_count.fetchSub(1, .Monotonic); + const old_count = this.cp_task.subtask_count.fetchSub(1, .monotonic); if (old_count == 1) { this.cp_task.finishConcurrently(Maybe(Return.Cp).success); } @@ -425,7 +424,7 @@ pub fn NewAsyncCpTask(comptime is_shell: bool) type { } fn workPoolCallback(task: *JSC.WorkPoolTask) void { - const this: *ThisAsyncCpTask = @fieldParentPtr(ThisAsyncCpTask, "task", task); + const this: 
*ThisAsyncCpTask = @alignCast(@fieldParentPtr("task", task)); var node_fs = NodeFS{}; ThisAsyncCpTask.cpAsync(&node_fs, this); @@ -433,7 +432,7 @@ pub fn NewAsyncCpTask(comptime is_shell: bool) type { /// May be called from any thread (the subtasks) fn finishConcurrently(this: *ThisAsyncCpTask, result: Maybe(Return.Cp)) void { - if (this.has_result.cmpxchgStrong(false, true, .Monotonic, .Monotonic)) |_| { + if (this.has_result.cmpxchgStrong(false, true, .monotonic, .monotonic)) |_| { return; } @@ -555,7 +554,7 @@ pub fn NewAsyncCpTask(comptime is_shell: bool) type { }, }; - if (!os.S.ISDIR(stat_.mode)) { + if (!bun.S.ISDIR(stat_.mode)) { // This is the only file, there is no point in dispatching subtasks const r = nodefs._copySingleFileSync( src, @@ -584,7 +583,7 @@ pub fn NewAsyncCpTask(comptime is_shell: bool) type { } const success = ThisAsyncCpTask._cpAsyncDirectory(nodefs, args.flags, this, &src_buf, @intCast(src.len), &dest_buf, @intCast(dest.len)); - const old_count = this.subtask_count.fetchSub(1, .Monotonic); + const old_count = this.subtask_count.fetchSub(1, .monotonic); if (success and old_count == 1) { this.finishConcurrently(Maybe(Return.Cp).success); } @@ -625,7 +624,7 @@ pub fn NewAsyncCpTask(comptime is_shell: bool) type { } } - const open_flags = os.O.DIRECTORY | os.O.RDONLY; + const open_flags = bun.O.DIRECTORY | bun.O.RDONLY; const fd = switch (Syscall.openatOSPath(bun.FD.cwd(), src, open_flags, 0)) { .err => |err| { this.finishConcurrently(.{ .err = err.withPath(nodefs.osPathIntoSyncErrorBuf(src)) }); @@ -693,7 +692,7 @@ pub fn NewAsyncCpTask(comptime is_shell: bool) type { if (!should_continue) return false; }, else => { - _ = this.subtask_count.fetchAdd(1, .Monotonic); + _ = this.subtask_count.fetchAdd(1, .monotonic); const cname = current.name.slice(); @@ -806,7 +805,7 @@ pub const AsyncReaddirRecursiveTask = struct { pub usingnamespace bun.New(@This()); pub fn call(task: *JSC.WorkPoolTask) void { - var this: *Subtask = 
@fieldParentPtr(Subtask, "task", task); + var this: *Subtask = @alignCast(@fieldParentPtr("task", task)); defer { bun.default_allocator.free(this.basename.sliceAssumeZ()); this.destroy(); @@ -826,7 +825,7 @@ pub const AsyncReaddirRecursiveTask = struct { .basename = bun.PathString.init(bun.default_allocator.dupeZ(u8, basename) catch bun.outOfMemory()), }, ); - bun.assert(readdir_task.subtask_count.fetchAdd(1, .Monotonic) > 0); + bun.assert(readdir_task.subtask_count.fetchAdd(1, .monotonic) > 0); JSC.WorkPool.schedule(&task.task); } @@ -901,7 +900,7 @@ pub const AsyncReaddirRecursiveTask = struct { } } - if (this.subtask_count.fetchSub(1, .Monotonic) == 1) { + if (this.subtask_count.fetchSub(1, .monotonic) == 1) { this.finishConcurrently(); } }, @@ -914,7 +913,7 @@ pub const AsyncReaddirRecursiveTask = struct { } fn workPoolCallback(task: *JSC.WorkPoolTask) void { - var this: *AsyncReaddirRecursiveTask = @fieldParentPtr(AsyncReaddirRecursiveTask, "task", task); + var this: *AsyncReaddirRecursiveTask = @alignCast(@fieldParentPtr("task", task)); var buf: bun.PathBuffer = undefined; this.performWork(this.root_path.sliceAssumeZ(), &buf, true); } @@ -933,23 +932,23 @@ pub const AsyncReaddirRecursiveTask = struct { } var clone = std.ArrayList(ResultType).initCapacity(bun.default_allocator, result.items.len) catch bun.outOfMemory(); clone.appendSliceAssumeCapacity(result.items); - _ = this.result_list_count.fetchAdd(clone.items.len, .Monotonic); + _ = this.result_list_count.fetchAdd(clone.items.len, .monotonic); list.* = ResultListEntry{ .next = null, .value = @unionInit(ResultListEntry.Value, @tagName(Field), clone) }; this.result_list_queue.push(list); } - if (this.subtask_count.fetchSub(1, .Monotonic) == 1) { + if (this.subtask_count.fetchSub(1, .monotonic) == 1) { this.finishConcurrently(); } } /// May be called from any thread (the subtasks) pub fn finishConcurrently(this: *AsyncReaddirRecursiveTask) void { - if (this.has_result.cmpxchgStrong(false, true, .Monotonic, 
.Monotonic)) |_| { + if (this.has_result.cmpxchgStrong(false, true, .monotonic, .monotonic)) |_| { return; } - bun.assert(this.subtask_count.load(.Monotonic) == 0); + bun.assert(this.subtask_count.load(.monotonic) == 0); const root_fd = this.root_fd; if (root_fd != bun.invalid_fd) { @@ -974,7 +973,7 @@ pub const AsyncReaddirRecursiveTask = struct { switch (this.args.tag()) { inline else => |tag| { var results = &@field(this.result_list, @tagName(tag)); - results.ensureTotalCapacityPrecise(this.result_list_count.swap(0, .Monotonic)) catch bun.outOfMemory(); + results.ensureTotalCapacityPrecise(this.result_list_count.swap(0, .monotonic)) catch bun.outOfMemory(); while (iter.next()) |val| { if (to_destroy) |dest| { bun.default_allocator.destroy(dest); @@ -1012,7 +1011,7 @@ pub const AsyncReaddirRecursiveTask = struct { if (to_destroy) |dest| { bun.default_allocator.destroy(dest); } - this.result_list_count.store(0, .Monotonic); + this.result_list_count.store(0, .monotonic); } pub fn runFromJSThread(this: *AsyncReaddirRecursiveTask) void { @@ -4234,7 +4233,7 @@ pub const NodeFS = struct { .err => |err| return Maybe(Return.CopyFile){ .err = err.withPath(src) }, }; - if (!os.S.ISREG(stat_.mode)) { + if (!posix.S.ISREG(stat_.mode)) { return Maybe(Return.CopyFile){ .err = .{ .errno = @intFromEnum(C.SystemErrno.ENOTSUP), .syscall = .copyfile, @@ -4254,7 +4253,7 @@ pub const NodeFS = struct { return ret.success; } } else { - const src_fd = switch (Syscall.open(src, std.os.O.RDONLY, 0o644)) { + const src_fd = switch (Syscall.open(src, bun.O.RDONLY, 0o644)) { .result => |result| result, .err => |err| return .{ .err = err.withPath(args.src.slice()) }, }; @@ -4262,10 +4261,10 @@ pub const NodeFS = struct { _ = Syscall.close(src_fd); } - var flags: Mode = std.os.O.CREAT | std.os.O.WRONLY; + var flags: Mode = bun.O.CREAT | bun.O.WRONLY; var wrote: usize = 0; if (args.mode.shouldntOverwrite()) { - flags |= std.os.O.EXCL; + flags |= bun.O.EXCL; } const dest_fd = switch 
(Syscall.open(dest, flags, JSC.Node.default_permission)) { @@ -4299,7 +4298,7 @@ pub const NodeFS = struct { const src = args.src.sliceZ(&src_buf); const dest = args.dest.sliceZ(&dest_buf); - const src_fd = switch (Syscall.open(src, std.os.O.RDONLY, 0o644)) { + const src_fd = switch (Syscall.open(src, bun.O.RDONLY, 0o644)) { .result => |result| result, .err => |err| return .{ .err = err }, }; @@ -4312,14 +4311,14 @@ pub const NodeFS = struct { .err => |err| return Maybe(Return.CopyFile){ .err = err }, }; - if (!os.S.ISREG(stat_.mode)) { + if (!posix.S.ISREG(stat_.mode)) { return Maybe(Return.CopyFile){ .err = .{ .errno = @intFromEnum(C.SystemErrno.ENOTSUP), .syscall = .copyfile } }; } - var flags: Mode = std.os.O.CREAT | std.os.O.WRONLY; + var flags: Mode = bun.O.CREAT | bun.O.WRONLY; var wrote: usize = 0; if (args.mode.shouldntOverwrite()) { - flags |= std.os.O.EXCL; + flags |= bun.O.EXCL; } const dest_fd = switch (Syscall.open(dest, flags, JSC.Node.default_permission)) { @@ -4343,7 +4342,7 @@ pub const NodeFS = struct { } // If we know it's a regular file and ioctl_ficlone is available, attempt to use it. - if (os.S.ISREG(stat_.mode) and bun.can_use_ioctl_ficlone()) { + if (posix.S.ISREG(stat_.mode) and bun.can_use_ioctl_ficlone()) { const rc = bun.C.linux.ioctl_ficlone(dest_fd, src_fd); if (rc == 0) { _ = C.fchmod(dest_fd.cast(), stat_.mode); @@ -4447,14 +4446,14 @@ pub const NodeFS = struct { // Use libuv access on windows if (Environment.isWindows) { - return .{ .result = Syscall.access(slice, std.os.F_OK) != .err }; + return .{ .result = Syscall.access(slice, std.posix.F_OK) != .err }; } // access() may not work correctly on NFS file systems with UID // mapping enabled, because UID mapping is done on the server and // hidden from the client, which checks permissions. Similar // problems can occur to FUSE mounts. 
- const rc = (system.access(slice, std.os.F_OK)); + const rc = (system.access(slice, std.posix.F_OK)); return Ret{ .result = rc == 0 }; } @@ -4540,7 +4539,7 @@ pub const NodeFS = struct { Maybe(Return.Futimes).success; } - var times = [2]std.os.timespec{ + var times = [2]std.posix.timespec{ args.mtime, args.atime, }; @@ -4643,7 +4642,7 @@ pub const NodeFS = struct { break :brk strings.toWPath(&buf, utf8); } else { var cwd_buf: bun.PathBuffer = undefined; - const cwd = std.os.getcwd(&cwd_buf) catch return .{ .err = .{ .errno = @intFromEnum(C.SystemErrno.ENOMEM), .syscall = .getcwd } }; + const cwd = std.posix.getcwd(&cwd_buf) catch return .{ .err = .{ .errno = @intFromEnum(C.SystemErrno.ENOMEM), .syscall = .getcwd } }; break :brk strings.toWPath(&buf, bun.path.joinAbsStringBuf(cwd, &joined_buf, &.{args.path.slice()}, .windows)); } }; @@ -4839,12 +4838,15 @@ pub const NodeFS = struct { .result = JSC.ZigString.dupeForJS(bun.sliceTo(ptr, 0), bun.default_allocator) catch bun.outOfMemory(), }; } - // std.c.getErrno(rc) returns SUCCESS if rc is null so we call std.c._errno() directly + + // bun.C.getErrno(rc) returns SUCCESS if rc is -1 so we call std.c._errno() directly const errno = @as(std.c.E, @enumFromInt(std.c._errno().*)); - return .{ .err = Syscall.Error{ - .errno = @as(Syscall.Error.Int, @truncate(@intFromEnum(errno))), - .syscall = .mkdtemp, - } }; + return .{ + .err = Syscall.Error{ + .errno = @as(Syscall.Error.Int, @truncate(@intFromEnum(errno))), + .syscall = .mkdtemp, + }, + }; } pub fn open(this: *NodeFS, args: Arguments.Open, comptime _: Flavor) Maybe(Return.Open) { @@ -5131,7 +5133,7 @@ pub const NodeFS = struct { comptime is_root: bool, ) Maybe(void) { const root_basename = async_task.root_path.slice(); - const flags = os.O.DIRECTORY | os.O.RDONLY; + const flags = bun.O.DIRECTORY | bun.O.RDONLY; const atfd = if (comptime is_root) bun.FD.cwd() else async_task.root_fd; const fd = switch (switch (Environment.os) { else => Syscall.openat(atfd, basename, 
flags, 0), @@ -5291,7 +5293,7 @@ pub const NodeFS = struct { } } - const flags = os.O.DIRECTORY | os.O.RDONLY; + const flags = bun.O.DIRECTORY | bun.O.RDONLY; const fd = switch (Syscall.openat(if (root_fd == bun.invalid_fd) bun.FD.cwd() else root_fd, basename, flags, 0)) { .err => |err| { if (root_fd == bun.invalid_fd) { @@ -5446,7 +5448,7 @@ pub const NodeFS = struct { @panic("This code path should never be reached. It should only go through readdirWithEntriesRecursiveAsync."); } - const flags = os.O.DIRECTORY | os.O.RDONLY; + const flags = bun.O.DIRECTORY | bun.O.RDONLY; const fd = switch (switch (Environment.os) { else => Syscall.open(path, flags, 0), // windows bun.sys.open does not pass iterable=true, @@ -5526,7 +5528,7 @@ pub const NodeFS = struct { break :brk switch (bun.sys.open( path, - os.O.RDONLY | os.O.NOCTTY, + bun.O.RDONLY | bun.O.NOCTTY, 0, )) { .err => |err| return .{ @@ -5543,7 +5545,7 @@ pub const NodeFS = struct { return .{ .err = .{ - .errno = @intFromEnum(os.E.MFILE), + .errno = @intFromEnum(posix.E.MFILE), .syscall = .open, }, }; @@ -5685,7 +5687,7 @@ pub const NodeFS = struct { const open_result = Syscall.openat( args.dirfd, path, - @intFromEnum(args.flag) | os.O.NOCTTY, + @intFromEnum(args.flag) | bun.O.NOCTTY, args.mode, ); @@ -5719,7 +5721,7 @@ pub const NodeFS = struct { // on linux, it's absolutely positioned const pos = bun.sys.system.lseek( fd.cast(), - @as(std.os.off_t, @intCast(0)), + @as(std.posix.off_t, @intCast(0)), std.os.linux.SEEK.CUR, ); @@ -5731,8 +5733,8 @@ pub const NodeFS = struct { bun.C.preallocate_file( fd.cast(), - @as(std.os.off_t, @intCast(offset)), - @as(std.os.off_t, @intCast(buf.len)), + @as(std.posix.off_t, @intCast(offset)), + @as(std.posix.off_t, @intCast(buf.len)), ) catch {}; } } @@ -5772,7 +5774,7 @@ pub const NodeFS = struct { // https://github.com/oven-sh/bun/issues/2931 // https://github.com/oven-sh/bun/issues/10222 // only truncate if we're not appending and writing to a path - if 
((@intFromEnum(args.flag) & std.os.O.APPEND) == 0 and args.file != .fd) { + if ((@intFromEnum(args.flag) & bun.O.APPEND) == 0 and args.file != .fd) { _ = ftruncateSync(.{ .fd = fd, .len = @as(JSC.WebCore.Blob.SizeType, @truncate(written)) }); } } @@ -5866,9 +5868,9 @@ pub const NodeFS = struct { const flags = if (comptime Environment.isLinux) // O_PATH is faster - std.os.O.PATH + bun.O.PATH else - std.os.O.RDONLY; + bun.O.RDONLY; const fd = switch (bun.sys.open(path, flags, 0)) { .err => |err| return .{ .err = err.withPath(path) }, @@ -5924,7 +5926,6 @@ pub const NodeFS = struct { if (args.recursive) { std.fs.cwd().deleteTree(args.path.slice()) catch |err| { const errno: bun.C.E = switch (err) { - error.InvalidHandle => .BADF, error.AccessDenied => .PERM, error.FileTooBig => .FBIG, error.SymLinkLoop => .LOOP, @@ -5942,6 +5943,7 @@ pub const NodeFS = struct { error.NotDir => .NOTDIR, // On Windows, file paths must be valid Unicode. error.InvalidUtf8 => .INVAL, + error.InvalidWtf8 => .INVAL, // On Windows, file paths cannot contain these characters: // '/', '*', '?', '"', '<', '>', '|' @@ -5971,7 +5973,7 @@ pub const NodeFS = struct { // TODO: switch to an implementation which does not use any "unreachable" std.fs.cwd().deleteTree(args.path.slice()) catch |err| { const errno: E = switch (err) { - error.InvalidHandle => .BADF, + // error.InvalidHandle => .BADF, error.AccessDenied => .PERM, error.FileTooBig => .FBIG, error.SymLinkLoop => .LOOP, @@ -5987,8 +5989,9 @@ pub const NodeFS = struct { // One of the path components was not a directory. // This error is unreachable if `sub_path` does not contain a path separator. error.NotDir => .NOTDIR, - // On Windows, file paths must be valid Unicode. + // On Windows, file paths must be valid WTF-8. 
error.InvalidUtf8 => .INVAL, + error.InvalidWtf8 => .INVAL, // On Windows, file paths cannot contain these characters: // '/', '*', '?', '"', '<', '>', '|' @@ -6008,13 +6011,13 @@ pub const NodeFS = struct { const dest = args.path.sliceZ(&this.sync_error_buf); - std.os.unlinkZ(dest) catch |er| { + std.posix.unlinkZ(dest) catch |er| { // empircally, it seems to return AccessDenied when the // file is actually a directory on macOS. if (args.recursive and (er == error.IsDir or er == error.NotDir or er == error.AccessDenied)) { - std.os.rmdirZ(dest) catch |err| { + std.posix.rmdirZ(dest) catch |err| { if (args.force) { return Maybe(Return.Rm).success; } @@ -6028,6 +6031,7 @@ pub const NodeFS = struct { error.FileBusy => .BUSY, error.FileNotFound => .NOENT, error.InvalidUtf8 => .INVAL, + error.InvalidWtf8 => .INVAL, error.BadPathName => .INVAL, else => .FAULT, }; @@ -6056,6 +6060,7 @@ pub const NodeFS = struct { error.ReadOnlyFileSystem => .ROFS, error.FileBusy => .BUSY, error.InvalidUtf8 => .INVAL, + error.InvalidWtf8 => .INVAL, error.BadPathName => .INVAL, error.FileNotFound => .NOENT, else => .FAULT, @@ -6122,7 +6127,7 @@ pub const NodeFS = struct { if (comptime Environment.isWindows) { const file = bun.sys.open( path.sliceZ(&this.sync_error_buf), - os.O.WRONLY | flags, + bun.O.WRONLY | flags, 0o644, ); if (file == .err) @@ -6351,7 +6356,7 @@ pub const NodeFS = struct { }, }; - if (!os.S.ISDIR(stat_.mode)) { + if (!posix.S.ISDIR(stat_.mode)) { const r = this._copySingleFileSync( src, dest, @@ -6400,7 +6405,7 @@ pub const NodeFS = struct { const fd = switch (Syscall.openatOSPath( bun.toFD((std.fs.cwd().fd)), src, - os.O.DIRECTORY | os.O.RDONLY, + bun.O.DIRECTORY | bun.O.RDONLY, 0, )) { .err => |err| { @@ -6514,7 +6519,7 @@ pub const NodeFS = struct { dest: bun.OSPathSliceZ, mode: Constants.Copyfile, /// Stat on posix, file attributes on windows - reuse_stat: ?if (Environment.isWindows) windows.DWORD else std.os.Stat, + reuse_stat: ?if (Environment.isWindows) 
windows.DWORD else std.posix.Stat, args: Arguments.Cp, ) Maybe(Return.CopyFile) { const ret = Maybe(Return.CopyFile); @@ -6533,8 +6538,8 @@ pub const NodeFS = struct { }, }; - if (!os.S.ISREG(stat_.mode)) { - if (os.S.ISLNK(stat_.mode)) { + if (!posix.S.ISREG(stat_.mode)) { + if (posix.S.ISLNK(stat_.mode)) { var mode_: Mode = C.darwin.COPYFILE_ACL | C.darwin.COPYFILE_DATA | C.darwin.COPYFILE_NOFOLLOW_SRC; if (mode.shouldntOverwrite()) { mode_ |= C.darwin.COPYFILE_EXCL; @@ -6563,7 +6568,7 @@ pub const NodeFS = struct { return ret.success; } } else { - const src_fd = switch (Syscall.open(src, std.os.O.RDONLY, 0o644)) { + const src_fd = switch (Syscall.open(src, bun.O.RDONLY, 0o644)) { .result => |result| result, .err => |err| { @memcpy(this.sync_error_buf[0..src.len], src); @@ -6574,10 +6579,10 @@ pub const NodeFS = struct { _ = Syscall.close(src_fd); } - var flags: Mode = std.os.O.CREAT | std.os.O.WRONLY; + var flags: Mode = bun.O.CREAT | bun.O.WRONLY; var wrote: usize = 0; if (mode.shouldntOverwrite()) { - flags |= std.os.O.EXCL; + flags |= bun.O.EXCL; } const dest_fd = dest_fd: { @@ -6641,7 +6646,7 @@ pub const NodeFS = struct { return Maybe(Return.CopyFile).todo(); } - const src_fd = switch (Syscall.open(src, std.os.O.RDONLY | std.os.O.NOFOLLOW, 0o644)) { + const src_fd = switch (Syscall.open(src, bun.O.RDONLY | bun.O.NOFOLLOW, 0o644)) { .result => |result| result, .err => |err| { if (err.getErrno() == .LOOP) { @@ -6662,17 +6667,17 @@ pub const NodeFS = struct { .err => |err| return Maybe(Return.CopyFile){ .err = err }, }; - if (!os.S.ISREG(stat_.mode)) { + if (!posix.S.ISREG(stat_.mode)) { return Maybe(Return.CopyFile){ .err = .{ .errno = @intFromEnum(C.SystemErrno.ENOTSUP), .syscall = .copyfile, } }; } - var flags: Mode = std.os.O.CREAT | std.os.O.WRONLY; + var flags: Mode = bun.O.CREAT | bun.O.WRONLY; var wrote: usize = 0; if (mode.shouldntOverwrite()) { - flags |= std.os.O.EXCL; + flags |= bun.O.EXCL; } const dest_fd = dest_fd: { @@ -6707,7 +6712,7 @@ pub 
const NodeFS = struct { var size: usize = @intCast(@max(stat_.size, 0)); - if (os.S.ISREG(stat_.mode) and bun.can_use_ioctl_ficlone()) { + if (posix.S.ISREG(stat_.mode) and bun.can_use_ioctl_ficlone()) { const rc = bun.C.linux.ioctl_ficlone(dest_fd, src_fd); if (rc == 0) { _ = C.fchmod(dest_fd.cast(), stat_.mode); @@ -6806,7 +6811,7 @@ pub const NodeFS = struct { } return ret.success; } else { - const handle = switch (bun.sys.openatWindows(bun.invalid_fd, src, os.O.RDONLY)) { + const handle = switch (bun.sys.openatWindows(bun.invalid_fd, src, bun.O.RDONLY)) { .err => |err| return .{ .err = err }, .result => |src_fd| src_fd, }; diff --git a/src/bun.js/node/node_fs_binding.zig b/src/bun.js/node/node_fs_binding.zig index a5613f0d0a..9f7bca8c44 100644 --- a/src/bun.js/node/node_fs_binding.zig +++ b/src/bun.js/node/node_fs_binding.zig @@ -3,7 +3,7 @@ const JSC = bun.JSC; const std = @import("std"); const Flavor = JSC.Node.Flavor; const ArgumentsSlice = JSC.Node.ArgumentsSlice; -const system = std.os.system; +const system = std.posix.system; const Maybe = JSC.Maybe; const Encoding = JSC.Node.Encoding; const FeatureFlags = bun.FeatureFlags; diff --git a/src/bun.js/node/node_fs_constant.zig b/src/bun.js/node/node_fs_constant.zig index 4fc515912b..4dad6c1fae 100644 --- a/src/bun.js/node/node_fs_constant.zig +++ b/src/bun.js/node/node_fs_constant.zig @@ -3,8 +3,8 @@ const Environment = bun.Environment; const std = @import("std"); fn get(comptime name: []const u8) comptime_int { - return if (@hasDecl(std.os.O, name)) - return @field(std.os.O, name) + return if (@hasDecl(bun.O, name)) + return @field(bun.O, name) else @compileError("Unknown Constant: " ++ name); } @@ -12,13 +12,13 @@ fn get(comptime name: []const u8) comptime_int { pub const Constants = struct { // File Access Constants /// Constant for fs.access(). File is visible to the calling process. - pub const F_OK = std.os.F_OK; + pub const F_OK = std.posix.F_OK; /// Constant for fs.access(). 
File can be read by the calling process. - pub const R_OK = std.os.R_OK; + pub const R_OK = std.posix.R_OK; /// Constant for fs.access(). File can be written by the calling process. - pub const W_OK = std.os.W_OK; + pub const W_OK = std.posix.W_OK; /// Constant for fs.access(). File can be executed by the calling process. - pub const X_OK = std.os.X_OK; + pub const X_OK = std.posix.X_OK; // File Copy Constants pub const Copyfile = enum(i32) { _, @@ -53,27 +53,27 @@ pub const Constants = struct { pub const COPYFILE_FICLONE_FORCE: i32 = Copyfile.force; // File Open Constants /// Constant for fs.open(). Flag indicating to open a file for read-only access. - pub const O_RDONLY = std.os.O.RDONLY; + pub const O_RDONLY = bun.O.RDONLY; /// Constant for fs.open(). Flag indicating to open a file for write-only access. - pub const O_WRONLY = std.os.O.WRONLY; + pub const O_WRONLY = bun.O.WRONLY; /// Constant for fs.open(). Flag indicating to open a file for read-write access. - pub const O_RDWR = std.os.O.RDWR; + pub const O_RDWR = bun.O.RDWR; /// Constant for fs.open(). Flag indicating to create the file if it does not already exist. - pub const O_CREAT = std.os.O.CREAT; + pub const O_CREAT = bun.O.CREAT; /// Constant for fs.open(). Flag indicating that opening a file should fail if the O_CREAT flag is set and the file already exists. - pub const O_EXCL = std.os.O.EXCL; + pub const O_EXCL = bun.O.EXCL; /// /// Constant for fs.open(). Flag indicating that if path identifies a terminal device, /// opening the path shall not cause that terminal to become the controlling terminal for the process /// (if the process does not already have one). - pub const O_NOCTTY = std.os.O.NOCTTY; + pub const O_NOCTTY = bun.O.NOCTTY; /// Constant for fs.open(). Flag indicating that if the file exists and is a regular file, and the file is opened successfully for write access, its length shall be truncated to zero. 
- pub const O_TRUNC = std.os.O.TRUNC; + pub const O_TRUNC = bun.O.TRUNC; /// Constant for fs.open(). Flag indicating that data will be appended to the end of the file. - pub const O_APPEND = std.os.O.APPEND; + pub const O_APPEND = bun.O.APPEND; /// Constant for fs.open(). Flag indicating that the open should fail if the path is not a directory. - pub const O_DIRECTORY = std.os.O.DIRECTORY; + pub const O_DIRECTORY = bun.O.DIRECTORY; /// /// constant for fs.open(). @@ -82,59 +82,59 @@ pub const Constants = struct { /// This flag is available on Linux operating systems only. pub const O_NOATIME = get("NOATIME"); /// Constant for fs.open(). Flag indicating that the open should fail if the path is a symbolic link. - pub const O_NOFOLLOW = std.os.O.NOFOLLOW; + pub const O_NOFOLLOW = bun.O.NOFOLLOW; /// Constant for fs.open(). Flag indicating that the file is opened for synchronous I/O. - pub const O_SYNC = std.os.O.SYNC; + pub const O_SYNC = bun.O.SYNC; /// Constant for fs.open(). Flag indicating that the file is opened for synchronous I/O with write operations waiting for data integrity. - pub const O_DSYNC = std.os.O.DSYNC; + pub const O_DSYNC = bun.O.DSYNC; /// Constant for fs.open(). Flag indicating to open the symbolic link itself rather than the resource it is pointing to. pub const O_SYMLINK = get("SYMLINK"); /// Constant for fs.open(). When set, an attempt will be made to minimize caching effects of file I/O. pub const O_DIRECT = get("DIRECT"); /// Constant for fs.open(). Flag indicating to open the file in nonblocking mode when possible. - pub const O_NONBLOCK = std.os.O.NONBLOCK; + pub const O_NONBLOCK = bun.O.NONBLOCK; // File Type Constants /// Constant for fs.Stats mode property for determining a file's type. Bit mask used to extract the file type code. - pub const S_IFMT = std.os.S.IFMT; + pub const S_IFMT = std.posix.S.IFMT; /// Constant for fs.Stats mode property for determining a file's type. File type constant for a regular file. 
- pub const S_IFREG = std.os.S.IFREG; + pub const S_IFREG = std.posix.S.IFREG; /// Constant for fs.Stats mode property for determining a file's type. File type constant for a directory. - pub const S_IFDIR = std.os.S.IFDIR; + pub const S_IFDIR = std.posix.S.IFDIR; /// Constant for fs.Stats mode property for determining a file's type. File type constant for a character-oriented device file. - pub const S_IFCHR = std.os.S.IFCHR; + pub const S_IFCHR = std.posix.S.IFCHR; /// Constant for fs.Stats mode property for determining a file's type. File type constant for a block-oriented device file. - pub const S_IFBLK = std.os.S.IFBLK; + pub const S_IFBLK = std.posix.S.IFBLK; /// Constant for fs.Stats mode property for determining a file's type. File type constant for a FIFO/pipe. - pub const S_IFIFO = std.os.S.IFIFO; + pub const S_IFIFO = std.posix.S.IFIFO; /// Constant for fs.Stats mode property for determining a file's type. File type constant for a symbolic link. - pub const S_IFLNK = std.os.S.IFLNK; + pub const S_IFLNK = std.posix.S.IFLNK; /// Constant for fs.Stats mode property for determining a file's type. File type constant for a socket. - pub const S_IFSOCK = std.os.S.IFSOCK; + pub const S_IFSOCK = std.posix.S.IFSOCK; // File Mode Constants /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable, writable and executable by owner. - pub const S_IRWXU = std.os.S.IRWXU; + pub const S_IRWXU = std.posix.S.IRWXU; /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable by owner. - pub const S_IRUSR = std.os.S.IRUSR; + pub const S_IRUSR = std.posix.S.IRUSR; /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating writable by owner. - pub const S_IWUSR = std.os.S.IWUSR; + pub const S_IWUSR = std.posix.S.IWUSR; /// Constant for fs.Stats mode property for determining access permissions for a file. 
File mode indicating executable by owner. - pub const S_IXUSR = std.os.S.IXUSR; + pub const S_IXUSR = std.posix.S.IXUSR; /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable, writable and executable by group. - pub const S_IRWXG = std.os.S.IRWXG; + pub const S_IRWXG = std.posix.S.IRWXG; /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable by group. - pub const S_IRGRP = std.os.S.IRGRP; + pub const S_IRGRP = std.posix.S.IRGRP; /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating writable by group. - pub const S_IWGRP = std.os.S.IWGRP; + pub const S_IWGRP = std.posix.S.IWGRP; /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating executable by group. - pub const S_IXGRP = std.os.S.IXGRP; + pub const S_IXGRP = std.posix.S.IXGRP; /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable, writable and executable by others. - pub const S_IRWXO = std.os.S.IRWXO; + pub const S_IRWXO = std.posix.S.IRWXO; /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating readable by others. - pub const S_IROTH = std.os.S.IROTH; + pub const S_IROTH = std.posix.S.IROTH; /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating writable by others. - pub const S_IWOTH = std.os.S.IWOTH; + pub const S_IWOTH = std.posix.S.IWOTH; /// Constant for fs.Stats mode property for determining access permissions for a file. File mode indicating executable by others. - pub const S_IXOTH = std.os.S.IXOTH; + pub const S_IXOTH = std.posix.S.IXOTH; /// /// When set, a memory file mapping is used to access the file. 
This flag diff --git a/src/bun.js/node/node_fs_stat_watcher.zig b/src/bun.js/node/node_fs_stat_watcher.zig index cdf4f38aa7..74f6717b49 100644 --- a/src/bun.js/node/node_fs_stat_watcher.zig +++ b/src/bun.js/node/node_fs_stat_watcher.zig @@ -32,7 +32,7 @@ fn statToJSStats(globalThis: *JSC.JSGlobalObject, stats: bun.Stat, bigint: bool) } } -/// This is a singleton struct that contains the timer used to schedule restat calls. +/// This is a singleton struct that contains the timer used to schedule re-stat calls. pub const StatWatcherScheduler = struct { current_interval: std.atomic.Value(i32) = .{ .raw = 0 }, task: JSC.WorkPoolTask = .{ .callback = &workPoolCallback }, @@ -67,12 +67,12 @@ pub const StatWatcherScheduler = struct { } fn getInterval(this: *StatWatcherScheduler) i32 { - return this.current_interval.load(.Monotonic); + return this.current_interval.load(.monotonic); } /// Update the current interval and set the timer (this function is thread safe) fn setInterval(this: *StatWatcherScheduler, interval: i32) void { - this.current_interval.store(interval, .Monotonic); + this.current_interval.store(interval, .monotonic); if (this.main_thread == std.Thread.getCurrentId()) { // we are in the main thread we can set the timer @@ -80,7 +80,7 @@ pub const StatWatcherScheduler = struct { return; } // we are not in the main thread we need to schedule a task to set the timer - this.sheduleTimerUpdate(); + this.scheduleTimerUpdate(); } /// Set the timer (this function is not thread safe, should be called only from the main thread) @@ -102,7 +102,7 @@ pub const StatWatcherScheduler = struct { } /// Schedule a task to set the timer in the main thread - fn sheduleTimerUpdate(this: *StatWatcherScheduler) void { + fn scheduleTimerUpdate(this: *StatWatcherScheduler) void { const Holder = struct { scheduler: *StatWatcherScheduler, task: JSC.AnyTask, @@ -136,7 +136,7 @@ pub const StatWatcherScheduler = struct { } pub fn workPoolCallback(task: *JSC.WorkPoolTask) void { - var this: 
*StatWatcherScheduler = @fieldParentPtr(StatWatcherScheduler, "task", task); + var this: *StatWatcherScheduler = @alignCast(@fieldParentPtr("task", task)); // Instant.now will not fail on our target platforms. const now = std.time.Instant.now() catch unreachable; @@ -147,7 +147,7 @@ pub const StatWatcherScheduler = struct { var contain_watchers = false; while (iter.next()) |watcher| { if (watcher.closed) { - watcher.used_by_scheduler_thread.store(false, .Release); + watcher.used_by_scheduler_thread.store(false, .release); continue; } contain_watchers = true; @@ -326,9 +326,9 @@ pub const StatWatcher = struct { } pub fn hasPendingActivity(this: *StatWatcher) callconv(.C) bool { - @fence(.Acquire); + @fence(.acquire); - return this.used_by_scheduler_thread.load(.Acquire); + return this.used_by_scheduler_thread.load(.acquire); } /// Stops file watching but does not free the instance. @@ -367,12 +367,12 @@ pub const StatWatcher = struct { } fn workPoolCallback(task: *JSC.WorkPoolTask) void { - const initial_stat_task: *InitialStatTask = @fieldParentPtr(InitialStatTask, "task", task); + const initial_stat_task: *InitialStatTask = @fieldParentPtr("task", task); defer bun.default_allocator.destroy(initial_stat_task); const this = initial_stat_task.watcher; if (this.closed) { - this.used_by_scheduler_thread.store(false, .Release); + this.used_by_scheduler_thread.store(false, .release); return; } @@ -395,7 +395,7 @@ pub const StatWatcher = struct { pub fn initialStatSuccessOnMainThread(this: *StatWatcher) void { if (this.closed) { - this.used_by_scheduler_thread.store(false, .Release); + this.used_by_scheduler_thread.store(false, .release); return; } @@ -408,7 +408,7 @@ pub const StatWatcher = struct { pub fn initialStatErrorOnMainThread(this: *StatWatcher) void { if (this.closed) { - this.used_by_scheduler_thread.store(false, .Release); + this.used_by_scheduler_thread.store(false, .release); return; } diff --git a/src/bun.js/node/node_fs_watcher.zig 
b/src/bun.js/node/node_fs_watcher.zig index 65599be04a..fc785f10dc 100644 --- a/src/bun.js/node/node_fs_watcher.zig +++ b/src/bun.js/node/node_fs_watcher.zig @@ -489,7 +489,7 @@ pub const FSWatcher = struct { pub fn initJS(this: *FSWatcher, listener: JSC.JSValue) void { if (this.persistent) { this.poll_ref.ref(this.ctx); - _ = this.pending_activity_count.fetchAdd(1, .Monotonic); + _ = this.pending_activity_count.fetchAdd(1, .monotonic); } const js_this = FSWatcher.toJS(this, this.globalThis); @@ -523,7 +523,7 @@ pub const FSWatcher = struct { pub fn emitAbort(this: *FSWatcher, err: JSC.JSValue) void { if (this.closed) return; - _ = this.pending_activity_count.fetchAdd(1, .Monotonic); + _ = this.pending_activity_count.fetchAdd(1, .monotonic); defer this.close(); defer this.unrefTask(); @@ -632,28 +632,25 @@ pub const FSWatcher = struct { // this can be called from Watcher Thread or JS Context Thread pub fn refTask(this: *FSWatcher) bool { - { - @fence(.Acquire); - this.mutex.lock(); - defer this.mutex.unlock(); - if (this.closed) return false; - _ = this.pending_activity_count.fetchAdd(1, .Monotonic); - } + @fence(.acquire); + this.mutex.lock(); + defer this.mutex.unlock(); + if (this.closed) return false; + _ = this.pending_activity_count.fetchAdd(1, .monotonic); return true; } pub fn hasPendingActivity(this: *FSWatcher) callconv(.C) bool { - @fence(.Acquire); - return this.pending_activity_count.load(.Acquire) > 0; + @fence(.acquire); + return this.pending_activity_count.load(.acquire) > 0; } pub fn unrefTask(this: *FSWatcher) void { this.mutex.lock(); defer this.mutex.unlock(); - // JSC eventually will free it - _ = this.pending_activity_count.fetchSub(1, .Monotonic); + _ = this.pending_activity_count.fetchSub(1, .monotonic); } pub fn close(this: *FSWatcher) void { diff --git a/src/bun.js/node/node_os.zig b/src/bun.js/node/node_os.zig index 9cd41c3cad..54e2ae5379 100644 --- a/src/bun.js/node/node_os.zig +++ b/src/bun.js/node/node_os.zig @@ -391,7 +391,7 @@ pub 
const OS = struct { var name_buffer: [bun.HOST_NAME_MAX]u8 = undefined; - return JSC.ZigString.init(std.os.gethostname(&name_buffer) catch "unknown").withEncoding().toValueGC(globalThis); + return JSC.ZigString.init(std.posix.gethostname(&name_buffer) catch "unknown").withEncoding().toValueGC(globalThis); } pub fn loadavg(globalThis: *JSC.JSGlobalObject, _: *JSC.CallFrame) callconv(.C) JSC.JSValue { @@ -420,7 +420,7 @@ pub const OS = struct { const err = JSC.SystemError{ .message = bun.String.static("A system error occurred: getifaddrs returned an error"), .code = bun.String.static(@as(string, @tagName(JSC.Node.ErrorCode.ERR_SYSTEM_ERROR))), - .errno = @intFromEnum(std.os.errno(rc)), + .errno = @intFromEnum(std.posix.errno(rc)), .syscall = bun.String.static("getifaddrs"), }; @@ -445,9 +445,9 @@ pub const OS = struct { pub fn isLinkLayer(iface: *C.ifaddrs) bool { if (iface.ifa_addr == null) return false; return if (comptime Environment.isLinux) - return iface.ifa_addr.*.sa_family == std.os.AF.PACKET + return iface.ifa_addr.*.sa_family == std.posix.AF.PACKET else if (comptime Environment.isMac) - return iface.ifa_addr.?.*.family == std.os.AF.LINK + return iface.ifa_addr.?.*.family == std.posix.AF.LINK else unreachable; } @@ -477,8 +477,8 @@ pub const OS = struct { if (helpers.skip(iface) or helpers.isLinkLayer(iface)) continue; const interface_name = std.mem.sliceTo(iface.ifa_name, 0); - const addr = std.net.Address.initPosix(@alignCast(@as(*std.os.sockaddr, @ptrCast(iface.ifa_addr)))); - const netmask = std.net.Address.initPosix(@alignCast(@as(*std.os.sockaddr, @ptrCast(iface.ifa_netmask)))); + const addr = std.net.Address.initPosix(@alignCast(@as(*std.posix.sockaddr, @ptrCast(iface.ifa_addr)))); + const netmask = std.net.Address.initPosix(@alignCast(@as(*std.posix.sockaddr, @ptrCast(iface.ifa_netmask)))); var interface = JSC.JSValue.createEmptyObject(globalThis, 7); @@ -488,8 +488,8 @@ pub const OS = struct { // Compute the CIDR suffix; returns null if the netmask 
cannot // be converted to a CIDR suffix const maybe_suffix: ?u8 = switch (addr.any.family) { - std.os.AF.INET => netmaskToCIDRSuffix(netmask.in.sa.addr), - std.os.AF.INET6 => netmaskToCIDRSuffix(@as(u128, @bitCast(netmask.in6.sa.addr))), + std.posix.AF.INET => netmaskToCIDRSuffix(netmask.in.sa.addr), + std.posix.AF.INET6 => netmaskToCIDRSuffix(@as(u128, @bitCast(netmask.in6.sa.addr))), else => null, }; @@ -522,8 +522,8 @@ pub const OS = struct { // family Either IPv4 or IPv6 interface.put(globalThis, JSC.ZigString.static("family"), (switch (addr.any.family) { - std.os.AF.INET => JSC.ZigString.static("IPv4"), - std.os.AF.INET6 => JSC.ZigString.static("IPv6"), + std.posix.AF.INET => JSC.ZigString.static("IPv4"), + std.posix.AF.INET6 => JSC.ZigString.static("IPv6"), else => JSC.ZigString.static("unknown"), }).toValueGC(globalThis)); @@ -541,7 +541,7 @@ pub const OS = struct { // This is the correct link-layer interface entry for the current interface, // cast to a link-layer socket address if (comptime Environment.isLinux) { - break @as(?*std.os.sockaddr.ll, @ptrCast(@alignCast(ll_iface.ifa_addr))); + break @as(?*std.posix.sockaddr.ll, @ptrCast(@alignCast(ll_iface.ifa_addr))); } else if (comptime Environment.isMac) { break @as(?*C.sockaddr_dl, @ptrCast(@alignCast(ll_iface.ifa_addr))); } else { @@ -574,7 +574,7 @@ pub const OS = struct { interface.put(globalThis, JSC.ZigString.static("internal"), JSC.JSValue.jsBoolean(helpers.isLoopback(iface))); // scopeid The numeric IPv6 scope ID (only specified when family is IPv6) - if (addr.any.family == std.os.AF.INET6) { + if (addr.any.family == std.posix.AF.INET6) { interface.put(globalThis, JSC.ZigString.static("scope_id"), JSC.JSValue.jsNumber(addr.in6.sa.scope_id)); } @@ -628,8 +628,8 @@ pub const OS = struct { // Compute the CIDR suffix; returns null if the netmask cannot // be converted to a CIDR suffix const maybe_suffix: ?u8 = switch (iface.address.address4.family) { - std.os.AF.INET => 
netmaskToCIDRSuffix(iface.netmask.netmask4.addr), - std.os.AF.INET6 => netmaskToCIDRSuffix(@as(u128, @bitCast(iface.netmask.netmask6.addr))), + std.posix.AF.INET => netmaskToCIDRSuffix(iface.netmask.netmask4.addr), + std.posix.AF.INET6 => netmaskToCIDRSuffix(@as(u128, @bitCast(iface.netmask.netmask6.addr))), else => null, }; @@ -665,8 +665,8 @@ pub const OS = struct { } // family interface.put(globalThis, JSC.ZigString.static("family"), (switch (iface.address.address4.family) { - std.os.AF.INET => JSC.ZigString.static("IPv4"), - std.os.AF.INET6 => JSC.ZigString.static("IPv6"), + std.posix.AF.INET => JSC.ZigString.static("IPv4"), + std.posix.AF.INET6 => JSC.ZigString.static("IPv6"), else => JSC.ZigString.static("unknown"), }).toValueGC(globalThis)); @@ -688,7 +688,7 @@ pub const OS = struct { interface.put(globalThis, JSC.ZigString.static("cidr"), cidr); // scopeid - if (iface.address.address4.family == std.os.AF.INET6) { + if (iface.address.address4.family == std.posix.AF.INET6) { interface.put(globalThis, JSC.ZigString.static("scopeid"), JSC.JSValue.jsNumber(iface.address.address6.scope_id)); } @@ -840,13 +840,8 @@ pub const OS = struct { result.put(globalThis, JSC.ZigString.static("username"), JSC.ZigString.init(username).withEncoding().toValueGC(globalThis)); result.put(globalThis, JSC.ZigString.static("shell"), JSC.ZigString.init(bun.getenvZ("SHELL") orelse "unknown").withEncoding().toValueGC(globalThis)); - if (comptime Environment.isLinux) { - result.put(globalThis, JSC.ZigString.static("uid"), JSC.JSValue.jsNumber(std.os.linux.getuid())); - result.put(globalThis, JSC.ZigString.static("gid"), JSC.JSValue.jsNumber(std.os.linux.getgid())); - } else { - result.put(globalThis, JSC.ZigString.static("uid"), JSC.JSValue.jsNumber(C.darwin.getuid())); - result.put(globalThis, JSC.ZigString.static("gid"), JSC.JSValue.jsNumber(C.darwin.getgid())); - } + result.put(globalThis, JSC.ZigString.static("uid"), JSC.JSValue.jsNumber(C.getuid())); + result.put(globalThis, 
JSC.ZigString.static("gid"), JSC.JSValue.jsNumber(C.getgid())); } return result; diff --git a/src/bun.js/node/os/constants.zig b/src/bun.js/node/os/constants.zig index 858f3f2f01..cb3d8c56d0 100644 --- a/src/bun.js/node/os/constants.zig +++ b/src/bun.js/node/os/constants.zig @@ -7,29 +7,29 @@ const JSC = bun.JSC; const ConstantType = enum { ERRNO, ERRNO_WIN, SIG, DLOPEN, OTHER }; fn getErrnoConstant(comptime name: []const u8) ?comptime_int { - return if (@hasField(std.os.E, name)) - return @intFromEnum(@field(std.os.E, name)) + return if (@hasField(std.posix.E, name)) + return @intFromEnum(@field(std.posix.E, name)) else return null; } fn getWindowsErrnoConstant(comptime name: []const u8) ?comptime_int { - return if (@hasField(std.os.E, name)) + return if (@hasField(std.posix.E, name)) return @intFromEnum(@field(std.os.windows.ws2_32.WinsockError, name)) else return null; } fn getSignalsConstant(comptime name: []const u8) ?comptime_int { - return if (@hasDecl(std.os.SIG, name)) - return @field(std.os.SIG, name) + return if (@hasDecl(std.posix.SIG, name)) + return @field(std.posix.SIG, name) else return null; } fn getDlopenConstant(comptime name: []const u8) ?comptime_int { - return if (@hasDecl(std.os.system.RTLD, name)) - return @field(std.os.system.RTLD, name) + return if (@hasDecl(std.posix.system.RTLD, name)) + return @field(std.posix.system.RTLD, name) else return null; } diff --git a/src/bun.js/node/path_watcher.zig b/src/bun.js/node/path_watcher.zig index 2560e8ed62..f3fda8461b 100644 --- a/src/bun.js/node/path_watcher.zig +++ b/src/bun.js/node/path_watcher.zig @@ -51,27 +51,27 @@ pub const PathWatcherManager = struct { }; fn refPendingTask(this: *PathWatcherManager) bool { - @fence(.Release); + @fence(.release); this.mutex.lock(); defer this.mutex.unlock(); if (this.deinit_on_last_task) return false; this.pending_tasks += 1; - this.has_pending_tasks.store(true, .Release); + this.has_pending_tasks.store(true, .release); return true; } fn 
hasPendingTasks(this: *PathWatcherManager) callconv(.C) bool { - @fence(.Acquire); - return this.has_pending_tasks.load(.Acquire); + @fence(.acquire); + return this.has_pending_tasks.load(.acquire); } fn unrefPendingTask(this: *PathWatcherManager) void { - @fence(.Release); + @fence(.release); this.mutex.lock(); defer this.mutex.unlock(); this.pending_tasks -= 1; if (this.deinit_on_last_task and this.pending_tasks == 0) { - this.has_pending_tasks.store(false, .Release); + this.has_pending_tasks.store(false, .release); this.deinit(); } } @@ -90,7 +90,7 @@ pub const PathWatcherManager = struct { } switch (switch (Environment.os) { - else => bun.sys.open(path, std.os.O.DIRECTORY | std.os.O.RDONLY, 0), + else => bun.sys.open(path, bun.O.DIRECTORY | bun.O.RDONLY, 0), // windows bun.sys.open does not pass iterable=true, .windows => bun.sys.openDirAtWindowsA(bun.FD.cwd(), path, .{ .iterable = true, .read_only = true }), }) { @@ -132,9 +132,9 @@ pub const PathWatcherManager = struct { } const PathWatcherManagerError = std.mem.Allocator.Error || - std.os.KQueueError || + std.posix.KQueueError || error{KQueueError} || - std.os.INotifyInitError || + std.posix.INotifyInitError || std.Thread.SpawnError; pub fn init(vm: *JSC.VirtualMachine) PathWatcherManagerError!*PathWatcherManager { @@ -356,7 +356,7 @@ pub const PathWatcherManager = struct { watcher_list: bun.BabyList(*PathWatcher) = .{}, pub fn callback(task: *JSC.WorkPoolTask) void { - var routine = @fieldParentPtr(@This(), "task", task); + var routine: *@This() = @fieldParentPtr("task", task); defer routine.deinit(); routine.run(); } @@ -448,7 +448,9 @@ pub const PathWatcherManager = struct { .errno = @truncate(@intFromEnum(switch (err) { error.AccessDenied => bun.C.E.ACCES, error.SystemResources => bun.C.E.NOMEM, - error.Unexpected => bun.C.E.INVAL, + error.Unexpected, + error.InvalidUtf8, + => bun.C.E.INVAL, })), .syscall = .watch, }, @@ -827,39 +829,39 @@ pub const PathWatcher = struct { } pub fn 
refPendingDirectory(this: *PathWatcher) bool { - @fence(.Release); + @fence(.release); this.mutex.lock(); defer this.mutex.unlock(); if (this.isClosed()) return false; this.pending_directories += 1; - this.has_pending_directories.store(true, .Release); + this.has_pending_directories.store(true, .release); return true; } pub fn hasPendingDirectories(this: *PathWatcher) callconv(.C) bool { - @fence(.Acquire); - return this.has_pending_directories.load(.Acquire); + @fence(.acquire); + return this.has_pending_directories.load(.acquire); } pub fn isClosed(this: *PathWatcher) bool { - @fence(.Acquire); - return this.closed.load(.Acquire); + @fence(.acquire); + return this.closed.load(.acquire); } pub fn setClosed(this: *PathWatcher) void { this.mutex.lock(); defer this.mutex.unlock(); - @fence(.Release); - this.closed.store(true, .Release); + @fence(.release); + this.closed.store(true, .release); } pub fn unrefPendingDirectory(this: *PathWatcher) void { - @fence(.Release); + @fence(.release); this.mutex.lock(); defer this.mutex.unlock(); this.pending_directories -= 1; if (this.isClosed() and this.pending_directories == 0) { - this.has_pending_directories.store(false, .Release); + this.has_pending_directories.store(false, .release); this.deinit(); } } @@ -985,6 +987,7 @@ pub fn watch( error.NameTooLong, error.BadPathName, error.InvalidUtf8, + error.InvalidWtf8, => bun.C.E.INVAL, error.OutOfMemory, diff --git a/src/bun.js/node/types.zig b/src/bun.js/node/types.zig index 19736a53e5..0f5b31f4da 100644 --- a/src/bun.js/node/types.zig +++ b/src/bun.js/node/types.zig @@ -4,10 +4,10 @@ const bun = @import("root").bun; const meta = bun.meta; const windows = bun.windows; const heap_allocator = bun.default_allocator; -const is_bindgen: bool = meta.globalOption("bindgen", bool) orelse false; +const is_bindgen: bool = false; const kernel32 = windows.kernel32; const logger = bun.logger; -const os = std.os; +const posix = std.posix; const path_handler = bun.path; const strings = 
bun.strings; const string = bun.string; @@ -93,7 +93,7 @@ pub const Buffer = JSC.MarkedArrayBuffer; /// On windows, this is what libuv expects /// On unix it is what the utimens api expects -pub const TimeLike = if (Environment.isWindows) f64 else std.os.timespec; +pub const TimeLike = if (Environment.isWindows) f64 else std.posix.timespec; pub const Flavor = enum { sync, @@ -219,9 +219,9 @@ pub fn Maybe(comptime ReturnTypeT: type, comptime ErrorTypeT: type) type { }; } - pub inline fn getErrno(this: @This()) os.E { + pub inline fn getErrno(this: @This()) posix.E { return switch (this) { - .result => os.E.SUCCESS, + .result => posix.E.SUCCESS, .err => |e| @enumFromInt(e.errno), }; } @@ -274,7 +274,7 @@ pub fn Maybe(comptime ReturnTypeT: type, comptime ErrorTypeT: type) type { } pub inline fn errnoSysP(rc: anytype, syscall: Syscall.Tag, path: anytype) ?@This() { - if (meta.Child(@TypeOf(path)) == u16) { + if (bun.meta.Item(@TypeOf(path)) == u16) { @compileError("Do not pass WString path to errnoSysP, it needs the path encoded as utf8"); } if (comptime Environment.isWindows) { @@ -557,7 +557,7 @@ pub const StringOrBuffer = union(enum) { pub const ErrorCode = @import("./nodejs_error_code.zig").Code; // We can't really use Zig's error handling for syscalls because Node.js expects the "real" errno to be returned -// and various issues with std.os that make it too unstable for arbitrary user input (e.g. how .BADF is marked as unreachable) +// and various issues with std.posix that make it too unstable for arbitrary user input (e.g. how .BADF is marked as unreachable) /// https://github.com/nodejs/node/blob/master/lib/buffer.js#L587 pub const Encoding = enum(u8) { @@ -1275,44 +1275,44 @@ pub const PathOrFileDescriptor = union(Tag) { pub const FileSystemFlags = enum(Mode) { /// Open file for appending. The file is created if it does not exist. 
- a = std.os.O.APPEND | std.os.O.WRONLY | std.os.O.CREAT, + a = bun.O.APPEND | bun.O.WRONLY | bun.O.CREAT, /// Like 'a' but fails if the path exists. - // @"ax" = std.os.O.APPEND | std.os.O.EXCL, + // @"ax" = bun.O.APPEND | bun.O.EXCL, /// Open file for reading and appending. The file is created if it does not exist. - // @"a+" = std.os.O.APPEND | std.os.O.RDWR, + // @"a+" = bun.O.APPEND | bun.O.RDWR, /// Like 'a+' but fails if the path exists. - // @"ax+" = std.os.O.APPEND | std.os.O.RDWR | std.os.O.EXCL, + // @"ax+" = bun.O.APPEND | bun.O.RDWR | bun.O.EXCL, /// Open file for appending in synchronous mode. The file is created if it does not exist. - // @"as" = std.os.O.APPEND, + // @"as" = bun.O.APPEND, /// Open file for reading and appending in synchronous mode. The file is created if it does not exist. - // @"as+" = std.os.O.APPEND | std.os.O.RDWR, + // @"as+" = bun.O.APPEND | bun.O.RDWR, /// Open file for reading. An exception occurs if the file does not exist. - r = std.os.O.RDONLY, + r = bun.O.RDONLY, /// Open file for reading and writing. An exception occurs if the file does not exist. - // @"r+" = std.os.O.RDWR, + // @"r+" = bun.O.RDWR, /// Open file for reading and writing in synchronous mode. Instructs the operating system to bypass the local file system cache. /// This is primarily useful for opening files on NFS mounts as it allows skipping the potentially stale local cache. It has a very real impact on I/O performance so using this flag is not recommended unless it is needed. /// This doesn't turn fs.open() or fsPromises.open() into a synchronous blocking call. If synchronous operation is desired, something like fs.openSync() should be used. - // @"rs+" = std.os.O.RDWR, + // @"rs+" = bun.O.RDWR, /// Open file for writing. The file is created (if it does not exist) or truncated (if it exists). - w = std.os.O.WRONLY | std.os.O.CREAT, + w = bun.O.WRONLY | bun.O.CREAT, /// Like 'w' but fails if the path exists. 
- // @"wx" = std.os.O.WRONLY | std.os.O.TRUNC, + // @"wx" = bun.O.WRONLY | bun.O.TRUNC, // /// Open file for reading and writing. The file is created (if it does not exist) or truncated (if it exists). - // @"w+" = std.os.O.RDWR | std.os.O.CREAT, + // @"w+" = bun.O.RDWR | bun.O.CREAT, // /// Like 'w+' but fails if the path exists. - // @"wx+" = std.os.O.RDWR | std.os.O.EXCL, + // @"wx+" = bun.O.RDWR | bun.O.EXCL, _, - const O_RDONLY: Mode = std.os.O.RDONLY; - const O_RDWR: Mode = std.os.O.RDWR; - const O_APPEND: Mode = std.os.O.APPEND; - const O_CREAT: Mode = std.os.O.CREAT; - const O_WRONLY: Mode = std.os.O.WRONLY; - const O_EXCL: Mode = std.os.O.EXCL; + const O_RDONLY: Mode = bun.O.RDONLY; + const O_RDWR: Mode = bun.O.RDWR; + const O_APPEND: Mode = bun.O.APPEND; + const O_CREAT: Mode = bun.O.CREAT; + const O_WRONLY: Mode = bun.O.WRONLY; + const O_EXCL: Mode = bun.O.EXCL; const O_SYNC: Mode = 0; - const O_TRUNC: Mode = std.os.O.TRUNC; + const O_TRUNC: Mode = bun.O.TRUNC; const map = bun.ComptimeStringMap(Mode, .{ .{ "r", O_RDONLY }, @@ -1483,7 +1483,7 @@ pub fn StatType(comptime Big: bool) type { const This = @This(); - const StatTimespec = if (Environment.isWindows) bun.windows.libuv.uv_timespec_t else std.os.timespec; + const StatTimespec = if (Environment.isWindows) bun.windows.libuv.uv_timespec_t else std.posix.timespec; inline fn toNanoseconds(ts: StatTimespec) Timestamp { const tv_sec: i64 = @intCast(ts.tv_sec); @@ -1584,7 +1584,7 @@ pub fn StatType(comptime Big: bool) type { return @truncate(this.mode); } - const S = if (Environment.isWindows) bun.C.S else os.system.S; + const S = if (Environment.isWindows) bun.C.S else posix.system.S; pub fn isBlockDevice(this: *This) JSC.JSValue { return JSC.JSValue.jsBoolean(S.ISBLK(@intCast(this.modeInternal()))); @@ -2084,7 +2084,10 @@ pub const Path = struct { pub fn getCwdU8(buf: []u8) MaybeBuf(u8) { const result = bun.getcwd(buf) catch { - return MaybeBuf(u8).errnoSys(0, Syscall.Tag.getcwd).?; + return 
MaybeBuf(u8).errnoSys( + @as(c_int, 0), + Syscall.Tag.getcwd, + ).?; }; return MaybeBuf(u8){ .result = result }; } @@ -4414,12 +4417,12 @@ pub const Path = struct { break :brk u16Buf[0..bufSize :0]; } }; - // Zig's std.os.getenvW has logic to support keys like `=${resolvedDevice}`: + // Zig's std.posix.getenvW has logic to support keys like `=${resolvedDevice}`: // https://github.com/ziglang/zig/blob/7bd8b35a3dfe61e59ffea39d464e84fbcdead29a/lib/std/os.zig#L2126-L2130 // // TODO: Enable test once spawnResult.stdout works on Windows. // test/js/node/path/resolve.test.js - if (std.os.getenvW(key_w)) |r| { + if (std.process.getenvW(key_w)) |r| { if (T == u16) { bufSize = r.len; @memcpy(buf2[0..bufSize], r); diff --git a/src/bun.js/node/win_watcher.zig b/src/bun.js/node/win_watcher.zig index dc4fcbd949..9469ccc62a 100644 --- a/src/bun.js/node/win_watcher.zig +++ b/src/bun.js/node/win_watcher.zig @@ -119,7 +119,7 @@ pub const PathWatcher = struct { Output.debugWarn("uvEventCallback called with null data", .{}); return; } - const this: *PathWatcher = @alignCast(@fieldParentPtr(PathWatcher, "handle", event)); + const this: *PathWatcher = @alignCast(@fieldParentPtr("handle", event)); if (comptime bun.Environment.isDebug) { bun.assert(event.data == @as(?*anyopaque, @ptrCast(this))); } diff --git a/src/bun.js/rare_data.zig b/src/bun.js/rare_data.zig index e56dd0c7e4..9cbc965718 100644 --- a/src/bun.js/rare_data.zig +++ b/src/bun.js/rare_data.zig @@ -352,7 +352,7 @@ pub fn stdin(rare: *RareData) *Blob.Store { .pathlike = .{ .fd = fd, }, - .is_atty = if (bun.STDIN_FD.isValid()) std.os.isatty(bun.STDIN_FD.cast()) else false, + .is_atty = if (bun.STDIN_FD.isValid()) std.posix.isatty(bun.STDIN_FD.cast()) else false, .mode = mode, }, }, diff --git a/src/bun.js/test/jest.zig b/src/bun.js/test/jest.zig index 951246d09f..50c7d2cbc4 100644 --- a/src/bun.js/test/jest.zig +++ b/src/bun.js/test/jest.zig @@ -46,7 +46,7 @@ const CallFrame = JSC.CallFrame; const VirtualMachine = 
JSC.VirtualMachine; const Fs = bun.fs; -const is_bindgen: bool = std.meta.globalOption("bindgen", bool) orelse false; +const is_bindgen: bool = false; const ArrayIdentityContext = bun.ArrayIdentityContext; diff --git a/src/bun.js/test/pretty_format.zig b/src/bun.js/test/pretty_format.zig index b0daaf776c..7f027b5b07 100644 --- a/src/bun.js/test/pretty_format.zig +++ b/src/bun.js/test/pretty_format.zig @@ -4,7 +4,7 @@ const Output = bun.Output; const JSC = bun.JSC; const JSGlobalObject = JSC.JSGlobalObject; const JSValue = JSC.JSValue; -const is_bindgen: bool = std.meta.globalOption("bindgen", bool) orelse false; +const is_bindgen: bool = false; const default_allocator = bun.default_allocator; const CAPI = JSC.C; const ZigString = JSC.ZigString; diff --git a/src/bun.js/test/snapshot.zig b/src/bun.js/test/snapshot.zig index 5af8574f98..41fcea954d 100644 --- a/src/bun.js/test/snapshot.zig +++ b/src/bun.js/test/snapshot.zig @@ -248,8 +248,8 @@ pub const Snapshots = struct { remain[0] = 0; const snapshot_file_path = snapshot_file_path_buf[0 .. 
snapshot_file_path_buf.len - remain.len :0]; - var flags: bun.Mode = std.os.O.CREAT | std.os.O.RDWR; - if (this.update_snapshots) flags |= std.os.O.TRUNC; + var flags: bun.Mode = bun.O.CREAT | bun.O.RDWR; + if (this.update_snapshots) flags |= bun.O.TRUNC; const fd = switch (bun.sys.open(snapshot_file_path, flags, 0o644)) { .result => |_fd| _fd, .err => |err| return JSC.Maybe(void){ diff --git a/src/bun.js/unbounded_queue.zig b/src/bun.js/unbounded_queue.zig index 5fafbd48d4..a9050d52dc 100644 --- a/src/bun.js/unbounded_queue.zig +++ b/src/bun.js/unbounded_queue.zig @@ -1,7 +1,7 @@ const std = @import("std"); const bun = @import("root").bun; -const os = std.os; +const posix = std.posix; const mem = std.mem; const meta = std.meta; const atomic = std.atomic; @@ -58,87 +58,87 @@ pub fn UnboundedQueue(comptime T: type, comptime next_field: meta.FieldEnum(T)) }, pub fn push(self: *Self, src: *T) void { - assert(@atomicRmw(usize, &self.count, .Add, 1, .Release) >= 0); + assert(@atomicRmw(usize, &self.count, .Add, 1, .release) >= 0); @field(src, next) = null; - const old_back = @atomicRmw(?*T, &self.back, .Xchg, src, .AcqRel) orelse &self.front; + const old_back = @atomicRmw(?*T, &self.back, .Xchg, src, .acq_rel) orelse &self.front; @field(old_back, next) = src; } pub fn pushBatch(self: *Self, first: *T, last: *T, count: usize) void { - assert(@atomicRmw(usize, &self.count, .Add, count, .Release) >= 0); + assert(@atomicRmw(usize, &self.count, .Add, count, .release) >= 0); @field(last, next) = null; - const old_back = @atomicRmw(?*T, &self.back, .Xchg, last, .AcqRel) orelse &self.front; + const old_back = @atomicRmw(?*T, &self.back, .Xchg, last, .acq_rel) orelse &self.front; @field(old_back, next) = first; } pub fn pop(self: *Self) ?*T { - const first = @atomicLoad(?*T, &@field(self.front, next), .Acquire) orelse return null; - if (@atomicLoad(?*T, &@field(first, next), .Acquire)) |next_item| { - @atomicStore(?*T, &@field(self.front, next), next_item, .Monotonic); - 
assert(@atomicRmw(usize, &self.count, .Sub, 1, .Monotonic) >= 1); + const first = @atomicLoad(?*T, &@field(self.front, next), .acquire) orelse return null; + if (@atomicLoad(?*T, &@field(first, next), .acquire)) |next_item| { + @atomicStore(?*T, &@field(self.front, next), next_item, .monotonic); + assert(@atomicRmw(usize, &self.count, .Sub, 1, .monotonic) >= 1); return first; } - const last = @atomicLoad(?*T, &self.back, .Acquire) orelse &self.front; + const last = @atomicLoad(?*T, &self.back, .acquire) orelse &self.front; if (first != last) return null; - @atomicStore(?*T, &@field(self.front, next), null, .Monotonic); - if (@cmpxchgStrong(?*T, &self.back, last, &self.front, .AcqRel, .Acquire) == null) { - assert(@atomicRmw(usize, &self.count, .Sub, 1, .Monotonic) >= 1); + @atomicStore(?*T, &@field(self.front, next), null, .monotonic); + if (@cmpxchgStrong(?*T, &self.back, last, &self.front, .acq_rel, .acquire) == null) { + assert(@atomicRmw(usize, &self.count, .Sub, 1, .monotonic) >= 1); return first; } - var next_item = @atomicLoad(?*T, &@field(first, next), .Acquire); + var next_item = @atomicLoad(?*T, &@field(first, next), .acquire); while (next_item == null) : (atomic.spinLoopHint()) { - next_item = @atomicLoad(?*T, &@field(first, next), .Acquire); + next_item = @atomicLoad(?*T, &@field(first, next), .acquire); } - @atomicStore(?*T, &@field(self.front, next), next_item, .Monotonic); - assert(@atomicRmw(usize, &self.count, .Sub, 1, .Monotonic) >= 1); + @atomicStore(?*T, &@field(self.front, next), next_item, .monotonic); + assert(@atomicRmw(usize, &self.count, .Sub, 1, .monotonic) >= 1); return first; } pub fn popBatch(self: *Self) Self.Batch { var batch: Self.Batch = .{}; - var front = @atomicLoad(?*T, &@field(self.front, next), .Acquire) orelse return batch; + var front = @atomicLoad(?*T, &@field(self.front, next), .acquire) orelse return batch; batch.front = front; - var next_item = @atomicLoad(?*T, &@field(front, next), .Acquire); - while (next_item) 
|next_node| : (next_item = @atomicLoad(?*T, &@field(next_node, next), .Acquire)) { + var next_item = @atomicLoad(?*T, &@field(front, next), .acquire); + while (next_item) |next_node| : (next_item = @atomicLoad(?*T, &@field(next_node, next), .acquire)) { batch.count += 1; batch.last = front; front = next_node; } - const last = @atomicLoad(?*T, &self.back, .Acquire) orelse &self.front; + const last = @atomicLoad(?*T, &self.back, .acquire) orelse &self.front; if (front != last) { - @atomicStore(?*T, &@field(self.front, next), front, .Release); - assert(@atomicRmw(usize, &self.count, .Sub, batch.count, .Monotonic) >= batch.count); + @atomicStore(?*T, &@field(self.front, next), front, .release); + assert(@atomicRmw(usize, &self.count, .Sub, batch.count, .monotonic) >= batch.count); return batch; } - @atomicStore(?*T, &@field(self.front, next), null, .Monotonic); - if (@cmpxchgStrong(?*T, &self.back, last, &self.front, .AcqRel, .Acquire) == null) { + @atomicStore(?*T, &@field(self.front, next), null, .monotonic); + if (@cmpxchgStrong(?*T, &self.back, last, &self.front, .acq_rel, .acquire) == null) { batch.count += 1; batch.last = front; - assert(@atomicRmw(usize, &self.count, .Sub, batch.count, .Monotonic) >= batch.count); + assert(@atomicRmw(usize, &self.count, .Sub, batch.count, .monotonic) >= batch.count); return batch; } - next_item = @atomicLoad(?*T, &@field(front, next), .Acquire); + next_item = @atomicLoad(?*T, &@field(front, next), .acquire); while (next_item == null) : (atomic.spinLoopHint()) { - next_item = @atomicLoad(?*T, &@field(front, next), .Acquire); + next_item = @atomicLoad(?*T, &@field(front, next), .acquire); } batch.count += 1; - @atomicStore(?*T, &@field(self.front, next), next_item, .Monotonic); + @atomicStore(?*T, &@field(self.front, next), next_item, .monotonic); batch.last = front; - assert(@atomicRmw(usize, &self.count, .Sub, batch.count, .Monotonic) >= batch.count); + assert(@atomicRmw(usize, &self.count, .Sub, batch.count, .monotonic) >= 
batch.count); return batch; } pub fn peek(self: *Self) usize { - const count = @atomicLoad(usize, &self.count, .Acquire); + const count = @atomicLoad(usize, &self.count, .acquire); assert(count >= 0); return count; } diff --git a/src/bun.js/web_worker.zig b/src/bun.js/web_worker.zig index f88386d353..28a5dbdf6d 100644 --- a/src/bun.js/web_worker.zig +++ b/src/bun.js/web_worker.zig @@ -54,11 +54,11 @@ pub const WebWorker = struct { } pub fn hasRequestedTerminate(this: *const WebWorker) bool { - return this.requested_terminate.load(.Monotonic); + return this.requested_terminate.load(.monotonic); } pub fn setRequestedTerminate(this: *WebWorker) bool { - return this.requested_terminate.swap(true, .Release); + return this.requested_terminate.swap(true, .release); } export fn WebWorker__updatePtr(worker: *WebWorker, ptr: *anyopaque) bool { @@ -181,8 +181,9 @@ pub const WebWorker = struct { return; } - assert(this.status.load(.Acquire) == .start); + assert(this.status.load(.acquire) == .start); assert(this.vm == null); + this.arena = try bun.MimallocArena.init(); var vm = try JSC.VirtualMachine.initWorker(this, .{ .allocator = this.arena.allocator(), @@ -206,6 +207,16 @@ pub const WebWorker = struct { return; }; + // TODO: we may have to clone other parts of vm state. 
this will be more + // important when implementing vm.deinit() + const map = try vm.allocator.create(bun.DotEnv.Map); + map.* = try vm.bundler.env.map.cloneWithAllocator(vm.allocator); + + const loader = try vm.allocator.create(bun.DotEnv.Loader); + loader.* = bun.DotEnv.Loader.init(map, vm.allocator); + + vm.bundler.env = loader; + vm.loadExtraEnv(); vm.is_main_thread = false; JSC.VirtualMachine.is_main_thread_vm = false; @@ -283,7 +294,7 @@ pub const WebWorker = struct { fn setStatus(this: *WebWorker, status: Status) void { log("[{d}] status: {s}", .{ this.execution_context_id, @tagName(status) }); - this.status.store(status, .Release); + this.status.store(status, .release); } fn unhandledError(this: *WebWorker, _: anyerror) void { @@ -294,7 +305,7 @@ pub const WebWorker = struct { log("[{d}] spin start", .{this.execution_context_id}); var vm = this.vm.?; - assert(this.status.load(.Acquire) == .start); + assert(this.status.load(.acquire) == .start); this.setStatus(.starting); var promise = vm.loadEntryPointForWebWorker(this.specifier) catch { @@ -372,7 +383,7 @@ pub const WebWorker = struct { /// Request a terminate (Called from main thread from worker.terminate(), or inside worker in process.exit()) /// The termination will actually happen after the next tick of the worker's loop. 
pub fn requestTerminate(this: *WebWorker) callconv(.C) void { - if (this.status.load(.Acquire) == .terminated) { + if (this.status.load(.acquire) == .terminated) { return; } if (this.setRequestedTerminate()) { diff --git a/src/bun.js/webcore/blob.zig b/src/bun.js/webcore/blob.zig index 1d99226fa1..5dd332750a 100644 --- a/src/bun.js/webcore/blob.zig +++ b/src/bun.js/webcore/blob.zig @@ -808,7 +808,7 @@ pub const Blob = struct { var result = ctx.bunVM().nodeFS().truncate(.{ .path = destination_blob.store.?.data.file.pathlike, .len = 0, - .flags = std.os.O.CREAT, + .flags = bun.O.CREAT, }, .sync); if (result == .err) { // it might return EPERM when the parent directory doesn't exist @@ -1216,7 +1216,7 @@ pub const Blob = struct { pathlike.path.sliceZ(&file_path), // we deliberately don't use O_TRUNC here // it's a perf optimization - std.os.O.WRONLY | std.os.O.CREAT | std.os.O.NONBLOCK, + bun.O.WRONLY | bun.O.CREAT | bun.O.NONBLOCK, write_permissions, )) { .result => |result| { @@ -1300,9 +1300,9 @@ pub const Blob = struct { if (!Environment.isWindows) // we deliberately don't use O_TRUNC here // it's a perf optimization - std.os.O.WRONLY | std.os.O.CREAT | std.os.O.NONBLOCK + bun.O.WRONLY | bun.O.CREAT | bun.O.NONBLOCK else - std.os.O.WRONLY | std.os.O.CREAT, + bun.O.WRONLY | bun.O.CREAT, write_permissions, )) { .result => |result| { @@ -1653,7 +1653,7 @@ pub const Blob = struct { }; pub fn ref(this: *Store) void { - const old = this.ref_count.fetchAdd(1, .Monotonic); + const old = this.ref_count.fetchAdd(1, .monotonic); assert(old > 0); } @@ -1709,7 +1709,7 @@ pub const Blob = struct { } pub fn deref(this: *Blob.Store) void { - const old = this.ref_count.fetchSub(1, .Monotonic); + const old = this.ref_count.fetchSub(1, .monotonic); assert(old >= 1); if (old == 1) { this.deinit(); @@ -1781,12 +1781,12 @@ pub const Blob = struct { const State = @This(); - const __opener_flags = std.os.O.NONBLOCK | std.os.O.CLOEXEC; + const __opener_flags = bun.O.NONBLOCK | 
bun.O.CLOEXEC; const open_flags_ = if (@hasDecl(This, "open_flags")) This.open_flags | __opener_flags else - std.os.O.RDONLY | __opener_flags; + bun.O.RDONLY | __opener_flags; pub inline fn getFdByOpening(this: *This, comptime Callback: OpenCallback) void { var buf: bun.PathBuffer = undefined; @@ -1902,7 +1902,7 @@ pub const Blob = struct { const Closer = @This(); fn scheduleClose(request: *io.Request) io.Action { - var this: *This = @fieldParentPtr(This, "io_request", request); + var this: *This = @alignCast(@fieldParentPtr("io_request", request)); return io.Action{ .close = .{ .ctx = this, @@ -1922,7 +1922,7 @@ pub const Blob = struct { fn onCloseIORequest(task: *JSC.WorkPoolTask) void { bloblog("onCloseIORequest()", .{}); - var this: *This = @fieldParentPtr(This, "task", task); + var this: *This = @alignCast(@fieldParentPtr("task", task)); this.close_after_io = false; this.update(); } @@ -1933,9 +1933,9 @@ pub const Blob = struct { ) bool { if (@hasField(This, "io_request")) { if (this.close_after_io) { - this.state.store(ClosingState.closing, .SeqCst); + this.state.store(ClosingState.closing, .seq_cst); - @atomicStore(@TypeOf(this.io_request.callback), &this.io_request.callback, &scheduleClose, .SeqCst); + @atomicStore(@TypeOf(this.io_request.callback), &this.io_request.callback, &scheduleClose, .seq_cst); if (!this.io_request.scheduled) io.Loop.get().schedule(&this.io_request); return true; @@ -2089,8 +2089,9 @@ pub const Blob = struct { } fn onCopyFile(req: *libuv.fs_t) callconv(.C) void { - var this: *CopyFileWindows = @fieldParentPtr(CopyFileWindows, "io_request", req); - assert(req.data == @as(?*anyopaque, @ptrCast(this))); + var this: *CopyFileWindows = @fieldParentPtr("io_request", req); + bun.assert(req.data == @as(?*anyopaque, @ptrCast(this))); + var event_loop = this.event_loop; event_loop.unrefConcurrently(); const rc = req.result; @@ -2262,7 +2263,7 @@ pub const Blob = struct { } const linux = std.os.linux; - const darwin = std.os.darwin; + const 
darwin = std.posix.system; pub fn deinit(this: *CopyFile) void { if (this.source_file_store.pathlike == .path) { @@ -2321,7 +2322,7 @@ pub const Blob = struct { } } - const os = std.os; + const posix = std.posix; pub fn doCloseFile(this: *CopyFile, comptime which: IOWhich) void { switch (which) { @@ -2338,7 +2339,7 @@ pub const Blob = struct { } } - const O = if (Environment.isLinux) linux.O else std.os.O; + const O = bun.O; const open_destination_flags = O.CLOEXEC | O.CREAT | O.WRONLY | O.TRUNC; const open_source_flags = O.CLOEXEC | O.RDONLY; @@ -2454,7 +2455,7 @@ pub const Blob = struct { return bun.errnoToZigErr(err.errno); }, .result => { - _ = linux.ftruncate(dest_fd.cast(), @as(std.os.off_t, @intCast(total_written))); + _ = linux.ftruncate(dest_fd.cast(), @as(std.posix.off_t, @intCast(total_written))); return; }, } @@ -2467,7 +2468,7 @@ pub const Blob = struct { .splice => bun.C.splice(src_fd.cast(), null, dest_fd.cast(), null, remain, 0), }; - switch (linux.getErrno(written)) { + switch (bun.C.getErrno(written)) { .SUCCESS => {}, .NOSYS, .XDEV => { @@ -2477,7 +2478,7 @@ pub const Blob = struct { return bun.errnoToZigErr(err.errno); }, .result => { - _ = linux.ftruncate(dest_fd.cast(), @as(std.os.off_t, @intCast(total_written))); + _ = linux.ftruncate(dest_fd.cast(), @as(std.posix.off_t, @intCast(total_written))); return; }, } @@ -2490,7 +2491,7 @@ pub const Blob = struct { // make() can set STDOUT / STDERR to O_APPEND // this messes up sendfile() has_unset_append = true; - const flags = linux.fcntl(dest_fd.cast(), linux.F.GETFL, 0); + const flags = linux.fcntl(dest_fd.cast(), linux.F.GETFL, @as(c_int, 0)); if ((flags & O.APPEND) != 0) { _ = linux.fcntl(dest_fd.cast(), linux.F.SETFL, flags ^ O.APPEND); continue; @@ -2509,7 +2510,7 @@ pub const Blob = struct { return bun.errnoToZigErr(err.errno); }, .result => { - _ = linux.ftruncate(dest_fd.cast(), @as(std.os.off_t, @intCast(total_written))); + _ = linux.ftruncate(dest_fd.cast(), @as(std.posix.off_t, 
@intCast(total_written))); return; }, } @@ -2531,14 +2532,14 @@ pub const Blob = struct { } // wrote zero bytes means EOF - remain -|= written; - total_written += written; + remain -|= @intCast(written); + total_written += @intCast(written); if (written == 0 or remain == 0) break; } } pub fn doFCopyFile(this: *CopyFile) anyerror!void { - switch (bun.sys.fcopyfile(this.source_fd, this.destination_fd, os.system.COPYFILE_DATA)) { + switch (bun.sys.fcopyfile(this.source_fd, this.destination_fd, posix.system.COPYFILE_DATA)) { .err => |errno| { this.system_error = errno.toSystemError(); @@ -2613,12 +2614,12 @@ pub const Blob = struct { .result => |result| { stat_ = result; - if (os.S.ISDIR(result.mode)) { + if (posix.S.ISDIR(result.mode)) { this.system_error = unsupported_directory_error; return; } - if (!os.S.ISREG(result.mode)) + if (!posix.S.ISREG(result.mode)) break :do_clonefile; }, .err => |err| { @@ -2633,7 +2634,7 @@ pub const Blob = struct { // If this fails...well, there's not much we can do about it. 
_ = bun.C.truncate( this.destination_file_store.pathlike.path.sliceZ(&path_buf), - @as(std.os.off_t, @intCast(this.max_length)), + @as(std.posix.off_t, @intCast(this.max_length)), ); this.read_len = @as(SizeType, @intCast(this.max_length)); } else { @@ -2683,7 +2684,7 @@ pub const Blob = struct { }, }; - if (os.S.ISDIR(stat.mode)) { + if (posix.S.ISDIR(stat.mode)) { this.system_error = unsupported_directory_error; this.doClose(); return; @@ -2696,7 +2697,7 @@ pub const Blob = struct { return; } - if (os.S.ISREG(stat.mode) and + if (posix.S.ISREG(stat.mode) and this.max_length > bun.C.preallocate_length and this.max_length != Blob.max_size) { @@ -2707,7 +2708,7 @@ pub const Blob = struct { if (comptime Environment.isLinux) { // Bun.write(Bun.file("a"), Bun.file("b")) - if (os.S.ISREG(stat.mode) and (os.S.ISREG(this.destination_file_store.mode) or this.destination_file_store.mode == 0)) { + if (posix.S.ISREG(stat.mode) and (posix.S.ISREG(this.destination_file_store.mode) or this.destination_file_store.mode == 0)) { if (this.destination_file_store.is_atty orelse false) { this.doCopyFileRange(.copy_file_range, true) catch {}; } else { @@ -2719,7 +2720,7 @@ pub const Blob = struct { } // $ bun run foo.js | bun run bar.js - if (os.S.ISFIFO(stat.mode) and os.S.ISFIFO(this.destination_file_store.mode)) { + if (posix.S.ISFIFO(stat.mode) and posix.S.ISFIFO(this.destination_file_store.mode)) { if (this.destination_file_store.is_atty orelse false) { this.doCopyFileRange(.splice, true) catch {}; } else { @@ -2730,7 +2731,7 @@ pub const Blob = struct { return; } - if (os.S.ISREG(stat.mode) or os.S.ISCHR(stat.mode) or os.S.ISSOCK(stat.mode)) { + if (posix.S.ISREG(stat.mode) or posix.S.ISCHR(stat.mode) or posix.S.ISSOCK(stat.mode)) { if (this.destination_file_store.is_atty orelse false) { this.doCopyFileRange(.sendfile, true) catch {}; } else { @@ -2753,7 +2754,7 @@ pub const Blob = struct { return; }; if (stat.size != 0 and @as(SizeType, @intCast(stat.size)) > this.max_length) { 
- _ = darwin.ftruncate(this.destination_fd.cast(), @as(std.os.off_t, @intCast(this.max_length))); + _ = darwin.ftruncate(this.destination_fd.cast(), @as(std.posix.off_t, @intCast(this.max_length))); } this.doClose(); @@ -3039,7 +3040,7 @@ pub const Blob = struct { var file_path: bun.PathBuffer = undefined; switch (bun.sys.open( pathlike.path.sliceZ(&file_path), - std.os.O.WRONLY | std.os.O.CREAT | std.os.O.NONBLOCK, + bun.O.WRONLY | bun.O.CREAT | bun.O.NONBLOCK, write_permissions, )) { .result => |result| { diff --git a/src/bun.js/webcore/blob/ReadFile.zig b/src/bun.js/webcore/blob/ReadFile.zig index c3ea88c978..e4c3e03d8f 100644 --- a/src/bun.js/webcore/blob/ReadFile.zig +++ b/src/bun.js/webcore/blob/ReadFile.zig @@ -94,7 +94,7 @@ pub const ReadFile = struct { pub usingnamespace FileCloserMixin(ReadFile); pub fn update(this: *ReadFile) void { - switch (this.state.load(.Monotonic)) { + switch (this.state.load(.monotonic)) { .closing => { this.onFinish(); }, @@ -149,7 +149,7 @@ pub const ReadFile = struct { pub const io_tag = io.Poll.Tag.ReadFile; pub fn onReadable(request: *io.Request) void { - var this: *ReadFile = @fieldParentPtr(ReadFile, "io_request", request); + var this: *ReadFile = @fieldParentPtr("io_request", request); this.onReady(); } @@ -185,7 +185,7 @@ pub const ReadFile = struct { pub fn onRequestReadable(request: *io.Request) io.Action { bloblog("ReadFile.onRequestReadable", .{}); request.scheduled = false; - var this: *ReadFile = @fieldParentPtr(ReadFile, "io_request", request); + var this: *ReadFile = @alignCast(@fieldParentPtr("io_request", request)); return io.Action{ .readable = .{ .onError = @ptrCast(&onIOError), @@ -200,7 +200,7 @@ pub const ReadFile = struct { pub fn waitForReadable(this: *ReadFile) void { bloblog("ReadFile.waitForReadable", .{}); this.close_after_io = true; - @atomicStore(@TypeOf(this.io_request.callback), &this.io_request.callback, &onRequestReadable, .SeqCst); + @atomicStore(@TypeOf(this.io_request.callback), 
&this.io_request.callback, &onRequestReadable, .seq_cst); if (!this.io_request.scheduled) io.Loop.get().schedule(&this.io_request); } @@ -213,7 +213,7 @@ pub const ReadFile = struct { pub fn doRead(this: *ReadFile, buffer: []u8, read_len: *usize, retry: *bool) bool { const result: JSC.Maybe(usize) = brk: { - if (std.os.S.ISSOCK(this.file_store.mode)) { + if (std.posix.S.ISSOCK(this.file_store.mode)) { break :brk bun.sys.recvNonBlock(this.opened_fd, buffer); } @@ -440,13 +440,13 @@ pub const ReadFile = struct { } fn doReadLoopTask(task: *JSC.WorkPoolTask) void { - var this: *ReadFile = @fieldParentPtr(ReadFile, "task", task); + var this: *ReadFile = @alignCast(@fieldParentPtr("task", task)); this.update(); } fn doReadLoop(this: *ReadFile) void { - while (this.state.load(.Monotonic) == .running) { + while (this.state.load(.monotonic) == .running) { // we hold a 64 KB stack buffer incase the amount of data to // be read is greater than the reported amount // diff --git a/src/bun.js/webcore/blob/WriteFile.zig b/src/bun.js/webcore/blob/WriteFile.zig index 77a604daec..06ba8e92e1 100644 --- a/src/bun.js/webcore/blob/WriteFile.zig +++ b/src/bun.js/webcore/blob/WriteFile.zig @@ -45,10 +45,10 @@ pub const WriteFile = struct { pub usingnamespace FileOpenerMixin(WriteFile); pub usingnamespace FileCloserMixin(WriteFile); - pub const open_flags = std.os.O.WRONLY | std.os.O.CREAT | std.os.O.TRUNC | std.os.O.NONBLOCK; + pub const open_flags = bun.O.WRONLY | bun.O.CREAT | bun.O.TRUNC | bun.O.NONBLOCK; pub fn onWritable(request: *io.Request) void { - var this: *WriteFile = @fieldParentPtr(WriteFile, "io_request", request); + var this: *WriteFile = @fieldParentPtr("io_request", request); this.onReady(); } @@ -69,7 +69,7 @@ pub const WriteFile = struct { pub fn onRequestWritable(request: *io.Request) io.Action { bloblog("WriteFile.onRequestWritable()", .{}); request.scheduled = false; - var this: *WriteFile = @fieldParentPtr(WriteFile, "io_request", request); + var this: *WriteFile = 
@fieldParentPtr("io_request", request); return io.Action{ .writable = .{ .onError = @ptrCast(&onIOError), @@ -83,7 +83,7 @@ pub const WriteFile = struct { pub fn waitForWritable(this: *WriteFile) void { this.close_after_io = true; - @atomicStore(@TypeOf(this.io_request.callback), &this.io_request.callback, &onRequestWritable, .SeqCst); + @atomicStore(@TypeOf(this.io_request.callback), &this.io_request.callback, &onRequestWritable, .seq_cst); if (!this.io_request.scheduled) io.Loop.get().schedule(&this.io_request); } @@ -295,7 +295,7 @@ pub const WriteFile = struct { } fn doWriteLoopTask(task: *JSC.WorkPoolTask) void { - var this: *WriteFile = @fieldParentPtr(WriteFile, "task", task); + var this: *WriteFile = @fieldParentPtr("task", task); // On macOS, we use one-shot mode, so we don't need to unregister. if (comptime Environment.isMac) { this.close_after_io = false; @@ -308,7 +308,7 @@ pub const WriteFile = struct { } fn doWriteLoop(this: *WriteFile) void { - while (this.state.load(.Monotonic) == .running) { + while (this.state.load(.monotonic) == .running) { var remain = this.bytes_blob.sharedView(); remain = remain[@min(this.total_written, remain.len)..]; @@ -432,7 +432,7 @@ pub const WriteFileWindows = struct { const rc = uv.uv_fs_open( this.loop(), &this.io_request, - &(std.os.toPosixPath(path) catch { + &(std.posix.toPosixPath(path) catch { this.throw(bun.sys.Error{ .errno = @intFromEnum(bun.C.E.NAMETOOLONG), .syscall = .open, @@ -459,7 +459,7 @@ pub const WriteFileWindows = struct { } pub fn onOpen(req: *uv.fs_t) callconv(.C) void { - var this: *WriteFileWindows = @fieldParentPtr(WriteFileWindows, "io_request", req); + var this: *WriteFileWindows = @fieldParentPtr("io_request", req); bun.assert(this == @as(*WriteFileWindows, @alignCast(@ptrCast(req.data.?)))); const rc = this.io_request.result; if (comptime Environment.allow_assert) @@ -524,7 +524,7 @@ pub const WriteFileWindows = struct { } fn onWriteComplete(req: *uv.fs_t) callconv(.C) void { - var this: 
*WriteFileWindows = @fieldParentPtr(WriteFileWindows, "io_request", req); + var this: *WriteFileWindows = @fieldParentPtr("io_request", req); bun.assert(this == @as(*WriteFileWindows, @alignCast(@ptrCast(req.data.?)))); const rc = this.io_request.result; if (rc.errno()) |err| { diff --git a/src/bun.js/webcore/response.zig b/src/bun.js/webcore/response.zig index b41ac43f8c..3a522f2d75 100644 --- a/src/bun.js/webcore/response.zig +++ b/src/bun.js/webcore/response.zig @@ -773,12 +773,12 @@ pub const Fetch = struct { ref_count: std.atomic.Value(u32) = std.atomic.Value(u32).init(1), pub fn ref(this: *FetchTasklet) void { - const count = this.ref_count.fetchAdd(1, .Monotonic); + const count = this.ref_count.fetchAdd(1, .monotonic); bun.debugAssert(count > 0); } pub fn deref(this: *FetchTasklet) void { - const count = this.ref_count.fetchSub(1, .Monotonic); + const count = this.ref_count.fetchSub(1, .monotonic); bun.debugAssert(count > 0); if (count == 1) { @@ -881,7 +881,7 @@ pub const Fetch = struct { fn deinit(this: *FetchTasklet) void { log("deinit", .{}); - bun.assert(this.ref_count.load(.Monotonic) == 0); + bun.assert(this.ref_count.load(.monotonic) == 0); this.clearData(); @@ -1060,7 +1060,7 @@ pub const Fetch = struct { log("onProgressUpdate", .{}); defer this.deref(); this.mutex.lock(); - this.has_schedule_callback.store(false, .Monotonic); + this.has_schedule_callback.store(false, .monotonic); if (this.is_waiting_body) { this.onBodyReceived(); @@ -1228,7 +1228,7 @@ pub const Fetch = struct { check_result.ensureStillAlive(); check_result.protect(); this.abort_reason = check_result; - this.signal_store.aborted.store(true, .Monotonic); + this.signal_store.aborted.store(true, .monotonic); this.tracker.didCancel(this.global_this); // we need to abort the request @@ -1387,7 +1387,7 @@ pub const Fetch = struct { if (this.http) |http_| { http_.enableBodyStreaming(); } - if (this.signal_store.aborted.load(.Monotonic)) { + if (this.signal_store.aborted.load(.monotonic)) { 
return JSC.WebCore.DrainResult{ .aborted = {}, }; @@ -1609,7 +1609,7 @@ pub const Fetch = struct { } if (fetch_tasklet.check_server_identity.has() and fetch_tasklet.reject_unauthorized) { - fetch_tasklet.signal_store.cert_errors.store(true, .Monotonic); + fetch_tasklet.signal_store.cert_errors.store(true, .monotonic); } else { fetch_tasklet.signals.cert_errors = null; } @@ -1649,7 +1649,7 @@ pub const Fetch = struct { } // we want to return after headers are received - fetch_tasklet.signal_store.header_progress.store(true, .Monotonic); + fetch_tasklet.signal_store.header_progress.store(true, .monotonic); if (fetch_tasklet.request_body == .Sendfile) { bun.assert(fetch_options.url.isHTTP()); @@ -1668,7 +1668,7 @@ pub const Fetch = struct { reason.ensureStillAlive(); this.abort_reason = reason; reason.protect(); - this.signal_store.aborted.store(true, .Monotonic); + this.signal_store.aborted.store(true, .monotonic); this.tracker.didCancel(this.global_this); if (this.http != null) { @@ -1786,7 +1786,7 @@ pub const Fetch = struct { task.response_buffer.reset(); } - if (task.has_schedule_callback.cmpxchgStrong(false, true, .Acquire, .Monotonic)) |has_schedule_callback| { + if (task.has_schedule_callback.cmpxchgStrong(false, true, .acquire, .monotonic)) |has_schedule_callback| { if (has_schedule_callback) { task.deref(); return; @@ -2542,7 +2542,7 @@ pub const Fetch = struct { } var cwd_buf: bun.PathBuffer = undefined; - const cwd = if (Environment.isWindows) (std.os.getcwd(&cwd_buf) catch |err| { + const cwd = if (Environment.isWindows) (bun.getcwd(&cwd_buf) catch |err| { globalThis.throwError(err, "Failed to resolve file url"); return .zero; }) else globalThis.bunVM().bundler.fs.top_level_dir; @@ -2628,7 +2628,7 @@ pub const Fetch = struct { prepare_body: { const opened_fd_res: JSC.Maybe(bun.FileDescriptor) = switch (body.Blob.store.?.data.file.pathlike) { .fd => |fd| bun.sys.dup(fd), - .path => |path| 
bun.sys.open(path.sliceZ(&globalThis.bunVM().nodeFS().sync_error_buf), if (Environment.isWindows) std.os.O.RDONLY else std.os.O.RDONLY | std.os.O.NOCTTY, 0), + .path => |path| bun.sys.open(path.sliceZ(&globalThis.bunVM().nodeFS().sync_error_buf), if (Environment.isWindows) bun.O.RDONLY else bun.O.RDONLY | bun.O.NOCTTY, 0), }; const opened_fd = switch (opened_fd_res) { diff --git a/src/bun.js/webcore/streams.zig b/src/bun.js/webcore/streams.zig index ed788debf4..6d0486a5c3 100644 --- a/src/bun.js/webcore/streams.zig +++ b/src/bun.js/webcore/streams.zig @@ -461,7 +461,7 @@ pub const StreamStart = union(Tag) { pub fn flags(this: *const FileSinkOptions) bun.Mode { _ = this; - return std.os.O.NONBLOCK | std.os.O.CLOEXEC | std.os.O.CREAT | std.os.O.WRONLY; + return bun.O.NONBLOCK | bun.O.CLOEXEC | bun.O.CREAT | bun.O.WRONLY; } }; @@ -3117,7 +3117,7 @@ pub const FileSink = struct { .fd => |fd_| brk: { if (comptime Environment.isPosix and FeatureFlags.nonblocking_stdout_and_stderr_on_posix) { if (bun.FDTag.get(fd_) != .none) { - const rc = bun.C.open_as_nonblocking_tty(@intCast(fd_.cast()), std.os.O.WRONLY); + const rc = bun.C.open_as_nonblocking_tty(@intCast(fd_.cast()), bun.O.WRONLY); if (rc > -1) { isatty = true; is_nonblocking_tty = true; @@ -3126,7 +3126,7 @@ pub const FileSink = struct { } } - break :brk bun.sys.dupWithFlags(fd_, if (bun.FDTag.get(fd_) == .none and !this.force_sync_on_windows) std.os.O.NONBLOCK else 0); + break :brk bun.sys.dupWithFlags(fd_, if (bun.FDTag.get(fd_) == .none and !this.force_sync_on_windows) bun.O.NONBLOCK else 0); }, }) { .err => |err| return .{ .err = err }, @@ -3142,7 +3142,7 @@ pub const FileSink = struct { .result => |stat| { this.pollable = bun.sys.isPollable(stat.mode); if (!this.pollable and isatty == null) { - isatty = std.os.isatty(fd.int()); + isatty = std.posix.isatty(fd.int()); } if (isatty) |is| { @@ -3151,7 +3151,7 @@ pub const FileSink = struct { } this.fd = fd; - this.is_socket = std.os.S.ISSOCK(stat.mode); + 
this.is_socket = std.posix.S.ISSOCK(stat.mode); this.nonblocking = is_nonblocking_tty or (this.pollable and switch (options.input_path) { .path => true, .fd => |fd_| bun.FDTag.get(fd_) == .none, @@ -3503,7 +3503,7 @@ pub const FileReader = struct { const fd = if (file.pathlike == .fd) if (file.pathlike.fd.isStdio()) brk: { if (comptime Environment.isPosix) { - const rc = bun.C.open_as_nonblocking_tty(@intCast(file.pathlike.fd.int()), std.os.O.RDONLY); + const rc = bun.C.open_as_nonblocking_tty(file.pathlike.fd.int(), bun.O.RDONLY); if (rc > -1) { is_nonblocking_tty = true; file.is_atty = true; @@ -3514,7 +3514,7 @@ pub const FileReader = struct { } else switch (Syscall.dupWithFlags(file.pathlike.fd, brk: { if (comptime Environment.isPosix) { if (bun.FDTag.get(file.pathlike.fd) == .none and !(file.is_atty orelse false)) { - break :brk std.os.O.NONBLOCK; + break :brk bun.O.NONBLOCK; } } @@ -3530,7 +3530,7 @@ pub const FileReader = struct { return .{ .err = err.withFd(file.pathlike.fd) }; }, } - else switch (Syscall.open(file.pathlike.path.sliceZ(&file_buf), std.os.O.RDONLY | std.os.O.NONBLOCK | std.os.O.CLOEXEC, 0)) { + else switch (Syscall.open(file.pathlike.path.sliceZ(&file_buf), bun.O.RDONLY | bun.O.NONBLOCK | bun.O.CLOEXEC, 0)) { .result => |fd| fd, .err => |err| { return .{ .err = err.withPath(file.pathlike.path.slice()) }; @@ -3539,15 +3539,15 @@ pub const FileReader = struct { if (comptime Environment.isPosix) { if ((file.is_atty orelse false) or - (fd.int() < 3 and std.os.isatty(fd.cast())) or + (fd.int() < 3 and std.posix.isatty(fd.cast())) or (file.pathlike == .fd and bun.FDTag.get(file.pathlike.fd) != .none and - std.os.isatty(file.pathlike.fd.cast()))) + std.posix.isatty(file.pathlike.fd.cast()))) { - // var termios = std.mem.zeroes(std.os.termios); + // var termios = std.mem.zeroes(std.posix.termios); // _ = std.c.tcgetattr(fd.cast(), &termios); // bun.C.cfmakeraw(&termios); - // _ = std.c.tcsetattr(fd.cast(), std.os.TCSA.NOW, &termios); + // _ = 
std.c.tcsetattr(fd.cast(), std.posix.TCSA.NOW, &termios); file.is_atty = true; } @@ -3715,7 +3715,7 @@ pub const FileReader = struct { } pub fn parent(this: *@This()) *Source { - return @fieldParentPtr(Source, "context", this); + return @fieldParentPtr("context", this); } pub fn onCancel(this: *FileReader) void { @@ -4093,7 +4093,7 @@ pub const ByteBlobLoader = struct { pub const tag = ReadableStream.Tag.Blob; pub fn parent(this: *@This()) *Source { - return @fieldParentPtr(Source, "context", this); + return @fieldParentPtr("context", this); } pub fn setup( @@ -4283,7 +4283,7 @@ pub const ByteStream = struct { } pub fn isCancelled(this: *const @This()) bool { - return @fieldParentPtr(Source, "context", this).cancelled; + return this.parent().cancelled; } pub fn unpipeWithoutDeref(this: *@This()) void { @@ -4419,7 +4419,7 @@ pub const ByteStream = struct { } pub fn parent(this: *@This()) *Source { - return @fieldParentPtr(Source, "context", this); + return @fieldParentPtr("context", this); } pub fn onPull(this: *@This(), buffer: []u8, view: JSC.JSValue) StreamResult { diff --git a/src/bun.zig b/src/bun.zig index 44486feb35..46086746aa 100644 --- a/src/bun.zig +++ b/src/bun.zig @@ -5,6 +5,7 @@ // @import("root").bun // // Otherwise, you risk a circular dependency or Zig including multiple copies of this file which leads to strange bugs. 
+const builtin = @import("builtin"); const std = @import("std"); pub const Environment = @import("env.zig"); @@ -38,7 +39,6 @@ pub const C = @import("root").C; pub const sha = @import("./sha.zig"); pub const FeatureFlags = @import("feature_flags.zig"); pub const meta = @import("./meta.zig"); -pub const ComptimeStringMap = @import("./comptime_string_map.zig").ComptimeStringMap; pub const base64 = @import("./base64/base64.zig"); pub const path = @import("./resolver/resolve_path.zig"); pub const resolver = @import("./resolver/resolver.zig"); @@ -47,9 +47,18 @@ pub const PackageJSON = @import("./resolver/package_json.zig").PackageJSON; pub const fmt = @import("./fmt.zig"); pub const allocators = @import("./allocators.zig"); -pub const patch = @import("./patch.zig"); +/// Copied from Zig std.trait +pub const trait = @import("./trait.zig"); +/// Copied from Zig std.Progress before 0.13 rewrite +pub const Progress = @import("./Progress.zig"); +/// Modified version of Zig's ComptimeStringMap +pub const comptime_string_map = @import("./comptime_string_map.zig"); +pub const ComptimeStringMap = comptime_string_map.ComptimeStringMap; +pub const ComptimeStringMap16 = comptime_string_map.ComptimeStringMap16; +pub const ComptimeStringMapWithKeyType = comptime_string_map.ComptimeStringMapWithKeyType; pub const glob = @import("./glob.zig"); +pub const patch = @import("./patch.zig"); pub const shell = struct { pub usingnamespace @import("./shell/shell.zig"); @@ -68,7 +77,7 @@ else if (Environment.isWindows) // Do not bitcast it to *anyopaque manually, but instead use `fdcast()` u64 else - std.os.fd_t; + std.posix.fd_t; pub const FD = FileDescriptor; pub const FileDescriptor = enum(FileDescriptorInt) { @@ -87,7 +96,7 @@ pub const FileDescriptor = enum(FileDescriptorInt) { /// On Windows, it is always a mistake, as the integer is bitcast of a tagged packed struct. /// /// TODO(@paperdave): remove this API. 
- pub inline fn int(self: FileDescriptor) std.os.fd_t { + pub inline fn int(self: FileDescriptor) std.posix.fd_t { if (Environment.isWindows) @compileError("FileDescriptor.int() is not allowed on Windows."); return @intFromEnum(self); @@ -107,7 +116,7 @@ pub const FileDescriptor = enum(FileDescriptorInt) { /// to Windows' *HANDLE, and casts the types for proper usage. /// /// This may be needed in places where a FileDescriptor is given to `std` or `kernel32` apis - pub inline fn cast(fd: FileDescriptor) std.os.fd_t { + pub inline fn cast(fd: FileDescriptor) std.posix.fd_t { if (!Environment.isWindows) return fd.int(); // if not having this check, the cast may crash zig compiler? if (@inComptime() and fd == invalid_fd) return FDImpl.invalid.system(); @@ -190,31 +199,31 @@ pub const StoredFileDescriptorType = FileDescriptor; pub const PlatformIOVec = if (Environment.isWindows) windows.libuv.uv_buf_t else - std.os.iovec; + std.posix.iovec; pub const PlatformIOVecConst = if (Environment.isWindows) windows.libuv.uv_buf_t else - std.os.iovec_const; + std.posix.iovec_const; pub fn platformIOVecCreate(input: []const u8) PlatformIOVec { - if (Environment.isWindows) return windows.libuv.uv_buf_t.init(input); if (Environment.allow_assert) { if (input.len > @as(usize, std.math.maxInt(u32))) { Output.debugWarn("call to bun.PlatformIOVec.init with length larger than u32, this will overflow on windows", .{}); } } - return .{ .iov_len = @intCast(input.len), .iov_base = @constCast(input.ptr) }; + // TODO: remove this constCast by making the input mutable + return .{ .len = @intCast(input.len), .base = @constCast(input.ptr) }; } pub fn platformIOVecConstCreate(input: []const u8) PlatformIOVecConst { - if (Environment.isWindows) return windows.libuv.uv_buf_t.init(input); if (Environment.allow_assert) { if (input.len > @as(usize, std.math.maxInt(u32))) { Output.debugWarn("call to bun.PlatformIOVecConst.init with length larger than u32, this will overflow on windows", .{}); } } - 
return .{ .iov_len = @intCast(input.len), .iov_base = input.ptr }; + // TODO: remove this constCast by adding uv_buf_t_const + return .{ .len = @intCast(input.len), .base = @constCast(input.ptr) }; } pub fn platformIOVecToSlice(iovec: PlatformIOVec) []u8 { @@ -310,28 +319,6 @@ fn Span(comptime T: type) type { else => @compileError("invalid type given to std.mem.Span: " ++ @typeName(T)), } } -// fn Span(comptime T: type) type { -// switch (@typeInfo(T)) { -// .Optional => |optional_info| { -// return ?Span(optional_info.child); -// }, -// .Pointer => |ptr_info| { -// var new_ptr_info = ptr_info; -// switch (ptr_info.size) { -// .C => { -// new_ptr_info.sentinel = &@as(ptr_info.child, 0); -// new_ptr_info.is_allowzero = false; -// }, -// .Many => if (ptr_info.sentinel == null) @compileError("invalid type given to bun.span: " ++ @typeName(T)), -// else => {}, -// } -// new_ptr_info.size = .Slice; -// return @Type(.{ .Pointer = new_ptr_info }); -// }, -// else => {}, -// } -// @compileError("invalid type given to bun.span: " ++ @typeName(T)); -// } pub fn span(ptr: anytype) Span(@TypeOf(ptr)) { if (@typeInfo(@TypeOf(ptr)) == .Optional) { @@ -475,17 +462,17 @@ pub fn fastRandom() u64 { pub fn get() u64 { // This is slightly racy but its fine because this memoization is done as a performance optimization // and we only need to do it once per process - var value = seed_value.load(.Monotonic); - while (value == 0) : (value = seed_value.load(.Monotonic)) { + var value = seed_value.load(.monotonic); + while (value == 0) : (value = seed_value.load(.monotonic)) { if (comptime Environment.isDebug) outer: { if (getenvZ("BUN_DEBUG_HASH_RANDOM_SEED")) |env| { value = std.fmt.parseInt(u64, env, 10) catch break :outer; - seed_value.store(value, .Monotonic); + seed_value.store(value, .monotonic); return value; } } rand(std.mem.asBytes(&value)); - seed_value.store(value, .Monotonic); + seed_value.store(value, .monotonic); } return value; @@ -524,14 +511,12 @@ pub fn rand(bytes: []u8) 
void { pub const ObjectPool = @import("./pool.zig").ObjectPool; pub fn assertNonBlocking(fd: anytype) void { - assert( - (std.os.fcntl(fd, std.os.F.GETFL, 0) catch unreachable) & std.os.O.NONBLOCK != 0, - ); + assert((std.posix.fcntl(fd, std.posix.F.GETFL, 0) catch unreachable) & O.NONBLOCK != 0); } pub fn ensureNonBlocking(fd: anytype) void { - const current = std.os.fcntl(fd, std.os.F.GETFL, 0) catch 0; - _ = std.os.fcntl(fd, std.os.F.SETFL, current | std.os.O.NONBLOCK) catch 0; + const current = std.posix.fcntl(fd, std.posix.F.GETFL, 0) catch 0; + _ = std.posix.fcntl(fd, std.posix.F.SETFL, current | O.NONBLOCK) catch 0; } const global_scope_log = sys.syslog; @@ -540,16 +525,16 @@ pub fn isReadable(fd: FileDescriptor) PollFlag { @panic("TODO on Windows"); } assert(fd != invalid_fd); - var polls = [_]std.os.pollfd{ + var polls = [_]std.posix.pollfd{ .{ .fd = fd.cast(), - .events = std.os.POLL.IN | std.os.POLL.ERR | std.os.POLL.HUP, + .events = std.posix.POLL.IN | std.posix.POLL.ERR | std.posix.POLL.HUP, .revents = 0, }, }; - const result = (std.os.poll(&polls, 0) catch 0) != 0; - const rc = if (result and polls[0].revents & (std.os.POLL.HUP | std.os.POLL.ERR) != 0) + const result = (std.posix.poll(&polls, 0) catch 0) != 0; + const rc = if (result and polls[0].revents & (std.posix.POLL.HUP | std.posix.POLL.ERR) != 0) PollFlag.hup else if (result) PollFlag.ready @@ -559,7 +544,7 @@ pub fn isReadable(fd: FileDescriptor) PollFlag { fd, result, @tagName(rc), - if (polls[0].revents & std.os.POLL.ERR != 0) " ERR " else "", + if (polls[0].revents & std.posix.POLL.ERR != 0) " ERR " else "", }); return rc; } @@ -570,14 +555,14 @@ pub fn isWritable(fd: FileDescriptor) PollFlag { var polls = [_]std.os.windows.ws2_32.WSAPOLLFD{ .{ .fd = socketcast(fd), - .events = std.os.POLL.WRNORM, + .events = std.posix.POLL.WRNORM, .revents = 0, }, }; const rc = std.os.windows.ws2_32.WSAPoll(&polls, 1, 0); const result = (if (rc != std.os.windows.ws2_32.SOCKET_ERROR) @as(usize, 
@intCast(rc)) else 0) != 0; global_scope_log("poll({}) writable: {any} ({d})", .{ fd, result, polls[0].revents }); - if (result and polls[0].revents & std.os.POLL.WRNORM != 0) { + if (result and polls[0].revents & std.posix.POLL.WRNORM != 0) { return .hup; } else if (result) { return .ready; @@ -588,16 +573,16 @@ pub fn isWritable(fd: FileDescriptor) PollFlag { } assert(fd != invalid_fd); - var polls = [_]std.os.pollfd{ + var polls = [_]std.posix.pollfd{ .{ .fd = fd.cast(), - .events = std.os.POLL.OUT | std.os.POLL.ERR | std.os.POLL.HUP, + .events = std.posix.POLL.OUT | std.posix.POLL.ERR | std.posix.POLL.HUP, .revents = 0, }, }; - const result = (std.os.poll(&polls, 0) catch 0) != 0; - const rc = if (result and polls[0].revents & (std.os.POLL.HUP | std.os.POLL.ERR) != 0) + const result = (std.posix.poll(&polls, 0) catch 0) != 0; + const rc = if (result and polls[0].revents & (std.posix.POLL.HUP | std.posix.POLL.ERR) != 0) PollFlag.hup else if (result) PollFlag.ready @@ -607,7 +592,7 @@ pub fn isWritable(fd: FileDescriptor) PollFlag { fd, result, @tagName(rc), - if (polls[0].revents & std.os.POLL.ERR != 0) " ERR " else "", + if (polls[0].revents & std.posix.POLL.ERR != 0) " ERR " else "", }); return rc; } @@ -713,9 +698,9 @@ pub var start_time: i128 = 0; pub fn openFileZ(pathZ: [:0]const u8, open_flags: std.fs.File.OpenFlags) !std.fs.File { var flags: Mode = 0; switch (open_flags.mode) { - .read_only => flags |= std.os.O.RDONLY, - .write_only => flags |= std.os.O.WRONLY, - .read_write => flags |= std.os.O.RDWR, + .read_only => flags |= O.RDONLY, + .write_only => flags |= O.WRONLY, + .read_write => flags |= O.RDWR, } const res = try sys.open(pathZ, flags, 0).unwrap(); @@ -726,16 +711,16 @@ pub fn openFile(path_: []const u8, open_flags: std.fs.File.OpenFlags) !std.fs.Fi if (comptime Environment.isWindows) { var flags: Mode = 0; switch (open_flags.mode) { - .read_only => flags |= std.os.O.RDONLY, - .write_only => flags |= std.os.O.WRONLY, - .read_write => flags |= 
std.os.O.RDWR, + .read_only => flags |= O.RDONLY, + .write_only => flags |= O.WRONLY, + .read_write => flags |= O.RDWR, } const fd = try sys.openA(path_, flags, 0).unwrap(); return fd.asFile(); } - return try openFileZ(&try std.os.toPosixPath(path_), open_flags); + return try openFileZ(&try std.posix.toPosixPath(path_), open_flags); } pub fn openDir(dir: std.fs.Dir, path_: [:0]const u8) !std.fs.Dir { @@ -743,7 +728,7 @@ pub fn openDir(dir: std.fs.Dir, path_: [:0]const u8) !std.fs.Dir { const res = try sys.openDirAtWindowsA(toFD(dir.fd), path_, .{ .iterable = true, .can_rename_or_delete = true, .read_only = true }).unwrap(); return res.asDir(); } else { - const fd = try sys.openat(toFD(dir.fd), path_, std.os.O.DIRECTORY | std.os.O.CLOEXEC | std.os.O.RDONLY, 0).unwrap(); + const fd = try sys.openat(toFD(dir.fd), path_, O.DIRECTORY | O.CLOEXEC | O.RDONLY, 0).unwrap(); return fd.asDir(); } } @@ -759,7 +744,7 @@ pub fn openDirA(dir: std.fs.Dir, path_: []const u8) !std.fs.Dir { const res = try sys.openDirAtWindowsA(toFD(dir.fd), path_, .{ .iterable = true, .can_rename_or_delete = true, .read_only = true }).unwrap(); return res.asDir(); } else { - const fd = try sys.openatA(toFD(dir.fd), path_, std.os.O.DIRECTORY | std.os.O.CLOEXEC | std.os.O.RDONLY, 0).unwrap(); + const fd = try sys.openatA(toFD(dir.fd), path_, O.DIRECTORY | O.CLOEXEC | O.RDONLY, 0).unwrap(); return fd.asDir(); } } @@ -769,7 +754,7 @@ pub fn openDirForIteration(dir: std.fs.Dir, path_: []const u8) !std.fs.Dir { const res = try sys.openDirAtWindowsA(toFD(dir.fd), path_, .{ .iterable = true, .can_rename_or_delete = false, .read_only = true }).unwrap(); return res.asDir(); } else { - const fd = try sys.openatA(toFD(dir.fd), path_, std.os.O.DIRECTORY | std.os.O.CLOEXEC | std.os.O.RDONLY, 0).unwrap(); + const fd = try sys.openatA(toFD(dir.fd), path_, O.DIRECTORY | O.CLOEXEC | O.RDONLY, 0).unwrap(); return fd.asDir(); } } @@ -779,7 +764,7 @@ pub fn openDirAbsolute(path_: []const u8) !std.fs.Dir { const res = 
try sys.openDirAtWindowsA(invalid_fd, path_, .{ .iterable = true, .can_rename_or_delete = true, .read_only = true }).unwrap(); return res.asDir(); } else { - const fd = try sys.openA(path_, std.os.O.DIRECTORY | std.os.O.CLOEXEC | std.os.O.RDONLY, 0).unwrap(); + const fd = try sys.openA(path_, O.DIRECTORY | O.CLOEXEC | O.RDONLY, 0).unwrap(); return fd.asDir(); } } @@ -790,12 +775,12 @@ pub fn getRuntimeFeatureFlag(comptime flag: [:0]const u8) bool { const state = enum(u8) { idk, disabled, enabled }; var is_enabled: std.atomic.Value(state) = std.atomic.Value(state).init(.idk); pub fn get() bool { - return switch (is_enabled.load(.SeqCst)) { + return switch (is_enabled.load(.seq_cst)) { .enabled => true, .disabled => false, .idk => { const enabled = if (getenvZ(flag_)) |val| strings.eqlComptime(val, "1") or strings.eqlComptime(val, "true") else false; - is_enabled.store(if (enabled) .enabled else .disabled, .SeqCst); + is_enabled.store(if (enabled) .enabled else .disabled, .seq_cst); return enabled; }, }; @@ -1269,19 +1254,19 @@ var needs_proc_self_workaround: bool = false; // This is our "polyfill" when /proc/self/fd is not available it's only // necessary on linux because other platforms don't have an optional // /proc/self/fd -fn getFdPathViaCWD(fd: std.os.fd_t, buf: *[@This().MAX_PATH_BYTES]u8) ![]u8 { - const prev_fd = try std.os.openatZ(std.fs.cwd().fd, ".", std.os.O.DIRECTORY, 0); +fn getFdPathViaCWD(fd: std.posix.fd_t, buf: *[@This().MAX_PATH_BYTES]u8) ![]u8 { + const prev_fd = try std.posix.openatZ(std.fs.cwd().fd, ".", .{ .DIRECTORY = true }, 0); var needs_chdir = false; defer { - if (needs_chdir) std.os.fchdir(prev_fd) catch unreachable; - std.os.close(prev_fd); + if (needs_chdir) std.posix.fchdir(prev_fd) catch unreachable; + std.posix.close(prev_fd); } - try std.os.fchdir(fd); + try std.posix.fchdir(fd); needs_chdir = true; - return std.os.getcwd(buf); + return std.posix.getcwd(buf); } -pub const getcwd = std.os.getcwd; +pub const getcwd = 
std.posix.getcwd; pub fn getcwdAlloc(allocator: std.mem.Allocator) ![]u8 { var temp: PathBuffer = undefined; @@ -1574,8 +1559,8 @@ pub const failing_allocator = std.mem.Allocator{ .ptr = undefined, .vtable = &.{ var __reload_in_progress__ = std.atomic.Value(bool).init(false); threadlocal var __reload_in_progress__on_current_thread = false; pub fn isProcessReloadInProgressOnAnotherThread() bool { - @fence(.Acquire); - return __reload_in_progress__.load(.Monotonic) and !__reload_in_progress__on_current_thread; + @fence(.acquire); + return __reload_in_progress__.load(.monotonic) and !__reload_in_progress__on_current_thread; } pub noinline fn maybeHandlePanicDuringProcessReload() void { @@ -1595,7 +1580,7 @@ pub noinline fn maybeHandlePanicDuringProcessReload() void { std.atomic.spinLoopHint(); if (comptime Environment.isPosix) { - std.os.nanosleep(1, 0); + std.posix.nanosleep(1, 0); } } } @@ -1621,7 +1606,7 @@ pub fn reloadProcess( clear_terminal: bool, comptime may_return: bool, ) if (may_return) void else noreturn { - __reload_in_progress__.store(true, .Monotonic); + __reload_in_progress__.store(true, .monotonic); __reload_in_progress__on_current_thread = true; if (clear_terminal) { @@ -1719,7 +1704,7 @@ pub fn reloadProcess( }.on_before_reload_process_linux; on_before_reload_process_linux(); - const err = std.os.execveZ( + const err = std.posix.execveZ( exec_path, newargv, envp, @@ -1892,7 +1877,6 @@ pub fn HiveRef(comptime T: type, comptime capacity: u16) type { ref_count: u32, allocator: *HiveAllocator, value: T, - pub fn init(value: T, allocator: *HiveAllocator) !*@This() { var this = try allocator.tryGet(); this.allocator = allocator; @@ -1925,21 +1909,27 @@ pub const MaxHeapAllocator = @import("./max_heap_allocator.zig").MaxHeapAllocato pub const tracy = @import("./tracy.zig"); pub const trace = tracy.trace; -pub fn openFileForPath(path_: [:0]const u8) !std.fs.File { - const O_PATH = if (comptime Environment.isLinux) std.os.O.PATH else std.os.O.RDONLY; - const 
flags: u32 = std.os.O.CLOEXEC | std.os.O.NOCTTY | O_PATH; +pub fn openFileForPath(file_path: [:0]const u8) !std.fs.File { + if (Environment.isWindows) + return std.fs.cwd().openFileZ(file_path, .{}); - const fd = try std.os.openZ(path_, flags, 0); + const O_PATH = if (comptime Environment.isLinux) O.PATH else O.RDONLY; + const flags: u32 = O.CLOEXEC | O.NOCTTY | O_PATH; + + const fd = try std.posix.openZ(file_path, O.toPacked(flags), 0); return std.fs.File{ .handle = fd, }; } -pub fn openDirForPath(path_: [:0]const u8) !std.fs.Dir { - const O_PATH = if (comptime Environment.isLinux) std.os.O.PATH else std.os.O.RDONLY; - const flags: u32 = std.os.O.CLOEXEC | std.os.O.NOCTTY | std.os.O.DIRECTORY | O_PATH; +pub fn openDirForPath(file_path: [:0]const u8) !std.fs.Dir { + if (Environment.isWindows) + return std.fs.cwd().openDirZ(file_path, .{}); - const fd = try std.os.openZ(path_, flags, 0); + const O_PATH = if (comptime Environment.isLinux) O.PATH else O.RDONLY; + const flags: u32 = O.CLOEXEC | O.NOCTTY | O.DIRECTORY | O_PATH; + + const fd = try std.posix.openZ(file_path, O.toPacked(flags), 0); return std.fs.Dir{ .fd = fd, }; @@ -1986,7 +1976,7 @@ pub inline fn toFD(fd: anytype) FileDescriptor { const T = @TypeOf(fd); if (Environment.isWindows) { return (switch (T) { - FDImpl => fd, + FDImpl => fd, // TODO: remove the toFD call from these places and make this a @compileError FDImpl.System => FDImpl.fromSystem(fd), FDImpl.UV, i32, comptime_int => FDImpl.fromUV(fd), FileDescriptor => FDImpl.decode(fd), @@ -2001,10 +1991,10 @@ pub inline fn toFD(fd: anytype) FileDescriptor { // even though file descriptors are always positive, linux/mac repesents them as signed integers return switch (T) { FileDescriptor => fd, // TODO: remove the toFD call from these places and make this a @compileError - std.fs.File, sys.File => toFD(fd.handle), + sys.File => fd.handle, + std.fs.File => @enumFromInt(fd.handle), std.fs.Dir => @enumFromInt(@as(i32, @intCast(fd.fd))), c_int, i32, u32, 
comptime_int => @enumFromInt(fd), - usize, i64 => @enumFromInt(@as(i32, @intCast(fd))), else => @compileError("bun.toFD() not implemented for: " ++ @typeName(T)), }; } @@ -2075,7 +2065,7 @@ pub inline fn uvfdcast(fd: anytype) FDImpl.UV { } } -pub inline fn socketcast(fd: anytype) std.os.socket_t { +pub inline fn socketcast(fd: anytype) std.posix.socket_t { if (Environment.isWindows) { return @ptrCast(FDImpl.decode(fd).system()); } else { @@ -2089,9 +2079,8 @@ pub const HOST_NAME_MAX = if (Environment.isWindows) // https://learn.microsoft.com/en-us/windows/win32/api/winsock/nf-winsock-gethostname 256 else - std.os.HOST_NAME_MAX; + std.posix.HOST_NAME_MAX; -pub const enums = @import("./enums.zig"); const WindowsStat = extern struct { dev: u32, ino: u32, @@ -2126,7 +2115,7 @@ const WindowsStat = extern struct { } }; -pub const Stat = if (Environment.isWindows) windows.libuv.uv_stat_t else std.os.Stat; +pub const Stat = if (Environment.isWindows) windows.libuv.uv_stat_t else std.posix.Stat; pub var argv: [][:0]const u8 = &[_][:0]const u8{}; @@ -2213,8 +2202,8 @@ pub const win32 = struct { pub fn unsetStdioModeFlags(i: anytype, flags: w.DWORD) !w.DWORD { const fd = stdio(i); var original_mode: w.DWORD = 0; - if (windows.GetConsoleMode(fd.cast(), &original_mode) != 0) { - if (windows.SetConsoleMode(fd.cast(), original_mode & ~flags) == 0) { + if (windows.kernel32.GetConsoleMode(fd.cast(), &original_mode) != 0) { + if (windows.kernel32.SetConsoleMode(fd.cast(), original_mode & ~flags) == 0) { return windows.getLastError(); } } else return windows.getLastError(); @@ -2425,6 +2414,7 @@ pub fn isRegularFile(mode: anytype) bool { } pub const sys = @import("./sys.zig"); +pub const O = sys.O; pub const Mode = C.Mode; @@ -2491,9 +2481,11 @@ pub const LazyBoolValue = enum { pub fn LazyBool(comptime Getter: anytype, comptime Parent: type, comptime field: string) type { return struct { value: LazyBoolValue = .unknown, + pub fn get(self: *@This()) bool { if (self.value == .unknown) 
{ - self.value = switch (Getter(@fieldParentPtr(Parent, field, self))) { + const parent: *Parent = @alignCast(@fieldParentPtr(field, self)); + self.value = switch (Getter(parent)) { true => .yes, false => .no, }; @@ -2585,9 +2577,11 @@ pub inline fn pathLiteral(comptime literal: anytype) *const [literal.len:0]u8 { var buf: [literal.len:0]u8 = undefined; for (literal, 0..) |c, i| { buf[i] = if (c == '/') '\\' else c; + std.debug.assert(buf[i] != 0 and buf[i] < 128); } buf[buf.len] = 0; - return &buf; + const final = buf[0..buf.len :0].*; + return &final; }; } @@ -2598,14 +2592,14 @@ pub inline fn OSPathLiteral(comptime literal: anytype) *const [literal.len:0]OSP var buf: [literal.len:0]OSPathChar = undefined; for (literal, 0..) |c, i| { buf[i] = if (c == '/') '\\' else c; + std.debug.assert(buf[i] != 0 and buf[i] < 128); } buf[buf.len] = 0; - return &buf; + const final = buf[0..buf.len :0].*; + return &final; }; } -const builtin = @import("builtin"); - pub const MakePath = struct { const w = std.os.windows; @@ -3124,10 +3118,7 @@ pub fn errnoToZigErr(err: anytype) anyerror { return error.Unexpected; } -pub const S = if (Environment.isWindows) C.S else std.os.S; - -/// Deprecated! -pub const trait = @import("./trait.zig"); +pub const S = if (Environment.isWindows) C.S else std.posix.S; pub const brotli = @import("./brotli.zig"); diff --git a/src/bundler/bundle_v2.zig b/src/bundler/bundle_v2.zig index 948f07dd0a..02dde1721d 100644 --- a/src/bundler/bundle_v2.zig +++ b/src/bundler/bundle_v2.zig @@ -126,7 +126,7 @@ const debugTreeShake = Output.scoped(.TreeShake, true); const BitSet = bun.bit_set.DynamicBitSetUnmanaged; const Async = bun.Async; -fn tracer(comptime src: std.builtin.SourceLocation, comptime name: [*:0]const u8) bun.tracy.Ctx { +fn tracer(comptime src: std.builtin.SourceLocation, comptime name: [:0]const u8) bun.tracy.Ctx { return bun.tracy.traceNamed(src, "Bundler." 
++ name); } @@ -220,7 +220,7 @@ pub const ThreadPool = struct { pub fn deinitCallback(task: *ThreadPoolLib.Task) void { debug("Worker.deinit()", .{}); - var this = @fieldParentPtr(Worker, "deinit_task", task); + var this: *Worker = @alignCast(@fieldParentPtr("deinit_task", task)); this.deinit(); } @@ -459,7 +459,7 @@ pub const BundleV2 = struct { } fn isDone(this: *BundleV2) bool { - return @atomicLoad(usize, &this.graph.parse_pending, .Monotonic) == 0 and @atomicLoad(usize, &this.graph.resolve_pending, .Monotonic) == 0; + return @atomicLoad(usize, &this.graph.parse_pending, .monotonic) == 0 and @atomicLoad(usize, &this.graph.resolve_pending, .monotonic) == 0; } pub fn waitForParse(this: *BundleV2) void { @@ -623,7 +623,7 @@ pub const BundleV2 = struct { task.tree_shaking = this.linker.options.tree_shaking; task.known_target = import_record.original_target; - _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, .Monotonic); + _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, .monotonic); // Handle onLoad plugins if (!this.enqueueOnLoadPluginIfNeeded(task)) { @@ -663,7 +663,7 @@ pub const BundleV2 = struct { if (entry.found_existing) { return null; } - _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, .Monotonic); + _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, .monotonic); const source_index = Index.source(this.graph.input_files.len); if (path.pretty.ptr == path.text.ptr) { @@ -801,7 +801,7 @@ pub const BundleV2 = struct { }; runtime_parse_task.tree_shaking = true; runtime_parse_task.loader = .js; - _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, .Monotonic); + _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, .monotonic); batch.push(ThreadPoolLib.Batch.from(&runtime_parse_task.task)); } @@ -1000,7 +1000,7 @@ pub const BundleV2 = struct { .use_directive = .@"use client", }); - _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, .Monotonic); + _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, 
.monotonic); this.graph.entry_points.append(allocator, source_index) catch unreachable; this.graph.pool.pool.schedule(ThreadPoolLib.Batch.from(&task.task)); this.graph.shadow_entry_point_range.len += 1; @@ -1209,14 +1209,14 @@ pub const BundleV2 = struct { pub const TaskCompletion = bun.JSC.AnyTask.New(JSBundleCompletionTask, onComplete); pub fn deref(this: *JSBundleCompletionTask) void { - if (this.ref_count.fetchSub(1, .Monotonic) == 1) { + if (this.ref_count.fetchSub(1, .monotonic) == 1) { this.config.deinit(bun.default_allocator); bun.default_allocator.destroy(this); } } pub fn ref(this: *JSBundleCompletionTask) void { - _ = this.ref_count.fetchAdd(1, .Monotonic); + _ = this.ref_count.fetchAdd(1, .monotonic); } pub fn onComplete(this: *JSBundleCompletionTask) void { @@ -1371,7 +1371,7 @@ pub const BundleV2 = struct { }) catch {}; // An error ocurred, prevent spinning the event loop forever - _ = @atomicRmw(usize, &this.graph.parse_pending, .Sub, 1, .Monotonic); + _ = @atomicRmw(usize, &this.graph.parse_pending, .Sub, 1, .monotonic); }, .success => |code| { this.graph.input_files.items(.loader)[load.source_index.get()] = code.loader; @@ -1390,7 +1390,7 @@ pub const BundleV2 = struct { log.warnings += @as(usize, @intFromBool(err.kind == .warn)); // An error ocurred, prevent spinning the event loop forever - _ = @atomicRmw(usize, &this.graph.parse_pending, .Sub, 1, .Monotonic); + _ = @atomicRmw(usize, &this.graph.parse_pending, .Sub, 1, .monotonic); }, .pending, .consumed => unreachable, } @@ -1401,7 +1401,7 @@ pub const BundleV2 = struct { this: *BundleV2, ) void { defer resolve.deinit(); - defer _ = @atomicRmw(usize, &this.graph.resolve_pending, .Sub, 1, .Monotonic); + defer _ = @atomicRmw(usize, &this.graph.resolve_pending, .Sub, 1, .monotonic); debug("onResolve: ({s}:{s}, {s})", .{ resolve.import_record.namespace, resolve.import_record.specifier, @tagName(resolve.value) }); defer { @@ -1495,7 +1495,7 @@ pub const BundleV2 = struct { }; task.task.node.next = 
null; - _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, .Monotonic); + _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, .monotonic); // Handle onLoad plugins if (!this.enqueueOnLoadPluginIfNeeded(task)) { @@ -1785,7 +1785,7 @@ pub const BundleV2 = struct { import_record.path.namespace, import_record.path.text, }); - _ = @atomicRmw(usize, &this.graph.resolve_pending, .Add, 1, .Monotonic); + _ = @atomicRmw(usize, &this.graph.resolve_pending, .Add, 1, .monotonic); resolve.* = JSC.API.JSBundler.Resolve.create( .{ @@ -2089,9 +2089,9 @@ pub const BundleV2 = struct { defer { if (diff > 0) - _ = @atomicRmw(usize, &graph.parse_pending, .Add, @as(usize, @intCast(diff)), .Monotonic) + _ = @atomicRmw(usize, &graph.parse_pending, .Add, @as(usize, @intCast(diff)), .monotonic) else - _ = @atomicRmw(usize, &graph.parse_pending, .Sub, @as(usize, @intCast(-diff)), .Monotonic); + _ = @atomicRmw(usize, &graph.parse_pending, .Sub, @as(usize, @intCast(-diff)), .monotonic); } var resolve_queue = ResolveQueue.init(this.graph.allocator); @@ -2940,7 +2940,7 @@ pub const ParseTask = struct { } pub fn callback(this: *ThreadPoolLib.Task) void { - run(@fieldParentPtr(ParseTask, "task", this)); + run(@fieldParentPtr("task", this)); } fn run(this: *ParseTask) void { @@ -3854,23 +3854,23 @@ const LinkerContext = struct { thread_task: ThreadPoolLib.Task = .{ .callback = &runLineOffset }, pub fn runLineOffset(thread_task: *ThreadPoolLib.Task) void { - var task = @fieldParentPtr(Task, "thread_task", thread_task); + var task: *Task = @fieldParentPtr("thread_task", thread_task); defer { task.ctx.markPendingTaskDone(); task.ctx.source_maps.line_offset_wait_group.finish(); } - SourceMapData.computeLineOffsets(task.ctx, ThreadPool.Worker.get(@fieldParentPtr(BundleV2, "linker", task.ctx)).allocator, task.source_index); + SourceMapData.computeLineOffsets(task.ctx, ThreadPool.Worker.get(@fieldParentPtr("linker", task.ctx)).allocator, task.source_index); } pub fn 
runQuotedSourceContents(thread_task: *ThreadPoolLib.Task) void { - var task = @fieldParentPtr(Task, "thread_task", thread_task); + var task: *Task = @fieldParentPtr("thread_task", thread_task); defer { task.ctx.markPendingTaskDone(); task.ctx.source_maps.quoted_contents_wait_group.finish(); } - SourceMapData.computeQuotedSourceContents(task.ctx, ThreadPool.Worker.get(@fieldParentPtr(BundleV2, "linker", task.ctx)).allocator, task.source_index); + SourceMapData.computeQuotedSourceContents(task.ctx, ThreadPool.Worker.get(@fieldParentPtr("linker", task.ctx)).allocator, task.source_index); } }; @@ -4011,12 +4011,12 @@ const LinkerContext = struct { } pub fn scheduleTasks(this: *LinkerContext, batch: ThreadPoolLib.Batch) void { - _ = this.pending_task_count.fetchAdd(@as(u32, @truncate(batch.len)), .Monotonic); + _ = this.pending_task_count.fetchAdd(@as(u32, @truncate(batch.len)), .monotonic); this.parse_graph.pool.pool.schedule(batch); } pub fn markPendingTaskDone(this: *LinkerContext) void { - _ = this.pending_task_count.fetchSub(1, .Monotonic); + _ = this.pending_task_count.fetchSub(1, .monotonic); } pub noinline fn link( @@ -5683,7 +5683,7 @@ const LinkerContext = struct { const id = source_index; if (id > c.graph.meta.len) return; - var worker: *ThreadPool.Worker = ThreadPool.Worker.get(@fieldParentPtr(BundleV2, "linker", c)); + var worker: *ThreadPool.Worker = ThreadPool.Worker.get(@fieldParentPtr("linker", c)); defer worker.unget(); // we must use this allocator here @@ -6374,7 +6374,7 @@ const LinkerContext = struct { }; fn generateChunkJS(ctx: GenerateChunkCtx, chunk: *Chunk, chunk_index: usize) void { defer ctx.wg.finish(); - const worker = ThreadPool.Worker.get(@fieldParentPtr(BundleV2, "linker", ctx.c)); + const worker = ThreadPool.Worker.get(@fieldParentPtr("linker", ctx.c)); defer worker.unget(); postProcessJSChunk(ctx, worker, chunk, chunk_index) catch |err| Output.panic("TODO: handle error: {s}", .{@errorName(err)}); } @@ -6635,7 +6635,7 @@ const 
LinkerContext = struct { fn generateJSRenamer(ctx: GenerateChunkCtx, chunk: *Chunk, chunk_index: usize) void { defer ctx.wg.finish(); - var worker = ThreadPool.Worker.get(@fieldParentPtr(BundleV2, "linker", ctx.c)); + var worker = ThreadPool.Worker.get(@fieldParentPtr("linker", ctx.c)); defer worker.unget(); generateJSRenamer_(ctx, worker, chunk, chunk_index); } @@ -6650,10 +6650,10 @@ const LinkerContext = struct { } fn generateCompileResultForJSChunk(task: *ThreadPoolLib.Task) void { - const part_range: *const PendingPartRange = @fieldParentPtr(PendingPartRange, "task", task); + const part_range: *const PendingPartRange = @fieldParentPtr("task", task); const ctx = part_range.ctx; defer ctx.wg.finish(); - var worker = ThreadPool.Worker.get(@fieldParentPtr(BundleV2, "linker", ctx.c)); + var worker = ThreadPool.Worker.get(@fieldParentPtr("linker", ctx.c)); defer worker.unget(); ctx.chunk.compile_results_for_chunk[part_range.i] = generateCompileResultForJSChunk_(worker, ctx.c, ctx.chunk, part_range.part_range); } diff --git a/src/c.zig b/src/c.zig index ce2441e353..4c08263c1d 100644 --- a/src/c.zig +++ b/src/c.zig @@ -12,12 +12,12 @@ pub usingnamespace PlatformSpecific; const C = std.c; const builtin = @import("builtin"); -const os = std.os; +const posix = std.posix; const mem = std.mem; const Stat = std.fs.File.Stat; const Kind = std.fs.File.Kind; const StatError = std.fs.File.StatError; -const errno = os.errno; +const errno = posix.errno; const mode_t = bun.Mode; // TODO: this is wrong on Windows const libc_stat = bun.Stat; @@ -40,7 +40,7 @@ pub extern "c" fn stat64([*c]const u8, [*c]libc_stat) c_int; pub extern "c" fn lchmod(path: [*:0]const u8, mode: mode_t) c_int; pub extern "c" fn truncate([*:0]const u8, i64) c_int; // note: truncate64 is not a thing -pub extern "c" fn lutimes(path: [*:0]const u8, times: *const [2]std.os.timeval) c_int; +pub extern "c" fn lutimes(path: [*:0]const u8, times: *const [2]std.posix.timeval) c_int; pub extern "c" fn mkdtemp(template: 
[*c]u8) ?[*:0]u8; pub extern "c" fn memcmp(s1: [*c]const u8, s2: [*c]const u8, n: usize) c_int; @@ -65,7 +65,7 @@ pub fn lstat_absolute(path: [:0]const u8) !Stat { .BADF => unreachable, // Always a race condition. .NOMEM => return error.SystemResources, .ACCES => return error.AccessDenied, - else => |err| return os.unexpectedErrno(err), + else => |err| return posix.unexpectedErrno(err), } const atime = st.atime(); @@ -77,22 +77,22 @@ pub fn lstat_absolute(path: [:0]const u8) !Stat { .mode = st.mode, .kind = switch (builtin.os.tag) { .wasi => switch (st.filetype) { - os.FILETYPE_BLOCK_DEVICE => Kind.block_device, - os.FILETYPE_CHARACTER_DEVICE => Kind.character_device, - os.FILETYPE_DIRECTORY => Kind.directory, - os.FILETYPE_SYMBOLIC_LINK => Kind.sym_link, - os.FILETYPE_REGULAR_FILE => Kind.file, - os.FILETYPE_SOCKET_STREAM, os.FILETYPE_SOCKET_DGRAM => Kind.unix_domain_socket, + posix.FILETYPE_BLOCK_DEVICE => Kind.block_device, + posix.FILETYPE_CHARACTER_DEVICE => Kind.character_device, + posix.FILETYPE_DIRECTORY => Kind.directory, + posix.FILETYPE_SYMBOLIC_LINK => Kind.sym_link, + posix.FILETYPE_REGULAR_FILE => Kind.file, + posix.FILETYPE_SOCKET_STREAM, posix.FILETYPE_SOCKET_DGRAM => Kind.unix_domain_socket, else => Kind.unknown, }, - else => switch (st.mode & os.S.IFMT) { - os.S.IFBLK => Kind.block_device, - os.S.IFCHR => Kind.character_device, - os.S.IFDIR => Kind.directory, - os.S.IFIFO => Kind.named_pipe, - os.S.IFLNK => Kind.sym_link, - os.S.IFREG => Kind.file, - os.S.IFSOCK => Kind.unix_domain_socket, + else => switch (st.mode & posix.S.IFMT) { + posix.S.IFBLK => Kind.block_device, + posix.S.IFCHR => Kind.character_device, + posix.S.IFDIR => Kind.directory, + posix.S.IFIFO => Kind.named_pipe, + posix.S.IFLNK => Kind.sym_link, + posix.S.IFREG => Kind.file, + posix.S.IFSOCK => Kind.unix_domain_socket, else => Kind.unknown, }, }, @@ -150,7 +150,7 @@ pub fn moveFileZWithHandle(from_handle: bun.FileDescriptor, from_dir: bun.FileDe // On Linux, this will be fast 
because sendfile() supports copying between two file descriptors on disk // macOS & BSDs will be slow because pub fn moveFileZSlow(from_dir: bun.FileDescriptor, filename: [:0]const u8, to_dir: bun.FileDescriptor, destination: [:0]const u8) !void { - const in_handle = try bun.sys.openat(from_dir, filename, std.os.O.RDONLY | std.os.O.CLOEXEC, if (Environment.isWindows) 0 else 0o644).unwrap(); + const in_handle = try bun.sys.openat(from_dir, filename, bun.O.RDONLY | bun.O.CLOEXEC, if (Environment.isWindows) 0 else 0o644).unwrap(); defer _ = bun.sys.close(in_handle); _ = bun.sys.unlinkat(from_dir, filename); try copyFileZSlowWithHandle(in_handle, to_dir, destination); @@ -173,7 +173,7 @@ pub fn copyFileZSlowWithHandle(in_handle: bun.FileDescriptor, to_dir: bun.FileDe return; } - const stat_ = if (comptime Environment.isPosix) try std.os.fstat(in_handle.cast()) else void{}; + const stat_ = if (comptime Environment.isPosix) try std.posix.fstat(in_handle.cast()) else void{}; // Attempt to delete incase it already existed. 
// This fixes ETXTBUSY on Linux @@ -182,7 +182,7 @@ pub fn copyFileZSlowWithHandle(in_handle: bun.FileDescriptor, to_dir: bun.FileDe const out_handle = try bun.sys.openat( to_dir, destination, - std.os.O.WRONLY | std.os.O.CREAT | std.os.O.CLOEXEC | std.os.O.TRUNC, + bun.O.WRONLY | bun.O.CREAT | bun.O.CLOEXEC | bun.O.TRUNC, if (comptime Environment.isPosix) 0o644 else 0, ).unwrap(); defer _ = bun.sys.close(out_handle); @@ -230,8 +230,8 @@ pub fn getSelfExeSharedLibPaths(allocator: std.mem.Allocator) error{OutOfMemory} } allocator.free(slice); } - try os.dl_iterate_phdr(&paths, error{OutOfMemory}, struct { - fn callback(info: *os.dl_phdr_info, size: usize, list: *List) !void { + try posix.dl_iterate_phdr(&paths, error{OutOfMemory}, struct { + fn callback(info: *posix.dl_phdr_info, size: usize, list: *List) !void { _ = size; const name = info.dlpi_name orelse return; if (name[0] == '/') { @@ -355,8 +355,8 @@ pub fn setProcessPriority(pid_: i32, priority_: i32) std.c.E { if (code == -2) return .SRCH; if (code == 0) return .SUCCESS; - const errcode = std.c.getErrno(code); - return errcode; + const errcode = bun.sys.getErrno(code); + return @enumFromInt(@intFromEnum(errcode)); } pub fn getVersion(buf: []u8) []const u8 { @@ -394,7 +394,7 @@ pub fn getRelease(buf: []u8) []const u8 { } pub extern fn memmem(haystack: [*]const u8, haystacklen: usize, needle: [*]const u8, needlelen: usize) ?[*]const u8; -pub extern fn cfmakeraw(*std.os.termios) void; +pub extern fn cfmakeraw(*std.posix.termios) void; const LazyStatus = enum { pending, diff --git a/src/cache.zig b/src/cache.zig index 4cefe3b9f0..b82e2e10b7 100644 --- a/src/cache.zig +++ b/src/cache.zig @@ -170,7 +170,7 @@ pub const Fs = struct { if (_file_handle == null) { if (FeatureFlags.store_file_descriptors and dirname_fd != bun.invalid_fd and dirname_fd != .zero) { - file_handle = (bun.sys.openatA(dirname_fd, std.fs.path.basename(path), std.os.O.RDONLY, 0).unwrap() catch |err| brk: { + file_handle = 
(bun.sys.openatA(dirname_fd, std.fs.path.basename(path), bun.O.RDONLY, 0).unwrap() catch |err| brk: { switch (err) { error.ENOENT => { const handle = try bun.openFile(path, .{ .mode = .read_only }); diff --git a/src/cli.zig b/src/cli.zig index 2be67169ef..6875b61442 100644 --- a/src/cli.zig +++ b/src/cli.zig @@ -126,7 +126,7 @@ pub const Arguments = struct { var paths = [_]string{ cwd, filename }; const outpath = try std.fs.path.resolve(allocator, &paths); defer allocator.free(outpath); - var file = try bun.openFileZ(&try std.os.toPosixPath(outpath), std.fs.File.OpenFlags{ .mode = .read_only }); + var file = try bun.openFileZ(&try std.posix.toPosixPath(outpath), std.fs.File.OpenFlags{ .mode = .read_only }); defer file.close(); const size = try file.getEndPos(); return try file.readToEndAlloc(allocator, size); @@ -255,7 +255,7 @@ pub const Arguments = struct { pub const test_params = test_only_params ++ runtime_params_ ++ transpiler_params_ ++ base_params_; pub fn loadConfigPath(allocator: std.mem.Allocator, auto_loaded: bool, config_path: [:0]const u8, ctx: Command.Context, comptime cmd: Command.Tag) !void { - var config_file = switch (bun.sys.openA(config_path, std.os.O.RDONLY, 0)) { + var config_file = switch (bun.sys.openA(config_path, bun.O.RDONLY, 0)) { .result => |fd| fd.asFile(), .err => |err| { if (auto_loaded) return; @@ -1822,7 +1822,7 @@ pub const Command = struct { if (ctx.runtime_options.eval.script.len > 0) { const trigger = bun.pathLiteral("/[eval]"); var entry_point_buf: [bun.MAX_PATH_BYTES + trigger.len]u8 = undefined; - const cwd = try std.os.getcwd(&entry_point_buf); + const cwd = try std.posix.getcwd(&entry_point_buf); @memcpy(entry_point_buf[cwd.len..][0..trigger.len], trigger); try BunJS.Run.boot(ctx, entry_point_buf[0 .. 
cwd.len + trigger.len]); return; diff --git a/src/cli/add_completions.zig b/src/cli/add_completions.zig index 53ba3bfbef..b96f1c1e81 100644 --- a/src/cli/add_completions.zig +++ b/src/cli/add_completions.zig @@ -66,7 +66,9 @@ pub const index: Index = if (Environment.isDebug) Index.initFill(&.{"OOMWorkArou break; } } - array.set(@as(FirstLetter, @enumFromInt(i)), &record); + + const cloned = record; + array.set(@as(FirstLetter, @enumFromInt(i)), &cloned); @setEvalBranchQuota(999999); i = next_i; diff --git a/src/cli/build_command.zig b/src/cli/build_command.zig index 288ab6c3a8..6312782988 100644 --- a/src/cli/build_command.zig +++ b/src/cli/build_command.zig @@ -198,7 +198,7 @@ pub const BuildCommand = struct { break :brk2 resolve_path.getIfExistsLongestCommonPath(this_bundler.options.entry_points) orelse "."; }; - var dir = bun.openDirForPath(&(try std.os.toPosixPath(path))) catch |err| { + var dir = bun.openDirForPath(&(try std.posix.toPosixPath(path))) catch |err| { Output.prettyErrorln("{s} opening root directory {}", .{ @errorName(err), bun.fmt.quote(path) }); Global.exit(1); }; @@ -514,7 +514,7 @@ pub const BuildCommand = struct { try writer.writeAll(rel_path); try writer.writeByteNTimes(' ', padding_count); const size = @as(f64, @floatFromInt(f.size)) / 1000.0; - try std.fmt.formatFloatDecimal(size, .{ .precision = 2 }, writer); + try std.fmt.formatType(size, "d", .{ .precision = 2 }, writer, 1); try writer.writeAll(" KB\n"); } diff --git a/src/cli/bunx_command.zig b/src/cli/bunx_command.zig index 3871b2aa5b..bb0fc13a0f 100644 --- a/src/cli/bunx_command.zig +++ b/src/cli/bunx_command.zig @@ -64,7 +64,7 @@ pub const BunxCommand = struct { const nanoseconds_cache_valid = seconds_cache_valid * 1000000000; fn getBinNameFromSubpath(bundler: *bun.Bundler, dir_fd: bun.FileDescriptor, subpath_z: [:0]const u8) ![]const u8 { - const target_package_json_fd = try bun.sys.openat(dir_fd, subpath_z, std.os.O.RDONLY, 0).unwrap(); + const target_package_json_fd = try 
bun.sys.openat(dir_fd, subpath_z, bun.O.RDONLY, 0).unwrap(); const target_package_json = bun.sys.File{ .handle = target_package_json_fd }; defer target_package_json.close(); @@ -111,7 +111,7 @@ pub const BunxCommand = struct { if (expr.asProperty("directories")) |dirs| { if (dirs.expr.asProperty("bin")) |bin_prop| { if (bin_prop.expr.asString(bundler.allocator)) |dir_name| { - const bin_dir = try bun.sys.openatA(dir_fd, dir_name, std.os.O.RDONLY | std.os.O.DIRECTORY, 0).unwrap(); + const bin_dir = try bun.sys.openatA(dir_fd, dir_name, bun.O.RDONLY | bun.O.DIRECTORY, 0).unwrap(); defer _ = bun.sys.close(bin_dir); const dir = std.fs.Dir{ .fd = bin_dir.cast() }; var iterator = bun.DirIterator.iterate(dir, .u8); @@ -148,7 +148,7 @@ pub const BunxCommand = struct { bun.pathLiteral("{s}/package.json"), .{tempdir_name}, ) catch unreachable; - const target_package_json_fd = bun.sys.openat(bun.FD.cwd(), subpath_z, std.os.O.RDONLY, 0).unwrap() catch return error.NeedToInstall; + const target_package_json_fd = bun.sys.openat(bun.FD.cwd(), subpath_z, bun.O.RDONLY, 0).unwrap() catch return error.NeedToInstall; const target_package_json = bun.sys.File{ .handle = target_package_json_fd }; const is_stale = is_stale: { @@ -492,7 +492,7 @@ pub const BunxCommand = struct { if (bun.strings.hasPrefix(out, bunx_cache_dir)) { const is_stale = is_stale: { if (Environment.isWindows) { - const fd = bun.sys.openat(bun.invalid_fd, destination, std.os.O.RDONLY, 0).unwrap() catch { + const fd = bun.sys.openat(bun.invalid_fd, destination, bun.O.RDONLY, 0).unwrap() catch { // if we cant open this, we probably will just fail when we run it // and that error message is likely going to be better than the one from `bun add` break :is_stale false; @@ -512,7 +512,7 @@ pub const BunxCommand = struct { else => break :is_stale true, } } else { - var stat: std.os.Stat = undefined; + var stat: std.posix.Stat = undefined; const rc = std.c.stat(destination, &stat); if (rc != 0) { break :is_stale true; diff 
--git a/src/cli/create_command.zig b/src/cli/create_command.zig index 2cf3b9a0ea..92f6d823aa 100644 --- a/src/cli/create_command.zig +++ b/src/cli/create_command.zig @@ -9,6 +9,7 @@ const stringZ = bun.stringZ; const default_allocator = bun.default_allocator; const C = bun.C; const std = @import("std"); +const Progress = bun.Progress; const lex = bun.js_lexer; const logger = bun.logger; @@ -43,7 +44,6 @@ const Headers = bun.http.Headers; const CopyFile = @import("../copy_file.zig"); var bun_path_buf: bun.PathBuffer = undefined; const Futex = @import("../futex.zig"); -const ComptimeStringMap = @import("../comptime_string_map.zig").ComptimeStringMap; const target_nextjs_version = "12.2.3"; pub var initialized_store = false; @@ -270,7 +270,7 @@ pub const CreateCommand = struct { const destination = try filesystem.dirname_store.append([]const u8, resolve_path.joinAbs(filesystem.top_level_dir, .auto, dirname)); - var progress = std.Progress{}; + var progress = Progress{}; progress.supports_ansi_escape_codes = Output.enable_ansi_colors_stderr; var node = progress.start(try ProgressBuf.print("Loading {s}", .{template}), 0); @@ -502,8 +502,8 @@ pub const CreateCommand = struct { pub fn copy( destination_dir_: std.fs.Dir, walker: *Walker, - node_: *std.Progress.Node, - progress_: *std.Progress, + node_: *Progress.Node, + progress_: *Progress, dst_base_len: if (Environment.isWindows) usize else void, dst_buf: if (Environment.isWindows) *bun.WPathBuffer else void, src_base_len: if (Environment.isWindows) usize else void, @@ -650,7 +650,7 @@ pub const CreateCommand = struct { }; if (comptime Environment.isWindows) try pkg.seekTo(prev_file_pos); // The printer doesn't truncate, so we must do so manually - std.os.ftruncate(pkg.handle, 0) catch {}; + std.posix.ftruncate(pkg.handle, 0) catch {}; initializeStore(); } @@ -676,15 +676,15 @@ pub const CreateCommand = struct { if (comptime Environment.isWindows) { parent_dir.copyFile("gitignore", parent_dir, ".gitignore", .{}) catch 
{}; } else { - std.os.linkat(parent_dir.fd, "gitignore", parent_dir.fd, ".gitignore", 0) catch {}; + std.posix.linkat(parent_dir.fd, "gitignore", parent_dir.fd, ".gitignore", 0) catch {}; } - std.os.unlinkat( + std.posix.unlinkat( parent_dir.fd, "gitignore", 0, ) catch {}; - std.os.unlinkat( + std.posix.unlinkat( parent_dir.fd, ".npmignore", 0, @@ -1304,7 +1304,7 @@ pub const CreateCommand = struct { // // } // public_index_html_file.pwriteAll(outfile, 0) catch break :bail; - // std.os.ftruncate(public_index_html_file.handle, outfile.len + 1) catch break :bail; + // std.posix.ftruncate(public_index_html_file.handle, outfile.len + 1) catch break :bail; // bun_bun_for_react_scripts = true; // is_create_react_app = true; // Output.prettyln("[package.json] Added entry point {s} to public/index.html", .{create_react_app_entry_point_path}); @@ -1442,7 +1442,7 @@ pub const CreateCommand = struct { break :process_package_json; }; - std.os.ftruncate(package_json_file.?.handle, written + 1) catch {}; + std.posix.ftruncate(package_json_file.?.handle, written + 1) catch {}; // if (!create_options.skip_install) { // if (needs.bun_bun_for_nextjs) { @@ -1659,7 +1659,7 @@ pub const CreateCommand = struct { if (create_options.open) { if (which(&bun_path_buf, PATH, destination, "bun")) |bin| { var argv = [_]string{bun.asByteSlice(bin)}; - var child = std.ChildProcess.init(&argv, ctx.allocator); + var child = std.process.Child.init(&argv, ctx.allocator); child.cwd = destination; child.stdin_behavior = .Inherit; child.stdout_behavior = .Inherit; @@ -1833,7 +1833,7 @@ pub const Example = struct { } } - pub fn fetchAllLocalAndRemote(ctx: Command.Context, node: ?*std.Progress.Node, env_loader: *DotEnv.Loader, filesystem: *fs.FileSystem) !std.ArrayList(Example) { + pub fn fetchAllLocalAndRemote(ctx: Command.Context, node: ?*Progress.Node, env_loader: *DotEnv.Loader, filesystem: *fs.FileSystem) !std.ArrayList(Example) { const remote_examples = try Example.fetchAll(ctx, env_loader, node); 
if (node) |node_| node_.end(); @@ -1912,8 +1912,8 @@ pub const Example = struct { ctx: Command.Context, env_loader: *DotEnv.Loader, name: string, - refresher: *std.Progress, - progress: *std.Progress.Node, + refresher: *Progress, + progress: *Progress.Node, ) !MutableString { const owner_i = std.mem.indexOfScalar(u8, name, '/').?; const owner = name[0..owner_i]; @@ -2033,7 +2033,7 @@ pub const Example = struct { return mutable.*; } - pub fn fetch(ctx: Command.Context, env_loader: *DotEnv.Loader, name: string, refresher: *std.Progress, progress: *std.Progress.Node) !MutableString { + pub fn fetch(ctx: Command.Context, env_loader: *DotEnv.Loader, name: string, refresher: *Progress, progress: *Progress.Node) !MutableString { progress.name = "Fetching package.json"; refresher.refresh(); @@ -2171,7 +2171,7 @@ pub const Example = struct { return mutable.*; } - pub fn fetchAll(ctx: Command.Context, env_loader: *DotEnv.Loader, progress_node: ?*std.Progress.Node) ![]Example { + pub fn fetchAll(ctx: Command.Context, env_loader: *DotEnv.Loader, progress_node: ?*Progress.Node) ![]Example { url = URL.parse(examples_url); const http_proxy: ?URL = env_loader.getHttpProxy(url); @@ -2279,7 +2279,7 @@ pub const CreateListExamplesCommand = struct { env_loader.loadProcess(); - var progress = std.Progress{}; + var progress = Progress{}; progress.supports_ansi_escape_codes = Output.enable_ansi_colors_stderr; const node = progress.start("Fetching manifest", 0); progress.refresh(); @@ -2336,25 +2336,25 @@ const GitHandler = struct { else run(destination, PATH, false) catch false; - @fence(.Acquire); + @fence(.acquire); success.store( if (outcome) 1 else 2, - .Release, + .release, ); Futex.wake(&success, 1); } pub fn wait() bool { - @fence(.Release); + @fence(.release); - while (success.load(.Acquire) == 0) { + while (success.load(.acquire) == 0) { Futex.wait(&success, 0, 1000) catch continue; } - const outcome = success.load(.Acquire) == 1; + const outcome = success.load(.acquire) == 1; 
thread.join(); return outcome; } @@ -2400,7 +2400,7 @@ const GitHandler = struct { inline for (comptime std.meta.fieldNames(@TypeOf(Commands))) |command_field| { const command: []const string = @field(git_commands, command_field); - var process = std.ChildProcess.init(command, default_allocator); + var process = std.process.Child.init(command, default_allocator); process.cwd = destination; process.stdin_behavior = .Inherit; process.stdout_behavior = .Inherit; diff --git a/src/cli/filter_run.zig b/src/cli/filter_run.zig index 302c299b38..0f404fde21 100644 --- a/src/cli/filter_run.zig +++ b/src/cli/filter_run.zig @@ -358,7 +358,7 @@ const State = struct { for (this.handles) |*handle| { if (handle.process) |*proc| { // if we get an error here we simply ignore it - _ = proc.ptr.kill(std.os.SIG.INT); + _ = proc.ptr.kill(std.posix.SIG.INT); } } } @@ -385,7 +385,7 @@ const AbortHandler = struct { var should_abort = false; - fn posixSignalHandler(sig: i32, info: *const std.os.siginfo_t, _: ?*const anyopaque) callconv(.C) void { + fn posixSignalHandler(sig: i32, info: *const std.posix.siginfo_t, _: ?*const anyopaque) callconv(.C) void { _ = sig; _ = info; should_abort = true; @@ -401,13 +401,13 @@ const AbortHandler = struct { pub fn install() void { if (Environment.isPosix) { - const action = std.os.Sigaction{ + const action = std.posix.Sigaction{ .handler = .{ .sigaction = AbortHandler.posixSignalHandler }, - .mask = std.os.empty_sigset, - .flags = std.os.SA.SIGINFO | std.os.SA.RESTART | std.os.SA.RESETHAND, + .mask = std.posix.empty_sigset, + .flags = std.posix.SA.SIGINFO | std.posix.SA.RESTART | std.posix.SA.RESETHAND, }; // if we can't set the handler, we just ignore it - std.os.sigaction(std.os.SIG.INT, &action, null) catch |err| { + std.posix.sigaction(std.posix.SIG.INT, &action, null) catch |err| { if (Environment.isDebug) { Output.warn("Failed to set abort handler: {s}\n", .{@errorName(err)}); } @@ -568,9 +568,10 @@ pub fn runScriptsWithFilter(ctx: Command.Context) 
!noreturn { for (state.handles) |*handle| { var iter = handle.config.deps.map.iterator(); while (iter.next()) |entry| { - var alloc = std.heap.stackFallback(256, ctx.allocator); - const buf = try alloc.get().alloc(u8, entry.key_ptr.len()); - defer alloc.get().free(buf); + var sfa = std.heap.stackFallback(256, ctx.allocator); + const alloc = sfa.get(); + const buf = try alloc.alloc(u8, entry.key_ptr.len()); + defer alloc.free(buf); const name = entry.key_ptr.slice(buf); // is it a workspace dependency? if (map.get(name)) |pkgs| { diff --git a/src/cli/init_command.zig b/src/cli/init_command.zig index 5e2baeb9f1..5683b83d7d 100644 --- a/src/cli/init_command.zig +++ b/src/cli/init_command.zig @@ -368,7 +368,7 @@ pub const InitCommand = struct { break :write_package_json; }; - std.os.ftruncate(package_json_file.?.handle, written + 1) catch {}; + std.posix.ftruncate(package_json_file.?.handle, written + 1) catch {}; package_json_file.?.close(); } @@ -447,7 +447,7 @@ pub const InitCommand = struct { Output.flush(); if (exists("package.json")) { - var process = std.ChildProcess.init( + var process = std.process.Child.init( &.{ try bun.selfExePath(), "install", diff --git a/src/cli/install_completions_command.zig b/src/cli/install_completions_command.zig index 054b5b1111..575c79f354 100644 --- a/src/cli/install_completions_command.zig +++ b/src/cli/install_completions_command.zig @@ -57,11 +57,11 @@ pub const InstallCompletionsCommand = struct { const exe = try bun.selfExePath(); var target_buf: bun.PathBuffer = undefined; var target = std.fmt.bufPrint(&target_buf, "{s}/" ++ bunx_name, .{std.fs.path.dirname(exe).?}) catch unreachable; - std.os.symlink(exe, target) catch { + std.posix.symlink(exe, target) catch { outer: { if (bun.getenvZ("BUN_INSTALL")) |install_dir| { target = std.fmt.bufPrint(&target_buf, "{s}/bin/" ++ bunx_name, .{install_dir}) catch unreachable; - std.os.symlink(exe, target) catch break :outer; + std.posix.symlink(exe, target) catch break :outer; return; 
} } @@ -70,7 +70,7 @@ pub const InstallCompletionsCommand = struct { outer: { if (bun.getenvZ(bun.DotEnv.home_env)) |home_dir| { target = std.fmt.bufPrint(&target_buf, "{s}/.bun/bin/" ++ bunx_name, .{home_dir}) catch unreachable; - std.os.symlink(exe, target) catch break :outer; + std.posix.symlink(exe, target) catch break :outer; return; } } @@ -79,7 +79,7 @@ pub const InstallCompletionsCommand = struct { outer: { if (bun.getenvZ(bun.DotEnv.home_env)) |home_dir| { target = std.fmt.bufPrint(&target_buf, "{s}/.local/bin/" ++ bunx_name, .{home_dir}) catch unreachable; - std.os.symlink(exe, target) catch break :outer; + std.posix.symlink(exe, target) catch break :outer; return; } } diff --git a/src/cli/list-of-yarn-commands.zig b/src/cli/list-of-yarn-commands.zig index 4109ed7279..12d0d23a2e 100644 --- a/src/cli/list-of-yarn-commands.zig +++ b/src/cli/list-of-yarn-commands.zig @@ -104,5 +104,6 @@ pub const all_yarn_commands = brk: { } } - break :brk array[0..array_i]; + const final = array[0..array_i].*; + break :brk &final; }; diff --git a/src/cli/pm_trusted_command.zig b/src/cli/pm_trusted_command.zig index 04674af07d..8faad13b93 100644 --- a/src/cli/pm_trusted_command.zig +++ b/src/cli/pm_trusted_command.zig @@ -1,5 +1,5 @@ const std = @import("std"); -const Progress = std.Progress; +const Progress = bun.Progress; const bun = @import("root").bun; const logger = bun.logger; const Environment = bun.Environment; @@ -340,9 +340,9 @@ pub const TrustCommand = struct { for (entry.items) |info| { if (info.skip) continue; - while (LifecycleScriptSubprocess.alive_count.load(.Monotonic) >= pm.options.max_concurrent_lifecycle_scripts) { + while (LifecycleScriptSubprocess.alive_count.load(.monotonic) >= pm.options.max_concurrent_lifecycle_scripts) { if (pm.options.log_level.isVerbose()) { - if (PackageManager.hasEnoughTimePassedBetweenWaitingMessages()) Output.prettyErrorln("[PackageManager] waiting for {d} scripts\n", .{LifecycleScriptSubprocess.alive_count.load(.Monotonic)}); 
+ if (PackageManager.hasEnoughTimePassedBetweenWaitingMessages()) Output.prettyErrorln("[PackageManager] waiting for {d} scripts\n", .{LifecycleScriptSubprocess.alive_count.load(.monotonic)}); } pm.sleep(); @@ -359,7 +359,7 @@ pub const TrustCommand = struct { } } - while (pm.pending_lifecycle_script_tasks.load(.Monotonic) > 0) { + while (pm.pending_lifecycle_script_tasks.load(.monotonic) > 0) { pm.sleep(); } } @@ -439,7 +439,7 @@ pub const TrustCommand = struct { const new_package_json_contents = package_json_writer.ctx.writtenWithoutTrailingZero(); try pm.root_package_json_file.pwriteAll(new_package_json_contents, 0); - std.os.ftruncate(pm.root_package_json_file.handle, new_package_json_contents.len) catch {}; + std.posix.ftruncate(pm.root_package_json_file.handle, new_package_json_contents.len) catch {}; pm.root_package_json_file.close(); if (comptime Environment.allow_assert) { diff --git a/src/cli/run_command.zig b/src/cli/run_command.zig index 213c1764c8..f873d334ea 100644 --- a/src/cli/run_command.zig +++ b/src/cli/run_command.zig @@ -45,7 +45,7 @@ const NpmArgs = struct { pub const package_version: string = "npm_package_version"; }; const PackageJSON = @import("../resolver/package_json.zig").PackageJSON; -const yarn_commands: []u64 = @import("./list-of-yarn-commands.zig").all_yarn_commands; +const yarn_commands: []const u64 = @import("./list-of-yarn-commands.zig").all_yarn_commands; const ShellCompletions = @import("./shell_completions.zig"); const PosixSpawn = bun.posix.spawn; @@ -723,7 +723,7 @@ pub const RunCommand = struct { var retried = false; while (true) { inner: { - std.os.symlinkZ(argv0, path) catch |err| { + std.posix.symlinkZ(argv0, path) catch |err| { if (err == error.PathAlreadyExists) break :inner; if (retried) return; @@ -791,7 +791,7 @@ pub const RunCommand = struct { { bun.assert(target_path_buffer[dir_slice.len] == '\\'); target_path_buffer[dir_slice.len] = 0; - std.os.mkdirW(target_path_buffer[0..dir_slice.len :0], 0) catch {}; + 
std.posix.mkdirW(target_path_buffer[0..dir_slice.len :0], 0) catch {}; target_path_buffer[dir_slice.len] = '\\'; } @@ -1521,7 +1521,7 @@ pub const RunCommand = struct { const trigger = bun.pathLiteral("/[stdin]"); var entry_point_buf: [bun.MAX_PATH_BYTES + trigger.len]u8 = undefined; - const cwd = try std.os.getcwd(&entry_point_buf); + const cwd = try std.posix.getcwd(&entry_point_buf); @memcpy(entry_point_buf[cwd.len..][0..trigger.len], trigger); const entry_path = entry_point_buf[0 .. cwd.len + trigger.len]; @@ -1610,7 +1610,7 @@ pub const RunCommand = struct { if (ctx.runtime_options.eval.script.len > 0) { const trigger = bun.pathLiteral("/[eval]"); var entry_point_buf: [bun.MAX_PATH_BYTES + trigger.len]u8 = undefined; - const cwd = try std.os.getcwd(&entry_point_buf); + const cwd = try std.posix.getcwd(&entry_point_buf); @memcpy(entry_point_buf[cwd.len..][0..trigger.len], trigger); try Run.boot(ctx, entry_point_buf[0 .. cwd.len + trigger.len]); return; diff --git a/src/cli/test_command.zig b/src/cli/test_command.zig index 9ae9b66728..e02df273f0 100644 --- a/src/cli/test_command.zig +++ b/src/cli/test_command.zig @@ -34,7 +34,7 @@ const Run = @import("../bun_js.zig").Run; var path_buf: bun.PathBuffer = undefined; var path_buf2: bun.PathBuffer = undefined; const PathString = bun.PathString; -const is_bindgen = std.meta.globalOption("bindgen", bool) orelse false; +const is_bindgen = false; const HTTPThread = bun.http.HTTPThread; const JSC = bun.JSC; @@ -173,7 +173,7 @@ pub const CommandLineReporter = struct { var writer = buffered_writer.writer(); defer buffered_writer.flush() catch unreachable; - var this: *CommandLineReporter = @fieldParentPtr(CommandLineReporter, "callback", cb); + var this: *CommandLineReporter = @fieldParentPtr("callback", cb); writeTestStatusLine(.pass, &writer); @@ -186,7 +186,7 @@ pub const CommandLineReporter = struct { pub fn handleTestFail(cb: *TestRunner.Callback, id: Test.ID, _: string, label: string, expectations: u32, elapsed_ns: 
u64, parent: ?*jest.DescribeScope) void { var writer_ = Output.errorWriter(); - var this: *CommandLineReporter = @fieldParentPtr(CommandLineReporter, "callback", cb); + var this: *CommandLineReporter = @fieldParentPtr("callback", cb); // when the tests fail, we want to repeat the failures at the end // so that you can see them better when there are lots of tests that ran @@ -219,7 +219,7 @@ pub const CommandLineReporter = struct { pub fn handleTestSkip(cb: *TestRunner.Callback, id: Test.ID, _: string, label: string, expectations: u32, elapsed_ns: u64, parent: ?*jest.DescribeScope) void { var writer_ = Output.errorWriter(); - var this: *CommandLineReporter = @fieldParentPtr(CommandLineReporter, "callback", cb); + var this: *CommandLineReporter = @fieldParentPtr("callback", cb); // If you do it.only, don't report the skipped tests because its pretty noisy if (jest.Jest.runner != null and !jest.Jest.runner.?.only) { @@ -244,7 +244,7 @@ pub const CommandLineReporter = struct { pub fn handleTestTodo(cb: *TestRunner.Callback, id: Test.ID, _: string, label: string, expectations: u32, elapsed_ns: u64, parent: ?*jest.DescribeScope) void { var writer_ = Output.errorWriter(); - var this: *CommandLineReporter = @fieldParentPtr(CommandLineReporter, "callback", cb); + var this: *CommandLineReporter = @fieldParentPtr("callback", cb); // when the tests skip, we want to repeat the failures at the end // so that you can see them better when there are lots of tests that ran diff --git a/src/cli/upgrade_command.zig b/src/cli/upgrade_command.zig index 689e4ea074..de498e7c14 100644 --- a/src/cli/upgrade_command.zig +++ b/src/cli/upgrade_command.zig @@ -9,6 +9,7 @@ const stringZ = bun.stringZ; const default_allocator = bun.default_allocator; const C = bun.C; const std = @import("std"); +const Progress = bun.Progress; const lex = bun.js_lexer; const logger = bun.logger; @@ -172,8 +173,8 @@ pub const UpgradeCommand = struct { pub fn getLatestVersion( allocator: std.mem.Allocator, 
env_loader: *DotEnv.Loader, - refresher: ?*std.Progress, - progress: ?*std.Progress.Node, + refresher: ?*Progress, + progress: ?*Progress.Node, use_profile: bool, comptime silent: bool, ) !?Version { @@ -465,7 +466,7 @@ pub const UpgradeCommand = struct { const use_profile = strings.containsAny(bun.argv, "--profile"); const version: Version = if (!use_canary) v: { - var refresher = std.Progress{}; + var refresher = Progress{}; var progress = refresher.start("Fetching version tags", 0); const version = (try getLatestVersion(ctx.allocator, &env_loader, &refresher, progress, use_profile, false)) orelse return; @@ -512,7 +513,7 @@ pub const UpgradeCommand = struct { const http_proxy: ?URL = env_loader.getHttpProxy(zip_url); { - var refresher = std.Progress{}; + var refresher = Progress{}; var progress = refresher.start("Downloading", version.size); refresher.refresh(); var async_http = try ctx.allocator.create(HTTP.AsyncHTTP); @@ -634,7 +635,7 @@ pub const UpgradeCommand = struct { tmpname, }; - var unzip_process = std.ChildProcess.init(&unzip_argv, ctx.allocator); + var unzip_process = std.process.Child.init(&unzip_argv, ctx.allocator); unzip_process.cwd = tmpdir_path; unzip_process.stdin_behavior = .Inherit; unzip_process.stdout_behavior = .Inherit; @@ -712,7 +713,7 @@ pub const UpgradeCommand = struct { "--version", }; - const result = std.ChildProcess.run(.{ + const result = std.process.Child.run(.{ .allocator = ctx.allocator, .argv = &verify_argv, .cwd = tmpdir_path, @@ -850,7 +851,7 @@ pub const UpgradeCommand = struct { target_dirname, target_filename, }); - std.os.rename(destination_executable, outdated_filename.?) catch |err| { + std.posix.rename(destination_executable, outdated_filename.?) 
catch |err| { save_dir_.deleteTree(version_name) catch {}; Output.prettyErrorln("error: Failed to rename current executable {s}", .{@errorName(err)}); Global.exit(1); @@ -863,7 +864,7 @@ pub const UpgradeCommand = struct { if (comptime Environment.isWindows) { // Attempt to restore the old executable. If this fails, the user will be left without a working copy of bun. - std.os.rename(outdated_filename.?, destination_executable) catch { + std.posix.rename(outdated_filename.?, destination_executable) catch { Output.errGeneric( \\Failed to move new version of Bun to {s} due to {s} , @@ -912,7 +913,7 @@ pub const UpgradeCommand = struct { env_loader.map.put("IS_BUN_AUTO_UPDATE", "true") catch bun.outOfMemory(); var std_map = try env_loader.map.stdEnvMap(ctx.allocator); defer std_map.deinit(); - _ = std.ChildProcess.run(.{ + _ = std.process.Child.run(.{ .allocator = ctx.allocator, .argv = &completions_argv, .cwd = target_dirname, diff --git a/src/compile_target.zig b/src/compile_target.zig index 1ba17bb304..240c824bcc 100644 --- a/src/compile_target.zig +++ b/src/compile_target.zig @@ -138,7 +138,7 @@ const MutableString = bun.MutableString; const Global = bun.Global; pub fn downloadToPath(this: *const CompileTarget, env: *bun.DotEnv.Loader, allocator: std.mem.Allocator, dest_z: [:0]const u8) !void { try HTTP.HTTPThread.init(); - var refresher = std.Progress{}; + var refresher = bun.Progress{}; { refresher.refresh(); diff --git a/src/comptime_string_map.zig b/src/comptime_string_map.zig index 037569143e..28eedeca42 100644 --- a/src/comptime_string_map.zig +++ b/src/comptime_string_map.zig @@ -76,7 +76,8 @@ pub fn ComptimeStringMapWithKeyType(comptime KeyType: type, comptime V: type, co for (kvs, 0..) 
|kv, i| { k[i] = kv.key; } - break :blk k[0..]; + const final = k; + break :blk &final; }; pub const Value = V; diff --git a/src/copy_file.zig b/src/copy_file.zig index 6acf4946ff..c3c7f2afaa 100644 --- a/src/copy_file.zig +++ b/src/copy_file.zig @@ -2,7 +2,7 @@ // The copy starts at offset 0, the initial offsets are preserved. // No metadata is transferred over. const std = @import("std"); -const os = std.os; +const posix = std.posix; const math = std.math; const bun = @import("root").bun; const strings = bun.strings; @@ -20,11 +20,11 @@ pub const CopyFileRangeError = error{ Unseekable, PermissionDenied, FileBusy, -} || os.PReadError || os.PWriteError || os.UnexpectedError; +} || posix.PReadError || posix.PWriteError || posix.UnexpectedError; -const CopyFileError = error{SystemResources} || CopyFileRangeError || os.SendFileError; +const CopyFileError = error{SystemResources} || CopyFileRangeError || posix.SendFileError; -const InputType = if (Environment.isWindows) bun.OSPathSliceZ else os.fd_t; +const InputType = if (Environment.isWindows) bun.OSPathSliceZ else posix.fd_t; /// In a `bun install` with prisma, this reduces the system call count from ~18,000 to ~12,000 /// @@ -60,14 +60,14 @@ const EmptyCopyFileState = struct {}; pub const CopyFileState = if (Environment.isLinux) LinuxCopyFileState else EmptyCopyFileState; pub fn copyFileWithState(in: InputType, out: InputType, copy_file_state: *CopyFileState) CopyFileError!void { if (comptime Environment.isMac) { - const rc = os.system.fcopyfile(in, out, null, os.system.COPYFILE_DATA); - switch (os.errno(rc)) { + const rc = posix.system.fcopyfile(in, out, null, posix.system.COPYFILE_DATA); + switch (posix.errno(rc)) { .SUCCESS => return, .NOMEM => return error.SystemResources, // The source file is not a directory, symbolic link, or regular file. // Try with the fallback path before giving up. 
.OPNOTSUPP => {}, - else => |err| return os.unexpectedErrno(err), + else => |err| return posix.unexpectedErrno(err), } } @@ -78,7 +78,7 @@ pub fn copyFileWithState(in: InputType, out: InputType, copy_file_state: *CopyFi const rc = bun.C.linux.ioctl_ficlone(bun.toFD(out), bun.toFD(in)); // the ordering is flipped but it is consistent with other system calls. bun.sys.syslog("ioctl_ficlone({d}, {d}) = {d}", .{ in, out, rc }); - switch (std.os.linux.getErrno(rc)) { + switch (bun.C.getErrno(rc)) { .SUCCESS => return, .XDEV => { copy_file_state.has_seen_exdev = true; @@ -89,7 +89,7 @@ pub fn copyFileWithState(in: InputType, out: InputType, copy_file_state: *CopyFi .ACCES, .BADF, .INVAL, .OPNOTSUPP, .NOSYS, .PERM => { bun.Output.debug("ioctl_ficlonerange is NOT supported", .{}); - can_use_ioctl_ficlone_.store(-1, .Monotonic); + can_use_ioctl_ficlone_.store(-1, .monotonic); copy_file_state.has_ioctl_ficlone_failed = true; }, else => { @@ -135,10 +135,10 @@ pub fn copyFileWithState(in: InputType, out: InputType, copy_file_state: *CopyFi // Sendfile is a zero-copy mechanism iff the OS supports it, otherwise the // fallback code will copy the contents chunk by chunk. 
- const empty_iovec = [0]os.iovec_const{}; + const empty_iovec = [0]posix.iovec_const{}; var offset: u64 = 0; sendfile_loop: while (true) { - const amt = try os.sendfile(out, in, offset, 0, &empty_iovec, &empty_iovec, 0); + const amt = try posix.sendfile(out, in, offset, 0, &empty_iovec, &empty_iovec, 0); // Terminate when no data was copied if (amt == 0) break :sendfile_loop; offset += amt; @@ -155,26 +155,26 @@ pub inline fn disableCopyFileRangeSyscall() void { if (comptime !Environment.isLinux) { return; } - can_use_copy_file_range.store(-1, .Monotonic); + can_use_copy_file_range.store(-1, .monotonic); } pub fn canUseCopyFileRangeSyscall() bool { - const result = can_use_copy_file_range.load(.Monotonic); + const result = can_use_copy_file_range.load(.monotonic); if (result == 0) { // This flag mostly exists to make other code more easily testable. if (bun.getenvZ("BUN_CONFIG_DISABLE_COPY_FILE_RANGE") != null) { bun.Output.debug("copy_file_range is disabled by BUN_CONFIG_DISABLE_COPY_FILE_RANGE", .{}); - can_use_copy_file_range.store(-1, .Monotonic); + can_use_copy_file_range.store(-1, .monotonic); return false; } const kernel = Platform.kernelVersion(); if (kernel.orderWithoutTag(.{ .major = 4, .minor = 5 }).compare(.gte)) { bun.Output.debug("copy_file_range is supported", .{}); - can_use_copy_file_range.store(1, .Monotonic); + can_use_copy_file_range.store(1, .monotonic); return true; } else { bun.Output.debug("copy_file_range is NOT supported", .{}); - can_use_copy_file_range.store(-1, .Monotonic); + can_use_copy_file_range.store(-1, .monotonic); return false; } } @@ -187,26 +187,26 @@ pub inline fn disable_ioctl_ficlone() void { if (comptime !Environment.isLinux) { return; } - can_use_ioctl_ficlone_.store(-1, .Monotonic); + can_use_ioctl_ficlone_.store(-1, .monotonic); } pub fn can_use_ioctl_ficlone() bool { - const result = can_use_ioctl_ficlone_.load(.Monotonic); + const result = can_use_ioctl_ficlone_.load(.monotonic); if (result == 0) { // This flag 
mostly exists to make other code more easily testable. if (bun.getenvZ("BUN_CONFIG_DISABLE_ioctl_ficlonerange") != null) { bun.Output.debug("ioctl_ficlonerange is disabled by BUN_CONFIG_DISABLE_ioctl_ficlonerange", .{}); - can_use_ioctl_ficlone_.store(-1, .Monotonic); + can_use_ioctl_ficlone_.store(-1, .monotonic); return false; } const kernel = Platform.kernelVersion(); if (kernel.orderWithoutTag(.{ .major = 4, .minor = 5 }).compare(.gte)) { bun.Output.debug("ioctl_ficlonerange is supported", .{}); - can_use_ioctl_ficlone_.store(1, .Monotonic); + can_use_ioctl_ficlone_.store(1, .monotonic); return true; } else { bun.Output.debug("ioctl_ficlonerange is NOT supported", .{}); - can_use_ioctl_ficlone_.store(-1, .Monotonic); + can_use_ioctl_ficlone_.store(-1, .monotonic); return false; } } @@ -214,14 +214,14 @@ pub fn can_use_ioctl_ficlone() bool { return result == 1; } -const fd_t = std.os.fd_t; +const fd_t = std.posix.fd_t; pub fn copyFileRange(in: fd_t, out: fd_t, len: usize, flags: u32, copy_file_state: *CopyFileState) CopyFileRangeError!usize { if (canUseCopyFileRangeSyscall() and !copy_file_state.has_seen_exdev and !copy_file_state.has_copy_file_range_failed) { while (true) { const rc = std.os.linux.copy_file_range(in, null, out, null, len, flags); bun.sys.syslog("copy_file_range({d}, {d}, {d}) = {d}", .{ in, out, len, rc }); - switch (std.os.linux.getErrno(rc)) { + switch (bun.C.getErrno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), // these may not be regular files, try fallback .INVAL => { @@ -237,7 +237,7 @@ pub fn copyFileRange(in: fd_t, out: fd_t, len: usize, flags: u32, copy_file_stat .OPNOTSUPP, .NOSYS => { copy_file_state.has_copy_file_range_failed = true; bun.Output.debug("copy_file_range is NOT supported", .{}); - can_use_copy_file_range.store(-1, .Monotonic); + can_use_copy_file_range.store(-1, .monotonic); }, .INTR => continue, else => { @@ -252,7 +252,7 @@ pub fn copyFileRange(in: fd_t, out: fd_t, len: usize, flags: u32, copy_file_stat while 
(!copy_file_state.has_sendfile_failed) { const rc = std.os.linux.sendfile(@intCast(out), @intCast(in), null, len); bun.sys.syslog("sendfile({d}, {d}, {d}) = {d}", .{ in, out, len, rc }); - switch (std.os.linux.getErrno(rc)) { + switch (bun.C.getErrno(rc)) { .SUCCESS => return @as(usize, @intCast(rc)), .INTR => continue, // these may not be regular files, try fallback @@ -278,7 +278,7 @@ pub fn copyFileRange(in: fd_t, out: fd_t, len: usize, flags: u32, copy_file_stat var buf: [8 * 4096]u8 = undefined; const adjusted_count = @min(buf.len, len); - const amt_read = try os.read(in, buf[0..adjusted_count]); + const amt_read = try posix.read(in, buf[0..adjusted_count]); if (amt_read == 0) return 0; - return os.write(out, buf[0..amt_read]); + return posix.write(out, buf[0..amt_read]); } diff --git a/src/crash_handler.zig b/src/crash_handler.zig index bf68337c98..9755bb3980 100644 --- a/src/crash_handler.zig +++ b/src/crash_handler.zig @@ -104,6 +104,9 @@ pub fn crashHandler( // the handler. resetSegfaultHandler(); + if (bun.Environment.isDebug) + bun.Output.disableScopedDebugWriter(); + var trace_str_buf = std.BoundedArray(u8, 1024){}; nosuspend switch (panic_stage) { @@ -111,7 +114,7 @@ pub fn crashHandler( bun.maybeHandlePanicDuringProcessReload(); panic_stage = 1; - _ = panicking.fetchAdd(1, .SeqCst); + _ = panicking.fetchAdd(1, .seq_cst); { panic_mutex.lock(); @@ -144,53 +147,53 @@ pub fn crashHandler( Output.flush(); Output.Source.Stdio.restore(); - writer.writeAll("=" ** 60 ++ "\n") catch std.os.abort(); - printMetadata(writer) catch std.os.abort(); + writer.writeAll("=" ** 60 ++ "\n") catch std.posix.abort(); + printMetadata(writer) catch std.posix.abort(); } else { if (Output.enable_ansi_colors) { - writer.writeAll(Output.prettyFmt("", true)) catch std.os.abort(); + writer.writeAll(Output.prettyFmt("", true)) catch std.posix.abort(); } - writer.writeAll("oh no") catch std.os.abort(); + writer.writeAll("oh no") catch std.posix.abort(); if (Output.enable_ansi_colors) 
{ - writer.writeAll(Output.prettyFmt(": ", true)) catch std.os.abort(); + writer.writeAll(Output.prettyFmt(": ", true)) catch std.posix.abort(); } else { - writer.writeAll(Output.prettyFmt(": ", true)) catch std.os.abort(); + writer.writeAll(Output.prettyFmt(": ", true)) catch std.posix.abort(); } - writer.writeAll("multiple threads are crashing") catch std.os.abort(); + writer.writeAll("multiple threads are crashing") catch std.posix.abort(); } if (reason != .out_of_memory or debug_trace) { if (Output.enable_ansi_colors) { - writer.writeAll(Output.prettyFmt("", true)) catch std.os.abort(); + writer.writeAll(Output.prettyFmt("", true)) catch std.posix.abort(); } - writer.writeAll("panic") catch std.os.abort(); + writer.writeAll("panic") catch std.posix.abort(); if (Output.enable_ansi_colors) { - writer.writeAll(Output.prettyFmt("", true)) catch std.os.abort(); + writer.writeAll(Output.prettyFmt("", true)) catch std.posix.abort(); } if (bun.CLI.Cli.is_main_thread) { - writer.writeAll("(main thread)") catch std.os.abort(); + writer.writeAll("(main thread)") catch std.posix.abort(); } else switch (bun.Environment.os) { .windows => { var name: std.os.windows.PWSTR = undefined; const result = bun.windows.GetThreadDescription(std.os.windows.kernel32.GetCurrentThread(), &name); if (std.os.windows.HRESULT_CODE(result) == .SUCCESS and name[0] != 0) { - writer.print("({})", .{bun.fmt.utf16(bun.span(name))}) catch std.os.abort(); + writer.print("({})", .{bun.fmt.utf16(bun.span(name))}) catch std.posix.abort(); } else { - writer.print("(thread {d})", .{std.os.windows.kernel32.GetCurrentThreadId()}) catch std.os.abort(); + writer.print("(thread {d})", .{std.os.windows.kernel32.GetCurrentThreadId()}) catch std.posix.abort(); } }, .mac, .linux => {}, else => @compileError("TODO"), } - writer.writeAll(": ") catch std.os.abort(); + writer.writeAll(": ") catch std.posix.abort(); if (Output.enable_ansi_colors) { - writer.writeAll(Output.prettyFmt("", true)) catch std.os.abort(); + 
writer.writeAll(Output.prettyFmt("", true)) catch std.posix.abort(); } - writer.print("{}\n", .{reason}) catch std.os.abort(); + writer.print("{}\n", .{reason}) catch std.posix.abort(); } var addr_buf: [10]usize = undefined; @@ -213,15 +216,15 @@ pub fn crashHandler( .trace = trace, .reason = reason, .action = .view_trace, - }}) catch std.os.abort(); + }}) catch std.posix.abort(); } else { if (!has_printed_message) { has_printed_message = true; - writer.writeAll("oh no") catch std.os.abort(); + writer.writeAll("oh no") catch std.posix.abort(); if (Output.enable_ansi_colors) { - writer.writeAll(Output.prettyFmt(": ", true)) catch std.os.abort(); + writer.writeAll(Output.prettyFmt(": ", true)) catch std.posix.abort(); } else { - writer.writeAll(Output.prettyFmt(": ", true)) catch std.os.abort(); + writer.writeAll(Output.prettyFmt(": ", true)) catch std.posix.abort(); } if (reason == .out_of_memory) { writer.writeAll( @@ -231,7 +234,7 @@ pub fn crashHandler( \\please file a GitHub issue using the link below: \\ \\ - ) catch std.os.abort(); + ) catch std.posix.abort(); } else { writer.writeAll( \\Bun has crashed. This indicates a bug in Bun, not your code. 
@@ -240,31 +243,31 @@ pub fn crashHandler( \\please file a GitHub issue using the link below: \\ \\ - ) catch std.os.abort(); + ) catch std.posix.abort(); } } if (Output.enable_ansi_colors) { - writer.print(Output.prettyFmt("", true), .{}) catch std.os.abort(); + writer.print(Output.prettyFmt("", true), .{}) catch std.posix.abort(); } - writer.writeAll(" ") catch std.os.abort(); + writer.writeAll(" ") catch std.posix.abort(); trace_str_buf.writer().print("{}", .{TraceString{ .trace = trace, .reason = reason, .action = .open_issue, - }}) catch std.os.abort(); + }}) catch std.posix.abort(); - writer.writeAll(trace_str_buf.slice()) catch std.os.abort(); + writer.writeAll(trace_str_buf.slice()) catch std.posix.abort(); - writer.writeAll("\n") catch std.os.abort(); + writer.writeAll("\n") catch std.posix.abort(); } if (Output.enable_ansi_colors) { - writer.writeAll(Output.prettyFmt("\n", true)) catch std.os.abort(); + writer.writeAll(Output.prettyFmt("\n", true)) catch std.posix.abort(); } else { - writer.writeAll("\n") catch std.os.abort(); + writer.writeAll("\n") catch std.posix.abort(); } } // Be aware that this function only lets one thread return from it. @@ -300,15 +303,15 @@ pub fn crashHandler( // we're still holding the mutex but that's fine as we're going to // call abort() const stderr = std.io.getStdErr().writer(); - stderr.print("\npanic: {s}\n", .{reason}) catch std.os.abort(); - stderr.print("panicked during a panic. Aborting.\n", .{}) catch std.os.abort(); + stderr.print("\npanic: {s}\n", .{reason}) catch std.posix.abort(); + stderr.print("panicked during a panic. Aborting.\n", .{}) catch std.posix.abort(); }, 3 => { // Panicked while printing "Panicked during a panic." }, else => { // Panicked or otherwise looped into the panic handler while trying to exit. 
- std.os.abort(); + std.posix.abort(); }, }; @@ -341,7 +344,7 @@ pub fn handleRootError(err: anyerror, error_return_trace: ?*std.builtin.StackTra error.SystemFdQuotaExceeded => { if (comptime bun.Environment.isPosix) { - const limit = if (std.os.getrlimit(.NOFILE)) |limit| limit.cur else |_| null; + const limit = if (std.posix.getrlimit(.NOFILE)) |limit| limit.cur else |_| null; if (comptime bun.Environment.isMac) { Output.prettyError( \\error: Your computer ran out of file descriptors (SystemFdQuotaExceeded) @@ -405,7 +408,7 @@ pub fn handleRootError(err: anyerror, error_return_trace: ?*std.builtin.StackTra error.ProcessFdQuotaExceeded => { if (comptime bun.Environment.isPosix) { - const limit = if (std.os.getrlimit(.NOFILE)) |limit| limit.cur else |_| null; + const limit = if (std.posix.getrlimit(.NOFILE)) |limit| limit.cur else |_| null; if (comptime bun.Environment.isMac) { Output.prettyError( \\ @@ -472,10 +475,10 @@ pub fn handleRootError(err: anyerror, error_return_trace: ?*std.builtin.StackTra } }, - // The usage of `unreachable` in Zig's std.os may cause the file descriptor problem to show up as other errors + // The usage of `unreachable` in Zig's std.posix may cause the file descriptor problem to show up as other errors error.NotOpenForReading, error.Unexpected => { if (comptime bun.Environment.isPosix) { - const limit = std.os.getrlimit(.NOFILE) catch std.mem.zeroes(std.os.rlimit); + const limit = std.posix.getrlimit(.NOFILE) catch std.mem.zeroes(std.posix.rlimit); if (limit.cur > 0 and limit.cur < (8192 * 2)) { Output.prettyError( @@ -626,7 +629,7 @@ const metadata_version_line = std.fmt.comptimePrint( }, ); -fn handleSegfaultPosix(sig: i32, info: *const std.os.siginfo_t, _: ?*const anyopaque) callconv(.C) noreturn { +fn handleSegfaultPosix(sig: i32, info: *const std.posix.siginfo_t, _: ?*const anyopaque) callconv(.C) noreturn { const addr = switch (bun.Environment.os) { .linux => @intFromPtr(info.fields.sigfault.addr), .mac => @intFromPtr(info.addr), 
@@ -635,10 +638,10 @@ fn handleSegfaultPosix(sig: i32, info: *const std.os.siginfo_t, _: ?*const anyop crashHandler( switch (sig) { - std.os.SIG.SEGV => .{ .segmentation_fault = addr }, - std.os.SIG.ILL => .{ .illegal_instruction = addr }, - std.os.SIG.BUS => .{ .bus_error = addr }, - std.os.SIG.FPE => .{ .floating_point_error = addr }, + std.posix.SIG.SEGV => .{ .segmentation_fault = addr }, + std.posix.SIG.ILL => .{ .illegal_instruction = addr }, + std.posix.SIG.BUS => .{ .bus_error = addr }, + std.posix.SIG.FPE => .{ .floating_point_error = addr }, // we do not register this handler for other signals else => unreachable, @@ -651,7 +654,7 @@ fn handleSegfaultPosix(sig: i32, info: *const std.os.siginfo_t, _: ?*const anyop var did_register_sigaltstack = false; var sigaltstack: [512 * 1024]u8 = undefined; -pub fn updatePosixSegfaultHandler(act: ?*std.os.Sigaction) !void { +pub fn updatePosixSegfaultHandler(act: ?*std.posix.Sigaction) !void { if (act) |act_| { if (!did_register_sigaltstack) { var stack: std.c.stack_t = .{ @@ -661,16 +664,16 @@ pub fn updatePosixSegfaultHandler(act: ?*std.os.Sigaction) !void { }; if (std.c.sigaltstack(&stack, null) == 0) { - act_.flags |= std.os.SA.ONSTACK; + act_.flags |= std.posix.SA.ONSTACK; did_register_sigaltstack = true; } } } - try std.os.sigaction(std.os.SIG.SEGV, act, null); - try std.os.sigaction(std.os.SIG.ILL, act, null); - try std.os.sigaction(std.os.SIG.BUS, act, null); - try std.os.sigaction(std.os.SIG.FPE, act, null); + try std.posix.sigaction(std.posix.SIG.SEGV, act, null); + try std.posix.sigaction(std.posix.SIG.ILL, act, null); + try std.posix.sigaction(std.posix.SIG.BUS, act, null); + try std.posix.sigaction(std.posix.SIG.FPE, act, null); } var windows_segfault_handle: ?windows.HANDLE = null; @@ -682,10 +685,10 @@ pub fn init() void { windows_segfault_handle = windows.kernel32.AddVectoredExceptionHandler(0, handleSegfaultWindows); }, .mac, .linux => { - var act = std.os.Sigaction{ + var act = std.posix.Sigaction{ 
.handler = .{ .sigaction = handleSegfaultPosix }, - .mask = std.os.empty_sigset, - .flags = (std.os.SA.SIGINFO | std.os.SA.RESTART | std.os.SA.RESETHAND), + .mask = std.posix.empty_sigset, + .flags = (std.posix.SA.SIGINFO | std.posix.SA.RESTART | std.posix.SA.RESETHAND), }; updatePosixSegfaultHandler(&act) catch {}; }, @@ -703,9 +706,9 @@ pub fn resetSegfaultHandler() void { return; } - var act = std.os.Sigaction{ - .handler = .{ .handler = std.os.SIG.DFL }, - .mask = std.os.empty_sigset, + var act = std.posix.Sigaction{ + .handler = .{ .handler = std.posix.SIG.DFL }, + .mask = std.posix.empty_sigset, .flags = 0, }; // To avoid a double-panic, do nothing if an error happens here. @@ -791,7 +794,7 @@ pub fn printMetadata(writer: anytype) !void { } fn waitForOtherThreadToFinishPanicking() void { - if (panicking.fetchSub(1, .SeqCst) != 1) { + if (panicking.fetchSub(1, .seq_cst) != 1) { // Another thread is panicking, wait for the last one to finish // and call abort() if (builtin.single_threaded) unreachable; @@ -946,8 +949,8 @@ const StackLine = struct { } = .{ .address = addr -| 1 }; const CtxTy = @TypeOf(ctx); - std.os.dl_iterate_phdr(&ctx, error{Found}, struct { - fn callback(info: *std.os.dl_phdr_info, _: usize, context: *CtxTy) !void { + std.posix.dl_iterate_phdr(&ctx, error{Found}, struct { + fn callback(info: *std.posix.dl_phdr_info, _: usize, context: *CtxTy) !void { defer context.i += 1; if (context.address < info.dlpi_addr) return; @@ -1260,21 +1263,21 @@ fn report(url: []const u8) void { fn crash() noreturn { switch (bun.Environment.os) { .windows => { - std.os.abort(); + std.posix.abort(); }, else => { // Install default handler so that the tkill below will terminate. 
- const sigact = std.os.Sigaction{ .handler = .{ .handler = std.os.SIG.DFL }, .mask = std.os.empty_sigset, .flags = 0 }; + const sigact = std.posix.Sigaction{ .handler = .{ .handler = std.posix.SIG.DFL }, .mask = std.posix.empty_sigset, .flags = 0 }; inline for (.{ - std.os.SIG.SEGV, - std.os.SIG.ILL, - std.os.SIG.BUS, - std.os.SIG.ABRT, - std.os.SIG.FPE, - std.os.SIG.HUP, - std.os.SIG.TERM, + std.posix.SIG.SEGV, + std.posix.SIG.ILL, + std.posix.SIG.BUS, + std.posix.SIG.ABRT, + std.posix.SIG.FPE, + std.posix.SIG.HUP, + std.posix.SIG.TERM, }) |sig| { - std.os.sigaction(sig, &sigact, null) catch {}; + std.posix.sigaction(sig, &sigact, null) catch {}; } @trap(); diff --git a/src/darwin_c.zig b/src/darwin_c.zig index 7ba46b2dfd..4e96ecd220 100644 --- a/src/darwin_c.zig +++ b/src/darwin_c.zig @@ -1,13 +1,13 @@ const std = @import("std"); const bun = @import("root").bun; const builtin = @import("builtin"); -const os = std.os; +const posix = std.posix; const mem = std.mem; const Stat = std.fs.File.Stat; const Kind = std.fs.File.Kind; const StatError = std.fs.File.StatError; const off_t = std.c.off_t; -const errno = os.errno; +const errno = posix.errno; const zeroes = mem.zeroes; const This = @This(); pub extern "c" fn copyfile(from: [*:0]const u8, to: [*:0]const u8, state: ?std.c.copyfile_state_t, flags: u32) c_int; @@ -142,7 +142,7 @@ pub extern "c" fn clonefile(src: [*:0]const u8, dest: [*:0]const u8, flags: c_in // benchmarking this did nothing on macOS // i verified it wasn't returning -1 -pub fn preallocate_file(_: os.fd_t, _: off_t, _: off_t) !void { +pub fn preallocate_file(_: posix.fd_t, _: off_t, _: off_t) !void { // pub const struct_fstore = extern struct { // fst_flags: c_uint, // fst_posmode: c_int, @@ -492,7 +492,7 @@ pub fn getTotalMemory() u64 { var memory_: [32]c_ulonglong = undefined; var size: usize = memory_.len; - std.os.sysctlbynameZ( + std.posix.sysctlbynameZ( "hw.memsize", &memory_, &size, @@ -512,7 +512,7 @@ pub fn getSystemUptime() u64 { var 
uptime_: [16]struct_BootTime = undefined; var size: usize = uptime_.len; - std.os.sysctlbynameZ( + std.posix.sysctlbynameZ( "kern.boottime", &uptime_, &size, @@ -533,7 +533,7 @@ pub fn getSystemLoadavg() [3]f64 { var loadavg_: [24]struct_LoadAvg = undefined; var size: usize = loadavg_.len; - std.os.sysctlbynameZ( + std.posix.sysctlbynameZ( "vm.loadavg", &loadavg_, &size, @@ -567,8 +567,8 @@ pub const PROCESSOR_INFO_MAX = 1024; pub extern fn host_processor_info(host: std.c.host_t, flavor: processor_flavor_t, out_processor_count: *std.c.natural_t, out_processor_info: *processor_info_array_t, out_processor_infoCnt: *std.c.mach_msg_type_number_t) std.c.E; -pub extern fn getuid(...) std.os.uid_t; -pub extern fn getgid(...) std.os.gid_t; +pub extern fn getuid(...) std.posix.uid_t; +pub extern fn getgid(...) std.posix.gid_t; pub extern fn get_process_priority(pid: c_uint) i32; pub extern fn set_process_priority(pid: c_uint, priority: c_int) i32; @@ -734,9 +734,9 @@ pub const ifaddrs = extern struct { ifa_next: ?*ifaddrs, ifa_name: [*:0]u8, ifa_flags: c_uint, - ifa_addr: ?*std.os.sockaddr, - ifa_netmask: ?*std.os.sockaddr, - ifa_dstaddr: ?*std.os.sockaddr, + ifa_addr: ?*std.posix.sockaddr, + ifa_netmask: ?*std.posix.sockaddr, + ifa_dstaddr: ?*std.posix.sockaddr, ifa_data: *anyopaque, }; pub extern fn getifaddrs(*?*ifaddrs) c_int; @@ -779,13 +779,19 @@ pub const F = struct { // so this is a linux-only optimization for now. 
pub const preallocate_length = std.math.maxInt(u51); -pub const Mode = std.os.mode_t; +pub const Mode = std.posix.mode_t; + +pub const E = std.posix.E; +pub const S = std.posix.S; -pub const E = std.os.E; -pub const S = std.os.S; pub fn getErrno(rc: anytype) E { - return std.c.getErrno(rc); + if (rc == -1) { + return @enumFromInt(std.c._errno().*); + } else { + return .SUCCESS; + } } + pub extern "c" fn umask(Mode) Mode; // #define RENAME_SECLUDE 0x00000001 diff --git a/src/deps/c_ares.zig b/src/deps/c_ares.zig index 4dcc60e209..df481f2990 100644 --- a/src/deps/c_ares.zig +++ b/src/deps/c_ares.zig @@ -4,8 +4,8 @@ const bun = @import("root").bun; const JSC = bun.JSC; const strings = bun.strings; const iovec = @import("std").os.iovec; -const struct_in_addr = std.os.sockaddr.in; -const struct_sockaddr = std.os.sockaddr; +const struct_in_addr = std.posix.sockaddr.in; +const struct_sockaddr = std.posix.sockaddr; pub const socklen_t = c.socklen_t; const ares_socklen_t = c.socklen_t; pub const ares_ssize_t = isize; @@ -15,7 +15,7 @@ pub const struct_apattern = opaque {}; const fd_set = c.fd_set; const libuv = bun.windows.libuv; -pub const AF = std.os.AF; +pub const AF = std.posix.AF; pub const NSClass = enum(c_int) { /// Cookie. 
@@ -404,8 +404,8 @@ pub const AddrInfo = extern struct { GetAddrInfo.Result.toJS( &.{ .address = switch (this_node.family) { - AF.INET => std.net.Address{ .in = .{ .sa = bun.cast(*const std.os.sockaddr.in, this_node.addr.?).* } }, - AF.INET6 => std.net.Address{ .in6 = .{ .sa = bun.cast(*const std.os.sockaddr.in6, this_node.addr.?).* } }, + AF.INET => std.net.Address{ .in = .{ .sa = bun.cast(*const std.posix.sockaddr.in, this_node.addr.?).* } }, + AF.INET6 => std.net.Address{ .in6 = .{ .sa = bun.cast(*const std.posix.sockaddr.in6, this_node.addr.?).* } }, else => unreachable, }, .ttl = this_node.ttl, @@ -630,11 +630,11 @@ pub const Channel = opaque { } // https://c-ares.org/ares_getnameinfo.html - pub fn getNameInfo(this: *Channel, sa: *std.os.sockaddr, comptime Type: type, ctx: *Type, comptime callback: struct_nameinfo.Callback(Type)) void { + pub fn getNameInfo(this: *Channel, sa: *std.posix.sockaddr, comptime Type: type, ctx: *Type, comptime callback: struct_nameinfo.Callback(Type)) void { return ares_getnameinfo( this, sa, - if (sa.*.family == AF.INET) @sizeOf(std.os.sockaddr.in) else @sizeOf(std.os.sockaddr.in6), + if (sa.*.family == AF.INET) @sizeOf(std.posix.sockaddr.in) else @sizeOf(std.posix.sockaddr.in6), // node returns ENOTFOUND for addresses like 255.255.255.255:80 // So, it requires setting the ARES_NI_NAMEREQD flag ARES_NI_NAMEREQD | ARES_NI_LOOKUPHOST | ARES_NI_LOOKUPSERVICE, @@ -654,7 +654,7 @@ pub const Channel = opaque { var ares_has_loaded = std.atomic.Value(bool).init(false); fn libraryInit() void { - if (ares_has_loaded.swap(true, .Monotonic)) + if (ares_has_loaded.swap(true, .monotonic)) return; const rc = ares_library_init_mem( @@ -1339,8 +1339,8 @@ pub const Error = enum(i32) { }; } - return switch (@as(std.os.system.EAI, @enumFromInt(rc))) { - @as(std.os.system.EAI, @enumFromInt(0)) => return null, + return switch (@as(std.posix.system.EAI, @enumFromInt(rc))) { + @as(std.posix.system.EAI, @enumFromInt(0)) => return null, .ADDRFAMILY => 
Error.EBADFAMILY, .BADFLAGS => Error.EBADFLAGS, // Invalid hints .FAIL => Error.EBADRESP, @@ -1572,7 +1572,7 @@ pub export fn Bun__canonicalizeIP( /// # Returns /// /// This function returns 0 on success. -pub fn getSockaddr(addr: []const u8, port: u16, sa: *std.os.sockaddr) c_int { +pub fn getSockaddr(addr: []const u8, port: u16, sa: *std.posix.sockaddr) c_int { const buf_size = 128; var buf: [buf_size]u8 = undefined; @@ -1588,7 +1588,7 @@ pub fn getSockaddr(addr: []const u8, port: u16, sa: *std.os.sockaddr) c_int { }; { - const in: *std.os.sockaddr.in = @as(*std.os.sockaddr.in, @alignCast(@ptrCast(sa))); + const in: *std.posix.sockaddr.in = @alignCast(@ptrCast(sa)); if (ares_inet_pton(AF.INET, addr_ptr, &in.addr) == 1) { in.*.family = AF.INET; in.*.port = std.mem.nativeToBig(u16, port); @@ -1596,7 +1596,7 @@ pub fn getSockaddr(addr: []const u8, port: u16, sa: *std.os.sockaddr) c_int { } } { - const in6: *std.os.sockaddr.in6 = @as(*std.os.sockaddr.in6, @alignCast(@ptrCast(sa))); + const in6: *std.posix.sockaddr.in6 = @alignCast(@ptrCast(sa)); if (ares_inet_pton(AF.INET6, addr_ptr, &in6.addr) == 1) { in6.*.family = AF.INET6; in6.*.port = std.mem.nativeToBig(u16, port); diff --git a/src/deps/libuv.zig b/src/deps/libuv.zig index 1dfa30ce1f..2aecf0ddb0 100644 --- a/src/deps/libuv.zig +++ b/src/deps/libuv.zig @@ -19,7 +19,7 @@ const LPFN_CONNECTEX = *const anyopaque; const FILE = std.c.FILE; const CRITICAL_SECTION = std.os.windows.CRITICAL_SECTION; const INPUT_RECORD = windows.INPUT_RECORD; -const sockaddr = std.os.sockaddr; +const sockaddr = std.posix.sockaddr; const sockaddr_storage = std.os.linux.sockaddr_storage; const sockaddr_un = std.os.linux.sockaddr_un; const BOOL = windows.BOOL; @@ -211,16 +211,17 @@ pub const O = struct { pub const SYMLINK = UV_FS_O_SYMLINK; pub const SYNC = UV_FS_O_SYNC; - pub fn fromStd(c_flags: i32) i32 { + pub fn fromBunO(c_flags: i32) i32 { var flags: i32 = 0; - if (c_flags & std.os.O.NONBLOCK != 0) flags |= NONBLOCK; - if (c_flags & 
std.os.O.CREAT != 0) flags |= CREAT; - if (c_flags & std.os.O.NOFOLLOW != 0) flags |= NOFOLLOW; - if (c_flags & std.os.O.WRONLY != 0) flags |= WRONLY; - if (c_flags & std.os.O.RDONLY != 0) flags |= RDONLY; - if (c_flags & std.os.O.RDWR != 0) flags |= RDWR; - if (c_flags & std.os.O.TRUNC != 0) flags |= TRUNC; - if (c_flags & std.os.O.APPEND != 0) flags |= APPEND; + + if (c_flags & bun.O.NONBLOCK != 0) flags |= NONBLOCK; + if (c_flags & bun.O.CREAT != 0) flags |= CREAT; + if (c_flags & bun.O.NOFOLLOW != 0) flags |= NOFOLLOW; + if (c_flags & bun.O.WRONLY != 0) flags |= WRONLY; + if (c_flags & bun.O.RDONLY != 0) flags |= RDONLY; + if (c_flags & bun.O.RDWR != 0) flags |= RDWR; + if (c_flags & bun.O.TRUNC != 0) flags |= TRUNC; + if (c_flags & bun.O.APPEND != 0) flags |= APPEND; return flags; } @@ -240,7 +241,7 @@ const _O_SHORT_LIVED = 0x1000; const _O_SEQUENTIAL = 0x0020; const _O_RANDOM = 0x0010; -// These **do not** map to std.os.O! +// These **do not** map to std.posix.O/bun.O! pub const UV_FS_O_APPEND = 0x0008; pub const UV_FS_O_CREAT = _O_CREAT; pub const UV_FS_O_EXCL = 0x0400; @@ -1828,7 +1829,7 @@ pub const fs_t = extern struct { /// /// It is assumed that if UV overwrites the .loop, it probably overwrote the rest of the struct. 
pub const uninitialized: fs_t = if (bun.Environment.allow_assert) value: { - comptime var value = std.mem.zeroes(fs_t); + var value = std.mem.zeroes(fs_t); value.loop = @ptrFromInt(0xAAAAAAAAAAAA0000); break :value value; } else undefined; @@ -2953,7 +2954,7 @@ pub fn StreamWriterMixin(comptime Type: type, comptime pipe_field_name: std.meta } fn uv_on_write_cb(req: *uv_write_t, status: ReturnCode) callconv(.C) void { - var this: *Type = @fieldParentPtr(Type, @tagName(uv_write_t_field_name), req); + var this: *Type = @fieldParentPtr(@tagName(uv_write_t_field_name), req); this.onWrite(if (status.toError(.send)) |err| .{ .err = err } else .{ .result = @intCast(status.int()) }); } diff --git a/src/deps/uws.zig b/src/deps/uws.zig index c1a15c9ef5..6f18945a00 100644 --- a/src/deps/uws.zig +++ b/src/deps/uws.zig @@ -1,4 +1,4 @@ -pub const is_bindgen = @import("std").meta.globalOption("bindgen", bool) orelse false; +pub const is_bindgen = false; const bun = @import("root").bun; const Api = bun.ApiSchema; const std = @import("std"); @@ -557,8 +557,8 @@ pub fn NewSocketHandler(comptime is_ssl: bool) type { /// # Returns /// This function returns a slice of the buffer on success, or null on failure. 
pub fn localAddressText(this: ThisSocket, buf: []u8, is_ipv6: *bool) ?[]const u8 { - const addr_v4_len = @sizeOf(std.meta.FieldType(std.os.sockaddr.in, .addr)); - const addr_v6_len = @sizeOf(std.meta.FieldType(std.os.sockaddr.in6, .addr)); + const addr_v4_len = @sizeOf(std.meta.FieldType(std.posix.sockaddr.in, .addr)); + const addr_v6_len = @sizeOf(std.meta.FieldType(std.posix.sockaddr.in6, .addr)); var sa_buf: [addr_v6_len + 1]u8 = undefined; const binary = this.localAddressBinary(&sa_buf) orelse return null; @@ -567,10 +567,10 @@ pub fn NewSocketHandler(comptime is_ssl: bool) type { var ret: ?[*:0]const u8 = null; if (addr_len == addr_v4_len) { - ret = bun.c_ares.ares_inet_ntop(std.os.AF.INET, &sa_buf, buf.ptr, @as(u32, @intCast(buf.len))); + ret = bun.c_ares.ares_inet_ntop(std.posix.AF.INET, &sa_buf, buf.ptr, @as(u32, @intCast(buf.len))); is_ipv6.* = false; } else if (addr_len == addr_v6_len) { - ret = bun.c_ares.ares_inet_ntop(std.os.AF.INET6, &sa_buf, buf.ptr, @as(u32, @intCast(buf.len))); + ret = bun.c_ares.ares_inet_ntop(std.posix.AF.INET6, &sa_buf, buf.ptr, @as(u32, @intCast(buf.len))); is_ipv6.* = true; } @@ -1205,7 +1205,7 @@ pub const PosixLoop = extern struct { const EventType = switch (Environment.os) { .linux => std.os.linux.epoll_event, - .mac => std.os.system.kevent64_s, + .mac => std.posix.system.kevent64_s, // TODO: .windows => *anyopaque, else => @compileError("Unsupported OS"), @@ -1474,7 +1474,7 @@ pub const Poll = opaque { return us_poll_ext(self).?; } - pub fn fd(self: *Poll) std.os.fd_t { + pub fn fd(self: *Poll) std.posix.fd_t { return us_poll_fd(self); } @@ -1516,7 +1516,7 @@ pub const Poll = opaque { extern fn us_poll_stop(p: ?*Poll, loop: ?*Loop) void; extern fn us_poll_events(p: ?*Poll) i32; extern fn us_poll_ext(p: ?*Poll) ?*anyopaque; - extern fn us_poll_fd(p: ?*Poll) std.os.fd_t; + extern fn us_poll_fd(p: ?*Poll) std.posix.fd_t; extern fn us_poll_resize(p: ?*Poll, loop: ?*Loop, ext_size: c_uint) ?*Poll; }; @@ -2673,7 +2673,7 @@ pub 
const LIBUS_RECV_BUFFER_LENGTH = 524288; pub const LIBUS_TIMEOUT_GRANULARITY = @as(i32, 4); pub const LIBUS_RECV_BUFFER_PADDING = @as(i32, 32); pub const LIBUS_EXT_ALIGNMENT = @as(i32, 16); -pub const LIBUS_SOCKET_DESCRIPTOR = std.os.socket_t; +pub const LIBUS_SOCKET_DESCRIPTOR = std.posix.socket_t; pub const _COMPRESSOR_MASK: i32 = 255; pub const _DECOMPRESSOR_MASK: i32 = 3840; @@ -2974,7 +2974,7 @@ pub const udp = struct { pub const PacketBuffer = opaque { const This = @This(); - pub fn getPeer(this: *This, index: c_int) *std.os.sockaddr.storage { + pub fn getPeer(this: *This, index: c_int) *std.posix.sockaddr.storage { return us_udp_packet_buffer_peer(this, index); } @@ -2985,7 +2985,7 @@ pub const udp = struct { } }; - extern fn us_udp_packet_buffer_peer(buf: ?*PacketBuffer, index: c_int) *std.os.sockaddr.storage; + extern fn us_udp_packet_buffer_peer(buf: ?*PacketBuffer, index: c_int) *std.posix.sockaddr.storage; extern fn us_udp_packet_buffer_payload(buf: ?*PacketBuffer, index: c_int) [*]u8; extern fn us_udp_packet_buffer_payload_length(buf: ?*PacketBuffer, index: c_int) c_int; }; diff --git a/src/deps/zig b/src/deps/zig index 4011fe49a1..98814a6069 160000 --- a/src/deps/zig +++ b/src/deps/zig @@ -1 +1 @@ -Subproject commit 4011fe49a1dd9a707c8d17d7dfc15ed3ac862542 +Subproject commit 98814a60691499f1f35b182921c3af484002aba2 diff --git a/src/dns.zig b/src/dns.zig index 8153be6e4e..e3ed3e16f5 100644 --- a/src/dns.zig +++ b/src/dns.zig @@ -142,9 +142,9 @@ pub const GetAddrInfo = struct { pub fn toLibC(this: Family) i32 { return switch (this) { .unspecified => 0, - .inet => std.os.AF.INET, - .inet6 => std.os.AF.INET6, - .unix => std.os.AF.UNIX, + .inet => std.posix.AF.INET, + .inet6 => std.posix.AF.INET6, + .unix => std.posix.AF.UNIX, }; } }; @@ -164,8 +164,8 @@ pub const GetAddrInfo = struct { pub fn toLibC(this: SocketType) i32 { switch (this) { .unspecified => return 0, - .stream => return std.os.SOCK.STREAM, - .dgram => return std.os.SOCK.DGRAM, + .stream => 
return std.posix.SOCK.STREAM, + .dgram => return std.posix.SOCK.DGRAM, } } @@ -234,8 +234,8 @@ pub const GetAddrInfo = struct { pub fn toLibC(this: Protocol) i32 { switch (this) { .unspecified => return 0, - .tcp => return std.os.IPPROTO.TCP, - .udp => return std.os.IPPROTO.UDP, + .tcp => return std.posix.IPPROTO.TCP, + .udp => return std.posix.IPPROTO.UDP, } } }; @@ -342,8 +342,8 @@ pub const GetAddrInfo = struct { const obj = JSC.JSValue.createEmptyObject(globalThis, 3); obj.put(globalThis, JSC.ZigString.static("address"), addressToJS(&this.address, globalThis)); obj.put(globalThis, JSC.ZigString.static("family"), switch (this.address.any.family) { - std.os.AF.INET => JSValue.jsNumber(4), - std.os.AF.INET6 => JSValue.jsNumber(6), + std.posix.AF.INET => JSValue.jsNumber(4), + std.posix.AF.INET6 => JSValue.jsNumber(6), else => JSValue.jsNumber(0), }); obj.put(globalThis, JSC.ZigString.static("ttl"), JSValue.jsNumber(this.ttl)); @@ -357,7 +357,7 @@ pub fn addressToString( address: *const std.net.Address, ) !bun.String { switch (address.any.family) { - std.os.AF.INET => { + std.posix.AF.INET => { var self = address.in; const bytes = @as(*const [4]u8, @ptrCast(&self.sa.addr)); return String.createFormat("{}.{}.{}.{}", .{ @@ -367,7 +367,7 @@ pub fn addressToString( bytes[3], }); }, - std.os.AF.INET6 => { + std.posix.AF.INET6 => { var stack = std.heap.stackFallback(512, default_allocator); const allocator = stack.get(); var out = try std.fmt.allocPrint(allocator, "{any}", .{address.*}); @@ -377,7 +377,7 @@ pub fn addressToString( // ^ ^^^^^^ return String.createLatin1(out[1 .. 
out.len - 1 - std.fmt.count("{d}", .{address.in6.getPort()}) - 1]); }, - std.os.AF.UNIX => { + std.posix.AF.UNIX => { if (comptime std.net.has_unix_sockets) { return String.createLatin1(&address.un.path); } diff --git a/src/enums.zig b/src/enums.zig deleted file mode 100644 index dd916f32a8..0000000000 --- a/src/enums.zig +++ /dev/null @@ -1,1002 +0,0 @@ -// This is a copy-paste of the same file from Zig's standard library. -// This exists mostly as a workaround for https://github.com/ziglang/zig/issues/16980 - -const std = @import("std"); -const assert = @import("root").bun.assert; -const testing = std.testing; -const EnumField = std.builtin.Type.EnumField; - -/// Returns a struct with a field matching each unique named enum element. -/// If the enum is extern and has multiple names for the same value, only -/// the first name is used. Each field is of type Data and has the provided -/// default, which may be undefined. -pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_default: ?Data) type { - const StructField = std.builtin.Type.StructField; - var fields: []const StructField = &[_]StructField{}; - for (std.meta.fields(E)) |field| { - fields = fields ++ &[_]StructField{.{ - .name = field.name, - .type = Data, - .default_value = if (field_default) |d| @as(?*const anyopaque, @ptrCast(&d)) else null, - .is_comptime = false, - .alignment = if (@sizeOf(Data) > 0) @alignOf(Data) else 0, - }}; - } - return @Type(.{ .Struct = .{ - .layout = .Auto, - .fields = fields, - .decls = &.{}, - .is_tuple = false, - } }); -} - -/// Looks up the supplied fields in the given enum type. -/// Uses only the field names, field values are ignored. -/// The result array is in the same order as the input. -pub inline fn valuesFromFields(comptime E: type, comptime fields: []const EnumField) []const E { - comptime { - var result: [fields.len]E = undefined; - for (fields, 0..) 
|f, i| { - result[i] = @field(E, f.name); - } - return &result; - } -} - -/// Returns the set of all named values in the given enum, in -/// declaration order. -pub fn values(comptime E: type) []const E { - return comptime valuesFromFields(E, @typeInfo(E).Enum.fields); -} - -/// A safe alternative to @tagName() for non-exhaustive enums that doesn't -/// panic when `e` has no tagged value. -/// Returns the tag name for `e` or null if no tag exists. -pub fn tagName(comptime E: type, e: E) ?[]const u8 { - return inline for (@typeInfo(E).Enum.fields) |f| { - if (@intFromEnum(e) == f.value) break f.name; - } else null; -} - -/// Determines the length of a direct-mapped enum array, indexed by -/// @intCast(usize, @intFromEnum(enum_value)). -/// If the enum is non-exhaustive, the resulting length will only be enough -/// to hold all explicit fields. -/// If the enum contains any fields with values that cannot be represented -/// by usize, a compile error is issued. The max_unused_slots parameter limits -/// the total number of items which have no matching enum key (holes in the enum -/// numbering). So for example, if an enum has values 1, 2, 5, and 6, max_unused_slots -/// must be at least 3, to allow unused slots 0, 3, and 4. -pub fn directEnumArrayLen(comptime E: type, comptime max_unused_slots: comptime_int) comptime_int { - var max_value: comptime_int = -1; - const max_usize: comptime_int = ~@as(usize, 0); - const fields = std.meta.fields(E); - for (fields) |f| { - if (f.value < 0) { - @compileError("Cannot create a direct enum array for " ++ @typeName(E) ++ ", field ." ++ f.name ++ " has a negative value."); - } - if (f.value > max_value) { - if (f.value > max_usize) { - @compileError("Cannot create a direct enum array for " ++ @typeName(E) ++ ", field ." 
++ f.name ++ " is larger than the max value of usize."); - } - max_value = f.value; - } - } - - const unused_slots = max_value + 1 - fields.len; - if (unused_slots > max_unused_slots) { - const unused_str = std.fmt.comptimePrint("{d}", .{unused_slots}); - const allowed_str = std.fmt.comptimePrint("{d}", .{max_unused_slots}); - @compileError("Cannot create a direct enum array for " ++ @typeName(E) ++ ". It would have " ++ unused_str ++ " unused slots, but only " ++ allowed_str ++ " are allowed."); - } - - return max_value + 1; -} - -/// Initializes an array of Data which can be indexed by -/// @intCast(usize, @intFromEnum(enum_value)). -/// If the enum is non-exhaustive, the resulting array will only be large enough -/// to hold all explicit fields. -/// If the enum contains any fields with values that cannot be represented -/// by usize, a compile error is issued. The max_unused_slots parameter limits -/// the total number of items which have no matching enum key (holes in the enum -/// numbering). So for example, if an enum has values 1, 2, 5, and 6, max_unused_slots -/// must be at least 3, to allow unused slots 0, 3, and 4. -/// The init_values parameter must be a struct with field names that match the enum values. -/// If the enum has multiple fields with the same value, the name of the first one must -/// be used. -pub fn directEnumArray( - comptime E: type, - comptime Data: type, - comptime max_unused_slots: comptime_int, - init_values: EnumFieldStruct(E, Data, null), -) [directEnumArrayLen(E, max_unused_slots)]Data { - return directEnumArrayDefault(E, Data, null, max_unused_slots, init_values); -} - -/// Initializes an array of Data which can be indexed by -/// @intCast(usize, @intFromEnum(enum_value)). The enum must be exhaustive. -/// If the enum contains any fields with values that cannot be represented -/// by usize, a compile error is issued. 
The max_unused_slots parameter limits -/// the total number of items which have no matching enum key (holes in the enum -/// numbering). So for example, if an enum has values 1, 2, 5, and 6, max_unused_slots -/// must be at least 3, to allow unused slots 0, 3, and 4. -/// The init_values parameter must be a struct with field names that match the enum values. -/// If the enum has multiple fields with the same value, the name of the first one must -/// be used. -pub fn directEnumArrayDefault( - comptime E: type, - comptime Data: type, - comptime default: ?Data, - comptime max_unused_slots: comptime_int, - init_values: EnumFieldStruct(E, Data, default), -) [directEnumArrayLen(E, max_unused_slots)]Data { - const len = comptime directEnumArrayLen(E, max_unused_slots); - var result: [len]Data = if (default) |d| [_]Data{d} ** len else undefined; - inline for (@typeInfo(@TypeOf(init_values)).Struct.fields) |f| { - const enum_value = @field(E, f.name); - const index = @as(usize, @intCast(@intFromEnum(enum_value))); - result[index] = @field(init_values, f.name); - } - return result; -} - -/// Cast an enum literal, value, or string to the enum value of type E -/// with the same name. -pub fn nameCast(comptime E: type, comptime value: anytype) E { - return comptime blk: { - const V = @TypeOf(value); - if (V == E) break :blk value; - const name: ?[]const u8 = switch (@typeInfo(V)) { - .EnumLiteral, .Enum => @tagName(value), - .Pointer => value, - else => null, - }; - if (name) |n| { - if (@hasField(E, n)) { - break :blk @field(E, n); - } - @compileError("Enum " ++ @typeName(E) ++ " has no field named " ++ n); - } - @compileError("Cannot cast from " ++ @typeName(@TypeOf(value)) ++ " to " ++ @typeName(E)); - }; -} - -/// A set of enum elements, backed by a bitfield. If the enum -/// is not dense, a mapping will be constructed from enum values -/// to dense indices. This type does no dynamic allocation and -/// can be copied by value. 
-pub fn EnumSet(comptime E: type) type { - const mixin = struct { - fn EnumSetExt(comptime Self: type) type { - const Indexer = Self.Indexer; - return struct { - /// Initializes the set using a struct of bools - pub fn init(init_values: EnumFieldStruct(E, bool, false)) Self { - var result = Self{}; - inline for (0..Self.len) |i| { - const key = comptime Indexer.keyForIndex(i); - const tag = comptime @tagName(key); - if (@field(init_values, tag)) { - result.bits.set(i); - } - } - return result; - } - }; - } - }; - return IndexedSet(EnumIndexer(E), mixin.EnumSetExt); -} - -/// A map keyed by an enum, backed by a bitfield and a dense array. -/// If the enum is not dense, a mapping will be constructed from -/// enum values to dense indices. This type does no dynamic -/// allocation and can be copied by value. -pub fn EnumMap(comptime E: type, comptime V: type) type { - const mixin = struct { - fn EnumMapExt(comptime Self: type) type { - const Indexer = Self.Indexer; - return struct { - /// Initializes the map using a sparse struct of optionals - pub fn init(init_values: EnumFieldStruct(E, ?V, @as(?V, null))) Self { - var result = Self{}; - inline for (0..Self.len) |i| { - const key = comptime Indexer.keyForIndex(i); - const tag = comptime @tagName(key); - if (@field(init_values, tag)) |*v| { - result.bits.set(i); - result.values[i] = v.*; - } - } - return result; - } - /// Initializes a full mapping with all keys set to value. - /// Consider using EnumArray instead if the map will remain full. - pub fn initFull(value: V) Self { - var result = Self{ - .bits = Self.BitSet.initFull(), - .values = undefined, - }; - @memset(&result.values, value); - return result; - } - /// Initializes a full mapping with supplied values. - /// Consider using EnumArray instead if the map will remain full. 
- pub fn initFullWith(init_values: EnumFieldStruct(E, V, @as(?V, null))) Self { - return initFullWithDefault(@as(?V, null), init_values); - } - /// Initializes a full mapping with a provided default. - /// Consider using EnumArray instead if the map will remain full. - pub fn initFullWithDefault(comptime default: ?V, init_values: EnumFieldStruct(E, V, default)) Self { - var result = Self{ - .bits = Self.BitSet.initFull(), - .values = undefined, - }; - inline for (0..Self.len) |i| { - const key = comptime Indexer.keyForIndex(i); - const tag = comptime @tagName(key); - result.values[i] = @field(init_values, tag); - } - return result; - } - }; - } - }; - return IndexedMap(EnumIndexer(E), V, mixin.EnumMapExt); -} - -/// A multiset of enum elements up to a count of usize. Backed -/// by an EnumArray. This type does no dynamic allocation and can -/// be copied by value. -pub fn EnumMultiset(comptime E: type) type { - return BoundedEnumMultiset(E, usize); -} - -/// A multiset of enum elements up to CountSize. Backed by an -/// EnumArray. This type does no dynamic allocation and can be -/// copied by value. -pub fn BoundedEnumMultiset(comptime E: type, comptime CountSize: type) type { - return struct { - const Self = @This(); - - counts: EnumArray(E, CountSize), - - /// Initializes the multiset using a struct of counts. - pub fn init(init_counts: EnumFieldStruct(E, CountSize, 0)) Self { - var self = initWithCount(0); - inline for (@typeInfo(E).Enum.fields) |field| { - const c = @field(init_counts, field.name); - const key = @as(E, @enumFromInt(field.value)); - self.counts.set(key, c); - } - return self; - } - - /// Initializes the multiset with a count of zero. - pub fn initEmpty() Self { - return initWithCount(0); - } - - /// Initializes the multiset with all keys at the - /// same count. 
- pub fn initWithCount(comptime c: CountSize) Self { - return .{ - .counts = EnumArray(E, CountSize).initDefault(c, .{}), - }; - } - - /// Returns the total number of key counts in the multiset. - pub fn count(self: Self) usize { - var sum: usize = 0; - for (self.counts.values) |c| { - sum += c; - } - return sum; - } - - /// Checks if at least one key in multiset. - pub fn contains(self: Self, key: E) bool { - return self.counts.get(key) > 0; - } - - /// Removes all instance of a key from multiset. Same as - /// setCount(key, 0). - pub fn removeAll(self: *Self, key: E) void { - return self.counts.set(key, 0); - } - - /// Increases the key count by given amount. Caller asserts - /// operation will not overflow. - pub fn addAssertSafe(self: *Self, key: E, c: CountSize) void { - self.counts.getPtr(key).* += c; - } - - /// Increases the key count by given amount. - pub fn add(self: *Self, key: E, c: CountSize) error{Overflow}!void { - self.counts.set(key, try std.math.add(CountSize, self.counts.get(key), c)); - } - - /// Decreases the key count by given amount. If amount is - /// greater than the number of keys in multset, then key count - /// will be set to zero. - pub fn remove(self: *Self, key: E, c: CountSize) void { - self.counts.getPtr(key).* -= @min(self.getCount(key), c); - } - - /// Returns the count for a key. - pub fn getCount(self: Self, key: E) CountSize { - return self.counts.get(key); - } - - /// Set the count for a key. - pub fn setCount(self: *Self, key: E, c: CountSize) void { - self.counts.set(key, c); - } - - /// Increases the all key counts by given multiset. Caller - /// asserts operation will not overflow any key. - pub fn addSetAssertSafe(self: *Self, other: Self) void { - inline for (@typeInfo(E).Enum.fields) |field| { - const key = @as(E, @enumFromInt(field.value)); - self.addAssertSafe(key, other.getCount(key)); - } - } - - /// Increases the all key counts by given multiset. 
- pub fn addSet(self: *Self, other: Self) error{Overflow}!void { - inline for (@typeInfo(E).Enum.fields) |field| { - const key = @as(E, @enumFromInt(field.value)); - try self.add(key, other.getCount(key)); - } - } - - /// Deccreases the all key counts by given multiset. If - /// the given multiset has more key counts than this, - /// then that key will have a key count of zero. - pub fn removeSet(self: *Self, other: Self) void { - inline for (@typeInfo(E).Enum.fields) |field| { - const key = @as(E, @enumFromInt(field.value)); - self.remove(key, other.getCount(key)); - } - } - - /// Returns true iff all key counts are the same as - /// given multiset. - pub fn eql(self: Self, other: Self) bool { - inline for (@typeInfo(E).Enum.fields) |field| { - const key = @as(E, @enumFromInt(field.value)); - if (self.getCount(key) != other.getCount(key)) { - return false; - } - } - return true; - } - - /// Returns true iff all key counts less than or - /// equal to the given multiset. - pub fn subsetOf(self: Self, other: Self) bool { - inline for (@typeInfo(E).Enum.fields) |field| { - const key = @as(E, @enumFromInt(field.value)); - if (self.getCount(key) > other.getCount(key)) { - return false; - } - } - return true; - } - - /// Returns true iff all key counts greater than or - /// equal to the given multiset. - pub fn supersetOf(self: Self, other: Self) bool { - inline for (@typeInfo(E).Enum.fields) |field| { - const key = @as(E, @enumFromInt(field.value)); - if (self.getCount(key) < other.getCount(key)) { - return false; - } - } - return true; - } - - /// Returns a multiset with the total key count of this - /// multiset and the other multiset. Caller asserts - /// operation will not overflow any key. - pub fn plusAssertSafe(self: Self, other: Self) Self { - var result = self; - result.addSetAssertSafe(other); - return result; - } - - /// Returns a multiset with the total key count of this - /// multiset and the other multiset. 
- pub fn plus(self: Self, other: Self) error{Overflow}!Self { - var result = self; - try result.addSet(other); - return result; - } - - /// Returns a multiset with the key count of this - /// multiset minus the corresponding key count in the - /// other multiset. If the other multiset contains - /// more key count than this set, that key will have - /// a count of zero. - pub fn minus(self: Self, other: Self) Self { - var result = self; - result.removeSet(other); - return result; - } - - pub const Entry = EnumArray(E, CountSize).Entry; - pub const Iterator = EnumArray(E, CountSize).Iterator; - - /// Returns an iterator over this multiset. Keys with zero - /// counts are included. Modifications to the set during - /// iteration may or may not be observed by the iterator, - /// but will not invalidate it. - pub fn iterator(self: *Self) Iterator { - return self.counts.iterator(); - } - }; -} - -/// An array keyed by an enum, backed by a dense array. -/// If the enum is not dense, a mapping will be constructed from -/// enum values to dense indices. This type does no dynamic -/// allocation and can be copied by value. -pub fn EnumArray(comptime E: type, comptime V: type) type { - const mixin = struct { - fn EnumArrayExt(comptime Self: type) type { - const Indexer = Self.Indexer; - return struct { - /// Initializes all values in the enum array - pub fn init(init_values: EnumFieldStruct(E, V, @as(?V, null))) Self { - return initDefault(@as(?V, null), init_values); - } - - /// Initializes values in the enum array, with the specified default. 
- pub fn initDefault(comptime default: ?V, init_values: EnumFieldStruct(E, V, default)) Self { - var result = Self{ .values = undefined }; - inline for (0..Self.len) |i| { - const key = comptime Indexer.keyForIndex(i); - const tag = @tagName(key); - result.values[i] = @field(init_values, tag); - } - return result; - } - }; - } - }; - return IndexedArray(EnumIndexer(E), V, mixin.EnumArrayExt); -} - -fn NoExtension(comptime Self: type) type { - _ = Self; - return NoExt; -} -const NoExt = struct {}; - -/// A set type with an Indexer mapping from keys to indices. -/// Presence or absence is stored as a dense bitfield. This -/// type does no allocation and can be copied by value. -pub fn IndexedSet(comptime I: type, comptime Ext: ?fn (type) type) type { - comptime ensureIndexer(I); - return struct { - const Self = @This(); - - pub usingnamespace (Ext orelse NoExtension)(Self); - - /// The indexing rules for converting between keys and indices. - pub const Indexer = I; - /// The element type for this set. - pub const Key = Indexer.Key; - - const BitSet = std.StaticBitSet(Indexer.count); - - /// The maximum number of items in this set. - pub const len = Indexer.count; - - bits: BitSet = BitSet.initEmpty(), - - /// Returns a set containing no keys. - pub fn initEmpty() Self { - return .{ .bits = BitSet.initEmpty() }; - } - - /// Returns a set containing all possible keys. - pub fn initFull() Self { - return .{ .bits = BitSet.initFull() }; - } - - /// Returns a set containing multiple keys. - pub fn initMany(keys: []const Key) Self { - var set = initEmpty(); - for (keys) |key| set.insert(key); - return set; - } - - /// Returns a set containing a single key. - pub fn initOne(key: Key) Self { - return initMany(&[_]Key{key}); - } - - /// Returns the number of keys in the set. - pub fn count(self: Self) usize { - return self.bits.count(); - } - - /// Checks if a key is in the set. 
- pub fn contains(self: Self, key: Key) bool { - return self.bits.isSet(Indexer.indexOf(key)); - } - - /// Puts a key in the set. - pub fn insert(self: *Self, key: Key) void { - self.bits.set(Indexer.indexOf(key)); - } - - /// Removes a key from the set. - pub fn remove(self: *Self, key: Key) void { - self.bits.unset(Indexer.indexOf(key)); - } - - /// Changes the presence of a key in the set to match the passed bool. - pub fn setPresent(self: *Self, key: Key, present: bool) void { - self.bits.setValue(Indexer.indexOf(key), present); - } - - /// Toggles the presence of a key in the set. If the key is in - /// the set, removes it. Otherwise adds it. - pub fn toggle(self: *Self, key: Key) void { - self.bits.toggle(Indexer.indexOf(key)); - } - - /// Toggles the presence of all keys in the passed set. - pub fn toggleSet(self: *Self, other: Self) void { - self.bits.toggleSet(other.bits); - } - - /// Toggles all possible keys in the set. - pub fn toggleAll(self: *Self) void { - self.bits.toggleAll(); - } - - /// Adds all keys in the passed set to this set. - pub fn setUnion(self: *Self, other: Self) void { - self.bits.setUnion(other.bits); - } - - /// Removes all keys which are not in the passed set. - pub fn setIntersection(self: *Self, other: Self) void { - self.bits.setIntersection(other.bits); - } - - /// Returns true iff both sets have the same keys. - pub fn eql(self: Self, other: Self) bool { - return self.bits.eql(other.bits); - } - - /// Returns true iff all the keys in this set are - /// in the other set. The other set may have keys - /// not found in this set. - pub fn subsetOf(self: Self, other: Self) bool { - return self.bits.subsetOf(other.bits); - } - - /// Returns true iff this set contains all the keys - /// in the other set. This set may have keys not - /// found in the other set. - pub fn supersetOf(self: Self, other: Self) bool { - return self.bits.supersetOf(other.bits); - } - - /// Returns a set with all the keys not in this set. 
- pub fn complement(self: Self) Self { - return .{ .bits = self.bits.complement() }; - } - - /// Returns a set with keys that are in either this - /// set or the other set. - pub fn unionWith(self: Self, other: Self) Self { - return .{ .bits = self.bits.unionWith(other.bits) }; - } - - /// Returns a set with keys that are in both this - /// set and the other set. - pub fn intersectWith(self: Self, other: Self) Self { - return .{ .bits = self.bits.intersectWith(other.bits) }; - } - - /// Returns a set with keys that are in either this - /// set or the other set, but not both. - pub fn xorWith(self: Self, other: Self) Self { - return .{ .bits = self.bits.xorWith(other.bits) }; - } - - /// Returns a set with keys that are in this set - /// except for keys in the other set. - pub fn differenceWith(self: Self, other: Self) Self { - return .{ .bits = self.bits.differenceWith(other.bits) }; - } - - /// Returns an iterator over this set, which iterates in - /// index order. Modifications to the set during iteration - /// may or may not be observed by the iterator, but will - /// not invalidate it. - pub fn iterator(self: *const Self) Iterator { - return .{ .inner = self.bits.iterator(.{}) }; - } - - pub const Iterator = struct { - inner: BitSet.Iterator(.{}), - - pub fn next(self: *Iterator) ?Key { - return if (self.inner.next()) |index| - Indexer.keyForIndex(index) - else - null; - } - }; - }; -} - -/// A map from keys to values, using an index lookup. Uses a -/// bitfield to track presence and a dense array of values. -/// This type does no allocation and can be copied by value. 
-pub fn IndexedMap(comptime I: type, comptime V: type, comptime Ext: ?fn (type) type) type { - comptime ensureIndexer(I); - return struct { - const Self = @This(); - - pub usingnamespace (Ext orelse NoExtension)(Self); - - /// The index mapping for this map - pub const Indexer = I; - /// The key type used to index this map - pub const Key = Indexer.Key; - /// The value type stored in this map - pub const Value = V; - /// The number of possible keys in the map - pub const len = Indexer.count; - - const BitSet = std.StaticBitSet(Indexer.count); - - /// Bits determining whether items are in the map - bits: BitSet = BitSet.initEmpty(), - /// Values of items in the map. If the associated - /// bit is zero, the value is undefined. - values: [Indexer.count]Value = undefined, - - /// The number of items in the map. - pub fn count(self: Self) usize { - return self.bits.count(); - } - - /// Checks if the map contains an item. - pub fn contains(self: Self, key: Key) bool { - return self.bits.isSet(Indexer.indexOf(key)); - } - - /// Gets the value associated with a key. - /// If the key is not in the map, returns null. - pub fn get(self: Self, key: Key) ?Value { - const index = Indexer.indexOf(key); - return if (self.bits.isSet(index)) self.values[index] else null; - } - - /// Gets the value associated with a key, which must - /// exist in the map. - pub fn getAssertContains(self: Self, key: Key) Value { - const index = Indexer.indexOf(key); - assert(self.bits.isSet(index)); - return self.values[index]; - } - - /// Gets the address of the value associated with a key. - /// If the key is not in the map, returns null. - pub fn getPtr(self: *Self, key: Key) ?*Value { - const index = Indexer.indexOf(key); - return if (self.bits.isSet(index)) &self.values[index] else null; - } - - /// Gets the address of the const value associated with a key. - /// If the key is not in the map, returns null. 
- pub fn getPtrConst(self: *const Self, key: Key) ?*const Value { - const index = Indexer.indexOf(key); - return if (self.bits.isSet(index)) &self.values[index] else null; - } - - /// Gets the address of the value associated with a key. - /// The key must be present in the map. - pub fn getPtrAssertContains(self: *Self, key: Key) *Value { - const index = Indexer.indexOf(key); - assert(self.bits.isSet(index)); - return &self.values[index]; - } - - /// Adds the key to the map with the supplied value. - /// If the key is already in the map, overwrites the value. - pub fn put(self: *Self, key: Key, value: Value) void { - const index = Indexer.indexOf(key); - self.bits.set(index); - self.values[index] = value; - } - - /// Adds the key to the map with an undefined value. - /// If the key is already in the map, the value becomes undefined. - /// A pointer to the value is returned, which should be - /// used to initialize the value. - pub fn putUninitialized(self: *Self, key: Key) *Value { - const index = Indexer.indexOf(key); - self.bits.set(index); - self.values[index] = undefined; - return &self.values[index]; - } - - /// Sets the value associated with the key in the map, - /// and returns the old value. If the key was not in - /// the map, returns null. - pub fn fetchPut(self: *Self, key: Key, value: Value) ?Value { - const index = Indexer.indexOf(key); - const result: ?Value = if (self.bits.isSet(index)) self.values[index] else null; - self.bits.set(index); - self.values[index] = value; - return result; - } - - /// Removes a key from the map. If the key was not in the map, - /// does nothing. - pub fn remove(self: *Self, key: Key) void { - const index = Indexer.indexOf(key); - self.bits.unset(index); - self.values[index] = undefined; - } - - /// Removes a key from the map, and returns the old value. - /// If the key was not in the map, returns null. 
- pub fn fetchRemove(self: *Self, key: Key) ?Value { - const index = Indexer.indexOf(key); - const result: ?Value = if (self.bits.isSet(index)) self.values[index] else null; - self.bits.unset(index); - self.values[index] = undefined; - return result; - } - - /// Returns an iterator over the map, which visits items in index order. - /// Modifications to the underlying map may or may not be observed by - /// the iterator, but will not invalidate it. - pub fn iterator(self: *Self) Iterator { - return .{ - .inner = self.bits.iterator(.{}), - .values = &self.values, - }; - } - - /// An entry in the map. - pub const Entry = struct { - /// The key associated with this entry. - /// Modifying this key will not change the map. - key: Key, - - /// A pointer to the value in the map associated - /// with this key. Modifications through this - /// pointer will modify the underlying data. - value: *Value, - }; - - pub const Iterator = struct { - inner: BitSet.Iterator(.{}), - values: *[Indexer.count]Value, - - pub fn next(self: *Iterator) ?Entry { - return if (self.inner.next()) |index| - Entry{ - .key = Indexer.keyForIndex(index), - .value = &self.values[index], - } - else - null; - } - }; - }; -} - -/// A dense array of values, using an indexed lookup. -/// This type does no allocation and can be copied by value. 
-pub fn IndexedArray(comptime I: type, comptime V: type, comptime Ext: ?fn (type) type) type { - comptime ensureIndexer(I); - return struct { - const Self = @This(); - - pub usingnamespace (Ext orelse NoExtension)(Self); - - /// The index mapping for this map - pub const Indexer = I; - /// The key type used to index this map - pub const Key = Indexer.Key; - /// The value type stored in this map - pub const Value = V; - /// The number of possible keys in the map - pub const len = Indexer.count; - - values: [Indexer.count]Value, - - pub fn initUndefined() Self { - return Self{ .values = undefined }; - } - - pub fn initFill(v: Value) Self { - var self: Self = undefined; - @memset(&self.values, v); - return self; - } - - /// Returns the value in the array associated with a key. - pub fn get(self: Self, key: Key) Value { - return self.values[Indexer.indexOf(key)]; - } - - /// Returns a pointer to the slot in the array associated with a key. - pub fn getPtr(self: *Self, key: Key) *Value { - return &self.values[Indexer.indexOf(key)]; - } - - /// Returns a const pointer to the slot in the array associated with a key. - pub fn getPtrConst(self: *const Self, key: Key) *const Value { - return &self.values[Indexer.indexOf(key)]; - } - - /// Sets the value in the slot associated with a key. - pub fn set(self: *Self, key: Key, value: Value) void { - self.values[Indexer.indexOf(key)] = value; - } - - /// Iterates over the items in the array, in index order. - pub fn iterator(self: *Self) Iterator { - return .{ - .values = &self.values, - }; - } - - /// An entry in the array. - pub const Entry = struct { - /// The key associated with this entry. - /// Modifying this key will not change the array. - key: Key, - - /// A pointer to the value in the array associated - /// with this key. Modifications through this - /// pointer will modify the underlying data. 
- value: *Value, - }; - - pub const Iterator = struct { - index: usize = 0, - values: *[Indexer.count]Value, - - pub fn next(self: *Iterator) ?Entry { - const index = self.index; - if (index < Indexer.count) { - self.index += 1; - return Entry{ - .key = Indexer.keyForIndex(index), - .value = &self.values[index], - }; - } - return null; - } - }; - }; -} - -/// Verifies that a type is a valid Indexer, providing a helpful -/// compile error if not. An Indexer maps a comptime-known set -/// of keys to a dense set of zero-based indices. -/// The indexer interface must look like this: -/// ``` -/// struct { -/// /// The key type which this indexer converts to indices -/// pub const Key: type, -/// /// The number of indexes in the dense mapping -/// pub const count: usize, -/// /// Converts from a key to an index -/// pub fn indexOf(Key) usize; -/// /// Converts from an index to a key -/// pub fn keyForIndex(usize) Key; -/// } -/// ``` -pub fn ensureIndexer(comptime T: type) void { - comptime { - if (!@hasDecl(T, "Key")) @compileError("Indexer must have decl Key: type."); - if (@TypeOf(T.Key) != type) @compileError("Indexer.Key must be a type."); - if (!@hasDecl(T, "count")) @compileError("Indexer must have decl count: usize."); - if (@TypeOf(T.count) != usize) @compileError("Indexer.count must be a usize."); - if (!@hasDecl(T, "indexOf")) @compileError("Indexer.indexOf must be a fn(Key)usize."); - if (@TypeOf(T.indexOf) != fn (T.Key) usize) @compileError("Indexer must have decl indexOf: fn(Key)usize."); - if (!@hasDecl(T, "keyForIndex")) @compileError("Indexer must have decl keyForIndex: fn(usize)Key."); - if (@TypeOf(T.keyForIndex) != fn (usize) T.Key) @compileError("Indexer.keyForIndex must be a fn(usize)Key."); - } -} - -pub fn EnumIndexer(comptime E: type) type { - if (!@typeInfo(E).Enum.is_exhaustive) { - @compileError("Cannot create an enum indexer for a non-exhaustive enum."); - } - - const const_fields = std.meta.fields(E); - var fields = 
const_fields[0..const_fields.len].*; - const min = fields[0].value; - const max = fields[fields.len - 1].value; - const fields_len = fields.len; - if (fields_len == 0) { - return struct { - pub const Key = E; - pub const count: usize = 0; - pub fn indexOf(e: E) usize { - _ = e; - unreachable; - } - pub fn keyForIndex(i: usize) E { - _ = i; - unreachable; - } - }; - } - - const SortContext = struct { - fields: []EnumField, - - pub fn lessThan(comptime ctx: @This(), comptime a: usize, comptime b: usize) bool { - return ctx.fields[a].value < ctx.fields[b].value; - } - - pub fn swap(comptime ctx: @This(), comptime a: usize, comptime b: usize) void { - return std.mem.swap(EnumField, &ctx.fields[a], &ctx.fields[b]); - } - }; - { - const a: usize = 0; - const b = fields_len; - var i = a + 1; - const context = SortContext{ .fields = &fields }; - @setEvalBranchQuota(999999999); - while (i < b) : (i += 1) { - var j = i; - while (j > a and context.lessThan(j, j - 1)) : (j -= 1) { - context.swap(j, j - 1); - } - } - } - - if (max - min == fields.len - 1) { - return struct { - pub const Key = E; - pub const count = fields_len; - pub fn indexOf(e: E) usize { - return @as(usize, @intCast(@intFromEnum(e) - min)); - } - pub fn keyForIndex(i: usize) E { - // TODO fix addition semantics. This calculation - // gives up some safety to avoid artificially limiting - // the range of signed enum values to max_isize. - const enum_value = if (min < 0) @as(isize, @bitCast(i)) +% min else i + min; - return @as(E, @enumFromInt(@as(std.meta.Tag(E), @intCast(enum_value)))); - } - }; - } - - const keys = valuesFromFields(E, &fields); - - return struct { - pub const Key = E; - pub const count = fields_len; - pub fn indexOf(e: E) usize { - @setEvalBranchQuota(123456); - for (keys, 0..) 
|k, i| { - if (k == e) return i; - } - unreachable; - } - pub fn keyForIndex(i: usize) E { - return keys[i]; - } - }; -} diff --git a/src/env.zig b/src/env.zig index 1a9ed55e88..583afa64e0 100644 --- a/src/env.zig +++ b/src/env.zig @@ -25,7 +25,6 @@ pub const isAarch64 = @import("builtin").target.cpu.arch.isAARCH64(); pub const isX86 = @import("builtin").target.cpu.arch.isX86(); pub const isX64 = @import("builtin").target.cpu.arch == .x86_64; pub const allow_assert = isDebug or isTest or std.builtin.Mode.ReleaseSafe == @import("builtin").mode; -pub const analytics_url = if (isDebug) "http://localhost:4000/events" else "http://i.bun.sh/events"; const BuildOptions = if (isTest) struct { pub const baseline = false; @@ -44,7 +43,7 @@ pub const is_canary = BuildOptions.is_canary; pub const canary_revision = if (is_canary) BuildOptions.canary_revision else ""; pub const dump_source = isDebug and !isTest; pub const base_path = BuildOptions.base_path ++ "/"; -pub const allow_logs = BuildOptions.enable_logs; +pub const enable_logs = BuildOptions.enable_logs; pub const version: std.SemanticVersion = BuildOptions.version; pub const version_string = std.fmt.comptimePrint("{d}.{d}.{d}", .{ version.major, version.minor, version.patch }); @@ -103,7 +102,16 @@ pub const OperatingSystem = enum { }; } - /// npm package name + pub fn stdOSTag(self: OperatingSystem) std.Target.Os.Tag { + return switch (self) { + .mac => .macos, + .linux => .linux, + .windows => .windows, + .wasm => unreachable, + }; + } + + /// npm package name, `@oven-sh/bun-{os}-{arch}` pub fn npmName(self: OperatingSystem) []const u8 { return switch (self) { .mac => "darwin", @@ -130,6 +138,7 @@ pub const Archictecture = enum { arm64, wasm, + /// npm package name, `@oven-sh/bun-{os}-{arch}` pub fn npmName(this: Archictecture) []const u8 { return switch (this) { .x64 => "x64", diff --git a/src/env_loader.zig b/src/env_loader.zig index 8176439b26..b2375a36b8 100644 --- a/src/env_loader.zig +++ b/src/env_loader.zig @@ 
-1272,6 +1272,10 @@ pub const Map = struct { pub fn remove(this: *Map, key: string) void { this.map.remove(key); } + + pub fn cloneWithAllocator(this: *const Map, new_allocator: std.mem.Allocator) !Map { + return .{ .map = try this.map.cloneWithAllocator(new_allocator) }; + } }; pub var instance: ?*Loader = null; diff --git a/src/fd.zig b/src/fd.zig index b126c1b7e8..27e9745e2e 100644 --- a/src/fd.zig +++ b/src/fd.zig @@ -1,6 +1,5 @@ const std = @import("std"); -const os = std.os; -const linux = os.linux; +const posix = std.posix; const bun = @import("root").bun; const env = bun.Environment; @@ -68,7 +67,7 @@ pub const FDImpl = packed struct { .value = .{ .as_system = invalid_value }, }; - pub const System = std.os.fd_t; + pub const System = posix.fd_t; pub const SystemAsInt = switch (env.os) { .windows => u63, @@ -227,18 +226,18 @@ pub const FDImpl = packed struct { .linux => result: { const fd = this.encode(); bun.assert(fd != bun.invalid_fd); - bun.assert(fd.cast() > -1); - break :result switch (linux.getErrno(linux.close(fd.cast()))) { - .BADF => bun.sys.Error{ .errno = @intFromEnum(os.E.BADF), .syscall = .close, .fd = fd }, + bun.assert(fd.cast() >= 0); + break :result switch (bun.C.getErrno(bun.sys.system.close(fd.cast()))) { + .BADF => bun.sys.Error{ .errno = @intFromEnum(posix.E.BADF), .syscall = .close, .fd = fd }, else => null, }; }, .mac => result: { const fd = this.encode(); bun.assert(fd != bun.invalid_fd); - bun.assert(fd.cast() > -1); - break :result switch (bun.sys.system.getErrno(bun.sys.system.@"close$NOCANCEL"(fd.cast()))) { - .BADF => bun.sys.Error{ .errno = @intFromEnum(os.E.BADF), .syscall = .close, .fd = fd }, + bun.assert(fd.cast() >= 0); + break :result switch (bun.C.getErrno(bun.sys.system.@"close$NOCANCEL"(fd.cast()))) { + .BADF => bun.sys.Error{ .errno = @intFromEnum(posix.E.BADF), .syscall = .close, .fd = fd }, else => null, }; }, @@ -272,7 +271,7 @@ pub const FDImpl = packed struct { if (env.isDebug) { if (result) |err| { - if 
(err.errno == @intFromEnum(os.E.BADF)) { + if (err.errno == @intFromEnum(posix.E.BADF)) { // TODO(@paperdave): Zig Compiler Bug, if you remove `this` from the log. An error is correctly printed, but with the wrong reference trace bun.Output.debugWarn("close({s}) = EBADF. This is an indication of a file descriptor UAF", .{this_fmt}); } else { diff --git a/src/fmt.zig b/src/fmt.zig index 54482fb913..f4d9ccde8a 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -1078,6 +1078,7 @@ pub fn fastDigitCount(x: u64) u64 { pub const SizeFormatter = struct { value: usize = 0, + pub fn format(self: SizeFormatter, comptime _: []const u8, opts: fmt.FormatOptions, writer: anytype) !void { const math = std.math; const value = self.value; @@ -1097,12 +1098,12 @@ pub const SizeFormatter = struct { const suffix = mags_si[magnitude]; if (suffix == ' ') { - try fmt.formatFloatDecimal(new_value / 1000.0, .{ .precision = 2 }, writer); - return writer.writeAll(" KB"); - } else { - try fmt.formatFloatDecimal(new_value, .{ .precision = if (std.math.approxEqAbs(f64, new_value, @trunc(new_value), 0.100)) @as(usize, 1) else @as(usize, 2) }, writer); + try writer.print("{d:.2} KB", .{new_value / 1000.0}); + return; } - return writer.writeAll(&[_]u8{ ' ', suffix, 'B' }); + const precision: usize = if (std.math.approxEqAbs(f64, new_value, @trunc(new_value), 0.100)) 1 else 2; + try fmt.formatType(new_value, "d", .{ .precision = precision }, writer, 0); + try writer.writeAll(&.{ ' ', suffix, 'B' }); } }; @@ -1111,7 +1112,7 @@ pub fn size(value: anytype) SizeFormatter { f64, f32, f128 => SizeFormatter{ .value = @as(u64, @intFromFloat(value)), }, - else => SizeFormatter{ .value = @as(u64, @intCast(value)) }, + else => SizeFormatter{ .value = value }, }; } @@ -1281,7 +1282,7 @@ pub fn fmtDouble(number: f64) FormatDouble { pub const FormatDouble = struct { number: f64, - extern "C" fn WTF__dtoa(buf_124_bytes: *[124]u8, number: f64) void; + extern fn WTF__dtoa(buf_124_bytes: *[124]u8, number: f64) void; pub 
fn dtoa(buf: *[124]u8, number: f64) []const u8 { WTF__dtoa(buf, number); diff --git a/src/fs.zig b/src/fs.zig index ebfc2e817b..0649fb4c66 100644 --- a/src/fs.zig +++ b/src/fs.zig @@ -78,14 +78,14 @@ pub const FileSystem = struct { return try std.fmt.bufPrintZ(buf, ".{any}-{any}.{s}", .{ bun.fmt.hexIntLower(hex_value), - bun.fmt.hexIntUpper(tmpname_id_number.fetchAdd(1, .Monotonic)), + bun.fmt.hexIntUpper(tmpname_id_number.fetchAdd(1, .monotonic)), extname, }); } - pub var max_fd: std.os.fd_t = 0; + pub var max_fd: std.posix.fd_t = 0; - pub inline fn setMaxFd(fd: std.os.fd_t) void { + pub inline fn setMaxFd(fd: std.posix.fd_t) void { if (Environment.isWindows) { return; } @@ -274,12 +274,13 @@ pub const FileSystem = struct { } pub fn getComptimeQuery(entry: *const DirEntry, comptime query_str: anytype) ?Entry.Lookup { - comptime var query: [query_str.len]u8 = undefined; + comptime var query_var: [query_str.len]u8 = undefined; comptime for (query_str, 0..) |c, i| { - query[i] = std.ascii.toLower(c); + query_var[i] = std.ascii.toLower(c); }; - const query_hashed = comptime std.hash_map.hashString(&query); + const query_hashed = comptime std.hash_map.hashString(&query_var); + const query = query_var[0..query_str.len].*; const result = entry.data.getAdapted( @as([]const u8, &query), @@ -311,10 +312,11 @@ pub const FileSystem = struct { } pub fn hasComptimeQuery(entry: *const DirEntry, comptime query_str: anytype) bool { - comptime var query: [query_str.len]u8 = undefined; + comptime var query_var: [query_str.len]u8 = undefined; comptime for (query_str, 0..) 
|c, i| { - query[i] = std.ascii.toLower(c); + query_var[i] = std.ascii.toLower(c); }; + const query = query_var[0..query_str.len].*; const query_hashed = comptime std.hash_map.hashString(&query); @@ -326,7 +328,7 @@ pub const FileSystem = struct { } pub fn eql(_: @This(), _: []const u8, b: []const u8) bool { - return strings.eqlComptime(b, query); + return strings.eqlComptime(b, &query); } }{}, ); @@ -513,11 +515,11 @@ pub const FileSystem = struct { } pub fn printLimits() void { - const LIMITS = [_]std.os.rlimit_resource{ std.os.rlimit_resource.STACK, std.os.rlimit_resource.NOFILE }; + const LIMITS = [_]std.posix.rlimit_resource{ std.posix.rlimit_resource.STACK, std.posix.rlimit_resource.NOFILE }; Output.print("{{\n", .{}); inline for (LIMITS, 0..) |limit_type, i| { - const limit = std.os.getrlimit(limit_type) catch return; + const limit = std.posix.getrlimit(limit_type) catch return; if (i == 0) { Output.print(" \"stack\": [{d}, {d}],\n", .{ limit.cur, limit.max }); @@ -561,7 +563,7 @@ pub const FileSystem = struct { } var tmp_buf: bun.PathBuffer = undefined; - const cwd = std.os.getcwd(&tmp_buf) catch @panic("Failed to get cwd for platformTempDir"); + const cwd = std.posix.getcwd(&tmp_buf) catch @panic("Failed to get cwd for platformTempDir"); const root = bun.path.windowsFilesystemRoot(cwd); break :brk bun.fmt.allocPrint( bun.default_allocator, @@ -672,10 +674,10 @@ pub const FileSystem = struct { // We originally used a temporary directory, but it caused EXDEV. 
const dir_fd = std.fs.cwd().fd; - const flags = std.os.O.CREAT | std.os.O.RDWR | std.os.O.CLOEXEC; + const flags = bun.O.CREAT | bun.O.RDWR | bun.O.CLOEXEC; this.dir_fd = bun.toFD(dir_fd); - const result = try bun.sys.openat(bun.toFD(dir_fd), name, flags, std.os.S.IRWXU).unwrap(); + const result = try bun.sys.openat(bun.toFD(dir_fd), name, flags, std.posix.S.IRWXU).unwrap(); this.fd = bun.toFD(result); } @@ -717,7 +719,7 @@ pub const FileSystem = struct { pub fn create(this: *TmpfileWindows, rfs: *RealFS, name: [:0]const u8) !void { const tmpdir_ = try rfs.openTmpDir(); - const flags = std.os.O.CREAT | std.os.O.WRONLY | std.os.O.CLOEXEC; + const flags = bun.O.CREAT | bun.O.WRONLY | bun.O.CLOEXEC; this.fd = try bun.sys.openat(bun.toFD(tmpdir_.fd), name, flags, 0).unwrap(); var buf: bun.PathBuffer = undefined; @@ -798,16 +800,16 @@ pub const FileSystem = struct { return std.math.maxInt(usize); } - const LIMITS = [_]std.os.rlimit_resource{ std.os.rlimit_resource.STACK, std.os.rlimit_resource.NOFILE }; + const LIMITS = [_]std.posix.rlimit_resource{ std.posix.rlimit_resource.STACK, std.posix.rlimit_resource.NOFILE }; inline for (LIMITS, 0..) 
|limit_type, i| { - const limit = try std.os.getrlimit(limit_type); + const limit = try std.posix.getrlimit(limit_type); if (limit.cur < limit.max) { - var new_limit = std.mem.zeroes(std.os.rlimit); + var new_limit = std.mem.zeroes(std.posix.rlimit); new_limit.cur = limit.max; new_limit.max = limit.max; - if (std.os.setrlimit(limit_type, new_limit)) { + if (std.posix.setrlimit(limit_type, new_limit)) { if (i == 1) { Limit.handles = limit.max; } else { @@ -949,7 +951,7 @@ pub const FileSystem = struct { else bun.sys.openA( unsafe_dir_string, - std.os.O.DIRECTORY, + bun.O.DIRECTORY, 0, ); const fd = try dirfd.unwrap(); @@ -1876,7 +1878,7 @@ pub const Path = struct { }; // pub fn customRealpath(allocator: std.mem.Allocator, path: string) !string { -// var opened = try std.os.open(path, if (Environment.isLinux) std.os.O.PATH else std.os.O.RDONLY, 0); -// defer std.os.close(opened); +// var opened = try std.posix.open(path, if (Environment.isLinux) bun.O.PATH else bun.O.RDONLY, 0); +// defer std.posix.close(opened); // } diff --git a/src/futex.zig b/src/futex.zig index 734449f3c5..7b20ea9150 100644 --- a/src/futex.zig +++ b/src/futex.zig @@ -6,6 +6,7 @@ // This is copy-pasted from Zig's source code to fix an issue with linking on macOS Catalina and earlier. 
const std = @import("std"); +const bun = @import("root").bun; const builtin = @import("builtin"); const Futex = @This(); @@ -50,7 +51,7 @@ pub fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{TimedOut} // Avoid calling into the OS for no-op waits() if (timeout) |timeout_ns| { if (timeout_ns == 0) { - if (ptr.load(.SeqCst) != expect) return; + if (ptr.load(.seq_cst) != expect) return; return error.TimedOut; } } @@ -132,8 +133,8 @@ const LinuxFutex = struct { const linux = std.os.linux; fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{TimedOut}!void { - var ts: std.os.timespec = undefined; - var ts_ptr: ?*std.os.timespec = null; + var ts: std.posix.timespec = undefined; + var ts_ptr: ?*std.posix.timespec = null; // Futex timespec timeout is already in relative time. if (timeout) |timeout_ns| { @@ -142,7 +143,7 @@ const LinuxFutex = struct { ts.tv_nsec = @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s)); } - switch (linux.getErrno(linux.futex_wait( + switch (bun.C.getErrno(linux.futex_wait( @as(*const i32, @ptrCast(ptr)), linux.FUTEX.PRIVATE_FLAG | linux.FUTEX.WAIT, @as(i32, @bitCast(expect)), @@ -159,7 +160,7 @@ const LinuxFutex = struct { } fn wake(ptr: *const Atomic(u32), num_waiters: u32) void { - switch (linux.getErrno(linux.futex_wake( + switch (bun.C.getErrno(linux.futex_wake( @as(*const i32, @ptrCast(ptr)), linux.FUTEX.PRIVATE_FLAG | linux.FUTEX.WAKE, std.math.cast(i32, num_waiters) orelse std.math.maxInt(i32), @@ -173,7 +174,7 @@ const LinuxFutex = struct { }; const DarwinFutex = struct { - const darwin = std.os.darwin; + const darwin = std.c; fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{TimedOut}!void { // Darwin XNU 7195.50.7.100.1 introduced __ulock_wait2 and migrated code paths (notably pthread_cond_t) towards it: @@ -208,7 +209,7 @@ const DarwinFutex = struct { }; if (status >= 0) return; - switch (@as(std.os.E, @enumFromInt(-status))) { + switch (@as(std.posix.E, 
@enumFromInt(-status))) { .INTR => {}, // Address of the futex is paged out. This is unlikely, but possible in theory, and // pthread/libdispatch on darwin bother to handle it. In this case we'll return @@ -230,7 +231,7 @@ const DarwinFutex = struct { const status = darwin.__ulock_wake(flags, addr, 0); if (status >= 0) return; - switch (@as(std.os.E, @enumFromInt(-status))) { + switch (@as(std.posix.E, @enumFromInt(-status))) { .INTR => continue, // spurious wake() .FAULT => continue, // address of the lock was paged out .NOENT => return, // nothing was woken up @@ -251,7 +252,7 @@ const PosixFutex = struct { assert(std.c.pthread_mutex_lock(&bucket.mutex) == .SUCCESS); defer assert(std.c.pthread_mutex_unlock(&bucket.mutex) == .SUCCESS); - if (ptr.load(.SeqCst) != expect) { + if (ptr.load(.seq_cst) != expect) { return; } @@ -347,11 +348,11 @@ const PosixFutex = struct { .notified => return, } - var ts: std.os.timespec = undefined; - var ts_ptr: ?*const std.os.timespec = null; + var ts: std.posix.timespec = undefined; + var ts_ptr: ?*const std.posix.timespec = null; if (timeout) |timeout_ns| { ts_ptr = &ts; - std.os.clock_gettime(std.os.CLOCK_REALTIME, &ts) catch unreachable; + std.posix.clock_gettime(std.posix.CLOCK_REALTIME, &ts) catch unreachable; ts.tv_sec += @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s)); ts.tv_nsec += @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s)); if (ts.tv_nsec >= std.time.ns_per_s) { diff --git a/src/generated_versions_list.zig b/src/generated_versions_list.zig index f1ef3256bd..c6354e9c4c 100644 --- a/src/generated_versions_list.zig +++ b/src/generated_versions_list.zig @@ -4,7 +4,7 @@ pub const boringssl = "29a2cd359458c9384694b75456026e4b57e3e567"; pub const libarchive = "313aa1fa10b657de791e3202c168a6c833bc3543"; pub const mimalloc = "4c283af60cdae205df5a872530c77e2a6a307d43"; pub const picohttpparser = "066d2b1e9ab820703db0837a7255d92d30f0c9f5"; -pub const webkit = 
"353aa20567e80a74eb43694a27cdf41f4a56ccef"; +pub const webkit = "64d04ec1a65d91326c5f2298b9c7d05b56125252"; pub const zig = @import("std").fmt.comptimePrint("{}", .{@import("builtin").zig_version}); pub const zlib = "886098f3f339617b4243b286f5ed364b9989e245"; pub const tinycc = "ab631362d839333660a265d3084d8ff060b96753"; diff --git a/src/glob.zig b/src/glob.zig index 65e0ba2464..ee171a6804 100644 --- a/src/glob.zig +++ b/src/glob.zig @@ -170,7 +170,7 @@ pub const SyscallAccessor = struct { }; pub fn open(path: [:0]const u8) !Maybe(Handle) { - return switch (Syscall.open(path, std.os.O.DIRECTORY | std.os.O.RDONLY, 0)) { + return switch (Syscall.open(path, bun.O.DIRECTORY | bun.O.RDONLY, 0)) { .err => |err| .{ .err = err }, .result => |fd| .{ .result = Handle{ .value = fd } }, }; @@ -185,7 +185,7 @@ pub const SyscallAccessor = struct { } pub fn openat(handle: Handle, path: [:0]const u8) !Maybe(Handle) { - return switch (Syscall.openat(handle.value, path, std.os.O.DIRECTORY | std.os.O.RDONLY, 0)) { + return switch (Syscall.openat(handle.value, path, bun.O.DIRECTORY | bun.O.RDONLY, 0)) { .err => |err| .{ .err = err }, .result => |fd| .{ .result = Handle{ .value = fd } }, }; diff --git a/src/heap_breakdown.zig b/src/heap_breakdown.zig index c9bff94879..6dba5d508b 100644 --- a/src/heap_breakdown.zig +++ b/src/heap_breakdown.zig @@ -36,16 +36,16 @@ pub const malloc_zone_t = opaque { pub var zone_t: std.atomic.Value(?*malloc_zone_t) = std.atomic.Value(?*malloc_zone_t).init(null); pub var zone_t_lock: bun.Lock = bun.Lock.init(); }; - return Holder.zone_t.load(.Monotonic) orelse brk: { + return Holder.zone_t.load(.monotonic) orelse brk: { Holder.zone_t_lock.lock(); defer Holder.zone_t_lock.unlock(); - if (Holder.zone_t.load(.Monotonic)) |z| { + if (Holder.zone_t.load(.monotonic)) |z| { break :brk z; } const z = malloc_zone_t.create(T); - Holder.zone_t.store(z, .Monotonic); + Holder.zone_t.store(z, .monotonic); break :brk z; }; } diff --git a/src/http.zig b/src/http.zig index 
548728b291..e320919b4f 100644 --- a/src/http.zig +++ b/src/http.zig @@ -25,10 +25,12 @@ const Brotli = bun.brotli; const StringBuilder = @import("./string_builder.zig"); const ThreadPool = bun.ThreadPool; const ObjectPool = @import("./pool.zig").ObjectPool; -const SOCK = os.SOCK; +const posix = std.posix; +const SOCK = posix.SOCK; const Arena = @import("./mimalloc_arena.zig").Arena; const ZlibPool = @import("./http/zlib.zig"); const BoringSSL = bun.BoringSSL; +const Progress = bun.Progress; const X509 = @import("./bun.js/api/bun/x509.zig"); const SSLConfig = @import("./bun.js/api/server.zig").ServerConfig.SSLConfig; @@ -91,7 +93,7 @@ pub const Signals = struct { pub fn get(this: Signals, comptime field: std.meta.FieldEnum(Signals)) bool { var ptr: *std.atomic.Value(bool) = @field(this, @tagName(field)) orelse return false; - return ptr.load(.Monotonic); + return ptr.load(.monotonic); } }; @@ -148,7 +150,7 @@ pub const Sendfile = struct { std.os.linux.sendfile(socket.fd().cast(), this.fd.cast(), &signed_offset, this.remain); this.offset = @as(u64, @intCast(signed_offset)); - const errcode = std.os.linux.getErrno(val); + const errcode = bun.C.getErrno(val); this.remain -|= @as(u64, @intCast(this.offset -| begin)); @@ -160,9 +162,9 @@ pub const Sendfile = struct { return .{ .err = bun.errnoToZigErr(errcode) }; } } else if (Environment.isPosix) { - var sbytes: std.os.off_t = adjusted_count; + var sbytes: std.posix.off_t = adjusted_count; const signed_offset = @as(i64, @bitCast(@as(u64, this.offset))); - const errcode = std.c.getErrno(std.c.sendfile( + const errcode = bun.C.getErrno(std.c.sendfile( this.fd.cast(), socket.fd().cast(), signed_offset, @@ -768,7 +770,7 @@ pub const HTTPThread = struct { const threadlog = Output.scoped(.HTTPThread, true); pub fn init() !void { - if (http_thread_loaded.swap(true, .SeqCst)) { + if (http_thread_loaded.swap(true, .seq_cst)) { return; } @@ -801,7 +803,7 @@ pub const HTTPThread = struct { const loop = 
bun.JSC.MiniEventLoop.initGlobal(null); if (Environment.isWindows) { - _ = std.os.getenvW(comptime bun.strings.w("SystemRoot")) orelse { + _ = std.process.getenvW(comptime bun.strings.w("SystemRoot")) orelse { std.debug.panic("The %SystemRoot% environment variable is not set. Bun needs this set in order for network requests to work.", .{}); }; } @@ -809,7 +811,7 @@ pub const HTTPThread = struct { http_thread.loop = loop; http_thread.http_context.init() catch @panic("Failed to init http context"); http_thread.https_context.init() catch @panic("Failed to init https context"); - http_thread.has_awoken.store(true, .Monotonic); + http_thread.has_awoken.store(true, .monotonic); http_thread.processEvents(); } @@ -877,8 +879,8 @@ pub const HTTPThread = struct { } var count: usize = 0; - var active = AsyncHTTP.active_requests_count.load(.Monotonic); - const max = AsyncHTTP.max_simultaneous_requests.load(.Monotonic); + var active = AsyncHTTP.active_requests_count.load(.monotonic); + const max = AsyncHTTP.max_simultaneous_requests.load(.monotonic); if (active >= max) return; defer { if (comptime Environment.allow_assert) { @@ -941,12 +943,12 @@ pub const HTTPThread = struct { .is_tls = http.client.isHTTPS(), }) catch bun.outOfMemory(); } - if (this.has_awoken.load(.Monotonic)) + if (this.has_awoken.load(.monotonic)) this.loop.loop.wakeup(); } pub fn wakeup(this: *@This()) void { - if (this.has_awoken.load(.Monotonic)) + if (this.has_awoken.load(.monotonic)) this.loop.loop.wakeup(); } @@ -957,12 +959,12 @@ pub const HTTPThread = struct { { var batch_ = batch; while (batch_.pop()) |task| { - const http: *AsyncHTTP = @fieldParentPtr(AsyncHTTP, "task", task); + const http: *AsyncHTTP = @fieldParentPtr("task", task); this.queued_tasks.push(http); } } - if (this.has_awoken.load(.Monotonic)) + if (this.has_awoken.load(.monotonic)) this.loop.loop.wakeup(); } }; @@ -1171,7 +1173,7 @@ pub inline fn cleanup(force: bool) void { pub const Headers = @import("./http/headers.zig"); pub const 
SOCKET_FLAGS: u32 = if (Environment.isLinux) - SOCK.CLOEXEC | os.MSG.NOSIGNAL + SOCK.CLOEXEC | posix.MSG.NOSIGNAL else SOCK.CLOEXEC; @@ -1566,7 +1568,7 @@ allow_retry: bool = false, redirect_type: FetchRedirect = FetchRedirect.follow, redirect: []u8 = &.{}, timeout: usize = 0, -progress_node: ?*std.Progress.Node = null, +progress_node: ?*Progress.Node = null, disable_timeout: bool = false, disable_keepalive: bool = false, disable_decompression: bool = false, @@ -1632,10 +1634,6 @@ const Stage = enum(u8) { fail, }; -// threadlocal var resolver_cache - -const os = std.os; - // lowercase hash header names so that we can be sure pub fn hashHeaderName(name: string) u64 { var hasher = std.hash.Wyhash.init(0); @@ -1724,7 +1722,7 @@ pub const HTTPChannelContext = struct { channel: *HTTPChannel, pub fn callback(data: HTTPCallbackPair) void { - var this: *HTTPChannelContext = @fieldParentPtr(HTTPChannelContext, "http", data.@"0"); + var this: *HTTPChannelContext = @fieldParentPtr("http", data.@"0"); this.channel.writeItem(data) catch unreachable; } }; @@ -1789,20 +1787,20 @@ pub const AsyncHTTP = struct { ) catch unreachable; return; } - AsyncHTTP.max_simultaneous_requests.store(max, .Monotonic); + AsyncHTTP.max_simultaneous_requests.store(max, .monotonic); } } pub fn signalHeaderProgress(this: *AsyncHTTP) void { - @fence(.Release); + @fence(.release); var progress = this.signals.header_progress orelse return; - progress.store(true, .Release); + progress.store(true, .release); } pub fn enableBodyStreaming(this: *AsyncHTTP) void { - @fence(.Release); + @fence(.release); var stream = this.signals.body_streaming orelse return; - stream.store(true, .Release); + stream.store(true, .release); } pub fn clearData(this: *AsyncHTTP) void { @@ -1860,7 +1858,7 @@ pub const AsyncHTTP = struct { .result_callback = callback, .http_proxy = options.http_proxy, .signals = options.signals orelse .{}, - .async_http_id = if (options.signals != null and options.signals.?.aborted != null) 
async_http_id.fetchAdd(1, .Monotonic) else 0, + .async_http_id = if (options.signals != null and options.signals.?.aborted != null) async_http_id.fetchAdd(1, .monotonic) else 0, .timeout = timeout, }; @@ -2036,7 +2034,7 @@ pub const AsyncHTTP = struct { } pub fn schedule(this: *AsyncHTTP, _: std.mem.Allocator, batch: *ThreadPool.Batch) void { - this.state.store(.scheduled, .Monotonic); + this.state.store(.scheduled, .monotonic); batch.push(ThreadPool.Batch.from(&this.task)); } @@ -2083,11 +2081,11 @@ pub const AsyncHTTP = struct { if (result.metadata) |metadata| { this.response = metadata.response; } - this.state.store(.success, .Monotonic); + this.state.store(.success, .monotonic); } else { this.err = result.fail; this.response = null; - this.state.store(State.fail, .Monotonic); + this.state.store(State.fail, .monotonic); } if (result.has_more) { @@ -2100,24 +2098,24 @@ pub const AsyncHTTP = struct { callback.function(callback.ctx, async_http, result); } - const active_requests = AsyncHTTP.active_requests_count.fetchSub(1, .Monotonic); + const active_requests = AsyncHTTP.active_requests_count.fetchSub(1, .monotonic); assert(active_requests > 0); } - if (AsyncHTTP.active_requests_count.load(.Monotonic) < AsyncHTTP.max_simultaneous_requests.load(.Monotonic)) { + if (AsyncHTTP.active_requests_count.load(.monotonic) < AsyncHTTP.max_simultaneous_requests.load(.monotonic)) { http_thread.drainEvents(); } } pub fn startAsyncHTTP(task: *Task) void { - var this = @fieldParentPtr(AsyncHTTP, "task", task); + var this: *AsyncHTTP = @fieldParentPtr("task", task); this.onStart(); } pub fn onStart(this: *AsyncHTTP) void { - _ = active_requests_count.fetchAdd(1, .Monotonic); + _ = active_requests_count.fetchAdd(1, .monotonic); this.err = null; - this.state.store(.sending, .Monotonic); + this.state.store(.sending, .monotonic); this.client.result_callback = HTTPClientResult.Callback.New(*AsyncHTTP, onAsyncHTTPCallback).init( this, ); @@ -3002,7 +3000,7 @@ fn fail(this: *HTTPClient, 
err: anyerror) void { this.state.reset(this.allocator); this.proxy_tunneling = false; - callback.run(@fieldParentPtr(AsyncHTTP, "client", this), result); + callback.run(@fieldParentPtr("client", this), result); } // We have to clone metadata immediately after use @@ -3090,7 +3088,7 @@ pub fn progressUpdate(this: *HTTPClient, comptime is_ssl: bool, ctx: *NewHTTPCon } result.body.?.* = body; - callback.run(@fieldParentPtr(AsyncHTTP, "client", this), result); + callback.run(@fieldParentPtr("client", this), result); if (comptime print_every > 0) { print_every_i += 1; diff --git a/src/http/websocket.zig b/src/http/websocket.zig index 00eb00a5f7..7ecf4b6d7a 100644 --- a/src/http/websocket.zig +++ b/src/http/websocket.zig @@ -2,7 +2,7 @@ // Thank you @frmdstryr. const std = @import("std"); -const os = std.os; +const posix = std.posix; const bun = @import("root").bun; const string = bun.string; const Output = bun.Output; @@ -152,7 +152,7 @@ pub const Websocket = struct { reader: ReadStream.Reader, flags: u32 = 0, pub fn create( - fd: std.os.fd_t, + fd: std.posix.fd_t, comptime flags: u32, ) Websocket { const stream = ReadStream{ diff --git a/src/http/zlib.zig b/src/http/zlib.zig index 3357720e61..3055ac99f1 100644 --- a/src/http/zlib.zig +++ b/src/http/zlib.zig @@ -17,7 +17,7 @@ pub fn get(allocator: std.mem.Allocator) *MutableString { pub fn put(mutable: *MutableString) void { mutable.reset(); - var node = @fieldParentPtr(BufferPool.Node, "data", mutable); + var node: BufferPool.Node = @fieldParentPtr("data", mutable); node.release(); } diff --git a/src/install/bin.zig b/src/install/bin.zig index 15816285d4..36f8c69312 100644 --- a/src/install/bin.zig +++ b/src/install/bin.zig @@ -291,7 +291,7 @@ pub const Bin = extern struct { pub const Error = error{ NotImplementedYet, - } || std.os.SymLinkError || std.os.OpenError || std.os.RealPathError; + } || std.posix.SymLinkError || bun.OpenError || std.posix.RealPathError; pub fn ensureUmask() void { if (!has_set_umask) { @@ 
-307,7 +307,7 @@ pub const Bin = extern struct { return name_[(strings.indexOfChar(name_, '/') orelse return name) + 1 ..]; } - fn setPermissions(folder: std.os.fd_t, target: [:0]const u8) void { + fn setPermissions(folder: std.posix.fd_t, target: [:0]const u8) void { // we use fchmodat to avoid any issues with current working directory _ = C.fchmodat(folder, target, @intCast(umask | 0o777), 0); } @@ -315,7 +315,7 @@ pub const Bin = extern struct { fn setSymlinkAndPermissions(this: *Linker, target_path: [:0]const u8, dest_path: [:0]const u8, link_global: bool) void { if (comptime !Environment.isWindows) { const node_modules = this.package_installed_node_modules.asDir(); - std.os.symlinkatZ(target_path, node_modules.fd, dest_path) catch |err| { + std.posix.symlinkatZ(target_path, node_modules.fd, dest_path) catch |err| { // Silently ignore PathAlreadyExists if the symlink is valid. // Most likely, the symlink was already created by another package if (err == error.PathAlreadyExists) { @@ -326,7 +326,7 @@ pub const Bin = extern struct { return; }; - std.os.symlinkatZ(target_path, node_modules.fd, dest_path) catch |err2| { + std.posix.symlinkatZ(target_path, node_modules.fd, dest_path) catch |err2| { this.err = err2; return; }; @@ -407,7 +407,7 @@ pub const Bin = extern struct { ) else target_wpath, - std.os.O.RDONLY, + bun.O.RDONLY, ).unwrap() catch break :contents null; defer _ = bun.sys.close(fd); const reader = fd.asFile().reader(); @@ -484,7 +484,7 @@ pub const Bin = extern struct { const from = root_dir.realpath(dot_bin, &target_buf) catch |realpath_err| brk: { if (realpath_err == error.FileNotFound) { if (comptime Environment.isWindows) { - std.os.mkdiratW(root_dir.fd, comptime bun.OSPathLiteral(".bin"), 0) catch |err| { + std.posix.mkdiratW(root_dir.fd, comptime bun.OSPathLiteral(".bin"), 0) catch |err| { this.err = err; return; }; @@ -750,7 +750,7 @@ pub const Bin = extern struct { from_remain[0] = 0; const dest_path: [:0]u8 = target_buf[0 .. 
@intFromPtr(from_remain.ptr) - @intFromPtr(&target_buf) :0]; - std.os.unlinkatZ(this.root_node_modules_folder.cast(), dest_path, 0) catch {}; + std.posix.unlinkatZ(this.root_node_modules_folder.cast(), dest_path, 0) catch {}; }, .named_file => { const name_to_use = this.bin.value.named_file[0].slice(this.string_buf); @@ -759,7 +759,7 @@ pub const Bin = extern struct { from_remain[0] = 0; const dest_path: [:0]u8 = target_buf[0 .. @intFromPtr(from_remain.ptr) - @intFromPtr(&target_buf) :0]; - std.os.unlinkatZ(this.root_node_modules_folder.cast(), dest_path, 0) catch {}; + std.posix.unlinkatZ(this.root_node_modules_folder.cast(), dest_path, 0) catch {}; }, .map => { var extern_string_i: u32 = this.bin.value.map.off; @@ -787,7 +787,7 @@ pub const Bin = extern struct { from_remain[0] = 0; const dest_path: [:0]u8 = target_buf[0 .. @intFromPtr(from_remain.ptr) - @intFromPtr(&target_buf) :0]; - std.os.unlinkatZ(this.root_node_modules_folder.cast(), dest_path, 0) catch {}; + std.posix.unlinkatZ(this.root_node_modules_folder.cast(), dest_path, 0) catch {}; } }, .dir => { @@ -835,7 +835,7 @@ pub const Bin = extern struct { else std.fmt.bufPrintZ(&dest_buf, "{s}", .{entry.name}) catch continue; - std.os.unlinkatZ( + std.posix.unlinkatZ( this.root_node_modules_folder.cast(), to_path, 0, diff --git a/src/install/install.zig b/src/install/install.zig index 68059d76ba..ace051412a 100644 --- a/src/install/install.zig +++ b/src/install/install.zig @@ -668,7 +668,7 @@ pub const Task = struct { Output.Source.configureThread(); defer Output.flush(); - var this = @fieldParentPtr(Task, "threadpool_task", task); + var this: *Task = @fieldParentPtr("threadpool_task", task); const manager = this.package_manager; defer { if (this.status == .success) { @@ -980,12 +980,12 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { symlink, const BackendSupport = std.EnumArray(Method, bool); - pub const map = std.ComptimeStringMap(Method, .{ - .{ "clonefile", Method.clonefile }, - .{ 
"clonefile_each_dir", Method.clonefile_each_dir }, - .{ "hardlink", Method.hardlink }, - .{ "copyfile", Method.copyfile }, - .{ "symlink", Method.symlink }, + pub const map = bun.ComptimeStringMap(Method, .{ + .{ "clonefile", .clonefile }, + .{ "clonefile_each_dir", .clonefile_each_dir }, + .{ "hardlink", .hardlink }, + .{ "copyfile", .copyfile }, + .{ "symlink", .symlink }, }); pub const macOS = BackendSupport.initDefault(false, .{ @@ -1041,7 +1041,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { if (comptime bun.Environment.isPosix) { _ = bun.sys.fstatat(bun.toFD(destination_dir.fd), patch_tag_path).unwrap() catch return false; } else { - switch (bun.sys.openat(bun.toFD(destination_dir.fd), patch_tag_path, std.os.O.RDONLY, 0)) { + switch (bun.sys.openat(bun.toFD(destination_dir.fd), patch_tag_path, bun.O.RDONLY, 0)) { .err => return false, .result => |fd| _ = bun.sys.close(fd), } @@ -1140,7 +1140,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { if (std.fs.cwd().fd != destination_dir.fd) destination_dir.close(); } - var package_json_file = File.openat(destination_dir, package_json_path, std.os.O.RDONLY, 0).unwrap() catch return false; + var package_json_file = File.openat(destination_dir, package_json_path, bun.O.RDONLY, 0).unwrap() catch return false; defer package_json_file.close(); // Heuristic: most package.jsons will be less than 2048 bytes. 
@@ -1326,11 +1326,11 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { 0, )) { 0 => {}, - else => |errno| switch (std.os.errno(errno)) { + else => |errno| switch (std.posix.errno(errno)) { .XDEV => return error.NotSupported, // not same file system .OPNOTSUPP => return error.NotSupported, .NOENT => return error.FileNotFound, - // sometimes the downlowded npm package has already node_modules with it, so just ignore exist error here + // sometimes the downloaded npm package has already node_modules with it, so just ignore exist error here .EXIST => {}, .ACCES => return error.AccessDenied, else => return error.Unexpected, @@ -1386,7 +1386,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { 0, )) { 0 => .{ .success = {} }, - else => |errno| switch (std.os.errno(errno)) { + else => |errno| switch (std.posix.errno(errno)) { .XDEV => error.NotSupported, // not same file system .OPNOTSUPP => error.NotSupported, .NOENT => error.FileNotFound, @@ -1642,22 +1642,22 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { wake_value: std.atomic.Value(u32) = std.atomic.Value(u32).init(0), pub fn completeOne(this: *@This()) void { - @fence(.Release); - if (this.remaining.fetchSub(1, .Monotonic) == 1) { - _ = this.wake_value.fetchAdd(1, .Monotonic); + @fence(.release); + if (this.remaining.fetchSub(1, .monotonic) == 1) { + _ = this.wake_value.fetchAdd(1, .monotonic); bun.Futex.wake(&this.wake_value, std.math.maxInt(u32)); } } pub fn push(this: *@This(), task: *TaskType) void { - _ = this.remaining.fetchAdd(1, .Monotonic); + _ = this.remaining.fetchAdd(1, .monotonic); this.thread_pool.schedule(bun.ThreadPool.Batch.from(&task.task)); } pub fn wait(this: *@This()) void { - @fence(.Acquire); - this.wake_value.store(0, .Monotonic); - while (this.remaining.load(.Monotonic) > 0) { + @fence(.acquire); + this.wake_value.store(0, .monotonic); + while (this.remaining.load(.monotonic) > 0) { bun.Futex.wait(&this.wake_value, 0, std.time.ns_per_ms * 5) 
catch {}; } } @@ -1706,7 +1706,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { } pub fn runFromThreadPool(task: *bun.JSC.WorkPoolTask) void { - var iter = @fieldParentPtr(@This(), "task", task); + var iter: *@This() = @fieldParentPtr("task", task); defer queue.completeOne(); if (iter.run()) |err| { iter.err = err; @@ -1808,13 +1808,13 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { bun.MakePath.makePath(std.meta.Elem(@TypeOf(entry.path)), destination_dir, entry.path) catch {}; }, .file => { - std.os.linkat(entry.dir.fd, entry.basename, destination_dir.fd, entry.path, 0) catch |err| { + std.posix.linkat(entry.dir.fd, entry.basename, destination_dir.fd, entry.path, 0) catch |err| { if (err != error.PathAlreadyExists) { return err; } - std.os.unlinkat(destination_dir.fd, entry.path, 0) catch {}; - try std.os.linkat(entry.dir.fd, entry.basename, destination_dir.fd, entry.path, 0); + std.posix.unlinkat(destination_dir.fd, entry.path, 0) catch {}; + try std.posix.linkat(entry.dir.fd, entry.basename, destination_dir.fd, entry.path, 0); }; real_file_count += 1; @@ -1920,13 +1920,13 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { head2[entry.path.len + (head2.len - to_copy_into2.len)] = 0; const target: [:0]u8 = head2[0 .. 
entry.path.len + head2.len - to_copy_into2.len :0]; - std.os.symlinkat(target, destination_dir.fd, entry.path) catch |err| { + std.posix.symlinkat(target, destination_dir.fd, entry.path) catch |err| { if (err != error.PathAlreadyExists) { return err; } - std.os.unlinkat(destination_dir.fd, entry.path, 0) catch {}; - try std.os.symlinkat(entry.basename, destination_dir.fd, entry.path); + std.posix.unlinkat(destination_dir.fd, entry.path, 0) catch {}; + try std.posix.symlinkat(entry.basename, destination_dir.fd, entry.path); }; real_file_count += 1; @@ -2060,7 +2060,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { absolute_path: []const u8, task: JSC.WorkPoolTask = .{ .callback = &run }, pub fn run(task: *JSC.WorkPoolTask) void { - var unintall_task = @fieldParentPtr(@This(), "task", task); + var unintall_task: *@This() = @fieldParentPtr("task", task); var debug_timer = bun.Output.DebugTimer.start(); defer { _ = PackageManager.instance.decrementPendingTasks(); @@ -2112,10 +2112,10 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { pub fn isDanglingSymlink(path: [:0]const u8) bool { if (comptime Environment.isLinux) { - const rc = Syscall.system.open(path, @as(u32, std.os.O.PATH), @as(u32, 0)); + const rc = Syscall.system.open(path, .{ .PATH = true }, @as(u32, 0)); switch (Syscall.getErrno(rc)) { .SUCCESS => { - _ = bun.sys.close(bun.toFD(rc)); + _ = bun.sys.close(bun.toFD(@as(i32, @intCast(rc)))); return false; }, else => return true, @@ -2131,7 +2131,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { }, } } else { - const rc = Syscall.system.open(path, @as(u32, 0), @as(u32, 0)); + const rc = Syscall.system.open(path, .{}, .{}); switch (Syscall.getErrno(rc)) { .SUCCESS => { _ = Syscall.system.close(rc); @@ -2145,7 +2145,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { pub fn isDanglingWindowsBinLink(node_mod_fd: bun.FileDescriptor, path: []const u16, temp_buffer: []u8) bool { const 
WinBinLinkingShim = @import("./windows-shim/BinLinkingShim.zig"); const bin_path = bin_path: { - const fd = bun.sys.openatWindows(node_mod_fd, path, std.os.O.RDONLY).unwrap() catch return true; + const fd = bun.sys.openatWindows(node_mod_fd, path, bun.O.RDONLY).unwrap() catch return true; defer _ = bun.sys.close(fd); const size = fd.asFile().readAll(temp_buffer) catch return true; const decoded = WinBinLinkingShim.looseDecode(temp_buffer[0..size]) orelse return true; @@ -2154,7 +2154,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { }; { - const fd = bun.sys.openatWindows(node_mod_fd, bin_path, std.os.O.RDONLY).unwrap() catch return true; + const fd = bun.sys.openatWindows(node_mod_fd, bin_path, bun.O.RDONLY).unwrap() catch return true; _ = bun.sys.close(fd); } @@ -2247,7 +2247,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { const dest_dir_path = bun.getFdPath(dest_dir.fd, &dest_buf) catch |err| return Result.fail(err, .linking_dependency); const target = Path.relative(dest_dir_path, to_path); - std.os.symlinkat(target, dest_dir.fd, dest) catch |err| return Result.fail(err, .linking_dependency); + std.posix.symlinkat(target, dest_dir.fd, dest) catch |err| return Result.fail(err, .linking_dependency); } if (isDanglingSymlink(symlinked_path)) return Result.fail(error.DanglingSymlink, .linking_dependency); @@ -2308,7 +2308,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { if (result == .fail) return result; const fd = bun.toFD(destination_dir.fd); const subpath = bun.path.joinZ(&[_][]const u8{ this.destination_dir_subpath, ".bun-patch-tag" }); - const tag_fd = switch (bun.sys.openat(fd, subpath, std.os.O.CREAT | std.os.O.WRONLY, 0o666)) { + const tag_fd = switch (bun.sys.openat(fd, subpath, bun.O.CREAT | bun.O.WRONLY, 0o666)) { .err => |e| return .{ .fail = .{ .err = bun.errnoToZigErr(e.getErrno()), .step = Step.patching } }, .result => |f| f, }; @@ -2328,7 +2328,7 @@ pub fn NewPackageInstall(comptime kind: 
PkgInstallKind) type { if (result == .fail) return result; const fd = bun.toFD(destination_dir.fd); const subpath = bun.path.joinZ(&[_][]const u8{ this.destination_dir_subpath, ".bun-patch-tag" }, .auto); - const tag_fd = switch (bun.sys.openat(fd, subpath, std.os.O.CREAT | std.os.O.WRONLY | std.os.O.TRUNC, 0o666)) { + const tag_fd = switch (bun.sys.openat(fd, subpath, bun.O.CREAT | bun.O.WRONLY | bun.O.TRUNC, 0o666)) { .err => |e| return .{ .fail = .{ .err = bun.errnoToZigErr(e.getErrno()), .step = Step.patching } }, .result => |f| f, }; @@ -2428,7 +2428,7 @@ pub fn NewPackageInstall(comptime kind: PkgInstallKind) type { } pub const Resolution = @import("./resolution.zig").Resolution; -const Progress = std.Progress; +const Progress = bun.Progress; const TaggedPointer = @import("../tagged_pointer.zig"); const DependencyInstallContext = struct { @@ -2568,7 +2568,7 @@ pub const PackageManager = struct { cpu_count: u32 = 0, // progress bar stuff when not stack allocated - root_progress_node: *std.Progress.Node = undefined, + root_progress_node: *Progress.Node = undefined, to_update: bool = false, @@ -2980,12 +2980,12 @@ pub const PackageManager = struct { this.onWake.getHandler()(ctx, this); } - _ = this.wait_count.fetchAdd(1, .Monotonic); + _ = this.wait_count.fetchAdd(1, .monotonic); this.event_loop.wakeup(); } fn hasNoMorePendingLifecycleScripts(this: *PackageManager) bool { - return this.pending_lifecycle_script_tasks.load(.Monotonic) == 0; + return this.pending_lifecycle_script_tasks.load(.monotonic) == 0; } pub fn tickLifecycleScripts(this: *PackageManager) void { @@ -3425,7 +3425,7 @@ pub const PackageManager = struct { }; file.close(); - std.os.renameatZ(tempdir.fd, tmpname, cache_directory.fd, tmpname) catch |err| { + std.posix.renameatZ(tempdir.fd, tmpname, cache_directory.fd, tmpname) catch |err| { if (!tried_dot_tmp) { tried_dot_tmp = true; tempdir = cache_directory.makeOpenPath(".tmp", .{}) catch |err2| { @@ -3814,7 +3814,7 @@ pub const PackageManager = 
struct { ) catch |err| { // if we run into an error, delete the symlink // so that we don't repeatedly try to read it - std.os.unlinkat(this.getCacheDirectory().fd, cache_path, 0) catch {}; + std.posix.unlinkat(this.getCacheDirectory().fd, cache_path, 0) catch {}; return err; }; } @@ -4274,7 +4274,7 @@ pub const PackageManager = struct { } this.patch_task_fifo.writeItemAssumeCapacity(task); - _ = this.pending_pre_calc_hashes.fetchAdd(1, .Monotonic); + _ = this.pending_pre_calc_hashes.fetchAdd(1, .monotonic); } const SuccessFn = *const fn (*PackageManager, DependencyID, PackageID) void; @@ -6105,9 +6105,9 @@ pub const PackageManager = struct { if (!has_network_error) { has_network_error = true; const min = manager.options.min_simultaneous_requests; - const max = AsyncHTTP.max_simultaneous_requests.load(.Monotonic); + const max = AsyncHTTP.max_simultaneous_requests.load(.monotonic); if (max > min) { - AsyncHTTP.max_simultaneous_requests.store(@max(min, max / 2), .Monotonic); + AsyncHTTP.max_simultaneous_requests.store(@max(min, max / 2), .monotonic); } } manager.enqueueNetworkTask(task); @@ -6274,9 +6274,9 @@ pub const PackageManager = struct { if (!has_network_error) { has_network_error = true; const min = manager.options.min_simultaneous_requests; - const max = AsyncHTTP.max_simultaneous_requests.load(.Monotonic); + const max = AsyncHTTP.max_simultaneous_requests.load(.monotonic); if (max > min) { - AsyncHTTP.max_simultaneous_requests.store(@max(min, max / 2), .Monotonic); + AsyncHTTP.max_simultaneous_requests.store(@max(min, max / 2), .monotonic); } } manager.enqueueNetworkTask(task); @@ -10038,10 +10038,15 @@ pub const PackageManager = struct { if (manager.options.do.write_package_json) { // Now that we've run the install step // We can save our in-memory package.json to disk - const workspace_package_json_file = (try bun.sys.File.openat(bun.invalid_fd, manager.original_package_json_path, std.os.O.RDWR, 0).unwrap()).handle.asFile(); + const 
workspace_package_json_file = (try bun.sys.File.openat( + bun.invalid_fd, + manager.original_package_json_path, + bun.O.RDWR, + 0, + ).unwrap()).handle.asFile(); try workspace_package_json_file.pwriteAll(new_package_json_source, 0); - std.os.ftruncate(workspace_package_json_file.handle, new_package_json_source.len) catch {}; + std.posix.ftruncate(workspace_package_json_file.handle, new_package_json_source.len) catch {}; workspace_package_json_file.close(); if (subcommand == .remove) { @@ -10587,7 +10592,7 @@ pub const PackageManager = struct { const arg_kind: PatchArgKind = PatchArgKind.fromArg(argument); // Attempt to open the existing node_modules folder - var root_node_modules = switch (bun.sys.openatOSPath(bun.FD.cwd(), bun.OSPathLiteral("node_modules"), std.os.O.DIRECTORY | std.os.O.RDONLY, 0o755)) { + var root_node_modules = switch (bun.sys.openatOSPath(bun.FD.cwd(), bun.OSPathLiteral("node_modules"), bun.O.DIRECTORY | bun.O.RDONLY, 0o755)) { .result => |fd| std.fs.Dir{ .fd = fd.cast() }, .err => |e| { Output.prettyError( @@ -10945,7 +10950,7 @@ pub const PackageManager = struct { const tmpfd = switch (bun.sys.openat( bun.toFD(tmpdir.fd), tempfile_name, - std.os.O.RDWR | std.os.O.CREAT, + bun.O.RDWR | bun.O.CREAT, 0o666, )) { .result => |fd| fd, @@ -11024,7 +11029,7 @@ pub const PackageManager = struct { buf: *[1024]u8, patch_tag_path: [:0]const u8, ) bun.sys.Maybe(string) { - const patch_tag_fd = switch (bun.sys.open(patch_tag_path, std.os.O.RDONLY, 0)) { + const patch_tag_fd = switch (bun.sys.open(patch_tag_path, bun.O.RDONLY, 0)) { .result => |fd| fd, .err => |e| return .{ .err = e }, }; @@ -11174,7 +11179,7 @@ pub const PackageManager = struct { pub const PackageInstaller = struct { manager: *PackageManager, lockfile: *Lockfile, - progress: *std.Progress, + progress: *Progress, // relative paths from `nextNodeModulesFolder` will be copied into this list. 
node_modules: NodeModulesFolder, @@ -11337,9 +11342,9 @@ pub const PackageManager = struct { pub fn completeRemainingScripts(this: *PackageInstaller, comptime log_level: Options.LogLevel) void { for (this.pending_lifecycle_scripts.items) |entry| { const package_name = entry.list.package_name; - while (LifecycleScriptSubprocess.alive_count.load(.Monotonic) >= this.manager.options.max_concurrent_lifecycle_scripts) { + while (LifecycleScriptSubprocess.alive_count.load(.monotonic) >= this.manager.options.max_concurrent_lifecycle_scripts) { if (PackageManager.verbose_install) { - if (PackageManager.hasEnoughTimePassedBetweenWaitingMessages()) Output.prettyErrorln("[PackageManager] waiting for {d} scripts\n", .{LifecycleScriptSubprocess.alive_count.load(.Monotonic)}); + if (PackageManager.hasEnoughTimePassedBetweenWaitingMessages()) Output.prettyErrorln("[PackageManager] waiting for {d} scripts\n", .{LifecycleScriptSubprocess.alive_count.load(.monotonic)}); } PackageManager.instance.sleep(); @@ -11371,9 +11376,9 @@ pub const PackageManager = struct { }; } - while (this.manager.pending_lifecycle_script_tasks.load(.Monotonic) > 0) { + while (this.manager.pending_lifecycle_script_tasks.load(.monotonic) > 0) { if (PackageManager.verbose_install) { - if (PackageManager.hasEnoughTimePassedBetweenWaitingMessages()) Output.prettyErrorln("[PackageManager] waiting for {d} scripts\n", .{LifecycleScriptSubprocess.alive_count.load(.Monotonic)}); + if (PackageManager.hasEnoughTimePassedBetweenWaitingMessages()) Output.prettyErrorln("[PackageManager] waiting for {d} scripts\n", .{LifecycleScriptSubprocess.alive_count.load(.monotonic)}); } if (comptime log_level.showProgress()) { @@ -11392,7 +11397,7 @@ pub const PackageManager = struct { const deps = this.tree_ids_to_trees_the_id_depends_on.at(scripts_tree_id); return (deps.subsetOf(this.completed_trees) or deps.eql(this.completed_trees)) and - LifecycleScriptSubprocess.alive_count.load(.Monotonic) < 
this.manager.options.max_concurrent_lifecycle_scripts; + LifecycleScriptSubprocess.alive_count.load(.monotonic) < this.manager.options.max_concurrent_lifecycle_scripts; } /// A tree can start installing packages when the parent has installed all its packages. If the parent @@ -11628,9 +11633,11 @@ pub const PackageManager = struct { const patch_patch, const patch_contents_hash, const patch_name_and_version_hash, const remove_patch = brk: { if (this.manager.lockfile.patched_dependencies.entries.len == 0 and this.manager.patched_dependencies_to_remove.entries.len == 0) break :brk .{ null, null, null, false }; - var sfb = std.heap.stackFallback(1024, this.lockfile.allocator); - const name_and_version = std.fmt.allocPrint(sfb.get(), "{s}@{s}", .{ name, package_version }) catch unreachable; - defer sfb.get().free(name_and_version); + var sfa = std.heap.stackFallback(1024, this.lockfile.allocator); + const alloc = sfa.get(); + const name_and_version = std.fmt.allocPrint(alloc, "{s}@{s}", .{ name, package_version }) catch unreachable; + defer alloc.free(name_and_version); + const name_and_version_hash = String.Builder.stringHash(name_and_version); const patchdep = this.lockfile.patched_dependencies.get(name_and_version_hash) orelse { @@ -12502,7 +12509,7 @@ pub const PackageManager = struct { const cwd = std.fs.cwd(); const node_modules_folder = brk: { // Attempt to open the existing node_modules folder - switch (bun.sys.openatOSPath(bun.toFD(cwd), bun.OSPathLiteral("node_modules"), std.os.O.DIRECTORY | std.os.O.RDONLY, 0o755)) { + switch (bun.sys.openatOSPath(bun.toFD(cwd), bun.OSPathLiteral("node_modules"), bun.O.DIRECTORY | bun.O.RDONLY, 0o755)) { .result => |fd| break :brk std.fs.Dir{ .fd = fd.cast() }, .err => {}, } @@ -12805,7 +12812,7 @@ pub const PackageManager = struct { installer.installAvailablePackages(log_level, force); } - this.finished_installing.store(true, .Monotonic); + this.finished_installing.store(true, .monotonic); if (comptime 
log_level.showProgress()) { scripts_node.activate(); } @@ -12816,9 +12823,9 @@ pub const PackageManager = struct { installer.completeRemainingScripts(log_level); - while (this.pending_lifecycle_script_tasks.load(.Monotonic) > 0) { + while (this.pending_lifecycle_script_tasks.load(.monotonic) > 0) { if (PackageManager.verbose_install) { - if (PackageManager.hasEnoughTimePassedBetweenWaitingMessages()) Output.prettyErrorln("[PackageManager] waiting for {d} scripts\n", .{this.pending_lifecycle_script_tasks.load(.Monotonic)}); + if (PackageManager.hasEnoughTimePassedBetweenWaitingMessages()) Output.prettyErrorln("[PackageManager] waiting for {d} scripts\n", .{this.pending_lifecycle_script_tasks.load(.monotonic)}); } this.sleep(); @@ -12833,16 +12840,16 @@ pub const PackageManager = struct { } pub inline fn pendingTaskCount(manager: *const PackageManager) u32 { - return manager.pending_tasks.load(.Monotonic); + return manager.pending_tasks.load(.monotonic); } pub inline fn incrementPendingTasks(manager: *PackageManager, count: u32) u32 { manager.total_tasks += count; - return manager.pending_tasks.fetchAdd(count, .Monotonic); + return manager.pending_tasks.fetchAdd(count, .monotonic); } pub inline fn decrementPendingTasks(manager: *PackageManager) u32 { - return manager.pending_tasks.fetchSub(1, .Monotonic); + return manager.pending_tasks.fetchSub(1, .monotonic); } pub fn setupGlobalDir(manager: *PackageManager, ctx: Command.Context) !void { @@ -13365,7 +13372,7 @@ pub const PackageManager = struct { } if (comptime only_pre_patch) { - const pending_patch = this.pending_pre_calc_hashes.load(.Monotonic); + const pending_patch = this.pending_pre_calc_hashes.load(.monotonic); return pending_patch == 0; } @@ -13652,9 +13659,9 @@ pub const PackageManager = struct { const output_in_foreground = true; try manager.spawnPackageLifecycleScripts(ctx, scripts, log_level, output_in_foreground); - while (manager.pending_lifecycle_script_tasks.load(.Monotonic) > 0) { + while 
(manager.pending_lifecycle_script_tasks.load(.monotonic) > 0) { if (PackageManager.verbose_install) { - if (PackageManager.hasEnoughTimePassedBetweenWaitingMessages()) Output.prettyErrorln("[PackageManager] waiting for {d} scripts\n", .{manager.pending_lifecycle_script_tasks.load(.Monotonic)}); + if (PackageManager.hasEnoughTimePassedBetweenWaitingMessages()) Output.prettyErrorln("[PackageManager] waiting for {d} scripts\n", .{manager.pending_lifecycle_script_tasks.load(.monotonic)}); } manager.sleep(); diff --git a/src/install/lifecycle_script_runner.zig b/src/install/lifecycle_script_runner.zig index e40f29b6c8..f981c64152 100644 --- a/src/install/lifecycle_script_runner.zig +++ b/src/install/lifecycle_script_runner.zig @@ -94,13 +94,13 @@ pub const LifecycleScriptSubprocess = struct { if (!this.has_incremented_alive_count) { this.has_incremented_alive_count = true; - _ = alive_count.fetchAdd(1, .Monotonic); + _ = alive_count.fetchAdd(1, .monotonic); } errdefer { if (this.has_incremented_alive_count) { this.has_incremented_alive_count = false; - _ = alive_count.fetchSub(1, .Monotonic); + _ = alive_count.fetchSub(1, .monotonic); } } @@ -133,7 +133,7 @@ pub const LifecycleScriptSubprocess = struct { PackageManager.ProgressStrings.script_emoji, true, ); - if (manager.finished_installing.load(.Monotonic)) { + if (manager.finished_installing.load(.monotonic)) { scripts_node.activate(); manager.progress.refresh(); } @@ -293,7 +293,7 @@ pub const LifecycleScriptSubprocess = struct { if (this.has_incremented_alive_count) { this.has_incremented_alive_count = false; - _ = alive_count.fetchSub(1, .Monotonic); + _ = alive_count.fetchSub(1, .monotonic); } switch (status) { @@ -313,10 +313,10 @@ pub const LifecycleScriptSubprocess = struct { } if (!this.foreground and this.manager.scripts_node != null) { - if (this.manager.finished_installing.load(.Monotonic)) { + if (this.manager.finished_installing.load(.monotonic)) { this.manager.scripts_node.?.completeOne(); } else { - _ = 
@atomicRmw(usize, &this.manager.scripts_node.?.unprotected_completed_items, .Add, 1, .Monotonic); + _ = @atomicRmw(usize, &this.manager.scripts_node.?.unprotected_completed_items, .Add, 1, .monotonic); } } @@ -348,7 +348,7 @@ pub const LifecycleScriptSubprocess = struct { } // the last script finished - _ = this.manager.pending_lifecycle_script_tasks.fetchSub(1, .Monotonic); + _ = this.manager.pending_lifecycle_script_tasks.fetchSub(1, .monotonic); this.deinit(); }, .signaled => |signal| { @@ -443,7 +443,7 @@ pub const LifecycleScriptSubprocess = struct { }); } - _ = manager.pending_lifecycle_script_tasks.fetchAdd(1, .Monotonic); + _ = manager.pending_lifecycle_script_tasks.fetchAdd(1, .monotonic); lifecycle_subprocess.spawnNextScript(list.first_index) catch |err| { Output.prettyErrorln("error: Failed to run script {s} due to error {s}", .{ diff --git a/src/install/lockfile.zig b/src/install/lockfile.zig index e2bd1ddfc6..1d28bff4ea 100644 --- a/src/install/lockfile.zig +++ b/src/install/lockfile.zig @@ -86,7 +86,7 @@ const Crypto = @import("../sha.zig").Hashers; const PackageJSON = @import("../resolver/package_json.zig").PackageJSON; const StaticHashMap = @import("../StaticHashMap.zig").StaticHashMap; -const MetaHash = [std.crypto.hash.sha2.Sha512256.digest_length]u8; +const MetaHash = [std.crypto.hash.sha2.Sha512T256.digest_length]u8; const zero_hash = std.mem.zeroes(MetaHash); pub const NameHashMap = std.ArrayHashMapUnmanaged(PackageNameHash, String, ArrayIdentityContext.U64, false); pub const TrustedDependenciesSet = std.ArrayHashMapUnmanaged(TruncatedPackageNameHash, void, ArrayIdentityContext, false); @@ -2016,7 +2016,7 @@ pub fn saveToDisk(this: *Lockfile, filename: stringZ) void { bun.rand(&base64_bytes); const tmpname = std.fmt.bufPrintZ(&tmpname_buf, ".lockb-{s}.tmp", .{bun.fmt.fmtSliceHexLower(&base64_bytes)}) catch unreachable; - const file = switch (File.openat(std.fs.cwd(), tmpname, std.os.O.CREAT | std.os.O.WRONLY, 0o777)) { + const file = switch 
(File.openat(std.fs.cwd(), tmpname, bun.O.CREAT | bun.O.WRONLY, 0o777)) { .err => |err| { Output.err(err, "failed to create temporary file to save lockfile\n{}", .{}); Global.crash(); @@ -6393,30 +6393,33 @@ pub fn resolve(this: *Lockfile, package_name: []const u8, version: Dependency.Ve const max_default_trusted_dependencies = 512; -pub const default_trusted_dependencies_list: []string = brk: { +// TODO +pub const default_trusted_dependencies_list: []const []const u8 = brk: { // This file contains a list of dependencies that Bun runs `postinstall` on by default. const data = @embedFile("./default-trusted-dependencies.txt"); @setEvalBranchQuota(999999); - var buf: [max_default_trusted_dependencies]string = undefined; + var buf: [max_default_trusted_dependencies][]const u8 = undefined; var i: usize = 0; var iter = std.mem.tokenizeAny(u8, data, " \r\n\t"); - while (iter.next()) |dep| { - buf[i] = dep; + while (iter.next()) |package_ptr| { + const package = package_ptr[0..].*; + buf[i] = &package; i += 1; } const Sorter = struct { - pub fn lessThan(_: void, lhs: string, rhs: string) bool { + pub fn lessThan(_: void, lhs: []const u8, rhs: []const u8) bool { return std.mem.order(u8, lhs, rhs) == .lt; } }; - const names = buf[0..i]; - // alphabetical so we don't need to sort in `bun pm trusted --default` - std.sort.pdq(string, names, {}, Sorter.lessThan); + std.sort.pdq([]const u8, buf[0..i], {}, Sorter.lessThan); - break :brk names; + var names: [i][]const u8 = undefined; + @memcpy(names[0..i], buf[0..i]); + const final = names; + break :brk &final; }; /// The default list of trusted dependencies is a static hashmap @@ -6446,7 +6449,8 @@ const default_trusted_dependencies = brk: { map.putAssumeCapacity(dep, {}); } - break :brk ↦ + const final = map; + break :brk &final; }; pub fn hasTrustedDependency(this: *Lockfile, name: []const u8) bool { diff --git a/src/install/migration.zig b/src/install/migration.zig index 08bcb062b6..0f4cb9e5fd 100644 --- 
a/src/install/migration.zig +++ b/src/install/migration.zig @@ -54,7 +54,12 @@ pub fn detectAndLoadOtherLockfile( @memcpy(buf[dirname.len .. dirname.len + npm_lockfile_name.len], npm_lockfile_name); buf[dirname.len + npm_lockfile_name.len] = 0; var timer = std.time.Timer.start() catch unreachable; - const lockfile = bun.sys.openat(bun.FD.cwd(), buf[0 .. dirname.len + npm_lockfile_name.len :0], std.os.O.RDONLY, 0).unwrap() catch break :npm; + const lockfile = bun.sys.openat( + bun.FD.cwd(), + buf[0 .. dirname.len + npm_lockfile_name.len :0], + bun.O.RDONLY, + 0, + ).unwrap() catch break :npm; defer _ = bun.sys.close(lockfile); var lockfile_path_buf: bun.PathBuffer = undefined; const lockfile_path = bun.getFdPathZ(lockfile, &lockfile_path_buf) catch break :npm; diff --git a/src/install/npm.zig b/src/install/npm.zig index 1663a77523..62b9e517ad 100644 --- a/src/install/npm.zig +++ b/src/install/npm.zig @@ -31,7 +31,6 @@ const VersionSlice = @import("./install.zig").VersionSlice; const ObjectPool = @import("../pool.zig").ObjectPool; const Api = @import("../api/schema.zig").Api; const DotEnv = @import("../env_loader.zig"); -const ComptimeStringMap = @import("../comptime_string_map.zig").ComptimeStringMap; const Npm = @This(); @@ -354,7 +353,7 @@ pub const OperatingSystem = enum(u16) { return (@intFromEnum(this) & other) != 0; } - pub const NameMap = ComptimeStringMap(u16, .{ + pub const NameMap = bun.ComptimeStringMap(u16, .{ .{ "aix", aix }, .{ "darwin", darwin }, .{ "freebsd", freebsd }, @@ -391,7 +390,7 @@ pub const Libc = enum(u8) { pub const glibc: u8 = 1 << 1; pub const musl: u8 = 1 << 2; - pub const NameMap = ComptimeStringMap(u8, .{ + pub const NameMap = bun.ComptimeStringMap(u8, .{ .{ "glibc", glibc }, .{ "musl", musl }, }); @@ -440,7 +439,7 @@ pub const Architecture = enum(u16) { pub const all_value: u16 = arm | arm64 | ia32 | mips | mipsel | ppc | ppc64 | s390 | s390x | x32 | x64; - pub const NameMap = ComptimeStringMap(u16, .{ + pub const NameMap = 
bun.ComptimeStringMap(u16, .{ .{ "arm", arm }, .{ "arm64", arm64 }, .{ "ia32", ia32 }, @@ -735,20 +734,20 @@ pub const PackageManifest = struct { var is_using_o_tmpfile = if (Environment.isLinux) false else {}; const file = brk: { - const flags = std.os.O.WRONLY; + const flags = bun.O.WRONLY; const mask = if (Environment.isPosix) 0o664 else 0; // Do our best to use O_TMPFILE, so that if this process is interrupted, we don't leave a temporary file behind. // O_TMPFILE is Linux-only. Not all filesystems support O_TMPFILE. // https://manpages.debian.org/testing/manpages-dev/openat.2.en.html#O_TMPFILE if (Environment.isLinux) { - switch (bun.sys.File.openat(cache_dir, ".", flags | std.os.linux.O.TMPFILE, mask)) { + switch (bun.sys.File.openat(cache_dir, ".", flags | bun.O.TMPFILE, mask)) { .err => { const warner = struct { var did_warn = std.atomic.Value(bool).init(false); pub fn warnOnce() void { - if (!did_warn.swap(true, .Monotonic)) { + if (!did_warn.swap(true, .monotonic)) { // This is not an error. Nor is it really a warning. Output.note("Linux filesystem or kernel lacks O_TMPFILE support. 
Using a fallback instead.", .{}); Output.flush(); @@ -765,7 +764,12 @@ pub const PackageManifest = struct { } } - break :brk try bun.sys.File.openat(tmpdir, path_to_use_for_opening_file, flags | std.os.O.CREAT | std.os.O.TRUNC, if (Environment.isPosix) 0o664 else 0).unwrap(); + break :brk try bun.sys.File.openat( + tmpdir, + path_to_use_for_opening_file, + flags | bun.O.CREAT | bun.O.TRUNC, + if (Environment.isPosix) 0o664 else 0, + ).unwrap(); }; { @@ -843,7 +847,7 @@ pub const PackageManifest = struct { pub usingnamespace bun.New(@This()); pub fn run(task: *bun.ThreadPool.Task) void { - const save_task: *@This() = @fieldParentPtr(@This(), "task", task); + const save_task: *@This() = @fieldParentPtr("task", task); defer { save_task.destroy(); } diff --git a/src/install/patch_install.zig b/src/install/patch_install.zig index 400af73adf..1feb8f7960 100644 --- a/src/install/patch_install.zig +++ b/src/install/patch_install.zig @@ -8,6 +8,7 @@ const Global = bun.Global; const Environment = bun.Environment; const strings = bun.strings; const MutableString = bun.MutableString; +const Progress = bun.Progress; const logger = bun.logger; const Loc = logger.Loc; @@ -23,7 +24,6 @@ pub const PatchedDep = Lockfile.PatchedDep; const ThreadPool = bun.ThreadPool; pub const Resolution = @import("./resolution.zig").Resolution; -const Progress = std.Progress; pub const PackageInstall = bun.install.PackageInstall; pub const PreparePatchPackageInstall = bun.install.PreparePatchPackageInstall; @@ -118,7 +118,7 @@ pub const PatchTask = struct { } pub fn runFromThreadPool(task: *ThreadPool.Task) void { - var patch_task: *PatchTask = @fieldParentPtr(PatchTask, "task", task); + var patch_task: *PatchTask = @fieldParentPtr("task", task); patch_task.runFromThreadPoolImpl(); } @@ -145,7 +145,7 @@ pub const PatchTask = struct { ) !void { debug("runFromThreadMainThread {s}", .{@tagName(this.callback)}); defer { - if (this.pre) _ = manager.pending_pre_calc_hashes.fetchSub(1, .Monotonic); + if 
(this.pre) _ = manager.pending_pre_calc_hashes.fetchSub(1, .monotonic); } switch (this.callback) { .calc_hash => try this.runFromMainThreadCalcHash(manager, log_level), @@ -359,7 +359,12 @@ pub const PatchTask = struct { @memcpy(buntagbuf[0..bun_tag_prefix.len], bun_tag_prefix); const hashlen = (std.fmt.bufPrint(buntagbuf[bun_tag_prefix.len..], "{x}", .{this.callback.apply.patch_hash}) catch unreachable).len; buntagbuf[bun_tag_prefix.len + hashlen] = 0; - const buntagfd = switch (bun.sys.openat(bun.toFD(patch_pkg_dir.fd), buntagbuf[0 .. bun_tag_prefix.len + hashlen :0], std.os.O.RDWR | std.os.O.CREAT, 0o666)) { + const buntagfd = switch (bun.sys.openat( + bun.toFD(patch_pkg_dir.fd), + buntagbuf[0 .. bun_tag_prefix.len + hashlen :0], + bun.O.RDWR | bun.O.CREAT, + 0o666, + )) { .result => |fd| fd, .err => |e| { return try log.addErrorFmtNoLoc(this.manager.allocator, "{}", .{e}); @@ -431,7 +436,7 @@ pub const PatchTask = struct { return null; } - const fd = switch (bun.sys.open(absolute_patchfile_path, std.os.O.RDONLY, 0)) { + const fd = switch (bun.sys.open(absolute_patchfile_path, bun.O.RDONLY, 0)) { .err => |e| { log.addErrorFmt( null, diff --git a/src/install/resolvers/folder_resolver.zig b/src/install/resolvers/folder_resolver.zig index 5d4a1ba15b..43ed4bc7a0 100644 --- a/src/install/resolvers/folder_resolver.zig +++ b/src/install/resolvers/folder_resolver.zig @@ -187,7 +187,12 @@ pub const FolderResolution = union(Tag) { ); } else { const source = brk: { - var file = bun.sys.File.from(try bun.sys.openatA(bun.FD.cwd(), abs, std.os.O.RDONLY, 0).unwrap()); + var file = bun.sys.File.from(try bun.sys.openatA( + bun.FD.cwd(), + abs, + bun.O.RDONLY, + 0, + ).unwrap()); defer file.close(); { diff --git a/src/install/windows-shim/BinLinkingShim.zig b/src/install/windows-shim/BinLinkingShim.zig index 81dac390eb..219e13dc16 100644 --- a/src/install/windows-shim/BinLinkingShim.zig +++ b/src/install/windows-shim/BinLinkingShim.zig @@ -74,7 +74,7 @@ pub const Flags = packed 
struct(u16) { } }; -pub const embedded_executable_data = @embedFile("./bun_shim_impl.exe"); +pub const embedded_executable_data = @embedFile("bun_shim_impl.exe"); fn wU8(comptime s: []const u8) []const u8 { const str = std.unicode.utf8ToUtf16LeStringLiteral(s); @@ -101,7 +101,7 @@ pub const Shebang = struct { run_with_powershell, }; - const BunExtensions = std.ComptimeStringMap(ExtensionType, .{ + const BunExtensions = std.StaticStringMap(ExtensionType).initComptime(.{ .{ wU8(".js"), .run_with_bun }, .{ wU8(".mjs"), .run_with_bun }, .{ wU8(".cjs"), .run_with_bun }, diff --git a/src/install/windows-shim/bun_shim_impl.exe b/src/install/windows-shim/bun_shim_impl.exe deleted file mode 100755 index bec89b47e4a3980a1e96e4028e6922b29112db9a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12800 zcmeG?Yj_k@mbXJ7Bsx^WLj%fCWE&wUAwiMwNV+>DSPd185E68vq)EC-kDYYvu7)Vl zbm&AWL(e$k%qX9qGP~nr6~7r=RwIw314f8|B8tN3Xk1?v0upc*!o$>_Q@5&LJcEAY zkDZ(E*16~2bI~6N& znuzQYrwIB)2wky zTpg(}Uxq4g@go6f$4S>c{3Nt}I z;_2uNfD^m`k&p!l{CRyf4?<8l@86<-3E(D?3?=#EDJgC!Az8P;suCtQ20ZESA2Dsv z*?ebW+!OOM_L==9Red(!7Dpi%Ucxl4M0URRGhKgzhIAO#r*sk?;=%X3fI1_Pv9EiA zEsYt|0K;Iv$=3_6{(ZnSc(3V1K*uy3gI-P!aMO3X|D4Ob5jzJ-Qam4>*rpT4@_wVv z3?tXue6@38QiK6!5g77fpnJEC8Iw!MYr5`jAfcrh_XjRj;(WcbFq8Ao>jR!P--^Bx zPTb|`;!Ymm#CB+6JQvmLK;oVZnHfWX@65guVF>5j0Wu zpDc5!yExzWSYT`x|JL;o^zcpV^?yZsQ8~#B(#-zc(F77Uu21apebHNgD*Y8gJZ9sG z_E-;RgX}4RV5LCVYvay(YxO0>lgNpuIPo~&l&i0hu3<0>HY!vc_SkmBLf&+J074}5 z0C*I_14iObn8h9DwxcZ5umTcH49xZCW)9(}OHAW^fb*inr}kpfK91_?4sia$xD&j1 zEMM%EHo!?}%h}N|3;hqeh%ox3vQD_vz1#pg2XZp*P@OE#^YhbU%;LvLqO34C%*Iu* zNC&~w9RM-BPJ%R!J524l0gT~b-By^z9%&8|>%Is2>ybW;*tP@IG=quh;1mgztbl4_ z5@B61#T`u2YJDOQAjI<)VGmmJCOniAm=jm)Sr~!Y698c0pq;L^ab@DlLl7mA084L-bQqoH>x~f>Gj9a4zJi;A zr$D+EfV2rGcCTjj5$r>>p zCoPP?uafd2@C0dU1RgJqi@>jzMnvEPrGXk=v;Z{D6n8}&e{ck{TvK&0Fqk?MIPsWt z`_;-uHVh8M3BGq?VUtt3mqNGqOJ-GxKJ7ewjOn;7ro-rDWRxx*l<;!*CWcG;^eTvK z=rnjd{Coz@YM4|(U&@IeiMzO_8F?8p?;n?%3BE2q!NXE2=uQqzR{Wo@J8JgHal)*5 
z{@7Wjt$5hwr~bes3Hr=j(V`gUI@1DVls zEkwF3ww}}Nlsmbm?2J!%f7XU+iu~>*W{i>Wx-U83nG~j>7F=rf$Br>=NdcMW)K68J zawNxz?Sm?GNGpdI_?tU;g?W91Llu`76KiE8z|> z0|hW4{qv`PEo4IaPUgk0FRgeJ<++0@4%ZDC*+Fj7`&GW~TUZa*3R!R?7;~Lz3nVlw zh_jbU&Z(4Z8%>~1nmZ1{B@o$_)-UeX{aU}rK^w3kdVi}|nGXx!-(dgjH~%N1`O`h_)+y9S*J%7h z`&%#GS^KlUO{OA3{-Gj1$e<#IDft=V|5ShZzXz`t5&2K~yDz{$v%mbE(fsKF)}p#7 z6yH%y!`Wbb@t3K~SYtye^XT}#r819*@A&Zej=wzquYtAa$)DBd;}5SNq5NSlkN^B= z{?z#=CSNdrn&DLJSN!CQX0zO za*0cA;~L`^-L<`_m=kyAHwKu7b}*HBJz>>gUTov!4yIuv+V1O(cMma(@M^xzEPl|{ zNc+aR2f`~&ca~Y)Y4&w?!mGP>e&)cdW`Yi$d_qzVybTS8b`n20$?R)m^QZ6he9ljA z6Ye?DmMr{=6W`^0-}IWq4wLVTINm=zo%dfqC3KZj?!R-JYGZVB#af~>`vQac>2G_E zaMO$aJ70pAy6K=XWYTbBvmvUV#) z9yf(0gtqZ1kDC~Ur$phSqVS=Yz!RdzW1{fWVvO%sxpM@3G+rge$QUA8^24$%v-iY~-xOG^A^9hcJ(}e4zPw-2RP`CHCVjR z!m5^V-zoY7>z~vg$WJS(Up3L_8}AChyi~{VHqxYT$0xwq6h`|$h|^)>G^z~^c(n$F zvcX>PU1!dkQ`zHx$w-d|idyUMz4`}P{=Air_ZV_!4E$oj!spNDFXV+4i&uF9d~wdq z)sOCZJ6rhno&PySCbqtIrb^h;ac+OTo!v)_%) zes%sc-a8(C0{KGejN%v4@1ed8#b1uzk-gv227D*BzR;VPlmz~$-T&U*GY)>8)&0zk zkA3s?-7RKCPP@uSn_qn!0F!=PUt&7-i1pl6D%}^Y$H;ekUt5t z?&<+)kB$6~nZRFxZDw}dTHgxz)dBg93{ZYOTUFes;_!=Nx)RrSo?P?d?}l{2xPlv1 z9L8~eygI*Aogc5x@61y5RQpe%esvgkxYc-PzT9jeu8h{-w6sngF@Ngj-@ZH%euE@N zsA>tuvF_;f(|tKoP1Lv~^>+5LDxf#>WC>t3h*%{eOECX_rn zz=FZRl zeCV-lhR+7<`a}7g9Vk;N&%Irtz%b$!doOI11H-L19!vVZ@2;%$#d(*a2NLZ( z)u48N()J-O-kl&1OK_i6aoT&H{Rni6DBegcc-PhXFL^)uvG+0cJ{7!IsrM~om2|U@1RP#-1`fq?leT*V^l7h---9i?pHk;Uo{@p@E;c})wf;hzWHEL1pHmH+vB0T`jOQ1M5iixc2J zrj_2hJ=guQ_01(d!|ne#eD>9QDh%)5y!!F)M?Y=oyZe#R9fhwLT+cn7`NPmQ!*do)!A6+@YF4u|Zbdi1F8Ib8-#4^=;+>XWU> z(fT!cntqyGO;1g}rmv=lrnjaKvjGj8 z33R;0E#$deP8U)wIA>n5#V*Wox)#{0mO89=EcvyyOdwT4xxubo&I(S} zWv#SSvjTqby#&7Ph90}1eEVH(b+Ha-nMJTWtJvjsps0#qmzJP>B@Xi}_*HnhT_tte zSPNU`bh$j$0*Hvvpxo|)ba1*JNJ~g4a9AyF`c3)xCCZ28Zo%Sku#+a)oUStKc>K~_ zsIcN^=$J#S%LztVz$U@Vhl^eVia-_lleN$-_EcjVA zxYl8fAc5sruDDMlfzgN=;3d^1>;$*{L2GKnbR|>~^yf$@Q&v1W1K04y*e|mqm1lu}k1G(j9tq$ulC1qjIUL{yv e)h;VM!%a+C;eIGZvC)QJh%n+OS&Kghlm7!85YR#Z diff --git 
a/src/install/windows-shim/bun_shim_impl.zig b/src/install/windows-shim/bun_shim_impl.zig index d8b0d836e4..c8cf07e09d 100644 --- a/src/install/windows-shim/bun_shim_impl.zig +++ b/src/install/windows-shim/bun_shim_impl.zig @@ -53,17 +53,7 @@ const callmod_inline = if (is_standalone) std.builtin.CallModifier.always_inline const Flags = @import("./BinLinkingShim.zig").Flags; -pub inline fn wliteral(comptime str: []const u8) []const u16 { - if (!@inComptime()) @compileError("strings.w() must be called in a comptime context"); - comptime var output: [str.len]u16 = undefined; - for (str, 0..) |c, i| { - output[i] = c; - } - const Static = struct { - pub const literal: []const u16 = output[0..output.len]; - }; - return Static.literal; -} +const wliteral = std.unicode.utf8ToUtf16LeStringLiteral; /// A copy of all ntdll declarations this program uses const nt = struct { @@ -135,7 +125,7 @@ fn debug(comptime fmt: []const u8, args: anytype) void { } fn unicodeStringToU16(str: w.UNICODE_STRING) []u16 { - return str.Buffer[0 .. str.Length / 2]; + return str.Buffer.?[0 .. str.Length / 2]; } const FILE_GENERIC_READ = w.STANDARD_RIGHTS_READ | w.FILE_READ_DATA | w.FILE_READ_ATTRIBUTES | w.FILE_READ_EA | w.SYNCHRONIZE; @@ -338,11 +328,11 @@ fn launcher(comptime mode: LauncherMode, bun_ctx: anytype) mode.RetType() { // these are all different views of the same data const image_path_b_len = if (is_standalone) ImagePathName.Length else bun_ctx.base_path.len * 2; - const image_path_u16 = (if (is_standalone) ImagePathName.Buffer else bun_ctx.base_path.ptr)[0 .. image_path_b_len / 2]; - const image_path_u8 = @as([*]u8, @ptrCast(if (is_standalone) ImagePathName.Buffer else bun_ctx.base_path.ptr))[0..image_path_b_len]; + const image_path_u16 = (if (is_standalone) ImagePathName.Buffer.? else bun_ctx.base_path.ptr)[0 .. image_path_b_len / 2]; + const image_path_u8 = @as([*]u8, @ptrCast(if (is_standalone) ImagePathName.Buffer.? 
else bun_ctx.base_path.ptr))[0..image_path_b_len]; const cmd_line_b_len = CommandLine.Length; - const cmd_line_u16 = CommandLine.Buffer[0 .. cmd_line_b_len / 2]; + const cmd_line_u16 = CommandLine.Buffer.?[0 .. cmd_line_b_len / 2]; const cmd_line_u8 = @as([*]u8, @ptrCast(CommandLine.Buffer))[0..cmd_line_b_len]; assert(@intFromPtr(cmd_line_u16.ptr) % 2 == 0); // alignment assumption diff --git a/src/io/io.zig b/src/io/io.zig index 0b276013e0..240eba96ec 100644 --- a/src/io/io.zig +++ b/src/io/io.zig @@ -8,7 +8,7 @@ const JSC = bun.JSC; const log = bun.Output.scoped(.loop, false); -const os = std.os; +const posix = std.posix; const assert = bun.assert; pub const Source = @import("./source.zig").Source; @@ -18,7 +18,7 @@ pub const Loop = struct { waker: bun.Async.Waker, epoll_fd: if (Environment.isLinux) bun.FileDescriptor else u0 = if (Environment.isLinux) .zero else 0, - cached_now: os.timespec = .{ + cached_now: posix.timespec = .{ .tv_nsec = 0, .tv_sec = 0, }, @@ -32,12 +32,12 @@ pub const Loop = struct { @panic("Do not use this API on windows"); } - if (!@atomicRmw(bool, &has_loaded_loop, std.builtin.AtomicRmwOp.Xchg, true, .Monotonic)) { + if (!@atomicRmw(bool, &has_loaded_loop, std.builtin.AtomicRmwOp.Xchg, true, .monotonic)) { loop = Loop{ .waker = bun.Async.Waker.init() catch @panic("failed to initialize waker"), }; if (comptime Environment.isLinux) { - loop.epoll_fd = bun.toFD(std.os.epoll_create1(std.os.linux.EPOLL.CLOEXEC | 0) catch @panic("Failed to create epoll file descriptor")); + loop.epoll_fd = bun.toFD(std.posix.epoll_create1(std.os.linux.EPOLL.CLOEXEC | 0) catch @panic("Failed to create epoll file descriptor")); { var epoll = std.mem.zeroes(std.os.linux.epoll_event); @@ -45,7 +45,7 @@ pub const Loop = struct { epoll.data.ptr = @intFromPtr(&loop); const rc = std.os.linux.epoll_ctl(loop.epoll_fd.cast(), std.os.linux.EPOLL.CTL_ADD, loop.waker.getFd().cast(), &epoll); - switch (std.os.linux.getErrno(rc)) { + switch (bun.C.getErrno(rc)) { .SUCCESS => 
{}, else => |err| bun.Output.panic("Failed to wait on epoll {s}", .{@tagName(err)}), } @@ -140,7 +140,7 @@ pub const Loop = struct { std.math.maxInt(i32), ); - switch (std.os.linux.getErrno(rc)) { + switch (bun.C.getErrno(rc)) { .INTR => continue, .SUCCESS => {}, else => |e| bun.Output.panic("epoll_wait: {s}", .{@tagName(e)}), @@ -248,7 +248,7 @@ pub const Loop = struct { const change_count = events_list.items.len; - const rc = os.system.kevent64( + const rc = posix.system.kevent64( this.pollfd().cast(), events_list.items.ptr, @intCast(change_count), @@ -261,7 +261,7 @@ pub const Loop = struct { null, ); - switch (std.c.getErrno(rc)) { + switch (bun.C.getErrno(rc)) { .INTR => continue, .SUCCESS => {}, else => |e| bun.Output.panic("kevent64 failed: {s}", .{@tagName(e)}), @@ -270,7 +270,7 @@ pub const Loop = struct { this.updateNow(); assert(rc <= events_list.capacity); - const current_events: []std.os.darwin.kevent64_s = events_list.items.ptr[0..@intCast(rc)]; + const current_events: []std.posix.system.kevent64_s = events_list.items.ptr[0..@intCast(rc)]; for (current_events) |event| { Poll.onUpdateKQueue(event); @@ -283,7 +283,7 @@ pub const Loop = struct { } extern "C" fn clock_gettime_monotonic(sec: *i64, nsec: *i64) c_int; - pub fn updateTimespec(timespec: *os.timespec) void { + pub fn updateTimespec(timespec: *posix.timespec) void { if (comptime Environment.isLinux) { const rc = linux.clock_gettime(linux.CLOCK.MONOTONIC, timespec); assert(rc == 0); @@ -297,12 +297,12 @@ pub const Loop = struct { timespec.tv_sec = @intCast(tv_sec); timespec.tv_nsec = @intCast(tv_nsec); } else { - std.os.clock_gettime(std.os.CLOCK.MONOTONIC, timespec) catch {}; + std.posix.clock_gettime(std.posix.CLOCK.MONOTONIC, timespec) catch {}; } } }; -const EventType = if (Environment.isLinux) linux.epoll_event else std.os.system.kevent64_s; +const EventType = if (Environment.isLinux) linux.epoll_event else std.posix.system.kevent64_s; pub const Request = struct { next: ?*Request = null, @@ 
-435,26 +435,26 @@ pub const Poll = struct { pub const Set = std.EnumSet(Flags); pub const Struct = std.enums.EnumFieldStruct(Flags, bool, false); - pub fn fromKQueueEvent(kqueue_event: std.os.system.kevent64_s) Flags.Set { + pub fn fromKQueueEvent(kqueue_event: std.posix.system.kevent64_s) Flags.Set { var flags = Flags.Set{}; - if (kqueue_event.filter == std.os.system.EVFILT_READ) { + if (kqueue_event.filter == std.posix.system.EVFILT_READ) { flags.insert(Flags.readable); log("readable", .{}); - if (kqueue_event.flags & std.os.system.EV_EOF != 0) { + if (kqueue_event.flags & std.posix.system.EV_EOF != 0) { flags.insert(Flags.hup); log("hup", .{}); } - } else if (kqueue_event.filter == std.os.system.EVFILT_WRITE) { + } else if (kqueue_event.filter == std.posix.system.EVFILT_WRITE) { flags.insert(Flags.writable); log("writable", .{}); - if (kqueue_event.flags & std.os.system.EV_EOF != 0) { + if (kqueue_event.flags & std.posix.system.EV_EOF != 0) { flags.insert(Flags.hup); log("hup", .{}); } - } else if (kqueue_event.filter == std.os.system.EVFILT_PROC) { + } else if (kqueue_event.filter == std.posix.system.EVFILT_PROC) { log("proc", .{}); flags.insert(Flags.process); - } else if (kqueue_event.filter == std.os.system.EVFILT_MACHPORT) { + } else if (kqueue_event.filter == std.posix.system.EVFILT_MACHPORT) { log("machport", .{}); flags.insert(Flags.machport); } @@ -487,7 +487,7 @@ pub const Poll = struct { tag: Pollable.Tag, poll: *Poll, fd: bun.FileDescriptor, - kqueue_event: *std.os.system.kevent64_s, + kqueue_event: *std.posix.system.kevent64_s, ) void { log("register({s}, {})", .{ @tagName(action), fd }); defer { @@ -512,12 +512,12 @@ pub const Poll = struct { } } - const one_shot_flag = std.os.system.EV_ONESHOT; + const one_shot_flag = std.posix.system.EV_ONESHOT; kqueue_event.* = switch (comptime action) { .readable => .{ .ident = @as(u64, @intCast(fd.int())), - .filter = std.os.system.EVFILT_READ, + .filter = std.posix.system.EVFILT_READ, .data = 0, .fflags = 0, 
.udata = @intFromPtr(Pollable.init(tag, poll).ptr()), @@ -526,7 +526,7 @@ pub const Poll = struct { }, .writable => .{ .ident = @as(u64, @intCast(fd.int())), - .filter = std.os.system.EVFILT_WRITE, + .filter = std.posix.system.EVFILT_WRITE, .data = 0, .fflags = 0, .udata = @intFromPtr(Pollable.init(tag, poll).ptr()), @@ -535,7 +535,7 @@ pub const Poll = struct { }, .cancel => if (poll.flags.contains(.poll_readable)) .{ .ident = @as(u64, @intCast(fd.int())), - .filter = std.os.system.EVFILT_READ, + .filter = std.posix.system.EVFILT_READ, .data = 0, .fflags = 0, .udata = @intFromPtr(Pollable.init(tag, poll).ptr()), @@ -543,7 +543,7 @@ pub const Poll = struct { .ext = .{ poll.generation_number, 0 }, } else if (poll.flags.contains(.poll_writable)) .{ .ident = @as(u64, @intCast(fd.int())), - .filter = std.os.system.EVFILT_WRITE, + .filter = std.posix.system.EVFILT_WRITE, .data = 0, .fflags = 0, .udata = @intFromPtr(Pollable.init(tag, poll).ptr()), @@ -568,7 +568,7 @@ pub const Poll = struct { } pub fn onUpdateKQueue( - event: std.os.system.kevent64_s, + event: std.posix.system.kevent64_s, ) void { if (event.filter == std.c.EVFILT_MACHPORT) return; @@ -581,7 +581,7 @@ pub const Poll = struct { .empty => {}, inline else => |t| { - var this: *Pollable.Tag.Type(t) = @fieldParentPtr(Pollable.Tag.Type(t), "io_poll", poll); + var this: *Pollable.Tag.Type(t) = @alignCast(@fieldParentPtr("io_poll", poll)); if (event.flags == std.c.EV_ERROR) { log("error({d}) = {d}", .{ event.ident, event.data }); this.onIOError(bun.sys.Error.fromCode(@enumFromInt(event.data), .kevent)); @@ -603,9 +603,9 @@ pub const Poll = struct { .empty => {}, inline else => |t| { - var this: *Pollable.Tag.Type(t) = @fieldParentPtr(Pollable.Tag.Type(t), "io_poll", poll); + var this: *Pollable.Tag.Type(t) = @alignCast(@fieldParentPtr("io_poll", poll)); if (event.events & linux.EPOLL.ERR != 0) { - const errno = std.os.linux.getErrno(event.events); + const errno = bun.C.getErrno(event.events); log("error() = 
{s}", .{@tagName(errno)}); this.onIOError(bun.sys.Error.fromCode(errno, .epoll_ctl)); } else { diff --git a/src/io/io_darwin.zig b/src/io/io_darwin.zig index 577a96b6cd..71fb60d661 100644 --- a/src/io/io_darwin.zig +++ b/src/io/io_darwin.zig @@ -1,6 +1,6 @@ const std = @import("std"); const os = struct { - pub usingnamespace std.os; + pub usingnamespace std.posix; pub const EINTR = 4; pub const EAGAIN = 35; pub const EBADF = 9; @@ -28,7 +28,7 @@ const assert = bun.assert; const c = std.c; const bun = @import("root").bun; pub const darwin = struct { - pub usingnamespace os.darwin; + pub usingnamespace c; pub extern "c" fn @"recvfrom$NOCANCEL"(sockfd: c.fd_t, noalias buf: *anyopaque, len: usize, flags: u32, noalias src_addr: ?*c.sockaddr, noalias addrlen: ?*c.socklen_t) isize; pub extern "c" fn @"sendto$NOCANCEL"(sockfd: c.fd_t, buf: *const anyopaque, len: usize, flags: u32, dest_addr: ?*const c.sockaddr, addrlen: c.socklen_t) isize; pub extern "c" fn @"fcntl$NOCANCEL"(fd: c.fd_t, cmd: c_int, ...) c_int; @@ -42,11 +42,11 @@ pub const darwin = struct { pub extern "c" fn @"openat$NOCANCEL"(fd: c.fd_t, path: [*:0]const u8, oflag: c_uint, ...) 
c_int; pub extern "c" fn @"read$NOCANCEL"(fd: c.fd_t, buf: [*]u8, nbyte: usize) isize; pub extern "c" fn @"pread$NOCANCEL"(fd: c.fd_t, buf: [*]u8, nbyte: usize, offset: c.off_t) isize; - pub extern "c" fn @"preadv$NOCANCEL"(fd: c.fd_t, uf: [*]std.os.iovec, count: i32, offset: c.off_t) isize; - pub extern "c" fn @"readv$NOCANCEL"(fd: c.fd_t, uf: [*]std.os.iovec, count: i32) isize; + pub extern "c" fn @"preadv$NOCANCEL"(fd: c.fd_t, uf: [*]std.posix.iovec, count: i32, offset: c.off_t) isize; + pub extern "c" fn @"readv$NOCANCEL"(fd: c.fd_t, uf: [*]std.posix.iovec, count: i32) isize; pub extern "c" fn @"write$NOCANCEL"(fd: c.fd_t, buf: [*]const u8, nbyte: usize) isize; - pub extern "c" fn @"writev$NOCANCEL"(fd: c.fd_t, buf: [*]const std.os.iovec_const, count: i32) isize; - pub extern "c" fn @"pwritev$NOCANCEL"(fd: c.fd_t, buf: [*]const std.os.iovec_const, count: i32, offset: c.off_t) isize; + pub extern "c" fn @"writev$NOCANCEL"(fd: c.fd_t, buf: [*]const std.posix.iovec_const, count: i32) isize; + pub extern "c" fn @"pwritev$NOCANCEL"(fd: c.fd_t, buf: [*]const std.posix.iovec_const, count: i32, offset: c.off_t) isize; }; const IO = @This(); @@ -55,7 +55,7 @@ pub fn init(_: u12, _: u32, waker: Waker) !IO { .waker = waker, }; } -const Kevent64 = std.os.system.kevent64_s; +const Kevent64 = std.posix.system.kevent64_s; pub const Waker = struct { kq: os.fd_t, machport: *anyopaque = undefined, @@ -82,7 +82,7 @@ pub const Waker = struct { bun.JSC.markBinding(@src()); var events = zeroed; - _ = std.os.system.kevent64( + _ = std.posix.system.kevent64( this.kq, &events, 0, @@ -105,7 +105,7 @@ pub const Waker = struct { ) bool; pub fn init() !Waker { - return initWithFileDescriptor(bun.default_allocator, try std.os.kqueue()); + return initWithFileDescriptor(bun.default_allocator, try std.posix.kqueue()); } pub fn initWithFileDescriptor(allocator: std.mem.Allocator, kq: i32) !Waker { @@ -139,7 +139,7 @@ pub const Waker = struct { // events[0].data = 0; // events[0].fflags = 
c.NOTE_TRIGGER; // events[0].udata = 0; -// const errno = std.os.system.kevent64( +// const errno = std.posix.system.kevent64( // this.kq, // &events, // 1, @@ -150,7 +150,7 @@ pub const Waker = struct { // ); // if (errno < 0) { -// return asError(std.c.getErrno(errno)); +// return asError(bun.C.getErrno(errno)); // } // } @@ -164,7 +164,7 @@ pub const Waker = struct { // events[0].data = 0; // events[0].udata = 0; -// const errno = std.os.system.kevent64( +// const errno = std.posix.system.kevent64( // this.kq, // &events, // 1, @@ -174,7 +174,7 @@ pub const Waker = struct { // null, // ); // if (errno < 0) { -// return asError(std.c.getErrno(errno)); +// return asError(bun.C.getErrno(errno)); // } // return @as(u64, @intCast(errno)); @@ -191,7 +191,7 @@ pub const Waker = struct { // events[0].data = 0; // events[0].udata = 0; // var timespec = default_timespec; -// const errno = std.os.system.kevent64( +// const errno = std.posix.system.kevent64( // kq, // &events, // 1, diff --git a/src/io/io_linux.zig b/src/io/io_linux.zig index 60bfa1e082..4b95ceb34c 100644 --- a/src/io/io_linux.zig +++ b/src/io/io_linux.zig @@ -2,7 +2,7 @@ const std = @import("std"); const assert = bun.assert; const Platform = bun.analytics.GenerateHeader.GeneratePlatform; const os = struct { - pub usingnamespace std.os; + pub usingnamespace std.posix; pub const EPERM = 1; pub const ENOENT = 2; pub const ESRCH = 3; @@ -147,7 +147,7 @@ pub const Waker = struct { fd: bun.FileDescriptor, pub fn init() !Waker { - return initWithFileDescriptor(bun.toFD(try std.os.eventfd(0, 0))); + return initWithFileDescriptor(bun.toFD(try std.posix.eventfd(0, 0))); } pub fn getFd(this: *const Waker) bun.FileDescriptor { @@ -160,12 +160,12 @@ pub const Waker = struct { pub fn wait(this: Waker) void { var bytes: usize = 0; - _ = std.os.read(this.fd.cast(), @as(*[8]u8, @ptrCast(&bytes))) catch 0; + _ = std.posix.read(this.fd.cast(), @as(*[8]u8, @ptrCast(&bytes))) catch 0; } pub fn wake(this: *const Waker) void { 
var bytes: usize = 1; - _ = std.os.write( + _ = std.posix.write( this.fd.cast(), @as(*[8]u8, @ptrCast(&bytes)), ) catch 0; diff --git a/src/io/time.zig b/src/io/time.zig index 917f6bcb62..ce30e31896 100644 --- a/src/io/time.zig +++ b/src/io/time.zig @@ -23,8 +23,8 @@ pub const Time = struct { // https://opensource.apple.com/source/Libc/Libc-1158.1.2/gen/clock_gettime.c.auto.html if (is_darwin) { const darwin = struct { - const mach_timebase_info_t = std.os.darwin.mach_timebase_info_data; - extern "c" fn mach_timebase_info(info: *mach_timebase_info_t) std.os.darwin.kern_return_t; + const mach_timebase_info_t = std.posix.darwin.mach_timebase_info_data; + extern "c" fn mach_timebase_info(info: *mach_timebase_info_t) std.posix.darwin.kern_return_t; extern "c" fn mach_continuous_time() u64; }; @@ -39,8 +39,8 @@ pub const Time = struct { // CLOCK_BOOTTIME is the same as CLOCK_MONOTONIC but includes elapsed time during a suspend. // For more detail and why CLOCK_MONOTONIC_RAW is even worse than CLOCK_MONOTONIC, // see https://github.com/ziglang/zig/pull/933#discussion_r656021295. 
- var ts: std.os.timespec = undefined; - std.os.clock_gettime(std.os.CLOCK_BOOTTIME, &ts) catch @panic("CLOCK_BOOTTIME required"); + var ts: std.posix.timespec = undefined; + std.posix.clock_gettime(std.posix.CLOCK_BOOTTIME, &ts) catch @panic("CLOCK_BOOTTIME required"); break :blk @as(u64, @intCast(ts.tv_sec)) * std.time.ns_per_s + @as(u64, @intCast(ts.tv_nsec)); }; @@ -56,8 +56,8 @@ pub const Time = struct { // macos has supported clock_gettime() since 10.12: // https://opensource.apple.com/source/Libc/Libc-1158.1.2/gen/clock_gettime.3.auto.html - var ts: std.os.timespec = undefined; - std.os.clock_gettime(std.os.CLOCK_REALTIME, &ts) catch unreachable; + var ts: std.posix.timespec = undefined; + std.posix.clock_gettime(std.posix.CLOCK_REALTIME, &ts) catch unreachable; return @as(i64, ts.tv_sec) * std.time.ns_per_s + ts.tv_nsec; } diff --git a/src/js_ast.zig b/src/js_ast.zig index 2f75b137e6..ce6b83a065 100644 --- a/src/js_ast.zig +++ b/src/js_ast.zig @@ -21,7 +21,7 @@ const allocators = @import("allocators.zig"); const JSC = bun.JSC; const RefCtx = @import("./ast/base.zig").RefCtx; const JSONParser = bun.JSON; -const is_bindgen = std.meta.globalOption("bindgen", bool) orelse false; +const is_bindgen = false; const ComptimeStringMap = bun.ComptimeStringMap; const JSPrinter = @import("./js_printer.zig"); const js_lexer = @import("./js_lexer.zig"); @@ -197,7 +197,7 @@ pub fn NewBaseStore(comptime Union: anytype, comptime count: usize) type { } this.overflow.allocated = 1; } - var base_store = @fieldParentPtr(WithBase, "store", this); + var base_store: *WithBase = @fieldParentPtr("store", this); if (this.overflow.ptrs[0] == &base_store.head) { allocator.destroy(base_store); } @@ -7784,13 +7784,19 @@ pub const Macro = struct { }; pub const ASTMemoryAllocator = struct { - stack_allocator: std.heap.StackFallbackAllocator(@min(8192, std.mem.page_size)) = undefined, + const SFA = std.heap.StackFallbackAllocator(@min(8192, std.mem.page_size)); + + stack_allocator: SFA = 
undefined, bump_allocator: std.mem.Allocator = undefined, allocator: std.mem.Allocator, previous: ?*ASTMemoryAllocator = null, pub fn reset(this: *ASTMemoryAllocator) void { - this.stack_allocator.fallback_allocator = this.allocator; + this.stack_allocator = SFA{ + .buffer = undefined, + .fallback_allocator = this.allocator, + .fixed_buffer_allocator = undefined, + }; this.bump_allocator = this.stack_allocator.get(); } diff --git a/src/js_lexer/identifier_data.zig b/src/js_lexer/identifier_data.zig index c516be00bc..fc474bf553 100644 --- a/src/js_lexer/identifier_data.zig +++ b/src/js_lexer/identifier_data.zig @@ -99,7 +99,7 @@ pub fn main() anyerror!void { const id_continue_data = std.mem.asBytes(&id_continue.masks); const id_start_data = std.mem.asBytes(&id_start.masks); - try std.os.chdir(std.fs.path.dirname(@src().file).?); + try std.posix.chdir(std.fs.path.dirname(@src().file).?); var start = try std.fs.cwd().createFileZ("id_start_bitset.meta.blob", .{ .truncate = true }); try start.writeAll(std.mem.asBytes(&id_start_cached)); start.close(); @@ -120,7 +120,7 @@ pub fn main() anyerror!void { test "Check" { const id_start_cached_correct = Cache.CachedBitset{ .range = id_start_range, .len = id_start_count + 1 }; const id_continue_cached_correct = Cache.CachedBitset{ .range = id_end_range, .len = id_end_count + 1 }; - try std.os.chdir(std.fs.path.dirname(@src().file).?); + try std.posix.chdir(std.fs.path.dirname(@src().file).?); var start_cached = try std.fs.cwd().openFileZ("id_start_bitset.meta.blob", .{ .mode = .read_only }); const start_cached_data = try start_cached.readToEndAlloc(std.heap.c_allocator, 4096); @@ -143,7 +143,7 @@ test "Check" { test "Check #2" { const id_start_cached_correct = Cache.CachedBitset{ .range = id_start_range, .len = id_start_count + 1 }; const id_continue_cached_correct = Cache.CachedBitset{ .range = id_end_range, .len = id_end_count + 1 }; - try std.os.chdir(std.fs.path.dirname(@src().file).?); + try 
std.posix.chdir(std.fs.path.dirname(@src().file).?); const start_cached_data = std.mem.asBytes(&Cache.id_start_meta); try std.testing.expectEqualSlices(u8, start_cached_data, std.mem.asBytes(&id_start_cached_correct)); diff --git a/src/js_lexer_tables.zig b/src/js_lexer_tables.zig index c18bd77e87..c9107f1ab8 100644 --- a/src/js_lexer_tables.zig +++ b/src/js_lexer_tables.zig @@ -7,7 +7,6 @@ const unicode = std.unicode; const default_allocator = bun.default_allocator; const string = @import("string_types.zig").string; const CodePoint = @import("string_types.zig").CodePoint; -const ComptimeStringMap = @import("./comptime_string_map.zig").ComptimeStringMap; pub const T = enum(u8) { t_end_of_file, @@ -160,7 +159,7 @@ pub const T = enum(u8) { } }; -pub const Keywords = ComptimeStringMap(T, .{ +pub const Keywords = std.StaticStringMap(T).initComptime(.{ .{ "break", .t_break }, .{ "case", .t_case }, .{ "catch", .t_catch }, @@ -199,7 +198,7 @@ pub const Keywords = ComptimeStringMap(T, .{ .{ "with", .t_with }, }); -pub const StrictModeReservedWords = ComptimeStringMap(void, .{ +pub const StrictModeReservedWords = std.StaticStringMap(void).initComptime(.{ .{ "implements", {} }, .{ "interface", {} }, .{ "let", {} }, @@ -211,7 +210,7 @@ pub const StrictModeReservedWords = ComptimeStringMap(void, .{ .{ "yield", {} }, }); -pub const StrictModeReservedWordsRemap = ComptimeStringMap(string, .{ +pub const StrictModeReservedWordsRemap = std.StaticStringMap(string).initComptime(.{ .{ "implements", "_implements" }, .{ "interface", "_interface" }, .{ "let", "_let" }, @@ -236,7 +235,7 @@ pub const PropertyModifierKeyword = enum { p_set, p_static, - pub const List = ComptimeStringMap(PropertyModifierKeyword, .{ + pub const List = std.StaticStringMap(PropertyModifierKeyword).initComptime(.{ .{ "abstract", .p_abstract }, .{ "async", .p_async }, .{ "declare", .p_declare }, @@ -251,7 +250,7 @@ pub const PropertyModifierKeyword = enum { }); }; -pub const TypeScriptAccessibilityModifier = 
ComptimeStringMap(void, .{ +pub const TypeScriptAccessibilityModifier = std.StaticStringMap(void).initComptime(.{ .{ "override", void }, .{ "private", void }, .{ "protected", void }, @@ -259,130 +258,130 @@ pub const TypeScriptAccessibilityModifier = ComptimeStringMap(void, .{ .{ "readonly", void }, }); -pub const TokenEnumType = std.EnumArray(T, []u8); +pub const TokenEnumType = std.EnumArray(T, []const u8); pub const tokenToString = brk: { - var TEndOfFile = "end of file".*; - var TSyntaxError = "syntax error".*; - var THashbang = "hashbang comment".*; + const TEndOfFile = "end of file".*; + const TSyntaxError = "syntax error".*; + const THashbang = "hashbang comment".*; // Literals - var TNoSubstitutionTemplateLiteral = "template literal".*; - var TNumericLiteral = "number".*; - var TStringLiteral = "string".*; - var TBigIntegerLiteral = "bigint".*; + const TNoSubstitutionTemplateLiteral = "template literal".*; + const TNumericLiteral = "number".*; + const TStringLiteral = "string".*; + const TBigIntegerLiteral = "bigint".*; // Pseudo-literals - var TTemplateHead = "template literal".*; - var TTemplateMiddle = "template literal".*; - var TTemplateTail = "template literal".*; + const TTemplateHead = "template literal".*; + const TTemplateMiddle = "template literal".*; + const TTemplateTail = "template literal".*; // Punctuation - var TAmpersand = "\"&\"".*; - var TAmpersandAmpersand = "\"&&\"".*; - var TAsterisk = "\"*\"".*; - var TAsteriskAsterisk = "\"**\"".*; - var TAt = "\"@\"".*; - var TBar = "\"|\"".*; - var TBarBar = "\"||\"".*; - var TCaret = "\"^\"".*; - var TCloseBrace = "\"}\"".*; - var TCloseBracket = "\"]\"".*; - var TCloseParen = "\")\"".*; - var TColon = "\" =\"".*; - var TComma = "\",\"".*; - var TDot = "\".\"".*; - var TDotDotDot = "\"...\"".*; - var TEqualsEquals = "\"==\"".*; - var TEqualsEqualsEquals = "\"===\"".*; - var TEqualsGreaterThan = "\"=>\"".*; - var TExclamation = "\"!\"".*; - var TExclamationEquals = "\"!=\"".*; - var 
TExclamationEqualsEquals = "\"!==\"".*; - var TGreaterThan = "\">\"".*; - var TGreaterThanEquals = "\">=\"".*; - var TGreaterThanGreaterThan = "\">>\"".*; - var TGreaterThanGreaterThanGreaterThan = "\">>>\"".*; - var TLessThan = "\"<\"".*; - var TLessThanEquals = "\"<=\"".*; - var TLessThanLessThan = "\"<<\"".*; - var TMinus = "\"-\"".*; - var TMinusMinus = "\"--\"".*; - var TOpenBrace = "\"{\"".*; - var TOpenBracket = "\"[\"".*; - var TOpenParen = "\"(\"".*; - var TPercent = "\"%\"".*; - var TPlus = "\"+\"".*; - var TPlusPlus = "\"++\"".*; - var TQuestion = "\"?\"".*; - var TQuestionDot = "\"?.\"".*; - var TQuestionQuestion = "\"??\"".*; - var TSemicolon = "\";\"".*; - var TSlash = "\"/\"".*; - var TTilde = "\"~\"".*; + const TAmpersand = "\"&\"".*; + const TAmpersandAmpersand = "\"&&\"".*; + const TAsterisk = "\"*\"".*; + const TAsteriskAsterisk = "\"**\"".*; + const TAt = "\"@\"".*; + const TBar = "\"|\"".*; + const TBarBar = "\"||\"".*; + const TCaret = "\"^\"".*; + const TCloseBrace = "\"}\"".*; + const TCloseBracket = "\"]\"".*; + const TCloseParen = "\")\"".*; + const TColon = "\" =\"".*; + const TComma = "\",\"".*; + const TDot = "\".\"".*; + const TDotDotDot = "\"...\"".*; + const TEqualsEquals = "\"==\"".*; + const TEqualsEqualsEquals = "\"===\"".*; + const TEqualsGreaterThan = "\"=>\"".*; + const TExclamation = "\"!\"".*; + const TExclamationEquals = "\"!=\"".*; + const TExclamationEqualsEquals = "\"!==\"".*; + const TGreaterThan = "\">\"".*; + const TGreaterThanEquals = "\">=\"".*; + const TGreaterThanGreaterThan = "\">>\"".*; + const TGreaterThanGreaterThanGreaterThan = "\">>>\"".*; + const TLessThan = "\"<\"".*; + const TLessThanEquals = "\"<=\"".*; + const TLessThanLessThan = "\"<<\"".*; + const TMinus = "\"-\"".*; + const TMinusMinus = "\"--\"".*; + const TOpenBrace = "\"{\"".*; + const TOpenBracket = "\"[\"".*; + const TOpenParen = "\"(\"".*; + const TPercent = "\"%\"".*; + const TPlus = "\"+\"".*; + const TPlusPlus = "\"++\"".*; + const TQuestion 
= "\"?\"".*; + const TQuestionDot = "\"?.\"".*; + const TQuestionQuestion = "\"??\"".*; + const TSemicolon = "\";\"".*; + const TSlash = "\"/\"".*; + const TTilde = "\"~\"".*; // Assignments - var TAmpersandAmpersandEquals = "\"&&=\"".*; - var TAmpersandEquals = "\"&=\"".*; - var TAsteriskAsteriskEquals = "\"**=\"".*; - var TAsteriskEquals = "\"*=\"".*; - var TBarBarEquals = "\"||=\"".*; - var TBarEquals = "\"|=\"".*; - var TCaretEquals = "\"^=\"".*; - var TEquals = "\"=\"".*; - var TGreaterThanGreaterThanEquals = "\">>=\"".*; - var TGreaterThanGreaterThanGreaterThanEquals = "\">>>=\"".*; - var TLessThanLessThanEquals = "\"<<=\"".*; - var TMinusEquals = "\"-=\"".*; - var TPercentEquals = "\"%=\"".*; - var TPlusEquals = "\"+=\"".*; - var TQuestionQuestionEquals = "\"??=\"".*; - var TSlashEquals = "\"/=\"".*; + const TAmpersandAmpersandEquals = "\"&&=\"".*; + const TAmpersandEquals = "\"&=\"".*; + const TAsteriskAsteriskEquals = "\"**=\"".*; + const TAsteriskEquals = "\"*=\"".*; + const TBarBarEquals = "\"||=\"".*; + const TBarEquals = "\"|=\"".*; + const TCaretEquals = "\"^=\"".*; + const TEquals = "\"=\"".*; + const TGreaterThanGreaterThanEquals = "\">>=\"".*; + const TGreaterThanGreaterThanGreaterThanEquals = "\">>>=\"".*; + const TLessThanLessThanEquals = "\"<<=\"".*; + const TMinusEquals = "\"-=\"".*; + const TPercentEquals = "\"%=\"".*; + const TPlusEquals = "\"+=\"".*; + const TQuestionQuestionEquals = "\"??=\"".*; + const TSlashEquals = "\"/=\"".*; // Class-private fields and methods - var TPrivateIdentifier = "private identifier".*; + const TPrivateIdentifier = "private identifier".*; // Identifiers - var TIdentifier = "identifier".*; - var TEscapedKeyword = "escaped keyword".*; + const TIdentifier = "identifier".*; + const TEscapedKeyword = "escaped keyword".*; // Reserved words - var TBreak = "\"break\"".*; - var TCase = "\"case\"".*; - var TCatch = "\"catch\"".*; - var TClass = "\"class\"".*; - var TConst = "\"const\"".*; - var TContinue = 
"\"continue\"".*; - var TDebugger = "\"debugger\"".*; - var TDefault = "\"default\"".*; - var TDelete = "\"delete\"".*; - var TDo = "\"do\"".*; - var TElse = "\"else\"".*; - var TEnum = "\"enum\"".*; - var TExport = "\"export\"".*; - var TExtends = "\"extends\"".*; - var TFalse = "\"false\"".*; - var TFinally = "\"finally\"".*; - var TFor = "\"for\"".*; - var TFunction = "\"function\"".*; - var TIf = "\"if\"".*; - var TImport = "\"import\"".*; - var TIn = "\"in\"".*; - var TInstanceof = "\"instanceof\"".*; - var TNew = "\"new\"".*; - var TNull = "\"null\"".*; - var TReturn = "\"return\"".*; - var TSuper = "\"super\"".*; - var TSwitch = "\"switch\"".*; - var TThis = "\"this\"".*; - var TThrow = "\"throw\"".*; - var TTrue = "\"true\"".*; - var TTry = "\"try\"".*; - var TTypeof = "\"typeof\"".*; - var TVar = "\"var\"".*; - var TVoid = "\"void\"".*; - var TWhile = "\"while\"".*; - var TWith = "\"with\"".*; + const TBreak = "\"break\"".*; + const TCase = "\"case\"".*; + const TCatch = "\"catch\"".*; + const TClass = "\"class\"".*; + const TConst = "\"const\"".*; + const TContinue = "\"continue\"".*; + const TDebugger = "\"debugger\"".*; + const TDefault = "\"default\"".*; + const TDelete = "\"delete\"".*; + const TDo = "\"do\"".*; + const TElse = "\"else\"".*; + const TEnum = "\"enum\"".*; + const TExport = "\"export\"".*; + const TExtends = "\"extends\"".*; + const TFalse = "\"false\"".*; + const TFinally = "\"finally\"".*; + const TFor = "\"for\"".*; + const TFunction = "\"function\"".*; + const TIf = "\"if\"".*; + const TImport = "\"import\"".*; + const TIn = "\"in\"".*; + const TInstanceof = "\"instanceof\"".*; + const TNew = "\"new\"".*; + const TNull = "\"null\"".*; + const TReturn = "\"return\"".*; + const TSuper = "\"super\"".*; + const TSwitch = "\"switch\"".*; + const TThis = "\"this\"".*; + const TThrow = "\"throw\"".*; + const TTrue = "\"true\"".*; + const TTry = "\"try\"".*; + const TTypeof = "\"typeof\"".*; + const TVar = "\"var\"".*; + const TVoid = 
"\"void\"".*; + const TWhile = "\"while\"".*; + const TWith = "\"with\"".*; var tokenEnums = TokenEnumType.initUndefined(); @@ -520,7 +519,7 @@ pub const TypescriptStmtKeyword = enum { ts_stmt_global, ts_stmt_declare, - pub const List = ComptimeStringMap(TypescriptStmtKeyword, .{ + pub const List = std.StaticStringMap(TypescriptStmtKeyword).initComptime(.{ .{ "type", TypescriptStmtKeyword.ts_stmt_type, @@ -553,7 +552,7 @@ pub const TypescriptStmtKeyword = enum { }; // Error: meta is a void element tag and must neither have `children` nor use `dangerouslySetInnerHTML`. -pub const ChildlessJSXTags = ComptimeStringMap(void, .{ +pub const ChildlessJSXTags = std.StaticStringMap(void).initComptime(.{ .{ "area", void }, .{ "base", void }, .{ "br", void }, @@ -573,7 +572,7 @@ pub const ChildlessJSXTags = ComptimeStringMap(void, .{ }); // In a microbenchmark, this outperforms -pub const jsxEntity = ComptimeStringMap(CodePoint, .{ +pub const jsxEntity = std.StaticStringMap(CodePoint).initComptime(.{ .{ "Aacute", @as(CodePoint, 0x00C1) }, .{ "aacute", @as(CodePoint, 0x00E1) }, .{ "Acirc", @as(CodePoint, 0x00C2) }, diff --git a/src/js_parser.zig b/src/js_parser.zig index 8b1a4b6298..3934fd3331 100644 --- a/src/js_parser.zig +++ b/src/js_parser.zig @@ -42,7 +42,6 @@ pub const ExprNodeList = js_ast.ExprNodeList; pub const StmtNodeList = js_ast.StmtNodeList; pub const BindingNodeList = js_ast.BindingNodeList; const DeclaredSymbol = js_ast.DeclaredSymbol; -const ComptimeStringMap = @import("./comptime_string_map.zig").ComptimeStringMap; const JSC = bun.JSC; const Index = @import("./ast/base.zig").Index; @@ -824,7 +823,7 @@ pub const TypeScript = struct { else => return null, } } - pub const IMap = ComptimeStringMap(Kind, .{ + pub const IMap = std.StaticStringMap(Kind).initComptime(.{ .{ "unique", .unique }, .{ "abstract", .abstract }, .{ "asserts", .asserts }, @@ -2338,7 +2337,7 @@ const AsyncPrefixExpression = enum(u2) { is_async, is_await, - const map = 
ComptimeStringMap(AsyncPrefixExpression, .{ + const map = std.StaticStringMap(AsyncPrefixExpression).initComptime(.{ .{ "yield", .is_yield }, .{ "await", .is_await }, .{ "async", .is_async }, diff --git a/src/js_printer.zig b/src/js_printer.zig index dd0b116dde..1861990846 100644 --- a/src/js_printer.zig +++ b/src/js_printer.zig @@ -161,8 +161,8 @@ fn ws(comptime str: []const u8) Whitespacer { buf_i += 1; } } - - break :brk buf[0..buf_i]; + const final = buf[0..buf_i].*; + break :brk &final; }; }; @@ -773,6 +773,9 @@ fn NewPrinter( const Printer = @This(); + /// When Printer is used as a io.Writer, this represents it's error type, aka nothing. + pub const Error = error{}; + /// The handling of binary expressions is convoluted because we're using /// iteration on the heap instead of recursion on the call stack to avoid /// stack overflow for deeply-nested ASTs. See the comments for the similar @@ -943,7 +946,6 @@ fn NewPrinter( pub fn writeAll(p: *Printer, bytes: anytype) anyerror!void { p.print(bytes); - return; } pub fn writeByteNTimes(self: *Printer, byte: u8, n: usize) !void { @@ -958,22 +960,23 @@ fn NewPrinter( } } + pub fn writeBytesNTimes(self: *Printer, bytes: []const u8, n: usize) anyerror!void { + var i: usize = 0; + while (i < n) : (i += 1) { + try self.writeAll(bytes); + } + } + fn fmt(p: *Printer, comptime str: string, args: anytype) !void { const len = @call( - .{ - .modifier = .always_inline, - }, + .always_inline, std.fmt.count, .{ str, args }, ); - var ptr = try p.writer.reserveNext( - len, - ); + var ptr = try p.writer.reserve(len); const written = @call( - .{ - .modifier = .always_inline, - }, + .always_inline, std.fmt.bufPrint, .{ ptr[0..len], str, args }, ) catch unreachable; @@ -1537,7 +1540,7 @@ fn NewPrinter( const remainder: f64 = (float - floored); const is_integer = remainder == 0; if (float < std.math.maxInt(u52) and is_integer) { - @setFloatMode(.Optimized); + @setFloatMode(.optimized); // In JavaScript, numbers are represented as 64 
bit floats // However, they could also be signed or unsigned int 32 (when doing bit shifts) // In this case, it's always going to unsigned since that conversion has already happened. @@ -1630,26 +1633,19 @@ fn NewPrinter( return; } - std.fmt.formatFloatDecimal( - float, - .{}, - p, - ) catch unreachable; + p.fmt("{d}", .{float}) catch {}; } pub fn printQuotedUTF16(e: *Printer, text: []const u16, quote: u8) void { var i: usize = 0; const n: usize = text.len; - // e(text.len) catch unreachable; - outer: while (i < n) { const CodeUnitType = u32; const c: CodeUnitType = text[i]; i += 1; - // TODO: here switch (c) { // Special-case the null character since it may mess with code written in C @@ -5486,8 +5482,8 @@ pub fn NewWriter( comptime writeAllFn: fn (ctx: *ContextType, buf: anytype) anyerror!usize, comptime getLastByte: fn (ctx: *const ContextType) u8, comptime getLastLastByte: fn (ctx: *const ContextType) u8, - comptime reserveNext: fn (ctx: *ContextType, count: u32) anyerror![*]u8, - comptime advanceBy: fn (ctx: *ContextType, count: u32) void, + comptime reserveNext: fn (ctx: *ContextType, count: u64) anyerror![*]u8, + comptime advanceBy: fn (ctx: *ContextType, count: u64) void, ) type { return struct { const Self = @This(); @@ -5543,11 +5539,11 @@ pub fn NewWriter( return @call(bun.callmod_inline, getLastLastByte, .{&writer.ctx}); } - pub fn reserve(writer: *Self, count: u32) anyerror![*]u8 { + pub fn reserve(writer: *Self, count: u64) anyerror![*]u8 { return try reserveNext(&writer.ctx, count); } - pub fn advance(writer: *Self, count: u32) void { + pub fn advance(writer: *Self, count: u64) void { advanceBy(&writer.ctx, count); writer.written += @as(i32, @intCast(count)); } @@ -5608,14 +5604,14 @@ pub const DirectWriter = struct { handle: FileDescriptorType, pub fn write(writer: *DirectWriter, buf: []const u8) !usize { - return try std.os.write(writer.handle, buf); + return try std.posix.write(writer.handle, buf); } pub fn writeAll(writer: *DirectWriter, buf: 
[]const u8) !void { - _ = try std.os.write(writer.handle, buf); + _ = try std.posix.write(writer.handle, buf); } - pub const Error = std.os.WriteError; + pub const Error = std.posix.WriteError; }; // Unbuffered 653ms @@ -5677,11 +5673,11 @@ const FileWriterInternal = struct { return this.last_bytes[0]; } - pub fn reserveNext(_: *FileWriterInternal, count: u32) anyerror![*]u8 { + pub fn reserveNext(_: *FileWriterInternal, count: u64) anyerror![*]u8 { try buffer.growIfNeeded(count); return @as([*]u8, @ptrCast(&buffer.list.items.ptr[buffer.list.items.len])); } - pub fn advanceBy(this: *FileWriterInternal, count: u32) void { + pub fn advanceBy(this: *FileWriterInternal, count: u64) void { if (comptime Environment.isDebug) bun.assert(buffer.list.items.len + count <= buffer.list.capacity); buffer.list.items = buffer.list.items.ptr[0 .. buffer.list.items.len + count]; @@ -5712,22 +5708,22 @@ const FileWriterInternal = struct { const remain = first.len + second.len; const third: []const u8 = result[remain..]; - var vecs = [_]std.os.iovec_const{ + var vecs = [_]std.posix.iovec_const{ .{ - .iov_base = first.ptr, - .iov_len = first.len, + .base = first.ptr, + .len = first.len, }, .{ - .iov_base = second.ptr, - .iov_len = second.len, + .base = second.ptr, + .len = second.len, }, .{ - .iov_base = third.ptr, - .iov_len = third.len, + .base = third.ptr, + .len = third.len, }, }; - const written = try std.os.writev(ctx.file.handle, vecs[0..@as(usize, if (third.len > 0) 3 else 2)]); + const written = try std.posix.writev(ctx.file.handle, vecs[0..@as(usize, if (third.len > 0) 3 else 2)]); if (written == 0 or result.len - written == 0) return; result = result[written..]; }, @@ -5796,11 +5792,12 @@ pub const BufferWriter = struct { return ctx.last_bytes[0]; } - pub fn reserveNext(ctx: *BufferWriter, count: u32) anyerror![*]u8 { + pub fn reserveNext(ctx: *BufferWriter, count: u64) anyerror![*]u8 { try ctx.buffer.growIfNeeded(count); return @as([*]u8, 
@ptrCast(&ctx.buffer.list.items.ptr[ctx.buffer.list.items.len])); } - pub fn advanceBy(ctx: *BufferWriter, count: u32) void { + + pub fn advanceBy(ctx: *BufferWriter, count: u64) void { if (comptime Environment.isDebug) bun.assert(ctx.buffer.list.items.len + count <= ctx.buffer.list.capacity); ctx.buffer.list.items = ctx.buffer.list.items.ptr[0 .. ctx.buffer.list.items.len + count]; diff --git a/src/jsc.zig b/src/jsc.zig index 57b7fe5db2..872a47e5f6 100644 --- a/src/jsc.zig +++ b/src/jsc.zig @@ -5,7 +5,7 @@ pub usingnamespace @import("./bun.js/bindings/exports.zig"); pub usingnamespace @import("./bun.js/event_loop.zig"); pub usingnamespace @import("./bun.js/javascript.zig"); pub usingnamespace @import("./bun.js/module_loader.zig"); -pub const is_bindgen = @import("std").meta.globalOption("bindgen", bool) orelse false; +pub const is_bindgen = false; pub const Debugger = @import("./bun.js/bindings/Debugger.zig").Debugger; pub const napi = @import("./napi/napi.zig"); pub const RareData = @import("./bun.js/rare_data.zig"); diff --git a/src/libarchive/libarchive.zig b/src/libarchive/libarchive.zig index 43cbe5354a..a3bc7787d1 100644 --- a/src/libarchive/libarchive.zig +++ b/src/libarchive/libarchive.zig @@ -16,9 +16,9 @@ const std = @import("std"); const struct_archive = lib.struct_archive; const JSC = bun.JSC; pub const Seek = enum(c_int) { - set = std.os.SEEK_SET, - current = std.os.SEEK_CUR, - end = std.os.SEEK_END, + set = std.posix.SEEK_SET, + current = std.posix.SEEK_CUR, + end = std.posix.SEEK_END, }; pub const Flags = struct { @@ -599,24 +599,24 @@ pub const Archive = struct { if (comptime Environment.isWindows) { try bun.MakePath.makePath(u16, dir, path); } else { - std.os.mkdiratZ(dir_fd, path, @as(u32, @intCast(mode))) catch |err| { + std.posix.mkdiratZ(dir_fd, pathname, @as(u32, @intCast(mode))) catch |err| { // It's possible for some tarballs to return a directory twice, with and // without `./` in the beginning. 
So if it already exists, continue to the // next entry. if (err == error.PathAlreadyExists or err == error.NotDir) continue; bun.makePath(dir, std.fs.path.dirname(path_slice) orelse return err) catch {}; - std.os.mkdiratZ(dir_fd, path, 0o777) catch {}; + std.posix.mkdiratZ(dir_fd, pathname, 0o777) catch {}; }; } }, Kind.sym_link => { const link_target = lib.archive_entry_symlink(entry).?; if (Environment.isPosix) { - std.os.symlinkatZ(link_target, dir_fd, path) catch |err| brk: { + std.posix.symlinkatZ(link_target, dir_fd, path) catch |err| brk: { switch (err) { error.AccessDenied, error.FileNotFound => { dir.makePath(std.fs.path.dirname(path_slice) orelse return err) catch {}; - break :brk try std.os.symlinkatZ(link_target, dir_fd, path); + break :brk try std.posix.symlinkatZ(link_target, dir_fd, path); }, else => { return err; @@ -630,7 +630,7 @@ pub const Archive = struct { const file_handle_native = brk: { if (Environment.isWindows) { - const flags = std.os.O.WRONLY | std.os.O.CREAT | std.os.O.TRUNC; + const flags = bun.O.WRONLY | bun.O.CREAT | bun.O.TRUNC; switch (bun.sys.openatWindows(bun.toFD(dir_fd), path, flags)) { .result => |fd| break :brk fd, .err => |e| switch (e.errno) { diff --git a/src/linux_c.zig b/src/linux_c.zig index 601b77ac4e..bfb2aca505 100644 --- a/src/linux_c.zig +++ b/src/linux_c.zig @@ -294,7 +294,7 @@ pub const SystemErrno = enum(u8) { }; pub const preallocate_length = 2048 * 1024; -pub fn preallocate_file(fd: std.os.fd_t, offset: std.os.off_t, len: std.os.off_t) anyerror!void { +pub fn preallocate_file(fd: std.posix.fd_t, offset: std.posix.off_t, len: std.posix.off_t) anyerror!void { // https://gist.github.com/Jarred-Sumner/b37b93399b63cbfd86e908c59a0a37df // ext4 NVME Linux kernel 5.17.0-1016-oem x86_64 // @@ -382,7 +382,7 @@ pub fn preallocate_file(fd: std.os.fd_t, offset: std.os.off_t, len: std.os.off_t /// transfers up to len bytes of data from the file descriptor fd_in /// to the file descriptor fd_out, where one of the file 
descriptors /// must refer to a pipe. -pub fn splice(fd_in: std.os.fd_t, off_in: ?*i64, fd_out: std.os.fd_t, off_out: ?*i64, len: usize, flags: u32) usize { +pub fn splice(fd_in: std.posix.fd_t, off_in: ?*i64, fd_out: std.posix.fd_t, off_out: ?*i64, len: usize, flags: u32) usize { return std.os.linux.syscall6( .splice, @as(usize, @bitCast(@as(isize, fd_in))), @@ -448,7 +448,7 @@ pub fn getSystemLoadavg() [3]f64 { } pub fn get_version(name_buffer: *[bun.HOST_NAME_MAX]u8) []const u8 { - const uts = std.os.uname(); + const uts = std.posix.uname(); const result = bun.sliceTo(&uts.version, 0); bun.copy(u8, name_buffer, result); @@ -456,7 +456,7 @@ pub fn get_version(name_buffer: *[bun.HOST_NAME_MAX]u8) []const u8 { } pub fn get_release(name_buffer: *[bun.HOST_NAME_MAX]u8) []const u8 { - const uts = std.os.uname(); + const uts = std.posix.uname(); const result = bun.sliceTo(&uts.release, 0); bun.copy(u8, name_buffer, result); @@ -475,11 +475,11 @@ pub const POSIX_SPAWN = struct { pub const SETSID = 0x80; }; -const fd_t = std.os.fd_t; -const pid_t = std.os.pid_t; -const mode_t = std.os.mode_t; +const fd_t = std.posix.fd_t; +const pid_t = std.posix.pid_t; +const mode_t = std.posix.mode_t; const sigset_t = std.c.sigset_t; -const sched_param = std.os.sched_param; +const sched_param = std.posix.sched_param; pub const posix_spawnattr_t = extern struct { __flags: c_short, @@ -548,7 +548,7 @@ const posix_spawn_file_actions_addfchdir_np_type = *const fn (actions: *posix_sp const posix_spawn_file_actions_addchdir_np_type = *const fn (actions: *posix_spawn_file_actions_t, path: [*:0]const u8) c_int; /// When not available, these functions will return 0. 
-pub fn posix_spawn_file_actions_addfchdir_np(actions: *posix_spawn_file_actions_t, filedes: std.os.fd_t) c_int { +pub fn posix_spawn_file_actions_addfchdir_np(actions: *posix_spawn_file_actions_t, filedes: std.posix.fd_t) c_int { const function = bun.C.dlsym(posix_spawn_file_actions_addfchdir_np_type, "posix_spawn_file_actions_addfchdir_np") orelse return 0; return function(actions, filedes); @@ -561,7 +561,7 @@ pub fn posix_spawn_file_actions_addchdir_np(actions: *posix_spawn_file_actions_t return function(actions, path); } -pub extern fn vmsplice(fd: c_int, iovec: [*]const std.os.iovec, iovec_count: usize, flags: u32) isize; +pub extern fn vmsplice(fd: c_int, iovec: [*]const std.posix.iovec, iovec_count: usize, flags: u32) isize; const net_c = @cImport({ @cInclude("ifaddrs.h"); // getifaddrs, freeifaddrs @@ -586,13 +586,34 @@ pub const F = struct { }; pub const Mode = u32; -pub const E = std.os.E; -pub const S = std.os.S; +pub const E = std.posix.E; +pub const S = std.posix.S; pub extern "c" fn umask(Mode) Mode; pub fn getErrno(rc: anytype) E { - return std.c.getErrno(rc); + const Type = @TypeOf(rc); + + return switch (Type) { + // raw system calls from std.os.linux.* will return usize + // the errno is stored in this value + usize => { + const signed: isize = @bitCast(rc); + const int = if (signed > -4096 and signed < 0) -signed else 0; + return @enumFromInt(int); + }, + + // glibc system call wrapper returns i32/int + // the errno is stored in a thread local variable + // + // TODO: the inclusion of 'u32' and 'isize' seems suspicous + i32, c_int, u32, isize, i64 => if (rc == -1) + @enumFromInt(std.c._errno().*) + else + .SUCCESS, + + else => @compileError("Not implemented yet for type " ++ @typeName(Type)), + }; } pub const getuid = std.os.linux.getuid; @@ -623,20 +644,20 @@ pub const RWFFlagSupport = enum(u8) { } pub fn disable() void { - rwf_bool.store(.unsupported, .Monotonic); + rwf_bool.store(.unsupported, .monotonic); } /// Workaround for 
https://github.com/google/gvisor/issues/2601 pub fn isMaybeSupported() bool { if (comptime !bun.Environment.isLinux) return false; - switch (rwf_bool.load(.Monotonic)) { + switch (rwf_bool.load(.monotonic)) { .unknown => { if (isLinuxKernelVersionWithBuggyRWF_NONBLOCK()) { - rwf_bool.store(.unsupported, .Monotonic); + rwf_bool.store(.unsupported, .monotonic); return false; } - rwf_bool.store(.supported, .Monotonic); + rwf_bool.store(.supported, .monotonic); return true; }, .supported => { @@ -653,17 +674,17 @@ pub const RWFFlagSupport = enum(u8) { pub extern "C" fn sys_preadv2( fd: c_int, - iov: [*]const std.os.iovec, + iov: [*]const std.posix.iovec, iovcnt: c_int, - offset: std.os.off_t, + offset: std.posix.off_t, flags: c_uint, ) isize; pub extern "C" fn sys_pwritev2( fd: c_int, - iov: [*]const std.os.iovec_const, + iov: [*]const std.posix.iovec_const, iovcnt: c_int, - offset: std.os.off_t, + offset: std.posix.off_t, flags: c_uint, ) isize; diff --git a/src/linux_memfd_allocator.zig b/src/linux_memfd_allocator.zig index eae026b85c..eba502454a 100644 --- a/src/linux_memfd_allocator.zig +++ b/src/linux_memfd_allocator.zig @@ -27,11 +27,11 @@ pub const LinuxMemFdAllocator = struct { pub usingnamespace bun.New(LinuxMemFdAllocator); pub fn ref(this: *LinuxMemFdAllocator) void { - _ = this.ref_count.fetchAdd(1, .Monotonic); + _ = this.ref_count.fetchAdd(1, .monotonic); } pub fn deref(this: *LinuxMemFdAllocator) void { - if (this.ref_count.fetchSub(1, .Monotonic) == 1) { + if (this.ref_count.fetchSub(1, .monotonic) == 1) { _ = bun.sys.close(this.fd); this.destroy(); } @@ -82,17 +82,20 @@ pub const LinuxMemFdAllocator = struct { }; }; - pub fn alloc(this: *LinuxMemFdAllocator, len: usize, offset: usize, flags: u32) bun.JSC.Maybe(bun.JSC.WebCore.Blob.ByteStore) { + pub fn alloc(this: *LinuxMemFdAllocator, len: usize, offset: usize, flags: std.posix.MAP) bun.JSC.Maybe(bun.JSC.WebCore.Blob.ByteStore) { var size = len; // size rounded up to nearest page size += (size + 
std.mem.page_size - 1) & std.mem.page_size; + var flags_mut = flags; + flags_mut.TYPE = .SHARED; + switch (bun.sys.mmap( null, @min(size, this.size), - std.os.PROT.READ | std.os.PROT.WRITE, - std.os.MAP.SHARED | flags, + std.posix.PROT.READ | std.posix.PROT.WRITE, + flags_mut, this.fd, offset, )) { @@ -133,16 +136,16 @@ pub const LinuxMemFdAllocator = struct { const rc = brk: { var label_buf: [128]u8 = undefined; - const label = std.fmt.bufPrintZ(&label_buf, "memfd-num-{d}", .{memfd_counter.fetchAdd(1, .Monotonic)}) catch ""; + const label = std.fmt.bufPrintZ(&label_buf, "memfd-num-{d}", .{memfd_counter.fetchAdd(1, .monotonic)}) catch ""; // Using huge pages was slower. - const code = std.os.linux.memfd_create(label.ptr, std.os.linux.MFD.CLOEXEC | 0); + const code = std.c.memfd_create(label.ptr, std.os.linux.MFD.CLOEXEC | 0); bun.sys.syslog("memfd_create({s}) = {d}", .{ label, code }); break :brk code; }; - switch (std.os.linux.getErrno(rc)) { + switch (bun.C.getErrno(rc)) { .SUCCESS => {}, else => |errno| { bun.sys.syslog("Failed to create memfd: {s}", .{@tagName(errno)}); @@ -189,7 +192,7 @@ pub const LinuxMemFdAllocator = struct { .size = bytes.len, }); - switch (linux_memfd_allocator.alloc(bytes.len, 0, 0)) { + switch (linux_memfd_allocator.alloc(bytes.len, 0, .{ .TYPE = .SHARED })) { .result => |res| { return .{ .result = res }; }, diff --git a/src/lock.zig b/src/lock.zig index 76d35810ea..1becb95387 100644 --- a/src/lock.zig +++ b/src/lock.zig @@ -24,7 +24,7 @@ pub const Mutex = struct { inline fn acquireFast(self: *Mutex, comptime strong: bool) bool { // On x86, "lock bts" uses less i-cache & can be faster than "lock cmpxchg" below. 
if (comptime is_x86) { - return self.state.bitSet(@ctz(@as(u32, LOCKED)), .Acquire) == UNLOCKED; + return self.state.bitSet(@ctz(@as(u32, LOCKED)), .acquire) == UNLOCKED; } const cas_fn = comptime switch (strong) { @@ -36,8 +36,8 @@ pub const Mutex = struct { &self.state, UNLOCKED, LOCKED, - .Acquire, - .Monotonic, + .acquire, + .monotonic, ) == null; } @@ -51,12 +51,12 @@ pub const Mutex = struct { while (spin > 0) : (spin -= 1) { std.atomic.spinLoopHint(); - switch (self.state.load(.Monotonic)) { + switch (self.state.load(.monotonic)) { UNLOCKED => _ = self.state.cmpxchgWeak( UNLOCKED, LOCKED, - .Acquire, - .Monotonic, + .acquire, + .monotonic, ) orelse return, LOCKED => continue, CONTENDED => break, @@ -73,18 +73,18 @@ pub const Mutex = struct { while (true) : (Futex.wait(&self.state, CONTENDED, null) catch unreachable) { // On x86, "xchg" can be faster than "lock cmpxchg" below. if (comptime is_x86) { - switch (self.state.swap(CONTENDED, .Acquire)) { + switch (self.state.swap(CONTENDED, .acquire)) { UNLOCKED => return, LOCKED, CONTENDED => continue, else => unreachable, // invalid Mutex state } } - var state = self.state.load(.Monotonic); + var state = self.state.load(.monotonic); while (state != CONTENDED) { state = switch (state) { - UNLOCKED => self.state.cmpxchgWeak(state, CONTENDED, .Acquire, .Monotonic) orelse return, - LOCKED => self.state.cmpxchgWeak(state, CONTENDED, .Monotonic, .Monotonic) orelse break, + UNLOCKED => self.state.cmpxchgWeak(state, CONTENDED, .acquire, .monotonic) orelse return, + LOCKED => self.state.cmpxchgWeak(state, CONTENDED, .monotonic, .monotonic) orelse break, CONTENDED => unreachable, // checked above else => unreachable, // invalid Mutex state }; @@ -93,7 +93,7 @@ pub const Mutex = struct { } pub fn release(self: *Mutex) void { - switch (self.state.swap(UNLOCKED, .Release)) { + switch (self.state.swap(UNLOCKED, .release)) { UNLOCKED => unreachable, // released without being acquired LOCKED => {}, CONTENDED => 
Futex.wake(&self.state, 1), @@ -118,7 +118,7 @@ pub const Lock = struct { } pub inline fn assertUnlocked(this: *Lock, comptime message: []const u8) void { - if (this.mutex.state.load(.Monotonic) != 0) { + if (this.mutex.state.load(.monotonic) != 0) { @panic(message); } } diff --git a/src/main.zig b/src/main.zig index 51802b9c10..4488bcb900 100644 --- a/src/main.zig +++ b/src/main.zig @@ -7,8 +7,8 @@ const Output = bun.Output; const Environment = bun.Environment; pub const panic = bun.crash_handler.panic; -pub const std_options = struct { - pub const enable_segfault_handler = !bun.crash_handler.enable; +pub const std_options = std.Options{ + .enable_segfault_handler = !bun.crash_handler.enable, }; pub const io_mode = .blocking; diff --git a/src/memory_allocator.zig b/src/memory_allocator.zig index 196b658461..159cbc15d2 100644 --- a/src/memory_allocator.zig +++ b/src/memory_allocator.zig @@ -185,11 +185,11 @@ const HugeAllocator = struct { assert(len > 0); assert(std.math.isPowerOfTwo(alignment)); - const slice = std.os.mmap( + const slice = std.posix.mmap( null, len, - std.os.PROT.READ | std.os.PROT.WRITE, - std.os.MAP.ANONYMOUS | std.os.MAP.PRIVATE, + std.posix.PROT.READ | std.posix.PROT.WRITE, + std.posix.MAP.ANONYMOUS | std.posix.MAP.PRIVATE, -1, 0, ) catch @@ -216,7 +216,7 @@ const HugeAllocator = struct { _: u29, _: usize, ) void { - std.os.munmap(@alignCast(buf)); + std.posix.munmap(@alignCast(buf)); } }; diff --git a/src/meta.zig b/src/meta.zig index cf49553771..24d3d0ed64 100644 --- a/src/meta.zig +++ b/src/meta.zig @@ -19,8 +19,7 @@ pub fn typeName(comptime Type: type) []const u8 { // partially emulates behaviour of @typeName in previous Zig versions, // converting "some.namespace.MyType" to "MyType" -pub fn typeBaseName(comptime fullname: []const u8) []const u8 { - +pub fn typeBaseName(comptime fullname: [:0]const u8) [:0]const u8 { // leave type name like "namespace.WrapperType(namespace.MyType)" as it is const baseidx = comptime std.mem.indexOf(u8, 
fullname, "("); if (baseidx != null) return fullname; @@ -55,3 +54,23 @@ pub fn banFieldType(comptime Container: type, comptime T: type) void { } } } + +// []T -> T +// *const T -> T +// *[n]T -> T +pub fn Item(comptime T: type) type { + switch (@typeInfo(T)) { + .Pointer => |ptr| { + if (ptr.size == .One) { + switch (@typeInfo(ptr.child)) { + .Array => |array| { + return array.child; + }, + else => {}, + } + } + return ptr.child; + }, + else => return std.meta.Child(T), + } +} diff --git a/src/multi_array_list.zig b/src/multi_array_list.zig index f6482b7d11..2fcd7453f3 100644 --- a/src/multi_array_list.zig +++ b/src/multi_array_list.zig @@ -579,7 +579,7 @@ pub fn MultiArrayList(comptime T: type) type { .alignment = fields[i].alignment, }; break :entry @Type(.{ .Struct = .{ - .layout = .Extern, + .layout = .@"extern", .fields = &entry_fields, .decls = &.{}, .is_tuple = false, diff --git a/src/napi/napi.zig b/src/napi/napi.zig index 21a4ac4c77..05f892a0ca 100644 --- a/src/napi/napi.zig +++ b/src/napi/napi.zig @@ -1101,12 +1101,12 @@ pub const napi_async_work = struct { } pub fn runFromThreadPool(task: *WorkPoolTask) void { - var this = @fieldParentPtr(napi_async_work, "task", task); + var this: *napi_async_work = @fieldParentPtr("task", task); this.run(); } pub fn run(this: *napi_async_work) void { - if (this.status.cmpxchgStrong(@intFromEnum(Status.pending), @intFromEnum(Status.started), .SeqCst, .SeqCst)) |state| { + if (this.status.cmpxchgStrong(@intFromEnum(Status.pending), @intFromEnum(Status.started), .seq_cst, .seq_cst)) |state| { if (state == @intFromEnum(Status.cancelled)) { if (this.wait_for_deinit) { // this might cause a segfault due to Task using a linked list! 
@@ -1116,7 +1116,7 @@ pub const napi_async_work = struct { return; } this.execute.?(this.global, this.ctx); - this.status.store(@intFromEnum(Status.completed), .SeqCst); + this.status.store(@intFromEnum(Status.completed), .seq_cst); this.event_loop.enqueueTaskConcurrent(this.concurrent_task.from(this, .manual_deinit)); } @@ -1130,7 +1130,7 @@ pub const napi_async_work = struct { pub fn cancel(this: *napi_async_work) bool { this.ref.unref(this.global.bunVM()); - return this.status.cmpxchgStrong(@intFromEnum(Status.cancelled), @intFromEnum(Status.pending), .SeqCst, .SeqCst) != null; + return this.status.cmpxchgStrong(@intFromEnum(Status.cancelled), @intFromEnum(Status.pending), .seq_cst, .seq_cst) != null; } pub fn deinit(this: *napi_async_work) void { @@ -1146,7 +1146,7 @@ pub const napi_async_work = struct { pub fn runFromJS(this: *napi_async_work) void { this.complete.?( this.global, - if (this.status.load(.SeqCst) == @intFromEnum(Status.cancelled)) + if (this.status.load(.seq_cst) == @intFromEnum(Status.cancelled)) napi_status.cancelled else napi_status.ok, @@ -1440,7 +1440,7 @@ pub const ThreadSafeFunction = struct { .sized => &this.sized.is_closed, .unsized => &this.unsized.is_closed, }, - .SeqCst, + .seq_cst, ); } diff --git a/src/node_fallbacks.zig b/src/node_fallbacks.zig index b3f95ac836..1bb9a91ae9 100644 --- a/src/node_fallbacks.zig +++ b/src/node_fallbacks.zig @@ -4,7 +4,6 @@ const PackageJSON = @import("./resolver/package_json.zig").PackageJSON; const logger = bun.logger; const Fs = @import("./fs.zig"); const bun = @import("root").bun; -const ComptimeStringMap = @import("./comptime_string_map.zig").ComptimeStringMap; const assert_code: string = @embedFile("./node-fallbacks/out/assert.js"); const buffer_code: string = @embedFile("./node-fallbacks/out/buffer.js"); @@ -389,30 +388,30 @@ pub const FallbackModule = struct { }; }; -pub const Map = ComptimeStringMap(FallbackModule, .{ - &.{ "assert", FallbackModule.assert }, - &.{ "buffer", 
FallbackModule.buffer }, - &.{ "console", FallbackModule.console }, - &.{ "constants", FallbackModule.constants }, - &.{ "crypto", FallbackModule.crypto }, - &.{ "domain", FallbackModule.domain }, - &.{ "events", FallbackModule.events }, - &.{ "http", FallbackModule.http }, - &.{ "https", FallbackModule.https }, - &.{ "net", FallbackModule.net }, - &.{ "os", FallbackModule.os }, - &.{ "path", FallbackModule.path }, - &.{ "process", FallbackModule.process }, - &.{ "punycode", FallbackModule.punycode }, - &.{ "querystring", FallbackModule.querystring }, - &.{ "stream", FallbackModule.stream }, - &.{ "string_decoder", FallbackModule.string_decoder }, - &.{ "sys", FallbackModule.sys }, - &.{ "timers", FallbackModule.timers }, - &.{ "tty", FallbackModule.tty }, - &.{ "url", FallbackModule.url }, - &.{ "util", FallbackModule.util }, - &.{ "zlib", FallbackModule.zlib }, +pub const Map = bun.ComptimeStringMap(FallbackModule, .{ + .{ "assert", FallbackModule.assert }, + .{ "buffer", FallbackModule.buffer }, + .{ "console", FallbackModule.console }, + .{ "constants", FallbackModule.constants }, + .{ "crypto", FallbackModule.crypto }, + .{ "domain", FallbackModule.domain }, + .{ "events", FallbackModule.events }, + .{ "http", FallbackModule.http }, + .{ "https", FallbackModule.https }, + .{ "net", FallbackModule.net }, + .{ "os", FallbackModule.os }, + .{ "path", FallbackModule.path }, + .{ "process", FallbackModule.process }, + .{ "punycode", FallbackModule.punycode }, + .{ "querystring", FallbackModule.querystring }, + .{ "stream", FallbackModule.stream }, + .{ "string_decoder", FallbackModule.string_decoder }, + .{ "sys", FallbackModule.sys }, + .{ "timers", FallbackModule.timers }, + .{ "tty", FallbackModule.tty }, + .{ "url", FallbackModule.url }, + .{ "util", FallbackModule.util }, + .{ "zlib", FallbackModule.zlib }, }); pub fn contentsFromPath(path: string) ?string { diff --git a/src/open.zig b/src/open.zig index 27cfd0db02..3c867f9026 100644 --- a/src/open.zig +++ 
b/src/open.zig @@ -10,7 +10,7 @@ const default_allocator = bun.default_allocator; const C = bun.C; const std = @import("std"); const DotEnv = @import("env_loader.zig"); -const ComptimeStringMap = @import("./comptime_string_map.zig").ComptimeStringMap; + const opener = switch (@import("builtin").target.os.tag) { .macos => "/usr/bin/open", .windows => "start", @@ -66,21 +66,21 @@ pub const Editor = enum(u8) { const StringMap = std.EnumMap(Editor, string); const StringArrayMap = std.EnumMap(Editor, []const [:0]const u8); - const name_map = ComptimeStringMap(Editor, .{ - .{ "sublime", Editor.sublime }, - .{ "subl", Editor.sublime }, - .{ "vscode", Editor.vscode }, - .{ "code", Editor.vscode }, - .{ "textmate", Editor.textmate }, - .{ "mate", Editor.textmate }, - .{ "atom", Editor.atom }, - .{ "idea", Editor.intellij }, - .{ "webstorm", Editor.webstorm }, - .{ "nvim", Editor.neovim }, - .{ "neovim", Editor.neovim }, - .{ "vim", Editor.vim }, - .{ "vi", Editor.vim }, - .{ "emacs", Editor.emacs }, + const name_map = std.StaticStringMap(Editor).initComptime(.{ + .{ "sublime", .sublime }, + .{ "subl", .sublime }, + .{ "vscode", .vscode }, + .{ "code", .vscode }, + .{ "textmate", .textmate }, + .{ "mate", .textmate }, + .{ "atom", .atom }, + .{ "idea", .intellij }, + .{ "webstorm", .webstorm }, + .{ "nvim", .neovim }, + .{ "neovim", .neovim }, + .{ "vim", .vim }, + .{ "vi", .vim }, + .{ "emacs", .emacs }, }); pub fn byName(name: string) ?Editor { @@ -139,8 +139,8 @@ pub const Editor = enum(u8) { pub fn byFallbackPathForEditor(editor: Editor, out: ?*[]const u8) bool { if (bin_path.get(editor)) |paths| { for (paths) |path| { - if (std.os.open(path, 0, 0)) |opened| { - std.os.close(opened); + if (std.fs.cwd().openFile(path, .{})) |opened| { + opened.close(); if (out != null) { out.?.* = bun.asByteSlice(path); } @@ -327,14 +327,14 @@ pub const Editor = enum(u8) { }, } - spawned.child_process = std.ChildProcess.init(args_buf[0..i], default_allocator); + spawned.child_process = 
std.process.Child.init(args_buf[0..i], default_allocator); var thread = try std.Thread.spawn(.{}, autoClose, .{spawned}); thread.detach(); } const SpawnedEditorContext = struct { file_path_buf: [1024 + bun.MAX_PATH_BYTES]u8 = undefined, buf: [10]string = undefined, - child_process: std.ChildProcess = undefined, + child_process: std.process.Child = undefined, }; fn autoClose(spawned: *SpawnedEditorContext) void { diff --git a/src/options.zig b/src/options.zig index b872c2561e..5cdd3412e8 100644 --- a/src/options.zig +++ b/src/options.zig @@ -28,14 +28,13 @@ const Runtime = @import("./runtime.zig").Runtime; const Analytics = @import("./analytics/analytics_thread.zig"); const MacroRemap = @import("./resolver/package_json.zig").MacroMap; const DotEnv = @import("./env_loader.zig"); -const ComptimeStringMap = @import("./comptime_string_map.zig").ComptimeStringMap; const assert = bun.assert; pub const WriteDestination = enum { stdout, disk, - // eventaully: wasm + // eventually: wasm }; pub fn validatePath( @@ -50,7 +49,7 @@ pub fn validatePath( return ""; } const paths = [_]string{ cwd, rel_path }; - // TODO: switch to getFdPath()-based implemetation + // TODO: switch to getFdPath()-based implementation const out = std.fs.path.resolve(allocator, &paths) catch |err| { log.addErrorFmt( null, @@ -299,7 +298,7 @@ pub const ExternalModules = struct { "zlib", }; - pub const NodeBuiltinsMap = ComptimeStringMap(void, .{ + pub const NodeBuiltinsMap = std.StaticStringMap(void).initComptime(.{ .{ "_http_agent", {} }, .{ "_http_client", {} }, .{ "_http_common", {} }, @@ -371,7 +370,7 @@ pub const ModuleType = enum { cjs, esm, - pub const List = ComptimeStringMap(ModuleType, .{ + pub const List = std.StaticStringMap(ModuleType).initComptime(.{ .{ "commonjs", ModuleType.cjs }, .{ "module", ModuleType.esm }, }); @@ -383,16 +382,13 @@ pub const Target = enum { bun_macro, node, - pub const Map = ComptimeStringMap( - Target, - .{ - .{ "browser", Target.browser }, - .{ "bun", Target.bun }, 
- .{ "bun_macro", Target.bun_macro }, - .{ "macro", Target.bun_macro }, - .{ "node", Target.node }, - }, - ); + pub const Map = bun.ComptimeStringMap(Target, .{ + .{ "browser", Target.browser }, + .{ "bun", Target.bun }, + .{ "bun_macro", Target.bun_macro }, + .{ "macro", Target.bun_macro }, + .{ "node", Target.node }, + }); pub fn fromJS(global: *JSC.JSGlobalObject, value: JSC.JSValue, exception: JSC.C.ExceptionRef) ?Target { if (!value.jsType().isStringLike()) { @@ -542,7 +538,7 @@ pub const Target = enum { // // This is unfortunate but it's a problem on the side of those packages. // They won't work correctly with other popular bundlers (with node as a target) anyway. - var list = [_]string{ MAIN_FIELD_NAMES[2], MAIN_FIELD_NAMES[1] }; + const list = [_]string{ MAIN_FIELD_NAMES[2], MAIN_FIELD_NAMES[1] }; array.set(Target.node, &list); // Note that this means if a package specifies "main", "module", and @@ -552,8 +548,8 @@ pub const Target = enum { // This is deliberate because the presence of the "browser" field is a // good signal that this should be preferred. Some older packages might only use CJS in their "browser" // but in such a case they probably don't have any ESM files anyway. 
- var listc = [_]string{ MAIN_FIELD_NAMES[0], MAIN_FIELD_NAMES[1], MAIN_FIELD_NAMES[3], MAIN_FIELD_NAMES[2] }; - var listd = [_]string{ MAIN_FIELD_NAMES[1], MAIN_FIELD_NAMES[2], MAIN_FIELD_NAMES[3] }; + const listc = [_]string{ MAIN_FIELD_NAMES[0], MAIN_FIELD_NAMES[1], MAIN_FIELD_NAMES[3], MAIN_FIELD_NAMES[2] }; + const listd = [_]string{ MAIN_FIELD_NAMES[1], MAIN_FIELD_NAMES[2], MAIN_FIELD_NAMES[3] }; array.set(Target.browser, &listc); array.set(Target.bun, &listd); @@ -597,23 +593,11 @@ pub const Format = enum { cjs, iife, - pub const Map = ComptimeStringMap( - Format, - .{ - .{ - "esm", - Format.esm, - }, - .{ - "cjs", - Format.cjs, - }, - .{ - "iife", - Format.iife, - }, - }, - ); + pub const Map = bun.ComptimeStringMap(Format, .{ + .{ "esm", .esm }, + .{ "cjs", .cjs }, + .{ "iife", .iife }, + }); pub fn fromJS(global: *JSC.JSGlobalObject, format: JSC.JSValue, exception: JSC.C.ExceptionRef) ?Format { if (format.isUndefinedOrNull()) return null; @@ -701,18 +685,18 @@ pub const Loader = enum(u8) { pub const Map = std.EnumArray(Loader, string); pub const stdin_name: Map = brk: { var map = Map.initFill(""); - map.set(Loader.jsx, "input.jsx"); - map.set(Loader.js, "input.js"); - map.set(Loader.ts, "input.ts"); - map.set(Loader.tsx, "input.tsx"); - map.set(Loader.css, "input.css"); - map.set(Loader.file, "input"); - map.set(Loader.json, "input.json"); - map.set(Loader.toml, "input.toml"); - map.set(Loader.wasm, "input.wasm"); - map.set(Loader.napi, "input.node"); - map.set(Loader.text, "input.txt"); - map.set(Loader.bunsh, "input.sh"); + map.set(.jsx, "input.jsx"); + map.set(.js, "input.js"); + map.set(.ts, "input.ts"); + map.set(.tsx, "input.tsx"); + map.set(.css, "input.css"); + map.set(.file, "input"); + map.set(.json, "input.json"); + map.set(.toml, "input.toml"); + map.set(.wasm, "input.wasm"); + map.set(.napi, "input.node"); + map.set(.text, "input.txt"); + map.set(.bunsh, "input.sh"); break :brk map; }; @@ -739,50 +723,50 @@ pub const Loader = enum(u8) { } pub 
const names = bun.ComptimeStringMap(Loader, .{ - .{ "js", Loader.js }, - .{ "mjs", Loader.js }, - .{ "cjs", Loader.js }, - .{ "cts", Loader.ts }, - .{ "mts", Loader.ts }, - .{ "jsx", Loader.jsx }, - .{ "ts", Loader.ts }, - .{ "tsx", Loader.tsx }, - .{ "css", Loader.css }, - .{ "file", Loader.file }, - .{ "json", Loader.json }, - .{ "toml", Loader.toml }, - .{ "wasm", Loader.wasm }, - .{ "node", Loader.napi }, - .{ "dataurl", Loader.dataurl }, - .{ "base64", Loader.base64 }, - .{ "txt", Loader.text }, - .{ "text", Loader.text }, - .{ "sh", Loader.bunsh }, - .{ "sqlite", Loader.sqlite }, - .{ "sqlite_embedded", Loader.sqlite_embedded }, + .{ "js", .js }, + .{ "mjs", .js }, + .{ "cjs", .js }, + .{ "cts", .ts }, + .{ "mts", .ts }, + .{ "jsx", .jsx }, + .{ "ts", .ts }, + .{ "tsx", .tsx }, + .{ "css", .css }, + .{ "file", .file }, + .{ "json", .json }, + .{ "toml", .toml }, + .{ "wasm", .wasm }, + .{ "node", .napi }, + .{ "dataurl", .dataurl }, + .{ "base64", .base64 }, + .{ "txt", .text }, + .{ "text", .text }, + .{ "sh", .bunsh }, + .{ "sqlite", .sqlite }, + .{ "sqlite_embedded", .sqlite_embedded }, }); pub const api_names = bun.ComptimeStringMap(Api.Loader, .{ - .{ "js", Api.Loader.js }, - .{ "mjs", Api.Loader.js }, - .{ "cjs", Api.Loader.js }, - .{ "cts", Api.Loader.ts }, - .{ "mts", Api.Loader.ts }, - .{ "jsx", Api.Loader.jsx }, - .{ "ts", Api.Loader.ts }, - .{ "tsx", Api.Loader.tsx }, - .{ "css", Api.Loader.css }, - .{ "file", Api.Loader.file }, - .{ "json", Api.Loader.json }, - .{ "toml", Api.Loader.toml }, - .{ "wasm", Api.Loader.wasm }, - .{ "node", Api.Loader.napi }, - .{ "dataurl", Api.Loader.dataurl }, - .{ "base64", Api.Loader.base64 }, - .{ "txt", Api.Loader.text }, - .{ "text", Api.Loader.text }, - .{ "sh", Api.Loader.file }, - .{ "sqlite", Api.Loader.sqlite }, + .{ "js", .js }, + .{ "mjs", .js }, + .{ "cjs", .js }, + .{ "cts", .ts }, + .{ "mts", .ts }, + .{ "jsx", .jsx }, + .{ "ts", .ts }, + .{ "tsx", .tsx }, + .{ "css", .css }, + .{ "file", .file }, + .{ 
"json", .json }, + .{ "toml", .toml }, + .{ "wasm", .wasm }, + .{ "node", .napi }, + .{ "dataurl", .dataurl }, + .{ "base64", .base64 }, + .{ "txt", .text }, + .{ "text", .text }, + .{ "sh", .file }, + .{ "sqlite", .sqlite }, }); pub fn fromString(slice_: string) ?Loader { @@ -844,6 +828,7 @@ pub const Loader = enum(u8) { pub fn isJSX(loader: Loader) bool { return loader == .jsx or loader == .tsx; } + pub fn isTypeScript(loader: Loader) bool { return loader == .tsx or loader == .ts; } @@ -875,35 +860,32 @@ pub const Loader = enum(u8) { }; const default_loaders_posix = .{ - .{ ".jsx", Loader.jsx }, - .{ ".json", Loader.json }, - .{ ".js", Loader.jsx }, + .{ ".jsx", .jsx }, + .{ ".json", .json }, + .{ ".js", .jsx }, - .{ ".mjs", Loader.js }, - .{ ".cjs", Loader.js }, + .{ ".mjs", .js }, + .{ ".cjs", .js }, - .{ ".css", Loader.css }, - .{ ".ts", Loader.ts }, - .{ ".tsx", Loader.tsx }, + .{ ".css", .css }, + .{ ".ts", .ts }, + .{ ".tsx", .tsx }, - .{ ".mts", Loader.ts }, - .{ ".cts", Loader.ts }, + .{ ".mts", .ts }, + .{ ".cts", .ts }, - .{ ".toml", Loader.toml }, - .{ ".wasm", Loader.wasm }, - .{ ".node", Loader.napi }, - .{ ".txt", Loader.text }, - .{ ".text", Loader.text }, + .{ ".toml", .toml }, + .{ ".wasm", .wasm }, + .{ ".node", .napi }, + .{ ".txt", .text }, + .{ ".text", .text }, }; const default_loaders_win32 = default_loaders_posix ++ .{ - .{ ".sh", Loader.bunsh }, + .{ ".sh", .bunsh }, }; const default_loaders = if (Environment.isWindows) default_loaders_win32 else default_loaders_posix; -pub const defaultLoaders = ComptimeStringMap( - Loader, - default_loaders, -); +pub const defaultLoaders = bun.ComptimeStringMap(Loader, default_loaders); // https://webpack.js.org/guides/package-exports/#reference-syntax pub const ESMConditions = struct { @@ -956,12 +938,12 @@ pub const ESMConditions = struct { pub const JSX = struct { pub const RuntimeMap = bun.ComptimeStringMap(JSX.Runtime, .{ - .{ "classic", JSX.Runtime.classic }, - .{ "automatic", 
JSX.Runtime.automatic }, - .{ "react", JSX.Runtime.classic }, - .{ "react-jsx", JSX.Runtime.automatic }, - .{ "react-jsxdev", JSX.Runtime.automatic }, - .{ "solid", JSX.Runtime.solid }, + .{ "classic", .classic }, + .{ "automatic", .automatic }, + .{ "react", .classic }, + .{ "react-jsx", .automatic }, + .{ "react-jsxdev", .automatic }, + .{ "solid", .solid }, }); pub const Pragma = struct { @@ -1387,7 +1369,7 @@ pub const SourceMapOption = enum { }; } - pub const Map = ComptimeStringMap(SourceMapOption, .{ + pub const Map = bun.ComptimeStringMap(SourceMapOption, .{ .{ "none", .none }, .{ "inline", .@"inline" }, .{ "external", .external }, @@ -2033,7 +2015,7 @@ pub const OutputFile = struct { } pub fn moveTo(file: *const OutputFile, _: string, rel_path: []u8, dir: FileDescriptorType) !void { - try bun.C.moveFileZ(file.value.move.dir, bun.sliceTo(&(try std.os.toPosixPath(file.value.move.getPathname())), 0), dir, bun.sliceTo(&(try std.os.toPosixPath(rel_path)), 0)); + try bun.C.moveFileZ(file.value.move.dir, bun.sliceTo(&(try std.posix.toPosixPath(file.value.move.getPathname())), 0), dir, bun.sliceTo(&(try std.posix.toPosixPath(rel_path)), 0)); } pub fn copyTo(file: *const OutputFile, _: string, rel_path: []u8, dir: FileDescriptorType) !void { @@ -2055,8 +2037,8 @@ pub const OutputFile = struct { defer { if (do_close) { - std.os.close(fd_out); - std.os.close(fd_in); + std.posix.close(fd_out); + std.posix.close(fd_in); } } @@ -2692,15 +2674,12 @@ pub const PathTemplate = struct { ext: []const u8 = "", hash: ?u64 = null, - pub const map = bun.ComptimeStringMap( - std.meta.FieldEnum(Placeholder), - .{ - .{ "dir", .dir }, - .{ "name", .name }, - .{ "ext", .ext }, - .{ "hash", .hash }, - }, - ); + pub const map = bun.ComptimeStringMap(std.meta.FieldEnum(Placeholder), .{ + .{ "dir", .dir }, + .{ "name", .name }, + .{ "ext", .ext }, + .{ "hash", .hash }, + }); }; pub const chunk = PathTemplate{ diff --git a/src/output.zig b/src/output.zig index 8c3536aafd..9ddaddc3d6 100644 
--- a/src/output.zig +++ b/src/output.zig @@ -23,7 +23,7 @@ var stderr_stream: Source.StreamType = undefined; var stdout_stream: Source.StreamType = undefined; var stdout_stream_set = false; const File = bun.sys.File; -pub var terminal_size: std.os.winsize = .{ +pub var terminal_size: std.posix.winsize = .{ .ws_row = 0, .ws_col = 0, .ws_xpixel = 0, @@ -248,7 +248,7 @@ pub const Source = struct { Output.Source.init(stdout, stderr) .set(); - if (comptime Environment.isDebug or Environment.allow_logs) { + if (comptime Environment.isDebug or Environment.enable_logs) { initScopedDebugWriterAtStartup(); } } @@ -576,7 +576,7 @@ pub fn Scoped(comptime tag: anytype, comptime disabled: bool) type { else => tag, }; - if (comptime !Environment.isDebug and !Environment.allow_logs) { + if (comptime !Environment.isDebug and !Environment.enable_logs) { return struct { pub fn isVisible() bool { return false; @@ -626,9 +626,9 @@ pub fn Scoped(comptime tag: anytype, comptime disabled: bool) type { return; } - if (Environment.allow_logs) ScopedDebugWriter.disable_inside_log += 1; + if (Environment.enable_logs) ScopedDebugWriter.disable_inside_log += 1; defer { - if (Environment.allow_logs) + if (Environment.enable_logs) ScopedDebugWriter.disable_inside_log -= 1; } @@ -778,7 +778,8 @@ pub fn prettyFmt(comptime fmt: string, comptime is_enabled: bool) string { } }; - return comptime new_fmt[0..new_fmt_i]; + const fmt_data = comptime new_fmt[0..new_fmt_i].*; + return &fmt_data; } pub noinline fn prettyWithPrinter(comptime fmt: string, args: anytype, comptime printer: anytype, comptime l: Destination) void { @@ -862,17 +863,12 @@ pub const DebugTimer = struct { pub const WriteError = error{}; - pub fn format(self: DebugTimer, comptime _: []const u8, opts: std.fmt.FormatOptions, writer_: anytype) WriteError!void { + pub fn format(self: DebugTimer, comptime _: []const u8, _: std.fmt.FormatOptions, w: anytype) WriteError!void { if (comptime Environment.isDebug) { var timer = self.timer; - var 
_opts = opts; - _opts.precision = 3; - std.fmt.formatFloatDecimal( - @as(f64, @floatCast(@as(f64, @floatFromInt(timer.read())) / std.time.ns_per_ms)), - _opts, - writer_, - ) catch unreachable; - writer_.writeAll("ms") catch {}; + w.print("{d:.3}ms", .{@as(f64, @floatFromInt(timer.read())) / std.time.ns_per_ms}) catch unreachable; + } else { + @compileError("DebugTimer.format() should only be called in debug mode"); } } }; @@ -995,14 +991,10 @@ pub fn initScopedDebugWriterAtStartup() void { const path_fmt = std.mem.replaceOwned(u8, bun.default_allocator, path, "{pid}", pid) catch @panic("failed to allocate path"); defer bun.default_allocator.free(path_fmt); - const fd = std.os.openat( - std.fs.cwd().fd, - path_fmt, - std.os.O.CREAT | std.os.O.WRONLY, - // on windows this is u0 - if (Environment.isWindows) 0 else 0o644, - ) catch |err_| { - Output.panic("Failed to open file for debug output: {s} ({s})", .{ @errorName(err_), path }); + const fd = std.fs.cwd().createFile(path_fmt, .{ + .mode = if (Environment.isPosix) 0o644 else 0, + }) catch |open_err| { + Output.panic("Failed to open file for debug output: {s} ({s})", .{ @errorName(open_err), path }); }; _ = bun.sys.ftruncate(bun.toFD(fd), 0); // windows ScopedDebugWriter.scoped_file_writer = File.from(fd).quietWriter(); @@ -1013,7 +1005,7 @@ pub fn initScopedDebugWriterAtStartup() void { ScopedDebugWriter.scoped_file_writer = source.stream.quietWriter(); } fn scopedWriter() File.QuietWriter { - if (comptime !Environment.isDebug and !Environment.allow_logs) { + if (comptime !Environment.isDebug and !Environment.enable_logs) { @compileError("scopedWriter() should only be called in debug mode"); } diff --git a/src/patch.zig b/src/patch.zig index f4d039ec61..950ab014c4 100644 --- a/src/patch.zig +++ b/src/patch.zig @@ -132,7 +132,7 @@ pub const PatchFile = struct { const newfile_fd = switch (bun.sys.openat( patch_dir, filepath.sliceAssumeZ(), - std.os.O.CREAT | std.os.O.WRONLY | std.os.O.TRUNC, + bun.O.CREAT | 
bun.O.WRONLY | bun.O.TRUNC, mode.toBunMode(), )) { .result => |fd| fd, @@ -204,7 +204,7 @@ pub const PatchFile = struct { .result => |p| p, .err => |e| return e.toSystemError(), }; - const fd = switch (bun.sys.open(bun.path.joinZ(&[_][]const u8{ absfilepath, filepath }, .auto), std.os.O.RDWR, 0)) { + const fd = switch (bun.sys.open(bun.path.joinZ(&[_][]const u8{ absfilepath, filepath }, .auto), bun.O.RDWR, 0)) { .err => |e| return e.toSystemError(), .result => |f| f, }; @@ -333,7 +333,7 @@ pub const PatchFile = struct { const file_fd = switch (bun.sys.openat( patch_dir, file_path, - std.os.O.CREAT | std.os.O.WRONLY | std.os.O.TRUNC, + bun.O.CREAT | bun.O.WRONLY | bun.O.TRUNC, @intCast(stat.mode), )) { .err => |e| return .{ .err = e.withPath(file_path) }, @@ -1201,7 +1201,7 @@ pub const TestingAPIs = struct { const path = bunstr.toOwnedSliceZ(bun.default_allocator) catch unreachable; defer bun.default_allocator.free(path); - break :brk switch (bun.sys.open(path, std.os.O.DIRECTORY | std.os.O.RDONLY, 0)) { + break :brk switch (bun.sys.open(path, bun.O.DIRECTORY | bun.O.RDONLY, 0)) { .err => |e| { globalThis.throwValue(e.withPath(path).toJSC(globalThis)); return .{ .err = .undefined }; @@ -1373,7 +1373,7 @@ pub fn gitDiffInternal( allocator.free(new_folder); }; - var child_proc = std.ChildProcess.init( + var child_proc = std.process.Child.init( &[_][]const u8{ "git", "-c", diff --git a/src/pool.zig b/src/pool.zig index 75521a3c27..ec5a07388b 100644 --- a/src/pool.zig +++ b/src/pool.zig @@ -209,7 +209,7 @@ pub fn ObjectPool( } pub fn releaseValue(value: *Type) void { - @fieldParentPtr(LinkedList.Node, "data", value).release(); + @as(*LinkedList.Node, @fieldParentPtr("data", value)).release(); } pub fn release(node: *LinkedList.Node) void { diff --git a/src/renamer.zig b/src/renamer.zig index b12e944439..3f3b572546 100644 --- a/src/renamer.zig +++ b/src/renamer.zig @@ -549,7 +549,7 @@ pub const NumberRenamer = struct { renamer.number_scope_pool = 
bun.HiveArray(NumberScope, 128).Fallback.init(renamer.arena.allocator()); renamer.root.name_counts = root_names; if (comptime Environment.allow_assert and !Environment.isWindows) { - if (std.os.getenv("BUN_DUMP_SYMBOLS") != null) + if (std.posix.getenv("BUN_DUMP_SYMBOLS") != null) symbols.dump(); } diff --git a/src/resolver/data_url.zig b/src/resolver/data_url.zig index 177337fc96..5757cebfe8 100644 --- a/src/resolver/data_url.zig +++ b/src/resolver/data_url.zig @@ -11,7 +11,6 @@ const C = bun.C; const std = @import("std"); const Allocator = std.mem.Allocator; -const ComptimeStringMap = @import("../comptime_string_map.zig").ComptimeStringMap; // https://github.com/Vexu/zuri/blob/master/src/zuri.zig#L61-L127 pub const PercentEncoding = struct { diff --git a/src/resolver/resolve_path.zig b/src/resolver/resolve_path.zig index f2af9c97c7..332dffab8e 100644 --- a/src/resolver/resolve_path.zig +++ b/src/resolver/resolve_path.zig @@ -758,16 +758,16 @@ pub fn normalizeStringGenericTZ( if (isWindows and !options.allow_above_root) { if (volLen > 0) { if (options.add_nt_prefix) { - @memcpy(buf[buf_i .. buf_i + 4], &comptime strings.literalBuf(T, "\\??\\")); + @memcpy(buf[buf_i .. buf_i + 4], comptime strings.literal(T, "\\??\\")); buf_i += 4; } if (path_[1] != ':') { // UNC paths if (options.add_nt_prefix) { - @memcpy(buf[buf_i .. buf_i + 4], &comptime strings.literalBuf(T, "UNC" ++ sep_str)); + @memcpy(buf[buf_i .. buf_i + 4], comptime strings.literal(T, "UNC" ++ sep_str)); buf_i += 2; } else { - @memcpy(buf[buf_i .. buf_i + 2], &comptime strings.literalBuf(T, sep_str ++ sep_str)); + @memcpy(buf[buf_i .. buf_i + 2], comptime strings.literal(T, sep_str ++ sep_str)); } @memcpy(buf[buf_i + 2 .. buf_i + indexOfThirdUNCSlash + 1], path_[2 .. 
indexOfThirdUNCSlash + 1]); buf[buf_i + indexOfThirdUNCSlash] = options.separator; @@ -866,10 +866,10 @@ pub fn normalizeStringGenericTZ( } } else if (options.allow_above_root) { if (buf_i > buf_start) { - buf[buf_i..][0..3].* = comptime strings.literalBuf(T, sep_str ++ ".."); + buf[buf_i..][0..3].* = (comptime strings.literal(T, sep_str ++ "..")).*; buf_i += 3; } else { - buf[buf_i..][0..2].* = comptime strings.literalBuf(T, ".."); + buf[buf_i..][0..2].* = (comptime strings.literal(T, "..")).*; buf_i += 2; } dotdot = buf_i; @@ -1915,7 +1915,7 @@ pub const PosixToWinNormalizer = struct { if (root.len == 1) { assert(isSepAny(root[0])); if (bun.strings.isWindowsAbsolutePathMissingDriveLetter(u8, maybe_posix_path)) { - const cwd = try std.os.getcwd(buf); + const cwd = try std.posix.getcwd(buf); assert(cwd.ptr == buf.ptr); const source_root = windowsFilesystemRoot(cwd); assert(source_root.ptr == source_root.ptr); @@ -1941,7 +1941,7 @@ pub const PosixToWinNormalizer = struct { if (root.len == 1) { assert(isSepAny(root[0])); if (bun.strings.isWindowsAbsolutePathMissingDriveLetter(u8, maybe_posix_path)) { - const cwd = try std.os.getcwd(buf); + const cwd = try std.posix.getcwd(buf); assert(cwd.ptr == buf.ptr); const source_root = windowsFilesystemRoot(cwd); assert(source_root.ptr == source_root.ptr); diff --git a/src/resolver/resolver.zig b/src/resolver/resolver.zig index 0a2ce3ccd4..e9da354bfa 100644 --- a/src/resolver/resolver.zig +++ b/src/resolver/resolver.zig @@ -768,14 +768,14 @@ pub const Resolver = struct { var parts = [_]string{ r.fs.top_level_dir, std.fs.path.sep_str, route_dir }; const abs = r.fs.join(&parts); // must end in trailing slash - break :brk (std.os.realpath(abs, &buf) catch continue); + break :brk (std.posix.realpath(abs, &buf) catch continue); } return error.MissingRouteDir; } else { var parts = [_]string{ r.fs.top_level_dir, std.fs.path.sep_str, pair.router.dir }; const abs = r.fs.join(&parts); // must end in trailing slash - break :brk 
std.os.realpath(abs, &buf) catch return error.MissingRouteDir; + break :brk std.posix.realpath(abs, &buf) catch return error.MissingRouteDir; } }; @@ -3385,6 +3385,7 @@ pub const Resolver = struct { var arena = std.heap.ArenaAllocator.init(bun.default_allocator); defer arena.deinit(); var stack_fallback_allocator = std.heap.stackFallback(1024, arena.allocator()); + const alloc = stack_fallback_allocator.get(); if (r.readDirInfo(str) catch null) |result| { var dir_info = result; @@ -3398,7 +3399,7 @@ pub const Resolver = struct { break :brk [2]string{ path_without_trailing_slash, std.fs.path.sep_str ++ "node_modules" }; }; - const nodemodules_path = bun.strings.concat(stack_fallback_allocator.get(), &path_parts) catch unreachable; + const nodemodules_path = bun.strings.concat(alloc, &path_parts) catch unreachable; bun.path.posixToPlatformInPlace(u8, nodemodules_path); list.append(bun.String.createUTF8(nodemodules_path)) catch unreachable; dir_info = (r.readDirInfo(std.fs.path.dirname(path_without_trailing_slash) orelse break) catch null) orelse break; @@ -3413,7 +3414,7 @@ pub const Resolver = struct { list.append( bun.String.createUTF8( bun.strings.concat( - stack_fallback_allocator.get(), + alloc, &[_]string{ path_without_trailing_slash, std.fs.path.sep_str ++ "node_modules", diff --git a/src/resolver/tsconfig_json.zig b/src/resolver/tsconfig_json.zig index 4595b08ba6..bc4c96927f 100644 --- a/src/resolver/tsconfig_json.zig +++ b/src/resolver/tsconfig_json.zig @@ -14,7 +14,6 @@ const logger = bun.logger; const cache = @import("../cache.zig"); const js_ast = bun.JSAst; const js_lexer = bun.js_lexer; -const ComptimeStringMap = @import("../comptime_string_map.zig").ComptimeStringMap; // Heuristic: you probably don't have 100 of these // Probably like 5-10 @@ -68,10 +67,10 @@ pub const TSConfigJSON = struct { remove, invalid, - pub const List = ComptimeStringMap(ImportsNotUsedAsValue, .{ - .{ "preserve", ImportsNotUsedAsValue.preserve }, - .{ "error", 
ImportsNotUsedAsValue.err }, - .{ "remove", ImportsNotUsedAsValue.remove }, + pub const List = std.StaticStringMap(ImportsNotUsedAsValue).initComptime(.{ + .{ "preserve", .preserve }, + .{ "error", .err }, + .{ "remove", .remove }, }); }; diff --git a/src/runtime.zig b/src/runtime.zig index b92df47bac..8892cad444 100644 --- a/src/runtime.zig +++ b/src/runtime.zig @@ -199,13 +199,12 @@ pub const Fallback = struct { pub const Runtime = struct { pub const source_code = @embedFile("./runtime.out.js"); - pub const version_hash = @import("build_options").runtime_js_version; - var version_hash_int: u32 = 0; + pub const hash = brk: { + @setEvalBranchQuota(source_code.len * 50); + break :brk bun.Wyhash11.hash(0, source_code); + }; pub fn versionHash() u32 { - if (version_hash_int == 0) { - version_hash_int = @as(u32, @truncate(version_hash)); - } - return version_hash_int; + return @truncate(hash); } pub const Features = struct { diff --git a/src/sha.zig b/src/sha.zig index 7bceeedf88..0fac3db29c 100644 --- a/src/sha.zig +++ b/src/sha.zig @@ -84,7 +84,7 @@ pub const EVP = struct { pub const SHA512 = NewEVP(std.crypto.hash.sha2.Sha512.digest_length, "EVP_sha512"); pub const SHA384 = NewEVP(std.crypto.hash.sha2.Sha384.digest_length, "EVP_sha384"); pub const SHA256 = NewEVP(std.crypto.hash.sha2.Sha256.digest_length, "EVP_sha256"); - pub const SHA512_256 = NewEVP(std.crypto.hash.sha2.Sha512256.digest_length, "EVP_sha512_256"); + pub const SHA512_256 = NewEVP(std.crypto.hash.sha2.Sha512T256.digest_length, "EVP_sha512_256"); pub const MD5_SHA1 = NewEVP(std.crypto.hash.Sha1.digest_length, "EVP_md5_sha1"); pub const Blake2 = NewEVP(256 / 8, "EVP_blake2b256"); }; @@ -138,7 +138,7 @@ pub const Hashers = struct { ); pub const SHA512_256 = NewHasher( - std.crypto.hash.sha2.Sha512256.digest_length, + std.crypto.hash.sha2.Sha512T256.digest_length, BoringSSL.SHA512_CTX, BoringSSL.SHA512_256, BoringSSL.SHA512_256_Init, @@ -172,7 +172,7 @@ const zig = [_]type{ std.crypto.hash.sha2.Sha512, 
std.crypto.hash.sha2.Sha384, std.crypto.hash.sha2.Sha256, - std.crypto.hash.sha2.Sha512256, + std.crypto.hash.sha2.Sha512T256, std.crypto.hash.blake2.Blake2b256, std.crypto.hash.Blake3, }; diff --git a/src/shell/delete_tree.zig b/src/shell/delete_tree.zig deleted file mode 100644 index 4962842489..0000000000 --- a/src/shell/delete_tree.zig +++ /dev/null @@ -1,41 +0,0 @@ -//! Modified version of `std.fs.deleteTree`: -//! - nonsense instances of `unreachable` removed -//! - uses Bun's DirIterator -//! - can pass a Context which allows you to inspect which files/directories have been deleted (needed for shell's implementation of rm with verbose flag) -const std = @import("std"); -const os = std.os; - -const bun = @import("root").bun; -const DirIterator = @import("../bun.js/node/dir_iterator.zig"); -const Maybe = @import("../bun.js/node/types.zig").Maybe; -const Syscall = @import("../sys.zig"); - -pub const DeleteTreeError = error{ - InvalidHandle, - AccessDenied, - FileTooBig, - SymLinkLoop, - ProcessFdQuotaExceeded, - NameTooLong, - SystemFdQuotaExceeded, - NoDevice, - SystemResources, - ReadOnlyFileSystem, - FileSystem, - FileBusy, - DeviceBusy, - - /// One of the path components was not a directory. - /// This error is unreachable if `sub_path` does not contain a path separator. - NotDir, - - /// On Windows, file paths must be valid Unicode. - InvalidUtf8, - - /// On Windows, file paths cannot contain these characters: - /// '/', '*', '?', '"', '<', '>', '|' - BadPathName, - - /// On Windows, `\\server` or `\\server\share` was not found. 
- NetworkNotFound, -} || os.UnexpectedError; diff --git a/src/shell/interpreter.zig b/src/shell/interpreter.zig index 55a477e085..d5c0eca546 100644 --- a/src/shell/interpreter.zig +++ b/src/shell/interpreter.zig @@ -20,7 +20,7 @@ const std = @import("std"); const builtin = @import("builtin"); const string = []const u8; const bun = @import("root").bun; -const os = std.os; +const posix = std.posix; const Arena = std.heap.ArenaAllocator; const Allocator = std.mem.Allocator; const ArrayList = std.ArrayList; @@ -887,7 +887,7 @@ pub const Interpreter = struct { async_pids: SmolList(pid_t, 4) = SmolList(pid_t, 4).zeroes, - const pid_t = if (bun.Environment.isPosix) std.os.pid_t else uv.uv_pid_t; + const pid_t = if (bun.Environment.isPosix) std.posix.pid_t else uv.uv_pid_t; const Bufio = union(enum) { owned: bun.ByteList, borrowed: *bun.ByteList }; @@ -1073,7 +1073,7 @@ pub const Interpreter = struct { const new_cwd_fd = switch (ShellSyscall.openat( this.cwd_fd, new_cwd, - std.os.O.DIRECTORY | std.os.O.RDONLY, + bun.O.DIRECTORY | bun.O.RDONLY, 0, )) { .result => |fd| fd, @@ -1327,7 +1327,8 @@ pub const Interpreter = struct { return .{ .err = .{ .sys = err.toSystemError() } }; }, }; - const cwd_fd = switch (Syscall.open(cwd, std.os.O.DIRECTORY | std.os.O.RDONLY, 0)) { + + const cwd_fd = switch (Syscall.open(cwd, bun.O.DIRECTORY | bun.O.RDONLY, 0)) { .result => |fd| fd, .err => |err| { return .{ .err = .{ .sys = err.toSystemError() } }; @@ -1594,8 +1595,9 @@ pub const Interpreter = struct { if (this.setupIOBeforeRun().asErr()) |e| { return .{ .err = e }; } + var root = Script.init(this, &this.root_shell, &this.args.script_ast, Script.ParentPtr.init(this), this.root_io.copy()); - this.started.store(true, .SeqCst); + this.started.store(true, .seq_cst); root.start(); return Maybe(void).success; @@ -1611,9 +1613,11 @@ pub const Interpreter = struct { return .undefined; } incrPendingActivityFlag(&this.has_pending_activity); + var root = Script.init(this, &this.root_shell, 
&this.args.script_ast, Script.ParentPtr.init(this), this.root_io.copy()); - this.started.store(true, .SeqCst); + this.started.store(true, .seq_cst); root.start(); + return .undefined; } @@ -1801,7 +1805,7 @@ pub const Interpreter = struct { _ = globalThis; // autofix _ = callframe; // autofix - return JSC.JSValue.jsBoolean(this.started.load(.SeqCst)); + return JSC.JSValue.jsBoolean(this.started.load(.seq_cst)); } pub fn getBufferedStdout( @@ -1826,20 +1830,20 @@ pub const Interpreter = struct { } pub fn hasPendingActivity(this: *ThisInterpreter) callconv(.C) bool { - @fence(.SeqCst); - return this.has_pending_activity.load(.SeqCst) > 0; + @fence(.seq_cst); + return this.has_pending_activity.load(.seq_cst) > 0; } fn incrPendingActivityFlag(has_pending_activity: *std.atomic.Value(usize)) void { - @fence(.SeqCst); - _ = has_pending_activity.fetchAdd(1, .SeqCst); - log("Interpreter incr pending activity {d}", .{has_pending_activity.load(.SeqCst)}); + @fence(.seq_cst); + _ = has_pending_activity.fetchAdd(1, .seq_cst); + log("Interpreter incr pending activity {d}", .{has_pending_activity.load(.seq_cst)}); } fn decrPendingActivityFlag(has_pending_activity: *std.atomic.Value(usize)) void { - @fence(.SeqCst); - _ = has_pending_activity.fetchSub(1, .SeqCst); - log("Interpreter decr pending activity {d}", .{has_pending_activity.load(.SeqCst)}); + @fence(.seq_cst); + _ = has_pending_activity.fetchSub(1, .seq_cst); + log("Interpreter decr pending activity {d}", .{has_pending_activity.load(.seq_cst)}); } pub fn rootIO(this: *const Interpreter) *const IO { @@ -2636,7 +2640,7 @@ pub const Interpreter = struct { pub fn runFromThreadPool(task: *WorkPoolTask) void { debug("runFromThreadPool", .{}); - var this = @fieldParentPtr(This, "task", task); + var this: *This = @fieldParentPtr("task", task); switch (this.walkImpl()) { .result => {}, .err => |e| { @@ -3553,14 +3557,14 @@ pub const Interpreter = struct { } else { const fds: [2]bun.FileDescriptor = brk: { var fds_: [2]std.c.fd_t = 
undefined; - const rc = std.c.socketpair(std.os.AF.UNIX, std.os.SOCK.STREAM, 0, &fds_); + const rc = std.c.socketpair(std.posix.AF.UNIX, std.posix.SOCK.STREAM, 0, &fds_); if (rc != 0) { return bun.sys.Maybe(void).errno(bun.sys.getErrno(rc), .socketpair); } - var before = std.c.fcntl(fds_[0], std.os.F.GETFL); + var before = std.c.fcntl(fds_[0], std.posix.F.GETFL); - const result = std.c.fcntl(fds_[0], std.os.F.SETFL, before | os.O.CLOEXEC); + const result = std.c.fcntl(fds_[0], std.posix.F.SETFL, before | bun.O.CLOEXEC); if (result == -1) { _ = bun.sys.close(bun.toFD(fds_[0])); _ = bun.sys.close(bun.toFD(fds_[1])); @@ -3570,7 +3574,7 @@ pub const Interpreter = struct { if (comptime bun.Environment.isMac) { // SO_NOSIGPIPE before = 1; - _ = std.c.setsockopt(fds_[0], std.os.SOL.SOCKET, std.os.SO.NOSIGPIPE, &before, @sizeOf(c_int)); + _ = std.c.setsockopt(fds_[0], std.posix.SOL.SOCKET, std.posix.SO.NOSIGPIPE, &before, @sizeOf(c_int)); } break :brk .{ bun.toFD(fds_[0]), bun.toFD(fds_[1]) }; @@ -5682,13 +5686,13 @@ pub const Interpreter = struct { } pub inline fn parentCmd(this: *const Builtin) *const Cmd { - const union_ptr = @fieldParentPtr(Cmd.Exec, "bltn", this); - return @fieldParentPtr(Cmd, "exec", union_ptr); + const union_ptr: *const Cmd.Exec = @fieldParentPtr("bltn", this); + return @fieldParentPtr("exec", union_ptr); } pub inline fn parentCmdMut(this: *Builtin) *Cmd { - const union_ptr = @fieldParentPtr(Cmd.Exec, "bltn", this); - return @fieldParentPtr(Cmd, "exec", union_ptr); + const union_ptr: *Cmd.Exec = @fieldParentPtr("bltn", this); + return @fieldParentPtr("exec", union_ptr); } pub fn done(this: *Builtin, exit_code: anytype) void { @@ -5940,7 +5944,7 @@ pub const Interpreter = struct { const arg = std.mem.span(exec.args[exec.idx]); exec.idx += 1; const dir = this.bltn.parentCmd().base.shell.cwd_fd; - const fd = switch (ShellSyscall.openat(dir, arg, os.O.RDONLY, 0)) { + const fd = switch (ShellSyscall.openat(dir, arg, bun.O.RDONLY, 0)) { .result => |fd| 
fd, .err => |e| { const buf = this.bltn.taskErrorToString(.cat, e); @@ -6406,7 +6410,7 @@ pub const Interpreter = struct { } fn runFromThreadPool(task: *JSC.WorkPoolTask) void { - var this: *ShellTouchTask = @fieldParentPtr(ShellTouchTask, "task", task); + var this: *ShellTouchTask = @fieldParentPtr("task", task); debug("{} runFromThreadPool", .{this}); // We have to give an absolute path @@ -6434,7 +6438,7 @@ pub const Interpreter = struct { if (node_fs.utimes(args, .callback).asErr()) |err| out: { if (err.getErrno() == bun.C.E.NOENT) { const perm = 0o664; - switch (Syscall.open(filepath, std.os.O.CREAT | std.os.O.WRONLY, perm)) { + switch (Syscall.open(filepath, bun.O.CREAT | bun.O.WRONLY, perm)) { .result => |fd| { _ = bun.sys.close(fd); break :out; @@ -6810,7 +6814,7 @@ pub const Interpreter = struct { } fn runFromThreadPool(task: *JSC.WorkPoolTask) void { - var this: *ShellMkdirTask = @fieldParentPtr(ShellMkdirTask, "task", task); + var this: *ShellMkdirTask = @fieldParentPtr("task", task); debug("{} runFromThreadPool", .{this}); // We have to give an absolute path to our mkdir @@ -7542,11 +7546,11 @@ pub const Interpreter = struct { log("Ls(0x{x}, state=exec) Check: tasks_done={d} task_count={d} output_done={d} output_waiting={d}", .{ @intFromPtr(this), this.state.exec.tasks_done, - this.state.exec.task_count.load(.Monotonic), + this.state.exec.task_count.load(.monotonic), this.state.exec.output_done, this.state.exec.output_waiting, }); - if (this.state.exec.tasks_done >= this.state.exec.task_count.load(.Monotonic) and this.state.exec.output_done >= this.state.exec.output_waiting) { + if (this.state.exec.tasks_done >= this.state.exec.task_count.load(.monotonic) and this.state.exec.output_done >= this.state.exec.output_waiting) { const exit_code: ExitCode = if (this.state.exec.err != null) 1 else 0; this.state = .done; this.bltn.done(exit_code); @@ -7696,7 +7700,7 @@ pub const Interpreter = struct { ); var subtask = @This().create(this.ls, this.opts, 
this.task_count, this.cwd, new_path, this.event_loop); - _ = this.task_count.fetchAdd(1, .Monotonic); + _ = this.task_count.fetchAdd(1, .monotonic); subtask.is_root = false; subtask.schedule(); } @@ -7715,7 +7719,7 @@ pub const Interpreter = struct { } pub fn run(this: *@This()) void { - const fd = switch (ShellSyscall.openat(this.cwd, this.path, os.O.RDONLY | os.O.DIRECTORY, 0)) { + const fd = switch (ShellSyscall.openat(this.cwd, this.path, bun.O.RDONLY | bun.O.DIRECTORY, 0)) { .err => |e| { switch (e.getErrno()) { bun.C.E.NOENT => { @@ -7793,7 +7797,7 @@ pub const Interpreter = struct { } pub fn workPoolCallback(task: *JSC.WorkPoolTask) void { - var this: *@This() = @fieldParentPtr(@This(), "task", task); + var this: *@This() = @fieldParentPtr("task", task); this.run(); this.doneLogic(); } @@ -8286,7 +8290,7 @@ pub const Interpreter = struct { task: ShellTask(@This(), runFromThreadPool, runFromMainThread, debug), pub fn runFromThreadPool(this: *@This()) void { - const fd = switch (ShellSyscall.openat(this.cwd, this.target, os.O.RDONLY | os.O.DIRECTORY, 0)) { + const fd = switch (ShellSyscall.openat(this.cwd, this.target, bun.O.RDONLY | bun.O.DIRECTORY, 0)) { .err => |e| { switch (e.getErrno()) { bun.C.E.NOTDIR => { @@ -8382,7 +8386,7 @@ pub const Interpreter = struct { var fixed_alloc = std.heap.FixedBufferAllocator.init(buf[0..bun.MAX_PATH_BYTES]); for (this.sources) |src_raw| { - if (this.error_signal.load(.SeqCst)) return; + if (this.error_signal.load(.seq_cst)) return; defer fixed_alloc.reset(); const src = src_raw[0..std.mem.len(src_raw) :0]; @@ -8604,7 +8608,7 @@ pub const Interpreter = struct { var exec = &this.state.executing; if (task.err) |err| { - exec.error_signal.store(true, .SeqCst); + exec.error_signal.store(true, .seq_cst); if (exec.err == null) { exec.err = err; } else { @@ -8776,17 +8780,17 @@ pub const Interpreter = struct { }, fn incrementOutputCount(this: *@This(), comptime thevar: @Type(.EnumLiteral)) void { - @fence(.SeqCst); + 
@fence(.seq_cst); var atomicvar = &@field(this, @tagName(thevar)); - const result = atomicvar.fetchAdd(1, .SeqCst); + const result = atomicvar.fetchAdd(1, .seq_cst); log("[rm] {s}: {d} + 1", .{ @tagName(thevar), result }); return; } fn getOutputCount(this: *@This(), comptime thevar: @Type(.EnumLiteral)) usize { - @fence(.SeqCst); + @fence(.seq_cst); var atomicvar = &@field(this, @tagName(thevar)); - return atomicvar.load(.SeqCst); + return atomicvar.load(.seq_cst); } }, done: struct { exit_code: ExitCode }, @@ -9052,7 +9056,7 @@ pub const Interpreter = struct { log("Rm(0x{x}).onIOWriterChunk()", .{@intFromPtr(this)}); if (comptime bun.Environment.allow_assert) { assert((this.state == .parse_opts and this.state.parse_opts.state == .wait_write_err) or - (this.state == .exec and this.state.exec.state == .waiting and this.state.exec.output_count.load(.SeqCst) > 0)); + (this.state == .exec and this.state.exec.state == .waiting and this.state.exec.output_count.load(.seq_cst) > 0)); } if (this.state == .exec and this.state.exec.state == .waiting) { @@ -9207,7 +9211,7 @@ pub const Interpreter = struct { fn writeVerbose(this: *Rm, verbose: *ShellRmTask.DirTask) void { if (!this.bltn.stdout.needsIO()) { - _ = this.bltn.writeNoIO(.stdout, verbose.deleted_entries.items[0..]); + _ = this.bltn.writeNoIO(.stdout, verbose.deleted_entries.items); _ = this.state.exec.incrementOutputCount(.output_done); if (this.state.exec.state.tasksDone() >= this.state.exec.total_tasks and this.state.exec.getOutputCount(.output_done) >= this.state.exec.getOutputCount(.output_count)) { this.bltn.done(if (this.state.exec.err != null) @as(ExitCode, 1) else @as(ExitCode, 0)); @@ -9217,7 +9221,7 @@ pub const Interpreter = struct { } const buf = verbose.takeDeletedEntries(); defer buf.deinit(); - this.bltn.stdout.enqueue(this, buf.items[0..]); + this.bltn.stdout.enqueue(this, buf.items); } pub const ShellRmTask = struct { @@ -9300,13 +9304,13 @@ pub const Interpreter = struct { } pub fn 
runFromThreadPool(task: *JSC.WorkPoolTask) void { - var this: *DirTask = @fieldParentPtr(DirTask, "task", task); + var this: *DirTask = @fieldParentPtr("task", task); this.runFromThreadPoolImpl(); } fn runFromThreadPoolImpl(this: *DirTask) void { defer { - if (!this.deleting_after_waiting_for_children.load(.SeqCst)) { + if (!this.deleting_after_waiting_for_children.load(.seq_cst)) { this.postRun(); } } @@ -9323,7 +9327,7 @@ pub const Interpreter = struct { defer this.task_manager.err_mutex.unlock(); if (this.task_manager.err == null) { this.task_manager.err = err; - this.task_manager.error_signal.store(true, .SeqCst); + this.task_manager.error_signal.store(true, .seq_cst); } return; }, @@ -9341,7 +9345,7 @@ pub const Interpreter = struct { defer this.task_manager.err_mutex.unlock(); if (this.task_manager.err == null) { this.task_manager.err = err; - this.task_manager.error_signal.store(true, .SeqCst); + this.task_manager.error_signal.store(true, .seq_cst); } else { bun.default_allocator.free(err.path); } @@ -9356,7 +9360,7 @@ pub const Interpreter = struct { defer this.task_manager.err_mutex.unlock(); if (this.task_manager.err == null) { this.task_manager.err = err; - this.task_manager.error_signal.store(true, .SeqCst); + this.task_manager.error_signal.store(true, .seq_cst); } else { bun.default_allocator.free(err.path); } @@ -9366,10 +9370,10 @@ pub const Interpreter = struct { debug("DirTask(0x{x}, path={s}) postRun", .{ @intFromPtr(this), this.path }); // // This is true if the directory has subdirectories // // that need to be deleted - if (this.need_to_wait.load(.SeqCst)) return; + if (this.need_to_wait.load(.seq_cst)) return; // We have executed all the children of this task - if (this.subtask_count.fetchSub(1, .SeqCst) == 1) { + if (this.subtask_count.fetchSub(1, .seq_cst) == 1) { defer { if (this.task_manager.opts.verbose) this.queueForWrite() @@ -9381,8 +9385,8 @@ pub const Interpreter = struct { if (this.parent_task != null) { // It's possible that we 
queued this subdir task and it finished, while the parent // was still in the `removeEntryDir` function - const tasks_left_before_decrement = this.parent_task.?.subtask_count.fetchSub(1, .SeqCst); - const parent_still_in_remove_entry_dir = !this.parent_task.?.need_to_wait.load(.Monotonic); + const tasks_left_before_decrement = this.parent_task.?.subtask_count.fetchSub(1, .seq_cst); + const parent_still_in_remove_entry_dir = !this.parent_task.?.need_to_wait.load(.monotonic); if (!parent_still_in_remove_entry_dir and tasks_left_before_decrement == 2) { this.parent_task.?.deleteAfterWaitingForChildren(); } @@ -9399,13 +9403,13 @@ pub const Interpreter = struct { pub fn deleteAfterWaitingForChildren(this: *DirTask) void { debug("DirTask(0x{x}, path={s}) deleteAfterWaitingForChildren", .{ @intFromPtr(this), this.path }); // `runFromMainThreadImpl` has a `defer this.postRun()` so need to set this to true to skip that - this.deleting_after_waiting_for_children.store(true, .SeqCst); - this.need_to_wait.store(false, .SeqCst); + this.deleting_after_waiting_for_children.store(true, .seq_cst); + this.need_to_wait.store(false, .seq_cst); var do_post_run = true; defer { if (do_post_run) this.postRun(); } - if (this.task_manager.error_signal.load(.SeqCst)) { + if (this.task_manager.error_signal.load(.seq_cst)) { return; } @@ -9479,7 +9483,7 @@ pub const Interpreter = struct { } pub fn enqueue(this: *ShellRmTask, parent_dir: *DirTask, path: [:0]const u8, is_absolute: bool, kind_hint: DirTask.EntryKindHint) void { - if (this.error_signal.load(.SeqCst)) { + if (this.error_signal.load(.seq_cst)) { return; } const new_path = this.join( @@ -9496,7 +9500,7 @@ pub const Interpreter = struct { pub fn enqueueNoJoin(this: *ShellRmTask, parent_task: *DirTask, path: [:0]const u8, kind_hint: DirTask.EntryKindHint) void { defer debug("enqueue: {s} {s}", .{ path, @tagName(kind_hint) }); - if (this.error_signal.load(.SeqCst)) { + if (this.error_signal.load(.seq_cst)) { return; } @@ -9510,7 
+9514,11 @@ pub const Interpreter = struct { .deleted_entries = std.ArrayList(u8).init(bun.default_allocator), .concurrent_task = JSC.EventLoopTask.fromEventLoop(this.event_loop), }; - assert(parent_task.subtask_count.fetchAdd(1, .Monotonic) > 0); + + const count = parent_task.subtask_count.fetchAdd(1, .monotonic); + if (comptime bun.Environment.allow_assert) { + assert(count > 0); + } JSC.WorkPool.schedule(&subtask.task); } @@ -9600,7 +9608,7 @@ pub const Interpreter = struct { return Maybe(void).initErr(Syscall.Error.fromCode(bun.C.E.ISDIR, .TODO).withPath(bun.default_allocator.dupeZ(u8, dir_task.path) catch bun.outOfMemory())); } - const flags = os.O.DIRECTORY | os.O.RDONLY; + const flags = bun.O.DIRECTORY | bun.O.RDONLY; const fd = switch (ShellSyscall.openat(dirfd, path, flags, 0)) { .result => |fd| fd, .err => |e| { @@ -9626,7 +9634,7 @@ pub const Interpreter = struct { } } - if (this.error_signal.load(.SeqCst)) { + if (this.error_signal.load(.seq_cst)) { return Maybe(void).success; } @@ -9647,7 +9655,7 @@ pub const Interpreter = struct { }) |current| : (entry = iterator.next()) { debug("dir({s}) entry({s}, {s})", .{ path, current.name.slice(), @tagName(current.kind) }); // TODO this seems bad maybe better to listen to kqueue/epoll event - if (fastMod(i, 4) == 0 and this.error_signal.load(.SeqCst)) return Maybe(void).success; + if (fastMod(i, 4) == 0 and this.error_signal.load(.seq_cst)) return Maybe(void).success; defer i += 1; switch (current.kind) { @@ -9677,13 +9685,13 @@ pub const Interpreter = struct { } // Need to wait for children to finish - if (dir_task.subtask_count.load(.SeqCst) > 1) { + if (dir_task.subtask_count.load(.seq_cst) > 1) { close_fd = true; - dir_task.need_to_wait.store(true, .SeqCst); + dir_task.need_to_wait.store(true, .seq_cst); return Maybe(void).success; } - if (this.error_signal.load(.SeqCst)) return Maybe(void).success; + if (this.error_signal.load(.seq_cst)) return Maybe(void).success; if (bun.Environment.isWindows) { close_fd 
= false; @@ -9691,7 +9699,7 @@ pub const Interpreter = struct { } debug("[removeEntryDir] remove after children {s}", .{path}); - switch (ShellSyscall.unlinkatWithFlags(this.getcwd(), path, std.os.AT.REMOVEDIR)) { + switch (ShellSyscall.unlinkatWithFlags(this.getcwd(), path, std.posix.AT.REMOVEDIR)) { .result => { switch (this.verboseDeleted(dir_task, path)) { .err => |e| return .{ .err = e }, @@ -9875,7 +9883,7 @@ pub const Interpreter = struct { // If `path` points to a directory, then it is deleted (if empty) or we handle it as a directory // If it's actually a file, we get an error so we don't need to call `stat` to check that. if (this.opts.recursive or this.opts.remove_empty_dirs) { - return switch (ShellSyscall.unlinkatWithFlags(this.getcwd(), path, std.os.AT.REMOVEDIR)) { + return switch (ShellSyscall.unlinkatWithFlags(this.getcwd(), path, std.posix.AT.REMOVEDIR)) { // it was empty, we saved a syscall .result => return this.verboseDeleted(parent_dir_task, path), .err => |e2| { @@ -9927,7 +9935,7 @@ pub const Interpreter = struct { } pub fn workPoolCallback(task: *JSC.WorkPoolTask) void { - var this: *ShellRmTask = @fieldParentPtr(ShellRmTask, "task", task); + var this: *ShellRmTask = @alignCast(@fieldParentPtr("task", task)); this.root_task.runFromThreadPoolImpl(); } @@ -10126,7 +10134,7 @@ pub const Interpreter = struct { } pub fn runFromMainThread(this: *@This()) void { - const yes = @fieldParentPtr(Yes, "task", this); + const yes: *Yes = @fieldParentPtr("task", this); yes.bltn.stdout.enqueue(yes, yes.expletive); yes.bltn.stdout.enqueue(yes, "\n"); @@ -10824,7 +10832,7 @@ pub const Interpreter = struct { return .{ .err = e }; }, }; - return .{ .result = os.S.ISDIR(stat.mode) }; + return .{ .result = bun.S.ISDIR(stat.mode) }; } fn enqueueToEventLoop(this: *ShellCpTask) void { @@ -10846,7 +10854,7 @@ pub const Interpreter = struct { pub fn runFromThreadPool(task: *WorkPoolTask) void { debug("runFromThreadPool", .{}); - var this = @fieldParentPtr(@This(), 
"task", task); + var this: *@This() = @fieldParentPtr("task", task); if (this.runFromThreadPoolImpl()) |e| { this.err = e; this.enqueueToEventLoop(); @@ -11376,12 +11384,11 @@ pub const Interpreter = struct { } pub fn writer(this: *@This()) *IOWriter { - return @fieldParentPtr(IOWriter, "async_deinit", this); + return @alignCast(@fieldParentPtr("async_deinit", this)); } pub fn runFromMainThread(this: *@This()) void { - const ioreader = @fieldParentPtr(IOWriter, "async_deinit", this); - ioreader.__deinit(); + this.writer().__deinit(); } pub fn runFromMainThreadMini(this: *@This(), _: *void) void { @@ -11405,11 +11412,11 @@ pub const Interpreter = struct { } pub fn reader(this: *AsyncDeinitReader) *IOReader { - return @fieldParentPtr(IOReader, "async_deinit", this); + return @alignCast(@fieldParentPtr("async_deinit", this)); } pub fn runFromMainThread(this: *AsyncDeinitReader) void { - const ioreader = @fieldParentPtr(IOReader, "async_deinit", this); + const ioreader: *IOReader = @alignCast(@fieldParentPtr("async_deinit", this)); ioreader.__deinit(); } @@ -12088,7 +12095,7 @@ pub fn ShellTask( pub fn onFinish(this: *@This()) void { debug("onFinish", .{}); if (this.event_loop == .js) { - const ctx = @fieldParentPtr(Ctx, "task", this); + const ctx: *Ctx = @fieldParentPtr("task", this); this.event_loop.js.enqueueTaskConcurrent(this.concurrent_task.js.from(ctx, .manual_deinit)); } else { const ctx = this; @@ -12098,15 +12105,15 @@ pub fn ShellTask( pub fn runFromThreadPool(task: *WorkPoolTask) void { debug("runFromThreadPool", .{}); - var this = @fieldParentPtr(@This(), "task", task); - const ctx = @fieldParentPtr(Ctx, "task", this); + var this: *@This() = @fieldParentPtr("task", task); + const ctx: *Ctx = @fieldParentPtr("task", this); runFromThreadPool_(ctx); this.onFinish(); } pub fn runFromMainThread(this: *@This()) void { debug("runFromJS", .{}); - const ctx = @fieldParentPtr(Ctx, "task", this); + const ctx: *Ctx = @fieldParentPtr("task", this); 
this.ref.unref(this.event_loop); runFromMainThread_(ctx); } @@ -12278,19 +12285,19 @@ const ShellSyscall = struct { fn openat(dir: bun.FileDescriptor, path: [:0]const u8, flags: bun.Mode, perm: bun.Mode) Maybe(bun.FileDescriptor) { if (bun.Environment.isWindows) { - if (flags & os.O.DIRECTORY != 0) { + if (flags & bun.O.DIRECTORY != 0) { if (ResolvePath.Platform.posix.isAbsolute(path[0..path.len])) { var buf: bun.PathBuffer = undefined; const p = switch (getPath(dir, path, &buf)) { .result => |p| p, .err => |e| return .{ .err = e }, }; - return switch (Syscall.openDirAtWindowsA(dir, p, .{ .iterable = true, .no_follow = flags & os.O.NOFOLLOW != 0 })) { + return switch (Syscall.openDirAtWindowsA(dir, p, .{ .iterable = true, .no_follow = flags & bun.O.NOFOLLOW != 0 })) { .result => |fd| bun.sys.toLibUVOwnedFD(fd, .open, .close_on_fail), .err => |e| .{ .err = e.withPath(path) }, }; } - return switch (Syscall.openDirAtWindowsA(dir, path, .{ .iterable = true, .no_follow = flags & os.O.NOFOLLOW != 0 })) { + return switch (Syscall.openDirAtWindowsA(dir, path, .{ .iterable = true, .no_follow = flags & bun.O.NOFOLLOW != 0 })) { .result => |fd| bun.sys.toLibUVOwnedFD(fd, .open, .close_on_fail), .err => |e| .{ .err = e.withPath(path) }, }; @@ -12525,7 +12532,7 @@ pub fn FlagParser(comptime Opts: type) type { pub fn isPollable(fd: bun.FileDescriptor, mode: bun.Mode) bool { if (bun.Environment.isWindows) return false; - if (bun.Environment.isLinux) return os.S.ISFIFO(mode) or os.S.ISSOCK(mode) or os.isatty(fd.int()); + if (bun.Environment.isLinux) return posix.S.ISFIFO(mode) or posix.S.ISSOCK(mode) or posix.isatty(fd.int()); // macos allows regular files to be pollable: ISREG(mode) == true - return os.S.ISFIFO(mode) or os.S.ISSOCK(mode) or os.isatty(fd.int()) or os.S.ISREG(mode); + return posix.S.ISFIFO(mode) or posix.S.ISSOCK(mode) or posix.isatty(fd.int()) or posix.S.ISREG(mode); } diff --git a/src/shell/shell.zig b/src/shell/shell.zig index ca1cd41fad..a8358bfe6f 100644 --- 
a/src/shell/shell.zig +++ b/src/shell/shell.zig @@ -575,7 +575,8 @@ pub const AST = struct { len += 1; } } - break :brk &ret; + const final = ret[0..].*; + break :brk &final; }; const BINARY_OPS: []const std.builtin.Type.EnumField = brk: { @@ -597,7 +598,8 @@ pub const AST = struct { len += 1; } } - break :brk &ret; + const final = ret[0..].*; + break :brk &final; }; }; @@ -769,8 +771,8 @@ pub const AST = struct { } pub fn toFlags(this: RedirectFlags) bun.Mode { - const read_write_flags: bun.Mode = if (this.stdin) std.os.O.RDONLY else std.os.O.WRONLY | std.os.O.CREAT; - const extra: bun.Mode = if (this.append) std.os.O.APPEND else std.os.O.TRUNC; + const read_write_flags: bun.Mode = if (this.stdin) bun.O.RDONLY else bun.O.WRONLY | bun.O.CREAT; + const extra: bun.Mode = if (this.append) bun.O.APPEND else bun.O.TRUNC; const final_flags: bun.Mode = if (this.stdin) read_write_flags else extra | read_write_flags; return final_flags; } diff --git a/src/shell/subproc.zig b/src/shell/subproc.zig index bcf0fdfd49..b8583e4fef 100644 --- a/src/shell/subproc.zig +++ b/src/shell/subproc.zig @@ -269,7 +269,7 @@ pub const ShellSubprocess = struct { } pub fn finalize(this: *Writable) void { - const subprocess = @fieldParentPtr(Subprocess, "stdin", this); + const subprocess: *Subprocess = @fieldParentPtr("stdin", this); if (subprocess.this_jsvalue != .zero) { if (JSC.Codegen.JSSubprocess.stdinGetCached(subprocess.this_jsvalue)) |existing_value| { JSC.WebCore.FileSink.JSSink.setDestroyCallback(existing_value, 0); @@ -954,8 +954,6 @@ pub const ShellSubprocess = struct { } } } - - const os = std.os; }; const WaiterThread = bun.spawn.WaiterThread; @@ -1048,7 +1046,7 @@ pub const PipeReader = struct { } pub fn parent(this: *CapturedWriter) *PipeReader { - return @fieldParentPtr(PipeReader, "captured_writer", this); + return @fieldParentPtr("captured_writer", this); } pub fn eventLoop(this: *CapturedWriter) JSC.EventLoopHandle { diff --git a/src/shell/util.zig b/src/shell/util.zig index 
a474fafcf7..48c5b5577b 100644 --- a/src/shell/util.zig +++ b/src/shell/util.zig @@ -12,7 +12,7 @@ const JSGlobalObject = JSC.JSGlobalObject; const Which = @import("../which.zig"); const Output = bun.Output; const PosixSpawn = @import("../bun.js/api/bun/spawn.zig").PosixSpawn; -const os = std.os; +const posix = std.posix; pub const OutKind = enum { stdout, @@ -27,4 +27,4 @@ pub const OutKind = enum { pub const Stdio = bun.spawn.Stdio; -pub const WatchFd = if (Environment.isLinux) std.os.fd_t else i32; +pub const WatchFd = if (Environment.isLinux) posix.fd_t else i32; diff --git a/src/sourcemap/sourcemap.zig b/src/sourcemap/sourcemap.zig index 2beba9bfda..6dde80eaef 100644 --- a/src/sourcemap/sourcemap.zig +++ b/src/sourcemap/sourcemap.zig @@ -681,14 +681,14 @@ pub const Mapping = struct { /// bother loading source code into memory. Most uses of source maps only care /// about filenames and source mappings, and we should avoid loading contents /// whenever possible. -pub const SourceContentHandling = enum { +pub const SourceContentHandling = enum(u1) { no_source_contents, source_contents, }; /// For some sourcemap loading code, this enum is used as a hint if we already /// know if the sourcemap is located on disk or inline in the source code. 
-pub const SourceMapLoadHint = enum { +pub const SourceMapLoadHint = enum(u2) { none, is_inline_map, is_external_map, diff --git a/src/string_immutable.zig b/src/string_immutable.zig index 1bb8f90640..ec37a6934e 100644 --- a/src/string_immutable.zig +++ b/src/string_immutable.zig @@ -8,6 +8,7 @@ const bun = @import("root").bun; const log = bun.Output.scoped(.STR, true); const js_lexer = @import("./js_lexer.zig"); const grapheme = @import("./grapheme.zig"); +const JSC = bun.JSC; pub const Encoding = enum { ascii, @@ -46,68 +47,37 @@ pub inline fn removeLeadingDotSlash(slice: []const u8) []const u8 { return slice; } -pub inline fn w(comptime str: []const u8) [:0]const u16 { - if (!@inComptime()) @compileError("strings.w() must be called in a comptime context"); - comptime var output: [str.len + 1]u16 = undefined; +// TODO: remove this +pub const w = toUTF16Literal; - for (str, 0..) |c, i| { - output[i] = c; - } - output[str.len] = 0; - - const Static = struct { - pub const literal: [:0]const u16 = output[0 .. output.len - 1 :0]; - }; - return Static.literal; -} - -pub fn toUTF16Literal(comptime str: []const u8) []const u16 { +pub fn toUTF16Literal(comptime str: []const u8) [:0]const u16 { return comptime literal(u16, str); } -pub inline fn literal(comptime T: type, comptime str: string) []const T { - if (!@inComptime()) @compileError("strings.literal() should be called in a comptime context"); - comptime var output: [str.len]T = undefined; - - for (str, 0..) 
|c, i| { - // TODO(dylan-conway): should we check for non-ascii characters like JSC does with operator""_s - output[i] = c; - } - - const Static = struct { - pub const literal: []const T = output[0..]; +pub fn literal(comptime T: type, comptime str: []const u8) *const [literalLength(T, str):0]T { + if (!@inComptime()) @compileError("strings.literal() must be called in a comptime context"); + return comptime switch (T) { + u8 => brk: { + var data: [str.len:0]u8 = undefined; + @memcpy(&data, str); + const final = data[0..].*; + break :brk &final; + }, + u16 => return std.unicode.utf8ToUtf16LeStringLiteral(str), + else => @compileError("unsupported type " ++ @typeName(T) ++ " in strings.literal() call."), }; - return Static.literal; } -pub inline fn literalBuf(comptime T: type, comptime str: string) [str.len]T { - if (!@inComptime()) @compileError("strings.literalBuf() should be called in a comptime context"); - comptime var output: [str.len]T = undefined; - - for (str, 0..) |c, i| { - // TODO(dylan-conway): should we check for non-ascii characters like JSC does with operator""_s - output[i] = c; - } - - const Static = struct { - pub const literal: [str.len]T = output; +fn literalLength(comptime T: type, comptime str: string) usize { + return comptime switch (T) { + u8 => str.len, + u16 => std.unicode.calcUtf16LeLen(str) catch unreachable, + else => 0, // let other errors report first }; - return Static.literal; } -pub inline fn toUTF16LiteralZ(comptime str: []const u8) [:0]const u16 { - comptime var output: [str.len + 1]u16 = undefined; - - for (str, 0..) 
|c, i| { - output[i] = c; - } - output[str.len] = 0; - - const Static = struct { - pub const literal: [:0]const u16 = output[0..str.len :0]; - }; - return Static.literal; -} +// TODO: remove this +pub const toUTF16LiteralZ = toUTF16Literal; pub const OptionalUsize = std.meta.Int(.unsigned, @bitSizeOf(usize) - 1); pub fn indexOfAny(slice: string, comptime str: anytype) ?OptionalUsize { @@ -801,7 +771,7 @@ pub fn hasPrefixComptimeUTF16(self: []const u16, comptime alt: []const u8) bool pub fn hasPrefixComptimeType(comptime T: type, self: []const T, comptime alt: anytype) bool { const rhs = comptime switch (T) { u8 => alt, - u16 => switch (std.meta.Child(@TypeOf(alt))) { + u16 => switch (bun.meta.Item(@TypeOf(alt))) { u16 => alt, else => w(alt), }, @@ -2369,8 +2339,6 @@ pub fn elementLengthLatin1IntoUTF8(comptime Type: type, latin1_: Type) usize { return input_len + total_non_ascii_count; } -const JSC = bun.JSC; - pub fn copyLatin1IntoUTF16(comptime Buffer: type, buf_: Buffer, comptime Type: type, latin1_: Type) EncodeIntoResult { var buf = buf_; var latin1 = latin1_; @@ -4913,27 +4881,27 @@ pub fn isIPAddress(input: []const u8) bool { var max_ip_address_buffer: [512]u8 = undefined; if (input.len > max_ip_address_buffer.len) return false; - var sockaddr: std.os.sockaddr = undefined; + var sockaddr: std.posix.sockaddr = undefined; @memset(std.mem.asBytes(&sockaddr), 0); @memcpy(max_ip_address_buffer[0..input.len], input); max_ip_address_buffer[input.len] = 0; const ip_addr_str: [:0]const u8 = max_ip_address_buffer[0..input.len :0]; - return bun.c_ares.ares_inet_pton(std.os.AF.INET, ip_addr_str.ptr, &sockaddr) > 0 or bun.c_ares.ares_inet_pton(std.os.AF.INET6, ip_addr_str.ptr, &sockaddr) > 0; + return bun.c_ares.ares_inet_pton(std.posix.AF.INET, ip_addr_str.ptr, &sockaddr) > 0 or bun.c_ares.ares_inet_pton(std.posix.AF.INET6, ip_addr_str.ptr, &sockaddr) > 0; } pub fn isIPV6Address(input: []const u8) bool { var max_ip_address_buffer: [512]u8 = undefined; if (input.len > 
max_ip_address_buffer.len) return false; - var sockaddr: std.os.sockaddr = undefined; + var sockaddr: std.posix.sockaddr = undefined; @memset(std.mem.asBytes(&sockaddr), 0); @memcpy(max_ip_address_buffer[0..input.len], input); max_ip_address_buffer[input.len] = 0; const ip_addr_str: [:0]const u8 = max_ip_address_buffer[0..input.len :0]; - return bun.c_ares.ares_inet_pton(std.os.AF.INET6, ip_addr_str.ptr, &sockaddr) > 0; + return bun.c_ares.ares_inet_pton(std.posix.AF.INET6, ip_addr_str.ptr, &sockaddr) > 0; } pub fn cloneNormalizingSeparators( diff --git a/src/string_mutable.zig b/src/string_mutable.zig index bed5d7461a..02ac07cbb3 100644 --- a/src/string_mutable.zig +++ b/src/string_mutable.zig @@ -286,8 +286,8 @@ pub const MutableString = struct { return std.mem.eql(u8, self.list.items, other); } - pub fn toSocketBuffers(self: *MutableString, comptime count: usize, ranges: anytype) [count]std.os.iovec_const { - var buffers: [count]std.os.iovec_const = undefined; + pub fn toSocketBuffers(self: *MutableString, comptime count: usize, ranges: anytype) [count]std.posix.iovec_const { + var buffers: [count]std.posix.iovec_const = undefined; inline for (&buffers, ranges) |*b, r| { b.* = .{ .iov_base = self.list.items[r[0]..r[1]].ptr, diff --git a/src/symbols.def b/src/symbols.def new file mode 100644 index 0000000000..1aa19a0cbf --- /dev/null +++ b/src/symbols.def @@ -0,0 +1,569 @@ +; To update this list, use: +; +; dumpbin /symbols .\src\deps\libuv.lib | Where-Object { $_.Contains('| uv_') } | foreach-object { (($_ -split "\|")[1] -split " ")[1] } | Where-Object { $_ -match 'uv_[a-z]' } | ForEach-Object { " ${_}" } > out.txt; dumpbin /symbols .\build\CMakeFiles\bun-zig.o | Where-Object { $_.Contains('| napi_') } | foreach-object { (($_ -split "\|")[1] -split " ")[1] } | ForEach-Object { " ${_}" } >> out.txt +; -> out.txt +; +; the above will include extra symbols that do not exist, remember +; to double check a local build +EXPORTS + uv_fs_poll_init + uv_fs_poll_start + 
uv_fs_poll_stop + uv_fs_poll_getpath + uv_is_active + uv_now + uv_timer_init + uv_fs_stat + uv_fs_req_cleanup + uv_close + uv_timer_start + uv_inet_ntop + uv_inet_pton + uv_random + uv_barrier_init + uv_barrier_wait + uv_barrier_destroy + uv_mutex_init + uv_cond_init + uv_mutex_destroy + uv_mutex_lock + uv_cond_wait + uv_cond_broadcast + uv_mutex_unlock + uv_cond_destroy + uv_queue_work + uv_cancel + uv_mutex_lock + uv_cond_signal + uv_mutex_unlock + uv_thread_join + uv_mutex_destroy + uv_cond_destroy + uv_once + uv_cond_init + uv_mutex_init + uv_sem_init + uv_thread_create_ex + uv_sem_wait + uv_sem_destroy + uv_async_send + uv_sem_post + uv_cond_wait + uv_timer_init + uv_timer_start + uv_timer_stop + uv_timer_again + uv_timer_set_repeat + uv_timer_get_repeat + uv_timer_get_due_in + uv_replace_allocator + uv_os_free_passwd + uv_os_free_group + uv_handle_size + uv_req_size + uv_loop_size + uv_buf_init + uv_err_name_r + uv_err_name + uv_strerror_r + uv_strerror + uv_ip4_addr + uv_ip6_addr + uv_ip4_name + uv_ip6_name + uv_ip_name + uv_tcp_bind + uv_udp_init_ex + uv_udp_init + uv_udp_bind + uv_tcp_connect + uv_udp_connect + uv_udp_send + uv_udp_try_send + uv_udp_recv_start + uv_udp_recv_stop + uv_walk + uv_print_all_handles + uv_print_active_handles + uv_ref + uv_unref + uv_has_ref + uv_stop + uv_now + uv_recv_buffer_size + uv_send_buffer_size + uv_fs_event_getpath + uv_fs_scandir_next + uv_loop_configure + uv_default_loop + uv_loop_new + uv_loop_close + uv_loop_delete + uv_read_start + uv_os_free_environ + uv_free_cpu_info + uv_library_shutdown + uv_metrics_info + uv_metrics_idle_time + uv_inet_pton + uv_inet_ntop + uv_udp_getpeername + uv_loop_init + uv_hrtime + uv_mutex_lock + uv_mutex_unlock + uv_handle_type_name + uv_handle_get_type + uv_handle_get_data + uv_handle_get_loop + uv_handle_set_data + uv_req_type_name + uv_req_get_type + uv_req_get_data + uv_req_set_data + uv_stream_get_write_queue_size + uv_udp_get_send_queue_size + uv_udp_get_send_queue_count + 
uv_process_get_pid + uv_fs_get_type + uv_fs_get_result + uv_fs_get_ptr + uv_fs_get_path + uv_fs_get_statbuf + uv_loop_get_data + uv_loop_set_data + uv_version + uv_version_string + uv_async_init + uv_async_send + uv_fatal_error + uv_loop_init + uv_update_time + uv_backend_fd + uv_loop_fork + uv_loop_alive + uv_backend_timeout + uv_run + uv_fileno + uv_cpumask_size + uv_mutex_lock + uv_mutex_unlock + uv_once + uv_mutex_init + uv_async_init + uv_mutex_destroy + uv_translate_sys_error + uv_fatal_error + uv_is_closing + uv_dlopen + uv_dlclose + uv_dlsym + uv_dlerror + uv_fatal_error + uv_translate_sys_error + uv_fs_req_cleanup + uv_fs_open + uv_fs_close + uv_fs_read + uv_fs_write + uv_fs_unlink + uv_fs_mkdir + uv_fs_mkdtemp + uv_fs_mkstemp + uv_fs_rmdir + uv_fs_scandir + uv_fs_opendir + uv_fs_readdir + uv_fs_closedir + uv_fs_link + uv_fs_symlink + uv_fs_readlink + uv_fs_realpath + uv_fs_chown + uv_fs_fchown + uv_fs_lchown + uv_fs_stat + uv_fs_lstat + uv_fs_fstat + uv_fs_rename + uv_fs_fsync + uv_fs_fdatasync + uv_fs_ftruncate + uv_fs_copyfile + uv_fs_sendfile + uv_fs_access + uv_fs_chmod + uv_fs_fchmod + uv_fs_utime + uv_fs_futime + uv_fs_lutime + uv_fs_statfs + uv_fs_get_system_error + uv_mutex_init + uv_fatal_error + uv_translate_sys_error + uv_mutex_lock + uv_mutex_unlock + uv_guess_handle + uv_fs_event_init + uv_fs_event_start + uv_fs_event_stop + uv_translate_sys_error + uv_fatal_error + uv_directory_watcher_buffer_size + uv_freeaddrinfo + uv_getaddrinfo + uv_if_indextoname + uv_if_indextoiid + uv_translate_sys_error + uv_getnameinfo + uv_translate_sys_error + uv_guess_handle + uv_is_active + uv_close + uv_is_closing + uv_get_osfhandle + uv_open_osfhandle + uv_check_stop + uv_idle_stop + uv_prepare_stop + uv_timer_stop + uv_prepare_init + uv_prepare_start + uv_prepare_stop + uv_check_init + uv_check_start + uv_check_stop + uv_idle_init + uv_idle_start + uv_idle_stop + uv_pipe_init + uv_pipe + uv_pipe_pending_instances + uv_pipe_bind + uv_pipe_bind2 + 
uv_pipe_connect + uv_pipe_connect2 + uv_pipe_open + uv_pipe_pending_count + uv_pipe_getsockname + uv_pipe_getpeername + uv_pipe_pending_type + uv_pipe_chmod + uv_fatal_error + uv_translate_sys_error + uv_close + uv_timer_start + uv_buf_init + uv_timer_stop + uv_read_stop + uv_timer_init + uv_unref + uv_once + uv_thread_create + uv_thread_create_ex + uv_thread_setaffinity + uv_thread_getaffinity + uv_thread_getcpu + uv_thread_self + uv_key_get + uv_key_set + uv_thread_join + uv_thread_equal + uv_mutex_init + uv_mutex_init_recursive + uv_mutex_destroy + uv_mutex_lock + uv_mutex_trylock + uv_mutex_unlock + uv_rwlock_init + uv_rwlock_destroy + uv_rwlock_rdlock + uv_rwlock_tryrdlock + uv_rwlock_rdunlock + uv_rwlock_wrlock + uv_rwlock_trywrlock + uv_rwlock_wrunlock + uv_sem_init + uv_sem_destroy + uv_sem_post + uv_sem_wait + uv_sem_trywait + uv_cond_init + uv_cond_destroy + uv_cond_signal + uv_cond_broadcast + uv_cond_wait + uv_cond_timedwait + uv_key_create + uv_key_delete + uv_fatal_error + uv_cpumask_size + uv_translate_sys_error + uv_poll_init + uv_poll_init_socket + uv_poll_start + uv_poll_stop + uv_translate_sys_error + uv_once + uv_fatal_error + uv_spawn + uv_process_kill + uv_kill + uv_fatal_error + uv_translate_sys_error + uv_once + uv_disable_stdio_inheritance + uv_signal_init + uv_signal_stop + uv_signal_start + uv_signal_start_oneshot + uv_fatal_error + uv_listen + uv_accept + uv_read_stop + uv_write + uv_write2 + uv_try_write + uv_try_write2 + uv_shutdown + uv_is_readable + uv_is_writable + uv_stream_set_blocking + uv_translate_sys_error + uv_tcp_init_ex + uv_tcp_init + uv_tcp_close_reset + uv_tcp_getsockname + uv_tcp_getpeername + uv_tcp_nodelay + uv_tcp_keepalive + uv_tcp_simultaneous_accepts + uv_tcp_open + uv_socketpair + uv_translate_sys_error + uv_tcp_non_ifs_lsp_ipv6 + uv_tcp_non_ifs_lsp_ipv4 + uv_close + uv_addr_ip4_any_ + uv_fatal_error + uv_buf_init + uv_read_stop + uv_addr_ip6_any_ + uv_simultaneous_server_accepts + uv_tty_init + uv_tty_set_mode + 
uv_tty_get_winsize + uv_process_tty_read_raw_req + uv_process_tty_read_line_req + uv_tty_reset_mode + uv_tty_set_vterm_state + uv_tty_get_vterm_state + uv_sem_init + uv_mutex_init + uv_sem_wait + uv_sem_post + uv_translate_sys_error + uv_buf_init + uv_mutex_lock + uv_mutex_unlock + uv_fatal_error + uv_udp_getpeername + uv_udp_getsockname + uv_udp_using_recvmmsg + uv_udp_set_membership + uv_udp_set_source_membership + uv_udp_set_multicast_interface + uv_udp_set_broadcast + uv_udp_open + uv_udp_set_ttl + uv_udp_set_multicast_ttl + uv_udp_set_multicast_loop + uv_translate_sys_error + uv_udp_recv_stop + uv_addr_ip4_any_ + uv_buf_init + uv_addr_ip6_any_ + uv_ip6_addr + uv_ip4_addr + uv_inet_pton + uv_exepath + uv_cwd + uv_chdir + uv_loadavg + uv_get_free_memory + uv_get_total_memory + uv_get_constrained_memory + uv_get_available_memory + uv_os_getpid + uv_os_getppid + uv_setup_args + uv_set_process_title + uv_get_process_title + uv_clock_gettime + uv_hrtime + uv_resident_set_memory + uv_uptime + uv_available_parallelism + uv_cpu_info + uv_interface_addresses + uv_free_interface_addresses + uv_getrusage + uv_os_homedir + uv_os_getenv + uv_os_get_passwd + uv_os_tmpdir + uv_os_get_passwd2 + uv_os_get_group + uv_os_environ + uv_os_setenv + uv_os_unsetenv + uv_os_gethostname + uv_os_getpriority + uv_os_setpriority + uv_os_uname + uv_gettimeofday + uv_sleep + uv_fatal_error + uv_translate_sys_error + uv_os_free_passwd + uv_fatal_error + uv_addr_ip4_any_ + uv_ip4_addr + uv_addr_ip6_any_ + uv_ip6_addr + uv_tcp_non_ifs_lsp_ipv4 + uv_tcp_non_ifs_lsp_ipv6 + uv_fatal_error + napi_get_undefined + napi_get_null + napi_get_boolean + napi_create_array + napi_create_array_with_length + napi_create_int32 + napi_create_uint32 + napi_create_int64 + napi_create_string_latin1 + napi_create_string_utf8 + napi_create_string_utf16 + napi_get_value_int32 + napi_get_value_uint32 + napi_get_value_int64 + napi_get_value_bool + napi_get_value_string_latin1 + napi_get_value_string_utf16 + 
napi_coerce_to_bool + napi_coerce_to_number + napi_coerce_to_object + napi_get_prototype + napi_set_element + napi_has_element + napi_is_array + napi_get_array_length + napi_strict_equals + napi_new_instance + napi_instanceof + napi_open_handle_scope + napi_close_handle_scope + napi_async_init + napi_async_destroy + napi_make_callback + napi_open_escapable_handle_scope + napi_close_escapable_handle_scope + napi_escape_handle + napi_type_tag_object + napi_check_object_type_tag + napi_open_callback_scope + napi_close_callback_scope + napi_is_error + napi_is_arraybuffer + napi_get_arraybuffer_info + napi_is_typedarray + napi_create_typedarray + napi_get_typedarray_info + napi_is_dataview + napi_get_dataview_info + napi_get_version + napi_create_promise + napi_resolve_deferred + napi_reject_deferred + napi_is_promise + napi_create_date + napi_is_date + napi_create_bigint_int64 + napi_create_bigint_uint64 + napi_get_value_bigint_int64 + napi_get_value_bigint_uint64 + napi_fatal_error + napi_create_buffer + napi_create_buffer_copy + napi_is_buffer + napi_get_buffer_info + napi_create_async_work + napi_delete_async_work + napi_queue_async_work + napi_cancel_async_work + napi_get_node_version + napi_get_uv_event_loop + napi_add_env_cleanup_hook + napi_remove_env_cleanup_hook + napi_create_threadsafe_function + napi_get_threadsafe_function_context + napi_call_threadsafe_function + napi_acquire_threadsafe_function + napi_release_threadsafe_function + napi_unref_threadsafe_function + napi_ref_threadsafe_function + napi_add_async_cleanup_hook + napi_remove_async_cleanup_hook + napi_get_last_error_info + napi_get_global + napi_create_double + napi_create_symbol + napi_create_error + napi_create_type_error + napi_create_range_error + napi_typeof + napi_get_value_double + napi_get_value_string_utf8 + napi_get_element + napi_delete_element + napi_define_properties + napi_call_function + napi_get_cb_info + napi_get_new_target + napi_define_class + napi_wrap + napi_unwrap + 
napi_remove_wrap + napi_create_object + napi_create_external + napi_get_value_external + napi_create_reference + napi_delete_reference + napi_reference_ref + napi_reference_unref + napi_get_reference_value + napi_get_reference_value_internal + napi_throw + napi_throw_error + napi_throw_type_error + napi_throw_range_error + napi_is_exception_pending + napi_get_and_clear_last_exception + napi_create_arraybuffer + napi_create_external_arraybuffer + napi_create_dataview + napi_run_script + napi_adjust_external_memory + napi_get_date_value + napi_add_finalizer + napi_create_bigint_words + napi_get_value_bigint_words + napi_get_all_property_names + napi_set_instance_data + napi_get_instance_data + napi_detach_arraybuffer + napi_is_detached_arraybuffer + napi_create_external_buffer + napi_fatal_exception diff --git a/src/sync.zig b/src/sync.zig index b8330cfbe9..b71040f6fa 100644 --- a/src/sync.zig +++ b/src/sync.zig @@ -1,5 +1,5 @@ const std = @import("std"); -const system = if (bun.Environment.isWindows) std.os.windows else std.os.system; +const system = if (bun.Environment.isWindows) std.os.windows else std.posix.system; const bun = @import("root").bun; // https://gist.github.com/kprotty/0d2dc3da4840341d6ff361b27bdac7dc @@ -33,7 +33,7 @@ pub const ThreadPool = struct { for (&self.workers) |*worker| { try worker.init(self); - @atomicStore(usize, &self.spawned, self.spawned + 1, .SeqCst); + @atomicStore(usize, &self.spawned, self.spawned + 1, .seq_cst); } } @@ -60,8 +60,8 @@ pub const ThreadPool = struct { run_node: RunNode = .{ .data = .{ .runFn = runFn } }, fn runFn(runnable: *Runnable) void { - const run_node = @fieldParentPtr(RunNode, "data", runnable); - const closure = @fieldParentPtr(@This(), "run_node", run_node); + const run_node: *RunNode = @fieldParentPtr("data", runnable); + const closure: *@This() = @fieldParentPtr("run_node", run_node); _ = @call(.auto, func, closure.func_args); closure.allocator.destroy(closure); } @@ -106,7 +106,7 @@ pub const ThreadPool 
= struct { }; fn wait(self: *ThreadPool) error{Shutdown}!void { - var state = State.unpack(@atomicLoad(usize, &self.state, .SeqCst)); + var state = State.unpack(@atomicLoad(usize, &self.state, .seq_cst)); while (true) { if (state.is_shutdown) return error.Shutdown; @@ -123,8 +123,8 @@ pub const ThreadPool = struct { &self.state, state.pack(), new_state.pack(), - .SeqCst, - .SeqCst, + .seq_cst, + .seq_cst, )) |updated| { state = State.unpack(updated); continue; @@ -137,7 +137,7 @@ pub const ThreadPool = struct { } fn notify(self: *ThreadPool) void { - var state = State.unpack(@atomicLoad(usize, &self.state, .SeqCst)); + var state = State.unpack(@atomicLoad(usize, &self.state, .seq_cst)); while (true) { if (state.is_shutdown) return; @@ -156,8 +156,8 @@ pub const ThreadPool = struct { &self.state, state.pack(), new_state.pack(), - .SeqCst, - .SeqCst, + .seq_cst, + .seq_cst, )) |updated| { state = State.unpack(updated); continue; @@ -175,7 +175,7 @@ pub const ThreadPool = struct { &self.state, .Xchg, (State{ .is_shutdown = true }).pack(), - .SeqCst, + .seq_cst, )); while (state.idle_workers > 0) : (state.idle_workers -= 1) @@ -258,7 +258,7 @@ pub const ThreadPool = struct { if (self.steal(pool, rand, .unfair)) |run_node| { return run_node; } else { - std.os.sched_yield() catch spinLoopHint(); + std.posix.sched_yield() catch spinLoopHint(); } } @@ -269,7 +269,7 @@ pub const ThreadPool = struct { } fn steal(self: *Worker, pool: *ThreadPool, rand: *std.rand.Random, mode: anytype) ?*RunNode { - const spawned = @atomicLoad(usize, &pool.spawned, .SeqCst); + const spawned = @atomicLoad(usize, &pool.spawned, .seq_cst); if (spawned < 2) return null; @@ -316,7 +316,7 @@ pub const ThreadPool = struct { defer self.mutex.unlock(); self.list.prepend(node); - @atomicStore(usize, &self.size, self.size + 1, .SeqCst); + @atomicStore(usize, &self.size, self.size + 1, .seq_cst); } fn pop(self: *Queue) ?*List.Node { @@ -331,7 +331,7 @@ pub const ThreadPool = struct { } fn popFrom(self: 
*Queue, side: enum { head, tail }) ?*RunNode { - if (@atomicLoad(usize, &self.size, .SeqCst) == 0) + if (@atomicLoad(usize, &self.size, .seq_cst) == 0) return null; self.mutex.lock(); @@ -344,7 +344,7 @@ pub const ThreadPool = struct { }; if (run_node != null) - @atomicStore(usize, &self.size, self.size - 1, .SeqCst); + @atomicStore(usize, &self.size, self.size - 1, .seq_cst); return run_node; } @@ -534,7 +534,7 @@ pub const RwLock = if (@import("builtin").os.tag != .windows and @import("builti pub fn deinit(self: *RwLock) void { const safe_rc = switch (@import("builtin").os.tag) { - .dragonfly, .netbsd => std.os.EAGAIN, + .dragonfly, .netbsd => std.posix.EAGAIN, else => 0, }; @@ -643,12 +643,12 @@ pub const RwLock = if (@import("builtin").os.tag != .windows and @import("builti else => @compileError("pthread_rwlock_t not implemented for this platform"), }; - extern "c" fn pthread_rwlock_destroy(p: *pthread_rwlock_t) callconv(.C) std.os.E; - extern "c" fn pthread_rwlock_rdlock(p: *pthread_rwlock_t) callconv(.C) std.os.E; - extern "c" fn pthread_rwlock_wrlock(p: *pthread_rwlock_t) callconv(.C) std.os.E; - extern "c" fn pthread_rwlock_tryrdlock(p: *pthread_rwlock_t) callconv(.C) std.os.E; - extern "c" fn pthread_rwlock_trywrlock(p: *pthread_rwlock_t) callconv(.C) std.os.E; - extern "c" fn pthread_rwlock_unlock(p: *pthread_rwlock_t) callconv(.C) std.os.E; + extern "c" fn pthread_rwlock_destroy(p: *pthread_rwlock_t) callconv(.C) std.posix.E; + extern "c" fn pthread_rwlock_rdlock(p: *pthread_rwlock_t) callconv(.C) std.posix.E; + extern "c" fn pthread_rwlock_wrlock(p: *pthread_rwlock_t) callconv(.C) std.posix.E; + extern "c" fn pthread_rwlock_tryrdlock(p: *pthread_rwlock_t) callconv(.C) std.posix.E; + extern "c" fn pthread_rwlock_trywrlock(p: *pthread_rwlock_t) callconv(.C) std.posix.E; + extern "c" fn pthread_rwlock_unlock(p: *pthread_rwlock_t) callconv(.C) std.posix.E; } else struct { @@ -680,9 +680,9 @@ else pub fn tryLock(self: *RwLock) bool { if 
(self.mutex.tryLock()) { - const state = @atomicLoad(usize, &self.state, .SeqCst); + const state = @atomicLoad(usize, &self.state, .seq_cst); if (state & READER_MASK == 0) { - _ = @atomicRmw(usize, &self.state, .Or, IS_WRITING, .SeqCst); + _ = @atomicRmw(usize, &self.state, .Or, IS_WRITING, .seq_cst); return true; } @@ -693,34 +693,34 @@ else } pub fn lock(self: *RwLock) void { - _ = @atomicRmw(usize, &self.state, .Add, WRITER, .SeqCst); + _ = @atomicRmw(usize, &self.state, .Add, WRITER, .seq_cst); self.mutex.lock(); - const state = @atomicRmw(usize, &self.state, .Or, IS_WRITING, .SeqCst); + const state = @atomicRmw(usize, &self.state, .Or, IS_WRITING, .seq_cst); if (state & READER_MASK != 0) self.semaphore.wait(); } pub fn unlock(self: *RwLock) void { - _ = @atomicRmw(usize, &self.state, .And, ~IS_WRITING, .SeqCst); + _ = @atomicRmw(usize, &self.state, .And, ~IS_WRITING, .seq_cst); self.mutex.unlock(); } pub fn tryLockShared(self: *RwLock) bool { - const state = @atomicLoad(usize, &self.state, .SeqCst); + const state = @atomicLoad(usize, &self.state, .seq_cst); if (state & (IS_WRITING | WRITER_MASK) == 0) { _ = @cmpxchgStrong( usize, &self.state, state, state + READER, - .SeqCst, - .SeqCst, + .seq_cst, + .seq_cst, ) orelse return true; } if (self.mutex.tryLock()) { - _ = @atomicRmw(usize, &self.state, .Add, READER, .SeqCst); + _ = @atomicRmw(usize, &self.state, .Add, READER, .seq_cst); self.mutex.unlock(); return true; } @@ -729,25 +729,25 @@ else } pub fn lockShared(self: *RwLock) void { - var state = @atomicLoad(usize, &self.state, .SeqCst); + var state = @atomicLoad(usize, &self.state, .seq_cst); while (state & (IS_WRITING | WRITER_MASK) == 0) { state = @cmpxchgWeak( usize, &self.state, state, state + READER, - .SeqCst, - .SeqCst, + .seq_cst, + .seq_cst, ) orelse return; } self.mutex.lock(); - _ = @atomicRmw(usize, &self.state, .Add, READER, .SeqCst); + _ = @atomicRmw(usize, &self.state, .Add, READER, .seq_cst); self.mutex.unlock(); } pub fn unlockShared(self: 
*RwLock) void { - const state = @atomicRmw(usize, &self.state, .Sub, READER, .SeqCst); + const state = @atomicRmw(usize, &self.state, .Sub, READER, .seq_cst); if ((state & READER_MASK == READER) and (state & IS_WRITING != 0)) self.semaphore.post(); @@ -883,7 +883,7 @@ else if (@import("builtin").link_libc) pub fn deinit(self: *Mutex) void { const safe_rc = switch (@import("builtin").os.tag) { - .dragonfly, .netbsd => std.os.EAGAIN, + .dragonfly, .netbsd => std.posix.EAGAIN, else => 0, }; @@ -933,13 +933,13 @@ else if (@import("builtin").os.tag == .linux) &self.state, .unlocked, .locked, - .Acquire, - .Monotonic, + .acquire, + .monotonic, ) == null; } pub fn lock(self: *Mutex) void { - switch (@atomicRmw(State, &self.state, .Xchg, .locked, .Acquire)) { + switch (@atomicRmw(State, &self.state, .Xchg, .locked, .acquire)) { .unlocked => {}, else => |s| self.lockSlow(s), } @@ -956,8 +956,8 @@ else if (@import("builtin").os.tag == .linux) &self.state, .unlocked, new_state, - .Acquire, - .Monotonic, + .acquire, + .monotonic, ) orelse return; switch (state) { @@ -971,7 +971,7 @@ else if (@import("builtin").os.tag == .linux) } new_state = .waiting; - switch (@atomicRmw(State, &self.state, .Xchg, new_state, .Acquire)) { + switch (@atomicRmw(State, &self.state, .Xchg, new_state, .acquire)) { .unlocked => return, else => {}, } @@ -984,7 +984,7 @@ else if (@import("builtin").os.tag == .linux) } pub fn unlock(self: *Mutex) void { - switch (@atomicRmw(State, &self.state, .Xchg, .unlocked, .Release)) { + switch (@atomicRmw(State, &self.state, .Xchg, .unlocked, .release)) { .unlocked => unreachable, .locked => {}, .waiting => self.unlockSlow(), @@ -1010,7 +1010,7 @@ else } pub fn tryLock(self: *Mutex) bool { - return @atomicRmw(bool, &self.is_locked, .Xchg, true, .Acquire) == false; + return @atomicRmw(bool, &self.is_locked, .Xchg, true, .acquire) == false; } pub fn lock(self: *Mutex) void { @@ -1019,7 +1019,7 @@ else } pub fn unlock(self: *Mutex) void { - @atomicStore(bool, 
&self.is_locked, false, .Release); + @atomicStore(bool, &self.is_locked, false, .release); } }; @@ -1077,7 +1077,7 @@ else if (@import("builtin").link_libc) pub fn deinit(self: *Condvar) void { const safe_rc = switch (@import("builtin").os.tag) { - .dragonfly, .netbsd => std.os.EAGAIN, + .dragonfly, .netbsd => std.posix.EAGAIN, else => 0, }; @@ -1168,7 +1168,7 @@ else futex: i32 = 0, fn wait(self: *Event) void { - while (@atomicLoad(i32, &self.futex, .Acquire) == 0) { + while (@atomicLoad(i32, &self.futex, .acquire) == 0) { if (@hasDecl(Futex, "wait")) { Futex.wait(&self.futex, 0); } else { @@ -1178,7 +1178,7 @@ else } fn set(self: *Event) void { - @atomicStore(i32, &self.futex, 1, .Release); + @atomicStore(i32, &self.futex, 1, .release); if (@hasDecl(Futex, "wake")) Futex.wake(&self.futex); @@ -1196,8 +1196,8 @@ const Futex = switch (@import("builtin").os.tag) { null, ))) { 0 => {}, - std.os.EINTR => {}, - std.os.EAGAIN => {}, + std.posix.EINTR => {}, + std.posix.EAGAIN => {}, else => unreachable, } } @@ -1209,7 +1209,7 @@ const Futex = switch (@import("builtin").os.tag) { @as(i32, 1), ))) { 0 => {}, - std.os.EFAULT => {}, + std.posix.EFAULT => {}, else => unreachable, } } diff --git a/src/sys.zig b/src/sys.zig index 2f0bd5eb6f..5d70b8c132 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -1,19 +1,18 @@ -// This file is entirely based on Zig's std.os +// This file is entirely based on Zig's std.posix // The differences are in error handling const std = @import("std"); const builtin = @import("builtin"); const bun = @import("root").bun; -const os = std.os; +const posix = std.posix; const assertIsValidWindowsPath = bun.strings.assertIsValidWindowsPath; const default_allocator = bun.default_allocator; const kernel32 = bun.windows; -const linux = os.linux; const mem = std.mem; -const mode_t = os.mode_t; +const mode_t = posix.mode_t; const open_sym = system.open; -const sys = std.os.system; +const sys = std.posix.system; const windows = bun.windows; const C = bun.C; @@ 
-24,43 +23,143 @@ const PathString = bun.PathString; const Syscall = @This(); const SystemError = JSC.SystemError; +const linux = system; + pub const sys_uv = if (Environment.isWindows) @import("./sys_uv.zig") else Syscall; const log = bun.Output.scoped(.SYS, false); pub const syslog = log; -// On Linux AARCh64, zig is missing stat & lstat syscalls -const use_libc = !(Environment.isLinux and Environment.isX64); pub const system = switch (Environment.os) { - .linux => linux, + .linux => std.os.linux, .mac => bun.AsyncIO.system, else => @compileError("not implemented"), }; -pub const S = struct { - pub usingnamespace if (Environment.isLinux) linux.S else if (Environment.isPosix) std.os.S else struct {}; +fn toPackedO(number: anytype) std.posix.O { + return @bitCast(number); +} + +pub const O = switch (Environment.os) { + .mac => struct { + pub const PATH = 0x0000; + pub const RDONLY = 0x0000; + pub const WRONLY = 0x0001; + pub const RDWR = 0x0002; + pub const NONBLOCK = 0x0004; + pub const APPEND = 0x0008; + pub const CREAT = 0x0200; + pub const TRUNC = 0x0400; + pub const EXCL = 0x0800; + pub const SHLOCK = 0x0010; + pub const EXLOCK = 0x0020; + pub const NOFOLLOW = 0x0100; + pub const SYMLINK = 0x200000; + pub const EVTONLY = 0x8000; + pub const CLOEXEC = 0x1000000; + pub const ACCMODE = 3; + pub const ALERT = 536870912; + pub const ASYNC = 64; + pub const DIRECTORY = 1048576; + pub const DP_GETRAWENCRYPTED = 1; + pub const DP_GETRAWUNENCRYPTED = 2; + pub const DSYNC = 4194304; + pub const FSYNC = SYNC; + pub const NOCTTY = 131072; + pub const POPUP = 2147483648; + pub const SYNC = 128; + + pub const toPacked = toPackedO; + }, + .linux, .wasm => switch (Environment.isX86) { + true => struct { + pub const RDONLY = 0x0000; + pub const WRONLY = 0x0001; + pub const RDWR = 0x0002; + + pub const CREAT = 0o100; + pub const EXCL = 0o200; + pub const NOCTTY = 0o400; + pub const TRUNC = 0o1000; + pub const APPEND = 0o2000; + pub const NONBLOCK = 0o4000; + pub const DSYNC = 
0o10000; + pub const SYNC = 0o4010000; + pub const RSYNC = 0o4010000; + pub const DIRECTORY = 0o200000; + pub const NOFOLLOW = 0o400000; + pub const CLOEXEC = 0o2000000; + + pub const ASYNC = 0o20000; + pub const DIRECT = 0o40000; + pub const LARGEFILE = 0; + pub const NOATIME = 0o1000000; + pub const PATH = 0o10000000; + pub const TMPFILE = 0o20200000; + pub const NDELAY = NONBLOCK; + + pub const toPacked = toPackedO; + }, + false => struct { + pub const RDONLY = 0x0000; + pub const WRONLY = 0x0001; + pub const RDWR = 0x0002; + + pub const CREAT = 0o100; + pub const EXCL = 0o200; + pub const NOCTTY = 0o400; + pub const TRUNC = 0o1000; + pub const APPEND = 0o2000; + pub const NONBLOCK = 0o4000; + pub const DSYNC = 0o10000; + pub const SYNC = 0o4010000; + pub const RSYNC = 0o4010000; + pub const DIRECTORY = 0o40000; + pub const NOFOLLOW = 0o100000; + pub const CLOEXEC = 0o2000000; + + pub const ASYNC = 0o20000; + pub const DIRECT = 0o200000; + pub const LARGEFILE = 0o400000; + pub const NOATIME = 0o1000000; + pub const PATH = 0o10000000; + pub const TMPFILE = 0o20040000; + pub const NDELAY = NONBLOCK; + + pub const toPacked = toPackedO; + }, + }, + .windows => struct { + pub const RDONLY = 0o0; + pub const WRONLY = 0o1; + pub const RDWR = 0o2; + + pub const CREAT = 0o100; + pub const EXCL = 0o200; + pub const NOCTTY = 0o400; + pub const TRUNC = 0o1000; + pub const APPEND = 0o2000; + pub const NONBLOCK = 0o4000; + pub const DSYNC = 0o10000; + pub const SYNC = 0o4010000; + pub const RSYNC = 0o4010000; + pub const DIRECTORY = 0o200000; + pub const NOFOLLOW = 0o400000; + pub const CLOEXEC = 0o2000000; + + pub const ASYNC = 0o20000; + pub const DIRECT = 0o40000; + pub const LARGEFILE = 0; + pub const NOATIME = 0o1000000; + pub const PATH = 0o10000000; + pub const TMPFILE = 0o20200000; + pub const NDELAY = NONBLOCK; + + pub const toPacked = toPackedO; + }, }; -const statSym = if (use_libc) - C.stat -else if (Environment.isLinux) - linux.stat -else - @compileError("STAT"); 
- -const fstatSym = if (use_libc) - C.fstat -else if (Environment.isLinux) - linux.fstat -else - @compileError("STAT"); - -const lstat64 = if (use_libc) - C.lstat -else if (Environment.isLinux) - linux.lstat -else - @compileError("STAT"); +pub const S = if (Environment.isLinux) linux.S else if (Environment.isPosix) std.posix.S else struct {}; pub const Tag = enum(u8) { TODO, @@ -366,7 +465,7 @@ pub fn getcwdZ(buf: *bun.PathBuffer) Maybe([:0]const u8) { return if (rc != null) Result{ .result = rc.?[0..std.mem.len(rc.?) :0] } else - Result.errnoSys(0, .getcwd).?; + Result.errnoSys(@as(c_int, 0), .getcwd).?; } pub fn fchmod(fd: bun.FileDescriptor, mode: bun.Mode) Maybe(void) { @@ -413,7 +512,7 @@ pub fn chdir(destination: anytype) Maybe(void) { if (comptime Environment.isPosix) { if (comptime Type == []u8 or Type == []const u8) { return chdirOSPath( - &(std.os.toPosixPath(destination) catch return .{ .err = .{ + &(std.posix.toPosixPath(destination) catch return .{ .err = .{ .errno = @intFromEnum(bun.C.SystemErrno.EINVAL), .syscall = .chdir, } }), @@ -466,7 +565,7 @@ pub fn stat(path: [:0]const u8) Maybe(bun.Stat) { return sys_uv.stat(path); } else { var stat_ = mem.zeroes(bun.Stat); - const rc = statSym(path, &stat_); + const rc = C.stat(path, &stat_); if (comptime Environment.allow_assert) log("stat({s}) = {d}", .{ bun.asByteSlice(path), rc }); @@ -481,7 +580,7 @@ pub fn lstat(path: [:0]const u8) Maybe(bun.Stat) { return sys_uv.lstat(path); } else { var stat_ = mem.zeroes(bun.Stat); - if (Maybe(bun.Stat).errnoSys(lstat64(path, &stat_), .lstat)) |err| return err; + if (Maybe(bun.Stat).errnoSys(C.lstat64(path, &stat_), .lstat)) |err| return err; return Maybe(bun.Stat){ .result = stat_ }; } } @@ -498,7 +597,7 @@ pub fn fstat(fd: bun.FileDescriptor) Maybe(bun.Stat) { var stat_ = mem.zeroes(bun.Stat); - const rc = fstatSym(fd.cast(), &stat_); + const rc = C.fstat(fd.cast(), &stat_); if (comptime Environment.allow_assert) log("fstat({}) = {d}", .{ fd, rc }); @@ -523,7 
+622,7 @@ pub fn mkdiratZ(dir_fd: bun.FileDescriptor, file_path: [*:0]const u8, mode: mode fn mkdiratPosix(dir_fd: bun.FileDescriptor, file_path: []const u8, mode: mode_t) Maybe(void) { return mkdiratZ( dir_fd, - &(std.os.toPosixPath(file_path) catch return .{ .err = Error.fromCode(.NAMETOOLONG, .mkdir) }), + &(std.posix.toPosixPath(file_path) catch return .{ .err = Error.fromCode(.NAMETOOLONG, .mkdir) }), mode, ); } @@ -558,7 +657,7 @@ pub fn mkdir(file_path: [:0]const u8, flags: bun.Mode) Maybe(void) { return switch (Environment.os) { .mac => Maybe(void).errnoSysP(system.mkdir(file_path, flags), .mkdir, file_path) orelse Maybe(void).success, - .linux => Maybe(void).errnoSysP(linux.mkdir(file_path, flags), .mkdir, file_path) orelse Maybe(void).success, + .linux => Maybe(void).errnoSysP(system.mkdir(file_path, flags), .mkdir, file_path) orelse Maybe(void).success, .windows => { var wbuf: bun.WPathBuffer = undefined; @@ -575,7 +674,7 @@ pub fn mkdir(file_path: [:0]const u8, flags: bun.Mode) Maybe(void) { pub fn mkdirA(file_path: []const u8, flags: bun.Mode) Maybe(void) { if (comptime Environment.isMac) { - return Maybe(void).errnoSysP(system.mkdir(&(std.os.toPosixPath(file_path) catch return Maybe(void){ + return Maybe(void).errnoSysP(system.mkdir(&(std.posix.toPosixPath(file_path) catch return Maybe(void){ .err = .{ .errno = @intFromEnum(bun.C.E.NOMEM), .syscall = .open, @@ -584,7 +683,7 @@ pub fn mkdirA(file_path: []const u8, flags: bun.Mode) Maybe(void) { } if (comptime Environment.isLinux) { - return Maybe(void).errnoSysP(linux.mkdir(&(std.os.toPosixPath(file_path) catch return Maybe(void){ + return Maybe(void).errnoSysP(linux.mkdir(&(std.posix.toPosixPath(file_path) catch return Maybe(void){ .err = .{ .errno = @intFromEnum(bun.C.E.NOMEM), .syscall = .open, @@ -645,17 +744,9 @@ pub fn getErrno(rc: anytype) bun.C.E { return bun.C.E.UNKNOWN; } - if (comptime Environment.isMac) return std.os.errno(rc); - const Type = @TypeOf(rc); - - return switch (Type) { - usize 
=> std.os.linux.getErrno(@as(usize, rc)), - comptime_int, i32, c_int, isize => std.os.errno(rc), - else => @compileError("Not implemented yet for type " ++ @typeName(Type)), - }; + return bun.C.getErrno(rc); } -const O = std.os.O; const w = std.os.windows; pub fn normalizePathWindows( @@ -1118,11 +1209,11 @@ pub fn openatOSPath(dirfd: bun.FileDescriptor, file_path: bun.OSPathSliceZ, flag } while (true) { - const rc = Syscall.system.openat(dirfd.cast(), file_path, flags, perm); + const rc = Syscall.system.openat(dirfd.cast(), file_path, bun.O.toPacked(flags), perm); if (comptime Environment.allow_assert) log("openat({}, {s}) = {d}", .{ dirfd, bun.sliceTo(file_path, 0), rc }); return switch (Syscall.getErrno(rc)) { - .SUCCESS => .{ .result = bun.toFD(rc) }, + .SUCCESS => .{ .result = bun.toFD(@as(i32, @intCast(rc))) }, .INTR => continue, else => |err| { return .{ @@ -1149,7 +1240,7 @@ pub fn openatA(dirfd: bun.FileDescriptor, file_path: []const u8, flags: bun.Mode return openatWindowsT(u8, dirfd, file_path, flags); } - const pathZ = std.os.toPosixPath(file_path) catch return Maybe(bun.FileDescriptor){ + const pathZ = std.posix.toPosixPath(file_path) catch return Maybe(bun.FileDescriptor){ .err = .{ .errno = @intFromEnum(bun.C.E.NAMETOOLONG), .syscall = .open, @@ -1273,14 +1364,14 @@ pub fn write(fd: bun.FileDescriptor, bytes: []const u8) Maybe(usize) { fn veclen(buffers: anytype) usize { var len: usize = 0; for (buffers) |buffer| { - len += buffer.iov_len; + len += buffer.len; } return len; } -pub fn writev(fd: bun.FileDescriptor, buffers: []std.os.iovec) Maybe(usize) { +pub fn writev(fd: bun.FileDescriptor, buffers: []std.posix.iovec) Maybe(usize) { if (comptime Environment.isMac) { - const rc = writev_sym(fd.cast(), @as([*]std.os.iovec_const, @ptrCast(buffers.ptr)), @as(i32, @intCast(buffers.len))); + const rc = writev_sym(fd.cast(), @as([*]std.posix.iovec_const, @ptrCast(buffers.ptr)), @as(i32, @intCast(buffers.len))); if (comptime Environment.allow_assert) 
log("writev({}, {d}) = {d}", .{ fd, veclen(buffers), rc }); @@ -1291,7 +1382,7 @@ pub fn writev(fd: bun.FileDescriptor, buffers: []std.os.iovec) Maybe(usize) { return Maybe(usize){ .result = @as(usize, @intCast(rc)) }; } else { while (true) { - const rc = writev_sym(fd.cast(), @as([*]std.os.iovec_const, @ptrCast(buffers.ptr)), buffers.len); + const rc = writev_sym(fd.cast(), @as([*]std.posix.iovec_const, @ptrCast(buffers.ptr)), buffers.len); if (comptime Environment.allow_assert) log("writev({}, {d}) = {d}", .{ fd, veclen(buffers), rc }); @@ -1337,7 +1428,7 @@ pub fn pwritev(fd: bun.FileDescriptor, buffers: []const bun.PlatformIOVecConst, } } -pub fn readv(fd: bun.FileDescriptor, buffers: []std.os.iovec) Maybe(usize) { +pub fn readv(fd: bun.FileDescriptor, buffers: []std.posix.iovec) Maybe(usize) { if (comptime Environment.allow_assert) { if (buffers.len == 0) { bun.Output.debugWarn("readv() called with 0 length buffer", .{}); @@ -1371,7 +1462,7 @@ pub fn readv(fd: bun.FileDescriptor, buffers: []std.os.iovec) Maybe(usize) { } } -pub fn preadv(fd: bun.FileDescriptor, buffers: []std.os.iovec, position: isize) Maybe(usize) { +pub fn preadv(fd: bun.FileDescriptor, buffers: []std.posix.iovec, position: isize) Maybe(usize) { if (comptime Environment.allow_assert) { if (buffers.len == 0) { bun.Output.debugWarn("preadv() called with 0 length buffer", .{}); @@ -1756,7 +1847,7 @@ pub fn renameat2(from_dir: bun.FileDescriptor, from: [:0]const u8, to_dir: bun.F while (true) { const rc = switch (comptime Environment.os) { - .linux => linux.renameat2(@intCast(from_dir.cast()), from.ptr, @intCast(to_dir.cast()), to.ptr, flags.int()), + .linux => std.os.linux.renameat2(@intCast(from_dir.cast()), from.ptr, @intCast(to_dir.cast()), to.ptr, flags.int()), .mac => bun.C.renameatx_np(@intCast(from_dir.cast()), from.ptr, @intCast(to_dir.cast()), to.ptr, flags.int()), else => @compileError("renameat2() is not implemented on this platform"), }; @@ -1801,7 +1892,7 @@ pub fn 
renameat(from_dir: bun.FileDescriptor, from: [:0]const u8, to_dir: bun.Fi } } -pub fn chown(path: [:0]const u8, uid: os.uid_t, gid: os.gid_t) Maybe(void) { +pub fn chown(path: [:0]const u8, uid: posix.uid_t, gid: posix.gid_t) Maybe(void) { while (true) { if (Maybe(void).errnoSys(C.chown(path, uid, gid), .chown)) |err| { if (err.getErrno() == .INTR) continue; @@ -1980,7 +2071,7 @@ pub fn unlink(from: [:0]const u8) Maybe(void) { } pub fn rmdirat(dirfd: bun.FileDescriptor, to: anytype) Maybe(void) { - return unlinkatWithFlags(dirfd, to, std.os.AT.REMOVEDIR); + return unlinkatWithFlags(dirfd, to, std.posix.AT.REMOVEDIR); } pub fn unlinkatWithFlags(dirfd: bun.FileDescriptor, to: anytype, flags: c_uint) Maybe(void) { @@ -1992,7 +2083,7 @@ pub fn unlinkatWithFlags(dirfd: bun.FileDescriptor, to: anytype, flags: c_uint) return bun.windows.DeleteFileBun(to, .{ .dir = if (dirfd != bun.invalid_fd) dirfd.cast() else null, - .remove_dir = flags & std.os.AT.REMOVEDIR != 0, + .remove_dir = flags & std.posix.AT.REMOVEDIR != 0, }); } @@ -2042,7 +2133,7 @@ pub fn getFdPath(fd: bun.FileDescriptor, out_buffer: *[MAX_PATH_BYTES]u8) Maybe( // On macOS, we can use F.GETPATH fcntl command to query the OS for // the path to the file descriptor. 
@memset(out_buffer[0..MAX_PATH_BYTES], 0); - if (Maybe([]u8).errnoSys(system.fcntl(fd.cast(), os.F.GETPATH, out_buffer), .fcntl)) |err| { + if (Maybe([]u8).errnoSys(system.fcntl(fd.cast(), posix.F.GETPATH, out_buffer), .fcntl)) |err| { return err; } const len = mem.indexOfScalar(u8, out_buffer[0..], @as(u8, 0)) orelse MAX_PATH_BYTES; @@ -2068,25 +2159,25 @@ pub fn mmap( ptr: ?[*]align(mem.page_size) u8, length: usize, prot: u32, - flags: u32, + flags: std.posix.MAP, fd: bun.FileDescriptor, offset: u64, ) Maybe([]align(mem.page_size) u8) { const ioffset = @as(i64, @bitCast(offset)); // the OS treats this as unsigned const rc = std.c.mmap(ptr, length, prot, flags, fd.cast(), ioffset); - const fail = std.c.MAP.FAILED; + const fail = std.c.MAP_FAILED; if (rc == fail) { return Maybe([]align(mem.page_size) u8){ - .err = .{ .errno = @as(Syscall.Error.Int, @truncate(@intFromEnum(std.c.getErrno(@as(i64, @bitCast(@intFromPtr(fail))))))), .syscall = .mmap }, + .err = .{ .errno = @as(Syscall.Error.Int, @truncate(@intFromEnum(bun.C.getErrno(@as(i64, @bitCast(@intFromPtr(fail))))))), .syscall = .mmap }, }; } return Maybe([]align(mem.page_size) u8){ .result = @as([*]align(mem.page_size) u8, @ptrCast(@alignCast(rc)))[0..length] }; } -pub fn mmapFile(path: [:0]const u8, flags: u32, wanted_size: ?usize, offset: usize) Maybe([]align(mem.page_size) u8) { +pub fn mmapFile(path: [:0]const u8, flags: std.c.MAP, wanted_size: ?usize, offset: usize) Maybe([]align(mem.page_size) u8) { assertIsValidWindowsPath(u8, path); - const fd = switch (open(path, os.O.RDWR, 0)) { + const fd = switch (open(path, bun.O.RDWR, 0)) { .result => |fd| fd, .err => |err| return .{ .err = err }, }; @@ -2101,7 +2192,7 @@ pub fn mmapFile(path: [:0]const u8, flags: u32, wanted_size: ?usize, offset: usi if (wanted_size) |size_| size = @min(size, size_); - const map = switch (mmap(null, size, os.PROT.READ | os.PROT.WRITE, flags, fd, offset)) { + const map = switch (mmap(null, size, posix.PROT.READ | posix.PROT.WRITE, 
flags, fd, offset)) { .result => |map| map, .err => |err| { @@ -2160,7 +2251,7 @@ pub fn getMaxPipeSizeOnLinux() usize { fn once() c_int { const strings = bun.strings; const default_out_size = 512 * 1024; - const pipe_max_size_fd = switch (bun.sys.open("/proc/sys/fs/pipe-max-size", std.os.O.RDONLY, 0)) { + const pipe_max_size_fd = switch (bun.sys.open("/proc/sys/fs/pipe-max-size", bun.O.RDONLY, 0)) { .result => |fd2| fd2, .err => |err| { log("Failed to open /proc/sys/fs/pipe-max-size: {d}\n", .{err.errno}); @@ -2289,7 +2380,7 @@ pub fn existsOSPath(path: bun.OSPathSliceZ, file_only: bool) bool { pub fn exists(path: []const u8) bool { if (comptime Environment.isPosix) { - return system.access(&(std.os.toPosixPath(path) catch return false), 0) == 0; + return system.access(&(std.posix.toPosixPath(path) catch return false), 0) == 0; } if (comptime Environment.isWindows) { @@ -2311,7 +2402,7 @@ pub fn faccessat(dir_: anytype, subpath: anytype) JSC.Maybe(bool) { if (comptime Environment.isLinux) { // avoid loading the libc symbol for this to reduce chances of GLIBC minimum version requirements const rc = linux.faccessat(dir_fd.cast(), subpath, linux.F_OK, 0); - syslog("faccessat({}, {}, O_RDONLY, 0) = {d}", .{ dir_fd, bun.fmt.fmtOSPath(subpath, .{}), if (rc == 0) 0 else @intFromEnum(linux.getErrno(rc)) }); + syslog("faccessat({}, {}, O_RDONLY, 0) = {d}", .{ dir_fd, bun.fmt.fmtOSPath(subpath, .{}), if (rc == 0) 0 else @intFromEnum(bun.C.getErrno(rc)) }); if (rc == 0) { return JSC.Maybe(bool){ .result = true }; } @@ -2320,8 +2411,8 @@ pub fn faccessat(dir_: anytype, subpath: anytype) JSC.Maybe(bool) { } // on other platforms use faccessat from libc - const rc = std.c.faccessat(dir_fd.cast(), subpath, std.os.F_OK, 0); - syslog("faccessat({}, {}, O_RDONLY, 0) = {d}", .{ dir_fd, bun.fmt.fmtOSPath(subpath, .{}), if (rc == 0) 0 else @intFromEnum(std.c.getErrno(rc)) }); + const rc = std.c.faccessat(dir_fd.cast(), subpath, std.posix.F_OK, 0); + syslog("faccessat({}, {}, O_RDONLY, 
0) = {d}", .{ dir_fd, bun.fmt.fmtOSPath(subpath, .{}), if (rc == 0) 0 else @intFromEnum(bun.C.getErrno(rc)) }); if (rc == 0) { return JSC.Maybe(bool){ .result = true }; } @@ -2375,16 +2466,16 @@ pub fn directoryExistsAt(dir_: anytype, subpath: anytype) JSC.Maybe(bool) { pub fn setNonblocking(fd: bun.FileDescriptor) Maybe(void) { const flags = switch (bun.sys.fcntl( fd, - std.os.F.GETFL, + std.posix.F.GETFL, 0, )) { .result => |f| f, .err => |err| return .{ .err = err }, }; - const new_flags = flags | std.os.O.NONBLOCK; + const new_flags = flags | bun.O.NONBLOCK; - switch (bun.sys.fcntl(fd, std.os.F.SETFL, new_flags)) { + switch (bun.sys.fcntl(fd, std.posix.F.SETFL, new_flags)) { .err => |err| return .{ .err = err }, .result => {}, } @@ -2480,7 +2571,7 @@ pub fn isExecutableFilePath(path: anytype) bool { *[*:0]const u8, *[*:0]u8, [*:0]const u8, [*:0]u8 => return is_executable_file(path), [:0]const u8, [:0]u8 => return is_executable_file(path.ptr), []const u8, []u8 => return is_executable_file( - &(std.os.toPosixPath(path) catch return false), + &(std.posix.toPosixPath(path) catch return false), ), else => @compileError("TODO: isExecutableFilePath"), } @@ -2497,7 +2588,7 @@ pub fn isExecutableFilePath(path: anytype) bool { pub fn setFileOffset(fd: bun.FileDescriptor, offset: usize) Maybe(void) { if (comptime Environment.isLinux) { return Maybe(void).errnoSysFd( - linux.lseek(fd.cast(), @intCast(offset), os.SEEK.SET), + linux.lseek(fd.cast(), @intCast(offset), posix.SEEK.SET), .lseek, fd, ) orelse Maybe(void).success; @@ -2505,7 +2596,7 @@ pub fn setFileOffset(fd: bun.FileDescriptor, offset: usize) Maybe(void) { if (comptime Environment.isMac) { return Maybe(void).errnoSysFd( - std.c.lseek(fd.cast(), @intCast(offset), os.SEEK.SET), + std.c.lseek(fd.cast(), @intCast(offset), posix.SEEK.SET), .lseek, fd, ) orelse Maybe(void).success; @@ -2567,7 +2658,7 @@ pub fn openNullDevice() Maybe(bun.FileDescriptor) { return sys_uv.open("nul", 0, 0); } - return open("/dev/null", 
os.O.RDWR, 0); + return open("/dev/null", bun.O.RDWR, 0); } pub fn dupWithFlags(fd: bun.FileDescriptor, flags: i32) Maybe(bun.FileDescriptor) { @@ -2601,11 +2692,13 @@ pub fn dupWithFlags(fd: bun.FileDescriptor, flags: i32) Maybe(bun.FileDescriptor } if (flags != 0) { - const fd_flags: ArgType = @intCast(system.fcntl(@intCast(out), @as(i32, std.os.F.GETFD), @as(ArgType, 0))); - _ = system.fcntl(@intCast(out), @as(i32, std.os.F.SETFD), @as(ArgType, @intCast(fd_flags | @as(ArgType, @intCast(flags))))); + const fd_flags: ArgType = @intCast(system.fcntl(@intCast(out), @as(i32, std.posix.F.GETFD), @as(ArgType, 0))); + _ = system.fcntl(@intCast(out), @as(i32, std.posix.F.SETFD), @as(ArgType, @intCast(fd_flags | @as(ArgType, @intCast(flags))))); } - return Maybe(bun.FileDescriptor){ .result = bun.toFD(out) }; + return Maybe(bun.FileDescriptor){ + .result = bun.toFD(@as(u32, @intCast(out))), + }; } pub fn dup(fd: bun.FileDescriptor) Maybe(bun.FileDescriptor) { @@ -2616,14 +2709,14 @@ pub fn linkat(dir_fd: bun.FileDescriptor, basename: []const u8, dest_dir_fd: bun return Maybe(void).errnoSysP( std.c.linkat( @intCast(dir_fd), - &(std.os.toPosixPath(basename) catch return .{ + &(std.posix.toPosixPath(basename) catch return .{ .err = .{ .errno = @intFromEnum(bun.C.E.NOMEM), .syscall = .open, }, }), @intCast(dest_dir_fd), - &(std.os.toPosixPath(dest_name) catch return .{ + &(std.posix.toPosixPath(dest_name) catch return .{ .err = .{ .errno = @intFromEnum(bun.C.E.NOMEM), .syscall = .open, @@ -2647,14 +2740,14 @@ pub fn linkatTmpfile(tmpfd: bun.FileDescriptor, dirfd: bun.FileDescriptor, name: while (true) { // This is racy but it's fine if we call linkat() with an empty path multiple times. 
- const current_status = CAP_DAC_READ_SEARCH.status.load(.Monotonic); + const current_status = CAP_DAC_READ_SEARCH.status.load(.monotonic); const rc = if (current_status != -1) std.os.linux.linkat( tmpfd.cast(), "", dirfd.cast(), name, - os.AT.EMPTY_PATH, + posix.AT.EMPTY_PATH, ) else brk: { // // snprintf(path, PATH_MAX, "/proc/self/fd/%d", fd); @@ -2665,11 +2758,11 @@ pub fn linkatTmpfile(tmpfd: bun.FileDescriptor, dirfd: bun.FileDescriptor, name: const path = std.fmt.bufPrintZ(&procfs_buf, "/proc/self/fd/{d}", .{tmpfd.cast()}) catch unreachable; break :brk std.os.linux.linkat( - os.AT.FDCWD, + posix.AT.FDCWD, path, dirfd.cast(), name, - os.AT.SYMLINK_FOLLOW, + posix.AT.SYMLINK_FOLLOW, ); }; @@ -2679,7 +2772,7 @@ pub fn linkatTmpfile(tmpfd: bun.FileDescriptor, dirfd: bun.FileDescriptor, name: .ISDIR, .NOENT, .OPNOTSUPP, .PERM, .INVAL => { // CAP_DAC_READ_SEARCH is required to linkat with an empty path. if (current_status == 0) { - CAP_DAC_READ_SEARCH.status.store(-1, .Monotonic); + CAP_DAC_READ_SEARCH.status.store(-1, .monotonic); continue; } }, @@ -2690,7 +2783,7 @@ pub fn linkatTmpfile(tmpfd: bun.FileDescriptor, dirfd: bun.FileDescriptor, name: } if (current_status == 0) { - CAP_DAC_READ_SEARCH.status.store(1, .Monotonic); + CAP_DAC_READ_SEARCH.status.store(1, .monotonic); } return Maybe(void).success; @@ -2703,14 +2796,14 @@ pub fn linkatTmpfile(tmpfd: bun.FileDescriptor, dirfd: bun.FileDescriptor, name: pub fn readNonblocking(fd: bun.FileDescriptor, buf: []u8) Maybe(usize) { if (Environment.isLinux) { while (bun.C.linux.RWFFlagSupport.isMaybeSupported()) { - const iovec = [1]std.os.iovec{.{ - .iov_base = buf.ptr, - .iov_len = buf.len, + const iovec = [1]std.posix.iovec{.{ + .base = buf.ptr, + .len = buf.len, }}; var debug_timer = bun.Output.DebugTimer.start(); // Note that there is a bug on Linux Kernel 5 - const rc = C.sys_preadv2(@intCast(fd.int()), &iovec, 1, -1, linux.RWF.NOWAIT); + const rc = C.sys_preadv2(@intCast(fd.int()), &iovec, 1, -1, 
std.os.linux.RWF.NOWAIT); if (comptime Environment.isDebug) { log("preadv2({}, {d}) = {d} ({})", .{ fd, buf.len, rc, debug_timer }); @@ -2747,14 +2840,14 @@ pub fn readNonblocking(fd: bun.FileDescriptor, buf: []u8) Maybe(usize) { pub fn writeNonblocking(fd: bun.FileDescriptor, buf: []const u8) Maybe(usize) { if (Environment.isLinux) { while (bun.C.linux.RWFFlagSupport.isMaybeSupported()) { - const iovec = [1]std.os.iovec_const{.{ - .iov_base = buf.ptr, - .iov_len = buf.len, + const iovec = [1]std.posix.iovec_const{.{ + .base = buf.ptr, + .len = buf.len, }}; var debug_timer = bun.Output.DebugTimer.start(); - const rc = C.sys_pwritev2(@intCast(fd.int()), &iovec, 1, -1, linux.RWF.NOWAIT); + const rc = C.sys_pwritev2(@intCast(fd.int()), &iovec, 1, -1, std.os.linux.RWF.NOWAIT); if (comptime Environment.isDebug) { log("pwritev2({}, {d}) = {d} ({})", .{ fd, buf.len, rc, debug_timer }); @@ -2808,7 +2901,7 @@ pub fn getFileSize(fd: bun.FileDescriptor) Maybe(usize) { } pub fn isPollable(mode: mode_t) bool { - return os.S.ISFIFO(mode) or os.S.ISSOCK(mode); + return posix.S.ISFIFO(mode) or posix.S.ISSOCK(mode); } const This = @This(); @@ -2831,7 +2924,7 @@ pub const File = struct { return other; } - if (T == std.os.fd_t) { + if (T == std.posix.fd_t) { return File{ .handle = bun.toFD(other) }; } @@ -2935,7 +3028,7 @@ pub const File = struct { } pub fn isTty(self: File) bool { - return std.os.isatty(self.handle.cast()); + return std.posix.isatty(self.handle.cast()); } pub fn close(self: File) void { diff --git a/src/sys_uv.zig b/src/sys_uv.zig index e8bf926f20..2704584189 100644 --- a/src/sys_uv.zig +++ b/src/sys_uv.zig @@ -1,14 +1,14 @@ //! bun.sys.sys_uv is a polyfill of bun.sys but with libuv. //! 
TODO: Probably should merge this into bun.sys itself with isWindows checks const std = @import("std"); -const os = std.os; +const posix = std.posix; const bun = @import("root").bun; const assertIsValidWindowsPath = bun.strings.assertIsValidWindowsPath; const fd_t = bun.FileDescriptor; const default_allocator = bun.default_allocator; const kernel32 = bun.windows; -const linux = os.linux; +const linux = posix.linux; const uv = bun.windows.libuv; const C = bun.C; @@ -43,7 +43,7 @@ pub fn open(file_path: [:0]const u8, c_flags: bun.Mode, _perm: bun.Mode) Maybe(b var req: uv.fs_t = uv.fs_t.uninitialized; defer req.deinit(); - const flags = uv.O.fromStd(c_flags); + const flags = uv.O.fromBunO(c_flags); var perm = _perm; if (perm == 0) { diff --git a/src/thread_pool.zig b/src/thread_pool.zig index 887e0c00eb..8d5f859d44 100644 --- a/src/thread_pool.zig +++ b/src/thread_pool.zig @@ -89,13 +89,13 @@ pub const Batch = struct { tail: ?*Task = null, pub fn pop(this: *Batch) ?*Task { - const len = @atomicLoad(usize, &this.len, .Monotonic); + const len = @atomicLoad(usize, &this.len, .monotonic); if (len == 0) { return null; } const task = this.head.?; if (task.node.next) |node| { - this.head = @fieldParentPtr(Task, "node", node); + this.head = @fieldParentPtr("node", node); } else { if (task != this.tail.?) 
unreachable; this.tail = null; @@ -157,7 +157,7 @@ pub const WaitGroup = struct { } pub fn isDone(this: *WaitGroup) bool { - return @atomicLoad(u32, &this.counter, .Monotonic) == 0; + return @atomicLoad(u32, &this.counter, .monotonic) == 0; } pub fn finish(self: *WaitGroup) void { @@ -227,7 +227,7 @@ pub fn ConcurrentFunction( task: Task = .{ .callback = callback }, pub fn callback(task: *Task) void { - const routine = @fieldParentPtr(@This(), "task", task); + const routine: *@This() = @fieldParentPtr("task", task); @call(bun.callmod_inline, Fn, routine.args); } }; @@ -309,7 +309,7 @@ pub fn Do( i: usize = 0, pub fn call(task: *Task) void { - var runner_task = @fieldParentPtr(@This(), "task", task); + var runner_task: *@This() = @fieldParentPtr("task", task); const i = runner_task.i; if (comptime as_ptr) { Function(runner_task.ctx.ctx, &runner_task.ctx.values[i], i); @@ -390,7 +390,7 @@ inline fn notify(self: *ThreadPool, is_waking: bool) void { // Fast path to check the Sync state to avoid calling into notifySlow(). // If we're waking, then we need to update the state regardless if (!is_waking) { - const sync = @as(Sync, @bitCast(self.sync.load(.Monotonic))); + const sync = @as(Sync, @bitCast(self.sync.load(.monotonic))); if (sync.notified) { return; } @@ -418,7 +418,7 @@ pub const default_thread_stack_size = brk: { /// Warm the thread pool up to the given number of threads. 
/// https://www.youtube.com/watch?v=ys3qcbO5KWw pub fn warm(self: *ThreadPool, count: u14) void { - var sync = @as(Sync, @bitCast(self.sync.load(.Monotonic))); + var sync = @as(Sync, @bitCast(self.sync.load(.monotonic))); if (sync.spawned >= count) return; @@ -429,8 +429,8 @@ pub fn warm(self: *ThreadPool, count: u14) void { sync = @as(Sync, @bitCast(self.sync.cmpxchgWeak( @as(u32, @bitCast(sync)), @as(u32, @bitCast(new_sync)), - .Release, - .Monotonic, + .release, + .monotonic, ) orelse break)); const spawn_config = std.Thread.SpawnConfig{ .stack_size = default_thread_stack_size }; const thread = std.Thread.spawn(spawn_config, Thread.run, .{self}) catch return self.unregister(null); @@ -439,7 +439,7 @@ pub fn warm(self: *ThreadPool, count: u14) void { } noinline fn notifySlow(self: *ThreadPool, is_waking: bool) void { - var sync = @as(Sync, @bitCast(self.sync.load(.Monotonic))); + var sync = @as(Sync, @bitCast(self.sync.load(.monotonic))); while (sync.state != .shutdown) { const can_wake = is_waking or (sync.state == .pending); if (is_waking) { @@ -464,8 +464,8 @@ noinline fn notifySlow(self: *ThreadPool, is_waking: bool) void { sync = @bitCast(self.sync.cmpxchgWeak( @as(u32, @bitCast(sync)), @as(u32, @bitCast(new_sync)), - .Release, - .Monotonic, + .release, + .monotonic, ) orelse { // We signaled to notify an idle thread if (can_wake and sync.idle > 0) { @@ -488,7 +488,7 @@ noinline fn notifySlow(self: *ThreadPool, is_waking: bool) void { noinline fn wait(self: *ThreadPool, _is_waking: bool) error{Shutdown}!bool { var is_idle = false; var is_waking = _is_waking; - var sync = @as(Sync, @bitCast(self.sync.load(.Monotonic))); + var sync = @as(Sync, @bitCast(self.sync.load(.monotonic))); while (true) { if (sync.state == .shutdown) return error.Shutdown; @@ -508,8 +508,8 @@ noinline fn wait(self: *ThreadPool, _is_waking: bool) error{Shutdown}!bool { sync = @as(Sync, @bitCast(self.sync.cmpxchgWeak( @as(u32, @bitCast(sync)), @as(u32, @bitCast(new_sync)), - .Acquire, - 
.Monotonic, + .acquire, + .monotonic, ) orelse { return is_waking or (sync.state == .signaled); })); @@ -522,8 +522,8 @@ noinline fn wait(self: *ThreadPool, _is_waking: bool) error{Shutdown}!bool { sync = @as(Sync, @bitCast(self.sync.cmpxchgWeak( @as(u32, @bitCast(sync)), @as(u32, @bitCast(new_sync)), - .Monotonic, - .Monotonic, + .monotonic, + .monotonic, ) orelse { is_waking = false; is_idle = true; @@ -535,14 +535,14 @@ noinline fn wait(self: *ThreadPool, _is_waking: bool) error{Shutdown}!bool { } self.idle_event.wait(); - sync = @as(Sync, @bitCast(self.sync.load(.Monotonic))); + sync = @as(Sync, @bitCast(self.sync.load(.monotonic))); } } } /// Marks the thread pool as shutdown pub noinline fn shutdown(self: *ThreadPool) void { - var sync = @as(Sync, @bitCast(self.sync.load(.Monotonic))); + var sync = @as(Sync, @bitCast(self.sync.load(.monotonic))); while (sync.state != .shutdown) { var new_sync = sync; new_sync.notified = true; @@ -553,8 +553,8 @@ pub noinline fn shutdown(self: *ThreadPool) void { sync = @as(Sync, @bitCast(self.sync.cmpxchgWeak( @as(u32, @bitCast(sync)), @as(u32, @bitCast(new_sync)), - .AcqRel, - .Monotonic, + .acq_rel, + .monotonic, ) orelse { // Wake up any threads sleeping on the idle_event. // TODO: I/O polling notification here. @@ -566,14 +566,14 @@ pub noinline fn shutdown(self: *ThreadPool) void { fn register(noalias self: *ThreadPool, noalias thread: *Thread) void { // Push the thread onto the threads stack in a lock-free manner. 
- var threads = self.threads.load(.Monotonic); + var threads = self.threads.load(.monotonic); while (true) { thread.next = threads; threads = self.threads.cmpxchgWeak( threads, thread, - .Release, - .Monotonic, + .release, + .monotonic, ) orelse break; } } @@ -581,7 +581,7 @@ fn register(noalias self: *ThreadPool, noalias thread: *Thread) void { pub fn setThreadContext(noalias pool: *ThreadPool, ctx: ?*anyopaque) void { pool.threadpool_context = ctx; - var thread = pool.threads.load(.Monotonic) orelse return; + var thread = pool.threads.load(.monotonic) orelse return; thread.ctx = pool.threadpool_context; while (thread.next) |next| { next.ctx = pool.threadpool_context; @@ -592,7 +592,7 @@ pub fn setThreadContext(noalias pool: *ThreadPool, ctx: ?*anyopaque) void { fn unregister(noalias self: *ThreadPool, noalias maybe_thread: ?*Thread) void { // Un-spawn one thread, either due to a failed OS thread spawning or the thread is exiting. const one_spawned = @as(u32, @bitCast(Sync{ .spawned = 1 })); - const sync = @as(Sync, @bitCast(self.sync.fetchSub(one_spawned, .Release))); + const sync = @as(Sync, @bitCast(self.sync.fetchSub(one_spawned, .release))); assert(sync.spawned > 0); // The last thread to exit must wake up the thread pool join()er @@ -614,10 +614,10 @@ fn unregister(noalias self: *ThreadPool, noalias maybe_thread: ?*Thread) void { fn join(self: *ThreadPool) void { // Wait for the thread pool to be shutdown() then for all threads to enter a joinable state - var sync = @as(Sync, @bitCast(self.sync.load(.Monotonic))); + var sync = @as(Sync, @bitCast(self.sync.load(.monotonic))); if (!(sync.state == .shutdown and sync.spawned == 0)) { self.join_event.wait(); - sync = @as(Sync, @bitCast(self.sync.load(.Monotonic))); + sync = @as(Sync, @bitCast(self.sync.load(.monotonic))); } assert(sync.state == .shutdown); @@ -625,7 +625,7 @@ fn join(self: *ThreadPool) void { // If there are threads, start off the chain sending it the shutdown signal. 
// The thread receives the shutdown signal and sends it to the next thread, and the next.. - const thread = self.threads.load(.Acquire) orelse return; + const thread = self.threads.load(.acquire) orelse return; thread.join_event.notify(); } @@ -654,7 +654,7 @@ pub const Thread = struct { fn run(thread_pool: *ThreadPool) void { { var counter_buf: [100]u8 = undefined; - const int = counter.fetchAdd(1, .SeqCst); + const int = counter.fetchAdd(1, .seq_cst); const named = std.fmt.bufPrintZ(&counter_buf, "Bun Pool {d}", .{int}) catch "Bun Pool"; Output.Source.configureNamedThread(named); } @@ -680,7 +680,7 @@ pub const Thread = struct { thread_pool.notify(is_waking); is_waking = false; - const task = @fieldParentPtr(Task, "node", result.node); + const task: *Task = @fieldParentPtr("node", result.node); (task.callback)(task); } @@ -694,7 +694,7 @@ pub const Thread = struct { var consumer = self.idle_queue.tryAcquireConsumer() catch return; defer self.idle_queue.releaseConsumer(consumer); while (self.idle_queue.pop(&consumer)) |node| { - const task = @fieldParentPtr(Task, "node", node); + const task: *Task = @fieldParentPtr("node", node); (task.callback)(task); } } @@ -721,10 +721,10 @@ pub const Thread = struct { } // Then try work stealing from other threads - var num_threads: u32 = @as(Sync, @bitCast(thread_pool.sync.load(.Monotonic))).spawned; + var num_threads: u32 = @as(Sync, @bitCast(thread_pool.sync.load(.monotonic))).spawned; while (num_threads > 0) : (num_threads -= 1) { // Traverse the stack of registered threads on the thread pool - const target = self.target orelse thread_pool.threads.load(.Acquire) orelse unreachable; + const target = self.target orelse thread_pool.threads.load(.acquire) orelse unreachable; self.target = target.next; // Try to steal from their queue first to avoid contention (the target steal's from queue last). 
@@ -763,14 +763,14 @@ const Event = struct { /// or wait for the event to be shutdown entirely noinline fn wait(self: *Event) void { var acquire_with: u32 = EMPTY; - var state = self.state.load(.Monotonic); + var state = self.state.load(.monotonic); while (true) { // If we're shutdown then exit early. // Acquire barrier to ensure operations before the shutdown() are seen after the wait(). // Shutdown is rare so it's better to have an Acquire barrier here instead of on CAS failure + load which are common. if (state == SHUTDOWN) { - @fence(.Acquire); + @fence(.acquire); return; } @@ -780,8 +780,8 @@ const Event = struct { state = self.state.cmpxchgWeak( state, acquire_with, - .Acquire, - .Monotonic, + .acquire, + .monotonic, ) orelse return; continue; } @@ -791,8 +791,8 @@ const Event = struct { state = self.state.cmpxchgWeak( state, WAITING, - .Monotonic, - .Monotonic, + .monotonic, + .monotonic, ) orelse break :blk; continue; } @@ -805,7 +805,7 @@ const Event = struct { // who will either exit on SHUTDOWN or acquire with WAITING again, ensuring all threads are awoken. // This unfortunately results in the last notify() or shutdown() doing an extra futex wake but that's fine. Futex.wait(&self.state, WAITING, null) catch unreachable; - state = self.state.load(.Monotonic); + state = self.state.load(.monotonic); acquire_with = WAITING; } } @@ -814,14 +814,14 @@ const Event = struct { /// or wait for the event to be shutdown entirely noinline fn waitFor(self: *Event, timeout: usize) void { var acquire_with: u32 = EMPTY; - var state = self.state.load(.Monotonic); + var state = self.state.load(.monotonic); while (true) { // If we're shutdown then exit early. // Acquire barrier to ensure operations before the shutdown() are seen after the wait(). // Shutdown is rare so it's better to have an Acquire barrier here instead of on CAS failure + load which are common. 
if (state == SHUTDOWN) { - @fence(.Acquire); + @fence(.acquire); return; } @@ -831,8 +831,8 @@ const Event = struct { state = self.state.cmpxchgWeak( state, acquire_with, - .Acquire, - .Monotonic, + .acquire, + .monotonic, ) orelse return; continue; } @@ -842,8 +842,8 @@ const Event = struct { state = self.state.cmpxchgWeak( state, WAITING, - .Monotonic, - .Monotonic, + .monotonic, + .monotonic, ) orelse break :blk; continue; } @@ -856,7 +856,7 @@ const Event = struct { // who will either exit on SHUTDOWN or acquire with WAITING again, ensuring all threads are awoken. // This unfortunately results in the last notify() or shutdown() doing an extra futex wake but that's fine. Futex.wait(&self.state, WAITING, timeout) catch {}; - state = self.state.load(.Monotonic); + state = self.state.load(.monotonic); acquire_with = WAITING; } } @@ -876,7 +876,7 @@ const Event = struct { fn wake(self: *Event, release_with: u32, wake_threads: u32) void { // Update the Event to notify it with the new `release_with` state (either NOTIFIED or SHUTDOWN). // Release barrier to ensure any operations before this are this to happen before the wait() in the other threads. - const state = self.state.swap(release_with, .Release); + const state = self.state.swap(release_with, .release); // Only wake threads sleeping in futex if the state is WAITING. // Avoids unnecessary wake ups. @@ -910,7 +910,7 @@ pub const Node = struct { } fn push(noalias self: *Queue, list: List) void { - var stack = self.stack.load(.Monotonic); + var stack = self.stack.load(.monotonic); while (true) { // Attach the list to the stack (pt. 
1) list.tail.next = @as(?*Node, @ptrFromInt(stack & PTR_MASK)); @@ -925,14 +925,14 @@ pub const Node = struct { stack = self.stack.cmpxchgWeak( stack, new_stack, - .Release, - .Monotonic, + .release, + .monotonic, ) orelse break; } } fn tryAcquireConsumer(self: *Queue) error{ Empty, Contended }!?*Node { - var stack = self.stack.load(.Monotonic); + var stack = self.stack.load(.monotonic); while (true) { if (stack & IS_CONSUMING != 0) return error.Contended; // The queue already has a consumer. @@ -951,8 +951,8 @@ pub const Node = struct { stack = self.stack.cmpxchgWeak( stack, new_stack, - .Acquire, - .Monotonic, + .acquire, + .monotonic, ) orelse return self.cache orelse @as(*Node, @ptrFromInt(stack & PTR_MASK)); } } @@ -967,7 +967,7 @@ pub const Node = struct { // Release the consumer with a release barrier to ensure cache/node accesses // happen before the consumer was released and before the next consumer starts using the cache. self.cache = consumer; - const stack = self.stack.fetchSub(remove, .Release); + const stack = self.stack.fetchSub(remove, .release); assert(stack & remove != 0); } @@ -979,14 +979,14 @@ pub const Node = struct { } // Load the stack to see if there was anything pushed that we could grab. - var stack = self.stack.load(.Monotonic); + var stack = self.stack.load(.monotonic); assert(stack & IS_CONSUMING != 0); if (stack & PTR_MASK == 0) { return null; } // Nodes have been pushed to the stack, grab then with an Acquire barrier to see the Node links. 
- stack = self.stack.swap(HAS_CACHE | IS_CONSUMING, .Acquire); + stack = self.stack.swap(HAS_CACHE | IS_CONSUMING, .acquire); assert(stack & IS_CONSUMING != 0); assert(stack & PTR_MASK != 0); @@ -1010,7 +1010,7 @@ pub const Node = struct { } fn push(noalias self: *Buffer, noalias list: *List) error{Overflow}!void { - var head = self.head.load(.Monotonic); + var head = self.head.load(.monotonic); var tail = self.tail.raw; // we're the only thread that can change this while (true) { @@ -1025,17 +1025,17 @@ pub const Node = struct { nodes = node.next; // Array written atomically with weakest ordering since it could be getting atomically read by steal(). - self.array[tail % capacity].store(node, .Unordered); + self.array[tail % capacity].store(node, .unordered); tail +%= 1; } // Release barrier synchronizes with Acquire loads for steal()ers to see the array writes. - self.tail.store(tail, .Release); + self.tail.store(tail, .release); // Update the list with the nodes we pushed to the buffer and try again if there's more. 
list.head = nodes orelse return; std.atomic.spinLoopHint(); - head = self.head.load(.Monotonic); + head = self.head.load(.monotonic); continue; } @@ -1046,8 +1046,8 @@ pub const Node = struct { head = self.head.cmpxchgWeak( head, head +% migrate, - .Acquire, - .Monotonic, + .acquire, + .monotonic, ) orelse { // Link the migrated Nodes together const first = self.array[head % capacity].raw; @@ -1070,7 +1070,7 @@ pub const Node = struct { } fn pop(self: *Buffer) ?*Node { - var head = self.head.load(.Monotonic); + var head = self.head.load(.monotonic); const tail = self.tail.raw; // we're the only thread that can change this while (true) { @@ -1086,8 +1086,8 @@ pub const Node = struct { head = self.head.cmpxchgWeak( head, head +% 1, - .Acquire, - .Monotonic, + .acquire, + .monotonic, ) orelse return self.array[head % capacity].raw; } } @@ -1101,7 +1101,7 @@ pub const Node = struct { var consumer = queue.tryAcquireConsumer() catch return null; defer queue.releaseConsumer(consumer); - const head = self.head.load(.Monotonic); + const head = self.head.load(.monotonic); const tail = self.tail.raw; // we're the only thread that can change this const size = tail -% head; @@ -1113,7 +1113,7 @@ pub const Node = struct { var pushed: Index = 0; while (pushed < capacity) : (pushed += 1) { const node = queue.pop(&consumer) orelse break; - self.array[(tail +% pushed) % capacity].store(node, .Unordered); + self.array[(tail +% pushed) % capacity].store(node, .unordered); } // We will be returning one node that we stole from the queue. @@ -1126,7 +1126,7 @@ pub const Node = struct { // Update the array tail with the nodes we pushed to it. // Release barrier to synchronize with Acquire barrier in steal()'s to see the written array Nodes. 
- if (pushed > 0) self.tail.store(tail +% pushed, .Release); + if (pushed > 0) self.tail.store(tail +% pushed, .release); return Stole{ .node = node, .pushed = pushed > 0, @@ -1134,7 +1134,7 @@ pub const Node = struct { } fn steal(noalias self: *Buffer, noalias buffer: *Buffer) ?Stole { - const head = self.head.load(.Monotonic); + const head = self.head.load(.monotonic); const tail = self.tail.raw; // we're the only thread that can change this const size = tail -% head; @@ -1142,8 +1142,8 @@ pub const Node = struct { assert(size == 0); // we should only be stealing if our array is empty while (true) : (std.atomic.spinLoopHint()) { - const buffer_head = buffer.head.load(.Acquire); - const buffer_tail = buffer.tail.load(.Acquire); + const buffer_head = buffer.head.load(.acquire); + const buffer_tail = buffer.tail.load(.acquire); // Overly large size indicates the tail was updated a lot after the head was loaded. // Reload both and try again. @@ -1162,8 +1162,8 @@ pub const Node = struct { // Atomically load from the target buffer array as it may be pushing and atomically storing to it. // Atomic store to our array as other steal() threads may be atomically loading from it as above. for (0..steal_size) |i| { - const node = buffer.array[(buffer_head +% i) % capacity].load(.Unordered); - self.array[(tail +% i) % capacity].store(node, .Unordered); + const node = buffer.array[(buffer_head +% i) % capacity].load(.unordered); + self.array[(tail +% i) % capacity].store(node, .unordered); } // Try to commit the steal from the target buffer using: @@ -1173,8 +1173,8 @@ pub const Node = struct { _ = buffer.head.cmpxchgStrong( buffer_head, buffer_head +% steal_size, - .AcqRel, - .Monotonic, + .acq_rel, + .monotonic, ) orelse { // Pop one from the nodes we stole as we'll be returning it const pushed = steal_size - 1; @@ -1182,7 +1182,7 @@ pub const Node = struct { // Update the array tail with the nodes we pushed to it. 
// Release barrier to synchronize with Acquire barrier in steal()'s to see the written array Nodes. - if (pushed > 0) self.tail.store(tail +% pushed, .Release); + if (pushed > 0) self.tail.store(tail +% pushed, .release); return Stole{ .node = node, .pushed = pushed > 0, diff --git a/src/tmp.zig b/src/tmp.zig index 22531204f9..9b1cd7a254 100644 --- a/src/tmp.zig +++ b/src/tmp.zig @@ -1,7 +1,7 @@ const bun = @import("root").bun; const std = @import("std"); const Environment = bun.Environment; -const O = std.os.O; +const O = bun.O; // O_TMPFILE doesn't seem to work very well. const allow_tmpfile = false; diff --git a/src/tracy.zig b/src/tracy.zig index e55a13c026..b83c6946ab 100644 --- a/src/tracy.zig +++ b/src/tracy.zig @@ -537,7 +537,7 @@ fn dlsym(comptime Type: type, comptime symbol: [:0]const u8) ?Type { 0; if (bun.getenvZ("BUN_TRACY_PATH")) |path| { - const handle = bun.C.dlopen(&(std.os.toPosixPath(path) catch unreachable), RLTD); + const handle = bun.C.dlopen(&(std.posix.toPosixPath(path) catch unreachable), RLTD); if (handle != null) { Handle.handle = handle; break :get; diff --git a/src/trait.zig b/src/trait.zig index 1471f3555a..5c3db37749 100644 --- a/src/trait.zig +++ b/src/trait.zig @@ -64,8 +64,8 @@ pub inline fn isSingleItemPtr(comptime T: type) bool { pub fn isExternContainer(comptime T: type) bool { return switch (@typeInfo(T)) { - .Struct => |s| s.layout == .Extern, - .Union => |u| u.layout == .Extern, + .Struct => |s| s.layout == .@"extern", + .Union => |u| u.layout == .@"extern", else => false, }; } diff --git a/src/watcher.zig b/src/watcher.zig index 43f0dde10a..a3c2cd6034 100644 --- a/src/watcher.zig +++ b/src/watcher.zig @@ -50,11 +50,11 @@ const INotify = struct { pub fn watchPath(this: *INotify, pathname: [:0]const u8) bun.JSC.Maybe(EventListIndex) { bun.assert(this.loaded_inotify); - const old_count = this.watch_count.fetchAdd(1, .Release); + const old_count = this.watch_count.fetchAdd(1, .release); defer if (old_count == 0) 
Futex.wake(&this.watch_count, 10); const watch_file_mask = std.os.linux.IN.EXCL_UNLINK | std.os.linux.IN.MOVE_SELF | std.os.linux.IN.DELETE_SELF | std.os.linux.IN.MOVED_TO | std.os.linux.IN.MODIFY; return .{ - .result = std.os.inotify_add_watchZ(this.inotify_fd, pathname, watch_file_mask) catch |err| return .{ + .result = std.posix.inotify_add_watchZ(this.inotify_fd, pathname, watch_file_mask) catch |err| return .{ .err = .{ .errno = @truncate(@intFromEnum(switch (err) { error.FileNotFound => bun.C.E.NOENT, @@ -74,11 +74,11 @@ const INotify = struct { pub fn watchDir(this: *INotify, pathname: [:0]const u8) bun.JSC.Maybe(EventListIndex) { bun.assert(this.loaded_inotify); - const old_count = this.watch_count.fetchAdd(1, .Release); + const old_count = this.watch_count.fetchAdd(1, .release); defer if (old_count == 0) Futex.wake(&this.watch_count, 10); const watch_dir_mask = std.os.linux.IN.EXCL_UNLINK | std.os.linux.IN.DELETE | std.os.linux.IN.DELETE_SELF | std.os.linux.IN.CREATE | std.os.linux.IN.MOVE_SELF | std.os.linux.IN.ONLYDIR | std.os.linux.IN.MOVED_TO; return .{ - .result = std.os.inotify_add_watchZ(this.inotify_fd, pathname, watch_dir_mask) catch |err| return .{ + .result = std.posix.inotify_add_watchZ(this.inotify_fd, pathname, watch_dir_mask) catch |err| return .{ .err = .{ .errno = @truncate(@intFromEnum(switch (err) { error.FileNotFound => bun.C.E.NOENT, @@ -98,7 +98,7 @@ const INotify = struct { pub fn unwatch(this: *INotify, wd: EventListIndex) void { bun.assert(this.loaded_inotify); - _ = this.watch_count.fetchSub(1, .Release); + _ = this.watch_count.fetchSub(1, .release); std.os.inotify_rm_watch(this.inotify_fd, wd); } @@ -110,7 +110,7 @@ const INotify = struct { this.coalesce_interval = std.fmt.parseInt(isize, env, 10) catch 100_000; } - this.inotify_fd = try std.os.inotify_init1(std.os.linux.IN.CLOEXEC); + this.inotify_fd = try std.posix.inotify_init1(std.os.linux.IN.CLOEXEC); } pub fn read(this: *INotify) bun.JSC.Maybe([]*const INotifyEvent) { @@ 
-121,13 +121,13 @@ const INotify = struct { error.TimedOut => unreachable, // timeout is infinite }; - const rc = std.os.system.read( + const rc = std.posix.system.read( this.inotify_fd, @as([*]u8, @ptrCast(@alignCast(&this.eventlist))), @sizeOf(EventListBuffer), ); - const errno = std.os.errno(rc); + const errno = std.posix.errno(rc); switch (errno) { .SUCCESS => { var len = @as(usize, @intCast(rc)); @@ -137,20 +137,20 @@ const INotify = struct { // IN_MODIFY is very noisy // we do a 0.1ms sleep to try to coalesce events better if (len < (@sizeOf(EventListBuffer) / 2)) { - var fds = [_]std.os.pollfd{.{ + var fds = [_]std.posix.pollfd{.{ .fd = this.inotify_fd, - .events = std.os.POLL.IN | std.os.POLL.ERR, + .events = std.posix.POLL.IN | std.posix.POLL.ERR, .revents = 0, }}; - var timespec = std.os.timespec{ .tv_sec = 0, .tv_nsec = this.coalesce_interval }; - if ((std.os.ppoll(&fds, &timespec, null) catch 0) > 0) { + var timespec = std.posix.timespec{ .tv_sec = 0, .tv_nsec = this.coalesce_interval }; + if ((std.posix.ppoll(&fds, &timespec, null) catch 0) > 0) { while (true) { - const new_rc = std.os.system.read( + const new_rc = std.posix.system.read( this.inotify_fd, @as([*]u8, @ptrCast(@alignCast(&this.eventlist))) + len, @sizeOf(EventListBuffer) - len, ); - const e = std.os.errno(new_rc); + const e = std.posix.errno(new_rc); switch (e) { .SUCCESS => { len += @as(usize, @intCast(new_rc)); @@ -222,7 +222,7 @@ const DarwinWatcher = struct { fd: bun.FileDescriptor = bun.invalid_fd, pub fn init(this: *DarwinWatcher, _: []const u8) !void { - const fd = try std.os.kqueue(); + const fd = try std.posix.kqueue(); if (fd == 0) return error.KQueueError; this.fd = bun.toFD(fd); } @@ -281,6 +281,7 @@ const WindowsWatcher = struct { .syscall = .watch, } }; } + log("read directory changes!", .{}); return .{ .result = {} }; } }; @@ -367,7 +368,10 @@ const WindowsWatcher = struct { // wait until new events are available pub fn next(this: *WindowsWatcher, timeout: Timeout) 
bun.JSC.Maybe(?EventIterator) { switch (this.watcher.prepare()) { - .err => |err| return .{ .err = err }, + .err => |err| { + log("prepare() returned error", .{}); + return .{ .err = err }; + }, .result => {}, } @@ -378,7 +382,7 @@ const WindowsWatcher = struct { const rc = w.kernel32.GetQueuedCompletionStatus(this.iocp, &nbytes, &key, &overlapped, @intFromEnum(timeout)); if (rc == 0) { const err = w.kernel32.GetLastError(); - if (err == w.Win32Error.TIMEOUT) { + if (err == .TIMEOUT or err == .WAIT_TIMEOUT) { return .{ .result = null }; } else { log("GetQueuedCompletionStatus failed: {s}", .{@tagName(err)}); @@ -397,6 +401,7 @@ const WindowsWatcher = struct { if (nbytes == 0) { // shutdown notification // TODO close handles? + log("shutdown notification in WindowsWatcher.next", .{}); return .{ .err = .{ .errno = @intFromEnum(bun.C.SystemErrno.ESHUTDOWN), .syscall = .watch, @@ -685,7 +690,7 @@ pub fn NewWatcher(comptime ContextType: type) type { while (true) { defer Output.flush(); - var count_ = std.os.system.kevent( + var count_ = std.posix.system.kevent( this.platform.fd.cast(), @as([*]KEvent, changelist), 0, @@ -698,8 +703,8 @@ pub fn NewWatcher(comptime ContextType: type) type { // Give the events more time to coallesce if (count_ < 128 / 2) { const remain = 128 - count_; - var timespec = std.os.timespec{ .tv_sec = 0, .tv_nsec = 100_000 }; - const extra = std.os.system.kevent( + var timespec = std.posix.timespec{ .tv_sec = 0, .tv_nsec = 100_000 }; + const extra = std.posix.system.kevent( this.platform.fd.cast(), @as([*]KEvent, changelist[@as(usize, @intCast(count_))..].ptr), 0, @@ -822,6 +827,7 @@ pub fn NewWatcher(comptime ContextType: type) type { } } } else if (Environment.isWindows) { + log("_watchLoop", .{}); var buf: bun.PathBuffer = undefined; const root = this.fs.top_level_dir; @memcpy(buf[0..root.len], root); @@ -976,7 +982,7 @@ pub fn NewWatcher(comptime ContextType: type) type { // Basically: // - We register the event here. 
// our while(true) loop above receives notification of changes to any of the events created here. - _ = std.os.system.kevent( + _ = std.posix.system.kevent( this.platform.fd.cast(), @as([]KEvent, events[0..1]).ptr, 1, @@ -1072,7 +1078,7 @@ pub fn NewWatcher(comptime ContextType: type) type { // Basically: // - We register the event here. // our while(true) loop above receives notification of changes to any of the events created here. - _ = std.os.system.kevent( + _ = std.posix.system.kevent( this.platform.fd.cast(), @as([]KEvent, events[0..1]).ptr, 1, diff --git a/src/windows.zig b/src/windows.zig index b84b78678e..de41aca60e 100644 --- a/src/windows.zig +++ b/src/windows.zig @@ -2987,8 +2987,6 @@ pub extern fn LoadLibraryA( [*:0]const u8, ) ?*anyopaque; -pub extern fn LoadLibraryExW([*:0]const u16, ?HANDLE, DWORD) ?*anyopaque; - pub const CreateHardLinkW = struct { pub fn wrapper(newFileName: LPCWSTR, existingFileName: LPCWSTR, securityAttributes: ?*win32.SECURITY_ATTRIBUTES) BOOL { const run = struct { @@ -3165,7 +3163,7 @@ pub const PROCESS_QUERY_LIMITED_INFORMATION: DWORD = 0x1000; pub fn exePathW() [:0]const u16 { const image_path_unicode_string = &std.os.windows.peb().ProcessParameters.ImagePathName; - return image_path_unicode_string.Buffer[0 .. image_path_unicode_string.Length / 2 :0]; + return image_path_unicode_string.Buffer.?[0 .. 
image_path_unicode_string.Length / 2 :0]; } pub const KEY_EVENT_RECORD = extern struct { @@ -3442,8 +3440,7 @@ pub const ENABLE_VIRTUAL_TERMINAL_INPUT = 0x200; pub const ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002; pub const ENABLE_PROCESSED_OUTPUT = 0x0001; -// TODO: when https://github.com/ziglang/zig/pull/18692 merges, use std.os.windows for this -pub extern fn SetConsoleMode(console_handle: *anyopaque, mode: u32) u32; +const SetConsoleMode = kernel32.SetConsoleMode; pub extern fn SetStdHandle(nStdHandle: u32, hHandle: *anyopaque) u32; pub extern fn GetConsoleOutputCP() u32; pub extern "kernel32" fn SetConsoleCP(wCodePageID: std.os.windows.UINT) callconv(std.os.windows.WINAPI) std.os.windows.BOOL; diff --git a/src/windows_c.zig b/src/windows_c.zig index f5abfd07c2..d465ac0ecf 100644 --- a/src/windows_c.zig +++ b/src/windows_c.zig @@ -2,7 +2,7 @@ const std = @import("std"); const bun = @import("root").bun; const builtin = @import("builtin"); const win32 = std.os.windows; -const os = std.os; +const posix = std.posix; const mem = std.mem; const Stat = std.fs.File.Stat; const Kind = std.fs.File.Kind; @@ -818,7 +818,7 @@ pub const SystemErrno = enum(u16) { return labels.get(this) orelse null; } - const LabelMap = bun.enums.EnumMap(SystemErrno, []const u8); + const LabelMap = std.enums.EnumMap(SystemErrno, []const u8); pub const labels: LabelMap = brk: { var map: LabelMap = LabelMap.initFull(""); @@ -959,7 +959,7 @@ pub const SystemErrno = enum(u16) { }; pub const off_t = i64; -pub fn preallocate_file(_: os.fd_t, _: off_t, _: off_t) !void {} +pub fn preallocate_file(_: posix.fd_t, _: off_t, _: off_t) !void {} const uv = @import("./deps/libuv.zig"); diff --git a/src/work_pool.zig b/src/work_pool.zig index 495bfdb292..5cbad488ff 100644 --- a/src/work_pool.zig +++ b/src/work_pool.zig @@ -42,7 +42,7 @@ pub fn NewWorkPool(comptime max_threads: ?usize) type { allocator: std.mem.Allocator, pub fn callback(task: *Task) void { - var this_task = @fieldParentPtr(@This(), "task", task); + 
var this_task: *@This() = @fieldParentPtr("task", task); function(this_task.context); this_task.allocator.destroy(this_task); } diff --git a/test/js/bun/util/inspect.test.js b/test/js/bun/util/inspect.test.js index 4c293d1059..476618202f 100644 --- a/test/js/bun/util/inspect.test.js +++ b/test/js/bun/util/inspect.test.js @@ -220,8 +220,8 @@ it("BigIntArray", () => { } }); -it("FloatArray", () => { - for (let TypedArray of [Float32Array, Float64Array]) { +for (let TypedArray of [Float32Array, Float64Array]) { + it(TypedArray.name + " " + Math.fround(42.68), () => { const buffer = new TypedArray([Math.fround(42.68)]); const input = Bun.inspect(buffer); @@ -231,8 +231,22 @@ it("FloatArray", () => { `${TypedArray.name}(${buffer.length - i}) [ ` + [...buffer.subarray(i)].join(", ") + " ]", ); } - } -}); + }); + + it(TypedArray.name + " " + 42.68, () => { + const buffer = new TypedArray([42.68]); + const input = Bun.inspect(buffer); + + expect(input).toBe( + `${TypedArray.name}(${buffer.length}) [ ${[TypedArray === Float32Array ? Math.fround(42.68) : 42.68].join(", ")} ]`, + ); + for (let i = 1; i < buffer.length + 1; i++) { + expect(Bun.inspect(buffer.subarray(i))).toBe( + `${TypedArray.name}(${buffer.length - i}) [ ` + [...buffer.subarray(i)].join(", ") + " ]", + ); + } + }); +} it("jsx with two elements", () => { const input = Bun.inspect(