From 9244d827b4eeb9bd82b09eb95b6a5c74a65a3073 Mon Sep 17 00:00:00 2001 From: chloe caruso Date: Wed, 15 Jan 2025 15:51:23 -0800 Subject: [PATCH] nodefs compat pr 2 --- src/Watcher.zig | 670 ++++++ src/bake/DevServer.zig | 6 +- src/bun.js/api/bun/dns_resolver.zig | 2 +- src/bun.js/javascript.zig | 25 +- src/bun.js/module_loader.zig | 6 +- src/bun.js/node/node_fs.zig | 20 +- src/bun.js/node/path_watcher.zig | 21 +- src/bun.js/node/types.zig | 16 +- src/bun.js/node/win_watcher.zig | 2 +- src/bun.zig | 2 + src/bundler/bundle_v2.zig | 2 +- src/c-headers-for-zig.h | 2 + src/compile_target.zig | 34 +- src/darwin_c.zig | 8 +- src/dns.zig | 10 +- src/linux_c.zig | 8 +- src/string_immutable.zig | 15 + src/sys.zig | 14 +- src/watcher.zig | 1810 ++++++----------- src/watcher/INotifyWatcher.zig | 336 +++ src/watcher/KEventWatcher.zig | 111 + src/watcher/WindowsWatcher.zig | 301 +++ test/js/node/test/common/tmpdir.js | 1 + .../test/parallel/test-fs-error-messages.js | 850 ++++++++ .../test/parallel/test-fs-existssync-false.js | 32 + test/js/node/test/parallel/test-fs-fmap.js | 28 + .../test-fs-promises-file-handle-read.js | 129 ++ .../test-fs-promises-write-optional-params.js | 110 + .../test/parallel/test-fs-readdir-ucs2.js | 31 + .../test/parallel/test-fs-readfile-flags.js | 50 + .../parallel/test-fs-readfilesync-enoent.js | 32 + .../test-fs-realpath-on-substed-drive.js | 51 + .../parallel/test-fs-symlink-dir-junction.js | 63 + .../node/test/parallel/test-fs-symlink-dir.js | 81 + .../test/parallel/test-fs-symlink-longpath.js | 27 + ...est-fs-watch-file-enoent-after-deletion.js | 5 + ...-watch-recursive-add-file-to-new-folder.js | 53 + ...s-watch-recursive-linux-parallel-remove.js | 33 + .../test-fs-watch-recursive-symlink.js | 111 + 39 files changed, 3781 insertions(+), 1327 deletions(-) create mode 100644 src/Watcher.zig create mode 100644 src/watcher/INotifyWatcher.zig create mode 100644 src/watcher/KEventWatcher.zig create mode 100644 src/watcher/WindowsWatcher.zig create 
mode 100644 test/js/node/test/parallel/test-fs-error-messages.js create mode 100644 test/js/node/test/parallel/test-fs-existssync-false.js create mode 100644 test/js/node/test/parallel/test-fs-fmap.js create mode 100644 test/js/node/test/parallel/test-fs-promises-file-handle-read.js create mode 100644 test/js/node/test/parallel/test-fs-promises-write-optional-params.js create mode 100644 test/js/node/test/parallel/test-fs-readdir-ucs2.js create mode 100644 test/js/node/test/parallel/test-fs-readfile-flags.js create mode 100644 test/js/node/test/parallel/test-fs-readfilesync-enoent.js create mode 100644 test/js/node/test/parallel/test-fs-realpath-on-substed-drive.js create mode 100644 test/js/node/test/parallel/test-fs-symlink-dir-junction.js create mode 100644 test/js/node/test/parallel/test-fs-symlink-dir.js create mode 100644 test/js/node/test/parallel/test-fs-symlink-longpath.js create mode 100644 test/js/node/test/parallel/test-fs-watch-recursive-add-file-to-new-folder.js create mode 100644 test/js/node/test/parallel/test-fs-watch-recursive-linux-parallel-remove.js create mode 100644 test/js/node/test/parallel/test-fs-watch-recursive-symlink.js diff --git a/src/Watcher.zig b/src/Watcher.zig new file mode 100644 index 0000000000..7639bc11be --- /dev/null +++ b/src/Watcher.zig @@ -0,0 +1,670 @@ +//! Bun's cross-platform filesystem watcher. Runs on its own thread. 
+const Watcher = @This(); +pub const max_count = 128; + +pub const Event = WatchEvent; +pub const Item = WatchItem; +pub const ItemList = WatchList; +pub const WatchList = std.MultiArrayList(WatchItem); +pub const HashType = u32; +const no_watch_item: WatchItemIndex = std.math.maxInt(WatchItemIndex); + +// Consumer-facing +watch_events: [128]WatchEvent, +changed_filepaths: [128]?[:0]u8, + +/// The platform-specific implementation of the watcher +platform: Platform, + +watchlist: WatchList, +watched_count: usize, +mutex: Mutex, + +fs: *bun.fs.FileSystem, +allocator: std.mem.Allocator, +watchloop_handle: ?std.Thread.Id = null, +cwd: string, +thread: std.Thread = undefined, +running: bool = true, +close_descriptors: bool = false, + +evict_list: [max_eviction_count]WatchItemIndex = undefined, +evict_list_i: WatchItemIndex = 0, + +ctx: *anyopaque, +onFileUpdate: *const fn (this: *anyopaque, events: []WatchEvent, changed_files: []?[:0]u8, watchlist: WatchList) void, +onError: *const fn (this: *anyopaque, err: bun.sys.Error) void, + +thread_lock: bun.DebugThreadLock = bun.DebugThreadLock.unlocked, + +/// Initializes a watcher. Each watcher is tied to some context type, which +/// receives watch callbacks on the watcher thread. This function does not +/// actually start the watcher thread. 
+/// +/// const watcher = try Watcher.init(T, instance_of_t, fs, bun.default_allocator) +/// errdefer watcher.deinit(false); +/// try watcher.start(); +/// +/// To integrate a started watcher into module resolution: +/// +/// transpiler.resolver.watcher = watcher.getResolveWatcher(); +/// +/// To integrate a started watcher into bundle_v2: +/// +/// bundle_v2.bun_watcher = watcher; +pub fn init(comptime T: type, ctx: *T, fs: *bun.fs.FileSystem, allocator: std.mem.Allocator) !*Watcher { + const wrapped = struct { + fn onFileUpdateWrapped(ctx_opaque: *anyopaque, events: []WatchEvent, changed_files: []?[:0]u8, watchlist: WatchList) void { + T.onFileUpdate(@alignCast(@ptrCast(ctx_opaque)), events, changed_files, watchlist); + } + fn onErrorWrapped(ctx_opaque: *anyopaque, err: bun.sys.Error) void { + if (@hasDecl(T, "onWatchError")) { + T.onWatchError(@alignCast(@ptrCast(ctx_opaque)), err); + } else { + T.onError(@alignCast(@ptrCast(ctx_opaque)), err); + } + } + }; + + const watcher = try allocator.create(Watcher); + errdefer allocator.destroy(watcher); + watcher.* = Watcher{ + .fs = fs, + .allocator = allocator, + .watched_count = 0, + .watchlist = WatchList{}, + .mutex = .{}, + .cwd = fs.top_level_dir, + .ctx = ctx, + .onFileUpdate = &wrapped.onFileUpdateWrapped, + .onError = &wrapped.onErrorWrapped, + .platform = .{}, + .watch_events = undefined, + .changed_filepaths = [_]?[:0]u8{null} ** 128, + }; + + try Platform.init(&watcher.platform, fs.top_level_dir); + + return watcher; +} + +pub fn start(this: *Watcher) !void { + bun.assert(this.watchloop_handle == null); + this.thread = try std.Thread.spawn(.{}, threadMain, .{this}); +} + +pub fn deinit(this: *Watcher, close_descriptors: bool) void { + if (this.watchloop_handle != null) { + this.mutex.lock(); + defer this.mutex.unlock(); + this.close_descriptors = close_descriptors; + this.running = false; + } else { + if (close_descriptors and this.running) { + const fds = this.watchlist.items(.fd); + for (fds) |fd| { + _ = 
bun.sys.close(fd); + } + } + this.watchlist.deinit(this.allocator); + const allocator = this.allocator; + allocator.destroy(this); + } +} + +pub fn getHash(filepath: string) HashType { + return @as(HashType, @truncate(bun.hash(filepath))); +} + +pub const WatchItemIndex = u16; +pub const max_eviction_count = 8096; + +const log = bun.Output.scoped(.watcher, false); + +const WindowsWatcher = @import("./watcher/WindowsWatcher.zig"); +// TODO: some platform-specific behavior is implemented in +// this file instead of the platform-specific file. +// ideally, the constants above can be inlined +const Platform = switch (Environment.os) { + .linux => @import("./watcher/INotifyWatcher.zig"), + .mac => @import("./watcher/KEventWatcher.zig"), + .windows => WindowsWatcher, + else => @compileError("Unsupported platform"), +}; + +pub const WatchEvent = struct { + index: WatchItemIndex, + op: Op, + name_off: u8 = 0, + name_len: u8 = 0, + + pub fn names(this: WatchEvent, buf: []?[:0]u8) []?[:0]u8 { + if (this.name_len == 0) return &[_]?[:0]u8{}; + return buf[this.name_off..][0..this.name_len]; + } + + pub const Sorter = void; + + pub fn sortByIndex(_: Sorter, event: WatchEvent, rhs: WatchEvent) bool { + return event.index < rhs.index; + } + + pub fn merge(this: *WatchEvent, other: WatchEvent) void { + this.name_len += other.name_len; + this.op = Op{ + .delete = this.op.delete or other.op.delete, + .metadata = this.op.metadata or other.op.metadata, + .rename = this.op.rename or other.op.rename, + .write = this.op.write or other.op.write, + }; + } + + pub const Op = packed struct { + delete: bool = false, + metadata: bool = false, + rename: bool = false, + write: bool = false, + move_to: bool = false, + + pub fn merge(before: Op, after: Op) Op { + return .{ + .delete = before.delete or after.delete, + .write = before.write or after.write, + .metadata = before.metadata or after.metadata, + .rename = before.rename or after.rename, + .move_to = before.move_to or after.move_to, + }; + } 
+ + pub fn format(op: Op, comptime _: []const u8, _: std.fmt.FormatOptions, w: anytype) !void { + try w.writeAll("{"); + var first = true; + inline for (comptime std.meta.fieldNames(Op)) |name| { + if (@field(op, name)) { + if (!first) { + try w.writeAll(","); + } + first = false; + try w.writeAll(name); + } + } + try w.writeAll("}"); + } + }; +}; + +pub const WatchItem = struct { + file_path: string, + // filepath hash for quick comparison + hash: u32, + loader: options.Loader, + fd: bun.FileDescriptor, + count: u32, + parent_hash: u32, + kind: Kind, + package_json: ?*PackageJSON, + eventlist_index: if (Environment.isLinux) Platform.EventListIndex else u0 = 0, + + pub const Kind = enum { file, directory }; +}; + +fn threadMain(this: *Watcher) !void { + this.watchloop_handle = std.Thread.getCurrentId(); + this.thread_lock.lock(); + Output.Source.configureNamedThread("File Watcher"); + + defer Output.flush(); + if (FeatureFlags.verbose_watcher) Output.prettyln("Watcher started", .{}); + + switch (this.watchLoop()) { + .err => |err| { + this.watchloop_handle = null; + this.platform.stop(); + if (this.running) { + this.onError(this.ctx, err); + } + }, + .result => {}, + } + + // deinit and close descriptors if needed + if (this.close_descriptors) { + const fds = this.watchlist.items(.fd); + for (fds) |fd| { + _ = bun.sys.close(fd); + } + } + this.watchlist.deinit(this.allocator); + + const allocator = this.allocator; + allocator.destroy(this); +} + +pub fn flushEvictions(this: *Watcher) void { + if (this.evict_list_i == 0) return; + defer this.evict_list_i = 0; + + // swapRemove messes up the order + // But, it only messes up the order if any elements in the list appear after the item being removed + // So if we just sort the list by the biggest index first, that should be fine + std.sort.pdq( + WatchItemIndex, + this.evict_list[0..this.evict_list_i], + {}, + comptime std.sort.desc(WatchItemIndex), + ); + + var slice = this.watchlist.slice(); + const fds = 
slice.items(.fd); + var last_item = no_watch_item; + + for (this.evict_list[0..this.evict_list_i]) |item| { + // catch duplicates, since the list is sorted, duplicates will appear right after each other + if (item == last_item) continue; + + if (!Environment.isWindows) { + // on mac and linux we can just close the file descriptor + // TODO do we need to call inotify_rm_watch on linux? + _ = bun.sys.close(fds[item]); + } + last_item = item; + } + + last_item = no_watch_item; + // This is split into two passes because reading the slice while modified is potentially unsafe. + for (this.evict_list[0..this.evict_list_i]) |item| { + if (item == last_item) continue; + this.watchlist.swapRemove(item); + last_item = item; + } +} + +fn watchLoop(this: *Watcher) bun.JSC.Maybe(void) { + while (this.running) { + // individual platform implementation will call onFileUpdate + switch (Platform.watchLoopCycle(this)) { + .err => |err| return .{ .err = err }, + .result => |iter| iter, + } + } + return .{ .result = {} }; +} + +fn appendFileAssumeCapacity( + this: *Watcher, + fd: bun.FileDescriptor, + file_path: string, + hash: HashType, + loader: options.Loader, + parent_hash: HashType, + package_json: ?*PackageJSON, + comptime copy_file_path: bool, +) bun.JSC.Maybe(void) { + if (comptime Environment.isWindows) { + // on windows we can only watch items that are in the directory tree of the top level dir + const rel = bun.path.isParentOrEqual(this.fs.top_level_dir, file_path); + if (rel == .unrelated) { + Output.warn("File {s} is not in the project directory and will not be watched\n", .{file_path}); + return .{ .result = {} }; + } + } + + const watchlist_id = this.watchlist.len; + + const file_path_: string = if (comptime copy_file_path) + bun.asByteSlice(this.allocator.dupeZ(u8, file_path) catch bun.outOfMemory()) + else + file_path; + + var item = WatchItem{ + .file_path = file_path_, + .fd = fd, + .hash = hash, + .count = 0, + .loader = loader, + .parent_hash = parent_hash, + 
.package_json = package_json, + .kind = .file, + }; + + if (comptime Environment.isMac) { + const KEvent = std.c.Kevent; + + // https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/kqueue.2.html + var event = std.mem.zeroes(KEvent); + + event.flags = std.c.EV_ADD | std.c.EV_CLEAR | std.c.EV_ENABLE; + // we want to know about the vnode + event.filter = std.c.EVFILT_VNODE; + + event.fflags = std.c.NOTE_WRITE | std.c.NOTE_RENAME | std.c.NOTE_DELETE; + + // id + event.ident = @intCast(fd.int()); + + // Store the hash for fast filtering later + event.udata = @as(usize, @intCast(watchlist_id)); + var events: [1]KEvent = .{event}; + + // This took a lot of work to figure out the right permutation + // Basically: + // - We register the event here. + // our while(true) loop above receives notification of changes to any of the events created here. + _ = std.posix.system.kevent( + this.platform.fd.cast(), + @as([]KEvent, events[0..1]).ptr, + 1, + @as([]KEvent, events[0..1]).ptr, + 0, + null, + ); + } else if (comptime Environment.isLinux) { + // var file_path_to_use_ = std.mem.trimRight(u8, file_path_, "/"); + // var buf: [bun.MAX_PATH_BYTES+1]u8 = undefined; + // bun.copy(u8, &buf, file_path_to_use_); + // buf[file_path_to_use_.len] = 0; + var buf = file_path_.ptr; + const slice: [:0]const u8 = buf[0..file_path_.len :0]; + item.eventlist_index = switch (this.platform.watchPath(slice)) { + .err => |err| return .{ .err = err }, + .result => |r| r, + }; + } + + this.watchlist.appendAssumeCapacity(item); + return .{ .result = {} }; +} + +fn appendDirectoryAssumeCapacity( + this: *Watcher, + stored_fd: bun.FileDescriptor, + file_path: string, + hash: HashType, + comptime copy_file_path: bool, +) bun.JSC.Maybe(WatchItemIndex) { + if (comptime Environment.isWindows) { + // on windows we can only watch items that are in the directory tree of the top level dir + const rel = bun.path.isParentOrEqual(this.fs.top_level_dir, file_path); + if 
(rel == .unrelated) { + Output.warn("Directory {s} is not in the project directory and will not be watched\n", .{file_path}); + return .{ .result = no_watch_item }; + } + } + + const fd = brk: { + if (stored_fd != .zero) break :brk stored_fd; + break :brk switch (bun.sys.openA(file_path, 0, 0)) { + .err => |err| return .{ .err = err }, + .result => |fd| fd, + }; + }; + + const parent_hash = getHash(bun.fs.PathName.init(file_path).dirWithTrailingSlash()); + + const file_path_: string = if (comptime copy_file_path) + bun.asByteSlice(this.allocator.dupeZ(u8, file_path) catch bun.outOfMemory()) + else + file_path; + + const watchlist_id = this.watchlist.len; + + var item = WatchItem{ + .file_path = file_path_, + .fd = fd, + .hash = hash, + .count = 0, + .loader = options.Loader.file, + .parent_hash = parent_hash, + .kind = .directory, + .package_json = null, + }; + + if (Environment.isMac) { + const KEvent = std.c.Kevent; + + // https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/kqueue.2.html + var event = std.mem.zeroes(KEvent); + + event.flags = std.c.EV_ADD | std.c.EV_CLEAR | std.c.EV_ENABLE; + // we want to know about the vnode + event.filter = std.c.EVFILT_VNODE; + + // monitor: + // - Write + // - Rename + // - Delete + event.fflags = std.c.NOTE_WRITE | std.c.NOTE_RENAME | std.c.NOTE_DELETE; + + // id + event.ident = @intCast(fd.int()); + + // Store the hash for fast filtering later + event.udata = @as(usize, @intCast(watchlist_id)); + var events: [1]KEvent = .{event}; + + // This took a lot of work to figure out the right permutation + // Basically: + // - We register the event here. + // our while(true) loop above receives notification of changes to any of the events created here. 
+ _ = std.posix.system.kevent( + this.platform.fd.cast(), + @as([]KEvent, events[0..1]).ptr, + 1, + @as([]KEvent, events[0..1]).ptr, + 0, + null, + ); + } else if (Environment.isLinux) { + const file_path_to_use_ = std.mem.trimRight(u8, file_path_, "/"); + var buf: bun.PathBuffer = undefined; + bun.copy(u8, &buf, file_path_to_use_); + buf[file_path_to_use_.len] = 0; + const slice: [:0]u8 = buf[0..file_path_to_use_.len :0]; + item.eventlist_index = switch (this.platform.watchDir(slice)) { + .err => |err| return .{ .err = err }, + .result => |r| r, + }; + } + + this.watchlist.appendAssumeCapacity(item); + return .{ + .result = @as(WatchItemIndex, @truncate(this.watchlist.len - 1)), + }; +} + +// Below is platform-independent + +pub fn appendFileMaybeLock( + this: *Watcher, + fd: bun.FileDescriptor, + file_path: string, + hash: HashType, + loader: options.Loader, + dir_fd: bun.FileDescriptor, + package_json: ?*PackageJSON, + comptime copy_file_path: bool, + comptime lock: bool, +) bun.JSC.Maybe(void) { + if (comptime lock) this.mutex.lock(); + defer if (comptime lock) this.mutex.unlock(); + bun.assert(file_path.len > 1); + const pathname = bun.fs.PathName.init(file_path); + + const parent_dir = pathname.dirWithTrailingSlash(); + const parent_dir_hash: HashType = getHash(parent_dir); + + var parent_watch_item: ?WatchItemIndex = null; + const autowatch_parent_dir = (comptime FeatureFlags.watch_directories) and this.isEligibleDirectory(parent_dir); + if (autowatch_parent_dir) { + var watchlist_slice = this.watchlist.slice(); + + if (dir_fd != .zero) { + const fds = watchlist_slice.items(.fd); + if (std.mem.indexOfScalar(bun.FileDescriptor, fds, dir_fd)) |i| { + parent_watch_item = @as(WatchItemIndex, @truncate(i)); + } + } + + if (parent_watch_item == null) { + const hashes = watchlist_slice.items(.hash); + if (std.mem.indexOfScalar(HashType, hashes, parent_dir_hash)) |i| { + parent_watch_item = @as(WatchItemIndex, @truncate(i)); + } + } + } + 
this.watchlist.ensureUnusedCapacity(this.allocator, 1 + @as(usize, @intCast(@intFromBool(parent_watch_item == null)))) catch bun.outOfMemory(); + + if (autowatch_parent_dir) { + parent_watch_item = parent_watch_item orelse switch (this.appendDirectoryAssumeCapacity(dir_fd, parent_dir, parent_dir_hash, copy_file_path)) { + .err => |err| return .{ .err = err }, + .result => |r| r, + }; + } + + switch (this.appendFileAssumeCapacity( + fd, + file_path, + hash, + loader, + parent_dir_hash, + package_json, + copy_file_path, + )) { + .err => |err| return .{ .err = err }, + .result => {}, + } + + if (comptime FeatureFlags.verbose_watcher) { + if (strings.indexOf(file_path, this.cwd)) |i| { + Output.prettyln("Added ./{s} to watch list.", .{file_path[i + this.cwd.len ..]}); + } else { + Output.prettyln("Added {s} to watch list.", .{file_path}); + } + } + + return .{ .result = {} }; +} + +inline fn isEligibleDirectory(this: *Watcher, dir: string) bool { + return strings.contains(dir, this.fs.top_level_dir) and !strings.contains(dir, "node_modules"); +} + +pub fn appendFile( + this: *Watcher, + fd: bun.FileDescriptor, + file_path: string, + hash: HashType, + loader: options.Loader, + dir_fd: bun.FileDescriptor, + package_json: ?*PackageJSON, + comptime copy_file_path: bool, +) bun.JSC.Maybe(void) { + return appendFileMaybeLock(this, fd, file_path, hash, loader, dir_fd, package_json, copy_file_path, true); +} + +pub fn addDirectory( + this: *Watcher, + fd: bun.FileDescriptor, + file_path: string, + hash: HashType, + comptime copy_file_path: bool, +) bun.JSC.Maybe(WatchItemIndex) { + this.mutex.lock(); + defer this.mutex.unlock(); + + if (this.indexOf(hash)) |idx| { + return .{ .result = @truncate(idx) }; + } + + this.watchlist.ensureUnusedCapacity(this.allocator, 1) catch bun.outOfMemory(); + + return this.appendDirectoryAssumeCapacity(fd, file_path, hash, copy_file_path); +} + +pub fn addFile( + this: *Watcher, + fd: bun.FileDescriptor, + file_path: string, + hash: HashType, + 
loader: options.Loader, + dir_fd: bun.FileDescriptor, + package_json: ?*PackageJSON, + comptime copy_file_path: bool, +) bun.JSC.Maybe(void) { + // This must lock due to concurrent transpiler + this.mutex.lock(); + defer this.mutex.unlock(); + + if (this.indexOf(hash)) |index| { + if (comptime FeatureFlags.atomic_file_watcher) { + // On Linux, the file descriptor might be out of date. + if (fd.int() > 0) { + var fds = this.watchlist.items(.fd); + fds[index] = fd; + } + } + return .{ .result = {} }; + } + + return this.appendFileMaybeLock(fd, file_path, hash, loader, dir_fd, package_json, copy_file_path, false); +} + +pub fn indexOf(this: *Watcher, hash: HashType) ?u32 { + for (this.watchlist.items(.hash), 0..) |other, i| { + if (hash == other) { + return @as(u32, @truncate(i)); + } + } + return null; +} + +pub fn remove(this: *Watcher, hash: HashType) void { + this.mutex.lock(); + defer this.mutex.unlock(); + if (this.indexOf(hash)) |index| { + this.removeAtIndex(@truncate(index), hash, &[_]HashType{}, .file); + } +} + +pub fn removeAtIndex(this: *Watcher, index: WatchItemIndex, hash: HashType, parents: []HashType, comptime kind: WatchItem.Kind) void { + bun.assert(index != no_watch_item); + + this.evict_list[this.evict_list_i] = index; + this.evict_list_i += 1; + + if (comptime kind == .directory) { + for (parents) |parent| { + if (parent == hash) { + this.evict_list[this.evict_list_i] = @as(WatchItemIndex, @truncate(parent)); + this.evict_list_i += 1; + } + } + } +} + +pub fn getResolveWatcher(watcher: *Watcher) bun.resolver.AnyResolveWatcher { + return bun.resolver.ResolveWatcher(*@This(), onMaybeWatchDirectory).init(watcher); +} + +pub fn onMaybeWatchDirectory(watch: *Watcher, file_path: string, dir_fd: bun.StoredFileDescriptorType) void { + // We don't want to watch: + // - Directories outside the root directory + // - Directories inside node_modules + if (std.mem.indexOf(u8, file_path, "node_modules") == null and std.mem.indexOf(u8, file_path, 
watch.fs.top_level_dir) != null) { + _ = watch.addDirectory(dir_fd, file_path, getHash(file_path), false); + } +} + +const std = @import("std"); +const bun = @import("root").bun; +const string = bun.string; +const Output = bun.Output; +const Global = bun.Global; +const Environment = bun.Environment; +const strings = bun.strings; +const stringZ = bun.stringZ; +const FeatureFlags = bun.FeatureFlags; +const options = @import("./options.zig"); +const Mutex = bun.Mutex; +const Futex = @import("./futex.zig"); +const PackageJSON = @import("./resolver/package_json.zig").PackageJSON; diff --git a/src/bake/DevServer.zig b/src/bake/DevServer.zig index 096220b9c7..29350e1fdb 100644 --- a/src/bake/DevServer.zig +++ b/src/bake/DevServer.zig @@ -83,7 +83,7 @@ server_fetch_function_callback: JSC.Strong, server_register_update_callback: JSC.Strong, // Watching -bun_watcher: *JSC.Watcher, +bun_watcher: *bun.Watcher, directory_watchers: DirectoryWatchStore, watcher_atomics: WatcherAtomics, @@ -3153,7 +3153,7 @@ const DirectoryWatchStore = struct { const specifier_cloned = try dev.allocator.dupe(u8, specifier); errdefer dev.allocator.free(specifier_cloned); - const watch_index = switch (dev.bun_watcher.addDirectory(fd, dir_name, bun.JSC.GenericWatcher.getHash(dir_name), false)) { + const watch_index = switch (dev.bun_watcher.addDirectory(fd, dir_name, bun.Watcher.getHash(dir_name), false)) { .err => return error.Ignore, .result => |id| id, }; @@ -4478,7 +4478,7 @@ const Response = App.Response; const MimeType = bun.http.MimeType; const JSC = bun.JSC; -const Watcher = bun.JSC.Watcher; +const Watcher = bun.Watcher; const JSValue = JSC.JSValue; const VirtualMachine = JSC.VirtualMachine; const JSModuleLoader = JSC.JSModuleLoader; diff --git a/src/bun.js/api/bun/dns_resolver.zig b/src/bun.js/api/bun/dns_resolver.zig index 0119d828ff..63e74b34e8 100644 --- a/src/bun.js/api/bun/dns_resolver.zig +++ b/src/bun.js/api/bun/dns_resolver.zig @@ -1395,7 +1395,7 @@ pub const InternalDNS = struct { 
// https://github.com/nodejs/node/issues/33816 // https://github.com/aio-libs/aiohttp/issues/5357 // https://github.com/libuv/libuv/issues/2225 - .flags = if (Environment.isPosix) bun.C.netdb.AI_ADDRCONFIG else 0, + .flags = if (Environment.isPosix) bun.C.translated.AI_ADDRCONFIG else 0, .next = null, .protocol = 0, .socktype = std.c.SOCK.STREAM, diff --git a/src/bun.js/javascript.zig b/src/bun.js/javascript.zig index 342e33c358..5890ec9698 100644 --- a/src/bun.js/javascript.zig +++ b/src/bun.js/javascript.zig @@ -84,7 +84,7 @@ const ThreadSafeFunction = JSC.napi.ThreadSafeFunction; const PackageManager = @import("../install/install.zig").PackageManager; const IPC = @import("ipc.zig"); const DNSResolver = @import("api/bun/dns_resolver.zig").DNSResolver; -pub const GenericWatcher = @import("../watcher.zig"); +const Watcher = bun.Watcher; const ModuleLoader = JSC.ModuleLoader; const FetchFlags = JSC.FetchFlags; @@ -629,14 +629,14 @@ pub const ImportWatcher = union(enum) { } } - pub inline fn watchlist(this: ImportWatcher) GenericWatcher.WatchList { + pub inline fn watchlist(this: ImportWatcher) Watcher.WatchList { return switch (this) { inline .hot, .watch => |w| w.watchlist, else => .{}, }; } - pub inline fn indexOf(this: ImportWatcher, hash: GenericWatcher.HashType) ?u32 { + pub inline fn indexOf(this: ImportWatcher, hash: Watcher.HashType) ?u32 { return switch (this) { inline .hot, .watch => |w| w.indexOf(hash), else => null, @@ -647,7 +647,7 @@ pub const ImportWatcher = union(enum) { this: ImportWatcher, fd: StoredFileDescriptorType, file_path: string, - hash: GenericWatcher.HashType, + hash: Watcher.HashType, loader: options.Loader, dir_fd: StoredFileDescriptorType, package_json: ?*PackageJSON, @@ -3095,7 +3095,7 @@ pub const VirtualMachine = struct { pub fn reloadEntryPoint(this: *VirtualMachine, entry_path: []const u8) !*JSInternalPromise { this.has_loaded = false; this.main = entry_path; - this.main_hash = GenericWatcher.getHash(entry_path); + this.main_hash 
= Watcher.getHash(entry_path); try this.ensureDebugger(true); @@ -3131,7 +3131,7 @@ pub const VirtualMachine = struct { pub fn reloadEntryPointForTestRunner(this: *VirtualMachine, entry_path: []const u8) !*JSInternalPromise { this.has_loaded = false; this.main = entry_path; - this.main_hash = GenericWatcher.getHash(entry_path); + this.main_hash = Watcher.getHash(entry_path); this.eventLoop().ensureWaker(); @@ -4496,7 +4496,6 @@ pub const VirtualMachine = struct { } }; -pub const Watcher = GenericWatcher.NewWatcher; pub const HotReloader = NewHotReloader(VirtualMachine, JSC.EventLoop, false); pub const WatchReloader = NewHotReloader(VirtualMachine, JSC.EventLoop, true); extern fn BunDebugger__willHotReload() void; @@ -4724,9 +4723,9 @@ pub fn NewHotReloader(comptime Ctx: type, comptime EventLoopType: type, comptime pub noinline fn onFileUpdate( this: *@This(), - events: []GenericWatcher.WatchEvent, + events: []Watcher.WatchEvent, changed_files: []?[:0]u8, - watchlist: GenericWatcher.WatchList, + watchlist: Watcher.WatchList, ) void { const slice = watchlist.slice(); const file_paths = slice.items(.file_path); @@ -4834,7 +4833,7 @@ pub fn NewHotReloader(comptime Ctx: type, comptime EventLoopType: type, comptime _ = this.ctx.bustDirCache(strings.withoutTrailingSlashWindowsPath(file_path)); if (entries_option) |dir_ent| { - var last_file_hash: GenericWatcher.HashType = std.math.maxInt(GenericWatcher.HashType); + var last_file_hash: Watcher.HashType = std.math.maxInt(Watcher.HashType); for (affected) |changed_name_| { const changed_name: []const u8 = if (comptime Environment.isMac) @@ -4847,14 +4846,14 @@ pub fn NewHotReloader(comptime Ctx: type, comptime EventLoopType: type, comptime var prev_entry_id: usize = std.math.maxInt(usize); if (loader != .file) { var path_string: bun.PathString = undefined; - var file_hash: GenericWatcher.HashType = last_file_hash; + var file_hash: Watcher.HashType = last_file_hash; const abs_path: string = brk: { if 
(dir_ent.entries.get(@as([]const u8, @ptrCast(changed_name)))) |file_ent| { // reset the file descriptor file_ent.entry.cache.fd = .zero; file_ent.entry.need_stat = true; path_string = file_ent.entry.abs_path; - file_hash = GenericWatcher.getHash(path_string.slice()); + file_hash = Watcher.getHash(path_string.slice()); for (hashes, 0..) |hash, entry_id| { if (hash == file_hash) { if (file_descriptors[entry_id] != .zero) { @@ -4882,7 +4881,7 @@ pub fn NewHotReloader(comptime Ctx: type, comptime EventLoopType: type, comptime @memcpy(_on_file_update_path_buf[file_path_without_trailing_slash.len..][0..changed_name.len], changed_name); const path_slice = _on_file_update_path_buf[0 .. file_path_without_trailing_slash.len + changed_name.len + 1]; - file_hash = GenericWatcher.getHash(path_slice); + file_hash = Watcher.getHash(path_slice); break :brk path_slice; } }; diff --git a/src/bun.js/module_loader.zig b/src/bun.js/module_loader.zig index ff63039f88..1960b3bbd1 100644 --- a/src/bun.js/module_loader.zig +++ b/src/bun.js/module_loader.zig @@ -422,7 +422,7 @@ pub const RuntimeTranspilerStore = struct { var fd: ?StoredFileDescriptorType = null; var package_json: ?*PackageJSON = null; - const hash = JSC.GenericWatcher.getHash(path.text); + const hash = bun.Watcher.getHash(path.text); switch (vm.bun_watcher) { .hot, .watch => { @@ -1521,7 +1521,7 @@ pub const ModuleLoader = struct { .js, .jsx, .ts, .tsx, .json, .toml, .text => { jsc_vm.transpiled_count += 1; jsc_vm.transpiler.resetStore(); - const hash = JSC.GenericWatcher.getHash(path.text); + const hash = bun.Watcher.getHash(path.text); const is_main = jsc_vm.main.len == path.text.len and jsc_vm.main_hash == hash and strings.eqlLong(jsc_vm.main, path.text, false); @@ -2140,7 +2140,7 @@ pub const ModuleLoader = struct { break :brk .zero; } }; - const hash = JSC.GenericWatcher.getHash(path.text); + const hash = bun.Watcher.getHash(path.text); switch (jsc_vm.bun_watcher.addFile( input_fd, path.text, diff --git 
a/src/bun.js/node/node_fs.zig b/src/bun.js/node/node_fs.zig index 470da8706d..15c90ad802 100644 --- a/src/bun.js/node/node_fs.zig +++ b/src/bun.js/node/node_fs.zig @@ -3586,20 +3586,10 @@ pub const NodeFS = struct { } pub fn exists(this: *NodeFS, args: Arguments.Exists, _: Flavor) Maybe(Return.Exists) { + // NOTE: exists cannot return an error const path = args.path orelse return .{ .result = false }; - const slice = path.sliceZ(&this.sync_error_buf); - - // Use libuv access on windows - if (Environment.isWindows) { - return .{ .result = Syscall.access(slice, std.posix.F_OK) != .err }; - } - - // access() may not work correctly on NFS file systems with UID - // mapping enabled, because UID mapping is done on the server and - // hidden from the client, which checks permissions. Similar - // problems can occur to FUSE mounts. - const rc = (system.access(slice, std.posix.F_OK)); - return .{ .result = rc == 0 }; + const slice = path.osPathKernel32(&this.sync_error_buf); + return .{ .result = bun.sys.existsOSPath(slice, false) }; } pub fn chown(this: *NodeFS, args: Arguments.Chown, _: Flavor) Maybe(Return.Chown) { @@ -3751,16 +3741,14 @@ pub const NodeFS = struct { return mkdirRecursiveImpl(this, args, void, {}); } - // TODO: verify this works correctly with unicode codepoints pub fn mkdirRecursiveImpl(this: *NodeFS, args: Arguments.Mkdir, comptime Ctx: type, ctx: Ctx) Maybe(Return.Mkdir) { const buf = bun.OSPathBufferPool.get(); defer bun.OSPathBufferPool.put(buf); const path: bun.OSPathSliceZ = if (Environment.isWindows) - strings.toNTPath(buf, args.path.slice()) + strings.toKernel32Path(buf, args.path.slice()) else args.path.osPath(buf); - // TODO: remove and make it always a comptime argument return switch (args.always_return_none) { inline else => |always_return_none| this.mkdirRecursiveOSPathImpl(Ctx, ctx, path, args.mode, !always_return_none), }; diff --git a/src/bun.js/node/path_watcher.zig b/src/bun.js/node/path_watcher.zig index 4dd3aae178..9c5c43e9ed 100644 
--- a/src/bun.js/node/path_watcher.zig +++ b/src/bun.js/node/path_watcher.zig @@ -13,7 +13,6 @@ const StoredFileDescriptorType = bun.StoredFileDescriptorType; const string = bun.string; const JSC = bun.JSC; const VirtualMachine = JSC.VirtualMachine; -const GenericWatcher = @import("../../watcher.zig"); const sync = @import("../../sync.zig"); const Semaphore = sync.Semaphore; @@ -25,7 +24,7 @@ const FSWatcher = bun.JSC.Node.FSWatcher; const Event = FSWatcher.Event; const StringOrBytesToDecode = FSWatcher.FSWatchTaskWindows.StringOrBytesToDecode; -const Watcher = GenericWatcher.NewWatcher; +const Watcher = bun.Watcher; pub const PathWatcherManager = struct { const options = @import("../../options.zig"); @@ -48,7 +47,7 @@ pub const PathWatcherManager = struct { path: [:0]const u8, dirname: string, refs: u32 = 0, - hash: GenericWatcher.HashType, + hash: Watcher.HashType, }; fn refPendingTask(this: *PathWatcherManager) bool { @@ -108,7 +107,7 @@ pub const PathWatcherManager = struct { .path = cloned_path, // if is really a file we need to get the dirname .dirname = std.fs.path.dirname(cloned_path) orelse cloned_path, - .hash = GenericWatcher.getHash(cloned_path), + .hash = Watcher.getHash(cloned_path), .refs = 1, }; _ = this.file_paths.put(cloned_path, result) catch bun.outOfMemory(); @@ -123,7 +122,7 @@ pub const PathWatcherManager = struct { .is_file = false, .path = cloned_path, .dirname = cloned_path, - .hash = GenericWatcher.getHash(cloned_path), + .hash = Watcher.getHash(cloned_path), .refs = 1, }; _ = this.file_paths.put(cloned_path, result) catch bun.outOfMemory(); @@ -166,9 +165,9 @@ pub const PathWatcherManager = struct { pub fn onFileUpdate( this: *PathWatcherManager, - events: []GenericWatcher.WatchEvent, + events: []Watcher.WatchEvent, changed_files: []?[:0]u8, - watchlist: GenericWatcher.WatchList, + watchlist: Watcher.WatchList, ) void { var slice = watchlist.slice(); const file_paths = slice.items(.file_path); @@ -211,7 +210,7 @@ pub const 
PathWatcherManager = struct { if (event.op.write or event.op.delete or event.op.rename) { const event_type: PathWatcher.EventType = if (event.op.delete or event.op.rename or event.op.move_to) .rename else .change; - const hash = GenericWatcher.getHash(file_path); + const hash = Watcher.getHash(file_path); for (watchers) |w| { if (w) |watcher| { @@ -274,7 +273,7 @@ pub const PathWatcherManager = struct { const len = file_path_without_trailing_slash.len + changed_name.len; const path_slice = _on_file_update_path_buf[0 .. len + 1]; - const hash = GenericWatcher.getHash(path_slice); + const hash = Watcher.getHash(path_slice); // skip consecutive duplicates const event_type: PathWatcher.EventType = .rename; // renaming folders, creating folder or files will be always be rename @@ -745,7 +744,7 @@ pub const PathWatcher = struct { has_pending_directories: std.atomic.Value(bool) = std.atomic.Value(bool).init(false), closed: std.atomic.Value(bool) = std.atomic.Value(bool).init(false), pub const ChangeEvent = struct { - hash: GenericWatcher.HashType = 0, + hash: Watcher.HashType = 0, event_type: EventType = .change, time_stamp: i64 = 0, }; @@ -868,7 +867,7 @@ pub const PathWatcher = struct { } } - pub fn emit(this: *PathWatcher, event: Event, hash: GenericWatcher.HashType, time_stamp: i64, is_file: bool) void { + pub fn emit(this: *PathWatcher, event: Event, hash: Watcher.HashType, time_stamp: i64, is_file: bool) void { switch (event) { .change, .rename => { const event_type = switch (event) { diff --git a/src/bun.js/node/types.zig b/src/bun.js/node/types.zig index c3d84429e3..29ca6aa67f 100644 --- a/src/bun.js/node/types.zig +++ b/src/bun.js/node/types.zig @@ -926,6 +926,14 @@ pub const PathLike = union(enum) { return sliceZWithForceCopy(this, buf, false); } + pub inline fn osPathKernel32(this: PathLike, buf: *bun.PathBuffer) bun.OSPathSliceZ { + if (comptime Environment.isWindows) { + return strings.toWPath(@alignCast(std.mem.bytesAsSlice(u14, buf)), this.slice()); + } + + 
return sliceZWithForceCopy(this, buf, false); + } + pub fn toJS(this: *const PathLike, globalObject: *JSC.JSGlobalObject) JSC.JSValue { return switch (this.*) { .string => this.string.toJS(globalObject, null), @@ -1632,10 +1640,10 @@ pub fn StatType(comptime big: bool) type { const tv_sec = if (Environment.isWindows) @as(u32, @bitCast(ts.tv_sec)) else ts.tv_sec; const tv_nsec = if (Environment.isWindows) @as(u32, @bitCast(ts.tv_nsec)) else ts.tv_nsec; if (big) { - const sec: i64 = @intCast(tv_sec); - const nsec: i64 = @intCast(tv_nsec); - return @as(i64, @intCast(sec * std.time.ms_per_s)) + - @as(i64, @intCast(@divTrunc(nsec, std.time.ns_per_ms))); + const sec: i64 = tv_sec; + const nsec: i64 = tv_nsec; + return @as(i64, sec * std.time.ms_per_s) + + @as(i64, @divTrunc(nsec, std.time.ns_per_ms)); } else { return (@as(f64, @floatFromInt(tv_sec)) * std.time.ms_per_s) + (@as(f64, @floatFromInt(tv_nsec)) / std.time.ns_per_ms); diff --git a/src/bun.js/node/win_watcher.zig b/src/bun.js/node/win_watcher.zig index 93a96faf4a..8b7f8b7123 100644 --- a/src/bun.js/node/win_watcher.zig +++ b/src/bun.js/node/win_watcher.zig @@ -10,7 +10,7 @@ const JSC = bun.JSC; const VirtualMachine = JSC.VirtualMachine; const StoredFileDescriptorType = bun.StoredFileDescriptorType; const Output = bun.Output; -const Watcher = @import("../../watcher.zig"); +const Watcher = bun.Watcher; const FSWatcher = bun.JSC.Node.FSWatcher; const EventType = @import("./path_watcher.zig").PathWatcher.EventType; diff --git a/src/bun.zig b/src/bun.zig index 806cf42ae7..cae21f8163 100644 --- a/src/bun.zig +++ b/src/bun.zig @@ -1600,6 +1600,8 @@ pub const Semver = @import("./install/semver.zig"); pub const ImportRecord = @import("./import_record.zig").ImportRecord; pub const ImportKind = @import("./import_record.zig").ImportKind; +pub const Watcher = @import("./Watcher.zig"); + pub usingnamespace @import("./util.zig"); pub const fast_debug_build_cmd = .None; pub const fast_debug_build_mode = fast_debug_build_cmd != 
.None and diff --git a/src/bundler/bundle_v2.zig b/src/bundler/bundle_v2.zig index 81f3bdc2a1..c75971e00b 100644 --- a/src/bundler/bundle_v2.zig +++ b/src/bundler/bundle_v2.zig @@ -383,7 +383,7 @@ pub const BundleV2 = struct { framework: ?bake.Framework, graph: Graph, linker: LinkerContext, - bun_watcher: ?*bun.JSC.Watcher, + bun_watcher: ?*bun.Watcher, plugins: ?*JSC.API.JSBundler.Plugin, completion: ?*JSBundleCompletionTask, source_code_length: usize, diff --git a/src/c-headers-for-zig.h b/src/c-headers-for-zig.h index 42c03aca64..4209c0b950 100644 --- a/src/c-headers-for-zig.h +++ b/src/c-headers-for-zig.h @@ -18,4 +18,6 @@ #include "pwd.h" // geteuid #include +// AI_ADDRCONFIG +#include #endif diff --git a/src/compile_target.zig b/src/compile_target.zig index 82f0731dd3..5e4484d2b0 100644 --- a/src/compile_target.zig +++ b/src/compile_target.zig @@ -73,23 +73,23 @@ pub fn toNPMRegistryURLWithURL(this: *const CompileTarget, buf: []u8, registry_u return switch (this.os) { inline else => |os| switch (this.arch) { inline else => |arch| switch (this.libc) { - inline else => |libc| switch (this.baseline) { - // https://registry.npmjs.org/@oven/bun-linux-x64/-/bun-linux-x64-0.1.6.tgz - inline else => |is_baseline| try std.fmt.bufPrint(buf, comptime "{s}/@oven/bun-" ++ - os.npmName() ++ "-" ++ arch.npmName() ++ - libc.npmName() ++ - (if (is_baseline) "-baseline" else "") ++ - "/-/bun-" ++ - os.npmName() ++ "-" ++ arch.npmName() ++ - libc.npmName() ++ - (if (is_baseline) "-baseline" else "") ++ - "-" ++ - "{d}.{d}.{d}.tgz", .{ - registry_url, - this.version.major, - this.version.minor, - this.version.patch, - }), + inline else => |libc| switch (this.baseline) { + // https://registry.npmjs.org/@oven/bun-linux-x64/-/bun-linux-x64-0.1.6.tgz + inline else => |is_baseline| try std.fmt.bufPrint(buf, comptime "{s}/@oven/bun-" ++ + os.npmName() ++ "-" ++ arch.npmName() ++ + libc.npmName() ++ + (if (is_baseline) "-baseline" else "") ++ + "/-/bun-" ++ + os.npmName() ++ "-" ++ 
arch.npmName() ++ + libc.npmName() ++ + (if (is_baseline) "-baseline" else "") ++ + "-" ++ + "{d}.{d}.{d}.tgz", .{ + registry_url, + this.version.major, + this.version.minor, + this.version.patch, + }), }, }, }, diff --git a/src/darwin_c.zig b/src/darwin_c.zig index 3019c177e7..00c6b5abc5 100644 --- a/src/darwin_c.zig +++ b/src/darwin_c.zig @@ -708,6 +708,8 @@ pub extern fn getifaddrs(*?*ifaddrs) c_int; pub extern fn freeifaddrs(?*ifaddrs) void; const net_if_h = @cImport({ + // TODO: remove this c import! instead of adding to it, add to + // c-headers-for-zig.h and use bun.C.translated. @cInclude("net/if.h"); }); pub const IFF_RUNNING = net_if_h.IFF_RUNNING; @@ -730,6 +732,8 @@ pub const sockaddr_dl = extern struct { }; pub usingnamespace @cImport({ + // TODO: remove this c import! instead of adding to it, add to + // c-headers-for-zig.h and use bun.C.translated. @cInclude("sys/spawn.h"); @cInclude("sys/fcntl.h"); @cInclude("sys/socket.h"); @@ -782,10 +786,6 @@ pub const CLOCK_UPTIME_RAW_APPROX = 9; pub const CLOCK_PROCESS_CPUTIME_ID = 12; pub const CLOCK_THREAD_CPUTIME_ID = 1; -pub const netdb = @cImport({ - @cInclude("netdb.h"); -}); - pub extern fn memset_pattern4(buf: [*]u8, pattern: [*]const u8, len: usize) void; pub extern fn memset_pattern8(buf: [*]u8, pattern: [*]const u8, len: usize) void; pub extern fn memset_pattern16(buf: [*]u8, pattern: [*]const u8, len: usize) void; diff --git a/src/dns.zig b/src/dns.zig index 95d9d74635..c1d97920f2 100644 --- a/src/dns.zig +++ b/src/dns.zig @@ -3,11 +3,9 @@ const std = @import("std"); const JSC = bun.JSC; const JSValue = JSC.JSValue; -const netdb = if (bun.Environment.isWindows) .{ - .AI_V4MAPPED = @as(c_int, 2048), - .AI_ADDRCONFIG = @as(c_int, 1024), - .AI_ALL = @as(c_int, 256), -} else @cImport(@cInclude("netdb.h")); +pub const AI_V4MAPPED: c_int = if (bun.Environment.isWindows) 2048 else bun.C.translated.AI_V4MAPPED; +pub const AI_ADDRCONFIG: c_int = if (bun.Environment.isWindows) 1024 else 
bun.C.translated.AI_ADDRCONFIG; +pub const AI_ALL: c_int = if (bun.Environment.isWindows) 256 else bun.C.translated.AI_ALL; pub const GetAddrInfo = struct { name: []const u8 = "", @@ -102,7 +100,7 @@ pub const GetAddrInfo = struct { options.flags = flags.coerce(i32, globalObject); - if (options.flags & ~(netdb.AI_ALL | netdb.AI_ADDRCONFIG | netdb.AI_V4MAPPED) != 0) + if (options.flags & ~(AI_ALL | AI_ADDRCONFIG | AI_V4MAPPED) != 0) return error.InvalidFlags; } diff --git a/src/linux_c.zig b/src/linux_c.zig index 0709ca04f4..7236b0bba6 100644 --- a/src/linux_c.zig +++ b/src/linux_c.zig @@ -494,6 +494,8 @@ pub fn posix_spawn_file_actions_addchdir_np(actions: *posix_spawn_file_actions_t pub extern fn vmsplice(fd: c_int, iovec: [*]const std.posix.iovec, iovec_count: usize, flags: u32) isize; const net_c = @cImport({ + // TODO: remove this c import! instead of adding to it, add to + // c-headers-for-zig.h and use bun.C.translated. @cInclude("ifaddrs.h"); // getifaddrs, freeifaddrs @cInclude("net/if.h"); // IFF_RUNNING, IFF_UP @cInclude("fcntl.h"); // F_DUPFD_CLOEXEC @@ -549,6 +551,8 @@ pub fn getErrno(rc: anytype) E { pub const getuid = std.os.linux.getuid; pub const getgid = std.os.linux.getgid; pub const linux_fs = if (bun.Environment.isLinux) @cImport({ + // TODO: remove this c import! instead of adding to it, add to + // c-headers-for-zig.h and use bun.C.translated. 
@cInclude("linux/fs.h"); }) else struct {}; @@ -629,10 +633,6 @@ pub const RENAME_WHITEOUT = 1 << 2; pub extern "C" fn quick_exit(code: c_int) noreturn; pub extern "C" fn memrchr(ptr: [*]const u8, val: c_int, len: usize) ?[*]const u8; -pub const netdb = @cImport({ - @cInclude("netdb.h"); -}); - export fn sys_epoll_pwait2(epfd: i32, events: ?[*]std.os.linux.epoll_event, maxevents: i32, timeout: ?*const std.os.linux.timespec, sigmask: ?*const std.os.linux.sigset_t) isize { return @bitCast( std.os.linux.syscall6( diff --git a/src/string_immutable.zig b/src/string_immutable.zig index 4fed847464..8c7e4c2356 100644 --- a/src/string_immutable.zig +++ b/src/string_immutable.zig @@ -2032,6 +2032,7 @@ pub fn toWDirNormalized(wbuf: []u16, utf8: []const u8) [:0]const u16 { pub fn toWPath(wbuf: []u16, utf8: []const u8) [:0]u16 { return toWPathMaybeDir(wbuf, utf8, false); } + pub fn toPath(buf: []u8, utf8: []const u8) [:0]u8 { return toPathMaybeDir(buf, utf8, false); } @@ -2039,6 +2040,20 @@ pub fn toPath(buf: []u8, utf8: []const u8) [:0]u8 { pub fn toWDirPath(wbuf: []u16, utf8: []const u8) [:0]const u16 { return toWPathMaybeDir(wbuf, utf8, true); } + +pub fn toKernel32Path(wbuf: []u16, utf8: []const u8) [:0]const u16 { + const path = if (hasPrefixComptime(utf8, bun.windows.nt_object_prefix_u8)) + utf8[bun.windows.nt_object_prefix_u8.len..] + else + utf8; + if (hasPrefixComptime(path, bun.windows.nt_maxpath_prefix_u8)) { + return toWPath(wbuf, path); + } + wbuf[0..4].* = bun.windows.nt_maxpath_prefix; + const wpath = toWPath(wbuf[4..], path); + return wbuf[0 .. 
wpath.len + 4 :0]; +} + fn isUNCPath(comptime T: type, path: []const T) bool { return path.len >= 3 and bun.path.Platform.windows.isSeparatorT(T, path[0]) and diff --git a/src/sys.zig b/src/sys.zig index 73272f5eb9..5fb343dcf0 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -790,7 +790,7 @@ pub fn mkdir(file_path: [:0]const u8, flags: bun.Mode) Maybe(void) { const wbuf = bun.WPathBufferPool.get(); defer bun.WPathBufferPool.put(wbuf); return Maybe(void).errnoSysP( - kernel32.CreateDirectoryW(bun.strings.toWPath(wbuf, file_path).ptr, null), + kernel32.CreateDirectoryW(bun.strings.toKernel32Path(wbuf, file_path).ptr, null), .mkdir, file_path, ) orelse Maybe(void).success; @@ -822,7 +822,7 @@ pub fn mkdirA(file_path: []const u8, flags: bun.Mode) Maybe(void) { if (comptime Environment.isWindows) { const wbuf = bun.WPathBufferPool.get(); defer bun.WPathBufferPool.put(wbuf); - const wpath = bun.strings.toWPath(wbuf, file_path); + const wpath = bun.strings.toKernel32Path(wbuf, file_path); assertIsValidWindowsPath(u16, wpath); return Maybe(void).errnoSysP( kernel32.CreateDirectoryW(wpath.ptr, null), @@ -2768,17 +2768,23 @@ pub fn getFileAttributes(path: anytype) ?WindowsFileAttributes { } pub fn existsOSPath(path: bun.OSPathSliceZ, file_only: bool) bool { - if (comptime Environment.isPosix) { + if (Environment.isPosix) { + // access() may not work correctly on NFS file systems with UID + // mapping enabled, because UID mapping is done on the server and + // hidden from the client, which checks permissions. Similar + // problems can occur to FUSE mounts. 
return syscall.access(path, 0) == 0; } - if (comptime Environment.isWindows) { + if (Environment.isWindows) { const attributes = getFileAttributes(path) orelse return false; if (file_only and attributes.is_directory) { return false; } + std.debug.print("{}\n", .{attributes}); + return true; } diff --git a/src/watcher.zig b/src/watcher.zig index 1cd75586b5..7639bc11be 100644 --- a/src/watcher.zig +++ b/src/watcher.zig @@ -1,455 +1,148 @@ -const std = @import("std"); -const bun = @import("root").bun; -const string = bun.string; -const Output = bun.Output; -const Global = bun.Global; -const Environment = bun.Environment; -const strings = bun.strings; -const stringZ = bun.stringZ; -const FeatureFlags = bun.FeatureFlags; -const options = @import("./options.zig"); +//! Bun's cross-platform filesystem watcher. Runs on its own thread. +const Watcher = @This(); +pub const max_count = 128; + +pub const Event = WatchEvent; +pub const Item = WatchItem; +pub const ItemList = WatchList; +pub const WatchList = std.MultiArrayList(WatchItem); +pub const HashType = u32; +const no_watch_item: WatchItemIndex = std.math.maxInt(WatchItemIndex); + +// Consumer-facing +watch_events: [128]WatchEvent, +changed_filepaths: [128]?[:0]u8, + +/// The platform-specific implementation of the watcher +platform: Platform, + +watchlist: WatchList, +watched_count: usize, +mutex: Mutex, + +fs: *bun.fs.FileSystem, +allocator: std.mem.Allocator, +watchloop_handle: ?std.Thread.Id = null, +cwd: string, +thread: std.Thread = undefined, +running: bool = true, +close_descriptors: bool = false, + +evict_list: [max_eviction_count]WatchItemIndex = undefined, +evict_list_i: WatchItemIndex = 0, + +ctx: *anyopaque, +onFileUpdate: *const fn (this: *anyopaque, events: []WatchEvent, changed_files: []?[:0]u8, watchlist: WatchList) void, +onError: *const fn (this: *anyopaque, err: bun.sys.Error) void, + +thread_lock: bun.DebugThreadLock = bun.DebugThreadLock.unlocked, + +/// Initializes a watcher. 
Each watcher is tied to some context type, which +/// receives watch callbacks on the watcher thread. This function does not +/// actually start the watcher thread. +/// +/// const watcher = try Watcher.init(T, instance_of_t, fs, bun.default_allocator) +/// errdefer watcher.deinit(false); +/// try watcher.start(); +/// +/// To integrate a started watcher into module resolution: +/// +/// transpiler.resolver.watcher = watcher.getResolveWatcher(); +/// +/// To integrate a started watcher into bundle_v2: +/// +/// bundle_v2.bun_watcher = watcher; +pub fn init(comptime T: type, ctx: *T, fs: *bun.fs.FileSystem, allocator: std.mem.Allocator) !*Watcher { + const wrapped = struct { + fn onFileUpdateWrapped(ctx_opaque: *anyopaque, events: []WatchEvent, changed_files: []?[:0]u8, watchlist: WatchList) void { + T.onFileUpdate(@alignCast(@ptrCast(ctx_opaque)), events, changed_files, watchlist); + } + fn onErrorWrapped(ctx_opaque: *anyopaque, err: bun.sys.Error) void { + if (@hasDecl(T, "onWatchError")) { + T.onWatchError(@alignCast(@ptrCast(ctx_opaque)), err); + } else { + T.onError(@alignCast(@ptrCast(ctx_opaque)), err); + } + } + }; + + const watcher = try allocator.create(Watcher); + errdefer allocator.destroy(watcher); + watcher.* = Watcher{ + .fs = fs, + .allocator = allocator, + .watched_count = 0, + .watchlist = WatchList{}, + .mutex = .{}, + .cwd = fs.top_level_dir, + .ctx = ctx, + .onFileUpdate = &wrapped.onFileUpdateWrapped, + .onError = &wrapped.onErrorWrapped, + .platform = .{}, + .watch_events = undefined, + .changed_filepaths = [_]?[:0]u8{null} ** 128, + }; + + try Platform.init(&watcher.platform, fs.top_level_dir); + + return watcher; +} + +pub fn start(this: *Watcher) !void { + bun.assert(this.watchloop_handle == null); + this.thread = try std.Thread.spawn(.{}, threadMain, .{this}); +} + +pub fn deinit(this: *Watcher, close_descriptors: bool) void { + if (this.watchloop_handle != null) { + this.mutex.lock(); + defer this.mutex.unlock(); + this.close_descriptors 
= close_descriptors; + this.running = false; + } else { + if (close_descriptors and this.running) { + const fds = this.watchlist.items(.fd); + for (fds) |fd| { + _ = bun.sys.close(fd); + } + } + this.watchlist.deinit(this.allocator); + const allocator = this.allocator; + allocator.destroy(this); + } +} + +pub fn getHash(filepath: string) HashType { + return @as(HashType, @truncate(bun.hash(filepath))); +} -const Mutex = bun.Mutex; -const Futex = @import("./futex.zig"); pub const WatchItemIndex = u16; -const PackageJSON = @import("./resolver/package_json.zig").PackageJSON; +pub const max_eviction_count = 8096; const log = bun.Output.scoped(.watcher, false); -const WATCHER_MAX_LIST = 8096; - -const INotify = struct { - loaded_inotify: bool = false, - inotify_fd: EventListIndex = 0, - - eventlist: EventListBuffer = undefined, - eventlist_ptrs: [128]*const INotifyEvent = undefined, - - watch_count: std.atomic.Value(u32) = std.atomic.Value(u32).init(0), - coalesce_interval: isize = 100_000, - - pub const EventListIndex = c_int; - const EventListBuffer = [@sizeOf([128]INotifyEvent) + (128 * bun.MAX_PATH_BYTES + (128 * @alignOf(INotifyEvent)))]u8; - - pub const INotifyEvent = extern struct { - watch_descriptor: c_int, - mask: u32, - cookie: u32, - name_len: u32, - - pub fn name(this: *const INotifyEvent) [:0]u8 { - if (comptime Environment.allow_assert) bun.assert(this.name_len > 0); - - // the name_len field is wrong - // it includes alignment / padding - // but it is a sentineled value - // so we can just trim it to the first null byte - return bun.sliceTo(@as([*:0]u8, @ptrFromInt(@intFromPtr(&this.name_len) + @sizeOf(u32))), 0)[0.. 
:0]; - } - }; - - pub fn watchPath(this: *INotify, pathname: [:0]const u8) bun.JSC.Maybe(EventListIndex) { - bun.assert(this.loaded_inotify); - const old_count = this.watch_count.fetchAdd(1, .release); - defer if (old_count == 0) Futex.wake(&this.watch_count, 10); - const watch_file_mask = std.os.linux.IN.EXCL_UNLINK | std.os.linux.IN.MOVE_SELF | std.os.linux.IN.DELETE_SELF | std.os.linux.IN.MOVED_TO | std.os.linux.IN.MODIFY; - return .{ - .result = std.posix.inotify_add_watchZ(this.inotify_fd, pathname, watch_file_mask) catch |err| return .{ - .err = .{ - .errno = @truncate(@intFromEnum(switch (err) { - error.FileNotFound => bun.C.E.NOENT, - error.AccessDenied => bun.C.E.ACCES, - error.SystemResources => bun.C.E.NOMEM, - error.Unexpected => bun.C.E.INVAL, - error.NotDir => bun.C.E.NOTDIR, - error.NameTooLong => bun.C.E.NAMETOOLONG, - error.UserResourceLimitReached => bun.C.E.MFILE, - error.WatchAlreadyExists => bun.C.E.EXIST, - })), - .syscall = .watch, - }, - }, - }; - } - - pub fn watchDir(this: *INotify, pathname: [:0]const u8) bun.JSC.Maybe(EventListIndex) { - bun.assert(this.loaded_inotify); - const old_count = this.watch_count.fetchAdd(1, .release); - defer if (old_count == 0) Futex.wake(&this.watch_count, 10); - const watch_dir_mask = std.os.linux.IN.EXCL_UNLINK | std.os.linux.IN.DELETE | std.os.linux.IN.DELETE_SELF | std.os.linux.IN.CREATE | std.os.linux.IN.MOVE_SELF | std.os.linux.IN.ONLYDIR | std.os.linux.IN.MOVED_TO; - return .{ - .result = std.posix.inotify_add_watchZ(this.inotify_fd, pathname, watch_dir_mask) catch |err| return .{ - .err = .{ - .errno = @truncate(@intFromEnum(switch (err) { - error.FileNotFound => bun.C.E.NOENT, - error.AccessDenied => bun.C.E.ACCES, - error.SystemResources => bun.C.E.NOMEM, - error.Unexpected => bun.C.E.INVAL, - error.NotDir => bun.C.E.NOTDIR, - error.NameTooLong => bun.C.E.NAMETOOLONG, - error.UserResourceLimitReached => bun.C.E.MFILE, - error.WatchAlreadyExists => bun.C.E.EXIST, - })), - .syscall = .watch, - }, - 
}, - }; - } - - pub fn unwatch(this: *INotify, wd: EventListIndex) void { - bun.assert(this.loaded_inotify); - _ = this.watch_count.fetchSub(1, .release); - std.os.inotify_rm_watch(this.inotify_fd, wd); - } - - pub fn init(this: *INotify, _: []const u8) !void { - bun.assert(!this.loaded_inotify); - this.loaded_inotify = true; - - if (bun.getenvZ("BUN_INOTIFY_COALESCE_INTERVAL")) |env| { - this.coalesce_interval = std.fmt.parseInt(isize, env, 10) catch 100_000; - } - - this.inotify_fd = try std.posix.inotify_init1(std.os.linux.IN.CLOEXEC); - } - - pub fn read(this: *INotify) bun.JSC.Maybe([]*const INotifyEvent) { - bun.assert(this.loaded_inotify); - - restart: while (true) { - Futex.waitForever(&this.watch_count, 0); - - const rc = std.posix.system.read( - this.inotify_fd, - @as([*]u8, @ptrCast(@alignCast(&this.eventlist))), - @sizeOf(EventListBuffer), - ); - - const errno = std.posix.errno(rc); - switch (errno) { - .SUCCESS => { - var len = @as(usize, @intCast(rc)); - - if (len == 0) return .{ .result = &[_]*INotifyEvent{} }; - - // IN_MODIFY is very noisy - // we do a 0.1ms sleep to try to coalesce events better - if (len < (@sizeOf(EventListBuffer) / 2)) { - var fds = [_]std.posix.pollfd{.{ - .fd = this.inotify_fd, - .events = std.posix.POLL.IN | std.posix.POLL.ERR, - .revents = 0, - }}; - var timespec = std.posix.timespec{ .tv_sec = 0, .tv_nsec = this.coalesce_interval }; - if ((std.posix.ppoll(&fds, ×pec, null) catch 0) > 0) { - while (true) { - const new_rc = std.posix.system.read( - this.inotify_fd, - @as([*]u8, @ptrCast(@alignCast(&this.eventlist))) + len, - @sizeOf(EventListBuffer) - len, - ); - const e = std.posix.errno(new_rc); - switch (e) { - .SUCCESS => { - len += @as(usize, @intCast(new_rc)); - }, - .AGAIN => continue, - .INTR => continue, - else => return .{ .err = .{ - .errno = @truncate(@intFromEnum(e)), - .syscall = .read, - } }, - } - break; - } - } - } - - // This is what replit does as of Jaunary 2023. 
- // 1) CREATE .http.ts.3491171321~ - // 2) OPEN .http.ts.3491171321~ - // 3) ATTRIB .http.ts.3491171321~ - // 4) MODIFY .http.ts.3491171321~ - // 5) CLOSE_WRITE,CLOSE .http.ts.3491171321~ - // 6) MOVED_FROM .http.ts.3491171321~ - // 7) MOVED_TO http.ts - // We still don't correctly handle MOVED_FROM && MOVED_TO it seems. - - var count: u32 = 0; - var i: u32 = 0; - while (i < len) : (i += @sizeOf(INotifyEvent)) { - @setRuntimeSafety(false); - const event = @as(*INotifyEvent, @ptrCast(@alignCast(this.eventlist[i..][0..@sizeOf(INotifyEvent)]))); - i += event.name_len; - - this.eventlist_ptrs[count] = event; - count += 1; - } - - return .{ .result = this.eventlist_ptrs[0..count] }; - }, - .AGAIN => continue :restart, - else => return .{ .err = .{ - .errno = @truncate(@intFromEnum(errno)), - .syscall = .read, - } }, - } - } - } - - pub fn stop(this: *INotify) void { - if (this.inotify_fd != 0) { - _ = bun.sys.close(bun.toFD(this.inotify_fd)); - this.inotify_fd = 0; - } - } +const WindowsWatcher = @import("./watcher/WindowsWatcher.zig"); +// TODO: some platform-specific behavior is implemented in +// this file instead of the platform-specific file. 
+// ideally, the constants above can be inlined +const Platform = switch (Environment.os) { + .linux => @import("./watcher/INotifyWatcher.zig"), + .mac => @import("./watcher/KEventWatcher.zig"), + .windows => WindowsWatcher, + else => @compileError("Unsupported platform"), }; -const DarwinWatcher = struct { - pub const EventListIndex = u32; - - const KEvent = std.c.Kevent; - - // Internal - changelist: [128]KEvent = undefined, - - // Everything being watched - eventlist: [WATCHER_MAX_LIST]KEvent = undefined, - eventlist_index: EventListIndex = 0, - - fd: bun.FileDescriptor = bun.invalid_fd, - - pub fn init(this: *DarwinWatcher, _: []const u8) !void { - const fd = try std.posix.kqueue(); - if (fd == 0) return error.KQueueError; - this.fd = bun.toFD(fd); - } - - pub fn stop(this: *DarwinWatcher) void { - if (this.fd.isValid()) { - _ = bun.sys.close(this.fd); - this.fd = bun.invalid_fd; - } - } -}; - -const WindowsWatcher = struct { - mutex: Mutex = .{}, - iocp: w.HANDLE = undefined, - watcher: DirWatcher = undefined, - - const w = std.os.windows; - pub const EventListIndex = c_int; - - const Error = error{ - IocpFailed, - ReadDirectoryChangesFailed, - CreateFileFailed, - InvalidPath, - }; - - const Action = enum(w.DWORD) { - Added = w.FILE_ACTION_ADDED, - Removed = w.FILE_ACTION_REMOVED, - Modified = w.FILE_ACTION_MODIFIED, - RenamedOld = w.FILE_ACTION_RENAMED_OLD_NAME, - RenamedNew = w.FILE_ACTION_RENAMED_NEW_NAME, - }; - - const FileEvent = struct { - action: Action, - filename: []u16 = undefined, - }; - - const DirWatcher = struct { - // must be initialized to zero (even though it's never read or written in our code), - // otherwise ReadDirectoryChangesW will fail with INVALID_HANDLE - overlapped: w.OVERLAPPED = std.mem.zeroes(w.OVERLAPPED), - buf: [64 * 1024]u8 align(@alignOf(w.FILE_NOTIFY_INFORMATION)) = undefined, - dirHandle: w.HANDLE, - - // invalidates any EventIterators - fn prepare(this: *DirWatcher) bun.JSC.Maybe(void) { - const filter = 
w.FILE_NOTIFY_CHANGE_FILE_NAME | w.FILE_NOTIFY_CHANGE_DIR_NAME | w.FILE_NOTIFY_CHANGE_LAST_WRITE | w.FILE_NOTIFY_CHANGE_CREATION; - if (w.kernel32.ReadDirectoryChangesW(this.dirHandle, &this.buf, this.buf.len, 1, filter, null, &this.overlapped, null) == 0) { - const err = w.kernel32.GetLastError(); - log("failed to start watching directory: {s}", .{@tagName(err)}); - return .{ .err = .{ - .errno = @intFromEnum(bun.C.SystemErrno.init(err) orelse bun.C.SystemErrno.EINVAL), - .syscall = .watch, - } }; - } - log("read directory changes!", .{}); - return .{ .result = {} }; - } - }; - - const EventIterator = struct { - watcher: *DirWatcher, - offset: usize = 0, - hasNext: bool = true, - - pub fn next(this: *EventIterator) ?FileEvent { - if (!this.hasNext) return null; - const info_size = @sizeOf(w.FILE_NOTIFY_INFORMATION); - const info: *w.FILE_NOTIFY_INFORMATION = @alignCast(@ptrCast(this.watcher.buf[this.offset..].ptr)); - const name_ptr: [*]u16 = @alignCast(@ptrCast(this.watcher.buf[this.offset + info_size ..])); - const filename: []u16 = name_ptr[0 .. info.FileNameLength / @sizeOf(u16)]; - - const action: Action = @enumFromInt(info.Action); - - if (info.NextEntryOffset == 0) { - this.hasNext = false; - } else { - this.offset += @as(usize, info.NextEntryOffset); - } - - return FileEvent{ - .action = action, - .filename = filename, - }; - } - }; - - pub fn init(this: *WindowsWatcher, root: []const u8) !void { - var pathbuf: bun.WPathBuffer = undefined; - const wpath = bun.strings.toNTPath(&pathbuf, root); - const path_len_bytes: u16 = @truncate(wpath.len * 2); - var nt_name = w.UNICODE_STRING{ - .Length = path_len_bytes, - .MaximumLength = path_len_bytes, - .Buffer = @constCast(wpath.ptr), - }; - var attr = w.OBJECT_ATTRIBUTES{ - .Length = @sizeOf(w.OBJECT_ATTRIBUTES), - .RootDirectory = null, - .Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here. 
- .ObjectName = &nt_name, - .SecurityDescriptor = null, - .SecurityQualityOfService = null, - }; - var handle: w.HANDLE = w.INVALID_HANDLE_VALUE; - var io: w.IO_STATUS_BLOCK = undefined; - const rc = w.ntdll.NtCreateFile( - &handle, - w.FILE_LIST_DIRECTORY, - &attr, - &io, - null, - 0, - w.FILE_SHARE_READ | w.FILE_SHARE_WRITE | w.FILE_SHARE_DELETE, - w.FILE_OPEN, - w.FILE_DIRECTORY_FILE | w.FILE_OPEN_FOR_BACKUP_INTENT, - null, - 0, - ); - - if (rc != .SUCCESS) { - const err = bun.windows.Win32Error.fromNTStatus(rc); - log("failed to open directory for watching: {s}", .{@tagName(err)}); - return Error.CreateFileFailed; - } - errdefer _ = w.kernel32.CloseHandle(handle); - - this.iocp = try w.CreateIoCompletionPort(handle, null, 0, 1); - errdefer _ = w.kernel32.CloseHandle(this.iocp); - - this.watcher = .{ .dirHandle = handle }; - } - - const Timeout = enum(w.DWORD) { - infinite = w.INFINITE, - minimal = 1, - none = 0, - }; - - // wait until new events are available - pub fn next(this: *WindowsWatcher, timeout: Timeout) bun.JSC.Maybe(?EventIterator) { - switch (this.watcher.prepare()) { - .err => |err| { - log("prepare() returned error", .{}); - return .{ .err = err }; - }, - .result => {}, - } - - var nbytes: w.DWORD = 0; - var key: w.ULONG_PTR = 0; - var overlapped: ?*w.OVERLAPPED = null; - while (true) { - const rc = w.kernel32.GetQueuedCompletionStatus(this.iocp, &nbytes, &key, &overlapped, @intFromEnum(timeout)); - if (rc == 0) { - const err = w.kernel32.GetLastError(); - if (err == .TIMEOUT or err == .WAIT_TIMEOUT) { - return .{ .result = null }; - } else { - log("GetQueuedCompletionStatus failed: {s}", .{@tagName(err)}); - return .{ .err = .{ - .errno = @intFromEnum(bun.C.SystemErrno.init(err) orelse bun.C.SystemErrno.EINVAL), - .syscall = .watch, - } }; - } - } - - if (overlapped) |ptr| { - // ignore possible spurious events - if (ptr != &this.watcher.overlapped) { - continue; - } - if (nbytes == 0) { - // shutdown notification - // TODO close handles? 
- log("shutdown notification in WindowsWatcher.next", .{}); - return .{ .err = .{ - .errno = @intFromEnum(bun.C.SystemErrno.ESHUTDOWN), - .syscall = .watch, - } }; - } - return .{ .result = EventIterator{ .watcher = &this.watcher } }; - } else { - log("GetQueuedCompletionStatus returned no overlapped event", .{}); - return .{ .err = .{ - .errno = @truncate(@intFromEnum(bun.C.E.INVAL)), - .syscall = .watch, - } }; - } - } - } - - pub fn stop(this: *WindowsWatcher) void { - w.CloseHandle(this.watcher.dirHandle); - w.CloseHandle(this.iocp); - } -}; - -const PlatformWatcher = if (Environment.isMac) - DarwinWatcher -else if (Environment.isLinux) - INotify -else if (Environment.isWindows) - WindowsWatcher -else - @compileError("Unsupported platform"); - pub const WatchEvent = struct { index: WatchItemIndex, op: Op, name_off: u8 = 0, name_len: u8 = 0, - pub fn ignoreINotifyEvent(event: INotify.INotifyEvent) bool { - var stack: WatchEvent = undefined; - stack.fromINotify(event, 0); - return @as(std.meta.Int(.unsigned, @bitSizeOf(Op)), @bitCast(stack.op)) == 0; - } - pub fn names(this: WatchEvent, buf: []?[:0]u8) []?[:0]u8 { if (this.name_len == 0) return &[_]?[:0]u8{}; return buf[this.name_off..][0..this.name_len]; } - const KEvent = std.c.Kevent; - pub const Sorter = void; pub fn sortByIndex(_: Sorter, event: WatchEvent, rhs: WatchEvent) bool { @@ -466,42 +159,6 @@ pub const WatchEvent = struct { }; } - pub fn fromKEvent(this: *WatchEvent, kevent: KEvent) void { - this.* = - WatchEvent{ - .op = Op{ - .delete = (kevent.fflags & std.c.NOTE_DELETE) > 0, - .metadata = (kevent.fflags & std.c.NOTE_ATTRIB) > 0, - .rename = (kevent.fflags & (std.c.NOTE_RENAME | std.c.NOTE_LINK)) > 0, - .write = (kevent.fflags & std.c.NOTE_WRITE) > 0, - }, - .index = @as(WatchItemIndex, @truncate(kevent.udata)), - }; - } - - pub fn fromINotify(this: *WatchEvent, event: INotify.INotifyEvent, index: WatchItemIndex) void { - this.* = WatchEvent{ - .op = Op{ - .delete = (event.mask & 
std.os.linux.IN.DELETE_SELF) > 0 or (event.mask & std.os.linux.IN.DELETE) > 0, - .rename = (event.mask & std.os.linux.IN.MOVE_SELF) > 0, - .move_to = (event.mask & std.os.linux.IN.MOVED_TO) > 0, - .write = (event.mask & std.os.linux.IN.MODIFY) > 0, - }, - .index = index, - }; - } - - pub fn fromFileNotify(this: *WatchEvent, event: WindowsWatcher.FileEvent, index: WatchItemIndex) void { - this.* = WatchEvent{ - .op = Op{ - .delete = event.action == .Removed, - .rename = event.action == .RenamedOld, - .write = event.action == .Modified, - }, - .index = index, - }; - } - pub const Op = packed struct { delete: bool = false, metadata: bool = false, @@ -546,793 +203,468 @@ pub const WatchItem = struct { parent_hash: u32, kind: Kind, package_json: ?*PackageJSON, - eventlist_index: if (Environment.isLinux) PlatformWatcher.EventListIndex else u0 = 0, + eventlist_index: if (Environment.isLinux) Platform.EventListIndex else u0 = 0, pub const Kind = enum { file, directory }; }; -pub const WatchList = std.MultiArrayList(WatchItem); -pub const HashType = u32; +fn threadMain(this: *Watcher) !void { + this.watchloop_handle = std.Thread.getCurrentId(); + this.thread_lock.lock(); + Output.Source.configureNamedThread("File Watcher"); -pub fn getHash(filepath: string) HashType { - return @as(HashType, @truncate(bun.hash(filepath))); + defer Output.flush(); + if (FeatureFlags.verbose_watcher) Output.prettyln("Watcher started", .{}); + + switch (this.watchLoop()) { + .err => |err| { + this.watchloop_handle = null; + this.platform.stop(); + if (this.running) { + this.onError(this.ctx, err); + } + }, + .result => {}, + } + + // deinit and close descriptors if needed + if (this.close_descriptors) { + const fds = this.watchlist.items(.fd); + for (fds) |fd| { + _ = bun.sys.close(fd); + } + } + this.watchlist.deinit(this.allocator); + + const allocator = this.allocator; + allocator.destroy(this); } -// TODO: Rename to `Watcher` and make a top-level struct. 
-// `if(true)` is to reduce git diff from when it was changed -// from a comptime function to a basic struct. -pub const NewWatcher = if (true) - struct { - const Watcher = @This(); +pub fn flushEvictions(this: *Watcher) void { + if (this.evict_list_i == 0) return; + defer this.evict_list_i = 0; - pub const Event = WatchEvent; - pub const Item = WatchItem; - pub const ItemList = WatchList; + // swapRemove messes up the order + // But, it only messes up the order if any elements in the list appear after the item being removed + // So if we just sort the list by the biggest index first, that should be fine + std.sort.pdq( + WatchItemIndex, + this.evict_list[0..this.evict_list_i], + {}, + comptime std.sort.desc(WatchItemIndex), + ); - watchlist: WatchList, - watched_count: usize = 0, - mutex: Mutex, + var slice = this.watchlist.slice(); + const fds = slice.items(.fd); + var last_item = no_watch_item; - platform: PlatformWatcher = PlatformWatcher{}, + for (this.evict_list[0..this.evict_list_i]) |item| { + // catch duplicates, since the list is sorted, duplicates will appear right after each other + if (item == last_item) continue; - // User-facing - watch_events: [128]WatchEvent = undefined, - changed_filepaths: [128]?[:0]u8 = [_]?[:0]u8{null} ** 128, - - ctx: *anyopaque, - onFileUpdate: *const fn (this: *anyopaque, events: []WatchEvent, changed_files: []?[:0]u8, watchlist: WatchList) void, - onError: *const fn (this: *anyopaque, err: bun.sys.Error) void, - - fs: *bun.fs.FileSystem, - allocator: std.mem.Allocator, - watchloop_handle: ?std.Thread.Id = null, - cwd: string, - thread: std.Thread = undefined, - running: bool = true, - close_descriptors: bool = false, - - evict_list: [WATCHER_MAX_LIST]WatchItemIndex = undefined, - evict_list_i: WatchItemIndex = 0, - - thread_lock: bun.DebugThreadLock = bun.DebugThreadLock.unlocked, - - const no_watch_item: WatchItemIndex = std.math.maxInt(WatchItemIndex); - - pub fn init(comptime T: type, ctx: *T, fs: *bun.fs.FileSystem, 
allocator: std.mem.Allocator) !*Watcher { - const wrapped = struct { - fn onFileUpdateWrapped(ctx_opaque: *anyopaque, events: []WatchEvent, changed_files: []?[:0]u8, watchlist: WatchList) void { - T.onFileUpdate(@alignCast(@ptrCast(ctx_opaque)), events, changed_files, watchlist); - } - fn onErrorWrapped(ctx_opaque: *anyopaque, err: bun.sys.Error) void { - if (@hasDecl(T, "onWatchError")) { - T.onWatchError(@alignCast(@ptrCast(ctx_opaque)), err); - } else { - T.onError(@alignCast(@ptrCast(ctx_opaque)), err); - } - } - }; - - const watcher = try allocator.create(Watcher); - errdefer allocator.destroy(watcher); - - watcher.* = Watcher{ - .fs = fs, - .allocator = allocator, - .watched_count = 0, - .watchlist = WatchList{}, - .mutex = .{}, - .cwd = fs.top_level_dir, - - .ctx = ctx, - .onFileUpdate = &wrapped.onFileUpdateWrapped, - .onError = &wrapped.onErrorWrapped, - }; - - try PlatformWatcher.init(&watcher.platform, fs.top_level_dir); - - return watcher; + if (!Environment.isWindows) { + // on mac and linux we can just close the file descriptor + // TODO do we need to call inotify_rm_watch on linux? + _ = bun.sys.close(fds[item]); } + last_item = item; + } - pub fn start(this: *Watcher) !void { - bun.assert(this.watchloop_handle == null); - this.thread = try std.Thread.spawn(.{}, Watcher.watchLoop, .{this}); + last_item = no_watch_item; + // This is split into two passes because reading the slice while modified is potentially unsafe. 
+ for (this.evict_list[0..this.evict_list_i]) |item| { + if (item == last_item) continue; + this.watchlist.swapRemove(item); + last_item = item; + } +} + +fn watchLoop(this: *Watcher) bun.JSC.Maybe(void) { + while (this.running) { + // individual platform implementation will call onFileUpdate + switch (Platform.watchLoopCycle(this)) { + .err => |err| return .{ .err = err }, + .result => |iter| iter, } + } + return .{ .result = {} }; +} - pub fn deinit(this: *Watcher, close_descriptors: bool) void { - if (this.watchloop_handle != null) { - this.mutex.lock(); - defer this.mutex.unlock(); - this.close_descriptors = close_descriptors; - this.running = false; - } else { - if (close_descriptors and this.running) { - const fds = this.watchlist.items(.fd); - for (fds) |fd| { - _ = bun.sys.close(fd); - } - } - this.watchlist.deinit(this.allocator); - const allocator = this.allocator; - allocator.destroy(this); - } - } - - // This must only be called from the watcher thread - pub fn watchLoop(this: *Watcher) !void { - this.watchloop_handle = std.Thread.getCurrentId(); - this.thread_lock.lock(); - Output.Source.configureNamedThread("File Watcher"); - - defer Output.flush(); - if (FeatureFlags.verbose_watcher) Output.prettyln("Watcher started", .{}); - - switch (this._watchLoop()) { - .err => |err| { - this.watchloop_handle = null; - this.platform.stop(); - if (this.running) { - this.onError(this.ctx, err); - } - }, - .result => {}, - } - - // deinit and close descriptors if needed - if (this.close_descriptors) { - const fds = this.watchlist.items(.fd); - for (fds) |fd| { - _ = bun.sys.close(fd); - } - } - this.watchlist.deinit(this.allocator); - - const allocator = this.allocator; - allocator.destroy(this); - } - - pub fn flushEvictions(this: *Watcher) void { - if (this.evict_list_i == 0) return; - defer this.evict_list_i = 0; - - // swapRemove messes up the order - // But, it only messes up the order if any elements in the list appear after the item being removed - // So if 
we just sort the list by the biggest index first, that should be fine - std.sort.pdq( - WatchItemIndex, - this.evict_list[0..this.evict_list_i], - {}, - comptime std.sort.desc(WatchItemIndex), - ); - - var slice = this.watchlist.slice(); - const fds = slice.items(.fd); - var last_item = no_watch_item; - - for (this.evict_list[0..this.evict_list_i]) |item| { - // catch duplicates, since the list is sorted, duplicates will appear right after each other - if (item == last_item) continue; - - if (!Environment.isWindows) { - // on mac and linux we can just close the file descriptor - // TODO do we need to call inotify_rm_watch on linux? - _ = bun.sys.close(fds[item]); - } - last_item = item; - } - - last_item = no_watch_item; - // This is split into two passes because reading the slice while modified is potentially unsafe. - for (this.evict_list[0..this.evict_list_i]) |item| { - if (item == last_item) continue; - this.watchlist.swapRemove(item); - last_item = item; - } - } - - fn _watchLoop(this: *Watcher) bun.JSC.Maybe(void) { - if (Environment.isMac) { - bun.assert(this.platform.fd.isValid()); - const KEvent = std.c.Kevent; - - var changelist_array: [128]KEvent = std.mem.zeroes([128]KEvent); - var changelist = &changelist_array; - while (true) { - defer Output.flush(); - - var count_ = std.posix.system.kevent( - this.platform.fd.cast(), - @as([*]KEvent, changelist), - 0, - @as([*]KEvent, changelist), - 128, - - null, - ); - - // Give the events more time to coalesce - if (count_ < 128 / 2) { - const remain = 128 - count_; - var timespec = std.posix.timespec{ .tv_sec = 0, .tv_nsec = 100_000 }; - const extra = std.posix.system.kevent( - this.platform.fd.cast(), - @as([*]KEvent, changelist[@as(usize, @intCast(count_))..].ptr), - 0, - @as([*]KEvent, changelist[@as(usize, @intCast(count_))..].ptr), - remain, - - ×pec, - ); - - count_ += extra; - } - - var changes = changelist[0..@as(usize, @intCast(@max(0, count_)))]; - var watchevents = this.watch_events[0..changes.len]; 
- var out_len: usize = 0; - if (changes.len > 0) { - watchevents[0].fromKEvent(changes[0]); - out_len = 1; - var prev_event = changes[0]; - for (changes[1..]) |event| { - if (prev_event.udata == event.udata) { - var new: WatchEvent = undefined; - new.fromKEvent(event); - watchevents[out_len - 1].merge(new); - continue; - } - - watchevents[out_len].fromKEvent(event); - prev_event = event; - out_len += 1; - } - - watchevents = watchevents[0..out_len]; - } - - this.mutex.lock(); - defer this.mutex.unlock(); - if (this.running) { - this.onFileUpdate(this.ctx, watchevents, this.changed_filepaths[0..watchevents.len], this.watchlist); - } else { - break; - } - } - } else if (Environment.isLinux) { - restart: while (true) { - defer Output.flush(); - - var events = switch (this.platform.read()) { - .result => |result| result, - .err => |err| return .{ .err = err }, - }; - if (events.len == 0) continue :restart; - - // TODO: is this thread safe? - var remaining_events = events.len; - - const eventlist_index = this.watchlist.items(.eventlist_index); - - while (remaining_events > 0) { - var name_off: u8 = 0; - var temp_name_list: [128]?[:0]u8 = undefined; - var temp_name_off: u8 = 0; - - const slice = events[0..@min(128, remaining_events, this.watch_events.len)]; - var watchevents = this.watch_events[0..slice.len]; - var watch_event_id: u32 = 0; - for (slice) |event| { - watchevents[watch_event_id].fromINotify( - event.*, - @as( - WatchItemIndex, - @intCast(std.mem.indexOfScalar( - INotify.EventListIndex, - eventlist_index, - event.watch_descriptor, - ) orelse continue), - ), - ); - temp_name_list[temp_name_off] = if (event.name_len > 0) - event.name() - else - null; - watchevents[watch_event_id].name_off = temp_name_off; - watchevents[watch_event_id].name_len = @as(u8, @intFromBool((event.name_len > 0))); - temp_name_off += @as(u8, @intFromBool((event.name_len > 0))); - - watch_event_id += 1; - } - - var all_events = watchevents[0..watch_event_id]; - std.sort.pdq(WatchEvent, 
all_events, {}, WatchEvent.sortByIndex); - - var last_event_index: usize = 0; - var last_event_id: INotify.EventListIndex = std.math.maxInt(INotify.EventListIndex); - - for (all_events, 0..) |_, i| { - if (all_events[i].name_len > 0) { - this.changed_filepaths[name_off] = temp_name_list[all_events[i].name_off]; - all_events[i].name_off = name_off; - name_off += 1; - } - - if (all_events[i].index == last_event_id) { - all_events[last_event_index].merge(all_events[i]); - continue; - } - last_event_index = i; - last_event_id = all_events[i].index; - } - if (all_events.len == 0) continue :restart; - - this.mutex.lock(); - defer this.mutex.unlock(); - if (this.running) { - this.onFileUpdate(this.ctx, all_events[0 .. last_event_index + 1], this.changed_filepaths[0 .. name_off + 1], this.watchlist); - } else { - break; - } - remaining_events -= slice.len; - } - } - } else if (Environment.isWindows) { - log("_watchLoop", .{}); - var buf: bun.PathBuffer = undefined; - const root = this.fs.top_level_dir; - @memcpy(buf[0..root.len], root); - const needs_slash = root.len == 0 or !bun.strings.charIsAnySlash(root[root.len - 1]); - if (needs_slash) { - buf[root.len] = '\\'; - } - const baseidx = if (needs_slash) root.len + 1 else root.len; - restart: while (true) { - var event_id: usize = 0; - - // first wait has infinite timeout - we're waiting for the next event and don't want to spin - var timeout = WindowsWatcher.Timeout.infinite; - while (true) { - var iter = switch (this.platform.next(timeout)) { - .err => |err| return .{ .err = err }, - .result => |iter| iter orelse break, - }; - // after the first wait, we want to coalesce further events but don't want to wait for them - // NOTE: using a 1ms timeout would be ideal, but that actually makes the thread wait for at least 10ms more than it should - // Instead we use a 0ms timeout, which may not do as much coalescing but is more responsive. 
- timeout = WindowsWatcher.Timeout.none; - const item_paths = this.watchlist.items(.file_path); - log("number of watched items: {d}", .{item_paths.len}); - while (iter.next()) |event| { - const convert_res = bun.strings.copyUTF16IntoUTF8(buf[baseidx..], []const u16, event.filename, false); - const eventpath = buf[0 .. baseidx + convert_res.written]; - - log("watcher update event: (filename: {s}, action: {s}", .{ eventpath, @tagName(event.action) }); - - // TODO this probably needs a more sophisticated search algorithm in the future - // Possible approaches: - // - Keep a sorted list of the watched paths and perform a binary search. We could use a bool to keep - // track of whether the list is sorted and only sort it when we detect a change. - // - Use a prefix tree. Potentially more efficient for large numbers of watched paths, but complicated - // to implement and maintain. - // - others that i'm not thinking of - - for (item_paths, 0..) |path_, item_idx| { - var path = path_; - if (path.len > 0 and bun.strings.charIsAnySlash(path[path.len - 1])) { - path = path[0 .. 
path.len - 1]; - } - // log("checking path: {s}\n", .{path}); - // check if the current change applies to this item - // if so, add it to the eventlist - const rel = bun.path.isParentOrEqual(eventpath, path); - // skip unrelated items - if (rel == .unrelated) continue; - // if the event is for a parent dir of the item, only emit it if it's a delete or rename - if (rel == .parent and (event.action != .Removed or event.action != .RenamedOld)) continue; - this.watch_events[event_id].fromFileNotify(event, @truncate(item_idx)); - event_id += 1; - } - } - } - if (event_id == 0) { - continue :restart; - } - - // log("event_id: {d}\n", .{event_id}); - - var all_events = this.watch_events[0..event_id]; - std.sort.pdq(WatchEvent, all_events, {}, WatchEvent.sortByIndex); - - var last_event_index: usize = 0; - var last_event_id: INotify.EventListIndex = std.math.maxInt(INotify.EventListIndex); - - for (all_events, 0..) |_, i| { - // if (all_events[i].name_len > 0) { - // this.changed_filepaths[name_off] = temp_name_list[all_events[i].name_off]; - // all_events[i].name_off = name_off; - // name_off += 1; - // } - - if (all_events[i].index == last_event_id) { - all_events[last_event_index].merge(all_events[i]); - continue; - } - last_event_index = i; - last_event_id = all_events[i].index; - } - if (all_events.len == 0) continue :restart; - all_events = all_events[0 .. last_event_index + 1]; - - log("calling onFileUpdate (all_events.len = {d})", .{all_events.len}); - - this.onFileUpdate(this.ctx, all_events, this.changed_filepaths[0 .. 
last_event_index + 1], this.watchlist); - } - } - +fn appendFileAssumeCapacity( + this: *Watcher, + fd: bun.FileDescriptor, + file_path: string, + hash: HashType, + loader: options.Loader, + parent_hash: HashType, + package_json: ?*PackageJSON, + comptime copy_file_path: bool, +) bun.JSC.Maybe(void) { + if (comptime Environment.isWindows) { + // on windows we can only watch items that are in the directory tree of the top level dir + const rel = bun.path.isParentOrEqual(this.fs.top_level_dir, file_path); + if (rel == .unrelated) { + Output.warn("File {s} is not in the project directory and will not be watched\n", .{file_path}); return .{ .result = {} }; } + } - fn appendFileAssumeCapacity( - this: *Watcher, - fd: bun.FileDescriptor, - file_path: string, - hash: HashType, - loader: options.Loader, - parent_hash: HashType, - package_json: ?*PackageJSON, - comptime copy_file_path: bool, - ) bun.JSC.Maybe(void) { - if (comptime Environment.isWindows) { - // on windows we can only watch items that are in the directory tree of the top level dir - const rel = bun.path.isParentOrEqual(this.fs.top_level_dir, file_path); - if (rel == .unrelated) { - Output.warn("File {s} is not in the project directory and will not be watched\n", .{file_path}); - return .{ .result = {} }; - } - } + const watchlist_id = this.watchlist.len; - const watchlist_id = this.watchlist.len; + const file_path_: string = if (comptime copy_file_path) + bun.asByteSlice(this.allocator.dupeZ(u8, file_path) catch bun.outOfMemory()) + else + file_path; - const file_path_: string = if (comptime copy_file_path) - bun.asByteSlice(this.allocator.dupeZ(u8, file_path) catch bun.outOfMemory()) - else - file_path; - - var item = WatchItem{ - .file_path = file_path_, - .fd = fd, - .hash = hash, - .count = 0, - .loader = loader, - .parent_hash = parent_hash, - .package_json = package_json, - .kind = .file, - }; - - if (comptime Environment.isMac) { - const KEvent = std.c.Kevent; - - // 
https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/kqueue.2.html - var event = std.mem.zeroes(KEvent); - - event.flags = std.c.EV_ADD | std.c.EV_CLEAR | std.c.EV_ENABLE; - // we want to know about the vnode - event.filter = std.c.EVFILT_VNODE; - - event.fflags = std.c.NOTE_WRITE | std.c.NOTE_RENAME | std.c.NOTE_DELETE; - - // id - event.ident = @intCast(fd.int()); - - // Store the hash for fast filtering later - event.udata = @as(usize, @intCast(watchlist_id)); - var events: [1]KEvent = .{event}; - - // This took a lot of work to figure out the right permutation - // Basically: - // - We register the event here. - // our while(true) loop above receives notification of changes to any of the events created here. - _ = std.posix.system.kevent( - this.platform.fd.cast(), - @as([]KEvent, events[0..1]).ptr, - 1, - @as([]KEvent, events[0..1]).ptr, - 0, - null, - ); - } else if (comptime Environment.isLinux) { - // var file_path_to_use_ = std.mem.trimRight(u8, file_path_, "/"); - // var buf: [bun.MAX_PATH_BYTES+1]u8 = undefined; - // bun.copy(u8, &buf, file_path_to_use_); - // buf[file_path_to_use_.len] = 0; - var buf = file_path_.ptr; - const slice: [:0]const u8 = buf[0..file_path_.len :0]; - item.eventlist_index = switch (this.platform.watchPath(slice)) { - .err => |err| return .{ .err = err }, - .result => |r| r, - }; - } - - this.watchlist.appendAssumeCapacity(item); - return .{ .result = {} }; - } - - fn appendDirectoryAssumeCapacity( - this: *Watcher, - stored_fd: bun.FileDescriptor, - file_path: string, - hash: HashType, - comptime copy_file_path: bool, - ) bun.JSC.Maybe(WatchItemIndex) { - if (comptime Environment.isWindows) { - // on windows we can only watch items that are in the directory tree of the top level dir - const rel = bun.path.isParentOrEqual(this.fs.top_level_dir, file_path); - if (rel == .unrelated) { - Output.warn("Directory {s} is not in the project directory and will not be watched\n", .{file_path}); 
- return .{ .result = no_watch_item }; - } - } - - const fd = brk: { - if (stored_fd != .zero) break :brk stored_fd; - break :brk switch (bun.sys.openA(file_path, 0, 0)) { - .err => |err| return .{ .err = err }, - .result => |fd| fd, - }; - }; - - const parent_hash = getHash(bun.fs.PathName.init(file_path).dirWithTrailingSlash()); - - const file_path_: string = if (comptime copy_file_path) - bun.asByteSlice(this.allocator.dupeZ(u8, file_path) catch bun.outOfMemory()) - else - file_path; - - const watchlist_id = this.watchlist.len; - - var item = WatchItem{ - .file_path = file_path_, - .fd = fd, - .hash = hash, - .count = 0, - .loader = options.Loader.file, - .parent_hash = parent_hash, - .kind = .directory, - .package_json = null, - }; - - if (Environment.isMac) { - const KEvent = std.c.Kevent; - - // https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/kqueue.2.html - var event = std.mem.zeroes(KEvent); - - event.flags = std.c.EV_ADD | std.c.EV_CLEAR | std.c.EV_ENABLE; - // we want to know about the vnode - event.filter = std.c.EVFILT_VNODE; - - // monitor: - // - Write - // - Rename - // - Delete - event.fflags = std.c.NOTE_WRITE | std.c.NOTE_RENAME | std.c.NOTE_DELETE; - - // id - event.ident = @intCast(fd.int()); - - // Store the hash for fast filtering later - event.udata = @as(usize, @intCast(watchlist_id)); - var events: [1]KEvent = .{event}; - - // This took a lot of work to figure out the right permutation - // Basically: - // - We register the event here. - // our while(true) loop above receives notification of changes to any of the events created here. 
- _ = std.posix.system.kevent( - this.platform.fd.cast(), - @as([]KEvent, events[0..1]).ptr, - 1, - @as([]KEvent, events[0..1]).ptr, - 0, - null, - ); - } else if (Environment.isLinux) { - const file_path_to_use_ = std.mem.trimRight(u8, file_path_, "/"); - var buf: bun.PathBuffer = undefined; - bun.copy(u8, &buf, file_path_to_use_); - buf[file_path_to_use_.len] = 0; - const slice: [:0]u8 = buf[0..file_path_to_use_.len :0]; - item.eventlist_index = switch (this.platform.watchDir(slice)) { - .err => |err| return .{ .err = err }, - .result => |r| r, - }; - } - - this.watchlist.appendAssumeCapacity(item); - return .{ - .result = @as(WatchItemIndex, @truncate(this.watchlist.len - 1)), - }; - } - - // Below is platform-independent - - pub fn appendFileMaybeLock( - this: *Watcher, - fd: bun.FileDescriptor, - file_path: string, - hash: HashType, - loader: options.Loader, - dir_fd: bun.FileDescriptor, - package_json: ?*PackageJSON, - comptime copy_file_path: bool, - comptime lock: bool, - ) bun.JSC.Maybe(void) { - if (comptime lock) this.mutex.lock(); - defer if (comptime lock) this.mutex.unlock(); - bun.assert(file_path.len > 1); - const pathname = bun.fs.PathName.init(file_path); - - const parent_dir = pathname.dirWithTrailingSlash(); - const parent_dir_hash: HashType = getHash(parent_dir); - - var parent_watch_item: ?WatchItemIndex = null; - const autowatch_parent_dir = (comptime FeatureFlags.watch_directories) and this.isEligibleDirectory(parent_dir); - if (autowatch_parent_dir) { - var watchlist_slice = this.watchlist.slice(); - - if (dir_fd != .zero) { - const fds = watchlist_slice.items(.fd); - if (std.mem.indexOfScalar(bun.FileDescriptor, fds, dir_fd)) |i| { - parent_watch_item = @as(WatchItemIndex, @truncate(i)); - } - } - - if (parent_watch_item == null) { - const hashes = watchlist_slice.items(.hash); - if (std.mem.indexOfScalar(HashType, hashes, parent_dir_hash)) |i| { - parent_watch_item = @as(WatchItemIndex, @truncate(i)); - } - } - } - 
this.watchlist.ensureUnusedCapacity(this.allocator, 1 + @as(usize, @intCast(@intFromBool(parent_watch_item == null)))) catch bun.outOfMemory(); - - if (autowatch_parent_dir) { - parent_watch_item = parent_watch_item orelse switch (this.appendDirectoryAssumeCapacity(dir_fd, parent_dir, parent_dir_hash, copy_file_path)) { - .err => |err| return .{ .err = err }, - .result => |r| r, - }; - } - - switch (this.appendFileAssumeCapacity( - fd, - file_path, - hash, - loader, - parent_dir_hash, - package_json, - copy_file_path, - )) { - .err => |err| return .{ .err = err }, - .result => {}, - } - - if (comptime FeatureFlags.verbose_watcher) { - if (strings.indexOf(file_path, this.cwd)) |i| { - Output.prettyln("Added ./{s} to watch list.", .{file_path[i + this.cwd.len ..]}); - } else { - Output.prettyln("Added {s} to watch list.", .{file_path}); - } - } - - return .{ .result = {} }; - } - - inline fn isEligibleDirectory(this: *Watcher, dir: string) bool { - return strings.contains(dir, this.fs.top_level_dir) and !strings.contains(dir, "node_modules"); - } - - pub fn appendFile( - this: *Watcher, - fd: bun.FileDescriptor, - file_path: string, - hash: HashType, - loader: options.Loader, - dir_fd: bun.FileDescriptor, - package_json: ?*PackageJSON, - comptime copy_file_path: bool, - ) bun.JSC.Maybe(void) { - return appendFileMaybeLock(this, fd, file_path, hash, loader, dir_fd, package_json, copy_file_path, true); - } - - pub fn addDirectory( - this: *Watcher, - fd: bun.FileDescriptor, - file_path: string, - hash: HashType, - comptime copy_file_path: bool, - ) bun.JSC.Maybe(WatchItemIndex) { - this.mutex.lock(); - defer this.mutex.unlock(); - - if (this.indexOf(hash)) |idx| { - return .{ .result = @truncate(idx) }; - } - - this.watchlist.ensureUnusedCapacity(this.allocator, 1) catch bun.outOfMemory(); - - return this.appendDirectoryAssumeCapacity(fd, file_path, hash, copy_file_path); - } - - pub fn addFile( - this: *Watcher, - fd: bun.FileDescriptor, - file_path: string, - hash: 
HashType, - loader: options.Loader, - dir_fd: bun.FileDescriptor, - package_json: ?*PackageJSON, - comptime copy_file_path: bool, - ) bun.JSC.Maybe(void) { - // This must lock due to concurrent transpiler - this.mutex.lock(); - defer this.mutex.unlock(); - - if (this.indexOf(hash)) |index| { - if (comptime FeatureFlags.atomic_file_watcher) { - // On Linux, the file descriptor might be out of date. - if (fd.int() > 0) { - var fds = this.watchlist.items(.fd); - fds[index] = fd; - } - } - return .{ .result = {} }; - } - - return this.appendFileMaybeLock(fd, file_path, hash, loader, dir_fd, package_json, copy_file_path, false); - } - - pub fn indexOf(this: *Watcher, hash: HashType) ?u32 { - for (this.watchlist.items(.hash), 0..) |other, i| { - if (hash == other) { - return @as(u32, @truncate(i)); - } - } - return null; - } - - pub fn remove(this: *Watcher, hash: HashType) void { - this.mutex.lock(); - defer this.mutex.unlock(); - if (this.indexOf(hash)) |index| { - this.removeAtIndex(@truncate(index), hash, &[_]HashType{}, .file); - } - } - - pub fn removeAtIndex(this: *Watcher, index: WatchItemIndex, hash: HashType, parents: []HashType, comptime kind: WatchItem.Kind) void { - bun.assert(index != no_watch_item); - - this.evict_list[this.evict_list_i] = index; - this.evict_list_i += 1; - - if (comptime kind == .directory) { - for (parents) |parent| { - if (parent == hash) { - this.evict_list[this.evict_list_i] = @as(WatchItemIndex, @truncate(parent)); - this.evict_list_i += 1; - } - } - } - } - - pub fn getResolveWatcher(watcher: *Watcher) bun.resolver.AnyResolveWatcher { - return bun.resolver.ResolveWatcher(*@This(), onMaybeWatchDirectory).init(watcher); - } - - pub fn onMaybeWatchDirectory(watch: *Watcher, file_path: string, dir_fd: bun.StoredFileDescriptorType) void { - // We don't want to watch: - // - Directories outside the root directory - // - Directories inside node_modules - if (std.mem.indexOf(u8, file_path, "node_modules") == null and std.mem.indexOf(u8, 
file_path, watch.fs.top_level_dir) != null) { - _ = watch.addDirectory(dir_fd, file_path, getHash(file_path), false); - } - } + var item = WatchItem{ + .file_path = file_path_, + .fd = fd, + .hash = hash, + .count = 0, + .loader = loader, + .parent_hash = parent_hash, + .package_json = package_json, + .kind = .file, }; + + if (comptime Environment.isMac) { + const KEvent = std.c.Kevent; + + // https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/kqueue.2.html + var event = std.mem.zeroes(KEvent); + + event.flags = std.c.EV_ADD | std.c.EV_CLEAR | std.c.EV_ENABLE; + // we want to know about the vnode + event.filter = std.c.EVFILT_VNODE; + + event.fflags = std.c.NOTE_WRITE | std.c.NOTE_RENAME | std.c.NOTE_DELETE; + + // id + event.ident = @intCast(fd.int()); + + // Store the hash for fast filtering later + event.udata = @as(usize, @intCast(watchlist_id)); + var events: [1]KEvent = .{event}; + + // This took a lot of work to figure out the right permutation + // Basically: + // - We register the event here. + // our while(true) loop above receives notification of changes to any of the events created here. 
+ _ = std.posix.system.kevent( + this.platform.fd.cast(), + @as([]KEvent, events[0..1]).ptr, + 1, + @as([]KEvent, events[0..1]).ptr, + 0, + null, + ); + } else if (comptime Environment.isLinux) { + // var file_path_to_use_ = std.mem.trimRight(u8, file_path_, "/"); + // var buf: [bun.MAX_PATH_BYTES+1]u8 = undefined; + // bun.copy(u8, &buf, file_path_to_use_); + // buf[file_path_to_use_.len] = 0; + var buf = file_path_.ptr; + const slice: [:0]const u8 = buf[0..file_path_.len :0]; + item.eventlist_index = switch (this.platform.watchPath(slice)) { + .err => |err| return .{ .err = err }, + .result => |r| r, + }; + } + + this.watchlist.appendAssumeCapacity(item); + return .{ .result = {} }; +} + +fn appendDirectoryAssumeCapacity( + this: *Watcher, + stored_fd: bun.FileDescriptor, + file_path: string, + hash: HashType, + comptime copy_file_path: bool, +) bun.JSC.Maybe(WatchItemIndex) { + if (comptime Environment.isWindows) { + // on windows we can only watch items that are in the directory tree of the top level dir + const rel = bun.path.isParentOrEqual(this.fs.top_level_dir, file_path); + if (rel == .unrelated) { + Output.warn("Directory {s} is not in the project directory and will not be watched\n", .{file_path}); + return .{ .result = no_watch_item }; + } + } + + const fd = brk: { + if (stored_fd != .zero) break :brk stored_fd; + break :brk switch (bun.sys.openA(file_path, 0, 0)) { + .err => |err| return .{ .err = err }, + .result => |fd| fd, + }; + }; + + const parent_hash = getHash(bun.fs.PathName.init(file_path).dirWithTrailingSlash()); + + const file_path_: string = if (comptime copy_file_path) + bun.asByteSlice(this.allocator.dupeZ(u8, file_path) catch bun.outOfMemory()) + else + file_path; + + const watchlist_id = this.watchlist.len; + + var item = WatchItem{ + .file_path = file_path_, + .fd = fd, + .hash = hash, + .count = 0, + .loader = options.Loader.file, + .parent_hash = parent_hash, + .kind = .directory, + .package_json = null, + }; + + if 
(Environment.isMac) { + const KEvent = std.c.Kevent; + + // https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/kqueue.2.html + var event = std.mem.zeroes(KEvent); + + event.flags = std.c.EV_ADD | std.c.EV_CLEAR | std.c.EV_ENABLE; + // we want to know about the vnode + event.filter = std.c.EVFILT_VNODE; + + // monitor: + // - Write + // - Rename + // - Delete + event.fflags = std.c.NOTE_WRITE | std.c.NOTE_RENAME | std.c.NOTE_DELETE; + + // id + event.ident = @intCast(fd.int()); + + // Store the hash for fast filtering later + event.udata = @as(usize, @intCast(watchlist_id)); + var events: [1]KEvent = .{event}; + + // This took a lot of work to figure out the right permutation + // Basically: + // - We register the event here. + // our while(true) loop above receives notification of changes to any of the events created here. + _ = std.posix.system.kevent( + this.platform.fd.cast(), + @as([]KEvent, events[0..1]).ptr, + 1, + @as([]KEvent, events[0..1]).ptr, + 0, + null, + ); + } else if (Environment.isLinux) { + const file_path_to_use_ = std.mem.trimRight(u8, file_path_, "/"); + var buf: bun.PathBuffer = undefined; + bun.copy(u8, &buf, file_path_to_use_); + buf[file_path_to_use_.len] = 0; + const slice: [:0]u8 = buf[0..file_path_to_use_.len :0]; + item.eventlist_index = switch (this.platform.watchDir(slice)) { + .err => |err| return .{ .err = err }, + .result => |r| r, + }; + } + + this.watchlist.appendAssumeCapacity(item); + return .{ + .result = @as(WatchItemIndex, @truncate(this.watchlist.len - 1)), + }; +} + +// Below is platform-independent + +pub fn appendFileMaybeLock( + this: *Watcher, + fd: bun.FileDescriptor, + file_path: string, + hash: HashType, + loader: options.Loader, + dir_fd: bun.FileDescriptor, + package_json: ?*PackageJSON, + comptime copy_file_path: bool, + comptime lock: bool, +) bun.JSC.Maybe(void) { + if (comptime lock) this.mutex.lock(); + defer if (comptime lock) this.mutex.unlock(); + 
bun.assert(file_path.len > 1); + const pathname = bun.fs.PathName.init(file_path); + + const parent_dir = pathname.dirWithTrailingSlash(); + const parent_dir_hash: HashType = getHash(parent_dir); + + var parent_watch_item: ?WatchItemIndex = null; + const autowatch_parent_dir = (comptime FeatureFlags.watch_directories) and this.isEligibleDirectory(parent_dir); + if (autowatch_parent_dir) { + var watchlist_slice = this.watchlist.slice(); + + if (dir_fd != .zero) { + const fds = watchlist_slice.items(.fd); + if (std.mem.indexOfScalar(bun.FileDescriptor, fds, dir_fd)) |i| { + parent_watch_item = @as(WatchItemIndex, @truncate(i)); + } + } + + if (parent_watch_item == null) { + const hashes = watchlist_slice.items(.hash); + if (std.mem.indexOfScalar(HashType, hashes, parent_dir_hash)) |i| { + parent_watch_item = @as(WatchItemIndex, @truncate(i)); + } + } + } + this.watchlist.ensureUnusedCapacity(this.allocator, 1 + @as(usize, @intCast(@intFromBool(parent_watch_item == null)))) catch bun.outOfMemory(); + + if (autowatch_parent_dir) { + parent_watch_item = parent_watch_item orelse switch (this.appendDirectoryAssumeCapacity(dir_fd, parent_dir, parent_dir_hash, copy_file_path)) { + .err => |err| return .{ .err = err }, + .result => |r| r, + }; + } + + switch (this.appendFileAssumeCapacity( + fd, + file_path, + hash, + loader, + parent_dir_hash, + package_json, + copy_file_path, + )) { + .err => |err| return .{ .err = err }, + .result => {}, + } + + if (comptime FeatureFlags.verbose_watcher) { + if (strings.indexOf(file_path, this.cwd)) |i| { + Output.prettyln("Added ./{s} to watch list.", .{file_path[i + this.cwd.len ..]}); + } else { + Output.prettyln("Added {s} to watch list.", .{file_path}); + } + } + + return .{ .result = {} }; +} + +inline fn isEligibleDirectory(this: *Watcher, dir: string) bool { + return strings.contains(dir, this.fs.top_level_dir) and !strings.contains(dir, "node_modules"); +} + +pub fn appendFile( + this: *Watcher, + fd: bun.FileDescriptor, + 
file_path: string, + hash: HashType, + loader: options.Loader, + dir_fd: bun.FileDescriptor, + package_json: ?*PackageJSON, + comptime copy_file_path: bool, +) bun.JSC.Maybe(void) { + return appendFileMaybeLock(this, fd, file_path, hash, loader, dir_fd, package_json, copy_file_path, true); +} + +pub fn addDirectory( + this: *Watcher, + fd: bun.FileDescriptor, + file_path: string, + hash: HashType, + comptime copy_file_path: bool, +) bun.JSC.Maybe(WatchItemIndex) { + this.mutex.lock(); + defer this.mutex.unlock(); + + if (this.indexOf(hash)) |idx| { + return .{ .result = @truncate(idx) }; + } + + this.watchlist.ensureUnusedCapacity(this.allocator, 1) catch bun.outOfMemory(); + + return this.appendDirectoryAssumeCapacity(fd, file_path, hash, copy_file_path); +} + +pub fn addFile( + this: *Watcher, + fd: bun.FileDescriptor, + file_path: string, + hash: HashType, + loader: options.Loader, + dir_fd: bun.FileDescriptor, + package_json: ?*PackageJSON, + comptime copy_file_path: bool, +) bun.JSC.Maybe(void) { + // This must lock due to concurrent transpiler + this.mutex.lock(); + defer this.mutex.unlock(); + + if (this.indexOf(hash)) |index| { + if (comptime FeatureFlags.atomic_file_watcher) { + // On Linux, the file descriptor might be out of date. + if (fd.int() > 0) { + var fds = this.watchlist.items(.fd); + fds[index] = fd; + } + } + return .{ .result = {} }; + } + + return this.appendFileMaybeLock(fd, file_path, hash, loader, dir_fd, package_json, copy_file_path, false); +} + +pub fn indexOf(this: *Watcher, hash: HashType) ?u32 { + for (this.watchlist.items(.hash), 0..) 
|other, i| { + if (hash == other) { + return @as(u32, @truncate(i)); + } + } + return null; +} + +pub fn remove(this: *Watcher, hash: HashType) void { + this.mutex.lock(); + defer this.mutex.unlock(); + if (this.indexOf(hash)) |index| { + this.removeAtIndex(@truncate(index), hash, &[_]HashType{}, .file); + } +} + +pub fn removeAtIndex(this: *Watcher, index: WatchItemIndex, hash: HashType, parents: []HashType, comptime kind: WatchItem.Kind) void { + bun.assert(index != no_watch_item); + + this.evict_list[this.evict_list_i] = index; + this.evict_list_i += 1; + + if (comptime kind == .directory) { + for (parents) |parent| { + if (parent == hash) { + this.evict_list[this.evict_list_i] = @as(WatchItemIndex, @truncate(parent)); + this.evict_list_i += 1; + } + } + } +} + +pub fn getResolveWatcher(watcher: *Watcher) bun.resolver.AnyResolveWatcher { + return bun.resolver.ResolveWatcher(*@This(), onMaybeWatchDirectory).init(watcher); +} + +pub fn onMaybeWatchDirectory(watch: *Watcher, file_path: string, dir_fd: bun.StoredFileDescriptorType) void { + // We don't want to watch: + // - Directories outside the root directory + // - Directories inside node_modules + if (std.mem.indexOf(u8, file_path, "node_modules") == null and std.mem.indexOf(u8, file_path, watch.fs.top_level_dir) != null) { + _ = watch.addDirectory(dir_fd, file_path, getHash(file_path), false); + } +} + +const std = @import("std"); +const bun = @import("root").bun; +const string = bun.string; +const Output = bun.Output; +const Global = bun.Global; +const Environment = bun.Environment; +const strings = bun.strings; +const stringZ = bun.stringZ; +const FeatureFlags = bun.FeatureFlags; +const options = @import("./options.zig"); +const Mutex = bun.Mutex; +const Futex = @import("./futex.zig"); +const PackageJSON = @import("./resolver/package_json.zig").PackageJSON; diff --git a/src/watcher/INotifyWatcher.zig b/src/watcher/INotifyWatcher.zig new file mode 100644 index 0000000000..e088174abe --- /dev/null +++ 
b/src/watcher/INotifyWatcher.zig @@ -0,0 +1,336 @@ +//! Bun's filesystem watcher implementation for linux using inotify +//! https://man7.org/linux/man-pages/man7/inotify.7.html +const INotifyWatcher = @This(); + +// inotify events are variable-sized, so a byte buffer is used (also needed +// since communication is done via the `read` syscall). what is notable about +// this is that while a max_count is defined, more events than max_count can be +// read if the paths are short. the buffer is sized not to the maximum possible, +// but an arbitrary but reasonable size. when reading, the strategy is to read +// as much as possible, then process the buffer in `max_count` chunks, since +// `bun.Watcher` has the same hardcoded `max_count`. +const max_count = bun.Watcher.max_count; +const eventlist_bytes_size = (Event.largest_size / 2) * max_count; + +fd: bun.FileDescriptor = bun.invalid_fd, +loaded: bool = false, + +eventlist_bytes: [eventlist_bytes_size]u8 align(@alignOf(Event)) = undefined, +/// pointers into the next chunk of events +eventlist_ptrs: [max_count]*Event = undefined, +/// if defined, it means `read` should continue from this offset before asking +/// for more bytes. this is only hit under high watching load. +/// see `test-fs-watch-recursive-linux-parallel-remove.js` +read_ptr: ?struct { + i: u32, + len: u32, +} = null, + +watch_count: std.atomic.Value(u32) = std.atomic.Value(u32).init(0), +/// nanoseconds +coalesce_interval: isize = 100_000, + +pub const EventListIndex = c_int; +pub const Event = extern struct { + watch_descriptor: EventListIndex, + mask: u32, + cookie: u32, + /// The name field is present only when an event is returned for a + /// file inside a watched directory; it identifies the filename + /// within the watched directory. This filename is null-terminated, + /// and may include further null bytes ('\0') to align subsequent + /// reads to a suitable address boundary. 
+ /// + /// The len field counts all of the bytes in name, including the null + /// bytes; the length of each inotify_event structure is thus + /// sizeof(struct inotify_event)+len. + name_len: u32, + + const largest_size = std.mem.alignForward(usize, @sizeOf(Event) + bun.MAX_PATH_BYTES, @alignOf(Event)); + + pub fn name(event: *Event) [:0]u8 { + if (comptime Environment.allow_assert) bun.assert(event.name_len > 0); + const name_first_char_ptr = std.mem.asBytes(&event.name_len).ptr + @sizeOf(u32); + return bun.sliceTo(@as([*:0]u8, @ptrCast(name_first_char_ptr)), 0); + } + + pub fn size(event: *Event) u32 { + return @intCast(@sizeOf(Event) + event.name_len); + } +}; + +pub fn watchPath(this: *INotifyWatcher, pathname: [:0]const u8) bun.JSC.Maybe(EventListIndex) { + bun.assert(this.loaded); + const old_count = this.watch_count.fetchAdd(1, .release); + defer if (old_count == 0) Futex.wake(&this.watch_count, 10); + const watch_file_mask = std.os.linux.IN.EXCL_UNLINK | std.os.linux.IN.MOVE_SELF | std.os.linux.IN.DELETE_SELF | std.os.linux.IN.MOVED_TO | std.os.linux.IN.MODIFY; + return .{ + .result = std.posix.inotify_add_watchZ(this.fd.cast(), pathname, watch_file_mask) catch |err| return .{ + .err = .{ + .errno = @truncate(@intFromEnum(switch (err) { + error.FileNotFound => bun.C.E.NOENT, + error.AccessDenied => bun.C.E.ACCES, + error.SystemResources => bun.C.E.NOMEM, + error.Unexpected => bun.C.E.INVAL, + error.NotDir => bun.C.E.NOTDIR, + error.NameTooLong => bun.C.E.NAMETOOLONG, + error.UserResourceLimitReached => bun.C.E.MFILE, + error.WatchAlreadyExists => bun.C.E.EXIST, + })), + .syscall = .watch, + }, + }, + }; +} + +pub fn watchDir(this: *INotifyWatcher, pathname: [:0]const u8) bun.JSC.Maybe(EventListIndex) { + bun.assert(this.loaded); + const old_count = this.watch_count.fetchAdd(1, .release); + defer if (old_count == 0) Futex.wake(&this.watch_count, 10); + const watch_dir_mask = std.os.linux.IN.EXCL_UNLINK | std.os.linux.IN.DELETE | std.os.linux.IN.DELETE_SELF 
| std.os.linux.IN.CREATE | std.os.linux.IN.MOVE_SELF | std.os.linux.IN.ONLYDIR | std.os.linux.IN.MOVED_TO; + return .{ + .result = std.posix.inotify_add_watchZ(this.fd.cast(), pathname, watch_dir_mask) catch |err| return .{ + .err = .{ + .errno = @truncate(@intFromEnum(switch (err) { + error.FileNotFound => bun.C.E.NOENT, + error.AccessDenied => bun.C.E.ACCES, + error.SystemResources => bun.C.E.NOMEM, + error.Unexpected => bun.C.E.INVAL, + error.NotDir => bun.C.E.NOTDIR, + error.NameTooLong => bun.C.E.NAMETOOLONG, + error.UserResourceLimitReached => bun.C.E.MFILE, + error.WatchAlreadyExists => bun.C.E.EXIST, + })), + .syscall = .watch, + }, + }, + }; +} + +pub fn unwatch(this: *INotifyWatcher, wd: EventListIndex) void { + bun.assert(this.loaded); + _ = this.watch_count.fetchSub(1, .release); + std.posix.inotify_rm_watch(this.fd.cast(), wd); +} + +pub fn init(this: *INotifyWatcher, _: []const u8) !void { + bun.assert(!this.loaded); + this.loaded = true; + + if (bun.getenvZ("BUN_INOTIFY_COALESCE_INTERVAL")) |env| { + this.coalesce_interval = std.fmt.parseInt(isize, env, 10) catch 100_000; + } + + this.fd = bun.toFD(try std.posix.inotify_init1(std.os.linux.IN.CLOEXEC)); +} + +pub fn read(this: *INotifyWatcher) bun.JSC.Maybe([]const *Event) { + bun.assert(this.loaded); + // This is what replit does as of January 2023. + // 1) CREATE .http.ts.3491171321~ + // 2) OPEN .http.ts.3491171321~ + // 3) ATTRIB .http.ts.3491171321~ + // 4) MODIFY .http.ts.3491171321~ + // 5) CLOSE_WRITE,CLOSE .http.ts.3491171321~ + // 6) MOVED_FROM .http.ts.3491171321~ + // 7) MOVED_TO http.ts + // We still don't correctly handle MOVED_FROM && MOVED_TO it seems.
+ var i: u32 = 0; + const read_eventlist_bytes = if (this.read_ptr) |ptr| brk: { + Futex.waitForever(&this.watch_count, 0); + i = ptr.i; + break :brk this.eventlist_bytes[0..ptr.len]; + } else outer: while (true) { + Futex.waitForever(&this.watch_count, 0); + + const rc = std.posix.system.read( + this.fd.cast(), + &this.eventlist_bytes, + this.eventlist_bytes.len, + ); + const errno = std.posix.errno(rc); + switch (errno) { + .SUCCESS => { + var read_eventlist_bytes = this.eventlist_bytes[0..@intCast(rc)]; + if (read_eventlist_bytes.len == 0) return .{ .result = &.{} }; + + // IN_MODIFY is very noisy + // we do a 0.1ms sleep to try to coalesce events better + const double_read_threshold = Event.largest_size * (max_count / 2); + if (read_eventlist_bytes.len < double_read_threshold) { + var fds = [_]std.posix.pollfd{.{ + .fd = this.fd.cast(), + .events = std.posix.POLL.IN | std.posix.POLL.ERR, + .revents = 0, + }}; + var timespec = std.posix.timespec{ .tv_sec = 0, .tv_nsec = this.coalesce_interval }; + if ((std.posix.ppoll(&fds, ×pec, null) catch 0) > 0) { + inner: while (true) { + const rest = this.eventlist_bytes[read_eventlist_bytes.len..]; + bun.assert(rest.len > 0); + const new_rc = std.posix.system.read(this.fd.cast(), rest.ptr, rest.len); + // Output.warn("wapa {} {} = {}", .{ this.fd, rest.len, new_rc }); + const e = std.posix.errno(new_rc); + switch (e) { + .SUCCESS => { + read_eventlist_bytes.len += @intCast(new_rc); + break :outer read_eventlist_bytes; + }, + .AGAIN, .INTR => continue :inner, + else => return .{ .err = .{ + .errno = @truncate(@intFromEnum(e)), + .syscall = .read, + } }, + } + } + } + } + + break :outer read_eventlist_bytes; + }, + .AGAIN, .INTR => continue :outer, + .INVAL => { + if (Environment.isDebug) { + bun.Output.err("EINVAL", "inotify read({}, {d})", .{ this.fd, this.eventlist_bytes.len }); + } + return .{ .err = .{ + .errno = @truncate(@intFromEnum(errno)), + .syscall = .read, + } }; + }, + else => return .{ .err = .{ + .errno = 
@truncate(@intFromEnum(errno)), + .syscall = .read, + } }, + } + }; + + var count: u32 = 0; + while (i < read_eventlist_bytes.len) { + // It is implied that events *are* aligned, since the manual + // pages note about extra zero bytes in the name field. + const event: *Event = @alignCast(@ptrCast(read_eventlist_bytes[i..][0..@sizeOf(Event)].ptr)); + this.eventlist_ptrs[count] = event; + i += event.size(); + count += 1; + + // when under high load with short file paths, it is very easy to + // overrun the watcher's event buffer. + if (count == max_count) { + this.read_ptr = .{ + .i = i, + .len = @intCast(read_eventlist_bytes.len), + }; + return .{ .result = &this.eventlist_ptrs }; + } + } + + return .{ .result = this.eventlist_ptrs[0..count] }; +} + +pub fn stop(this: *INotifyWatcher) void { + if (this.fd != bun.invalid_fd) { + _ = bun.sys.close(this.fd); + this.fd = bun.invalid_fd; + } +} + +/// Repeatedly called by the main watcher until the watcher is terminated. +pub fn watchLoopCycle(this: *bun.Watcher) bun.JSC.Maybe(void) { + defer Output.flush(); + + var events = switch (this.platform.read()) { + .result => |result| result, + .err => |err| return .{ .err = err }, + }; + if (events.len == 0) return .{ .result = {} }; + + // TODO: is this thread safe? 
+ var remaining_events = events.len; + + const eventlist_index = this.watchlist.items(.eventlist_index); + + while (remaining_events > 0) { + var name_off: u8 = 0; + var temp_name_list: [128]?[:0]u8 = undefined; + var temp_name_off: u8 = 0; + + const slice = events[0..@min(128, remaining_events, this.watch_events.len)]; + var watchevents = this.watch_events[0..slice.len]; + var watch_event_id: u32 = 0; + for (slice) |event| { + watchevents[watch_event_id] = watchEventFromInotifyEvent( + event, + @intCast(std.mem.indexOfScalar( + EventListIndex, + eventlist_index, + event.watch_descriptor, + ) orelse continue), + ); + temp_name_list[temp_name_off] = if (event.name_len > 0) + event.name() + else + null; + watchevents[watch_event_id].name_off = temp_name_off; + watchevents[watch_event_id].name_len = @as(u8, @intFromBool((event.name_len > 0))); + temp_name_off += @as(u8, @intFromBool((event.name_len > 0))); + + watch_event_id += 1; + } + + var all_events = watchevents[0..watch_event_id]; + std.sort.pdq(WatchEvent, all_events, {}, WatchEvent.sortByIndex); + + var last_event_index: usize = 0; + var last_event_id: EventListIndex = std.math.maxInt(EventListIndex); + + for (all_events, 0..) |_, i| { + if (all_events[i].name_len > 0) { + this.changed_filepaths[name_off] = temp_name_list[all_events[i].name_off]; + all_events[i].name_off = name_off; + name_off += 1; + } + + if (all_events[i].index == last_event_id) { + all_events[last_event_index].merge(all_events[i]); + continue; + } + last_event_index = i; + last_event_id = all_events[i].index; + } + if (all_events.len == 0) return .{ .result = {} }; + + this.mutex.lock(); + defer this.mutex.unlock(); + if (this.running) { + this.onFileUpdate(this.ctx, all_events[0 .. last_event_index + 1], this.changed_filepaths[0 .. 
name_off + 1], this.watchlist); + } else { + break; + } + remaining_events -= slice.len; + } + + return .{ .result = {} }; +} + +pub fn watchEventFromInotifyEvent(event: *const INotifyWatcher.Event, index: WatchItemIndex) WatchEvent { + return .{ + .op = .{ + .delete = (event.mask & std.os.linux.IN.DELETE_SELF) > 0 or (event.mask & std.os.linux.IN.DELETE) > 0, + .rename = (event.mask & std.os.linux.IN.MOVE_SELF) > 0, + .move_to = (event.mask & std.os.linux.IN.MOVED_TO) > 0, + .write = (event.mask & std.os.linux.IN.MODIFY) > 0, + }, + .index = index, + }; +} + +const std = @import("std"); +const bun = @import("root").bun; +const Environment = bun.Environment; +const Output = bun.Output; +const log = Output.scoped(.watcher, false); +const Futex = bun.Futex; + +const WatchItemIndex = bun.Watcher.WatchItemIndex; +const WatchEvent = bun.Watcher.Event; diff --git a/src/watcher/KEventWatcher.zig b/src/watcher/KEventWatcher.zig new file mode 100644 index 0000000000..b5c4436d13 --- /dev/null +++ b/src/watcher/KEventWatcher.zig @@ -0,0 +1,111 @@ +const KEventWatcher = @This(); +pub const EventListIndex = u32; + +const KEvent = std.c.Kevent; + +// Internal +changelist: [128]KEvent = undefined, + +// Everything being watched +eventlist: [max_eviction_count]KEvent = undefined, +eventlist_index: EventListIndex = 0, + +fd: bun.FileDescriptor = bun.invalid_fd, + +pub fn init(this: *KEventWatcher, _: []const u8) !void { + const fd = try std.posix.kqueue(); + if (fd == 0) return error.KQueueError; + this.fd = bun.toFD(fd); +} + +pub fn stop(this: *KEventWatcher) void { + if (this.fd.isValid()) { + _ = bun.sys.close(this.fd); + this.fd = bun.invalid_fd; + } +} + +pub fn watchEventFromKEvent(kevent: KEvent) Watcher.Event { + return .{ + .op = .{ + .delete = (kevent.fflags & std.c.NOTE_DELETE) > 0, + .metadata = (kevent.fflags & std.c.NOTE_ATTRIB) > 0, + .rename = (kevent.fflags & (std.c.NOTE_RENAME | std.c.NOTE_LINK)) > 0, + .write = (kevent.fflags & std.c.NOTE_WRITE) > 0, + }, + 
.index = @truncate(kevent.udata), + }; +} + +pub fn watchLoopCycle(this: *Watcher) bun.JSC.Maybe(void) { + bun.assert(this.platform.fd.isValid()); + + // not initialized each time + var changelist_array: [128]KEvent = std.mem.zeroes([128]KEvent); + var changelist = &changelist_array; + + defer Output.flush(); + + var count = std.posix.system.kevent( + this.platform.fd.cast(), + @as([*]KEvent, changelist), + 0, + @as([*]KEvent, changelist), + 128, + + null, + ); + + // Give the events more time to coalesce + if (count < 128 / 2) { + const remain = 128 - count; + var timespec = std.posix.timespec{ .tv_sec = 0, .tv_nsec = 100_000 }; + const extra = std.posix.system.kevent( + this.platform.fd.cast(), + @as([*]KEvent, changelist[@as(usize, @intCast(count))..].ptr), + 0, + @as([*]KEvent, changelist[@as(usize, @intCast(count))..].ptr), + remain, + + ×pec, + ); + + count += extra; + } + + var changes = changelist[0..@as(usize, @intCast(@max(0, count)))]; + var watchevents = this.watch_events[0..changes.len]; + var out_len: usize = 0; + if (changes.len > 0) { + watchevents[0] = watchEventFromKEvent(changes[0]); + out_len = 1; + var prev_event = changes[0]; + for (changes[1..]) |event| { + if (prev_event.udata == event.udata) { + const new = watchEventFromKEvent(event); + watchevents[out_len - 1].merge(new); + continue; + } + + watchevents[out_len] = watchEventFromKEvent(event); + prev_event = event; + out_len += 1; + } + + watchevents = watchevents[0..out_len]; + } + + this.mutex.lock(); + defer this.mutex.unlock(); + if (this.running) { + this.onFileUpdate(this.ctx, watchevents, this.changed_filepaths[0..watchevents.len], this.watchlist); + } + + return .{ .result = {} }; +} + +const std = @import("std"); +const bun = @import("root").bun; +const Output = bun.Output; +const Watcher = bun.Watcher; +const max_eviction_count = Watcher.max_eviction_count; diff --git a/src/watcher/WindowsWatcher.zig b/src/watcher/WindowsWatcher.zig new file mode 100644 index 
0000000000..294e9275df --- /dev/null +++ b/src/watcher/WindowsWatcher.zig @@ -0,0 +1,301 @@ +//! Bun's filesystem watcher implementation for windows using kernel32 +const WindowsWatcher = @This(); + +mutex: Mutex = .{}, +iocp: w.HANDLE = undefined, +watcher: DirWatcher = undefined, +buf: bun.PathBuffer = undefined, +base_idx: usize = 0, + +pub const EventListIndex = c_int; + +const Error = error{ + IocpFailed, + ReadDirectoryChangesFailed, + CreateFileFailed, + InvalidPath, +}; + +const Action = enum(w.DWORD) { + Added = w.FILE_ACTION_ADDED, + Removed = w.FILE_ACTION_REMOVED, + Modified = w.FILE_ACTION_MODIFIED, + RenamedOld = w.FILE_ACTION_RENAMED_OLD_NAME, + RenamedNew = w.FILE_ACTION_RENAMED_NEW_NAME, +}; + +const FileEvent = struct { + action: Action, + filename: []u16 = undefined, +}; + +const DirWatcher = struct { + // must be initialized to zero (even though it's never read or written in our code), + // otherwise ReadDirectoryChangesW will fail with INVALID_HANDLE + overlapped: w.OVERLAPPED = std.mem.zeroes(w.OVERLAPPED), + buf: [64 * 1024]u8 align(@alignOf(w.FILE_NOTIFY_INFORMATION)) = undefined, + dirHandle: w.HANDLE, + + // invalidates any EventIterators + fn prepare(this: *DirWatcher) bun.JSC.Maybe(void) { + const filter = w.FILE_NOTIFY_CHANGE_FILE_NAME | w.FILE_NOTIFY_CHANGE_DIR_NAME | w.FILE_NOTIFY_CHANGE_LAST_WRITE | w.FILE_NOTIFY_CHANGE_CREATION; + if (w.kernel32.ReadDirectoryChangesW(this.dirHandle, &this.buf, this.buf.len, 1, filter, null, &this.overlapped, null) == 0) { + const err = w.kernel32.GetLastError(); + log("failed to start watching directory: {s}", .{@tagName(err)}); + return .{ .err = .{ + .errno = @intFromEnum(bun.C.SystemErrno.init(err) orelse bun.C.SystemErrno.EINVAL), + .syscall = .watch, + } }; + } + log("read directory changes!", .{}); + return .{ .result = {} }; + } +}; + +const EventIterator = struct { + watcher: *DirWatcher, + offset: usize = 0, + hasNext: bool = true, + + pub fn next(this: *EventIterator) ?FileEvent { + if 
(!this.hasNext) return null; + const info_size = @sizeOf(w.FILE_NOTIFY_INFORMATION); + const info: *w.FILE_NOTIFY_INFORMATION = @alignCast(@ptrCast(this.watcher.buf[this.offset..].ptr)); + const name_ptr: [*]u16 = @alignCast(@ptrCast(this.watcher.buf[this.offset + info_size ..])); + const filename: []u16 = name_ptr[0 .. info.FileNameLength / @sizeOf(u16)]; + + const action: Action = @enumFromInt(info.Action); + + if (info.NextEntryOffset == 0) { + this.hasNext = false; + } else { + this.offset += @as(usize, info.NextEntryOffset); + } + + return FileEvent{ + .action = action, + .filename = filename, + }; + } +}; + +pub fn init(this: *WindowsWatcher, root: []const u8) !void { + var pathbuf: bun.WPathBuffer = undefined; + const wpath = bun.strings.toNTPath(&pathbuf, root); + const path_len_bytes: u16 = @truncate(wpath.len * 2); + var nt_name = w.UNICODE_STRING{ + .Length = path_len_bytes, + .MaximumLength = path_len_bytes, + .Buffer = @constCast(wpath.ptr), + }; + var attr = w.OBJECT_ATTRIBUTES{ + .Length = @sizeOf(w.OBJECT_ATTRIBUTES), + .RootDirectory = null, + .Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here. 
+ .ObjectName = &nt_name, + .SecurityDescriptor = null, + .SecurityQualityOfService = null, + }; + var handle: w.HANDLE = w.INVALID_HANDLE_VALUE; + var io: w.IO_STATUS_BLOCK = undefined; + const rc = w.ntdll.NtCreateFile( + &handle, + w.FILE_LIST_DIRECTORY, + &attr, + &io, + null, + 0, + w.FILE_SHARE_READ | w.FILE_SHARE_WRITE | w.FILE_SHARE_DELETE, + w.FILE_OPEN, + w.FILE_DIRECTORY_FILE | w.FILE_OPEN_FOR_BACKUP_INTENT, + null, + 0, + ); + + if (rc != .SUCCESS) { + const err = bun.windows.Win32Error.fromNTStatus(rc); + log("failed to open directory for watching: {s}", .{@tagName(err)}); + return Error.CreateFileFailed; + } + errdefer _ = w.kernel32.CloseHandle(handle); + + this.iocp = try w.CreateIoCompletionPort(handle, null, 0, 1); + errdefer _ = w.kernel32.CloseHandle(this.iocp); + + this.watcher = .{ .dirHandle = handle }; + + @memcpy(this.buf[0..root.len], root); + const needs_slash = root.len == 0 or !bun.strings.charIsAnySlash(root[root.len - 1]); + if (needs_slash) { + this.buf[root.len] = '\\'; + } + this.base_idx = if (needs_slash) root.len + 1 else root.len; +} + +const Timeout = enum(w.DWORD) { + infinite = w.INFINITE, + minimal = 1, + none = 0, +}; + +// wait until new events are available +pub fn next(this: *WindowsWatcher, timeout: Timeout) bun.JSC.Maybe(?EventIterator) { + switch (this.watcher.prepare()) { + .err => |err| { + log("prepare() returned error", .{}); + return .{ .err = err }; + }, + .result => {}, + } + + var nbytes: w.DWORD = 0; + var key: w.ULONG_PTR = 0; + var overlapped: ?*w.OVERLAPPED = null; + while (true) { + const rc = w.kernel32.GetQueuedCompletionStatus(this.iocp, &nbytes, &key, &overlapped, @intFromEnum(timeout)); + if (rc == 0) { + const err = w.kernel32.GetLastError(); + if (err == .TIMEOUT or err == .WAIT_TIMEOUT) { + return .{ .result = null }; + } else { + log("GetQueuedCompletionStatus failed: {s}", .{@tagName(err)}); + return .{ .err = .{ + .errno = @intFromEnum(bun.C.SystemErrno.init(err) orelse 
bun.C.SystemErrno.EINVAL), + .syscall = .watch, + } }; + } + } + + if (overlapped) |ptr| { + // ignore possible spurious events + if (ptr != &this.watcher.overlapped) { + continue; + } + if (nbytes == 0) { + // shutdown notification + // TODO close handles? + log("shutdown notification in WindowsWatcher.next", .{}); + return .{ .err = .{ + .errno = @intFromEnum(bun.C.SystemErrno.ESHUTDOWN), + .syscall = .watch, + } }; + } + return .{ .result = EventIterator{ .watcher = &this.watcher } }; + } else { + log("GetQueuedCompletionStatus returned no overlapped event", .{}); + return .{ .err = .{ + .errno = @truncate(@intFromEnum(bun.C.E.INVAL)), + .syscall = .watch, + } }; + } + } +} + +pub fn stop(this: *WindowsWatcher) void { + w.CloseHandle(this.watcher.dirHandle); + w.CloseHandle(this.iocp); +} + +pub fn watchLoopCycle(this: *bun.Watcher) bun.JSC.Maybe(void) { + const buf = &this.platform.buf; + const base_idx = this.platform.base_idx; + + var event_id: usize = 0; + + // first wait has infinite timeout - we're waiting for the next event and don't want to spin + var timeout = WindowsWatcher.Timeout.infinite; + while (true) { + var iter = switch (this.platform.next(timeout)) { + .err => |err| return .{ .err = err }, + .result => |iter| iter orelse break, + }; + // after the first wait, we want to coalesce further events but don't want to wait for them + // NOTE: using a 1ms timeout would be ideal, but that actually makes the thread wait for at least 10ms more than it should + // Instead we use a 0ms timeout, which may not do as much coalescing but is more responsive. + timeout = WindowsWatcher.Timeout.none; + const item_paths = this.watchlist.items(.file_path); + log("number of watched items: {d}", .{item_paths.len}); + while (iter.next()) |event| { + const convert_res = bun.strings.copyUTF16IntoUTF8(buf[base_idx..], []const u16, event.filename, false); + const eventpath = buf[0 .. 
base_idx + convert_res.written]; + + log("watcher update event: (filename: {s}, action: {s}", .{ eventpath, @tagName(event.action) }); + + // TODO this probably needs a more sophisticated search algorithm in the future + // Possible approaches: + // - Keep a sorted list of the watched paths and perform a binary search. We could use a bool to keep + // track of whether the list is sorted and only sort it when we detect a change. + // - Use a prefix tree. Potentially more efficient for large numbers of watched paths, but complicated + // to implement and maintain. + // - others that i'm not thinking of + + for (item_paths, 0..) |path_, item_idx| { + var path = path_; + if (path.len > 0 and bun.strings.charIsAnySlash(path[path.len - 1])) { + path = path[0 .. path.len - 1]; + } + // log("checking path: {s}\n", .{path}); + // check if the current change applies to this item + // if so, add it to the eventlist + const rel = bun.path.isParentOrEqual(eventpath, path); + // skip unrelated items + if (rel == .unrelated) continue; + // if the event is for a parent dir of the item, only emit it if it's a delete or rename + if (rel == .parent and (event.action != .Removed and event.action != .RenamedOld)) continue; + this.watch_events[event_id] = createWatchEvent(event, @truncate(item_idx)); + event_id += 1; + } + } + } + if (event_id == 0) { + return .{ .result = {} }; + } + + // log("event_id: {d}\n", .{event_id}); + + var all_events = this.watch_events[0..event_id]; + std.sort.pdq(WatchEvent, all_events, {}, WatchEvent.sortByIndex); + + var last_event_index: usize = 0; + var last_event_id: u32 = std.math.maxInt(u32); + + for (all_events, 0..) |_, i| { + if (all_events[i].index == last_event_id) { + all_events[last_event_index].merge(all_events[i]); + continue; + } + last_event_index = i; + last_event_id = all_events[i].index; + } + if (all_events.len == 0) return .{ .result = {} }; + all_events = all_events[0 ..
last_event_index + 1]; + + log("calling onFileUpdate (all_events.len = {d})", .{all_events.len}); + + this.onFileUpdate(this.ctx, all_events, this.changed_filepaths[0 .. last_event_index + 1], this.watchlist); + + return .{ .result = {} }; +} + +pub fn createWatchEvent(event: FileEvent, index: WatchItemIndex) WatchEvent { + return .{ + .op = .{ + .delete = event.action == .Removed, + .rename = event.action == .RenamedOld, + .write = event.action == .Modified, + }, + .index = index, + }; +} + +const std = @import("std"); +const bun = @import("root").bun; +const Environment = bun.Environment; +const Output = bun.Output; +const log = Output.scoped(.watcher, false); +const Futex = bun.Futex; +const Mutex = bun.Mutex; +const w = std.os.windows; + +const WatchItemIndex = bun.Watcher.WatchItemIndex; +const WatchEvent = bun.Watcher.WatchEvent; diff --git a/test/js/node/test/common/tmpdir.js b/test/js/node/test/common/tmpdir.js index f1f06818dc..b17a00cc00 100644 --- a/test/js/node/test/common/tmpdir.js +++ b/test/js/node/test/common/tmpdir.js @@ -46,6 +46,7 @@ function refresh(useSpawn = false) { } function onexit(useSpawn) { + return; // Change directory to avoid possible EBUSY if (isMainThread) process.chdir(testRoot); diff --git a/test/js/node/test/parallel/test-fs-error-messages.js b/test/js/node/test/parallel/test-fs-error-messages.js new file mode 100644 index 0000000000..8c50acbac4 --- /dev/null +++ b/test/js/node/test/parallel/test-fs-error-messages.js @@ -0,0 +1,850 @@ +// Flags: --expose-internals +// Copyright Joyent, Inc. and other Node contributors. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +'use strict'; +const common = require('../common'); +const fixtures = require('../common/fixtures'); +const tmpdir = require('../common/tmpdir'); +const assert = require('assert'); +const fs = require('fs'); + +tmpdir.refresh(); + + +const nonexistentFile = tmpdir.resolve('non-existent'); +const nonexistentDir = tmpdir.resolve('non-existent', 'foo', 'bar'); +const existingFile = tmpdir.resolve('existingFile.js'); +const existingFile2 = tmpdir.resolve('existingFile2.js'); +const existingDir = tmpdir.resolve('dir'); +const existingDir2 = fixtures.path('keys'); +fs.mkdirSync(existingDir); +fs.writeFileSync(existingFile, 'test', 'utf-8'); +fs.writeFileSync(existingFile2, 'test', 'utf-8'); + + +const { COPYFILE_EXCL } = fs.constants; +const { internalBinding } = require('internal/test/binding'); +const { + UV_EBADF, + UV_EEXIST, + UV_EINVAL, + UV_ENOENT, + UV_ENOTDIR, + UV_ENOTEMPTY, + UV_EPERM +} = internalBinding('uv'); + +// Template tag function for escaping special characters in strings so that: +// new RegExp(re`${str}`).test(str) === true +function re(literals, ...values) { + const escapeRE = /[\\^$.*+?()[\]{}|=!<>:-]/g; + let result = literals[0].replace(escapeRE, '\\$&'); + for (const [i, value] of values.entries()) { + result += value.replace(escapeRE, '\\$&'); + result += literals[i + 1].replace(escapeRE, '\\$&'); + } + return result; +} + +// stat +{ + const validateError = (err) => { + assert.strictEqual(nonexistentFile, err.path); + assert.strictEqual( + err.message, + `ENOENT: no such file or directory, stat '${nonexistentFile}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'stat'); + return true; + }; + + fs.stat(nonexistentFile, common.mustCall(validateError)); + + assert.throws( + () => fs.statSync(nonexistentFile), + validateError + ); +} + +// lstat +{ + const validateError = (err) => { + assert.strictEqual(nonexistentFile, err.path); + assert.strictEqual( + err.message, 
+ `ENOENT: no such file or directory, lstat '${nonexistentFile}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'lstat'); + return true; + }; + + fs.lstat(nonexistentFile, common.mustCall(validateError)); + assert.throws( + () => fs.lstatSync(nonexistentFile), + validateError + ); +} + +// fstat +{ + const validateError = (err) => { + assert.strictEqual(err.message, 'EBADF: bad file descriptor, fstat'); + assert.strictEqual(err.errno, UV_EBADF); + assert.strictEqual(err.code, 'EBADF'); + assert.strictEqual(err.syscall, 'fstat'); + return true; + }; + + common.runWithInvalidFD((fd) => { + fs.fstat(fd, common.mustCall(validateError)); + + assert.throws( + () => fs.fstatSync(fd), + validateError + ); + }); +} + +// realpath +{ + const validateError = (err) => { + assert.strictEqual(nonexistentFile, err.path); + assert.strictEqual( + err.message, + `ENOENT: no such file or directory, lstat '${nonexistentFile}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'lstat'); + return true; + }; + + fs.realpath(nonexistentFile, common.mustCall(validateError)); + + assert.throws( + () => fs.realpathSync(nonexistentFile), + validateError + ); +} + +// native realpath +{ + const validateError = (err) => { + assert.strictEqual(nonexistentFile, err.path); + assert.strictEqual( + err.message, + `ENOENT: no such file or directory, realpath '${nonexistentFile}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'realpath'); + return true; + }; + + fs.realpath.native(nonexistentFile, common.mustCall(validateError)); + + assert.throws( + () => fs.realpathSync.native(nonexistentFile), + validateError + ); +} + +// readlink +{ + const validateError = (err) => { + assert.strictEqual(nonexistentFile, err.path); + assert.strictEqual( + err.message, + `ENOENT: no such file 
or directory, readlink '${nonexistentFile}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'readlink'); + return true; + }; + + fs.readlink(nonexistentFile, common.mustCall(validateError)); + + assert.throws( + () => fs.readlinkSync(nonexistentFile), + validateError + ); +} + +// Link nonexistent file +{ + const validateError = (err) => { + assert.strictEqual(nonexistentFile, err.path); + // Could be resolved to an absolute path + assert.ok(err.dest.endsWith('foo'), + `expect ${err.dest} to end with 'foo'`); + const regexp = new RegExp('^ENOENT: no such file or directory, link ' + + re`'${nonexistentFile}' -> ` + '\'.*foo\''); + assert.match(err.message, regexp); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'link'); + return true; + }; + + fs.link(nonexistentFile, 'foo', common.mustCall(validateError)); + + assert.throws( + () => fs.linkSync(nonexistentFile, 'foo'), + validateError + ); +} + +// link existing file +{ + const validateError = (err) => { + assert.strictEqual(existingFile, err.path); + assert.strictEqual(existingFile2, err.dest); + assert.strictEqual( + err.message, + `EEXIST: file already exists, link '${existingFile}' -> ` + + `'${existingFile2}'`); + assert.strictEqual(err.errno, UV_EEXIST); + assert.strictEqual(err.code, 'EEXIST'); + assert.strictEqual(err.syscall, 'link'); + return true; + }; + + fs.link(existingFile, existingFile2, common.mustCall(validateError)); + + assert.throws( + () => fs.linkSync(existingFile, existingFile2), + validateError + ); +} + +// symlink +{ + const validateError = (err) => { + assert.strictEqual(existingFile, err.path); + assert.strictEqual(existingFile2, err.dest); + assert.strictEqual( + err.message, + `EEXIST: file already exists, symlink '${existingFile}' -> ` + + `'${existingFile2}'`); + assert.strictEqual(err.errno, UV_EEXIST); + assert.strictEqual(err.code, 
'EEXIST'); + assert.strictEqual(err.syscall, 'symlink'); + return true; + }; + + fs.symlink(existingFile, existingFile2, common.mustCall(validateError)); + + assert.throws( + () => fs.symlinkSync(existingFile, existingFile2), + validateError + ); +} + +// unlink +{ + const validateError = (err) => { + assert.strictEqual(nonexistentFile, err.path); + assert.strictEqual( + err.message, + `ENOENT: no such file or directory, unlink '${nonexistentFile}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'unlink'); + return true; + }; + + fs.unlink(nonexistentFile, common.mustCall(validateError)); + + assert.throws( + () => fs.unlinkSync(nonexistentFile), + validateError + ); +} + +// rename +{ + const validateError = (err) => { + assert.strictEqual(nonexistentFile, err.path); + // Could be resolved to an absolute path + assert.ok(err.dest.endsWith('foo'), + `expect ${err.dest} to end with 'foo'`); + const regexp = new RegExp('ENOENT: no such file or directory, rename ' + + re`'${nonexistentFile}' -> ` + '\'.*foo\''); + assert.match(err.message, regexp); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'rename'); + return true; + }; + + const destFile = tmpdir.resolve('foo'); + fs.rename(nonexistentFile, destFile, common.mustCall(validateError)); + + assert.throws( + () => fs.renameSync(nonexistentFile, destFile), + validateError + ); +} + +// Rename non-empty directory +{ + const validateError = (err) => { + assert.strictEqual(existingDir, err.path); + assert.strictEqual(existingDir2, err.dest); + assert.strictEqual(err.syscall, 'rename'); + // Could be ENOTEMPTY, EEXIST, or EPERM, depending on the platform + if (err.code === 'ENOTEMPTY') { + assert.strictEqual( + err.message, + `ENOTEMPTY: directory not empty, rename '${existingDir}' -> ` + + `'${existingDir2}'`); + assert.strictEqual(err.errno, UV_ENOTEMPTY); + } else if 
(err.code === 'EXDEV') { // Not on the same mounted filesystem + assert.strictEqual( + err.message, + `EXDEV: cross-device link not permitted, rename '${existingDir}' -> ` + + `'${existingDir2}'`); + } else if (err.code === 'EEXIST') { // smartos and aix + assert.strictEqual( + err.message, + `EEXIST: file already exists, rename '${existingDir}' -> ` + + `'${existingDir2}'`); + assert.strictEqual(err.errno, UV_EEXIST); + } else { // windows + assert.strictEqual( + err.message, + `EPERM: operation not permitted, rename '${existingDir}' -> ` + + `'${existingDir2}'`); + assert.strictEqual(err.errno, UV_EPERM); + assert.strictEqual(err.code, 'EPERM'); + } + return true; + }; + + fs.rename(existingDir, existingDir2, common.mustCall(validateError)); + + assert.throws( + () => fs.renameSync(existingDir, existingDir2), + validateError + ); +} + +// rmdir +{ + const validateError = (err) => { + assert.strictEqual(nonexistentFile, err.path); + assert.strictEqual( + err.message, + `ENOENT: no such file or directory, rmdir '${nonexistentFile}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'rmdir'); + return true; + }; + + fs.rmdir(nonexistentFile, common.mustCall(validateError)); + + assert.throws( + () => fs.rmdirSync(nonexistentFile), + validateError + ); +} + +// rmdir a file +{ + const validateError = (err) => { + assert.strictEqual(existingFile, err.path); + assert.strictEqual(err.syscall, 'rmdir'); + if (err.code === 'ENOTDIR') { + assert.strictEqual( + err.message, + `ENOTDIR: not a directory, rmdir '${existingFile}'`); + assert.strictEqual(err.errno, UV_ENOTDIR); + } else { // windows + assert.strictEqual( + err.message, + `ENOENT: no such file or directory, rmdir '${existingFile}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + } + return true; + }; + + fs.rmdir(existingFile, common.mustCall(validateError)); + + assert.throws( + () => 
fs.rmdirSync(existingFile), + validateError + ); +} + +// mkdir +{ + const validateError = (err) => { + assert.strictEqual(existingFile, err.path); + assert.strictEqual( + err.message, + `EEXIST: file already exists, mkdir '${existingFile}'`); + assert.strictEqual(err.errno, UV_EEXIST); + assert.strictEqual(err.code, 'EEXIST'); + assert.strictEqual(err.syscall, 'mkdir'); + return true; + }; + + fs.mkdir(existingFile, 0o666, common.mustCall(validateError)); + + assert.throws( + () => fs.mkdirSync(existingFile, 0o666), + validateError + ); +} + +// chmod +{ + const validateError = (err) => { + assert.strictEqual(nonexistentFile, err.path); + assert.strictEqual( + err.message, + `ENOENT: no such file or directory, chmod '${nonexistentFile}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'chmod'); + return true; + }; + + fs.chmod(nonexistentFile, 0o666, common.mustCall(validateError)); + + assert.throws( + () => fs.chmodSync(nonexistentFile, 0o666), + validateError + ); +} + +// open +{ + const validateError = (err) => { + assert.strictEqual(nonexistentFile, err.path); + assert.strictEqual( + err.message, + `ENOENT: no such file or directory, open '${nonexistentFile}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'open'); + return true; + }; + + fs.open(nonexistentFile, 'r', 0o666, common.mustCall(validateError)); + + assert.throws( + () => fs.openSync(nonexistentFile, 'r', 0o666), + validateError + ); +} + + +// close +{ + const validateError = (err) => { + assert.strictEqual(err.message, 'EBADF: bad file descriptor, close'); + assert.strictEqual(err.errno, UV_EBADF); + assert.strictEqual(err.code, 'EBADF'); + assert.strictEqual(err.syscall, 'close'); + return true; + }; + + common.runWithInvalidFD((fd) => { + fs.close(fd, common.mustCall(validateError)); + + assert.throws( + () => fs.closeSync(fd), + validateError + 
); + }); +} + +// readFile +{ + const validateError = (err) => { + assert.strictEqual(nonexistentFile, err.path); + assert.strictEqual( + err.message, + `ENOENT: no such file or directory, open '${nonexistentFile}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'open'); + return true; + }; + + fs.readFile(nonexistentFile, common.mustCall(validateError)); + + assert.throws( + () => fs.readFileSync(nonexistentFile), + validateError + ); +} + +// readdir +{ + const validateError = (err) => { + assert.strictEqual(nonexistentFile, err.path); + assert.strictEqual( + err.message, + `ENOENT: no such file or directory, scandir '${nonexistentFile}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'scandir'); + return true; + }; + + fs.readdir(nonexistentFile, common.mustCall(validateError)); + + assert.throws( + () => fs.readdirSync(nonexistentFile), + validateError + ); +} + +// ftruncate +{ + const validateError = (err) => { + assert.strictEqual(err.syscall, 'ftruncate'); + // Could be EBADF or EINVAL, depending on the platform + if (err.code === 'EBADF') { + assert.strictEqual(err.message, 'EBADF: bad file descriptor, ftruncate'); + assert.strictEqual(err.errno, UV_EBADF); + } else { + assert.strictEqual(err.message, 'EINVAL: invalid argument, ftruncate'); + assert.strictEqual(err.errno, UV_EINVAL); + assert.strictEqual(err.code, 'EINVAL'); + } + return true; + }; + + common.runWithInvalidFD((fd) => { + fs.ftruncate(fd, 4, common.mustCall(validateError)); + + assert.throws( + () => fs.ftruncateSync(fd, 4), + validateError + ); + }); +} + +// fdatasync +{ + const validateError = (err) => { + assert.strictEqual(err.message, 'EBADF: bad file descriptor, fdatasync'); + assert.strictEqual(err.errno, UV_EBADF); + assert.strictEqual(err.code, 'EBADF'); + assert.strictEqual(err.syscall, 'fdatasync'); + return true; + }; + + 
common.runWithInvalidFD((fd) => { + fs.fdatasync(fd, common.mustCall(validateError)); + + assert.throws( + () => fs.fdatasyncSync(fd), + validateError + ); + }); +} + +// fsync +{ + const validateError = (err) => { + assert.strictEqual(err.message, 'EBADF: bad file descriptor, fsync'); + assert.strictEqual(err.errno, UV_EBADF); + assert.strictEqual(err.code, 'EBADF'); + assert.strictEqual(err.syscall, 'fsync'); + return true; + }; + + common.runWithInvalidFD((fd) => { + fs.fsync(fd, common.mustCall(validateError)); + + assert.throws( + () => fs.fsyncSync(fd), + validateError + ); + }); +} + +// chown +if (!common.isWindows) { + const validateError = (err) => { + assert.strictEqual(nonexistentFile, err.path); + assert.strictEqual( + err.message, + `ENOENT: no such file or directory, chown '${nonexistentFile}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'chown'); + return true; + }; + + fs.chown(nonexistentFile, process.getuid(), process.getgid(), + common.mustCall(validateError)); + + assert.throws( + () => fs.chownSync(nonexistentFile, + process.getuid(), process.getgid()), + validateError + ); +} + +// utimes +if (!common.isAIX) { + const validateError = (err) => { + assert.strictEqual(nonexistentFile, err.path); + assert.strictEqual( + err.message, + `ENOENT: no such file or directory, utime '${nonexistentFile}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'utime'); + return true; + }; + + fs.utimes(nonexistentFile, new Date(), new Date(), + common.mustCall(validateError)); + + assert.throws( + () => fs.utimesSync(nonexistentFile, new Date(), new Date()), + validateError + ); +} + +// mkdtemp +{ + const validateError = (err) => { + const pathPrefix = new RegExp('^' + re`${nonexistentDir}`); + assert.match(err.path, pathPrefix); + + const prefix = new RegExp('^ENOENT: no such file or directory, mkdtemp ' + + 
re`'${nonexistentDir}`); + assert.match(err.message, prefix); + + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'mkdtemp'); + return true; + }; + + fs.mkdtemp(nonexistentDir, common.mustCall(validateError)); + + assert.throws( + () => fs.mkdtempSync(nonexistentDir), + validateError + ); +} + +// Check copyFile with invalid modes. +{ + const validateError = { + code: 'ERR_OUT_OF_RANGE', + }; + + assert.throws( + () => fs.copyFile(existingFile, nonexistentFile, -1, () => {}), + validateError + ); + assert.throws( + () => fs.copyFileSync(existingFile, nonexistentFile, -1), + validateError + ); +} + +// copyFile: destination exists but the COPYFILE_EXCL flag is provided. +{ + const validateError = (err) => { + if (err.code === 'ENOENT') { // Could be ENOENT or EEXIST + assert.strictEqual(err.message, + 'ENOENT: no such file or directory, copyfile ' + + `'${existingFile}' -> '${existingFile2}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'copyfile'); + } else { + assert.strictEqual(err.message, + 'EEXIST: file already exists, copyfile ' + + `'${existingFile}' -> '${existingFile2}'`); + assert.strictEqual(err.errno, UV_EEXIST); + assert.strictEqual(err.code, 'EEXIST'); + assert.strictEqual(err.syscall, 'copyfile'); + } + return true; + }; + + fs.copyFile(existingFile, existingFile2, COPYFILE_EXCL, + common.mustCall(validateError)); + + assert.throws( + () => fs.copyFileSync(existingFile, existingFile2, COPYFILE_EXCL), + validateError + ); +} + +// copyFile: the source does not exist. 
+{ + const validateError = (err) => { + assert.strictEqual(err.message, + 'ENOENT: no such file or directory, copyfile ' + + `'${nonexistentFile}' -> '${existingFile2}'`); + assert.strictEqual(err.errno, UV_ENOENT); + assert.strictEqual(err.code, 'ENOENT'); + assert.strictEqual(err.syscall, 'copyfile'); + return true; + }; + + fs.copyFile(nonexistentFile, existingFile2, COPYFILE_EXCL, + common.mustCall(validateError)); + + assert.throws( + () => fs.copyFileSync(nonexistentFile, existingFile2, COPYFILE_EXCL), + validateError + ); +} + +// read +{ + const validateError = (err) => { + assert.strictEqual(err.message, 'EBADF: bad file descriptor, read'); + assert.strictEqual(err.errno, UV_EBADF); + assert.strictEqual(err.code, 'EBADF'); + assert.strictEqual(err.syscall, 'read'); + return true; + }; + + common.runWithInvalidFD((fd) => { + const buf = Buffer.alloc(5); + fs.read(fd, buf, 0, 1, 1, common.mustCall(validateError)); + + assert.throws( + () => fs.readSync(fd, buf, 0, 1, 1), + validateError + ); + }); +} + +// fchmod +{ + const validateError = (err) => { + assert.strictEqual(err.message, 'EBADF: bad file descriptor, fchmod'); + assert.strictEqual(err.errno, UV_EBADF); + assert.strictEqual(err.code, 'EBADF'); + assert.strictEqual(err.syscall, 'fchmod'); + return true; + }; + + common.runWithInvalidFD((fd) => { + fs.fchmod(fd, 0o666, common.mustCall(validateError)); + + assert.throws( + () => fs.fchmodSync(fd, 0o666), + validateError + ); + }); +} + +// fchown +if (!common.isWindows) { + const validateError = (err) => { + assert.strictEqual(err.message, 'EBADF: bad file descriptor, fchown'); + assert.strictEqual(err.errno, UV_EBADF); + assert.strictEqual(err.code, 'EBADF'); + assert.strictEqual(err.syscall, 'fchown'); + return true; + }; + + common.runWithInvalidFD((fd) => { + fs.fchown(fd, process.getuid(), process.getgid(), + common.mustCall(validateError)); + + assert.throws( + () => fs.fchownSync(fd, process.getuid(), process.getgid()), + validateError + ); + 
}); +} + +// write buffer +{ + const validateError = (err) => { + assert.strictEqual(err.message, 'EBADF: bad file descriptor, write'); + assert.strictEqual(err.errno, UV_EBADF); + assert.strictEqual(err.code, 'EBADF'); + assert.strictEqual(err.syscall, 'write'); + return true; + }; + + common.runWithInvalidFD((fd) => { + const buf = Buffer.alloc(5); + fs.write(fd, buf, 0, 1, 1, common.mustCall(validateError)); + + assert.throws( + () => fs.writeSync(fd, buf, 0, 1, 1), + validateError + ); + }); +} + +// write string +{ + const validateError = (err) => { + assert.strictEqual(err.message, 'EBADF: bad file descriptor, write'); + assert.strictEqual(err.errno, UV_EBADF); + assert.strictEqual(err.code, 'EBADF'); + assert.strictEqual(err.syscall, 'write'); + return true; + }; + + common.runWithInvalidFD((fd) => { + fs.write(fd, 'test', 1, common.mustCall(validateError)); + + assert.throws( + () => fs.writeSync(fd, 'test', 1), + validateError + ); + }); +} + + +// futimes +if (!common.isAIX) { + const validateError = (err) => { + assert.strictEqual(err.message, 'EBADF: bad file descriptor, futime'); + assert.strictEqual(err.errno, UV_EBADF); + assert.strictEqual(err.code, 'EBADF'); + assert.strictEqual(err.syscall, 'futime'); + return true; + }; + + common.runWithInvalidFD((fd) => { + fs.futimes(fd, new Date(), new Date(), common.mustCall(validateError)); + + assert.throws( + () => fs.futimesSync(fd, new Date(), new Date()), + validateError + ); + }); +} diff --git a/test/js/node/test/parallel/test-fs-existssync-false.js b/test/js/node/test/parallel/test-fs-existssync-false.js new file mode 100644 index 0000000000..43e826cef5 --- /dev/null +++ b/test/js/node/test/parallel/test-fs-existssync-false.js @@ -0,0 +1,32 @@ +'use strict'; +const common = require('../common'); +const tmpdir = require('../common/tmpdir'); + +// This test ensures that fs.existsSync doesn't incorrectly return false. 
+// (especially on Windows) +// https://github.com/nodejs/node-v0.x-archive/issues/3739 + +const assert = require('assert'); +const fs = require('fs'); +const path = require('path'); + +let dir = path.resolve(tmpdir.path); + +// Make sure that the tmp directory is clean +tmpdir.refresh(); + +// Make a long path. +for (let i = 0; i < 50; i++) { + dir = `${dir}/1234567890`; +} + +fs.mkdirSync(dir, { + mode: '0777', + recursive: true, +}); + +// Test if file exists synchronously +assert(fs.existsSync(dir), 'Directory is not accessible'); + +// Test if file exists asynchronously +fs.access(dir, common.mustSucceed()); diff --git a/test/js/node/test/parallel/test-fs-fmap.js b/test/js/node/test/parallel/test-fs-fmap.js new file mode 100644 index 0000000000..c4298f0d0e --- /dev/null +++ b/test/js/node/test/parallel/test-fs-fmap.js @@ -0,0 +1,28 @@ +'use strict'; +require('../common'); +const assert = require('assert'); +const fs = require('fs'); + +const { + O_CREAT = 0, + O_RDONLY = 0, + O_TRUNC = 0, + O_WRONLY = 0, + UV_FS_O_FILEMAP = 0 +} = fs.constants; + +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); + +// Run this test on all platforms. While UV_FS_O_FILEMAP is only available on +// Windows, it should be silently ignored on other platforms. 
+ +const filename = tmpdir.resolve('fmap.txt'); +const text = 'Memory File Mapping Test'; + +const mw = UV_FS_O_FILEMAP | O_TRUNC | O_CREAT | O_WRONLY; +const mr = UV_FS_O_FILEMAP | O_RDONLY; + +fs.writeFileSync(filename, text, { flag: mw }); +const r1 = fs.readFileSync(filename, { encoding: 'utf8', flag: mr }); +assert.strictEqual(r1, text); diff --git a/test/js/node/test/parallel/test-fs-promises-file-handle-read.js b/test/js/node/test/parallel/test-fs-promises-file-handle-read.js new file mode 100644 index 0000000000..2e9534c398 --- /dev/null +++ b/test/js/node/test/parallel/test-fs-promises-file-handle-read.js @@ -0,0 +1,129 @@ +'use strict'; + +const common = require('../common'); + +// The following tests validate base functionality for the fs.promises +// FileHandle.read method. + +const fs = require('fs'); +const { open } = fs.promises; +const path = require('path'); +const fixtures = require('../common/fixtures'); +const tmpdir = require('../common/tmpdir'); +const assert = require('assert'); +const tmpDir = tmpdir.path; + +async function read(fileHandle, buffer, offset, length, position, options) { + return options?.useConf ? 
+ fileHandle.read({ buffer, offset, length, position }) : + fileHandle.read(buffer, offset, length, position); +} + +async function validateRead(data, file, options) { + const filePath = path.resolve(tmpDir, file); + const buffer = Buffer.from(data, 'utf8'); + + const fd = fs.openSync(filePath, 'w+'); + const fileHandle = await open(filePath, 'w+'); + const streamFileHandle = await open(filePath, 'w+'); + + fs.writeSync(fd, buffer, 0, buffer.length); + fs.closeSync(fd); + + fileHandle.on('close', common.mustCall()); + const readAsyncHandle = + await read(fileHandle, Buffer.alloc(11), 0, 11, 0, options); + assert.deepStrictEqual(data.length, readAsyncHandle.bytesRead); + if (data.length) + assert.deepStrictEqual(buffer, readAsyncHandle.buffer); + await fileHandle.close(); + + const stream = fs.createReadStream(null, { fd: streamFileHandle }); + let streamData = Buffer.alloc(0); + for await (const chunk of stream) + streamData = Buffer.from(chunk); + assert.deepStrictEqual(buffer, streamData); + if (data.length) + assert.deepStrictEqual(streamData, readAsyncHandle.buffer); + await streamFileHandle.close(); +} + +async function validateLargeRead(options) { + // Reading beyond file length (3 in this case) should return no data. + // This is a test for a bug where reads > uint32 would return data + // from the current position in the file. + const filePath = fixtures.path('x.txt'); + const fileHandle = await open(filePath, 'r'); + const pos = 0xffffffff + 1; // max-uint32 + 1 + const readHandle = + await read(fileHandle, Buffer.alloc(1), 0, 1, pos, options); + + assert.strictEqual(readHandle.bytesRead, 0); +} + +async function validateReadNoParams() { + const filePath = fixtures.path('x.txt'); + const fileHandle = await open(filePath, 'r'); + // Should not throw + await fileHandle.read(); +} + +// Validates that the zero position is respected after the position has been +// moved. 
The test iterates over the xyz chars twice making sure that the values +// are read from the correct position. +async function validateReadWithPositionZero() { + const opts = { useConf: true }; + const filePath = fixtures.path('x.txt'); + const fileHandle = await open(filePath, 'r'); + const expectedSequence = ['x', 'y', 'z']; + + for (let i = 0; i < expectedSequence.length * 2; i++) { + const len = 1; + const pos = i % 3; + const buf = Buffer.alloc(len); + const { bytesRead } = await read(fileHandle, buf, 0, len, pos, opts); + assert.strictEqual(bytesRead, len); + assert.strictEqual(buf.toString(), expectedSequence[pos]); + } +} + +async function validateReadLength(len) { + const buf = Buffer.alloc(4); + const opts = { useConf: true }; + const filePath = fixtures.path('x.txt'); + const fileHandle = await open(filePath, 'r'); + const { bytesRead } = await read(fileHandle, buf, 0, len, 0, opts); + assert.strictEqual(bytesRead, len); +} + +async function validateReadWithNoOptions(byte) { + const buf = Buffer.alloc(byte); + const filePath = fixtures.path('x.txt'); + const fileHandle = await open(filePath, 'r'); + let response = await fileHandle.read(buf); + assert.strictEqual(response.bytesRead, byte); + response = await read(fileHandle, buf, 0, undefined, 0); + assert.strictEqual(response.bytesRead, byte); + response = await read(fileHandle, buf, 0, null, 0); + assert.strictEqual(response.bytesRead, byte); + response = await read(fileHandle, buf, 0, undefined, 0, { useConf: true }); + assert.strictEqual(response.bytesRead, byte); + response = await read(fileHandle, buf, 0, null, 0, { useConf: true }); + assert.strictEqual(response.bytesRead, byte); +} + +(async function() { + tmpdir.refresh(); + await validateRead('Hello world', 'read-file', { useConf: false }); + await validateRead('', 'read-empty-file', { useConf: false }); + await validateRead('Hello world', 'read-file-conf', { useConf: true }); + await validateRead('', 'read-empty-file-conf', { useConf: true }); 
+ await validateLargeRead({ useConf: false }); + await validateLargeRead({ useConf: true }); + await validateReadNoParams(); + await validateReadWithPositionZero(); + await validateReadLength(0); + await validateReadLength(1); + await validateReadWithNoOptions(0); + await validateReadWithNoOptions(1); +})().then(common.mustCall()); diff --git a/test/js/node/test/parallel/test-fs-promises-write-optional-params.js b/test/js/node/test/parallel/test-fs-promises-write-optional-params.js new file mode 100644 index 0000000000..739875cb2c --- /dev/null +++ b/test/js/node/test/parallel/test-fs-promises-write-optional-params.js @@ -0,0 +1,110 @@ +'use strict'; + +const common = require('../common'); + +// This test ensures that filehandle.write accepts "named parameters" object +// and doesn't interpret objects as strings + +const assert = require('assert'); +const fsPromises = require('fs').promises; +const tmpdir = require('../common/tmpdir'); + +tmpdir.refresh(); + +const dest = tmpdir.resolve('tmp.txt'); +const buffer = Buffer.from('zyx'); + +async function testInvalid(dest, expectedCode, ...params) { + if (params.length >= 2) { + params[1] = common.mustNotMutateObjectDeep(params[1]); + } + let fh; + try { + fh = await fsPromises.open(dest, 'w+'); + await assert.rejects( + fh.write(...params), + { code: expectedCode }); + } finally { + await fh?.close(); + } +} + +async function testValid(dest, buffer, options) { + const length = options?.length; + const offset = options?.offset; + let fh, writeResult, writeBufCopy, readResult, readBufCopy; + + try { + fh = await fsPromises.open(dest, 'w'); + writeResult = await fh.write(buffer, options); + writeBufCopy = Uint8Array.prototype.slice.call(writeResult.buffer); + } finally { + await fh?.close(); + } + + try { + fh = await fsPromises.open(dest, 'r'); + readResult = await fh.read(buffer, options); + readBufCopy = Uint8Array.prototype.slice.call(readResult.buffer); + } finally { + await fh?.close(); + } + + 
assert.ok(writeResult.bytesWritten >= readResult.bytesRead); + if (length !== undefined && length !== null) { + assert.strictEqual(writeResult.bytesWritten, length); + assert.strictEqual(readResult.bytesRead, length); + } + if (offset === undefined || offset === 0) { + assert.deepStrictEqual(writeBufCopy, readBufCopy); + } + assert.deepStrictEqual(writeResult.buffer, readResult.buffer); +} + +(async () => { + // Test if first argument is not wrongly interpreted as ArrayBufferView|string + for (const badBuffer of [ + undefined, null, true, 42, 42n, Symbol('42'), NaN, [], () => {}, + common.mustNotCall(), + common.mustNotMutateObjectDeep({}), + Promise.resolve(new Uint8Array(1)), + {}, + { buffer: 'amNotParam' }, + { string: 'amNotParam' }, + { buffer: new Uint8Array(1).buffer }, + new Date(), + new String('notPrimitive'), + { toString() { return 'amObject'; } }, + { [Symbol.toPrimitive]: (hint) => 'amObject' }, + ]) { + await testInvalid(dest, 'ERR_INVALID_ARG_TYPE', common.mustNotMutateObjectDeep(badBuffer), {}); + } + + // First argument (buffer or string) is mandatory + await testInvalid(dest, 'ERR_INVALID_ARG_TYPE'); + + // Various invalid options + await testInvalid(dest, 'ERR_OUT_OF_RANGE', buffer, { length: 5 }); + await testInvalid(dest, 'ERR_OUT_OF_RANGE', buffer, { offset: 5 }); + await testInvalid(dest, 'ERR_OUT_OF_RANGE', buffer, { length: 1, offset: 3 }); + await testInvalid(dest, 'ERR_OUT_OF_RANGE', buffer, { length: -1 }); + await testInvalid(dest, 'ERR_OUT_OF_RANGE', buffer, { offset: -1 }); + await testInvalid(dest, 'ERR_INVALID_ARG_TYPE', buffer, { offset: false }); + await testInvalid(dest, 'ERR_INVALID_ARG_TYPE', buffer, { offset: true }); + + // Test compatibility with filehandle.read counterpart + for (const options of [ + undefined, + null, + {}, + { length: 1 }, + { position: 5 }, + { length: 1, position: 5 }, + { length: 1, position: -1, offset: 2 }, + { length: null }, + { position: null }, + { offset: 1 }, + ]) { + await testValid(dest, 
buffer, common.mustNotMutateObjectDeep(options)); + } +})().then(common.mustCall()); diff --git a/test/js/node/test/parallel/test-fs-readdir-ucs2.js b/test/js/node/test/parallel/test-fs-readdir-ucs2.js new file mode 100644 index 0000000000..264858ec6a --- /dev/null +++ b/test/js/node/test/parallel/test-fs-readdir-ucs2.js @@ -0,0 +1,31 @@ +'use strict'; + +const common = require('../common'); +if (!common.isLinux) + common.skip('Test is linux specific.'); + +const path = require('path'); +const fs = require('fs'); +const assert = require('assert'); + +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); +const filename = '\uD83D\uDC04'; +const root = Buffer.from(`${tmpdir.path}${path.sep}`); +const filebuff = Buffer.from(filename, 'ucs2'); +const fullpath = Buffer.concat([root, filebuff]); + +try { + fs.closeSync(fs.openSync(fullpath, 'w+')); +} catch (e) { + if (e.code === 'EINVAL') + common.skip('test requires filesystem that supports UCS2'); + throw e; +} + +fs.readdir(tmpdir.path, 'ucs2', common.mustSucceed((list) => { + assert.strictEqual(list.length, 1); + const fn = list[0]; + assert.deepStrictEqual(Buffer.from(fn, 'ucs2'), filebuff); + assert.strictEqual(fn, filename); +})); diff --git a/test/js/node/test/parallel/test-fs-readfile-flags.js b/test/js/node/test/parallel/test-fs-readfile-flags.js new file mode 100644 index 0000000000..72b910aeeb --- /dev/null +++ b/test/js/node/test/parallel/test-fs-readfile-flags.js @@ -0,0 +1,50 @@ +'use strict'; + +// Test of fs.readFile with different flags. 
+const common = require('../common'); +const fs = require('fs'); +const assert = require('assert'); +const tmpdir = require('../common/tmpdir'); + +tmpdir.refresh(); + +{ + const emptyFile = tmpdir.resolve('empty.txt'); + fs.closeSync(fs.openSync(emptyFile, 'w')); + + fs.readFile( + emptyFile, + // With `a+` the file is created if it does not exist + common.mustNotMutateObjectDeep({ encoding: 'utf8', flag: 'a+' }), + common.mustCall((err, data) => { assert.strictEqual(data, ''); }) + ); + + fs.readFile( + emptyFile, + // Like `a+` but fails if the path exists. + common.mustNotMutateObjectDeep({ encoding: 'utf8', flag: 'ax+' }), + common.mustCall((err, data) => { assert.strictEqual(err.code, 'EEXIST'); }) + ); +} + +{ + const willBeCreated = tmpdir.resolve('will-be-created'); + + fs.readFile( + willBeCreated, + // With `a+` the file is created if it does not exist + common.mustNotMutateObjectDeep({ encoding: 'utf8', flag: 'a+' }), + common.mustCall((err, data) => { assert.strictEqual(data, ''); }) + ); +} + +{ + const willNotBeCreated = tmpdir.resolve('will-not-be-created'); + + fs.readFile( + willNotBeCreated, + // Default flag is `r`. An exception occurs if the file does not exist. + common.mustNotMutateObjectDeep({ encoding: 'utf8' }), + common.mustCall((err, data) => { assert.strictEqual(err.code, 'ENOENT'); }) + ); +} diff --git a/test/js/node/test/parallel/test-fs-readfilesync-enoent.js b/test/js/node/test/parallel/test-fs-readfilesync-enoent.js new file mode 100644 index 0000000000..baf87ff990 --- /dev/null +++ b/test/js/node/test/parallel/test-fs-readfilesync-enoent.js @@ -0,0 +1,32 @@ +'use strict'; +const common = require('../common'); + +// This test is only relevant on Windows. +if (!common.isWindows) + common.skip('Windows specific test.'); + +// This test ensures fs.realpathSync works on properly on Windows without +// throwing ENOENT when the path involves a fileserver. 
+// https://github.com/nodejs/node-v0.x-archive/issues/3542 + +const assert = require('assert'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); + +function test(p) { + const result = fs.realpathSync(p); + assert.strictEqual(result.toLowerCase(), path.resolve(p).toLowerCase()); + + fs.realpath(p, common.mustSucceed((result) => { + assert.strictEqual(result.toLowerCase(), path.resolve(p).toLowerCase()); + })); +} + +test(`//${os.hostname()}/c$/Windows/System32`); +test(`//${os.hostname()}/c$/Windows`); +test(`//${os.hostname()}/c$/`); +test(`\\\\${os.hostname()}\\c$\\`); +test('C:\\'); +test('C:'); +test(process.env.windir); diff --git a/test/js/node/test/parallel/test-fs-realpath-on-substed-drive.js b/test/js/node/test/parallel/test-fs-realpath-on-substed-drive.js new file mode 100644 index 0000000000..aea53f642f --- /dev/null +++ b/test/js/node/test/parallel/test-fs-realpath-on-substed-drive.js @@ -0,0 +1,51 @@ +'use strict'; + +const common = require('../common'); +if (!common.isWindows) + common.skip('Test for Windows only'); + +const fixtures = require('../common/fixtures'); + +const assert = require('assert'); +const fs = require('fs'); +const spawnSync = require('child_process').spawnSync; + +let result; + +// Create a subst drive +const driveLetters = 'ABCDEFGHIJKLMNOPQRSTUWXYZ'; +let drive; +let i; +for (i = 0; i < driveLetters.length; ++i) { + drive = `${driveLetters[i]}:`; + result = spawnSync('subst', [drive, fixtures.fixturesDir]); + if (result.status === 0) + break; +} +if (i === driveLetters.length) + common.skip('Cannot create subst drive'); + +// Schedule cleanup (and check if all callbacks where called) +process.on('exit', function() { + spawnSync('subst', ['/d', drive]); +}); + +// test: +const filename = `${drive}\\empty.js`; +const filenameBuffer = Buffer.from(filename); + +result = fs.realpathSync(filename); +assert.strictEqual(result, filename); + +result = fs.realpathSync(filename, 'buffer'); 
+assert(Buffer.isBuffer(result)); +assert(result.equals(filenameBuffer)); + +fs.realpath(filename, common.mustSucceed((result) => { + assert.strictEqual(result, filename); +})); + +fs.realpath(filename, 'buffer', common.mustSucceed((result) => { + assert(Buffer.isBuffer(result)); + assert(result.equals(filenameBuffer)); +})); diff --git a/test/js/node/test/parallel/test-fs-symlink-dir-junction.js b/test/js/node/test/parallel/test-fs-symlink-dir-junction.js new file mode 100644 index 0000000000..4d5db3b444 --- /dev/null +++ b/test/js/node/test/parallel/test-fs-symlink-dir-junction.js @@ -0,0 +1,63 @@ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +'use strict'; +const common = require('../common'); +const fixtures = require('../common/fixtures'); +const assert = require('assert'); +const fs = require('fs'); + +const tmpdir = require('../common/tmpdir'); + +// Test creating and reading symbolic link +const linkData = fixtures.path('cycles'); +const linkPath = tmpdir.resolve('cycles_link'); + +tmpdir.refresh(); + +fs.symlink(linkData, linkPath, 'junction', common.mustSucceed(() => { + fs.lstat(linkPath, common.mustSucceed((stats) => { + assert.ok(stats.isSymbolicLink()); + + fs.readlink(linkPath, common.mustSucceed((destination) => { + assert.strictEqual(destination, linkData); + + fs.unlink(linkPath, common.mustSucceed(() => { + assert(!fs.existsSync(linkPath)); + assert(fs.existsSync(linkData)); + })); + })); + })); +})); + +// Test invalid symlink +{ + const linkData = fixtures.path('/not/exists/dir'); + const linkPath = tmpdir.resolve('invalid_junction_link'); + + fs.symlink(linkData, linkPath, 'junction', common.mustSucceed(() => { + assert(!fs.existsSync(linkPath)); + + fs.unlink(linkPath, common.mustSucceed(() => { + assert(!fs.existsSync(linkPath)); + })); + })); +} diff --git a/test/js/node/test/parallel/test-fs-symlink-dir.js b/test/js/node/test/parallel/test-fs-symlink-dir.js new file mode 100644 index 0000000000..690e3302ed --- /dev/null +++ b/test/js/node/test/parallel/test-fs-symlink-dir.js @@ -0,0 +1,81 @@ +'use strict'; +const common = require('../common'); + +// Test creating a symbolic link pointing to a directory. 
+// Ref: https://github.com/nodejs/node/pull/23724 +// Ref: https://github.com/nodejs/node/issues/23596 + + +if (!common.canCreateSymLink()) + common.skip('insufficient privileges'); + +const assert = require('assert'); +const path = require('path'); +const fs = require('fs'); +const fsPromises = fs.promises; + +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); + +const linkTargets = [ + 'relative-target', + tmpdir.resolve('absolute-target'), +]; +const linkPaths = [ + path.relative(process.cwd(), tmpdir.resolve('relative-path')), + tmpdir.resolve('absolute-path'), +]; + +function testSync(target, path) { + fs.symlinkSync(target, path); + fs.readdirSync(path); +} + +function testAsync(target, path) { + fs.symlink(target, path, common.mustSucceed(() => { + fs.readdirSync(path); + })); +} + +async function testPromises(target, path) { + await fsPromises.symlink(target, path); + fs.readdirSync(path); +} + +for (const linkTarget of linkTargets) { + fs.mkdirSync(tmpdir.resolve(linkTarget)); + for (const linkPath of linkPaths) { + testSync(linkTarget, `${linkPath}-${path.basename(linkTarget)}-sync`); + testAsync(linkTarget, `${linkPath}-${path.basename(linkTarget)}-async`); + testPromises(linkTarget, `${linkPath}-${path.basename(linkTarget)}-promises`) + .then(common.mustCall()); + } +} + +// Test invalid symlink +{ + function testSync(target, path) { + fs.symlinkSync(target, path); + assert(!fs.existsSync(path)); + } + + function testAsync(target, path) { + fs.symlink(target, path, common.mustSucceed(() => { + assert(!fs.existsSync(path)); + })); + } + + async function testPromises(target, path) { + await fsPromises.symlink(target, path); + assert(!fs.existsSync(path)); + } + + for (const linkTarget of linkTargets.map((p) => p + '-broken')) { + for (const linkPath of linkPaths) { + testSync(linkTarget, `${linkPath}-${path.basename(linkTarget)}-sync`); + testAsync(linkTarget, `${linkPath}-${path.basename(linkTarget)}-async`); + testPromises(linkTarget, 
`${linkPath}-${path.basename(linkTarget)}-promises`) + .then(common.mustCall()); + } + } +} diff --git a/test/js/node/test/parallel/test-fs-symlink-longpath.js b/test/js/node/test/parallel/test-fs-symlink-longpath.js new file mode 100644 index 0000000000..f3586317c2 --- /dev/null +++ b/test/js/node/test/parallel/test-fs-symlink-longpath.js @@ -0,0 +1,27 @@ +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const path = require('path'); +const fs = require('fs'); + +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); +const tmpDir = tmpdir.path; +const longPath = path.join(...[tmpDir].concat(Array(30).fill('1234567890'))); +fs.mkdirSync(longPath, { recursive: true }); + +// Test if we can have symlinks to files and folders with long filenames +const targetDirectory = path.join(longPath, 'target-directory'); +fs.mkdirSync(targetDirectory); +const pathDirectory = path.join(tmpDir, 'new-directory'); +fs.symlink(targetDirectory, pathDirectory, 'dir', common.mustSucceed(() => { + assert(fs.existsSync(pathDirectory)); +})); + +const targetFile = path.join(longPath, 'target-file'); +fs.writeFileSync(targetFile, 'data'); +const pathFile = path.join(tmpDir, 'new-file'); +fs.symlink(targetFile, pathFile, common.mustSucceed(() => { + assert(fs.existsSync(pathFile)); +}));
diff --git a/test/js/node/test/parallel/test-fs-watch-recursive-add-file-to-new-folder.js b/test/js/node/test/parallel/test-fs-watch-recursive-add-file-to-new-folder.js new file mode 100644 index 0000000000..fcc49bb746 --- /dev/null +++ b/test/js/node/test/parallel/test-fs-watch-recursive-add-file-to-new-folder.js @@ -0,0 +1,53 @@ +'use strict'; + +const common = require('../common'); + +if (common.isIBMi) + common.skip('IBMi does not support `fs.watch()`'); + +// fs-watch on folders have limited capability in AIX. +// The testcase makes use of folder watching, and causes +// hang. This behavior is documented. Skip this for AIX. 
+ +if (common.isAIX) + common.skip('folder watch capability is limited in AIX.'); + +const assert = require('assert'); +const path = require('path'); +const fs = require('fs'); + +const tmpdir = require('../common/tmpdir'); +const testDir = tmpdir.path; +tmpdir.refresh(); + +// Add a file to newly created folder to already watching folder + +const rootDirectory = fs.mkdtempSync(testDir + path.sep); +const testDirectory = path.join(rootDirectory, 'test-3'); +fs.mkdirSync(testDirectory); + +const filePath = path.join(testDirectory, 'folder-3'); + +const childrenFile = 'file-4.txt'; +const childrenAbsolutePath = path.join(filePath, childrenFile); +const childrenRelativePath = path.join(path.basename(filePath), childrenFile); +let watcherClosed = false; + +const watcher = fs.watch(testDirectory, { recursive: true }); +watcher.on('change', function(event, filename) { + if (filename === childrenRelativePath) { + assert.strictEqual(event, 'rename'); + watcher.close(); + watcherClosed = true; + } +}); + +// Do the write with a delay to ensure that the OS is ready to notify us. +setTimeout(() => { + fs.mkdirSync(filePath); + fs.writeFileSync(childrenAbsolutePath, 'world'); +}, common.platformTimeout(200)); + +process.once('exit', function() { + assert(watcherClosed, 'watcher Object was not closed'); +}); diff --git a/test/js/node/test/parallel/test-fs-watch-recursive-linux-parallel-remove.js b/test/js/node/test/parallel/test-fs-watch-recursive-linux-parallel-remove.js new file mode 100644 index 0000000000..145b3314f2 --- /dev/null +++ b/test/js/node/test/parallel/test-fs-watch-recursive-linux-parallel-remove.js @@ -0,0 +1,33 @@ +'use strict'; + +const common = require('../common'); + +if (!common.isLinux) + common.skip('This test can run only on Linux'); + +// Test that the watcher do not crash if the file "disappears" while +// watch is being set up. 
+ +const path = require('node:path'); +const fs = require('node:fs'); +const { spawn } = require('node:child_process'); + +const tmpdir = require('../common/tmpdir'); +const testDir = tmpdir.path; +tmpdir.refresh(); + +const watcher = fs.watch(testDir, { recursive: true }); +watcher.on('change', function(event, filename) { + // This console.log makes the error happen + // do not remove + console.log(filename, event); +}); + +const testFile = path.join(testDir, 'a'); +const child = spawn(process.argv[0], ['-e', `const fs = require('node:fs'); for (let i = 0; i < 10000; i++) { const fd = fs.openSync('${testFile}', 'w'); fs.writeSync(fd, Buffer.from('hello')); fs.rmSync('${testFile}') }`], { + stdio: 'inherit' +}); + +child.on('exit', function() { + watcher.close(); +}); diff --git a/test/js/node/test/parallel/test-fs-watch-recursive-symlink.js b/test/js/node/test/parallel/test-fs-watch-recursive-symlink.js new file mode 100644 index 0000000000..37f71f56f8 --- /dev/null +++ b/test/js/node/test/parallel/test-fs-watch-recursive-symlink.js @@ -0,0 +1,111 @@ +'use strict'; + +const common = require('../common'); +const { setTimeout } = require('timers/promises'); + +if (common.isIBMi) + common.skip('IBMi does not support `fs.watch()`'); + +// fs-watch on folders have limited capability in AIX. +// The testcase makes use of folder watching, and causes +// hang. This behavior is documented. Skip this for AIX. + +if (common.isAIX) + common.skip('folder watch capability is limited in AIX.'); + +const assert = require('assert'); +const path = require('path'); +const fs = require('fs'); + +const tmpdir = require('../common/tmpdir'); +const testDir = tmpdir.path; +tmpdir.refresh(); + +(async () => { + // Add a recursive symlink to the parent folder + + const testDirectory = fs.mkdtempSync(testDir + path.sep); + + // Do not use `testDirectory` as base. It will hang the tests. 
+ const rootDirectory = path.join(testDirectory, 'test-1'); + fs.mkdirSync(rootDirectory); + + const filePath = path.join(rootDirectory, 'file.txt'); + + const symlinkFolder = path.join(rootDirectory, 'symlink-folder'); + fs.symlinkSync(rootDirectory, symlinkFolder); + + if (common.isMacOS) { + // On macOS delay watcher start to avoid leaking previous events. + // Refs: https://github.com/libuv/libuv/pull/4503 + await setTimeout(common.platformTimeout(100)); + } + + const watcher = fs.watch(rootDirectory, { recursive: true }); + let watcherClosed = false; + watcher.on('change', function(event, filename) { + assert.ok(event === 'rename', `Received ${event}`); + assert.ok(filename === path.basename(symlinkFolder) || filename === path.basename(filePath), `Received ${filename}`); + + if (filename === path.basename(filePath)) { + watcher.close(); + watcherClosed = true; + } + }); + + await setTimeout(common.platformTimeout(100)); + fs.writeFileSync(filePath, 'world'); + + process.once('exit', function() { + assert(watcherClosed, 'watcher Object was not closed'); + }); +})().then(common.mustCall()); + +(async () => { + // This test checks how a symlink to outside the tracking folder can trigger change + // tmp/sub-directory/tracking-folder/symlink-folder -> tmp/sub-directory + + const rootDirectory = fs.mkdtempSync(testDir + path.sep); + + const subDirectory = path.join(rootDirectory, 'sub-directory'); + fs.mkdirSync(subDirectory); + + const trackingSubDirectory = path.join(subDirectory, 'tracking-folder'); + fs.mkdirSync(trackingSubDirectory); + + const symlinkFolder = path.join(trackingSubDirectory, 'symlink-folder'); + fs.symlinkSync(subDirectory, symlinkFolder); + + const forbiddenFile = path.join(subDirectory, 'forbidden.txt'); + const acceptableFile = path.join(trackingSubDirectory, 'acceptable.txt'); + + if (common.isMacOS) { + // On macOS delay watcher start to avoid leaking previous events. 
+ // Refs: https://github.com/libuv/libuv/pull/4503 + await setTimeout(common.platformTimeout(100)); + } + + const watcher = fs.watch(trackingSubDirectory, { recursive: true }); + let watcherClosed = false; + watcher.on('change', function(event, filename) { + // macOS will only change the following events: + // { event: 'rename', filename: 'symlink-folder' } + // { event: 'rename', filename: 'acceptable.txt' } + assert.ok(event === 'rename', `Received ${event}`); + assert.ok(filename === path.basename(symlinkFolder) || filename === path.basename(acceptableFile), `Received ${filename}`); + + if (filename === path.basename(acceptableFile)) { + watcher.close(); + watcherClosed = true; + } + }); + + await setTimeout(common.platformTimeout(100)); + fs.writeFileSync(forbiddenFile, 'world'); + await setTimeout(common.platformTimeout(100)); + fs.writeFileSync(acceptableFile, 'acceptable'); + + process.once('exit', function() { + assert(watcherClosed, 'watcher Object was not closed'); + }); +})().then(common.mustCall());