Jarred Sumner
2026-01-23 09:12:27 +01:00
parent 0b75120501
commit 7f0e291fa0
2 changed files with 51 additions and 12 deletions


@@ -125,6 +125,17 @@ pub fn deinit(this: *Watcher, close_descriptors: bool) void {
fd.close();
}
}
// Free any owned file paths before deiniting the watchlist
{
const wl_slice = this.watchlist.slice();
const wl_file_paths = wl_slice.items(.file_path);
const wl_owns = wl_slice.items(.owns_file_path);
for (wl_file_paths, wl_owns) |fp, owned| {
if (owned and fp.len > 0) {
this.allocator.free(@as([*]u8, @constCast(fp.ptr))[0 .. fp.len + 1]);
}
}
}
this.watchlist.deinit(this.allocator);
const allocator = this.allocator;
allocator.destroy(this);
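
The `fp.len + 1` above is the easy-to-miss detail: `file_path` is a sentinel-terminated slice whose `len` excludes the trailing 0, while the clone was allocated with the sentinel included. A standalone sketch of the same idiom, assuming the clone came from `dupeZ` (or any allocation of len + 1 bytes):

const std = @import("std");

test "free a [:0]const u8 clone including its sentinel byte" {
    const allocator = std.testing.allocator;

    // dupeZ allocates len + 1 bytes: the string plus a trailing 0 sentinel.
    const clone: [:0]const u8 = try allocator.dupeZ(u8, "src/index.ts");

    // The slice's len excludes the sentinel and its pointer is const, so to
    // return the whole buffer we cast away const and take len + 1 bytes.
    allocator.free(@as([*]u8, @constCast(clone.ptr))[0 .. clone.len + 1]);
}

Freeing only `fp.len` bytes would pass a shorter slice than was allocated, which safety-checking allocators such as std.testing.allocator flag as a size mismatch.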
@@ -218,6 +229,9 @@ pub const WatchItem = struct {
kind: Kind,
package_json: ?*PackageJSON,
eventlist_index: if (Environment.isLinux) Platform.EventListIndex else u0 = 0,
/// Whether the Watcher owns the file_path allocation and must free it on eviction/deinit.
/// Set to true when clone_file_path=true was used in addFile/addDirectory.
owns_file_path: bool = false,
pub const Kind = enum { file, directory };
};
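
The flag makes per-entry cleanup decidable without guessing who allocated a given path. A minimal reduction of the pattern outside of bun; `Entry` and `addEntry` are hypothetical stand-ins, not Watcher API:

const std = @import("std");

const Entry = struct {
    file_path: [:0]const u8,
    owns_file_path: bool = false,
};

fn addEntry(
    allocator: std.mem.Allocator,
    list: *std.ArrayListUnmanaged(Entry),
    path: [:0]const u8,
    clone_file_path: bool,
) !void {
    // Ownership tracks the clone decision exactly: we free what we dupe.
    const stored = if (clone_file_path) try allocator.dupeZ(u8, path) else path;
    try list.append(allocator, .{
        .file_path = stored,
        .owns_file_path = clone_file_path,
    });
}

test "only cloned paths are freed" {
    const allocator = std.testing.allocator;
    var list: std.ArrayListUnmanaged(Entry) = .{};
    defer list.deinit(allocator);

    try addEntry(allocator, &list, "a.zig", true); // cloned: entry owns it
    try addEntry(allocator, &list, "b.zig", false); // borrowed: caller's memory

    for (list.items) |entry| {
        if (entry.owns_file_path and entry.file_path.len > 0) {
            allocator.free(@as([*]u8, @constCast(entry.file_path.ptr))[0 .. entry.file_path.len + 1]);
        }
    }
}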
@@ -248,6 +262,17 @@ fn threadMain(this: *Watcher) !void {
fd.close();
}
}
// Free any owned file paths before deiniting the watchlist
{
const wl_slice = this.watchlist.slice();
const wl_file_paths = wl_slice.items(.file_path);
const wl_owns = wl_slice.items(.owns_file_path);
for (wl_file_paths, wl_owns) |fp, owned| {
if (owned and fp.len > 0) {
this.allocator.free(@as([*]u8, @constCast(fp.ptr))[0 .. fp.len + 1]);
}
}
}
this.watchlist.deinit(this.allocator);
// Close trace file if open
@@ -290,9 +315,18 @@ pub fn flushEvictions(this: *Watcher) void {
}
last_item = no_watch_item;
const file_paths = slice.items(.file_path);
const owns = slice.items(.owns_file_path);
// This is split into two passes because reading the slice while it is being modified is potentially unsafe.
for (this.evict_list[0..this.evict_list_i]) |item| {
if (item == last_item or this.watchlist.len <= item) continue;
// Free the cloned file_path before swapRemove overwrites it
if (owns[item]) {
const fp = file_paths[item];
if (fp.len > 0) {
this.allocator.free(@as([*]u8, @constCast(fp.ptr))[0 .. fp.len + 1]);
}
}
this.watchlist.swapRemove(item);
last_item = item;
}
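
The ordering matters because `swapRemove` fills the vacated slot by copying the last element over it; remove first and the owned pointer is unreachable and leaks. A standalone sketch under the same `std.MultiArrayList` layout the watchlist uses (the `Item` shape here is illustrative):

const std = @import("std");

const Item = struct { file_path: [:0]const u8, owns_file_path: bool };

test "free the owned pointer before swapRemove overwrites the slot" {
    const allocator = std.testing.allocator;
    var list: std.MultiArrayList(Item) = .{};
    defer list.deinit(allocator);

    try list.append(allocator, .{
        .file_path = try allocator.dupeZ(u8, "evict-me.zig"),
        .owns_file_path = true,
    });
    try list.append(allocator, .{ .file_path = "keep.zig", .owns_file_path = false });

    const slice = list.slice();
    const file_paths = slice.items(.file_path);
    const owns = slice.items(.owns_file_path);

    const evict_index: usize = 0;
    // swapRemove will copy "keep.zig" over slot 0, so read and free the
    // cloned pointer while the slot still holds it.
    if (owns[evict_index]) {
        const fp = file_paths[evict_index];
        allocator.free(@as([*]u8, @constCast(fp.ptr))[0 .. fp.len + 1]);
    }
    list.swapRemove(evict_index);

    try std.testing.expectEqualStrings("keep.zig", list.items(.file_path)[0]);
}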
@@ -386,6 +420,7 @@ fn appendFileAssumeCapacity(
.parent_hash = parent_hash,
.package_json = package_json,
.kind = .file,
.owns_file_path = clone_file_path,
};
if (comptime Environment.isMac) {
@@ -448,6 +483,7 @@ fn appendDirectoryAssumeCapacity(
.parent_hash = parent_hash,
.kind = .directory,
.package_json = null,
.owns_file_path = clone_file_path,
};
if (Environment.isMac) {


@@ -477,7 +477,7 @@ pub const PathWatcherManager = struct {
options.Loader.file,
.invalid,
null,
- false,
+ true,
)) {
.err => |err| return .{ .err = err },
.result => {},
@@ -525,7 +525,7 @@ pub const PathWatcherManager = struct {
// this should only be called if thread pool is not null
fn _addDirectory(this: *PathWatcherManager, watcher: *PathWatcher, path: PathInfo) bun.sys.Maybe(void) {
const fd = path.fd;
- switch (this.main_watcher.addDirectory(fd, path.path, path.hash, false)) {
+ switch (this.main_watcher.addDirectory(fd, path.path, path.hash, true)) {
.err => |err| return .{ .err = err.withPath(path.path) },
.result => {},
}
@@ -568,7 +568,7 @@ pub const PathWatcherManager = struct {
const path = watcher.path;
if (path.is_file) {
- try this.main_watcher.addFile(path.fd, path.path, path.hash, .file, .invalid, null, false).unwrap();
+ try this.main_watcher.addFile(path.fd, path.path, path.hash, .file, .invalid, null, true).unwrap();
} else {
if (comptime Environment.isMac) {
if (watcher.fsevents_watcher != null) {
@@ -591,11 +591,12 @@ pub const PathWatcherManager = struct {
}
/// Decrements the ref count for the given path. If the ref count reaches zero,
- /// removes the path from the file_paths map and returns its hash so the caller
- /// can call `main_watcher.remove(hash)` **after** releasing the manager mutex.
- /// This avoids a lock ordering inversion: the watcher thread holds Watcher.mutex
- /// then acquires PathWatcherManager.mutex, so we must not acquire Watcher.mutex
- /// while holding PathWatcherManager.mutex.
+ /// removes the path from the file_paths map, frees the path string, and returns
+ /// the hash. The caller must call `main_watcher.remove(hash)` **after** releasing
+ /// the manager mutex to avoid lock ordering inversion.
+ ///
+ /// The path string can be freed immediately because the Watcher now owns its own
+ /// cloned copy (clone_file_path=true), so it does not reference this allocation.
fn _decrementPathRefNoLock(this: *PathWatcherManager, file_path: [:0]const u8) ?Watcher.HashType {
if (this.file_paths.getEntry(file_path)) |entry| {
var path = entry.value_ptr;
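
The rule in the doc comment above is a standard deadlock-avoidance shape: pick one global lock order and never acquire against it. A sketch of the collect-then-act structure it prescribes; every name below is hypothetical, not PathWatcherManager's:

const std = @import("std");

var watcher_mutex: std.Thread.Mutex = .{};
var manager_mutex: std.Thread.Mutex = .{};

fn removeFromWatcher(hash: u32) void {
    // Safe to take: manager_mutex is no longer held by our caller.
    watcher_mutex.lock();
    defer watcher_mutex.unlock();
    _ = hash; // ... drop the watch entry ...
}

fn unregisterPath(hash_if_last_ref: ?u32) void {
    var pending: ?u32 = null;
    {
        manager_mutex.lock();
        defer manager_mutex.unlock();
        // Decrement the ref count, free the path string, and record the
        // hash; never call into the watcher while this mutex is held.
        pending = hash_if_last_ref;
    }
    // The watcher thread locks watcher_mutex -> manager_mutex, so taking
    // watcher_mutex only after releasing manager_mutex keeps a single
    // global order and cannot deadlock against it.
    if (pending) |hash| removeFromWatcher(hash);
}

test "collect under the manager lock, remove after release" {
    unregisterPath(42);
}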
@@ -626,10 +627,12 @@ pub const PathWatcherManager = struct {
// unregister is always called from main thread
fn unregisterWatcher(this: *PathWatcherManager, watcher: *PathWatcher) void {
- // Collect hashes that need to be removed from the Watcher. We must not
- // call main_watcher.remove() while holding PathWatcherManager.mutex
- // because that acquires Watcher.mutex, and the watcher thread acquires
- // these locks in the opposite order (Watcher.mutex -> PathWatcherManager.mutex).
+ // Collect hashes that need to be removed from the Watcher after releasing
+ // the manager mutex. We must not call main_watcher.remove() while holding
+ // PathWatcherManager.mutex because that acquires Watcher.mutex, and the
+ // watcher thread acquires these locks in the opposite order.
+ // Path strings can be freed immediately in _decrementPathRefNoLock because
+ // the Watcher now owns its own cloned copies (clone_file_path=true).
var sfb = std.heap.stackFallback(32 * @sizeOf(Watcher.HashType), bun.default_allocator);
const sfb_alloc = sfb.get();
var hashes: std.ArrayListUnmanaged(Watcher.HashType) = .{};
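
The stack-fallback buffer keeps the common case (a watcher holding a handful of paths) allocation-free, spilling to the heap only for large bursts. A small sketch of the same collect-then-drain shape, assuming a 32-bit hash as a stand-in for Watcher.HashType:

const std = @import("std");

test "stackFallback: small collections stay on the stack" {
    // Room for 32 hashes in a stack buffer; the fallback allocator is only
    // consulted if the list outgrows it.
    var sfb = std.heap.stackFallback(32 * @sizeOf(u32), std.testing.allocator);
    const alloc = sfb.get();

    var hashes: std.ArrayListUnmanaged(u32) = .{};
    defer hashes.deinit(alloc);

    try hashes.append(alloc, 0xdead);
    try hashes.append(alloc, 0xbeef);

    // ... release the manager mutex, then drain ...
    for (hashes.items) |h| {
        _ = h; // main_watcher.remove(h) would go here
    }
}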