Compare commits

...

3 Commits

Author SHA1 Message Date
Jarred Sumner
7f0e291fa0 Fix 2026-01-23 09:12:27 +01:00
Jarred Sumner
0b75120501 fix(watcher): call StackFallbackAllocator.get() only once in unregisterWatcher
StackFallbackAllocator.get() resets internal state and asserts it is
only called once per instance. The previous code called sfb.get()
multiple times: once in the defer statement (whose argument expression
executes when the defer runs at scope exit) and again in each
hashes.append() call, triggering a debug assertion failure.

Store the allocator from sfb.get() in a local variable and reuse it.
2026-01-23 08:40:48 +01:00
Jarred Sumner
3ac3314bb1 fix(watcher): resolve lock ordering inversion in PathWatcherManager
The watcher thread acquires locks in the order Watcher.mutex ->
PathWatcherManager.mutex, but several PathWatcherManager methods were
calling main_watcher.remove() (which acquires Watcher.mutex) while
already holding PathWatcherManager.mutex. This lock ordering inversion
could cause a deadlock.

Fix by deferring main_watcher.remove() calls until after the manager
mutex is released:

- _decrementPathRefNoLock now returns the hash instead of calling remove
- _decrementPathRef releases the mutex before calling remove
- getNext in DirectoryRegisterTask releases the mutex before calling remove
- unregisterWatcher collects hashes into a stack-backed list and removes
  them after releasing the mutex

Co-Authored-By: Claude <noreply@anthropic.com>
2026-01-22 22:42:46 -08:00
2 changed files with 128 additions and 46 deletions

View File

@@ -125,6 +125,17 @@ pub fn deinit(this: *Watcher, close_descriptors: bool) void {
fd.close();
}
}
// Free any owned file paths before deiniting the watchlist
{
const wl_slice = this.watchlist.slice();
const wl_file_paths = wl_slice.items(.file_path);
const wl_owns = wl_slice.items(.owns_file_path);
for (wl_file_paths, wl_owns) |fp, owned| {
if (owned and fp.len > 0) {
this.allocator.free(@as([*]u8, @constCast(fp.ptr))[0 .. fp.len + 1]);
}
}
}
this.watchlist.deinit(this.allocator);
const allocator = this.allocator;
allocator.destroy(this);
@@ -218,6 +229,9 @@ pub const WatchItem = struct {
kind: Kind,
package_json: ?*PackageJSON,
eventlist_index: if (Environment.isLinux) Platform.EventListIndex else u0 = 0,
/// Whether the Watcher owns the file_path allocation and must free it on eviction/deinit.
/// Set to true when clone_file_path=true was used in addFile/addDirectory.
owns_file_path: bool = false,
pub const Kind = enum { file, directory };
};
@@ -248,6 +262,17 @@ fn threadMain(this: *Watcher) !void {
fd.close();
}
}
// Free any owned file paths before deiniting the watchlist
{
const wl_slice = this.watchlist.slice();
const wl_file_paths = wl_slice.items(.file_path);
const wl_owns = wl_slice.items(.owns_file_path);
for (wl_file_paths, wl_owns) |fp, owned| {
if (owned and fp.len > 0) {
this.allocator.free(@as([*]u8, @constCast(fp.ptr))[0 .. fp.len + 1]);
}
}
}
this.watchlist.deinit(this.allocator);
// Close trace file if open
@@ -290,9 +315,18 @@ pub fn flushEvictions(this: *Watcher) void {
}
last_item = no_watch_item;
const file_paths = slice.items(.file_path);
const owns = slice.items(.owns_file_path);
// This is split into two passes because reading the slice while modified is potentially unsafe.
for (this.evict_list[0..this.evict_list_i]) |item| {
if (item == last_item or this.watchlist.len <= item) continue;
// Free the cloned file_path before swapRemove overwrites it
if (owns[item]) {
const fp = file_paths[item];
if (fp.len > 0) {
this.allocator.free(@as([*]u8, @constCast(fp.ptr))[0 .. fp.len + 1]);
}
}
this.watchlist.swapRemove(item);
last_item = item;
}
@@ -386,6 +420,7 @@ fn appendFileAssumeCapacity(
.parent_hash = parent_hash,
.package_json = package_json,
.kind = .file,
.owns_file_path = clone_file_path,
};
if (comptime Environment.isMac) {
@@ -448,6 +483,7 @@ fn appendDirectoryAssumeCapacity(
.parent_hash = parent_hash,
.kind = .directory,
.package_json = null,
.owns_file_path = clone_file_path,
};
if (Environment.isMac) {

View File

@@ -391,17 +391,24 @@ pub const PathWatcherManager = struct {
}
fn getNext(this: *DirectoryRegisterTask) ?*PathWatcher {
this.manager.mutex.lock();
defer this.manager.mutex.unlock();
const maybe_hash = blk: {
this.manager.mutex.lock();
defer this.manager.mutex.unlock();
const watcher = this.watcher_list.pop();
if (watcher == null) {
// no more work todo, release the fd and path
_ = this.manager.current_fd_task.remove(this.path.fd);
this.manager._decrementPathRefNoLock(this.path.path);
return null;
const watcher = this.watcher_list.pop();
if (watcher == null) {
// no more work todo, release the fd and path
_ = this.manager.current_fd_task.remove(this.path.fd);
break :blk this.manager._decrementPathRefNoLock(this.path.path);
}
return watcher;
};
// Remove from the watcher after releasing the manager mutex to
// avoid lock ordering inversion.
if (maybe_hash) |hash| {
this.manager.main_watcher.remove(hash);
}
return watcher;
return null;
}
fn processWatcher(
@@ -470,7 +477,7 @@ pub const PathWatcherManager = struct {
options.Loader.file,
.invalid,
null,
false,
true,
)) {
.err => |err| return .{ .err = err },
.result => {},
@@ -518,7 +525,7 @@ pub const PathWatcherManager = struct {
// this should only be called if thread pool is not null
fn _addDirectory(this: *PathWatcherManager, watcher: *PathWatcher, path: PathInfo) bun.sys.Maybe(void) {
const fd = path.fd;
switch (this.main_watcher.addDirectory(fd, path.path, path.hash, false)) {
switch (this.main_watcher.addDirectory(fd, path.path, path.hash, true)) {
.err => |err| return .{ .err = err.withPath(path.path) },
.result => {},
}
@@ -561,7 +568,7 @@ pub const PathWatcherManager = struct {
const path = watcher.path;
if (path.is_file) {
try this.main_watcher.addFile(path.fd, path.path, path.hash, .file, .invalid, null, false).unwrap();
try this.main_watcher.addFile(path.fd, path.path, path.hash, .file, .invalid, null, true).unwrap();
} else {
if (comptime Environment.isMac) {
if (watcher.fsevents_watcher != null) {
@@ -583,66 +590,105 @@ pub const PathWatcherManager = struct {
}
}
fn _decrementPathRefNoLock(this: *PathWatcherManager, file_path: [:0]const u8) void {
/// Decrements the ref count for the given path. If the ref count reaches zero,
/// removes the path from the file_paths map, frees the path string, and returns
/// the hash. The caller must call `main_watcher.remove(hash)` **after** releasing
/// the manager mutex to avoid lock ordering inversion.
///
/// The path string can be freed immediately because the Watcher now owns its own
/// cloned copy (clone_file_path=true), so it does not reference this allocation.
fn _decrementPathRefNoLock(this: *PathWatcherManager, file_path: [:0]const u8) ?Watcher.HashType {
if (this.file_paths.getEntry(file_path)) |entry| {
var path = entry.value_ptr;
if (path.refs > 0) {
path.refs -= 1;
if (path.refs == 0) {
const hash = path.hash;
const path_ = path.path;
this.main_watcher.remove(path.hash);
_ = this.file_paths.remove(path_);
bun.default_allocator.free(path_);
return hash;
}
}
}
return null;
}
fn _decrementPathRef(this: *PathWatcherManager, file_path: [:0]const u8) void {
this.mutex.lock();
defer this.mutex.unlock();
this._decrementPathRefNoLock(file_path);
const maybe_hash = blk: {
this.mutex.lock();
defer this.mutex.unlock();
break :blk this._decrementPathRefNoLock(file_path);
};
if (maybe_hash) |hash| {
this.main_watcher.remove(hash);
}
}
// unregister is always called form main thread
// unregister is always called from main thread
fn unregisterWatcher(this: *PathWatcherManager, watcher: *PathWatcher) void {
this.mutex.lock();
defer this.mutex.unlock();
// Collect hashes that need to be removed from the Watcher after releasing
// the manager mutex. We must not call main_watcher.remove() while holding
// PathWatcherManager.mutex because that acquires Watcher.mutex, and the
// watcher thread acquires these locks in the opposite order.
// Path strings can be freed immediately in _decrementPathRefNoLock because
// the Watcher now owns its own cloned copies (clone_file_path=true).
var sfb = std.heap.stackFallback(32 * @sizeOf(Watcher.HashType), bun.default_allocator);
const sfb_alloc = sfb.get();
var hashes: std.ArrayListUnmanaged(Watcher.HashType) = .{};
defer hashes.deinit(sfb_alloc);
var watchers = this.watchers.slice();
defer {
if (this.deinit_on_last_watcher and this.watcher_count == 0) {
this.deinit();
}
}
var should_deinit = false;
for (watchers, 0..) |w, i| {
if (w) |item| {
if (item == watcher) {
watchers[i] = null;
// if is the last one just pop
if (i == watchers.len - 1) {
this.watchers.len -= 1;
}
this.watcher_count -= 1;
{
this.mutex.lock();
defer this.mutex.unlock();
this._decrementPathRefNoLock(watcher.path.path);
if (comptime Environment.isMac) {
if (watcher.fsevents_watcher != null) {
break;
var watchers = this.watchers.slice();
for (watchers, 0..) |w, i| {
if (w) |item| {
if (item == watcher) {
watchers[i] = null;
// if is the last one just pop
if (i == watchers.len - 1) {
this.watchers.len -= 1;
}
}
this.watcher_count -= 1;
{
watcher.mutex.lock();
defer watcher.mutex.unlock();
while (watcher.file_paths.pop()) |file_path| {
this._decrementPathRefNoLock(file_path);
if (this._decrementPathRefNoLock(watcher.path.path)) |hash| {
hashes.append(sfb_alloc, hash) catch bun.outOfMemory();
}
if (comptime Environment.isMac) {
if (watcher.fsevents_watcher != null) {
break;
}
}
{
watcher.mutex.lock();
defer watcher.mutex.unlock();
while (watcher.file_paths.pop()) |file_path| {
if (this._decrementPathRefNoLock(file_path)) |hash| {
hashes.append(sfb_alloc, hash) catch bun.outOfMemory();
}
}
}
break;
}
break;
}
}
should_deinit = this.deinit_on_last_watcher and this.watcher_count == 0;
}
// Now remove from the watcher without holding the manager mutex.
for (hashes.items) |hash| {
this.main_watcher.remove(hash);
}
if (should_deinit) {
this.deinit();
}
}