mirror of https://github.com/oven-sh/bun
synced 2026-02-03 15:38:46 +00:00

Compare commits (2 commits): dylan/pyth ... zack/shell

Commits: a030fbe639, b3e19a59aa
@@ -4,6 +4,22 @@ const AllocationScope = @This();

pub const enabled = bun.Environment.enableAllocScopes;

pub const checkIsPoisoned = if (bun.Environment.enableAsan)
    struct {
        extern "c" fn __asan_address_is_poisoned(addr: *const anyopaque) c_int;
        pub fn checkIsPoisoned(addr: *const anyopaque) bool {
            const result = __asan_address_is_poisoned(addr) == 1;
            debug("checkIsPoisoned(0x{x}) = {any}", .{ addr, result });
            return result;
        }
    }.checkIsPoisoned
else
    struct {
        pub fn checkIsPoisoned(_: *const anyopaque) bool {
            return true;
        }
    }.checkIsPoisoned;

parent: Allocator,
state: if (enabled) struct {
    mutex: bun.Mutex,

@@ -52,6 +68,10 @@ pub fn init(parent: Allocator) AllocationScope {
}

pub fn deinit(scope: *AllocationScope) void {
    return deinitImpl(scope, false);
}

pub fn deinitImpl(scope: *AllocationScope, check_poisoned: bool) void {
    if (enabled) {
        scope.state.mutex.lock();
        defer scope.state.allocations.deinit(scope.parent);

@@ -64,7 +84,13 @@ pub fn deinit(scope: *AllocationScope) void {
        var it = scope.state.allocations.iterator();
        var n: usize = 0;
        while (it.next()) |entry| {
            Output.prettyErrorln("- {any}, len {d}, at:", .{ entry.key_ptr.*, entry.value_ptr.len });
            // It's possible it got freed but we accidentally used the actual
            // underlying allocator and NOT this allocation scope.
            if (check_poisoned and bun.Environment.enableAsan and checkIsPoisoned(entry.key_ptr.*)) {
                Output.prettyErrorln("- {any}, len {d}, (poisoned; did you free this pointer but forget to use the allocation scope to do so?) at:", .{ entry.key_ptr.*, entry.value_ptr.len });
            } else {
                Output.prettyErrorln("- {any}, len {d}, at:", .{ entry.key_ptr.*, entry.value_ptr.len });
            }
            bun.crash_handler.dumpStackTrace(entry.value_ptr.allocated_at.trace(), trace_limits);

            switch (entry.value_ptr.extra) {

@@ -251,6 +277,7 @@ pub inline fn downcast(a: Allocator) ?*AllocationScope {
        null;
}

const debug = bun.Output.scoped(.AllocationScope, false);
const std = @import("std");
const Allocator = std.mem.Allocator;
const bun = @import("bun");
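The `checkIsPoisoned` declaration above uses a common Zig idiom: both implementations are declared inside anonymous structs and one of them is selected at compile time. A minimal, self-contained sketch of the same idiom, with hypothetical names not taken from the repository:

const std = @import("std");
const builtin = @import("builtin");

// Select one implementation at compile time based on the build mode.
pub const log = if (builtin.mode == .Debug)
    struct {
        pub fn log(msg: []const u8) void {
            std.debug.print("debug: {s}\n", .{msg});
        }
    }.log
else
    struct {
        pub fn log(_: []const u8) void {}
    }.log;

pub fn main() void {
    log("hello");
}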
@@ -17,6 +17,12 @@ pub fn endScope(this: *AllocScope) void {
    }
}

pub fn endScopeWithPoisonCheck(this: *AllocScope) void {
    if (comptime bun.Environment.enableAllocScopes) {
        this.__scope.deinitImpl(true);
    }
}

pub fn leakSlice(this: *AllocScope, memory: anytype) void {
    if (comptime bun.Environment.enableAllocScopes) {
        _ = @typeInfo(@TypeOf(memory)).pointer;
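Based only on the calls visible in this diff (beginScope, allocator(), endScopeWithPoisonCheck), a hedged usage sketch of the scoped-allocator pattern might look like the following; the import path and the surrounding function are assumptions, not code from the repository:

const bun = @import("bun");

fn copyPathExample() void {
    // Open a scope over the default allocator; the assumed bun.shell.AllocScope
    // wrapper is a no-op unless allocation scopes are enabled in the build.
    var scope = bun.shell.AllocScope.beginScope(bun.default_allocator);
    // On scope exit, also ask ASan (when enabled) whether any still-tracked
    // pointer was already freed behind the scope's back.
    defer scope.endScopeWithPoisonCheck();

    const copy = scope.allocator().dupeZ(u8, "some/path") catch bun.outOfMemory();
    defer scope.allocator().free(copy);
}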
@@ -375,6 +375,7 @@ pub fn init(
            cmd.exec.bltn.impl = .{
                .rm = Rm{
                    .opts = .{},
                    .alloc_scope = shell.AllocScope.beginScope(bun.default_allocator),
                },
            };
        },
@@ -1,4 +1,5 @@
opts: Opts,
alloc_scope: shell.AllocScope,
state: union(enum) {
    idle,
    parse_opts: struct {

@@ -13,7 +14,7 @@ state: union(enum) {
    // task: RmTask,
    filepath_args: []const [*:0]const u8,
    total_tasks: usize,
    err: ?Syscall.Error = null,
    errno: ?ExitCode = null,
    lock: bun.Mutex = bun.Mutex{},
    error_signal: std.atomic.Value(bool) = .{ .raw = false },
    output_done: std.atomic.Value(usize) = .{ .raw = 0 },
@@ -305,7 +306,8 @@ pub fn onIOWriterChunk(this: *Rm, _: usize, e: ?JSC.SystemError) Yield {
    log("Rm(0x{x}) output done={d} output count={d}", .{ @intFromPtr(this), this.state.exec.getOutputCount(.output_done), this.state.exec.getOutputCount(.output_count) });
    this.state.exec.incrementOutputCount(.output_done);
    if (this.state.exec.state.tasksDone() >= this.state.exec.total_tasks and this.state.exec.getOutputCount(.output_done) >= this.state.exec.getOutputCount(.output_count)) {
        const code: ExitCode = if (this.state.exec.err != null) 1 else 0;
        // rm seems to just use exit code 1
        const code: ExitCode = if (this.state.exec.errno != null) @as(ExitCode, 1) else @as(ExitCode, 0);
        return this.bltn().done(code);
    }
    return .suspended;
@@ -321,7 +323,7 @@ pub fn onIOWriterChunk(this: *Rm, _: usize, e: ?JSC.SystemError) Yield {
}

pub fn deinit(this: *Rm) void {
    _ = this;
    this.alloc_scope.endScopeWithPoisonCheck();
}

pub inline fn bltn(this: *Rm) *Builtin {
@@ -407,12 +409,15 @@ pub fn onShellRmTaskDone(this: *Rm, task: *ShellRmTask) void {
        .waiting => brk: {
            exec.state.waiting.tasks_done += 1;
            const amt = exec.state.waiting.tasks_done;
            if (task.err) |err| {
                exec.err = err;
            if (bun.take(&task.err)) |_err| {
                var err: Syscall.Error = _err;
                exec.errno = @intFromEnum(err.getErrno());
                const error_string = this.bltn().taskErrorToString(.rm, err);
                err.deinitWithAllocator(this.alloc_scope.allocator());
                if (this.bltn().stderr.needsIO()) |safeguard| {
                    log("Rm(0x{x}) task=0x{x} ERROR={s}", .{ @intFromPtr(this), @intFromPtr(task), error_string });
                    exec.incrementOutputCount(.output_count);
                    task.deinit();
                    this.bltn().stderr.enqueue(this, error_string, safeguard).run();
                    return;
                } else {
@@ -428,12 +433,16 @@ pub fn onShellRmTaskDone(this: *Rm, task: *ShellRmTask) void {
    if (tasks_done >= this.state.exec.total_tasks and
        exec.getOutputCount(.output_done) >= exec.getOutputCount(.output_count))
    {
        this.state = .{ .done = .{ .exit_code = if (exec.err) |theerr| theerr.errno else 0 } };
        this.state = .{ .done = .{ .exit_code = if (exec.errno) |theerr| theerr else 0 } };
        task.deinit();
        this.next().run();
    } else {
        task.deinit();
    }
}

fn writeVerbose(this: *Rm, verbose: *ShellRmTask.DirTask) Yield {
    defer verbose.deinit();
    if (this.bltn().stdout.needsIO()) |safeguard| {
        const buf = verbose.takeDeletedEntries();
        defer buf.deinit();
@@ -442,11 +451,18 @@ fn writeVerbose(this: *Rm, verbose: *ShellRmTask.DirTask) Yield {
    _ = this.bltn().writeNoIO(.stdout, verbose.deleted_entries.items);
    _ = this.state.exec.incrementOutputCount(.output_done);
    if (this.state.exec.state.tasksDone() >= this.state.exec.total_tasks and this.state.exec.getOutputCount(.output_done) >= this.state.exec.getOutputCount(.output_count)) {
        return this.bltn().done(if (this.state.exec.err != null) @as(ExitCode, 1) else @as(ExitCode, 0));
        return this.bltn().done(if (this.state.exec.errno) |theerr| theerr else 0);
    }
    return .done;
}

const PostRunAction = enum {
    done,
    done_no_decrement,
    waiting_for_children,
    nothing,
};

pub const ShellRmTask = struct {
    const debug = bun.Output.scoped(.AsyncRmTask, true);
@@ -495,19 +511,39 @@ pub const ShellRmTask = struct {

    const ParentRmTask = @This();

    /// Notes about how this code works:
    ///
    /// A `DirTask` iterates over the entries of its directory. If there are
    /// sub-directories, it will create sub-DirTasks that will run concurrently.
    ///
    /// Because of the concurrent nature of these tasks, clean-up of a `DirTask` becomes a little complicated.
    ///
    /// There are essentially three scenarios that we need to be mindful of:
    ///
    /// 1. The `DirTask` encountered no sub-directories and can just clean itself up immediately.
    /// 2. The `DirTask` encountered and spawned sub-DirTasks, but they all finished before the parent `DirTask` iterated over its entries.
    /// 3. The `DirTask` encountered and spawned sub-DirTasks, but they are still running when the parent `DirTask` finishes iterating over its entries.
    ///
    /// We need some synchronization mechanism to disambiguate 2 from 3 concurrently.
    pub const DirTask = struct {
        task_manager: *ParentRmTask,
        parent_task: ?*DirTask,
        path: [:0]const u8,
        is_absolute: bool = false,
        subtask_count: std.atomic.Value(usize),
        need_to_wait: std.atomic.Value(bool) = std.atomic.Value(bool).init(false),
        deleting_after_waiting_for_children: std.atomic.Value(bool) = std.atomic.Value(bool).init(false),

        /// This is initialized to 1 because we also count the PARENT DirTask.
        ///
        /// The parent DirTask decrements the count after its directory iteration (inside `removeEntryDir(...)`) is complete.
        ///
        subtask_count: std.atomic.Value(usize) = std.atomic.Value(usize).init(1),

        kind_hint: EntryKindHint,
        task: JSC.WorkPoolTask = .{ .callback = runFromThreadPool },
        deleted_entries: std.ArrayList(u8),
        concurrent_task: JSC.EventLoopTask,

        post_run_action: ?PostRunAction = null,

        const EntryKindHint = enum { idk, dir, file };

        pub fn takeDeletedEntries(this: *DirTask) std.ArrayList(u8) {
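The `subtask_count` scheme described in the comments above is a reference count that starts at 1 for the parent itself; each spawned child adds 1, and whichever participant's `fetchSub` observes 1 knows it finished last and may clean up. A small, self-contained sketch of that counting pattern (hypothetical code, not taken from the repository):

const std = @import("std");

const Counter = struct {
    // Starts at 1: the parent counts itself until it finishes iterating.
    remaining: std.atomic.Value(usize) = std.atomic.Value(usize).init(1),

    fn addChild(self: *Counter) void {
        _ = self.remaining.fetchAdd(1, .monotonic);
    }

    /// Returns true if the caller was the last participant to finish.
    fn finishOne(self: *Counter) bool {
        return self.remaining.fetchSub(1, .acq_rel) == 1;
    }
};

test "the last finisher observes a previous value of 1" {
    var c = Counter{};
    c.addChild(); // one child spawned
    try std.testing.expect(!c.finishOne()); // child finishes first
    try std.testing.expect(c.finishOne()); // parent finishes last
}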
@@ -533,8 +569,12 @@ pub const ShellRmTask = struct {

        fn runFromThreadPoolImpl(this: *DirTask) void {
            defer {
                if (!this.deleting_after_waiting_for_children.load(.seq_cst)) {
                    this.postRun();
                    const post_run_action: PostRunAction = bun.take(&this.post_run_action).?;
                    switch (post_run_action) {
                        .done => this.postRun(),
                        .done_no_decrement => this.postRunImpl(false),
                        .waiting_for_children => {},
                        .nothing => {},
                    }
                }
@@ -543,7 +583,7 @@ pub const ShellRmTask = struct {
            if (this.parent_task == null) {
                var buf: bun.PathBuffer = undefined;
                const cwd_path = switch (Syscall.getFdPath(this.task_manager.cwd, &buf)) {
                    .result => |p| bun.default_allocator.dupeZ(u8, p) catch bun.outOfMemory(),
                    .result => |p| this.task_manager.rm.alloc_scope.allocator().dupeZ(u8, p) catch bun.outOfMemory(),
                    .err => |err| {
                        debug("[runFromThreadPoolImpl:getcwd] DirTask({x}) failed: {s}: {s}", .{ @intFromPtr(this), @tagName(err.getErrno()), err.path });
                        this.task_manager.err_mutex.lock();
@@ -551,6 +591,8 @@ pub const ShellRmTask = struct {
                        if (this.task_manager.err == null) {
                            this.task_manager.err = err;
                            this.task_manager.error_signal.store(true, .seq_cst);
                        } else {
                            err.deinitWithAllocator(this.task_manager.rm.alloc_scope.allocator());
                        }
                        return;
                    },
@@ -571,7 +613,7 @@ pub const ShellRmTask = struct {
                    this.task_manager.error_signal.store(true, .seq_cst);
                } else {
                    var err2 = err;
                    err2.deinit();
                    err2.deinitWithAllocator(this.task_manager.rm.alloc_scope.allocator());
                }
            },
            .result => {},
@@ -591,34 +633,56 @@ pub const ShellRmTask = struct {
        }

        pub fn postRun(this: *DirTask) void {
            debug("DirTask(0x{x}, path={s}) postRun", .{ @intFromPtr(this), this.path });
            // // This is true if the directory has subdirectories
            // // that need to be deleted
            if (this.need_to_wait.load(.seq_cst)) return;
            return this.postRunImpl(true);
        }

            // We have executed all the children of this task
            if (this.subtask_count.fetchSub(1, .seq_cst) == 1) {
                defer {
                    if (this.task_manager.opts.verbose)
                        this.queueForWrite()
                    else
                        this.deinit();
                }
        pub fn postRunImpl(this: *DirTask, check_if_done: bool) void {
            debug("DirTask(0x{x}, path={s}) postRun", .{ @intFromPtr(this), this.path });

            const all_done = if (!check_if_done) brk: {
                bun.assert(this.subtask_count.load(.acquire) == 0);
                break :brk true;
            } else brk: {

                // If this value is:
                // 1: The parent DirTask has finished iterating and we are the last child
                // >1: We are not the last child, or the parent DirTask is still iterating
                const previous_tasks_amount = this.subtask_count.fetchSub(1, .acq_rel);
                bun.assert(previous_tasks_amount >= 1);

                // We have executed all the children of this task
                break :brk previous_tasks_amount == 1;
            };

            if (all_done) {
                const deinit_fn: *const fn (*DirTask) void = if (this.task_manager.opts.verbose)
                    DirTask.queueForWrite
                else
                    DirTask.deinit;

                // If we have a parent and we are the last child, now we can delete the parent
                if (this.parent_task != null) {
                    // It's possible that we queued this subdir task and it finished, while the parent
                    // was still in the `removeEntryDir` function
                    const tasks_left_before_decrement = this.parent_task.?.subtask_count.fetchSub(1, .seq_cst);
                    const parent_still_in_remove_entry_dir = !this.parent_task.?.need_to_wait.load(.monotonic);
                    if (!parent_still_in_remove_entry_dir and tasks_left_before_decrement == 2) {
                        this.parent_task.?.deleteAfterWaitingForChildren();
                if (this.parent_task) |parent_task| {
                    // BUT, only if the parent task is done iterating
                    const parent_tasks_previous_amount = parent_task.subtask_count.fetchSub(1, .acq_rel);
                    debug("DirTask(0x{x}, path={s}) parent=(0x{x}, path={s}) parent_tasks_previous_amount={d}", .{
                        @intFromPtr(this),
                        this.path,
                        @intFromPtr(parent_task),
                        parent_task.path,
                        parent_tasks_previous_amount,
                    });
                    bun.assert(parent_tasks_previous_amount >= 1);
                    deinit_fn(this);
                    if (parent_tasks_previous_amount == 1) {
                        parent_task.deleteAfterWaitingForChildren();
                    }
                    return;
                }

                // Otherwise we are root task
                this.task_manager.finishConcurrently();
                const task_manager = this.task_manager;
                deinit_fn(this);
                task_manager.finishConcurrently();
            }

            // Otherwise need to wait
@@ -626,26 +690,24 @@ pub const ShellRmTask = struct {

        pub fn deleteAfterWaitingForChildren(this: *DirTask) void {
            debug("DirTask(0x{x}, path={s}) deleteAfterWaitingForChildren", .{ @intFromPtr(this), this.path });
            // `runFromMainThreadImpl` has a `defer this.postRun()` so need to set this to true to skip that
            this.deleting_after_waiting_for_children.store(true, .seq_cst);
            this.need_to_wait.store(false, .seq_cst);
            var do_post_run = true;
            defer {
                if (do_post_run) this.postRun();
                if (do_post_run) this.postRunImpl(false);
            }
            if (this.task_manager.error_signal.load(.seq_cst)) {
                return;
            }

            switch (this.task_manager.removeEntryDirAfterChildren(this)) {
                .err => |e| {
                .err => |e_| {
                    var e = e_;
                    debug("[deleteAfterWaitingForChildren] DirTask({x}) failed: {s}: {s}", .{ @intFromPtr(this), @tagName(e.getErrno()), e.path });
                    this.task_manager.err_mutex.lock();
                    defer this.task_manager.err_mutex.unlock();
                    if (this.task_manager.err == null) {
                        this.task_manager.err = e;
                    } else {
                        bun.default_allocator.free(e.path);
                        e.deinitWithAllocator(this.task_manager.rm.alloc_scope.allocator());
                    }
                },
                .result => |deleted| {
@@ -667,18 +729,19 @@ pub const ShellRmTask = struct {
        }

        pub fn deinit(this: *DirTask) void {
            log("DirTask(0x{x}, path={s}) deinit", .{ @intFromPtr(this), this.path });
            this.deleted_entries.deinit();
            // The root's path string is from Rm's argv so don't deallocate it
            // And the root task is actually a field on the struct of the AsyncRmTask so don't deallocate it either
            if (this.parent_task != null) {
                bun.default_allocator.free(this.path);
                bun.default_allocator.destroy(this);
                this.task_manager.rm.alloc_scope.allocator().free(this.path);
                this.task_manager.rm.alloc_scope.allocator().destroy(this);
            }
        }
    };

    pub fn create(root_path: bun.PathString, rm: *Rm, cwd: bun.FileDescriptor, error_signal: *std.atomic.Value(bool), is_absolute: bool) *ShellRmTask {
        const task = bun.default_allocator.create(ShellRmTask) catch bun.outOfMemory();
        const task = rm.alloc_scope.allocator().create(ShellRmTask) catch bun.outOfMemory();
        task.* = ShellRmTask{
            .rm = rm,
            .opts = rm.opts,
@@ -690,7 +753,7 @@ pub const ShellRmTask = struct {
            .path = root_path.sliceAssumeZ(),
            .subtask_count = std.atomic.Value(usize).init(1),
            .kind_hint = .idk,
            .deleted_entries = std.ArrayList(u8).init(bun.default_allocator),
            .deleted_entries = std.ArrayList(u8).init(rm.alloc_scope.allocator()),
            .concurrent_task = JSC.EventLoopTask.fromEventLoop(rm.bltn().eventLoop()),
        },
        .event_loop = rm.bltn().parentCmd().base.eventLoop(),
@@ -699,6 +762,7 @@ pub const ShellRmTask = struct {
        .root_is_absolute = is_absolute,
        .join_style = JoinStyle.fromPath(root_path),
    };
    debug("DirTask(0x{x}, path={s}) created", .{ @intFromPtr(&task.root_task), task.root_path });
    return task;
}
@@ -711,7 +775,7 @@ pub const ShellRmTask = struct {
            return;
        }
        const new_path = this.join(
            bun.default_allocator,
            this.rm.alloc_scope.allocator(),
            &[_][]const u8{
                parent_dir.path[0..parent_dir.path.len],
                path[0..path.len],
@@ -728,14 +792,14 @@ pub const ShellRmTask = struct {
            return;
        }

        var subtask = bun.default_allocator.create(DirTask) catch bun.outOfMemory();
        var subtask = this.rm.alloc_scope.allocator().create(DirTask) catch bun.outOfMemory();
        subtask.* = DirTask{
            .task_manager = this,
            .path = path,
            .parent_task = parent_task,
            .subtask_count = std.atomic.Value(usize).init(1),
            .kind_hint = kind_hint,
            .deleted_entries = std.ArrayList(u8).init(bun.default_allocator),
            .deleted_entries = std.ArrayList(u8).init(this.rm.alloc_scope.allocator()),
            .concurrent_task = JSC.EventLoopTask.fromEventLoop(this.event_loop),
        };
@@ -791,6 +855,8 @@ pub const ShellRmTask = struct {
    }

    fn removeEntryDir(this: *ShellRmTask, dir_task: *DirTask, is_absolute: bool, buf: *bun.PathBuffer) Maybe(void) {
        dir_task.post_run_action = .done;

        const path = dir_task.path;
        const dirfd = this.cwd;
        debug("removeEntryDir({s})", .{path});
@@ -827,7 +893,7 @@ pub const ShellRmTask = struct {
        }

        if (!this.opts.recursive) {
            return Maybe(void).initErr(Syscall.Error.fromCode(bun.sys.E.ISDIR, .TODO).withPath(bun.default_allocator.dupeZ(u8, dir_task.path) catch bun.outOfMemory()));
            return Maybe(void).initErr(Syscall.Error.fromCode(bun.sys.E.ISDIR, .TODO).withPath(this.rm.alloc_scope.allocator().dupeZ(u8, dir_task.path) catch bun.outOfMemory()));
        }

        const flags = bun.O.DIRECTORY | bun.O.RDONLY;
@@ -906,13 +972,22 @@ pub const ShellRmTask = struct {
            }
        }

        // If this value is:
        // 1: then all other sub-tasks have finished
        // >1: then there are still sub-tasks running
        const previous_subtask_value = dir_task.subtask_count.fetchSub(1, .acq_rel);
        bun.assert(previous_subtask_value >= 1);

        // Need to wait for children to finish
        if (dir_task.subtask_count.load(.seq_cst) > 1) {
        if (previous_subtask_value > 1) {
            dir_task.post_run_action = .waiting_for_children;
            close_fd = true;
            dir_task.need_to_wait.store(true, .seq_cst);
            return Maybe(void).success;
        }

        // We'll post run ourselves
        dir_task.post_run_action = .done_no_decrement;

        if (this.error_signal.load(.seq_cst)) return Maybe(void).success;

        if (bun.Environment.isWindows) {
@@ -978,14 +1053,14 @@ pub const ShellRmTask = struct {

        pub fn onIsDir(this: *@This(), parent_dir_task: *DirTask, path: [:0]const u8, is_absolute: bool, buf: *bun.PathBuffer) Maybe(void) {
            if (this.child_of_dir) {
                this.task.enqueueNoJoin(parent_dir_task, bun.default_allocator.dupeZ(u8, path) catch bun.outOfMemory(), .dir);
                this.task.enqueueNoJoin(parent_dir_task, this.task.rm.alloc_scope.allocator().dupeZ(u8, path) catch bun.outOfMemory(), .dir);
                return Maybe(void).success;
            }
            return this.task.removeEntryDir(parent_dir_task, is_absolute, buf);
        }

        pub fn onDirNotEmpty(this: *@This(), parent_dir_task: *DirTask, path: [:0]const u8, is_absolute: bool, buf: *bun.PathBuffer) Maybe(void) {
            if (this.child_of_dir) return .{ .result = this.task.enqueueNoJoin(parent_dir_task, bun.default_allocator.dupeZ(u8, path) catch bun.outOfMemory(), .dir) };
            if (this.child_of_dir) return .{ .result = this.task.enqueueNoJoin(parent_dir_task, this.task.rm.alloc_scope.allocator().dupeZ(u8, path) catch bun.outOfMemory(), .dir) };
            return this.task.removeEntryDir(parent_dir_task, is_absolute, buf);
        }
    };
@@ -1080,6 +1155,10 @@ pub const ShellRmTask = struct {
            return Maybe(void).success;
        }
    };

        // const is_root_task = &this.root_task == parent_dir_task;
        this.root_task.post_run_action = .done;

        const dirfd = this.cwd;
        switch (ShellSyscall.unlinkatWithFlags(dirfd, path, 0)) {
            .result => return this.verboseDeleted(parent_dir_task, path),
@@ -1139,8 +1218,7 @@ pub const ShellRmTask = struct {
    }

    fn errorWithPath(this: *ShellRmTask, err: Syscall.Error, path: [:0]const u8) Syscall.Error {
        _ = this;
        return err.withPath(bun.default_allocator.dupeZ(u8, path[0..path.len]) catch bun.outOfMemory());
        return err.withPath(this.rm.alloc_scope.allocator().dupeZ(u8, path[0..path.len]) catch bun.outOfMemory());
    }

    inline fn join(this: *ShellRmTask, alloc: Allocator, subdir_parts: []const []const u8, is_absolute: bool) [:0]const u8 {
@@ -1170,7 +1248,10 @@ pub const ShellRmTask = struct {
    }

    pub fn deinit(this: *ShellRmTask) void {
        bun.default_allocator.destroy(this);
        if (this.err) |*err| {
            err.deinitWithAllocator(this.rm.alloc_scope.allocator());
        }
        this.rm.alloc_scope.allocator().destroy(this);
    }
};
test/js/bun/shell/commands/rm-edge-cases.test.ts (new file, 543 lines)

@@ -0,0 +1,543 @@
import { $ } from "bun";
import { beforeAll, describe, expect, setDefaultTimeout, test } from "bun:test";
import { tempDirWithFiles, bunExe, bunEnv, tmpdirSync, isWindows, isPosix } from "harness";
import { mkdirSync, writeFileSync, chmodSync, symlinkSync, existsSync } from "node:fs";
import path from "node:path";
import { sortedShellOutput } from "../util";

$.nothrow();

beforeAll(() => {
  setDefaultTimeout(1000 * 60 * 5);
});

const fileExists = async (path: string): Promise<boolean> => {
  try {
    await $`ls -d ${path}`;
    return true;
  } catch {
    return false;
  }
};

describe("rm edge cases and error paths", () => {
  // Test 1: Basic functionality covered by existing tests - marking as complete

  // Test 2: Permission errors (EPERM)
  test.skipIf(isWindows)("permission denied errors", async () => {
    const tmpdir = tmpdirSync();
    const protectedFile = path.join(tmpdir, "protected.txt");
    const protectedDir = path.join(tmpdir, "protected_dir");
    const fileInProtectedDir = path.join(protectedDir, "file.txt");

    // Create protected file
    writeFileSync(protectedFile, "test");
    chmodSync(protectedFile, 0o444); // Read-only

    // Create protected directory with file
    mkdirSync(protectedDir);
    writeFileSync(fileInProtectedDir, "test");
    chmodSync(protectedDir, 0o555); // Read/execute only

    // Try to remove read-only file without force
    {
      const { stderr, exitCode } = await $`rm ${protectedFile}`;
      // On macOS, removing read-only files might succeed
      if (exitCode !== 0) {
        expect(stderr.toString()).toContain("Permission denied");
      }
      // Cleanup - only if file still exists
      if (existsSync(protectedFile)) {
        chmodSync(protectedFile, 0o644);
      }
    }

    // Try to remove file in read-only directory
    {
      const { stderr, exitCode } = await $`rm ${fileInProtectedDir}`;
      expect(exitCode).toBe(1);
      expect(stderr.toString()).toContain("Permission denied");
    }

    // Cleanup
    chmodSync(protectedDir, 0o755);
  });
  // Test 3: Root directory protection
  test.skipIf(isWindows)("root directory protection", async () => {
    // Test various forms of root paths
    const rootPaths = isPosix ? ["/", "/../", "/./"] : ["C:\\", "C:\\..\\", "C:\\.\\"];

    for (const rootPath of rootPaths) {
      // Default behavior should prevent root deletion
      {
        const { stderr, exitCode } = await $`rm -rf ${rootPath}`;
        expect(exitCode).toBe(1);
        expect(stderr.toString()).toContain("may not be removed");
      }

      // Even with --no-preserve-root, Bun should protect root
      {
        const { stderr, exitCode } = await $`rm -rf --no-preserve-root ${rootPath}`;
        // Should still fail as an extra safety measure
        expect(exitCode).toBe(1);
      }
    }
  });

  // Test 4: Empty directory removal with -d
  test("empty directory removal with -d flag", async () => {
    const tmpdir = tmpdirSync();
    const emptyDir = path.join(tmpdir, "empty");

    mkdirSync(emptyDir);

    // -d should remove empty directory
    {
      const { exitCode } = await $`rm -d ${emptyDir}`;
      expect(exitCode).toBe(0);
      expect(existsSync(emptyDir)).toBe(false);
    }

    // -d on a file should work
    {
      const testFile = path.join(tmpdir, "test.txt");
      writeFileSync(testFile, "test");
      const { exitCode } = await $`rm -d ${testFile}`;
      expect(exitCode).toBe(0);
      expect(existsSync(testFile)).toBe(false);
    }
  });
test("non-empty directory with -d flag", async () => {
|
||||
const tmpdir = tmpdirSync();
|
||||
const nonEmptyDir = path.join(tmpdir, "nonempty");
|
||||
const fileInDir = path.join(nonEmptyDir, "file.txt");
|
||||
|
||||
mkdirSync(nonEmptyDir);
|
||||
writeFileSync(fileInDir, "test");
|
||||
|
||||
// -d should fail on non-empty directory
|
||||
const { stderr, exitCode } = await $`rm -d ${nonEmptyDir}`;
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr.toString()).toContain("Directory not empty");
|
||||
expect(existsSync(nonEmptyDir)).toBe(true);
|
||||
});
|
||||
|
||||
test("non-existent file handling", async () => {
|
||||
const tmpdir = tmpdirSync();
|
||||
const nonExistent = path.join(tmpdir, "does_not_exist.txt");
|
||||
|
||||
// Without -f should error
|
||||
{
|
||||
const { stderr, exitCode } = await $`rm ${nonExistent}`;
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr.toString()).toContain("No such file or directory");
|
||||
}
|
||||
|
||||
// With -f should succeed silently
|
||||
{
|
||||
const { stdout, stderr, exitCode } = await $`rm -f ${nonExistent}`;
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout.toString()).toBe("");
|
||||
expect(stderr.toString()).toBe("");
|
||||
}
|
||||
|
||||
// Multiple non-existent files with -f
|
||||
{
|
||||
const { exitCode } =
|
||||
await $`rm -f ${path.join(tmpdir, "fake1")} ${path.join(tmpdir, "fake2")} ${path.join(tmpdir, "fake3")}`;
|
||||
expect(exitCode).toBe(0);
|
||||
}
|
||||
});
|
||||
|
||||
  // Test 6: Deep recursive deletion
  test("deep recursive deletion", async () => {
    const tmpdir = tmpdirSync();
    const deepPath = path.join(tmpdir, "a", "b", "c", "d", "e", "f", "g");

    mkdirSync(deepPath, { recursive: true });

    // Create files at various depths
    writeFileSync(path.join(tmpdir, "a", "file1.txt"), "test");
    writeFileSync(path.join(tmpdir, "a", "b", "file2.txt"), "test");
    writeFileSync(path.join(tmpdir, "a", "b", "c", "file3.txt"), "test");
    writeFileSync(path.join(deepPath, "deep.txt"), "test");

    // Remove recursively with verbose
    {
      const { stdout, exitCode } = await $`rm -rv ${path.join(tmpdir, "a")}`;
      expect(exitCode).toBe(0);
      const output = stdout.toString();
      expect(output).toContain("file1.txt");
      expect(output).toContain("file2.txt");
      expect(output).toContain("file3.txt");
      expect(output).toContain("deep.txt");
      expect(existsSync(path.join(tmpdir, "a"))).toBe(false);
    }
  });

  // Test 7: Verbose mode output
  test("verbose mode output format", async () => {
    const tmpdir = tmpdirSync();
    const file1 = path.join(tmpdir, "file1.txt");
    const file2 = path.join(tmpdir, "file2.txt");
    const dir1 = path.join(tmpdir, "dir1");
    const fileInDir = path.join(dir1, "nested.txt");

    writeFileSync(file1, "test");
    writeFileSync(file2, "test");
    mkdirSync(dir1);
    writeFileSync(fileInDir, "test");

    // Verbose output for multiple files
    {
      const { stdout, exitCode } = await $`rm -v ${file1} ${file2}`;
      expect(exitCode).toBe(0);
      const lines = stdout.toString().trim().split("\n");
      expect(lines).toHaveLength(2);
      expect(lines).toContain(file1);
      expect(lines).toContain(file2);
    }

    // Verbose recursive
    {
      const { stdout, exitCode } = await $`rm -rv ${dir1}`;
      expect(exitCode).toBe(0);
      const output = stdout.toString();
      expect(output).toContain(fileInDir);
      expect(output).toContain(dir1);
    }
  });
  // Test 8: Invalid command line options
  test("invalid command line options", async () => {
    // No arguments
    {
      const { stderr, exitCode } = await $`rm`;
      expect(exitCode).toBe(1);
      expect(stderr.toString()).toContain("usage:");
    }

    // Invalid flag
    {
      const { stderr, exitCode } = await $`rm -xyz test.txt`;
      expect(exitCode).toBe(1);
      expect(stderr.toString()).toContain("illegal option");
    }

    // Long invalid option
    {
      const { stderr, exitCode } = await $`rm --invalid-option test.txt`;
      expect(exitCode).toBe(1);
      expect(stderr.toString()).toContain("illegal option");
    }

    // Interactive mode not supported yet
    {
      const { stderr, exitCode } = await $`rm -i test.txt`;
      expect(exitCode).toBe(1);
      expect(stderr.toString()).toContain('-i" is not supported yet');
    }
  });

  // Test 9: Concurrent deletion
  test("concurrent deletion of multiple paths", async () => {
    const tmpdir = tmpdirSync();
    const paths: string[] = [];

    // Create 10 directories with files
    for (let i = 0; i < 10; i++) {
      const dir = path.join(tmpdir, `dir${i}`);
      mkdirSync(dir);
      for (let j = 0; j < 5; j++) {
        writeFileSync(path.join(dir, `file${j}.txt`), `content ${i}-${j}`);
      }
      paths.push(dir);
    }

    // Remove all concurrently
    {
      const { exitCode } = await $`rm -rf ${{ raw: paths.join(" ") }}`;
      expect(exitCode).toBe(0);

      // Verify all deleted
      for (const p of paths) {
        console.log(p);
        expect(existsSync(p)).toBe(false);
      }
    }
  });
  // Test for read-only files in nested directories
  test.skipIf(isWindows)("read-only file in nested directory", async () => {
    const tmpdir = tmpdirSync();
    const nestedPath = path.join(tmpdir, "level1", "level2", "level3");
    mkdirSync(nestedPath, { recursive: true });

    // Create read-only file deep in the structure
    const readOnlyFile = path.join(nestedPath, "readonly.txt");
    writeFileSync(readOnlyFile, "protected content");
    chmodSync(readOnlyFile, 0o444); // Read-only

    // Also create normal files at various levels
    writeFileSync(path.join(tmpdir, "level1", "normal1.txt"), "test");
    writeFileSync(path.join(tmpdir, "level1", "level2", "normal2.txt"), "test");

    // Try recursive deletion without force
    {
      const { stderr, exitCode } = await $`rm -r ${path.join(tmpdir, "level1")}`;
      // On macOS, removing read-only files might succeed
      if (exitCode !== 0) {
        expect(stderr.toString()).toContain("Permission denied");
        // Verify structure still exists
        expect(existsSync(path.join(tmpdir, "level1"))).toBe(true);
      }
    }

    // Try with force flag
    {
      const { exitCode } = await $`rm -rf ${path.join(tmpdir, "level1")}`;
      expect(exitCode).toBe(0);
      expect(existsSync(path.join(tmpdir, "level1"))).toBe(false);
    }
  });

  // Test for read-only directories in nested structure
  test.skipIf(isWindows)("read-only directory in nested structure", async () => {
    const tmpdir = tmpdirSync();
    const parentDir = path.join(tmpdir, "parent");
    const readOnlyDir = path.join(parentDir, "readonly_dir");
    const childDir = path.join(readOnlyDir, "child");

    // Create directory structure
    mkdirSync(childDir, { recursive: true });

    // Add files at various levels
    writeFileSync(path.join(parentDir, "parent_file.txt"), "test");
    writeFileSync(path.join(readOnlyDir, "readonly_file.txt"), "test");
    writeFileSync(path.join(childDir, "child_file.txt"), "test");

    // Make middle directory read-only
    chmodSync(readOnlyDir, 0o555); // Read/execute only

    // Try to remove the entire structure
    {
      const { stderr, exitCode } = await $`rm -r ${parentDir}`;
      expect(exitCode).toBe(1);
      expect(stderr.toString()).toContain("Permission denied");
      // Parent directory should still exist
      expect(existsSync(parentDir)).toBe(true);
    }

    // Try with force flag - should still fail because we can't delete files in read-only dir
    {
      const { stderr, exitCode } = await $`rm -rf ${parentDir}`;
      expect(exitCode).toBe(1);
      expect(stderr.toString()).toContain("Permission denied");
    }

    // Cleanup
    chmodSync(readOnlyDir, 0o755);
    await $`rm -rf ${parentDir}`;
  });
  // Test multiple read-only files/dirs in recursive deletion
  test.skipIf(isWindows)("multiple read-only items in recursive deletion", async () => {
    const tmpdir = tmpdirSync();
    const rootDir = path.join(tmpdir, "root");

    // Create complex structure with multiple read-only items
    const structure = {
      dir1: {
        files: ["file1.txt", "readonly1.txt"],
        subdirs: {
          subdir1: {
            files: ["file2.txt"],
            readonly: false
          }
        },
        readonly: false
      },
      readonly_dir: {
        files: ["file3.txt", "file4.txt"],
        subdirs: {
          subdir2: {
            files: ["file5.txt"],
            readonly: false
          }
        },
        readonly: true
      },
      dir2: {
        files: ["file6.txt", "readonly2.txt"],
        subdirs: {},
        readonly: false
      }
    };

    // Create the structure
    mkdirSync(rootDir);
    mkdirSync(path.join(rootDir, "dir1", "subdir1"), { recursive: true });
    mkdirSync(path.join(rootDir, "readonly_dir", "subdir2"), { recursive: true });
    mkdirSync(path.join(rootDir, "dir2"));

    // Create files
    writeFileSync(path.join(rootDir, "dir1", "file1.txt"), "test");
    writeFileSync(path.join(rootDir, "dir1", "readonly1.txt"), "test");
    writeFileSync(path.join(rootDir, "dir1", "subdir1", "file2.txt"), "test");
    writeFileSync(path.join(rootDir, "readonly_dir", "file3.txt"), "test");
    writeFileSync(path.join(rootDir, "readonly_dir", "file4.txt"), "test");
    writeFileSync(path.join(rootDir, "readonly_dir", "subdir2", "file5.txt"), "test");
    writeFileSync(path.join(rootDir, "dir2", "file6.txt"), "test");
    writeFileSync(path.join(rootDir, "dir2", "readonly2.txt"), "test");

    // Set permissions
    chmodSync(path.join(rootDir, "dir1", "readonly1.txt"), 0o444);
    chmodSync(path.join(rootDir, "dir2", "readonly2.txt"), 0o444);
    chmodSync(path.join(rootDir, "readonly_dir"), 0o555);

    // Try verbose recursive deletion to see what fails
    {
      const { stdout, stderr, exitCode } = await $`rm -rv ${rootDir}`;
      expect(exitCode).toBe(1);
      const stderrStr = stderr.toString();
      expect(stderrStr).toContain("Permission denied");

      // Some files should have been deleted (those in writable directories)
      const stdoutStr = stdout.toString();
      if (stdoutStr) {
        // On macOS, some files might be deleted
        console.log("Deleted files:", stdoutStr);
      }
    }

    // Force should handle read-only files but not read-only directories
    {
      const { stderr, exitCode } = await $`rm -rf ${rootDir}`;
      expect(exitCode).toBe(1);
      expect(stderr.toString()).toContain("Permission denied");
    }

    // Cleanup
    chmodSync(path.join(rootDir, "readonly_dir"), 0o755);
    await $`rm -rf ${rootDir}`;
  });
  // Test 10: Symlink handling
  test("symlink handling", async () => {
    const tmpdir = tmpdirSync();
    const targetFile = path.join(tmpdir, "target.txt");
    const targetDir = path.join(tmpdir, "targetdir");
    const linkToFile = path.join(tmpdir, "link_to_file");
    const linkToDir = path.join(tmpdir, "link_to_dir");

    writeFileSync(targetFile, "test");
    mkdirSync(targetDir);
    writeFileSync(path.join(targetDir, "file.txt"), "test");

    // Create symlinks
    symlinkSync(targetFile, linkToFile);
    symlinkSync(targetDir, linkToDir, "dir");

    // Remove symlink to file (should not remove target)
    {
      const { exitCode } = await $`rm ${linkToFile}`;
      expect(exitCode).toBe(0);
      expect(existsSync(linkToFile)).toBe(false);
      expect(existsSync(targetFile)).toBe(true);
    }

    // Remove symlink to directory without -r (should work)
    {
      const { exitCode } = await $`rm ${linkToDir}`;
      expect(exitCode).toBe(0);
      expect(existsSync(linkToDir)).toBe(false);
      expect(existsSync(targetDir)).toBe(true);
    }
  });

  // Test cross-platform path separators on Windows
  if (isWindows) {
    test("Windows path separator handling", async () => {
      const tmpdir = tmpdirSync();

      // Test with forward slashes
      const dirForward = tmpdir + "/test_dir";
      mkdirSync(dirForward);
      writeFileSync(dirForward + "/file.txt", "test");

      {
        const { exitCode } = await $`rm -rf ${dirForward}`;
        expect(exitCode).toBe(0);
        expect(existsSync(dirForward)).toBe(false);
      }

      // Test with backslashes
      const dirBackslash = tmpdir + "\\test_dir2";
      mkdirSync(dirBackslash);
      writeFileSync(dirBackslash + "\\file.txt", "test");

      {
        const { exitCode } = await $`rm -rf ${dirBackslash}`;
        expect(exitCode).toBe(0);
        expect(existsSync(dirBackslash)).toBe(false);
      }
    });
  }
  // Additional edge cases
  test("special characters in filenames", async () => {
    const tmpdir = tmpdirSync();
    const specialFiles = [
      "file with spaces.txt",
      "file-with-dashes.txt",
      "file_with_underscores.txt",
      "file.multiple.dots.txt",
      "file'with'quotes.txt",
      '"file"with"doublequotes.txt',
    ];

    for (const filename of specialFiles) {
      const filepath = path.join(tmpdir, filename);
      writeFileSync(filepath, "test");

      const { exitCode } = await $`rm ${filepath}`;
      expect(exitCode).toBe(0);
      expect(existsSync(filepath)).toBe(false);
    }
  });

  test("directory with many entries", async () => {
    const tmpdir = tmpdirSync();
    const bigDir = path.join(tmpdir, "big");
    mkdirSync(bigDir);

    // Create 1000 files
    for (let i = 0; i < 1000; i++) {
      writeFileSync(path.join(bigDir, `file${i}.txt`), `content ${i}`);
    }

    const start = Date.now();
    const { exitCode } = await $`rm -rf ${bigDir}`;
    const duration = Date.now() - start;

    expect(exitCode).toBe(0);
    expect(existsSync(bigDir)).toBe(false);

    // Should complete reasonably quickly even with many files
    expect(duration).toBeLessThan(10000); // 10 seconds
  });

  // Test force flag with non-existent files (safe version)
  test("force flag with non-existent files", async () => {
    const tmpdir = tmpdirSync();
    const nonExistent = path.join(tmpdir, "does_not_exist.txt");

    // With -f should succeed silently
    const { stdout, stderr, exitCode } = await $`rm -f ${nonExistent}`;
    expect(exitCode).toBe(0);
    expect(stdout.toString()).toBe("");
    expect(stderr.toString()).toBe("");
  });
});