Resolver is fast now!

Former-commit-id: 2ef6397ab9
Jarred Sumner
2021-05-18 02:24:40 -07:00
parent 154e049638
commit 28ecf60267
8 changed files with 759 additions and 281 deletions

.vscode/launch.json (vendored, 4 lines changed)

@@ -4,7 +4,7 @@
{
"type": "lldb",
"request": "launch",
"name": "Launch",
"name": "Test",
"program": "${workspaceFolder}/zig-out/bin/test",
"preLaunchTask": "test",
"args": ["/usr/local/bin/zig"],
@@ -59,7 +59,7 @@
"-o",
"out"
],
"cwd": "${workspaceFolder}",
"cwd": "/Users/jarredsumner/Builds/esbuild/bench/three/src",
"console": "internalConsole"
}
// {


@@ -60,20 +60,42 @@ pub fn BSSSectionAllocator(comptime size: usize) type {
};
}
pub fn isSliceInBuffer(slice: anytype, buffer: anytype) bool {
return (@ptrToInt(buffer) <= @ptrToInt(slice.ptr) and (@ptrToInt(slice.ptr) + slice.len) <= (@ptrToInt(buffer) + buffer.len));
}
pub const IndexType = packed struct {
index: u31,
is_overflow: bool = false,
};
const HashKeyType = u64;
const IndexMap = std.HashMapUnmanaged(HashKeyType, u32, hash_hashFn, hash_eqlFn, 80);
const IndexMap = std.HashMapUnmanaged(HashKeyType, IndexType, hash_hashFn, hash_eqlFn, 80);
pub const Result = struct {
hash: HashKeyType,
index: u32,
index: IndexType,
status: ItemStatus,
pub fn hasCheckedIfExists(r: *Result) bool {
return r.status != .unknown;
pub fn hasCheckedIfExists(r: *const Result) bool {
return r.index.index != Unassigned.index;
}
pub fn isOverflowing(r: *const Result, comptime count: usize) bool {
return r.index >= count;
}
pub fn realIndex(r: *const Result, comptime count: anytype) IndexType {
return if (r.isOverflowing(count)) @intCast(IndexType, r.index - max_index) else r.index;
}
};
const Seed = 999;
pub const NotFound = std.math.maxInt(u32);
pub const Unassigned = NotFound - 1;
pub const NotFound = IndexType{
.index = std.math.maxInt(u31),
};
pub const Unassigned = IndexType{
.index = std.math.maxInt(u31) - 1,
};
pub fn hash_hashFn(key: HashKeyType) HashKeyType {
return key;
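
For reference, the new IndexType packs a 31-bit index plus an overflow flag into 32 bits, so the map's value stays word-sized while gaining a second storage region; NotFound and Unassigned become the top two u31 values instead of u32 sentinels. A minimal standalone sketch of the layout and the two-region dispatch (matching the 2021-era Zig used in this diff; the u64 buffers are illustrative):

const std = @import("std");

pub const IndexType = packed struct {
    index: u31,
    is_overflow: bool = false,
};

pub const NotFound = IndexType{ .index = std.math.maxInt(u31) };
pub const Unassigned = IndexType{ .index = std.math.maxInt(u31) - 1 };

pub fn isAssigned(i: IndexType) bool {
    // The sentinels occupy the top two index values, so two compares suffice.
    return i.index != NotFound.index and i.index != Unassigned.index;
}

pub fn at(index: IndexType, backing: []u64, overflow: []u64) ?*u64 {
    if (!isAssigned(index)) return null;
    // The flag selects the region; the 31-bit index addresses within it.
    return if (index.is_overflow) &overflow[index.index] else &backing[index.index];
}
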
@@ -91,6 +113,245 @@ pub const ItemStatus = packed enum(u3) {
const hasDeinit = std.meta.trait.hasFn("deinit")(ValueType);
pub fn BSSList(comptime ValueType: type, comptime count: anytype) type {
const max_index = count - 1;
var list_type: type = undefined;
var list_count = count;
return struct {
pub var backing_buf: [count]ValueType = undefined;
pub var backing_buf_used: u16 = 0;
const Allocator = std.mem.Allocator;
const Self = @This();
pub const ListIndex = packed struct {
index: u31,
is_overflowing: bool = false,
};
overflow_list: std.ArrayListUnmanaged(ValueType),
allocator: *Allocator,
pub var instance: Self = undefined;
pub fn init(allocator: *std.mem.Allocator) *Self {
instance = Self{
.allocator = allocator,
.overflow_list = std.ArrayListUnmanaged(ValueType){},
};
return &instance;
}
pub fn isOverflowing() bool {
return backing_buf_used >= @as(u16, count);
}
pub fn at(self: *const Self, index: ListIndex) ?*ValueType {
if (index.index == NotFound.index or index.index == Unassigned.index) return null;
if (index.is_overflowing) {
return &self.overflow_list.items[index.index];
} else {
return &backing_buf[index.index];
}
}
pub fn exists(self: *Self, value: ValueType) bool {
return isSliceInBuffer(value, backing_buf);
}
pub fn append(self: *Self, value: ValueType) !ListIndex {
var result = ListIndex{ .index = std.math.maxInt(u31), .is_overflowing = backing_buf_used > max_index };
if (result.is_overflowing) {
result.index = @intCast(u31, self.overflow_list.items.len);
try self.overflow_list.append(self.allocator, value);
} else {
result.index = backing_buf_used;
backing_buf[result.index] = value;
backing_buf_used += 1;
if (backing_buf_used >= max_index) {
self.overflow_list = try @TypeOf(self.overflow_list).initCapacity(self.allocator, count);
}
}
return result;
}
pub fn update(self: *Self, result: *ListIndex, value: ValueType) !*ValueType {
if (result.index.index == NotFound.index or result.index.index == Unassigned.index) {
result.index.is_overflowing = backing_buf_used > max_index;
if (result.index.is_overflowing) {
result.index.index = @intCast(u31, self.overflow_list.items.len);
} else {
result.index.index = backing_buf_used;
backing_buf_used += 1;
if (backing_buf_used >= max_index) {
self.overflow_list = try @TypeOf(self.overflow_list).initCapacity(self.allocator, count);
}
}
}
if (result.index.is_overflowing) {
if (self.overflow_list.items.len == result.index.index) {
const real_index = self.overflow_list.items.len;
try self.overflow_list.append(self.allocator, value);
} else {
self.overflow_list.items[result.index.index] = value;
}
return &self.overflow_list.items[result.index.index];
} else {
backing_buf[result.index.index] = value;
return &backing_buf[result.index.index];
}
}
pub fn remove(self: *Self, index: ListIndex) void {
@compileError("Not implemented yet.");
// switch (index) {
// Unassigned.index => {
// self.index.remove(_key);
// },
// NotFound.index => {
// self.index.remove(_key);
// },
// 0...max_index => {
// if (hasDeinit(ValueType)) {
// backing_buf[index].deinit();
// }
// backing_buf[index] = undefined;
// },
// else => {
// const i = index - count;
// if (hasDeinit(ValueType)) {
// self.overflow_list.items[i].deinit();
// }
// self.overflow_list.items[index - count] = undefined;
// },
// }
// return index;
}
};
}
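
A hedged usage sketch of BSSList as defined above (the value type and count are illustrative): values fill the static backing buffer first, spill into the heap-backed overflow_list once it is full, and the returned ListIndex records which region to read back from.

const std = @import("std");
const allocators = @import("./allocators.zig");

const Timestamps = allocators.BSSList(u64, 2048);

pub fn example(allocator: *std.mem.Allocator) !void {
    var list = Timestamps.init(allocator);
    const index = try list.append(12345);
    // `at` returns null for the NotFound/Unassigned sentinel indices.
    const value = list.at(index) orelse unreachable;
    std.debug.assert(value.* == 12345);
}
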
pub fn BSSStringList(comptime count: usize, comptime item_length: usize) type {
const max_index = count - 1;
const ValueType = []const u8;
return struct {
pub var slice_buf: [count][]const u8 = undefined;
pub var slice_buf_used: u16 = 0;
pub var backing_buf: [count * item_length]u8 = undefined;
pub var backing_buf_used: u64 = undefined;
const Allocator = std.mem.Allocator;
const Self = @This();
pub const ListIndex = packed struct {
index: u31,
is_overflowing: bool = false,
};
overflow_list: std.ArrayListUnmanaged(ValueType),
allocator: *Allocator,
pub var instance: Self = undefined;
pub fn init(allocator: *std.mem.Allocator) *Self {
instance = Self{
.allocator = allocator,
.overflow_list = std.ArrayListUnmanaged(ValueType){},
};
return &instance;
}
pub fn isOverflowing() bool {
return slice_buf_used >= @as(u16, count);
}
pub fn at(self: *const Self, index: IndexType) ?ValueType {
if (index.index == NotFound.index or index.index == Unassigned.index) return null;
if (index.is_overflowing) {
return &self.overflow_list.items[index.index];
} else {
return &slice_buf[index.index];
}
}
pub fn exists(self: *Self, value: ValueType) bool {
return isSliceInBuffer(value, slice_buf);
}
pub fn editableSlice(slice: []const u8) []u8 {
return constStrToU8(slice);
}
pub fn append(self: *Self, _value: anytype) ![]const u8 {
var value = _value;
if (value.len + backing_buf_used < backing_buf.len - 1) {
const start = backing_buf_used;
backing_buf_used += value.len;
std.mem.copy(u8, backing_buf[start..backing_buf_used], _value);
value = backing_buf[start..backing_buf_used];
} else {
value = try self.allocator.dupe(u8, _value);
}
var result = ListIndex{ .index = std.math.maxInt(u31), .is_overflowing = slice_buf_used > max_index };
if (result.is_overflowing) {
result.index = @intCast(u31, self.overflow_list.items.len);
} else {
result.index = slice_buf_used;
slice_buf_used += 1;
if (slice_buf_used >= max_index) {
self.overflow_list = try @TypeOf(self.overflow_list).initCapacity(self.allocator, count);
}
}
if (result.is_overflowing) {
if (self.overflow_list.items.len == result.index) {
const real_index = self.overflow_list.items.len;
try self.overflow_list.append(self.allocator, value);
} else {
self.overflow_list.items[result.index] = value;
}
return self.overflow_list.items[result.index];
} else {
slice_buf[result.index] = value;
return slice_buf[result.index];
}
}
pub fn remove(self: *Self, index: ListIndex) void {
@compileError("Not implemented yet.");
// switch (index) {
// Unassigned.index => {
// self.index.remove(_key);
// },
// NotFound.index => {
// self.index.remove(_key);
// },
// 0...max_index => {
// if (hasDeinit(ValueType)) {
// slice_buf[index].deinit();
// }
// slice_buf[index] = undefined;
// },
// else => {
// const i = index - count;
// if (hasDeinit(ValueType)) {
// self.overflow_list.items[i].deinit();
// }
// self.overflow_list.items[index - count] = undefined;
// },
// }
// return index;
}
};
}
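
One detail worth noting: BSSStringList.append copies the bytes into storage the list owns, and editableSlice (via constStrToU8 below) casts the const back off, so a caller can append and then rewrite the copy in place; fs.zig uses exactly this to lowercase file names. A hedged sketch, assuming instance has already been set up via init():

const std = @import("std");
const allocators = @import("./allocators.zig");

const FilenameStore = allocators.BSSStringList(2048, 64);

fn internLowercased(name: []const u8) ![]const u8 {
    // append copies `name` into the store's backing buffer (or the heap).
    const stored = try FilenameStore.instance.append(name);
    // The store owns the copy, so mutating it in place is safe.
    var editable = FilenameStore.editableSlice(stored);
    for (editable) |*c| {
        c.* = std.ascii.toLower(c.*);
    }
    return stored;
}
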
pub fn BSSMap(comptime ValueType: type, comptime count: anytype, store_keys: bool, estimated_key_length: usize) type {
const max_index = count - 1;
const BSSMapType = struct {
@@ -99,8 +360,6 @@ pub fn BSSMap(comptime ValueType: type, comptime count: anytype, store_keys: boo
const Allocator = std.mem.Allocator;
const Self = @This();
// const HashTableAllocator = BSSSectionAllocator(@bitSizeOf(HashKeyType) * count * 2);
index: IndexMap,
overflow_list: std.ArrayListUnmanaged(ValueType),
allocator: *Allocator,
@@ -129,9 +388,9 @@ pub fn BSSMap(comptime ValueType: type, comptime count: anytype, store_keys: boo
return Result{
.hash = _key,
.index = index.entry.value,
.status = switch (index.entry.value) {
NotFound => .not_found,
Unassigned => .unknown,
.status = switch (index.entry.value.index) {
NotFound.index => .not_found,
Unassigned.index => .unknown,
else => .exists,
},
};
@@ -155,43 +414,56 @@ pub fn BSSMap(comptime ValueType: type, comptime count: anytype, store_keys: boo
self.index.put(self.allocator, result.hash, NotFound) catch unreachable;
}
pub fn atIndex(self: *const Self, index: u32) ?*ValueType {
return switch (index) {
NotFound, Unassigned => null,
0...max_index => &backing_buf[index],
else => &self.overflow_list.items[index - count],
};
}
pub fn atIndex(self: *const Self, index: IndexType) ?*ValueType {
if (index.index == NotFound.index or index.index == Unassigned.index) return null;
pub fn put(self: *Self, result: *Result, value: ValueType) !*ValueType {
var index: u32 = @intCast(u32, backing_buf_used + 1);
if (index >= max_index) {
const real_index = self.overflow_list.items.len;
index += @truncate(u32, real_index);
try self.overflow_list.append(self.allocator, value);
result.index = index;
self.index.putAssumeCapacity(result.hash, index);
return &self.overflow_list.items[real_index];
if (index.is_overflow) {
return &self.overflow_list.items[index.index];
} else {
backing_buf_used += 1;
backing_buf[index] = value;
result.index = index;
self.index.putAssumeCapacity(result.hash, index);
if (backing_buf_used >= max_index - 1) {
self.overflow_list = try @TypeOf(self.overflow_list).initCapacity(self.allocator, count);
}
return &backing_buf[index];
return &backing_buf[index.index];
}
}
pub fn remove(self: *Self, key: string) u32 {
pub fn put(self: *Self, result: *Result, value: ValueType) !*ValueType {
if (result.index.index == NotFound.index or result.index.index == Unassigned.index) {
result.index.is_overflow = backing_buf_used > max_index;
if (result.index.is_overflow) {
result.index.index = @intCast(u31, self.overflow_list.items.len);
} else {
result.index.index = backing_buf_used;
backing_buf_used += 1;
if (backing_buf_used >= max_index) {
self.overflow_list = try @TypeOf(self.overflow_list).initCapacity(self.allocator, count);
}
}
}
try self.index.put(self.allocator, result.hash, result.index);
if (result.index.is_overflow) {
if (self.overflow_list.items.len == result.index.index) {
const real_index = self.overflow_list.items.len;
try self.overflow_list.append(self.allocator, value);
} else {
self.overflow_list.items[result.index.index] = value;
}
return &self.overflow_list.items[result.index.index];
} else {
backing_buf[result.index.index] = value;
return &backing_buf[result.index.index];
}
}
pub fn remove(self: *Self, key: string) IndexType {
const _key = Wyhash.hash(Seed, key);
const index = self.index.get(_key) orelse return;
switch (index) {
Unassigned => {
Unassigned.index => {
self.index.remove(_key);
},
NotFound => {
NotFound.index => {
self.index.remove(_key);
},
0...max_index => {
@@ -243,18 +515,19 @@ pub fn BSSMap(comptime ValueType: type, comptime count: anytype, store_keys: boo
return @call(.{ .modifier = .always_inline }, BSSMapType.get, .{ self.map, key });
}
pub fn atIndex(self: *Self, index: u32) ?*ValueType {
pub fn atIndex(self: *Self, index: IndexType) ?*ValueType {
return @call(.{ .modifier = .always_inline }, BSSMapType.atIndex, .{ self.map, index });
}
pub fn keyAtIndex(self: *Self, index: u32) ?[]const u8 {
return switch (index) {
Unassigned, NotFound => null,
0...max_index => {
return key_list_slices[index];
},
pub fn keyAtIndex(self: *Self, index: IndexType) ?[]const u8 {
return switch (index.index) {
Unassigned.index, NotFound.index => null,
else => {
return key_list_overflow.items[index - count];
if (!index.is_overflow) {
return key_list_slices[index.index];
} else {
return key_list_overflow.items[index.index];
}
},
};
}
@@ -268,27 +541,40 @@ pub fn BSSMap(comptime ValueType: type, comptime count: anytype, store_keys: boo
return ptr;
}
pub fn isKeyStaticallyAllocated(key: anytype) bool {
return isSliceInBuffer(key, &key_list_buffer);
}
// There are two parts to this.
// 1. Storing the underlying string.
// 2. Making the key accessible at the index.
pub fn putKey(self: *Self, key: anytype, result: *Result) !void {
if (key_list_buffer_used + key.len < key_list_buffer.len) {
var slice: []u8 = undefined;
// Is this actually a slice into the map? Don't free it.
if (isKeyStaticallyAllocated(key)) {
slice = constStrToU8(key);
} else if (key_list_buffer_used + key.len < key_list_buffer.len) {
const start = key_list_buffer_used;
key_list_buffer_used += key.len;
var slice = key_list_buffer[start..key_list_buffer_used];
slice = key_list_buffer[start..key_list_buffer_used];
std.mem.copy(u8, slice, key);
} else {
slice = try self.map.allocator.dupe(u8, key);
}
if (result.index < count) {
key_list_slices[result.index] = slice;
if (!result.index.is_overflow) {
key_list_slices[result.index.index] = slice;
} else {
if (@intCast(u31, key_list_overflow.items.len) > result.index.index) {
const existing_slice = key_list_overflow.items[result.index.index];
if (!isKeyStaticallyAllocated(existing_slice)) {
self.map.allocator.free(existing_slice);
}
key_list_overflow.items[result.index.index] = slice;
} else {
try key_list_overflow.append(self.map.allocator, slice);
}
} else if (result.index > key_list_overflow.items.len) {
try key_list_overflow.append(self.map.allocator, try self.map.allocator.dupe(u8, key));
} else {
const real_index = result.index - count;
if (key_list_overflow.items[real_index].len > 0) {
self.map.allocator.free(key_list_overflow.items[real_index]);
}
key_list_overflow.items[real_index] = try self.map.allocator.dupe(u8, key);
}
}
@@ -297,8 +583,12 @@ pub fn BSSMap(comptime ValueType: type, comptime count: anytype, store_keys: boo
}
// For now, don't free the keys.
pub fn remove(self: *Self, key: string) u32 {
pub fn remove(self: *Self, key: string) IndexType {
return self.map.remove(key);
}
};
}
fn constStrToU8(s: []const u8) []u8 {
return @intToPtr([*]u8, @ptrToInt(s.ptr))[0..s.len];
}


@@ -34,7 +34,7 @@ pub const Bundler = struct {
output_files: std.ArrayList(options.OutputFile),
resolve_results: *ResolveResults,
resolve_queue: std.fifo.LinearFifo(Resolver.Resolver.Result, std.fifo.LinearFifoBufferType.Dynamic),
elapsed: i128 = 0,
// to_bundle:
// thread_pool: *ThreadPool,
@@ -139,8 +139,16 @@ pub const Bundler = struct {
ast: js_ast.Ast,
};
pub var tracing_start: i128 = if (enableTracing) 0 else undefined;
pub fn parse(bundler: *Bundler, path: Fs.Path) ?ParseResult {
if (enableTracing) {
tracing_start = std.time.nanoTimestamp();
}
defer {
if (enableTracing) {
bundler.elapsed += std.time.nanoTimestamp() - tracing_start;
}
}
var result: ParseResult = undefined;
const loader: options.Loader = bundler.options.loaders.get(path.name.ext) orelse .file;
const entry = bundler.resolver.caches.fs.readFile(bundler.fs, path.text) catch return null;
@@ -274,6 +282,13 @@ pub const Bundler = struct {
else => Global.panic("Unsupported resolve mode: {s}", .{@tagName(bundler.options.resolve_mode)}),
}
if (enableTracing) {
Output.print(
"\n---Tracing---\nResolve time: {d}\nParsing time: {d}\n---Tracing--\n\n",
.{ bundler.resolver.elapsed, bundler.elapsed },
);
}
return try options.TransformResult.init(bundler.output_files.toOwnedSlice(), log, allocator);
}
};
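
The new tracing in parse() (and in resolve(), further down) is the same small idiom in both places: snapshot std.time.nanoTimestamp() on entry and accumulate the difference in a defer, so every early return is counted. A reduced standalone version:

const std = @import("std");

var elapsed: i128 = 0;

fn tracedWork() void {
    const start = std.time.nanoTimestamp();
    // The defer runs on every return path, so no exit can skip the accounting.
    defer elapsed += std.time.nanoTimestamp() - start;

    std.time.sleep(1 * std.time.ns_per_ms); // stand-in for the traced work
}
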


@@ -14,11 +14,25 @@ const allocators = @import("./allocators.zig");
threadlocal var scratch_lookup_buffer: [256]u8 = undefined;
pub const Preallocate = struct {
pub const Counts = struct {
pub const dir_entry: usize = 1024;
pub const files: usize = 2048;
};
};
pub const FileSystem = struct {
allocator: *std.mem.Allocator,
top_level_dir: string = "/",
fs: Implementation,
dirname_store: *DirnameStore,
filename_store: *FilenameStore,
pub var instance: FileSystem = undefined;
pub const DirnameStore = allocators.BSSStringList(Preallocate.Counts.dir_entry, 256);
pub const FilenameStore = allocators.BSSStringList(Preallocate.Counts.files, 64);
pub const Error = error{
ENOENT,
EACCESS,
@@ -27,22 +41,74 @@ pub const FileSystem = struct {
};
pub fn init1(allocator: *std.mem.Allocator, top_level_dir: ?string, enable_watcher: bool) !*FileSystem {
var files = try allocator.create(FileSystem);
files.* = FileSystem{
const _top_level_dir = top_level_dir orelse (if (isBrowser) "/project" else try std.process.getCwdAlloc(allocator));
instance = FileSystem{
.allocator = allocator,
.top_level_dir = top_level_dir orelse (if (isBrowser) "/project" else try std.process.getCwdAlloc(allocator)),
.fs = Implementation.init(allocator, enable_watcher),
.top_level_dir = _top_level_dir,
.fs = Implementation.init(allocator, _top_level_dir, enable_watcher),
// .stats = std.StringHashMap(Stat).init(allocator),
.dirname_store = DirnameStore.init(allocator),
.filename_store = FilenameStore.init(allocator),
};
return files;
instance.fs.parent_fs = &instance;
_ = DirEntry.EntryStore.init(allocator);
return &instance;
}
pub const DirEntry = struct {
pub const EntryMap = std.StringArrayHashMap(*Entry);
pub const EntryMap = std.StringHashMap(EntryStore.ListIndex);
pub const EntryStore = allocators.BSSList(Entry, Preallocate.Counts.files);
dir: string,
data: EntryMap,
pub fn addEntry(dir: *DirEntry, entry: std.fs.Dir.Entry) !void {
var _kind: Entry.Kind = undefined;
switch (entry.kind) {
.Directory => {
_kind = Entry.Kind.dir;
},
.SymLink => {
// This might be wrong!
_kind = Entry.Kind.file;
},
.File => {
_kind = Entry.Kind.file;
},
else => {
return;
},
}
// entry.name only lives for the duration of the iteration
var name = FileSystem.FilenameStore.editableSlice(try FileSystem.FilenameStore.instance.append(entry.name));
for (entry.name) |c, i| {
name[i] = std.ascii.toLower(c);
}
var symlink: []u8 = "";
if (entry.kind == std.fs.Dir.Entry.Kind.SymLink) {
symlink = name;
}
const index = try EntryStore.instance.append(Entry{
.base = name,
.dir = dir.dir,
.mutex = Mutex.init(),
// Call "stat" lazily for performance. The "@material-ui/icons" package
// contains a directory with over 11,000 entries in it and running "stat"
// for each entry was a big performance issue for that package.
.need_stat = entry.kind == .SymLink,
.cache = Entry.Cache{
.symlink = symlink,
.kind = _kind,
},
});
try dir.data.put(name, index);
}
pub fn updateDir(i: *DirEntry, dir: string) void {
var iter = i.data.iterator();
i.dir = dir;
@@ -67,9 +133,11 @@ pub const FileSystem = struct {
pub fn deinit(d: *DirEntry) void {
d.data.allocator.free(d.dir);
for (d.data.items()) |item| {
item.value.deinit(d.data.allocator);
var iter = d.data.iterator();
while (iter.next()) |file_entry| {
EntryStore.instance.at(file_entry.value).?.deinit(d.data.allocator);
}
d.data.deinit();
}
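
The copy at the top of addEntry above is load-bearing: std.fs.Dir.Iterator reuses an internal name buffer, so entry.name is only valid until the next call to next(). A minimal illustration of the safe pattern (the directory must have been opened with .iterate = true):

const std = @import("std");

fn listNames(allocator: *std.mem.Allocator, dir: std.fs.Dir) !std.ArrayList([]const u8) {
    var names = std.ArrayList([]const u8).init(allocator);
    var iter = dir.iterate();
    while (try iter.next()) |entry| {
        // entry.name is invalidated by the next iteration, so copy it into
        // storage with a stable lifetime before keeping a reference.
        try names.append(try allocator.dupe(u8, entry.name));
    }
    return names;
}
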
@@ -83,7 +151,8 @@ pub const FileSystem = struct {
end = i;
}
const query = scratch_lookup_buffer[0 .. end + 1];
const result = entry.data.get(query) orelse return null;
const result_index = entry.data.get(query) orelse return null;
const result = EntryStore.instance.at(result_index) orelse return null;
if (!strings.eql(result.base, query)) {
return Entry.Lookup{ .entry = result, .diff_case = Entry.Lookup.DifferentCase{
.dir = entry.dir,
@@ -132,8 +201,8 @@ pub const FileSystem = struct {
};
pub fn kind(entry: *Entry, fs: *Implementation) Kind {
entry.mutex.lock();
defer entry.mutex.unlock();
// entry.mutex.lock();
// defer entry.mutex.unlock();
if (entry.need_stat) {
entry.need_stat = false;
entry.cache = fs.kind(entry.dir, entry.base) catch unreachable;
@@ -142,8 +211,8 @@ pub const FileSystem = struct {
}
pub fn symlink(entry: *Entry, fs: *Implementation) string {
entry.mutex.lock();
defer entry.mutex.unlock();
// entry.mutex.lock();
// defer entry.mutex.unlock();
if (entry.need_stat) {
entry.need_stat = false;
entry.cache = fs.kind(entry.dir, entry.base) catch unreachable;
@@ -189,11 +258,14 @@ pub const FileSystem = struct {
limiter: Limiter,
watcher: ?std.StringHashMap(WatchData) = null,
watcher_mutex: Mutex = Mutex.init(),
cwd: string,
parent_fs: *FileSystem = undefined,
pub fn init(allocator: *std.mem.Allocator, enable_watcher: bool) RealFS {
pub fn init(allocator: *std.mem.Allocator, cwd: string, enable_watcher: bool) RealFS {
return RealFS{
.entries = EntriesOption.Map.init(allocator),
.allocator = allocator,
.cwd = cwd,
.limiter = Limiter.init(allocator),
.watcher = if (enable_watcher) std.StringHashMap(WatchData).init(allocator) else null,
};
@@ -306,7 +378,7 @@ pub const FileSystem = struct {
// This custom map implementation:
// - Preallocates a fixed amount of directory name space
// - Doesn't store directory names which don't exist.
pub const Map = allocators.BSSMap(EntriesOption, 1024, true, 128);
pub const Map = allocators.BSSMap(EntriesOption, Preallocate.Counts.dir_entry, false, 128);
};
// Limit the number of files open simultaneously to avoid ulimit issues
@@ -337,7 +409,7 @@ pub const FileSystem = struct {
};
pub fn openDir(fs: *RealFS, unsafe_dir_string: string) std.fs.File.OpenError!std.fs.Dir {
return try std.fs.openDirAbsolute(unsafe_dir_string, std.fs.Dir.OpenDirOptions{ .iterate = true, .access_sub_paths = true });
return try std.fs.openDirAbsolute(unsafe_dir_string, std.fs.Dir.OpenDirOptions{ .iterate = true, .access_sub_paths = true, .no_follow = true });
}
fn readdir(
@@ -349,48 +421,10 @@ pub const FileSystem = struct {
defer fs.limiter.after();
var iter: std.fs.Dir.Iterator = handle.iterate();
var dir = DirEntry.init("", fs.allocator);
var dir = DirEntry.init(_dir, fs.allocator);
errdefer dir.deinit();
while (try iter.next()) |_entry| {
const entry: std.fs.Dir.Entry = _entry;
var _kind: Entry.Kind = undefined;
switch (entry.kind) {
.Directory => {
_kind = Entry.Kind.dir;
},
.SymLink => {
// This might be wrong!
_kind = Entry.Kind.file;
},
.File => {
_kind = Entry.Kind.file;
},
else => {
continue;
},
}
// entry.name only lives for the duration of the iteration
var name = try fs.allocator.alloc(u8, entry.name.len);
for (entry.name) |c, i| {
name[i] = std.ascii.toLower(c);
}
var entry_ptr = try fs.allocator.create(Entry);
entry_ptr.* = Entry{
.base = name,
.dir = "",
.mutex = Mutex.init(),
// Call "stat" lazily for performance. The "@material-ui/icons" package
// contains a directory with over 11,000 entries in it and running "stat"
// for each entry was a big performance issue for that package.
.need_stat = true,
.cache = Entry.Cache{
.symlink = if (entry.kind == std.fs.Dir.Entry.Kind.SymLink) (try fs.allocator.dupe(u8, name)) else "",
.kind = _kind,
},
};
try dir.data.put(name, entry_ptr);
try dir.addEntry(_entry);
}
return dir;
@@ -407,7 +441,7 @@ pub const FileSystem = struct {
fs.entries_mutex.lock();
defer fs.entries_mutex.unlock();
var get_or_put_result = try fs.entries.getOrPut(dir);
var opt = try fs.entries.put(null, false, &get_or_put_result, EntriesOption{
var opt = try fs.entries.put(&get_or_put_result, EntriesOption{
.err = DirEntry.Err{ .original_err = err, .canonical_error = err },
});
@@ -422,7 +456,8 @@ pub const FileSystem = struct {
threadlocal var temp_entries_option: EntriesOption = undefined;
pub fn readDirectory(fs: *RealFS, dir: string, _handle: ?std.fs.Dir, recursive: bool) !*EntriesOption {
pub fn readDirectory(fs: *RealFS, _dir: string, _handle: ?std.fs.Dir, recursive: bool) !*EntriesOption {
var dir = _dir;
var cache_result: ?allocators.Result = null;
if (!fs.do_not_cache_entries) {
@@ -446,6 +481,11 @@ pub const FileSystem = struct {
}
}
// if we get this far, it's a real directory, so we can just store the dir name.
if (_handle == null) {
dir = try FilenameStore.instance.append(_dir);
}
// Cache miss: read the directory entries
const entries = fs.readdir(
dir,
@@ -454,21 +494,22 @@ pub const FileSystem = struct {
return fs.readDirectoryError(dir, err) catch unreachable;
};
if (fs.watcher) |*watcher| {
fs.watcher_mutex.lock();
defer fs.watcher_mutex.unlock();
var _entries = entries.data.items();
const names = try fs.allocator.alloc([]const u8, _entries.len);
for (_entries) |entry, i| {
names[i] = try fs.allocator.dupe(u8, entry.key);
}
strings.sortAsc(names);
// if (fs.watcher) |*watcher| {
// fs.watcher_mutex.lock();
// defer fs.watcher_mutex.unlock();
// var _entries = watcher.iterator();
// const names = try fs.allocator.alloc([]const u8, _entries.len);
// for (_entries) |entry, i| {
// names[i] = try fs.allocator.dupe(u8, entry.key);
// }
// strings.sortAsc(names);
// try watcher.put(
// try fs.allocator.dupe(u8, dir),
// WatchData{ .dir_entries = names, .state = .dir_has_entries },
// );
// }
try watcher.put(
try fs.allocator.dupe(u8, dir),
WatchData{ .dir_entries = names, .state = .dir_has_entries },
);
}
if (!fs.do_not_cache_entries) {
fs.entries_mutex.lock();
defer fs.entries_mutex.unlock();
@@ -476,14 +517,10 @@ pub const FileSystem = struct {
.entries = entries,
};
var entries_ptr = try fs.entries.put(dir, true, &cache_result.?, result);
const dir_key = fs.entries.keyAtIndex(cache_result.?.index) orelse unreachable;
entries_ptr.entries.updateDir(dir_key);
return entries_ptr;
return try fs.entries.put(&cache_result.?, result);
}
temp_entries_option = EntriesOption{ .entries = entries };
temp_entries_option.entries.updateDir(try fs.allocator.dupe(u8, dir));
return &temp_entries_option;
}
@@ -532,8 +569,7 @@ pub const FileSystem = struct {
pub fn kind(fs: *RealFS, _dir: string, base: string) !Entry.Cache {
var dir = _dir;
var combo = [2]string{ dir, base };
var entry_path = try std.fs.path.join(fs.allocator, &combo);
defer fs.allocator.free(entry_path);
var entry_path = path_handler.normalizeAndJoinString(fs.cwd, &combo, .auto);
fs.limiter.before();
defer fs.limiter.after();
@@ -544,7 +580,7 @@ pub const FileSystem = struct {
var _kind = stat.kind;
var cache = Entry.Cache{ .kind = Entry.Kind.file, .symlink = "" };
var symlink: []u8 = &([_]u8{});
var symlink: []const u8 = "";
if (_kind == .SymLink) {
// windows has a max filepath of 255 chars
// we give it a little longer for other platforms
@@ -554,15 +590,13 @@ pub const FileSystem = struct {
var links_walked: u8 = 0;
while (links_walked < 255) : (links_walked += 1) {
var link = try std.os.readlink(symlink, out_slice);
var link: string = try std.os.readlink(symlink, out_slice);
if (!std.fs.path.isAbsolute(link)) {
combo[0] = dir;
combo[1] = link;
if (link.ptr != &out_buffer) {
fs.allocator.free(link);
}
link = std.fs.path.join(fs.allocator, &combo) catch return cache;
link = path_handler.normalizeAndJoinStringBuf(fs.cwd, out_slice, &combo, .auto);
}
// TODO: do we need to clean the path?
symlink = link;
@@ -590,7 +624,9 @@ pub const FileSystem = struct {
} else {
cache.kind = .file;
}
cache.symlink = symlink;
if (symlink.len > 0) {
cache.symlink = try fs.allocator.dupe(u8, symlink);
}
return cache;
}


@@ -1,6 +1,7 @@
const std = @import("std");
pub usingnamespace @import("strings.zig");
pub const C = @import("c.zig");
pub const BuildTarget = enum { native, wasm, wasi };
pub const build_target: BuildTarget = comptime {
if (std.Target.current.isWasm() and std.Target.current.getOsTag() == .wasi) {
@@ -16,6 +17,9 @@ pub const isWasm = build_target == .wasm;
pub const isNative = build_target == .native;
pub const isWasi = build_target == .wasi;
pub const isBrowser = !isWasi and isWasm;
pub const isWindows = std.Target.current.os.tag == .windows;
pub const enableTracing = true;
pub const isDebug = std.builtin.Mode.Debug == std.builtin.mode;


@@ -318,7 +318,7 @@ pub const G = struct {
pub const Comment = struct { loc: logger.Loc, text: string };
pub const Property = struct {
ts_decorators: ExprNodeList = &([_]Expr{}),
ts_decorators: ExprNodeList = &([_]ExprNodeIndex{}),
// Key is optional for spread
key: ?ExprNodeIndex = null,


@@ -343,8 +343,8 @@ pub const BundleOptions = struct {
allocator,
resolved_defines,
),
.output_dir = try std.fs.path.join(allocator, &output_dir_parts),
.loaders = loaders,
.output_dir = try fs.joinAlloc(allocator, &output_dir_parts),
.write = transform.write orelse false,
.external = ExternalModules.init(allocator, &fs.fs, fs.top_level_dir, transform.external, log),
.entry_points = transform.entry_points,


@@ -28,7 +28,7 @@ pub const SideEffectsData = struct {
};
pub const DirInfo = struct {
pub const Index = u32;
pub const Index = allocators.IndexType;
// These objects are immutable, so we can just point to the parent directory
// and avoid having to lock the cache again
@@ -39,12 +39,24 @@ pub const DirInfo = struct {
enclosing_browser_scope: Index = allocators.NotFound,
abs_path: string = "",
entries: Fs.FileSystem.DirEntry = undefined,
entries: Index = undefined,
has_node_modules: bool = false, // Is there a "node_modules" subdirectory?
package_json: ?*PackageJSON = null, // Is there a "package.json" file?
tsconfig_json: ?*TSConfigJSON = null, // Is there a "tsconfig.json" file in this directory or a parent directory?
abs_real_path: string = "", // If non-empty, this is the real absolute path resolving any symlinks
pub fn getEntries(dirinfo: *DirInfo) ?*Fs.FileSystem.DirEntry {
var entries_ptr = Fs.FileSystem.instance.fs.entries.atIndex(dirinfo.entries) orelse return null;
switch (entries_ptr.*) {
.entries => |entr| {
return &entries_ptr.entries;
},
.err => {
return null;
},
}
}
pub fn getParent(i: *DirInfo) ?*DirInfo {
return HashMap.instance.atIndex(i.parent);
}
@@ -57,7 +69,7 @@ pub const DirInfo = struct {
// 2. Don't expect a provided key to exist after it's queried
// 3. Store whether a directory has been queried and whether that query was successful.
// 4. Allocate onto the .bss segment (https://en.wikipedia.org/wiki/.bss#BSS_in_C) instead of the heap, so we can avoid memory leaks
pub const HashMap = allocators.BSSMap(DirInfo, 1024, true, 128);
pub const HashMap = allocators.BSSMap(DirInfo, Fs.Preallocate.Counts.dir_entry, false, 128);
};
pub const TemporaryBuffer = struct {
pub threadlocal var ExtensionPathBuf = std.mem.zeroes([512]u8);
@@ -73,6 +85,7 @@ pub const Resolver = struct {
allocator: *std.mem.Allocator,
debug_logs: ?DebugLogs = null,
elapsed: i128 = 0, // tracing
caches: cache.Cache.Set,
@@ -291,8 +304,17 @@ pub const Resolver = struct {
}
}
}
var tracing_start: i128 = if (enableTracing) 0 else undefined;
pub fn resolve(r: *Resolver, source_dir: string, import_path: string, kind: ast.ImportKind) !?Result {
if (enableTracing) {
tracing_start = std.time.nanoTimestamp();
}
defer {
if (enableTracing) {
r.elapsed += std.time.nanoTimestamp() - tracing_start;
}
}
if (r.log.level == .verbose) {
if (r.debug_logs != null) {
r.debug_logs.?.deinit();
@@ -740,34 +762,210 @@ pub const Resolver = struct {
return path[0] != '/' and !strings.startsWith(path, "./") and !strings.startsWith(path, "../") and !strings.eql(path, ".") and !strings.eql(path, "..");
}
pub const DirEntryResolveQueueItem = struct { result: allocators.Result, unsafe_path: string };
threadlocal var _dir_entry_paths_to_resolve: [256]DirEntryResolveQueueItem = undefined;
threadlocal var _open_dirs: [256]std.fs.Dir = undefined;
fn dirInfoCached(r: *Resolver, path: string) !?*DirInfo {
var dir_info_entry = try r.dir_cache.getOrPut(path);
var ptr = try r.dirInfoCachedGetOrPut(path, &dir_info_entry);
return ptr;
}
fn dirInfoCachedGetOrPut(r: *Resolver, path: string, dir_info_entry: *allocators.Result) !?*DirInfo {
switch (dir_info_entry.status) {
.unknown => {
return try r.dirInfoUncached(path, dir_info_entry);
},
.not_found => {
return null;
},
.exists => {
return r.dir_cache.atIndex(dir_info_entry.index);
},
const top_result = try r.dir_cache.getOrPut(path);
if (top_result.status != .unknown) {
return r.dir_cache.atIndex(top_result.index);
}
// if (__entry.found_existing) {
// return if (__entry.entry.value == DirInfo.NotFound) null else __entry.entry.value;
// }
// __entry.entry.value = DirInfo.NotFound;
var i: i32 = 1;
_dir_entry_paths_to_resolve[0] = (DirEntryResolveQueueItem{ .result = top_result, .unsafe_path = path });
var top = path;
var top_parent: allocators.Result = allocators.Result{
.index = allocators.NotFound,
.hash = 0,
.status = .not_found,
};
const root_path = if (isWindows) std.fs.path.diskDesignator(path) else "/";
// try r.dirInfoUncached(path);
// const entry = r.dir_cache.get(path) orelse unreachable;
// return if (__entry.entry.value == DirInfo.NotFound) null else entry;
while (std.fs.path.dirname(top)) |_top| {
var result = try r.dir_cache.getOrPut(_top);
if (result.status != .unknown) {
top_parent = result;
break;
}
_dir_entry_paths_to_resolve[@intCast(usize, i)] = DirEntryResolveQueueItem{
.unsafe_path = _top,
.result = result,
};
i += 1;
top = _top;
}
if (std.fs.path.dirname(top) == null and !strings.eql(top, root_path)) {
var result = try r.dir_cache.getOrPut(root_path);
if (result.status != .unknown) {
top_parent = result;
} else {
_dir_entry_paths_to_resolve[@intCast(usize, i)] = DirEntryResolveQueueItem{
.unsafe_path = root_path,
.result = result,
};
i += 1;
top = root_path;
}
}
var queue_slice: []DirEntryResolveQueueItem = _dir_entry_paths_to_resolve[0..@intCast(usize, i)];
std.debug.assert(queue_slice.len > 0);
var open_dir_count: usize = 0;
// When this function halts, any item not processed means it's not found.
defer {
// Close anything we opened along the way.
if (open_dir_count > 0) {
var open_dirs: []std.fs.Dir = _open_dirs[0..open_dir_count];
for (open_dirs) |*open_dir| {
open_dir.close();
}
}
}
var rfs: *Fs.FileSystem.RealFS = &r.fs.fs;
rfs.entries_mutex.lock();
defer rfs.entries_mutex.unlock();
// We want to walk in a straight line from the topmost directory to the desired directory
// For each directory we visit, we get the entries, but not traverse into child directories
// (unless those child directories are in the queue)
// Going top-down rather than bottom-up should give the best performance because we can use
// the file handle from the parent directory to open the child directory
// It's important that we walk in precisely a straight line
// For example
// "/home/jarred/Code/node_modules/react/cjs/react.development.js"
// ^
// If we start there, we will traverse all of /home/jarred, including e.g. /home/jarred/Downloads
// which is completely irrelevant.
// After much experimentation, fts_open is not the fastest way. fts actually just uses readdir!!
var _safe_path: ?string = null;
// Start at the top.
while (queue_slice.len > 0) {
var queue_top = queue_slice[queue_slice.len - 1];
defer top_parent = queue_top.result;
queue_slice.len -= 1;
var _open_dir: anyerror!std.fs.Dir = undefined;
if (open_dir_count > 0) {
_open_dir = _open_dirs[open_dir_count - 1].openDir(std.fs.path.basename(queue_top.unsafe_path), .{ .iterate = true });
} else {
_open_dir = std.fs.openDirAbsolute(queue_top.unsafe_path, .{ .iterate = true });
}
const open_dir = _open_dir catch |err| {
switch (err) {
error.EACCESS => {},
// Ignore "ENOTDIR" here so that calling "ReadDirectory" on a file behaves
// as if there is nothing there at all instead of causing an error due to
// the directory actually being a file. This is a workaround for situations
// where people try to import from a path containing a file as a parent
// directory. The "pnpm" package manager generates a faulty "NODE_PATH"
// list which contains such paths and treating them as missing means we just
// ignore them during path resolution.
error.ENOENT,
error.ENOTDIR,
error.IsDir,
error.NotDir,
error.FileNotFound,
=> {
return null;
},
else => {
var cached_dir_entry_result = rfs.entries.getOrPut(queue_top.unsafe_path) catch unreachable;
r.dir_cache.markNotFound(queue_top.result);
rfs.entries.markNotFound(cached_dir_entry_result);
const pretty = r.prettyPath(Path.init(queue_top.unsafe_path));
r.log.addErrorFmt(
null,
logger.Loc{},
r.allocator,
"Cannot read directory \"{s}\": {s}",
.{
pretty,
@errorName(err),
},
) catch {};
},
}
return null;
};
// these objects mostly just wrap the file descriptor, so it's fine to keep them.
_open_dirs[open_dir_count] = open_dir;
open_dir_count += 1;
if (_safe_path == null) {
// Now that we've opened the topmost directory successfully, it's reasonable to store the slice.
_safe_path = try r.fs.dirname_store.append(path);
}
const safe_path = _safe_path.?;
var dir_path_i = std.mem.indexOf(u8, safe_path, queue_top.unsafe_path) orelse unreachable;
const dir_path = safe_path[dir_path_i .. dir_path_i + queue_top.unsafe_path.len];
var dir_iterator = open_dir.iterate();
var cached_dir_entry_result = rfs.entries.getOrPut(dir_path) catch unreachable;
var dir_entries_option: *Fs.FileSystem.RealFS.EntriesOption = undefined;
var has_dir_entry_result: bool = false;
if (rfs.entries.atIndex(cached_dir_entry_result.index)) |cached_entry| {
if (std.meta.activeTag(cached_entry.*) == .entries) {
dir_entries_option = cached_entry;
}
}
if (!has_dir_entry_result) {
dir_entries_option = try rfs.entries.put(&cached_dir_entry_result, .{
.entries = Fs.FileSystem.DirEntry.init(dir_path, r.fs.allocator),
});
has_dir_entry_result = true;
}
while (try dir_iterator.next()) |_value| {
const value: std.fs.Dir.Entry = _value;
dir_entries_option.entries.addEntry(value) catch unreachable;
}
const dir_info = try r.dirInfoUncached(
dir_path,
dir_entries_option,
queue_top.result,
cached_dir_entry_result.index,
r.dir_cache.atIndex(top_parent.index),
top_parent.index,
);
var dir_info_ptr = try r.dir_cache.put(&queue_top.result, dir_info);
if (queue_slice.len == 0) {
return dir_info_ptr;
// Is the directory we're searching for actually a file?
} else if (queue_slice.len == 1) {
// const next_in_queue = queue_slice[0];
// const next_basename = std.fs.path.basename(next_in_queue.unsafe_path);
// if (dir_info_ptr.getEntries()) |entries| {
// if (entries.get(next_basename) != null) {
// return null;
// }
// }
}
}
unreachable;
}
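
The queue built above is just the chain of std.fs.path.dirname() results from the target path up to the first cached ancestor, processed in reverse so each directory can be opened from its parent's already-open handle. A simplified standalone sketch of that walk (no cache, fixed-size buffer, hypothetical collectAncestors helper):

const std = @import("std");

// Collect `path` and its ancestors into `buf`, deepest-first; iterating the
// returned slice backwards visits them top-down ("/", "/home", ...).
fn collectAncestors(path: []const u8, buf: [][]const u8) [][]const u8 {
    var len: usize = 0;
    var top = path;
    buf[len] = top;
    len += 1;
    while (std.fs.path.dirname(top)) |parent| {
        buf[len] = parent;
        len += 1;
        top = parent;
    }
    return buf[0..len];
}

pub fn main() void {
    var buf: [64][]const u8 = undefined;
    const chain = collectAncestors("/home/jarred/Code/node_modules/react/cjs", &buf);
    var i = chain.len;
    while (i > 0) {
        i -= 1;
        std.debug.print("{s}\n", .{chain[i]});
    }
}
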
pub const MatchResult = struct {
@@ -1024,15 +1222,17 @@ pub const Resolver = struct {
base[0.."index".len].* = "index".*;
std.mem.copy(u8, base["index".len..base.len], ext);
if (dir_info.entries.get(base)) |lookup| {
if (lookup.entry.kind(rfs) == .file) {
const parts = [_]string{ path, base };
const out_buf = r.fs.joinAlloc(r.allocator, &parts) catch unreachable;
if (r.debug_logs) |*debug| {
debug.addNoteFmt("Found file: \"{s}\"", .{out_buf}) catch unreachable;
}
if (dir_info.getEntries()) |entries| {
if (entries.get(base)) |lookup| {
if (lookup.entry.kind(rfs) == .file) {
const parts = [_]string{ path, base };
const out_buf = r.fs.joinAlloc(r.allocator, &parts) catch unreachable;
if (r.debug_logs) |*debug| {
debug.addNoteFmt("Found file: \"{s}\"", .{out_buf}) catch unreachable;
}
return MatchResult{ .path_pair = .{ .primary = Path.init(out_buf) }, .diff_case = lookup.diff_case };
return MatchResult{ .path_pair = .{ .primary = Path.init(out_buf) }, .diff_case = lookup.diff_case };
}
}
}
@@ -1324,113 +1524,46 @@ pub const Resolver = struct {
return null;
}
fn dirInfoUncached(r: *Resolver, unsafe_path: string, result: *allocators.Result) anyerror!?*DirInfo {
fn dirInfoUncached(
r: *Resolver,
path: string,
_entries: *Fs.FileSystem.RealFS.EntriesOption,
_result: allocators.Result,
dir_entry_index: allocators.IndexType,
parent: ?*DirInfo,
parent_index: allocators.IndexType,
) anyerror!DirInfo {
var result = _result;
var rfs: *Fs.FileSystem.RealFS = &r.fs.fs;
var parent: ?*DirInfo = null;
var entries = _entries.entries;
var is_root = false;
const parent_dir = (std.fs.path.dirname(unsafe_path) orelse parent_dir_handle: {
is_root = true;
break :parent_dir_handle "/";
});
var parent_result: allocators.Result = allocators.Result{
.hash = std.math.maxInt(u64),
.index = allocators.NotFound,
.status = .unknown,
var info = DirInfo{
.abs_path = path,
.parent = parent_index,
.entries = dir_entry_index,
};
if (!is_root and !strings.eql(parent_dir, unsafe_path)) {
parent = r.dirInfoCached(parent_dir) catch null;
if (parent != null) {
parent_result = try r.dir_cache.getOrPut(parent_dir);
}
}
var entries: Fs.FileSystem.DirEntry = Fs.FileSystem.DirEntry.empty(unsafe_path, r.allocator);
// List the directories
if (!is_root) {
var _entries: *Fs.FileSystem.RealFS.EntriesOption = undefined;
_entries = try rfs.readDirectory(unsafe_path, null, true);
if (std.meta.activeTag(_entries.*) == .err) {
// Just pretend this directory is empty if we can't access it. This is the
// case on Unix for directories that only have the execute permission bit
// set. It means we will just pass through the empty directory and
// continue to check the directories above it, which is how node behaves.
switch (_entries.err.original_err) {
error.EACCESS => {
entries = Fs.FileSystem.DirEntry.empty(unsafe_path, r.allocator);
},
// Ignore "ENOTDIR" here so that calling "ReadDirectory" on a file behaves
// as if there is nothing there at all instead of causing an error due to
// the directory actually being a file. This is a workaround for situations
// where people try to import from a path containing a file as a parent
// directory. The "pnpm" package manager generates a faulty "NODE_PATH"
// list which contains such paths and treating them as missing means we just
// ignore them during path resolution.
error.ENOENT,
error.ENOTDIR,
error.IsDir,
=> {
entries = Fs.FileSystem.DirEntry.empty(unsafe_path, r.allocator);
},
else => {
const pretty = r.prettyPath(Path.init(unsafe_path));
result.status = .not_found;
r.log.addErrorFmt(
null,
logger.Loc{},
r.allocator,
"Cannot read directory \"{s}\": {s}",
.{
pretty,
@errorName(_entries.err.original_err),
},
) catch {};
r.dir_cache.markNotFound(result.*);
return null;
},
}
} else {
entries = _entries.entries;
}
}
var info = dir_info_getter: {
var _info = DirInfo{
.abs_path = "",
.parent = parent_result.index,
.entries = entries,
};
result.status = .exists;
var __info = try r.dir_cache.put(unsafe_path, true, result, _info);
__info.abs_path = r.dir_cache.keyAtIndex(result.index).?;
break :dir_info_getter __info;
};
const path = info.abs_path;
// A "node_modules" directory isn't allowed to directly contain another "node_modules" directory
var base = std.fs.path.basename(path);
// if (entries != null) {
if (!strings.eqlComptime(base, "node_modules")) {
if (entries.get("node_modules")) |entry| {
// the catch might be wrong!
info.has_node_modules = (entry.entry.kind(rfs)) == .dir;
}
}
// }
if (parent != null) {
if (parent_result.status != .unknown) {
// Propagate the browser scope into child directories
if (parent) |parent_info| {
info.enclosing_browser_scope = parent_info.enclosing_browser_scope;
info.enclosing_browser_scope = parent.?.enclosing_browser_scope;
// Make sure "absRealPath" is the real path of the directory (resolving any symlinks)
if (!r.opts.preserve_symlinks) {
if (parent_info.entries.get(base)) |lookup| {
// Make sure "absRealPath" is the real path of the directory (resolving any symlinks)
if (!r.opts.preserve_symlinks) {
if (parent.?.getEntries()) |parent_entries| {
if (parent_entries.get(base)) |lookup| {
const entry = lookup.entry;
var symlink = entry.symlink(rfs);
@@ -1439,9 +1572,9 @@ pub const Resolver = struct {
try logs.addNote(std.fmt.allocPrint(r.allocator, "Resolved symlink \"{s}\" to \"{s}\"", .{ path, symlink }) catch unreachable);
}
info.abs_real_path = symlink;
} else if (parent_info.abs_real_path.len > 0) {
} else if (parent.?.abs_real_path.len > 0) {
// this might leak a little i'm not sure
const parts = [_]string{ parent_info.abs_real_path, base };
const parts = [_]string{ parent.?.abs_real_path, base };
symlink = r.fs.joinAlloc(r.allocator, &parts) catch unreachable;
if (r.debug_logs) |*logs| {
try logs.addNote(std.fmt.allocPrint(r.allocator, "Resolved symlink \"{s}\" to \"{s}\"", .{ path, symlink }) catch unreachable);