This commit is contained in:
Jarred Sumner
2021-05-28 23:26:13 -07:00
parent 95a3b72e94
commit 664dbf569c
14 changed files with 587 additions and 89 deletions

BIN
esdev-fd Executable file

Binary file not shown.

BIN
esdev-fd-relative Executable file

Binary file not shown.

BIN
esdev-lists Executable file

Binary file not shown.

BIN
esdev-nolists Executable file

Binary file not shown.

332
src/ast/Wyhash.zig Normal file
View File

@@ -0,0 +1,332 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std");
const mem = std.mem;
const primes = [_]u64{
0xa0761d6478bd642f,
0xe7037ed1a0b428db,
0x8ebc6af09c88c6e3,
0x589965cc75374cc3,
0x1d8e4e27c47d124f,
};
fn read_bytes(comptime bytes: u8, data: []const u8) u64 {
const T = std.meta.Int(.unsigned, 8 * bytes);
return mem.readIntLittle(T, data[0..bytes]);
}
fn read_8bytes_swapped(data: []const u8) u64 {
return (read_bytes(4, data) << 32 | read_bytes(4, data[4..]));
}
fn mum(a: u64, b: u64) u64 {
var r = std.math.mulWide(u64, a, b);
r = (r >> 64) ^ r;
return @truncate(u64, r);
}
fn mix0(a: u64, b: u64, seed: u64) u64 {
return mum(a ^ seed ^ primes[0], b ^ seed ^ primes[1]);
}
fn mix1(a: u64, b: u64, seed: u64) u64 {
return mum(a ^ seed ^ primes[2], b ^ seed ^ primes[3]);
}
// Wyhash version which does not store internal state for handling partial buffers.
// This is needed so that we can maximize speed for the short-key case, which will
// use the non-iterative API that the public Wyhash exposes.
pub fn WyhashGenerator(comptime ValueType: type) type {
return struct {
const Self = @This();
seed: u64,
msg_len: usize,
pub fn init(seed: u64) Self {
return Self{
.seed = seed,
.msg_len = 0,
};
}
fn round(self: *Self, b: []const u8) void {
std.debug.assert(b.len == 32);
self.seed = mix0(
read_bytes(8, b[0..]),
read_bytes(8, b[8..]),
self.seed,
) ^ mix1(
read_bytes(8, b[16..]),
read_bytes(8, b[24..]),
self.seed,
);
}
pub fn update(self: *Self, b: []const u8) void {
std.debug.assert(b.len % 32 == 0);
var off: usize = 0;
while (off < b.len) : (off += 32) {
@call(.{ .modifier = .always_inline }, self.round, .{b[off .. off + 32]});
}
self.msg_len += b.len;
}
pub fn final(self: *Self, b: []const u8) u64 {
std.debug.assert(b.len < 32);
const seed = self.seed;
const rem_len = @intCast(u5, b.len);
const rem_key = b[0..rem_len];
self.seed = switch (rem_len) {
0 => seed,
1 => mix0(read_bytes(1, rem_key), primes[4], seed),
2 => mix0(read_bytes(2, rem_key), primes[4], seed),
3 => mix0((read_bytes(2, rem_key) << 8) | read_bytes(1, rem_key[2..]), primes[4], seed),
4 => mix0(read_bytes(4, rem_key), primes[4], seed),
5 => mix0((read_bytes(4, rem_key) << 8) | read_bytes(1, rem_key[4..]), primes[4], seed),
6 => mix0((read_bytes(4, rem_key) << 16) | read_bytes(2, rem_key[4..]), primes[4], seed),
7 => mix0((read_bytes(4, rem_key) << 24) | (read_bytes(2, rem_key[4..]) << 8) | read_bytes(1, rem_key[6..]), primes[4], seed),
8 => mix0(read_8bytes_swapped(rem_key), primes[4], seed),
9 => mix0(read_8bytes_swapped(rem_key), read_bytes(1, rem_key[8..]), seed),
10 => mix0(read_8bytes_swapped(rem_key), read_bytes(2, rem_key[8..]), seed),
11 => mix0(read_8bytes_swapped(rem_key), (read_bytes(2, rem_key[8..]) << 8) | read_bytes(1, rem_key[10..]), seed),
12 => mix0(read_8bytes_swapped(rem_key), read_bytes(4, rem_key[8..]), seed),
13 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 8) | read_bytes(1, rem_key[12..]), seed),
14 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 16) | read_bytes(2, rem_key[12..]), seed),
15 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 24) | (read_bytes(2, rem_key[12..]) << 8) | read_bytes(1, rem_key[14..]), seed),
16 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed),
17 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(1, rem_key[16..]), primes[4], seed),
18 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(2, rem_key[16..]), primes[4], seed),
19 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(2, rem_key[16..]) << 8) | read_bytes(1, rem_key[18..]), primes[4], seed),
20 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(4, rem_key[16..]), primes[4], seed),
21 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 8) | read_bytes(1, rem_key[20..]), primes[4], seed),
22 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 16) | read_bytes(2, rem_key[20..]), primes[4], seed),
23 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 24) | (read_bytes(2, rem_key[20..]) << 8) | read_bytes(1, rem_key[22..]), primes[4], seed),
24 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), primes[4], seed),
25 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(1, rem_key[24..]), seed),
26 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(2, rem_key[24..]), seed),
27 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(2, rem_key[24..]) << 8) | read_bytes(1, rem_key[26..]), seed),
28 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(4, rem_key[24..]), seed),
29 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 8) | read_bytes(1, rem_key[28..]), seed),
30 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 16) | read_bytes(2, rem_key[28..]), seed),
31 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 24) | (read_bytes(2, rem_key[28..]) << 8) | read_bytes(1, rem_key[30..]), seed),
};
self.msg_len += b.len;
return mum(self.seed ^ self.msg_len, primes[4]);
}
pub fn hash(seed: u64, value: ValueType) u64 {
const input = std.mem.asBytes(&value);
const aligned_len = @sizeOf(ValueType) - (@sizeOf(ValueType) % 32);
var c = WyhashStateless.init(seed);
@call(.{ .modifier = .always_inline }, c.update, .{input[0..aligned_len]});
return @call(.{ .modifier = .always_inline }, c.final, .{input[aligned_len..]});
}
};
}
// Wyhash version which does not store internal state for handling partial buffers.
// This is needed so that we can maximize speed for the short-key case, which will
// use the non-iterative API that the public Wyhash exposes.
const WyhashStateless = struct {
seed: u64,
msg_len: usize,
pub fn init(seed: u64) WyhashStateless {
return WyhashStateless{
.seed = seed,
.msg_len = 0,
};
}
fn round(self: *WyhashStateless, b: []const u8) void {
std.debug.assert(b.len == 32);
self.seed = mix0(
read_bytes(8, b[0..]),
read_bytes(8, b[8..]),
self.seed,
) ^ mix1(
read_bytes(8, b[16..]),
read_bytes(8, b[24..]),
self.seed,
);
}
pub fn update(self: *WyhashStateless, b: []const u8) void {
std.debug.assert(b.len % 32 == 0);
var off: usize = 0;
while (off < b.len) : (off += 32) {
@call(.{ .modifier = .always_inline }, self.round, .{b[off .. off + 32]});
}
self.msg_len += b.len;
}
pub fn final(self: *WyhashStateless, b: []const u8) u64 {
std.debug.assert(b.len < 32);
const seed = self.seed;
const rem_len = @intCast(u5, b.len);
const rem_key = b[0..rem_len];
self.seed = switch (rem_len) {
0 => seed,
1 => mix0(read_bytes(1, rem_key), primes[4], seed),
2 => mix0(read_bytes(2, rem_key), primes[4], seed),
3 => mix0((read_bytes(2, rem_key) << 8) | read_bytes(1, rem_key[2..]), primes[4], seed),
4 => mix0(read_bytes(4, rem_key), primes[4], seed),
5 => mix0((read_bytes(4, rem_key) << 8) | read_bytes(1, rem_key[4..]), primes[4], seed),
6 => mix0((read_bytes(4, rem_key) << 16) | read_bytes(2, rem_key[4..]), primes[4], seed),
7 => mix0((read_bytes(4, rem_key) << 24) | (read_bytes(2, rem_key[4..]) << 8) | read_bytes(1, rem_key[6..]), primes[4], seed),
8 => mix0(read_8bytes_swapped(rem_key), primes[4], seed),
9 => mix0(read_8bytes_swapped(rem_key), read_bytes(1, rem_key[8..]), seed),
10 => mix0(read_8bytes_swapped(rem_key), read_bytes(2, rem_key[8..]), seed),
11 => mix0(read_8bytes_swapped(rem_key), (read_bytes(2, rem_key[8..]) << 8) | read_bytes(1, rem_key[10..]), seed),
12 => mix0(read_8bytes_swapped(rem_key), read_bytes(4, rem_key[8..]), seed),
13 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 8) | read_bytes(1, rem_key[12..]), seed),
14 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 16) | read_bytes(2, rem_key[12..]), seed),
15 => mix0(read_8bytes_swapped(rem_key), (read_bytes(4, rem_key[8..]) << 24) | (read_bytes(2, rem_key[12..]) << 8) | read_bytes(1, rem_key[14..]), seed),
16 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed),
17 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(1, rem_key[16..]), primes[4], seed),
18 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(2, rem_key[16..]), primes[4], seed),
19 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(2, rem_key[16..]) << 8) | read_bytes(1, rem_key[18..]), primes[4], seed),
20 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_bytes(4, rem_key[16..]), primes[4], seed),
21 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 8) | read_bytes(1, rem_key[20..]), primes[4], seed),
22 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 16) | read_bytes(2, rem_key[20..]), primes[4], seed),
23 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1((read_bytes(4, rem_key[16..]) << 24) | (read_bytes(2, rem_key[20..]) << 8) | read_bytes(1, rem_key[22..]), primes[4], seed),
24 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), primes[4], seed),
25 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(1, rem_key[24..]), seed),
26 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(2, rem_key[24..]), seed),
27 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(2, rem_key[24..]) << 8) | read_bytes(1, rem_key[26..]), seed),
28 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), read_bytes(4, rem_key[24..]), seed),
29 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 8) | read_bytes(1, rem_key[28..]), seed),
30 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 16) | read_bytes(2, rem_key[28..]), seed),
31 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 24) | (read_bytes(2, rem_key[28..]) << 8) | read_bytes(1, rem_key[30..]), seed),
};
self.msg_len += b.len;
return mum(self.seed ^ self.msg_len, primes[4]);
}
pub fn hash(seed: u64, input: []const u8) u64 {
const aligned_len = input.len - (input.len % 32);
var c = WyhashStateless.init(seed);
@call(.{ .modifier = .always_inline }, c.update, .{input[0..aligned_len]});
return @call(.{ .modifier = .always_inline }, c.final, .{input[aligned_len..]});
}
};
/// Fast non-cryptographic 64-bit hash function.
/// See https://github.com/wangyi-fudan/wyhash
pub const Wyhash = struct {
state: WyhashStateless,
buf: [32]u8,
buf_len: usize,
pub fn init(seed: u64) Wyhash {
return Wyhash{
.state = WyhashStateless.init(seed),
.buf = undefined,
.buf_len = 0,
};
}
pub fn update(self: *Wyhash, b: []const u8) void {
var off: usize = 0;
if (self.buf_len != 0 and self.buf_len + b.len >= 32) {
off += 32 - self.buf_len;
mem.copy(u8, self.buf[self.buf_len..], b[0..off]);
self.state.update(self.buf[0..]);
self.buf_len = 0;
}
const remain_len = b.len - off;
const aligned_len = remain_len - (remain_len % 32);
self.state.update(b[off .. off + aligned_len]);
mem.copy(u8, self.buf[self.buf_len..], b[off + aligned_len ..]);
self.buf_len += @intCast(u8, b[off + aligned_len ..].len);
}
pub fn final(self: *Wyhash) u64 {
const seed = self.state.seed;
const rem_len = @intCast(u5, self.buf_len);
const rem_key = self.buf[0..self.buf_len];
return self.state.final(rem_key);
}
pub fn hash(seed: u64, input: []const u8) u64 {
return WyhashStateless.hash(seed, input);
}
};
const expectEqual = std.testing.expectEqual;
test "test vectors" {
const hash = Wyhash.hash;
try expectEqual(hash(0, ""), 0x0);
try expectEqual(hash(1, "a"), 0xbed235177f41d328);
try expectEqual(hash(2, "abc"), 0xbe348debe59b27c3);
try expectEqual(hash(3, "message digest"), 0x37320f657213a290);
try expectEqual(hash(4, "abcdefghijklmnopqrstuvwxyz"), 0xd0b270e1d8a7019c);
try expectEqual(hash(5, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"), 0x602a1894d3bbfe7f);
try expectEqual(hash(6, "12345678901234567890123456789012345678901234567890123456789012345678901234567890"), 0x829e9c148b75970e);
}
test "test vectors streaming" {
var wh = Wyhash.init(5);
for ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") |e| {
wh.update(mem.asBytes(&e));
}
try expectEqual(wh.final(), 0x602a1894d3bbfe7f);
const pattern = "1234567890";
const count = 8;
const result = 0x829e9c148b75970e;
try expectEqual(Wyhash.hash(6, pattern ** 8), result);
wh = Wyhash.init(6);
var i: u32 = 0;
while (i < count) : (i += 1) {
wh.update(pattern);
}
try expectEqual(wh.final(), result);
}
test "iterative non-divisible update" {
var buf: [8192]u8 = undefined;
for (buf) |*e, i| {
e.* = @truncate(u8, i);
}
const seed = 0x128dad08f;
var end: usize = 32;
while (end < buf.len) : (end += 32) {
const non_iterative_hash = Wyhash.hash(seed, buf[0..end]);
var wy = Wyhash.init(seed);
var i: usize = 0;
while (i < end) : (i += 33) {
wy.update(buf[i..std.math.min(i + 33, end)]);
}
const iterative_hash = wy.final();
try std.testing.expectEqual(iterative_hash, non_iterative_hash);
}
}
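
A minimal usage sketch of the API this file defines (illustrative, not part of the commit; the import path assumes the caller sits next to src/ast/Wyhash.zig):

const std = @import("std");
const Wyhash = @import("Wyhash.zig").Wyhash;
pub fn main() void {
// One-shot hashing: the fast path for short keys.
const one_shot = Wyhash.hash(6, "hello world");
// Streaming hashing: feed the same input in arbitrary chunks.
var wh = Wyhash.init(6);
wh.update("hello ");
wh.update("world");
const streamed = wh.final();
// Both paths must agree, as the "test vectors streaming" test asserts.
std.debug.assert(one_shot == streamed);
}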

View File

@@ -27,6 +27,10 @@ pub const Ref = packed struct {
inner_index: Int = 0,
is_source_contents_slice: bool = false,
pub fn hash(key: Ref) u64 {
@compileError("Dont call");
}
// 2 bits of padding for whatever is the parent
pub const Int = u30;
pub const None = Ref{
@@ -70,11 +74,11 @@ pub const RequireOrImportMeta = struct {
exports_ref: Ref = Ref.None,
is_wrapper_async: bool = false,
};
pub fn debug(comptime fmt: []const u8, args: anytype) callconv(.Inline) void {
pub inline fn debug(comptime fmt: []const u8, args: anytype) void {
// Output.print(fmt, args);
}
pub fn debugl(
pub inline fn debugl(
comptime fmt: []const u8,
) callconv(.Inline) void {
) void {
// Output.print("{s}\n", .{fmt});
}

View File

@@ -171,15 +171,20 @@ pub const Bundler = struct {
// Run the resolver
// Don't parse/print automatically.
if (bundler.options.resolve_mode != .lazy) {
if (!bundler.resolve_results.contains(resolve_result.path_pair.primary.text)) {
try bundler.resolve_results.put(resolve_result.path_pair.primary.text, resolve_result);
var hash_key = resolve_result.path_pair.primary.text;
// Shorter hash key is faster to hash
if (strings.startsWith(resolve_result.path_pair.primary.text, bundler.fs.top_level_dir)) {
hash_key = resolve_result.path_pair.primary.text[bundler.fs.top_level_dir.len..];
}
if (!bundler.resolve_results.contains(hash_key)) {
try bundler.resolve_results.put(hash_key, resolve_result);
try bundler.resolve_queue.writeItem(resolve_result);
}
}
if (!strings.eql(import_record.path.text, resolve_result.path_pair.primary.text)) {
import_record.path = try bundler.generateImportPath(source_dir, resolve_result.path_pair.primary.text);
}
import_record.path = try bundler.generateImportPath(source_dir, resolve_result.path_pair.primary.text);
}
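
The hash-key shortening above trades one prefix check for hashing fewer bytes per lookup. A minimal sketch of the same idea (hypothetical helper; strings.startsWith is assumed to behave like std.mem.startsWith):

const std = @import("std");
fn shortenHashKey(top_level_dir: []const u8, abs_path: []const u8) []const u8 {
// e.g. "/Users/jarred/app/src/index.tsx" -> "src/index.tsx"
if (std.mem.startsWith(u8, abs_path, top_level_dir)) {
return abs_path[top_level_dir.len..];
}
return abs_path;
}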
pub fn buildWithResolveResult(bundler: *Bundler, resolve_result: Resolver.Resolver.Result) !?options.OutputFile {
@@ -190,8 +195,9 @@ pub const Bundler = struct {
// Step 1. Parse & scan
const loader = bundler.options.loaders.get(resolve_result.path_pair.primary.name.ext) orelse .file;
var file_path = resolve_result.path_pair.primary;
file_path.pretty = relative_paths_list.append(bundler.fs.relativeTo(file_path.text)) catch unreachable;
var result = bundler.parse(file_path, loader) orelse return null;
var result = bundler.parse(file_path, loader, resolve_result.dirname_fd) orelse return null;
switch (result.loader) {
.jsx, .js, .ts, .tsx => {
@@ -304,7 +310,7 @@ pub const Bundler = struct {
ast: js_ast.Ast,
};
pub var tracing_start: i128 = if (enableTracing) 0 else undefined;
pub fn parse(bundler: *Bundler, path: Fs.Path, loader: options.Loader) ?ParseResult {
pub fn parse(bundler: *Bundler, path: Fs.Path, loader: options.Loader, dirname_fd: StoredFileDescriptorType) ?ParseResult {
if (enableTracing) {
tracing_start = std.time.nanoTimestamp();
}
@@ -314,7 +320,7 @@ pub const Bundler = struct {
}
}
var result: ParseResult = undefined;
const entry = bundler.resolver.caches.fs.readFile(bundler.fs, path.text) catch return null;
const entry = bundler.resolver.caches.fs.readFile(bundler.fs, path.text, dirname_fd) catch return null;
const source = logger.Source.initFile(Fs.File{ .path = path, .contents = entry.contents }, bundler.allocator) catch return null;
switch (loader) {
@@ -498,6 +504,11 @@ pub const Bundler = struct {
) !options.TransformResult {
var bundler = try Bundler.init(allocator, log, opts);
// 100.00 µs std.fifo.LinearFifo(resolver.resolver.Result,std.fifo.LinearFifoBufferType { .Dynamic = {}}).writeItemAssumeCapacity
if (bundler.options.resolve_mode != .lazy) {
try bundler.resolve_queue.ensureUnusedCapacity(1000);
}
var entry_points = try allocator.alloc(Resolver.Resolver.Result, bundler.options.entry_points.len);
if (isDebug) {
@@ -660,6 +671,11 @@ pub const Transformer = struct {
var arena: std.heap.ArenaAllocator = undefined;
const use_arenas = opts.entry_points.len > 8;
js_ast.Expr.Data.Store.create(allocator);
js_ast.Stmt.Data.Store.create(allocator);
var ulimit: usize = Fs.FileSystem.RealFS.adjustUlimit();
var care_about_closing_files = !(FeatureFlags.store_file_descriptors and opts.entry_points.len * 2 < ulimit);
for (opts.entry_points) |entry_point, i| {
if (use_arenas) {
arena = std.heap.ArenaAllocator.init(allocator);
@@ -674,19 +690,23 @@ pub const Transformer = struct {
var _log = logger.Log.init(allocator);
var __log = &_log;
var paths = [_]string{ cwd, entry_point };
const absolutePath = try std.fs.path.resolve(chosen_alloc, &paths);
const absolutePath = resolve_path.joinAbs(cwd, .auto, entry_point);
const file = try std.fs.openFileAbsolute(absolutePath, std.fs.File.OpenFlags{ .read = true });
defer file.close();
defer {
if (care_about_closing_files) {
file.close();
}
}
const stat = try file.stat();
// 1 byte sentinel
const code = try file.readToEndAlloc(allocator, stat.size);
defer {
if (_log.msgs.items.len == 0) {
allocator.free(code);
}
chosen_alloc.free(absolutePath);
_log.appendTo(log) catch {};
}
const _file = Fs.File{ .path = Fs.Path.init(entry_point), .contents = code };
@@ -710,6 +730,8 @@ pub const Transformer = struct {
const relative_path = resolve_path.relative(cwd, absolutePath);
const out_path = resolve_path.joinAbs2(cwd, .auto, absolutePath, relative_path);
try output_files.append(options.OutputFile{ .path = allocator.dupe(u8, out_path) catch continue, .contents = res.js });
js_ast.Expr.Data.Store.reset();
js_ast.Stmt.Data.Store.reset();
}
return try options.TransformResult.init(output_dir, output_files.toOwnedSlice(), log, allocator);

View File

@@ -37,6 +37,7 @@ pub const Cache = struct {
pub const Entry = struct {
contents: string,
fd: StoredFileDescriptorType = 0,
// Null means it's not usable
mod_key: ?fs.FileSystem.Implementation.ModKey = null,
@@ -56,7 +57,7 @@ pub const Cache = struct {
c.entries.deinit();
}
pub fn readFile(c: *Fs, _fs: *fs.FileSystem, path: string) !Entry {
pub fn readFile(c: *Fs, _fs: *fs.FileSystem, path: string, dirname_fd: StoredFileDescriptorType) !Entry {
var rfs = _fs.fs;
{
@@ -67,9 +68,23 @@ pub const Cache = struct {
}
}
var file_handle: std.fs.File = undefined;
if (FeatureFlags.store_file_descriptors and dirname_fd > 0) {
file_handle = try std.fs.Dir.openFile(std.fs.Dir{ .fd = dirname_fd }, std.fs.path.basename(path), .{ .read = true });
} else {
file_handle = try std.fs.openFileAbsolute(path, .{ .read = true });
}
defer {
if (rfs.needToCloseFiles()) {
file_handle.close();
}
}
// If the file's modification key hasn't changed since it was cached, assume
// the contents of the file are also the same and skip reading the file.
var mod_key: ?fs.FileSystem.Implementation.ModKey = rfs.modKey(path) catch |err| handler: {
var mod_key: ?fs.FileSystem.Implementation.ModKey = rfs.modKeyWithFile(path, file_handle) catch |err| handler: {
switch (err) {
error.FileNotFound, error.AccessDenied => {
return err;
@@ -85,14 +100,14 @@ pub const Cache = struct {
var file: fs.File = undefined;
if (mod_key) |modk| {
file = rfs.readFile(path, modk.size) catch |err| {
file = rfs.readFileWithHandle(path, modk.size, file_handle) catch |err| {
if (isDebug) {
Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
}
return err;
};
} else {
file = rfs.readFile(path, null) catch |err| {
file = rfs.readFileWithHandle(path, null, file_handle) catch |err| {
if (isDebug) {
Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
}
@@ -103,6 +118,7 @@ pub const Cache = struct {
const entry = Entry{
.contents = file.contents,
.mod_key = mod_key,
.fd = if (FeatureFlags.store_file_descriptors) file_handle.handle else 0,
};
c.mutex.lock();
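
A condensed sketch of the dirname_fd fast path above: when a directory descriptor was stored earlier, the file is opened by basename relative to it (an openat-style lookup) instead of re-resolving the absolute path:

const std = @import("std");
fn openRelativeToDir(dirname_fd: std.os.fd_t, abs_path: []const u8) !std.fs.File {
const dir = std.fs.Dir{ .fd = dirname_fd };
return dir.openFile(std.fs.path.basename(abs_path), .{ .read = true });
}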

View File

@@ -21,6 +21,8 @@ const clap = @import("clap");
const bundler = @import("bundler.zig");
const fs = @import("fs.zig");
pub fn constStrToU8(s: string) []u8 {
return @intToPtr([*]u8, @ptrToInt(s.ptr))[0..s.len];
}
@@ -319,17 +321,19 @@ pub const Cli = struct {
var did_write = false;
var writer = stdout.writer();
var open_file_limit: usize = 32;
if (args.write) |write| {
if (write) {
var open_file_limit: usize = 32;
if (std.os.getrlimit(.NOFILE)) |limit| {
open_file_limit = limit.cur;
} else |err| {}
const do_we_need_to_close = open_file_limit > result.output_files.len * 2;
did_write = true;
var root_dir = try std.fs.openDirAbsolute(result.outbase, std.fs.Dir.OpenDirOptions{});
// On POSIX, the OS automatically closes file handles on process exit.
// Closing files manually shows up in profiling,
// so don't do it unless we actually need to.
const do_we_need_to_close = !FeatureFlags.store_file_descriptors or (@intCast(usize, root_dir.fd) + open_file_limit) < result.output_files.len;
defer {
if (do_we_need_to_close) {
@@ -390,6 +394,11 @@ pub const Cli = struct {
if (isDebug) {
Output.println("Expr count: {d}", .{js_ast.Expr.icount});
Output.println("Stmt count: {d}", .{js_ast.Stmt.icount});
Output.println("File Descriptors: {d} / {d}", .{
fs.FileSystem.max_fd,
open_file_limit,
});
}
if (!did_write) {
@@ -415,8 +424,10 @@ pub const Cli = struct {
const duration = std.time.nanoTimestamp() - start_time;
if (did_write and duration < @as(i128, @as(i128, std.time.ns_per_s) * @as(i128, 2))) {
var elapsed = @divFloor(duration, @as(i128, std.time.ns_per_ms));
var elapsed = @divExact(duration, @as(i128, std.time.ns_per_ms));
try writer.print("\nCompleted in {d}ms", .{elapsed});
}
std.os.exit(0);
}
};
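
A condensed sketch of the open-file budget check above (the fallback value mirrors the CLI code; each output file may hold a source and a destination descriptor):

const std = @import("std");
fn canKeepOutputFilesOpen(output_file_count: usize) bool {
var open_file_limit: usize = 32; // conservative fallback, as above
if (std.os.getrlimit(.NOFILE)) |limit| {
open_file_limit = limit.cur;
} else |err| {}
return open_file_limit > output_file_count * 2;
}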

View File

@@ -28,6 +28,17 @@ pub const FileSystem = struct {
dirname_store: *DirnameStore,
filename_store: *FilenameStore,
pub var max_fd: FileDescriptorType = 0;
pub inline fn setMaxFd(fd: anytype) void {
if (!FeatureFlags.store_file_descriptors) {
return;
}
max_fd = std.math.max(fd, max_fd);
}
pub var instance: FileSystem = undefined;
pub const DirnameStore = allocators.BSSStringList(Preallocate.Counts.dir_entry, 256);
@@ -73,6 +84,7 @@ pub const FileSystem = struct {
pub const EntryMap = std.StringHashMap(EntryStore.ListIndex);
pub const EntryStore = allocators.BSSList(Entry, Preallocate.Counts.files);
dir: string,
fd: StoredFileDescriptorType = 0,
data: EntryMap,
pub fn addEntry(dir: *DirEntry, entry: std.fs.Dir.Entry) !void {
@@ -153,7 +165,7 @@ pub const FileSystem = struct {
d.data.deinit();
}
pub fn get(entry: *DirEntry, _query: string) ?Entry.Lookup {
pub fn get(entry: *const DirEntry, _query: string) ?Entry.Lookup {
if (_query.len == 0) return null;
var end: usize = 0;
@@ -354,6 +366,17 @@ pub const FileSystem = struct {
file_limit: usize = 32,
file_quota: usize = 32,
pub fn needToCloseFiles(rfs: *const RealFS) bool {
// On Windows, we must always close open file handles,
// because Windows locks open files.
if (!FeatureFlags.store_file_descriptors) {
return true;
}
// If we're not near the maximum number of open files, don't worry about it.
return !(rfs.file_limit > 254 and rfs.file_limit > (FileSystem.max_fd + 1) * 2);
}
// Always try to max out how many files we can keep open
pub fn adjustUlimit() usize {
var limit = std.os.getrlimit(.NOFILE) catch return 32;
@@ -375,7 +398,7 @@ pub const FileSystem = struct {
.cwd = cwd,
.file_limit = file_limit,
.file_quota = file_limit,
.limiter = Limiter.init(allocator),
.limiter = Limiter.init(allocator, file_limit),
.watcher = if (enable_watcher) std.StringHashMap(WatchData).init(allocator) else null,
};
}
@@ -389,9 +412,7 @@ pub const FileSystem = struct {
mtime: i128 = 0,
mode: std.fs.File.Mode = 0,
pub fn generate(fs: *RealFS, path: string) anyerror!ModKey {
var file = try std.fs.openFileAbsolute(path, std.fs.File.OpenFlags{ .read = true });
defer file.close();
pub fn generate(fs: *RealFS, path: string, file: std.fs.File) anyerror!ModKey {
const stat = try file.stat();
const seconds = @divTrunc(stat.mtime, @as(@TypeOf(stat.mtime), std.time.ns_per_s));
@@ -437,11 +458,8 @@ pub const FileSystem = struct {
}
}
pub fn modKey(fs: *RealFS, path: string) anyerror!ModKey {
fs.limiter.before();
defer fs.limiter.after();
const key = ModKey.generate(fs, path) catch |err| {
pub fn modKeyWithFile(fs: *RealFS, path: string, file: anytype) anyerror!ModKey {
const key = ModKey.generate(fs, path, file) catch |err| {
fs.modKeyError(path, err);
return err;
};
@@ -457,6 +475,18 @@ pub const FileSystem = struct {
return key;
}
pub fn modKey(fs: *RealFS, path: string) anyerror!ModKey {
fs.limiter.before();
defer fs.limiter.after();
var file = try std.fs.openFileAbsolute(path, std.fs.File.OpenFlags{ .read = true });
defer {
if (fs.needToCloseFiles()) {
file.close();
}
}
return try fs.modKeyWithFile(path, file);
}
pub const WatchData = struct {
dir_entries: []string = &([_]string{}),
file_contents: string = "",
@@ -493,9 +523,9 @@ pub const FileSystem = struct {
// Limit the number of files open simultaneously to avoid ulimit issues
pub const Limiter = struct {
semaphore: Semaphore,
pub fn init(allocator: *std.mem.Allocator) Limiter {
pub fn init(allocator: *std.mem.Allocator, limit: usize) Limiter {
return Limiter{
.semaphore = Semaphore.init(32),
.semaphore = Semaphore.init(limit),
// .counter = std.atomic.Int(u8).init(0),
// .lock = std.Thread.Mutex.init(),
};
@@ -532,6 +562,12 @@ pub const FileSystem = struct {
var iter: std.fs.Dir.Iterator = handle.iterate();
var dir = DirEntry.init(_dir, fs.allocator);
errdefer dir.deinit();
if (FeatureFlags.store_file_descriptors) {
FileSystem.setMaxFd(handle.fd);
dir.fd = handle.fd;
}
while (try iter.next()) |_entry| {
try dir.addEntry(_entry);
}
@@ -585,7 +621,7 @@ pub const FileSystem = struct {
var handle = _handle orelse try fs.openDir(dir);
defer {
if (_handle == null) {
if (_handle == null and fs.needToCloseFiles()) {
handle.close();
}
}
@@ -596,7 +632,7 @@ pub const FileSystem = struct {
}
// Cache miss: read the directory entries
const entries = fs.readdir(
var entries = fs.readdir(
dir,
handle,
) catch |err| {
@@ -643,15 +679,8 @@ pub const FileSystem = struct {
}
}
pub fn readFile(fs: *RealFS, path: string, _size: ?usize) !File {
fs.limiter.before();
defer fs.limiter.after();
const file: std.fs.File = std.fs.openFileAbsolute(path, std.fs.File.OpenFlags{ .read = true, .write = false }) catch |err| {
fs.readFileError(path, err);
return err;
};
defer file.close();
pub fn readFileWithHandle(fs: *RealFS, path: string, _size: ?usize, file: std.fs.File) !File {
FileSystem.setMaxFd(file.handle);
// Skip the extra file.stat() call when possible
var size = _size orelse (file.getEndPos() catch |err| {
@@ -675,6 +704,26 @@ pub const FileSystem = struct {
return File{ .path = Path.init(path), .contents = file_contents };
}
pub fn readFile(
fs: *RealFS,
path: string,
_size: ?usize,
) !File {
fs.limiter.before();
defer fs.limiter.after();
const file: std.fs.File = std.fs.openFileAbsolute(path, std.fs.File.OpenFlags{ .read = true, .write = false }) catch |err| {
fs.readFileError(path, err);
return err;
};
defer {
if (fs.needToCloseFiles()) {
file.close();
}
}
return try fs.readFileWithHandle(path, _size, file);
}
pub fn kind(fs: *RealFS, _dir: string, base: string) !Entry.Cache {
var dir = _dir;
var combo = [2]string{ dir, base };
@@ -684,7 +733,11 @@ pub const FileSystem = struct {
defer fs.limiter.after();
const file = try std.fs.openFileAbsolute(entry_path, .{ .read = true, .write = false });
defer file.close();
defer {
if (fs.needToCloseFiles()) {
file.close();
}
}
var stat = try file.stat();
var _kind = stat.kind;
@@ -711,6 +764,7 @@ pub const FileSystem = struct {
symlink = link;
const file2 = std.fs.openFileAbsolute(symlink, std.fs.File.OpenFlags{ .read = true, .write = false }) catch return cache;
// These we always close.
defer file2.close();
const stat2 = file2.stat() catch return cache;
@@ -756,18 +810,6 @@ pub const FileSystem = struct {
};
};
pub const FileSystemEntry = union(FileSystemEntry.Kind) {
file: File,
directory: Directory,
not_found: FileNotFound,
pub const Kind = enum(u8) {
file,
directory,
not_found,
};
};
pub const Directory = struct { path: Path, contents: []string };
pub const File = struct { path: Path, contents: string };

View File

@@ -29,6 +29,8 @@ pub const FeatureFlags = struct {
pub const print_ast = false;
pub const disable_printing_null = false;
pub const store_file_descriptors = !isWindows and !isBrowser;
};
pub const enableTracing = true;
@@ -135,3 +137,10 @@ pub const Global = struct {
Global.panic("Not implemented yet!!!!!", .{});
}
};
pub const FileDescriptorType = if (isBrowser) u0 else std.os.fd_t;
// When we're on a machine with an absurdly high limit on open file handles,
// as is often the case on macOS,
// a useful optimization is to store file descriptors and just keep them open... forever.
pub const StoredFileDescriptorType = if (isWindows or isBrowser) u0 else std.os.fd_t;
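
Because u0 is a zero-bit type, a stored descriptor field costs nothing on platforms that cannot use it. A sketch of a compile-time check (assuming std, isWindows, and isBrowser are in scope, as they are in this file):

comptime {
if (isWindows or isBrowser) {
std.debug.assert(@sizeOf(StoredFileDescriptorType) == 0);
}
}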

View File

@@ -44,11 +44,13 @@ pub const PackageJSON = struct {
//
browser_map: BrowserMap,
pub fn parse(r: *resolver.Resolver, input_path: string) ?PackageJSON {
pub fn parse(r: *resolver.Resolver, input_path: string, dirname_fd: StoredFileDescriptorType) ?PackageJSON {
const parts = [_]string{ input_path, "package.json" };
const package_json_path = r.fs.join(&parts);
const entry = r.caches.fs.readFile(r.fs, input_path) catch |err| {
const package_json_path_ = r.fs.abs(&parts);
const package_json_path = r.fs.filename_store.append(package_json_path_) catch unreachable;
const entry = r.caches.fs.readFile(r.fs, package_json_path, dirname_fd) catch |err| {
if (err != error.IsDir) {
r.log.addErrorFmt(null, logger.Loc.Empty, r.allocator, "Cannot read file \"{s}\": {s}", .{ r.prettyPath(fs.Path.init(input_path)), @errorName(err) }) catch unreachable;
}
@@ -60,7 +62,7 @@ pub const PackageJSON = struct {
debug.addNoteFmt("The file \"{s}\" exists", .{package_json_path}) catch unreachable;
}
const key_path = fs.Path.init(r.allocator.dupe(u8, package_json_path) catch unreachable);
const key_path = fs.Path.init(package_json_path);
var json_source = logger.Source.initPathString(key_path.text, entry.contents);
json_source.path.pretty = r.prettyPath(json_source.path);

View File

@@ -45,7 +45,19 @@ pub const DirInfo = struct {
tsconfig_json: ?*TSConfigJSON = null, // Is there a "tsconfig.json" file in this directory or a parent directory?
abs_real_path: string = "", // If non-empty, this is the real absolute path resolving any symlinks
pub fn getEntries(dirinfo: *DirInfo) ?*Fs.FileSystem.DirEntry {
pub fn getFileDescriptor(dirinfo: *const DirInfo) StoredFileDescriptorType {
if (!FeatureFlags.store_file_descriptors) {
return 0;
}
if (dirinfo.getEntries()) |entries| {
return entries.fd;
} else {
return 0;
}
}
pub fn getEntries(dirinfo: *const DirInfo) ?*Fs.FileSystem.DirEntry {
var entries_ptr = Fs.FileSystem.instance.fs.entries.atIndex(dirinfo.entries) orelse return null;
switch (entries_ptr.*) {
.entries => |entr| {
@@ -57,10 +69,10 @@ pub const DirInfo = struct {
}
}
pub fn getParent(i: *DirInfo) ?*DirInfo {
pub fn getParent(i: *const DirInfo) ?*DirInfo {
return HashMap.instance.atIndex(i.parent);
}
pub fn getEnclosingBrowserScope(i: *DirInfo) ?*DirInfo {
pub fn getEnclosingBrowserScope(i: *const DirInfo) ?*DirInfo {
return HashMap.instance.atIndex(i.enclosing_browser_scope);
}
@@ -250,6 +262,9 @@ pub const Resolver = struct {
debug_meta: ?DebugMeta = null,
dirname_fd: StoredFileDescriptorType = 0,
file_fd: StoredFileDescriptorType = 0,
// Most npm modules are CommonJS.
// If unspecified, assume CommonJS.
// If it's internal app code, assume ESM, since this bundler is designed for ESM.
@@ -428,6 +443,7 @@ pub const Resolver = struct {
return Result{
.path_pair = res.path_pair,
.diff_case = res.diff_case,
.dirname_fd = dir_info.getFileDescriptor(),
.is_from_node_modules = res.is_node_module,
};
}
@@ -452,7 +468,12 @@ pub const Resolver = struct {
// Run node's resolution rules (e.g. adding ".js")
if (r.loadAsFileOrDirectory(import_path, kind)) |entry| {
return Result{ .path_pair = entry.path_pair, .diff_case = entry.diff_case, .is_from_node_modules = entry.is_node_module };
return Result{
.dirname_fd = entry.dirname_fd,
.path_pair = entry.path_pair,
.diff_case = entry.diff_case,
.is_from_node_modules = entry.is_node_module,
};
}
return null;
@@ -508,6 +529,7 @@ pub const Resolver = struct {
.diff_case = _result.diff_case,
.is_from_node_modules = _result.is_node_module,
.module_type = pkg.module_type,
.dirname_fd = _result.dirname_fd,
};
check_relative = false;
check_package = false;
@@ -524,6 +546,7 @@ pub const Resolver = struct {
.path_pair = res.path_pair,
.diff_case = res.diff_case,
.is_from_node_modules = res.is_node_module,
.dirname_fd = res.dirname_fd,
};
} else if (!check_package) {
return null;
@@ -569,6 +592,7 @@ pub const Resolver = struct {
}
return Result{
.path_pair = pair,
.dirname_fd = node_module.dirname_fd,
.diff_case = node_module.diff_case,
.is_from_node_modules = true,
};
@@ -591,6 +615,7 @@ pub const Resolver = struct {
.path_pair = res.path_pair,
.diff_case = res.diff_case,
.is_from_node_modules = res.is_node_module,
.dirname_fd = res.dirname_fd,
};
} else {
// Note: node's "self references" are not currently supported
@@ -611,9 +636,11 @@ pub const Resolver = struct {
path.is_disabled = true;
} else if (r.resolveWithoutRemapping(dir_info, remapped, kind)) |remapped_result| {
result.is_from_node_modules = remapped_result.is_node_module;
switch (iter.index) {
0 => {
result.path_pair.primary = remapped_result.path_pair.primary;
result.dirname_fd = remapped_result.dirname_fd;
},
else => {
result.path_pair.secondary = remapped_result.path_pair.primary;
@@ -728,7 +755,7 @@ pub const Resolver = struct {
// // if (!strings.eql(std.fs.path.basename(current), "node_modules")) {
// // var paths1 = [_]string{ current, "node_modules", extends };
// // var join1 = r.fs.absAlloc(ctx.r.allocator, &paths1) catch unreachable;
// // const res = ctx.r.parseTSConfig(join1, ctx.visited) catch |err| {
// // const res = ctx.r.parseTSConfig(join1, ctx.1) catch |err| {
// // if (err == error.ENOENT) {
// // continue;
// // } else if (err == error.ParseErrorImportCycle) {} else if (err != error.ParseErrorAlreadyLogged) {}
@@ -743,12 +770,16 @@ pub const Resolver = struct {
};
threadlocal var tsconfig_base_url_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
pub fn parseTSConfig(r: *Resolver, file: string, visited: *StringBoolMap) !?*TSConfigJSON {
if (visited.contains(file)) {
return error.ParseErrorImportCycle;
}
visited.put(file, true) catch unreachable;
const entry = try r.caches.fs.readFile(r.fs, file);
pub fn parseTSConfig(
r: *Resolver,
file: string,
dirname_fd: StoredFileDescriptorType,
) !?*TSConfigJSON {
const entry = try r.caches.fs.readFile(
r.fs,
file,
dirname_fd,
);
const key_path = Path.init(file);
const source = logger.Source.initPathString(key_path.text, entry.contents);
@@ -778,8 +809,8 @@ pub const Resolver = struct {
return path.text;
}
pub fn parsePackageJSON(r: *Resolver, file: string) !?*PackageJSON {
const pkg = PackageJSON.parse(r, file) orelse return null;
pub fn parsePackageJSON(r: *Resolver, file: string, dirname_fd: StoredFileDescriptorType) !?*PackageJSON {
const pkg = PackageJSON.parse(r, file, dirname_fd) orelse return null;
var _pkg = try r.allocator.create(PackageJSON);
_pkg.* = pkg;
return _pkg;
@@ -846,7 +877,7 @@ pub const Resolver = struct {
defer {
// Anything
if (open_dir_count > 0) {
if (open_dir_count > 0 and r.fs.fs.needToCloseFiles()) {
var open_dirs: []std.fs.Dir = _open_dirs[0..open_dir_count];
for (open_dirs) |*open_dir| {
open_dir.close();
@@ -928,7 +959,7 @@ pub const Resolver = struct {
return null;
};
Fs.FileSystem.setMaxFd(open_dir.fd);
// These objects mostly just wrap the file descriptor, so it's fine to keep them open.
_open_dirs[open_dir_count] = open_dir;
open_dir_count += 1;
@@ -959,6 +990,12 @@ pub const Resolver = struct {
dir_entries_option = try rfs.entries.put(&cached_dir_entry_result, .{
.entries = Fs.FileSystem.DirEntry.init(dir_path, r.fs.allocator),
});
if (FeatureFlags.store_file_descriptors) {
Fs.FileSystem.setMaxFd(open_dir.fd);
dir_entries_option.entries.fd = open_dir.fd;
}
has_dir_entry_result = true;
}
@@ -974,6 +1011,7 @@ pub const Resolver = struct {
cached_dir_entry_result.index,
r.dir_cache.atIndex(top_parent.index),
top_parent.index,
open_dir.fd,
);
var dir_info_ptr = try r.dir_cache.put(&queue_top.result, dir_info);
@@ -998,6 +1036,8 @@ pub const Resolver = struct {
pub const MatchResult = struct {
path_pair: PathPair,
dirname_fd: StoredFileDescriptorType = 0,
file_fd: StoredFileDescriptorType = 0,
is_node_module: bool = false,
diff_case: ?Fs.FileSystem.Entry.Lookup.DifferentCase = null,
};
@@ -1137,6 +1177,7 @@ pub const Resolver = struct {
pub const LoadResult = struct {
path: string,
diff_case: ?Fs.FileSystem.Entry.Lookup.DifferentCase,
dirname_fd: StoredFileDescriptorType = 0,
};
pub fn checkBrowserMap(r: *Resolver, pkg: *PackageJSON, input_path: string) ?string {
@@ -1259,7 +1300,11 @@ pub const Resolver = struct {
debug.addNoteFmt("Found file: \"{s}\"", .{out_buf}) catch unreachable;
}
return MatchResult{ .path_pair = .{ .primary = Path.init(out_buf) }, .diff_case = lookup.diff_case };
return MatchResult{
.path_pair = .{ .primary = Path.init(out_buf) },
.diff_case = lookup.diff_case,
.dirname_fd = dir_info.getFileDescriptor(),
};
}
}
}
@@ -1296,7 +1341,7 @@ pub const Resolver = struct {
// Is this a file
if (r.loadAsFile(remapped_abs, extension_order)) |file_result| {
return MatchResult{ .path_pair = .{ .primary = Path.init(file_result.path) }, .diff_case = file_result.diff_case };
return MatchResult{ .dirname_fd = file_result.dirname_fd, .path_pair = .{ .primary = Path.init(file_result.path) }, .diff_case = file_result.diff_case };
}
// Is it a directory with an index?
@@ -1319,7 +1364,11 @@ pub const Resolver = struct {
// Is this a file?
if (r.loadAsFile(path, extension_order)) |file| {
return MatchResult{ .path_pair = .{ .primary = Path.init(file.path) }, .diff_case = file.diff_case };
return MatchResult{
.path_pair = .{ .primary = Path.init(file.path) },
.diff_case = file.diff_case,
.dirname_fd = file.dirname_fd,
};
}
// Is this a directory?
@@ -1396,6 +1445,7 @@ pub const Resolver = struct {
.secondary = _result.path_pair.primary,
},
.diff_case = auto_main_result.diff_case,
.dirname_fd = auto_main_result.dirname_fd,
};
} else {
if (r.debug_logs) |*debug| {
@@ -1453,7 +1503,7 @@ pub const Resolver = struct {
return null;
}
var entries = dir_entry.entries;
const entries = dir_entry.entries;
const base = std.fs.path.basename(path);
@@ -1470,7 +1520,11 @@ pub const Resolver = struct {
const abs_path_parts = [_]string{ query.entry.dir, query.entry.base };
const abs_path = r.fs.filename_store.append(r.fs.joinBuf(&abs_path_parts, &TemporaryBuffer.ExtensionPathBuf)) catch unreachable;
return LoadResult{ .path = abs_path, .diff_case = query.diff_case };
return LoadResult{
.path = abs_path,
.diff_case = query.diff_case,
.dirname_fd = entries.fd,
};
}
}
@@ -1496,6 +1550,7 @@ pub const Resolver = struct {
return LoadResult{
.path = r.fs.filename_store.append(buffer) catch unreachable,
.diff_case = query.diff_case,
.dirname_fd = entries.fd,
};
}
}
@@ -1537,6 +1592,7 @@ pub const Resolver = struct {
return LoadResult{
.path = r.fs.filename_store.append(buffer) catch unreachable,
.diff_case = query.diff_case,
.dirname_fd = entries.fd,
};
}
}
@@ -1562,6 +1618,7 @@ pub const Resolver = struct {
dir_entry_index: allocators.IndexType,
parent: ?*DirInfo,
parent_index: allocators.IndexType,
fd: FileDescriptorType,
) anyerror!DirInfo {
var result = _result;
@@ -1621,7 +1678,7 @@ pub const Resolver = struct {
if (entries.get("package.json")) |lookup| {
const entry = lookup.entry;
if (entry.kind(rfs) == .file) {
info.package_json = r.parsePackageJSON(path) catch null;
info.package_json = r.parsePackageJSON(path, if (FeatureFlags.store_file_descriptors) fd else 0) catch null;
if (info.package_json) |pkg| {
if (pkg.browser_map.count() > 0) {
@@ -1663,9 +1720,10 @@ pub const Resolver = struct {
}
if (tsconfig_path) |tsconfigpath| {
var visited = std.StringHashMap(bool).init(r.allocator);
defer visited.deinit();
info.tsconfig_json = r.parseTSConfig(tsconfigpath, &visited) catch |err| brk: {
info.tsconfig_json = r.parseTSConfig(
tsconfigpath,
if (FeatureFlags.store_file_descriptors) fd else 0,
) catch |err| brk: {
const pretty = r.prettyPath(Path.init(tsconfigpath));
if (err == error.ENOENT) {

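Taken together, the resolver changes thread one value through the whole pipeline. A condensed sketch of the flow introduced in this diff (names from the diff itself):

// 1. readdir() opens a directory and records its fd on the cached DirEntry.
// 2. loadAsFile()/loadAsFileOrDirectory() copy entries.fd into LoadResult/MatchResult.dirname_fd.
// 3. Resolver.resolve() copies that into Result.dirname_fd.
// 4. Bundler.parse() hands it to caches.fs.readFile(), which opens the file by
//    basename relative to the stored directory fd instead of the absolute path.
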
View File

@@ -1 +1,3 @@
const init: (VariableDeclaration | AnyExpression) = true;
function hey() {
const foo = process.env.node_eNV;
}