Mirror of https://github.com/oven-sh/bun, synced 2026-02-09 10:28:47 +00:00
@@ -1,166 +0,0 @@
import type {ByteBuffer} from "peechy";

type byte = number;
type float = number;
type int = number;
type alphanumeric = string;
type uint = number;
type int8 = number;
type lowp = number;
type int16 = number;
type int32 = number;
type float32 = number;
type uint16 = number;
type uint32 = number;

export enum Loader {
  jsx = 1,
  js = 2,
  ts = 3,
  tsx = 4,
  css = 5,
  file = 6,
  json = 7
}

export const LoaderKeys = {
  1: "jsx",
  jsx: "jsx",
  2: "js",
  js: "js",
  3: "ts",
  ts: "ts",
  4: "tsx",
  tsx: "tsx",
  5: "css",
  css: "css",
  6: "file",
  file: "file",
  7: "json",
  json: "json"
}

export enum JSXRuntime {
  automatic = 1,
  classic = 2
}

export const JSXRuntimeKeys = {
  1: "automatic",
  automatic: "automatic",
  2: "classic",
  classic: "classic"
}

export enum TransformResponseStatus {
  success = 1,
  fail = 2
}

export const TransformResponseStatusKeys = {
  1: "success",
  success: "success",
  2: "fail",
  fail: "fail"
}

export enum MessageKind {
  err = 1,
  warn = 2,
  note = 3,
  debug = 4
}

export const MessageKindKeys = {
  1: "err",
  err: "err",
  2: "warn",
  warn: "warn",
  3: "note",
  note: "note",
  4: "debug",
  debug: "debug"
}

export interface JSX {
  factory: string;
  runtime: JSXRuntime;
  fragment: string;
  production: boolean;
  import_source: string;
  react_fast_refresh: boolean;
  loader_keys: string[];
  loader_values: Loader[];
}

export interface TransformOptions {
  jsx: JSX;
  ts: boolean;
  base_path: string;
  define_keys: string[];
  define_values: string[];
}

export interface FileHandle {
  path: string;
  size: uint;
  fd: uint;
}

export interface Transform {
  handle?: FileHandle;
  path?: string;
  contents?: Uint8Array;
  loader?: Loader;
  options?: TransformOptions;
}

export interface OutputFile {
  data: Uint8Array;
  path: string;
}

export interface TransformResponse {
  status: TransformResponseStatus;
  files: OutputFile[];
  errors: Message[];
}

export interface Location {
  file: string;
  namespace: string;
  line: int32;
  column: int32;
  line_text: string;
  suggestion: string;
  offset: uint;
}

export interface MessageData {
  text?: string;
  location?: Location;
}

export interface Message {
  kind: MessageKind;
  data: MessageData;
  notes: MessageData[];
}

export interface Log {
  warnings: uint32;
  errors: uint32;
  msgs: Message[];
}

export declare function encodeJSX(message: JSX, bb: ByteBuffer): void;
export declare function decodeJSX(buffer: ByteBuffer): JSX;
export declare function encodeTransformOptions(message: TransformOptions, bb: ByteBuffer): void;
export declare function decodeTransformOptions(buffer: ByteBuffer): TransformOptions;
export declare function encodeFileHandle(message: FileHandle, bb: ByteBuffer): void;
export declare function decodeFileHandle(buffer: ByteBuffer): FileHandle;
export declare function encodeTransform(message: Transform, bb: ByteBuffer): void;
export declare function decodeTransform(buffer: ByteBuffer): Transform;
export declare function encodeOutputFile(message: OutputFile, bb: ByteBuffer): void;
export declare function decodeOutputFile(buffer: ByteBuffer): OutputFile;
export declare function encodeTransformResponse(message: TransformResponse, bb: ByteBuffer): void;
export declare function decodeTransformResponse(buffer: ByteBuffer): TransformResponse;
export declare function encodeLocation(message: Location, bb: ByteBuffer): void;
export declare function decodeLocation(buffer: ByteBuffer): Location;
export declare function encodeMessageData(message: MessageData, bb: ByteBuffer): void;
export declare function decodeMessageData(buffer: ByteBuffer): MessageData;
export declare function encodeMessage(message: Message, bb: ByteBuffer): void;
export declare function decodeMessage(buffer: ByteBuffer): Message;
export declare function encodeLog(message: Log, bb: ByteBuffer): void;
export declare function decodeLog(buffer: ByteBuffer): Log;
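The deleted declarations above come in encode/decode pairs over peechy's ByteBuffer. A round trip looked roughly like this — a minimal sketch, assuming peechy's kiwi-style ByteBuffer API (`toUint8Array()`); the `./schema` import path for the generated implementations is hypothetical:

```ts
import { ByteBuffer } from "peechy";
// Hypothetical path to the generated implementations of the declarations above:
import { encodeTransform, decodeTransform, Loader } from "./schema";

// Serialize a Transform message into a fresh, growable buffer...
const bb = new ByteBuffer();
encodeTransform(
  {
    path: "src/index.tsx",
    loader: Loader.tsx,
    contents: new TextEncoder().encode("export {};"),
  },
  bb,
);

// ...then decode the same bytes back into a structurally equal object.
const roundTripped = decodeTransform(new ByteBuffer(bb.toUint8Array()));
console.log(roundTripped.path, roundTripped.loader); // "src/index.tsx" 4
```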
@@ -11,7 +11,6 @@ pub const Bundler = struct {
    pub fn init(options: options.TransformOptions, allocator: *std.mem.Allocator) Bundler {
        var log = logger.Log.init(allocator);

        return Bundler{
            .options = options,
            .allocator = allocator,
src/cache.zig (new file, +9)
@@ -0,0 +1,9 @@
pub const Cache = struct {
    pub const Fs = struct {};

    pub const Css = struct {};

    pub const JavaScript = struct {};

    pub const Json = struct {};
};
src/fs.zig (496)
@@ -4,6 +4,7 @@ usingnamespace @import("global.zig");
const alloc = @import("alloc.zig");
const expect = std.testing.expect;
const Mutex = std.Thread.Mutex;

// pub const FilesystemImplementation = @import("fs_impl.zig");
@@ -16,13 +17,134 @@ pub const Stat = packed struct {
    kind: FileSystemEntry.Kind,
};

threadlocal var scratch_lookup_buffer = [_]u8{0} ** 255;

pub const FileSystem = struct {
    // This maps paths relative to absolute_working_dir to the structure of arrays of paths
    stats: std.StringHashMap(Stat) = undefined,
    entries: std.ArrayList(FileSystemEntry),
    allocator: *std.mem.Allocator,
    top_level_dir: string = "/",
    fs: Implementation,

    absolute_working_dir: string = "/",
    implementation: anytype = undefined,

    pub const Error = error{
        ENOENT,
        EACCESS,
        INVALID_NAME,
        ENOTDIR,
    };
    pub const DirEntry = struct {
        pub const EntryMap = std.StringArrayHashMap(*Entry);
        dir: string,
        data: EntryMap,

        pub fn empty(dir: string, allocator: *std.mem.Allocator) DirEntry {
            return DirEntry{ .dir = dir, .data = EntryMap.init(allocator) };
        }

        pub fn init(dir: string, allocator: *std.mem.Allocator) DirEntry {
            return DirEntry{ .dir = dir, .data = EntryMap.init(allocator) };
        }

        pub const Err = struct {
            original_error: anyerror,
            canonical_error: anyerror,
        };

        pub fn deinit(d: *DirEntry) void {
            d.data.allocator.free(d.dir);

            for (d.data.items()) |item| {
                item.value.deinit(d.data.allocator);
            }
            d.data.deinit();
        }

        pub fn get(entry: *DirEntry, _query: string) ?Entry.Lookup {
            if (_query.len == 0) return null;

            var end: usize = 0;
            std.debug.assert(scratch_lookup_buffer.len >= _query.len);
            for (_query) |c, i| {
                scratch_lookup_buffer[i] = std.ascii.toLower(c);
                end = i;
            }
            const query = scratch_lookup_buffer[0 .. end + 1];
            const result = entry.data.get(query) orelse return null;
            // Report when the stored entry differs from the query only by case.
            if (!strings.eql(result.base, query)) {
                return Entry.Lookup{ .entry = result, .different_case = Entry.Lookup.DifferentCase{
                    .dir = entry.dir,
                    .query = _query,
                    .actual = result.base,
                } };
            }

            return Entry.Lookup{ .entry = result, .different_case = null };
        }
    };
    pub const Entry = struct {
        cache: Cache = Cache{},
        dir: string,
        base: string,
        mutex: Mutex,
        need_stat: bool = true,

        pub const Lookup = struct {
            entry: *Entry,
            different_case: ?DifferentCase,

            pub const DifferentCase = struct {
                dir: string,
                query: string,
                actual: string,
            };
        };

        pub fn deinit(e: *Entry, allocator: *std.mem.Allocator) void {
            allocator.free(e.base);
            allocator.free(e.dir);
            allocator.free(e.cache.symlink);
            allocator.destroy(e);
        }

        pub const Cache = struct {
            symlink: string = "",
            kind: Kind = Kind.file,
        };

        pub const Kind = enum {
            dir,
            file,
        };

        pub fn kind(entry: *Entry, fs: *Implementation) Kind {
            const held = entry.mutex.acquire();
            defer held.release();
            if (entry.need_stat) {
                entry.need_stat = false;
                entry.cache = fs.kind(entry.dir, entry.base);
            }
            return entry.cache.kind;
        }

        pub fn symlink(entry: *Entry, fs: *Implementation) string {
            const held = entry.mutex.acquire();
            defer held.release();
            if (entry.need_stat) {
                entry.need_stat = false;
                entry.cache = fs.kind(entry.dir, entry.base);
            }
            return entry.cache.symlink;
        }
    };
    // pub fn statBatch(fs: *FileSystemEntry, paths: []string) ![]?Stat {

@@ -37,30 +159,365 @@ pub const FileSystem = struct {

    // }

    pub const RealFS = struct {
        entries_mutex: Mutex = Mutex{},
        entries: std.StringHashMap(EntriesOption),
        allocator: *std.mem.Allocator,
        do_not_cache_entries: bool = false,
        limiter: Limiter,
        watcher: ?std.StringHashMap(WatchData) = null,
        watcher_mutex: Mutex = Mutex{},
        pub const ModKey = struct {
            inode: std.fs.File.INode = 0,
            size: u64 = 0,
            mtime: i128 = 0,
            mode: std.fs.File.Mode = 0,

            pub const Error = error{
                Unusable,
            };

            pub fn generate(fs: *RealFS, path: string) anyerror!ModKey {
                var file = try std.fs.openFileAbsolute(path, std.fs.File.OpenFlags{ .read = true });
                defer file.close();
                const stat = try file.stat();

                const seconds = stat.mtime / std.time.ns_per_s;

                // We can't detect changes if the file system zeros out the modification time
                if (stat.mtime == 0) {
                    return Error.Unusable;
                }

                // Don't generate a modification key if the file is too new
                const now = std.time.nanoTimestamp();
                const now_seconds = now / std.time.ns_per_s;
                if (seconds > now_seconds or (seconds == now_seconds and stat.mtime > now)) {
                    return Error.Unusable;
                }

                return ModKey{
                    .inode = stat.inode,
                    .size = stat.size,
                    .mtime = stat.mtime,
                    .mode = stat.mode,
                    // .uid = stat.
                };
            }
            pub const SafetyGap = 3;
        };
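The ModKey above is a cheap change-detection fingerprint: two snapshots that compare equal mean the file is plausibly unchanged. A sketch of that comparison in TypeScript, for illustration only (the interface mirrors the Zig struct; `modKeysEqual` is a hypothetical helper, not bun's API):

```ts
// Mirrors the Zig ModKey fields; mtime is in nanoseconds, as above.
interface ModKey {
  inode: number;
  size: number;
  mtime: number;
  mode: number;
}

// A file is considered unchanged only if every field matches.
function modKeysEqual(a: ModKey, b: ModKey): boolean {
  return a.inode === b.inode && a.size === b.size && a.mtime === b.mtime && a.mode === b.mode;
}
```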

        fn modKeyError(fs: *RealFS, path: string, err: anyerror) anyerror!ModKey {
            if (fs.watcher) |watcher| {
                const hold = fs.watcher_mutex.acquire();
                defer hold.release();
                var state = WatchData.State.file_missing;

                switch (err) {
                    ModKey.Error.Unusable => {
                        state = WatchData.State.file_unusable_mod_key;
                    },
                    else => {},
                }

                var entry = try watcher.getOrPutValue(path, WatchData{ .state = state });
                entry.value.state = state;
            }
            return err;
        }

        pub fn modKey(fs: *RealFS, path: string) !ModKey {
            fs.limiter.before();
            defer fs.limiter.after();

            const key = ModKey.generate(fs, path) catch |err| return fs.modKeyError(path, err);
            if (fs.watcher) |watcher| {
                const hold = fs.watcher_mutex.acquire();
                defer hold.release();

                var entry = try watcher.getOrPutValue(path, WatchData{ .state = .file_has_mod_key, .mod_key = key });
                entry.value.mod_key = key;
            }

            return key;
        }

        pub const WatchData = struct {
            dir_entries: []string = &([_]string{}),
            file_contents: string = "",
            mod_key: ModKey = ModKey{},
            watch_mutex: Mutex = Mutex{},
            state: State = State.none,

            pub const State = enum {
                none,
                dir_has_entries,
                dir_missing,
                file_has_mod_key,
                file_need_mod_key,
                file_missing,
                file_unusable_mod_key,
            };
        };

        pub const EntriesOption = union(Tag) {
            entries: DirEntry,
            err: DirEntry.Err,

            pub const Tag = enum {
                entries,
                err,
            };
        };

        // Limit the number of files open simultaneously to avoid ulimit issues
        pub const Limiter = struct {
            chan: std.event.Channel(bool),

            pub fn init(allocator: *std.mem.Allocator) !Limiter {
                var limiter = Limiter{ .chan = undefined };
                var buf = try allocator.alloc(bool, 32);
                limiter.chan.init(buf);

                return limiter;
            }

            // This will block if the number of open files is already at the limit
            pub fn before(limiter: *Limiter) void {
                limiter.chan.put(false);
            }

            pub fn after(limiter: *Limiter) void {
                _ = limiter.chan.get();
            }
        };
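The channel here acts as a counting semaphore: `before()` blocks once 32 operations are in flight and `after()` releases a slot. The same shape in TypeScript, as a hedged illustration (a hypothetical `Limiter` for an async runtime, not bun's API):

```ts
// A hypothetical async limiter with the same before()/after() shape as the
// Zig Limiter above: at most `limit` file operations may be in flight.
class Limiter {
  private inFlight = 0;
  private waiters: Array<() => void> = [];
  constructor(private limit: number = 32) {}

  async before(): Promise<void> {
    if (this.inFlight >= this.limit) {
      await new Promise<void>((resolve) => this.waiters.push(resolve));
      return; // the slot was handed to us directly by after()
    }
    this.inFlight++;
  }

  after(): void {
    const next = this.waiters.shift();
    if (next) {
      next(); // hand the slot directly to a waiter, keeping the count constant
    } else {
      this.inFlight--;
    }
  }
}
```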

        pub const FileNotFound = struct {};

        fn readdir(fs: *RealFS, dir: string) !DirEntry {
            fs.limiter.before();
            defer fs.limiter.after();

            var handle = try std.fs.openDirAbsolute(dir, std.fs.Dir.OpenDirOptions{ .iterate = true, .access_sub_paths = true });
            defer handle.close();

            var iter: std.fs.Dir.Iterator = handle.iterate();
            var dir_entry = DirEntry{ .dir = "", .data = DirEntry.EntryMap.init(fs.allocator) };
            errdefer dir_entry.deinit();
            while (try iter.next()) |_entry| {
                const entry: std.fs.Dir.Entry = _entry;
                var kind: Entry.Kind = undefined;
                switch (entry.kind) {
                    .Directory => {
                        kind = Entry.Kind.dir;
                    },
                    .SymLink => {
                        // This might be wrong!
                        kind = Entry.Kind.file;
                    },
                    .File => {
                        kind = Entry.Kind.file;
                    },
                    else => {
                        continue;
                    },
                }

                // entry.name only lives for the duration of the iteration
                var name = try fs.allocator.alloc(u8, entry.name.len);
                for (entry.name) |c, i| {
                    name[i] = std.ascii.toLower(c);
                }
                // The map stores *Entry, so heap-allocate each entry.
                var entry_ptr = try fs.allocator.create(Entry);
                entry_ptr.* = Entry{
                    .base = name,
                    .dir = dir,
                    .mutex = Mutex{},
                    // Call "stat" lazily for performance. The "@material-ui/icons" package
                    // contains a directory with over 11,000 entries in it and running "stat"
                    // for each entry was a big performance issue for that package.
                    .need_stat = true,
                    .cache = Entry.Cache{
                        .symlink = if (entry.kind == std.fs.Dir.Entry.Kind.SymLink) (try fs.allocator.dupe(u8, name)) else "",
                        .kind = kind,
                    },
                };
                try dir_entry.data.put(name, entry_ptr);
            }
            // Copy at the bottom here so in the event of an error, we don't deinit the dir string.
            dir_entry.dir = dir;
            return dir_entry;
        }

        fn readDirectoryError(fs: *RealFS, dir: string, err: anyerror) anyerror!EntriesOption {
            if (fs.watcher) |watcher| {
                var hold = fs.watcher_mutex.acquire();
                defer hold.release();
                try watcher.put(dir, WatchData{ .state = .dir_missing });
            }

            if (!fs.do_not_cache_entries) {
                var hold = fs.entries_mutex.acquire();
                defer hold.release();

                try fs.entries.put(dir, EntriesOption{
                    .err = DirEntry.Err{ .original_error = err, .canonical_error = err },
                });
            }
            return err;
        }

        pub fn readDirectory(fs: *RealFS, dir: string) !EntriesOption {
            if (!fs.do_not_cache_entries) {
                var hold = fs.entries_mutex.acquire();
                defer hold.release();

                // First, check the cache
                if (fs.entries.get(dir)) |cached| {
                    return cached;
                }
            }

            // Cache miss: read the directory entries
            const entries = fs.readdir(dir) catch |err| return (try fs.readDirectoryError(dir, err));

            if (fs.watcher) |watcher| {
                var hold = fs.watcher_mutex.acquire();
                defer hold.release();
                var _entries = entries.data.items();
                const names = try fs.allocator.alloc([]const u8, _entries.len);
                for (_entries) |entry, i| {
                    names[i] = try fs.allocator.dupe(u8, entry.key);
                }
                strings.sortAsc(names);

                try watcher.put(
                    try fs.allocator.dupe(u8, dir),
                    WatchData{ .dir_entries = names, .state = .dir_has_entries },
                );
            }

            // Cache the successful result
            if (!fs.do_not_cache_entries) {
                var hold = fs.entries_mutex.acquire();
                defer hold.release();

                try fs.entries.put(dir, EntriesOption{ .entries = entries });
            }

            return EntriesOption{ .entries = entries };
        }

        fn readFileError(fs: *RealFS, path: string, err: anyerror) anyerror!File {
            if (fs.watcher) |watcher| {
                var hold = fs.watcher_mutex.acquire();
                defer hold.release();
                var res = try watcher.getOrPutValue(path, WatchData{ .state = .file_missing });
                res.value.state = .file_missing;
            }

            return err;
        }

        pub fn readFile(fs: *RealFS, path: string) !File {
            fs.limiter.before();
            defer fs.limiter.after();

            const file: std.fs.File = std.fs.openFileAbsolute(path, std.fs.File.OpenFlags{ .read = true, .write = false }) catch |err| return fs.readFileError(path, err);
            defer file.close();

            // return self.readFileAllocOptions(allocator, file_path, max_bytes, null, @alignOf(u8), null);
            // TODO: this causes an extra call to .stat, do it manually and cache the results ourself.
            const size = file.getEndPos() catch |err| return fs.readFileError(path, err);
            const file_contents: []u8 = file.readToEndAllocOptions(fs.allocator, size, size, @alignOf(u8), null) catch |err| return fs.readFileError(path, err);

            if (fs.watcher) |watcher| {
                var hold = fs.watcher_mutex.acquire();
                defer hold.release();
                var res = try watcher.getOrPutValue(path, WatchData{});
                res.value.state = .file_need_mod_key;
                res.value.file_contents = file_contents;
            }

            return File{ .path = Path.init(path), .contents = file_contents };
        }

        pub fn kind(fs: *RealFS, _dir: string, base: string) !Entry.Cache {
            var dir = _dir;
            var combo = [2]string{ dir, base };
            var entry_path = try std.fs.path.join(fs.allocator, &combo);
            defer fs.allocator.free(entry_path);

            fs.limiter.before();
            defer fs.limiter.after();

            const file = try std.fs.openFileAbsolute(entry_path, .{ .read = true, .write = false });
            defer file.close();
            const stat = try file.stat();
            var kind = stat.kind;
            var cache = Entry.Cache{ .kind = Entry.Kind.file, .symlink = "" };
            var symlink: string = "";

            if (kind == .SymLink) {
                // windows has a max filepath of 255 chars
                // we give it a little longer for other platforms
                var out_buffer = [_]u8{0} ** 1024;
                var out_slice = &out_buffer;
                symlink = entry_path;
                var links_walked: u8 = 0;

                while (links_walked < 255) : (links_walked += 1) {
                    var link = try std.os.readlink(symlink, &out_buffer);

                    if (!std.fs.path.isAbsolute(link)) {
                        combo[0] = dir;
                        combo[1] = link;
                        if (link.ptr != out_slice.ptr) {
                            fs.allocator.free(link);
                        }
                        link = std.fs.path.join(fs.allocator, &combo) catch return cache;
                    }
                    // TODO: do we need to clean the path?
                    symlink = link;

                    const file2 = std.fs.openFileAbsolute(symlink, File.OpenFlags{ .read = true, .write = false }) catch return cache;
                    defer file2.close();

                    const stat2 = file2.stat() catch return cache;

                    // Re-run "lstat" on the symlink target; stop once it is no longer a symlink
                    kind = stat2.kind;
                    if (kind != .SymLink) {
                        break;
                    }
                    dir = std.fs.path.dirname(link) orelse return cache;
                }

                if (links_walked > 255) {
                    return cache;
                }
            }

            if (kind == .Directory) {
                cache.kind = Entry.Kind.dir;
            } else {
                cache.kind = Entry.Kind.file;
            }
            cache.symlink = symlink;

            return cache;
        }

        // // Stores the file entries for directories we've listed before
        // entries_mutex: std.Mutex
        // entries map[string]entriesOrErr

        // // If true, do not use the "entries" cache
        // doNotCacheEntries bool
    };

    pub const Implementation = switch (build_target) {
        .wasi, .native => RealFS,
        .wasm => WasmFS,
    };
};

pub const FileSystemEntry = union(FileSystemEntry.Kind) {
    file: File,

@@ -145,6 +602,11 @@ pub const Path = struct {
    namespace: string,
    name: PathName,

    // TODO:
    pub fn normalize(str: string) string {
        return str;
    }

    pub fn init(text: string) Path {
        return Path{ .pretty = text, .text = text, .namespace = "file", .name = PathName.init(text) };
    }
@@ -1,7 +1,20 @@
const std = @import("std");
pub usingnamespace @import("strings.zig");

pub const BuildTarget = enum { native, wasm, wasi };
pub const build_target: BuildTarget = comptime if (std.Target.current.isWasm() and std.Target.current.getOsTag() == .wasi)
    BuildTarget.wasi
else if (std.Target.current.isWasm())
    BuildTarget.wasm
else
    BuildTarget.native;

pub const isWasm = build_target == .wasm;
pub const isNative = build_target == .native;
pub const isWasi = build_target == .wasi;

pub const Output = struct {
    var source: *Source = undefined;
@@ -1380,6 +1380,35 @@ pub const Expr = struct {
    loc: logger.Loc,
    data: Data,

    pub const Query = struct { expr: Expr, loc: logger.Loc };

    pub fn getProperty(expr: *Expr, name: string) ?Query {
        const obj: *E.Object = expr.data.e_object orelse return null;

        for (obj.properties) |prop| {
            const value = prop.value orelse continue;
            const key = prop.key orelse continue;
            const key_str: *E.String = key.data.e_string orelse continue;
            if (key_str.eql(string, name)) {
                return Query{ .expr = value, .loc = key.loc };
            }
        }

        return null;
    }

    pub fn getString(expr: *Expr, allocator: *std.mem.Allocator) !?string {
        const key_str: *E.String = expr.data.e_string orelse return null;

        return if (key_str.isUTF8()) key_str.value else key_str.string(allocator);
    }

    pub fn getBool(expr: *Expr, allocator: *std.mem.Allocator) ?bool {
        const obj: *E.Boolean = expr.data.e_boolean orelse return null;

        return obj.value;
    }

    pub const EFlags = enum { none, ts_decorator };

    const Serializable = struct {
@@ -7,6 +7,51 @@ usingnamespace @import("global.zig");

const assert = std.debug.assert;

pub const Platform = enum {
    node,
    browser,
    neutral,

    const MAIN_FIELD_NAMES = [_]string{ "browser", "module", "main" };
    pub const DefaultMainFields: std.EnumArray(Platform, []string) = comptime blk: {
        var array = std.EnumArray(Platform, []string).initUndefined();

        // Note that this means if a package specifies "module" and "main", the ES6
        // module will not be selected. This means tree shaking will not work when
        // targeting node environments.
        //
        // This is unfortunately necessary for compatibility. Some packages
        // incorrectly treat the "module" field as "code for the browser". It
        // actually means "code for ES6 environments" which includes both node
        // and the browser.
        //
        // For example, the package "@firebase/app" prints a warning on startup about
        // the bundler incorrectly using code meant for the browser if the bundler
        // selects the "module" field instead of the "main" field.
        //
        // If you want to enable tree shaking when targeting node, you will have to
        // configure the main fields to be "module" and then "main". Keep in mind
        // that some packages may break if you do this.
        array.set(Platform.node, &([_]string{ MAIN_FIELD_NAMES[1], MAIN_FIELD_NAMES[2] }));

        // Note that this means if a package specifies "main", "module", and
        // "browser" then "browser" will win out over "module". This is the
        // same behavior as webpack: https://github.com/webpack/webpack/issues/4674.
        //
        // This is deliberate because the presence of the "browser" field is a
        // good signal that the "module" field may have non-browser stuff in it,
        // which will crash or fail to be bundled when targeting the browser.
        array.set(Platform.browser, &([_]string{ MAIN_FIELD_NAMES[0], MAIN_FIELD_NAMES[1], MAIN_FIELD_NAMES[2] }));

        // The neutral platform is for people that don't want esbuild to try to
        // pick good defaults for their platform. In that case, the list of main
        // fields is empty by default. You must explicitly configure it yourself.
        array.set(Platform.neutral, &([_]string{}));

        break :blk array;
    };
};
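To make the comment block concrete, here is how a main-field order plays out when picking an entry point from a package.json — a TypeScript sketch where `pickEntry` and the literal tables are illustrative stand-ins mirroring MAIN_FIELD_NAMES above, not bun's API:

```ts
type PlatformName = "node" | "browser" | "neutral";

// Mirrors DefaultMainFields above: browser tries "browser" first,
// neutral tries nothing and must be configured explicitly.
const DEFAULT_MAIN_FIELDS: Record<PlatformName, string[]> = {
  node: ["module", "main"],
  browser: ["browser", "module", "main"],
  neutral: [],
};

function pickEntry(pkg: Record<string, unknown>, platform: PlatformName): string | null {
  for (const field of DEFAULT_MAIN_FIELDS[platform]) {
    const value = pkg[field];
    if (typeof value === "string") return value; // first match wins
  }
  return null;
}

// "browser" beats "module", matching the webpack behavior cited above:
pickEntry({ main: "index.js", module: "index.mjs", browser: "index.browser.js" }, "browser");
// => "index.browser.js"
```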

pub const Loader = enum {
    jsx,
    js,

@@ -28,6 +73,17 @@ pub const defaultLoaders = std.ComptimeStringMap(Loader, .{
});

pub const JSX = struct {
    pub const Pragma = struct {
        factory: string = "React.createElement",
        fragment: string = "React.Fragment",
        runtime: JSX.Runtime = JSX.Runtime.automatic,

        /// Facilitates automatic JSX importing
        /// Set on a per file basis like this:
        /// /** @jsxImportSource @emotion/core */
        import_source: string = "react",
    };

    parse: bool = true,
    factory: string = "createElement",
    fragment: string = "Fragment",
src/resolver/resolve_path.zig (new file, +82)
@@ -0,0 +1,82 @@
// https://github.com/MasterQ32/ftz/blob/3183b582211f8e38c1c3363c56753026ca45c11f/src/main.zig#L431-L509
// Thanks, Felix! We should get this into std perhaps.

const std = @import("std");

/// Resolves a unix-like path and removes all "." and ".." from it. Will not escape the root and can be used to sanitize inputs.
pub fn resolvePath(buffer: []u8, src_path: []const u8) error{BufferTooSmall}![]u8 {
    if (buffer.len == 0)
        return error.BufferTooSmall;
    if (src_path.len == 0) {
        buffer[0] = '/';
        return buffer[0..1];
    }

    var end: usize = 0;
    buffer[0] = '/';

    var iter = std.mem.tokenize(src_path, "/");
    while (iter.next()) |segment| {
        if (std.mem.eql(u8, segment, ".")) {
            continue;
        } else if (std.mem.eql(u8, segment, "..")) {
            while (true) {
                if (end == 0)
                    break;
                if (buffer[end] == '/') {
                    break;
                }
                end -= 1;
            }
        } else {
            if (end + segment.len + 1 > buffer.len)
                return error.BufferTooSmall;

            const start = end;
            buffer[end] = '/';
            end += segment.len + 1;
            std.mem.copy(u8, buffer[start + 1 .. end], segment);
        }
    }

    return if (end == 0)
        buffer[0 .. end + 1]
    else
        buffer[0..end];
}

fn testResolve(expected: []const u8, input: []const u8) !void {
    var buffer: [1024]u8 = undefined;

    const actual = try resolvePath(&buffer, input);
    std.testing.expectEqualStrings(expected, actual);
}

test "resolvePath" {
    try testResolve("/", "");
    try testResolve("/", "/");
    try testResolve("/", "////////////");

    try testResolve("/a", "a");
    try testResolve("/a", "/a");
    try testResolve("/a", "////////////a");
    try testResolve("/a", "////////////a///");

    try testResolve("/a/b/c/d", "/a/b/c/d");

    try testResolve("/a/b/d", "/a/b/c/../d");

    try testResolve("/", "..");
    try testResolve("/", "/..");
    try testResolve("/", "/../../../..");
    try testResolve("/a/b/c", "a/b/c/");

    try testResolve("/new/date.txt", "/new/../../new/date.txt");
}

test "resolvePath overflow" {
    var buf: [1]u8 = undefined;

    std.testing.expectEqualStrings("/", try resolvePath(&buf, "/"));
    std.testing.expectError(error.BufferTooSmall, resolvePath(&buf, "a")); // will resolve to "/a"
}
src/resolver/resolver.zig (new file, +219)
@@ -0,0 +1,219 @@
usingnamespace @import("../global.zig");
const ast = @import("../ast.zig");
const logger = @import("../logger.zig");
const options = @import("../options.zig");
const fs = @import("../fs.zig");
const std = @import("std");

pub const SideEffectsData = struct {
    source: *logger.Source,
    range: logger.Range,

    // If true, "sideEffects" was an array. If false, "sideEffects" was false.
    is_side_effects_array_in_json: bool = false,
};

pub const DirInfo = struct {
    // These objects are immutable, so we can just point to the parent directory
    // and avoid having to lock the cache again
    parent: ?*DirInfo = null,

    // A pointer to the enclosing dirInfo with a valid "browser" field in
    // package.json. We need this to remap paths after they have been resolved.
    enclosing_browser_scope: ?*DirInfo = null,

    abs_path: string,
    entries: fs.FileSystem.DirEntry,
    has_node_modules: bool = false, // Is there a "node_modules" subdirectory?
    package_json: ?*PackageJSON = null, // Is there a "package.json" file?
    ts_config_json: ?*TSConfigJSON = null, // Is there a "tsconfig.json" file in this directory or a parent directory?
    abs_real_path: string = "", // If non-empty, this is the real absolute path resolving any symlinks
};

pub const Resolver = struct {
    opts: options.TransformOptions,
    fs: *fs.FileSystem,
    log: *logger.Log,
    allocator: *std.mem.Allocator,

    debug_logs: ?DebugLogs = null,

    // These are sets that represent various conditions for the "exports" field
    // in package.json.
    esm_conditions_default: std.StringHashMap(bool),
    esm_conditions_import: std.StringHashMap(bool),
    esm_conditions_require: std.StringHashMap(bool),

    // A special filtered import order for CSS "@import" imports.
    //
    // The "resolve extensions" setting determines the order of implicit
    // extensions to try when resolving imports with the extension omitted.
    // Sometimes people create a JavaScript/TypeScript file and a CSS file with
    // the same name when they create a component. At a high level, users expect
    // implicit extensions to resolve to the JS file when being imported from JS
    // and to resolve to the CSS file when being imported from CSS.
    //
    // Different bundlers handle this in different ways. Parcel handles this by
    // having the resolver prefer the same extension as the importing file in
    // front of the configured "resolve extensions" order. Webpack's "css-loader"
    // plugin just explicitly configures a special "resolve extensions" order
    // consisting of only ".css" for CSS files.
    //
    // It's unclear what behavior is best here. What we currently do is to create
    // a special filtered version of the configured "resolve extensions" order
    // for CSS files that filters out any extension that has been explicitly
    // configured with a non-CSS loader. This still gives users control over the
    // order but avoids the scenario where we match an import in a CSS file to a
    // JavaScript-related file. It's probably not perfect with plugins in the
    // picture but it's better than some alternatives and probably pretty good.
    // atImportExtensionOrder []string
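A sketch of that filtering rule, in TypeScript for illustration (the extension list and loader table here are hypothetical configuration, not bun's defaults):

```ts
// Keep only extensions that are not explicitly mapped to a non-CSS loader,
// so a CSS "@import" never resolves to a JavaScript-related file.
const resolveExtensions = [".tsx", ".ts", ".jsx", ".js", ".css", ".json"];
const loaders: Record<string, string> = {
  ".tsx": "tsx",
  ".ts": "ts",
  ".jsx": "jsx",
  ".js": "js",
  ".css": "css",
  ".json": "json",
};

const atImportExtensionOrder = resolveExtensions.filter(
  (ext) => loaders[ext] === undefined || loaders[ext] === "css",
);
// => [".css"]
```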

    // This mutex serves two purposes. First of all, it guards access to "dirCache"
    // which is potentially mutated during path resolution. But this mutex is also
    // necessary for performance. The "React admin" benchmark mysteriously runs
    // twice as fast when this mutex is locked around the whole resolve operation
    // instead of around individual accesses to "dirCache". For some reason,
    // reducing parallelism in the resolver helps the rest of the bundler go
    // faster. I'm not sure why this is but please don't change this unless you
    // do a lot of testing with various benchmarks and there aren't any regressions.
    mutex: std.Thread.Mutex,

    // This cache maps a directory path to information about that directory and
    // all parent directories
    dir_cache: std.StringHashMap(?*DirInfo),

    pub const DebugLogs = struct {
        what: string = "",
        indent: MutableString,
        notes: std.ArrayList(logger.Data),

        pub fn init(allocator: *std.mem.Allocator) DebugLogs {
            return .{
                .indent = MutableString.init(allocator, 0),
                .notes = std.ArrayList(logger.Data).init(allocator),
            };
        }

        pub fn deinit(d: DebugLogs) void {
            d.notes.deinit();
            d.indent.deinit();
        }

        pub fn increaseIndent(d: *DebugLogs) !void {
            try d.indent.append(" ");
        }

        pub fn decreaseIndent(d: *DebugLogs) !void {
            d.indent.list.shrinkRetainingCapacity(d.indent.list.items.len - 1);
        }

        pub fn addNote(d: *DebugLogs, _text: string) !void {
            var text = _text;
            const len = d.indent.len();
            if (len > 0) {
                // Prefix the note with the current indentation.
                var new_text = try d.notes.allocator.alloc(u8, _text.len + len);
                std.mem.copy(u8, new_text, d.indent.list.items);
                std.mem.copy(u8, new_text[len..], _text);
                d.notes.allocator.free(_text);
                text = new_text;
            }

            try d.notes.append(logger.rangeData(null, logger.Range.None, text));
        }
    };

    pub const PathPair = struct {
        primary: logger.Path,
        secondary: ?logger.Path = null,
    };

    pub const Result = struct {
        path_pair: PathPair,

        jsx: options.JSX.Pragma = options.JSX.Pragma{},

        // plugin_data: void
    };

    pub fn resolve(r: *Resolver, source_dir: string, import_path: string, kind: ast.ImportKind) Result {}

    fn dirInfoCached(r: *Resolver, path: string) !?*DirInfo {
        // First, check the cache
        if (r.dir_cache.get(path)) |dir| {
            return dir;
        }

        const info = try r.dirInfoUncached(path);

        try r.dir_cache.put(path, info);
        return info;
    }
    fn dirInfoUncached(r: *Resolver, path: string) !?*DirInfo {
        const rfs = r.fs.fs;
        var parent: ?*DirInfo = null;
        const parent_dir = std.fs.path.dirname(path) orelse return null;
        if (!strings.eql(parent_dir, path)) {
            parent = try r.dirInfoCached(parent_dir);
        }

        // List the directories
        var _entries = try rfs.readDirectory(path);
        var entries: @TypeOf(_entries.entries) = undefined;
        if (std.meta.activeTag(_entries) == .err) {
            // Just pretend this directory is empty if we can't access it. This is the
            // case on Unix for directories that only have the execute permission bit
            // set. It means we will just pass through the empty directory and
            // continue to check the directories above it, which is how node behaves.
            switch (_entries.err.original_error) {
                fs.FileSystem.Error.EACCESS => {
                    entries = fs.FileSystem.DirEntry.empty(path, r.allocator);
                },

                // Ignore "ENOTDIR" here so that calling "ReadDirectory" on a file behaves
                // as if there is nothing there at all instead of causing an error due to
                // the directory actually being a file. This is a workaround for situations
                // where people try to import from a path containing a file as a parent
                // directory. The "pnpm" package manager generates a faulty "NODE_PATH"
                // list which contains such paths and treating them as missing means we just
                // ignore them during path resolution.
                fs.FileSystem.Error.ENOENT,
                fs.FileSystem.Error.ENOTDIR,
                => {},
                else => {
                    const pretty = r.prettyPath(fs.Path{ .text = path, .namespace = "file" });
                    r.log.addErrorFmt(
                        null,
                        logger.Loc{},
                        r.allocator,
                        "Cannot read directory \"{s}\": {s}",
                        .{
                            pretty,
                            @errorName(_entries.err.original_error),
                        },
                    );
                    return null;
                },
            }
        } else {
            entries = _entries.entries;
        }

        var info = try r.allocator.create(DirInfo);
        info.* = DirInfo{
            .abs_path = path,
            .parent = parent,
            .entries = entries,
        };

        // A "node_modules" directory isn't allowed to directly contain another "node_modules" directory
        var base = std.fs.path.basename(path);
        if (!strings.eqlComptime(base, "node_modules")) {
            if (entries.get("node_modules")) |entry| {
                info.has_node_modules = entry.entry.kind(rfs) == .dir;
            }
        }

        // Propagate the browser scope into child directories
        return info;
    }
};
@@ -14,6 +14,10 @@ pub const MutableString = struct {
        };
    }

    pub fn deinit(str: *MutableString) void {
        str.list.deinit(str.allocator);
    }

    pub fn growIfNeeded(self: *MutableString, amount: usize) !void {
        try self.list.ensureUnusedCapacity(self.allocator, amount);
    }
test.js (deleted, -57)
@@ -1,57 +0,0 @@
import React from "react";

const foo = {
  object: {
    nested: `foo1`,
  },
  bar: 1,
  // React: React,
};

const arrays = [1, 2, 3, "10", 200n, React.createElement("foo")];

function hi() {
  console.log("We need to go deeper.");
  function hey() {
    hi();
  }
}

class Foo {
  get prop() {
    return 1;
  }

  set prop(v) {
    this._v = v;
  }

  static staticInstance() {
    return "hi";
  }

  static get prop() {
    return "yo";
  }

  static set prop(v) {
    Foo.v = v;
  }

  instance() {}
  instanceWithArgs(arg, arg2) {}
  instanceWithRestArgs(arg, arg2, ...arg3) {}
}

try {
  console.log("HI");
} catch (e) {
  console.log("HEY", e);
}

if (true) {
  for (let i = 0; i < 100; i++) {
    console.log();
  }
  console.log("development!");
}
test.jsx (deleted, -27)
@@ -1,27 +0,0 @@
var Button = () => {
  return <div className="button">Button!</div>;
};

var Bar = () => {
  return (
    <div prop={1}>
      Plain text
      <div>
        ← A child div
        <Button>Red</Button>
      </div>
    </div>
  );
};

var Triple = () => {
  return (
    <div prop={1}>
      Plain text
      <div>
        ← A child div
        <Button>Red</Button>
      </div>
    </div>
  );
};