Generate different versions of Bundler, Resolver, and Caches at comptime based on whether we're serving over HTTP

Former-commit-id: e1a8852706
This commit is contained in:
Jarred Sumner
2021-06-04 16:06:38 -07:00
parent 3d827342a5
commit 65f4ea1e18
10 changed files with 2534 additions and 2467 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -11,203 +11,215 @@ const fs = @import("./fs.zig");
const sync = @import("sync.zig");
const Mutex = sync.Mutex;
pub const Cache = struct {
pub const Set = struct {
js: JavaScript,
fs: Fs,
json: Json,
pub fn NewCache(comptime cache_files: bool) type {
return struct {
pub const Set = struct {
js: JavaScript,
fs: Fs,
json: Json,
pub fn init(allocator: *std.mem.Allocator) Set {
return Set{
.js = JavaScript.init(allocator),
.fs = Fs{
.mutex = Mutex.init(),
.entries = std.StringHashMap(Fs.Entry).init(allocator),
},
.json = Json{
.mutex = Mutex.init(),
.entries = std.StringHashMap(*Json.Entry).init(allocator),
},
};
}
};
pub const Fs = struct {
mutex: Mutex,
entries: std.StringHashMap(Entry),
pub const Entry = struct {
contents: string,
fd: StoredFileDescriptorType = 0,
// Null means it's not usable
mod_key: ?fs.FileSystem.Implementation.ModKey = null,
pub fn deinit(entry: *Entry, allocator: *std.mem.Allocator) void {
if (entry.contents.len > 0) {
allocator.free(entry.contents);
entry.contents = "";
}
pub fn init(allocator: *std.mem.Allocator) Set {
return Set{
.js = JavaScript.init(allocator),
.fs = Fs{
.mutex = Mutex.init(),
.entries = std.StringHashMap(Fs.Entry).init(allocator),
.shared_buffer = MutableString.init(allocator, 0) catch unreachable,
},
.json = Json{
.mutex = Mutex.init(),
.entries = std.StringHashMap(*Json.Entry).init(allocator),
},
};
}
};
pub const Fs = struct {
mutex: Mutex,
entries: std.StringHashMap(Entry),
shared_buffer: MutableString,
pub fn deinit(c: *Fs) void {
var iter = c.entries.iterator();
while (iter.next()) |entry| {
entry.value.deinit(c.entries.allocator);
pub const Entry = struct {
contents: string,
fd: StoredFileDescriptorType = 0,
// Null means it's not usable
mod_key: ?fs.FileSystem.Implementation.ModKey = null,
pub fn deinit(entry: *Entry, allocator: *std.mem.Allocator) void {
if (entry.contents.len > 0) {
allocator.free(entry.contents);
entry.contents = "";
}
}
};
pub fn deinit(c: *Fs) void {
var iter = c.entries.iterator();
while (iter.next()) |entry| {
entry.value.deinit(c.entries.allocator);
}
c.entries.deinit();
}
c.entries.deinit();
}
pub fn readFile(c: *Fs, _fs: *fs.FileSystem, path: string, dirname_fd: StoredFileDescriptorType) !Entry {
var rfs = _fs.fs;
pub fn readFile(c: *Fs, _fs: *fs.FileSystem, path: string, dirname_fd: StoredFileDescriptorType, comptime use_shared_buffer: bool) !Entry {
var rfs = _fs.fs;
{
c.mutex.lock();
defer c.mutex.unlock();
if (c.entries.get(path)) |entry| {
if (cache_files) {
{
c.mutex.lock();
defer c.mutex.unlock();
if (c.entries.get(path)) |entry| {
return entry;
}
}
}
var file_handle: std.fs.File = undefined;
if (FeatureFlags.store_file_descriptors and dirname_fd > 0) {
file_handle = try std.fs.Dir.openFile(std.fs.Dir{ .fd = dirname_fd }, std.fs.path.basename(path), .{ .read = true });
} else {
file_handle = try std.fs.openFileAbsolute(path, .{ .read = true });
}
defer {
if (rfs.needToCloseFiles()) {
file_handle.close();
}
}
// If the file's modification key hasn't changed since it was cached, assume
// the contents of the file are also the same and skip reading the file.
var mod_key: ?fs.FileSystem.Implementation.ModKey = rfs.modKeyWithFile(path, file_handle) catch |err| handler: {
switch (err) {
error.FileNotFound, error.AccessDenied => {
return err;
},
else => {
if (isDebug) {
Output.printError("modkey error: {s}", .{@errorName(err)});
}
break :handler null;
},
}
};
var file: fs.File = undefined;
if (mod_key) |modk| {
file = rfs.readFileWithHandle(path, modk.size, file_handle, use_shared_buffer, &c.shared_buffer) catch |err| {
if (isDebug) {
Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
}
return err;
};
} else {
file = rfs.readFileWithHandle(path, null, file_handle, use_shared_buffer, &c.shared_buffer) catch |err| {
if (isDebug) {
Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
}
return err;
};
}
const entry = Entry{
.contents = file.contents,
.mod_key = mod_key,
.fd = if (FeatureFlags.store_file_descriptors) file_handle.handle else 0,
};
if (cache_files) {
c.mutex.lock();
defer c.mutex.unlock();
var res = c.entries.getOrPut(path) catch unreachable;
if (res.found_existing) {
res.entry.value.deinit(c.entries.allocator);
}
res.entry.value = entry;
return res.entry.value;
} else {
return entry;
}
}
};
var file_handle: std.fs.File = undefined;
if (FeatureFlags.store_file_descriptors and dirname_fd > 0) {
file_handle = try std.fs.Dir.openFile(std.fs.Dir{ .fd = dirname_fd }, std.fs.path.basename(path), .{ .read = true });
} else {
file_handle = try std.fs.openFileAbsolute(path, .{ .read = true });
}
defer {
if (rfs.needToCloseFiles()) {
file_handle.close();
}
}
// If the file's modification key hasn't changed since it was cached, assume
// the contents of the file are also the same and skip reading the file.
var mod_key: ?fs.FileSystem.Implementation.ModKey = rfs.modKeyWithFile(path, file_handle) catch |err| handler: {
switch (err) {
error.FileNotFound, error.AccessDenied => {
return err;
},
else => {
if (isDebug) {
Output.printError("modkey error: {s}", .{@errorName(err)});
}
break :handler null;
},
}
pub const Css = struct {
pub const Entry = struct {};
pub const Result = struct {
ok: bool,
value: void,
};
pub fn parse(cache: *@This(), log: *logger.Log, source: logger.Source) !Result {
Global.notimpl();
}
};
var file: fs.File = undefined;
if (mod_key) |modk| {
file = rfs.readFileWithHandle(path, modk.size, file_handle) catch |err| {
if (isDebug) {
Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
}
return err;
pub const JavaScript = struct {
mutex: Mutex,
entries: std.StringHashMap(Result),
pub const Result = js_ast.Result;
pub fn init(allocator: *std.mem.Allocator) JavaScript {
return JavaScript{ .mutex = Mutex.init(), .entries = std.StringHashMap(Result).init(allocator) };
}
// For now, we're not going to cache JavaScript ASTs.
// It's probably only relevant when bundling for production.
pub fn parse(
cache: *@This(),
allocator: *std.mem.Allocator,
opts: js_parser.Parser.Options,
defines: *Define,
log: *logger.Log,
source: *const logger.Source,
) anyerror!?js_ast.Ast {
var temp_log = logger.Log.init(allocator);
defer temp_log.appendTo(log) catch {};
var parser = js_parser.Parser.init(opts, &temp_log, source, defines, allocator) catch |err| {
return null;
};
} else {
file = rfs.readFileWithHandle(path, null, file_handle) catch |err| {
if (isDebug) {
Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
}
return err;
const result = try parser.parse();
return if (result.ok) result.ast else null;
}
};
pub const Json = struct {
pub const Entry = struct {
is_tsconfig: bool = false,
source: logger.Source,
expr: ?js_ast.Expr = null,
ok: bool = false,
// msgs: []logger.Msg,
};
mutex: Mutex,
entries: std.StringHashMap(*Entry),
pub fn init(allocator: *std.mem.Allocator) Json {
return Json{
.mutex = Mutex.init(),
.entries = std.StringHashMap(Entry).init(allocator),
};
}
const entry = Entry{
.contents = file.contents,
.mod_key = mod_key,
.fd = if (FeatureFlags.store_file_descriptors) file_handle.handle else 0,
};
c.mutex.lock();
defer c.mutex.unlock();
var res = c.entries.getOrPut(path) catch unreachable;
if (res.found_existing) {
res.entry.value.deinit(c.entries.allocator);
fn parse(cache: *@This(), log: *logger.Log, source: logger.Source, allocator: *std.mem.Allocator, is_tsconfig: bool, func: anytype) anyerror!?js_ast.Expr {
var temp_log = logger.Log.init(allocator);
defer {
temp_log.appendTo(log) catch {};
}
return func(&source, &temp_log, allocator) catch handler: {
break :handler null;
};
}
pub fn parseJSON(cache: *@This(), log: *logger.Log, source: logger.Source, allocator: *std.mem.Allocator) anyerror!?js_ast.Expr {
return try parse(cache, log, source, allocator, false, json_parser.ParseJSON);
}
res.entry.value = entry;
return res.entry.value;
}
};
pub const Css = struct {
pub const Entry = struct {};
pub const Result = struct {
ok: bool,
value: void,
};
pub fn parse(cache: *@This(), log: *logger.Log, source: logger.Source) !Result {
Global.notimpl();
}
};
pub const JavaScript = struct {
mutex: Mutex,
entries: std.StringHashMap(Result),
pub const Result = js_ast.Result;
pub fn init(allocator: *std.mem.Allocator) JavaScript {
return JavaScript{ .mutex = Mutex.init(), .entries = std.StringHashMap(Result).init(allocator) };
}
// For now, we're not going to cache JavaScript ASTs.
// It's probably only relevant when bundling for production.
pub fn parse(
cache: *@This(),
allocator: *std.mem.Allocator,
opts: js_parser.Parser.Options,
defines: *Define,
log: *logger.Log,
source: *const logger.Source,
) anyerror!?js_ast.Ast {
var temp_log = logger.Log.init(allocator);
defer temp_log.appendTo(log) catch {};
var parser = js_parser.Parser.init(opts, &temp_log, source, defines, allocator) catch |err| {
return null;
};
const result = try parser.parse();
return if (result.ok) result.ast else null;
}
};
pub const Json = struct {
pub const Entry = struct {
is_tsconfig: bool = false,
source: logger.Source,
expr: ?js_ast.Expr = null,
ok: bool = false,
// msgs: []logger.Msg,
};
mutex: Mutex,
entries: std.StringHashMap(*Entry),
pub fn init(allocator: *std.mem.Allocator) Json {
return Json{
.mutex = Mutex.init(),
.entries = std.StringHashMap(Entry).init(allocator),
};
}
fn parse(cache: *@This(), log: *logger.Log, source: logger.Source, allocator: *std.mem.Allocator, is_tsconfig: bool, func: anytype) anyerror!?js_ast.Expr {
var temp_log = logger.Log.init(allocator);
defer {
temp_log.appendTo(log) catch {};
pub fn parseTSConfig(cache: *@This(), log: *logger.Log, source: logger.Source, allocator: *std.mem.Allocator) anyerror!?js_ast.Expr {
return try parse(cache, log, source, allocator, true, json_parser.ParseTSConfig);
}
return func(&source, &temp_log, allocator) catch handler: {
break :handler null;
};
}
pub fn parseJSON(cache: *@This(), log: *logger.Log, source: logger.Source, allocator: *std.mem.Allocator) anyerror!?js_ast.Expr {
return try parse(cache, log, source, allocator, false, json_parser.ParseJSON);
}
pub fn parseTSConfig(cache: *@This(), log: *logger.Log, source: logger.Source, allocator: *std.mem.Allocator) anyerror!?js_ast.Expr {
return try parse(cache, log, source, allocator, true, json_parser.ParseTSConfig);
}
};
};
};
}
pub const Cache = NewCache(true);
pub const ServeCache = NewCache(false);

View File

@@ -729,7 +729,14 @@ pub const FileSystem = struct {
}
}
pub fn readFileWithHandle(fs: *RealFS, path: string, _size: ?usize, file: std.fs.File) !File {
pub fn readFileWithHandle(
fs: *RealFS,
path: string,
_size: ?usize,
file: std.fs.File,
comptime use_shared_buffer: bool,
shared_buffer: *MutableString,
) !File {
FileSystem.setMaxFd(file.handle);
if (FeatureFlags.disable_filesystem_cache) {
@@ -742,10 +749,28 @@ pub const FileSystem = struct {
return err;
});
const file_contents: []u8 = file.readToEndAllocOptions(fs.allocator, size, size, @alignOf(u8), null) catch |err| {
fs.readFileError(path, err);
return err;
};
var file_contents: []u8 = undefined;
// When we're serving a JavaScript-like file over HTTP, we do not want to cache the contents in memory
// Not caching imposes a performance hit, because reading from memory is faster than reading from disk
// Part of that hit is allocating a temporary buffer to store the file contents in
// As a mitigation, we can just keep one buffer forever and re-use it for the parsed files
if (use_shared_buffer) {
shared_buffer.reset();
try shared_buffer.growBy(size);
shared_buffer.list.expandToCapacity();
var read_count = file.readAll(shared_buffer.list.items) catch |err| {
fs.readFileError(path, err);
return err;
};
shared_buffer.list.items = shared_buffer.list.items[0..read_count];
file_contents = shared_buffer.list.items;
} else {
file_contents = file.readToEndAllocOptions(fs.allocator, size, size, @alignOf(u8), null) catch |err| {
fs.readFileError(path, err);
return err;
};
}
if (fs.watcher) |*watcher| {
fs.watcher_mutex.lock();

View File

@@ -19,7 +19,7 @@ const Request = picohttp.Request;
const Response = picohttp.Response;
const Headers = picohttp.Headers;
const MimeType = @import("http/mime_type.zig");
const Bundler = bundler.Bundler;
const Bundler = bundler.ServeBundler;
const js_printer = @import("js_printer.zig");
const SOCKET_FLAGS = os.SOCK_CLOEXEC;

View File

@@ -41,7 +41,6 @@ const last_high_surrogate: u21 = 0xDBFF;
const first_low_surrogate: u21 = 0xDC00;
const last_low_surrogate: u21 = 0xDFFF;
const assert = std.debug.assert;
const Linker = @import("linker.zig").Linker;
fn notimpl() void {
Global.panic("Not implemented yet!", .{});
@@ -118,7 +117,7 @@ const ExprFlag = packed struct {
}
};
pub fn NewPrinter(comptime ascii_only: bool, comptime Writer: type) type {
pub fn NewPrinter(comptime ascii_only: bool, comptime Writer: type, comptime Linker: type) type {
// comptime const comptime_buf_len = 64;
// comptime var comptime_buf = [comptime_buf_len]u8{};
// comptime var comptime_buf_i: usize = 0;
@@ -3135,9 +3134,10 @@ pub fn printAst(
source: *const logger.Source,
ascii_only: bool,
opts: Options,
linker: ?*Linker,
comptime LinkerType: type,
linker: ?*LinkerType,
) !usize {
const PrinterType = NewPrinter(false, Writer);
const PrinterType = NewPrinter(false, Writer, LinkerType);
var writer = _writer;
var printer = try PrinterType.init(
writer,

View File

@@ -26,277 +26,283 @@ const Bundler = _bundler.Bundler;
const ResolveQueue = _bundler.ResolveQueue;
const Runtime = @import("./runtime.zig").Runtime;
pub const Linker = struct {
allocator: *std.mem.Allocator,
options: *Options.BundleOptions,
fs: *Fs.FileSystem,
log: *logger.Log,
resolve_queue: *ResolveQueue,
resolver: *Resolver.Resolver,
resolve_results: *_bundler.ResolveResults,
any_needs_runtime: bool = false,
runtime_import_record: ?ImportRecord = null,
runtime_source_path: string,
pub fn init(
pub fn NewLinker(comptime BundlerType: type) type {
return struct {
const ThisLinker = @This();
allocator: *std.mem.Allocator,
options: *Options.BundleOptions,
fs: *Fs.FileSystem,
log: *logger.Log,
resolve_queue: *ResolveQueue,
options: *Options.BundleOptions,
resolver: *Resolver.Resolver,
resolver: *BundlerType.Resolver,
resolve_results: *_bundler.ResolveResults,
fs: *Fs.FileSystem,
) Linker {
relative_paths_list = ImportPathsList.init(allocator);
any_needs_runtime: bool = false,
runtime_import_record: ?ImportRecord = null,
runtime_source_path: string,
return Linker{
.allocator = allocator,
.options = options,
.fs = fs,
.log = log,
.resolve_queue = resolve_queue,
.resolver = resolver,
.resolve_results = resolve_results,
.runtime_source_path = fs.absAlloc(allocator, &([_]string{"__runtime.js"})) catch unreachable,
};
}
pub fn init(
allocator: *std.mem.Allocator,
log: *logger.Log,
resolve_queue: *ResolveQueue,
options: *Options.BundleOptions,
resolver: *BundlerType.Resolver,
resolve_results: *_bundler.ResolveResults,
fs: *Fs.FileSystem,
) ThisLinker {
relative_paths_list = ImportPathsList.init(allocator);
// fs: fs.FileSystem,
// TODO:
pub fn requireOrImportMetaForSource(c: Linker, source_index: Ref.Int) RequireOrImportMeta {
return RequireOrImportMeta{};
}
return ThisLinker{
.allocator = allocator,
.options = options,
.fs = fs,
.log = log,
.resolve_queue = resolve_queue,
.resolver = resolver,
.resolve_results = resolve_results,
.runtime_source_path = fs.absAlloc(allocator, &([_]string{"__runtime.js"})) catch unreachable,
};
}
// pub const Scratch = struct {
// threadlocal var externals: std.ArrayList(u32) = undefined;
// threadlocal var has_externals: std.ArrayList(u32) = undefined;
// pub fn externals() {
// fs: fs.FileSystem,
// TODO:
pub fn requireOrImportMetaForSource(c: ThisLinker, source_index: Ref.Int) RequireOrImportMeta {
return RequireOrImportMeta{};
}
// }
// };
// This modifies the Ast in-place!
// But more importantly, this does the following:
// - Wrap CommonJS files
pub fn link(linker: *Linker, file_path: Fs.Path, result: *Bundler.ParseResult) !void {
var needs_runtime = result.ast.uses_exports_ref or result.ast.uses_module_ref or result.ast.runtime_imports.hasAny();
const source_dir = file_path.name.dir;
var externals = std.ArrayList(u32).init(linker.allocator);
// pub const Scratch = struct {
// threadlocal var externals: std.ArrayList(u32) = undefined;
// threadlocal var has_externals: std.ArrayList(u32) = undefined;
// pub fn externals() {
// Step 1. Resolve imports & requires
switch (result.loader) {
.jsx, .js, .ts, .tsx => {
for (result.ast.import_records) |*import_record, record_index| {
if (strings.eqlComptime(import_record.path.text, Runtime.Imports.Name)) {
import_record.path = try linker.generateImportPath(
source_dir,
linker.runtime_source_path,
Runtime.version(),
);
result.ast.runtime_import_record_id = @truncate(u32, record_index);
result.ast.needs_runtime = true;
continue;
}
// }
// };
// This modifies the Ast in-place!
// But more importantly, this does the following:
// - Wrap CommonJS files
pub fn link(linker: *ThisLinker, file_path: Fs.Path, result: *_bundler.ParseResult) !void {
var needs_runtime = result.ast.uses_exports_ref or result.ast.uses_module_ref or result.ast.runtime_imports.hasAny();
const source_dir = file_path.name.dir;
var externals = std.ArrayList(u32).init(linker.allocator);
if (linker.resolver.resolve(source_dir, import_record.path.text, import_record.kind)) |*resolved_import| {
if (resolved_import.is_external) {
externals.append(@truncate(u32, record_index)) catch unreachable;
// Step 1. Resolve imports & requires
switch (result.loader) {
.jsx, .js, .ts, .tsx => {
for (result.ast.import_records) |*import_record, record_index| {
if (strings.eqlComptime(import_record.path.text, Runtime.Imports.Name)) {
import_record.path = try linker.generateImportPath(
source_dir,
linker.runtime_source_path,
Runtime.version(),
);
result.ast.runtime_import_record_id = @truncate(u32, record_index);
result.ast.needs_runtime = true;
continue;
}
linker.processImportRecord(
// Include trailing slash
file_path.text[0 .. source_dir.len + 1],
resolved_import,
import_record,
) catch continue;
if (linker.resolver.resolve(source_dir, import_record.path.text, import_record.kind)) |*resolved_import| {
if (resolved_import.is_external) {
externals.append(@truncate(u32, record_index)) catch unreachable;
continue;
}
// If we're importing a CommonJS module as ESM
// We need to do the following transform:
// import React from 'react';
// =>
// import {_require} from 'RUNTIME_IMPORTS';
// import * as react_module from 'react';
// var React = _require(react_module).default;
// UNLESS it's a namespace import
// If it's a namespace import, assume it's safe.
// We can do this in the printer instead of creating a bunch of AST nodes here.
// But we need to at least tell the printer that this needs to happen.
if (import_record.kind == .stmt and resolved_import.shouldAssumeCommonJS(import_record)) {
import_record.wrap_with_to_module = true;
result.ast.needs_runtime = true;
}
} else |err| {
switch (err) {
error.ModuleNotFound => {
if (Resolver.Resolver.isPackagePath(import_record.path.text)) {
if (linker.options.platform != .node and Options.ExternalModules.isNodeBuiltin(import_record.path.text)) {
try linker.log.addRangeErrorFmt(
&result.source,
import_record.range,
linker.allocator,
"Could not resolve: \"{s}\". Try setting --platform=\"node\"",
.{import_record.path.text},
);
linker.processImportRecord(
// Include trailing slash
file_path.text[0 .. source_dir.len + 1],
resolved_import,
import_record,
) catch continue;
// If we're importing a CommonJS module as ESM
// We need to do the following transform:
// import React from 'react';
// =>
// import {_require} from 'RUNTIME_IMPORTS';
// import * as react_module from 'react';
// var React = _require(react_module).default;
// UNLESS it's a namespace import
// If it's a namespace import, assume it's safe.
// We can do this in the printer instead of creating a bunch of AST nodes here.
// But we need to at least tell the printer that this needs to happen.
if (import_record.kind == .stmt and resolved_import.shouldAssumeCommonJS(import_record)) {
import_record.wrap_with_to_module = true;
result.ast.needs_runtime = true;
}
} else |err| {
switch (err) {
error.ModuleNotFound => {
if (BundlerType.Resolver.isPackagePath(import_record.path.text)) {
if (linker.options.platform != .node and Options.ExternalModules.isNodeBuiltin(import_record.path.text)) {
try linker.log.addRangeErrorFmt(
&result.source,
import_record.range,
linker.allocator,
"Could not resolve: \"{s}\". Try setting --platform=\"node\"",
.{import_record.path.text},
);
} else {
try linker.log.addRangeErrorFmt(
&result.source,
import_record.range,
linker.allocator,
"Could not resolve: \"{s}\". Maybe you need to \"npm install\" (or yarn/pnpm)?",
.{import_record.path.text},
);
}
} else {
try linker.log.addRangeErrorFmt(
&result.source,
import_record.range,
linker.allocator,
"Could not resolve: \"{s}\". Maybe you need to \"npm install\" (or yarn/pnpm)?",
.{import_record.path.text},
"Could not resolve: \"{s}\"",
.{
import_record.path.text,
},
);
continue;
}
} else {
try linker.log.addRangeErrorFmt(
&result.source,
import_record.range,
linker.allocator,
"Could not resolve: \"{s}\"",
.{
import_record.path.text,
},
);
},
else => {
continue;
}
},
else => {
continue;
},
},
}
}
}
}
},
else => {},
}
result.ast.externals = externals.toOwnedSlice();
},
else => {},
}
result.ast.externals = externals.toOwnedSlice();
if (result.ast.needs_runtime and result.ast.runtime_import_record_id == null) {
var import_records = try linker.allocator.alloc(ImportRecord, result.ast.import_records.len + 1);
std.mem.copy(ImportRecord, import_records, result.ast.import_records);
import_records[import_records.len - 1] = ImportRecord{
.kind = .stmt,
.path = try linker.generateImportPath(
source_dir,
linker.runtime_source_path,
Runtime.version(),
),
.range = logger.Range{ .loc = logger.Loc{ .start = 0 }, .len = 0 },
};
}
}
const ImportPathsList = allocators.BSSStringList(512, 128);
pub var relative_paths_list: *ImportPathsList = undefined;
threadlocal var relative_path_allocator: std.heap.FixedBufferAllocator = undefined;
threadlocal var relative_path_allocator_buf: [4096]u8 = undefined;
threadlocal var relative_path_allocator_buf_loaded: bool = false;
pub fn generateImportPath(linker: *Linker, source_dir: string, source_path: string, package_version: ?string) !Fs.Path {
if (!relative_path_allocator_buf_loaded) {
relative_path_allocator_buf_loaded = true;
relative_path_allocator = std.heap.FixedBufferAllocator.init(&relative_path_allocator_buf);
}
defer relative_path_allocator.reset();
var absolute_pathname = Fs.PathName.init(source_path);
if (!linker.options.preserve_extensions) {
if (linker.options.out_extensions.get(absolute_pathname.ext)) |ext| {
absolute_pathname.ext = ext;
if (result.ast.needs_runtime and result.ast.runtime_import_record_id == null) {
var import_records = try linker.allocator.alloc(ImportRecord, result.ast.import_records.len + 1);
std.mem.copy(ImportRecord, import_records, result.ast.import_records);
import_records[import_records.len - 1] = ImportRecord{
.kind = .stmt,
.path = try linker.generateImportPath(
source_dir,
linker.runtime_source_path,
Runtime.version(),
),
.range = logger.Range{ .loc = logger.Loc{ .start = 0 }, .len = 0 },
};
}
}
switch (linker.options.import_path_format) {
.relative => {
var pretty = try linker.allocator.dupe(u8, linker.fs.relative(source_dir, source_path));
var pathname = Fs.PathName.init(pretty);
return Fs.Path.initWithPretty(pretty, pretty);
},
.relative_nodejs => {
var pretty = try linker.allocator.dupe(u8, linker.fs.relative(source_dir, source_path));
var pathname = Fs.PathName.init(pretty);
var path = Fs.Path.initWithPretty(pretty, pretty);
path.text = path.text[0 .. path.text.len - path.name.ext.len];
return path;
},
const ImportPathsList = allocators.BSSStringList(512, 128);
pub var relative_paths_list: *ImportPathsList = undefined;
threadlocal var relative_path_allocator: std.heap.FixedBufferAllocator = undefined;
threadlocal var relative_path_allocator_buf: [4096]u8 = undefined;
threadlocal var relative_path_allocator_buf_loaded: bool = false;
.absolute_url => {
var base = linker.fs.relativeTo(source_path);
if (strings.lastIndexOfChar(base, '.')) |dot| {
base = base[0..dot];
pub fn generateImportPath(linker: *ThisLinker, source_dir: string, source_path: string, package_version: ?string) !Fs.Path {
if (!relative_path_allocator_buf_loaded) {
relative_path_allocator_buf_loaded = true;
relative_path_allocator = std.heap.FixedBufferAllocator.init(&relative_path_allocator_buf);
}
defer relative_path_allocator.reset();
var absolute_pathname = Fs.PathName.init(source_path);
if (!linker.options.preserve_extensions) {
if (linker.options.out_extensions.get(absolute_pathname.ext)) |ext| {
absolute_pathname.ext = ext;
}
}
if (linker.options.append_package_version_in_query_string and package_version != null) {
const absolute_url =
try std.fmt.allocPrint(
linker.allocator,
"{s}{s}{s}?v={s}",
.{
linker.options.public_url,
base,
absolute_pathname.ext,
package_version.?,
},
);
switch (linker.options.import_path_format) {
.relative => {
var pretty = try linker.allocator.dupe(u8, linker.fs.relative(source_dir, source_path));
var pathname = Fs.PathName.init(pretty);
return Fs.Path.initWithPretty(pretty, pretty);
},
.relative_nodejs => {
var pretty = try linker.allocator.dupe(u8, linker.fs.relative(source_dir, source_path));
var pathname = Fs.PathName.init(pretty);
var path = Fs.Path.initWithPretty(pretty, pretty);
path.text = path.text[0 .. path.text.len - path.name.ext.len];
return path;
},
return Fs.Path.initWithPretty(absolute_url, absolute_url);
} else {
const absolute_url = try std.fmt.allocPrint(
linker.allocator,
"{s}{s}{s}",
.{
linker.options.public_url,
base,
absolute_pathname.ext,
},
);
.absolute_url => {
var base = linker.fs.relativeTo(source_path);
if (strings.lastIndexOfChar(base, '.')) |dot| {
base = base[0..dot];
}
return Fs.Path.initWithPretty(absolute_url, absolute_url);
}
},
if (linker.options.append_package_version_in_query_string and package_version != null) {
const absolute_url =
try std.fmt.allocPrint(
linker.allocator,
"{s}{s}{s}?v={s}",
.{
linker.options.public_url,
base,
absolute_pathname.ext,
package_version.?,
},
);
else => unreachable,
}
}
return Fs.Path.initWithPretty(absolute_url, absolute_url);
} else {
const absolute_url = try std.fmt.allocPrint(
linker.allocator,
"{s}{s}{s}",
.{
linker.options.public_url,
base,
absolute_pathname.ext,
},
);
pub fn processImportRecord(linker: *Linker, source_dir: string, resolve_result: *Resolver.Resolver.Result, import_record: *ImportRecord) !void {
return Fs.Path.initWithPretty(absolute_url, absolute_url);
}
},
// extremely naive.
resolve_result.is_from_node_modules = strings.contains(resolve_result.path_pair.primary.text, "/node_modules");
// lazy means:
// Run the resolver
// Don't parse/print automatically.
if (linker.options.resolve_mode != .lazy) {
try linker.enqueueResolveResult(resolve_result);
else => unreachable,
}
}
import_record.path = try linker.generateImportPath(
source_dir,
resolve_result.path_pair.primary.text,
resolve_result.package_json_version,
);
}
pub fn processImportRecord(linker: *ThisLinker, source_dir: string, resolve_result: *Resolver.Result, import_record: *ImportRecord) !void {
pub fn resolveResultHashKey(linker: *Linker, resolve_result: *const Resolver.Resolver.Result) string {
var hash_key = resolve_result.path_pair.primary.text;
// extremely naive.
resolve_result.is_from_node_modules = strings.contains(resolve_result.path_pair.primary.text, "/node_modules");
// Shorter hash key is faster to hash
if (strings.startsWith(resolve_result.path_pair.primary.text, linker.fs.top_level_dir)) {
hash_key = resolve_result.path_pair.primary.text[linker.fs.top_level_dir.len..];
// lazy means:
// Run the resolver
// Don't parse/print automatically.
if (linker.options.resolve_mode != .lazy) {
try linker.enqueueResolveResult(resolve_result);
}
import_record.path = try linker.generateImportPath(
source_dir,
resolve_result.path_pair.primary.text,
resolve_result.package_json_version,
);
}
return hash_key;
}
pub fn resolveResultHashKey(linker: *ThisLinker, resolve_result: *const Resolver.Result) string {
var hash_key = resolve_result.path_pair.primary.text;
pub fn enqueueResolveResult(linker: *Linker, resolve_result: *const Resolver.Resolver.Result) !void {
const hash_key = linker.resolveResultHashKey(resolve_result);
// Shorter hash key is faster to hash
if (strings.startsWith(resolve_result.path_pair.primary.text, linker.fs.top_level_dir)) {
hash_key = resolve_result.path_pair.primary.text[linker.fs.top_level_dir.len..];
}
const get_or_put_entry = try linker.resolve_results.backing.getOrPut(hash_key);
if (!get_or_put_entry.found_existing) {
get_or_put_entry.entry.value = resolve_result.*;
try linker.resolve_queue.writeItem(resolve_result.*);
return hash_key;
}
}
};
pub fn enqueueResolveResult(linker: *ThisLinker, resolve_result: *const Resolver.Result) !void {
const hash_key = linker.resolveResultHashKey(resolve_result);
const get_or_put_entry = try linker.resolve_results.backing.getOrPut(hash_key);
if (!get_or_put_entry.found_existing) {
get_or_put_entry.entry.value = resolve_result.*;
try linker.resolve_queue.writeItem(resolve_result.*);
}
}
};
}
pub const Linker = NewLinker(_bundler.Bundler);
pub const ServeLinker = NewLinker(_bundler.ServeBundler);

View File

@@ -769,12 +769,12 @@ pub const OutputFile = struct {
move: FileOperation,
copy: FileOperation,
noop: u0,
pending: resolver.Resolver.Result,
pending: resolver.Result,
};
pub const Kind = enum { move, copy, noop, buffer, pending };
pub fn initPending(loader: Loader, pending: resolver.Resolver.Result) OutputFile {
pub fn initPending(loader: Loader, pending: resolver.Result) OutputFile {
return .{
.loader = .file,
.input = pending.path_pair.primary,

View File

@@ -45,13 +45,13 @@ pub const PackageJSON = struct {
//
browser_map: BrowserMap,
pub fn parse(r: *resolver.Resolver, input_path: string, dirname_fd: StoredFileDescriptorType) ?PackageJSON {
pub fn parse(comptime ResolverType: type, r: *ResolverType, input_path: string, dirname_fd: StoredFileDescriptorType) ?PackageJSON {
const parts = [_]string{ input_path, "package.json" };
const package_json_path_ = r.fs.abs(&parts);
const package_json_path = r.fs.filename_store.append(package_json_path_) catch unreachable;
const entry = r.caches.fs.readFile(r.fs, package_json_path, dirname_fd) catch |err| {
const entry = r.caches.fs.readFile(r.fs, package_json_path, dirname_fd, false) catch |err| {
if (err != error.IsDir) {
r.log.addErrorFmt(null, logger.Loc.Empty, r.allocator, "Cannot read file \"{s}\": {s}", .{ r.prettyPath(fs.Path.init(input_path)), @errorName(err) }) catch unreachable;
}

File diff suppressed because it is too large Load Diff

View File

@@ -60,7 +60,8 @@ pub const TSConfigJSON = struct {
allocator: *std.mem.Allocator,
log: *logger.Log,
source: logger.Source,
json_cache: *cache.Cache.Json,
comptime JSONCache: type,
json_cache: *JSONCache,
) anyerror!?*TSConfigJSON {
// Unfortunately "tsconfig.json" isn't actually JSON. It's some other
// format that appears to be defined by the implementation details of the