This is a lot

Former-commit-id: 4b2a396611ec03270dc768b70e488b0f5eee2a37
Jarred Sumner
2021-08-10 18:26:16 -07:00
parent 0daff24b16
commit 10b4b872a2
32 changed files with 1257 additions and 570 deletions

.gitignore

@@ -45,4 +45,6 @@ outcss
.next
txt.js
.idea
.vscode/cpp*
.vscode/cpp*
node_modules_*

.vscode/launch.json

@@ -10,7 +10,7 @@
"./routes",
"--resolve=dev",
"--outdir=out"
// "--public-url=https://localhost:9000/"
// "--origin=https://localhost:9000/"
],
"cwd": "${workspaceFolder}/demos/css-stress-test",
"console": "internalConsole"
@@ -39,15 +39,13 @@
{
"type": "lldb",
"request": "launch",
"name": "Eval Small TEst",
"program": "${workspaceFolder}/build/debug/macos-x86_64/spjs",
"name": "Transpile small",
"program": "${workspaceFolder}/build/debug/macos-x86_64/esdev",
"args": [
"./quoted-escape.js",
"--resolve=dev",
"--outdir=outcss"
// "--public-url=https://localhost:9000/"
"demos/css-stress-test/pages/index.tsx"
// "--origin=https://localhost:9000/"
],
"cwd": "${workspaceFolder}/src/test/fixtures",
"cwd": "${workspaceFolder}",
"console": "internalConsole"
},
{
@@ -59,7 +57,7 @@
"error.js",
"--resolve=dev",
"--outdir=outcss"
// "--public-url=https://localhost:9000/"
// "--origin=https://localhost:9000/"
],
"cwd": "${workspaceFolder}",
"console": "internalConsole"
@@ -71,7 +69,7 @@
"program": "${workspaceFolder}/build/debug/macos-x86_64/spjs",
"args": [
"./src/index.tsx"
// "--public-url=https://localhost:9000/"
// "--origin=https://localhost:9000/"
],
"cwd": "${workspaceFolder}/demos/css-stress-test",
"console": "internalConsole"
@@ -86,7 +84,7 @@
"./simple.css",
"--resolve=dev",
"--outdir=outcss",
"--public-url=https://localhost:9000/"
"--origin=https://localhost:9000/"
],
"cwd": "${workspaceFolder}/src/test/fixtures",
"console": "internalConsole"
@@ -97,14 +95,7 @@
"request": "launch",
"name": "Demo Serve",
"program": "${workspaceFolder}/build/debug/macos-x86_64/esdev",
"args": [
"pages",
"--resolve=lazy",
"--outdir=public",
"--framework=framework.tsx",
"--serve",
"--public-url=http://localhost:9000/"
],
"args": ["--serve", "--origin=http://localhost:9000/"],
"cwd": "${workspaceFolder}/demos/css-stress-test",
"console": "internalConsole"
},
@@ -116,7 +107,7 @@
"args": [
"./src/index.tsx",
"--resolve=lazy",
"--public-url=http://localhost:9000/"
"--origin=http://localhost:9000/"
],
"cwd": "${workspaceFolder}/demos/simple-react",
"console": "internalConsole"
@@ -132,7 +123,7 @@
"--resolve=dev",
"--outdir=outcss",
"--platform=browser",
"--public-url=http://localhost:9000/"
"--origin=http://localhost:9000/"
],
"cwd": "${workspaceFolder}/demos/css-stress-test",
"console": "internalConsole"
@@ -146,7 +137,7 @@
"args": [
"./src/index.tsx",
"--resolve=lazy",
"--public-url=http://localhost:9000/"
"--origin=http://localhost:9000/"
],
"cwd": "${workspaceFolder}/demos/simple-react",
"console": "internalConsole"
@@ -158,11 +149,20 @@
"name": "Demo Build .jsb",
"program": "${workspaceFolder}/build/debug/macos-x86_64/esdev",
"args": [
"./src/index.tsx",
"--public-url=http://localhost:9000/",
"--new-jsb"
"--origin=http://localhost:9000/",
"--new-jsb",
"--use=./nexty2"
],
"cwd": "${workspaceFolder}/demos/simple-react",
"cwd": "${workspaceFolder}/demos/css-stress-test",
"console": "internalConsole"
},
{
"type": "lldb",
"request": "launch",
"name": "PNPM Resolve symlink",
"program": "${workspaceFolder}/build/debug/macos-x86_64/esdev",
"args": ["--resolve=dev", "test-pnpm.js"],
"cwd": "${workspaceFolder}/demos/css-stress-test",
"console": "internalConsole"
},
{
@@ -265,7 +265,7 @@
// "pages/index.jsx",
// "-o",
// "out",
// "--public-url=https://hello.com/",
// "--origin=https://hello.com/",
// "--serve"
// ],
// "cwd": "${workspaceFolder}",
@@ -288,7 +288,7 @@
// "@romejs/js-analysis/evaluators/modules/ImportCall.ts",
"--outdir=${workspaceFolder}/bench/rome/src/out",
// "@romejs/cli-diagnostics/banners/success.json",
"--public-url=https://hello.com/"
"--origin=https://hello.com/"
],
"cwd": "${workspaceFolder}/bench/rome/src",
"console": "internalConsole"
@@ -310,7 +310,7 @@
// "@romejs/js-analysis/evaluators/modules/ImportCall.ts",
"--outdir=${workspaceFolder}/bench/rome/src/out",
// "@romejs/cli-diagnostics/banners/success.json",
"--public-url=https://hello.com/"
"--origin=https://hello.com/"
],
"cwd": "${workspaceFolder}/bench/rome/src",
"console": "internalConsole"


@@ -8,7 +8,7 @@
},
"search.followSymlinks": false,
"search.useIgnoreFiles": true,
"zig.buildOnSave": false,
"C_Cpp.files.exclude": {
"**/.vscode": true,
"src/javascript/jsc/WebKit/JSTests": true,


@@ -1,10 +1,10 @@
# Speedy - a fast web bundler & JavaScript runtime environment
# A fast bundler & JS Runtime built for developer iteration cycle
Speedy bundles & transpiles JavaScript, TypeScript, and CSS. Speedy is probably the fastest bundler out today.
Speedy is a fast bundler, transpiler, and runtime environment for JavaScript & TypeScript. It also supports bundling CSS.
### Speed hacking
### Performance optimizations
Here are some techniques Speedy uses to make your builds shockingly fast. Most are small wins. Some are big.
Here are some techniques Speedy uses to go fast. Most are small wins. Some are big.
#### Compare comptime-known strings by nearest `(u64 || u32 || u16 || u8)`-sized integer
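A minimal sketch of the idea for strings up to 8 bytes, always widening to `u64` rather than picking the nearest-sized integer, and not the project's actual implementation:

```zig
const std = @import("std");

/// Compare a runtime string against a short comptime-known string by
/// zero-padding both into 8 bytes and doing one u64 comparison instead
/// of a byte-by-byte loop.
fn eqlComptime(comptime expected: []const u8, actual: []const u8) bool {
    comptime std.debug.assert(expected.len <= 8);
    if (actual.len != expected.len) return false;

    // Comptime-known, zero-padded copy of the expected string.
    const expected_buf = comptime blk: {
        var buf = [_]u8{0} ** 8;
        std.mem.copy(u8, &buf, expected);
        break :blk buf;
    };

    var actual_buf = [_]u8{0} ** 8;
    std.mem.copy(u8, actual_buf[0..actual.len], actual);

    // One integer compare instead of a loop over bytes.
    return std.mem.readIntNative(u64, &actual_buf) == std.mem.readIntNative(u64, &expected_buf);
}
```

A helper in this spirit, `strings.ExactSizeMatcher(8)`, appears later in this commit in the `Cli` hunks.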


@@ -0,0 +1,5 @@
import ReactDOM from "react-dom";
export function start(EntryPointNamespace) {
ReactDOM.hydrate(<EntryPointNamespace.default />);
}


@@ -1,4 +0,0 @@
import * as ReactDOM from "react-dom";
export default function start(EntryPointNamespace) {
ReactDOM.render(EntryPointNamespace.default);
}


@@ -11,13 +11,14 @@
".tsx"
]
},
"static": "public",
"development": {
"client": "framework.client.development.tsx",
"server": "framework.server.development.tsx"
"client": "client.development.tsx",
"server": "server.development.tsx"
},
"production": {
"client": "framework.client.production.tsx",
"server": "framework.server.production.tsx"
"client": "client.production.tsx",
"server": "server.production.tsx"
}
},
"scripts": {


@@ -0,0 +1,50 @@
const std = @import("std");
const path_handler = @import("../src/resolver/resolve_path.zig");
usingnamespace @import("../src/global.zig");
// zig build-exe -Drelease-fast --main-pkg-path ../ ./readlink-getfd.zig
pub fn main() anyerror!void {
var stdout_ = std.io.getStdOut();
var stderr_ = std.io.getStdErr();
var output_source = Output.Source.init(stdout_, stderr_);
Output.Source.set(&output_source);
defer Output.flush();
var args_buffer: [8096 * 2]u8 = undefined;
var fixed_buffer = std.heap.FixedBufferAllocator.init(&args_buffer);
var allocator = &fixed_buffer.allocator;
var args = std.mem.span(try std.process.argsAlloc(allocator));
const to_resolve = args[args.len - 1];
const cwd = try std.process.getCwdAlloc(allocator);
var parts = [1][]const u8{std.mem.span(to_resolve)};
var joined_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
var joined = path_handler.joinAbsStringBuf(
cwd,
&joined_buf,
&parts,
.loose,
);
joined_buf[joined.len] = 0;
const joined_z: [:0]const u8 = joined_buf[0..joined.len :0];
var file = std.fs.openFileAbsoluteZ(joined_z, .{ .read = false }) catch |err| {
switch (err) {
error.NotDir, error.FileNotFound => {
Output.prettyError("<r><red>404 Not Found<r>: <b>\"{s}\"<r>", .{joined_z});
Output.flush();
std.process.exit(1);
},
else => {
return err;
},
}
};
var out_buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
var path = try std.os.getFdPath(file.handle, &out_buffer);
Output.print("{s}", .{path});
}


@@ -1,5 +1,6 @@
const std = @import("std");
const FeatureFlags = @import("./feature_flags.zig");
const Wyhash = std.hash.Wyhash;
const FixedBufferAllocator = std.heap.FixedBufferAllocator;
@@ -231,7 +232,18 @@ pub fn BSSList(comptime ValueType: type, comptime _count: anytype) type {
}
};
}
const Mutex = @import("./sync.zig").Mutex;
/// Append-only list.
/// Stores an initial count in .bss section of the object file
/// Overflows to heap when count is exceeded.
pub fn BSSStringList(comptime _count: usize, comptime _item_length: usize) type {
// I experimented with string interning here and it was around...maybe 1% when generating a .jsb?
// I tried:
// - arraybacked list
// - hashmap list
// + 1 for sentinel
const item_length = _item_length + 1;
const count = _count * 2;
@@ -250,30 +262,26 @@ pub fn BSSStringList(comptime _count: usize, comptime _item_length: usize) type
allocator: *Allocator,
pub var instance: Self = undefined;
var loaded: bool = false;
// only need the mutex on append
var mutex: Mutex = undefined;
pub fn init(allocator: *std.mem.Allocator) *Self {
instance = Self{
.allocator = allocator,
.overflow_list = std.ArrayListUnmanaged(ValueType){},
};
if (!loaded) {
instance = Self{
.allocator = allocator,
.overflow_list = std.ArrayListUnmanaged(ValueType){},
};
mutex = Mutex.init();
}
return &instance;
}
pub fn isOverflowing() bool {
pub inline fn isOverflowing() bool {
return slice_buf_used >= @as(u16, count);
}
pub fn at(self: *const Self, index: IndexType) ?ValueType {
if (index.index == NotFound.index or index.index == Unassigned.index) return null;
if (index.is_overflow) {
return &self.overflow_list.items[index.index];
} else {
return &slice_buf[index.index];
}
}
pub fn exists(self: *Self, value: ValueType) bool {
return isSliceInBuffer(value, slice_buf);
}
@@ -283,6 +291,30 @@ pub fn BSSStringList(comptime _count: usize, comptime _item_length: usize) type
}
pub fn append(self: *Self, comptime AppendType: type, _value: AppendType) ![]const u8 {
return try self.doAppend(AppendType, _value);
}
threadlocal var lowercase_append_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
pub fn appendLowerCase(self: *Self, comptime AppendType: type, _value: AppendType) ![]const u8 {
for (_value) |c, i| {
lowercase_append_buf[i] = std.ascii.toLower(c);
}
var slice = lowercase_append_buf[0.._value.len];
return self.doAppend(
@TypeOf(slice),
slice,
);
}
inline fn doAppend(
self: *Self,
comptime AppendType: type,
_value: AppendType,
) ![]const u8 {
mutex.lock();
defer mutex.unlock();
const value_len: usize = brk: {
switch (comptime AppendType) {
[]const u8, []u8 => {
@@ -634,6 +666,243 @@ pub fn BSSMap(comptime ValueType: type, comptime count: anytype, store_keys: boo
};
}
pub fn TBSSMap(comptime ValueType: type, comptime count: anytype, store_keys: bool, estimated_key_length: usize) type {
const max_index = count - 1;
const BSSMapType = struct {
pub threadlocal var backing_buf: [count]ValueType = undefined;
pub threadlocal var backing_buf_used: u16 = 0;
const Allocator = std.mem.Allocator;
const Self = @This();
index: IndexMap,
overflow_list: std.ArrayListUnmanaged(ValueType),
allocator: *Allocator,
pub threadlocal var instance: Self = undefined;
pub fn init(allocator: *std.mem.Allocator) *Self {
instance = Self{
.index = IndexMap{},
.allocator = allocator,
.overflow_list = std.ArrayListUnmanaged(ValueType){},
};
return &instance;
}
pub fn isOverflowing() bool {
return backing_buf_used >= @as(u16, count);
}
pub fn getOrPut(self: *Self, key: []const u8) !Result {
const _key = Wyhash.hash(Seed, key);
var index = try self.index.getOrPut(self.allocator, _key);
if (index.found_existing) {
return Result{
.hash = _key,
.index = index.value_ptr.*,
.status = switch (index.value_ptr.index) {
NotFound.index => .not_found,
Unassigned.index => .unknown,
else => .exists,
},
};
}
index.value_ptr.* = Unassigned;
return Result{
.hash = _key,
.index = Unassigned,
.status = .unknown,
};
}
pub fn get(self: *const Self, key: []const u8) ?*ValueType {
const _key = Wyhash.hash(Seed, key);
const index = self.index.get(_key) orelse return null;
return self.atIndex(index);
}
pub fn markNotFound(self: *Self, result: Result) void {
self.index.put(self.allocator, result.hash, NotFound) catch unreachable;
}
pub fn atIndex(self: *const Self, index: IndexType) ?*ValueType {
if (index.index == NotFound.index or index.index == Unassigned.index) return null;
if (index.is_overflow) {
return &self.overflow_list.items[index.index];
} else {
return &backing_buf[index.index];
}
}
pub fn put(self: *Self, result: *Result, value: ValueType) !*ValueType {
if (result.index.index == NotFound.index or result.index.index == Unassigned.index) {
result.index.is_overflow = backing_buf_used > max_index;
if (result.index.is_overflow) {
result.index.index = @intCast(u31, self.overflow_list.items.len);
} else {
result.index.index = backing_buf_used;
backing_buf_used += 1;
if (backing_buf_used >= max_index) {
self.overflow_list = try @TypeOf(self.overflow_list).initCapacity(self.allocator, count);
}
}
}
try self.index.put(self.allocator, result.hash, result.index);
if (result.index.is_overflow) {
if (self.overflow_list.items.len == result.index.index) {
const real_index = self.overflow_list.items.len;
try self.overflow_list.append(self.allocator, value);
} else {
self.overflow_list.items[result.index.index] = value;
}
return &self.overflow_list.items[result.index.index];
} else {
backing_buf[result.index.index] = value;
return &backing_buf[result.index.index];
}
}
pub fn remove(self: *Self, key: string) IndexType {
const _key = Wyhash.hash(Seed, key);
const index = self.index.get(_key) orelse return;
switch (index) {
Unassigned.index => {
self.index.remove(_key);
},
NotFound.index => {
self.index.remove(_key);
},
0...max_index => {
if (hasDeinit(ValueType)) {
backing_buf[index].deinit();
}
backing_buf[index] = undefined;
},
else => {
const i = index - count;
if (hasDeinit(ValueType)) {
self.overflow_list.items[i].deinit();
}
self.overflow_list.items[index - count] = undefined;
},
}
return index;
}
};
if (!store_keys) {
return BSSMapType;
}
return struct {
map: *BSSMapType,
const Self = @This();
pub threadlocal var instance: Self = undefined;
threadlocal var key_list_buffer: [count * estimated_key_length]u8 = undefined;
threadlocal var key_list_buffer_used: usize = 0;
threadlocal var key_list_slices: [count][]u8 = undefined;
threadlocal var key_list_overflow: std.ArrayListUnmanaged([]u8) = undefined;
pub fn init(allocator: *std.mem.Allocator) *Self {
instance = Self{
.map = BSSMapType.init(allocator),
};
return &instance;
}
pub fn isOverflowing() bool {
return instance.map.backing_buf_used >= count;
}
pub fn getOrPut(self: *Self, key: []const u8) !Result {
return try self.map.getOrPut(key);
}
pub fn get(self: *Self, key: []const u8) ?*ValueType {
return @call(.{ .modifier = .always_inline }, BSSMapType.get, .{ self.map, key });
}
pub fn atIndex(self: *Self, index: IndexType) ?*ValueType {
return @call(.{ .modifier = .always_inline }, BSSMapType.atIndex, .{ self.map, index });
}
pub fn keyAtIndex(self: *Self, index: IndexType) ?[]const u8 {
return switch (index.index) {
Unassigned.index, NotFound.index => null,
else => {
if (!index.is_overflow) {
return key_list_slices[index.index];
} else {
return key_list_overflow.items[index.index];
}
},
};
}
pub fn put(self: *Self, key: anytype, comptime store_key: bool, result: *Result, value: ValueType) !*ValueType {
var ptr = try self.map.put(result, value);
if (store_key) {
try self.putKey(key, result);
}
return ptr;
}
pub fn isKeyStaticallyAllocated(key: anytype) bool {
return isSliceInBuffer(key, &key_list_buffer);
}
// There are two parts to this.
// 1. Storing the underlying string.
// 2. Making the key accessible at the index.
pub fn putKey(self: *Self, key: anytype, result: *Result) !void {
var slice: []u8 = undefined;
// Is this actually a slice into the map? Don't free it.
if (isKeyStaticallyAllocated(key)) {
slice = constStrToU8(key);
} else if (key_list_buffer_used + key.len < key_list_buffer.len) {
const start = key_list_buffer_used;
key_list_buffer_used += key.len;
slice = key_list_buffer[start..key_list_buffer_used];
std.mem.copy(u8, slice, key);
} else {
slice = try self.map.allocator.dupe(u8, key);
}
if (!result.index.is_overflow) {
key_list_slices[result.index.index] = slice;
} else {
if (@intCast(u31, key_list_overflow.items.len) > result.index.index) {
const existing_slice = key_list_overflow.items[result.index.index];
if (!isKeyStaticallyAllocated(existing_slice)) {
self.map.allocator.free(existing_slice);
}
key_list_overflow.items[result.index.index] = slice;
} else {
try key_list_overflow.append(self.map.allocator, slice);
}
}
}
pub fn markNotFound(self: *Self, result: Result) void {
self.map.markNotFound(result);
}
// For now, don't free the keys.
pub fn remove(self: *Self, key: string) IndexType {
return self.map.remove(key);
}
};
}
pub fn constStrToU8(s: []const u8) []u8 {
return @intToPtr([*]u8, @ptrToInt(s.ptr))[0..s.len];
}


src/api/schema.d.ts

@@ -253,11 +253,13 @@ type uint32 = number;
export interface LoadedRouteConfig {
dir: string;
extensions: string[];
static_dir: string;
}
export interface RouteConfig {
dir?: string;
extensions?: string[];
static_dir?: string;
}
export interface TransformOptions {
@@ -278,7 +280,6 @@ type uint32 = number;
platform?: Platform;
serve?: boolean;
extension_order?: string[];
public_dir?: string;
only_scan_dependencies?: ScanDependencyMode;
generate_node_module_bundle?: boolean;
node_modules_bundle_path?: string;


@@ -697,6 +697,7 @@ function decodeLoadedRouteConfig(bb) {
var length = bb.readVarUint();
var values = result["extensions"] = Array(length);
for (var i = 0; i < length; i++) values[i] = bb.readString();
result["static_dir"] = bb.readString();
return result;
}
@@ -721,6 +722,13 @@ function encodeLoadedRouteConfig(message, bb) {
throw new Error("Missing required field \"extensions\"");
}
var value = message["static_dir"];
if (value != null) {
bb.writeString(value);
} else {
throw new Error("Missing required field \"static_dir\"");
}
}
function decodeRouteConfig(bb) {
@@ -741,6 +749,10 @@ function decodeRouteConfig(bb) {
for (var i = 0; i < length; i++) values[i] = bb.readString();
break;
case 3:
result["static_dir"] = bb.readString();
break;
default:
throw new Error("Attempted to parse invalid message");
}
@@ -765,6 +777,12 @@ function encodeRouteConfig(message, bb) {
bb.writeString(value);
}
}
var value = message["static_dir"];
if (value != null) {
bb.writeByte(3);
bb.writeString(value);
}
bb.writeByte(0);
}
@@ -856,30 +874,26 @@ function decodeTransformOptions(bb) {
break;
case 18:
result["public_dir"] = bb.readString();
break;
case 19:
result["only_scan_dependencies"] = ScanDependencyMode[bb.readByte()];
break;
case 20:
case 19:
result["generate_node_module_bundle"] = !!bb.readByte();
break;
case 21:
case 20:
result["node_modules_bundle_path"] = bb.readString();
break;
case 22:
case 21:
result["node_modules_bundle_path_server"] = bb.readString();
break;
case 23:
case 22:
result["framework"] = decodeFrameworkConfig(bb);
break;
case 24:
case 23:
result["router"] = decodeRouteConfig(bb);
break;
@@ -1022,15 +1036,9 @@ bb.writeByte(encoded);
}
}
var value = message["public_dir"];
if (value != null) {
bb.writeByte(18);
bb.writeString(value);
}
var value = message["only_scan_dependencies"];
if (value != null) {
bb.writeByte(19);
bb.writeByte(18);
var encoded = ScanDependencyMode[value];
if (encoded === void 0) throw new Error("Invalid value " + JSON.stringify(value) + " for enum \"ScanDependencyMode\"");
bb.writeByte(encoded);
@@ -1038,31 +1046,31 @@ bb.writeByte(encoded);
var value = message["generate_node_module_bundle"];
if (value != null) {
bb.writeByte(20);
bb.writeByte(19);
bb.writeByte(value);
}
var value = message["node_modules_bundle_path"];
if (value != null) {
bb.writeByte(21);
bb.writeByte(20);
bb.writeString(value);
}
var value = message["node_modules_bundle_path_server"];
if (value != null) {
bb.writeByte(22);
bb.writeByte(21);
bb.writeString(value);
}
var value = message["framework"];
if (value != null) {
bb.writeByte(23);
bb.writeByte(22);
encodeFrameworkConfig(value, bb);
}
var value = message["router"];
if (value != null) {
bb.writeByte(24);
bb.writeByte(23);
encodeRouteConfig(value, bb);
}
bb.writeByte(0);


@@ -152,11 +152,13 @@ struct LoadedFramework {
struct LoadedRouteConfig {
string dir;
string[] extensions;
string static_dir;
}
message RouteConfig {
string dir = 1;
string[] extensions = 2;
string static_dir = 3;
}
message TransformOptions {
@@ -188,17 +190,15 @@ message TransformOptions {
string[] extension_order = 17;
string public_dir = 18;
ScanDependencyMode only_scan_dependencies = 18;
ScanDependencyMode only_scan_dependencies = 19;
bool generate_node_module_bundle = 19;
bool generate_node_module_bundle = 20;
string node_modules_bundle_path = 20;
string node_modules_bundle_path_server = 21;
string node_modules_bundle_path = 21;
string node_modules_bundle_path_server = 22;
FrameworkConfig framework = 23;
RouteConfig router = 24;
FrameworkConfig framework = 22;
RouteConfig router = 23;
}
struct FileHandle {


@@ -839,17 +839,22 @@ pub const Api = struct {
/// extensions
extensions: []const []const u8,
/// static_dir
static_dir: []const u8,
pub fn decode(reader: anytype) anyerror!LoadedRouteConfig {
var this = std.mem.zeroes(LoadedRouteConfig);
this.dir = try reader.readValue([]const u8);
this.extensions = try reader.readArray([]const u8);
this.static_dir = try reader.readValue([]const u8);
return this;
}
pub fn encode(this: *const @This(), writer: anytype) anyerror!void {
try writer.writeValue(this.dir);
try writer.writeArray([]const u8, this.extensions);
try writer.writeValue(this.static_dir);
}
};
@@ -860,6 +865,9 @@ pub const Api = struct {
/// extensions
extensions: []const []const u8,
/// static_dir
static_dir: ?[]const u8 = null,
pub fn decode(reader: anytype) anyerror!RouteConfig {
var this = std.mem.zeroes(RouteConfig);
@@ -875,6 +883,9 @@ pub const Api = struct {
2 => {
this.extensions = try reader.readArray([]const u8);
},
3 => {
this.static_dir = try reader.readValue([]const u8);
},
else => {
return error.InvalidMessage;
},
@@ -892,6 +903,10 @@ pub const Api = struct {
try writer.writeFieldID(2);
try writer.writeArray([]const u8, extensions);
}
if (this.static_dir) |static_dir| {
try writer.writeFieldID(3);
try writer.writeValue(static_dir);
}
try writer.endMessage();
}
};
@@ -948,9 +963,6 @@ pub const Api = struct {
/// extension_order
extension_order: []const []const u8,
/// public_dir
public_dir: ?[]const u8 = null,
/// only_scan_dependencies
only_scan_dependencies: ?ScanDependencyMode = null,
@@ -1030,24 +1042,21 @@ pub const Api = struct {
this.extension_order = try reader.readArray([]const u8);
},
18 => {
this.public_dir = try reader.readValue([]const u8);
},
19 => {
this.only_scan_dependencies = try reader.readValue(ScanDependencyMode);
},
20 => {
19 => {
this.generate_node_module_bundle = try reader.readValue(bool);
},
21 => {
20 => {
this.node_modules_bundle_path = try reader.readValue([]const u8);
},
22 => {
21 => {
this.node_modules_bundle_path_server = try reader.readValue([]const u8);
},
23 => {
22 => {
this.framework = try reader.readValue(FrameworkConfig);
},
24 => {
23 => {
this.router = try reader.readValue(RouteConfig);
},
else => {
@@ -1127,32 +1136,28 @@ pub const Api = struct {
try writer.writeFieldID(17);
try writer.writeArray([]const u8, extension_order);
}
if (this.public_dir) |public_dir| {
try writer.writeFieldID(18);
try writer.writeValue(public_dir);
}
if (this.only_scan_dependencies) |only_scan_dependencies| {
try writer.writeFieldID(19);
try writer.writeFieldID(18);
try writer.writeEnum(only_scan_dependencies);
}
if (this.generate_node_module_bundle) |generate_node_module_bundle| {
try writer.writeFieldID(20);
try writer.writeFieldID(19);
try writer.writeInt(@intCast(u8, @boolToInt(generate_node_module_bundle)));
}
if (this.node_modules_bundle_path) |node_modules_bundle_path| {
try writer.writeFieldID(21);
try writer.writeFieldID(20);
try writer.writeValue(node_modules_bundle_path);
}
if (this.node_modules_bundle_path_server) |node_modules_bundle_path_server| {
try writer.writeFieldID(22);
try writer.writeFieldID(21);
try writer.writeValue(node_modules_bundle_path_server);
}
if (this.framework) |framework| {
try writer.writeFieldID(23);
try writer.writeFieldID(22);
try writer.writeValue(framework);
}
if (this.router) |router| {
try writer.writeFieldID(24);
try writer.writeFieldID(23);
try writer.writeValue(router);
}
try writer.endMessage();


@@ -40,7 +40,7 @@ pub const ServeResult = struct {
};
// const BundleMap =
pub const ResolveResults = ThreadSafeHashMap.ThreadSafeStringHashMap(_resolver.Result);
pub const ResolveResults = std.AutoHashMap(u64, void);
pub const ResolveQueue = std.fifo.LinearFifo(_resolver.Result, std.fifo.LinearFifoBufferType.Dynamic);
// How it works end-to-end
@@ -185,6 +185,8 @@ pub fn NewBundler(cache_files: bool) type {
// try pool.init(ThreadPool.InitConfig{
// .allocator = allocator,
// });
var resolve_results = try allocator.create(ResolveResults);
resolve_results.* = ResolveResults.init(allocator);
return ThisBundler{
.options = bundle_options,
.fs = fs,
@@ -194,7 +196,7 @@ pub fn NewBundler(cache_files: bool) type {
// .thread_pool = pool,
.linker = undefined,
.result = options.TransformResult{ .outbase = bundle_options.output_dir },
.resolve_results = try ResolveResults.init(allocator),
.resolve_results = resolve_results,
.resolve_queue = ResolveQueue.init(allocator),
.output_files = std.ArrayList(options.OutputFile).init(allocator),
};
@@ -215,7 +217,7 @@ pub fn NewBundler(cache_files: bool) type {
pub fn configureFramework(this: *ThisBundler) !void {
if (this.options.framework) |*framework| {
if (framework.needsResolveFromPackage()) {
var route_config = this.options.route_config orelse options.RouteConfig.zero();
var route_config = this.options.routes;
var pair = PackageJSON.FrameworkRouterPair{ .framework = framework, .router = &route_config };
if (framework.development) {
@@ -225,7 +227,7 @@ pub fn NewBundler(cache_files: bool) type {
}
if (pair.loaded_routes) {
this.options.route_config = route_config;
this.options.routes = route_config;
}
framework.resolved = true;
this.options.framework = framework.*;
@@ -240,11 +242,11 @@ pub fn NewBundler(cache_files: bool) type {
try this.configureFramework();
if (comptime client) {
if (this.options.framework.?.client.len > 0) {
return try this.resolver.resolve(this.fs.top_level_dir, this.options.framework.?.client, .internal);
return try this.resolver.resolve(this.fs.top_level_dir, this.options.framework.?.client, .stmt);
}
} else {
if (this.options.framework.?.server.len > 0) {
return try this.resolver.resolve(this.fs.top_level_dir, this.options.framework.?.server, .internal);
return try this.resolver.resolve(this.fs.top_level_dir, this.options.framework.?.server, .stmt);
}
}
}
@@ -258,7 +260,7 @@ pub fn NewBundler(cache_files: bool) type {
// for now:
// - "." is not supported
// - multiple pages directories is not supported
if (this.options.route_config == null and this.options.entry_points.len == 1) {
if (!this.options.routes.routes_enabled and this.options.entry_points.len == 1) {
// When inferring:
// - pages directory with a file extension is not supported. e.g. "pages.app/" won't work.
@@ -277,24 +279,28 @@ pub fn NewBundler(cache_files: bool) type {
var dir_info_ = this.resolver.readDirInfo(entry) catch return;
var dir_info = dir_info_ orelse return;
this.options.route_config = options.RouteConfig{
.dir = dir_info.abs_path,
.extensions = std.mem.span(&options.RouteConfig.DefaultExtensions),
};
this.router = try Router.init(this.fs, this.allocator, this.options.route_config.?);
this.options.routes.dir = dir_info.abs_path;
this.options.routes.extensions = std.mem.span(&options.RouteConfig.DefaultExtensions);
this.options.routes.routes_enabled = true;
this.router = try Router.init(this.fs, this.allocator, this.options.routes);
try this.router.?.loadRoutes(dir_info, Resolver, &this.resolver, std.math.maxInt(u16), true);
return;
}
}
} else if (this.options.route_config) |*route_config| {
var dir_info_ = try this.resolver.readDirInfo(route_config.dir);
} else if (this.options.routes.routes_enabled) {
var dir_info_ = try this.resolver.readDirInfo(this.options.routes.dir);
var dir_info = dir_info_ orelse return error.MissingRoutesDir;
this.options.route_config = options.RouteConfig{
.dir = dir_info.abs_path,
.extensions = route_config.extensions,
};
this.router = try Router.init(this.fs, this.allocator, this.options.route_config.?);
this.options.routes.dir = dir_info.abs_path;
this.router = try Router.init(this.fs, this.allocator, this.options.routes);
try this.router.?.loadRoutes(dir_info, Resolver, &this.resolver, std.math.maxInt(u16), true);
return;
}
// If we get this far, it means they're trying to run the bundler without a preconfigured router
if (this.options.entry_points.len > 0) {
this.options.routes.routes_enabled = false;
}
}
@@ -304,11 +310,35 @@ pub fn NewBundler(cache_files: bool) type {
}
pub const GenerateNodeModuleBundle = struct {
pub const PathMap = struct {
const HashTable = std.StringHashMap(u32);
backing: HashTable,
pub fn init(allocator: *std.mem.Allocator) PathMap {
return PathMap{
.backing = HashTable.init(allocator),
};
}
pub inline fn hashOf(str: string) u64 {
return std.hash.Wyhash.hash(0, str);
}
pub inline fn getOrPut(this: *PathMap, str: string) !HashTable.GetOrPutResult {
return this.backing.getOrPut(str);
}
pub inline fn contains(this: *PathMap, str: string) bool {
return this.backing.contains(str);
}
};
module_list: std.ArrayList(Api.JavascriptBundledModule),
package_list: std.ArrayList(Api.JavascriptBundledPackage),
header_string_buffer: MutableString,
// Just need to know if we've already enqueued this one
resolved_paths: hash_map.StringHashMap(u32),
resolved_paths: PathMap,
package_list_map: std.AutoHashMap(u64, u32),
resolve_queue: std.fifo.LinearFifo(_resolver.Result, .Dynamic),
bundler: *ThisBundler,
@@ -376,6 +406,7 @@ pub fn NewBundler(cache_files: bool) type {
) !Api.JavascriptBundleContainer {
var tmpdir: std.fs.Dir = try bundler.fs.fs.openTmpDir();
var tmpname_buf: [64]u8 = undefined;
bundler.resetStore();
const tmpname = try bundler.fs.tmpname(
".jsb",
@@ -391,7 +422,7 @@ pub fn NewBundler(cache_files: bool) type {
.scan_pass_result = js_parser.ScanPassResult.init(allocator),
.header_string_buffer = try MutableString.init(allocator, 0),
.allocator = allocator,
.resolved_paths = hash_map.StringHashMap(u32).init(allocator),
.resolved_paths = PathMap.init(allocator),
.resolve_queue = std.fifo.LinearFifo(_resolver.Result, .Dynamic).init(allocator),
.bundler = bundler,
.tmpfile = tmpfile,
@@ -422,6 +453,7 @@ pub fn NewBundler(cache_files: bool) type {
const resolved = try bundler.linker.resolver.resolve(source_dir, entry_point, .entry_point);
this.resolve_queue.writeItemAssumeCapacity(resolved);
}
this.bundler.resetStore();
} else {
try this.resolve_queue.ensureUnusedCapacity(bundler.options.entry_points.len + @intCast(usize, @boolToInt(framework_config != null)));
}
@@ -449,10 +481,26 @@ pub fn NewBundler(cache_files: bool) type {
}
}
this.bundler.resetStore();
while (this.resolve_queue.readItem()) |resolved| {
try this.processFile(resolved);
}
// Normally, this is automatic
// However, since we only do the parsing pass, it may not get imported automatically.
if (this.has_jsx) {
if (this.bundler.resolver.resolve(
this.bundler.fs.top_level_dir,
this.bundler.options.jsx.import_source,
.stmt,
)) |new_jsx_runtime| {
if (!this.resolved_paths.contains(new_jsx_runtime.path_pair.primary.text)) {
try this.processFile(new_jsx_runtime);
}
} else |err| {}
}
if (this.has_jsx and this.bundler.options.jsx.supports_fast_refresh) {
if (this.bundler.resolver.resolve(
this.bundler.fs.top_level_dir,
@@ -592,14 +640,6 @@ pub fn NewBundler(cache_files: bool) type {
// chmod 777
0000010 | 0000100 | 0000001 | 0001000 | 0000040 | 0000004 | 0000002 | 0000400 | 0000200 | 0000020,
);
// Delete if already exists, ignoring errors
// std.os.unlinkatZ(top_dir.fd, destination, 0) catch {};
tmpdir = bundler.fs.tmpdir();
defer {
tmpdir.close();
bundler.fs._tmpdir = null;
}
try std.os.renameatZ(tmpdir.fd, tmpname, top_dir.fd, destination);
// Print any errors at the end
@@ -642,7 +682,7 @@ pub fn NewBundler(cache_files: bool) type {
}
fn processImportRecord(this: *GenerateNodeModuleBundle, import_record: ImportRecord) !void {}
const node_module_root_string = "node_modules" ++ std.fs.path.sep_str;
const node_module_root_string = std.fs.path.sep_str ++ "node_modules" ++ std.fs.path.sep_str;
threadlocal var package_key_buf: [512]u8 = undefined;
threadlocal var file_path_buf: [4096]u8 = undefined;
fn processFile(this: *GenerateNodeModuleBundle, _resolve: _resolver.Result) !void {
@@ -655,13 +695,6 @@ pub fn NewBundler(cache_files: bool) type {
defer this.scan_pass_result.reset();
defer this.bundler.resetStore();
var file_path = resolve.path_pair.primary;
std.mem.copy(u8, file_path_buf[0..file_path.text.len], resolve.path_pair.primary.text);
file_path.text = file_path_buf[0..file_path.text.len];
if (file_path.pretty.len > 0) {
std.mem.copy(u8, file_path_buf[file_path.text.len..], resolve.path_pair.primary.pretty);
file_path.pretty = file_path_buf[file_path.text.len..][0..resolve.path_pair.primary.pretty.len];
file_path.name = Fs.PathName.init(file_path.text);
}
var hasher = std.hash.Wyhash.init(0);
// If we're in a node_module, build that almost normally
@@ -711,6 +744,8 @@ pub fn NewBundler(cache_files: bool) type {
}
const absolute_path = resolved_import.path_pair.primary.text;
const get_or_put_result = try this.resolved_paths.getOrPut(absolute_path);
const package_json: *const PackageJSON = (resolved_import.package_json orelse (this.bundler.resolver.packageJSONForResolvedNodeModule(resolved_import) orelse {
this.log.addWarningFmt(
&source,
@@ -724,28 +759,20 @@ pub fn NewBundler(cache_files: bool) type {
continue;
}));
const package_relative_path = bundler.fs.relative(
package_json.source.path.name.dirWithTrailingSlash(),
resolved_import.path_pair.primary.text,
);
// trim node_modules/${package.name}/ from the string to save space
// This reduces metadata size by about 30% for a large-ish file
// A future optimization here could be to reuse the string from the original path
var node_module_root = strings.indexOf(resolved_import.path_pair.primary.text, node_module_root_string) orelse unreachable;
// omit node_modules
node_module_root += node_module_root_string.len;
// // omit package name
node_module_root += package_json.name.len;
node_module_root += 1;
// It should be the first index, not the last to support bundling multiple of the same package
import_record.path = Fs.Path.init(
absolute_path[node_module_root..],
package_relative_path,
);
const get_or_put_result = try this.resolved_paths.getOrPut(absolute_path);
if (get_or_put_result.found_existing) {
import_record.module_id = get_or_put_result.entry.value;
import_record.module_id = get_or_put_result.value_ptr.*;
import_record.is_bundled = true;
continue;
}
@@ -755,7 +782,7 @@ pub fn NewBundler(cache_files: bool) type {
hasher.update(std.mem.asBytes(&package_json.hash));
import_record.module_id = @truncate(u32, hasher.final());
get_or_put_result.entry.value = import_record.module_id;
get_or_put_result.value_ptr.* = import_record.module_id;
import_record.is_bundled = true;
try this.resolve_queue.writeItem(_resolved_import.*);
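For context on the `module_id` writes in the hunk above: the id appears to be the lower 32 bits of a Wyhash over the package-relative import path followed by the owning package.json's hash. A hedged sketch of that derivation (the `u32` type for the package hash is an assumption):

```zig
const std = @import("std");

// Sketch: module_id = lower 32 bits of Wyhash(package_relative_path ++ bytes(package_json_hash))
fn moduleId(package_relative_path: []const u8, package_json_hash: u32) u32 {
    var hasher = std.hash.Wyhash.init(0);
    hasher.update(package_relative_path);
    hasher.update(std.mem.asBytes(&package_json_hash));
    return @truncate(u32, hasher.final());
}
```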
@@ -763,24 +790,7 @@ pub fn NewBundler(cache_files: bool) type {
}
const package = resolve.package_json orelse this.bundler.resolver.packageJSONForResolvedNodeModule(&resolve) orelse unreachable;
const package_relative_path = brk: {
// trim node_modules/${package.name}/ from the string to save space
// This reduces metadata size by about 30% for a large-ish file
// A future optimization here could be to reuse the string from the original path
var node_module_root = strings.indexOf(resolve.path_pair.primary.text, node_module_root_string) orelse unreachable;
// omit node_modules
node_module_root += node_module_root_string.len;
file_path.pretty = resolve.path_pair.primary.text[node_module_root..];
// omit trailing separator
node_module_root += 1;
// omit package name
node_module_root += package.name.len;
break :brk resolve.path_pair.primary.text[node_module_root..];
};
var package_relative_path = file_path.packageRelativePathString(package.name);
// const load_from_symbol_ref = ast.runtime_imports.$$r.?;
// const reexport_ref = ast.runtime_imports.__reExport.?;
@@ -788,7 +798,12 @@ pub fn NewBundler(cache_files: bool) type {
const E = js_ast.E;
const Expr = js_ast.Expr;
const Stmt = js_ast.Stmt;
if (ast.parts.len == 0) {
if (comptime isDebug) {
Output.prettyErrorln("Missing AST for file: {s}", .{file_path.text});
Output.flush();
}
}
var part = &ast.parts[ast.parts.len - 1];
var new_stmts: [1]Stmt = undefined;
var register_args: [3]Expr = undefined;
@@ -975,11 +990,6 @@ pub fn NewBundler(cache_files: bool) type {
// Always enqueue unwalked import paths, but if it's not a node_module, we don't care about the hash
try this.resolve_queue.writeItem(_resolved_import.*);
// trim node_modules/${package.name}/ from the string to save space
// This reduces metadata size by about 30% for a large-ish file
// A future optimization here could be to reuse the string from the original path
var node_module_root = strings.indexOf(resolved_import.path_pair.primary.text, node_module_root_string) orelse continue;
const package_json: *const PackageJSON = (resolved_import.package_json orelse (this.bundler.resolver.packageJSONForResolvedNodeModule(resolved_import) orelse {
this.log.addWarningFmt(
&source,
@@ -993,21 +1003,20 @@ pub fn NewBundler(cache_files: bool) type {
continue;
}));
// omit node_modules
node_module_root += node_module_root_string.len;
// // omit package name
node_module_root += package_json.name.len;
node_module_root += 1;
// It should be the first index, not the last to support bundling multiple of the same package
import_record.path = Fs.Path.init(
resolved_import.path_pair.primary.text[node_module_root..],
// This string is printed in the summary.
const package_relative_path = bundler.fs.relative(
package_json.source.path.name.dirWithTrailingSlash(),
resolved_import.path_pair.primary.text,
);
import_record.path = Fs.Path.init(package_relative_path);
hasher = std.hash.Wyhash.init(0);
hasher.update(import_record.path.text);
hasher.update(std.mem.asBytes(&package_json.hash));
get_or_put_result.entry.value = @truncate(u32, hasher.final());
get_or_put_result.value_ptr.* = @truncate(u32, hasher.final());
} else |err| {}
}
},
@@ -1583,91 +1592,91 @@ pub fn NewBundler(cache_files: bool) type {
return entry;
}
pub fn scanDependencies(
allocator: *std.mem.Allocator,
log: *logger.Log,
_opts: Api.TransformOptions,
) !ScanResult.Summary {
var opts = _opts;
opts.resolve = .dev;
var bundler = try ThisBundler.init(allocator, log, opts, null);
// pub fn scanDependencies(
// allocator: *std.mem.Allocator,
// log: *logger.Log,
// _opts: Api.TransformOptions,
// ) !ScanResult.Summary {
// var opts = _opts;
// opts.resolve = .dev;
// var bundler = try ThisBundler.init(allocator, log, opts, null);
bundler.configureLinker();
// bundler.configureLinker();
var entry_points = try allocator.alloc(_resolver.Result, bundler.options.entry_points.len);
// var entry_points = try allocator.alloc(_resolver.Result, bundler.options.entry_points.len);
if (log.level == .verbose) {
bundler.resolver.debug_logs = try DebugLogs.init(allocator);
}
// if (log.level == .verbose) {
// bundler.resolver.debug_logs = try DebugLogs.init(allocator);
// }
var rfs: *Fs.FileSystem.RealFS = &bundler.fs.fs;
// var rfs: *Fs.FileSystem.RealFS = &bundler.fs.fs;
var entry_point_i: usize = 0;
for (bundler.options.entry_points) |_entry| {
var entry: string = bundler.normalizeEntryPointPath(_entry);
// var entry_point_i: usize = 0;
// for (bundler.options.entry_points) |_entry| {
// var entry: string = bundler.normalizeEntryPointPath(_entry);
defer {
js_ast.Expr.Data.Store.reset();
js_ast.Stmt.Data.Store.reset();
}
// defer {
// js_ast.Expr.Data.Store.reset();
// js_ast.Stmt.Data.Store.reset();
// }
const result = bundler.resolver.resolve(bundler.fs.top_level_dir, entry, .entry_point) catch |err| {
Output.printError("Error resolving \"{s}\": {s}\n", .{ entry, @errorName(err) });
continue;
};
// const result = bundler.resolver.resolve(bundler.fs.top_level_dir, entry, .entry_point) catch |err| {
// Output.printError("Error resolving \"{s}\": {s}\n", .{ entry, @errorName(err) });
// continue;
// };
const key = result.path_pair.primary.text;
if (bundler.resolve_results.contains(key)) {
continue;
}
try bundler.resolve_results.put(key, result);
entry_points[entry_point_i] = result;
// const key = result.path_pair.primary.text;
// if (bundler.resolve_results.contains(key)) {
// continue;
// }
// try bundler.resolve_results.put(key, result);
// entry_points[entry_point_i] = result;
if (isDebug) {
Output.print("Resolved {s} => {s}", .{ entry, result.path_pair.primary.text });
}
// if (isDebug) {
// Output.print("Resolved {s} => {s}", .{ entry, result.path_pair.primary.text });
// }
entry_point_i += 1;
bundler.resolve_queue.writeItem(result) catch unreachable;
}
var scan_results = std.ArrayList(ScanResult).init(allocator);
var scan_pass_result = js_parser.ScanPassResult.init(allocator);
// entry_point_i += 1;
// bundler.resolve_queue.writeItem(result) catch unreachable;
// }
// var scan_results = std.ArrayList(ScanResult).init(allocator);
// var scan_pass_result = js_parser.ScanPassResult.init(allocator);
switch (bundler.options.resolve_mode) {
.lazy, .dev, .bundle => {
while (bundler.resolve_queue.readItem()) |item| {
js_ast.Expr.Data.Store.reset();
js_ast.Stmt.Data.Store.reset();
scan_pass_result.named_imports.clearRetainingCapacity();
scan_results.append(bundler.scanWithResolveResult(item, &scan_pass_result) catch continue orelse continue) catch continue;
}
},
else => Global.panic("Unsupported resolve mode: {s}", .{@tagName(bundler.options.resolve_mode)}),
}
// switch (bundler.options.resolve_mode) {
// .lazy, .dev, .bundle => {
// while (bundler.resolve_queue.readItem()) |item| {
// js_ast.Expr.Data.Store.reset();
// js_ast.Stmt.Data.Store.reset();
// scan_pass_result.named_imports.clearRetainingCapacity();
// scan_results.append(bundler.scanWithResolveResult(item, &scan_pass_result) catch continue orelse continue) catch continue;
// }
// },
// else => Global.panic("Unsupported resolve mode: {s}", .{@tagName(bundler.options.resolve_mode)}),
// }
// if (log.level == .verbose) {
// for (log.msgs.items) |msg| {
// try msg.writeFormat(std.io.getStdOut().writer());
// }
// }
// // if (log.level == .verbose) {
// // for (log.msgs.items) |msg| {
// // try msg.writeFormat(std.io.getStdOut().writer());
// // }
// // }
if (FeatureFlags.tracing) {
Output.printError(
"\n---Tracing---\nResolve time: {d}\nParsing time: {d}\n---Tracing--\n\n",
.{
bundler.resolver.elapsed,
bundler.elapsed,
},
);
}
// if (FeatureFlags.tracing) {
// Output.printError(
// "\n---Tracing---\nResolve time: {d}\nParsing time: {d}\n---Tracing--\n\n",
// .{
// bundler.resolver.elapsed,
// bundler.elapsed,
// },
// );
// }
return ScanResult.Summary{
.scan_results = scan_results,
.import_records = scan_pass_result.import_records,
};
}
// return ScanResult.Summary{
// .scan_results = scan_results,
// .import_records = scan_pass_result.import_records,
// };
// }
fn enqueueEntryPoints(bundler: *ThisBundler, entry_points: []_resolver.Result, comptime normalize_entry_point: bool) void {
fn enqueueEntryPoints(bundler: *ThisBundler, entry_points: []_resolver.Result, comptime normalize_entry_point: bool) usize {
var entry_point_i: usize = 0;
for (bundler.options.entry_points) |_entry| {
@@ -1683,21 +1692,13 @@ pub fn NewBundler(cache_files: bool) type {
continue;
};
const key = result.path_pair.primary.text;
if (bundler.resolve_results.contains(key)) {
continue;
if (bundler.linker.enqueueResolveResult(&result) catch unreachable) {
entry_points[entry_point_i] = result;
entry_point_i += 1;
}
bundler.resolve_results.put(key, result) catch unreachable;
entry_points[entry_point_i] = result;
if (isDebug) {
Output.print("Resolved {s} => {s}", .{ entry, result.path_pair.primary.text });
}
entry_point_i += 1;
bundler.resolve_queue.writeItem(result) catch unreachable;
}
return entry_point_i;
}
pub fn bundle(
@@ -1710,9 +1711,11 @@ pub fn NewBundler(cache_files: bool) type {
try bundler.configureRouter();
var skip_normalize = false;
if (bundler.router) |router| {
bundler.options.entry_points = try router.getEntryPoints(allocator);
skip_normalize = true;
if (bundler.options.routes.routes_enabled) {
if (bundler.router) |router| {
bundler.options.entry_points = try router.getEntryPoints(allocator);
skip_normalize = true;
}
}
if (bundler.options.write and bundler.options.output_dir.len > 0) {}
@@ -1724,9 +1727,9 @@ pub fn NewBundler(cache_files: bool) type {
var entry_points = try allocator.alloc(_resolver.Result, bundler.options.entry_points.len);
if (skip_normalize) {
bundler.enqueueEntryPoints(entry_points, false);
entry_points = entry_points[0..bundler.enqueueEntryPoints(entry_points, false)];
} else {
bundler.enqueueEntryPoints(entry_points, true);
entry_points = entry_points[0..bundler.enqueueEntryPoints(entry_points, true)];
}
if (log.level == .verbose) {


@@ -115,10 +115,10 @@ pub const Cli = struct {
clap.parseParam("-o, --outdir <STR> Save output to directory (default: \"out\" if none provided and multiple entry points passed)") catch unreachable,
clap.parseParam("-e, --external <STR>... Exclude module from transpilation (can use * wildcards). ex: -e react") catch unreachable,
clap.parseParam("-i, --inject <STR>... Inject module at the top of every file") catch unreachable,
clap.parseParam("--cwd <STR> Absolute path to resolve entry points from. Defaults to cwd") catch unreachable,
clap.parseParam("--origin <STR> Rewrite import paths to start with --origin. Useful for web browsers.") catch unreachable,
clap.parseParam("--cwd <STR> Absolute path to resolve entry points from.") catch unreachable,
clap.parseParam("--origin <STR> Rewrite import paths to start with --origin. Useful for web browsers. Default: \"/\"") catch unreachable,
clap.parseParam("--serve Start a local dev server. This also sets resolve to \"lazy\".") catch unreachable,
clap.parseParam("--public-dir <STR> Top-level directory for .html files, fonts, images, or anything external. Only relevant with --serve. Defaults to \"<cwd>/public\", to match create-react-app and Next.js") catch unreachable,
clap.parseParam("--static-dir <STR> Top-level directory for .html files, fonts or anything external. Defaults to \"<cwd>/public\", to match create-react-app and Next.js") catch unreachable,
clap.parseParam("--jsx-factory <STR> Changes the function called when compiling JSX elements using the classic JSX runtime") catch unreachable,
clap.parseParam("--jsx-fragment <STR> Changes the function called when compiling JSX fragments using the classic JSX runtime") catch unreachable,
clap.parseParam("--jsx-import-source <STR> Declares the module specifier to be used for importing the jsx and jsxs factory functions. Default: \"react\"") catch unreachable,
@@ -133,7 +133,7 @@ pub const Cli = struct {
clap.parseParam("--new-jsb Generate a new node_modules.jsb file from node_modules and entry point(s)") catch unreachable,
clap.parseParam("--jsb <STR> Use a Speedy JavaScript Bundle (default: \"./node_modules.jsb\" if exists)") catch unreachable,
clap.parseParam("--jsb-for-server <STR> Use a server-only Speedy JavaScript Bundle (default: \"./node_modules.server.jsb\" if exists)") catch unreachable,
clap.parseParam("--framework <STR> Use a JavaScript framework (package name or path to package)") catch unreachable,
clap.parseParam("--use <STR> Use a JavaScript framework (package name or path to package)") catch unreachable,
clap.parseParam("--production This sets the defaults to production. Applies to jsx & framework") catch unreachable,
clap.parseParam("<POS>... Entry point(s) to use. Can be individual files, npm packages, or one directory. If one directory, it will auto-detect entry points using a filesystem router. If you're using a framework, passing entry points are optional.") catch unreachable,
@@ -190,7 +190,7 @@ pub const Cli = struct {
var jsx_production = args.flag("--jsx-production") or production;
var react_fast_refresh = false;
var framework_entry_point = args.option("--framework");
var framework_entry_point = args.option("--use");
if (serve or args.flag("--new-jsb")) {
react_fast_refresh = true;
@@ -223,6 +223,14 @@ pub const Cli = struct {
if (args.flag("--new-jsb")) {
node_modules_bundle_path = null;
node_modules_bundle_path_server = null;
}
var route_config: ?Api.RouteConfig = null;
if (args.option("--static-dir")) |public_dir| {
route_config = route_config orelse Api.RouteConfig{ .extensions = &.{} };
route_config.?.static_dir = public_dir;
}
const PlatformMatcher = strings.ExactSizeMatcher(8);
@@ -324,8 +332,8 @@ pub const Cli = struct {
},
.node_modules_bundle_path = node_modules_bundle_path,
.node_modules_bundle_path_server = node_modules_bundle_path_server,
.public_dir = if (args.option("--public-dir")) |public_dir| allocator.dupe(u8, public_dir) catch unreachable else null,
.write = write,
.router = route_config,
.serve = serve,
.inject = inject,
.entry_points = entry_points,
@@ -391,9 +399,9 @@ pub const Cli = struct {
return;
}
if ((args.only_scan_dependencies orelse ._none) == .all) {
return try printScanResults(try bundler.Bundler.scanDependencies(allocator, &log, args), allocator);
}
// if ((args.only_scan_dependencies orelse ._none) == .all) {
// return try printScanResults(try bundler.Bundler.scanDependencies(allocator, &log, args), allocator);
// }
if ((args.generate_node_module_bundle orelse false)) {
var log_ = try allocator.create(logger.Log);
@@ -406,8 +414,8 @@ pub const Cli = struct {
try this_bundler.configureRouter();
var loaded_route_config: ?Api.LoadedRouteConfig = brk: {
if (this_bundler.options.route_config) |*conf| {
break :brk conf.toAPI();
if (this_bundler.options.routes.routes_enabled) {
break :brk this_bundler.options.routes.toAPI();
}
break :brk null;
};
@@ -459,25 +467,40 @@ pub const Cli = struct {
Output.Source.set(&output_source);
defer Output.flush();
defer wait_group.done();
defer {
if (FeatureFlags.parallel_jsb) {
wait_group.done();
}
}
Output.enable_ansi_colors = stderr_.isTty();
_generate(logs, std.heap.c_allocator, transform_args, _filepath, server_conf, route_conf_, router) catch return;
}
};
wait_group.add();
server_bundler_generator_thread = try std.Thread.spawn(
.{},
ServerBundleGeneratorThread.generate,
.{
if (FeatureFlags.parallel_jsb) {
wait_group.add();
server_bundler_generator_thread = try std.Thread.spawn(
.{},
ServerBundleGeneratorThread.generate,
.{
log_,
args,
server_bundle_filepath,
_server_conf,
loaded_route_config,
this_bundler.router,
},
);
} else {
ServerBundleGeneratorThread.generate(
log_,
args,
server_bundle_filepath,
_server_conf,
loaded_route_config,
this_bundler.router,
},
);
);
}
}
}
@@ -504,8 +527,8 @@ pub const Cli = struct {
var elapsed = @divTrunc(std.time.nanoTimestamp() - start_time, @as(i128, std.time.ns_per_ms));
var bundle = NodeModuleBundle.init(node_modules, allocator);
if (log.errors > 0 or log.warnings > 0) {
try log.print(Output.errorWriter());
if (log_.errors > 0) {
try log_.print(Output.errorWriter());
} else {
bundle.printSummary();
const indent = comptime " ";
@@ -516,6 +539,8 @@ pub const Cli = struct {
} else {
Output.prettyln(indent ++ "<r>Saved to ./{s}", .{filepath});
}
try log_.printForLogLevel(Output.errorWriter());
}
}
return;


@@ -1,5 +1,13 @@
usingnamespace @import("std").c;
const std = @import("std");
usingnamespace std.c;
const builtin = @import("builtin");
const os = std.os;
const mem = std.mem;
const Stat = std.fs.File.Stat;
const Kind = std.fs.File.Kind;
const StatError = std.fs.File.StatError;
const errno = os.errno;
const zeroes = mem.zeroes;
// int clonefileat(int src_dirfd, const char * src, int dst_dirfd, const char * dst, int flags);
pub extern "c" fn clonefileat(c_int, [*c]const u8, c_int, [*c]const u8, uint32_t: c_int) c_int;
// int fclonefileat(int srcfd, int dst_dirfd, const char * dst, int flags);
@@ -11,3 +19,140 @@ pub extern "c" fn chmod([*c]const u8, mode_t) c_int;
pub extern "c" fn fchmod(c_int, mode_t) c_int;
pub extern "c" fn umask(mode_t) mode_t;
pub extern "c" fn fchmodat(c_int, [*c]const u8, mode_t, c_int) c_int;
pub extern "c" fn lstat([*c]const u8, [*c]libc_stat) c_int;
pub extern "c" fn lstat64([*c]const u8, [*c]libc_stat) c_int;
pub fn lstat_absolute(path: [:0]const u8) StatError!Stat {
if (builtin.os.tag == .windows) {
var io_status_block: windows.IO_STATUS_BLOCK = undefined;
var info: windows.FILE_ALL_INFORMATION = undefined;
const rc = windows.ntdll.NtQueryInformationFile(self.handle, &io_status_block, &info, @sizeOf(windows.FILE_ALL_INFORMATION), .FileAllInformation);
switch (rc) {
.SUCCESS => {},
.BUFFER_OVERFLOW => {},
.INVALID_PARAMETER => unreachable,
.ACCESS_DENIED => return error.AccessDenied,
else => return windows.unexpectedStatus(rc),
}
return Stat{
.inode = info.InternalInformation.IndexNumber,
.size = @bitCast(u64, info.StandardInformation.EndOfFile),
.mode = 0,
.kind = if (info.StandardInformation.Directory == 0) .File else .Directory,
.atime = windows.fromSysTime(info.BasicInformation.LastAccessTime),
.mtime = windows.fromSysTime(info.BasicInformation.LastWriteTime),
.ctime = windows.fromSysTime(info.BasicInformation.CreationTime),
};
}
var st = zeroes(libc_stat);
switch (errno(lstat64(path.ptr, &st))) {
0 => {},
EINVAL => unreachable,
EBADF => unreachable, // Always a race condition.
ENOMEM => return error.SystemResources,
EACCES => return error.AccessDenied,
else => |err| return os.unexpectedErrno(err),
}
const atime = st.atime();
const mtime = st.mtime();
const ctime = st.ctime();
return Stat{
.inode = st.ino,
.size = @bitCast(u64, st.size),
.mode = st.mode,
.kind = switch (builtin.os.tag) {
.wasi => switch (st.filetype) {
os.FILETYPE_BLOCK_DEVICE => Kind.BlockDevice,
os.FILETYPE_CHARACTER_DEVICE => Kind.CharacterDevice,
os.FILETYPE_DIRECTORY => Kind.Directory,
os.FILETYPE_SYMBOLIC_LINK => Kind.SymLink,
os.FILETYPE_REGULAR_FILE => Kind.File,
os.FILETYPE_SOCKET_STREAM, os.FILETYPE_SOCKET_DGRAM => Kind.UnixDomainSocket,
else => Kind.Unknown,
},
else => switch (st.mode & os.S_IFMT) {
os.S_IFBLK => Kind.BlockDevice,
os.S_IFCHR => Kind.CharacterDevice,
os.S_IFDIR => Kind.Directory,
os.S_IFIFO => Kind.NamedPipe,
os.S_IFLNK => Kind.SymLink,
os.S_IFREG => Kind.File,
os.S_IFSOCK => Kind.UnixDomainSocket,
else => Kind.Unknown,
},
},
.atime = @as(i128, atime.tv_sec) * std.time.ns_per_s + atime.tv_nsec,
.mtime = @as(i128, mtime.tv_sec) * std.time.ns_per_s + mtime.tv_nsec,
.ctime = @as(i128, ctime.tv_sec) * std.time.ns_per_s + ctime.tv_nsec,
};
}
pub fn stat_absolute(path: [:0]const u8) StatError!Stat {
if (builtin.os.tag == .windows) {
var io_status_block: windows.IO_STATUS_BLOCK = undefined;
var info: windows.FILE_ALL_INFORMATION = undefined;
const rc = windows.ntdll.NtQueryInformationFile(self.handle, &io_status_block, &info, @sizeOf(windows.FILE_ALL_INFORMATION), .FileAllInformation);
switch (rc) {
.SUCCESS => {},
.BUFFER_OVERFLOW => {},
.INVALID_PARAMETER => unreachable,
.ACCESS_DENIED => return error.AccessDenied,
else => return windows.unexpectedStatus(rc),
}
return Stat{
.inode = info.InternalInformation.IndexNumber,
.size = @bitCast(u64, info.StandardInformation.EndOfFile),
.mode = 0,
.kind = if (info.StandardInformation.Directory == 0) .File else .Directory,
.atime = windows.fromSysTime(info.BasicInformation.LastAccessTime),
.mtime = windows.fromSysTime(info.BasicInformation.LastWriteTime),
.ctime = windows.fromSysTime(info.BasicInformation.CreationTime),
};
}
var st = zeroes(libc_stat);
switch (errno(stat(path.ptr, &st))) {
0 => {},
EINVAL => unreachable,
EBADF => unreachable, // Always a race condition.
ENOMEM => return error.SystemResources,
EACCES => return error.AccessDenied,
else => |err| return os.unexpectedErrno(err),
}
const atime = st.atime();
const mtime = st.mtime();
const ctime = st.ctime();
return Stat{
.inode = st.ino,
.size = @bitCast(u64, st.size),
.mode = st.mode,
.kind = switch (builtin.os.tag) {
.wasi => switch (st.filetype) {
os.FILETYPE_BLOCK_DEVICE => Kind.BlockDevice,
os.FILETYPE_CHARACTER_DEVICE => Kind.CharacterDevice,
os.FILETYPE_DIRECTORY => Kind.Directory,
os.FILETYPE_SYMBOLIC_LINK => Kind.SymLink,
os.FILETYPE_REGULAR_FILE => Kind.File,
os.FILETYPE_SOCKET_STREAM, os.FILETYPE_SOCKET_DGRAM => Kind.UnixDomainSocket,
else => Kind.Unknown,
},
else => switch (st.mode & os.S_IFMT) {
os.S_IFBLK => Kind.BlockDevice,
os.S_IFCHR => Kind.CharacterDevice,
os.S_IFDIR => Kind.Directory,
os.S_IFIFO => Kind.NamedPipe,
os.S_IFLNK => Kind.SymLink,
os.S_IFREG => Kind.File,
os.S_IFSOCK => Kind.UnixDomainSocket,
else => Kind.Unknown,
},
},
.atime = @as(i128, atime.tv_sec) * std.time.ns_per_s + atime.tv_nsec,
.mtime = @as(i128, mtime.tv_sec) * std.time.ns_per_s + mtime.tv_nsec,
.ctime = @as(i128, ctime.tv_sec) * std.time.ns_per_s + ctime.tv_nsec,
};
}

View File

@@ -34,6 +34,15 @@ pub const css_supports_fence = true;
pub const disable_entry_cache = false;
pub const enable_bytecode_caching = false;
// Disabled due to a concurrency bug I don't have time to fix right now.
// I suspect it's something like 3 undefined-memory issues.
// This was the command I ran to reproduce it:
// for i in (seq 1000)
// command ../../build/debug/macos-x86_64/esdev --use=./nexty2 --new-jsb > /dev/null
// end
// It only happens about 1 out of every N runs, where N is probably around 50.
pub const parallel_jsb = true;
pub const CSSModulePolyfill = enum {
// When you import a .css file and reference the import in JavaScript,
// just return whatever property key was referenced.
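// Illustrative example (hypothetical component, not from this repo):
//   import styles from "./button.css";
//   styles.primary === "primary"; // the polyfill returns the referenced property key itself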

View File

@@ -179,29 +179,8 @@ pub const FileSystem = struct {
}
// entry.name only lives for the duration of the iteration
var name: []u8 = undefined;
const name = try FileSystem.FilenameStore.instance.appendLowerCase(@TypeOf(entry.name), entry.name);
switch (_kind) {
.file => {
name = FileSystem.FilenameStore.editableSlice(try FileSystem.FilenameStore.instance.append(@TypeOf(entry.name), entry.name));
},
.dir => {
// FileSystem.FilenameStore here because it's not an absolute path
// it's a path relative to the parent directory
// so it's a tiny path like "foo" instead of "/bar/baz/foo"
name = FileSystem.FilenameStore.editableSlice(try FileSystem.FilenameStore.instance.append(@TypeOf(entry.name), entry.name));
},
}
for (name) |c, i| {
name[i] = std.ascii.toLower(c);
}
var symlink: []u8 = "";
if (entry.kind == std.fs.Dir.Entry.Kind.SymLink) {
symlink = name;
}
const index = try EntryStore.instance.append(Entry{
.base = name,
.dir = dir.dir,
@@ -211,7 +190,7 @@ pub const FileSystem = struct {
// for each entry was a big performance issue for that package.
.need_stat = entry.kind == .SymLink,
.cache = Entry.Cache{
.symlink = symlink,
.symlink = "",
.kind = _kind,
},
});
@@ -332,8 +311,6 @@ pub const FileSystem = struct {
};
pub fn kind(entry: *Entry, fs: *Implementation) Kind {
// entry.mutex.lock();
// defer entry.mutex.unlock();
if (entry.need_stat) {
entry.need_stat = false;
entry.cache = fs.kind(entry.dir, entry.base) catch unreachable;
@@ -342,8 +319,6 @@ pub const FileSystem = struct {
}
pub fn symlink(entry: *Entry, fs: *Implementation) string {
// entry.mutex.lock();
// defer entry.mutex.unlock();
if (entry.need_stat) {
entry.need_stat = false;
entry.cache = fs.kind(entry.dir, entry.base) catch unreachable;
@@ -464,7 +439,7 @@ pub const FileSystem = struct {
entries_mutex: Mutex = Mutex.init(),
entries: *EntriesOption.Map,
allocator: *std.mem.Allocator,
limiter: Limiter,
limiter: *Limiter,
cwd: string,
parent_fs: *FileSystem = undefined,
file_limit: usize = 32,
@@ -529,18 +504,28 @@ pub const FileSystem = struct {
return limit.cur;
}
threadlocal var _entries_option_map: *EntriesOption.Map = undefined;
threadlocal var _entries_option_map_loaded: bool = false;
var __limiter: Limiter = undefined;
pub fn init(
allocator: *std.mem.Allocator,
cwd: string,
) RealFS {
const file_limit = adjustUlimit();
if (!_entries_option_map_loaded) {
_entries_option_map = EntriesOption.Map.init(allocator);
_entries_option_map_loaded = true;
__limiter = Limiter.init(allocator, file_limit);
}
return RealFS{
.entries = EntriesOption.Map.init(allocator),
.entries = _entries_option_map,
.allocator = allocator,
.cwd = cwd,
.file_limit = file_limit,
.file_quota = file_limit,
.limiter = Limiter.init(allocator, file_limit),
.limiter = &__limiter,
};
}
@@ -637,7 +622,7 @@ pub const FileSystem = struct {
// This custom map implementation:
// - Preallocates a fixed amount of directory name space
// - Doesn't store directory names which don't exist.
pub const Map = allocators.BSSMap(EntriesOption, Preallocate.Counts.dir_entry, false, 128);
pub const Map = allocators.TBSSMap(EntriesOption, Preallocate.Counts.dir_entry, false, 128);
};
// Limit the number of files open simultaneously to avoid ulimit issues
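// Typical usage (as in RealFS.kind below):
//   fs.limiter.before();
//   defer fs.limiter.after();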
@@ -668,7 +653,7 @@ pub const FileSystem = struct {
};
pub fn openDir(fs: *RealFS, unsafe_dir_string: string) std.fs.File.OpenError!std.fs.Dir {
return try std.fs.openDirAbsolute(unsafe_dir_string, std.fs.Dir.OpenDirOptions{ .iterate = true, .access_sub_paths = true, .no_follow = true });
return try std.fs.openDirAbsolute(unsafe_dir_string, std.fs.Dir.OpenDirOptions{ .iterate = true, .access_sub_paths = true, .no_follow = false });
}
fn readdir(
@@ -843,61 +828,39 @@ pub const FileSystem = struct {
pub fn kind(fs: *RealFS, _dir: string, base: string) !Entry.Cache {
var dir = _dir;
var combo = [2]string{ dir, base };
var entry_path = path_handler.joinAbsString(fs.cwd, &combo, .auto);
var outpath: [std.fs.MAX_PATH_BYTES]u8 = undefined;
var entry_path = path_handler.joinAbsStringBuf(fs.cwd, &outpath, &combo, .auto);
// NUL-terminate in place so the buffer can be passed to lstat as a C string.
outpath[entry_path.len] = 0;
const absolute_path_c: [:0]const u8 = outpath[0..entry_path.len :0];
fs.limiter.before();
defer fs.limiter.after();
const file = try std.fs.openFileAbsolute(entry_path, .{ .read = true, .write = false });
defer {
if (fs.needToCloseFiles()) {
file.close();
}
}
var stat = try file.stat();
var stat = try C.lstat_absolute(absolute_path_c);
const is_symlink = stat.kind == std.fs.File.Kind.SymLink;
var _kind = stat.kind;
var cache = Entry.Cache{ .kind = Entry.Kind.file, .symlink = "" };
var symlink: []const u8 = "";
if (_kind == .SymLink) {
// windows has a max filepath of 255 chars
// we give it a little longer for other platforms
var out_buffer = std.mem.zeroes([512]u8);
var out_slice = &out_buffer;
symlink = entry_path;
var links_walked: u8 = 0;
if (is_symlink) {
var file = try std.fs.openFileAbsoluteZ(absolute_path_c, .{ .read = true });
setMaxFd(file.handle);
while (links_walked < 255) : (links_walked += 1) {
var link: string = try std.os.readlink(symlink, out_slice);
if (!std.fs.path.isAbsolute(link)) {
combo[0] = dir;
combo[1] = link;
link = path_handler.joinAbsStringBuf(fs.cwd, out_slice, &combo, .auto);
defer {
if (fs.needToCloseFiles()) {
file.close();
}
// TODO: do we need to clean the path?
symlink = link;
const file2 = std.fs.openFileAbsolute(symlink, std.fs.File.OpenFlags{ .read = true, .write = false }) catch return cache;
// These ones we always close
defer file2.close();
const stat2 = file2.stat() catch return cache;
// Re-run "lstat" on the symlink target
_kind = stat2.kind;
if (_kind != .SymLink) {
break;
}
dir = std.fs.path.dirname(link) orelse return cache;
}
const _stat = try file.stat();
if (links_walked > 255) {
return cache;
}
symlink = try std.os.getFdPath(file.handle, &outpath);
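// The file was opened without no_follow, so the handle refers to the link's target;
// getFdPath gives us that target's resolved absolute path.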
_kind = _stat.kind;
}
std.debug.assert(_kind != .SymLink);
if (_kind == .Directory) {
cache.kind = .dir;
} else {
@@ -931,6 +894,7 @@ pub const PathName = struct {
base: string,
dir: string,
ext: string,
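// everything after the directory, e.g. "/a/b/c.ts" -> "c.ts"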
filename: string,
// For readability, the names of certain automatically-generated symbols are
// derived from the file name. For example, instead of the CommonJS wrapper for
@@ -1004,6 +968,7 @@ pub const PathName = struct {
.dir = dir,
.base = base,
.ext = ext,
.filename = if (dir.len > 0) _path[dir.len + 1 ..] else _path,
};
}
};
@@ -1014,10 +979,42 @@ threadlocal var join_buf: [1024]u8 = undefined;
pub const Path = struct {
pretty: string,
text: string,
non_symlink: string = "",
namespace: string = "unspecified",
name: PathName,
is_disabled: bool = false,
// "/foo/bar/node_modules/react/index.js" => "index.js"
// "/foo/bar/node_modules/.pnpm/react@17.0.1/node_modules/react/index.js" => "index.js"
pub fn packageRelativePathString(this: *const Path, name: string) string {
// TODO: we don't need to print this buffer, this is inefficient
var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const search_path = std.fmt.bufPrint(&buffer, std.fs.path.sep_str ++ "node_modules" ++ std.fs.path.sep_str ++ "{s}" ++ std.fs.path.sep_str, .{name}) catch return this.text;
if (strings.lastIndexOf(this.canonicalNodeModuleText(), search_path)) |i| {
return this.canonicalNodeModuleText()[i + search_path.len ..];
}
return this.canonicalNodeModuleText();
}
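// "/foo/bar/node_modules/react/index.js" => "react/index.js"
// (unlike packageRelativePathString above, this keeps the package name)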
pub fn nodeModulesRelativePathString(
this: *const Path,
name: string,
) string {
// TODO: we don't need to print this buffer, this is inefficient
var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const search_path = std.fmt.bufPrint(&buffer, std.fs.path.sep_str ++ "node_modules" ++ std.fs.path.sep_str ++ "{s}" ++ std.fs.path.sep_str, .{name}) catch return this.text;
if (strings.lastIndexOf(this.canonicalNodeModuleText(), search_path)) |i| {
return this.canonicalNodeModuleText()[i + search_path.len - name.len - 1 ..];
}
return this.canonicalNodeModuleText();
}
pub inline fn canonicalNodeModuleText(this: *const Path) string {
return this.text;
}
pub fn jsonStringify(self: *const @This(), options: anytype, writer: anytype) !void {
return try std.json.stringify(self.text, options, writer);
}

View File

@@ -170,16 +170,16 @@ pub const RequestContext = struct {
}
fn matchPublicFolder(this: *RequestContext) ?bundler.ServeResult {
if (!this.bundler.options.public_dir_enabled) return null;
if (!this.bundler.options.routes.static_dir_enabled) return null;
const relative_path = this.url.path;
var extension = this.url.extname;
var tmp_buildfile_buf = std.mem.span(&Bundler.tmp_buildfile_buf);
// On Windows, we don't keep the directory handle open forever because Windows doesn't like that.
const public_dir: std.fs.Dir = this.bundler.options.public_dir_handle orelse std.fs.openDirAbsolute(this.bundler.options.public_dir, .{}) catch |err| {
const public_dir: std.fs.Dir = this.bundler.options.routes.static_dir_handle orelse std.fs.openDirAbsolute(this.bundler.options.routes.static_dir, .{}) catch |err| {
this.bundler.log.addErrorFmt(null, logger.Loc.Empty, this.allocator, "Opening public directory failed: {s}", .{@errorName(err)}) catch unreachable;
Output.printErrorln("Opening public directory failed: {s}", .{@errorName(err)});
this.bundler.options.public_dir_enabled = false;
this.bundler.options.routes.static_dir_enabled = false;
return null;
};
@@ -244,7 +244,7 @@ pub const RequestContext = struct {
if (_file) |*file| {
var stat = file.stat() catch return null;
var absolute_path = resolve_path.joinAbs(this.bundler.options.public_dir, .auto, relative_unrooted_path);
var absolute_path = resolve_path.joinAbs(this.bundler.options.routes.static_dir, .auto, relative_unrooted_path);
if (stat.kind == .SymLink) {
absolute_path = std.fs.realpath(absolute_path, &Bundler.tmp_buildfile_buf) catch return null;
@@ -1920,7 +1920,7 @@ pub const Server = struct {
try server.initWatcher();
if (server.bundler.router != null and server.bundler.options.public_dir_enabled) {
if (server.bundler.router != null and server.bundler.options.routes.static_dir_enabled) {
try server.run(
ConnectionFeatures{ .public_folder = true, .filesystem_router = true },
);
@@ -1928,7 +1928,7 @@ pub const Server = struct {
try server.run(
ConnectionFeatures{ .public_folder = false, .filesystem_router = true },
);
} else if (server.bundler.options.public_dir_enabled) {
} else if (server.bundler.options.routes.static_dir_enabled) {
try server.run(
ConnectionFeatures{ .public_folder = true, .filesystem_router = false },
);

View File

@@ -274,14 +274,14 @@ pub const VirtualMachine = struct {
inline fn _fetch(
global: *JSGlobalObject,
specifier: string,
_specifier: string,
source: string,
log: *logger.Log,
) !ResolvedSource {
std.debug.assert(VirtualMachine.vm_loaded);
std.debug.assert(VirtualMachine.vm.global == global);
if (vm.node_modules != null and strings.eql(vm.bundler.linker.nodeModuleBundleImportPath(), specifier)) {
if (vm.node_modules != null and strings.eql(vm.bundler.linker.nodeModuleBundleImportPath(), _specifier)) {
// We kind of need an abstraction around this.
// Basically we should subclass JSC::SourceCode with:
// - hash
@@ -300,7 +300,7 @@ pub const VirtualMachine = struct {
&vm.bundler.fs.fs,
) orelse 0),
};
} else if (strings.eqlComptime(specifier, Runtime.Runtime.Imports.Name)) {
} else if (strings.eqlComptime(_specifier, Runtime.Runtime.Imports.Name)) {
return ResolvedSource{
.source_code = ZigString.init(Runtime.Runtime.sourceContent()),
.specifier = ZigString.init(Runtime.Runtime.Imports.Name),
@@ -313,8 +313,10 @@ pub const VirtualMachine = struct {
};
}
const result = vm.bundler.resolve_results.get(specifier) orelse return error.MissingResolveResult;
const path = result.path_pair.primary;
const specifier = normalizeSpecifier(_specifier);
std.debug.assert(std.fs.path.isAbsolute(specifier)); // if this crashes, it means the resolver was skipped.
const path = Fs.Path.init(specifier);
const loader = vm.bundler.options.loaders.get(path.name.ext) orelse .file;
switch (loader) {
@@ -344,7 +346,7 @@ pub const VirtualMachine = struct {
vm.bundler.allocator,
path,
loader,
result.dirname_fd,
0,
fd,
hash,
) orelse {
@@ -416,17 +418,11 @@ pub const VirtualMachine = struct {
return;
}
const result: Resolver.Result = vm.bundler.resolve_results.get(specifier) orelse brk: {
// We don't want to write to the hash table if there's an error
// That's why we don't use getOrPut here
const res = try vm.bundler.resolver.resolve(
Fs.PathName.init(source).dirWithTrailingSlash(),
specifier,
.stmt,
);
try vm.bundler.resolve_results.put(res.path_pair.primary.text, res);
break :brk res;
};
const result = try vm.bundler.resolver.resolve(
Fs.PathName.init(source).dirWithTrailingSlash(),
specifier,
.stmt,
);
ret.result = result;
if (vm.node_modules != null and result.isLikelyNodeModule()) {
@@ -507,7 +503,17 @@ pub const VirtualMachine = struct {
res.* = ErrorableZigString.ok(ZigString.init(result.path));
}
pub fn normalizeSpecifier(slice: string) string {
if (slice.len == 0) return slice;
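// Illustrative example (assuming origin = "http://localhost:9000/"):
//   "http://localhost:9000/pages/index.js" -> "pages/index.js"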
if (VirtualMachine.vm.bundler.options.origin.len > 0) {
if (strings.startsWith(slice, VirtualMachine.vm.bundler.options.origin)) {
return slice[VirtualMachine.vm.bundler.options.origin.len..];
}
}
return slice;
}
threadlocal var errors_stack: [256]*c_void = undefined;
pub fn fetch(ret: *ErrorableResolvedSource, global: *JSGlobalObject, specifier: ZigString, source: ZigString) callconv(.C) void {
var log = logger.Log.init(vm.bundler.allocator);

View File

@@ -31,18 +31,25 @@ pub const NodeEnvBufMap = struct {
std.mem.copy(u8, bufkeybuf["process.env.".len..], key);
var key_slice = bufkeybuf[0 .. key.len + "process.env.".len];
var value_slice = value;
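// Presumably the value is wrapped in double quotes (when not already quoted) so it
// can be inlined into JavaScript as a string literal, e.g. NODE_ENV=development
// becomes process.env.NODE_ENV => "development".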
const max_value_slice_len = std.math.min(value.len, bufkeybuf.len - key_slice.len);
if (value_slice[0] != '"' and value_slice[value.len - 1] != '"') {
value_slice = bufkeybuf[key_slice.len..][0 .. max_value_slice_len + 2];
value_slice[0] = '"';
std.mem.copy(u8, value_slice[1..], value[0..max_value_slice_len]);
value_slice[value_slice.len - 1] = '"';
} else if (value_slice[0] != '"') {
value_slice[0] = '"';
std.mem.copy(u8, value_slice[1..], value[0..max_value_slice_len]);
} else if (value_slice[value.len - 1] != '"') {
std.mem.copy(u8, value_slice[1..], value[0..max_value_slice_len]);
value_slice[value_slice.len - 1] = '"';
if (value_slice.len > 0) {
const max_value_slice_len = std.math.min(value.len, bufkeybuf.len - key_slice.len);
if (key_slice.len < bufkeybuf.len and value_slice[0] != '"' and value_slice[value.len - 1] != '"') {
value_slice = bufkeybuf[key_slice.len..];
if (value_slice.len > 0) {
value_slice = value_slice[0 .. max_value_slice_len + 2];
value_slice[0] = '"';
std.mem.copy(u8, value_slice[1..], value[0..max_value_slice_len]);
value_slice[value_slice.len - 1] = '"';
} else {
value_slice.len = 0;
}
} else if (value_slice[0] != '"') {
value_slice[0] = '"';
std.mem.copy(u8, value_slice[1..], value[0..max_value_slice_len]);
} else if (value_slice[value.len - 1] != '"') {
std.mem.copy(u8, value_slice[1..], value[0..max_value_slice_len]);
value_slice[value_slice.len - 1] = '"';
}
}
return this.backing.put(key_slice, value_slice);

View File

@@ -1730,14 +1730,14 @@ pub const Parser = struct {
// if yes, just automatically add the import so that .jsb knows to include the file.
if (self.options.jsx.parse and p.needs_jsx_import) {
_ = p.addImportRecord(
.internal,
.require,
logger.Loc{ .start = 0 },
p.options.jsx.import_source,
);
// Ensure we have both the classic and automatic runtimes.
// This handles cases where fragments are used with the automatic runtime.
_ = p.addImportRecord(
.internal,
.require,
logger.Loc{ .start = 0 },
p.options.jsx.classic_import_source,
);
@@ -1934,7 +1934,7 @@ pub const Parser = struct {
decl_i += 1;
}
const import_record_id = p.addImportRecord(.internal, loc, p.options.jsx.import_source);
const import_record_id = p.addImportRecord(.require, loc, p.options.jsx.import_source);
// When everything is CommonJS
// We import JSX like this:
// var {jsxDev} = require("react/jsx-dev")
@@ -2010,7 +2010,7 @@ pub const Parser = struct {
};
decl_i += 1;
}
const import_record_id = p.addImportRecord(.internal, loc, p.options.jsx.classic_import_source);
const import_record_id = p.addImportRecord(.require, loc, p.options.jsx.classic_import_source);
jsx_part_stmts[stmt_i] = p.s(S.Import{
.namespace_ref = classic_namespace_ref,
.star_name_loc = loc,

View File

@@ -577,7 +577,7 @@ pub fn NewLinker(comptime BundlerType: type) type {
// Run the resolver
// Don't parse/print automatically.
if (linker.options.resolve_mode != .lazy) {
try linker.enqueueResolveResult(resolve_result);
_ = try linker.enqueueResolveResult(resolve_result);
}
import_record.path = try linker.generateImportPath(
@@ -595,7 +595,7 @@ pub fn NewLinker(comptime BundlerType: type) type {
}
}
pub fn resolveResultHashKey(linker: *ThisLinker, resolve_result: *const Resolver.Result) string {
pub fn resolveResultHashKey(linker: *ThisLinker, resolve_result: *const Resolver.Result) u64 {
var hash_key = resolve_result.path_pair.primary.text;
// Shorter hash key is faster to hash
@@ -603,18 +603,19 @@ pub fn NewLinker(comptime BundlerType: type) type {
hash_key = resolve_result.path_pair.primary.text[linker.fs.top_level_dir.len..];
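// e.g. with top_level_dir "/project/", this leaves "src/App.tsx" for "/project/src/App.tsx"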
}
return hash_key;
return std.hash.Wyhash.hash(0, hash_key);
}
pub fn enqueueResolveResult(linker: *ThisLinker, resolve_result: *const Resolver.Result) !void {
pub fn enqueueResolveResult(linker: *ThisLinker, resolve_result: *const Resolver.Result) !bool {
const hash_key = linker.resolveResultHashKey(resolve_result);
const get_or_put_entry = try linker.resolve_results.backing.getOrPut(hash_key);
const get_or_put_entry = try linker.resolve_results.getOrPut(hash_key);
if (!get_or_put_entry.found_existing) {
get_or_put_entry.entry.value = resolve_result.*;
try linker.resolve_queue.writeItem(resolve_result.*);
}
return !get_or_put_entry.found_existing;
}
};
}

View File

@@ -11,13 +11,17 @@ const expect = std.testing.expect;
const assert = std.debug.assert;
const ArrayList = std.ArrayList;
pub const Kind = enum {
pub const Kind = enum(i8) {
err,
warn,
note,
debug,
verbose,
pub inline fn shouldPrint(this: Kind, other: Log.Level) bool {
return @enumToInt(this) - @enumToInt(other) >= 0;
}
pub fn string(self: Kind) string {
return switch (self) {
.err => "error",
@@ -323,7 +327,7 @@ pub const Log = struct {
warnings: usize = 0,
errors: usize = 0,
msgs: ArrayList(Msg),
level: Level = Level.debug,
level: Level = Level.info,
pub fn toAPI(this: *const Log, allocator: *std.mem.Allocator) !Api.Log {
return Api.Log{
@@ -333,7 +337,7 @@ pub const Log = struct {
};
}
pub const Level = enum {
pub const Level = enum(i8) {
verbose,
debug,
info,
@@ -516,6 +520,14 @@ pub const Log = struct {
}
}
pub fn printForLogLevel(self: *Log, to: anytype) !void {
for (self.msgs.items) |msg| {
if (msg.kind.shouldPrint(self.level)) {
try msg.writeFormat(to);
}
}
}
pub fn toZigException(this: *const Log, allocator: *std.mem.Allocator) *js.ZigException.Holder {
var holder = try allocator.create(js.ZigException.Holder);
holder.* = js.ZigException.Holder.init();

View File

@@ -611,13 +611,12 @@ pub const BundleOptions = struct {
hot_module_reloading: bool = false,
inject: ?[]string = null,
origin: string = "",
public_dir: string = "public",
public_dir_enabled: bool = true,
output_dir: string = "",
output_dir_handle: ?std.fs.Dir = null,
node_modules_bundle_url: string = "",
node_modules_bundle_pretty_path: string = "",
public_dir_handle: ?std.fs.Dir = null,
write: bool = false,
preserve_symlinks: bool = false,
preserve_extensions: bool = false,
@@ -638,7 +637,7 @@ pub const BundleOptions = struct {
out_extensions: std.StringHashMap(string),
import_path_format: ImportPathFormat = ImportPathFormat.relative,
framework: ?Framework = null,
route_config: ?RouteConfig = null,
routes: RouteConfig = RouteConfig.zero(),
pub fn asJavascriptBundleConfig(this: *const BundleOptions) Api.JavascriptBundleConfig {}
@@ -701,85 +700,6 @@ pub const BundleOptions = struct {
else => {},
}
if (transform.main_fields.len > 0) {
opts.main_fields = transform.main_fields;
}
opts.external = ExternalModules.init(allocator, &fs.fs, fs.top_level_dir, transform.external, log, opts.platform);
opts.out_extensions = opts.platform.outExtensions(allocator);
if (transform.framework) |_framework| {
opts.framework = try Framework.fromApi(_framework);
}
if (transform.router) |route_config| {
opts.route_config = try RouteConfig.fromApi(route_config, allocator);
}
if (transform.serve orelse false) {
opts.preserve_extensions = true;
opts.append_package_version_in_query_string = true;
if (opts.origin.len == 0) {
opts.origin = "/";
}
opts.resolve_mode = .lazy;
var _dirs = [_]string{transform.public_dir orelse opts.public_dir};
opts.public_dir = try fs.absAlloc(allocator, &_dirs);
opts.public_dir_handle = std.fs.openDirAbsolute(opts.public_dir, .{ .iterate = true }) catch |err| brk: {
var did_warn = false;
switch (err) {
error.FileNotFound => {
// Be nice.
// Check "static" since sometimes people use that instead.
// Don't switch to it, but just tell "hey try --public-dir=static" next time
if (transform.public_dir == null or transform.public_dir.?.len == 0) {
_dirs[0] = "static";
const check_static = try fs.joinAlloc(allocator, &_dirs);
defer allocator.free(check_static);
std.fs.accessAbsolute(check_static, .{}) catch {
Output.prettyErrorln("warn: \"public\" folder missing. If there are external assets used in your project, pass --public-dir=\"public-folder-name\"", .{});
did_warn = true;
};
}
if (!did_warn) {
Output.prettyErrorln("warn: \"public\" folder missing. If you want to use \"static\" as the public folder, pass --public-dir=\"static\".", .{});
}
opts.public_dir_enabled = false;
},
error.AccessDenied => {
Output.prettyErrorln(
"error: access denied when trying to open public_dir: \"{s}\".\nPlease re-open Speedy with access to this folder or pass a different folder via \"--public-dir\". Note: --public-dir is relative to --cwd (or the process' current working directory).\n\nThe public folder is where static assets such as images, fonts, and .html files go.",
.{opts.public_dir},
);
std.process.exit(1);
},
else => {
Output.prettyErrorln(
"error: \"{s}\" when accessing public folder: \"{s}\"",
.{ @errorName(err), opts.public_dir },
);
std.process.exit(1);
},
}
break :brk null;
};
// Windows has weird locking rules for file access.
// so it's a bad idea to keep a file handle open for a long time on Windows.
if (isWindows and opts.public_dir_handle != null) {
opts.public_dir_handle.?.close();
}
opts.hot_module_reloading = true;
}
if (opts.write and opts.output_dir.len > 0) {
opts.output_dir_handle = try openOutputDir(opts.output_dir);
}
if (!(transform.generate_node_module_bundle orelse false)) {
if (node_modules_bundle_existing) |node_mods| {
opts.node_modules_bundle = node_mods;
@@ -834,15 +754,15 @@ pub const BundleOptions = struct {
},
);
Output.flush();
if (opts.framework == null) {
if (transform.framework == null) {
if (node_module_bundle.container.framework) |loaded_framework| {
opts.framework = Framework.fromLoadedFramework(loaded_framework);
}
}
if (opts.route_config == null) {
if (transform.router == null) {
if (node_module_bundle.container.routes) |routes| {
opts.route_config = RouteConfig.fromLoadedRoutes(routes);
opts.routes = RouteConfig.fromLoadedRoutes(routes);
}
}
} else |err| {
@@ -858,6 +778,89 @@ pub const BundleOptions = struct {
}
}
if (transform.main_fields.len > 0) {
opts.main_fields = transform.main_fields;
}
opts.external = ExternalModules.init(allocator, &fs.fs, fs.top_level_dir, transform.external, log, opts.platform);
opts.out_extensions = opts.platform.outExtensions(allocator);
if (transform.framework) |_framework| {
opts.framework = try Framework.fromApi(_framework);
}
if (transform.router) |routes| {
opts.routes = try RouteConfig.fromApi(routes, allocator);
}
if (transform.serve orelse false) {
opts.preserve_extensions = true;
opts.append_package_version_in_query_string = true;
if (opts.origin.len == 0) {
opts.origin = "/";
}
opts.resolve_mode = .lazy;
var dir_to_use: string = opts.routes.static_dir;
const static_dir_set = opts.routes.static_dir_enabled;
var _dirs = [_]string{dir_to_use};
opts.routes.static_dir = try fs.absAlloc(allocator, &_dirs);
opts.routes.static_dir_handle = std.fs.openDirAbsolute(opts.routes.static_dir, .{ .iterate = true }) catch |err| brk: {
var did_warn = false;
switch (err) {
error.FileNotFound => {
// Be nice.
// Check "static" since sometimes people use that instead.
// Don't switch to it, but just tell "hey try --public-dir=static" next time
if (!static_dir_set) {
_dirs[0] = "static";
const check_static = try fs.joinAlloc(allocator, &_dirs);
defer allocator.free(check_static);
std.fs.accessAbsolute(check_static, .{}) catch {
Output.prettyErrorln("warn: \"{s}\" folder missing. If there are external assets used in your project, pass --public-dir=\"public-folder-name\"", .{_dirs[0]});
did_warn = true;
};
}
if (!did_warn) {
Output.prettyErrorln("warn: \"{s}\" folder missing. If you want to use \"static\" as the public folder, pass --public-dir=\"static\".", .{_dirs[0]});
}
opts.routes.static_dir_enabled = false;
},
error.AccessDenied => {
Output.prettyErrorln(
"error: access denied when trying to open dir: \"{s}\".\nPlease re-open Speedy with access to this folder or pass a different folder via \"--public-dir\". Note: --public-dir is relative to --cwd (or the process' current working directory).\n\nThe public folder is where static assets such as images, fonts, and .html files go.",
.{opts.routes.static_dir},
);
std.process.exit(1);
},
else => {
Output.prettyErrorln(
"error: \"{s}\" when accessing public folder: \"{s}\"",
.{ @errorName(err), opts.routes.static_dir },
);
std.process.exit(1);
},
}
break :brk null;
};
// Windows has weird locking rules for file access,
// so it's a bad idea to keep a file handle open for a long time on Windows.
if (isWindows and opts.routes.static_dir_handle != null) {
opts.routes.static_dir_handle.?.close();
}
opts.hot_module_reloading = opts.platform.isWebLike();
}
if (opts.write and opts.output_dir.len > 0) {
opts.output_dir_handle = try openOutputDir(opts.output_dir);
}
return opts;
}
};
@@ -1211,7 +1214,6 @@ pub const Framework = struct {
};
pub const RouteConfig = struct {
///
dir: string = "",
// TODO: do we need a separate list for data-only extensions?
// e.g. /foo.json just to get the data for the route, without rendering the html
@@ -1219,17 +1221,29 @@ pub const RouteConfig = struct {
// I would consider using a custom binary format to minimize request size
// maybe like CBOR
extensions: []const string = &[_][]const string{},
routes_enabled: bool = false,
static_dir: string = "",
static_dir_handle: ?std.fs.Dir = null,
static_dir_enabled: bool = false,
pub fn toAPI(this: *const RouteConfig) Api.LoadedRouteConfig {
return .{ .dir = this.dir, .extensions = this.extensions };
return .{
.dir = if (this.routes_enabled) this.dir else "",
.extensions = this.extensions,
.static_dir = if (this.static_dir_enabled) this.static_dir else "",
};
}
pub const DefaultDir = "pages";
pub const DefaultStaticDir = "public";
pub const DefaultExtensions = [_]string{ "tsx", "ts", "mjs", "jsx", "js" };
pub inline fn zero() RouteConfig {
return RouteConfig{
.dir = DefaultDir,
.extensions = std.mem.span(&DefaultExtensions),
.static_dir = DefaultStaticDir,
.routes_enabled = false,
};
}
@@ -1237,6 +1251,9 @@ pub const RouteConfig = struct {
return RouteConfig{
.extensions = loaded.extensions,
.dir = loaded.dir,
.static_dir = loaded.static_dir,
.routes_enabled = loaded.dir.len > 0,
.static_dir_enabled = loaded.static_dir.len > 0,
};
}
@@ -1244,9 +1261,15 @@ pub const RouteConfig = struct {
var router = zero();
var router_dir: string = std.mem.trimRight(u8, router_.dir orelse "", "/\\");
var static_dir: string = std.mem.trimRight(u8, router_.static_dir orelse "", "/\\");
if (router_dir.len != 0) {
router.dir = router_dir;
router.routes_enabled = true;
}
if (static_dir.len > 0) {
router.static_dir = static_dir;
}
if (router_.extensions.len > 0) {

View File

@@ -84,40 +84,50 @@ pub const PackageJSON = struct {
pub fn loadFrameworkWithPreference(package_json: *const PackageJSON, pair: *FrameworkRouterPair, json: js_ast.Expr, allocator: *std.mem.Allocator, comptime load_framework: LoadFramework) void {
const framework_object = json.asProperty("framework") orelse return;
if (framework_object.expr.asProperty("router")) |router| {
if (router.expr.asProperty("dir")) |route_dir| {
if (route_dir.expr.asString(allocator)) |str| {
if (str.len > 0) {
pair.router.dir = str;
pair.loaded_routes = true;
}
if (framework_object.expr.asProperty("static")) |static_prop| {
if (static_prop.expr.asString(allocator)) |str| {
if (str.len > 0) {
pair.router.static_dir = str;
}
}
}
if (router.expr.asProperty("extensions")) |extensions_expr| {
if (extensions_expr.expr.asArray()) |*array| {
const count = array.array.items.len;
var valid_count: usize = 0;
while (array.next()) |expr| {
if (expr.data != .e_string) continue;
const e_str: *const js_ast.E.String = expr.data.e_string;
if (e_str.utf8.len == 0 or e_str.utf8[0] != '.') continue;
valid_count += 1;
if (!pair.router.routes_enabled) {
if (framework_object.expr.asProperty("router")) |router| {
if (router.expr.asProperty("dir")) |route_dir| {
if (route_dir.expr.asString(allocator)) |str| {
if (str.len > 0) {
pair.router.dir = str;
pair.loaded_routes = true;
}
}
}
if (valid_count > 0) {
var extensions = allocator.alloc(string, valid_count) catch unreachable;
array.index = 0;
var i: usize = 0;
if (router.expr.asProperty("extensions")) |extensions_expr| {
if (extensions_expr.expr.asArray()) |*array| {
const count = array.array.items.len;
var valid_count: usize = 0;
// We don't need to allocate the strings because we keep the package.json source string in memory
while (array.next()) |expr| {
if (expr.data != .e_string) continue;
const e_str: *const js_ast.E.String = expr.data.e_string;
if (e_str.utf8.len == 0 or e_str.utf8[0] != '.') continue;
extensions[i] = e_str.utf8;
i += 1;
valid_count += 1;
}
if (valid_count > 0) {
var extensions = allocator.alloc(string, valid_count) catch unreachable;
array.index = 0;
var i: usize = 0;
// We don't need to allocate the strings because we keep the package.json source string in memory
while (array.next()) |expr| {
if (expr.data != .e_string) continue;
const e_str: *const js_ast.E.String = expr.data.e_string;
if (e_str.utf8.len == 0 or e_str.utf8[0] != '.') continue;
extensions[i] = e_str.utf8;
i += 1;
}
}
}
}
@@ -130,6 +140,15 @@ pub const PackageJSON = struct {
if (loadFrameworkExpression(pair.framework, env.expr, allocator)) {
pair.framework.package = package_json.name;
pair.framework.development = true;
if (env.expr.asProperty("static")) |static_prop| {
if (static_prop.expr.asString(allocator)) |str| {
if (str.len > 0) {
pair.router.static_dir = str;
pair.router.static_dir_enabled = true;
}
}
}
return;
}
}
@@ -139,6 +158,16 @@ pub const PackageJSON = struct {
if (loadFrameworkExpression(pair.framework, env.expr, allocator)) {
pair.framework.package = package_json.name;
pair.framework.development = false;
if (env.expr.asProperty("static")) |static_prop| {
if (static_prop.expr.asString(allocator)) |str| {
if (str.len > 0) {
pair.router.static_dir = str;
pair.router.static_dir_enabled = true;
}
}
}
return;
}
}
@@ -159,8 +188,9 @@ pub const PackageJSON = struct {
dirname_fd: StoredFileDescriptorType,
comptime generate_hash: bool,
) ?PackageJSON {
const parts = [_]string{ input_path, "package.json" };
// TODO: remove this extra copy
const parts = [_]string{ input_path, "package.json" };
const package_json_path_ = r.fs.abs(&parts);
const package_json_path = r.fs.filename_store.append(@TypeOf(package_json_path_), package_json_path_) catch unreachable;

View File

@@ -648,6 +648,15 @@ pub fn joinAbsString(_cwd: []const u8, parts: anytype, comptime _platform: Platf
);
}
pub fn joinAbsStringZ(_cwd: []const u8, parts: anytype, comptime _platform: Platform) [:0]const u8 {
return joinAbsStringBufZ(
_cwd,
&parser_join_input_buffer,
parts,
_platform,
);
}
pub fn joinStringBuf(buf: []u8, _parts: anytype, comptime _platform: Platform) []const u8 {
if (FeatureFlags.use_std_path_join) {
var alloc = std.heap.FixedBufferAllocator.init(buf);
@@ -701,8 +710,19 @@ pub fn joinStringBuf(buf: []u8, _parts: anytype, comptime _platform: Platform) [
}
pub fn joinAbsStringBuf(_cwd: []const u8, buf: []u8, _parts: anytype, comptime _platform: Platform) []const u8 {
return _joinAbsStringBuf(false, []const u8, _cwd, buf, _parts, _platform);
}
pub fn joinAbsStringBufZ(_cwd: []const u8, buf: []u8, _parts: anytype, comptime _platform: Platform) [:0]const u8 {
return _joinAbsStringBuf(true, [:0]const u8, _cwd, buf, _parts, _platform);
}
inline fn _joinAbsStringBuf(comptime is_sentinel: bool, comptime ReturnType: type, _cwd: []const u8, buf: []u8, _parts: anytype, comptime _platform: Platform) ReturnType {
var parts: []const []const u8 = _parts;
if (parts.len == 0) {
if (comptime is_sentinel) {
unreachable;
}
return _cwd;
}
@@ -779,7 +799,12 @@ pub fn joinAbsStringBuf(_cwd: []const u8, buf: []u8, _parts: anytype, comptime _
std.mem.copy(u8, buf[leading_separator.len .. result.len + leading_separator.len], result);
return buf[0 .. result.len + leading_separator.len];
if (comptime is_sentinel) {
buf.ptr[result.len + leading_separator.len] = 0;
return buf[0 .. result.len + leading_separator.len :0];
} else {
return buf[0 .. result.len + leading_separator.len];
}
}
pub fn isSepPosix(char: u8) bool {

View File

@@ -49,13 +49,13 @@ pub const PathPair = struct {
pub const Iter = struct {
index: u2,
ctx: *PathPair,
pub fn next(i: *Iter) ?Path {
pub fn next(i: *Iter) ?*Path {
const ind = i.index;
i.index += 1;
switch (ind) {
0 => return i.ctx.primary,
1 => return i.ctx.secondary,
0 => return &i.ctx.primary,
1 => return if (i.ctx.secondary) |*sec| sec else null,
else => return null,
}
}
@@ -402,6 +402,7 @@ pub fn NewResolver(cache_files: bool) type {
std.mem.copy(u8, out, realpath);
out[out.len - 1] = '/';
pair.router.dir = out;
pair.router.routes_enabled = true;
}
}
@@ -488,17 +489,56 @@ pub fn NewResolver(cache_files: bool) type {
r.mutex.lock();
defer r.mutex.unlock();
const result = r.resolveWithoutSymlinks(source_dir, import_path, kind) catch |err| {
errdefer (r.flushDebugLogs(.fail) catch {});
var result = (try r.resolveWithoutSymlinks(source_dir, import_path, kind)) orelse {
r.flushDebugLogs(.fail) catch {};
return err;
return error.ModuleNotFound;
};
defer {
if (result == null) r.flushDebugLogs(.fail) catch {} else r.flushDebugLogs(.success) catch {};
try r.finalizeResult(&result);
r.flushDebugLogs(.success) catch {};
return result;
}
pub fn finalizeResult(r: *ThisResolver, result: *Result) !void {
if (result.package_json) |package_json| {
result.module_type = switch (package_json.module_type) {
.esm, .cjs => package_json.module_type,
.unknown => result.module_type,
};
}
return result orelse return error.ModuleNotFound;
var iter = result.path_pair.iter();
while (iter.next()) |path| {
var dir: *DirInfo = (r.readDirInfo(path.name.dir) catch continue) orelse continue;
if (dir.getEntries()) |entries| {
if (entries.get(path.name.filename)) |query| {
const symlink_path = query.entry.symlink(&r.fs.fs);
if (symlink_path.len > 0) {
path.non_symlink = path.text;
// Is this entry itself a symlink?
path.text = symlink_path;
path.name = Fs.PathName.init(path.text);
if (r.debug_logs) |*debug| {
debug.addNoteFmt("Resolved symlink \"{s}\" to \"{s}\"", .{ path.non_symlink, path.text }) catch {};
}
} else if (dir.abs_real_path.len > 0) {
path.non_symlink = path.text;
var parts = [_]string{ dir.abs_real_path, query.entry.base };
var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
var out = r.fs.absBuf(&parts, &buf);
const symlink = try Fs.FileSystem.FilenameStore.instance.append(@TypeOf(out), out);
if (r.debug_logs) |*debug| {
debug.addNoteFmt("Resolved symlink \"{s}\" to \"{s}\"", .{ symlink, path.text }) catch {};
}
query.entry.cache.symlink = symlink;
path.name = Fs.PathName.init(path.text);
}
}
}
}
}
pub fn resolveWithoutSymlinks(r: *ThisResolver, source_dir: string, import_path: string, kind: ast.ImportKind) !?Result {
@@ -716,7 +756,7 @@ pub fn NewResolver(cache_files: bool) type {
}
var iter = result.path_pair.iter();
while (iter.next()) |*path| {
while (iter.next()) |path| {
const dirname = std.fs.path.dirname(path.text) orelse continue;
const base_dir_info = ((r.dirInfoCached(dirname) catch null)) orelse continue;
const dir_info = base_dir_info.getEnclosingBrowserScope() orelse continue;
@@ -752,11 +792,13 @@ pub fn NewResolver(cache_files: bool) type {
}
// This is a fallback, hopefully not called often. It should be relatively quick because everything should be in the cache.
fn packageJSONForResolvedNodeModuleWithIgnoreMissingName(r: *ThisResolver, result: *const Result, comptime ignore_missing_name: bool) ?*const PackageJSON {
var current_dir = std.fs.path.dirname(result.path_pair.primary.text);
while (current_dir != null) {
var dir_info = (r.dirInfoCached(current_dir orelse unreachable) catch null) orelse return null;
fn packageJSONForResolvedNodeModuleWithIgnoreMissingName(
r: *ThisResolver,
result: *const Result,
comptime ignore_missing_name: bool,
) ?*const PackageJSON {
var dir_info = (r.dirInfoCached(result.path_pair.primary.name.dir) catch null) orelse return null;
while (true) {
if (dir_info.package_json) |pkg| {
// if it doesn't have a name, assume it's something just for adjusting the main fields (react-bootstrap does this)
// In that case, we really would like the top-level package that you download from NPM
@@ -765,13 +807,15 @@ pub fn NewResolver(cache_files: bool) type {
if (pkg.name.len > 0) {
return pkg;
}
} else {
return pkg;
}
}
current_dir = std.fs.path.dirname(current_dir.?);
dir_info = dir_info.getParent() orelse return null;
}
return null;
unreachable;
}
pub fn loadNodeModules(r: *ThisResolver, import_path: string, kind: ast.ImportKind, _dir_info: *DirInfo) ?MatchResult {
@@ -916,24 +960,31 @@ pub fn NewResolver(cache_files: bool) type {
r: *ThisResolver,
path: string,
) !?*DirInfo {
return try r.dirInfoCachedMaybeLog(path, true);
return try r.dirInfoCachedMaybeLog(path, true, true);
}
pub fn readDirInfo(
r: *ThisResolver,
path: string,
) !?*DirInfo {
return try r.dirInfoCachedMaybeLog(path, false);
return try r.dirInfoCachedMaybeLog(path, false, true);
}
pub fn readDirInfoIgnoreError(
r: *ThisResolver,
path: string,
) ?*const DirInfo {
return r.dirInfoCachedMaybeLog(path, false) catch null;
return r.dirInfoCachedMaybeLog(path, false, true) catch null;
}
inline fn dirInfoCachedMaybeLog(r: *ThisResolver, path: string, comptime enable_logging: bool) !?*DirInfo {
pub inline fn readDirInfoCacheOnly(
r: *ThisResolver,
path: string,
) ?*DirInfo {
return r.dir_cache.get(path);
}
inline fn dirInfoCachedMaybeLog(r: *ThisResolver, path: string, comptime enable_logging: bool, comptime follow_symlinks: bool) !?*DirInfo {
const top_result = try r.dir_cache.getOrPut(path);
if (top_result.status != .unknown) {
return r.dir_cache.atIndex(top_result.index);
@@ -1021,9 +1072,18 @@ pub fn NewResolver(cache_files: bool) type {
var _open_dir: anyerror!std.fs.Dir = undefined;
if (open_dir_count > 0) {
_open_dir = _open_dirs[open_dir_count - 1].openDir(std.fs.path.basename(queue_top.unsafe_path), .{ .iterate = true });
_open_dir = _open_dirs[open_dir_count - 1].openDir(
std.fs.path.basename(queue_top.unsafe_path),
.{ .iterate = true, .no_follow = !follow_symlinks },
);
} else {
_open_dir = std.fs.openDirAbsolute(queue_top.unsafe_path, .{ .iterate = true });
_open_dir = std.fs.openDirAbsolute(
queue_top.unsafe_path,
.{
.iterate = true,
.no_follow = !follow_symlinks,
},
);
}
const open_dir = _open_dir catch |err| {
@@ -1684,7 +1744,7 @@ pub fn NewResolver(cache_files: bool) type {
debug.addNoteFmt("Found file \"{s}\" ", .{base}) catch {};
}
const abs_path_parts = [_]string{ query.entry.dir, query.entry.base };
const abs_path = r.fs.filename_store.append(string, r.fs.joinBuf(&abs_path_parts, &TemporaryBuffer.ExtensionPathBuf)) catch unreachable;
const abs_path = r.fs.filename_store.append(string, r.fs.absBuf(&abs_path_parts, &TemporaryBuffer.ExtensionPathBuf)) catch unreachable;
return LoadResult{
.path = abs_path,

View File

@@ -69,6 +69,7 @@ pub fn NewWatcher(comptime ContextType: type) type {
ctx: ContextType,
allocator: *std.mem.Allocator,
watchloop_handle: ?std.Thread.Id = null,
cwd: string,
pub const HashType = u32;
@@ -86,6 +87,7 @@ pub fn NewWatcher(comptime ContextType: type) type {
.ctx = ctx,
.watchlist = Watchlist{},
.mutex = sync.Mutex.init(),
.cwd = fs.top_level_dir,
};
return watcher;
@@ -202,10 +204,11 @@ pub fn NewWatcher(comptime ContextType: type) type {
event.filter = std.os.EVFILT_VNODE;
// monitor:
// - Delete
// - Write
// - Metadata
// - Rename
// we should monitor:
// - Delete
event.fflags = std.os.NOTE_WRITE | std.os.NOTE_RENAME;
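// NOTE_WRITE fires when the file's contents change, NOTE_RENAME when it is renamed;
// deletes are not watched yet (see the comment above).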
// id
@@ -241,7 +244,11 @@ pub fn NewWatcher(comptime ContextType: type) type {
});
if (FeatureFlags.verbose_watcher) {
Output.prettyln("<r>Added <b>{s}<r> to watch list.", .{file_path});
if (strings.indexOf(file_path, this.cwd)) |i| {
Output.prettyln("<r><d>Added <b>./{s}<r><d> to watch list.<r>", .{file_path[i + this.cwd.len ..]});
} else {
Output.prettyln("<r><d>Added <b>{s}<r><d> to watch list.<r>", .{file_path});
}
}
}
};