mirror of https://github.com/oven-sh/bun, synced 2026-02-09 10:28:47 +00:00
Remove cache_files since it's unused and slows Bun compilation
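Why this helps: NewCache took a comptime bool and returned a brand-new struct type, and the codebase instantiated it twice (NewCache(true) and NewCache(false)), so the compiler had to analyze and generate code for every cache method twice even though the caching paths were never exercised. A minimal sketch of that duplication cost, with illustrative names rather than Bun's actual code:

const std = @import("std");

// Illustrative sketch only, not Bun's actual code. A comptime bool parameter
// makes NewCache(true) and NewCache(false) two distinct struct types, so the
// compiler analyzes and generates every method once per instantiation.
fn NewCache(comptime cache_files: bool) type {
    return struct {
        pub fn lookup(self: *@This(), key: []const u8) ?[]const u8 {
            _ = self;
            _ = key;
            if (comptime cache_files) {
                // cache-enabled path: consult an entries map under a mutex, etc.
                return null;
            }
            // cache-disabled path: always fall through to disk.
            return null;
        }
    };
}

const CachingCache = NewCache(true);
const PassthroughCache = NewCache(false);

pub fn main() void {
    var a = CachingCache{};
    var b = PassthroughCache{};
    _ = a.lookup("index.js");
    _ = b.lookup("index.js");
    // Two instantiations, two full copies of the type and its methods:
    std.debug.print("distinct types: {}\n", .{CachingCache != PassthroughCache});
}

Collapsing the parameter into a single concrete type means each method is compiled once, which is where the compile-time win comes from.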
src/bundler.zig: 4974 changes (file diff suppressed because it is too large)
src/cache.zig: 551 changes
@@ -28,328 +28,251 @@ pub const FsCacheEntry = struct {
 	}
 };

-pub fn NewCache(comptime cache_files: bool) type {
-    return struct {
 pub const Set = struct {
     js: JavaScript,
     fs: Fs,
     json: Json,

     pub fn init(allocator: *std.mem.Allocator) Set {
         return Set{
             .js = JavaScript.init(allocator),
             .fs = Fs{
-                .mutex = Mutex.init(),
-                .entries = std.StringHashMap(Fs.Entry).init(allocator),
                 .shared_buffer = MutableString.init(allocator, 0) catch unreachable,
             },
-            .json = Json{
-                .mutex = Mutex.init(),
-                .entries = std.StringHashMap(*Json.Entry).init(allocator),
-            },
+            .json = Json{},
         };
     }
 };

 pub const Fs = struct {
     const Entry = FsCacheEntry;

-    mutex: Mutex,
-    entries: std.StringHashMap(Entry),
     shared_buffer: MutableString,

-    pub fn deinit(c: *Fs) void {
-        var iter = c.entries.iterator();
-        while (iter.next()) |entry| {
-            entry.value.deinit(c.entries.allocator);
-        }
-        c.entries.deinit();
-    }
-
     pub fn readFileShared(
         c: *Fs,
         _fs: *fs.FileSystem,
         path: [:0]const u8,
         dirname_fd: StoredFileDescriptorType,
         _file_handle: ?StoredFileDescriptorType,
         shared: *MutableString,
     ) !Entry {
         var rfs = _fs.fs;

-        if (comptime cache_files) {
-            {
-                c.mutex.lock();
-                defer c.mutex.unlock();
-                if (c.entries.get(path)) |entry| {
-                    return entry;
-                }
-            }
-        }
-
         var file_handle: std.fs.File = if (_file_handle) |__file| std.fs.File{ .handle = __file } else undefined;

         if (_file_handle == null) {
             file_handle = try std.fs.openFileAbsoluteZ(path, .{ .read = true });
         }

         defer {
             if (rfs.needToCloseFiles() and _file_handle == null) {
                 file_handle.close();
             }
         }

         // If the file's modification key hasn't changed since it was cached, assume
         // the contents of the file are also the same and skip reading the file.
         var mod_key: ?fs.FileSystem.Implementation.ModKey = rfs.modKeyWithFile(path, file_handle) catch |err| handler: {
             switch (err) {
                 error.FileNotFound, error.AccessDenied => {
                     return err;
                 },
                 else => {
                     if (isDebug) {
                         Output.printError("modkey error: {s}", .{@errorName(err)});
                     }
                     break :handler null;
                 },
             }
         };

         var file: fs.File = undefined;
         if (mod_key) |modk| {
             file = rfs.readFileWithHandle(path, modk.size, file_handle, true, shared) catch |err| {
                 if (isDebug) {
                     Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
                 }
                 return err;
             };
         } else {
             file = rfs.readFileWithHandle(path, null, file_handle, true, shared) catch |err| {
                 if (isDebug) {
                     Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
                 }
                 return err;
             };
         }

-        const entry = Entry{
+        return Entry{
             .contents = file.contents,
             .mod_key = mod_key,
             .fd = if (FeatureFlags.store_file_descriptors) file_handle.handle else 0,
         };
-
-        if (comptime cache_files) {
-            c.mutex.lock();
-            defer c.mutex.unlock();
-            var res = c.entries.getOrPut(path) catch unreachable;
-
-            if (res.found_existing) {
-                res.value_ptr.*.deinit(c.entries.allocator);
-            }
-            res.value_ptr.* = entry;
-            return res.value_ptr.*;
-        } else {
-            return entry;
-        }
     }

     pub fn readFile(
         c: *Fs,
         _fs: *fs.FileSystem,
         path: string,
         dirname_fd: StoredFileDescriptorType,
         comptime use_shared_buffer: bool,
         _file_handle: ?StoredFileDescriptorType,
     ) !Entry {
         var rfs = _fs.fs;

-        if (comptime cache_files) {
-            {
-                c.mutex.lock();
-                defer c.mutex.unlock();
-                if (c.entries.get(path)) |entry| {
-                    return entry;
-                }
-            }
-        }
-
         var file_handle: std.fs.File = if (_file_handle) |__file| std.fs.File{ .handle = __file } else undefined;

         if (_file_handle == null) {
             if (FeatureFlags.store_file_descriptors and dirname_fd > 0) {
                 file_handle = std.fs.Dir.openFile(std.fs.Dir{ .fd = dirname_fd }, std.fs.path.basename(path), .{ .read = true }) catch |err| brk: {
                     switch (err) {
                         error.FileNotFound => {
                             const handle = try std.fs.openFileAbsolute(path, .{ .read = true });
                             Output.prettyErrorln(
                                 "<r><d>Internal error: directory mismatch for directory \"{s}\", fd {d}<r>. You don't need to do anything, but this indicates a bug.",
                                 .{ path, dirname_fd },
                             );
                             break :brk handle;
                         },
                         else => return err,
                     }
                 };
             } else {
                 file_handle = try std.fs.openFileAbsolute(path, .{ .read = true });
             }
         }

         defer {
             if (rfs.needToCloseFiles() and _file_handle == null) {
                 file_handle.close();
             }
         }

         // If the file's modification key hasn't changed since it was cached, assume
         // the contents of the file are also the same and skip reading the file.
         var mod_key: ?fs.FileSystem.Implementation.ModKey = rfs.modKeyWithFile(path, file_handle) catch |err| handler: {
             switch (err) {
                 error.FileNotFound, error.AccessDenied => {
                     return err;
                 },
                 else => {
                     if (isDebug) {
                         Output.printError("modkey error: {s}", .{@errorName(err)});
                     }
                     break :handler null;
                 },
             }
         };

         var file: fs.File = undefined;
         if (mod_key) |modk| {
             file = rfs.readFileWithHandle(path, modk.size, file_handle, use_shared_buffer, &c.shared_buffer) catch |err| {
                 if (isDebug) {
                     Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
                 }
                 return err;
             };
         } else {
             file = rfs.readFileWithHandle(path, null, file_handle, use_shared_buffer, &c.shared_buffer) catch |err| {
                 if (isDebug) {
                     Output.printError("{s}: readFile error -- {s}", .{ path, @errorName(err) });
                 }
                 return err;
             };
         }

-        const entry = Entry{
+        return Entry{
             .contents = file.contents,
             .mod_key = mod_key,
             .fd = if (FeatureFlags.store_file_descriptors) file_handle.handle else 0,
         };
-
-        if (comptime cache_files) {
-            c.mutex.lock();
-            defer c.mutex.unlock();
-            var res = c.entries.getOrPut(path) catch unreachable;
-
-            if (res.found_existing) {
-                res.value_ptr.*.deinit(c.entries.allocator);
-            }
-            res.value_ptr.* = entry;
-            return res.value_ptr.*;
-        } else {
-            return entry;
-        }
     }
 };

 pub const Css = struct {
     pub const Entry = struct {};
     pub const Result = struct {
         ok: bool,
         value: void,
     };
     pub fn parse(cache: *@This(), log: *logger.Log, source: logger.Source) !Result {
         Global.notimpl();
     }
 };

 pub const JavaScript = struct {
-    mutex: Mutex,
-    entries: std.StringHashMap(Result),
-
     pub const Result = js_ast.Result;

     pub fn init(allocator: *std.mem.Allocator) JavaScript {
-        return JavaScript{ .mutex = Mutex.init(), .entries = std.StringHashMap(Result).init(allocator) };
+        return JavaScript{};
     }
     // For now, we're not going to cache JavaScript ASTs.
     // It's probably only relevant when bundling for production.
     pub fn parse(
         cache: *@This(),
         allocator: *std.mem.Allocator,
         opts: js_parser.Parser.Options,
         defines: *Define,
         log: *logger.Log,
         source: *const logger.Source,
     ) anyerror!?js_ast.Ast {
         var temp_log = logger.Log.init(allocator);
         defer temp_log.appendToMaybeRecycled(log, source) catch {};
         var parser = js_parser.Parser.init(opts, &temp_log, source, defines, allocator) catch |err| {
             return null;
         };

         const result = try parser.parse();

         return if (result.ok) result.ast else null;
     }

     pub fn scan(
         cache: *@This(),
         allocator: *std.mem.Allocator,
         scan_pass_result: *js_parser.ScanPassResult,
         opts: js_parser.Parser.Options,
         defines: *Define,
         log: *logger.Log,
         source: *const logger.Source,
     ) anyerror!void {
         var temp_log = logger.Log.init(allocator);
         defer temp_log.appendToMaybeRecycled(log, source) catch {};

         var parser = js_parser.Parser.init(opts, &temp_log, source, defines, allocator) catch |err| {
             return;
         };

         return try parser.scanImports(scan_pass_result);
     }
 };

 pub const Json = struct {
-    pub const Entry = struct {
-        is_tsconfig: bool = false,
-        source: logger.Source,
-        expr: ?js_ast.Expr = null,
-        ok: bool = false,
-        // msgs: []logger.Msg,
-    };
-    mutex: Mutex,
-    entries: std.StringHashMap(*Entry),
     pub fn init(allocator: *std.mem.Allocator) Json {
-        return Json{
-            .mutex = Mutex.init(),
-            .entries = std.StringHashMap(Entry).init(allocator),
-        };
+        return Json{};
     }
     fn parse(cache: *@This(), log: *logger.Log, source: logger.Source, allocator: *std.mem.Allocator, is_tsconfig: bool, func: anytype) anyerror!?js_ast.Expr {
         var temp_log = logger.Log.init(allocator);
         defer {
             temp_log.appendTo(log) catch {};
         }
         return func(&source, &temp_log, allocator) catch handler: {
             break :handler null;
         };
     }
     pub fn parseJSON(cache: *@This(), log: *logger.Log, source: logger.Source, allocator: *std.mem.Allocator) anyerror!?js_ast.Expr {
         return try parse(cache, log, source, allocator, false, json_parser.ParseJSON);
     }

     pub fn parseTSConfig(cache: *@This(), log: *logger.Log, source: logger.Source, allocator: *std.mem.Allocator) anyerror!?js_ast.Expr {
         return try parse(cache, log, source, allocator, true, json_parser.ParseTSConfig);
     }
 };
-    };
-}
-
-pub const Cache = NewCache(true);
-pub const ServeCache = NewCache(false);
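The surviving Fs cache is, in effect, no cache at all: no entries map, no mutex, every call reads straight from disk into one per-cache shared buffer that grows to fit the largest file read so far. A self-contained sketch of that shape, written against the same Zig era as the diff above (note the *std.mem.Allocator pointers); FsReader is a hypothetical name, with std.ArrayList standing in for Bun's MutableString:

const std = @import("std");

// Hypothetical standalone sketch of the surviving design: no entries map,
// no lock, just one reusable buffer that grows to fit the largest file read.
const FsReader = struct {
    shared_buffer: std.ArrayList(u8),

    pub fn init(allocator: *std.mem.Allocator) FsReader {
        return FsReader{ .shared_buffer = std.ArrayList(u8).init(allocator) };
    }

    pub fn deinit(self: *FsReader) void {
        self.shared_buffer.deinit();
    }

    pub fn readFile(self: *FsReader, path: []const u8) ![]const u8 {
        var file = try std.fs.cwd().openFile(path, .{ .read = true });
        defer file.close();

        const size = @intCast(usize, (try file.stat()).size);
        try self.shared_buffer.resize(size);
        const read = try file.readAll(self.shared_buffer.items);
        // The returned slice aliases shared_buffer: it is only valid until
        // the next readFile call, so callers that keep contents must copy.
        return self.shared_buffer.items[0..read];
    }
};

As with the real readFile's use_shared_buffer path, the trade-off is that each result is invalidated by the next read, which is fine for a bundler pipeline that parses each file before moving on.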
@@ -33,7 +33,7 @@ pub const BuildCommand = struct {
 				);
 			},
 			.lazy => {
-				result = try bundler.ServeBundler.bundle(
+				result = try bundler.Bundler.bundle(
 					ctx.allocator,
 					ctx.log,
 					ctx.args,
@@ -38,7 +38,7 @@ const ServerBundleGeneratorThread = struct {
 	route_conf_: ?Api.LoadedRouteConfig,
 	router: ?Router,
 ) !void {
-	var server_bundler = try bundler.ServeBundler.init(
+	var server_bundler = try bundler.Bundler.init(
 		allocator_,
 		logs,
 		try configureTransformOptionsForBun(allocator_, transform_args),
@@ -53,7 +53,7 @@ const ServerBundleGeneratorThread = struct {
 		return err;
 	};
 	var estimated_input_lines_of_code: usize = 0;
-	_ = try bundler.ServeBundler.GenerateNodeModuleBundle.generate(
+	_ = try bundler.Bundler.GenerateNodeModuleBundle.generate(
 		&server_bundler,
 		allocator_,
 		server_conf,
@@ -99,7 +99,7 @@ pub const BunCommand = struct {
 	var log = ctx.log;
 	estimated_input_lines_of_code_ = 0;

-	var this_bundler = try bundler.ServeBundler.init(allocator, log, ctx.args, null, null);
+	var this_bundler = try bundler.Bundler.init(allocator, log, ctx.args, null, null);
 	this_bundler.configureLinker();
 	var filepath: [*:0]const u8 = "node_modules.bun";
 	var server_bundle_filepath: [*:0]const u8 = "node_modules.server.bun";
@@ -170,7 +170,7 @@ pub const BunCommand = struct {

 	// Always generate the client-only bundle
 	// we can revisit this decision if people ask
-	var node_modules_ = try bundler.ServeBundler.GenerateNodeModuleBundle.generate(
+	var node_modules_ = try bundler.Bundler.GenerateNodeModuleBundle.generate(
 		&this_bundler,
 		allocator,
 		loaded_framework,
@@ -40,7 +40,7 @@ const Request = picohttp.Request;
 const Response = picohttp.Response;
 pub const Headers = picohttp.Headers;
 pub const MimeType = @import("./http/mime_type.zig");
-const Bundler = bundler.ServeBundler;
+const Bundler = bundler.Bundler;
 const Websocket = @import("./http/websocket.zig");
 const js_printer = @import("./js_printer.zig");
 const SOCKET_FLAGS = os.SOCK_CLOEXEC;
@@ -7,7 +7,7 @@ const NodeModuleBundle = @import("../../node_module_bundle.zig").NodeModuleBundl
 const logger = @import("../../logger.zig");
 const Api = @import("../../api/schema.zig").Api;
 const options = @import("../../options.zig");
-const Bundler = @import("../../bundler.zig").ServeBundler;
+const Bundler = @import("../../bundler.zig").Bundler;
 const ServerEntryPoint = @import("../../bundler.zig").ServerEntryPoint;
 const js_printer = @import("../../js_printer.zig");
 const js_parser = @import("../../js_parser.zig");
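The hunks above are mechanical renames from bundler.ServeBundler to bundler.Bundler. The src/bundler.zig diff itself is suppressed, so the exact change there is not visible; the rename pattern suggests ServeBundler was the cache-disabled instantiation of the same comptime-parameterized factory, and that something like this alias pair was deleted (assumed shape, not the actual source):

// Assumed shape of what src/bundler.zig removed; its 4974-line diff is
// suppressed above, so names and structure here are conjecture.
pub fn NewBundler(comptime cache_files: bool) type {
    return struct {
        pub const cache_files_enabled = cache_files;
        // ... full bundler implementation, previously compiled once per instantiation ...
    };
}

pub const Bundler = NewBundler(true);
pub const ServeBundler = NewBundler(false);

With a single concrete Bundler type, every ServeBundler call site collapses to Bundler, which is exactly what the hunks above show.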
src/linker.zig: 1160 changes (file diff suppressed because it is too large)