mirror of
https://github.com/oven-sh/bun
synced 2026-02-17 22:32:06 +00:00
Compare commits
21 Commits
claude/fix
...
claude/fix
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c0862efcd9 | ||
|
|
5145714720 | ||
|
|
15f2e7d367 | ||
|
|
87b8522412 | ||
|
|
84a4c468bb | ||
|
|
95130eee87 | ||
|
|
e75de029a0 | ||
|
|
e34ba78ac2 | ||
|
|
35983fc86d | ||
|
|
3e4777de85 | ||
|
|
afcd62b237 | ||
|
|
1b335fc02d | ||
|
|
8d71c5ea66 | ||
|
|
1b023c5e5f | ||
|
|
d55260233c | ||
|
|
9dc92fff39 | ||
|
|
dd4b86a1bd | ||
|
|
31088e4575 | ||
|
|
0e52d24bc4 | ||
|
|
b93468ca48 | ||
|
|
35e9f3d4a2 |
@@ -443,6 +443,7 @@ src/bundler/linker_context/writeOutputFilesToDisk.zig
|
||||
src/bundler/LinkerContext.zig
|
||||
src/bundler/LinkerGraph.zig
|
||||
src/bundler/ParseTask.zig
|
||||
src/bundler/PathToSourceIndexMap.zig
|
||||
src/bundler/ServerComponentParseTask.zig
|
||||
src/bundler/ThreadPool.zig
|
||||
src/bunfig.zig
|
||||
|
||||
@@ -1496,9 +1496,8 @@ pub fn IncrementalGraph(comptime side: bake.Side) type {
|
||||
|
||||
// Additionally, clear the cached entry of the file from the path to
|
||||
// source index map.
|
||||
const hash = bun.hash(abs_path);
|
||||
for (&bv2.graph.build_graphs.values) |*map| {
|
||||
_ = map.remove(hash);
|
||||
_ = map.remove(abs_path);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1114,13 +1114,12 @@ pub const TextChunk = struct {
|
||||
}
|
||||
|
||||
fn contentHandler(this: *TextChunk, comptime Callback: (fn (*LOLHTML.TextChunk, []const u8, bool) LOLHTML.Error!void), thisObject: JSValue, globalObject: *JSGlobalObject, content: ZigString, contentOptions: ?ContentOptions) JSValue {
|
||||
if (this.text_chunk == null)
|
||||
return .js_undefined;
|
||||
const text_chunk = this.text_chunk orelse return .js_undefined;
|
||||
var content_slice = content.toSlice(bun.default_allocator);
|
||||
defer content_slice.deinit();
|
||||
|
||||
Callback(
|
||||
this.text_chunk.?,
|
||||
text_chunk,
|
||||
content_slice.slice(),
|
||||
contentOptions != null and contentOptions.?.html,
|
||||
) catch return createLOLHTMLError(globalObject);
|
||||
@@ -1167,27 +1166,27 @@ pub const TextChunk = struct {
|
||||
_: *JSGlobalObject,
|
||||
callFrame: *jsc.CallFrame,
|
||||
) bun.JSError!JSValue {
|
||||
if (this.text_chunk == null)
|
||||
return .js_undefined;
|
||||
this.text_chunk.?.remove();
|
||||
const text_chunk = this.text_chunk orelse return .js_undefined;
|
||||
text_chunk.remove();
|
||||
return callFrame.this();
|
||||
}
|
||||
|
||||
pub fn getText(
|
||||
this: *TextChunk,
|
||||
global: *JSGlobalObject,
|
||||
) JSValue {
|
||||
if (this.text_chunk == null)
|
||||
return .js_undefined;
|
||||
return ZigString.init(this.text_chunk.?.getContent().slice()).withEncoding().toJS(global);
|
||||
) bun.JSError!JSValue {
|
||||
const text_chunk = this.text_chunk orelse return .js_undefined;
|
||||
return bun.String.createUTF8ForJS(global, text_chunk.getContent().slice());
|
||||
}
|
||||
|
||||
pub fn removed(this: *TextChunk, _: *JSGlobalObject) JSValue {
|
||||
return JSValue.jsBoolean(this.text_chunk.?.isRemoved());
|
||||
const text_chunk = this.text_chunk orelse return .js_undefined;
|
||||
return JSValue.jsBoolean(text_chunk.isRemoved());
|
||||
}
|
||||
|
||||
pub fn lastInTextNode(this: *TextChunk, _: *JSGlobalObject) JSValue {
|
||||
return JSValue.jsBoolean(this.text_chunk.?.isLastInTextNode());
|
||||
const text_chunk = this.text_chunk orelse return .js_undefined;
|
||||
return JSValue.jsBoolean(text_chunk.isLastInTextNode());
|
||||
}
|
||||
|
||||
pub fn finalize(this: *TextChunk) void {
|
||||
|
||||
@@ -34,7 +34,7 @@ pending_items: u32 = 0,
|
||||
deferred_pending: u32 = 0,
|
||||
|
||||
/// A map of build targets to their corresponding module graphs.
|
||||
build_graphs: std.EnumArray(options.Target, PathToSourceIndexMap) = .initFill(.{}),
|
||||
build_graphs: std.EnumArray(options.Target, PathToSourceIndexMap),
|
||||
|
||||
/// When Server Components is enabled, this holds a list of all boundary
|
||||
/// files. This happens for all files with a "use <side>" directive.
|
||||
@@ -62,8 +62,14 @@ additional_output_files: std.ArrayListUnmanaged(options.OutputFile) = .{},
|
||||
kit_referenced_server_data: bool,
|
||||
kit_referenced_client_data: bool,
|
||||
|
||||
/// Do any input_files have a secondary_path.len > 0?
|
||||
///
|
||||
/// Helps skip a loop.
|
||||
has_any_secondary_paths: bool = false,
|
||||
|
||||
pub const InputFile = struct {
|
||||
source: Logger.Source,
|
||||
secondary_path: []const u8 = "",
|
||||
loader: options.Loader = options.Loader.file,
|
||||
side_effects: _resolver.SideEffects,
|
||||
allocator: std.mem.Allocator = bun.default_allocator,
|
||||
|
||||
@@ -304,7 +304,7 @@ pub const LinkerContext = struct {
|
||||
|
||||
for (server_source_indices.slice()) |html_import| {
|
||||
const source = &input_files[html_import];
|
||||
const source_index = map.get(source.path.hashKey()) orelse {
|
||||
const source_index = map.get(source.path.text) orelse {
|
||||
@panic("Assertion failed: HTML import file not found in pathToSourceIndexMap");
|
||||
};
|
||||
|
||||
|
||||
@@ -319,13 +319,12 @@ pub fn load(
|
||||
for (this.reachable_files) |source_id| {
|
||||
for (import_records_list[source_id.get()].slice()) |*import_record| {
|
||||
if (import_record.source_index.isValid() and this.is_scb_bitset.isSet(import_record.source_index.get())) {
|
||||
import_record.source_index = Index.init(
|
||||
scb.getReferenceSourceIndex(import_record.source_index.get()) orelse
|
||||
// If this gets hit, might be fine to switch this to `orelse continue`
|
||||
// not confident in this assertion
|
||||
Output.panic("Missing SCB boundary for file #{d}", .{import_record.source_index.get()}),
|
||||
);
|
||||
bun.assert(import_record.source_index.isValid()); // did not generate
|
||||
// Only rewrite if this is an original SCB file, not a reference file
|
||||
if (scb.getReferenceSourceIndex(import_record.source_index.get())) |ref_index| {
|
||||
import_record.source_index = Index.init(ref_index);
|
||||
bun.assert(import_record.source_index.isValid()); // did not generate
|
||||
}
|
||||
// If it's already a reference file, leave it as-is
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
46
src/bundler/PathToSourceIndexMap.zig
Normal file
46
src/bundler/PathToSourceIndexMap.zig
Normal file
@@ -0,0 +1,46 @@
|
||||
const PathToSourceIndexMap = @This();
|
||||
|
||||
/// The lifetime of the keys are not owned by this map.
|
||||
///
|
||||
/// We assume it's arena allocated.
|
||||
map: Map = .{},
|
||||
|
||||
const Map = bun.StringHashMapUnmanaged(Index.Int);
|
||||
|
||||
pub fn getPath(this: *const PathToSourceIndexMap, path: *const Fs.Path) ?Index.Int {
|
||||
return this.get(path.text);
|
||||
}
|
||||
|
||||
pub fn get(this: *const PathToSourceIndexMap, text: []const u8) ?Index.Int {
|
||||
return this.map.get(text);
|
||||
}
|
||||
|
||||
pub fn putPath(this: *PathToSourceIndexMap, allocator: std.mem.Allocator, path: *const Fs.Path, value: Index.Int) bun.OOM!void {
|
||||
try this.map.put(allocator, path.text, value);
|
||||
}
|
||||
|
||||
pub fn put(this: *PathToSourceIndexMap, allocator: std.mem.Allocator, text: []const u8, value: Index.Int) bun.OOM!void {
|
||||
try this.map.put(allocator, text, value);
|
||||
}
|
||||
|
||||
pub fn getOrPutPath(this: *PathToSourceIndexMap, allocator: std.mem.Allocator, path: *const Fs.Path) bun.OOM!Map.GetOrPutResult {
|
||||
return this.getOrPut(allocator, path.text);
|
||||
}
|
||||
|
||||
pub fn getOrPut(this: *PathToSourceIndexMap, allocator: std.mem.Allocator, text: []const u8) bun.OOM!Map.GetOrPutResult {
|
||||
return try this.map.getOrPut(allocator, text);
|
||||
}
|
||||
|
||||
pub fn remove(this: *PathToSourceIndexMap, text: []const u8) bool {
|
||||
return this.map.remove(text);
|
||||
}
|
||||
|
||||
pub fn removePath(this: *PathToSourceIndexMap, path: *const Fs.Path) bool {
|
||||
return this.remove(path.text);
|
||||
}
|
||||
|
||||
const std = @import("std");
|
||||
|
||||
const bun = @import("bun");
|
||||
const Fs = bun.fs;
|
||||
const Index = bun.ast.Index;
|
||||
@@ -45,14 +45,15 @@
|
||||
pub const logPartDependencyTree = Output.scoped(.part_dep_tree, .visible);
|
||||
|
||||
pub const MangledProps = std.AutoArrayHashMapUnmanaged(Ref, []const u8);
|
||||
pub const PathToSourceIndexMap = std.HashMapUnmanaged(u64, Index.Int, IdentityContext(u64), 80);
|
||||
pub const PathToSourceIndexMap = @import("./PathToSourceIndexMap.zig");
|
||||
|
||||
pub const Watcher = bun.jsc.hot_reloader.NewHotReloader(BundleV2, EventLoop, true);
|
||||
|
||||
/// This assigns a concise, predictable, and unique `.pretty` attribute to a Path.
|
||||
/// DevServer relies on pretty paths for identifying modules, so they must be unique.
|
||||
pub fn genericPathWithPrettyInitialized(path: Fs.Path, target: options.Target, top_level_dir: string, allocator: std.mem.Allocator) !Fs.Path {
|
||||
var buf: bun.PathBuffer = undefined;
|
||||
const buf = bun.path_buffer_pool.get();
|
||||
defer bun.path_buffer_pool.put(buf);
|
||||
|
||||
const is_node = bun.strings.eqlComptime(path.namespace, "node");
|
||||
if (is_node and
|
||||
@@ -66,14 +67,16 @@ pub fn genericPathWithPrettyInitialized(path: Fs.Path, target: options.Target, t
|
||||
// the "node" namespace is also put through this code path so that the
|
||||
// "node:" prefix is not emitted.
|
||||
if (path.isFile() or is_node) {
|
||||
const rel = bun.path.relativePlatform(top_level_dir, path.text, .loose, false);
|
||||
const buf2 = if (target == .bake_server_components_ssr) bun.path_buffer_pool.get() else buf;
|
||||
defer if (target == .bake_server_components_ssr) bun.path_buffer_pool.put(buf2);
|
||||
const rel = bun.path.relativePlatformBuf(buf2, top_level_dir, path.text, .loose, false);
|
||||
var path_clone = path;
|
||||
// stack-allocated temporary is not leaked because dupeAlloc on the path will
|
||||
// move .pretty into the heap. that function also fixes some slash issues.
|
||||
if (target == .bake_server_components_ssr) {
|
||||
// the SSR graph needs different pretty names or else HMR mode will
|
||||
// confuse the two modules.
|
||||
path_clone.pretty = std.fmt.bufPrint(&buf, "ssr:{s}", .{rel}) catch buf[0..];
|
||||
path_clone.pretty = std.fmt.bufPrint(buf, "ssr:{s}", .{rel}) catch buf[0..];
|
||||
} else {
|
||||
path_clone.pretty = rel;
|
||||
}
|
||||
@@ -81,7 +84,7 @@ pub fn genericPathWithPrettyInitialized(path: Fs.Path, target: options.Target, t
|
||||
} else {
|
||||
// in non-file namespaces, standard filesystem rules do not apply.
|
||||
var path_clone = path;
|
||||
path_clone.pretty = std.fmt.bufPrint(&buf, "{s}{}:{s}", .{
|
||||
path_clone.pretty = std.fmt.bufPrint(buf, "{s}{}:{s}", .{
|
||||
if (target == .bake_server_components_ssr) "ssr:" else "",
|
||||
// make sure that a namespace including a colon wont collide with anything
|
||||
std.fmt.Formatter(fmtEscapedNamespace){ .data = path.namespace },
|
||||
@@ -469,6 +472,55 @@ pub const BundleV2 = struct {
|
||||
debug("Parsed {d} files, producing {d} ASTs", .{ this.graph.input_files.len, this.graph.ast.len });
|
||||
}
|
||||
|
||||
pub fn scanForSecondaryPaths(this: *BundleV2) void {
|
||||
if (!this.graph.has_any_secondary_paths) {
|
||||
|
||||
// Assert the boolean is accurate.
|
||||
if (comptime Environment.ci_assert) {
|
||||
for (this.graph.input_files.items(.secondary_path)) |secondary_path| {
|
||||
if (secondary_path.len > 0) {
|
||||
@panic("secondary_path is not empty");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// No dual package hazard. Do nothing.
|
||||
return;
|
||||
}
|
||||
|
||||
// Now that all files have been scanned, look for packages that are imported
|
||||
// both with "import" and "require". Rewrite any imports that reference the
|
||||
// "module" package.json field to the "main" package.json field instead.
|
||||
//
|
||||
// This attempts to automatically avoid the "dual package hazard" where a
|
||||
// package has both a CommonJS module version and an ECMAScript module
|
||||
// version and exports a non-object in CommonJS (often a function). If we
|
||||
// pick the "module" field and the package is imported with "require" then
|
||||
// code expecting a function will crash.
|
||||
const ast_import_records: []const ImportRecord.List = this.graph.ast.items(.import_records);
|
||||
const targets: []const options.Target = this.graph.ast.items(.target);
|
||||
const max_valid_source_index: Index = .init(this.graph.input_files.len);
|
||||
const secondary_paths: []const []const u8 = this.graph.input_files.items(.secondary_path);
|
||||
|
||||
for (ast_import_records, targets) |*ast_import_record_list, target| {
|
||||
const import_records: []ImportRecord = ast_import_record_list.slice();
|
||||
const path_to_source_index_map = this.pathToSourceIndexMap(target);
|
||||
for (import_records) |*import_record| {
|
||||
const source_index = import_record.source_index.get();
|
||||
if (source_index >= max_valid_source_index.get()) {
|
||||
continue;
|
||||
}
|
||||
const secondary_path = secondary_paths[source_index];
|
||||
if (secondary_path.len > 0) {
|
||||
const secondary_source_index = path_to_source_index_map.get(secondary_path) orelse continue;
|
||||
import_record.source_index = Index.init(secondary_source_index);
|
||||
// Keep path in sync for determinism, diagnostics, and dev tooling.
|
||||
import_record.path = this.graph.input_files.items(.source)[secondary_source_index].path;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// This runs on the Bundle Thread.
|
||||
pub fn runResolver(
|
||||
this: *BundleV2,
|
||||
@@ -477,7 +529,7 @@ pub const BundleV2 = struct {
|
||||
) void {
|
||||
const transpiler = this.transpilerForTarget(target);
|
||||
var had_busted_dir_cache: bool = false;
|
||||
var resolve_result = while (true) break transpiler.resolver.resolve(
|
||||
var resolve_result: _resolver.Result = while (true) break transpiler.resolver.resolve(
|
||||
Fs.PathName.init(import_record.source_file).dirWithTrailingSlash(),
|
||||
import_record.specifier,
|
||||
import_record.kind,
|
||||
@@ -594,19 +646,11 @@ pub const BundleV2 = struct {
|
||||
}
|
||||
path.assertPrettyIsValid();
|
||||
|
||||
var secondary_path_to_copy: ?Fs.Path = null;
|
||||
if (resolve_result.path_pair.secondary) |*secondary| {
|
||||
if (!secondary.is_disabled and
|
||||
secondary != path and
|
||||
!strings.eqlLong(secondary.text, path.text, true))
|
||||
{
|
||||
secondary_path_to_copy = bun.handleOom(secondary.dupeAlloc(this.allocator()));
|
||||
}
|
||||
}
|
||||
|
||||
const entry = bun.handleOom(this.pathToSourceIndexMap(target).getOrPut(this.allocator(), path.hashKey()));
|
||||
path.assertFilePathIsAbsolute();
|
||||
const entry = bun.handleOom(this.pathToSourceIndexMap(target).getOrPut(this.allocator(), path.text));
|
||||
if (!entry.found_existing) {
|
||||
path.* = bun.handleOom(this.pathWithPrettyInitialized(path.*, target));
|
||||
entry.key_ptr.* = path.text;
|
||||
const loader: Loader = brk: {
|
||||
const record: *ImportRecord = &this.graph.ast.items(.import_records)[import_record.importer_source_index].slice()[import_record.import_record_index];
|
||||
if (record.loader) |out_loader| {
|
||||
@@ -627,6 +671,18 @@ pub const BundleV2 = struct {
|
||||
entry.value_ptr.* = idx;
|
||||
out_source_index = Index.init(idx);
|
||||
|
||||
if (resolve_result.path_pair.secondary) |*secondary| {
|
||||
if (!secondary.is_disabled and
|
||||
secondary != path and
|
||||
!strings.eqlLong(secondary.text, path.text, true))
|
||||
{
|
||||
const secondary_path_to_copy = secondary.dupeAlloc(this.allocator()) catch |err| bun.handleOom(err);
|
||||
this.graph.input_files.items(.secondary_path)[idx] = secondary_path_to_copy.text;
|
||||
// Ensure the determinism pass runs.
|
||||
this.graph.has_any_secondary_paths = true;
|
||||
}
|
||||
}
|
||||
|
||||
// For non-javascript files, make all of these files share indices.
|
||||
// For example, it is silly to bundle index.css depended on by client+server twice.
|
||||
// It makes sense to separate these for JS because the target affects DCE
|
||||
@@ -656,7 +712,7 @@ pub const BundleV2 = struct {
|
||||
target: options.Target,
|
||||
) !void {
|
||||
// TODO: plugins with non-file namespaces
|
||||
const entry = try this.pathToSourceIndexMap(target).getOrPut(this.allocator(), bun.hash(path_slice));
|
||||
const entry = try this.pathToSourceIndexMap(target).getOrPut(this.allocator(), path_slice);
|
||||
if (entry.found_existing) {
|
||||
return;
|
||||
}
|
||||
@@ -673,6 +729,7 @@ pub const BundleV2 = struct {
|
||||
|
||||
path = bun.handleOom(this.pathWithPrettyInitialized(path, target));
|
||||
path.assertPrettyIsValid();
|
||||
entry.key_ptr.* = path.text;
|
||||
entry.value_ptr.* = source_index.get();
|
||||
bun.handleOom(this.graph.ast.append(this.allocator(), JSAst.empty));
|
||||
|
||||
@@ -712,7 +769,6 @@ pub const BundleV2 = struct {
|
||||
|
||||
pub fn enqueueEntryItem(
|
||||
this: *BundleV2,
|
||||
hash: ?u64,
|
||||
resolve: _resolver.Result,
|
||||
is_entry_point: bool,
|
||||
target: options.Target,
|
||||
@@ -720,7 +776,8 @@ pub const BundleV2 = struct {
|
||||
var result = resolve;
|
||||
var path = result.path() orelse return null;
|
||||
|
||||
const entry = try this.pathToSourceIndexMap(target).getOrPut(this.allocator(), hash orelse path.hashKey());
|
||||
path.assertFilePathIsAbsolute();
|
||||
const entry = try this.pathToSourceIndexMap(target).getOrPut(this.allocator(), path.text);
|
||||
if (entry.found_existing) {
|
||||
return null;
|
||||
}
|
||||
@@ -734,6 +791,7 @@ pub const BundleV2 = struct {
|
||||
|
||||
path.* = bun.handleOom(this.pathWithPrettyInitialized(path.*, target));
|
||||
path.assertPrettyIsValid();
|
||||
entry.key_ptr.* = path.text;
|
||||
entry.value_ptr.* = source_index.get();
|
||||
bun.handleOom(this.graph.ast.append(this.allocator(), JSAst.empty));
|
||||
|
||||
@@ -805,6 +863,7 @@ pub const BundleV2 = struct {
|
||||
.heap = heap,
|
||||
.kit_referenced_server_data = false,
|
||||
.kit_referenced_client_data = false,
|
||||
.build_graphs = .initFill(.{}),
|
||||
},
|
||||
.linker = .{
|
||||
.loop = event_loop,
|
||||
@@ -930,7 +989,7 @@ pub const BundleV2 = struct {
|
||||
|
||||
// try this.graph.entry_points.append(allocator, Index.runtime);
|
||||
try this.graph.ast.append(this.allocator(), JSAst.empty);
|
||||
try this.pathToSourceIndexMap(this.transpiler.options.target).put(this.allocator(), bun.hash("bun:wrap"), Index.runtime.get());
|
||||
try this.pathToSourceIndexMap(this.transpiler.options.target).put(this.allocator(), "bun:wrap", Index.runtime.get());
|
||||
var runtime_parse_task = try this.allocator().create(ParseTask);
|
||||
runtime_parse_task.* = rt.parse_task;
|
||||
runtime_parse_task.ctx = this;
|
||||
@@ -973,7 +1032,6 @@ pub const BundleV2 = struct {
|
||||
continue;
|
||||
|
||||
_ = try this.enqueueEntryItem(
|
||||
null,
|
||||
resolved,
|
||||
true,
|
||||
brk: {
|
||||
@@ -1041,13 +1099,13 @@ pub const BundleV2 = struct {
|
||||
};
|
||||
|
||||
if (flags.client) brk: {
|
||||
const source_index = try this.enqueueEntryItem(null, resolved, true, .browser) orelse break :brk;
|
||||
const source_index = try this.enqueueEntryItem(resolved, true, .browser) orelse break :brk;
|
||||
if (flags.css) {
|
||||
try data.css_data.putNoClobber(this.allocator(), Index.init(source_index), .{ .imported_on_server = false });
|
||||
}
|
||||
}
|
||||
if (flags.server) _ = try this.enqueueEntryItem(null, resolved, true, this.transpiler.options.target);
|
||||
if (flags.ssr) _ = try this.enqueueEntryItem(null, resolved, true, .bake_server_components_ssr);
|
||||
if (flags.server) _ = try this.enqueueEntryItem(resolved, true, this.transpiler.options.target);
|
||||
if (flags.ssr) _ = try this.enqueueEntryItem(resolved, true, .bake_server_components_ssr);
|
||||
}
|
||||
},
|
||||
.bake_production => {
|
||||
@@ -1067,7 +1125,7 @@ pub const BundleV2 = struct {
|
||||
continue;
|
||||
|
||||
// TODO: wrap client files so the exports arent preserved.
|
||||
_ = try this.enqueueEntryItem(null, resolved, true, target) orelse continue;
|
||||
_ = try this.enqueueEntryItem(resolved, true, target) orelse continue;
|
||||
}
|
||||
},
|
||||
}
|
||||
@@ -1444,6 +1502,8 @@ pub const BundleV2 = struct {
|
||||
return error.BuildFailed;
|
||||
}
|
||||
|
||||
this.scanForSecondaryPaths();
|
||||
|
||||
try this.processServerComponentManifestFiles();
|
||||
|
||||
const reachable_files = try this.findReachableFiles();
|
||||
@@ -1505,6 +1565,8 @@ pub const BundleV2 = struct {
|
||||
return error.BuildFailed;
|
||||
}
|
||||
|
||||
this.scanForSecondaryPaths();
|
||||
|
||||
try this.processServerComponentManifestFiles();
|
||||
|
||||
const reachable_files = try this.findReachableFiles();
|
||||
@@ -2265,7 +2327,7 @@ pub const BundleV2 = struct {
|
||||
const resolved = this.transpilerForTarget(target).resolveEntryPoint(resolve.import_record.specifier) catch {
|
||||
return;
|
||||
};
|
||||
const source_index = this.enqueueEntryItem(null, resolved, true, target) catch {
|
||||
const source_index = this.enqueueEntryItem(resolved, true, target) catch {
|
||||
return;
|
||||
};
|
||||
|
||||
@@ -2314,11 +2376,12 @@ pub const BundleV2 = struct {
|
||||
path.namespace = result.namespace;
|
||||
}
|
||||
|
||||
const existing = this.pathToSourceIndexMap(resolve.import_record.original_target).getOrPut(this.allocator(), path.hashKey()) catch unreachable;
|
||||
const existing = this.pathToSourceIndexMap(resolve.import_record.original_target)
|
||||
.getOrPutPath(this.allocator(), &path) catch |err| bun.handleOom(err);
|
||||
if (!existing.found_existing) {
|
||||
this.free_list.appendSlice(&.{ result.namespace, result.path }) catch {};
|
||||
|
||||
path = bun.handleOom(this.pathWithPrettyInitialized(path, resolve.import_record.original_target));
|
||||
existing.key_ptr.* = path.text;
|
||||
|
||||
// We need to parse this
|
||||
const source_index = Index.init(@as(u32, @intCast(this.graph.ast.len)));
|
||||
@@ -2482,6 +2545,8 @@ pub const BundleV2 = struct {
|
||||
return error.BuildFailed;
|
||||
}
|
||||
|
||||
this.scanForSecondaryPaths();
|
||||
|
||||
try this.processServerComponentManifestFiles();
|
||||
|
||||
this.graph.heap.helpCatchMemoryIssues();
|
||||
@@ -2953,7 +3018,7 @@ pub const BundleV2 = struct {
|
||||
estimated_resolve_queue_count += @as(usize, @intFromBool(!(import_record.is_internal or import_record.is_unused or import_record.source_index.isValid())));
|
||||
}
|
||||
var resolve_queue = ResolveQueue.init(this.allocator());
|
||||
bun.handleOom(resolve_queue.ensureTotalCapacity(estimated_resolve_queue_count));
|
||||
bun.handleOom(resolve_queue.ensureTotalCapacity(@intCast(estimated_resolve_queue_count)));
|
||||
|
||||
var last_error: ?anyerror = null;
|
||||
|
||||
@@ -3305,14 +3370,12 @@ pub const BundleV2 = struct {
|
||||
}
|
||||
}
|
||||
|
||||
const hash_key = path.hashKey();
|
||||
|
||||
const import_record_loader = import_record.loader orelse path.loader(&transpiler.options.loaders) orelse .file;
|
||||
import_record.loader = import_record_loader;
|
||||
|
||||
const is_html_entrypoint = import_record_loader == .html and target.isServerSide() and this.transpiler.options.dev_server == null;
|
||||
|
||||
if (this.pathToSourceIndexMap(target).get(hash_key)) |id| {
|
||||
if (this.pathToSourceIndexMap(target).get(path.text)) |id| {
|
||||
if (this.transpiler.options.dev_server != null and loader != .html) {
|
||||
import_record.path = this.graph.input_files.items(.source)[id].path;
|
||||
} else {
|
||||
@@ -3325,7 +3388,7 @@ pub const BundleV2 = struct {
|
||||
import_record.kind = .html_manifest;
|
||||
}
|
||||
|
||||
const resolve_entry = bun.handleOom(resolve_queue.getOrPut(hash_key));
|
||||
const resolve_entry = resolve_queue.getOrPut(path.text) catch |err| bun.handleOom(err);
|
||||
if (resolve_entry.found_existing) {
|
||||
import_record.path = resolve_entry.value_ptr.*.path;
|
||||
continue;
|
||||
@@ -3333,21 +3396,11 @@ pub const BundleV2 = struct {
|
||||
|
||||
path.* = bun.handleOom(this.pathWithPrettyInitialized(path.*, target));
|
||||
|
||||
var secondary_path_to_copy: ?Fs.Path = null;
|
||||
if (resolve_result.path_pair.secondary) |*secondary| {
|
||||
if (!secondary.is_disabled and
|
||||
secondary != path and
|
||||
!strings.eqlLong(secondary.text, path.text, true))
|
||||
{
|
||||
secondary_path_to_copy = bun.handleOom(secondary.dupeAlloc(this.allocator()));
|
||||
}
|
||||
}
|
||||
|
||||
import_record.path = path.*;
|
||||
resolve_entry.key_ptr.* = path.text;
|
||||
debug("created ParseTask: {s}", .{path.text});
|
||||
const resolve_task = bun.handleOom(bun.default_allocator.create(ParseTask));
|
||||
resolve_task.* = ParseTask.init(&resolve_result, Index.invalid, this);
|
||||
resolve_task.secondary_path_for_commonjs_interop = secondary_path_to_copy;
|
||||
|
||||
resolve_task.known_target = if (import_record.kind == .html_manifest)
|
||||
.browser
|
||||
@@ -3364,9 +3417,17 @@ pub const BundleV2 = struct {
|
||||
resolve_task.loader = import_record_loader;
|
||||
resolve_task.tree_shaking = transpiler.options.tree_shaking;
|
||||
resolve_entry.value_ptr.* = resolve_task;
|
||||
if (resolve_result.path_pair.secondary) |*secondary| {
|
||||
if (!secondary.is_disabled and
|
||||
secondary != path and
|
||||
!strings.eqlLong(secondary.text, path.text, true))
|
||||
{
|
||||
resolve_task.secondary_path_for_commonjs_interop = secondary.dupeAlloc(this.allocator()) catch |err| bun.handleOom(err);
|
||||
}
|
||||
}
|
||||
|
||||
if (is_html_entrypoint) {
|
||||
this.generateServerHTMLModule(path, target, import_record, hash_key) catch unreachable;
|
||||
this.generateServerHTMLModule(path, target, import_record, path.text) catch unreachable;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3387,7 +3448,7 @@ pub const BundleV2 = struct {
|
||||
return resolve_queue;
|
||||
}
|
||||
|
||||
fn generateServerHTMLModule(this: *BundleV2, path: *const Fs.Path, target: options.Target, import_record: *ImportRecord, hash_key: u64) !void {
|
||||
fn generateServerHTMLModule(this: *BundleV2, path: *const Fs.Path, target: options.Target, import_record: *ImportRecord, path_text: []const u8) !void {
|
||||
// 1. Create the ast right here
|
||||
// 2. Create a separate "virutal" module that becomes the manifest later on.
|
||||
// 3. Add it to the graph
|
||||
@@ -3434,12 +3495,12 @@ pub const BundleV2 = struct {
|
||||
try graph.ast.append(this.allocator(), ast_for_html_entrypoint);
|
||||
|
||||
import_record.source_index = fake_input_file.source.index;
|
||||
try this.pathToSourceIndexMap(target).put(this.allocator(), hash_key, fake_input_file.source.index.get());
|
||||
try this.pathToSourceIndexMap(target).put(this.allocator(), path_text, fake_input_file.source.index.get());
|
||||
try graph.html_imports.server_source_indices.push(this.allocator(), fake_input_file.source.index.get());
|
||||
this.ensureClientTranspiler();
|
||||
}
|
||||
|
||||
const ResolveQueue = std.AutoArrayHashMap(u64, *ParseTask);
|
||||
const ResolveQueue = bun.StringHashMap(*ParseTask);
|
||||
|
||||
pub fn onNotifyDefer(this: *BundleV2) void {
|
||||
this.thread_lock.assertLocked();
|
||||
@@ -3562,33 +3623,35 @@ pub const BundleV2 = struct {
|
||||
const path_to_source_index_map = this.pathToSourceIndexMap(result.ast.target);
|
||||
const original_target = result.ast.target;
|
||||
while (iter.next()) |entry| {
|
||||
const hash = entry.key_ptr.*;
|
||||
const value: *ParseTask = entry.value_ptr.*;
|
||||
|
||||
const loader = value.loader orelse value.path.loader(&this.transpiler.options.loaders) orelse options.Loader.file;
|
||||
|
||||
const is_html_entrypoint = loader == .html and original_target.isServerSide() and this.transpiler.options.dev_server == null;
|
||||
const map: *PathToSourceIndexMap = if (is_html_entrypoint) this.pathToSourceIndexMap(.browser) else path_to_source_index_map;
|
||||
const existing = map.getOrPut(this.allocator(), entry.key_ptr.*) catch unreachable;
|
||||
|
||||
const map = if (is_html_entrypoint) this.pathToSourceIndexMap(.browser) else path_to_source_index_map;
|
||||
var existing = map.getOrPut(this.allocator(), hash) catch unreachable;
|
||||
|
||||
// If the same file is imported and required, and those point to different files
|
||||
// Automatically rewrite it to the secondary one
|
||||
if (value.secondary_path_for_commonjs_interop) |secondary_path| {
|
||||
const secondary_hash = secondary_path.hashKey();
|
||||
if (map.get(secondary_hash)) |secondary| {
|
||||
existing.found_existing = true;
|
||||
existing.value_ptr.* = secondary;
|
||||
}
|
||||
}
|
||||
// Originally, we attempted to avoid the "dual package
|
||||
// hazard" right here by checking if pathToSourceIndexMap
|
||||
// already contained the secondary_path for the ParseTask.
|
||||
// That leads to a race condition where whichever parse task
|
||||
// completes first ends up being used in the bundle. So we
|
||||
// added `scanForSecondaryPaths` before `findReachableFiles`
|
||||
// to prevent that.
|
||||
//
|
||||
// It would be nice, in theory, to find a way to bring that
|
||||
// back because it means we can skip parsing the files we
|
||||
// don't end up using.
|
||||
//
|
||||
|
||||
if (!existing.found_existing) {
|
||||
var new_task: *ParseTask = value;
|
||||
var new_input_file = Graph.InputFile{
|
||||
.source = Logger.Source.initEmptyFile(new_task.path.text),
|
||||
.side_effects = value.side_effects,
|
||||
.secondary_path = if (value.secondary_path_for_commonjs_interop) |*secondary_path| secondary_path.text else "",
|
||||
};
|
||||
|
||||
graph.has_any_secondary_paths = graph.has_any_secondary_paths or new_input_file.secondary_path.len > 0;
|
||||
|
||||
new_input_file.source.index = Index.source(graph.input_files.len);
|
||||
new_input_file.source.path = new_task.path;
|
||||
|
||||
@@ -3656,7 +3719,7 @@ pub const BundleV2 = struct {
|
||||
}
|
||||
|
||||
for (import_records.slice(), 0..) |*record, i| {
|
||||
if (path_to_source_index_map.get(record.path.hashKey())) |source_index| {
|
||||
if (path_to_source_index_map.getPath(&record.path)) |source_index| {
|
||||
if (save_import_record_source_index or input_file_loaders[source_index] == .css)
|
||||
record.source_index.value = source_index;
|
||||
|
||||
@@ -3664,7 +3727,7 @@ pub const BundleV2 = struct {
|
||||
if (compare == @as(u32, @truncate(i))) {
|
||||
path_to_source_index_map.put(
|
||||
this.allocator(),
|
||||
result.source.path.hashKey(),
|
||||
result.source.path.text,
|
||||
source_index,
|
||||
) catch unreachable;
|
||||
}
|
||||
@@ -3720,7 +3783,7 @@ pub const BundleV2 = struct {
|
||||
|
||||
graph.pathToSourceIndexMap(result.ast.target).put(
|
||||
this.allocator(),
|
||||
result.source.path.hashKey(),
|
||||
result.source.path.text,
|
||||
reference_source_index,
|
||||
) catch |err| bun.handleOom(err);
|
||||
|
||||
@@ -4448,7 +4511,6 @@ pub const Graph = @import("./Graph.zig");
|
||||
const string = []const u8;
|
||||
|
||||
const options = @import("../options.zig");
|
||||
const IdentityContext = @import("../identity_context.zig").IdentityContext;
|
||||
|
||||
const bun = @import("bun");
|
||||
const Environment = bun.Environment;
|
||||
|
||||
@@ -1784,6 +1784,14 @@ pub const Path = struct {
|
||||
}
|
||||
}
|
||||
|
||||
pub inline fn assertFilePathIsAbsolute(path: *const Path) void {
|
||||
if (bun.Environment.ci_assert) {
|
||||
if (path.isFile()) {
|
||||
bun.assert(std.fs.path.isAbsolute(path.text));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub inline fn isPrettyPathPosix(path: *const Path) bool {
|
||||
if (!Environment.isWindows) return true;
|
||||
return bun.strings.indexOfChar(path.pretty, '\\') == null;
|
||||
|
||||
@@ -212,8 +212,10 @@ function hasProtocol(url: string) {
|
||||
"mysql",
|
||||
"mysql2",
|
||||
"mariadb",
|
||||
"mysqls",
|
||||
"file",
|
||||
"sqlite",
|
||||
"unix",
|
||||
];
|
||||
for (const protocol of protocols) {
|
||||
if (url.startsWith(protocol + "://")) {
|
||||
@@ -223,82 +225,416 @@ function hasProtocol(url: string) {
|
||||
return false;
|
||||
}
|
||||
|
||||
function defaultToPostgresIfNoProtocol(url: string | URL | null): URL {
|
||||
if (url instanceof URL) {
|
||||
return url;
|
||||
function getAdapterFromProtocol(protocol: string): Bun.SQL.__internal.Adapter | null {
|
||||
switch (protocol) {
|
||||
case "postgres":
|
||||
case "postgresql":
|
||||
return "postgres";
|
||||
case "mysql":
|
||||
case "mysql2":
|
||||
case "mariadb":
|
||||
case "mysqls":
|
||||
return "mysql";
|
||||
case "file":
|
||||
case "sqlite":
|
||||
return "sqlite";
|
||||
case "unix":
|
||||
return null; // Unix sockets require explicit adapter
|
||||
default:
|
||||
return null;
|
||||
}
|
||||
if (hasProtocol(url as string)) {
|
||||
return new URL(url as string);
|
||||
}
|
||||
return new URL("postgres://" + url);
|
||||
}
|
||||
|
||||
function determineAdapter(
|
||||
options: Bun.SQL.Options,
|
||||
urlString: string | URL | null,
|
||||
env?: Record<string, string | undefined>,
|
||||
): Bun.SQL.__internal.Adapter {
|
||||
// 1. Use explicit adapter if provided
|
||||
if (options.adapter) {
|
||||
const adapter = options.adapter;
|
||||
switch (adapter) {
|
||||
case "postgres":
|
||||
case "postgresql":
|
||||
return "postgres";
|
||||
case "mysql":
|
||||
case "mysql2":
|
||||
case "mariadb":
|
||||
return "mysql";
|
||||
case "sqlite":
|
||||
return "sqlite";
|
||||
default:
|
||||
throw new Error(`Unsupported adapter: ${adapter}. Supported adapters: "postgres", "sqlite", "mysql"`);
|
||||
}
|
||||
}
|
||||
|
||||
// 2. Infer from URL protocol if present
|
||||
if (urlString) {
|
||||
const urlStr = urlString instanceof URL ? urlString.href : urlString;
|
||||
|
||||
// Check for SQLite URLs first
|
||||
if (parseDefinitelySqliteUrl(urlStr) !== null) {
|
||||
return "sqlite";
|
||||
}
|
||||
|
||||
// Extract protocol
|
||||
const colonIndex = urlStr.indexOf(":");
|
||||
if (colonIndex !== -1) {
|
||||
const protocol = urlStr.substring(0, colonIndex);
|
||||
const adapterFromProtocol = getAdapterFromProtocol(protocol);
|
||||
if (adapterFromProtocol) {
|
||||
return adapterFromProtocol;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 3. If no URL provided, check environment variables to infer adapter
|
||||
// Respect precedence: POSTGRES_URL > DATABASE_URL > PGURL > PG_URL > MYSQL_URL
|
||||
if (!urlString && env) {
|
||||
// Check in order of precedence (including TLS variants)
|
||||
const envVars = [
|
||||
{ name: "POSTGRES_URL", url: env.POSTGRES_URL },
|
||||
{ name: "TLS_POSTGRES_DATABASE_URL", url: env.TLS_POSTGRES_DATABASE_URL },
|
||||
{ name: "DATABASE_URL", url: env.DATABASE_URL },
|
||||
{ name: "TLS_DATABASE_URL", url: env.TLS_DATABASE_URL },
|
||||
{ name: "PGURL", url: env.PGURL },
|
||||
{ name: "PG_URL", url: env.PG_URL },
|
||||
{ name: "MYSQL_URL", url: env.MYSQL_URL },
|
||||
{ name: "TLS_MYSQL_DATABASE_URL", url: env.TLS_MYSQL_DATABASE_URL },
|
||||
];
|
||||
|
||||
for (const { name, url: envUrl } of envVars) {
|
||||
if (envUrl) {
|
||||
// Check for SQLite URLs first (special case)
|
||||
if (parseDefinitelySqliteUrl(envUrl) !== null) {
|
||||
return "sqlite";
|
||||
}
|
||||
|
||||
// Environment variable name takes precedence over protocol
|
||||
if (name === "MYSQL_URL" || name === "TLS_MYSQL_DATABASE_URL") {
|
||||
return "mysql";
|
||||
} else if (
|
||||
name === "POSTGRES_URL" ||
|
||||
name === "TLS_POSTGRES_DATABASE_URL" ||
|
||||
name === "PGURL" ||
|
||||
name === "PG_URL"
|
||||
) {
|
||||
return "postgres";
|
||||
}
|
||||
|
||||
// For generic DATABASE_URL and TLS_DATABASE_URL, use protocol detection as fallback
|
||||
if (name === "DATABASE_URL" || name === "TLS_DATABASE_URL") {
|
||||
const colonIndex = envUrl.indexOf(":");
|
||||
if (colonIndex !== -1) {
|
||||
const protocol = envUrl.substring(0, colonIndex);
|
||||
const adapterFromProtocol = getAdapterFromProtocol(protocol);
|
||||
if (adapterFromProtocol) {
|
||||
return adapterFromProtocol;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// No adapter inferred from this env var; continue to lower-precedence vars
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Default to postgres if no explicit adapter or protocol
|
||||
return "postgres";
|
||||
}
|
||||
|
||||
function getEnvironmentUrlsForAdapter(adapter: Bun.SQL.__internal.Adapter, env: Record<string, string | undefined>) {
|
||||
const urls: (string | undefined)[] = [];
|
||||
|
||||
if (adapter === "postgres") {
|
||||
urls.push(env.POSTGRES_URL, env.DATABASE_URL, env.PGURL, env.PG_URL);
|
||||
// Also check TLS variants
|
||||
urls.push(env.TLS_POSTGRES_DATABASE_URL, env.TLS_DATABASE_URL);
|
||||
} else if (adapter === "mysql") {
|
||||
urls.push(env.MYSQL_URL, env.DATABASE_URL);
|
||||
// Also check TLS variants
|
||||
urls.push(env.TLS_MYSQL_DATABASE_URL, env.TLS_DATABASE_URL);
|
||||
} else if (adapter === "sqlite") {
|
||||
urls.push(env.DATABASE_URL);
|
||||
}
|
||||
|
||||
return urls.filter((url): url is string => typeof url === "string" && url.length > 0);
|
||||
}
|
||||
|
||||
function getAdapterSpecificDefaults(adapter: Bun.SQL.__internal.Adapter, env: Record<string, string | undefined>) {
|
||||
const defaults: {
|
||||
hostname?: string;
|
||||
port?: number;
|
||||
username?: string;
|
||||
password?: string;
|
||||
database?: string;
|
||||
} = {};
|
||||
|
||||
if (adapter === "postgres") {
|
||||
defaults.hostname = env.PGHOST;
|
||||
defaults.port = env.PGPORT ? Number(env.PGPORT) : undefined;
|
||||
defaults.username = env.PGUSERNAME || env.PGUSER || env.USER || env.USERNAME;
|
||||
defaults.password = env.PGPASSWORD;
|
||||
defaults.database = env.PGDATABASE;
|
||||
} else if (adapter === "mysql") {
|
||||
defaults.hostname = env.MYSQL_HOST;
|
||||
defaults.port = env.MYSQL_PORT ? Number(env.MYSQL_PORT) : undefined;
|
||||
defaults.username = env.MYSQL_USER || env.USER || env.USERNAME;
|
||||
defaults.password = env.MYSQL_PASSWORD;
|
||||
defaults.database = env.MYSQL_DATABASE;
|
||||
} else if (adapter === "sqlite") {
|
||||
// SQLite doesn't use these connection parameters
|
||||
}
|
||||
|
||||
return defaults;
|
||||
}
|
||||
|
||||
function parseOptions(
|
||||
stringOrUrlOrOptions: Bun.SQL.Options | string | URL | undefined,
|
||||
definitelyOptionsButMaybeEmpty: Bun.SQL.Options,
|
||||
): Bun.SQL.__internal.DefinedOptions {
|
||||
const env = Bun.env;
|
||||
|
||||
let [
|
||||
stringOrUrl = env.POSTGRES_URL || env.DATABASE_URL || env.PGURL || env.PG_URL || env.MYSQL_URL || null,
|
||||
options,
|
||||
]: [string | URL | null, Bun.SQL.Options] =
|
||||
typeof stringOrUrlOrOptions === "string" || stringOrUrlOrOptions instanceof URL
|
||||
? [stringOrUrlOrOptions, definitelyOptionsButMaybeEmpty]
|
||||
: stringOrUrlOrOptions
|
||||
? [null, { ...stringOrUrlOrOptions, ...definitelyOptionsButMaybeEmpty }]
|
||||
: [null, definitelyOptionsButMaybeEmpty];
|
||||
// Step 1: Determine input string/URL and options
|
||||
let inputUrl: string | URL | null = null;
|
||||
let options: Bun.SQL.Options;
|
||||
|
||||
if (options.adapter === undefined && stringOrUrl !== null) {
|
||||
const sqliteUrl = parseDefinitelySqliteUrl(stringOrUrl);
|
||||
if (typeof stringOrUrlOrOptions === "string" || stringOrUrlOrOptions instanceof URL) {
|
||||
inputUrl = stringOrUrlOrOptions;
|
||||
options = definitelyOptionsButMaybeEmpty;
|
||||
} else if (stringOrUrlOrOptions) {
|
||||
options = { ...stringOrUrlOrOptions, ...definitelyOptionsButMaybeEmpty };
|
||||
inputUrl = options.url || null;
|
||||
} else {
|
||||
options = definitelyOptionsButMaybeEmpty;
|
||||
}
|
||||
|
||||
if (sqliteUrl !== null) {
|
||||
const sqliteOptions: Bun.SQL.__internal.DefinedSQLiteOptions = {
|
||||
...options,
|
||||
adapter: "sqlite",
|
||||
filename: sqliteUrl,
|
||||
};
|
||||
// Step 2: Determine the adapter (without reading environment variables yet)
|
||||
const adapter = determineAdapter(options, inputUrl, env);
|
||||
|
||||
return parseSQLiteOptionsWithQueryParams(sqliteOptions, stringOrUrl);
|
||||
// Step 2.5: Validate adapter matches protocol if URL is provided
|
||||
if (inputUrl) {
|
||||
let urlToValidate: URL | null;
|
||||
if (typeof inputUrl === "string") {
|
||||
// Parse the URL for validation - handle SQLite URLs specially
|
||||
if (parseDefinitelySqliteUrl(inputUrl) !== null) {
|
||||
// Create a fake URL for SQLite validation
|
||||
urlToValidate = new URL("sqlite:///" + encodeURIComponent(inputUrl));
|
||||
} else if (hasProtocol(inputUrl)) {
|
||||
// Only validate URLs that have protocols
|
||||
urlToValidate = parseUrlForAdapter(inputUrl, adapter);
|
||||
} else {
|
||||
// For URLs without protocols, skip validation (could be filenames)
|
||||
urlToValidate = null;
|
||||
}
|
||||
} else {
|
||||
urlToValidate = inputUrl;
|
||||
}
|
||||
|
||||
if (urlToValidate) {
|
||||
validateAdapterProtocolMatch(adapter, urlToValidate, inputUrl);
|
||||
}
|
||||
}
|
||||
|
||||
if (options.adapter === "sqlite") {
|
||||
let filenameFromOptions = options.filename || stringOrUrl;
|
||||
// Handle SQLite early since it has different logic
|
||||
if (adapter === "sqlite") {
|
||||
return handleSQLiteOptions(options, inputUrl, env);
|
||||
}
|
||||
|
||||
// Parse sqlite:// URLs when adapter is explicitly sqlite
|
||||
if (typeof filenameFromOptions === "string" || filenameFromOptions instanceof URL) {
|
||||
const parsed = parseDefinitelySqliteUrl(filenameFromOptions);
|
||||
if (parsed !== null) {
|
||||
filenameFromOptions = parsed;
|
||||
// Step 3: Get the appropriate URL for this adapter
|
||||
let finalUrl: URL | null = null;
|
||||
let sslMode: SSLMode = SSLMode.disable;
|
||||
|
||||
if (inputUrl) {
|
||||
// User provided a URL directly
|
||||
finalUrl = inputUrl instanceof URL ? inputUrl : parseUrlForAdapter(inputUrl, adapter);
|
||||
} else {
|
||||
// Look for environment URLs appropriate for this adapter
|
||||
// Only use environment URLs if no explicit connection options are provided
|
||||
const hasExplicitConnectionOptions = !!(
|
||||
options.hostname ||
|
||||
options.host ||
|
||||
options.port ||
|
||||
options.username ||
|
||||
options.user ||
|
||||
options.password ||
|
||||
options.pass ||
|
||||
options.database ||
|
||||
options.db
|
||||
);
|
||||
|
||||
if (!hasExplicitConnectionOptions) {
|
||||
const envUrls = getEnvironmentUrlsForAdapter(adapter, env);
|
||||
const envUrl = envUrls[0]; // Get first available URL
|
||||
|
||||
if (envUrl) {
|
||||
// Check if it's a TLS URL that sets SSL mode
|
||||
if (
|
||||
envUrl === env.TLS_POSTGRES_DATABASE_URL ||
|
||||
envUrl === env.TLS_DATABASE_URL ||
|
||||
envUrl === env.TLS_MYSQL_DATABASE_URL
|
||||
) {
|
||||
sslMode = SSLMode.require;
|
||||
}
|
||||
finalUrl = parseUrlForAdapter(envUrl, adapter);
|
||||
}
|
||||
}
|
||||
|
||||
const sqliteOptions: Bun.SQL.__internal.DefinedSQLiteOptions = {
|
||||
...options,
|
||||
adapter: "sqlite",
|
||||
filename: filenameFromOptions || ":memory:",
|
||||
};
|
||||
|
||||
return parseSQLiteOptionsWithQueryParams(sqliteOptions, stringOrUrl);
|
||||
}
|
||||
|
||||
if (!stringOrUrl) {
|
||||
const url = options?.url;
|
||||
if (typeof url === "string") {
|
||||
stringOrUrl = defaultToPostgresIfNoProtocol(url);
|
||||
} else if (url instanceof URL) {
|
||||
stringOrUrl = url;
|
||||
// Step 4: Normalize and validate options for the specific adapter
|
||||
return normalizeOptionsForAdapter(adapter, options, finalUrl, env, sslMode);
|
||||
}
|
||||
|
||||
function handleSQLiteOptions(
|
||||
options: Bun.SQL.Options,
|
||||
inputUrl: string | URL | null,
|
||||
env: Record<string, string | undefined>,
|
||||
): Bun.SQL.__internal.DefinedSQLiteOptions {
|
||||
let filename: string | URL | null = options.filename || inputUrl;
|
||||
|
||||
// If no filename provided, check environment
|
||||
if (!filename) {
|
||||
const envUrl = env.DATABASE_URL;
|
||||
if (envUrl) {
|
||||
const parsed = parseDefinitelySqliteUrl(envUrl);
|
||||
if (parsed !== null) {
|
||||
filename = parsed;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Parse SQLite URLs
|
||||
if (typeof filename === "string" || filename instanceof URL) {
|
||||
const parsed = parseDefinitelySqliteUrl(filename);
|
||||
if (parsed !== null) {
|
||||
filename = parsed;
|
||||
}
|
||||
}
|
||||
|
||||
// Special handling for empty strings: should default to :memory:
|
||||
let finalFilename: string;
|
||||
if (filename === null || filename === undefined) {
|
||||
finalFilename = ":memory:";
|
||||
} else if (filename === "") {
|
||||
// Empty string when explicitly passed (like new SQL("", {adapter: "sqlite"})) should be :memory:
|
||||
// This should only be set to ":memory:" if the inputUrl is also an empty string
|
||||
finalFilename = inputUrl === "" ? ":memory:" : "";
|
||||
} else {
|
||||
finalFilename = filename as string;
|
||||
}
|
||||
|
||||
const sqliteOptions: Bun.SQL.__internal.DefinedSQLiteOptions = {
|
||||
...options,
|
||||
adapter: "sqlite",
|
||||
filename: finalFilename,
|
||||
};
|
||||
|
||||
return parseSQLiteOptionsWithQueryParams(sqliteOptions, inputUrl);
|
||||
}
|
||||
|
||||
function defaultProtocolForAdapter(adapter: Bun.SQL.__internal.Adapter) {
|
||||
switch (adapter) {
|
||||
case "mariadb":
|
||||
case "mysql2":
|
||||
case "mysql":
|
||||
return "mysql://";
|
||||
case "sqlite":
|
||||
return "sqlite://";
|
||||
case "postgres":
|
||||
case "postgresql":
|
||||
default:
|
||||
return "postgres://";
|
||||
}
|
||||
}
|
||||
|
||||
function parseUrlForAdapter(urlString: string, adapter: Bun.SQL.__internal.Adapter): URL {
|
||||
if (urlString.startsWith("unix://")) {
|
||||
// Handle unix:// URLs specially
|
||||
return new URL(urlString);
|
||||
}
|
||||
|
||||
// Check if it's a SQLite URL that can't be parsed as a standard URL
|
||||
if (parseDefinitelySqliteUrl(urlString) !== null) {
|
||||
// Create a fake URL for SQLite that won't fail URL parsing
|
||||
return new URL("sqlite:///" + encodeURIComponent(urlString));
|
||||
}
|
||||
|
||||
if (hasProtocol(urlString)) {
|
||||
return new URL(urlString);
|
||||
}
|
||||
|
||||
// Add default protocol for the adapter
|
||||
const defaultProtocol = defaultProtocolForAdapter(adapter);
|
||||
try {
|
||||
return new URL(defaultProtocol + urlString);
|
||||
} catch (error) {
|
||||
try {
|
||||
// can be a "sqlite://file with empty spaces.db"
|
||||
return new URL(encodeURI(defaultProtocol + urlString));
|
||||
} catch {
|
||||
// throw the original error if the URL is invalid
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function validateAdapterProtocolMatch(
|
||||
adapter: Bun.SQL.__internal.Adapter,
|
||||
url: URL,
|
||||
originalUrl: string | URL | null = null,
|
||||
) {
|
||||
let protocol = url.protocol.replace(":", "");
|
||||
|
||||
if (protocol === "unix") {
|
||||
// Unix sockets are valid for any adapter
|
||||
return;
|
||||
}
|
||||
|
||||
const expectedAdapter = getAdapterFromProtocol(protocol);
|
||||
if (!expectedAdapter) {
|
||||
// Unknown protocol, let it through
|
||||
return;
|
||||
}
|
||||
protocol = getAdapterFromProtocol(protocol) as string;
|
||||
|
||||
// Special handling for SQLite
|
||||
if (protocol === "sqlite" && adapter !== "sqlite") {
|
||||
const urlString = originalUrl ? originalUrl.toString() : url.href;
|
||||
throw new Error(`Invalid URL '${urlString}' for ${adapter}. Did you mean to specify \`{ adapter: "sqlite" }\`?`);
|
||||
}
|
||||
|
||||
// Special handling: postgres:// protocol with sqlite adapter is allowed
|
||||
// (explicit adapter wins over protocol for backward compatibility)
|
||||
if (protocol === "postgres" && adapter === "sqlite") {
|
||||
return;
|
||||
}
|
||||
|
||||
// For network databases (postgres/mysql), validate the match
|
||||
if ((protocol === "postgres" || protocol === "mysql") && expectedAdapter !== adapter) {
|
||||
throw new Error(
|
||||
`Protocol '${protocol}' is not compatible with adapter '${adapter}'. Expected adapter '${expectedAdapter}'.`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
function normalizeOptionsForAdapter(
|
||||
adapter: Bun.SQL.__internal.Adapter,
|
||||
options: Bun.SQL.Options,
|
||||
url: URL | null,
|
||||
env: Record<string, string | undefined>,
|
||||
sslMode: SSLMode,
|
||||
): Bun.SQL.__internal.DefinedOptions {
|
||||
// Get adapter-specific defaults from environment
|
||||
const envDefaults = getAdapterSpecificDefaults(adapter, env);
|
||||
|
||||
let hostname: string | undefined,
|
||||
port: number | string | undefined,
|
||||
username: string | null | undefined,
|
||||
password: string | (() => Bun.MaybePromise<string>) | undefined | null,
|
||||
database: string | undefined,
|
||||
tls: Bun.TLSOptions | boolean | undefined,
|
||||
url: URL | undefined,
|
||||
query: string,
|
||||
query = "",
|
||||
idleTimeout: number | null | undefined,
|
||||
connectionTimeout: number | null | undefined,
|
||||
maxLifetime: number | null | undefined,
|
||||
@@ -306,152 +642,71 @@ function parseOptions(
|
||||
onclose: ((client: Bun.SQL) => void) | undefined,
|
||||
max: number | null | undefined,
|
||||
bigint: boolean | undefined,
|
||||
path: string,
|
||||
adapter: Bun.SQL.__internal.Adapter;
|
||||
path = "";
|
||||
|
||||
let prepare = true;
|
||||
let sslMode: SSLMode = SSLMode.disable;
|
||||
|
||||
if (!stringOrUrl || (typeof stringOrUrl === "string" && stringOrUrl.length === 0)) {
|
||||
let urlString = env.POSTGRES_URL || env.DATABASE_URL || env.PGURL || env.PG_URL;
|
||||
|
||||
if (!urlString) {
|
||||
urlString = env.TLS_POSTGRES_DATABASE_URL || env.TLS_DATABASE_URL;
|
||||
if (urlString) {
|
||||
sslMode = SSLMode.require;
|
||||
}
|
||||
}
|
||||
|
||||
if (urlString) {
|
||||
// Check if it's a SQLite URL before trying to parse as regular URL
|
||||
const sqliteUrl = parseDefinitelySqliteUrl(urlString);
|
||||
if (sqliteUrl !== null) {
|
||||
const sqliteOptions: Bun.SQL.__internal.DefinedSQLiteOptions = {
|
||||
...options,
|
||||
adapter: "sqlite",
|
||||
filename: sqliteUrl,
|
||||
};
|
||||
return parseSQLiteOptionsWithQueryParams(sqliteOptions, urlString);
|
||||
}
|
||||
|
||||
url = new URL(urlString);
|
||||
}
|
||||
} else if (stringOrUrl && typeof stringOrUrl === "object") {
|
||||
if (stringOrUrl instanceof URL) {
|
||||
url = stringOrUrl;
|
||||
} else if (options?.url) {
|
||||
const _url = options.url;
|
||||
if (typeof _url === "string") {
|
||||
url = defaultToPostgresIfNoProtocol(_url);
|
||||
} else if (_url && typeof _url === "object" && _url instanceof URL) {
|
||||
url = _url;
|
||||
}
|
||||
}
|
||||
if (options?.tls) {
|
||||
sslMode = SSLMode.require;
|
||||
tls = options.tls;
|
||||
}
|
||||
} else if (typeof stringOrUrl === "string") {
|
||||
try {
|
||||
url = defaultToPostgresIfNoProtocol(stringOrUrl);
|
||||
} catch (e) {
|
||||
throw new Error(`Invalid URL '${stringOrUrl}' for postgres. Did you mean to specify \`{ adapter: "sqlite" }\`?`, {
|
||||
cause: e,
|
||||
});
|
||||
}
|
||||
}
|
||||
query = "";
|
||||
adapter = options.adapter;
|
||||
// Parse URL if provided
|
||||
if (url) {
|
||||
({ hostname, port, username, password, adapter } = options);
|
||||
// object overrides url
|
||||
hostname ||= url.hostname;
|
||||
port ||= url.port;
|
||||
username ||= decodeIfValid(url.username);
|
||||
password ||= decodeIfValid(url.password);
|
||||
adapter ||= url.protocol as Bun.SQL.__internal.Adapter;
|
||||
if (adapter && adapter[adapter.length - 1] === ":") {
|
||||
adapter = adapter.slice(0, -1) as Bun.SQL.__internal.Adapter;
|
||||
}
|
||||
if (url.protocol === "unix:") {
|
||||
// Handle unix domain socket
|
||||
path = url.pathname;
|
||||
} else {
|
||||
hostname = url.hostname;
|
||||
port = url.port;
|
||||
username = decodeIfValid(url.username);
|
||||
password = decodeIfValid(url.password);
|
||||
database = decodeIfValid(url.pathname.slice(1)); // Remove leading /
|
||||
|
||||
const queryObject = url.searchParams.toJSON();
|
||||
for (const key in queryObject) {
|
||||
if (key.toLowerCase() === "sslmode") {
|
||||
sslMode = normalizeSSLMode(queryObject[key]);
|
||||
} else if (key.toLowerCase() === "path") {
|
||||
path = queryObject[key];
|
||||
} else {
|
||||
// this is valid for postgres for other databases it might not be valid
|
||||
// check adapter then implement for other databases
|
||||
// encode string with \0 as finalizer
|
||||
// must be key\0value\0
|
||||
query += `${key}\0${queryObject[key]}\0`;
|
||||
const queryObject = url.searchParams.toJSON();
|
||||
for (const key in queryObject) {
|
||||
if (key.toLowerCase() === "sslmode") {
|
||||
sslMode = normalizeSSLMode(queryObject[key]);
|
||||
} else if (key.toLowerCase() === "path") {
|
||||
path = queryObject[key];
|
||||
} else {
|
||||
query += `${key}\0${queryObject[key]}\0`;
|
||||
}
|
||||
}
|
||||
query = query.trim();
|
||||
}
|
||||
query = query.trim();
|
||||
}
|
||||
if (adapter) {
|
||||
switch (adapter) {
|
||||
case "http":
|
||||
case "https":
|
||||
case "ftp":
|
||||
case "postgres":
|
||||
case "postgresql":
|
||||
adapter = "postgres";
|
||||
break;
|
||||
case "mysql":
|
||||
case "mysql2":
|
||||
case "mariadb":
|
||||
adapter = "mysql";
|
||||
break;
|
||||
case "file":
|
||||
case "sqlite":
|
||||
adapter = "sqlite";
|
||||
break;
|
||||
default:
|
||||
options.adapter satisfies never; // This will type error if we support a new adapter in the future, which will let us know to update this check
|
||||
throw new Error(`Unsupported adapter: ${options.adapter}. Supported adapters: "postgres", "sqlite", "mysql"`);
|
||||
}
|
||||
|
||||
// Apply explicit options (highest precedence) - they override URL parameters
|
||||
hostname = options.hostname || options.host || hostname;
|
||||
port = options.port || port;
|
||||
username = options.username || options.user || username;
|
||||
password = options.password || options.pass || password;
|
||||
database = options.database || options.db || database;
|
||||
path = (options as { path?: string }).path || path;
|
||||
|
||||
// Apply adapter-specific environment defaults (medium precedence)
|
||||
hostname ||= envDefaults.hostname;
|
||||
port ||= envDefaults.port;
|
||||
username ||= envDefaults.username;
|
||||
password ||= envDefaults.password;
|
||||
database ||= envDefaults.database;
|
||||
|
||||
// Apply final defaults (lowest precedence)
|
||||
hostname ||= "localhost";
|
||||
if (port === undefined || port === "") {
|
||||
port = adapter === "mysql" ? 3306 : 5432;
|
||||
} else {
|
||||
adapter = "postgres";
|
||||
port = Number(port);
|
||||
}
|
||||
options.adapter = adapter;
|
||||
assertIsOptionsOfAdapter(options, adapter);
|
||||
hostname ||= options.hostname || options.host || env.PGHOST || "localhost";
|
||||
username ||= adapter === "mysql" ? "root" : "postgres";
|
||||
database ||= adapter === "mysql" ? "mysql" : username;
|
||||
password ||= "";
|
||||
|
||||
port ||= Number(options.port || env.PGPORT || (adapter === "mysql" ? 3306 : 5432));
|
||||
|
||||
path ||= (options as { path?: string }).path || "";
|
||||
|
||||
if (adapter === "postgres") {
|
||||
// add /.s.PGSQL.${port} if the unix domain socket is listening on that path
|
||||
if (path && Number.isSafeInteger(port) && path?.indexOf("/.s.PGSQL.") === -1) {
|
||||
const pathWithSocket = `${path}/.s.PGSQL.${port}`;
|
||||
|
||||
// Only add the path if it actually exists. It would be better to just
|
||||
// always respect whatever the user passes in, but that would technically
|
||||
// be a breakpoint change at this point.
|
||||
if (require("node:fs").existsSync(pathWithSocket)) {
|
||||
path = pathWithSocket;
|
||||
}
|
||||
// Handle PostgreSQL unix domain socket special case
|
||||
if (adapter === "postgres" && path && Number.isSafeInteger(port) && path.indexOf("/.s.PGSQL.") === -1) {
|
||||
const pathWithSocket = `${path}/.s.PGSQL.${port}`;
|
||||
if (require("node:fs").existsSync(pathWithSocket)) {
|
||||
path = pathWithSocket;
|
||||
}
|
||||
}
|
||||
|
||||
username ||=
|
||||
options.username ||
|
||||
options.user ||
|
||||
env.PGUSERNAME ||
|
||||
env.PGUSER ||
|
||||
env.USER ||
|
||||
env.USERNAME ||
|
||||
(adapter === "mysql" ? "root" : "postgres"); // default username for mysql is root and for postgres is postgres;
|
||||
database ||=
|
||||
options.database ||
|
||||
options.db ||
|
||||
decodeIfValid((url?.pathname ?? "").slice(1)) ||
|
||||
env.PGDATABASE ||
|
||||
(adapter === "mysql" ? "mysql" : username); // default database;
|
||||
password ||= options.password || options.pass || env.PGPASSWORD || "";
|
||||
// Handle connection parameters
|
||||
const connection = options.connection;
|
||||
if (connection && $isObject(connection)) {
|
||||
for (const key in connection) {
|
||||
@@ -461,9 +716,15 @@ function parseOptions(
|
||||
}
|
||||
}
|
||||
|
||||
// Handle TLS
|
||||
tls ||= options.tls || options.ssl;
|
||||
max = options.max;
|
||||
if (options?.tls) {
|
||||
sslMode = SSLMode.require;
|
||||
tls = options.tls;
|
||||
}
|
||||
|
||||
// Handle other options
|
||||
max = options.max;
|
||||
idleTimeout ??= options.idleTimeout;
|
||||
idleTimeout ??= options.idle_timeout;
|
||||
connectionTimeout ??= options.connectionTimeout;
|
||||
@@ -473,7 +734,8 @@ function parseOptions(
|
||||
maxLifetime ??= options.maxLifetime;
|
||||
maxLifetime ??= options.max_lifetime;
|
||||
bigint ??= options.bigint;
|
||||
// we need to explicitly set prepare to false if it is false
|
||||
|
||||
// Handle prepare option
|
||||
if (options.prepare === false) {
|
||||
if (adapter === "mysql") {
|
||||
throw $ERR_INVALID_ARG_VALUE("options.prepare", false, "prepared: false is not supported in MySQL");
|
||||
@@ -495,6 +757,7 @@ function parseOptions(
|
||||
}
|
||||
}
|
||||
|
||||
// Validate numeric options
|
||||
if (idleTimeout != null) {
|
||||
idleTimeout = Number(idleTimeout);
|
||||
if (idleTimeout > 2 ** 31 || idleTimeout < 0 || idleTimeout !== idleTimeout) {
|
||||
@@ -538,19 +801,18 @@ function parseOptions(
|
||||
}
|
||||
}
|
||||
|
||||
// Handle TLS configuration
|
||||
if (sslMode !== SSLMode.disable && !tls?.serverName) {
|
||||
if (hostname) {
|
||||
tls = { ...tls, serverName: hostname };
|
||||
} else if (tls) {
|
||||
tls = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (tls && sslMode === SSLMode.disable) {
|
||||
sslMode = SSLMode.prefer;
|
||||
}
|
||||
port = Number(port);
|
||||
|
||||
port = Number(port);
|
||||
if (!Number.isSafeInteger(port) || port < 1 || port > 65535) {
|
||||
throw $ERR_INVALID_ARG_VALUE("port", port, "must be a non-negative integer between 1 and 65535");
|
||||
}
|
||||
@@ -591,7 +853,11 @@ function parseOptions(
|
||||
}
|
||||
|
||||
if (path) {
|
||||
if (require("node:fs").existsSync(path)) {
|
||||
// For unix sockets or when explicitly set, always use the path
|
||||
// Don't require existence check for unix sockets since they might not exist yet
|
||||
if (url?.protocol === "unix:" || (options as { path?: string }).path) {
|
||||
ret.path = path;
|
||||
} else if (require("node:fs").existsSync(path)) {
|
||||
ret.path = path;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -871,6 +871,30 @@ pub const Resolver = struct {
|
||||
|
||||
r.flushDebugLogs(.success) catch {};
|
||||
result.import_kind = kind;
|
||||
if (comptime Environment.enable_logs) {
|
||||
if (result.path_pair.secondary) |secondary| {
|
||||
debuglog(
|
||||
"resolve({}, from: {}, {s}) = {} (secondary: {})",
|
||||
.{
|
||||
bun.fmt.fmtPath(u8, import_path, .{}),
|
||||
bun.fmt.fmtPath(u8, source_dir, .{}),
|
||||
kind.label(),
|
||||
bun.fmt.fmtPath(u8, if (result.path()) |path| path.text else "<NULL>", .{}),
|
||||
bun.fmt.fmtPath(u8, secondary.text, .{}),
|
||||
},
|
||||
);
|
||||
} else {
|
||||
debuglog(
|
||||
"resolve({}, from: {}, {s}) = {}",
|
||||
.{
|
||||
bun.fmt.fmtPath(u8, import_path, .{}),
|
||||
bun.fmt.fmtPath(u8, source_dir, .{}),
|
||||
kind.label(),
|
||||
bun.fmt.fmtPath(u8, if (result.path()) |path| path.text else "<NULL>", .{}),
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
return .{ .success = result.* };
|
||||
},
|
||||
.failure => |e| {
|
||||
|
||||
@@ -793,7 +793,7 @@ describe("bundler", () => {
|
||||
stdout: "main",
|
||||
},
|
||||
});
|
||||
itBundled.skip("packagejson/DualPackageHazardImportAndRequireSameFile", {
|
||||
itBundled("packagejson/DualPackageHazardImportAndRequireSameFile", {
|
||||
files: {
|
||||
"/Users/user/project/src/entry.js": /* js */ `
|
||||
import value from 'demo-pkg'
|
||||
@@ -812,7 +812,7 @@ describe("bundler", () => {
|
||||
stdout: "main main",
|
||||
},
|
||||
});
|
||||
itBundled.skip("packagejson/DualPackageHazardImportAndRequireSeparateFiles", {
|
||||
itBundled("packagejson/DualPackageHazardImportAndRequireSeparateFiles", {
|
||||
files: {
|
||||
"/Users/user/project/src/entry.js": /* js */ `
|
||||
import './test-main'
|
||||
@@ -861,7 +861,7 @@ describe("bundler", () => {
|
||||
stdout: "module\nmodule",
|
||||
},
|
||||
});
|
||||
itBundled.skip("packagejson/DualPackageHazardImportAndRequireImplicitMain", {
|
||||
itBundled("packagejson/DualPackageHazardImportAndRequireImplicitMain", {
|
||||
files: {
|
||||
"/Users/user/project/src/entry.js": /* js */ `
|
||||
import './test-index'
|
||||
|
||||
567
test/js/sql/adapter-env-var-precedence.test.ts
Normal file
567
test/js/sql/adapter-env-var-precedence.test.ts
Normal file
@@ -0,0 +1,567 @@
|
||||
import { SQL } from "bun";
|
||||
import { describe, expect, test } from "bun:test";
|
||||
|
||||
describe("SQL adapter environment variable precedence", () => {
|
||||
const originalEnv = { ...process.env };
|
||||
|
||||
function cleanEnv() {
|
||||
// Clean all SQL-related env vars
|
||||
delete process.env.DATABASE_URL;
|
||||
delete process.env.POSTGRES_URL;
|
||||
delete process.env.PGURL;
|
||||
delete process.env.PG_URL;
|
||||
delete process.env.MYSQL_URL;
|
||||
delete process.env.TLS_DATABASE_URL;
|
||||
delete process.env.TLS_POSTGRES_DATABASE_URL;
|
||||
delete process.env.TLS_MYSQL_DATABASE_URL;
|
||||
delete process.env.PGHOST;
|
||||
delete process.env.PGPORT;
|
||||
delete process.env.PGUSER;
|
||||
delete process.env.PGUSERNAME;
|
||||
delete process.env.PGPASSWORD;
|
||||
delete process.env.PGDATABASE;
|
||||
delete process.env.MYSQL_HOST;
|
||||
delete process.env.MYSQL_PORT;
|
||||
delete process.env.MYSQL_USER;
|
||||
delete process.env.MYSQL_PASSWORD;
|
||||
delete process.env.MYSQL_DATABASE;
|
||||
delete process.env.USER;
|
||||
delete process.env.USERNAME;
|
||||
}
|
||||
|
||||
function restoreEnv() {
|
||||
// Restore original env
|
||||
Object.assign(process.env, originalEnv);
|
||||
}
|
||||
|
||||
test("should not prioritize DATABASE_URL over explicit options (issue #22147)", () => {
|
||||
cleanEnv();
|
||||
process.env.DATABASE_URL = "foo_url";
|
||||
|
||||
const options = new SQL({
|
||||
hostname: "bar_url",
|
||||
username: "postgres",
|
||||
password: "postgres",
|
||||
port: 5432,
|
||||
});
|
||||
|
||||
expect(options.options.hostname).toBe("bar_url");
|
||||
expect(options.options.port).toBe(5432);
|
||||
expect(options.options.username).toBe("postgres");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should only read PostgreSQL env vars when adapter is postgres", () => {
|
||||
cleanEnv();
|
||||
process.env.PGHOST = "pg-host";
|
||||
process.env.PGUSER = "pg-user";
|
||||
process.env.PGPASSWORD = "pg-pass";
|
||||
process.env.MYSQL_URL = "mysql://mysql-host/db";
|
||||
|
||||
const options = new SQL({
|
||||
adapter: "postgres",
|
||||
});
|
||||
|
||||
expect(options.options.hostname).toBe("pg-host");
|
||||
expect(options.options.username).toBe("pg-user");
|
||||
expect(options.options.password).toBe("pg-pass");
|
||||
// Should not use MYSQL_URL
|
||||
expect(options.options.hostname).not.toBe("mysql-host");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should only read MySQL env vars when adapter is mysql", () => {
|
||||
cleanEnv();
|
||||
process.env.PGHOST = "pg-host";
|
||||
process.env.PGUSER = "pg-user";
|
||||
process.env.MYSQL_URL = "mysql://mysql-host/db";
|
||||
|
||||
const options = new SQL({
|
||||
adapter: "mysql",
|
||||
});
|
||||
|
||||
// Should use MYSQL_URL and not read PostgreSQL env vars
|
||||
expect(options.options.hostname).toBe("mysql-host");
|
||||
expect(options.options.username).not.toBe("pg-user");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should infer postgres adapter from postgres:// protocol", () => {
|
||||
cleanEnv();
|
||||
const options = new SQL("postgres://user:pass@host:5432/db");
|
||||
expect(options.options.adapter).toBe("postgres");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should infer mysql adapter from mysql:// protocol", () => {
|
||||
cleanEnv();
|
||||
const options = new SQL("mysql://user:pass@host:3306/db");
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should default to postgres when no protocol specified", () => {
|
||||
cleanEnv();
|
||||
const options = new SQL("user:pass@host/db");
|
||||
expect(options.options.adapter).toBe("postgres");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should support unix:// with explicit adapter", () => {
|
||||
cleanEnv();
|
||||
const options = new SQL("unix:///tmp/mysql.sock", {
|
||||
adapter: "mysql",
|
||||
});
|
||||
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
expect(options.options.path).toBe("/tmp/mysql.sock");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should validate adapter matches protocol", () => {
|
||||
cleanEnv();
|
||||
expect(() => {
|
||||
new SQL("mysql://host/db", { adapter: "postgres" });
|
||||
}).toThrow(/mysql.*postgres/i);
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("adapter-specific env vars should take precedence over generic ones", () => {
|
||||
cleanEnv();
|
||||
process.env.USER = "generic-user";
|
||||
process.env.PGUSER = "postgres-user";
|
||||
|
||||
const options = new SQL({
|
||||
adapter: "postgres",
|
||||
});
|
||||
|
||||
expect(options.options.username).toBe("postgres-user");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should infer mysql adapter from MYSQL_URL env var", () => {
|
||||
cleanEnv();
|
||||
process.env.MYSQL_URL = "mysql://user:pass@host:3306/db";
|
||||
|
||||
const options = new SQL();
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
expect(options.options.hostname).toBe("host");
|
||||
expect(options.options.port).toBe(3306);
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should default to port 3306 for MySQL when no port specified", () => {
|
||||
cleanEnv();
|
||||
process.env.MYSQL_URL = "mysql://user:pass@host/db";
|
||||
|
||||
const options = new SQL();
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
expect(options.options.hostname).toBe("host");
|
||||
expect(options.options.port).toBe(3306); // Should default to MySQL port
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should default to port 3306 for explicit MySQL adapter", () => {
|
||||
cleanEnv();
|
||||
const options = new SQL({
|
||||
adapter: "mysql",
|
||||
hostname: "localhost",
|
||||
});
|
||||
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
expect(options.options.port).toBe(3306); // Should default to MySQL port
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should infer postgres adapter from POSTGRES_URL env var", () => {
|
||||
cleanEnv();
|
||||
process.env.POSTGRES_URL = "postgres://user:pass@host:5432/db";
|
||||
|
||||
const options = new SQL();
|
||||
expect(options.options.adapter).toBe("postgres");
|
||||
expect(options.options.hostname).toBe("host");
|
||||
expect(options.options.port).toBe(5432);
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("POSTGRES_URL should take precedence over MYSQL_URL", () => {
|
||||
cleanEnv();
|
||||
process.env.POSTGRES_URL = "postgres://pg-host:5432/pgdb";
|
||||
process.env.MYSQL_URL = "mysql://mysql-host:3306/mysqldb";
|
||||
|
||||
const options = new SQL();
|
||||
expect(options.options.adapter).toBe("postgres");
|
||||
expect(options.options.hostname).toBe("pg-host");
|
||||
expect(options.options.port).toBe(5432);
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should infer mysql from MYSQL_URL even without protocol", () => {
|
||||
cleanEnv();
|
||||
process.env.MYSQL_URL = "root@localhost:3306/test";
|
||||
|
||||
const options = new SQL();
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
expect(options.options.hostname).toBe("localhost");
|
||||
expect(options.options.port).toBe(3306);
|
||||
expect(options.options.username).toBe("root");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should infer postgres from POSTGRES_URL even without protocol", () => {
|
||||
cleanEnv();
|
||||
process.env.POSTGRES_URL = "user@localhost:5432/test";
|
||||
|
||||
const options = new SQL();
|
||||
expect(options.options.adapter).toBe("postgres");
|
||||
expect(options.options.hostname).toBe("localhost");
|
||||
expect(options.options.port).toBe(5432);
|
||||
expect(options.options.username).toBe("user");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("environment variable name should override protocol (PGURL with mysql protocol should be postgres)", () => {
|
||||
cleanEnv();
|
||||
process.env.PGURL = "mysql://host:3306/db";
|
||||
|
||||
const options = new SQL();
|
||||
expect(options.options.adapter).toBe("postgres");
|
||||
expect(options.options.hostname).toBe("host");
|
||||
expect(options.options.port).toBe(3306);
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("environment variable name should override protocol (MYSQL_URL with postgres protocol should be mysql)", () => {
|
||||
cleanEnv();
|
||||
process.env.MYSQL_URL = "postgres://host:5432/db";
|
||||
|
||||
const options = new SQL();
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
expect(options.options.hostname).toBe("host");
|
||||
expect(options.options.port).toBe(5432);
|
||||
restoreEnv();
|
||||
});
|
||||
test("should use MySQL-specific environment variables", () => {
|
||||
cleanEnv();
|
||||
process.env.MYSQL_HOST = "mysql-server";
|
||||
process.env.MYSQL_PORT = "3307";
|
||||
process.env.MYSQL_USER = "admin";
|
||||
process.env.MYSQL_PASSWORD = "secret";
|
||||
process.env.MYSQL_DATABASE = "production";
|
||||
|
||||
const options = new SQL({ adapter: "mysql" });
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
expect(options.options.hostname).toBe("mysql-server");
|
||||
expect(options.options.port).toBe(3307);
|
||||
expect(options.options.username).toBe("admin");
|
||||
expect(options.options.password).toBe("secret");
|
||||
expect(options.options.database).toBe("production");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("MySQL-specific env vars should take precedence over generic ones", () => {
|
||||
cleanEnv();
|
||||
process.env.USER = "generic-user";
|
||||
process.env.MYSQL_USER = "mysql-user";
|
||||
|
||||
const options = new SQL({ adapter: "mysql" });
|
||||
expect(options.options.username).toBe("mysql-user");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should default to database name 'mysql' for MySQL adapter", () => {
|
||||
cleanEnv();
|
||||
|
||||
const options = new SQL({ adapter: "mysql", hostname: "localhost" });
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
expect(options.options.database).toBe("mysql");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should default to username as database name for PostgreSQL adapter", () => {
|
||||
cleanEnv();
|
||||
|
||||
const options = new SQL({ adapter: "postgres", hostname: "localhost", username: "testuser" });
|
||||
expect(options.options.adapter).toBe("postgres");
|
||||
expect(options.options.database).toBe("testuser");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should infer mysql adapter from TLS_MYSQL_DATABASE_URL", () => {
|
||||
cleanEnv();
|
||||
process.env.TLS_MYSQL_DATABASE_URL = "mysql://user:pass@host:3306/db";
|
||||
|
||||
const options = new SQL();
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
expect(options.options.hostname).toBe("host");
|
||||
expect(options.options.port).toBe(3306);
|
||||
expect(options.options.sslMode).toBe(2); // SSLMode.require
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should infer postgres adapter from TLS_POSTGRES_DATABASE_URL", () => {
|
||||
cleanEnv();
|
||||
process.env.TLS_POSTGRES_DATABASE_URL = "postgres://user:pass@host:5432/db";
|
||||
|
||||
const options = new SQL();
|
||||
expect(options.options.adapter).toBe("postgres");
|
||||
expect(options.options.hostname).toBe("host");
|
||||
expect(options.options.port).toBe(5432);
|
||||
expect(options.options.sslMode).toBe(2); // SSLMode.require
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should infer adapter from TLS_DATABASE_URL using protocol", () => {
|
||||
cleanEnv();
|
||||
process.env.TLS_DATABASE_URL = "mysql://user:pass@host:3306/db";
|
||||
|
||||
const options = new SQL();
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
expect(options.options.hostname).toBe("host");
|
||||
expect(options.options.port).toBe(3306);
|
||||
expect(options.options.sslMode).toBe(2); // SSLMode.require
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
describe("Adapter-Protocol Validation", () => {
|
||||
test("should work with explicit adapter and URL without protocol", () => {
|
||||
cleanEnv();
|
||||
|
||||
const options = new SQL("user:pass@host:3306/db", { adapter: "mysql" });
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
expect(options.options.hostname).toBe("host");
|
||||
expect(options.options.port).toBe(3306);
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should work with explicit adapter and matching protocol", () => {
|
||||
cleanEnv();
|
||||
|
||||
const options = new SQL("mysql://user:pass@host:3306/db", { adapter: "mysql" });
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
expect(options.options.hostname).toBe("host");
|
||||
expect(options.options.port).toBe(3306);
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should throw error when adapter conflicts with protocol (mysql adapter with postgres protocol)", () => {
|
||||
cleanEnv();
|
||||
|
||||
expect(() => {
|
||||
new SQL("postgres://user:pass@host:5432/db", { adapter: "mysql" });
|
||||
}).toThrow(/Protocol 'postgres' is not compatible with adapter 'mysql'/);
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should throw error when adapter conflicts with protocol (postgres adapter with mysql protocol)", () => {
|
||||
cleanEnv();
|
||||
|
||||
expect(() => {
|
||||
new SQL("mysql://user:pass@host:3306/db", { adapter: "postgres" });
|
||||
}).toThrow(/Protocol 'mysql' is not compatible with adapter 'postgres'/);
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should throw error when sqlite adapter used with mysql protocol", () => {
|
||||
cleanEnv();
|
||||
|
||||
expect(() => {
|
||||
new SQL("mysql://user:pass@host:3306/db", { adapter: "sqlite" });
|
||||
}).toThrow(/Protocol 'mysql' is not compatible with adapter 'sqlite'/);
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should throw error when mysql adapter used with postgres protocol", () => {
|
||||
cleanEnv();
|
||||
|
||||
expect(() => {
|
||||
new SQL("postgres://user:pass@host:5432/db", { adapter: "mysql" });
|
||||
}).toThrow(/Protocol 'postgres' is not compatible with adapter 'mysql'/);
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should work with unix:// protocol and explicit adapter", () => {
|
||||
cleanEnv();
|
||||
|
||||
const options = new SQL("unix:///tmp/mysql.sock", { adapter: "mysql" });
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
expect(options.options.path).toBe("/tmp/mysql.sock");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should work with sqlite:// protocol and sqlite adapter", () => {
|
||||
cleanEnv();
|
||||
|
||||
const options = new SQL("sqlite:///tmp/test.db", { adapter: "sqlite" });
|
||||
expect(options.options.adapter).toBe("sqlite");
|
||||
expect(options.options.filename).toBe("/tmp/test.db");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
describe("Explicit options override URL parameters", () => {
|
||||
test("explicit hostname should override URL hostname", () => {
|
||||
cleanEnv();
|
||||
|
||||
const options = new SQL("postgres://urluser:urlpass@urlhost:1234/urldb", {
|
||||
hostname: "explicithost",
|
||||
});
|
||||
|
||||
expect(options.options.hostname).toBe("explicithost");
|
||||
expect(options.options.port).toBe(1234); // URL port should remain
|
||||
expect(options.options.username).toBe("urluser"); // URL username should remain
|
||||
expect(options.options.database).toBe("urldb"); // URL database should remain
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("explicit port should override URL port", () => {
|
||||
cleanEnv();
|
||||
|
||||
const options = new SQL("postgres://urluser:urlpass@urlhost:1234/urldb", {
|
||||
port: 5432,
|
||||
});
|
||||
|
||||
expect(options.options.hostname).toBe("urlhost"); // URL hostname should remain
|
||||
expect(options.options.port).toBe(5432);
|
||||
expect(options.options.username).toBe("urluser"); // URL username should remain
|
||||
expect(options.options.database).toBe("urldb"); // URL database should remain
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("explicit username should override URL username", () => {
|
||||
cleanEnv();
|
||||
|
||||
const options = new SQL("postgres://urluser:urlpass@urlhost:1234/urldb", {
|
||||
username: "explicituser",
|
||||
});
|
||||
|
||||
expect(options.options.hostname).toBe("urlhost"); // URL hostname should remain
|
||||
expect(options.options.port).toBe(1234); // URL port should remain
|
||||
expect(options.options.username).toBe("explicituser");
|
||||
expect(options.options.database).toBe("urldb"); // URL database should remain
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("explicit password should override URL password", () => {
|
||||
cleanEnv();
|
||||
|
||||
const options = new SQL("postgres://urluser:urlpass@urlhost:1234/urldb", {
|
||||
password: "explicitpass",
|
||||
});
|
||||
|
||||
expect(options.options.hostname).toBe("urlhost"); // URL hostname should remain
|
||||
expect(options.options.port).toBe(1234); // URL port should remain
|
||||
expect(options.options.username).toBe("urluser"); // URL username should remain
|
||||
expect(options.options.password).toBe("explicitpass");
|
||||
expect(options.options.database).toBe("urldb"); // URL database should remain
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("explicit database should override URL database", () => {
|
||||
cleanEnv();
|
||||
|
||||
const options = new SQL("postgres://urluser:urlpass@urlhost:1234/urldb", {
|
||||
database: "explicitdb",
|
||||
});
|
||||
|
||||
expect(options.options.hostname).toBe("urlhost"); // URL hostname should remain
|
||||
expect(options.options.port).toBe(1234); // URL port should remain
|
||||
expect(options.options.username).toBe("urluser"); // URL username should remain
|
||||
expect(options.options.database).toBe("explicitdb");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("multiple explicit options should override corresponding URL parameters", () => {
|
||||
cleanEnv();
|
||||
|
||||
const options = new SQL("postgres://urluser:urlpass@urlhost:1234/urldb", {
|
||||
hostname: "explicithost",
|
||||
port: 5432,
|
||||
username: "explicituser",
|
||||
password: "explicitpass",
|
||||
database: "explicitdb",
|
||||
});
|
||||
|
||||
expect(options.options.hostname).toBe("explicithost");
|
||||
expect(options.options.port).toBe(5432);
|
||||
expect(options.options.username).toBe("explicituser");
|
||||
expect(options.options.password).toBe("explicitpass");
|
||||
expect(options.options.database).toBe("explicitdb");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should work with MySQL URLs and explicit options", () => {
|
||||
cleanEnv();
|
||||
|
||||
const options = new SQL("mysql://urluser:urlpass@urlhost:3306/urldb", {
|
||||
hostname: "explicithost",
|
||||
port: 3307,
|
||||
username: "explicituser",
|
||||
});
|
||||
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
expect(options.options.hostname).toBe("explicithost");
|
||||
expect(options.options.port).toBe(3307);
|
||||
expect(options.options.username).toBe("explicituser");
|
||||
expect(options.options.password).toBe("urlpass"); // URL password should remain
|
||||
expect(options.options.database).toBe("urldb"); // URL database should remain
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("should work with alternative option names (user, pass, db, host)", () => {
|
||||
cleanEnv();
|
||||
|
||||
const options = new SQL("postgres://urluser:urlpass@urlhost:1234/urldb", {
|
||||
host: "explicithost",
|
||||
user: "explicituser",
|
||||
pass: "explicitpass",
|
||||
db: "explicitdb",
|
||||
});
|
||||
|
||||
expect(options.options.hostname).toBe("explicithost");
|
||||
expect(options.options.username).toBe("explicituser");
|
||||
expect(options.options.password).toBe("explicitpass");
|
||||
expect(options.options.database).toBe("explicitdb");
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("explicit options should override URL even when environment variables are present", () => {
|
||||
cleanEnv();
|
||||
process.env.PGHOST = "envhost";
|
||||
process.env.PGPORT = "9999";
|
||||
process.env.PGUSER = "envuser";
|
||||
|
||||
const options = new SQL("postgres://urluser:urlpass@urlhost:1234/urldb", {
|
||||
hostname: "explicithost",
|
||||
port: 5432,
|
||||
username: "explicituser",
|
||||
});
|
||||
|
||||
expect(options.options.hostname).toBe("explicithost");
|
||||
expect(options.options.port).toBe(5432);
|
||||
expect(options.options.username).toBe("explicituser");
|
||||
expect(options.options.password).toBe("urlpass"); // URL password should remain since no explicit password
|
||||
expect(options.options.database).toBe("urldb"); // URL database should remain
|
||||
restoreEnv();
|
||||
});
|
||||
|
||||
test("explicit options should have higher precedence than environment-specific variables", () => {
|
||||
cleanEnv();
|
||||
process.env.MYSQL_HOST = "mysqlhost";
|
||||
process.env.MYSQL_USER = "mysqluser";
|
||||
process.env.MYSQL_PASSWORD = "mysqlpass";
|
||||
|
||||
const options = new SQL("mysql://urluser:urlpass@urlhost:3306/urldb", {
|
||||
hostname: "explicithost",
|
||||
username: "explicituser",
|
||||
});
|
||||
|
||||
expect(options.options.adapter).toBe("mysql");
|
||||
expect(options.options.hostname).toBe("explicithost");
|
||||
expect(options.options.username).toBe("explicituser");
|
||||
expect(options.options.password).toBe("urlpass"); // URL password (not env)
|
||||
expect(options.options.database).toBe("urldb"); // URL database should remain
|
||||
restoreEnv();
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
test/regression/issue/text-chunk-null-access.test.ts — new file, 28 lines (@@ -0,0 +1,28 @@)
|
||||
import { expect, test } from "bun:test";
|
||||
|
||||
test("TextChunk methods handle null text_chunk gracefully", async () => {
|
||||
// This test reproduces a crash where TextChunk methods are called
|
||||
// after the underlying text_chunk has been cleaned up or is null
|
||||
|
||||
let textChunkRef: any;
|
||||
|
||||
const html = "<p>Test content</p>";
|
||||
|
||||
const rewriter = new HTMLRewriter().on("p", {
|
||||
text(text) {
|
||||
// Store reference to the text chunk
|
||||
textChunkRef = text;
|
||||
},
|
||||
});
|
||||
|
||||
await rewriter.transform(new Response(html)).text();
|
||||
|
||||
// Force garbage collection to clean up internal references
|
||||
if (typeof Bun !== "undefined" && Bun.gc) {
|
||||
Bun.gc(true);
|
||||
}
|
||||
|
||||
// It should be undefined to be consistent with the rest of the APIs.
|
||||
expect(textChunkRef.removed).toBeUndefined();
|
||||
expect(textChunkRef.lastInTextNode).toBeUndefined();
|
||||
});
|
||||
Reference in New Issue
Block a user