Compare commits


1 commit

Author: Jarred Sumner
SHA1: 22e4c9fb01
Message: commit experiment that did not work
Date: 2025-03-29 21:12:28 -07:00
7 changed files with 795 additions and 754 deletions

View File

@@ -677,13 +677,13 @@ pub const Framework = struct {
if ((bundler_options.define.keys.len + bundler_options.drop.count()) > 0) {
for (bundler_options.define.keys, bundler_options.define.values) |k, v| {
const parsed = try bun.options.Define.Data.parse(k, v, false, false, log, arena);
-try out.options.define.insert(arena, k, parsed);
+try out.options.define.insert(arena, k, &parsed);
}
for (bundler_options.drop.keys()) |drop_item| {
if (drop_item.len > 0) {
const parsed = try bun.options.Define.Data.parse(drop_item, "", true, true, log, arena);
-try out.options.define.insert(arena, drop_item, parsed);
+try out.options.define.insert(arena, drop_item, &parsed);
}
}
}
@@ -791,10 +791,10 @@ pub fn addImportMetaDefines(
try define.insert(
allocator,
"import.meta.env.MODE",
-Define.Data.initStaticString(switch (mode) {
-.development => &.{ .data = "development" },
-.production_dynamic, .production_static => &.{ .data = "production" },
-}),
+switch (mode) {
+.development => Define.Data.initStaticString(&.{ .data = "development" }),
+.production_dynamic, .production_static => Define.Data.initStaticString(&.{ .data = "production" }),
+},
);
try define.insert(
allocator,
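
Note: the hunk above adapts this call site to the new initStaticString, which (per the defines diff further down) takes a comptime string pointer and returns a pointer to a static DefineData. The call therefore moves into each switch arm, where its argument is a comptime-known literal rather than a runtime-selected value. A minimal standalone sketch of that pattern, with illustrative names (intern, Mode, modeString) rather than Bun's:

const std = @import("std");

// One static per distinct comptime argument: the nested struct is
// instantiated once per `s`, so repeated calls share a pointer.
fn intern(comptime s: []const u8) *const []const u8 {
    const Holder = struct {
        const value: []const u8 = s;
    };
    return &Holder.value;
}

const Mode = enum { development, production };

// `mode` is only known at runtime, so the switch selects between two
// already-materialized statics; intern() itself resolves at compile time.
fn modeString(mode: Mode) *const []const u8 {
    return switch (mode) {
        .development => intern("development"),
        .production => intern("production"),
    };
}

pub fn main() void {
    std.debug.print("{s}\n", .{modeString(.production).*});
}

Each distinct comptime argument instantiates its own Holder, so every call site passing the same literal shares one static and nothing is constructed at runtime.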

View File

@@ -13,7 +13,8 @@ const strings = @import("./string_immutable.zig");
/// TODO: https://github.com/ziglang/zig/issues/4335
pub fn ComptimeStringMapWithKeyType(comptime KeyType: type, comptime V: type, comptime kvs_list: anytype) type {
const KV = struct {
-key: []const KeyType,
+key_ptr: [*]const KeyType,
+key_len: u8,
value: V,
};
@@ -24,29 +25,30 @@ pub fn ComptimeStringMapWithKeyType(comptime KeyType: type, comptime V: type, co
const lenAsc = (struct {
fn lenAsc(context: void, a: KV, b: KV) bool {
_ = context;
-if (a.key.len != b.key.len) {
-return a.key.len < b.key.len;
+if (a.key_len != b.key_len) {
+return a.key_len < b.key_len;
}
// https://stackoverflow.com/questions/11227809/why-is-processing-a-sorted-array-faster-than-processing-an-unsorted-array
@setEvalBranchQuota(999999);
-return std.mem.order(KeyType, a.key, b.key) == .lt;
+return std.mem.order(KeyType, a.key_ptr[0..a.key_len], b.key_ptr[0..b.key_len]) == .lt;
}
}).lenAsc;
if (KeyType == u8) {
for (kvs_list, 0..) |kv, i| {
if (V != void) {
sorted_kvs[i] = .{ .key = kv.@"0", .value = kv.@"1" };
sorted_kvs[i] = .{ .key_ptr = kv.@"0".ptr, .key_len = @as(u8, @intCast(kv.@"0".len)), .value = kv.@"1" };
} else {
sorted_kvs[i] = .{ .key = kv.@"0", .value = {} };
sorted_kvs[i] = .{ .key_ptr = kv.@"0".ptr, .key_len = @as(u8, @intCast(kv.@"0".len)), .value = {} };
}
}
} else {
@compileError("Not implemented for this key type");
}
std.sort.pdq(KV, &sorted_kvs, {}, lenAsc);
-const min_len = sorted_kvs[0].key.len;
-const max_len = sorted_kvs[sorted_kvs.len - 1].key.len;
-var len_indexes: [max_len + 1]usize = undefined;
+const min_len = sorted_kvs[0].key_len;
+const max_len = sorted_kvs[sorted_kvs.len - 1].key_len;
+const LenIndexInt = if (sorted_kvs.len > std.math.maxInt(u8)) u16 else u8;
+var len_indexes: [max_len + 1]LenIndexInt = undefined;
var len: usize = 0;
var i: usize = 0;
@@ -54,35 +56,37 @@ pub fn ComptimeStringMapWithKeyType(comptime KeyType: type, comptime V: type, co
@setEvalBranchQuota(99999);
// find the first keyword len == len
-while (len > sorted_kvs[i].key.len) {
+while (len > sorted_kvs[i].key_len) {
i += 1;
}
-len_indexes[len] = i;
+len_indexes[len] = @intCast(i);
}
break :blk .{
-.min_len = min_len,
-.max_len = max_len,
+.min_len = @as(LenIndexInt, @intCast(min_len)),
+.max_len = @as(LenIndexInt, @intCast(max_len)),
.sorted_kvs = sorted_kvs,
.len_indexes = len_indexes,
};
};
return struct {
-const len_indexes = precomputed.len_indexes;
-pub const kvs = precomputed.sorted_kvs;
-const keys_list: []const []const KeyType = blk: {
-var k: [kvs.len][]const KeyType = undefined;
-for (kvs, 0..) |kv, i| {
-k[i] = kv.key;
-}
-const final = k;
-break :blk &final;
-};
+const len_indexes = &precomputed.len_indexes;
+pub const kvs = &precomputed.sorted_kvs;
pub const Value = V;
pub fn keys() []const []const KeyType {
+const keys_list = struct {
+const list: []const []const KeyType = blk: {
+var k: [kvs.len][]const KeyType = undefined;
+for (kvs, 0..) |kv, i| {
+k[i] = kv.key_ptr[0..kv.key_len];
+}
+const final = k;
+break :blk &final;
+};
+}.list;
return keys_list;
}
@@ -91,20 +95,57 @@ pub fn ComptimeStringMapWithKeyType(comptime KeyType: type, comptime V: type, co
}
pub fn getWithLength(str: []const KeyType, comptime len: usize) ?V {
+const start: u16 = comptime len_indexes[len];
const end = comptime brk: {
var i = len_indexes[len];
@setEvalBranchQuota(99999);
-while (i < kvs.len and kvs[i].key.len == len) : (i += 1) {}
+while (i < kvs.len and kvs[i].key_len == len) : (i += 1) {}
break :brk i;
};
-// This benchmarked faster for both small and large lists of strings than using a big switch statement
-// But only so long as the keys are a sorted list.
-inline for (len_indexes[len]..end) |i| {
-if (strings.eqlComptimeCheckLenWithType(KeyType, str, kvs[i].key, false)) {
-return kvs[i].value;
+if (comptime end == start) {
+return null;
+}
+if (comptime len == 8) {
+const key: u64 = @bitCast(str[0..8].*);
+inline for (comptime start..end) |k| {
+if ((comptime @as(u64, @bitCast(kvs[k].key_ptr[0..8].*))) == key) {
+return kvs[k].value;
+}
+}
+} else if (comptime len == 4) {
+const key: u32 = @bitCast(str[0..4].*);
+inline for (comptime start..end) |k| {
+if ((comptime @as(u32, @bitCast(kvs[k].key_ptr[0..4].*))) == key) {
+return kvs[k].value;
+}
+}
+} else if (comptime len == 2) {
+const key: u16 = @bitCast(str[0..2].*);
+inline for (comptime start..end) |k| {
+if ((comptime @as(u16, @bitCast(kvs[k].key_ptr[0..2].*))) == key) {
+return kvs[k].value;
+}
+}
+} else if (comptime len == 1) {
+const key: u8 = str[0];
+inline for (comptime start..end) |k| {
+if (key == comptime kvs[k].key_ptr[0]) {
+return kvs[k].value;
+}
+}
+} else {
+// This benchmarked faster for both small and large lists of strings than using a big switch statement
+// But only so long as the keys are a sorted list.
+inline for (start..end) |i| {
+if (strings.eqlComptimeCheckLenWithType(KeyType, str, kvs[i].key_ptr[0..kvs[i].key_len], false)) {
+return kvs[i].value;
+}
+}
+}
}
@@ -116,7 +157,7 @@ pub fn ComptimeStringMapWithKeyType(comptime KeyType: type, comptime V: type, co
var i = len_indexes[len];
@setEvalBranchQuota(99999);
-while (i < kvs.len and kvs[i].key.len == len) : (i += 1) {}
+while (i < kvs.len and kvs[i].key_len == len) : (i += 1) {}
break :brk i;
};
@@ -124,7 +165,7 @@ pub fn ComptimeStringMapWithKeyType(comptime KeyType: type, comptime V: type, co
// This benchmarked faster for both small and large lists of strings than using a big switch statement
// But only so long as the keys are a sorted list.
inline for (len_indexes[len]..end) |i| {
-if (eqls(str, kvs[i].key)) {
+if (eqls(str, kvs[i].key_ptr[0..kvs[i].key_len])) {
return kvs[i].value;
}
}
@@ -137,7 +178,7 @@ pub fn ComptimeStringMapWithKeyType(comptime KeyType: type, comptime V: type, co
var i = len_indexes[len];
@setEvalBranchQuota(99999);
-while (i < kvs.len and kvs[i].key.len == len) : (i += 1) {}
+while (i < kvs.len and kvs[i].key_len == len) : (i += 1) {}
break :brk i;
};
@@ -155,14 +196,10 @@ pub fn ComptimeStringMapWithKeyType(comptime KeyType: type, comptime V: type, co
if (str.len < precomputed.min_len or str.len > precomputed.max_len)
return null;
-comptime var i: usize = precomputed.min_len;
-inline while (i <= precomputed.max_len) : (i += 1) {
-if (str.len == i) {
-return getWithLength(str, i);
-}
-}
-return null;
+return switch (str.len) {
+inline precomputed.min_len...precomputed.max_len => |len| getWithLength(str, len),
+else => null,
+};
}
/// Returns the index of the key in the sorted list of keys.
@@ -177,7 +214,7 @@ pub fn ComptimeStringMapWithKeyType(comptime KeyType: type, comptime V: type, co
var i = len_indexes[len];
@setEvalBranchQuota(99999);
-while (i < kvs.len and kvs[i].key.len == len) : (i += 1) {}
+while (i < kvs.len and kvs[i].key_len == len) : (i += 1) {}
break :brk i;
};
@@ -185,7 +222,7 @@ pub fn ComptimeStringMapWithKeyType(comptime KeyType: type, comptime V: type, co
// This benchmarked faster for both small and large lists of strings than using a big switch statement
// But only so long as the keys are a sorted list.
inline for (len_indexes[len]..end) |i| {
-if (strings.eqlComptimeCheckLenWithType(KeyType, str, kvs[i].key, false)) {
+if (strings.eqlComptimeCheckLenWithType(KeyType, str, kvs[i].key_ptr[0..kvs[i].key_len], false)) {
return i;
}
}
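
Note: the heart of this file's change is the specialized getWithLength above. When the comptime-known key length is 8, 4, 2, or 1 bytes, both the probe string and each candidate key are reinterpreted as a single integer, so each candidate costs one integer compare against a comptime-folded constant instead of a byte-by-byte comparison. A standalone sketch of just that trick, independent of the map machinery (eqlFixed is an illustrative name):

const std = @import("std");

// For a comptime-known length matching an integer width, string equality
// collapses to one integer comparison: @bitCast reinterprets the
// fixed-size byte array as u64/u32/u16.
fn eqlFixed(comptime len: usize, a: *const [len]u8, b: *const [len]u8) bool {
    if (comptime len == 8) {
        return @as(u64, @bitCast(a.*)) == @as(u64, @bitCast(b.*));
    } else if (comptime len == 4) {
        return @as(u32, @bitCast(a.*)) == @as(u32, @bitCast(b.*));
    } else if (comptime len == 2) {
        return @as(u16, @bitCast(a.*)) == @as(u16, @bitCast(b.*));
    } else if (comptime len == 1) {
        return a[0] == b[0];
    } else {
        // Fall back to a byte loop for other lengths.
        return std.mem.eql(u8, a, b);
    }
}

test "fixed-length equality as integer compare" {
    try std.testing.expect(eqlFixed(4, "true", "true"));
    try std.testing.expect(!eqlFixed(4, "true", "blue"));
    try std.testing.expect(eqlFixed(8, "function", "function"));
}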

File diff suppressed because it is too large.

View File

@@ -40,8 +40,8 @@ pub const UserDefinesArray = bun.StringArrayHashMap(DefineData);
pub const DefineData = struct {
value: js_ast.Expr.Data,
-valueless: bool = false,
original_name: ?string = null,
+valueless: bool = false,
// True if accessing this value is known to not have any side effects. For
// example, a bare reference to "Object.create" can be removed because it
@@ -59,21 +59,32 @@ pub const DefineData = struct {
return self.valueless;
}
-pub fn initBoolean(value: bool) DefineData {
-return .{
-.value = .{ .e_boolean = .{ .value = value } },
-.can_be_removed_if_unused = true,
-};
+const define_data_with_true = &DefineData{
+.value = .{ .e_boolean = .{ .value = true } },
+.can_be_removed_if_unused = true,
+};
+const define_data_with_false = &DefineData{
+.value = .{ .e_boolean = .{ .value = false } },
+.can_be_removed_if_unused = true,
+};
+pub fn initBoolean(value: bool) *const DefineData {
+return if (value) define_data_with_true else define_data_with_false;
}
-pub fn initStaticString(str: *const js_ast.E.String) DefineData {
-return .{
-.value = .{ .e_string = @constCast(str) },
-.can_be_removed_if_unused = true,
+pub fn initStaticString(comptime str: *const js_ast.E.String) *const DefineData {
+const Holder = struct {
+pub const data = &DefineData{
+.value = .{ .e_string = @constCast(str) },
+.can_be_removed_if_unused = true,
+};
+};
+return Holder.data;
}
-pub fn merge(a: DefineData, b: DefineData) DefineData {
+pub fn merge(a: *const DefineData, b: *const DefineData) DefineData {
return DefineData{
.value = b.value,
.can_be_removed_if_unused = a.can_be_removed_if_unused,
@@ -209,21 +220,25 @@ pub const Define = struct {
pub const Data = DefineData;
-pub fn forIdentifier(this: *const Define, name: []const u8) ?IdentifierDefine {
-if (this.identifiers.get(name)) |data| {
+pub fn forIdentifier(this: *const Define, name: []const u8) ?*const IdentifierDefine {
+if (this.identifiers.getPtr(name)) |data| {
return data;
}
-return table.pure_global_identifier_map.get(name);
+if (table.pure_global_identifier_map.get(name)) |entry| {
+return entry.get();
+}
+return null;
}
pub fn insertFromIterator(define: *Define, allocator: std.mem.Allocator, comptime Iterator: type, iter: Iterator) !void {
while (iter.next()) |user_define| {
-try define.insert(allocator, user_define.key_ptr.*, user_define.value_ptr.*);
+try define.insert(allocator, user_define.key_ptr.*, user_define.value_ptr);
}
}
-pub fn insert(define: *Define, allocator: std.mem.Allocator, key: []const u8, value: DefineData) !void {
+pub fn insert(define: *Define, allocator: std.mem.Allocator, key: []const u8, value: *const DefineData) !void {
// If it has a dot, then it's a DotDefine.
// e.g. process.env.NODE_ENV
if (strings.lastIndexOfChar(key, '.')) |last_dot| {
@@ -246,7 +261,9 @@ pub const Define = struct {
for (gpe_entry.value_ptr.*) |*part| {
// ["process", "env"] === ["process", "env"] (if that actually worked)
if (arePartsEqual(part.parts, parts)) {
-part.data = part.data.merge(value);
+const prev_data = part.data;
+part.data = prev_data.merge(value);
return;
}
}
@@ -260,14 +277,14 @@ pub const Define = struct {
}
list.appendAssumeCapacity(DotDefine{
-.data = value,
+.data = value.*,
// TODO: do we need to allocate this?
.parts = parts,
});
gpe_entry.value_ptr.* = try list.toOwnedSlice();
} else {
// e.g. IS_BROWSER
-try define.identifiers.put(key, value);
+try define.identifiers.put(key, value.*);
}
}
@@ -279,35 +296,6 @@ pub const Define = struct {
define.drop_debugger = drop_debugger;
try define.dots.ensureTotalCapacity(124);
-const value_define = DefineData{
-.value = .{ .e_undefined = .{} },
-.valueless = true,
-.can_be_removed_if_unused = true,
-};
-// Step 1. Load the globals into the hash tables
-for (GlobalDefinesKey) |global| {
-const key = global[global.len - 1];
-const gpe = try define.dots.getOrPut(key);
-if (gpe.found_existing) {
-var list = try std.ArrayList(DotDefine).initCapacity(allocator, gpe.value_ptr.*.len + 1);
-list.appendSliceAssumeCapacity(gpe.value_ptr.*);
-list.appendAssumeCapacity(DotDefine{
-.parts = global[0..global.len],
-.data = value_define,
-});
-gpe.value_ptr.* = try list.toOwnedSlice();
-} else {
-var list = try std.ArrayList(DotDefine).initCapacity(allocator, 1);
-list.appendAssumeCapacity(DotDefine{
-.parts = global[0..global.len],
-.data = value_define,
-});
-gpe.value_ptr.* = try list.toOwnedSlice();
-}
-}
// Step 3. Load user data into hash tables
// At this stage, user data has already been validated.
if (_user_defines) |user_defines| {
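
Note: the deleted block above is the old "Step 1", which copied every GlobalDefinesKey entry into each Define's hash tables at startup. With the reworked forIdentifier, built-in pure globals are instead served out of a shared compile-time table (table.pure_global_identifier_map) consulted only when the runtime map misses. A simplified sketch of that lookup order, with illustrative types and data rather than Bun's:

const std = @import("std");

const Data = struct { valueless: bool = false };

// Built-ins known at compile time: one static table shared by every
// Define instance, instead of N copies inserted into hash maps at startup.
const pure_globals = [_]struct { name: []const u8, data: Data }{
    .{ .name = "Object.create", .data = .{} },
    .{ .name = "Math.random", .data = .{} },
};

const Define = struct {
    identifiers: std.StringHashMap(Data),

    // User defines win (they may shadow a built-in); the static table is
    // only a fallback, so nothing is allocated for untouched globals.
    fn forIdentifier(this: *const Define, name: []const u8) ?*const Data {
        if (this.identifiers.getPtr(name)) |data| return data;
        for (&pure_globals) |*entry| {
            if (std.mem.eql(u8, entry.name, name)) return &entry.data;
        }
        return null;
    }
};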

View File

@@ -7717,7 +7717,7 @@ pub fn jsonStringify(this: *const Lockfile, w: anytype) !void {
for (Npm.Architecture.NameMap.kvs) |kv| {
if (pkg.meta.arch.has(kv.value)) {
-try w.write(kv.key);
+try w.write(kv.key_ptr[0..kv.key_len]);
}
}
}
@@ -7729,7 +7729,7 @@ pub fn jsonStringify(this: *const Lockfile, w: anytype) !void {
for (Npm.OperatingSystem.NameMap.kvs) |kv| {
if (pkg.meta.os.has(kv.value)) {
-try w.write(kv.key);
+try w.write(kv.key_ptr[0..kv.key_len]);
}
}
}
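
Note: these write sites adapt to the new KV layout from the comptime string map: a key is now a many-item pointer plus a u8 length rather than a slice (pointer plus usize length), and readers rebuild the slice on demand with key_ptr[0..key_len]. A minimal sketch of the representation; the key() helper is illustrative, not part of the actual struct:

const std = @import("std");

// A slice []const u8 is a pointer plus a usize length; storing the length
// as u8 (these keys are all short) shrinks each comptime KV entry.
const KV = struct {
    key_ptr: [*]const u8,
    key_len: u8,

    // Readers reslice on demand, as in kv.key_ptr[0..kv.key_len] above.
    fn key(self: KV) []const u8 {
        return self.key_ptr[0..self.key_len];
    }
};

test "rebuild a slice from ptr + len" {
    const s: []const u8 = "linux";
    const kv = KV{ .key_ptr = s.ptr, .key_len = @intCast(s.len) };
    try std.testing.expectEqualStrings("linux", kv.key());
}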

View File

@@ -654,13 +654,13 @@ pub fn Negatable(comptime T: type) type {
if (has and print_included) {
try writer.print(
\\"{s}"
-, .{kv.key});
+, .{kv.key_ptr[0..kv.key_len]});
if (one) return;
try writer.writeAll(", ");
} else if (!has and !print_included) {
try writer.print(
\\"!{s}"
-, .{kv.key});
+, .{kv.key_ptr[0..kv.key_len]});
if (one) return;
try writer.writeAll(", ");
}
@@ -2164,7 +2164,7 @@ pub const PackageManifest = struct {
if (count > 0 and
((comptime !is_peer) or
-optional_peer_dep_names.items.len == 0))
+optional_peer_dep_names.items.len == 0))
{
const name_map_hash = name_hasher.final();
const version_map_hash = version_hasher.final();

View File

@@ -16241,7 +16241,7 @@ fn NewParser_(
if (p.symbols.items[e_.ref.innerIndex()].kind == .unbound and !result.is_inside_with_scope and !is_delete_target) {
if (p.define.forIdentifier(name)) |def| {
if (!def.valueless) {
-const newvalue = p.valueForDefine(expr.loc, in.assign_target, is_delete_target, &def);
+const newvalue = p.valueForDefine(expr.loc, in.assign_target, is_delete_target, def);
// Don't substitute an identifier for a non-identifier if this is an
// assignment target, since it'll cause a syntax error
@@ -16915,7 +16915,7 @@ fn NewParser_(
const is_call_target = @as(Expr.Tag, p.call_target) == .e_dot and expr.data.e_dot == p.call_target.e_dot;
if (p.define.dots.get(e_.name)) |parts| {
-for (parts) |define| {
+for (parts) |*define| {
if (p.isDotDefineMatch(expr, define.parts)) {
if (in.assign_target == .none) {
// Substitute user-specified defines
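
Note: two small call-site adjustments close out the commit. forIdentifier now returns a pointer, so valueForDefine receives def directly instead of &def; and the dot-define loop switches from a by-value capture to a pointer capture, since for (parts) |define| copies each DotDefine into a fresh local on every iteration while for (parts) |*define| visits the elements in place. A tiny sketch of that capture difference, with illustrative types:

const DotDefine = struct {
    parts: []const []const u8,
    data: [64]u8 = undefined, // stand-in for an inline DefineData payload
};

fn countParts(parts: []const DotDefine) usize {
    var total: usize = 0;
    // |*define| captures *const DotDefine: no per-iteration copy of the
    // 64-byte payload is made, unlike a by-value |define| capture.
    for (parts) |*define| {
        total += define.parts.len;
    }
    return total;
}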