Compare commits

...

6 Commits

Author SHA1 Message Date
claude[bot]
d89da289ac Add ELF support for Linux heap allocation counters
- Enable allocation counters on Linux builds with ASSERTIONS_ENABLED
- Use platform-specific section names: .bunheapcnt for ELF, __DATA,BUNHEAPCNT for Mach-O
- Implement ELF parsing using dl_iterate_phdr() to walk sections and symbols
- Fix include structure per review comment by moving the mach-o headers inside the proper conditional
- Mirror macOS functionality for parsing Bun__allocationCounter__Bun__<Type> symbols (a sketch of this convention follows the commit list)

Co-authored-by: Jarred-Sumner <Jarred-Sumner@users.noreply.github.com>
2025-06-10 00:59:06 +00:00
Jarred-Sumner
628fcb13a3 bun run clang-format 2025-06-09 13:44:34 +00:00
Cursor Agent
6291485f7c Refactor Zone allocator to support type-specific allocation tracking 2025-06-09 13:30:11 +00:00
Cursor Agent
6d2c9ef3b2 Refactor heap breakdown allocator and zone destruction logic 2025-06-09 13:16:54 +00:00
Cursor Agent
d9757e06c5 Refactor allocation tracking to use dynamic symbol table lookup 2025-06-09 13:10:34 +00:00
Cursor Agent
a5085c167c Add Zig allocation counters for memory usage tracking 2025-06-09 12:41:15 +00:00
2 changed files with 323 additions and 33 deletions
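
The first commit message above describes the contract the two files below rely on: each tracked Zig type exports an atomic counter whose symbol is named Bun__allocationCounter__Bun__<Type> and placed in a dedicated section (__DATA,BUNHEAPCNT for Mach-O, .bunheapcnt for ELF), and the C++ side enumerates that section's symbols at runtime. Below is a minimal C++ sketch of the export side of that convention, for orientation only; the real counters are defined in Zig via @export, and the type name FooBar and the macro name are hypothetical.

#include <atomic>
#include <cstdint>

// Place each counter in the platform-specific section that the reader scans.
#if defined(__APPLE__)
#define BUN_HEAP_COUNTER_SECTION __attribute__((used, section("__DATA,BUNHEAPCNT")))
#else
#define BUN_HEAP_COUNTER_SECTION __attribute__((used, section(".bunheapcnt")))
#endif

// One atomic counter per tracked type; the symbol name encodes the type.
// The reader in the first file below dereferences these as uintptr_t values.
extern "C" BUN_HEAP_COUNTER_SECTION
std::atomic<uintptr_t> Bun__allocationCounter__Bun__FooBar { 0 };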

View File

@@ -54,10 +54,24 @@
#if !__has_feature(address_sanitizer)
#include <malloc/malloc.h>
#define IS_MALLOC_DEBUGGING_ENABLED 1
// Add mach-o related headers for reading sections
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#endif
#endif
#endif
#if OS(LINUX)
#if ASSERT_ENABLED
// Add ELF related headers for reading sections
#include <elf.h>
#include <link.h>
#include <dlfcn.h>
#endif
#endif
using namespace JSC;
using namespace WTF;
using namespace WebCore;
@@ -349,6 +363,213 @@ JSC_DEFINE_HOST_FUNCTION(functionMemoryUsageStatistics,
object->putDirect(vm, Identifier::fromString(vm, "zones"_s),
zoneSizesObject);
}
// Read Zig allocation counters dynamically from the BUNHEAPCNT section
{
const struct mach_header_64* header = (struct mach_header_64*)_dyld_get_image_header(0);
intptr_t slide = _dyld_get_image_vmaddr_slide(0);
const struct load_command* cmd = (const struct load_command*)(header + 1);
const struct section_64* target_sect = NULL;
uint8_t target_index = 0;
const struct symtab_command* symtab = NULL;
uint8_t sect_index = 1;
for (uint32_t i = 0; i < header->ncmds; i++) {
if (cmd->cmd == LC_SEGMENT_64) {
const struct segment_command_64* seg = (const struct segment_command_64*)cmd;
const struct section_64* sect = (const struct section_64*)(seg + 1);
for (uint32_t j = 0; j < seg->nsects; j++) {
if (strcmp(sect[j].segname, "__DATA") == 0 && strcmp(sect[j].sectname, "BUNHEAPCNT") == 0) {
target_sect = &sect[j];
target_index = sect_index;
}
sect_index++;
}
} else if (cmd->cmd == LC_SYMTAB) {
symtab = (const struct symtab_command*)cmd;
}
cmd = (const struct load_command*)((const char*)cmd + cmd->cmdsize);
}
if (symtab && target_sect) {
const char* base = (const char*)header;
const struct nlist_64* symbols = (const struct nlist_64*)(base + symtab->symoff);
const char* strtab = base + symtab->stroff;
uint64_t sect_start = target_sect->addr;
uint64_t sect_end = sect_start + target_sect->size;
Vector<std::pair<Identifier, size_t>> zigCounts;
for (uint32_t i = 0; i < symtab->nsyms; i++) {
const struct nlist_64* sym = &symbols[i];
if (!(sym->n_type & N_SECT) || sym->n_sect != target_index) continue;
if (sym->n_value < sect_start || sym->n_value >= sect_end) continue;
uintptr_t* ptr = (uintptr_t*)(sym->n_value + slide);
const char* name = strtab + sym->n_un.n_strx;
// Parse the type name from the symbol name
// Symbol format: "Bun__allocationCounter__Bun__TypeName"
const char* prefix = "Bun__allocationCounter__Bun__";
size_t prefix_len = strlen(prefix);
if (strncmp(name, prefix, prefix_len) == 0) {
const char* type_name = name + prefix_len;
size_t count = *ptr;
if (count > 0) {
zigCounts.append(std::make_pair(
Identifier::fromString(vm, String::fromUTF8(type_name)),
count));
}
}
}
if (!zigCounts.isEmpty()) {
// Sort by count first, then by name
std::sort(zigCounts.begin(), zigCounts.end(),
[](const std::pair<Identifier, size_t>& a,
const std::pair<Identifier, size_t>& b) {
if (a.second == b.second) {
WTF::StringView left = a.first.string();
WTF::StringView right = b.first.string();
return WTF::codePointCompare(left, right) == std::strong_ordering::less;
}
return a.second > b.second;
});
auto* zigObject = constructEmptyObject(globalObject);
for (auto& it : zigCounts) {
zigObject->putDirect(vm, it.first, jsDoubleNumber(it.second));
}
object->putDirect(vm, Identifier::fromString(vm, "zig"_s), zigObject);
}
}
}
#endif
#endif
#if OS(LINUX)
#if ASSERT_ENABLED
// Read Zig allocation counters dynamically from the .bunheapcnt section on Linux
{
// Get information about the current executable
struct CallbackData {
Vector<std::pair<Identifier, size_t>>* zigCounts;
VM* vm;
};
CallbackData data = {
.zigCounts = new Vector<std::pair<Identifier, size_t>>(),
.vm = &vm
};
auto callback = [](struct dl_phdr_info* info, size_t size, void* user_data) -> int {
CallbackData* data = static_cast<CallbackData*>(user_data);
// Only process the main executable (first entry with empty name)
if (info->dlpi_name && strlen(info->dlpi_name) > 0) {
return 0;
}
const ElfW(Phdr)* phdr = info->dlpi_phdr;
for (int i = 0; i < info->dlpi_phnum; i++) {
if (phdr[i].p_type == PT_LOAD && (phdr[i].p_flags & PF_R)) {
// Map the segment to find ELF header
ElfW(Ehdr)* ehdr = (ElfW(Ehdr)*)(info->dlpi_addr + phdr[i].p_vaddr - phdr[i].p_offset);
// Check if this looks like a valid ELF header
if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
ehdr->e_ident[EI_MAG3] != ELFMAG3) {
continue;
}
// Find section header table
ElfW(Shdr)* shdr = (ElfW(Shdr)*)((char*)ehdr + ehdr->e_shoff);
char* shstrtab = (char*)ehdr + shdr[ehdr->e_shstrndx].sh_offset;
ElfW(Shdr)* target_sect = nullptr;
ElfW(Shdr)* symtab_sect = nullptr;
ElfW(Shdr)* strtab_sect = nullptr;
// Find .bunheapcnt section and symbol table
for (int j = 0; j < ehdr->e_shnum; j++) {
const char* sect_name = shstrtab + shdr[j].sh_name;
if (strcmp(sect_name, ".bunheapcnt") == 0) {
target_sect = &shdr[j];
} else if (shdr[j].sh_type == SHT_SYMTAB) {
symtab_sect = &shdr[j];
strtab_sect = &shdr[shdr[j].sh_link];
}
}
if (target_sect && symtab_sect && strtab_sect) {
ElfW(Sym)* symbols = (ElfW(Sym)*)((char*)ehdr + symtab_sect->sh_offset);
char* strtab = (char*)ehdr + strtab_sect->sh_offset;
size_t num_symbols = symtab_sect->sh_size / sizeof(ElfW(Sym));
uintptr_t sect_start = info->dlpi_addr + target_sect->sh_addr;
uintptr_t sect_end = sect_start + target_sect->sh_size;
for (size_t k = 0; k < num_symbols; k++) {
ElfW(Sym)* sym = &symbols[k];
if (sym->st_shndx == SHN_UNDEF) continue;
uintptr_t sym_addr = info->dlpi_addr + sym->st_value;
if (sym_addr < sect_start || sym_addr >= sect_end) continue;
const char* name = strtab + sym->st_name;
// Parse the type name from the symbol name
// Symbol format: "Bun__allocationCounter__Bun__TypeName"
const char* prefix = "Bun__allocationCounter__Bun__";
size_t prefix_len = strlen(prefix);
if (strncmp(name, prefix, prefix_len) == 0) {
const char* type_name = name + prefix_len;
uintptr_t* counter_ptr = (uintptr_t*)sym_addr;
size_t count = *counter_ptr;
if (count > 0) {
data->zigCounts->append(std::make_pair(
Identifier::fromString(*data->vm, String::fromUTF8(type_name)),
count));
}
}
}
}
return 1; // Stop after processing main executable
}
}
return 0;
};
dl_iterate_phdr(callback, &data);
if (!data.zigCounts->isEmpty()) {
// Sort by count first, then by name
std::sort(data.zigCounts->begin(), data.zigCounts->end(),
[](const std::pair<Identifier, size_t>& a,
const std::pair<Identifier, size_t>& b) {
if (a.second == b.second) {
WTF::StringView left = a.first.string();
WTF::StringView right = b.first.string();
return WTF::codePointCompare(left, right) == std::strong_ordering::less;
}
return a.second > b.second;
});
auto* zigObject = constructEmptyObject(globalObject);
for (auto& it : *data.zigCounts) {
zigObject->putDirect(vm, it.first, jsDoubleNumber(it.second));
}
object->putDirect(vm, Identifier::fromString(vm, "zig"_s), zigObject);
}
delete data.zigCounts;
}
#endif
#endif
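
Both the Mach-O branch and the ELF branch in the hunk above end the same way: strip the Bun__allocationCounter__Bun__ prefix from each matching symbol, keep only nonzero counts, and sort by count (highest first) with the type name as a tiebreaker. The following standalone sketch distills that shared tail using plain std::string/std::vector instead of the WTF types; the symbol names and counts are hypothetical.

#include <algorithm>
#include <cstring>
#include <string>
#include <utility>
#include <vector>

using CounterList = std::vector<std::pair<std::string, size_t>>;

// Keep a symbol only if it carries the counter prefix and a nonzero value.
static void collectCounter(CounterList& out, const char* symbolName, size_t value)
{
    static constexpr char prefix[] = "Bun__allocationCounter__Bun__";
    if (std::strncmp(symbolName, prefix, sizeof(prefix) - 1) != 0 || value == 0)
        return;
    out.emplace_back(symbolName + sizeof(prefix) - 1, value);
}

// Highest counts first; equal counts fall back to lexicographic name order.
static void sortCounters(CounterList& counters)
{
    std::sort(counters.begin(), counters.end(), [](const auto& a, const auto& b) {
        if (a.second != b.second)
            return a.second > b.second;
        return a.first < b.first;
    });
}

int main()
{
    CounterList counters;
    collectCounter(counters, "Bun__allocationCounter__Bun__FooBar", 3); // hypothetical symbol
    collectCounter(counters, "Bun__allocationCounter__Bun__Baz", 7);    // hypothetical symbol
    sortCounters(counters);
    // counters now holds { {"Baz", 7}, {"FooBar", 3} }
    return 0;
}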

View File

@@ -4,7 +4,7 @@ const Environment = bun.Environment;
const Allocator = std.mem.Allocator;
const vm_size_t = usize;
pub const enabled = Environment.allow_assert and Environment.isMac;
pub const enabled = Environment.allow_assert and (Environment.isMac or Environment.isLinux);
fn heapLabel(comptime T: type) [:0]const u8 {
const base_name = if (@hasDecl(T, "heap_label"))
@@ -14,15 +14,74 @@ fn heapLabel(comptime T: type) [:0]const u8 {
return base_name;
}
pub fn allocator(comptime T: type) std.mem.Allocator {
return namedAllocator(comptime heapLabel(T));
// Function to get the allocation counter for a given type
fn getAllocationCounter(comptime T: type) *std.atomic.Value(usize) {
const name = comptime heapLabel(T);
const full_name = comptime "Bun__" ++ name;
const static = struct {
pub var active_allocation_counter = std.atomic.Value(usize).init(0);
};
// Export the counter with the specific naming convention and section
comptime {
const section_name = if (Environment.isMac) "__DATA,BUNHEAPCNT" else ".bunheapcnt";
@export(&static.active_allocation_counter, .{
.name = std.fmt.comptimePrint("Bun__allocationCounter__{s}", .{full_name}),
.section = section_name,
});
}
return &static.active_allocation_counter;
}
pub fn allocator(comptime T: type) std.mem.Allocator {
const zone = getZoneT(T);
return zone.allocator(T);
}
pub fn namedAllocator(comptime name: [:0]const u8) std.mem.Allocator {
return getZone("Bun__" ++ name).allocator();
// For named allocators, we don't have a type to track
const zone = getZone("Bun__" ++ name);
const S = struct {
fn rawAlloc(zone_ptr: *anyopaque, len: usize, alignment: std.mem.Alignment, _: usize) ?[*]u8 {
return Zone.alignedAlloc(@ptrCast(zone_ptr), len, alignment);
}
fn resize(_: *anyopaque, buf: []u8, _: std.mem.Alignment, new_len: usize, _: usize) bool {
if (new_len <= buf.len) {
return true;
}
const full_len = Zone.alignedAllocSize(buf.ptr);
if (new_len <= full_len) {
return true;
}
return false;
}
fn rawFree(zone_ptr: *anyopaque, buf: []u8, _: std.mem.Alignment, _: usize) void {
Zone.malloc_zone_free(@ptrCast(zone_ptr), @ptrCast(buf.ptr));
}
};
const vtable = comptime std.mem.Allocator.VTable{
.alloc = &S.rawAlloc,
.resize = &S.resize,
.remap = &std.mem.Allocator.noRemap,
.free = &S.rawFree,
};
return .{
.vtable = &vtable,
.ptr = zone,
};
}
pub fn getZoneT(comptime T: type) *Zone {
return getZone(comptime heapLabel(T));
return getZone(comptime "Bun__" ++ heapLabel(T));
}
pub fn getZone(comptime name: [:0]const u8) *Zone {
@@ -62,35 +121,46 @@ pub const Zone = opaque {
return std.c.malloc_size(ptr);
}
fn rawAlloc(zone: *anyopaque, len: usize, alignment: std.mem.Alignment, _: usize) ?[*]u8 {
return alignedAlloc(@ptrCast(zone), len, alignment);
}
pub fn allocator(zone: *Zone, comptime T: type) std.mem.Allocator {
const S = struct {
fn rawAlloc(zone_ptr: *anyopaque, len: usize, alignment: std.mem.Alignment, _: usize) ?[*]u8 {
const result = Zone.alignedAlloc(@ptrCast(zone_ptr), len, alignment);
if (result) |_| {
if (comptime enabled) {
_ = getAllocationCounter(T).fetchAdd(1, .monotonic);
}
}
return result;
}
fn resize(_: *anyopaque, buf: []u8, _: std.mem.Alignment, new_len: usize, _: usize) bool {
if (new_len <= buf.len) {
return true;
}
fn resize(_: *anyopaque, buf: []u8, _: std.mem.Alignment, new_len: usize, _: usize) bool {
if (new_len <= buf.len) {
return true;
}
const full_len = alignedAllocSize(buf.ptr);
if (new_len <= full_len) {
return true;
}
const full_len = Zone.alignedAllocSize(buf.ptr);
if (new_len <= full_len) {
return true;
}
return false;
}
return false;
}
fn rawFree(zone: *anyopaque, buf: []u8, _: std.mem.Alignment, _: usize) void {
malloc_zone_free(@ptrCast(zone), @ptrCast(buf.ptr));
}
fn rawFree(zone_ptr: *anyopaque, buf: []u8, _: std.mem.Alignment, _: usize) void {
if (comptime enabled) {
_ = getAllocationCounter(T).fetchSub(1, .monotonic);
}
Zone.malloc_zone_free(@ptrCast(zone_ptr), @ptrCast(buf.ptr));
}
};
pub const vtable = std.mem.Allocator.VTable{
.alloc = &rawAlloc,
.resize = &resize,
.remap = &std.mem.Allocator.noRemap,
.free = &rawFree,
};
const vtable = comptime std.mem.Allocator.VTable{
.alloc = &S.rawAlloc,
.resize = &S.resize,
.remap = &std.mem.Allocator.noRemap,
.free = &S.rawFree,
};
pub fn allocator(zone: *Zone) std.mem.Allocator {
return .{
.vtable = &vtable,
.ptr = zone,
@@ -99,17 +169,16 @@ pub const Zone = opaque {
/// Create a single-item pointer with initialized data.
pub inline fn create(zone: *Zone, comptime T: type, data: T) *T {
const alignment: std.mem.Alignment = .fromByteUnits(@alignOf(T));
const ptr: *T = @alignCast(@ptrCast(
rawAlloc(zone, @sizeOf(T), alignment, @returnAddress()) orelse bun.outOfMemory(),
));
const alloc = zone.allocator(T);
const ptr = alloc.create(T) catch bun.outOfMemory();
ptr.* = data;
return ptr;
}
/// Free a single-item pointer
pub inline fn destroy(zone: *Zone, comptime T: type, ptr: *T) void {
malloc_zone_free(zone, @ptrCast(ptr));
const alloc = zone.allocator(T);
alloc.destroy(ptr);
}
pub extern fn malloc_default_zone() *Zone;
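
For readers less familiar with Zig, the vtable changes above boil down to wrapping the zone allocator so that every successful allocation increments the type's counter and every free decrements it, via fetchAdd(1, .monotonic) and fetchSub(1, .monotonic). A rough C++ analogue of that pattern, with malloc/free standing in for the Bun zone, a single illustrative counter, and hypothetical helper names:

#include <atomic>
#include <cstdlib>

// Illustrative per-type counter; in the diff above each tracked Zig type
// gets its own exported atomic in the heap-counter section.
static std::atomic<size_t> allocationCounter { 0 };

static void* countedAlloc(size_t size)
{
    void* result = std::malloc(size);
    if (result)
        allocationCounter.fetch_add(1, std::memory_order_relaxed); // mirrors fetchAdd(1, .monotonic)
    return result;
}

static void countedFree(void* ptr)
{
    if (!ptr)
        return;
    allocationCounter.fetch_sub(1, std::memory_order_relaxed); // mirrors fetchSub(1, .monotonic)
    std::free(ptr);
}

Relaxed ordering is enough here because each counter is an independent tally that is only read later for diagnostics, which is also why the Zig code uses .monotonic.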