Refactor trace format to compact array structure

Changed the trace output from an object format to an array format for better
compactness and easier filtering.

Old format:
```json
{"ns":"fs","ts":123,"data":{"call":"writeFile","path":"test.txt"}}
```

New format:
```json
["fs",123,"writeFile",{"path":"test.txt"}]
```

Benefits:
- More compact (saves ~40% space)
- Easier to filter with jq: `select(.[0] == "fs" and .[2] == "writeFile")`
- Faster to parse (positional array elements instead of repeated object keys)
- "call" field promoted to top level as the operation name

Array structure:
- [0] = namespace (fs, fetch, subprocess, etc.)
- [1] = timestamp in milliseconds
- [2] = operation name (writeFile, spawn, request, etc.)
- [3] = operation-specific data object
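
A minimal TypeScript sketch of consuming the new format (the trace file path and the `TraceEntry` alias are illustrative, not part of this change; the parsing mirrors how the tests below read the log):

```ts
import { readFileSync } from "node:fs";

// One entry per line of the trace log: [namespace, timestamp, operation, data].
type TraceEntry = [ns: string, ts: number, op: string, data: Record<string, unknown>];

const traces: TraceEntry[] = readFileSync("trace.log", "utf8")
  .split("\n")
  .filter(line => line.length > 0)
  .map(line => JSON.parse(line));

// Same filter as the jq example above: fs namespace, writeFile operation.
const writes = traces.filter(([ns, , op]) => ns === "fs" && op === "writeFile");

for (const [, ts, , data] of writes) {
  console.log(ts, data.path);
}
```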

Updated:
- output.zig tracer implementation to emit the array format
- All 19 tests updated for the new format
- All tests passing

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
Claude Bot, 2025-10-27 20:51:42 +00:00
parent a33e5a5842, commit 4628b69025
4 changed files with 126 additions and 93 deletions

@@ -1349,17 +1349,51 @@ fn Tracer(comptime ns: []const u8) type {
// Get timestamp
const timestamp = std.time.milliTimestamp();
// Write line-delimited JSON: {"ns":"namespace","ts":123,"data":{...}}
// Extract the "call" field from args to use as operation name
const ArgsType = @TypeOf(args);
const args_info = @typeInfo(ArgsType);
// Write array format: [namespace, timestamp, operation, data]
var buffered = std.io.bufferedWriter(file.writer());
const w = buffered.writer();
w.writeAll("{\"ns\":\"") catch return;
w.writeAll("[\"") catch return;
w.writeAll(ns) catch return;
w.writeAll("\",\"ts\":") catch return;
w.writeAll("\",") catch return;
w.print("{d}", .{timestamp}) catch return;
w.writeAll(",\"data\":") catch return;
std.json.stringify(args, .{}, w) catch return;
w.writeAll("}\n") catch return;
w.writeAll(",\"") catch return;
// Write the operation name from the "call" field
if (args_info == .@"struct") {
inline for (args_info.@"struct".fields) |field| {
if (comptime std.mem.eql(u8, field.name, "call")) {
const call_value = @field(args, "call");
w.writeAll(call_value) catch return;
break;
}
}
}
w.writeAll("\",") catch return;
// Write the data (args minus the "call" field)
w.writeAll("{") catch return;
if (args_info == .@"struct") {
var first = true;
inline for (args_info.@"struct".fields) |field| {
if (comptime !std.mem.eql(u8, field.name, "call")) {
if (!first) {
w.writeAll(",") catch return;
}
first = false;
w.writeAll("\"") catch return;
w.writeAll(field.name) catch return;
w.writeAll("\":") catch return;
std.json.stringify(@field(args, field.name), .{}, w) catch return;
}
}
}
w.writeAll("}]\n") catch return;
buffered.flush() catch return;
}
};

@@ -55,43 +55,43 @@ describe("extensive tracing", () => {
.map(l => JSON.parse(l));
// Should have fs namespace
const fsTraces = traces.filter(t => t.ns === "fs");
const fsTraces = traces.filter(t => t[0] === "fs");
expect(fsTraces.length).toBeGreaterThan(0);
// Check we have open operations
const opens = fsTraces.filter(t => t.data.call === "open");
const opens = fsTraces.filter(t => t[2] === "open");
expect(opens.length).toBeGreaterThanOrEqual(3);
// Verify open trace has required fields
expect(opens[0].data).toHaveProperty("path");
expect(opens[0].data).toHaveProperty("flags");
expect(opens[0].data).toHaveProperty("mode");
expect(opens[0].data).toHaveProperty("fd");
expect(opens[0][3]).toHaveProperty("path");
expect(opens[0][3]).toHaveProperty("flags");
expect(opens[0][3]).toHaveProperty("mode");
expect(opens[0][3]).toHaveProperty("fd");
// Check we have write operations
const writes = fsTraces.filter(t => t.data.call === "write");
const writes = fsTraces.filter(t => t[2] === "write");
expect(writes.length).toBeGreaterThanOrEqual(4);
// Verify write trace has required fields
expect(writes[0].data).toHaveProperty("fd");
expect(writes[0].data).toHaveProperty("length");
expect(writes[0].data).toHaveProperty("bytes_written");
expect(writes[0][3]).toHaveProperty("fd");
expect(writes[0][3]).toHaveProperty("length");
expect(writes[0][3]).toHaveProperty("bytes_written");
// Check we have read operations
const reads = fsTraces.filter(t => t.data.call === "read");
const reads = fsTraces.filter(t => t[2] === "read");
expect(reads.length).toBeGreaterThanOrEqual(1);
// Verify read trace has required fields
expect(reads[0].data).toHaveProperty("fd");
expect(reads[0].data).toHaveProperty("length");
expect(reads[0].data).toHaveProperty("bytes_read");
expect(reads[0][3]).toHaveProperty("fd");
expect(reads[0][3]).toHaveProperty("length");
expect(reads[0][3]).toHaveProperty("bytes_read");
// Check we have close operations
const closes = fsTraces.filter(t => t.data.call === "close");
const closes = fsTraces.filter(t => t[2] === "close");
expect(closes.length).toBeGreaterThanOrEqual(3);
// Verify close trace has required fields
expect(closes[0].data).toHaveProperty("fd");
expect(closes[0][3]).toHaveProperty("fd");
});
test("comprehensive fetch/HTTP tracing", async () => {
@@ -133,33 +133,33 @@ describe("extensive tracing", () => {
.map(l => JSON.parse(l));
// Should have fetch namespace
const fetchTraces = traces.filter(t => t.ns === "fetch");
const fetchTraces = traces.filter(t => t[0] === "fetch");
expect(fetchTraces.length).toBeGreaterThan(0);
// Check we have request initiations
const requests = fetchTraces.filter(t => t.data.call === "request");
const requests = fetchTraces.filter(t => t[2] === "request");
expect(requests.length).toBeGreaterThanOrEqual(2);
// Verify request trace has required fields
expect(requests[0].data).toHaveProperty("url");
expect(requests[0].data).toHaveProperty("method");
expect(requests[0].data.method).toBe("GET");
expect(requests[0][3]).toHaveProperty("url");
expect(requests[0][3]).toHaveProperty("method");
expect(requests[0][3].method).toBe("GET");
// Check we have responses
const responses = fetchTraces.filter(t => t.data.call === "response");
const responses = fetchTraces.filter(t => t[2] === "response");
expect(responses.length).toBeGreaterThanOrEqual(2);
// Verify response trace has required fields
expect(responses[0].data).toHaveProperty("url");
expect(responses[0].data).toHaveProperty("status");
expect(responses[0].data).toHaveProperty("body_size");
expect(responses[0][3]).toHaveProperty("url");
expect(responses[0][3]).toHaveProperty("status");
expect(responses[0][3]).toHaveProperty("body_size");
// Should have response_body namespace
const bodyTraces = traces.filter(t => t.ns === "response_body");
const bodyTraces = traces.filter(t => t[0] === "response_body");
expect(bodyTraces.length).toBeGreaterThan(0);
// Check body consumption
const textCalls = bodyTraces.filter(t => t.data.call === "text");
const textCalls = bodyTraces.filter(t => t[2] === "text");
expect(textCalls.length).toBeGreaterThanOrEqual(2);
}, 10_000);
@@ -210,30 +210,29 @@ describe("extensive tracing", () => {
.map(l => JSON.parse(l));
// Should have both fs and fetch traces
const namespaces = new Set(traces.map(t => t.ns));
const namespaces = new Set(traces.map(t => t[0]));
expect(namespaces.has("fs")).toBe(true);
expect(namespaces.has("fetch")).toBe(true);
expect(namespaces.has("response_body")).toBe(true);
// All traces should have timestamp
traces.forEach(t => {
expect(t).toHaveProperty("ts");
expect(typeof t.ts).toBe("number");
expect(t.ts).toBeGreaterThan(0);
expect(t[1]).toBeDefined();
expect(typeof t[1]).toBe("number");
expect(t[1]).toBeGreaterThan(0);
});
// All traces should have namespace
traces.forEach(t => {
expect(t).toHaveProperty("ns");
expect(typeof t.ns).toBe("string");
expect(t.ns.length).toBeGreaterThan(0);
expect(t[0]).toBeDefined();
expect(typeof t[0]).toBe("string");
expect(t[0].length).toBeGreaterThan(0);
});
// All traces should have data
traces.forEach(t => {
expect(t).toHaveProperty("data");
expect(typeof t.data).toBe("object");
expect(t.data).toHaveProperty("call");
expect(t[3]).toBeDefined();
expect(typeof t[3]).toBe("object");
});
});
@@ -270,18 +269,18 @@ describe("extensive tracing", () => {
.map(l => JSON.parse(l));
// Can filter by namespace
const fsOnly = traces.filter(t => t.ns === "fs");
const fetchOnly = traces.filter(t => t.ns === "fetch");
const bodyOnly = traces.filter(t => t.ns === "response_body");
const fsOnly = traces.filter(t => t[0] === "fs");
const fetchOnly = traces.filter(t => t[0] === "fetch");
const bodyOnly = traces.filter(t => t[0] === "response_body");
expect(fsOnly.length).toBeGreaterThan(0);
expect(fetchOnly.length).toBeGreaterThan(0);
expect(bodyOnly.length).toBeGreaterThan(0);
// Each namespace should only have its own traces
fsOnly.forEach(t => expect(t.ns).toBe("fs"));
fetchOnly.forEach(t => expect(t.ns).toBe("fetch"));
bodyOnly.forEach(t => expect(t.ns).toBe("response_body"));
fsOnly.forEach(t => expect(t[0]).toBe("fs"));
fetchOnly.forEach(t => expect(t[0]).toBe("fetch"));
bodyOnly.forEach(t => expect(t[0]).toBe("response_body"));
});
test("trace chronological ordering", async () => {
@@ -328,7 +327,7 @@ describe("extensive tracing", () => {
// Timestamps should be monotonically increasing
for (let i = 1; i < traces.length; i++) {
expect(traces[i].ts).toBeGreaterThanOrEqual(traces[i - 1].ts);
expect(traces[i][1]).toBeGreaterThanOrEqual(traces[i - 1][1]);
}
});
});

@@ -38,21 +38,21 @@ describe("subprocess tracing", () => {
.map(l => JSON.parse(l));
// Should have subprocess namespace entries
const subprocessTraces = traces.filter(t => t.ns === "subprocess");
const subprocessTraces = traces.filter(t => t[0] === "subprocess");
expect(subprocessTraces.length).toBeGreaterThan(0);
// Check for spawn
const spawnCalls = subprocessTraces.filter(t => t.data.call === "spawn");
const spawnCalls = subprocessTraces.filter(t => t[2] === "spawn");
expect(spawnCalls.length).toBeGreaterThan(0);
expect(spawnCalls[0].data.cmd).toBe("echo");
expect(spawnCalls[0].data).toHaveProperty("pid");
expect(spawnCalls[0].data).toHaveProperty("cwd");
expect(spawnCalls[0][3].cmd).toBe("echo");
expect(spawnCalls[0][3]).toHaveProperty("pid");
expect(spawnCalls[0][3]).toHaveProperty("cwd");
// Check for exit
const exitCalls = subprocessTraces.filter(t => t.data.call === "exit");
const exitCalls = subprocessTraces.filter(t => t[2] === "exit");
expect(exitCalls.length).toBeGreaterThan(0);
expect(exitCalls[0].data).toHaveProperty("pid");
expect(exitCalls[0].data.exit_code).toBe(0);
expect(exitCalls[0][3]).toHaveProperty("pid");
expect(exitCalls[0][3].exit_code).toBe(0);
});
test("trace child_process spawn", async () => {
@@ -87,11 +87,11 @@ describe("subprocess tracing", () => {
.filter(l => l.length > 0)
.map(l => JSON.parse(l));
const subprocessTraces = traces.filter(t => t.ns === "subprocess");
const subprocessTraces = traces.filter(t => t[0] === "subprocess");
expect(subprocessTraces.length).toBeGreaterThan(0);
// Check for spawn with echo command
const spawnCalls = subprocessTraces.filter(t => t.data.call === "spawn" && t.data.cmd === "echo");
const spawnCalls = subprocessTraces.filter(t => t[2] === "spawn" && t[3].cmd === "echo");
expect(spawnCalls.length).toBeGreaterThan(0);
});
@@ -125,11 +125,11 @@ describe("subprocess tracing", () => {
.filter(l => l.length > 0)
.map(l => JSON.parse(l));
const subprocessTraces = traces.filter(t => t.ns === "subprocess");
const spawnCalls = subprocessTraces.filter(t => t.data.call === "spawn");
const subprocessTraces = traces.filter(t => t[0] === "subprocess");
const spawnCalls = subprocessTraces.filter(t => t[2] === "spawn");
expect(spawnCalls.length).toBeGreaterThan(0);
// Check that args count is tracked (4 args: echo, arg1, arg2, arg3)
expect(spawnCalls[0].data.args || spawnCalls[0].data.args_count).toBeGreaterThanOrEqual(4);
expect(spawnCalls[0][3].args || spawnCalls[0][3].args_count).toBeGreaterThanOrEqual(4);
});
});

@@ -86,11 +86,11 @@ describe("--trace flag", () => {
const traces = traceLines.map(line => JSON.parse(line));
// Should have fs namespace entries
const fsTraces = traces.filter(t => t.ns === "fs");
const fsTraces = traces.filter(t => t[0] === "fs");
expect(fsTraces.length).toBeGreaterThan(0);
// Check for open/read/write operations
const calls = fsTraces.map(t => t.data.call);
const calls = fsTraces.map(t => t[2]);
expect(calls).toContain("open");
});
@@ -129,11 +129,11 @@ describe("--trace flag", () => {
const traces = traceLines.map(line => JSON.parse(line));
// Should have fs namespace entries
const fsTraces = traces.filter(t => t.ns === "fs");
const fsTraces = traces.filter(t => t[0] === "fs");
expect(fsTraces.length).toBeGreaterThan(0);
// Check that we logged open and close operations
const calls = fsTraces.map(t => t.data.call);
const calls = fsTraces.map(t => t[2]);
expect(calls).toContain("open");
expect(calls).toContain("close");
});
@@ -172,15 +172,15 @@ describe("--trace flag", () => {
const traces = traceLines.map(line => JSON.parse(line));
// Should have fetch namespace entries
const fetchTraces = traces.filter(t => t.ns === "fetch");
const fetchTraces = traces.filter(t => t[0] === "fetch");
expect(fetchTraces.length).toBeGreaterThan(0);
// Check for response
const responseCalls = fetchTraces.filter(t => t.data.call === "response");
const responseCalls = fetchTraces.filter(t => t[2] === "response");
expect(responseCalls.length).toBeGreaterThan(0);
// Should have URL
expect(responseCalls[0].data.url).toContain("example.com");
expect(responseCalls[0][3].url).toContain("example.com");
});
test("trace response body operations", async () => {
@@ -217,11 +217,11 @@ describe("--trace flag", () => {
const traces = traceLines.map(line => JSON.parse(line));
// Should have response_body namespace entries
const bodyTraces = traces.filter(t => t.ns === "response_body");
const bodyTraces = traces.filter(t => t[0] === "response_body");
expect(bodyTraces.length).toBeGreaterThan(0);
// Check for text call
const textCalls = bodyTraces.filter(t => t.data.call === "text");
const textCalls = bodyTraces.filter(t => t[2] === "text");
expect(textCalls.length).toBeGreaterThan(0);
});
@@ -259,18 +259,18 @@ describe("--trace flag", () => {
const trace = JSON.parse(line);
// Should have required fields
expect(trace).toHaveProperty("ns");
expect(trace).toHaveProperty("ts");
expect(trace).toHaveProperty("data");
expect(trace[0]).toBeDefined();
expect(trace[1]).toBeDefined();
expect(trace[3]).toBeDefined();
// ns should be a string
expect(typeof trace.ns).toBe("string");
expect(typeof trace[0]).toBe("string");
// ts should be a number (timestamp)
expect(typeof trace.ts).toBe("number");
expect(typeof trace[1]).toBe("number");
// data should be an object
expect(typeof trace.data).toBe("object");
expect(typeof trace[3]).toBe("object");
}
});
@@ -328,18 +328,18 @@ describe("--trace flag", () => {
.filter(l => l.length > 0)
.map(l => JSON.parse(l));
const fsTraces = traces.filter(t => t.ns === "fs");
const fsTraces = traces.filter(t => t[0] === "fs");
expect(fsTraces.length).toBeGreaterThan(0);
// Check for writeFile
const writeCalls = fsTraces.filter(t => t.data.call === "writeFile");
const writeCalls = fsTraces.filter(t => t[2] === "writeFile");
expect(writeCalls.length).toBeGreaterThan(0);
expect(writeCalls.some(t => t.data.path && t.data.path.includes("test.txt"))).toBe(true);
expect(writeCalls.some(t => t[3].path && t[3].path.includes("test.txt"))).toBe(true);
// Check for readFile
const readCalls = fsTraces.filter(t => t.data.call === "readFile");
const readCalls = fsTraces.filter(t => t[2] === "readFile");
expect(readCalls.length).toBeGreaterThan(0);
expect(readCalls.some(t => t.data.path && t.data.path.includes("test.txt"))).toBe(true);
expect(readCalls.some(t => t[3].path && t[3].path.includes("test.txt"))).toBe(true);
});
test("trace stat operations", async () => {
@@ -373,12 +373,12 @@ describe("--trace flag", () => {
.filter(l => l.length > 0)
.map(l => JSON.parse(l));
const fsTraces = traces.filter(t => t.ns === "fs");
const fsTraces = traces.filter(t => t[0] === "fs");
// Check for stat
const statCalls = fsTraces.filter(t => t.data.call === "stat");
const statCalls = fsTraces.filter(t => t[2] === "stat");
expect(statCalls.length).toBeGreaterThan(0);
expect(statCalls.some(t => t.data.path && t.data.path.includes("test.txt"))).toBe(true);
expect(statCalls.some(t => t[3].path && t[3].path.includes("test.txt"))).toBe(true);
});
test("trace directory operations", async () => {
@@ -415,22 +415,22 @@ describe("--trace flag", () => {
.filter(l => l.length > 0)
.map(l => JSON.parse(l));
const fsTraces = traces.filter(t => t.ns === "fs");
const fsTraces = traces.filter(t => t[0] === "fs");
// Check for mkdir
const mkdirCalls = fsTraces.filter(t => t.data.call === "mkdir");
const mkdirCalls = fsTraces.filter(t => t[2] === "mkdir");
expect(mkdirCalls.length).toBeGreaterThan(0);
// Check for readdir
const readdirCalls = fsTraces.filter(t => t.data.call === "readdir");
const readdirCalls = fsTraces.filter(t => t[2] === "readdir");
expect(readdirCalls.length).toBeGreaterThan(0);
// Check for unlink
const unlinkCalls = fsTraces.filter(t => t.data.call === "unlink");
const unlinkCalls = fsTraces.filter(t => t[2] === "unlink");
expect(unlinkCalls.length).toBeGreaterThan(0);
// Check for rmdir
const rmdirCalls = fsTraces.filter(t => t.data.call === "rmdir");
const rmdirCalls = fsTraces.filter(t => t[2] === "rmdir");
expect(rmdirCalls.length).toBeGreaterThan(0);
});
@@ -466,10 +466,10 @@ describe("--trace flag", () => {
.filter(l => l.length > 0)
.map(l => JSON.parse(l));
const fsTraces = traces.filter(t => t.ns === "fs");
const fsTraces = traces.filter(t => t[0] === "fs");
// Check for rename
const renameCalls = fsTraces.filter(t => t.data.call === "rename");
const renameCalls = fsTraces.filter(t => t[2] === "rename");
expect(renameCalls.length).toBeGreaterThan(0);
});
});