mirror of
https://github.com/oven-sh/bun
synced 2026-02-02 15:08:46 +00:00
### What does this PR do? Fixes data loss when reading large amounts of data from subprocess pipes on Windows, a regression introduced by the libuv 1.51.0 upgrade in commite3783c244f. ### The Problem When piping large data through a subprocess on Windows (e.g., `process.stdin.pipe(process.stdout)`), Bun randomly loses ~73KB of data out of 1MB, receiving only ~974KB instead of the full 1048576 bytes. The subprocess correctly receives all 1MB on stdin, but the parent process loses data when reading from the subprocess stdout. ### Root Cause Analysis #### libuv 1.51.0 Change The libuv 1.51.0 upgrade (commit [libuv/libuv@727ee723](727ee7237e)) changed Windows pipe reading behavior: **Before:** libuv would call `PeekNamedPipe` to check available bytes, then read exactly that amount. **After:** libuv attempts immediate non-blocking reads (up to 65536 bytes) before falling back to async reads. If less data is available than requested, it returns what's available and signals `more=0`, causing the read loop to break. This optimization introduces **0-byte reads** when data isn't immediately available, which are delivered to Bun's read callback. #### The Race Condition When Bun's `WindowsBufferedReader` called `onRead(.drained)` for these 0-byte reads, it created a race condition. Debug logs clearly show the issue: **Error case (log.txt):** ``` Line 79-80: onStreamRead = 0 (drained) Line 81: filesink closes (stdin closes) Line 85: onStreamRead = 6024 ← Should be 74468! Line 89: onStreamRead = -4095 (EOF) ``` **Success case (success.log.txt):** ``` Line 79-80: onStreamRead = 0 (drained) Line 81: filesink closes (stdin closes) Line 85: onStreamRead = 74468 ← Full chunk! Line 89-90: onStreamRead = 0 (drained) Line 91: onStreamRead = 6024 Line 95: onStreamRead = -4095 (EOF) ``` When stdin closes while a 0-byte drained read is pending, the next read returns truncated data (6024 bytes instead of 74468 bytes). 
### The Fix Two changes to `WindowsBufferedReader` in `src/io/PipeReader.zig`: #### 1. Ignore 0-byte reads (line 937-940) Don't call `onRead(.drained)` for 0-byte reads. Just return and let libuv queue the next read. This prevents the race condition that causes truncated reads. ```zig 0 => { // With libuv 1.51.0+, calling onRead(.drained) here causes a race condition // where subsequent reads return truncated data. Just ignore 0-byte reads. return; }, ``` #### 2. Defer `has_inflight_read` flag clearing (line 827-839) Clear the flag **after** the read callback completes, not before. This prevents libuv from starting a new overlapped read operation while we're still processing the current data buffer, which could cause memory corruption per the libuv commit message: > "Starting a new read after uv_read_cb returns causes memory corruption on the OVERLAPPED read_req if uv_read_stop+uv_read_start was called during the callback" ```zig const result = onReadChunkFn(this.parent, buf, hasMore); // Clear has_inflight_read after the callback completes this.flags.has_inflight_read = false; return result; ``` ### How to Test Run the modified test in `test/js/bun/spawn/spawn-stdin-readable-stream.test.ts`: ```js test("ReadableStream with very large chunked data", async () => { const chunkSize = 64 * 1024; // 64KB chunks const numChunks = 16; // 1MB total const chunk = Buffer.alloc(chunkSize, "x"); const stream = new ReadableStream({ pull(controller) { if (pushedChunks < numChunks) { controller.enqueue(chunk); pushedChunks++; } else { controller.close(); } }, }); await using proc = spawn({ cmd: [bunExe(), "-e", ` let length = 0; process.stdin.on('data', (data) => length += data.length); process.once('beforeExit', () => console.error(length)); process.stdin.pipe(process.stdout) `], stdin: stream, stdout: "pipe", env: bunEnv, }); const text = await proc.stdout.text(); expect(text.length).toBe(chunkSize * numChunks); // Should be 1048576 }); ``` **Before fix:** Randomly fails with 
~974KB instead of 1MB **After fix:** Consistently passes with full 1MB Run ~100 times to verify the race condition is fixed. ### Related Issues This may also fix #23071 (Windows scripts hanging), though that issue needs separate verification. ### Why Draft? Marking as draft for Windows testing by the team. The fix is based on detailed debug log analysis showing the exact race condition, but needs verification on Windows CI. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com> --------- Co-authored-by: Claude Bot <claude-bot@bun.sh> Co-authored-by: Claude <noreply@anthropic.com> Co-authored-by: Jarred Sumner <jarred@jarredsumner.com>
601 lines
16 KiB
TypeScript
601 lines
16 KiB
TypeScript
import { spawn } from "bun";
|
|
import { describe, expect, mock, test } from "bun:test";
|
|
import { bunEnv, bunExe, expectMaxObjectTypeCount, isASAN, isCI } from "harness";
|
|
|
|
describe("spawn stdin ReadableStream", () => {
|
|
test("basic ReadableStream as stdin", async () => {
|
|
const stream = new ReadableStream({
|
|
start(controller) {
|
|
controller.enqueue("hello from stream");
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
const text = await proc.stdout.text();
|
|
expect(text).toBe("hello from stream");
|
|
expect(await proc.exited).toBe(0);
|
|
});
|
|
|
|
test("ReadableStream with multiple chunks", async () => {
|
|
const chunks = ["chunk1\n", "chunk2\n", "chunk3\n"];
|
|
const stream = new ReadableStream({
|
|
start(controller) {
|
|
for (const chunk of chunks) {
|
|
controller.enqueue(chunk);
|
|
}
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
const text = await proc.stdout.text();
|
|
expect(text).toBe(chunks.join(""));
|
|
expect(await proc.exited).toBe(0);
|
|
});
|
|
|
|
test("ReadableStream with Uint8Array chunks", async () => {
|
|
const encoder = new TextEncoder();
|
|
const stream = new ReadableStream({
|
|
start(controller) {
|
|
controller.enqueue(encoder.encode("binary "));
|
|
controller.enqueue(encoder.encode("data "));
|
|
controller.enqueue(encoder.encode("stream"));
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
const text = await proc.stdout.text();
|
|
expect(text).toBe("binary data stream");
|
|
expect(await proc.exited).toBe(0);
|
|
});
|
|
|
|
test("ReadableStream with delays between chunks", async () => {
|
|
const stream = new ReadableStream({
|
|
async start(controller) {
|
|
controller.enqueue("first\n");
|
|
await Bun.sleep(50);
|
|
controller.enqueue("second\n");
|
|
await Bun.sleep(50);
|
|
controller.enqueue("third\n");
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
const text = await proc.stdout.text();
|
|
expect(text).toBe("first\nsecond\nthird\n");
|
|
expect(await proc.exited).toBe(0);
|
|
});
|
|
|
|
test("ReadableStream with pull method", async () => {
|
|
let pullCount = 0;
|
|
const stream = new ReadableStream({
|
|
pull(controller) {
|
|
pullCount++;
|
|
if (pullCount <= 3) {
|
|
controller.enqueue(`pull ${pullCount}\n`);
|
|
} else {
|
|
controller.close();
|
|
}
|
|
},
|
|
});
|
|
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
const text = await proc.stdout.text();
|
|
expect(text).toBe("pull 1\npull 2\npull 3\n");
|
|
expect(await proc.exited).toBe(0);
|
|
});
|
|
|
|
test("ReadableStream with async pull and delays", async () => {
|
|
let pullCount = 0;
|
|
const stream = new ReadableStream({
|
|
async pull(controller) {
|
|
pullCount++;
|
|
if (pullCount <= 3) {
|
|
await Bun.sleep(30);
|
|
controller.enqueue(`async pull ${pullCount}\n`);
|
|
} else {
|
|
controller.close();
|
|
}
|
|
},
|
|
});
|
|
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
const text = await proc.stdout.text();
|
|
expect(text).toBe("async pull 1\nasync pull 2\nasync pull 3\n");
|
|
expect(await proc.exited).toBe(0);
|
|
});
|
|
|
|
test("ReadableStream with large data", async () => {
|
|
const largeData = "x".repeat(1024 * 1024); // 1MB
|
|
const stream = new ReadableStream({
|
|
start(controller) {
|
|
controller.enqueue(largeData);
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
const text = await proc.stdout.text();
|
|
expect(text).toBe(largeData);
|
|
expect(await proc.exited).toBe(0);
|
|
});
|
|
|
|
test("ReadableStream with very large chunked data", async () => {
|
|
const chunkSize = 64 * 1024; // 64KB chunks
|
|
const numChunks = 16; // 1MB total
|
|
let pushedChunks = 0;
|
|
const chunk = Buffer.alloc(chunkSize, "x");
|
|
|
|
const stream = new ReadableStream({
|
|
pull(controller) {
|
|
if (pushedChunks < numChunks) {
|
|
controller.enqueue(chunk);
|
|
pushedChunks++;
|
|
} else {
|
|
controller.close();
|
|
}
|
|
},
|
|
});
|
|
|
|
await using proc = spawn({
|
|
cmd: [
|
|
bunExe(),
|
|
"-e",
|
|
`
|
|
let length = 0;
|
|
process.stdin.on('data', (data) => length += data.length);
|
|
process.once('beforeExit', () => console.error(length));
|
|
process.stdin.pipe(process.stdout)
|
|
`,
|
|
],
|
|
stdin: stream,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
const text = await proc.stdout.text();
|
|
expect(text.length).toBe(chunkSize * numChunks);
|
|
expect(text).toBe(chunk.toString().repeat(numChunks));
|
|
expect(await proc.exited).toBe(0);
|
|
});
|
|
|
|
// TODO: enable once cancellation semantics for a child exiting mid-stream
// are finalized.
test.todo("ReadableStream cancellation when process exits early", async () => {
  // Set by the stream's cancel() hook when the runtime tears it down.
  let cancelled = false;
  let chunksEnqueued = 0;

  const stream = new ReadableStream({
    async pull(controller) {
      // Keep enqueueing data slowly so the child exits while the stream
      // still has more to produce.
      await Bun.sleep(50);
      chunksEnqueued++;
      controller.enqueue(`chunk ${chunksEnqueued}\n`);
    },
    cancel(_reason) {
      cancelled = true;
    },
  });

  await using proc = spawn({
    cmd: [
      bunExe(),
      "-e",
      // Child echoes lines via readline and exits after the second one,
      // leaving the stdin stream only partially consumed.
      `const readline = require('readline');
      const rl = readline.createInterface({
        input: process.stdin,
        output: process.stdout,
        terminal: false
      });
      let lines = 0;
      rl.on('line', (line) => {
        console.log(line);
        lines++;
        if (lines >= 2) process.exit(0);
      });`,
    ],
    stdin: stream,
    stdout: "pipe",
    env: bunEnv,
  });

  const text = await proc.stdout.text();
  await proc.exited;

  // Give some time for cancellation to happen
  await Bun.sleep(100);

  expect(cancelled).toBe(true);
  expect(chunksEnqueued).toBeGreaterThanOrEqual(2);
  // The child exits after echoing 2 lines, so only 2 lines should be output
  expect(text.trim().split("\n").length).toBe(2);
});
|
|
|
|
test("ReadableStream error handling", async () => {
|
|
const stream = new ReadableStream({
|
|
async start(controller) {
|
|
controller.enqueue("before error\n");
|
|
// Give time for the data to be consumed
|
|
await Bun.sleep(10);
|
|
controller.error(new Error("Stream error"));
|
|
},
|
|
});
|
|
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
const text = await proc.stdout.text();
|
|
// Process should receive data before the error
|
|
expect(text).toBe("before error\n");
|
|
|
|
// Process should exit normally (the stream error happens after data is sent)
|
|
expect(await proc.exited).toBe(0);
|
|
});
|
|
|
|
test("ReadableStream with process that exits immediately", async () => {
|
|
const stream = new ReadableStream({
|
|
start(controller) {
|
|
// Enqueue a lot of data
|
|
for (let i = 0; i < 1000; i++) {
|
|
controller.enqueue(`line ${i}\n`);
|
|
}
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.exit(0)"], // exits immediately
|
|
stdin: stream,
|
|
env: bunEnv,
|
|
});
|
|
|
|
expect(await proc.exited).toBe(0);
|
|
|
|
// Give time for any pending operations
|
|
await Bun.sleep(50);
|
|
|
|
// The stream might be cancelled since the process exits before reading
|
|
// This is implementation-dependent behavior
|
|
});
|
|
|
|
test("ReadableStream with process that fails", async () => {
|
|
const stream = new ReadableStream({
|
|
async pull(controller) {
|
|
await Bun.sleep(0);
|
|
controller.enqueue("data for failing process\n");
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.exit(1)"],
|
|
stdin: stream,
|
|
env: bunEnv,
|
|
});
|
|
|
|
expect(await proc.exited).toBe(1);
|
|
});
|
|
|
|
test("already disturbed ReadableStream throws error", async () => {
|
|
const stream = new ReadableStream({
|
|
async pull(controller) {
|
|
await Bun.sleep(0);
|
|
controller.enqueue("data");
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
// Disturb the stream by reading from it
|
|
const reader = stream.getReader();
|
|
await reader.read();
|
|
reader.releaseLock();
|
|
|
|
expect(() => {
|
|
const proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream,
|
|
env: bunEnv,
|
|
});
|
|
}).toThrow("'stdin' ReadableStream has already been used");
|
|
});
|
|
|
|
test("ReadableStream with abort signal calls cancel", async () => {
|
|
const controller = new AbortController();
|
|
const cancel = mock();
|
|
const stream = new ReadableStream({
|
|
start(controller) {
|
|
controller.enqueue("data before abort\n");
|
|
},
|
|
async pull(controller) {
|
|
// Keep the stream open
|
|
// but don't block the event loop.
|
|
await Bun.sleep(1);
|
|
controller.enqueue("more data\n");
|
|
},
|
|
cancel,
|
|
});
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream,
|
|
stdout: "pipe",
|
|
signal: controller.signal,
|
|
env: bunEnv,
|
|
});
|
|
|
|
// Give it some time to start
|
|
await Bun.sleep(10);
|
|
|
|
// Abort the process
|
|
controller.abort();
|
|
|
|
try {
|
|
await proc.exited;
|
|
} catch (e) {
|
|
// Process was aborted
|
|
}
|
|
|
|
// The process should have been killed
|
|
expect(proc.killed).toBe(true);
|
|
expect(cancel).toHaveBeenCalledTimes(1);
|
|
});
|
|
|
|
test("ReadableStream with backpressure", async () => {
|
|
let pullCalls = 0;
|
|
const maxChunks = 5;
|
|
|
|
const stream = new ReadableStream({
|
|
async pull(controller) {
|
|
pullCalls++;
|
|
if (pullCalls <= maxChunks) {
|
|
// Add async to prevent optimization to blob
|
|
await Bun.sleep(0);
|
|
controller.enqueue(`chunk ${pullCalls}\n`);
|
|
} else {
|
|
controller.close();
|
|
}
|
|
},
|
|
});
|
|
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
const text = await proc.stdout.text();
|
|
await proc.exited;
|
|
|
|
// The pull method should have been called multiple times
|
|
expect(pullCalls).toBeGreaterThan(1);
|
|
expect(pullCalls).toBeLessThanOrEqual(maxChunks + 1); // +1 for the close pull
|
|
expect(text).toContain("chunk 1\n");
|
|
expect(text).toContain(`chunk ${maxChunks}\n`);
|
|
});
|
|
|
|
test("ReadableStream with multiple processes", async () => {
|
|
const stream1 = new ReadableStream({
|
|
start(controller) {
|
|
controller.enqueue("stream1 data");
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
const stream2 = new ReadableStream({
|
|
start(controller) {
|
|
controller.enqueue("stream2 data");
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
await using proc1 = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream1,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
await using proc2 = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream2,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
const [text1, text2] = await Promise.all([new Response(proc1.stdout).text(), new Response(proc2.stdout).text()]);
|
|
|
|
expect(text1).toBe("stream1 data");
|
|
expect(text2).toBe("stream2 data");
|
|
expect(await proc1.exited).toBe(0);
|
|
expect(await proc2.exited).toBe(0);
|
|
});
|
|
|
|
test("ReadableStream with empty stream", async () => {
|
|
const stream = new ReadableStream({
|
|
start(controller) {
|
|
// Close immediately without enqueueing anything
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
const text = await proc.stdout.text();
|
|
expect(text).toBe("");
|
|
expect(await proc.exited).toBe(0);
|
|
});
|
|
|
|
test("ReadableStream with null bytes", async () => {
|
|
const stream = new ReadableStream({
|
|
start(controller) {
|
|
controller.enqueue(new Uint8Array([72, 101, 108, 108, 111, 0, 87, 111, 114, 108, 100])); // "Hello\0World"
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
const buffer = await new Response(proc.stdout).arrayBuffer();
|
|
const bytes = new Uint8Array(buffer);
|
|
expect(bytes).toEqual(new Uint8Array([72, 101, 108, 108, 111, 0, 87, 111, 114, 108, 100]));
|
|
expect(await proc.exited).toBe(0);
|
|
});
|
|
|
|
test("ReadableStream with transform stream", async () => {
|
|
// Create a transform stream that uppercases text
|
|
const upperCaseTransform = new TransformStream({
|
|
transform(chunk, controller) {
|
|
controller.enqueue(chunk.toUpperCase());
|
|
},
|
|
});
|
|
|
|
const originalStream = new ReadableStream({
|
|
start(controller) {
|
|
controller.enqueue("hello ");
|
|
controller.enqueue("world");
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
const transformedStream = originalStream.pipeThrough(upperCaseTransform);
|
|
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: transformedStream,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
const text = await proc.stdout.text();
|
|
expect(text).toBe("HELLO WORLD");
|
|
expect(await proc.exited).toBe(0);
|
|
});
|
|
|
|
test("ReadableStream with tee", async () => {
|
|
const originalStream = new ReadableStream({
|
|
start(controller) {
|
|
controller.enqueue("shared data");
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
const [stream1, stream2] = originalStream.tee();
|
|
|
|
// Use the first branch for the process
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream1,
|
|
stdout: "pipe",
|
|
env: bunEnv,
|
|
});
|
|
|
|
// Read from the second branch independently
|
|
const text2 = await new Response(stream2).text();
|
|
|
|
const text1 = await proc.stdout.text();
|
|
expect(text1).toBe("shared data");
|
|
expect(text2).toBe("shared data");
|
|
expect(await proc.exited).toBe(0);
|
|
});
|
|
|
|
test("ReadableStream object type count", async () => {
|
|
const iterations =
|
|
isASAN && isCI
|
|
? // With ASAN, entire process gets killed, including the test runner in CI. Likely an OOM or out of file descriptors.
|
|
10
|
|
: 50;
|
|
|
|
async function main() {
|
|
async function iterate(i: number) {
|
|
const stream = new ReadableStream({
|
|
async pull(controller) {
|
|
await Bun.sleep(0);
|
|
controller.enqueue(`iteration ${i}`);
|
|
controller.close();
|
|
},
|
|
});
|
|
|
|
await using proc = spawn({
|
|
cmd: [bunExe(), "-e", "process.stdin.pipe(process.stdout)"],
|
|
stdin: stream,
|
|
stdout: "pipe",
|
|
stderr: "inherit",
|
|
env: bunEnv,
|
|
});
|
|
|
|
await Promise.all([proc.stdout.text(), proc.exited]);
|
|
}
|
|
|
|
const promises = Array.from({ length: iterations }, (_, i) => iterate(i));
|
|
await Promise.all(promises);
|
|
}
|
|
|
|
await main();
|
|
|
|
await Bun.sleep(1);
|
|
Bun.gc(true);
|
|
await Bun.sleep(1);
|
|
|
|
// Check that we're not leaking objects
|
|
await expectMaxObjectTypeCount(expect, "ReadableStream", 10);
|
|
await expectMaxObjectTypeCount(expect, "Subprocess", 5);
|
|
});
|
|
});
|