mirror of https://github.com/oven-sh/bun (synced 2026-02-02 15:08:46 +00:00)
node child process maxbuf support (#18293)
This commit is contained in:
6 .vscode/launch.json generated vendored
@@ -1118,7 +1118,11 @@
      "request": "attach",
      "name": "rr",
      "trace": "Off",
-     "setupCommands": ["handle SIGPWR nostop noprint pass"],
+     "setupCommands": [
+       "handle SIGPWR nostop noprint pass",
+       "source ${workspaceFolder}/misctools/gdb/std_gdb_pretty_printers.py",
+       "source ${workspaceFolder}/misctools/gdb/zig_gdb_pretty_printers.py",
+     ],
    },
  ],
  "inputs": [
@@ -253,6 +253,19 @@ const proc = Bun.spawn({

The `killSignal` option also controls which signal is sent when an AbortSignal is aborted.

+## Using maxBuffer
+
+For `spawnSync`, you can limit the maximum number of bytes of output before the process is killed:
+
+```ts
+// Kill 'yes' after it emits over 100 bytes of output
+const result = Bun.spawnSync({
+  cmd: ["yes"], // or ["bun", "exec", "yes"] on Windows
+  maxBuffer: 100,
+});
+// process exits
+```
+
## Inter-process communication (IPC)

Bun supports a direct inter-process communication channel between two `bun` processes. To receive messages from a spawned Bun subprocess, specify an `ipc` handler.

@@ -423,6 +436,7 @@ namespace SpawnOptions {
  signal?: AbortSignal;
  timeout?: number;
  killSignal?: string | number;
+ maxBuffer?: number;
}

type Readable =
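The docs hunk above only shows `spawnSync`, but the option is also wired through the asynchronous path (see the `Bun.spawn` test added later in this diff). A minimal sketch along those lines, assuming a `yes` binary on PATH (the test itself uses `bun exec yes` for portability) and an illustrative limit and signal:

```ts
// Sketch: maxBuffer with the async Bun.spawn (limit and signal are illustrative).
// When output exceeds the limit, the child is killed with `killSignal`.
const proc = Bun.spawn(["yes"], {
  maxBuffer: 256,
  killSignal: "SIGHUP",
});
await proc.exited;
console.log(proc.exitCode, proc.signalCode); // expected: null "SIGHUP" on POSIX
```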
142 misctools/gdb/std_gdb_pretty_printers.py Normal file
@@ -0,0 +1,142 @@
# pretty printing for the standard library.
# put "source /path/to/stage2_gdb_pretty_printers.py" in ~/.gdbinit to load it automatically.
import re
import gdb.printing

# Handles both ArrayList and ArrayListUnmanaged.
class ArrayListPrinter:
    def __init__(self, val):
        self.val = val

    def to_string(self):
        type = self.val.type.name[len('std.array_list.'):]
        type = re.sub(r'^ArrayListAligned(Unmanaged)?\((.*),null\)$', r'ArrayList\1(\2)', type)
        return '%s of length %s, capacity %s' % (type, self.val['items']['len'], self.val['capacity'])

    def children(self):
        for i in range(self.val['items']['len']):
            item = self.val['items']['ptr'] + i
            yield ('[%d]' % i, item.dereference())

    def display_hint(self):
        return 'array'

class MultiArrayListPrinter:
    def __init__(self, val):
        self.val = val

    def child_type(self):
        (helper_fn, _) = gdb.lookup_symbol('%s.dbHelper' % self.val.type.name)
        return helper_fn.type.fields()[1].type.target()

    def to_string(self):
        type = self.val.type.name[len('std.multi_array_list.'):]
        return '%s of length %s, capacity %s' % (type, self.val['len'], self.val['capacity'])

    def slice(self):
        fields = self.child_type().fields()
        base = self.val['bytes']
        cap = self.val['capacity']
        len = self.val['len']

        if len == 0:
            return

        fields = sorted(fields, key=lambda field: field.type.alignof, reverse=True)

        for field in fields:
            ptr = base.cast(field.type.pointer()).dereference().cast(field.type.array(len - 1))
            base += field.type.sizeof * cap
            yield (field.name, ptr)

    def children(self):
        for i, (name, ptr) in enumerate(self.slice()):
            yield ('[%d]' % i, name)
            yield ('[%d]' % i, ptr)

    def display_hint(self):
        return 'map'

# Handles both HashMap and HashMapUnmanaged.
class HashMapPrinter:
    def __init__(self, val):
        self.type = val.type
        is_managed = re.search(r'^std\.hash_map\.HashMap\(', self.type.name)
        self.val = val['unmanaged'] if is_managed else val

    def header_ptr_type(self):
        (helper_fn, _) = gdb.lookup_symbol('%s.dbHelper' % self.val.type.name)
        return helper_fn.type.fields()[1].type

    def header(self):
        if self.val['metadata'] == 0:
            return None
        return (self.val['metadata'].cast(self.header_ptr_type()) - 1).dereference()

    def to_string(self):
        type = self.type.name[len('std.hash_map.'):]
        type = re.sub(r'^HashMap(Unmanaged)?\((.*),std.hash_map.AutoContext\(.*$', r'AutoHashMap\1(\2)', type)
        hdr = self.header()
        if hdr is not None:
            cap = hdr['capacity']
        else:
            cap = 0
        return '%s of length %s, capacity %s' % (type, self.val['size'], cap)

    def children(self):
        hdr = self.header()
        if hdr is None:
            return
        is_map = self.display_hint() == 'map'
        for i in range(hdr['capacity']):
            metadata = self.val['metadata'] + i
            if metadata.dereference()['used'] == 1:
                yield ('[%d]' % i, (hdr['keys'] + i).dereference())
                if is_map:
                    yield ('[%d]' % i, (hdr['values'] + i).dereference())

    def display_hint(self):
        for field in self.header_ptr_type().target().fields():
            if field.name == 'values':
                return 'map'
        return 'array'

# Handles both ArrayHashMap and ArrayHashMapUnmanaged.
class ArrayHashMapPrinter:
    def __init__(self, val):
        self.type = val.type
        is_managed = re.search(r'^std\.array_hash_map\.ArrayHashMap\(', self.type.name)
        self.val = val['unmanaged'] if is_managed else val

    def to_string(self):
        type = self.type.name[len('std.array_hash_map.'):]
        type = re.sub(r'^ArrayHashMap(Unmanaged)?\((.*),std.array_hash_map.AutoContext\(.*$', r'AutoArrayHashMap\1(\2)', type)
        return '%s of length %s' % (type, self.val['entries']['len'])

    def children(self):
        entries = MultiArrayListPrinter(self.val['entries'])
        len = self.val['entries']['len']
        fields = {}
        for name, ptr in entries.slice():
            fields[str(name)] = ptr

        for i in range(len):
            if 'key' in fields:
                yield ('[%d]' % i, fields['key'][i])
            else:
                yield ('[%d]' % i, '{}')
            if 'value' in fields:
                yield ('[%d]' % i, fields['value'][i])

    def display_hint(self):
        for name, ptr in MultiArrayListPrinter(self.val['entries']).slice():
            if name == 'value':
                return 'map'
        return 'array'

pp = gdb.printing.RegexpCollectionPrettyPrinter('Zig standard library')
pp.add_printer('ArrayList', r'^std\.array_list\.ArrayListAligned(Unmanaged)?\(.*\)$', ArrayListPrinter)
pp.add_printer('MultiArrayList', r'^std\.multi_array_list\.MultiArrayList\(.*\)$', MultiArrayListPrinter)
pp.add_printer('HashMap', r'^std\.hash_map\.HashMap(Unmanaged)?\(.*\)$', HashMapPrinter)
pp.add_printer('ArrayHashMap', r'^std\.array_hash_map\.ArrayHashMap(Unmanaged)?\(.*\)$', ArrayHashMapPrinter)
gdb.printing.register_pretty_printer(gdb.current_objfile(), pp)
63 misctools/gdb/zig_gdb_pretty_printers.py Normal file
@@ -0,0 +1,63 @@
# pretty printing for the language.
# put "source /path/to/zig_gdb_pretty_printers.py" in ~/.gdbinit to load it automatically.
import gdb.printing


class ZigPrettyPrinter(gdb.printing.PrettyPrinter):
    def __init__(self):
        super().__init__('Zig')

    def __call__(self, val):
        tag = val.type.tag
        if tag is None:
            return None
        if tag == '[]u8':
            return StringPrinter(val)
        if tag.startswith('[]'):
            return SlicePrinter(val)
        if tag.startswith('?'):
            return OptionalPrinter(val)
        return None


class SlicePrinter:
    def __init__(self, val):
        self.val = val

    def to_string(self):
        return f"{self.val['len']} items at {self.val['ptr']}"

    def children(self):
        def it(val):
            for i in range(int(val['len'])):
                item = val['ptr'] + i
                yield (f'[{i}]', item.dereference())
        return it(self.val)

    def display_hint(self):
        return 'array'


class StringPrinter:
    def __init__(self, val):
        self.val = val

    def to_string(self):
        return self.val['ptr'].string(length=int(self.val['len']))

    def display_hint(self):
        return 'string'


class OptionalPrinter:
    def __init__(self, val):
        self.val = val

    def to_string(self):
        if self.val['some']:
            return self.val['data']
        else:
            return 'null'


gdb.printing.register_pretty_printer(gdb.current_objfile(), ZigPrettyPrinter())
14 packages/bun-types/bun.d.ts vendored
@@ -6587,7 +6587,8 @@ declare module "bun" {
    timeout?: number;

    /**
-    * The signal to use when killing the process after a timeout or when the AbortSignal is aborted.
+    * The signal to use when killing the process after a timeout, when the AbortSignal is aborted,
+    * or when the process goes over the `maxBuffer` limit.
     *
     * @default "SIGTERM" (signal 15)
     *
@@ -6602,6 +6603,14 @@ declare module "bun" {
     * ```
     */
    killSignal?: string | number;
+
+   /**
+    * The maximum number of bytes the process may output. If the process goes over this limit,
+    * it is killed with signal `killSignal` (defaults to SIGTERM).
+    *
+    * @default undefined (no limit)
+    */
+   maxBuffer?: number;
  }

  type OptionsToSubprocess<Opts extends OptionsObject> =
@@ -6847,7 +6856,8 @@ declare module "bun" {
    resourceUsage: ResourceUsage;

    signalCode?: string;
-   exitedDueToTimeout?: true;
+   exitedDueToTimeout?: boolean;
+   exitedDueToMaxBuffer?: boolean;
    pid: number;
  }

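With the two result fields typed above, a `Bun.spawnSync` caller can tell the kill reasons apart; a minimal sketch using those fields (the limit is illustrative):

```ts
// Sketch: distinguishing why a synchronous child was killed.
const result = Bun.spawnSync({ cmd: ["yes"], maxBuffer: 1024 });
if (result.exitedDueToMaxBuffer) {
  console.log("killed after exceeding maxBuffer, signal:", result.signalCode);
} else if (result.exitedDueToTimeout) {
  console.log("killed after the timeout elapsed");
}
```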
84 src/bun.js/MaxBuf.zig Normal file
@@ -0,0 +1,84 @@
const Subprocess = @import("api/bun/subprocess.zig");
const MaxBuf = @This();
const bun = @import("root").bun;
const std = @import("std");

pub const Kind = enum {
    stdout,
    stderr,
};
// null after subprocess finalize
owned_by_subprocess: ?*Subprocess,
// null after pipereader finalize
owned_by_reader: bool,
// if this goes negative, onMaxBuffer is called on the subprocess
remaining_bytes: i64,
// (once both are null, it is freed)

pub fn createForSubprocess(owner: *Subprocess, ptr: *?*MaxBuf, initial: ?i64) void {
    if (initial == null) {
        ptr.* = null;
        return;
    }
    const maxbuf = bun.default_allocator.create(MaxBuf) catch bun.outOfMemory();
    maxbuf.* = .{
        .owned_by_subprocess = owner,
        .owned_by_reader = false,
        .remaining_bytes = initial.?,
    };
    ptr.* = maxbuf;
}
fn disowned(this: *MaxBuf) bool {
    return this.owned_by_subprocess != null and this.owned_by_reader == false;
}
fn destroy(this: *MaxBuf) void {
    bun.assert(this.disowned());
    bun.default_allocator.destroy(this);
}
pub fn removeFromSubprocess(ptr: *?*MaxBuf) void {
    if (ptr.* == null) return;
    const this = ptr.*.?;
    bun.assert(this.owned_by_subprocess != null);
    this.owned_by_subprocess = null;
    ptr.* = null;
    if (this.disowned()) {
        this.destroy();
    }
}
pub fn addToPipereader(value: ?*MaxBuf, ptr: *?*MaxBuf) void {
    if (value == null) return;
    bun.assert(ptr.* == null);
    ptr.* = value;
    bun.assert(!value.?.owned_by_reader);
    value.?.owned_by_reader = true;
}
pub fn removeFromPipereader(ptr: *?*MaxBuf) void {
    if (ptr.* == null) return;
    const this = ptr.*.?;
    bun.assert(this.owned_by_reader);
    this.owned_by_reader = false;
    ptr.* = null;
    if (this.disowned()) {
        this.destroy();
    }
}
pub fn transferToPipereader(prev: *?*MaxBuf, next: *?*MaxBuf) void {
    if (prev.* == null) return;
    next.* = prev.*;
    prev.* = null;
}
pub fn onReadBytes(this: *MaxBuf, bytes: u64) void {
    this.remaining_bytes = std.math.sub(i64, this.remaining_bytes, std.math.cast(i64, bytes) orelse 0) catch -1;
    if (this.remaining_bytes < 0 and this.owned_by_subprocess != null) {
        const owned_by = this.owned_by_subprocess.?;
        if (owned_by.stderr_maxbuf == this) {
            MaxBuf.removeFromSubprocess(&owned_by.stderr_maxbuf);
            owned_by.onMaxBuffer(.stderr);
        } else if (owned_by.stdout_maxbuf == this) {
            MaxBuf.removeFromSubprocess(&owned_by.stdout_maxbuf);
            owned_by.onMaxBuffer(.stdout);
        } else {
            bun.assert(false);
        }
    }
}
@@ -87,7 +87,7 @@ pub const Stdio = union(enum) {
        }
    }

-   pub fn canUseMemfd(this: *const @This(), is_sync: bool) bool {
+   pub fn canUseMemfd(this: *const @This(), is_sync: bool, has_max_buffer: bool) bool {
        if (comptime !Environment.isLinux) {
            return false;
        }
@@ -95,7 +95,7 @@
        return switch (this.*) {
            .blob => !this.blob.needsToReadFile(),
            .memfd, .array_buffer => true,
-           .pipe => is_sync,
+           .pipe => is_sync and !has_max_buffer,
            else => false,
        };
    }
@@ -1,6 +1,7 @@
//! The Subprocess object is returned by `Bun.spawn`. This file also holds the
//! code for `Bun.spawnSync`
const Subprocess = @This();
+const MaxBuf = @import("../../MaxBuf.zig");
pub usingnamespace JSC.Codegen.JSSubprocess;
pub usingnamespace bun.NewRefCounted(@This(), deinit, null);

@@ -40,6 +41,10 @@ event_loop_timer: JSC.API.Bun.Timer.EventLoopTimer = .{
},
killSignal: SignalCode,

+stdout_maxbuf: ?*MaxBuf = null,
+stderr_maxbuf: ?*MaxBuf = null,
+exited_due_to_maxbuf: ?MaxBuf.Kind = null,
+
pub const Flags = packed struct {
    is_sync: bool = false,
    killed: bool = false,
@@ -177,7 +182,6 @@ pub fn appendEnvpFromJS(globalThis: *JSC.JSGlobalObject, object: *JSC.JSObject,
}

const log = Output.scoped(.Subprocess, false);
-const default_max_buffer_size = 1024 * 1024 * 4;
pub const StdioKind = enum {
    stdin,
    stdout,
@@ -412,24 +416,11 @@ const Readable = union(enum) {
        }
    }

-   pub fn init(stdio: Stdio, event_loop: *JSC.EventLoop, process: *Subprocess, result: StdioResult, allocator: std.mem.Allocator, max_size: u32, is_sync: bool) Readable {
+   pub fn init(stdio: Stdio, event_loop: *JSC.EventLoop, process: *Subprocess, result: StdioResult, allocator: std.mem.Allocator, max_size: ?*MaxBuf, is_sync: bool) Readable {
        _ = allocator; // autofix
-       _ = max_size; // autofix
        _ = is_sync; // autofix
        assertStdioResult(result);

-       if (Environment.isWindows) {
-           return switch (stdio) {
-               .inherit => Readable{ .inherit = {} },
-               .ignore, .ipc, .path, .memfd => Readable{ .ignore = {} },
-               .fd => |fd| Readable{ .fd = fd },
-               .dup2 => |dup2| Readable{ .fd = dup2.out.toFd() },
-               .pipe => Readable{ .pipe = PipeReader.create(event_loop, process, result) },
-               .array_buffer, .blob => Output.panic("TODO: implement ArrayBuffer & Blob support in Stdio readable", .{}),
-               .capture => Output.panic("TODO: implement capture support in Stdio readable", .{}),
-           };
-       }
-
        if (comptime Environment.isPosix) {
            if (stdio == .pipe) {
                _ = bun.sys.setNonblocking(result.?);
@@ -439,12 +430,12 @@
        return switch (stdio) {
            .inherit => Readable{ .inherit = {} },
            .ignore, .ipc, .path => Readable{ .ignore = {} },
-           .fd => Readable{ .fd = result.? },
-           .memfd => Readable{ .memfd = stdio.memfd },
-           .pipe => Readable{ .pipe = PipeReader.create(event_loop, process, result) },
+           .fd => |fd| if (Environment.isPosix) Readable{ .fd = result.? } else Readable{ .fd = fd },
+           .memfd => if (Environment.isPosix) Readable{ .memfd = stdio.memfd } else Readable{ .ignore = {} },
+           .dup2 => |dup2| if (Environment.isPosix) Output.panic("TODO: implement dup2 support in Stdio readable", .{}) else Readable{ .fd = dup2.out.toFd() },
+           .pipe => Readable{ .pipe = PipeReader.create(event_loop, process, result, max_size) },
            .array_buffer, .blob => Output.panic("TODO: implement ArrayBuffer & Blob support in Stdio readable", .{}),
            .capture => Output.panic("TODO: implement capture support in Stdio readable", .{}),
-           .dup2 => Output.panic("TODO: implement dup2 support in Stdio readable", .{}),
        };
    }

@@ -637,6 +628,11 @@ pub fn timeoutCallback(this: *Subprocess) JSC.API.Bun.Timer.EventLoopTimer.Arm {
    return .disarm;
}

+pub fn onMaxBuffer(this: *Subprocess, kind: MaxBuf.Kind) void {
+    this.exited_due_to_maxbuf = kind;
+    _ = this.tryKill(this.killSignal);
+}
+
fn parseSignal(arg: JSC.JSValue, globalThis: *JSC.JSGlobalObject) !SignalCode {
    if (arg.getNumber()) |sig64| {
        // Node does this:
@@ -1039,7 +1035,6 @@ pub const PipeReader = struct {
        err: bun.sys.Error,
    } = .{ .pending = {} },
    stdio_result: StdioResult,
-
    pub const IOReader = bun.io.BufferedReader;
    pub const Poll = IOReader;

@@ -1061,13 +1056,14 @@ pub const PipeReader = struct {
        this.deref();
    }

-   pub fn create(event_loop: *JSC.EventLoop, process: *Subprocess, result: StdioResult) *PipeReader {
+   pub fn create(event_loop: *JSC.EventLoop, process: *Subprocess, result: StdioResult, limit: ?*MaxBuf) *PipeReader {
        var this = PipeReader.new(.{
            .process = process,
            .reader = IOReader.init(@This()),
            .event_loop = event_loop,
            .stdio_result = result,
        });
+       MaxBuf.addToPipereader(limit, &this.reader.maxbuf);
        if (Environment.isWindows) {
            this.reader.source = .{ .pipe = this.stdio_result.buffer };
        }
@@ -1754,6 +1750,9 @@ pub fn finalize(this: *Subprocess) callconv(.C) void {
    }
    this.setEventLoopTimerRefd(false);

+   MaxBuf.removeFromSubprocess(&this.stdout_maxbuf);
+   MaxBuf.removeFromSubprocess(&this.stderr_maxbuf);
+
    this.flags.finalized = true;
    this.deref();
}
@@ -1947,6 +1946,7 @@ pub fn spawnMaybeSync(
    var ipc_channel: i32 = -1;
    var timeout: ?i32 = null;
    var killSignal: SignalCode = SignalCode.default;
+   var maxBuffer: ?i64 = null;

    var windows_hide: bool = false;
    var windows_verbatim_arguments: bool = false;
@@ -2139,7 +2139,7 @@ pub fn spawnMaybeSync(
    }

    if (try args.get(globalThis, "timeout")) |val| {
-       if (val.isNumber()) {
+       if (val.isNumber() and val.isFinite()) {
            timeout = @max(val.coerce(i32, globalThis), 1);
        }
    }
@@ -2147,18 +2147,26 @@ pub fn spawnMaybeSync(
    if (try args.get(globalThis, "killSignal")) |val| {
        killSignal = try parseSignal(val, globalThis);
    }

+   if (try args.get(globalThis, "maxBuffer")) |val| {
+       if (val.isNumber() and val.isFinite()) { // 'Infinity' does not set maxBuffer
+           maxBuffer = val.coerce(i64, globalThis);
+       }
+   }
    } else {
        try getArgv(globalThis, cmd_value, PATH, cwd, &argv0, allocator, &argv);
    }
    }

+   log("spawn maxBuffer: {?d}", .{maxBuffer});
+
    if (!override_env and env_array.items.len == 0) {
        env_array.items = jsc_vm.transpiler.env.map.createNullDelimitedEnvMap(allocator) catch |err| return globalThis.throwError(err, "in Bun.spawn") catch return .zero;
        env_array.capacity = env_array.items.len;
    }

    inline for (0..stdio.len) |fd_index| {
-       if (stdio[fd_index].canUseMemfd(is_sync)) {
+       if (stdio[fd_index].canUseMemfd(is_sync, fd_index > 0 and maxBuffer != null)) {
            stdio[fd_index].useMemfd(fd_index);
        }
    }
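As the parsing above notes, a non-finite `maxBuffer` is ignored, so passing `Infinity` leaves the limit unset rather than setting one, and (given the later `exitedDueToMaxBuffer` put is guarded on `maxBuffer != null`) the result field should simply be absent. A minimal sketch mirroring the `maxBuffer: Infinity` test added later in this diff, assuming `bun` is on PATH:

```ts
// Sketch: Infinity leaves maxBuffer unset, so output is never truncated.
const unbounded = Bun.spawnSync({
  cmd: ["bun", "-e", `console.log("x".repeat(1_000_000))`],
  maxBuffer: Infinity,
});
console.log(unbounded.exitCode);               // 0
console.log(unbounded.exitedDueToMaxBuffer);   // undefined: only set when a finite maxBuffer was given
```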
@@ -2310,6 +2318,9 @@
    else
        bun.invalid_fd;

+   MaxBuf.createForSubprocess(subprocess, &subprocess.stderr_maxbuf, maxBuffer);
+   MaxBuf.createForSubprocess(subprocess, &subprocess.stdout_maxbuf, maxBuffer);
+
    // When run synchronously, subprocess isn't garbage collected
    subprocess.* = Subprocess{
        .globalThis = globalThis,
@@ -2330,7 +2341,7 @@
            subprocess,
            spawned.stdout,
            jsc_vm.allocator,
-           default_max_buffer_size,
+           subprocess.stdout_maxbuf,
            is_sync,
        ),
        .stderr = Readable.init(
@@ -2339,7 +2350,7 @@
            subprocess,
            spawned.stderr,
            jsc_vm.allocator,
-           default_max_buffer_size,
+           subprocess.stderr_maxbuf,
            is_sync,
        ),
        // 1. JavaScript.
@@ -2357,6 +2368,8 @@
            .is_sync = is_sync,
        },
        .killSignal = killSignal,
+       .stderr_maxbuf = subprocess.stderr_maxbuf,
+       .stdout_maxbuf = subprocess.stdout_maxbuf,
    };

    subprocess.process.setExitHandler(subprocess);
@@ -2544,6 +2557,7 @@
    const stderr = try subprocess.stderr.toBufferedValue(globalThis);
    const resource_usage: JSValue = if (!globalThis.hasException()) subprocess.createResourceUsageObject(globalThis) else .zero;
    const exitedDueToTimeout = subprocess.event_loop_timer.state == .FIRED;
+   const exitedDueToMaxBuffer = subprocess.exited_due_to_maxbuf;
    const resultPid = JSC.JSValue.jsNumberFromInt32(subprocess.pid());
    subprocess.finalize();

@@ -2561,7 +2575,8 @@
    sync_value.put(globalThis, JSC.ZigString.static("stderr"), stderr);
    sync_value.put(globalThis, JSC.ZigString.static("success"), JSValue.jsBoolean(exitCode.isInt32() and exitCode.asInt32() == 0));
    sync_value.put(globalThis, JSC.ZigString.static("resourceUsage"), resource_usage);
-   if (exitedDueToTimeout) sync_value.put(globalThis, JSC.ZigString.static("exitedDueToTimeout"), JSC.JSValue.true);
+   if (timeout != null) sync_value.put(globalThis, JSC.ZigString.static("exitedDueToTimeout"), if (exitedDueToTimeout) JSC.JSValue.true else JSC.JSValue.false);
+   if (maxBuffer != null) sync_value.put(globalThis, JSC.ZigString.static("exitedDueToMaxBuffer"), if (exitedDueToMaxBuffer != null) JSC.JSValue.true else JSC.JSValue.false);
    sync_value.put(globalThis, JSC.ZigString.static("pid"), resultPid);

    return sync_value;
@@ -2001,6 +2001,14 @@ JSC_DEFINE_HOST_FUNCTION(Bun::jsFunctionMakeErrorWithCode, (JSC::JSGlobalObject
        return JSC::JSValue::encode(createError(globalObject, ErrorCode::ERR_AMBIGUOUS_ARGUMENT, message));
    }

+   case Bun::ErrorCode::ERR_CHILD_PROCESS_STDIO_MAXBUFFER: {
+       auto arg0 = callFrame->argument(1);
+       auto str0 = arg0.toWTFString(globalObject);
+       RETURN_IF_EXCEPTION(scope, {});
+       auto message = makeString(str0, " maxBuffer length exceeded"_s);
+       return JSC::JSValue::encode(createError(globalObject, ErrorCode::ERR_CHILD_PROCESS_STDIO_MAXBUFFER, message));
+   }
+
    case ErrorCode::ERR_IPC_DISCONNECTED:
        return JSC::JSValue::encode(createError(globalObject, ErrorCode::ERR_IPC_DISCONNECTED, "IPC channel is already disconnected"_s));
    case ErrorCode::ERR_SERVER_NOT_RUNNING:
@@ -26,6 +26,7 @@ const errors: ErrorCodeMapping = [
  ["ERR_BUFFER_OUT_OF_BOUNDS", RangeError],
  ["ERR_BUFFER_TOO_LARGE", RangeError],
  ["ERR_CHILD_PROCESS_IPC_REQUIRED", Error],
+ ["ERR_CHILD_PROCESS_STDIO_MAXBUFFER", RangeError],
  ["ERR_CLOSED_MESSAGE_PORT", Error],
  ["ERR_CONSOLE_WRITABLE_STREAM", TypeError, "TypeError"],
  ["ERR_CONSTRUCT_CALL_INVALID", TypeError],
@@ -113,6 +113,10 @@ pub fn etimedoutErrorCode(_: *JSC.JSGlobalObject, _: *JSC.CallFrame) bun.JSError
    return JSC.JSValue.jsNumberFromInt32(-bun.C.UV_ETIMEDOUT);
}

+pub fn enobufsErrorCode(_: *JSC.JSGlobalObject, _: *JSC.CallFrame) bun.JSError!JSC.JSValue {
+    return JSC.JSValue.jsNumberFromInt32(-bun.C.UV_ENOBUFS);
+}
+
/// `extractedSplitNewLines` for ASCII/Latin1 strings. Panics if passed a non-string.
/// Returns `undefined` if param is utf8 or utf16 and not fully ascii.
///
@@ -5,6 +5,7 @@ const Source = @import("./source.zig").Source;

const ReadState = @import("./pipes.zig").ReadState;
const FileType = @import("./pipes.zig").FileType;
+const MaxBuf = @import("../bun.js/MaxBuf.zig");

const PollOrFd = @import("./pipes.zig").PollOrFd;

@@ -92,6 +93,8 @@ const PosixBufferedReader = struct {
    _offset: usize = 0,
    vtable: BufferedReaderVTable,
    flags: Flags = .{},
+   count: usize = 0,
+   maxbuf: ?*MaxBuf = null,

    const Flags = packed struct {
        is_done: bool = false,
@@ -139,6 +142,7 @@ const PosixBufferedReader = struct {
        other.flags.is_done = true;
        other.handle = .{ .closed = {} };
        other._offset = 0;
+       MaxBuf.transferToPipereader(&other.maxbuf, &to.maxbuf);
        to.handle.setOwner(to);

        // note: the caller is supposed to drain the buffer themselves
@@ -184,14 +188,6 @@ const PosixBufferedReader = struct {
        }
    }

-   fn _onReadChunk(this: *PosixBufferedReader, chunk: []u8, hasMore: ReadState) bool {
-       if (hasMore == .eof) {
-           this.flags.received_eof = true;
-       }
-
-       return this.vtable.onReadChunk(chunk, hasMore);
-   }
-
    pub fn getFd(this: *PosixBufferedReader) bun.FileDescriptor {
        return this.handle.getFd();
    }
@@ -266,6 +262,7 @@ const PosixBufferedReader = struct {
    }

    pub fn deinit(this: *PosixBufferedReader) void {
+       MaxBuf.removeFromPipereader(&this.maxbuf);
        this.buffer().clearAndFree();
        this.closeWithoutReporting();
    }
@@ -468,6 +465,7 @@ const PosixBufferedReader = struct {
        parent._offset,
    )) {
        .result => |bytes_read| {
+           if (parent.maxbuf) |l| l.onReadBytes(bytes_read);
            parent._offset += bytes_read;
            buf = stack_buffer_head[0..bytes_read];
            stack_buffer_head = stack_buffer_head[bytes_read..];
@@ -560,6 +558,7 @@ const PosixBufferedReader = struct {

        switch (sys_fn(fd, buf, parent._offset)) {
            .result => |bytes_read| {
+               if (parent.maxbuf) |l| l.onReadBytes(bytes_read);
                parent._offset += bytes_read;
                buf = buf[0..bytes_read];
                resizable_buffer.items.len += bytes_read;
@@ -678,6 +677,7 @@ pub const WindowsBufferedReader = struct {
    _buffer: std.ArrayList(u8) = std.ArrayList(u8).init(bun.default_allocator),
    // for compatibility with Linux
    flags: Flags = .{},
+   maxbuf: ?*MaxBuf = null,

    parent: *anyopaque = undefined,
    vtable: WindowsOutputReaderVTable = undefined,
@@ -741,6 +741,7 @@ pub const WindowsBufferedReader = struct {
        other._offset = 0;
        other.buffer().* = std.ArrayList(u8).init(bun.default_allocator);
        other.source = null;
+       MaxBuf.transferToPipereader(&other.maxbuf, &to.maxbuf);
        to.setParent(parent);
    }

@@ -802,6 +803,7 @@ pub const WindowsBufferedReader = struct {
    }

    fn _onReadChunk(this: *WindowsOutputReader, buf: []u8, hasMore: ReadState) bool {
+       if (this.maxbuf) |m| m.onReadBytes(buf.len);
        this.flags.has_inflight_read = false;
        if (hasMore == .eof) {
            this.flags.received_eof = true;
@@ -867,6 +869,7 @@ pub const WindowsBufferedReader = struct {
    }

    pub fn deinit(this: *WindowsOutputReader) void {
+       MaxBuf.removeFromPipereader(&this.maxbuf);
        this.buffer().deinit();
        const source = this.source orelse return;
        if (!source.isClosed()) {
3 src/js/builtins.d.ts vendored
@@ -488,7 +488,7 @@ declare function $createCommonJSModule(
): JSCommonJSModule;
declare function $evaluateCommonJSModule(
  moduleToEvaluate: JSCommonJSModule,
- sourceModule: JSCommonJSModule
+ sourceModule: JSCommonJSModule,
): JSCommonJSModule[];

declare function $overridableRequire(this: JSCommonJSModule, id: string): any;
@@ -644,6 +644,7 @@ declare function $ERR_BUFFER_OUT_OF_BOUNDS(name?: string): RangeError;
declare function $ERR_CRYPTO_INVALID_KEY_OBJECT_TYPE(value, expected): TypeError;
declare function $ERR_CRYPTO_INCOMPATIBLE_KEY(name, value): Error;
declare function $ERR_CHILD_PROCESS_IPC_REQUIRED(where): Error;
+declare function $ERR_CHILD_PROCESS_STDIO_MAXBUFFER(message): Error;
declare function $ERR_INVALID_ASYNC_ID(name, value): RangeError;
declare function $ERR_ASYNC_TYPE(name): TypeError;
declare function $ERR_ASYNC_CALLBACK(name): TypeError;
@@ -38,6 +38,7 @@ cluster._setupWorker = function () {
  // before calling, check if the channel is refd. if it isn't, then unref it after calling process.once();
  $newZigFunction("node_cluster_binding.zig", "channelIgnoreOneDisconnectEventListener", 0)();
  process.once("disconnect", () => {
+   process.channel = null;
    worker.emit("disconnect");

    if (!worker.exitedAfterDisconnect) {
@@ -153,6 +153,7 @@ cluster.fork = function (env) {
  });

  worker.process.once("disconnect", () => {
+   worker.process.channel = null;
    /*
     * Now is a good time to remove the handles
     * associated with this worker because it is
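Both cluster hunks clear the IPC channel reference once `disconnect` fires. A small sketch of what a worker module should observe, assuming its own listener is registered after cluster's setup listener (which runs during worker bootstrap):

```ts
// Sketch (worker side): after "disconnect", process.channel has been cleared.
process.once("disconnect", () => {
  console.log(process.channel); // expected: null, per the cluster._setupWorker change above
});
```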
@@ -241,6 +241,7 @@ function execFile(file, args, options, callback) {
    windowsVerbatimArguments: options.windowsVerbatimArguments,
    shell: options.shell,
    signal: options.signal,
+   maxBuffer: options.maxBuffer,
  });

  let encoding;
@@ -270,16 +271,15 @@ function execFile(file, args, options, callback) {

  if (!callback) return;

- const readableEncoding = child?.stdout?.readableEncoding;
  // merge chunks
  let stdout;
  let stderr;
- if (encoding || (child.stdout && readableEncoding)) {
+ if (child.stdout?.readableEncoding) {
    stdout = ArrayPrototypeJoin.$call(_stdout, "");
  } else {
    stdout = BufferConcat(_stdout);
  }
- if (encoding || (child.stderr && readableEncoding)) {
+ if (child.stderr?.readableEncoding) {
    stderr = ArrayPrototypeJoin.$call(_stderr, "");
  } else {
    stderr = BufferConcat(_stderr);
@@ -339,84 +339,50 @@ function execFile(file, args, options, callback) {
    }, options.timeout).unref();
  }

- const onData = (array, kind) => {
-   let total = 0;
-   let encodedLength;
-   return encoding
-     ? function onDataEncoded(chunk) {
-         total += chunk.length;
-
-         if (total > maxBuffer) {
-           const out = child[kind];
-           const encoding = out.readableEncoding;
-           const actualLen = Buffer.byteLength(chunk, encoding);
-           if (encodedLength === undefined) {
-             encodedLength = 0;
-
-             for (let i = 0, length = array.length; i < length; i++) {
-               encodedLength += Buffer.byteLength(array[i], encoding);
-             }
-           }
-
-           encodedLength += actualLen;
-
-           if (encodedLength > maxBuffer) {
-             const joined = ArrayPrototypeJoin.$call(array, "");
-             let combined = joined + chunk;
-             combined = StringPrototypeSlice.$call(combined, 0, maxBuffer);
-             array.length = 1;
-             array[0] = combined;
-             ex = ERR_CHILD_PROCESS_STDIO_MAXBUFFER(kind);
-             kill();
-           } else {
-             const val = ArrayPrototypeJoin.$call(array, "") + chunk;
-             array.length = 1;
-             array[0] = val;
-           }
-         } else {
-           $arrayPush(array, chunk);
-         }
-       }
-     : function onDataRaw(chunk) {
-         total += chunk.length;
-
-         if (total > maxBuffer) {
-           const truncatedLen = maxBuffer - (total - chunk.length);
-           $arrayPush(array, chunk.slice(0, truncatedLen));
-
-           ex = ERR_CHILD_PROCESS_STDIO_MAXBUFFER(kind);
-           kill();
-         } else {
-           $arrayPush(array, chunk);
-         }
-       };
- };
-
- if (child.stdout) {
-   if (encoding) child.stdout.setEncoding(encoding);
-
-   child.stdout.on(
-     "data",
-     maxBuffer === Infinity
-       ? function onUnlimitedSizeBufferedData(chunk) {
-           $arrayPush(_stdout, chunk);
-         }
-       : onData(_stdout, "stdout"),
-   );
- }
-
- if (child.stderr) {
-   if (encoding) child.stderr.setEncoding(encoding);
-
-   child.stderr.on(
-     "data",
-     maxBuffer === Infinity
-       ? function onUnlimitedSizeBufferedData(chunk) {
-           $arrayPush(_stderr, chunk);
-         }
-       : onData(_stderr, "stderr"),
-   );
- }
+ function addOnDataListener(child_buffer, _buffer, kind) {
+   if (encoding) child_buffer.setEncoding(encoding);
+
+   let totalLen = 0;
+   if (maxBuffer === Infinity) {
+     child_buffer.on("data", function onDataNoMaxBuf(chunk) {
+       $arrayPush(_buffer, chunk);
+     });
+     return;
+   }
+   child_buffer.on("data", function onData(chunk) {
+     const encoding = child_buffer.readableEncoding;
+     if (encoding) {
+       const length = Buffer.byteLength(chunk, encoding);
+       totalLen += length;
+
+       if (totalLen > maxBuffer) {
+         const truncatedLen = maxBuffer - (totalLen - length);
+         $arrayPush(_buffer, String.prototype.slice.$call(chunk, 0, truncatedLen));
+
+         ex = $ERR_CHILD_PROCESS_STDIO_MAXBUFFER(kind);
+         kill();
+       } else {
+         $arrayPush(_buffer, chunk);
+       }
+     } else {
+       const length = chunk.length;
+       totalLen += length;
+
+       if (totalLen > maxBuffer) {
+         const truncatedLen = maxBuffer - (totalLen - length);
+         $arrayPush(_buffer, chunk.slice(0, truncatedLen));
+
+         ex = $ERR_CHILD_PROCESS_STDIO_MAXBUFFER(kind);
+         kill();
+       } else {
+         $arrayPush(_buffer, chunk);
+       }
+     }
+   });
+ }
+
+ if (child.stdout) addOnDataListener(child.stdout, _stdout, "stdout");
+ if (child.stderr) addOnDataListener(child.stderr, _stderr, "stderr");

  child.addListener("close", exitHandler);
  child.addListener("error", errorHandler);
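The rewritten listener truncates whatever was buffered and reports the Node-style error through the callback. A usage sketch of the resulting behavior, with an illustrative command and limit:

```ts
// Sketch: execFile with a small maxBuffer. The callback receives the
// ERR_CHILD_PROCESS_STDIO_MAXBUFFER error and the truncated output.
import { execFile } from "node:child_process";

execFile("yes", { maxBuffer: 64, encoding: "utf8" }, (err, stdout) => {
  console.log(err?.code);     // "ERR_CHILD_PROCESS_STDIO_MAXBUFFER"
  console.log(err?.message);  // "stdout maxBuffer length exceeded"
  console.log(stdout.length); // roughly at most 64 for ASCII output (truncated to the limit)
});
```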
@@ -563,6 +529,7 @@ function spawnSync(file, args, options) {
    exitCode,
    signalCode,
    exitedDueToTimeout,
+   exitedDueToMaxBuffer,
    pid,
  } = Bun.spawnSync({
    // normalizeSpawnargs has already prepended argv0 to the spawnargs array
@@ -577,6 +544,7 @@ function spawnSync(file, args, options) {
    argv0: options.args[0],
    timeout: options.timeout,
    killSignal: options.killSignal,
+   maxBuffer: options.maxBuffer,
  });
} catch (err) {
  error = err;
@@ -616,6 +584,15 @@ function spawnSync(file, args, options) {
      "ETIMEDOUT",
    );
  }
+ if (exitedDueToMaxBuffer && error == null) {
+   result.error = new SystemError(
+     "spawnSync " + options.file + " ENOBUFS (stdout or stderr buffer reached maxBuffer size limit)",
+     options.file,
+     "spawnSync " + options.file,
+     enobufsErrorCode(),
+     "ENOBUFS",
+   );
+ }

  if (result.error) {
    result.error.syscall = "spawnSync " + options.file;
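Since the synchronous path has no callback to receive the error, `spawnSync` surfaces the overflow on `result.error` as an ENOBUFS `SystemError` instead. A sketch of checking it, with an illustrative command:

```ts
// Sketch: child_process.spawnSync reports maxBuffer overflow as an ENOBUFS error.
import { spawnSync } from "node:child_process";

const res = spawnSync("yes", { maxBuffer: 64 });
const err = res.error as NodeJS.ErrnoException | undefined;
if (err) {
  console.log(err.code);    // "ENOBUFS"
  console.log(err.syscall); // "spawnSync yes"
}
```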
@@ -625,6 +602,7 @@ function spawnSync(file, args, options) {
  return result;
}
const etimedoutErrorCode = $newZigFunction("node_util_binding.zig", "etimedoutErrorCode", 0);
+const enobufsErrorCode = $newZigFunction("node_util_binding.zig", "enobufsErrorCode", 0);

/**
 * Spawns a file as a shell synchronously.
@@ -1336,6 +1314,7 @@ class ChildProcess extends EventEmitter {
      argv0: spawnargs[0],
      windowsHide: !!options.windowsHide,
      windowsVerbatimArguments: !!options.windowsVerbatimArguments,
+     maxBuffer: options.maxBuffer,
    });
    this.pid = this.#handle.pid;

@@ -1348,6 +1327,15 @@ class ChildProcess extends EventEmitter {
    if (has_ipc) {
      this.send = this.#send;
      this.disconnect = this.#disconnect;
+     this.channel = new Control();
+     Object.defineProperty(this, "_channel", {
+       get() {
+         return this.channel;
+       },
+       set(value) {
+         this.channel = value;
+       },
+     });
      if (options[kFromNode]) this.#closesNeeded += 1;
    }

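With the constructor installing a `Control` object and the `_channel` alias, an IPC child should expose `subprocess.channel` the way Node does, and the `#disconnect` hunk below clears it. A small sketch of the observable behavior, using a hypothetical worker script path:

```ts
// Sketch: subprocess.channel is an object while IPC is connected, null after disconnect.
import { fork } from "node:child_process";

const child = fork("./worker.js"); // hypothetical script, forked with an IPC channel
console.log(typeof child.channel); // "object": the Control instance created above
child.on("disconnect", () => {
  console.log(child.channel); // expected: null, cleared by the #disconnect path below
});
child.disconnect();
```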
@@ -1416,8 +1404,8 @@ class ChildProcess extends EventEmitter {
      return;
    }
    $assert(!this.connected);
-   this.#maybeClose();
    process.nextTick(() => this.emit("disconnect"));
+   process.nextTick(() => this.#maybeClose());
  }
  #disconnect() {
    if (!this.connected) {
@@ -1425,6 +1413,7 @@ class ChildProcess extends EventEmitter {
      return;
    }
    this.#handle.disconnect();
+   this.channel = null;
  }

  kill(sig?) {
@@ -1612,6 +1601,12 @@ function abortChildProcess(child, killSignal, reason) {
  }
}

+class Control extends EventEmitter {
+  constructor() {
+    super();
+  }
+}
+
class ShimmedStdin extends EventEmitter {
  constructor() {
    super();
@@ -1882,12 +1877,6 @@ function genericNodeError(message, errorProperties) {
  // TypeError
  // );

- function ERR_CHILD_PROCESS_STDIO_MAXBUFFER(stdio) {
-   const err = Error(`${stdio} maxBuffer length exceeded`);
-   err.code = "ERR_CHILD_PROCESS_STDIO_MAXBUFFER";
-   return err;
- }
-
function ERR_UNKNOWN_SIGNAL(name) {
  const err = new TypeError(`Unknown signal: ${name}`);
  err.code = "ERR_UNKNOWN_SIGNAL";
154 test/js/bun/spawn/spawn-maxbuf.test.ts Normal file
@@ -0,0 +1,154 @@
import { bunExe } from "harness";

const { isWindows } = require("../../node/test/common");

async function toUtf8(out: ReadableStream<Uint8Array>): Promise<string> {
  const stream = new TextDecoderStream();
  out.pipeTo(stream.writable);
  let result = "";
  for await (const chunk of stream.readable) {
    result += chunk;
  }
  return result;
}

describe("yes is killed", () => {
  // TODO
  test("Bun.spawn", async () => {
    const timeStart = Date.now();
    const proc = Bun.spawn([bunExe(), "exec", "yes"], {
      maxBuffer: 256,
      killSignal: isWindows ? "SIGKILL" : "SIGHUP",
      stdio: ["pipe", "pipe", "pipe"],
    });
    await proc.exited;
    expect(proc.exitCode).toBe(null);
    expect(proc.signalCode).toBe(isWindows ? "SIGKILL" : "SIGHUP");
    const timeEnd = Date.now();
    expect(timeEnd - timeStart).toBeLessThan(100); // make sure it's not waiting a full tick
    const result = await toUtf8(proc.stdout);
    expect(result).toStartWith("y\n".repeat(128));
    const stderr = await toUtf8(proc.stderr);
    expect(stderr).toBe("");
  });

  test("Bun.spawnSync", () => {
    const timeStart = Date.now();
    const proc = Bun.spawnSync([bunExe(), "exec", "yes"], {
      maxBuffer: 256,
      killSignal: isWindows ? "SIGKILL" : "SIGHUP",
      stdio: ["pipe", "pipe", "pipe"],
    });
    expect(proc.exitedDueToMaxBuffer).toBe(true);
    expect(proc.exitCode).toBe(null);
    expect(proc.signalCode).toBe(isWindows ? "SIGKILL" : "SIGHUP");
    const timeEnd = Date.now();
    expect(timeEnd - timeStart).toBeLessThan(100); // make sure it's not waiting a full tick
    const result = proc.stdout.toString("utf-8");
    expect(result).toStartWith("y\n".repeat(128));
    const stderr = proc.stderr.toString("utf-8");
    expect(stderr).toBe("");
  });
});

describe("maxBuffer infinity does not limit the number of bytes", () => {
  const sample = "this is a long example string\n";
  const sample_repeat_count = 10000;
  test("Bun.spawn", async () => {
    const proc = Bun.spawn([bunExe(), "-e", `console.log(${JSON.stringify(sample)}.repeat(${sample_repeat_count}))`], {
      maxBuffer: Infinity,
    });
    await proc.exited;
    expect(proc.exitCode).toBe(0);
    const result = await toUtf8(proc.stdout);
    expect(result).toBe(sample.repeat(sample_repeat_count) + "\n");
  });

  test("Bun.spawnSync", () => {
    const proc = Bun.spawnSync(
      [bunExe(), "-e", `console.log(${JSON.stringify(sample)}.repeat(${sample_repeat_count}))`],
      {
        maxBuffer: Infinity,
      },
    );
    expect(proc.exitCode).toBe(0);
    const result = proc.stdout.toString("utf-8");
    expect(result).toBe(sample.repeat(sample_repeat_count) + "\n");
  });
});

describe("timeout kills the process", () => {
  test("Bun.spawn", async () => {
    const timeStart = Date.now();
    const proc = Bun.spawn([bunExe(), "exec", "sleep 5"], {
      timeout: 100,
      killSignal: isWindows ? "SIGKILL" : "SIGHUP",
      stdio: ["pipe", "pipe", "pipe"],
    });
    await proc.exited;
    expect(proc.exitCode).toBe(null);
    expect(proc.signalCode).toBe(isWindows ? "SIGKILL" : "SIGHUP");
    const timeEnd = Date.now();
    expect(timeEnd - timeStart).toBeLessThan(200); // make sure it's terminating early
    const result = await toUtf8(proc.stdout);
    expect(result).toBe("");
    const stderr = await toUtf8(proc.stderr);
    expect(stderr).toBe("");
  });

  test("Bun.spawnSync", () => {
    const timeStart = Date.now();
    const proc = Bun.spawnSync([bunExe(), "exec", "sleep 5"], {
      timeout: 100,
      killSignal: isWindows ? "SIGKILL" : "SIGHUP",
      stdio: ["pipe", "pipe", "pipe"],
    });
    expect(proc.exitedDueToTimeout).toBe(true);
    expect(proc.exitCode).toBe(null);
    expect(proc.signalCode).toBe(isWindows ? "SIGKILL" : "SIGHUP");
    const timeEnd = Date.now();
    expect(timeEnd - timeStart).toBeGreaterThan(100); // make sure it actually waits
    expect(timeEnd - timeStart).toBeLessThan(200); // make sure it's terminating early
    const result = proc.stdout.toString("utf-8");
    expect(result).toBe("");
    const stderr = proc.stderr.toString("utf-8");
    expect(stderr).toBe("");
  });
});

describe("timeout Infinity does not kill the process", () => {
  test("Bun.spawn", async () => {
    const timeStart = Date.now();
    const proc = Bun.spawn([bunExe(), "exec", "sleep 1"], {
      timeout: Infinity,
      killSignal: isWindows ? "SIGKILL" : "SIGHUP",
      stdio: ["pipe", "pipe", "pipe"],
    });
    await proc.exited;
    expect(proc.exitCode).toBe(0);
    const timeEnd = Date.now();
    expect(timeEnd - timeStart).toBeGreaterThan(1000); // make sure it actually waits
    expect(timeEnd - timeStart).toBeLessThan(1500); // make sure it's terminating early
    const result = await toUtf8(proc.stdout);
    expect(result).toBe("");
    const stderr = await toUtf8(proc.stderr);
    expect(stderr).toBe("");
  });

  test("Bun.spawnSync", () => {
    const timeStart = Date.now();
    const proc = Bun.spawnSync([bunExe(), "exec", "sleep 1"], {
      timeout: Infinity,
      killSignal: isWindows ? "SIGKILL" : "SIGHUP",
      stdio: ["pipe", "pipe", "pipe"],
    });
    expect(proc.exitCode).toBe(0);
    const timeEnd = Date.now();
    expect(timeEnd - timeStart).toBeGreaterThan(1000); // make sure it actually waits
    expect(timeEnd - timeStart).toBeLessThan(1500);
    const result = proc.stdout.toString("utf-8");
    expect(result).toBe("");
    const stderr = proc.stderr.toString("utf-8");
    expect(stderr).toBe("");
  });
});
@@ -0,0 +1,88 @@
'use strict';
const common = require('../common');
const assert = require('assert');
const exec = require('child_process').exec;
const { promisify } = require('util');

const execPromisifed = promisify(exec);
const invalidArgTypeError = {
  code: 'ERR_INVALID_ARG_TYPE',
  name: 'TypeError'
};

const waitCommand = common.isWindows ?
  // `"` is forbidden for Windows paths, no need for escaping.
  `"${process.execPath}" -e "setInterval(()=>{}, 99)"` :
  'sleep 2m';

{
  const ac = new AbortController();
  const signal = ac.signal;
  const promise = execPromisifed(waitCommand, { signal });
  ac.abort();
  assert.rejects(promise, {
    name: 'AbortError',
    cause: ac.signal.reason,
  }).then(common.mustCall());
}

{
  const err = new Error('boom');
  const ac = new AbortController();
  const signal = ac.signal;
  const promise = execPromisifed(waitCommand, { signal });
  assert.rejects(promise, {
    name: 'AbortError',
    cause: err
  }).then(common.mustCall());
  ac.abort(err);
}

{
  const ac = new AbortController();
  const signal = ac.signal;
  const promise = execPromisifed(waitCommand, { signal });
  assert.rejects(promise, {
    name: 'AbortError',
    cause: 'boom'
  }).then(common.mustCall());
  ac.abort('boom');
}

{
  assert.throws(() => {
    execPromisifed(waitCommand, { signal: {} });
  }, invalidArgTypeError);
}

{
  function signal() {}
  assert.throws(() => {
    execPromisifed(waitCommand, { signal });
  }, invalidArgTypeError);
}

{
  const signal = AbortSignal.abort(); // Abort in advance
  const promise = execPromisifed(waitCommand, { signal });

  assert.rejects(promise, { name: 'AbortError' })
    .then(common.mustCall());
}

{
  const err = new Error('boom');
  const signal = AbortSignal.abort(err); // Abort in advance
  const promise = execPromisifed(waitCommand, { signal });

  assert.rejects(promise, { name: 'AbortError', cause: err })
    .then(common.mustCall());
}

{
  const signal = AbortSignal.abort('boom'); // Abort in advance
  const promise = execPromisifed(waitCommand, { signal });

  assert.rejects(promise, { name: 'AbortError', cause: 'boom' })
    .then(common.mustCall());
}
146
test/js/node/test/parallel/test-child-process-exec-maxbuf.js
Normal file
@@ -0,0 +1,146 @@
'use strict';
const common = require('../common');
const assert = require('assert');
const cp = require('child_process');

function runChecks(err, stdio, streamName, expected) {
  assert.strictEqual(err.message, `${streamName} maxBuffer length exceeded`);
  assert(err instanceof RangeError);
  assert.strictEqual(err.code, 'ERR_CHILD_PROCESS_STDIO_MAXBUFFER');
  assert.deepStrictEqual(stdio[streamName], expected);
}

// The execPath might contain chars that should be escaped in a shell context.
// On non-Windows, we can pass the path via the env; `"` is not a valid char on
// Windows, so we can simply pass the path.
const execNode = (args, optionsOrCallback, callback) => {
  const [cmd, opts] = common.escapePOSIXShell`"${process.execPath}" `;
  let options = optionsOrCallback;
  if (typeof optionsOrCallback === 'function') {
    options = undefined;
    callback = optionsOrCallback;
  }
  return cp.exec(
    cmd + args,
    { ...opts, ...options },
    callback,
  );
};

// default value
{
  execNode(`-e "console.log('a'.repeat(1024 * 1024))"`, common.mustCall((err) => {
    assert(err instanceof RangeError);
    assert.strictEqual(err.message, 'stdout maxBuffer length exceeded');
    assert.strictEqual(err.code, 'ERR_CHILD_PROCESS_STDIO_MAXBUFFER');
  }));
}

// default value
{
  execNode(`-e "console.log('a'.repeat(1024 * 1024 - 1))"`, common.mustSucceed((stdout, stderr) => {
    assert.strictEqual(stdout.trim(), 'a'.repeat(1024 * 1024 - 1));
    assert.strictEqual(stderr, '');
  }));
}

{
  const options = { maxBuffer: Infinity };

  execNode(`-e "console.log('hello world');"`, options, common.mustSucceed((stdout, stderr) => {
    assert.strictEqual(stdout.trim(), 'hello world');
    assert.strictEqual(stderr, '');
  }));
}

{
  const cmd = 'echo hello world';

  cp.exec(
    cmd,
    { maxBuffer: 5 },
    common.mustCall((err, stdout, stderr) => {
      runChecks(err, { stdout, stderr }, 'stdout', 'hello');
    })
  );
}

// default value
{
  execNode(
    `-e "console.log('a'.repeat(1024 * 1024))"`,
    common.mustCall((err, stdout, stderr) => {
      runChecks(
        err,
        { stdout, stderr },
        'stdout',
        'a'.repeat(1024 * 1024)
      );
    })
  );
}

// default value
{
  execNode(`-e "console.log('a'.repeat(1024 * 1024 - 1))"`, common.mustSucceed((stdout, stderr) => {
    assert.strictEqual(stdout.trim(), 'a'.repeat(1024 * 1024 - 1));
    assert.strictEqual(stderr, '');
  }));
}

const unicode = '中文测试'; // length = 4, byte length = 12

{
  execNode(
    `-e "console.log('${unicode}');"`,
    { maxBuffer: 10 },
    common.mustCall((err, stdout, stderr) => {
      runChecks(err, { stdout, stderr }, 'stdout', '中文测试\n');
    })
  );
}

{
  execNode(
    `-e "console.error('${unicode}');"`,
    { maxBuffer: 3 },
    common.mustCall((err, stdout, stderr) => {
      runChecks(err, { stdout, stderr }, 'stderr', '中文测');
    })
  );
}

{
  const child = execNode(
    `-e "console.log('${unicode}');"`,
    { encoding: null, maxBuffer: 10 },
    common.mustCall((err, stdout, stderr) => {
      runChecks(err, { stdout, stderr }, 'stdout', '中文测试\n');
    })
  );

  child.stdout.setEncoding('utf-8');
}

{
  const child = execNode(
    `-e "console.error('${unicode}');"`,
    { encoding: null, maxBuffer: 3 },
    common.mustCall((err, stdout, stderr) => {
      runChecks(err, { stdout, stderr }, 'stderr', '中文测');
    })
  );

  child.stderr.setEncoding('utf-8');
}

{
  execNode(
    `-e "console.error('${unicode}');"`,
    { encoding: null, maxBuffer: 5 },
    common.mustCall((err, stdout, stderr) => {
      const buf = Buffer.from(unicode).slice(0, 5);
      runChecks(err, { stdout, stderr }, 'stderr', buf);
    })
  );
}
@@ -0,0 +1,92 @@
'use strict';
const common = require('../common');
const assert = require('assert');
const { execFile } = require('child_process');

function checkFactory(streamName) {
  return common.mustCall((err) => {
    assert(err instanceof RangeError);
    assert.strictEqual(err.message, `${streamName} maxBuffer length exceeded`);
    assert.strictEqual(err.code, 'ERR_CHILD_PROCESS_STDIO_MAXBUFFER');
  });
}

// default value
{
  execFile(
    process.execPath,
    ['-e', 'console.log("a".repeat(1024 * 1024))'],
    checkFactory('stdout')
  );
}

// default value
{
  execFile(
    process.execPath,
    ['-e', 'console.log("a".repeat(1024 * 1024 - 1))'],
    common.mustSucceed((stdout, stderr) => {
      assert.strictEqual(stdout.trim(), 'a'.repeat(1024 * 1024 - 1));
      assert.strictEqual(stderr, '');
    })
  );
}

{
  const options = { maxBuffer: Infinity };

  execFile(
    process.execPath,
    ['-e', 'console.log("hello world");'],
    options,
    common.mustSucceed((stdout, stderr) => {
      assert.strictEqual(stdout.trim(), 'hello world');
      assert.strictEqual(stderr, '');
    })
  );
}

{
  execFile('echo', ['hello world'], { maxBuffer: 5 }, checkFactory('stdout'));
}

const unicode = '中文测试'; // length = 4, byte length = 12

{
  execFile(
    process.execPath,
    ['-e', `console.log('${unicode}');`],
    { maxBuffer: 10 },
    checkFactory('stdout'));
}

{
  execFile(
    process.execPath,
    ['-e', `console.error('${unicode}');`],
    { maxBuffer: 10 },
    checkFactory('stderr')
  );
}

{
  const child = execFile(
    process.execPath,
    ['-e', `console.log('${unicode}');`],
    { encoding: null, maxBuffer: 10 },
    checkFactory('stdout')
  );

  child.stdout.setEncoding('utf-8');
}

{
  const child = execFile(
    process.execPath,
    ['-e', `console.error('${unicode}');`],
    { encoding: null, maxBuffer: 10 },
    checkFactory('stderr')
  );

  child.stderr.setEncoding('utf-8');
}
@@ -0,0 +1,53 @@
'use strict';
require('../common');

// This test checks that the maxBuffer option for child_process.execFileSync()
// works as expected.

const assert = require('assert');
const { getSystemErrorName } = require('util');
const { execFileSync } = require('child_process');
const msgOut = 'this is stdout';
const msgOutBuf = Buffer.from(`${msgOut}\n`);

const args = [
  '-e',
  `console.log("${msgOut}");`,
];

// Verify that an error is returned if maxBuffer is surpassed.
{
  assert.throws(() => {
    execFileSync(process.execPath, args, { maxBuffer: 1 });
  }, (e) => {
    assert.ok(e, 'maxBuffer should error');
    assert.strictEqual(e.code, 'ENOBUFS');
    assert.strictEqual(getSystemErrorName(e.errno), 'ENOBUFS');
    // We can have buffers larger than maxBuffer because underneath we alloc 64k
    // that matches our read sizes.
    assert.deepStrictEqual(e.stdout, msgOutBuf);
    return true;
  });
}

// Verify that a maxBuffer size of Infinity works.
{
  const ret = execFileSync(process.execPath, args, { maxBuffer: Infinity });

  assert.deepStrictEqual(ret, msgOutBuf);
}

// Default maxBuffer size is 1024 * 1024.
{
  assert.throws(() => {
    execFileSync(
      process.execPath,
      ['-e', "console.log('a'.repeat(1024 * 1024))"]
    );
  }, (e) => {
    assert.ok(e, 'maxBuffer should error');
    assert.strictEqual(e.code, 'ENOBUFS');
    assert.strictEqual(getSystemErrorName(e.errno), 'ENOBUFS');
    return true;
  });
}
@@ -0,0 +1,60 @@
'use strict';
const { escapePOSIXShell } = require('../common');

// This test checks that the maxBuffer option for child_process.execSync()
// works as expected.

const assert = require('assert');
const { getSystemErrorName } = require('util');
const { execSync } = require('child_process');
const msgOut = 'this is stdout';
const msgOutBuf = Buffer.from(`${msgOut}\n`);

const [cmd, opts] = escapePOSIXShell`"${process.execPath}" -e "${`console.log('${msgOut}')`}"`;

// Verify that an error is returned if maxBuffer is surpassed.
{
  assert.throws(() => {
    execSync(cmd, { ...opts, maxBuffer: 1 });
  }, (e) => {
    assert.ok(e, 'maxBuffer should error');
    assert.strictEqual(e.code, 'ENOBUFS');
    assert.strictEqual(getSystemErrorName(e.errno), 'ENOBUFS');
    // We can have buffers larger than maxBuffer because underneath we alloc 64k
    // that matches our read sizes.
    assert.deepStrictEqual(e.stdout, msgOutBuf);
    return true;
  });
}

// Verify that a maxBuffer size of Infinity works.
{
  const ret = execSync(
    cmd,
    { ...opts, maxBuffer: Infinity },
  );

  assert.deepStrictEqual(ret, msgOutBuf);
}

// Default maxBuffer size is 1024 * 1024.
{
  assert.throws(() => {
    execSync(...escapePOSIXShell`"${process.execPath}" -e "console.log('a'.repeat(1024 * 1024))"`);
  }, (e) => {
    assert.ok(e, 'maxBuffer should error');
    assert.strictEqual(e.code, 'ENOBUFS');
    assert.strictEqual(getSystemErrorName(e.errno), 'ENOBUFS');
    return true;
  });
}

// Default maxBuffer size is 1024 * 1024.
{
  const ret = execSync(...escapePOSIXShell`"${process.execPath}" -e "console.log('a'.repeat(1024 * 1024 - 1))"`);

  assert.deepStrictEqual(
    ret.toString().trim(),
    'a'.repeat(1024 * 1024 - 1)
  );
}
@@ -0,0 +1,88 @@
'use strict';
const common = require('../common');

// Before https://github.com/nodejs/node/pull/2847 a child process trying
// (asynchronously) to use the closed channel to its creator caused a segfault.

const assert = require('assert');
const cluster = require('cluster');
const net = require('net');

if (!cluster.isPrimary) {
  // Exit on first received handle to leave the queue non-empty in primary
  process.on('message', function() {
    process.exit(1);
  });
  return;
}

const server = net
  .createServer(function(s) {
    if (common.isWindows) {
      s.on('error', function(err) {
        // Prevent possible ECONNRESET errors from popping up
        if (err.code !== 'ECONNRESET') throw err;
      });
    }
    setTimeout(function() {
      s.destroy();
    }, 100);
  })
  .listen(0, function() {
    const worker = cluster.fork();

    worker.on('error', function(err) {
      if (
        err.code !== 'ECONNRESET' &&
        err.code !== 'ECONNREFUSED' &&
        err.code !== 'EMFILE'
      ) {
        throw err;
      }
    });

    function send(callback) {
      const s = net.connect(server.address().port, function() {
        worker.send({}, s, callback);
      });

      // https://github.com/nodejs/node/issues/3635#issuecomment-157714683
      // ECONNREFUSED or ECONNRESET errors can happen if this connection is
      // still establishing while the server has already closed.
      // EMFILE can happen if the worker __and__ the server had already closed.
      s.on('error', function(err) {
        if (
          err.code !== 'ECONNRESET' &&
          err.code !== 'ECONNREFUSED' &&
          err.code !== 'EMFILE'
        ) {
          throw err;
        }
      });
    }

    worker.process.once(
      'close',
      common.mustCall(function() {
        // Otherwise the crash on `channel.fd` access may happen
        assert.strictEqual(worker.process.channel, null);
        server.close();
      })
    );

    worker.on('online', function() {
      send(function(err) {
        assert.ifError(err);
        send(function(err) {
          // Ignore errors when sending the second handle because the worker
          // may already have exited.
          if (err && err.code !== 'ERR_IPC_CHANNEL_CLOSED' &&
              err.code !== 'ECONNRESET' &&
              err.code !== 'ECONNREFUSED' &&
              err.code !== 'EMFILE') {
            throw err;
          }
        });
      });
    });
  });
@@ -0,0 +1,58 @@
'use strict';
require('../common');

// This test checks that the maxBuffer option for child_process.spawnSync()
// works as expected.

const assert = require('assert');
const spawnSync = require('child_process').spawnSync;
const { getSystemErrorName } = require('util');
const msgOut = 'this is stdout';
const msgOutBuf = Buffer.from(`${msgOut}\n`);

const args = [
  '-e',
  `console.log("${msgOut}");`,
];

// Verify that an error is returned if maxBuffer is surpassed.
{
  const ret = spawnSync(process.execPath, args, { maxBuffer: 1 });

  assert.ok(ret.error, 'maxBuffer should error');
  assert.strictEqual(ret.error.code, 'ENOBUFS');
  assert.strictEqual(getSystemErrorName(ret.error.errno), 'ENOBUFS');
  // We can have buffers larger than maxBuffer because underneath we alloc 64k
  // that matches our read sizes.
  assert.deepStrictEqual(ret.stdout, msgOutBuf);
}

// Verify that a maxBuffer size of Infinity works.
{
  const ret = spawnSync(process.execPath, args, { maxBuffer: Infinity });

  assert.ifError(ret.error);
  assert.deepStrictEqual(ret.stdout, msgOutBuf);
}

// Default maxBuffer size is 1024 * 1024.
{
  const args = ['-e', "console.log('a'.repeat(1024 * 1024))"];
  const ret = spawnSync(process.execPath, args);

  assert.ok(ret.error, 'maxBuffer should error');
  assert.strictEqual(ret.error.code, 'ENOBUFS');
  assert.strictEqual(getSystemErrorName(ret.error.errno), 'ENOBUFS');
}

// Default maxBuffer size is 1024 * 1024.
{
  const args = ['-e', "console.log('a'.repeat(1024 * 1024 - 1))"];
  const ret = spawnSync(process.execPath, args);

  assert.ifError(ret.error);
  assert.deepStrictEqual(
    ret.stdout.toString().trim(),
    'a'.repeat(1024 * 1024 - 1)
  );
}