Mirror of https://github.com/oven-sh/bun, synced 2026-02-21 16:21:51 +00:00
Compare commits: claude/fix ... jarred/str (2 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 1a88df0cda | |
| | dc4dd3aa5b | |
.github/workflows/close-stale-robobun-prs.yml (vendored, 30 lines)
@@ -1,30 +0,0 @@
name: Close stale robobun PRs
on:
  schedule:
    - cron: "30 0 * * *"
  workflow_dispatch:

jobs:
  close-stale-robobun-prs:
    runs-on: ubuntu-latest
    timeout-minutes: 10
    permissions:
      pull-requests: write
    steps:
      - name: Close stale robobun PRs
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GH_REPO: ${{ github.repository }}
        run: |
          ninety_days_ago=$(date -u -d '90 days ago' +%Y-%m-%dT%H:%M:%SZ)

          gh pr list \
            --author robobun \
            --state open \
            --json number,updatedAt \
            --limit 1000 \
            --jq ".[] | select(.updatedAt < \"$ninety_days_ago\") | .number" |
          while read -r pr_number; do
            echo "Closing PR #$pr_number (last updated before $ninety_days_ago)"
            gh pr close "$pr_number" --comment "Closing this PR because it has been inactive for more than 90 days."
          done
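The deleted workflow's logic is simple: compute a cutoff 90 days in the past, list open PRs authored by robobun, and close any whose `updatedAt` is older than the cutoff. For reference only, here is a hypothetical sketch of the same sweep written with the Bun Shell; it is not part of this change, it assumes an authenticated `gh` CLI on PATH, and it mirrors the string comparison of ISO-8601 timestamps used by the script above.

```ts
// Hypothetical sketch: close robobun PRs that have been inactive for 90+ days.
// Assumes `gh` is installed and authenticated (e.g. via GH_TOKEN).
import { $ } from "bun";

// Cutoff 90 days ago, trimmed to whole seconds so it matches gh's timestamp format.
const ninetyDaysAgo = new Date(Date.now() - 90 * 24 * 60 * 60 * 1000).toISOString().split(".")[0] + "Z";

// `gh pr list --json` prints a JSON array; the shell promise's .json() parses it.
const prs: { number: number; updatedAt: string }[] =
  await $`gh pr list --author robobun --state open --json number,updatedAt --limit 1000`.json();

for (const pr of prs) {
  // Same-format ISO-8601 strings compare correctly as plain strings.
  if (pr.updatedAt < ninetyDaysAgo) {
    console.log(`Closing PR #${pr.number} (last updated before ${ninetyDaysAgo})`);
    await $`gh pr close ${pr.number} --comment "Closing this PR because it has been inactive for more than 90 days."`;
  }
}
```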
@@ -91,6 +91,8 @@ pub const Features = struct {
    pub var yaml_parse: usize = 0;
    pub var cpu_profile: usize = 0;
    pub var heap_snapshot: usize = 0;
    pub var fs_watch: usize = 0;
    pub var fs_watchfile: usize = 0;

    comptime {
        @export(&napi_module_register, .{ .name = "Bun__napi_module_register_count" });
@@ -1088,7 +1088,7 @@ pub const WindowsSpawnOptions = struct {

    pub fn deinit(this: *const Stdio) void {
        if (this.* == .buffer) {
            this.buffer.closeAndDestroy();
            bun.default_allocator.destroy(this.buffer);
        }
    }
};
@@ -1,6 +1,5 @@
const WindowsNamedPipeContext = @This();

ref_count: RefCount,
named_pipe: uws.WindowsNamedPipe,
socket: SocketType,

@@ -11,14 +10,6 @@ task: jsc.AnyTask,
task_event: EventState = .none,
is_open: bool = false,

const RefCount = bun.ptr.RefCount(@This(), "ref_count", scheduleDeinit, .{});
pub const ref = RefCount.ref;
pub const deref = RefCount.deref;

fn scheduleDeinit(this: *WindowsNamedPipeContext) void {
    this.deinitInNextTick();
}

pub const EventState = enum(u8) {
    deinit,
    none,
@@ -157,7 +148,7 @@ fn onClose(this: *WindowsNamedPipeContext) void {
        .none => {},
    }

    this.deref();
    this.deinitInNextTick();
}

fn runEvent(this: *WindowsNamedPipeContext) void {
@@ -178,7 +169,6 @@ fn deinitInNextTick(this: *WindowsNamedPipeContext) void {
pub fn create(globalThis: *jsc.JSGlobalObject, socket: SocketType) *WindowsNamedPipeContext {
    const vm = globalThis.bunVM();
    const this = WindowsNamedPipeContext.new(.{
        .ref_count = .init(),
        .vm = vm,
        .globalThis = globalThis,
        .task = undefined,
@@ -189,8 +179,6 @@ pub fn create(globalThis: *jsc.JSGlobalObject, socket: SocketType) *WindowsNamed
    // named_pipe owns the pipe (PipeWriter owns the pipe and will close and deinit it)
    this.named_pipe = uws.WindowsNamedPipe.from(bun.handleOom(bun.default_allocator.create(uv.Pipe)), .{
        .ctx = this,
        .ref_ctx = @ptrCast(&WindowsNamedPipeContext.ref),
        .deref_ctx = @ptrCast(&WindowsNamedPipeContext.deref),
        .onOpen = @ptrCast(&WindowsNamedPipeContext.onOpen),
        .onData = @ptrCast(&WindowsNamedPipeContext.onData),
        .onHandshake = @ptrCast(&WindowsNamedPipeContext.onHandshake),
@@ -230,7 +218,7 @@ pub fn open(globalThis: *jsc.JSGlobalObject, fd: bun.FileDescriptor, ssl_config:
            },
            .none => {},
        }
        this.deref();
        this.deinitInNextTick();
    }
    try this.named_pipe.open(fd, ssl_config).unwrap();
    return &this.named_pipe;
@@ -250,7 +238,7 @@ pub fn connect(globalThis: *jsc.JSGlobalObject, path: []const u8, ssl_config: ?j
            },
            .none => {},
        }
        this.deref();
        this.deinitInNextTick();
    }

    if (path[path.len - 1] == 0) {
@@ -2450,20 +2450,13 @@ void JSC__JSObject__putRecord(JSC::JSObject* object, JSC::JSGlobalObject* global
        descriptor.setValue(JSC::jsString(global->vm(), Zig::toStringCopy(values[0])));
    } else {

        // Pre-convert all strings to JSValues before entering ObjectInitializationScope,
        // since jsString() allocates GC cells which is not allowed inside the scope.
        MarkedArgumentBuffer strings;
        for (size_t i = 0; i < valuesLen; ++i) {
            strings.append(JSC::jsString(global->vm(), Zig::toStringCopy(values[i])));
        }

        JSC::JSArray* array = nullptr;
        {
            JSC::ObjectInitializationScope initializationScope(global->vm());
            if ((array = JSC::JSArray::tryCreateUninitializedRestricted(initializationScope, nullptr, global->arrayStructureForIndexingTypeDuringAllocation(JSC::ArrayWithContiguous), valuesLen))) {

                for (size_t i = 0; i < valuesLen; ++i) {
                    array->initializeIndexWithoutBarrier(initializationScope, i, strings.at(i));
                    array->initializeIndexWithoutBarrier(initializationScope, i, JSC::jsString(global->vm(), Zig::toStringCopy(values[i])));
                }
            }
        }
@@ -2497,13 +2490,6 @@ void JSC__JSValue__putRecord(JSC::EncodedJSValue objectValue, JSC::JSGlobalObjec
        descriptor.setValue(JSC::jsString(global->vm(), Zig::toString(values[0])));
    } else {

        // Pre-convert all strings to JSValues before entering ObjectInitializationScope,
        // since jsString() allocates GC cells which is not allowed inside the scope.
        MarkedArgumentBuffer strings;
        for (size_t i = 0; i < valuesLen; ++i) {
            strings.append(JSC::jsString(global->vm(), Zig::toString(values[i])));
        }

        JSC::JSArray* array = nullptr;
        {
            JSC::ObjectInitializationScope initializationScope(global->vm());
@@ -2514,7 +2500,7 @@ void JSC__JSValue__putRecord(JSC::EncodedJSValue objectValue, JSC::JSGlobalObjec

            for (size_t i = 0; i < valuesLen; ++i) {
                array->initializeIndexWithoutBarrier(
                    initializationScope, i, strings.at(i));
                    initializationScope, i, JSC::jsString(global->vm(), Zig::toString(values[i])));
            }
        }
    }
@@ -759,6 +759,8 @@ static void initializeColumnNames(JSC::JSGlobalObject* lexicalGlobalObject, JSSQ

    // Slow path:

    JSC::ObjectInitializationScope initializationScope(vm);

    // 64 is the maximum we can preallocate here
    // see https://github.com/oven-sh/bun/issues/987
    JSObject* prototype = castedThis->userPrototype ? castedThis->userPrototype.get() : lexicalGlobalObject->objectPrototype();
@@ -929,7 +929,7 @@ pub const SendQueue = struct {
        return err;
    };
    ipc_pipe.open(pipe_fd).unwrap() catch |err| {
        ipc_pipe.closeAndDestroy();
        bun.default_allocator.destroy(ipc_pipe);
        return err;
    };
    ipc_pipe.unref();
@@ -486,9 +486,11 @@ pub const StatWatcher = struct {

    /// After a restat found the file changed, this calls the listener function.
    pub fn swapAndCallListenerOnMainThread(this: *StatWatcher) void {
        bun.analytics.Features.fs_watchfile += 1;
        defer this.deref(); // Balance the ref from restat().
        const prev_jsvalue = this.last_jsvalue.swap();
        const globalThis = this.globalThis;

        const current_jsvalue = statToJSStats(globalThis, &this.getLastStat(), this.bigint) catch return; // TODO: properly propagate exception upwards
        this.last_jsvalue.set(globalThis, current_jsvalue);
@@ -261,6 +261,7 @@ pub const FSWatcher = struct {

    pub fn onPathUpdatePosix(ctx: ?*anyopaque, event: Event, is_file: bool) void {
        const this = bun.cast(*FSWatcher, ctx.?);
        bun.analytics.Features.fs_watch += 1;

        if (this.verbose) {
            switch (event) {
@@ -281,6 +282,7 @@ pub const FSWatcher = struct {

    pub fn onPathUpdateWindows(ctx: ?*anyopaque, event: Event, is_file: bool) void {
        const this = bun.cast(*FSWatcher, ctx.?);
        bun.analytics.Features.fs_watch += 1;

        if (this.verbose) {
            switch (event) {
@@ -299,9 +301,23 @@ pub const FSWatcher = struct {
            return;
        }

        // The event's path comes from PathWatcherManager.onFileUpdate as a
        // []const u8 sub-slice of the watchlist's file_path storage, auto-coerced
        // to StringOrBytesToDecode{.bytes_to_free}. We must create a properly
        // owned copy: either a bun.String for utf8 encoding or a duped []const u8
        // for other encodings.
        const owned_event: Event = switch (event) {
            inline .rename, .change => |path, t| @unionInit(Event, @tagName(t), if (this.encoding == .utf8)
                FSWatchTaskWindows.StringOrBytesToDecode{ .string = bun.String.cloneUTF8(path.bytes_to_free) }
            else
                FSWatchTaskWindows.StringOrBytesToDecode{ .bytes_to_free = bun.default_allocator.dupe(u8, path.bytes_to_free) catch return }),
            .@"error" => |err| .{ .@"error" = err.clone(bun.default_allocator) },
            inline else => |value, t| @unionInit(Event, @tagName(t), value),
        };

        const task = bun.new(FSWatchTaskWindows, .{
            .ctx = this,
            .event = event,
            .event = owned_event,
        });
        this.eventLoop().enqueueTask(jsc.Task.init(task));
    }
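The new fs_watch and fs_watchfile counters increment inside the native callbacks backing Bun's Node-compatible watchers (FSWatcher for fs.watch, StatWatcher for fs.watchFile). For orientation, a minimal sketch of the JavaScript-level calls these paths serve; the file paths and interval are placeholders, and the mapping of counters to APIs is inferred from the surrounding code rather than stated in the diff.

```ts
import { watch, watchFile, unwatchFile } from "node:fs";

// fs.watch is served by the FSWatcher paths above (fs_watch counter).
const watcher = watch("./some-dir", (eventType, filename) => {
  console.log("fs.watch event:", eventType, filename);
});

// fs.watchFile polls with stat and is served by StatWatcher (fs_watchfile counter).
watchFile("./some-file.txt", { interval: 1000 }, (curr, prev) => {
  console.log("fs.watchFile: mtime", prev.mtime, "->", curr.mtime);
});

// Stop watching when done.
watcher.close();
unwatchFile("./some-file.txt");
```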
@@ -587,12 +587,12 @@ pub const PathLike = union(enum) {
    if (std.fs.path.isAbsolute(sliced)) {
        if (sliced.len > 2 and bun.path.isDriveLetter(sliced[0]) and sliced[1] == ':' and bun.path.isSepAny(sliced[2])) {
            // Add the long path syntax. This affects most of node:fs
            // Normalize the path directly into buf without an intermediate
            // buffer. The input (sliced) already has a drive letter, so
            // resolveCWDWithExternalBufZ would just memcpy it, making the
            // temporary allocation unnecessary.
            const drive_resolve_buf = bun.path_buffer_pool.get();
            defer bun.path_buffer_pool.put(drive_resolve_buf);
            const rest = path_handler.PosixToWinNormalizer.resolveCWDWithExternalBufZ(drive_resolve_buf, sliced) catch @panic("Error while resolving path.");
            buf[0..4].* = bun.windows.long_path_prefix_u8;
            const n = bun.path.normalizeBuf(sliced, buf[4..], .windows).len;
            // When long path syntax is used, the entire string should be normalized
            const n = bun.path.normalizeBuf(rest, buf[4..], .windows).len;
            buf[4 + n] = 0;
            return buf[0 .. 4 + n :0];
        }
@@ -815,10 +815,16 @@ pub const Value = union(Tag) {
    }

    pub fn tryUseAsAnyBlob(this: *Value) ?AnyBlob {
        if (this.* == .WTFStringImpl) {
            if (this.WTFStringImpl.canUseAsUTF8()) {
                return AnyBlob{ .WTFStringImpl = this.WTFStringImpl };
            }
        }

        const any_blob: AnyBlob = switch (this.*) {
            .Blob => .{ .Blob = this.Blob },
            .InternalBlob => .{ .InternalBlob = this.InternalBlob },
            .WTFStringImpl => |str| if (str.canUseAsUTF8()) .{ .WTFStringImpl = str } else return null,
            .Blob => AnyBlob{ .Blob = this.Blob },
            .InternalBlob => AnyBlob{ .InternalBlob = this.InternalBlob },
            // .InlineBlob => AnyBlob{ .InlineBlob = this.InlineBlob },
            .Locked => this.Locked.toAnyBlobAllowPromise() orelse return null,
            else => return null,
        };
@@ -1414,17 +1414,6 @@ pub const Pipe = extern struct {
    pub fn asStream(this: *@This()) *uv_stream_t {
        return @ptrCast(this);
    }

    /// Close the pipe handle and then free it in the close callback.
    /// Use this when a pipe has been init'd but needs to be destroyed
    /// (e.g. when open() fails after init() succeeded).
    pub fn closeAndDestroy(this: *@This()) void {
        this.close(&onCloseDestroy);
    }

    fn onCloseDestroy(handle: *@This()) callconv(.c) void {
        bun.default_allocator.destroy(handle);
    }
};
const union_unnamed_416 = extern union {
    fd: c_int,
@@ -51,8 +51,6 @@ pub const Flags = packed struct(u8) {
};
pub const Handlers = struct {
    ctx: *anyopaque,
    ref_ctx: *const fn (*anyopaque) void,
    deref_ctx: *const fn (*anyopaque) void,
    onOpen: *const fn (*anyopaque) void,
    onHandshake: *const fn (*anyopaque, bool, uws.us_bun_verify_error_t) void,
    onData: *const fn (*anyopaque, []const u8) void,
@@ -273,16 +271,7 @@ pub fn from(
        .handlers = handlers,
    };
}
pub fn ref(this: *WindowsNamedPipe) void {
    this.handlers.ref_ctx(this.handlers.ctx);
}

pub fn deref(this: *WindowsNamedPipe) void {
    this.handlers.deref_ctx(this.handlers.ctx);
}

fn onConnect(this: *WindowsNamedPipe, status: uv.ReturnCode) void {
    defer this.deref();
    if (this.pipe) |pipe| {
        _ = pipe.unref();
    }
@@ -387,7 +376,6 @@ pub fn open(this: *WindowsNamedPipe, fd: bun.FileDescriptor, ssl_options: ?jsc.A
        return openResult;
    }

    this.ref();
    onConnect(this, uv.ReturnCode.zero);
    return .success;
}
@@ -422,12 +410,7 @@ pub fn connect(this: *WindowsNamedPipe, path: []const u8, ssl_options: ?jsc.API.
    }

    this.connect_req.data = this;
    const result = this.pipe.?.connect(&this.connect_req, path, this, onConnect);
    if (result.asErr() != null) {
        return result;
    }
    this.ref();
    return result;
    return this.pipe.?.connect(&this.connect_req, path, this, onConnect);
}
pub fn startTLS(this: *WindowsNamedPipe, ssl_options: jsc.API.ServerConfig.SSLConfig, is_client: bool) !void {
    this.flags.is_ssl = true;
@@ -694,8 +694,8 @@ pub fn PosixStreamingWriter(comptime Parent: type, comptime function_table: anyt
    }

    pub fn deinit(this: *PosixWriter) void {
        this.closeWithoutReporting();
        this.outgoing.deinit();
        this.closeWithoutReporting();
    }

    pub fn hasRef(this: *PosixWriter) bool {
@@ -815,39 +815,29 @@ fn BaseWindowsPipeWriter(

    pub fn close(this: *WindowsPipeWriter) void {
        this.is_done = true;
        const source = this.source orelse return;
        // Check for in-flight file write before detaching. detach()
        // nulls fs.data so onFsWriteComplete can't recover the writer
        // to call deref(). We must balance processSend's ref() here.
        const has_inflight_write = if (@hasField(WindowsPipeWriter, "current_payload")) switch (source) {
            .sync_file, .file => |file| file.state == .operating or file.state == .canceling,
            else => false,
        } else false;
        switch (source) {
            .sync_file, .file => |file| {
                // Use state machine to handle close after operation completes
                if (this.owns_fd) {
                    file.detach();
                } else {
                    // Don't own fd, just stop operations and detach parent
                    file.stop();
                    file.fs.data = null;
                }
            },
            .pipe => |pipe| {
                pipe.data = pipe;
                pipe.close(onPipeClose);
            },
            .tty => |tty| {
                tty.data = tty;
                tty.close(onTTYClose);
            },
        }
        this.source = null;
        this.onCloseSource();
        // Deref last — this may free the parent and `this`.
        if (has_inflight_write) {
            this.parent.deref();
        if (this.source) |source| {
            switch (source) {
                .sync_file, .file => |file| {
                    // Use state machine to handle close after operation completes
                    if (this.owns_fd) {
                        file.detach();
                    } else {
                        // Don't own fd, just stop operations and detach parent
                        file.stop();
                        file.fs.data = null;
                    }
                },
                .pipe => |pipe| {
                    pipe.data = pipe;
                    pipe.close(onPipeClose);
                },
                .tty => |tty| {
                    tty.data = tty;
                    tty.close(onTTYClose);
                },
            }
            this.source = null;
            this.onCloseSource();
        }
    }
@@ -1308,10 +1298,6 @@ pub fn WindowsStreamingWriter(comptime Parent: type, function_table: anytype) ty
    }

    fn onWriteComplete(this: *WindowsWriter, status: uv.ReturnCode) void {
        // Deref the parent at the end to balance the ref taken in
        // processSend before submitting the async write request.
        defer this.parent.deref();

        if (status.toError(.write)) |err| {
            this.last_write_result = .{ .err = err };
            log("onWrite() = {s}", .{err.name()});
@@ -1361,8 +1347,7 @@ pub fn WindowsStreamingWriter(comptime Parent: type, function_table: anytype) ty
        // ALWAYS complete first
        file.complete(was_canceled);

        // If detached, file may be closing (owned fd) or just stopped (non-owned fd).
        // The deref to balance processSend's ref was already done in close().
        // If detached, file may be closing (owned fd) or just stopped (non-owned fd)
        if (parent_ptr == null) {
            return;
        }
@@ -1370,21 +1355,17 @@ pub fn WindowsStreamingWriter(comptime Parent: type, function_table: anytype) ty
        const this = bun.cast(*WindowsWriter, parent_ptr);

        if (was_canceled) {
            // Canceled write - reset buffers and deref to balance processSend ref
            // Canceled write - reset buffers
            this.current_payload.reset();
            this.parent.deref();
            return;
        }

        if (result.toError(.write)) |err| {
            // deref to balance processSend ref
            defer this.parent.deref();
            this.close();
            onError(this.parent, err);
            return;
        }

        // onWriteComplete handles the deref
        this.onWriteComplete(.zero);
    }
@@ -1447,10 +1428,6 @@ pub fn WindowsStreamingWriter(comptime Parent: type, function_table: anytype) ty
            }
        },
    }
    // Ref the parent to prevent it from being freed while the async
    // write is in flight. The matching deref is in onWriteComplete
    // or onFsWriteComplete.
    this.parent.ref();
    this.last_write_result = .{ .pending = 0 };
}

@@ -1465,11 +1442,10 @@ pub fn WindowsStreamingWriter(comptime Parent: type, function_table: anytype) ty
    }

    pub fn deinit(this: *WindowsWriter) void {
        // Close the pipe first to cancel any in-flight writes before
        // freeing the buffers they reference.
        this.closeWithoutReporting();
        // clean both buffers if needed
        this.outgoing.deinit();
        this.current_payload.deinit();
        this.closeWithoutReporting();
    }

    fn writeInternal(this: *WindowsWriter, buffer: anytype, comptime writeFn: anytype) WriteResult {
@@ -222,7 +222,7 @@ pub const Source = union(enum) {

    switch (pipe.open(fd)) {
        .err => |err| {
            pipe.closeAndDestroy();
            bun.default_allocator.destroy(pipe);
            return .{
                .err = err,
            };
@@ -1,130 +0,0 @@
import { expect, test } from "bun:test";
import { bunEnv, bunExe } from "harness";

// https://github.com/oven-sh/bun/issues/24277
// AsyncDisposableStack.use() should accept objects with only @@dispose (sync),
// falling back from @@asyncDispose per the TC39 spec.

test("AsyncDisposableStack.use() with sync @@dispose falls back correctly", async () => {
  await using proc = Bun.spawn({
    cmd: [
      bunExe(),
      "-e",
      `
        await using scope = new AsyncDisposableStack();

        scope.use({
          async [Symbol.asyncDispose]() {
            console.log("async dispose");
          },
        });

        scope.use({
          [Symbol.dispose]() {
            console.log("sync dispose");
          },
        });
      `,
    ],
    env: bunEnv,
    stdout: "pipe",
    stderr: "pipe",
  });

  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

  expect(stderr).toBe("");
  // Resources are disposed in LIFO order (stack)
  expect(stdout).toBe("sync dispose\nasync dispose\n");
  expect(exitCode).toBe(0);
});

test("await using with sync @@dispose falls back correctly", async () => {
  await using proc = Bun.spawn({
    cmd: [
      bunExe(),
      "-e",
      `
        async function main() {
          const log = [];
          {
            await using a = {
              async [Symbol.asyncDispose]() {
                log.push("async");
              },
            };
            await using b = {
              [Symbol.dispose]() {
                log.push("sync");
              },
            };
          }
          console.log(log.join(","));
        }
        await main();
      `,
    ],
    env: bunEnv,
    stdout: "pipe",
    stderr: "pipe",
  });

  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

  expect(stderr).toBe("");
  // LIFO order: b disposed first (sync), then a (async)
  expect(stdout).toBe("sync,async\n");
  expect(exitCode).toBe(0);
});

test("AsyncDisposableStack.use() throws when neither @@asyncDispose nor @@dispose is present", async () => {
  await using proc = Bun.spawn({
    cmd: [
      bunExe(),
      "-e",
      `
        try {
          await using scope = new AsyncDisposableStack();
          scope.use({});
          console.log("ERROR: should have thrown");
        } catch (e) {
          console.log("caught: " + (e instanceof TypeError));
        }
      `,
    ],
    env: bunEnv,
    stdout: "pipe",
    stderr: "pipe",
  });

  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

  expect(stdout).toBe("caught: true\n");
  expect(exitCode).toBe(0);
});

test("AsyncDisposableStack.use() with @@asyncDispose still works", async () => {
  await using proc = Bun.spawn({
    cmd: [
      bunExe(),
      "-e",
      `
        await using scope = new AsyncDisposableStack();
        scope.use({
          async [Symbol.asyncDispose]() {
            console.log("async only");
          },
        });
      `,
    ],
    env: bunEnv,
    stdout: "pipe",
    stderr: "pipe",
  });

  const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

  expect(stderr).toBe("");
  expect(stdout).toBe("async only\n");
  expect(exitCode).toBe(0);
});
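The deleted tests exercise the fallback through a spawned subprocess; stripped of that harness, the behavior under test reduces to the following, shown here as a hypothetical minimal sketch rather than part of the test file.

```ts
// AsyncDisposableStack.use() accepts a sync-only disposable (falling back from
// @@asyncDispose to @@dispose per TC39), and disposal runs in LIFO order.
const log: string[] = [];
{
  await using stack = new AsyncDisposableStack();
  stack.use({ async [Symbol.asyncDispose]() { log.push("async"); } });
  stack.use({ [Symbol.dispose]() { log.push("sync"); } });
}
console.log(log.join(",")); // expected: "sync,async"
```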
@@ -1,28 +0,0 @@
import { test } from "bun:test";

test("issue #27099", async () => {
  // Run it twice to trigger ASAN.
  await run();
  await run();
});

async function run() {
  const fileOps = Array.from({ length: 10 }, () => Bun.file("/tmp/nope").exists());

  const outer = Bun.spawn(["bash", "-c", 'for j in $(seq 1 100); do echo "padding padding padding"; done'], {
    stdout: "pipe",
    stderr: "pipe",
  });
  const outerText = new Response(outer.stdout as ReadableStream).text();

  const inner = Bun.spawn(["cat"], {
    stdin: new Response(Buffer.allocUnsafe(20000).fill("a").toString()),
    stdout: "pipe",
  });
  await new Response(inner.stdout as ReadableStream).text();

  await inner.exited;
  await outerText;
  await outer.exited;
  await Promise.all(fileOps);
}