Mirror of https://github.com/oven-sh/bun
Synced 2026-02-20 07:42:30 +00:00

Compare commits: claude/fix...claude/opt (4 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 76582063b8 |  |
|  | d6a5cfa69e |  |
|  | 9bd4ad7166 |  |
|  | d6d41d58d1 |  |
108  bench/snippets/microtask-throughput.mjs (new file)
@@ -0,0 +1,108 @@
import { AsyncLocalStorage } from "node:async_hooks";
import { bench, group, run } from "../runner.mjs";

// Benchmark 1: queueMicrotask throughput
// Tests the BunPerformMicrotaskJob handler path directly.
// The optimization removes the JS trampoline and uses callMicrotask.
group("queueMicrotask throughput", () => {
  bench("queueMicrotask 1k", () => {
    return new Promise(resolve => {
      let remaining = 1000;
      const tick = () => {
        if (--remaining === 0) resolve();
        else queueMicrotask(tick);
      };
      queueMicrotask(tick);
    });
  });

  bench("queueMicrotask 10k", () => {
    return new Promise(resolve => {
      let remaining = 10000;
      const tick = () => {
        if (--remaining === 0) resolve();
        else queueMicrotask(tick);
      };
      queueMicrotask(tick);
    });
  });
});

// Benchmark 2: Promise.resolve chain
// Each .then() queues a microtask via the promise machinery.
// Benefits from smaller QueuedTask (better cache locality in the Deque).
group("Promise.resolve chain", () => {
  bench("Promise chain 1k", () => {
    let p = Promise.resolve();
    for (let i = 0; i < 1000; i++) {
      p = p.then(() => {});
    }
    return p;
  });

  bench("Promise chain 10k", () => {
    let p = Promise.resolve();
    for (let i = 0; i < 10000; i++) {
      p = p.then(() => {});
    }
    return p;
  });
});

// Benchmark 3: Promise.all (many simultaneous resolves)
// All promises resolve at once, flooding the microtask queue.
// Smaller QueuedTask = less memory, better cache utilization.
group("Promise.all simultaneous", () => {
  bench("Promise.all 1k", () => {
    const promises = [];
    for (let i = 0; i < 1000; i++) {
      promises.push(Promise.resolve(i));
    }
    return Promise.all(promises);
  });

  bench("Promise.all 10k", () => {
    const promises = [];
    for (let i = 0; i < 10000; i++) {
      promises.push(Promise.resolve(i));
    }
    return Promise.all(promises);
  });
});

// Benchmark 4: queueMicrotask with AsyncLocalStorage
// Tests the inlined async context save/restore path.
// Previously went through performMicrotaskFunction JS trampoline.
group("queueMicrotask + AsyncLocalStorage", () => {
  const als = new AsyncLocalStorage();

  bench("ALS.run + queueMicrotask 1k", () => {
    return als.run({ id: 1 }, () => {
      return new Promise(resolve => {
        let remaining = 1000;
        const tick = () => {
          als.getStore(); // force context read
          if (--remaining === 0) resolve();
          else queueMicrotask(tick);
        };
        queueMicrotask(tick);
      });
    });
  });
});

// Benchmark 5: async/await (each await queues microtasks)
group("async/await chain", () => {
  async function asyncChain(n) {
    let sum = 0;
    for (let i = 0; i < n; i++) {
      sum += await Promise.resolve(i);
    }
    return sum;
  }

  bench("async/await 1k", () => asyncChain(1000));
  bench("async/await 10k", () => asyncChain(10000));
});

await run();
@@ -6,7 +6,7 @@ option(WEBKIT_LOCAL "If a local version of WebKit should be used instead of down
option(WEBKIT_BUILD_TYPE "The build type for local WebKit (defaults to CMAKE_BUILD_TYPE)")

if(NOT WEBKIT_VERSION)
  set(WEBKIT_VERSION 8af7958ff0e2a4787569edf64641a1ae7cfe074a)
  set(WEBKIT_VERSION preview-pr-160-8680a32c)
endif()

# Use preview build URL for Windows ARM64 until the fix is merged to main
@@ -954,7 +954,6 @@ BUN_DEFINE_HOST_FUNCTION(jsFunctionBunPluginClear, (JSC::JSGlobalObject * global
    global->onResolvePlugins.namespaces.clear();

    delete global->onLoadPlugins.virtualModules;
    global->onLoadPlugins.virtualModules = nullptr;

    return JSC::JSValue::encode(JSC::jsUndefined());
}
@@ -1061,9 +1061,7 @@ JSC_DEFINE_HOST_FUNCTION(functionQueueMicrotask,

    auto* globalObject = defaultGlobalObject(lexicalGlobalObject);
    JSC::JSValue asyncContext = globalObject->m_asyncContextData.get()->getInternalField(0);
    auto function = globalObject->performMicrotaskFunction();
#if ASSERT_ENABLED
    ASSERT_WITH_MESSAGE(function, "Invalid microtask function");
    ASSERT_WITH_MESSAGE(!callback.isEmpty(), "Invalid microtask callback");
#endif
@@ -1071,10 +1069,8 @@ JSC_DEFINE_HOST_FUNCTION(functionQueueMicrotask,
        asyncContext = JSC::jsUndefined();
    }

    // BunPerformMicrotaskJob accepts a variable number of arguments (up to: performMicrotask, job, asyncContext, arg0, arg1).
    // The runtime inspects argumentCount to determine which arguments are present, so callers may pass only the subset they need.
    // Here we pass: function, callback, asyncContext.
    JSC::QueuedTask task { nullptr, JSC::InternalMicrotask::BunPerformMicrotaskJob, 0, globalObject, function, callback, asyncContext };
    // BunPerformMicrotaskJob: callback, asyncContext
    JSC::QueuedTask task { nullptr, JSC::InternalMicrotask::BunPerformMicrotaskJob, 0, globalObject, callback, asyncContext };
    globalObject->vm().queueMicrotask(WTF::move(task));

    return JSC::JSValue::encode(JSC::jsUndefined());
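Whichever argument packing is used, the observable semantics this path must preserve are simple: the async context active when queueMicrotask() is called is saved alongside the task and restored around the callback. A minimal illustration of that contract in ordinary user code (not part of the diff; runnable in Bun or Node):

  import { AsyncLocalStorage } from "node:async_hooks";

  const als = new AsyncLocalStorage<{ id: number }>();

  als.run({ id: 1 }, () => {
    // The async context ({ id: 1 }) is captured here, at queueing time...
    queueMicrotask(() => {
      // ...and restored around the callback, even though the synchronous
      // als.run() scope has long since exited.
      console.log(als.getStore()?.id); // 1
    });
  });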
@@ -1554,63 +1550,6 @@ extern "C" napi_env ZigGlobalObject__makeNapiEnvForFFI(Zig::GlobalObject* global
    return globalObject->makeNapiEnvForFFI();
}

JSC_DEFINE_HOST_FUNCTION(jsFunctionPerformMicrotask, (JSGlobalObject * globalObject, CallFrame* callframe))
{
    auto& vm = JSC::getVM(globalObject);
    auto scope = DECLARE_TOP_EXCEPTION_SCOPE(vm);

    auto job = callframe->argument(0);
    if (!job || job.isUndefinedOrNull()) [[unlikely]] {
        return JSValue::encode(jsUndefined());
    }

    auto callData = JSC::getCallData(job);
    MarkedArgumentBuffer arguments;

    if (callData.type == CallData::Type::None) [[unlikely]] {
        return JSValue::encode(jsUndefined());
    }

    JSValue result;
    WTF::NakedPtr<JSC::Exception> exceptionPtr;

    JSValue restoreAsyncContext = {};
    InternalFieldTuple* asyncContextData = nullptr;
    auto setAsyncContext = callframe->argument(1);
    if (!setAsyncContext.isUndefined()) {
        asyncContextData = globalObject->m_asyncContextData.get();
        restoreAsyncContext = asyncContextData->getInternalField(0);
        asyncContextData->putInternalField(vm, 0, setAsyncContext);
    }

    size_t argCount = callframe->argumentCount();
    switch (argCount) {
    case 3: {
        arguments.append(callframe->uncheckedArgument(2));
        break;
    }
    case 4: {
        arguments.append(callframe->uncheckedArgument(2));
        arguments.append(callframe->uncheckedArgument(3));
        break;
    }
    default:
        break;
    }

    JSC::profiledCall(globalObject, ProfilingReason::API, job, callData, jsUndefined(), arguments, exceptionPtr);

    if (asyncContextData) {
        asyncContextData->putInternalField(vm, 0, restoreAsyncContext);
    }

    if (auto* exception = exceptionPtr.get()) {
        Bun__reportUnhandledError(globalObject, JSValue::encode(exception));
    }

    return JSValue::encode(jsUndefined());
}

JSC_DEFINE_HOST_FUNCTION(jsFunctionPerformMicrotaskVariadic, (JSGlobalObject * globalObject, CallFrame* callframe))
{
    auto& vm = JSC::getVM(globalObject);
@@ -1940,11 +1879,6 @@ void GlobalObject::finishCreation(VM& vm)
            scope.assertNoExceptionExceptTermination();
            init.set(subclassStructure);
        });
    m_performMicrotaskFunction.initLater(
        [](const Initializer<JSFunction>& init) {
            init.set(JSFunction::create(init.vm, init.owner, 4, "performMicrotask"_s, jsFunctionPerformMicrotask, ImplementationVisibility::Public));
        });

    m_performMicrotaskVariadicFunction.initLater(
        [](const Initializer<JSFunction>& init) {
            init.set(JSFunction::create(init.vm, init.owner, 4, "performMicrotaskVariadic"_s, jsFunctionPerformMicrotaskVariadic, ImplementationVisibility::Public));
@@ -272,7 +272,6 @@ public:

    JSC::JSObject* performanceObject() const { return m_performanceObject.getInitializedOnMainThread(this); }

    JSC::JSFunction* performMicrotaskFunction() const { return m_performMicrotaskFunction.getInitializedOnMainThread(this); }
    JSC::JSFunction* performMicrotaskVariadicFunction() const { return m_performMicrotaskVariadicFunction.getInitializedOnMainThread(this); }

    JSC::Structure* utilInspectOptionsStructure() const { return m_utilInspectOptionsStructure.getInitializedOnMainThread(this); }
@@ -569,7 +568,6 @@ public:
    V(private, LazyPropertyOfGlobalObject<Structure>, m_jsonlParseResultStructure) \
    V(private, LazyPropertyOfGlobalObject<Structure>, m_pathParsedObjectStructure) \
    V(private, LazyPropertyOfGlobalObject<Structure>, m_pendingVirtualModuleResultStructure) \
    V(private, LazyPropertyOfGlobalObject<JSFunction>, m_performMicrotaskFunction) \
    V(private, LazyPropertyOfGlobalObject<JSFunction>, m_nativeMicrotaskTrampoline) \
    V(private, LazyPropertyOfGlobalObject<JSFunction>, m_performMicrotaskVariadicFunction) \
    V(private, LazyPropertyOfGlobalObject<JSFunction>, m_utilInspectFunction) \
@@ -3538,13 +3538,11 @@ void JSC__JSPromise__rejectOnNextTickWithHandled(JSC::JSPromise* promise, JSC::J

    promise->internalField(JSC::JSPromise::Field::Flags).set(vm, promise, jsNumber(flags | JSC::JSPromise::isFirstResolvingFunctionCalledFlag));
    auto* globalObject = jsCast<Zig::GlobalObject*>(promise->globalObject());
    auto microtaskFunction = globalObject->performMicrotaskFunction();
    auto rejectPromiseFunction = globalObject->rejectPromiseFunction();

    auto asyncContext = globalObject->m_asyncContextData.get()->getInternalField(0);

#if ASSERT_ENABLED
    ASSERT_WITH_MESSAGE(microtaskFunction, "Invalid microtask function");
    ASSERT_WITH_MESSAGE(rejectPromiseFunction, "Invalid microtask callback");
    ASSERT_WITH_MESSAGE(!value.isEmpty(), "Invalid microtask value");
#endif
@@ -3557,7 +3555,8 @@ void JSC__JSPromise__rejectOnNextTickWithHandled(JSC::JSPromise* promise, JSC::J
        value = jsUndefined();
    }

    JSC::QueuedTask task { nullptr, JSC::InternalMicrotask::BunPerformMicrotaskJob, 0, globalObject, microtaskFunction, rejectPromiseFunction, globalObject->m_asyncContextData.get()->getInternalField(0), promise, value };
    // BunPerformMicrotaskJob: rejectPromiseFunction, asyncContext, promise, value
    JSC::QueuedTask task { nullptr, JSC::InternalMicrotask::BunPerformMicrotaskJob, 0, globalObject, rejectPromiseFunction, globalObject->m_asyncContextData.get()->getInternalField(0), promise, value };
    globalObject->vm().queueMicrotask(WTF::move(task));
    RETURN_IF_EXCEPTION(scope, );
}
@@ -5438,9 +5437,7 @@ extern "C" void JSC__JSGlobalObject__queueMicrotaskJob(JSC::JSGlobalObject* arg0
    if (microtaskArgs[3].isEmpty()) {
        microtaskArgs[3] = jsUndefined();
    }
    JSC::JSFunction* microTaskFunction = globalObject->performMicrotaskFunction();
#if ASSERT_ENABLED
    ASSERT_WITH_MESSAGE(microTaskFunction, "Invalid microtask function");
    auto& vm = globalObject->vm();
    if (microtaskArgs[0].isCell()) {
        JSC::Integrity::auditCellFully(vm, microtaskArgs[0].asCell());
@@ -5460,7 +5457,8 @@ extern "C" void JSC__JSGlobalObject__queueMicrotaskJob(JSC::JSGlobalObject* arg0

#endif

    JSC::QueuedTask task { nullptr, JSC::InternalMicrotask::BunPerformMicrotaskJob, 0, globalObject, microTaskFunction, WTF::move(microtaskArgs[0]), WTF::move(microtaskArgs[1]), WTF::move(microtaskArgs[2]), WTF::move(microtaskArgs[3]) };
    // BunPerformMicrotaskJob: job, asyncContext, arg0, arg1
    JSC::QueuedTask task { nullptr, JSC::InternalMicrotask::BunPerformMicrotaskJob, 0, globalObject, WTF::move(microtaskArgs[0]), WTF::move(microtaskArgs[1]), WTF::move(microtaskArgs[2]), WTF::move(microtaskArgs[3]) };
    globalObject->vm().queueMicrotask(WTF::move(task));
}
@@ -948,7 +948,6 @@ pub const CommandLineReporter = struct {
                this.printSummary();
                Output.prettyError("\nBailed out after {d} failure{s}<r>\n", .{ this.jest.bail, if (this.jest.bail == 1) "" else "s" });
                Output.flush();
                this.writeJUnitReportIfNeeded();
                Global.exit(1);
            }
        },
@@ -971,20 +970,6 @@ pub const CommandLineReporter = struct {
        Output.printStartEnd(bun.start_time, std.time.nanoTimestamp());
    }

    /// Writes the JUnit reporter output file if a JUnit reporter is active and
    /// an outfile path was configured. This must be called before any early exit
    /// (e.g. bail) so that the report is not lost.
    pub fn writeJUnitReportIfNeeded(this: *CommandLineReporter) void {
        if (this.reporters.junit) |junit| {
            if (this.jest.test_options.reporter_outfile) |outfile| {
                if (junit.current_file.len > 0) {
                    junit.endTestSuite() catch {};
                }
                junit.writeToFile(outfile) catch {};
            }
        }
    }

    pub fn generateCodeCoverage(this: *CommandLineReporter, vm: *jsc.VirtualMachine, opts: *TestCommand.CodeCoverageOptions, comptime reporters: TestCommand.Reporters, comptime enable_ansi_colors: bool) !void {
        if (comptime !reporters.text and !reporters.lcov) {
            return;
@@ -1787,7 +1772,12 @@ pub const TestCommand = struct {
        Output.prettyError("\n", .{});
        Output.flush();

        reporter.writeJUnitReportIfNeeded();
        if (reporter.reporters.junit) |junit| {
            if (junit.current_file.len > 0) {
                junit.endTestSuite() catch {};
            }
            junit.writeToFile(ctx.test_options.reporter_outfile.?) catch {};
        }

        if (vm.hot_reload == .watch) {
            vm.runWithAPILock(jsc.VirtualMachine, vm, runEventLoopForWatch);
@@ -1930,7 +1920,6 @@ pub const TestCommand = struct {
        if (reporter.jest.bail == reporter.summary().fail) {
            reporter.printSummary();
            Output.prettyError("\nBailed out after {d} failure{s}<r>\n", .{ reporter.jest.bail, if (reporter.jest.bail == 1) "" else "s" });
            reporter.writeJUnitReportIfNeeded();

            vm.exit_handler.exit_code = 1;
            vm.is_shutting_down = true;
@@ -27,7 +27,6 @@ pub fn NewWebSocketClient(comptime ssl: bool) type {
        ping_frame_bytes: [128 + 6]u8 = [_]u8{0} ** (128 + 6),
        ping_len: u8 = 0,
        ping_received: bool = false,
        pong_received: bool = false,
        close_received: bool = false,
        close_frame_buffering: bool = false,
@@ -121,7 +120,6 @@ pub fn NewWebSocketClient(comptime ssl: bool) type {
            this.clearReceiveBuffers(true);
            this.clearSendBuffers(true);
            this.ping_received = false;
            this.pong_received = false;
            this.ping_len = 0;
            this.close_frame_buffering = false;
            this.receive_pending_chunk_len = 0;
@@ -652,38 +650,14 @@ pub fn NewWebSocketClient(comptime ssl: bool) type {
                    if (data.len == 0) break;
                },
                .pong => {
                    if (!this.pong_received) {
                        if (receive_body_remain > 125) {
                            this.terminate(ErrorCode.invalid_control_frame);
                            terminated = true;
                            break;
                        }
                        this.ping_len = @truncate(receive_body_remain);
                        receive_body_remain = 0;
                        this.pong_received = true;
                    }
                    const pong_len = this.ping_len;
                    const pong_len = @min(data.len, @min(receive_body_remain, this.ping_frame_bytes.len));

                    if (data.len > 0) {
                        const total_received = @min(pong_len, receive_body_remain + data.len);
                        const slice = this.ping_frame_bytes[6..][receive_body_remain..total_received];
                        @memcpy(slice, data[0..slice.len]);
                        receive_body_remain = total_received;
                        data = data[slice.len..];
                    }
                    const pending_body = pong_len - receive_body_remain;
                    if (pending_body > 0) {
                        // wait for more data - pong payload is fragmented across TCP segments
                        break;
                    }

                    const pong_data = this.ping_frame_bytes[6..][0..pong_len];
                    this.dispatchData(pong_data, .Pong);
                    this.dispatchData(data[0..pong_len], .Pong);

                    data = data[pong_len..];
                    receive_state = .need_header;
                    receive_body_remain = 0;
                    receiving_type = last_receive_data_type;
                    this.pong_received = false;

                    if (data.len == 0) break;
                },
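For readers unfamiliar with the framing: a pong is a control frame whose 2-byte header declares a payload of at most 125 bytes, and TCP is free to split the frame across reads. The longer variant above buffers partially delivered payloads; the shorter one assumes the whole frame is present. A self-contained sketch of the scenario (illustrative only; byte layout per RFC 6455):

  // Unmasked server-to-client pong: FIN=1 + opcode 0xA, then a 7-bit length.
  function makePongFrame(payload: Uint8Array): Uint8Array {
    if (payload.length > 125) throw new Error("control frames carry <= 125 bytes");
    const frame = new Uint8Array(2 + payload.length);
    frame[0] = 0x8a; // FIN | pong opcode
    frame[1] = payload.length; // server frames are unmasked, so no mask bit
    frame.set(payload, 2);
    return frame;
  }

  const pong = makePongFrame(new Uint8Array(50));
  const read1 = pong.slice(0, 4); // header + first 2 payload bytes in one TCP read
  const read2 = pong.slice(4);    // remaining 48 payload bytes arrive later
  // A parser that consumes read1 and resets to "expect a new header" will
  // misinterpret the first bytes of read2 as a frame header: the desync bug.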
72  src/ini.zig
@@ -291,32 +291,25 @@ pub const Parser = struct {
                        }
                    },
                    else => {
                        switch (bun.strings.utf8ByteSequenceLength(c)) {
                            0, 1 => try unesc.appendSlice(&[_]u8{ '\\', c }),
                            2 => if (val.len - i >= 2) {
                                try unesc.appendSlice(&[_]u8{ '\\', c, val[i + 1] });
                                i += 1;
                            } else {
                                try unesc.appendSlice(&[_]u8{ '\\', c });
                        try unesc.appendSlice(switch (bun.strings.utf8ByteSequenceLength(c)) {
                            1 => brk: {
                                break :brk &[_]u8{ '\\', c };
                            },
                            3 => if (val.len - i >= 3) {
                                try unesc.appendSlice(&[_]u8{ '\\', c, val[i + 1], val[i + 2] });
                                i += 2;
                            } else {
                                try unesc.append('\\');
                                try unesc.appendSlice(val[i..val.len]);
                                i = val.len - 1;
                            2 => brk: {
                                defer i += 1;
                                break :brk &[_]u8{ '\\', c, val[i + 1] };
                            },
                            4 => if (val.len - i >= 4) {
                                try unesc.appendSlice(&[_]u8{ '\\', c, val[i + 1], val[i + 2], val[i + 3] });
                                i += 3;
                            } else {
                                try unesc.append('\\');
                                try unesc.appendSlice(val[i..val.len]);
                                i = val.len - 1;
                            3 => brk: {
                                defer i += 2;
                                break :brk &[_]u8{ '\\', c, val[i + 1], val[i + 2] };
                            },
                            4 => brk: {
                                defer i += 3;
                                break :brk &[_]u8{ '\\', c, val[i + 1], val[i + 2], val[i + 3] };
                            },
                            // this means invalid utf8
                            else => unreachable,
                        }
                        });
                    },
                }
@@ -349,30 +342,25 @@ pub const Parser = struct {
                        try unesc.append('.');
                    }
                },
                else => switch (bun.strings.utf8ByteSequenceLength(c)) {
                    0, 1 => try unesc.append(c),
                    2 => if (val.len - i >= 2) {
                        try unesc.appendSlice(&[_]u8{ c, val[i + 1] });
                        i += 1;
                    } else {
                        try unesc.append(c);
                else => try unesc.appendSlice(switch (bun.strings.utf8ByteSequenceLength(c)) {
                    1 => brk: {
                        break :brk &[_]u8{c};
                    },
                    3 => if (val.len - i >= 3) {
                        try unesc.appendSlice(&[_]u8{ c, val[i + 1], val[i + 2] });
                        i += 2;
                    } else {
                        try unesc.appendSlice(val[i..val.len]);
                        i = val.len - 1;
                    2 => brk: {
                        defer i += 1;
                        break :brk &[_]u8{ c, val[i + 1] };
                    },
                    4 => if (val.len - i >= 4) {
                        try unesc.appendSlice(&[_]u8{ c, val[i + 1], val[i + 2], val[i + 3] });
                        i += 3;
                    } else {
                        try unesc.appendSlice(val[i..val.len]);
                        i = val.len - 1;
                    3 => brk: {
                        defer i += 2;
                        break :brk &[_]u8{ c, val[i + 1], val[i + 2] };
                    },
                    4 => brk: {
                        defer i += 3;
                        break :brk &[_]u8{ c, val[i + 1], val[i + 2], val[i + 3] };
                    },
                    // this means invalid utf8
                    else => unreachable,
                },
            }),
        }
    }
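Both hunks branch on the declared length of a UTF-8 lead byte. A restatement of what that function computes, and why the 0 case must be handled rather than routed to `unreachable` (hypothetical helper mirroring the semantics of bun.strings.utf8ByteSequenceLength, not its implementation):

  function utf8ByteSequenceLength(lead: number): 0 | 1 | 2 | 3 | 4 {
    if (lead < 0x80) return 1; // ASCII
    if (lead < 0xc0) return 0; // bare continuation byte: invalid as a lead
    if (lead < 0xe0) return 2;
    if (lead < 0xf0) return 3;
    if (lead < 0xf8) return 4;
    return 0; // 0xF8-0xFF never occur in well-formed UTF-8
  }

  // "key = \xE0" ends in a lead byte that declares 3 bytes but provides 1:
  // reading val[i + 1] and val[i + 2] unconditionally runs past the buffer,
  // and sending the 0 case to `unreachable` turns bad input into a crash.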
@@ -725,15 +725,7 @@ pub fn enqueueDependencyWithMainAndSuccessFn(
    if (version.tag == .npm and version.value.npm.version.isExact()) {
        if (loaded_manifest.?.findByVersion(version.value.npm.version.head.head.range.left.version)) |find_result| {
            if (this.options.minimum_release_age_ms) |min_age_ms| {
                // Only apply minimum-release-age to workspace dependencies
                // (direct deps of any workspace package.json).
                // Transitive dependencies should not be filtered, as they are
                // pinned by their parent package and filtering them would break
                // the dependency tree (see #27004).
                if (this.lockfile.isWorkspaceDependency(id) and
                    !loaded_manifest.?.shouldExcludeFromAgeFilter(this.options.minimum_release_age_excludes) and
                    Npm.PackageManifest.isPackageVersionTooRecent(find_result.package, min_age_ms))
                {
                if (!loaded_manifest.?.shouldExcludeFromAgeFilter(this.options.minimum_release_age_excludes) and Npm.PackageManifest.isPackageVersionTooRecent(find_result.package, min_age_ms)) {
                    const package_name = this.lockfile.str(&name);
                    const min_age_seconds = min_age_ms / std.time.ms_per_s;
                    this.log.addErrorFmt(null, logger.Loc.Empty, this.allocator, "Version \"{s}@{f}\" was published within minimum release age of {d} seconds", .{ package_name, find_result.version.fmt(this.lockfile.buffers.string_bytes.items), min_age_seconds }) catch {};
@@ -1633,19 +1625,9 @@ fn getOrPutResolvedPackage(
        this.options.minimum_release_age_ms != null,
    ) orelse return null; // manifest might still be downloading. This feels unreliable.

    // Only apply minimum-release-age to workspace dependencies
    // (direct deps of any workspace package.json).
    // Transitive dependencies should not be filtered, as they are pinned
    // by their parent package and filtering them would break the
    // dependency tree (see #27004).
    const effective_min_age_ms: ?f64 = if (this.lockfile.isWorkspaceDependency(dependency_id))
        this.options.minimum_release_age_ms
    else
        null;

    const version_result: Npm.PackageManifest.FindVersionResult = switch (version.tag) {
        .dist_tag => manifest.findByDistTagWithFilter(this.lockfile.str(&version.value.dist_tag.tag), effective_min_age_ms, this.options.minimum_release_age_excludes),
        .npm => manifest.findBestVersionWithFilter(version.value.npm.version, this.lockfile.buffers.string_bytes.items, effective_min_age_ms, this.options.minimum_release_age_excludes),
        .dist_tag => manifest.findByDistTagWithFilter(this.lockfile.str(&version.value.dist_tag.tag), this.options.minimum_release_age_ms, this.options.minimum_release_age_excludes),
        .npm => manifest.findBestVersionWithFilter(version.value.npm.version, this.lockfile.buffers.string_bytes.items, this.options.minimum_release_age_ms, this.options.minimum_release_age_excludes),
        else => unreachable,
    };
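The comments in both hunks describe a single decision rule. Restated as a small predicate (names are illustrative, not Bun's actual internals):

  interface CandidateVersion {
    publishedAtMs: number;
    excludedFromAgeFilter: boolean; // matched by minimum-release-age-excludes
  }

  // Filter only direct (workspace) dependencies. Transitive dependencies are
  // pinned by their parents; filtering them can make the tree unsatisfiable
  // (see #27004).
  function filteredByReleaseAge(
    isWorkspaceDependency: boolean,
    candidate: CandidateVersion,
    minimumReleaseAgeMs: number | null,
    nowMs = Date.now(),
  ): boolean {
    if (minimumReleaseAgeMs == null) return false;
    if (!isWorkspaceDependency) return false; // transitive: never filtered
    if (candidate.excludedFromAgeFilter) return false;
    return nowMs - candidate.publishedAtMs < minimumReleaseAgeMs;
  }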
@@ -460,13 +460,13 @@ pub const Archiver = struct {
    if (comptime Environment.isWindows) {
        try bun.MakePath.makePath(u16, dir, path);
    } else {
        std.posix.mkdiratZ(dir_fd, path, @intCast(mode)) catch |err| {
        std.posix.mkdiratZ(dir_fd, pathname, @intCast(mode)) catch |err| {
            // It's possible for some tarballs to return a directory twice, with and
            // without `./` in the beginning. So if it already exists, continue to the
            // next entry.
            if (err == error.PathAlreadyExists or err == error.NotDir) continue;
            bun.makePath(dir, std.fs.path.dirname(path_slice) orelse return err) catch {};
            std.posix.mkdiratZ(dir_fd, path, 0o777) catch {};
            std.posix.mkdiratZ(dir_fd, pathname, 0o777) catch {};
        };
    }
},
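The distinction between `path` and `pathname` above is the crux: one is the normalized entry name, the other the raw name from the tarball, and only the raw name can still contain ".." for mkdirat() to resolve. A hedged sketch of the kind of lexical normalization involved (hypothetical helper, not Bun's implementation; the clamping behavior is inferred from the test comments later in this diff):

  // Resolve "." and ".." lexically, clamping at the extraction root, so the
  // kernel never sees traversal components.
  function normalizeEntryPath(raw: string): string {
    const out: string[] = [];
    for (const part of raw.split("/")) {
      if (part === "" || part === ".") continue;
      if (part === "..") {
        out.pop(); // clamp: excess ".." cannot climb above the root
        continue;
      }
      out.push(part);
    }
    return out.join("/");
  }

  normalizeEntryPath("safe_dir/../../escaped_dir/"); // => "escaped_dir"
  // Passing the raw pathname to mkdirat() instead lets the kernel resolve
  // ".." and create "escaped_dir" outside the extraction root.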
@@ -221,11 +221,7 @@ pub const S3Credentials = struct {
                defer str.deref();
                if (str.tag != .Empty and str.tag != .Dead) {
                    new_credentials._contentDispositionSlice = str.toUTF8(bun.default_allocator);
                    const slice = new_credentials._contentDispositionSlice.?.slice();
                    if (containsNewlineOrCR(slice)) {
                        return globalObject.throwInvalidArguments("contentDisposition must not contain newline characters (CR/LF)", .{});
                    }
                    new_credentials.content_disposition = slice;
                    new_credentials.content_disposition = new_credentials._contentDispositionSlice.?.slice();
                }
            } else {
                return globalObject.throwInvalidArgumentTypeValue("contentDisposition", "string", js_value);

@@ -240,11 +236,7 @@ pub const S3Credentials = struct {
                defer str.deref();
                if (str.tag != .Empty and str.tag != .Dead) {
                    new_credentials._contentTypeSlice = str.toUTF8(bun.default_allocator);
                    const slice = new_credentials._contentTypeSlice.?.slice();
                    if (containsNewlineOrCR(slice)) {
                        return globalObject.throwInvalidArguments("type must not contain newline characters (CR/LF)", .{});
                    }
                    new_credentials.content_type = slice;
                    new_credentials.content_type = new_credentials._contentTypeSlice.?.slice();
                }
            } else {
                return globalObject.throwInvalidArgumentTypeValue("type", "string", js_value);

@@ -259,11 +251,7 @@ pub const S3Credentials = struct {
                defer str.deref();
                if (str.tag != .Empty and str.tag != .Dead) {
                    new_credentials._contentEncodingSlice = str.toUTF8(bun.default_allocator);
                    const slice = new_credentials._contentEncodingSlice.?.slice();
                    if (containsNewlineOrCR(slice)) {
                        return globalObject.throwInvalidArguments("contentEncoding must not contain newline characters (CR/LF)", .{});
                    }
                    new_credentials.content_encoding = slice;
                    new_credentials.content_encoding = new_credentials._contentEncodingSlice.?.slice();
                }
            } else {
                return globalObject.throwInvalidArgumentTypeValue("contentEncoding", "string", js_value);

@@ -1162,12 +1150,6 @@ const CanonicalRequest = struct {
    }
};

/// Returns true if the given slice contains any CR (\r) or LF (\n) characters,
/// which would allow HTTP header injection if used in a header value.
fn containsNewlineOrCR(value: []const u8) bool {
    return std.mem.indexOfAny(u8, value, "\r\n") != null;
}

const std = @import("std");
const ACL = @import("./acl.zig").ACL;
const MultiPartUploadOptions = @import("./multipart_options.zig").MultiPartUploadOptions;
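Why the check matters: HTTP/1.x headers are delimited by CRLF, so a value that embeds \r\n effectively writes a second, attacker-chosen header. A short illustration (deliberately naive serializer, not Bun's):

  function serializeHeader(name: string, value: string): string {
    return `${name}: ${value}\r\n`; // naive: trusts the value
  }

  const evil = 'attachment; filename="report.pdf"\r\nX-Injected: owned';
  serializeHeader("Content-Disposition", evil);
  // => Content-Disposition: attachment; filename="report.pdf"
  //    X-Injected: owned
  // The receiver parses two headers. The guard is morally equivalent to:
  const containsNewlineOrCR = (value: string) => /[\r\n]/.test(value);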
@@ -1154,7 +1154,7 @@ pub const Interpreter = struct {
    _ = callframe; // autofix

    if (this.setupIOBeforeRun().asErr()) |e| {
        defer this.#derefRootShellAndIOIfNeeded(true);
        defer this.#deinitFromExec();
        const shellerr = bun.shell.ShellErr.newSys(e);
        return try throwShellErr(&shellerr, .{ .js = globalThis.bunVM().event_loop });
    }
@@ -422,19 +422,6 @@ pub fn createInstance(globalObject: *jsc.JSGlobalObject, callframe: *jsc.CallFra
        break :brk b.allocatedSlice();
    };

    // Reject null bytes in connection parameters to prevent protocol injection
    // (null bytes act as field terminators in the MySQL wire protocol).
    inline for (.{ .{ username, "username" }, .{ password, "password" }, .{ database, "database" }, .{ path, "path" } }) |entry| {
        if (entry[0].len > 0 and std.mem.indexOfScalar(u8, entry[0], 0) != null) {
            bun.default_allocator.free(options_buf);
            tls_config.deinit();
            if (tls_ctx) |tls| {
                tls.deinit(true);
            }
            return globalObject.throwInvalidArguments(entry[1] ++ " must not contain null bytes", .{});
        }
    }

    const on_connect = arguments[9];
    const on_close = arguments[10];
    const idle_timeout = arguments[11].toInt32();
@@ -680,20 +680,6 @@ pub fn call(globalObject: *jsc.JSGlobalObject, callframe: *jsc.CallFrame) bun.JS
        break :brk b.allocatedSlice();
    };

    // Reject null bytes in connection parameters to prevent Postgres startup
    // message parameter injection (null bytes act as field terminators in the
    // wire protocol's key\0value\0 format).
    inline for (.{ .{ username, "username" }, .{ password, "password" }, .{ database, "database" }, .{ path, "path" } }) |entry| {
        if (entry[0].len > 0 and std.mem.indexOfScalar(u8, entry[0], 0) != null) {
            bun.default_allocator.free(options_buf);
            tls_config.deinit();
            if (tls_ctx) |tls| {
                tls.deinit(true);
            }
            return globalObject.throwInvalidArguments(entry[1] ++ " must not contain null bytes", .{});
        }
    }

    const on_connect = arguments[9];
    const on_close = arguments[10];
    const idle_timeout = arguments[11].toInt32();
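The wire format named in the comment makes the injection concrete: Postgres startup parameters are serialized as NUL-terminated key/value pairs, so a NUL inside a value is indistinguishable from a field boundary. A sketch (illustrative serializer only, not Bun's):

  // Startup message body: key\0value\0 pairs, ending with an extra \0.
  function startupParams(params: Record<string, string>): string {
    let body = "";
    for (const [key, value] of Object.entries(params)) {
      body += `${key}\0${value}\0`;
    }
    return body + "\0";
  }

  startupParams({ user: "alice\0search_path\0evil_schema,public", database: "testdb" });
  // Serializes identically to:
  //   user\0alice\0search_path\0evil_schema,public\0database\0testdb\0\0
  // i.e. the client "sent" a search_path parameter it was never asked to send.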
@@ -1640,10 +1626,7 @@ pub fn on(this: *PostgresSQLConnection, comptime MessageType: @Type(.enum_litera
            // This will usually start with "v="
            const comparison_signature = final.data.slice();

            if (comparison_signature.len < 2 or
                server_signature.len != comparison_signature.len - 2 or
                BoringSSL.c.CRYPTO_memcmp(server_signature.ptr, comparison_signature[2..].ptr, server_signature.len) != 0)
            {
            if (comparison_signature.len < 2 or !bun.strings.eqlLong(server_signature, comparison_signature[2..], true)) {
                debug("SASLFinal - SASL Server signature mismatch\nExpected: {s}\nActual: {s}", .{ server_signature, comparison_signature[2..] });
                this.fail("The server did not return the correct signature", error.SASL_SIGNATURE_MISMATCH);
            } else {
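On the comparison itself: one of the two versions above uses CRYPTO_memcmp, a constant-time equality check, while the other uses an early-exit string comparison, which leaks the length of the matching prefix through timing. In application code the same constant-time property is available via node:crypto; a hedged sketch:

  import { timingSafeEqual } from "node:crypto";

  function signatureMatches(expected: Uint8Array, received: Uint8Array): boolean {
    // Length is not secret; mismatched lengths can be rejected immediately
    // (timingSafeEqual also requires equal lengths).
    if (expected.length !== received.length) return false;
    // Runs in time independent of where the first differing byte is.
    return timingSafeEqual(expected, received);
  }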
@@ -260,35 +260,14 @@ devTest("hmr handles rapid consecutive edits", {
    await Bun.sleep(1);
  }

  // Wait event-driven for "render 10" to appear. Intermediate renders may
  // be skipped (watcher coalescing) and the final render may fire multiple
  // times (duplicate reloads), so we just listen for any occurrence.
  const finalRender = "render 10";
  await new Promise<void>((resolve, reject) => {
    const check = () => {
      for (const msg of client.messages) {
        if (typeof msg === "string" && msg.includes("HMR_ERROR")) {
          cleanup();
          reject(new Error("Unexpected HMR error message: " + msg));
          return;
        }
        if (msg === finalRender) {
          cleanup();
          resolve();
          return;
        }
      }
    };
    const cleanup = () => {
      client.off("message", check);
    };
    client.on("message", check);
    // Check messages already buffered.
    check();
  });
  // Drain all buffered messages — intermediate renders and possible
  // duplicates of the final render are expected and harmless.
  client.messages.length = 0;
  while (true) {
    const message = await client.getStringMessage();
    if (message === finalRender) break;
    if (typeof message === "string" && message.includes("HMR_ERROR")) {
      throw new Error("Unexpected HMR error message: " + message);
    }
  }

  const hmrErrors = await client.js`return globalThis.__hmrErrors ? [...globalThis.__hmrErrors] : [];`;
  if (hmrErrors.length > 0) {
@@ -786,69 +786,6 @@ describe("minimum-release-age", () => {
      return Response.json(packageData);
    }

    // TEST PACKAGE: parent-package (has exact transitive dep on recent-child-package)
    if (url.pathname === "/parent-package") {
      const packageData = {
        name: "parent-package",
        "dist-tags": { latest: "1.0.0" },
        versions: {
          "1.0.0": {
            name: "parent-package",
            version: "1.0.0",
            dependencies: {
              "recent-child-package": "1.0.0", // exact pin on a recent version
            },
            dist: {
              tarball: `${mockRegistryUrl}/parent-package/-/parent-package-1.0.0.tgz`,
              integrity: "sha512-parent1==",
            },
          },
        },
        time: {
          "1.0.0": daysAgo(10), // old enough to pass filter
        },
      };

      if (req.headers.get("accept")?.includes("application/vnd.npm.install-v1+json")) {
        return Response.json({
          name: packageData.name,
          "dist-tags": packageData["dist-tags"],
          versions: packageData.versions,
        });
      }
      return Response.json(packageData);
    }

    // TEST PACKAGE: recent-child-package (transitive dep that is too recent)
    if (url.pathname === "/recent-child-package") {
      const packageData = {
        name: "recent-child-package",
        "dist-tags": { latest: "1.0.0" },
        versions: {
          "1.0.0": {
            name: "recent-child-package",
            version: "1.0.0",
            dist: {
              tarball: `${mockRegistryUrl}/recent-child-package/-/recent-child-package-1.0.0.tgz`,
              integrity: "sha512-child1==",
            },
          },
        },
        time: {
          "1.0.0": daysAgo(1), // too recent - would fail if filtered
        },
      };

      if (req.headers.get("accept")?.includes("application/vnd.npm.install-v1+json")) {
        return Response.json({
          name: packageData.name,
          "dist-tags": packageData["dist-tags"],
          versions: packageData.versions,
        });
      }
      return Response.json(packageData);
    }

    // Serve tarballs
    if (url.pathname.includes(".tgz")) {
      // Match both regular and scoped package tarballs
@@ -1756,38 +1693,6 @@ registry = "${mockRegistryUrl}"`,
    // Direct dependency should be filtered
    expect(lockfile).toContain("regular-package@2.1.0");
  });

  test("exact-pinned transitive dependencies that are too recent should not be filtered (#27004)", async () => {
    // parent-package@1.0.0 (10 days old, passes filter) depends on
    // recent-child-package@1.0.0 (1 day old, would fail if filtered).
    // The child should NOT be filtered because it's a transitive dependency.
    using dir = tempDir("transitive-exact-pin", {
      "package.json": JSON.stringify({
        dependencies: {
          "parent-package": "*",
        },
      }),
      ".npmrc": `registry=${mockRegistryUrl}`,
    });

    const proc = Bun.spawn({
      cmd: [bunExe(), "install", "--minimum-release-age", `${5 * SECONDS_PER_DAY}`, "--no-verify"],
      cwd: String(dir),
      env: bunEnv,
      stdout: "pipe",
      stderr: "pipe",
    });

    const [stdout, stderr, exitCode] = await Promise.all([proc.stdout.text(), proc.stderr.text(), proc.exited]);

    expect(stderr).not.toContain("minimum release age");
    expect(exitCode).toBe(0);

    const lockfile = await Bun.file(`${dir}/bun.lock`).text();
    // Both packages should be installed
    expect(lockfile).toContain("parent-package");
    expect(lockfile).toContain("recent-child-package");
  });
});

describe("special dependencies", () => {
@@ -611,82 +611,6 @@ describe("Bun.Archive", () => {
      // Very deep paths might fail on some systems - that's acceptable
    }
  });

  test("directory entries with path traversal components cannot escape extraction root", async () => {
    // Manually craft a tar archive containing directory entries with "../" traversal
    // components in their pathnames. This tests that the extraction code uses the
    // normalized path (which strips "..") rather than the raw pathname from the tarball.
    function createTarHeader(
      name: string,
      size: number,
      type: "0" | "5", // 0=file, 5=directory
    ): Uint8Array {
      const header = new Uint8Array(512);
      const enc = new TextEncoder();
      header.set(enc.encode(name).slice(0, 100), 0);
      header.set(enc.encode(type === "5" ? "0000755 " : "0000644 "), 100);
      header.set(enc.encode("0000000 "), 108);
      header.set(enc.encode("0000000 "), 116);
      header.set(enc.encode(size.toString(8).padStart(11, "0") + " "), 124);
      const mtime = Math.floor(Date.now() / 1000)
        .toString(8)
        .padStart(11, "0");
      header.set(enc.encode(mtime + " "), 136);
      header.set(enc.encode("        "), 148); // checksum placeholder
      header[156] = type.charCodeAt(0);
      header.set(enc.encode("ustar"), 257);
      header[262] = 0;
      header.set(enc.encode("00"), 263);
      let checksum = 0;
      for (let i = 0; i < 512; i++) checksum += header[i];
      header.set(enc.encode(checksum.toString(8).padStart(6, "0") + "\0 "), 148);
      return header;
    }

    const blocks: Uint8Array[] = [];
    const enc = new TextEncoder();

    // A legitimate directory
    blocks.push(createTarHeader("safe_dir/", 0, "5"));
    // A directory entry with traversal: "safe_dir/../../escaped_dir/"
    // After normalization this becomes "escaped_dir" (safe),
    // but the raw pathname resolves ".." via the kernel in mkdirat.
    blocks.push(createTarHeader("safe_dir/../../escaped_dir/", 0, "5"));
    // A normal file
    const content = enc.encode("hello");
    blocks.push(createTarHeader("safe_dir/file.txt", content.length, "0"));
    blocks.push(content);
    const pad = 512 - (content.length % 512);
    if (pad < 512) blocks.push(new Uint8Array(pad));
    // End-of-archive markers
    blocks.push(new Uint8Array(1024));

    const totalLen = blocks.reduce((s, b) => s + b.length, 0);
    const tarball = new Uint8Array(totalLen);
    let offset = 0;
    for (const b of blocks) {
      tarball.set(b, offset);
      offset += b.length;
    }

    // Create a parent directory so we can check if "escaped_dir" appears outside extractDir
    using parentDir = tempDir("archive-traversal-parent", {});
    const extractPath = join(String(parentDir), "extract");
    const { mkdirSync, existsSync } = require("fs");
    mkdirSync(extractPath, { recursive: true });

    const archive = new Bun.Archive(tarball);
    await archive.extract(extractPath);

    // The "escaped_dir" should NOT exist in the parent directory (outside extraction root)
    const escapedOutside = join(String(parentDir), "escaped_dir");
    expect(existsSync(escapedOutside)).toBe(false);

    // The "safe_dir" should exist inside the extraction directory
    expect(existsSync(join(extractPath, "safe_dir"))).toBe(true);
    // The normalized "escaped_dir" may or may not exist inside extractPath
    // (depending on whether normalization keeps it), but it must NOT be outside
  });
});

describe("Archive.write()", () => {
@@ -489,61 +489,6 @@ brr = 3
      "zr": ["deedee"],
    });
  });

  describe("truncated/invalid utf-8", () => {
    test("bare continuation byte (0x80) should not crash", () => {
      // 0x80 is a continuation byte without a leading byte
      // utf8ByteSequenceLength returns 0, which must not hit unreachable
      const ini = Buffer.concat([Buffer.from("key = "), Buffer.from([0x80])]).toString("latin1");
      // Should not crash - just parse gracefully
      expect(() => parse(ini)).not.toThrow();
    });

    test("truncated 2-byte sequence at end of value", () => {
      // 0xC0 is a 2-byte lead byte, but there's no continuation byte following
      const ini = Buffer.concat([Buffer.from("key = "), Buffer.from([0xc0])]).toString("latin1");
      expect(() => parse(ini)).not.toThrow();
    });

    test("truncated 3-byte sequence at end of value", () => {
      // 0xE0 is a 3-byte lead byte, but only 0 continuation bytes follow
      const ini = Buffer.concat([Buffer.from("key = "), Buffer.from([0xe0])]).toString("latin1");
      expect(() => parse(ini)).not.toThrow();
    });

    test("truncated 3-byte sequence with 1 continuation byte at end", () => {
      // 0xE0 is a 3-byte lead byte, but only 1 continuation byte follows
      const ini = Buffer.concat([Buffer.from("key = "), Buffer.from([0xe0, 0x80])]).toString("latin1");
      expect(() => parse(ini)).not.toThrow();
    });

    test("truncated 4-byte sequence at end of value", () => {
      // 0xF0 is a 4-byte lead byte, but only 0 continuation bytes follow
      const ini = Buffer.concat([Buffer.from("key = "), Buffer.from([0xf0])]).toString("latin1");
      expect(() => parse(ini)).not.toThrow();
    });

    test("truncated 4-byte sequence with 1 continuation byte at end", () => {
      const ini = Buffer.concat([Buffer.from("key = "), Buffer.from([0xf0, 0x80])]).toString("latin1");
      expect(() => parse(ini)).not.toThrow();
    });

    test("truncated 4-byte sequence with 2 continuation bytes at end", () => {
      const ini = Buffer.concat([Buffer.from("key = "), Buffer.from([0xf0, 0x80, 0x80])]).toString("latin1");
      expect(() => parse(ini)).not.toThrow();
    });

    test("truncated 2-byte sequence in escaped context", () => {
      // Backslash followed by a 2-byte lead byte at end of value
      const ini = Buffer.concat([Buffer.from("key = \\"), Buffer.from([0xc0])]).toString("latin1");
      expect(() => parse(ini)).not.toThrow();
    });

    test("bare continuation byte in escaped context", () => {
      const ini = Buffer.concat([Buffer.from("key = \\"), Buffer.from([0x80])]).toString("latin1");
      expect(() => parse(ini)).not.toThrow();
    });
  });
});

const wtf = {
@@ -1,222 +0,0 @@
import { TCPSocketListener } from "bun";
import { describe, expect, test } from "bun:test";

const hostname = "127.0.0.1";
const MAX_HEADER_SIZE = 16 * 1024;

function doHandshake(
  socket: any,
  handshakeBuffer: Uint8Array,
  data: Uint8Array,
): { buffer: Uint8Array; done: boolean } {
  const newBuffer = new Uint8Array(handshakeBuffer.length + data.length);
  newBuffer.set(handshakeBuffer);
  newBuffer.set(data, handshakeBuffer.length);

  if (newBuffer.length > MAX_HEADER_SIZE) {
    socket.end();
    throw new Error("Handshake headers too large");
  }

  const dataStr = new TextDecoder("utf-8").decode(newBuffer);
  const endOfHeaders = dataStr.indexOf("\r\n\r\n");
  if (endOfHeaders === -1) {
    return { buffer: newBuffer, done: false };
  }

  if (!dataStr.startsWith("GET")) {
    throw new Error("Invalid handshake");
  }

  const magic = /Sec-WebSocket-Key:\s*(.*)\r\n/i.exec(dataStr);
  if (!magic) {
    throw new Error("Missing Sec-WebSocket-Key");
  }

  const hasher = new Bun.CryptoHasher("sha1");
  hasher.update(magic[1].trim());
  hasher.update("258EAFA5-E914-47DA-95CA-C5AB0DC85B11");
  const accept = hasher.digest("base64");

  socket.write(
    "HTTP/1.1 101 Switching Protocols\r\n" +
      "Upgrade: websocket\r\n" +
      "Connection: Upgrade\r\n" +
      `Sec-WebSocket-Accept: ${accept}\r\n` +
      "\r\n",
  );
  socket.flush();

  return { buffer: newBuffer, done: true };
}

function makeTextFrame(text: string): Uint8Array {
  const payload = new TextEncoder().encode(text);
  const len = payload.length;
  let header: Uint8Array;
  if (len < 126) {
    header = new Uint8Array([0x81, len]);
  } else if (len < 65536) {
    header = new Uint8Array([0x81, 126, (len >> 8) & 0xff, len & 0xff]);
  } else {
    throw new Error("Message too large for this test");
  }
  const frame = new Uint8Array(header.length + len);
  frame.set(header);
  frame.set(payload, header.length);
  return frame;
}

describe("WebSocket", () => {
  test("fragmented pong frame does not cause frame desync", async () => {
    let server: TCPSocketListener | undefined;
    let client: WebSocket | undefined;
    let handshakeBuffer = new Uint8Array(0);
    let handshakeComplete = false;

    try {
      const { promise, resolve, reject } = Promise.withResolvers<void>();

      server = Bun.listen({
        socket: {
          data(socket, data) {
            if (handshakeComplete) {
              // After handshake, we just receive client frames (like close) - ignore them
              return;
            }

            const result = doHandshake(socket, handshakeBuffer, new Uint8Array(data));
            handshakeBuffer = result.buffer;
            if (!result.done) return;

            handshakeComplete = true;

            // Build a pong frame with a 50-byte payload, but deliver it in two parts.
            // Pong opcode = 0x8A, FIN=1
            const pongPayload = new Uint8Array(50);
            for (let i = 0; i < 50; i++) pongPayload[i] = 0x41 + (i % 26); // 'A'-'Z' repeated
            const pongFrame = new Uint8Array(2 + 50);
            pongFrame[0] = 0x8a; // FIN + Pong opcode
            pongFrame[1] = 50; // payload length
            pongFrame.set(pongPayload, 2);

            // Part 1 of pong: header (2 bytes) + first 2 bytes of payload = 4 bytes
            // This leaves 48 bytes of pong payload undelivered.
            const pongPart1 = pongFrame.slice(0, 4);
            // Part 2: remaining 48 bytes of pong payload
            const pongPart2 = pongFrame.slice(4);

            // A text message to send after the pong completes.
            const textFrame = makeTextFrame("hello after pong");

            // Send part 1 of pong
            socket.write(pongPart1);
            socket.flush();

            // After a delay, send part 2 of pong + the follow-up text message
            setTimeout(() => {
              // Concatenate part2 + text frame to simulate them arriving together
              const combined = new Uint8Array(pongPart2.length + textFrame.length);
              combined.set(pongPart2);
              combined.set(textFrame, pongPart2.length);
              socket.write(combined);
              socket.flush();
            }, 50);
          },
        },
        hostname,
        port: 0,
      });

      const messages: string[] = [];

      client = new WebSocket(`ws://${server.hostname}:${server.port}`);
      client.addEventListener("error", event => {
        reject(new Error("WebSocket error"));
      });
      client.addEventListener("close", event => {
        // If the connection closes unexpectedly due to frame desync, the test should fail
        reject(new Error(`WebSocket closed unexpectedly: code=${event.code} reason=${event.reason}`));
      });
      client.addEventListener("message", event => {
        messages.push(event.data as string);
        if (messages.length === 1) {
          // We got the text message after the fragmented pong
          try {
            expect(messages[0]).toBe("hello after pong");
            resolve();
          } catch (err) {
            reject(err);
          }
        }
      });

      await promise;
    } finally {
      client?.close();
      server?.stop(true);
    }
  });

  test("pong frame with payload > 125 bytes is rejected", async () => {
    let server: TCPSocketListener | undefined;
    let client: WebSocket | undefined;
    let handshakeBuffer = new Uint8Array(0);
    let handshakeComplete = false;

    try {
      const { promise, resolve, reject } = Promise.withResolvers<void>();

      server = Bun.listen({
        socket: {
          data(socket, data) {
            if (handshakeComplete) return;

            const result = doHandshake(socket, handshakeBuffer, new Uint8Array(data));
            handshakeBuffer = result.buffer;
            if (!result.done) return;

            handshakeComplete = true;

            // Send a pong frame with a 126-byte payload (invalid per RFC 6455 Section 5.5):
            // control frames MUST have a payload length of 125 bytes or less.
            // The 7-bit length field in byte[1] encodes 0-125 directly; 126 signals that a
            // 16-bit extended length follows, but a control frame with a >125-byte payload
            // is invalid regardless of how the length is encoded.
            const pongFrame = new Uint8Array(4 + 126);
            pongFrame[0] = 0x8a; // FIN + Pong
            pongFrame[1] = 126; // Signals 16-bit extended length follows
            pongFrame[2] = 0; // High byte of length
            pongFrame[3] = 126; // Low byte of length = 126
            // Fill payload with arbitrary data
            for (let i = 0; i < 126; i++) pongFrame[4 + i] = 0x42;

            socket.write(pongFrame);
            socket.flush();
          },
        },
        hostname,
        port: 0,
      });

      client = new WebSocket(`ws://${server.hostname}:${server.port}`);
      client.addEventListener("error", () => {
        // Expected - the connection should error due to invalid control frame
        resolve();
      });
      client.addEventListener("close", () => {
        // Also acceptable - connection closes due to protocol error
        resolve();
      });
      client.addEventListener("message", () => {
        reject(new Error("Should not receive a message from an invalid pong frame"));
      });

      await promise;
    } finally {
      client?.close();
      server?.stop(true);
    }
  });
});
@@ -1,77 +0,0 @@
import { expect, test } from "bun:test";
import { bunEnv, bunExe, tempDir } from "harness";
import { join } from "path";

test("--bail writes JUnit reporter outfile", async () => {
  using dir = tempDir("bail-junit", {
    "fail.test.ts": `
      import { test, expect } from "bun:test";
      test("failing test", () => { expect(1).toBe(2); });
    `,
  });

  const outfile = join(String(dir), "results.xml");

  await using proc = Bun.spawn({
    cmd: [bunExe(), "test", "--bail", "--reporter=junit", `--reporter-outfile=${outfile}`, "fail.test.ts"],
    env: bunEnv,
    cwd: String(dir),
    stdout: "pipe",
    stderr: "pipe",
  });

  const exitCode = await proc.exited;

  // The test should fail and bail
  expect(exitCode).not.toBe(0);

  // The JUnit report file should still be written despite bail
  const file = Bun.file(outfile);
  expect(await file.exists()).toBe(true);

  const xml = await file.text();
  expect(xml).toContain("<?xml");
  expect(xml).toContain("<testsuites");
  expect(xml).toContain("</testsuites>");
  expect(xml).toContain("failing test");
});

test("--bail writes JUnit reporter outfile with multiple files", async () => {
  using dir = tempDir("bail-junit-multi", {
    "a_pass.test.ts": `
      import { test, expect } from "bun:test";
      test("passing test", () => { expect(1).toBe(1); });
    `,
    "b_fail.test.ts": `
      import { test, expect } from "bun:test";
      test("another failing test", () => { expect(1).toBe(2); });
    `,
  });

  const outfile = join(String(dir), "results.xml");

  await using proc = Bun.spawn({
    cmd: [bunExe(), "test", "--bail", "--reporter=junit", `--reporter-outfile=${outfile}`],
    env: bunEnv,
    cwd: String(dir),
    stdout: "pipe",
    stderr: "pipe",
  });

  const exitCode = await proc.exited;

  // The test should fail and bail
  expect(exitCode).not.toBe(0);

  // The JUnit report file should still be written despite bail
  const file = Bun.file(outfile);
  expect(await file.exists()).toBe(true);

  const xml = await file.text();
  expect(xml).toContain("<?xml");
  expect(xml).toContain("<testsuites");
  expect(xml).toContain("</testsuites>");
  // Both the passing and failing tests should be recorded
  expect(xml).toContain("passing test");
  expect(xml).toContain("another failing test");
});
@@ -1,187 +0,0 @@
import { SQL } from "bun";
import { expect, test } from "bun:test";
import net from "net";

test("postgres connection rejects null bytes in username", async () => {
  let serverReceivedData = false;

  const server = net.createServer(socket => {
    serverReceivedData = true;
    socket.destroy();
  });

  await new Promise<void>(r => server.listen(0, "127.0.0.1", () => r()));
  const port = (server.address() as net.AddressInfo).port;

  try {
    const sql = new SQL({
      hostname: "127.0.0.1",
      port,
      username: "alice\x00search_path\x00evil_schema,public",
      database: "testdb",
      max: 1,
      idleTimeout: 1,
      connectionTimeout: 2,
    });

    await sql`SELECT 1`;
    expect.unreachable();
  } catch (e: any) {
    expect(e.message).toContain("null bytes");
  } finally {
    server.close();
  }

  // The server should never have received any data because the null byte
  // should be rejected before the connection is established.
  expect(serverReceivedData).toBe(false);
});

test("postgres connection rejects null bytes in database", async () => {
  let serverReceivedData = false;

  const server = net.createServer(socket => {
    serverReceivedData = true;
    socket.destroy();
  });

  await new Promise<void>(r => server.listen(0, "127.0.0.1", () => r()));
  const port = (server.address() as net.AddressInfo).port;

  try {
    const sql = new SQL({
      hostname: "127.0.0.1",
      port,
      username: "alice",
      database: "testdb\x00search_path\x00evil_schema,public",
      max: 1,
      idleTimeout: 1,
      connectionTimeout: 2,
    });

    await sql`SELECT 1`;
    expect.unreachable();
  } catch (e: any) {
    expect(e.message).toContain("null bytes");
  } finally {
    server.close();
  }

  expect(serverReceivedData).toBe(false);
});

test("postgres connection rejects null bytes in password", async () => {
  let serverReceivedData = false;

  const server = net.createServer(socket => {
    serverReceivedData = true;
    socket.destroy();
  });

  await new Promise<void>(r => server.listen(0, "127.0.0.1", () => r()));
  const port = (server.address() as net.AddressInfo).port;

  try {
    const sql = new SQL({
      hostname: "127.0.0.1",
      port,
      username: "alice",
      password: "pass\x00search_path\x00evil_schema",
      database: "testdb",
      max: 1,
      idleTimeout: 1,
      connectionTimeout: 2,
    });

    await sql`SELECT 1`;
    expect.unreachable();
  } catch (e: any) {
    expect(e.message).toContain("null bytes");
  } finally {
    server.close();
  }

  expect(serverReceivedData).toBe(false);
});

test("postgres connection does not use truncated path with null bytes", async () => {
  // The JS layer's fs.existsSync() rejects paths containing null bytes,
  // so the path is dropped before reaching the native layer. Verify that a
  // path with null bytes doesn't silently connect via a truncated path.
  let serverReceivedData = false;

  const server = net.createServer(socket => {
    serverReceivedData = true;
    socket.destroy();
  });

  await new Promise<void>(r => server.listen(0, "127.0.0.1", () => r()));
  const port = (server.address() as net.AddressInfo).port;

  try {
    const sql = new SQL({
      hostname: "127.0.0.1",
      port,
      username: "alice",
      database: "testdb",
      path: "/tmp\x00injected",
      max: 1,
      idleTimeout: 1,
      connectionTimeout: 2,
    });

    await sql`SELECT 1`;
  } catch {
    // Expected to fail
  } finally {
    server.close();
  }

  // The path had null bytes so it should have been dropped by the JS layer,
  // falling back to TCP where it hits our mock server (not a truncated Unix socket).
  expect(serverReceivedData).toBe(true);
});

test("postgres connection works with normal parameters (no null bytes)", async () => {
  // Verify that normal connections without null bytes still work.
  // Use a mock server that sends an auth error so we can verify the
  // startup message is sent correctly.
  let receivedData = false;

  const server = net.createServer(socket => {
    socket.once("data", () => {
      receivedData = true;
      const errMsg = Buffer.from("SFATAL\0VFATAL\0C28000\0Mauthentication failed\0\0");
      const len = errMsg.length + 4;
      const header = Buffer.alloc(5);
      header.write("E", 0);
      header.writeInt32BE(len, 1);
      socket.write(Buffer.concat([header, errMsg]));
      socket.destroy();
    });
  });

  await new Promise<void>(r => server.listen(0, "127.0.0.1", () => r()));
  const port = (server.address() as net.AddressInfo).port;

  try {
    const sql = new SQL({
      hostname: "127.0.0.1",
      port,
      username: "alice",
      database: "testdb",
      max: 1,
      idleTimeout: 1,
      connectionTimeout: 2,
    });

    await sql`SELECT 1`;
  } catch {
    // Expected - mock server sends auth error
  } finally {
    server.close();
  }

  // Normal parameters should connect fine - the server should receive data
  expect(receivedData).toBe(true);
});
@@ -1,148 +0,0 @@
import { S3Client } from "bun";
import { describe, expect, test } from "bun:test";

// Test that CRLF characters in S3 options are rejected to prevent header injection.
// See: HTTP Header Injection via S3 Content-Disposition Value

describe("S3 header injection prevention", () => {
  test("contentDisposition with CRLF should throw", () => {
    using server = Bun.serve({
      port: 0,
      fetch() {
        return new Response("OK", { status: 200 });
      },
    });

    const client = new S3Client({
      accessKeyId: "test-key",
      secretAccessKey: "test-secret",
      endpoint: server.url.href,
      bucket: "test-bucket",
    });

    expect(() =>
      client.write("test-file.txt", "Hello", {
        contentDisposition: 'attachment; filename="evil"\r\nX-Injected: value',
      }),
    ).toThrow(/CR\/LF/);
  });

  test("contentEncoding with CRLF should throw", () => {
    using server = Bun.serve({
      port: 0,
      fetch() {
        return new Response("OK", { status: 200 });
      },
    });

    const client = new S3Client({
      accessKeyId: "test-key",
      secretAccessKey: "test-secret",
      endpoint: server.url.href,
      bucket: "test-bucket",
    });

    expect(() =>
      client.write("test-file.txt", "Hello", {
        contentEncoding: "gzip\r\nX-Injected: value",
      }),
    ).toThrow(/CR\/LF/);
  });

  test("type (content-type) with CRLF should throw", () => {
    using server = Bun.serve({
      port: 0,
      fetch() {
        return new Response("OK", { status: 200 });
      },
    });

    const client = new S3Client({
      accessKeyId: "test-key",
      secretAccessKey: "test-secret",
      endpoint: server.url.href,
      bucket: "test-bucket",
    });

    expect(() =>
      client.write("test-file.txt", "Hello", {
        type: "text/plain\r\nX-Injected: value",
      }),
    ).toThrow(/CR\/LF/);
  });

  test("contentDisposition with only CR should throw", () => {
    using server = Bun.serve({
      port: 0,
      fetch() {
        return new Response("OK", { status: 200 });
      },
    });

    const client = new S3Client({
      accessKeyId: "test-key",
      secretAccessKey: "test-secret",
      endpoint: server.url.href,
      bucket: "test-bucket",
    });

    expect(() =>
      client.write("test-file.txt", "Hello", {
        contentDisposition: "attachment\rinjected",
      }),
    ).toThrow(/CR\/LF/);
  });

  test("contentDisposition with only LF should throw", () => {
    using server = Bun.serve({
      port: 0,
      fetch() {
        return new Response("OK", { status: 200 });
      },
    });

    const client = new S3Client({
      accessKeyId: "test-key",
      secretAccessKey: "test-secret",
      endpoint: server.url.href,
      bucket: "test-bucket",
    });

    expect(() =>
      client.write("test-file.txt", "Hello", {
        contentDisposition: "attachment\ninjected",
      }),
    ).toThrow(/CR\/LF/);
  });

  test("valid contentDisposition without CRLF should not throw", async () => {
    const { promise: requestReceived, resolve: onRequestReceived } = Promise.withResolvers<Headers>();

    using server = Bun.serve({
      port: 0,
      async fetch(req) {
        onRequestReceived(req.headers);
        return new Response("OK", { status: 200 });
      },
    });

    const client = new S3Client({
      accessKeyId: "test-key",
      secretAccessKey: "test-secret",
      endpoint: server.url.href,
      bucket: "test-bucket",
    });

    // Valid content-disposition values should not throw synchronously.
    // The write may eventually fail because the mock server doesn't speak S3 protocol,
    // but the option parsing should succeed and a request should be initiated.
    expect(() =>
      client.write("test-file.txt", "Hello", {
        contentDisposition: 'attachment; filename="report.pdf"',
      }),
    ).not.toThrow();

    const receivedHeaders = await requestReceived;
    expect(receivedHeaders.get("content-disposition")).toBe('attachment; filename="report.pdf"');
  });
});