Mirror of https://github.com/oven-sh/bun (synced 2026-02-06 17:08:51 +00:00)
Compare commits: claude/imp...claude/com (63 commits)
Commits: a428c88b88, c5a067fecf, 00ce0847d9, 07631e0742, 155f0a8bbb, 61bd3e67da, 8a64dd86c6, a9a56e6230, 477aa56aa4, 6c837f1b87,
bf21748803, 4e79fe2a52, 3dedf03182, f468a8b58a, 61c9b828aa, ae3bb1af33, 1b406f64dd, e5d5f9e159, b726162564, 060c71994a,
05a03500eb, 06fcaad1ee, 1a5660ba39, 8eb6b933b6, 2cc90a7615, 7d7c3daccf, e557195dde, 32d5797866, bca76bf378, 75fde6ce45,
6d789e2eb7, 09099b3747, 1c0ae14239, ec1fc80111, 054de551c2, 38a798baa8, f763180b95, 08f3270203, c4996b2d99, fa0febc520,
f1a52633a3, b778121cf1, 3e84a2964d, 7753820bd6, b79f0d1e39, c5fbe102e2, 7c30edd20e, 60d4f92491, 3d053712d4, 173309ba95,
b3d46fa99a, edeca4602d, f424308a31, 07ca61f814, becf7777ef, 413dd86bf8, 7eecefecf0, b809cd297e, 6f125e0375, edec93a475,
3be879682e, 8bf8b8e32f, 4654a8a886
scripts/ci/docker-prepull.sh (new executable file, 133 lines)
@@ -0,0 +1,133 @@
#!/bin/bash
set -euo pipefail

# Docker image prepull and build script for CI
# This script ensures all required Docker images are available locally
# to avoid network pulls during test execution

echo "🐳 Docker image preparation starting..."

# Function to check if image exists
image_exists() {
  docker image inspect "$1" >/dev/null 2>&1
}

# Function to pull image if not exists
pull_if_missing() {
  local image="$1"
  if image_exists "$image"; then
    echo "✓ Image $image already exists"
  else
    echo "⬇️ Pulling $image..."
    docker pull "$image"
  fi
}

# Function to build local image
build_local_image() {
  local tag="$1"
  local context="$2"
  local dockerfile="${3:-Dockerfile}"

  if image_exists "$tag"; then
    echo "✓ Local image $tag already exists"
  else
    echo "🔨 Building $tag from $context..."
    docker build -t "$tag" -f "$context/$dockerfile" "$context"
  fi
}

# Ensure Docker is available
if ! command -v docker &> /dev/null; then
  echo "❌ Docker is not installed or not in PATH"
  exit 1
fi

# Check Docker daemon is running
if ! docker info >/dev/null 2>&1; then
  echo "❌ Docker daemon is not running"
  exit 1
fi

# Check Docker Compose v2 is available
if ! docker compose version >/dev/null 2>&1; then
  echo "❌ Docker Compose v2 is not available"
  exit 1
fi

echo "📦 Pulling base images..."

# Pull PostgreSQL
pull_if_missing "postgres:15"

# Pull MySQL
pull_if_missing "mysql:8.4"

# Pull Redis
pull_if_missing "redis:7-alpine"

# Pull MinIO
pull_if_missing "minio/minio:latest"

# Pull Autobahn WebSocket test suite
pull_if_missing "crossbario/autobahn-testsuite"

echo "🔨 Building local images..."

# Build MySQL TLS image
build_local_image "bun-mysql-tls:local" "test/js/sql/mysql-tls"

# Build Redis unified image
build_local_image "bun-redis-unified:local" "test/js/valkey/docker-unified"

echo "✅ Validating docker-compose configuration..."

# Validate compose file if it exists
COMPOSE_FILE="${BUN_DOCKER_COMPOSE_FILE:-test/docker/docker-compose.yml}"
if [[ -f "$COMPOSE_FILE" ]]; then
  if docker compose -f "$COMPOSE_FILE" config >/dev/null 2>&1; then
    echo "✓ Docker Compose configuration is valid"
  else
    echo "⚠️ Docker Compose configuration validation failed"
    docker compose -f "$COMPOSE_FILE" config
  fi
else
  echo "⚠️ Compose file not found at $COMPOSE_FILE"
fi

# Optional: Save images to cache (useful for ephemeral CI instances)
if [[ "${BUN_DOCKER_SAVE_CACHE:-0}" == "1" ]]; then
  CACHE_FILE="/var/cache/bun-docker-images.tar"
  echo "💾 Saving images to cache at $CACHE_FILE..."

  docker save \
    postgres:15 \
    mysql:8.4 \
    redis:7-alpine \
    minio/minio:latest \
    crossbario/autobahn-testsuite \
    bun-mysql-tls:local \
    bun-redis-unified:local \
    -o "$CACHE_FILE"

  echo "✓ Images saved to cache"
fi

# Optional: Load images from cache
if [[ "${BUN_DOCKER_LOAD_CACHE:-0}" == "1" ]]; then
  CACHE_FILE="/var/cache/bun-docker-images.tar"
  if [[ -f "$CACHE_FILE" ]]; then
    echo "💾 Loading images from cache at $CACHE_FILE..."
    docker load -i "$CACHE_FILE"
    echo "✓ Images loaded from cache"
  else
    echo "⚠️ Cache file not found at $CACHE_FILE"
  fi
fi

echo "🎉 Docker image preparation complete!"

# List all images for verification
echo ""
echo "📋 Available images:"
docker images --format "table {{.Repository}}:{{.Tag}}\t{{.Size}}" | grep -E "(postgres|mysql|redis|minio|autobahn|bun-)" || true
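Note: a CI job would typically run this script once before the test step. A minimal invocation sketch follows; the environment variable names are the ones read by the script above, while the surrounding CI wiring is an assumption and not part of this change:

# Load a previously saved image tarball if present, prepull/build anything missing,
# then save an updated tarball for the next ephemeral CI instance.
BUN_DOCKER_LOAD_CACHE=1 \
BUN_DOCKER_SAVE_CACHE=1 \
BUN_DOCKER_COMPOSE_FILE=test/docker/docker-compose.yml \
  bash scripts/ci/docker-prepull.sh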
@@ -627,8 +627,8 @@ pub fn NewServer(protocol_enum: enum { http, https }, development_kind: enum { d
}

pub fn jsValueAssertAlive(server: *ThisServer) jsc.JSValue {
// With JSRef, we can safely access the JS value even after stop() via weak reference
return server.js_value.get();
bun.assert(server.js_value.isNotEmpty());
return server.js_value.tryGet().?;
}

pub fn requestIP(this: *ThisServer, request: *jsc.WebCore.Request) bun.JSError!jsc.JSValue {

@@ -1124,7 +1124,7 @@ pub fn NewServer(protocol_enum: enum { http, https }, development_kind: enum { d

this.onReloadFromZig(&new_config, globalThis);

return this.js_value.get();
return this.js_value.tryGet() orelse .js_undefined;
}

pub fn onFetch(this: *ThisServer, ctx: *jsc.JSGlobalObject, callframe: *jsc.CallFrame) bun.JSError!jsc.JSValue {

@@ -1539,8 +1539,7 @@ pub fn NewServer(protocol_enum: enum { http, https }, development_kind: enum { d
}

pub fn stop(this: *ThisServer, abrupt: bool) void {
const current_value = this.js_value.get();
this.js_value.setWeak(current_value);
this.js_value.downgrade();

if (this.config.allow_hot and this.config.id.len > 0) {
if (this.globalThis.bunVM().hotMap()) |hot| {

@@ -1981,7 +1981,7 @@ pub fn NewRequestContext(comptime ssl_enabled: bool, comptime debug_mode: bool,
this.flags.has_called_error_handler = true;
const result = server.config.onError.call(
server.globalThis,
server.js_value.get(),
server.js_value.tryGet() orelse .js_undefined,
&.{value},
) catch |err| server.globalThis.takeException(err);
defer result.ensureStillAlive();
@@ -9,7 +9,7 @@ for (const type of types) {
construct: true,
finalize: true,
configurable: false,
hasPendingActivity: true,
hasPendingActivity: type === "PostgresSQL",
klass: {
// escapeString: {
// fn: "escapeString",

@@ -60,7 +60,6 @@ for (const type of types) {
construct: true,
finalize: true,
configurable: false,

JSType: "0b11101110",
klass: {},
proto: {
@@ -8,6 +8,7 @@ pub const JSRef = union(enum) {
}

pub fn initStrong(value: jsc.JSValue, globalThis: *jsc.JSGlobalObject) @This() {
bun.assert(value != .zero);
return .{ .strong = .create(value, globalThis) };
}

@@ -15,15 +16,7 @@ pub const JSRef = union(enum) {
return .{ .weak = .zero };
}

pub fn get(this: *@This()) jsc.JSValue {
return switch (this.*) {
.weak => this.weak,
.strong => this.strong.get() orelse .zero,
.finalized => .zero,
};
}

pub fn tryGet(this: *@This()) ?jsc.JSValue {
pub fn tryGet(this: *const @This()) ?jsc.JSValue {
return switch (this.*) {
.weak => if (this.weak != .zero) this.weak else null,
.strong => this.strong.get(),

@@ -44,6 +37,7 @@ pub const JSRef = union(enum) {
}

pub fn setStrong(this: *@This(), value: jsc.JSValue, globalThis: *jsc.JSGlobalObject) void {
bun.assert(value != .zero);
if (this.* == .strong) {
this.strong.set(globalThis, value);
return;

@@ -64,6 +58,37 @@ pub const JSRef = union(enum) {
}
}

pub fn downgrade(this: *@This()) void {
switch (this.*) {
.weak => {},
.strong => |*strong| {
const value = strong.get() orelse .zero;
value.ensureStillAlive();
strong.deinit();
this.* = .{ .weak = value };
},
.finalized => {
bun.debugAssert(false);
},
}
}

pub fn isEmpty(this: *const @This()) bool {
return switch (this.*) {
.weak => this.weak == .zero,
.strong => !this.strong.has(),
.finalized => true,
};
}

pub fn isNotEmpty(this: *const @This()) bool {
return switch (this.*) {
.weak => this.weak != .zero,
.strong => this.strong.has(),
.finalized => false,
};
}

pub fn deinit(this: *@This()) void {
switch (this.*) {
.weak => {
@@ -1548,10 +1548,20 @@ JSC_DEFINE_HOST_FUNCTION(functionQueueMicrotask,

auto* globalObject = defaultGlobalObject(lexicalGlobalObject);
JSC::JSValue asyncContext = globalObject->m_asyncContextData.get()->getInternalField(0);
auto function = globalObject->performMicrotaskFunction();
#if ASSERT_ENABLED
ASSERT_WITH_MESSAGE(function, "Invalid microtask function");
ASSERT_WITH_MESSAGE(!asyncContext.isEmpty(), "Invalid microtask context");
ASSERT_WITH_MESSAGE(!callback.isEmpty(), "Invalid microtask callback");
#endif

if (asyncContext.isEmpty()) {
asyncContext = JSC::jsUndefined();
}

// This is a JSC builtin function
lexicalGlobalObject->queueMicrotask(globalObject->performMicrotaskFunction(), callback, asyncContext,
JSC::JSValue {}, JSC::JSValue {});
lexicalGlobalObject->queueMicrotask(function, callback, asyncContext,
JSC::jsUndefined(), JSC::jsUndefined());

return JSC::JSValue::encode(JSC::jsUndefined());
}

@@ -4147,6 +4157,12 @@ extern "C" void JSC__JSGlobalObject__queueMicrotaskCallback(Zig::GlobalObject* g
{
JSFunction* function = globalObject->nativeMicrotaskTrampoline();

#if ASSERT_ENABLED
ASSERT_WITH_MESSAGE(function, "Invalid microtask function");
ASSERT_WITH_MESSAGE(ptr, "Invalid microtask context");
ASSERT_WITH_MESSAGE(callback, "Invalid microtask callback");
#endif

// Do not use JSCell* here because the GC will try to visit it.
globalObject->queueMicrotask(function, JSValue(std::bit_cast<double>(reinterpret_cast<uintptr_t>(ptr))), JSValue(std::bit_cast<double>(reinterpret_cast<uintptr_t>(callback))), jsUndefined(), jsUndefined());
}

@@ -3453,6 +3453,7 @@ void JSC__JSPromise__rejectOnNextTickWithHandled(JSC::JSPromise* promise, JSC::J
JSC::EncodedJSValue encoedValue, bool handled)
{
JSC::JSValue value = JSC::JSValue::decode(encoedValue);

auto& vm = JSC::getVM(lexicalGlobalObject);
auto scope = DECLARE_THROW_SCOPE(vm);
uint32_t flags = promise->internalField(JSC::JSPromise::Field::Flags).get().asUInt32();

@@ -3463,10 +3464,29 @@ void JSC__JSPromise__rejectOnNextTickWithHandled(JSC::JSPromise* promise, JSC::J

promise->internalField(JSC::JSPromise::Field::Flags).set(vm, promise, jsNumber(flags | JSC::JSPromise::isFirstResolvingFunctionCalledFlag));
auto* globalObject = jsCast<Zig::GlobalObject*>(promise->globalObject());
auto microtaskFunction = globalObject->performMicrotaskFunction();
auto rejectPromiseFunction = globalObject->rejectPromiseFunction();

auto asyncContext = globalObject->m_asyncContextData.get()->getInternalField(0);

#if ASSERT_ENABLED
ASSERT_WITH_MESSAGE(microtaskFunction, "Invalid microtask function");
ASSERT_WITH_MESSAGE(!asyncContext.isEmpty(), "Invalid microtask context");
ASSERT_WITH_MESSAGE(rejectPromiseFunction, "Invalid microtask callback");
ASSERT_WITH_MESSAGE(!value.isEmpty(), "Invalid microtask value");
#endif

if (asyncContext.isEmpty()) {
asyncContext = jsUndefined();
}

if (value.isEmpty()) {
value = jsUndefined();
}

globalObject->queueMicrotask(
globalObject->performMicrotaskFunction(),
globalObject->rejectPromiseFunction(),
microtaskFunction,
rejectPromiseFunction,
globalObject->m_asyncContextData.get()->getInternalField(0),
promise,
value);

@@ -6121,8 +6141,9 @@ extern "C" void JSC__JSGlobalObject__queueMicrotaskJob(JSC::JSGlobalObject* arg0
if (microtaskArgs[3].isEmpty()) {
microtaskArgs[3] = jsUndefined();
}

auto microTaskFunction = globalObject->performMicrotaskFunction();
#if ASSERT_ENABLED
ASSERT_WITH_MESSAGE(microTaskFunction, "Invalid microtask function");
auto& vm = globalObject->vm();
if (microtaskArgs[0].isCell()) {
JSC::Integrity::auditCellFully(vm, microtaskArgs[0].asCell());

@@ -6139,10 +6160,11 @@ extern "C" void JSC__JSGlobalObject__queueMicrotaskJob(JSC::JSGlobalObject* arg0
if (microtaskArgs[3].isCell()) {
JSC::Integrity::auditCellFully(vm, microtaskArgs[3].asCell());
}

#endif

globalObject->queueMicrotask(
globalObject->performMicrotaskFunction(),
microTaskFunction,
WTFMove(microtaskArgs[0]),
WTFMove(microtaskArgs[1]),
WTFMove(microtaskArgs[2]),
@@ -98,8 +98,8 @@ for (let i = 0; i < nativeStartIndex; i++) {

// TODO: there is no reason this cannot be converted automatically.
// import { ... } from '...' -> `const { ... } = require('...')`
const scannedImports = t.scanImports(input);
for (const imp of scannedImports) {
const scannedImports = t.scan(input);
for (const imp of scannedImports.imports) {
if (imp.kind === "import-statement") {
var isBuiltin = true;
try {

@@ -120,6 +120,14 @@ for (let i = 0; i < nativeStartIndex; i++) {
}
}

if (scannedImports.exports.includes("default") && scannedImports.exports.length > 1) {
const err = new Error(
`Using \`export default\` AND named exports together in builtin modules is unsupported. See src/js/README.md (from ${moduleList[i]})`,
);
err.name = "BunError";
err.fileName = moduleList[i];
throw err;
}
let importStatements: string[] = [];

const processed = sliceSourceCode(
@@ -291,7 +291,7 @@ const SQL: typeof Bun.SQL = function SQL(

reserved_sql.connect = () => {
if (state.connectionState & ReservedConnectionState.closed) {
return Promise.$reject(this.connectionClosedError());
return Promise.$reject(pool.connectionClosedError());
}
return Promise.$resolve(reserved_sql);
};

@@ -322,7 +322,7 @@ const SQL: typeof Bun.SQL = function SQL(
reserved_sql.beginDistributed = (name: string, fn: TransactionCallback) => {
// begin is allowed the difference is that we need to make sure to use the same connection and never release it
if (state.connectionState & ReservedConnectionState.closed) {
return Promise.$reject(this.connectionClosedError());
return Promise.$reject(pool.connectionClosedError());
}
let callback = fn;

@@ -346,7 +346,7 @@ const SQL: typeof Bun.SQL = function SQL(
state.connectionState & ReservedConnectionState.closed ||
!(state.connectionState & ReservedConnectionState.acceptQueries)
) {
return Promise.$reject(this.connectionClosedError());
return Promise.$reject(pool.connectionClosedError());
}
let callback = fn;
let options: string | undefined = options_or_fn as unknown as string;

@@ -369,7 +369,7 @@ const SQL: typeof Bun.SQL = function SQL(

reserved_sql.flush = () => {
if (state.connectionState & ReservedConnectionState.closed) {
throw this.connectionClosedError();
throw pool.connectionClosedError();
}
// Use pooled connection's flush if available, otherwise use adapter's flush
if (pooledConnection.flush) {

@@ -429,7 +429,7 @@ const SQL: typeof Bun.SQL = function SQL(
state.connectionState & ReservedConnectionState.closed ||
!(state.connectionState & ReservedConnectionState.acceptQueries)
) {
return Promise.$reject(this.connectionClosedError());
return Promise.$reject(pool.connectionClosedError());
}
// just release the connection back to the pool
state.connectionState |= ReservedConnectionState.closed;

@@ -552,7 +552,7 @@ const SQL: typeof Bun.SQL = function SQL(

function run_internal_transaction_sql(string) {
if (state.connectionState & ReservedConnectionState.closed) {
return Promise.$reject(this.connectionClosedError());
return Promise.$reject(pool.connectionClosedError());
}
return unsafeQueryFromTransaction(string, [], pooledConnection, state.queries);
}

@@ -564,7 +564,7 @@ const SQL: typeof Bun.SQL = function SQL(
state.connectionState & ReservedConnectionState.closed ||
!(state.connectionState & ReservedConnectionState.acceptQueries)
) {
return Promise.$reject(this.connectionClosedError());
return Promise.$reject(pool.connectionClosedError());
}
if ($isArray(strings)) {
// detect if is tagged template

@@ -593,7 +593,7 @@ const SQL: typeof Bun.SQL = function SQL(

transaction_sql.connect = () => {
if (state.connectionState & ReservedConnectionState.closed) {
return Promise.$reject(this.connectionClosedError());
return Promise.$reject(pool.connectionClosedError());
}

return Promise.$resolve(transaction_sql);

@@ -732,7 +732,7 @@ const SQL: typeof Bun.SQL = function SQL(
state.connectionState & ReservedConnectionState.closed ||
!(state.connectionState & ReservedConnectionState.acceptQueries)
) {
throw this.connectionClosedError();
throw pool.connectionClosedError();
}

if ($isCallable(name)) {

@@ -816,7 +816,7 @@ const SQL: typeof Bun.SQL = function SQL(

sql.reserve = () => {
if (pool.closed) {
return Promise.$reject(this.connectionClosedError());
return Promise.$reject(pool.connectionClosedError());
}

// Check if adapter supports reserved connections

@@ -831,7 +831,7 @@ const SQL: typeof Bun.SQL = function SQL(
};
sql.rollbackDistributed = async function (name: string) {
if (pool.closed) {
throw this.connectionClosedError();
throw pool.connectionClosedError();
}

if (!pool.getRollbackDistributedSQL) {

@@ -844,7 +844,7 @@ const SQL: typeof Bun.SQL = function SQL(

sql.commitDistributed = async function (name: string) {
if (pool.closed) {
throw this.connectionClosedError();
throw pool.connectionClosedError();
}

if (!pool.getCommitDistributedSQL) {

@@ -857,7 +857,7 @@ const SQL: typeof Bun.SQL = function SQL(

sql.beginDistributed = (name: string, fn: TransactionCallback) => {
if (pool.closed) {
return Promise.$reject(this.connectionClosedError());
return Promise.$reject(pool.connectionClosedError());
}
let callback = fn;

@@ -876,7 +876,7 @@ const SQL: typeof Bun.SQL = function SQL(

sql.begin = (options_or_fn: string | TransactionCallback, fn?: TransactionCallback) => {
if (pool.closed) {
return Promise.$reject(this.connectionClosedError());
return Promise.$reject(pool.connectionClosedError());
}
let callback = fn;
let options: string | undefined = options_or_fn as unknown as string;

@@ -896,7 +896,7 @@ const SQL: typeof Bun.SQL = function SQL(
};
sql.connect = () => {
if (pool.closed) {
return Promise.$reject(this.connectionClosedError());
return Promise.$reject(pool.connectionClosedError());
}

if (pool.isConnected()) {
@@ -344,10 +344,13 @@ class PooledMySQLConnection {
/// queryCount is used to indicate the number of queries using the connection, if a connection is reserved or if its a transaction queryCount will be 1 independently of the number of queries
queryCount: number = 0;

#onConnected(err, _) {
#onConnected(err, connection) {
if (err) {
err = wrapError(err);
} else {
this.connection = connection;
}

const connectionInfo = this.connectionInfo;
if (connectionInfo?.onconnect) {
connectionInfo.onconnect(err);

@@ -413,12 +416,8 @@ class PooledMySQLConnection {
this.#startConnection();
}

async #startConnection() {
this.connection = await PooledMySQLConnection.createConnection(
this.connectionInfo,
this.#onConnected.bind(this),
this.#onClose.bind(this),
);
#startConnection() {
PooledMySQLConnection.createConnection(this.connectionInfo, this.#onConnected.bind(this), this.#onClose.bind(this));
}

onClose(onClose: (err: Error) => void) {

@@ -482,7 +481,7 @@ class PooledMySQLConnection {
}
}

export class MySQLAdapter
class MySQLAdapter
implements
DatabaseAdapter<PooledMySQLConnection, $ZigGeneratedClasses.MySQLConnection, $ZigGeneratedClasses.MySQLQuery>
{

@@ -499,7 +499,7 @@ class PooledPostgresConnection {
}
}

export class PostgresAdapter
class PostgresAdapter
implements
DatabaseAdapter<
PooledPostgresConnection,

@@ -293,7 +293,7 @@ function parseSQLQuery(query: string): SQLParsedInfo {
return { command, firstKeyword, hasReturning };
}

export class SQLiteQueryHandle implements BaseQueryHandle<BunSQLiteModule.Database> {
class SQLiteQueryHandle implements BaseQueryHandle<BunSQLiteModule.Database> {
private mode = SQLQueryResultMode.objects;

private readonly sql: string;

@@ -380,9 +380,7 @@ export class SQLiteQueryHandle implements BaseQueryHandle<BunSQLiteModule.Databa
}
}

export class SQLiteAdapter
implements DatabaseAdapter<BunSQLiteModule.Database, BunSQLiteModule.Database, SQLiteQueryHandle>
{
class SQLiteAdapter implements DatabaseAdapter<BunSQLiteModule.Database, BunSQLiteModule.Database, SQLiteQueryHandle> {
public readonly connectionInfo: Bun.SQL.__internal.DefinedSQLiteOptions;
public db: BunSQLiteModule.Database | null = null;
public storedError: Error | null = null;

@@ -807,4 +805,5 @@ export default {
SQLCommand,
commandToString,
parseSQLQuery,
SQLiteQueryHandle,
};
@@ -21,8 +21,7 @@ poll_ref: bun.Async.KeepAlive = .{},
globalObject: *jsc.JSGlobalObject,
vm: *jsc.VirtualMachine,

pending_activity_count: std.atomic.Value(u32) = std.atomic.Value(u32).init(0),
js_value: JSValue = .js_undefined,
js_value: jsc.JSRef = jsc.JSRef.empty(),

server_version: bun.ByteList = .{},
connection_id: u32 = 0,

@@ -122,14 +121,14 @@ pub const AuthState = union(enum) {
};
};

pub fn hasPendingActivity(this: *MySQLConnection) bool {
return this.pending_activity_count.load(.acquire) > 0;
}

fn updateHasPendingActivity(this: *MySQLConnection) void {
const a: u32 = if (this.requests.readableLength() > 0) 1 else 0;
const b: u32 = if (this.status != .disconnected) 1 else 0;
this.pending_activity_count.store(a + b, .release);
fn updateReferenceType(this: *MySQLConnection) void {
if (this.js_value.isNotEmpty()) {
if (this.requests.readableLength() > 0 or (this.status != .disconnected and this.status != .failed)) {
this.js_value.upgrade(this.globalObject);
return;
}
this.js_value.downgrade();
}
}

fn hasDataToSend(this: *@This()) bool {

@@ -225,15 +224,16 @@ pub fn onConnectionTimeout(this: *@This()) bun.api.Timer.EventLoopTimer.Arm {
.connected => {
this.failFmt(error.IdleTimeout, "Idle timeout reached after {}", .{bun.fmt.fmtDurationOneDecimal(@as(u64, this.idle_timeout_interval_ms) *| std.time.ns_per_ms)});
},
else => {
.connecting => {
this.failFmt(error.ConnectionTimedOut, "Connection timeout after {}", .{bun.fmt.fmtDurationOneDecimal(@as(u64, this.connection_timeout_ms) *| std.time.ns_per_ms)});
},
.handshaking,
.authenticating,
.authentication_awaiting_pk,
=> {
this.failFmt(error.ConnectionTimedOut, "Connection timed out after {} (during authentication)", .{bun.fmt.fmtDurationOneDecimal(@as(u64, this.connection_timeout_ms) *| std.time.ns_per_ms)});
this.failFmt(error.ConnectionTimedOut, "Connection timeout after {} (during authentication)", .{bun.fmt.fmtDurationOneDecimal(@as(u64, this.connection_timeout_ms) *| std.time.ns_per_ms)});
},
.disconnected, .failed => {},
}
return .disarm;
}

@@ -264,28 +264,24 @@ fn drainInternal(this: *@This()) void {
}
}
}

pub fn finalize(this: *MySQLConnection) void {
this.stopTimers();
debug("MySQLConnection finalize", .{});

// Ensure we disconnect before finalizing
if (this.status != .disconnected) {
this.disconnect();
}

this.js_value = .zero;
this.js_value.deinit();
this.deref();
}

pub fn doRef(this: *@This(), _: *jsc.JSGlobalObject, _: *jsc.CallFrame) bun.JSError!JSValue {
this.poll_ref.ref(this.vm);
this.updateHasPendingActivity();
this.updateReferenceType();
return .js_undefined;
}

pub fn doUnref(this: *@This(), _: *jsc.JSGlobalObject, _: *jsc.CallFrame) bun.JSError!JSValue {
this.poll_ref.unref(this.vm);
this.updateHasPendingActivity();
this.updateReferenceType();
return .js_undefined;
}

@@ -352,7 +348,10 @@ pub fn stopTimers(this: *@This()) void {
}

pub fn getQueriesArray(this: *const @This()) JSValue {
return js.queriesGetCached(this.js_value) orelse .js_undefined;
if (this.js_value.tryGet()) |value| {
return js.queriesGetCached(value) orelse .js_undefined;
}
return .js_undefined;
}
pub fn failFmt(this: *@This(), error_code: AnyMySQLError.Error, comptime fmt: [:0]const u8, args: anytype) void {
const message = bun.handleOom(std.fmt.allocPrint(bun.default_allocator, fmt, args));

@@ -362,26 +361,39 @@ pub fn failFmt(this: *@This(), error_code: AnyMySQLError.Error, comptime fmt: [:
this.failWithJSValue(err);
}
pub fn failWithJSValue(this: *MySQLConnection, value: JSValue) void {
defer this.updateHasPendingActivity();
defer this.updateReferenceType();
this.stopTimers();
if (this.status == .failed) return;
this.setStatus(.failed);

this.ref();
defer this.deref();
// we defer the refAndClose so the on_close will be called first before we reject the pending requests
defer this.refAndClose(value);
const on_close = this.consumeOnCloseCallback(this.globalObject) orelse return;
defer {
// we defer the refAndClose so the on_close will be called first before we reject the pending requests
this.refAndClose(value);
this.deref();
}

this.status = .failed;

const on_close = this.consumeOnCloseCallback(this.globalObject) orelse return;
on_close.ensureStillAlive();
const loop = this.vm.eventLoop();
loop.enter();
defer loop.exit();

var js_error = value.toError() orelse value;
if (js_error == .zero) {
js_error = AnyMySQLError.mysqlErrorToJS(this.globalObject, "Connection closed", error.ConnectionClosed);
}
js_error.ensureStillAlive();

const queries_array = this.getQueriesArray();
queries_array.ensureStillAlive();
_ = on_close.call(
this.globalObject,
this.js_value,
.js_undefined,
&[_]JSValue{
value,
this.getQueriesArray(),
js_error,
queries_array,
},
) catch |e| this.globalObject.reportActiveExceptionAsUnhandled(e);
}

@@ -392,30 +404,36 @@ pub fn fail(this: *MySQLConnection, message: []const u8, err: AnyMySQLError.Erro
this.failWithJSValue(instance);
}

pub fn onClose(this: *MySQLConnection) void {
var vm = this.vm;
defer vm.drainMicrotasks();
pub fn onEnd(this: *MySQLConnection) void {
// no more socket
this.fail("Connection closed", error.ConnectionClosed);
}

pub fn onClose(this: *MySQLConnection) void {
// no more socket
defer this.deref();
this.onEnd();
}

fn refAndClose(this: *@This(), js_reason: ?jsc.JSValue) void {
// refAndClose is always called when we wanna to disconnect or when we are closed

// cleanup requests
this.cleanUpRequests(js_reason);

if (!this.socket.isClosed()) {
// event loop need to be alive to close the socket
this.poll_ref.ref(this.vm);
// will unref on socket close
this.socket.close();
}

// cleanup requests
this.cleanUpRequests(js_reason);
}

pub fn disconnect(this: *@This()) void {
this.stopTimers();
if (this.status == .connected) {
this.setStatus(.disconnected);
defer this.updateReferenceType();
this.status = .disconnected;
this.poll_ref.disable();

const requests = this.requests.readableSlice(0);

@@ -721,12 +739,12 @@ fn SocketHandler(comptime ssl: bool) type {

pub fn onEnd(this: *MySQLConnection, socket: SocketType) void {
_ = socket;
this.onClose();
this.onEnd();
}

pub fn onConnectError(this: *MySQLConnection, socket: SocketType, _: i32) void {
_ = socket;
this.onClose();
this.onEnd();
}

pub fn onTimeout(this: *MySQLConnection, socket: SocketType) void {

@@ -747,7 +765,7 @@ fn SocketHandler(comptime ssl: bool) type {
}

pub fn onTimeout(this: *MySQLConnection) void {
this.fail("Connection timed out", error.ConnectionTimedOut);
this.fail("Connection timeout", error.ConnectionTimedOut);
}

pub fn onDrain(this: *MySQLConnection) void {

@@ -938,12 +956,11 @@ pub fn call(globalObject: *jsc.JSGlobalObject, callframe: *jsc.CallFrame) bun.JS
}
}
ptr.setStatus(.connecting);
ptr.updateHasPendingActivity();
ptr.resetConnectionTimeout();
ptr.poll_ref.ref(vm);
const js_value = ptr.toJS(globalObject);
js_value.ensureStillAlive();
ptr.js_value = js_value;
ptr.js_value.setStrong(js_value, globalObject);
js.onconnectSetCached(js_value, globalObject, on_connect);
js.oncloseSetCached(js_value, globalObject, on_close);

@@ -1002,10 +1019,11 @@ pub fn onOpen(this: *MySQLConnection, socket: Socket) void {
this.socket = socket;
if (socket == .SocketTCP) {
// when upgrading to TLS the onOpen callback will be called again and at this moment we dont wanna to change the status to handshaking
this.setStatus(.handshaking);
this.status = .handshaking;
this.ref(); // keep a ref for the socket
}
this.poll_ref.ref(this.vm);
this.updateHasPendingActivity();
this.updateReferenceType();
}

pub fn onHandshake(this: *MySQLConnection, success: i32, ssl_error: uws.us_bun_verify_error_t) void {

@@ -1142,12 +1160,18 @@ pub fn processPackets(this: *MySQLConnection, comptime Context: type, reader: Ne

// Read packet header
const header = PacketHeader.decode(reader.peek()) orelse return AnyMySQLError.Error.ShortRead;
const header_length = header.length;
const header_length: usize = header.length;
// MySQL packets have a maximum length of 16MB (0xFFFFFF bytes)
// Reject obviously invalid packet lengths to prevent resource exhaustion
if (header_length >= 0xFFFFFF) {
return AnyMySQLError.Error.UnexpectedPacket;
}
const packet_length: usize = header_length +| PacketHeader.size;
debug("sequence_id: {d} header: {d}", .{ this.sequence_id, header_length });
// Ensure we have the full packet
reader.ensureCapacity(header_length + PacketHeader.size) catch return AnyMySQLError.Error.ShortRead;
reader.ensureCapacity(packet_length) catch return AnyMySQLError.Error.ShortRead;
// always skip the full packet, we dont care about padding or unreaded bytes
defer reader.setOffsetFromStart(header_length + PacketHeader.size);
defer reader.setOffsetFromStart(packet_length);
reader.skip(PacketHeader.size);

// Update sequence id

@@ -1156,8 +1180,8 @@ pub fn processPackets(this: *MySQLConnection, comptime Context: type, reader: Ne
// Process packet based on connection state
switch (this.status) {
.handshaking => try this.handleHandshake(Context, reader),
.authenticating, .authentication_awaiting_pk => try this.handleAuth(Context, reader, header_length),
.connected => try this.handleCommand(Context, reader, header_length),
.authenticating, .authentication_awaiting_pk => try this.handleAuth(Context, reader, @truncate(header_length)),
.connected => try this.handleCommand(Context, reader, @truncate(header_length)),
else => {
debug("Unexpected packet in state {s}", .{@tagName(this.status)});
return error.UnexpectedPacket;

@@ -1270,24 +1294,35 @@ fn handleHandshakeDecodePublicKey(this: *MySQLConnection, comptime Context: type

pub fn consumeOnConnectCallback(this: *const @This(), globalObject: *jsc.JSGlobalObject) ?jsc.JSValue {
debug("consumeOnConnectCallback", .{});
const on_connect = js.onconnectGetCached(this.js_value) orelse return null;
debug("consumeOnConnectCallback exists", .{});

js.onconnectSetCached(this.js_value, globalObject, .zero);
return on_connect;
if (this.js_value.tryGet()) |value| {
const on_connect = js.onconnectGetCached(value) orelse return null;
debug("consumeOnConnectCallback exists", .{});
js.onconnectSetCached(value, globalObject, .zero);
if (on_connect == .zero) {
return null;
}
return on_connect;
}
return null;
}

pub fn consumeOnCloseCallback(this: *const @This(), globalObject: *jsc.JSGlobalObject) ?jsc.JSValue {
debug("consumeOnCloseCallback", .{});
const on_close = js.oncloseGetCached(this.js_value) orelse return null;
debug("consumeOnCloseCallback exists", .{});
js.oncloseSetCached(this.js_value, globalObject, .zero);
return on_close;
if (this.js_value.tryGet()) |value| {
const on_close = js.oncloseGetCached(value) orelse return null;
debug("consumeOnCloseCallback exists", .{});
js.oncloseSetCached(value, globalObject, .zero);
if (on_close == .zero) {
return null;
}
return on_close;
}
return null;
}

pub fn setStatus(this: *@This(), status: ConnectionState) void {
if (this.status == status) return;
defer this.updateHasPendingActivity();
defer this.updateReferenceType();

this.status = status;
this.resetConnectionTimeout();

@@ -1296,7 +1331,8 @@ pub fn setStatus(this: *@This(), status: ConnectionState) void {
switch (status) {
.connected => {
const on_connect = this.consumeOnConnectCallback(this.globalObject) orelse return;
const js_value = this.js_value;
on_connect.ensureStillAlive();
var js_value = this.js_value.tryGet() orelse .js_undefined;
js_value.ensureStillAlive();
this.globalObject.queueMicrotask(on_connect, &[_]JSValue{ JSValue.jsNull(), js_value });
this.poll_ref.unref(this.vm);

@@ -1306,8 +1342,8 @@ pub fn setStatus(this: *@This(), status: ConnectionState) void {
}

pub fn updateRef(this: *@This()) void {
this.updateHasPendingActivity();
if (this.pending_activity_count.raw > 0) {
this.updateReferenceType();
if (this.js_value == .strong) {
this.poll_ref.ref(this.vm);
} else {
this.poll_ref.unref(this.vm);

@@ -1765,7 +1801,7 @@ fn handleResultSetOK(this: *MySQLConnection, request: *MySQLQuery, statement: *M
request.onResult(
statement.result_count,
this.globalObject,
this.js_value,
this.js_value.tryGet() orelse .js_undefined,
this.flags.is_ready_for_query,
last_insert_id,
affected_rows,

@@ -1874,7 +1910,7 @@ pub fn handleResultSet(this: *MySQLConnection, comptime Context: type, reader: N
var cached_structure: ?CachedStructure = null;
switch (request.flags.result_mode) {
.objects => {
cached_structure = statement.structure(this.js_value, this.globalObject);
cached_structure = if (this.js_value.tryGet()) |value| statement.structure(value, this.globalObject) else null;
structure = cached_structure.?.jsValue() orelse .js_undefined;
},
.raw, .values => {

@@ -1884,7 +1920,7 @@ pub fn handleResultSet(this: *MySQLConnection, comptime Context: type, reader: N
defer row.deinit(allocator);
try row.decode(allocator, reader);

const pending_value = MySQLQuery.js.pendingValueGetCached(request.thisValue.get()) orelse .zero;
const pending_value = (if (request.thisValue.tryGet()) |value| MySQLQuery.js.pendingValueGetCached(value) else .js_undefined) orelse .js_undefined;

// Process row data
const row_value = row.toJS(

@@ -1902,8 +1938,10 @@ pub fn handleResultSet(this: *MySQLConnection, comptime Context: type, reader: N
}
statement.result_count += 1;

if (pending_value == .zero) {
MySQLQuery.js.pendingValueSetCached(request.thisValue.get(), this.globalObject, row_value);
if (pending_value.isEmptyOrUndefinedOrNull()) {
if (request.thisValue.tryGet()) |value| {
MySQLQuery.js.pendingValueSetCached(value, this.globalObject, row_value);
}
}
}
},
@@ -1,5 +1,5 @@
const MySQLQuery = @This();
const RefCount = bun.ptr.ThreadSafeRefCount(@This(), "ref_count", deinit, .{});
const RefCount = bun.ptr.RefCount(@This(), "ref_count", deinit, .{});

statement: ?*MySQLStatement = null,
query: bun.String = bun.String.empty,

@@ -42,10 +42,6 @@ pub const Status = enum(u8) {
}
};

pub fn hasPendingActivity(this: *@This()) bool {
return this.ref_count.load(.monotonic) > 1;
}

pub fn deinit(this: *@This()) void {
this.thisValue.deinit();
if (this.statement) |statement| {

@@ -66,11 +62,7 @@ pub fn finalize(this: *@This()) void {
this.statement = null;
}

if (this.thisValue == .weak) {
// clean up if is a weak reference, if is a strong reference we need to wait until the query is done
// if we are a strong reference, here is probably a bug because GC'd should not happen
this.thisValue.weak = .zero;
}
this.thisValue.deinit();
this.deref();
}

@@ -81,12 +73,9 @@ pub fn onWriteFail(
queries_array: JSValue,
) void {
this.status = .fail;
const thisValue = this.thisValue.get();
const thisValue = this.thisValue.tryGet() orelse return;
defer this.thisValue.deinit();
const targetValue = this.getTarget(globalObject, true);
if (thisValue == .zero or targetValue == .zero) {
return;
}
const targetValue = this.getTarget(globalObject, true) orelse return;

const instance = AnyMySQLError.mysqlErrorToJS(globalObject, "Failed to bind query", err);

@@ -95,9 +84,7 @@ pub fn onWriteFail(
const event_loop = vm.eventLoop();
event_loop.runCallback(function, globalObject, thisValue, &.{
targetValue,
// TODO: add mysql error to JS
// postgresErrorToJS(globalObject, null, err),
instance,
instance.toError() orelse instance,
queries_array,
});
}

@@ -124,9 +111,9 @@ pub fn bindAndExecute(this: *MySQLQuery, writer: anytype, statement: *MySQLState
}

fn bind(this: *MySQLQuery, execute: *PreparedStatement.Execute, globalObject: *jsc.JSGlobalObject) AnyMySQLError.Error!void {
const thisValue = this.thisValue.get();
const binding_value = js.bindingGetCached(thisValue) orelse .zero;
const columns_value = js.columnsGetCached(thisValue) orelse .zero;
const thisValue = this.thisValue.tryGet() orelse return error.InvalidState;
const binding_value = js.bindingGetCached(thisValue) orelse .js_undefined;
const columns_value = js.columnsGetCached(thisValue) orelse .js_undefined;

var iter = try QueryBindingIterator.init(binding_value, columns_value, globalObject);

@@ -167,24 +154,26 @@ pub fn onJSError(this: *@This(), err: jsc.JSValue, globalObject: *jsc.JSGlobalOb
this.ref();
defer this.deref();
this.status = .fail;
const thisValue = this.thisValue.get();
const thisValue = this.thisValue.tryGet() orelse return;
defer this.thisValue.deinit();
const targetValue = this.getTarget(globalObject, true);
if (thisValue == .zero or targetValue == .zero) {
return;
}
const targetValue = this.getTarget(globalObject, true) orelse return;

var vm = jsc.VirtualMachine.get();
const function = vm.rareData().mysql_context.onQueryRejectFn.get().?;
const event_loop = vm.eventLoop();
var js_error = err.toError() orelse err;
if (js_error == .zero) {
js_error = AnyMySQLError.mysqlErrorToJS(globalObject, "Query failed", error.UnknownError);
}
js_error.ensureStillAlive();
event_loop.runCallback(function, globalObject, thisValue, &.{
targetValue,
err,
js_error,
});
}
pub fn getTarget(this: *@This(), globalObject: *jsc.JSGlobalObject, clean_target: bool) jsc.JSValue {
const thisValue = this.thisValue.tryGet() orelse return .zero;
const target = js.targetGetCached(thisValue) orelse return .zero;
pub fn getTarget(this: *@This(), globalObject: *jsc.JSGlobalObject, clean_target: bool) ?jsc.JSValue {
const thisValue = this.thisValue.tryGet() orelse return null;
const target = js.targetGetCached(thisValue) orelse return null;
if (clean_target) {
js.targetSetCached(thisValue, globalObject, .zero);
}

@@ -219,32 +208,36 @@ pub fn onResult(this: *@This(), result_count: u64, globalObject: *jsc.JSGlobalOb
this.ref();
defer this.deref();

const thisValue = this.thisValue.get();
const targetValue = this.getTarget(globalObject, is_last);
if (is_last) {
this.status = .success;
} else {
this.status = .partial_response;
}
const thisValue = this.thisValue.tryGet() orelse return;

defer if (is_last) {
allowGC(thisValue, globalObject);
this.thisValue.deinit();
};
if (thisValue == .zero or targetValue == .zero) {
return;
}
const targetValue = this.getTarget(globalObject, is_last) orelse return;

const vm = jsc.VirtualMachine.get();
const function = vm.rareData().mysql_context.onQueryResolveFn.get().?;
const event_loop = vm.eventLoop();
const tag: CommandTag = .{ .SELECT = result_count };
var queries_array = if (connection == .zero) .js_undefined else MySQLConnection.js.queriesGetCached(connection) orelse .js_undefined;
if (queries_array == .zero) {
queries_array = .js_undefined;
} else {
queries_array.ensureStillAlive();
}

event_loop.runCallback(function, globalObject, thisValue, &.{
targetValue,
consumePendingValue(thisValue, globalObject) orelse .js_undefined,
tag.toJSTag(globalObject),
tag.toJSNumber(),
if (connection == .zero) .js_undefined else MySQLConnection.js.queriesGetCached(connection) orelse .js_undefined,
queries_array,
JSValue.jsBoolean(is_last),
JSValue.jsNumber(last_insert_id),
JSValue.jsNumber(affected_rows),

@@ -364,7 +357,7 @@ pub fn doRun(this: *MySQLQuery, globalObject: *jsc.JSGlobalObject, callframe: *j
return globalObject.throw("connection must be a MySQLConnection", .{});
};

connection.poll_ref.ref(globalObject.bunVM());
defer connection.updateRef();
var query = arguments[1];

if (!query.isObject()) {
@@ -33,6 +33,8 @@ pub const Error = error{
InvalidErrorPacket,
UnexpectedPacket,
ShortRead,
UnknownError,
InvalidState,
};

pub fn mysqlErrorToJS(globalObject: *jsc.JSGlobalObject, message: ?[]const u8, err: Error) JSValue {

@@ -64,6 +66,8 @@ pub fn mysqlErrorToJS(globalObject: *jsc.JSGlobalObject, message: ?[]const u8, e
error.MissingAuthData => "ERR_MYSQL_MISSING_AUTH_DATA",
error.FailedToEncryptPassword => "ERR_MYSQL_FAILED_TO_ENCRYPT_PASSWORD",
error.InvalidPublicKey => "ERR_MYSQL_INVALID_PUBLIC_KEY",
error.UnknownError => "ERR_MYSQL_UNKNOWN_ERROR",
error.InvalidState => "ERR_MYSQL_INVALID_STATE",
error.JSError => {
return globalObject.takeException(error.JSError);
},

@@ -19,7 +19,7 @@ pub fn createMySQLError(
message: []const u8,
options: MySQLErrorOptions,
) bun.JSError!JSValue {
const opts_obj = JSValue.createEmptyObject(globalObject, 18);
const opts_obj = JSValue.createEmptyObject(globalObject, 0);
opts_obj.ensureStillAlive();
opts_obj.put(globalObject, JSC.ZigString.static("code"), try bun.String.createUTF8ForJS(globalObject, options.code));
if (options.errno) |errno| {
@@ -219,7 +219,7 @@ pub fn onConnectionTimeout(this: *PostgresSQLConnection) bun.api.Timer.EventLoop
this.failFmt("ERR_POSTGRES_CONNECTION_TIMEOUT", "Connection timeout after {}", .{bun.fmt.fmtDurationOneDecimal(@as(u64, this.connection_timeout_ms) *| std.time.ns_per_ms)});
},
.sent_startup_message => {
this.failFmt("ERR_POSTGRES_CONNECTION_TIMEOUT", "Connection timed out after {} (sent startup message, but never received response)", .{bun.fmt.fmtDurationOneDecimal(@as(u64, this.connection_timeout_ms) *| std.time.ns_per_ms)});
this.failFmt("ERR_POSTGRES_CONNECTION_TIMEOUT", "Connection timeout after {} (sent startup message, but never received response)", .{bun.fmt.fmtDurationOneDecimal(@as(u64, this.connection_timeout_ms) *| std.time.ns_per_ms)});
},
}
return .disarm;

@@ -311,7 +311,7 @@ pub fn failWithJSValue(this: *PostgresSQLConnection, value: JSValue) void {
this.stopTimers();
if (this.status == .failed) return;

this.setStatus(.failed);
this.status = .failed;

this.ref();
defer this.deref();

@@ -321,12 +321,17 @@ pub fn failWithJSValue(this: *PostgresSQLConnection, value: JSValue) void {

const loop = this.vm.eventLoop();
loop.enter();
var js_error = value.toError() orelse value;
if (js_error == .zero) {
js_error = postgresErrorToJS(this.globalObject, "Connection closed", error.ConnectionClosed);
}
js_error.ensureStillAlive();
defer loop.exit();
_ = on_close.call(
this.globalObject,
this.js_value,
.js_undefined,
&[_]JSValue{
value.toError() orelse value,
js_error,
this.getQueriesArray(),
},
) catch |e| this.globalObject.reportActiveExceptionAsUnhandled(e);

@@ -1350,6 +1355,9 @@ fn advance(this: *PostgresSQLConnection) void {
}

pub fn getQueriesArray(this: *const PostgresSQLConnection) JSValue {
if (this.js_value.isEmptyOrUndefinedOrNull()) {
return .js_undefined;
}
return js.queriesGetCached(this.js_value) orelse .js_undefined;
}

@@ -1,5 +1,5 @@
const PostgresSQLQuery = @This();
const RefCount = bun.ptr.ThreadSafeRefCount(@This(), "ref_count", deinit, .{});
const RefCount = bun.ptr.RefCount(@This(), "ref_count", deinit, .{});
statement: ?*PostgresSQLStatement = null,
query: bun.String = bun.String.empty,
cursor_name: bun.String = bun.String.empty,

@@ -23,9 +23,9 @@ flags: packed struct(u8) {
pub const ref = RefCount.ref;
pub const deref = RefCount.deref;

pub fn getTarget(this: *PostgresSQLQuery, globalObject: *jsc.JSGlobalObject, clean_target: bool) jsc.JSValue {
const thisValue = this.thisValue.tryGet() orelse return .zero;
const target = js.targetGetCached(thisValue) orelse return .zero;
pub fn getTarget(this: *PostgresSQLQuery, globalObject: *jsc.JSGlobalObject, clean_target: bool) ?jsc.JSValue {
const thisValue = this.thisValue.tryGet() orelse return null;
const target = js.targetGetCached(thisValue) orelse return null;
if (clean_target) {
js.targetSetCached(thisValue, globalObject, .zero);
}

@@ -51,10 +51,6 @@ pub const Status = enum(u8) {
}
};

pub fn hasPendingActivity(this: *@This()) bool {
return this.ref_count.get() > 1;
}

pub fn deinit(this: *@This()) void {
this.thisValue.deinit();
if (this.statement) |statement| {

@@ -84,12 +80,9 @@ pub fn onWriteFail(
this.ref();
defer this.deref();
this.status = .fail;
const thisValue = this.thisValue.get();
const thisValue = this.thisValue.tryGet() orelse return;
defer this.thisValue.deinit();
const targetValue = this.getTarget(globalObject, true);
if (thisValue == .zero or targetValue == .zero) {
return;
}
const targetValue = this.getTarget(globalObject, true) orelse return;

const vm = jsc.VirtualMachine.get();
const function = vm.rareData().postgresql_context.onQueryRejectFn.get().?;

@@ -105,12 +98,9 @@ pub fn onJSError(this: *@This(), err: jsc.JSValue, globalObject: *jsc.JSGlobalOb
this.ref();
defer this.deref();
this.status = .fail;
const thisValue = this.thisValue.get();
const thisValue = this.thisValue.tryGet() orelse return;
defer this.thisValue.deinit();
const targetValue = this.getTarget(globalObject, true);
if (thisValue == .zero or targetValue == .zero) {
return;
}
const targetValue = this.getTarget(globalObject, true) orelse return;

var vm = jsc.VirtualMachine.get();
const function = vm.rareData().postgresql_context.onQueryRejectFn.get().?;

@@ -145,21 +135,17 @@ fn consumePendingValue(thisValue: jsc.JSValue, globalObject: *jsc.JSGlobalObject
pub fn onResult(this: *@This(), command_tag_str: []const u8, globalObject: *jsc.JSGlobalObject, connection: jsc.JSValue, is_last: bool) void {
this.ref();
defer this.deref();

const thisValue = this.thisValue.get();
const targetValue = this.getTarget(globalObject, is_last);
if (is_last) {
this.status = .success;
} else {
this.status = .partial_response;
}
const thisValue = this.thisValue.tryGet() orelse return;
defer if (is_last) {
allowGC(thisValue, globalObject);
this.thisValue.deinit();
};
if (thisValue == .zero or targetValue == .zero) {
return;
}
const targetValue = this.getTarget(globalObject, is_last) orelse return;

const vm = jsc.VirtualMachine.get();
const function = vm.rareData().postgresql_context.onQueryResolveFn.get().?;

@@ -11,6 +11,10 @@ array_length: usize = 0,
any_failed: bool = false,

pub fn next(this: *ObjectIterator) ?jsc.JSValue {
if (this.array.isEmptyOrUndefinedOrNull() or this.columns.isEmptyOrUndefinedOrNull()) {
this.any_failed = true;
return null;
}
if (this.row_i >= this.array_length) {
return null;
}
test/docker/config/fuzzingserver.json (new file, 10 lines)
@@ -0,0 +1,10 @@
{
  "url": "ws://0.0.0.0:9002",
  "options": {
    "failByDrop": false
  },
  "outdir": "./reports/servers",
  "cases": ["*"],
  "exclude-cases": [],
  "exclude-agent-cases": {}
}
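This spec drives the crossbario/autobahn-testsuite image that docker-prepull.sh fetches. A sketch of how the fuzzing server container might be started against it, assuming the conventional wstest invocation and mount points for that image (the exact wiring in Bun's test harness is not shown in this diff):

# Run the Autobahn fuzzing server on port 9002 with the spec above;
# the WebSocket client under test then connects to ws://localhost:9002.
docker run --rm -p 9002:9002 \
  -v "$PWD/test/docker/config:/config:ro" \
  -v "$PWD/reports:/reports" \
  crossbario/autobahn-testsuite \
  wstest --mode fuzzingserver --spec /config/fuzzingserver.json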
test/docker/config/pg_hba_auth.conf (new file, 25 lines)
@@ -0,0 +1,25 @@
# PostgreSQL HBA configuration for authentication testing
# TYPE  DATABASE      USER                 ADDRESS         METHOD

# Local connections
local   all           postgres                             trust
local   all           bun_sql_test                         trust
local   all           bun_sql_test_md5                     md5
local   all           bun_sql_test_scram                   scram-sha-256

# IPv4 connections
host    all           postgres             127.0.0.1/32    trust
host    all           bun_sql_test         127.0.0.1/32    trust
host    all           bun_sql_test_md5     127.0.0.1/32    md5
host    all           bun_sql_test_scram   127.0.0.1/32    scram-sha-256

# IPv6 connections
host    all           postgres             ::1/128         trust
host    all           bun_sql_test         ::1/128         trust
host    all           bun_sql_test_md5     ::1/128         md5
host    all           bun_sql_test_scram   ::1/128         scram-sha-256

# Replication
local   replication   all                                  trust
host    replication   all                  127.0.0.1/32    trust
host    replication   all                  ::1/128         trust
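The *_md5 and *_scram roles exist so the SQL tests can exercise both password auth paths against the postgres_auth service below. A hypothetical smoke check with psql (the role names come from the HBA file above; host, port, database, and password are placeholders, since the init scripts that create these roles are not part of this diff):

# trust auth: no password prompt expected
psql "postgres://postgres@127.0.0.1:5432/postgres" -c "select 1"

# md5 and scram-sha-256 auth: the server must demand a password for these roles
PGPASSWORD="<password-from-init-script>" \
  psql "postgres://bun_sql_test_md5@127.0.0.1:5432/postgres" -c "select 1"
PGPASSWORD="<password-from-init-script>" \
  psql "postgres://bun_sql_test_scram@127.0.0.1:5432/postgres" -c "select 1"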
test/docker/docker-compose.yml (new file, 216 lines)
@@ -0,0 +1,216 @@
services:
  # PostgreSQL Services
  postgres_plain:
    image: postgres:15
    environment:
      POSTGRES_HOST_AUTH_METHOD: trust
      POSTGRES_USER: postgres
    volumes:
      - ./init-scripts/postgres:/docker-entrypoint-initdb.d:ro
    ports:
      - target: 5432
        published: 0
        protocol: tcp
    tmpfs:
      - /var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 1h # Effectively disable after startup
      timeout: 5s
      retries: 30
      start_period: 5s

  postgres_tls:
    image: postgres:15
    environment:
      POSTGRES_HOST_AUTH_METHOD: trust
      POSTGRES_USER: postgres
    volumes:
      - ./init-scripts/postgres:/docker-entrypoint-initdb.d:ro
      - ../js/sql/docker-tls/server.crt:/etc/postgresql/ssl/server.crt:ro
      - ../js/sql/docker-tls/server.key:/etc/postgresql/ssl/server.key:ro
    ports:
      - target: 5432
        published: 0
        protocol: tcp
    command: >
      postgres
      -c ssl=on
      -c ssl_cert_file=/etc/postgresql/ssl/server.crt
      -c ssl_key_file=/etc/postgresql/ssl/server.key
      -c max_prepared_transactions=1000
      -c max_connections=2000
    tmpfs:
      - /var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 1h # Effectively disable after startup
      timeout: 5s
      retries: 30
      start_period: 5s

  postgres_auth:
    image: postgres:15
    environment:
      POSTGRES_HOST_AUTH_METHOD: trust
      POSTGRES_USER: postgres
    volumes:
      - ./init-scripts/postgres-auth:/docker-entrypoint-initdb.d:ro
      - ./config/pg_hba_auth.conf:/etc/postgresql/pg_hba.conf:ro
    ports:
      - target: 5432
        published: 0
        protocol: tcp
    command: >
      postgres
      -c hba_file=/etc/postgresql/pg_hba.conf
      -c max_prepared_transactions=1000
      -c max_connections=2000
    tmpfs:
      - /var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 1h # Effectively disable after startup
      timeout: 5s
      retries: 30
      start_period: 5s

  # MySQL Services
  mysql_plain:
    image: mysql:8.4
    environment:
      MYSQL_ALLOW_EMPTY_PASSWORD: "yes"
      MYSQL_DATABASE: bun_sql_test
    ports:
      - target: 3306
        published: 0
        protocol: tcp
    tmpfs:
      - /var/lib/mysql
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
      interval: 1h # Effectively disable after startup
      timeout: 5s
      retries: 30
      start_period: 10s

  mysql_native_password:
    image: mysql:8.0
    environment:
      MYSQL_ROOT_PASSWORD: bun
      MYSQL_DATABASE: bun_sql_test
      MYSQL_ROOT_HOST: "%"
    command: --default-authentication-plugin=mysql_native_password
    ports:
      - target: 3306
        published: 0
        protocol: tcp
    tmpfs:
      - /var/lib/mysql
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-pbun"]
      interval: 1h # Effectively disable after startup
      timeout: 5s
      retries: 30
      start_period: 10s

  mysql_tls:
    build:
      context: ../js/sql/mysql-tls
      dockerfile: Dockerfile
      args:
        MYSQL_VERSION: 8.4
    image: bun-mysql-tls:local
    environment:
      MYSQL_ROOT_PASSWORD: bun
      MYSQL_DATABASE: bun_sql_test
    ports:
      - target: 3306
        published: 0
        protocol: tcp
    tmpfs:
      - /var/lib/mysql
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-pbun"]
      interval: 1h # Effectively disable after startup
      timeout: 5s
      retries: 30
      start_period: 10s

  # Redis/Valkey Services
  redis_plain:
    image: redis:7-alpine
    command: redis-server --bind 0.0.0.0 --protected-mode no
    ports:
      - target: 6379
        published: 0
        protocol: tcp
    tmpfs:
      - /data

  redis_unified:
    build:
      context: ../js/valkey/docker-unified
      dockerfile: Dockerfile
    image: bun-redis-unified:local
    ports:
      - target: 6379
        published: 0
        protocol: tcp
        name: tcp
      - target: 6380
        published: 0
        protocol: tcp
        name: tls
    volumes:
      - redis-unix:/tmp/redis
    tmpfs:
      - /data

  # MinIO (S3) Service
  minio:
    image: minio/minio:latest
    environment:
      MINIO_ROOT_USER: minioadmin
      MINIO_ROOT_PASSWORD: minioadmin
      MINIO_DOMAIN: localhost
    command: server /data --console-address :9001
    ports:
      - target: 9000
        published: 0
        protocol: tcp
        name: api
      - target: 9001
        published: 0
        protocol: tcp
        name: console
    tmpfs:
      - /data
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 1h # Effectively disable after startup
      timeout: 5s
      retries: 30
      start_period: 5s

  # WebSocket Autobahn Test Suite
  # NOTE: Autobahn requires port 9002 to match both internal and external ports
  # because it validates the Host header against its configured listening port.
  # Dynamic port mapping causes "port X does not match server listening port 9002" errors.
  autobahn:
    image: crossbario/autobahn-testsuite
    volumes:
      - ./config/fuzzingserver.json:/config/fuzzingserver.json:ro
    command: wstest -m fuzzingserver -s /config/fuzzingserver.json
    ports:
      - target: 9002
        published: 9002 # Must be 9002, not dynamic (0)
        protocol: tcp

volumes:
  redis-unix:
    driver: local

networks:
  default:
    driver: bridge
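Because every service except autobahn uses `published: 0`, Docker picks a free host port at startup and tests have to resolve the actual mapping at runtime. A minimal sketch of that lookup (the project name and compose path below mirror the defaults used by the helper that follows, and are assumptions here):

import { spawn } from "bun";

// Ask docker compose which host port was bound for postgres_plain's container port 5432.
const proc = spawn({
  cmd: ["docker", "compose", "-p", "bun-test-services", "-f", "test/docker/docker-compose.yml", "port", "postgres_plain", "5432"],
  stdout: "pipe",
});
const output = await proc.stdout.text();          // e.g. "0.0.0.0:49213"
const hostPort = Number(output.trim().split(":").pop());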
484
test/docker/index.ts
Normal file
@@ -0,0 +1,484 @@
import { spawn } from "bun";
import { join, dirname } from "path";
import { fileURLToPath } from "url";

const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

export type ServiceName =
  | "postgres_plain"
  | "postgres_tls"
  | "postgres_auth"
  | "mysql_plain"
  | "mysql_native_password"
  | "mysql_tls"
  | "redis_plain"
  | "redis_unified"
  | "minio"
  | "autobahn";

export interface ServiceInfo {
  host: string;
  ports: Record<number, number>;
  tls?: {
    ca?: string;
    cert?: string;
    key?: string;
  };
  socketPath?: string;
  users?: Record<string, string>;
}

interface DockerComposeOptions {
  projectName?: string;
  composeFile?: string;
}

class DockerComposeHelper {
  private projectName: string;
  private composeFile: string;
  private runningServices: Set<ServiceName> = new Set();

  constructor(options: DockerComposeOptions = {}) {
    this.projectName = options.projectName ||
      process.env.BUN_DOCKER_PROJECT_NAME ||
      process.env.COMPOSE_PROJECT_NAME ||
      "bun-test-services"; // Default project name for all test services

    this.composeFile = options.composeFile ||
      process.env.BUN_DOCKER_COMPOSE_FILE ||
      join(__dirname, "docker-compose.yml");
  }

  private async exec(args: string[]): Promise<{ stdout: string; stderr: string; exitCode: number }> {
    const proc = spawn({
      cmd: ["docker", "compose", "-p", this.projectName, "-f", this.composeFile, ...args],
      stdout: "pipe",
      stderr: "pipe",
    });

    const [stdout, stderr] = await Promise.all([
      proc.stdout.text(),
      proc.stderr.text(),
    ]);

    const exitCode = await proc.exited;

    return { stdout, stderr, exitCode };
  }

  async ensureDocker(): Promise<void> {
    // Check Docker is available
    const dockerCheck = spawn({
      cmd: ["docker", "version"],
      stdout: "pipe",
      stderr: "pipe",
    });

    const exitCode = await dockerCheck.exited;
    if (exitCode !== 0) {
      throw new Error("Docker is not available. Please ensure Docker is installed and running.");
    }

    // Check docker compose v2 is available
    const composeCheck = spawn({
      cmd: ["docker", "compose", "version"],
      stdout: "pipe",
      stderr: "pipe",
    });

    const composeExitCode = await composeCheck.exited;
    if (composeExitCode !== 0) {
      throw new Error("Docker Compose v2 is not available. Please ensure Docker Compose v2 is installed.");
    }
  }

  async up(service: ServiceName): Promise<void> {
    if (this.runningServices.has(service)) {
      return;
    }

    // Build the service if needed (for services like mysql_tls that need building)
    if (service === "mysql_tls") {
      const buildResult = await this.exec(["build", service]);
      if (buildResult.exitCode !== 0) {
        throw new Error(`Failed to build service ${service}: ${buildResult.stderr}`);
      }
    }

    // Start the service and wait for it to be healthy
    // Remove --quiet-pull to see pull progress and avoid confusion
    const { exitCode, stderr } = await this.exec(["up", "-d", "--wait", service]);

    if (exitCode !== 0) {
      throw new Error(`Failed to start service ${service}: ${stderr}`);
    }

    this.runningServices.add(service);
  }

  async port(service: ServiceName, targetPort: number): Promise<number> {
    const { stdout, exitCode } = await this.exec(["port", service, targetPort.toString()]);

    if (exitCode !== 0) {
      throw new Error(`Failed to get port for ${service}:${targetPort}`);
    }

    const match = stdout.trim().match(/:(\d+)$/);
    if (!match) {
      throw new Error(`Invalid port output: ${stdout}`);
    }

    return parseInt(match[1], 10);
  }

  async ensure(service: ServiceName): Promise<ServiceInfo> {
    await this.up(service);

    const info: ServiceInfo = {
      host: "127.0.0.1",
      ports: {},
    };

    // Get ports based on service type
    switch (service) {
      case "postgres_plain":
      case "postgres_tls":
      case "postgres_auth":
        info.ports[5432] = await this.port(service, 5432);

        if (service === "postgres_tls") {
          info.tls = {
            cert: join(__dirname, "../js/sql/docker-tls/server.crt"),
            key: join(__dirname, "../js/sql/docker-tls/server.key"),
          };
        }

        if (service === "postgres_auth") {
          info.users = {
            bun_sql_test: "",
            bun_sql_test_md5: "bun_sql_test_md5",
            bun_sql_test_scram: "bun_sql_test_scram",
          };
        }
        break;

      case "mysql_plain":
      case "mysql_native_password":
      case "mysql_tls":
        info.ports[3306] = await this.port(service, 3306);

        if (service === "mysql_tls") {
          info.tls = {
            ca: join(__dirname, "../js/sql/mysql-tls/ssl/ca.pem"),
            cert: join(__dirname, "../js/sql/mysql-tls/ssl/server-cert.pem"),
            key: join(__dirname, "../js/sql/mysql-tls/ssl/server-key.pem"),
          };
        }
        break;

      case "redis_plain":
        info.ports[6379] = await this.port(service, 6379);
        break;

      case "redis_unified":
        info.ports[6379] = await this.port(service, 6379);
        info.ports[6380] = await this.port(service, 6380);
        info.socketPath = "/tmp/redis/redis.sock";
        info.tls = {
          cert: join(__dirname, "../js/valkey/docker-unified/server.crt"),
          key: join(__dirname, "../js/valkey/docker-unified/server.key"),
        };
        info.users = {
          default: "",
          testuser: "test123",
          readonly: "readonly",
          writeonly: "writeonly",
        };
        break;

      case "minio":
        info.ports[9000] = await this.port(service, 9000);
        info.ports[9001] = await this.port(service, 9001);
        break;

      case "autobahn":
        info.ports[9002] = await this.port(service, 9002);
        break;
    }

    return info;
  }

  async envFor(service: ServiceName): Promise<Record<string, string>> {
    const info = await this.ensure(service);
    const env: Record<string, string> = {};

    switch (service) {
      case "postgres_plain":
      case "postgres_tls":
      case "postgres_auth":
        env.PGHOST = info.host;
        env.PGPORT = info.ports[5432].toString();
        env.PGUSER = "bun_sql_test";
        env.PGDATABASE = "bun_sql_test";

        if (info.tls) {
          env.PGSSLMODE = "require";
          env.PGSSLCERT = info.tls.cert!;
          env.PGSSLKEY = info.tls.key!;
        }
        break;

      case "mysql_plain":
      case "mysql_native_password":
      case "mysql_tls":
        env.MYSQL_HOST = info.host;
        env.MYSQL_PORT = info.ports[3306].toString();
        env.MYSQL_USER = "root";
        env.MYSQL_PASSWORD = service === "mysql_plain" ? "" : "bun";
        env.MYSQL_DATABASE = "bun_sql_test";

        if (info.tls) {
          env.MYSQL_SSL_CA = info.tls.ca!;
        }
        break;

      case "redis_plain":
      case "redis_unified":
        env.REDIS_HOST = info.host;
        env.REDIS_PORT = info.ports[6379].toString();
        env.REDIS_URL = `redis://${info.host}:${info.ports[6379]}`;

        if (info.ports[6380]) {
          env.REDIS_TLS_PORT = info.ports[6380].toString();
          env.REDIS_TLS_URL = `rediss://${info.host}:${info.ports[6380]}`;
        }

        if (info.socketPath) {
          env.REDIS_SOCKET = info.socketPath;
        }
        break;

      case "minio":
        env.S3_ENDPOINT = `http://${info.host}:${info.ports[9000]}`;
        env.S3_ACCESS_KEY_ID = "minioadmin";
        env.S3_SECRET_ACCESS_KEY = "minioadmin";
        env.AWS_ACCESS_KEY_ID = "minioadmin";
        env.AWS_SECRET_ACCESS_KEY = "minioadmin";
        env.AWS_ENDPOINT_URL_S3 = `http://${info.host}:${info.ports[9000]}`;
        break;

      case "autobahn":
        env.AUTOBAHN_URL = `ws://${info.host}:${info.ports[9002]}`;
        break;
    }

    return env;
  }

  async down(): Promise<void> {
    if (process.env.BUN_KEEP_DOCKER === "1") {
      return;
    }

    const { exitCode } = await this.exec(["down", "-v"]);
    if (exitCode !== 0) {
      console.warn("Failed to tear down Docker services");
    }

    this.runningServices.clear();
  }

  async waitTcp(host: string, port: number, timeout = 30000): Promise<void> {
    const start = Date.now();

    while (Date.now() - start < timeout) {
      try {
        const socket = await Bun.connect({
          hostname: host,
          port,
        });
        socket.end();
        return;
      } catch {
        await Bun.sleep(500);
      }
    }

    throw new Error(`TCP connection to ${host}:${port} timed out`);
  }

  /**
   * Pull all Docker images explicitly - useful for CI
   */
  async pullImages(): Promise<void> {
    console.log("Pulling Docker images...");
    const { exitCode, stderr } = await this.exec(["pull", "--ignore-pull-failures"]);

    if (exitCode !== 0) {
      // Don't fail on pull errors since some services need building
      console.warn(`Warning during image pull: ${stderr}`);
    }
  }

  /**
   * Build all services that need building - useful for CI
   */
  async buildServices(): Promise<void> {
    console.log("Building Docker services...");
    // Only mysql_tls needs building currently
    const servicesToBuild = ["mysql_tls"];

    for (const service of servicesToBuild) {
      console.log(`Building ${service}...`);
      const { exitCode, stderr } = await this.exec(["build", service]);

      if (exitCode !== 0) {
        throw new Error(`Failed to build ${service}: ${stderr}`);
      }
    }
  }

  /**
   * Prepare all images (pull and build) - useful for CI
   */
  async prepareImages(): Promise<void> {
    await this.pullImages();
    await this.buildServices();
  }
}

// Global instance
let globalHelper: DockerComposeHelper | null = null;

function getHelper(): DockerComposeHelper {
  if (!globalHelper) {
    globalHelper = new DockerComposeHelper();
  }
  return globalHelper;
}

// Exported functions
export async function ensureDocker(): Promise<void> {
  return getHelper().ensureDocker();
}

export async function ensure(service: ServiceName): Promise<ServiceInfo> {
  return getHelper().ensure(service);
}

export async function port(service: ServiceName, targetPort: number): Promise<number> {
  return getHelper().port(service, targetPort);
}

export async function envFor(service: ServiceName): Promise<Record<string, string>> {
  return getHelper().envFor(service);
}

export async function down(): Promise<void> {
  return getHelper().down();
}

export async function waitTcp(host: string, port: number, timeout?: number): Promise<void> {
  return getHelper().waitTcp(host, port, timeout);
}

export async function pullImages(): Promise<void> {
  return getHelper().pullImages();
}

export async function buildServices(): Promise<void> {
  return getHelper().buildServices();
}

export async function prepareImages(): Promise<void> {
  return getHelper().prepareImages();
}

// Higher-level wrappers for tests
export async function withPostgres(
  opts: { variant?: "plain" | "tls" | "auth" },
  fn: (info: ServiceInfo & { url: string }) => Promise<void>
): Promise<void> {
  const variant = opts.variant || "plain";
  const serviceName = `postgres_${variant}` as ServiceName;
  const info = await ensure(serviceName);

  const user = variant === "auth" ? "bun_sql_test" : "postgres";
  const url = `postgres://${user}@${info.host}:${info.ports[5432]}/bun_sql_test`;

  try {
    await fn({ ...info, url });
  } finally {
    // Services persist - no teardown
  }
}

export async function withMySQL(
  opts: { variant?: "plain" | "native_password" | "tls" },
  fn: (info: ServiceInfo & { url: string }) => Promise<void>
): Promise<void> {
  const variant = opts.variant || "plain";
  const serviceName = `mysql_${variant}` as ServiceName;
  const info = await ensure(serviceName);

  const password = variant === "plain" ? "" : ":bun";
  const url = `mysql://root${password}@${info.host}:${info.ports[3306]}/bun_sql_test`;

  try {
    await fn({ ...info, url });
  } finally {
    // Services persist - no teardown
  }
}

export async function withRedis(
  opts: { variant?: "plain" | "unified" },
  fn: (info: ServiceInfo & { url: string; tlsUrl?: string }) => Promise<void>
): Promise<void> {
  const variant = opts.variant || "plain";
  const serviceName = `redis_${variant}` as ServiceName;
  const info = await ensure(serviceName);

  const url = `redis://${info.host}:${info.ports[6379]}`;
  const tlsUrl = info.ports[6380] ? `rediss://${info.host}:${info.ports[6380]}` : undefined;

  try {
    await fn({ ...info, url, tlsUrl });
  } finally {
    // Services persist - no teardown
  }
}

export async function withMinio(
  fn: (info: ServiceInfo & { endpoint: string; accessKeyId: string; secretAccessKey: string }) => Promise<void>
): Promise<void> {
  const info = await ensure("minio");

  try {
    await fn({
      ...info,
      endpoint: `http://${info.host}:${info.ports[9000]}`,
      accessKeyId: "minioadmin",
      secretAccessKey: "minioadmin",
    });
  } finally {
    // Services persist - no teardown
  }
}

export async function withAutobahn(
  fn: (info: ServiceInfo & { url: string }) => Promise<void>
): Promise<void> {
  const info = await ensure("autobahn");

  try {
    await fn({
      ...info,
      url: `ws://${info.host}:${info.ports[9002]}`,
    });
  } finally {
    // Services persist - no teardown
  }
}
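As a quick orientation, a test file would consume these wrappers roughly like the sketch below; the test body and the relative import path are assumptions for illustration, not lines from the suite:

import { test, expect } from "bun:test";
import { SQL } from "bun";
import { withPostgres } from "./index";

test("select 1 against the compose-managed postgres", async () => {
  await withPostgres({ variant: "plain" }, async ({ url }) => {
    const sql = new SQL(url); // url already points at the dynamically mapped host port
    expect((await sql`select 1 as x`)[0].x).toBe(1);
    await sql.end();
  });
});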
40
test/docker/init-scripts/postgres-auth/01-init-auth.sh
Executable file
@@ -0,0 +1,40 @@
#!/bin/bash
set -e

# Wait for PostgreSQL to start
until pg_isready; do
  echo "Waiting for PostgreSQL to start..."
  sleep 1
done

# Drop database if exists
dropdb --if-exists bun_sql_test || true

# Create users with different auth methods
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
    -- Create basic user
    DROP USER IF EXISTS bun_sql_test;
    CREATE USER bun_sql_test;

    -- Create MD5 user
    ALTER SYSTEM SET password_encryption = 'md5';
    SELECT pg_reload_conf();
    DROP USER IF EXISTS bun_sql_test_md5;
    CREATE USER bun_sql_test_md5 WITH PASSWORD 'bun_sql_test_md5';

    -- Create SCRAM user
    ALTER SYSTEM SET password_encryption = 'scram-sha-256';
    SELECT pg_reload_conf();
    DROP USER IF EXISTS bun_sql_test_scram;
    CREATE USER bun_sql_test_scram WITH PASSWORD 'bun_sql_test_scram';
EOSQL

# Create database and set permissions
createdb bun_sql_test

psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
    GRANT ALL ON DATABASE bun_sql_test TO bun_sql_test;
    GRANT ALL ON DATABASE bun_sql_test TO bun_sql_test_md5;
    GRANT ALL ON DATABASE bun_sql_test TO bun_sql_test_scram;
    ALTER DATABASE bun_sql_test OWNER TO bun_sql_test;
EOSQL
18
test/docker/init-scripts/postgres/01-init.sql
Normal file
@@ -0,0 +1,18 @@
-- PostgreSQL initialization script for plain setup
ALTER SYSTEM SET max_prepared_transactions = '1000';
ALTER SYSTEM SET max_connections = '2000';

-- Create test users with different auth methods
CREATE USER bun_sql_test;
CREATE USER bun_sql_test_md5 WITH PASSWORD 'bun_sql_test_md5';
CREATE USER bun_sql_test_scram WITH PASSWORD 'bun_sql_test_scram';

-- Create test database
CREATE DATABASE bun_sql_test;

-- Grant permissions to all test users
GRANT ALL ON DATABASE bun_sql_test TO bun_sql_test;
GRANT ALL ON DATABASE bun_sql_test TO bun_sql_test_md5;
GRANT ALL ON DATABASE bun_sql_test TO bun_sql_test_scram;

ALTER DATABASE bun_sql_test OWNER TO bun_sql_test;
26
test/docker/prepare-ci.ts
Normal file
@@ -0,0 +1,26 @@
#!/usr/bin/env bun
/**
 * CI preparation script for Docker test services
 *
 * This script pre-pulls and builds all Docker images needed for tests
 * to avoid failures during test execution.
 *
 * Usage: bun test/docker/prepare-ci.ts
 */

import { prepareImages } from "./index";

async function main() {
  console.log("Preparing Docker test infrastructure for CI...");

  try {
    await prepareImages();
    console.log("✅ Docker test infrastructure is ready");
    process.exit(0);
  } catch (error) {
    console.error("❌ Failed to prepare Docker test infrastructure:", error);
    process.exit(1);
  }
}

main();
@@ -862,11 +862,6 @@ export function isDockerEnabled(): boolean {
    return false;
  }

  // TODO: investigate why its not starting on Linux arm64
  if ((isLinux && process.arch === "arm64") || isMacOS) {
    return false;
  }

  try {
    const info = execSync(`${dockerCLI} info`, { stdio: ["ignore", "pipe", "inherit"] });
    return info.toString().indexOf("Server Version:") !== -1;
@@ -902,7 +897,7 @@ export async function waitForPort(port: number, timeout: number = 60_000): Promi
    throw error;
  }

export async function describeWithContainer(
export function describeWithContainer(
  label: string,
  {
    image,
@@ -915,16 +910,48 @@ export async function describeWithContainer(
    args?: string[];
    archs?: NodeJS.Architecture[];
  },
  fn: (port: number) => void,
  fn: (container: { port: number; host: string }) => void,
) {
  describe(label, () => {
    // Check if this is one of our docker-compose services
    const services: Record<string, number> = {
      "postgres_plain": 5432,
      "postgres_tls": 5432,
      "postgres_auth": 5432,
      "mysql_plain": 3306,
      "mysql_native_password": 3306,
      "mysql_tls": 3306,
      "redis_plain": 6379,
      "redis_unified": 6379,
      "minio": 9000,
      "autobahn": 9002,
    };

    const servicePort = services[image];
    if (servicePort) {
      let containerInfo = { host: "127.0.0.1", port: 0 };

      // Start the service before any tests
      beforeAll(async () => {
        const dockerHelper = await import("./docker/index.ts");
        const info = await dockerHelper.ensure(image as any);
        containerInfo.host = info.host;
        containerInfo.port = info.ports[servicePort];
        console.log(`Container ready: ${image} at ${containerInfo.host}:${containerInfo.port}`);
      });

      fn(containerInfo);
      return;
    }

    // Fall back to original implementation for unknown images
    const docker = dockerExe();
    if (!docker) {
      test.skip(`docker is not installed, skipped: ${image}`, () => {});
      return;
    }
    const { arch, platform } = process;
    if ((archs && !archs?.includes(arch)) || platform === "win32" || platform === "darwin") {
    if ((archs && !archs?.includes(arch)) || platform === "win32") {
      test.skip(`docker image is not supported on ${platform}/${arch}, skipped: ${image}`, () => {});
      return false;
    }
@@ -980,7 +1007,7 @@ export async function describeWithContainer(
        stderr: "ignore",
      });
    });
    fn(port);
    fn({ port, host: "127.0.0.1" });
  });
}
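With the new signature the callback receives a mutable { host, port } record that is only populated in beforeAll, so tests must read it lazily (inside the test body or via a getter) rather than capturing the values at describe time. A hedged sketch of the intended call pattern, using assumed option values:

import { test } from "bun:test";
import { describeWithContainer } from "harness";

describeWithContainer("redis", { image: "redis_plain", env: {}, args: [] }, container => {
  // container.port is still 0 here; beforeAll fills it in before any test runs.
  test("ping", async () => {
    const url = `redis://${container.host}:${container.port}`; // read lazily inside the test
    // ... connect using the resolved url
  });
});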
@@ -8,6 +8,9 @@ import path from "path";
const s3 = (...args) => defaultS3.file(...args);
const S3 = (...args) => new S3Client(...args);

// Import docker-compose helper
import * as dockerCompose from "../../../docker/index.ts";

const dockerCLI = which("docker") as string;
function isDockerEnabled(): boolean {
  if (!dockerCLI) {
@@ -36,48 +39,26 @@ const allCredentials: S3Credentials[] = [
];

if (isDockerEnabled()) {
  const result = child_process.spawnSync(
    "docker",
    [
      "run",
      "-d",
      "--name",
      "minio",
      "-p",
      "9000:9000",
      "-p",
      "9001:9001",
      "-e",
      "MINIO_ROOT_USER=minioadmin",
      "-e",
      "MINIO_ROOT_PASSWORD=minioadmin",
      "--mount",
      "type=tmpfs,destination=/data",
      "minio/minio",
      "server",
      "--console-address",
      ":9001",
      "/data",
    ],
    {
      stdio: ["ignore", "pipe", "pipe"],
    },
  );
  // Use docker-compose to start MinIO
  const minioInfo = await dockerCompose.ensure("minio");

  if (result.error) {
    if (!result.error.message.includes('The container name "/minio" is already in use by container'))
      throw result.error;
  // Get container name for docker exec
  const containerName = child_process
    .execSync(
      `docker ps --filter "ancestor=minio/minio:latest" --filter "status=running" --format "{{.Names}}" | head -1`,
      { encoding: "utf-8" },
    )
    .trim();

  if (containerName) {
    // Create a bucket using mc inside the container
    child_process.spawnSync(dockerCLI, [`exec`, containerName, `mc`, `mb`, `data/buntest`], {
      stdio: "ignore",
    });
  }
  // wait for minio to be ready
  await Bun.sleep(1_000);

  /// create a bucket
  child_process.spawnSync(dockerCLI, [`exec`, `minio`, `mc`, `mb`, `data/buntest`], {
    stdio: "ignore",
  });

  minioCredentials = {
    endpoint: "http://localhost:9000", // MinIO endpoint
    endpoint: `http://${minioInfo.host}:${minioInfo.ports[9000]}`, // MinIO endpoint from docker-compose
    accessKeyId: "minioadmin",
    secretAccessKey: "minioadmin",
    bucket: "buntest",
@@ -5,21 +5,20 @@ import { describeWithContainer } from "harness";
describeWithContainer(
  "mysql",
  {
    image: "mysql:8.0.43",
    env: {
      MYSQL_ROOT_PASSWORD: "bun",
      MYSQL_DEFAULT_AUTHENTICATION_PLUGIN: "mysql_native_password",
    },
    args: ["--default-authentication-plugin=mysql_native_password"],
    image: "mysql_native_password",
    env: {},
    args: [],
  },
  (port: number) => {
    const options = {
      url: `mysql://root:bun@localhost:${port}`,
      max: 1,
    };
  container => {
    // Create getters that will be evaluated when the test runs
    const getUrl = () => `mysql://root:bun@${container.host}:${container.port}/bun_sql_test`;

    test("should be able to connect with mysql_native_password auth plugin", async () => {
      const sql = new SQL({ ...options, password: "bun" });
      console.log("Container info in test:", container);
      const sql = new SQL({
        url: getUrl(),
        max: 1,
      });
      const result = await sql`select 1 as x`;
      expect(result).toEqual([{ x: 1 }]);
      await sql.end();
@@ -27,13 +26,17 @@ describeWithContainer(

    test("should be able to switch auth plugin", async () => {
      {
        const sql = new SQL({ ...options, password: "bun" });
        const sql = new SQL({
          url: getUrl(),
          max: 1,
        });

        await sql`DROP USER IF EXISTS caching@'%';`.simple();
        await sql`CREATE USER caching@'%' IDENTIFIED WITH caching_sha2_password BY 'bunbun';
          GRANT ALL PRIVILEGES ON mysql.* TO caching@'%';
          GRANT ALL PRIVILEGES ON bun_sql_test.* TO caching@'%';
          FLUSH PRIVILEGES;`.simple();
      }
      const sql = new SQL(`mysql://caching:bunbun@localhost:${port}`);
      const sql = new SQL(`mysql://caching:bunbun@${container.host}:${container.port}/bun_sql_test`);
      const result = await sql`select 1 as x`;
      expect(result).toEqual([{ x: 1 }]);
      await sql.end();
File diff suppressed because it is too large
@@ -4,13 +4,6 @@ import { bunEnv, bunExe, isCI, isDockerEnabled, tempDirWithFiles } from "harness
import path from "path";
const postgres = (...args) => new SQL(...args);

import { exec } from "child_process";
import net from "net";
import { promisify } from "util";

const execAsync = promisify(exec);
const dockerCLI = Bun.which("docker") as string;

const dir = tempDirWithFiles("sql-test", {
  "select-param.sql": `select $1 as x`,
  "select.sql": `select 1 as x`,
@@ -19,85 +12,25 @@ const dir = tempDirWithFiles("sql-test", {
function rel(filename: string) {
  return path.join(dir, filename);
}
async function findRandomPort() {
  return new Promise<number>((resolve, reject) => {
    // Create a server to listen on a random port
    const server = net.createServer();
    server.listen(0, () => {
      const port = (server.address() as import("node:net").AddressInfo).port;
      server.close(() => resolve(port));
    });
    server.on("error", reject);
  });
}
// Use docker-compose infrastructure
import * as dockerCompose from "../../docker/index.ts";

async function waitForPostgres(port: number, count = 10) {
  console.log(`Attempting to connect to postgres://postgres@localhost:${port}/postgres`);

  for (let i = 0; i < count; i++) {
    try {
      const sql = new SQL(`postgres://postgres@localhost:${port}/postgres`, {
        idle_timeout: 20,
        max_lifetime: 60 * 30,
      });

      await sql`SELECT 1`;
      await sql.end();
      console.log("PostgreSQL is ready!");
      return true;
    } catch (error) {
      console.log(`Waiting for PostgreSQL... (${i + 1}/${count})`, error);
      if (error && typeof error === "object" && "stack" in error) {
        console.log("Error stack:", error.stack);
      }
      await new Promise(resolve => setTimeout(resolve, 1000));
    }
  }
  throw new Error("PostgreSQL failed to start");
}

async function startContainer(): Promise<{ port: number; containerName: string }> {
  try {
    // Build the Docker image
    console.log("Building Docker image...");
    const dockerfilePath = path.join(import.meta.dir, "docker", "Dockerfile");
    await execAsync(`${dockerCLI} build --pull --rm -f "${dockerfilePath}" -t custom-postgres .`, {
      cwd: path.join(import.meta.dir, "docker"),
    });
    const port = await findRandomPort();
    const containerName = `postgres-test-${port}`;
    // Check if container exists and remove it
    try {
      await execAsync(`${dockerCLI} rm -f ${containerName}`);
    } catch (error) {
      // Container might not exist, ignore error
    }

    // Start the container
    await execAsync(`${dockerCLI} run -d --name ${containerName} -p ${port}:5432 custom-postgres`);

    // Wait for PostgreSQL to be ready
    await waitForPostgres(port);
    return {
      port,
      containerName,
    };
  } catch (error) {
    console.error("Error:", error);
    process.exit(1);
  }
async function startContainer(): Promise<{ port: number; host: string }> {
  const info = await dockerCompose.ensure("postgres_plain");
  console.log("PostgreSQL container ready at:", info.host + ":" + info.ports[5432]);
  return {
    port: info.ports[5432],
    host: info.host,
  };
}

if (isDockerEnabled()) {
  const container: { port: number; containerName: string } = await startContainer();
  const container = await startContainer();
  afterAll(async () => {
    try {
      await execAsync(`${dockerCLI} stop -t 0 ${container.containerName}`);
    } catch (error) {}

    try {
      await execAsync(`${dockerCLI} rm -f ${container.containerName}`);
    } catch (error) {}
    // Containers persist - managed by docker-compose
    if (!process.env.BUN_KEEP_DOCKER) {
      await dockerCompose.down();
    }
  });

  // require("./bootstrap.js");
@@ -128,7 +61,7 @@ if (isDockerEnabled()) {
  // host replication all 127.0.0.1/32 trust
  // host replication all ::1/128 trust
  // --- Expected pg_hba.conf ---
  process.env.DATABASE_URL = `postgres://bun_sql_test@localhost:${container.port}/bun_sql_test`;
  process.env.DATABASE_URL = `postgres://bun_sql_test@${container.host}:${container.port}/bun_sql_test`;

  const net = require("node:net");
  const fs = require("node:fs");
@@ -149,8 +82,8 @@ if (isDockerEnabled()) {

    // Create connection to the actual PostgreSQL container
    const containerSocket = net.createConnection({
      host: login.host,
      port: login.port,
      host: container.host,
      port: container.port,
    });

    // Handle container connection
@@ -204,12 +137,14 @@ if (isDockerEnabled()) {

  const login: Bun.SQL.PostgresOrMySQLOptions = {
    username: "bun_sql_test",
    host: container.host,
    port: container.port,
    path: socketPath,
  };

  const login_domain_socket: Bun.SQL.PostgresOrMySQLOptions = {
    username: "bun_sql_test",
    host: container.host,
    port: container.port,
    path: socketPath,
  };
@@ -217,12 +152,14 @@ if (isDockerEnabled()) {
  const login_md5: Bun.SQL.PostgresOrMySQLOptions = {
    username: "bun_sql_test_md5",
    password: "bun_sql_test_md5",
    host: container.host,
    port: container.port,
  };

  const login_scram: Bun.SQL.PostgresOrMySQLOptions = {
    username: "bun_sql_test_scram",
    password: "bun_sql_test_scram",
    host: container.host,
    port: container.port,
  };

@@ -230,6 +167,7 @@ if (isDockerEnabled()) {
    db: "bun_sql_test",
    username: login.username,
    password: login.password,
    host: container.host,
    port: container.port,
    max: 1,
  };
@@ -481,7 +419,7 @@ if (isDockerEnabled()) {

  test("Connects with no options", async () => {
    // we need at least the usename and port
    await using sql = postgres({ max: 1, port: container.port, username: login.username });
    await using sql = postgres({ max: 1, host: container.host, port: container.port, username: login.username });

    const result = (await sql`select 1 as x`)[0].x;
    sql.close();
@@ -2685,7 +2623,7 @@ if (isDockerEnabled()) {
      expect(e).toBeInstanceOf(SQL.SQLError);
      expect(e).toBeInstanceOf(SQL.PostgresError);
      expect(e.code).toBe("ERR_POSTGRES_CONNECTION_TIMEOUT");
      expect(e.message).toMatch(/Connection timed out after 200ms/);
      expect(e.message).toMatch(/Connection timeout after 200ms/);
    } finally {
      sql.close();
      server.close();